Schema (field: dtype)

hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
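Every qsc_*_quality_signal measurement above is paired with a same-named qsc_* integer column (plus effective and hits), which reads like a raw-signal / derived-flag split. Below is a minimal sketch of how a shard with this schema could be inspected with pandas; the file name shard.parquet is a placeholder, not a real path from this dataset:

import pandas as pd

# Placeholder file name: point this at an actual shard of the dataset.
df = pd.read_parquet("shard.parquet")

# Reproduce the field/dtype listing above.
print(df.dtypes.to_string())

# Pair each raw quality signal with its same-named integer flag column.
signals = [c for c in df.columns if c.endswith("_quality_signal")]
flags = [c[: -len("_quality_signal")] for c in signals]
print(df[["hexsha", "lang", "size"] + flags[:3]].head())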
Row 1

hexsha: d7ba9ab32dabbc6aefdb98a52e78ab43cac03e12
size: 4,510
ext: py
lang: Python
max_stars_repo_path: tests/accounts/test_forms.py
max_stars_repo_name: sks-sys/djangocicd
max_stars_repo_head_hexsha: c5b1c5b11b38ebd1be1cb2f138ca21e976282ab8
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tests/accounts/test_forms.py
max_issues_repo_name: sks-sys/djangocicd
max_issues_repo_head_hexsha: c5b1c5b11b38ebd1be1cb2f138ca21e976282ab8
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tests/accounts/test_forms.py
max_forks_repo_name: sks-sys/djangocicd
max_forks_repo_head_hexsha: c5b1c5b11b38ebd1be1cb2f138ca21e976282ab8
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null

content:

from django.test import TestCase

from accounts.forms import EmployeeRegistrationForm, EmployerRegistrationForm
from accounts.models import User


class TestEmployeeRegistrationForm(TestCase):
    fixtures = ["accounts_initial_data.json"]

    def setUp(self) -> None:
        self.valid_user = {
            "first_name": "Manjurul",
            "last_name": "Hoque",
            "role": "employee",
            "gender": "male",
            "email": "rumi1@gmail.com",
            "password1": "123456",
            "password2": "123456",
        }

    def test_field_required(self):
        form = EmployeeRegistrationForm(data={})
        self.assertEqual(form.errors["gender"], ["Gender is required"])
        self.assertEqual(form.errors["email"], ["This field is required."])
        self.assertEqual(form.errors["password1"], ["This field is required."])
        self.assertEqual(form.errors["password2"], ["This field is required."])

    def test_employee_registration_form_valid(self):
        form = EmployeeRegistrationForm(data=self.valid_user)
        self.assertEqual(True, form.is_valid(), "Invalid form")

    def test_invalid_email(self):
        data = self.valid_user
        data["email"] = "test"
        form = EmployeeRegistrationForm(data=data)
        self.assertFalse(form.is_valid(), "Invalid email")

    def test_too_short_password(self):
        data = self.valid_user
        data["password1"] = "test"
        form = EmployeeRegistrationForm(data=data)
        self.assertFalse(form.is_valid())

    def test_meta_data(self):
        self.assertEqual(EmployeeRegistrationForm._meta.model, User)
        expected_fields = ["first_name", "last_name", "email", "password1", "password2", "gender"]
        for field in expected_fields:
            self.assertIn(field, EmployeeRegistrationForm._meta.fields)

    def test_password_mismatch(self):
        # Set confirm password field to a different value
        data = self.valid_user
        data["password2"] = "54321"
        form = EmployeeRegistrationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors["password2"][0], "The two password fields didn’t match.")

    def test_valid_and_save_form(self):
        form = EmployeeRegistrationForm(data=self.valid_user)
        form.is_valid()
        user = form.save()
        self.assertIsInstance(user, User, "Not an user")


class TestEmployerRegistrationForm(TestCase):
    fixtures = ["accounts_initial_data.json"]

    def setUp(self) -> None:
        self.valid_user = {
            "first_name": "John",
            "last_name": "Doe",
            "email": "employer@gmail.com",
            "password1": "123456",
            "password2": "123456",
        }

    def test_field_required(self):
        form = EmployerRegistrationForm(data={})
        self.assertEqual(form.errors["email"], ["This field is required."])
        self.assertEqual(form.errors["password1"], ["This field is required."])
        self.assertEqual(form.errors["password2"], ["This field is required."])

    def test_employee_registration_form_valid(self):
        form = EmployerRegistrationForm(data=self.valid_user)
        self.assertEqual(True, form.is_valid(), "Invalid form")

    def test_invalid_email(self):
        data = self.valid_user
        data["email"] = "test"
        form = EmployerRegistrationForm(data=data)
        self.assertFalse(form.is_valid(), "Invalid email")

    def test_too_short_password(self):
        data = self.valid_user
        data["password1"] = "test"
        form = EmployeeRegistrationForm(data=data)
        self.assertFalse(form.is_valid())

    def test_meta_data(self):
        self.assertEqual(EmployerRegistrationForm._meta.model, User)
        expected_fields = ["first_name", "last_name", "email", "password1", "password2"]
        for field in expected_fields:
            self.assertIn(field, EmployeeRegistrationForm._meta.fields)

    def test_password_mismatch(self):
        # Set confirm password field to a different value
        data = self.valid_user
        data["password2"] = "54321"
        form = EmployerRegistrationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors["password2"][0], "The two password fields didn’t match.")

    def test_valid_and_save_form(self):
        form = EmployerRegistrationForm(data=self.valid_user)
        form.is_valid()
        user = form.save()
        self.assertIsInstance(user, User, "Not an user")

avg_line_length: 35.793651
max_line_length: 98
alphanum_fraction: 0.648559
qsc_code_num_words_quality_signal: 485
qsc_code_num_chars_quality_signal: 4,510
qsc_code_mean_word_length_quality_signal: 5.865979
qsc_code_frac_words_unique_quality_signal: 0.169072
qsc_code_frac_chars_top_2grams_quality_signal: 0.056239
qsc_code_frac_chars_top_3grams_quality_signal: 0.054833
qsc_code_frac_chars_top_4grams_quality_signal: 0.059754
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.855185
qsc_code_frac_chars_dupe_6grams_quality_signal: 0.826714
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.823199
qsc_code_frac_chars_dupe_8grams_quality_signal: 0.789455
qsc_code_frac_chars_dupe_9grams_quality_signal: 0.789455
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.789455
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0.015836
qsc_code_frac_chars_whitespace_quality_signal: 0.229933
qsc_code_size_file_byte_quality_signal: 4,510
qsc_code_num_lines_quality_signal: 125
qsc_code_num_chars_line_max_quality_signal: 99
qsc_code_num_chars_line_mean_quality_signal: 36.08
qsc_code_frac_chars_alphabet_quality_signal: 0.80334
qsc_code_frac_chars_comments_quality_signal: 0.021064
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.734043
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0.174297
qsc_code_frac_chars_long_word_length_quality_signal: 0.011786
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0
qsc_code_frac_lines_assert_quality_signal: 0.244681
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0.170213
qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0.212766
qsc_codepython_frac_lines_import_quality_signal: 0.031915
qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.244681
qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_num_words: 0
qsc_code_num_chars: 0
qsc_code_mean_word_length: 0
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: 0
qsc_code_frac_chars_top_3grams: 0
qsc_code_frac_chars_top_4grams: 0
qsc_code_frac_chars_dupe_5grams: 1
qsc_code_frac_chars_dupe_6grams: 1
qsc_code_frac_chars_dupe_7grams: 1
qsc_code_frac_chars_dupe_8grams: 1
qsc_code_frac_chars_dupe_9grams: 1
qsc_code_frac_chars_dupe_10grams: 1
qsc_code_frac_chars_replacement_symbols: 0
qsc_code_frac_chars_digital: 0
qsc_code_frac_chars_whitespace: 0
qsc_code_size_file_byte: 0
qsc_code_num_lines: 0
qsc_code_num_chars_line_max: 0
qsc_code_num_chars_line_mean: 0
qsc_code_frac_chars_alphabet: 0
qsc_code_frac_chars_comments: 0
qsc_code_cate_xml_start: 0
qsc_code_frac_lines_dupe_lines: 1
qsc_code_cate_autogen: 0
qsc_code_frac_lines_long_string: 0
qsc_code_frac_chars_string_length: 0
qsc_code_frac_chars_long_word_length: 0
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: 0
qsc_code_frac_chars_hex_words: 0
qsc_code_frac_lines_prompt_comments: 0
qsc_code_frac_lines_assert: 0
qsc_codepython_cate_ast: 0
qsc_codepython_frac_lines_func_ratio: 0
qsc_codepython_cate_var_zero: 0
qsc_codepython_frac_lines_pass: 1
qsc_codepython_frac_lines_import: 0
qsc_codepython_frac_lines_simplefunc: 0
qsc_codepython_score_lines_no_logic: 0
qsc_codepython_frac_lines_print: 0
effective: 0
hits: 8
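The basic statistics in a row can be recomputed from content. Below is a sketch under assumed definitions (alphanumeric characters over total characters for alphanum_fraction; total length over line count for the averages). On Row 1, 4510 / 126 ≈ 35.79 matches avg_line_length while 4510 / 125 = 36.08 matches qsc_code_num_chars_line_mean_quality_signal, so the two columns appear to differ only in whether the trailing newline contributes a final empty line; that reading is an inference from the numbers, not documented:

def basic_stats(content: str) -> dict:
    """Recompute per-file statistics; the definitions are assumptions."""
    lines = content.split("\n")  # a trailing newline yields a final empty line
    return {
        "size": len(content),
        "avg_line_length": len(content) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / len(content),
    }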
Row 2

hexsha: d7bb67e0009d198040dd3126546c63b3bd6344b6
size: 4,037
ext: py
lang: Python
max_stars_repo_path: association-backend/src/internal_api/tests/queries/test_user_id_is_member.py
max_stars_repo_name: pauloxnet/pycon
max_stars_repo_head_hexsha: 82b6eff76dcc785865ea3ffd97a45e931c0add26
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 2
max_stars_repo_stars_event_min_datetime: 2017-07-18T21:51:25.000Z
max_stars_repo_stars_event_max_datetime: 2017-12-23T11:08:39.000Z
max_issues_repo_path: association-backend/src/internal_api/tests/queries/test_user_id_is_member.py
max_issues_repo_name: pauloxnet/pycon
max_issues_repo_head_hexsha: 82b6eff76dcc785865ea3ffd97a45e931c0add26
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 23
max_issues_repo_issues_event_min_datetime: 2017-07-18T20:22:38.000Z
max_issues_repo_issues_event_max_datetime: 2018-01-05T05:45:15.000Z
max_forks_repo_path: association-backend/src/internal_api/tests/queries/test_user_id_is_member.py
max_forks_repo_name: pauloxnet/pycon
max_forks_repo_head_hexsha: 82b6eff76dcc785865ea3ffd97a45e931c0add26
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 2
max_forks_repo_forks_event_min_datetime: 2017-07-18T21:27:33.000Z
max_forks_repo_forks_event_max_datetime: 2017-07-18T22:07:03.000Z

content:

from ward import each, test

from src.association.tests.session import db
from src.association_membership.domain.entities import SubscriptionStatus
from src.association_membership.tests.factories import SubscriptionFactory
from src.internal_api.tests.fixtures import internalapi_graphql_client


@test("user does not exist")
async def _(
    internalapi_graphql_client=internalapi_graphql_client,
    db=db,
):
    internalapi_graphql_client.force_service_login(
        issuer="pycon-backend", audience="association-backend"
    )
    query = """query($id: ID!) { userIdIsMember(id: $id) }"""
    response = await internalapi_graphql_client.query(query, variables={"id": "1"})
    assert not response.errors
    assert response.data["userIdIsMember"] is False


@test("user is a member")
async def _(
    internalapi_graphql_client=internalapi_graphql_client,
    db=db,
):
    await SubscriptionFactory(user_id=1, status=SubscriptionStatus.ACTIVE)
    internalapi_graphql_client.force_service_login(
        issuer="pycon-backend", audience="association-backend"
    )
    query = """query($id: ID!) { userIdIsMember(id: $id) }"""
    response = await internalapi_graphql_client.query(query, variables={"id": "1"})
    assert not response.errors
    assert response.data["userIdIsMember"] is True


@test("user has a {status} membership so is not a member")
async def _(
    internalapi_graphql_client=internalapi_graphql_client,
    db=db,
    status=each(SubscriptionStatus.CANCELED, SubscriptionStatus.PENDING),
):
    await SubscriptionFactory(user_id=1, status=status)
    internalapi_graphql_client.force_service_login(
        issuer="pycon-backend", audience="association-backend"
    )
    query = """query($id: ID!) { userIdIsMember(id: $id) }"""
    response = await internalapi_graphql_client.query(query, variables={"id": "1"})
    assert not response.errors
    assert response.data["userIdIsMember"] is False


@test("requires authentication")
async def _(
    internalapi_graphql_client=internalapi_graphql_client,
    db=db,
):
    await SubscriptionFactory(user_id=1, status=SubscriptionStatus.ACTIVE)
    query = """query($id: ID!) { userIdIsMember(id: $id) }"""
    response = await internalapi_graphql_client.query(query, variables={"id": "1"})
    assert response.errors[0]["message"] == "Forbidden"
    assert not response.data


@test("requires authentication of allowed service")
async def _(
    internalapi_graphql_client=internalapi_graphql_client,
    db=db,
):
    await SubscriptionFactory(user_id=1, status=SubscriptionStatus.ACTIVE)
    internalapi_graphql_client.force_service_login(
        issuer="random-service", audience="association-backend"
    )
    query = """query($id: ID!) { userIdIsMember(id: $id) }"""
    response = await internalapi_graphql_client.query(query, variables={"id": "1"})
    assert response.errors[0]["message"] == "Forbidden"
    assert not response.data


@test("invalid id raises an error")
async def _(
    internalapi_graphql_client=internalapi_graphql_client,
    db=db,
):
    internalapi_graphql_client.force_service_login(
        issuer="pycon-backend", audience="association-backend"
    )
    query = """query($id: ID!) { userIdIsMember(id: $id) }"""
    response = await internalapi_graphql_client.query(query, variables={"id": "abc-1"})
    assert (
        response.errors[0]["message"]
        == "invalid literal for int() with base 10: 'abc-1'"
    )
    assert not response.data


@test("empty id raises an error")
async def _(
    internalapi_graphql_client=internalapi_graphql_client,
    db=db,
):
    internalapi_graphql_client.force_service_login(
        issuer="pycon-backend", audience="association-backend"
    )
    query = """query($id: ID!) { userIdIsMember(id: $id) }"""
    response = await internalapi_graphql_client.query(query, variables={"id": ""})
    assert response.errors[0]["message"] == "Invalid ID"
    assert not response.data

avg_line_length: 28.835714
max_line_length: 87
alphanum_fraction: 0.701511
qsc_code_num_words_quality_signal: 457
qsc_code_num_chars_quality_signal: 4,037
qsc_code_mean_word_length_quality_signal: 6.017505
qsc_code_frac_words_unique_quality_signal: 0.166302
qsc_code_frac_chars_top_2grams_quality_signal: 0.183273
qsc_code_frac_chars_top_3grams_quality_signal: 0.244364
qsc_code_frac_chars_top_4grams_quality_signal: 0.066182
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.809455
qsc_code_frac_chars_dupe_6grams_quality_signal: 0.800364
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.761091
qsc_code_frac_chars_dupe_8grams_quality_signal: 0.761091
qsc_code_frac_chars_dupe_9grams_quality_signal: 0.761091
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.761091
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0.005113
qsc_code_frac_chars_whitespace_quality_signal: 0.176369
qsc_code_size_file_byte_quality_signal: 4,037
qsc_code_num_lines_quality_signal: 139
qsc_code_num_chars_line_max_quality_signal: 88
qsc_code_num_chars_line_mean_quality_signal: 29.043165
qsc_code_frac_chars_alphabet_quality_signal: 0.821955
qsc_code_frac_chars_comments_quality_signal: 0
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.731481
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0.234332
qsc_code_frac_chars_long_word_length_quality_signal: 0
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0
qsc_code_frac_lines_assert_quality_signal: 0.12963
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0
qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0
qsc_codepython_frac_lines_import_quality_signal: 0.046296
qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.046296
qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_num_words: 0
qsc_code_num_chars: 0
qsc_code_mean_word_length: 0
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: 0
qsc_code_frac_chars_top_3grams: 1
qsc_code_frac_chars_top_4grams: 0
qsc_code_frac_chars_dupe_5grams: 1
qsc_code_frac_chars_dupe_6grams: 1
qsc_code_frac_chars_dupe_7grams: 1
qsc_code_frac_chars_dupe_8grams: 1
qsc_code_frac_chars_dupe_9grams: 1
qsc_code_frac_chars_dupe_10grams: 1
qsc_code_frac_chars_replacement_symbols: 0
qsc_code_frac_chars_digital: 0
qsc_code_frac_chars_whitespace: 0
qsc_code_size_file_byte: 0
qsc_code_num_lines: 0
qsc_code_num_chars_line_max: 0
qsc_code_num_chars_line_mean: 0
qsc_code_frac_chars_alphabet: 0
qsc_code_frac_chars_comments: 0
qsc_code_cate_xml_start: 0
qsc_code_frac_lines_dupe_lines: 1
qsc_code_cate_autogen: 0
qsc_code_frac_lines_long_string: 0
qsc_code_frac_chars_string_length: 0
qsc_code_frac_chars_long_word_length: 0
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: 0
qsc_code_frac_chars_hex_words: 0
qsc_code_frac_lines_prompt_comments: 0
qsc_code_frac_lines_assert: 0
qsc_codepython_cate_ast: 0
qsc_codepython_frac_lines_func_ratio: 0
qsc_codepython_cate_var_zero: 0
qsc_codepython_frac_lines_pass: 0
qsc_codepython_frac_lines_import: 0
qsc_codepython_frac_lines_simplefunc: 0
qsc_codepython_score_lines_no_logic: 0
qsc_codepython_frac_lines_print: 0
effective: 0
hits: 8
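The frac_chars_top_{n}grams and frac_chars_dupe_{n}grams columns follow the naming of the RedPajama-style text quality signals. Below is a sketch of one plausible reading of the duplicate-n-gram fraction (share of characters that fall inside a word n-gram occurring more than once); whitespace tokenization and the exact character accounting are assumptions, not this dataset's documented definition:

from collections import Counter

def frac_chars_dupe_ngrams(content: str, n: int) -> float:
    """Assumed RedPajama-style duplicate n-gram character fraction."""
    words = content.split()  # whitespace tokenization is an assumption
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    # Mark every word position covered by an n-gram that repeats.
    covered = [False] * len(words)
    for i, gram in enumerate(ngrams):
        if counts[gram] > 1:
            for j in range(i, i + n):
                covered[j] = True
    total_chars = sum(len(w) for w in words)
    dupe_chars = sum(len(w) for w, c in zip(words, covered) if c)
    return dupe_chars / total_chars if total_chars else 0.0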
Row 3

hexsha: d7bd708415705c1990c9ef2d25cf89e0758cbf40
size: 42,663
ext: py
lang: Python
max_stars_repo_path: tools/Vitis-AI-Quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/core/quantization/keras/vitis/common/vitis_quantize_ops.py
max_stars_repo_name: hito0512/Vitis-AI
max_stars_repo_head_hexsha: 996459fb96cb077ed2f7e789d515893b1cccbc95
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2022-02-22T02:05:01.000Z
max_stars_repo_stars_event_max_datetime: 2022-02-22T02:05:01.000Z
max_issues_repo_path: tools/Vitis-AI-Quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/core/quantization/keras/vitis/common/vitis_quantize_ops.py
max_issues_repo_name: hito0512/Vitis-AI
max_issues_repo_head_hexsha: 996459fb96cb077ed2f7e789d515893b1cccbc95
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tools/Vitis-AI-Quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/core/quantization/keras/vitis/common/vitis_quantize_ops.py
max_forks_repo_name: hito0512/Vitis-AI
max_forks_repo_head_hexsha: 996459fb96cb077ed2f7e789d515893b1cccbc95
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null

content:

# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python support for quantization operations."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys

import tensorflow as tf
import numpy as np

from tensorflow.python.training import moving_averages
from tensorflow_model_optimization.python.core.keras import compat as tf_compat
from tensorflow.keras import layers
from tensorflow_model_optimization.python.core.quantization.keras.vitis.utils import common_utils

logger = common_utils.VAILogger

narrow_range = False


def std_round(x):
  """ROUND_HALF_AWAY_FROM_ZERO, used in std round/py2 round.

  f(x) = std::round(x)
       = ceil(x),  x - floor(x) == 0.5 && x > 0
         round(x), x - floor(x) != 0.5
         floor(x), x - floor(x) == 0.5 && x < 0
  eg: f(2.3) = 2, f(1.5) = 2, f(-1.5) = -2, f(2.5) = 3, f(-2.5) = -3, f(-2.6) = -3
  """
  floored = tf.math.floor(x)
  ceiled = tf.math.ceil(x)
  rounded = tf.math.round(x)
  rounded_half = tf.where(x > 0, ceiled, floored)
  rounded = tf.where(tf.math.equal(x - floored, 0.5), rounded_half, rounded)
  return rounded


def py3_round(x):
  """ROUND_HALF_TO_EVEN, used in py3 round, tf.round or numpy.round.

  f(x) = round(x)
  eg: f(2.3) = 2, f(1.5) = 2, f(-1.5) = -2, f(2.5) = 2, f(-2.5) = -2, f(-2.6) = -3
  """
  rounded = tf.math.round(x)
  return rounded


def dpu_round(x):
  """ROUND_HALF_UP, used in dpu round.

  f(x) = (x - floor(x) == 0.5) ? ceil(x) : round(x)
       = floor(x + 0.5)
  eg: f(2.3) = 2, f(1.5) = 2, f(-1.5) = -1, f(2.5) = 3, f(-2.5) = -2, f(-2.6) = -3
  """
  rounded = tf.math.floor(x + 0.5)
  return rounded


def py3_asym_quantize(inputs, scale, shift, q_min, q_max):
  """Quantize Kernel. Q(x) = q_min + round[(x-shift) * scale]."""
  with tf.name_scope("Py3AsymQuantize"):
    rounded = py3_round((inputs - shift) * scale)
    quantized = tf.clip_by_value(q_min + rounded, q_min, q_max)
    return quantized


def dpu_asym_quantize(inputs, scale, shift, q_min, q_max):
  """DPU Quantize Kernel. Q(x) = q_min + dpu_round[(x - shift) * scale]."""
  with tf.name_scope("DPUAsymQuantize"):
    rounded = dpu_round((inputs - shift) * scale)
    quantized = tf.clip_by_value(q_min + rounded, q_min, q_max)
    return quantized


def py3_sym_quantize(inputs, scale, q_min, q_max):
  """Quantize Kernel. Q(x) = round[(x) * scale]."""
  with tf.name_scope("Py3SymQuantize"):
    rounded = py3_round(inputs * scale)
    quantized = tf.clip_by_value(rounded, q_min, q_max)
    return quantized


def dpu_sym_quantize(inputs, scale, q_min, q_max):
  """DPU Quantize Kernel. Q(x) = dpu_round[(x) * scale]."""
  with tf.name_scope("DpuSymQuantize"):
    rounded = dpu_round(inputs * scale)
    quantized = tf.clip_by_value(rounded, q_min, q_max)
    return quantized


def asym_dequantize(inputs, scale, shift, q_min, q_max):
  """Dequantize Kernel. DQ(x) = (x - q_min) / scale + shift."""
  with tf.name_scope("AsymDequantize"):
    return (inputs - q_min) / scale + shift


def sym_dequantize(inputs, scale, q_min, q_max):
  """Dequantize Kernel. DQ(x) = x / scale."""
  with tf.name_scope("SymDequantize"):
    return inputs / scale


def quantize_zero_point(scale, f_min, f_max, q_min, q_max):
  """Quantize the zero point."""
  with tf.name_scope("QuantizeZeroPoint"):
    f_zero_point = q_min - f_min * scale
    below_min = (f_zero_point < q_min)
    above_max = (f_zero_point > q_max)
    q_zero_point = std_round(f_zero_point)
    q_zero_point = tf.where(below_min, q_min, q_zero_point)
    q_zero_point = tf.where(above_max, q_max, q_zero_point)
    new_f_min = (q_min - q_zero_point) / scale
    new_f_max = (q_max - q_zero_point) / scale
    return q_zero_point, new_f_min, new_f_max


def get_scale(f_min, f_max, q_min, q_max):
  """Get quantize scaling factor."""
  return (q_max - q_min) / (f_max - f_min)


def get_min_max(inputs, bit_width, symmetry=True, per_channel=False,
                reduce_dims=None):
  """Get minimum and maximum value of inputs."""
  input_shape = inputs.get_shape()
  input_dim = len(input_shape)
  if per_channel:
    if input_dim >= 2:
      batch_min = tf.math.reduce_min(
          inputs, axis=reduce_dims, keepdims=True, name='batch_min')
      batch_max = tf.math.reduce_max(
          inputs, axis=reduce_dims, keepdims=True, name='batch_max')
    else:
      batch_min = inputs
      batch_max = inputs
  else:
    batch_min = tf.math.reduce_min(inputs, name='batch_min')
    batch_max = tf.math.reduce_max(inputs, name='batch_max')

  if symmetry:
    if narrow_range:
      range_min = tf.minimum(batch_min, -batch_max)
      range_max = tf.maximum(batch_max, -batch_min)
    else:
      # Use full range of bit_width, the negative range is slightly larger than the positive range.
      min_max_ratio = -((1 << bit_width) - 2) / (1 << bit_width)
      range_min = tf.minimum(batch_min, batch_max / min_max_ratio)
      range_max = tf.maximum(batch_max, batch_min * min_max_ratio)
  else:
    range_min = tf.math.minimum(batch_min, 0.0, name='range_min')
    range_max = tf.math.maximum(batch_max, 0.0, name='range_max')

  return range_min, range_max


@tf.custom_gradient
def fake_quantize_with_min_max_py3_asym(inputs, f_min, f_max, bit_width):
  """The fake quantization operation kernel with py3 asymmetry round mode.

  Args:
    inputs: a tensor containing values to be quantized.
    f_min: the minimum input value
    f_max: the maximum input value
    bit_width: the bit width
  Returns:
    a tensor containing quantized values.
  """
  with tf.name_scope("FakeQuantizeWithMinMaxPy3Asym"):
    float_bit_width = tf.cast(bit_width, dtype=tf.float32, name="bit_width")
    bound = tf.math.pow(2.0, float_bit_width - 1)
    q_min = tf.math.negative(bound, name="q_min")
    if narrow_range:
      q_min = q_min + 1
    q_max = tf.math.subtract(bound, 1, name="q_max")
    scale = get_scale(f_min, f_max, q_min, q_max)
    q_zero_point, new_f_min, new_f_max = quantize_zero_point(
        scale, f_min, f_max, q_min, q_max)
    shift = new_f_min
    quantized = py3_asym_quantize(inputs, scale, shift, q_min, q_max)
    dequantized = asym_dequantize(quantized, scale, shift, q_min, q_max)

    def grad_fn(dy):
      between_min_max = (inputs >= new_f_min) & (inputs <= new_f_max)
      below_min = (inputs < new_f_min)
      above_max = (inputs > new_f_max)
      ones = tf.ones_like(dy)
      zeros = tf.zeros_like(dy)
      grad_wrt_inputs = dy * tf.where(between_min_max, ones, zeros)
      grad_wrt_f_min = tf.reduce_sum(dy * tf.where(below_min, ones, zeros))
      grad_wrt_f_max = tf.reduce_sum(dy * tf.where(above_max, ones, zeros))
      return grad_wrt_inputs, grad_wrt_f_min, grad_wrt_f_max, None

    return dequantized, grad_fn


@tf.custom_gradient
def fake_quantize_with_min_max_py3_asym_perc(inputs, f_min, f_max, bit_width,
                                             reduce_dims):
  """The fake quantization operation kernel with py3 asymmetry per_channel mode.

  Args:
    inputs: a tensor containing values to be quantized.
    f_min: the minimum input value
    f_max: the maximum input value
    bit_width: the bit width
    reduce_dims: the dimensions to be reduces for per_channel quantization
  Returns:
    a tensor containing quantized values.
  """
  with tf.name_scope("FakeQuantizeWithMinMaxPy3AsymPerC"):
    float_bit_width = tf.cast(bit_width, dtype=tf.float32, name="bit_width")
    bound = tf.math.pow(2.0, float_bit_width - 1)
    q_min = tf.math.negative(bound, name="q_min")
    if narrow_range:
      q_min = q_min + 1
    q_max = tf.math.subtract(bound, 1, name="q_max")
    scale = get_scale(f_min, f_max, q_min, q_max)
    q_zero_point, new_f_min, new_f_max = quantize_zero_point(
        scale, f_min, f_max, q_min, q_max)
    shift = new_f_min
    quantized = py3_asym_quantize(inputs, scale, shift, q_min, q_max)
    dequantized = asym_dequantize(quantized, scale, shift, q_min, q_max)

    def grad_fn(dy):
      between_min_max = (inputs >= new_f_min) & (inputs <= new_f_max)
      below_min = (inputs < new_f_min)
      above_max = (inputs > new_f_max)
      ones = tf.ones_like(dy)
      zeros = tf.zeros_like(dy)
      grad_wrt_inputs = dy * tf.where(between_min_max, ones, zeros)
      grad_wrt_f_min = tf.reduce_sum(
          dy * tf.where(below_min, ones, zeros), reduce_dims, keepdims=True)
      grad_wrt_f_max = tf.reduce_sum(
          dy * tf.where(above_max, ones, zeros), reduce_dims, keepdims=True)
      return grad_wrt_inputs, grad_wrt_f_min, grad_wrt_f_max, None, None

    return dequantized, grad_fn


@tf.custom_gradient
def fake_quantize_with_min_max_py3_sym(inputs, f_min, f_max, bit_width):
  """The fake quantization operation kernel with py3 symmetry mode.

  Args:
    inputs: a tensor containing values to be quantized.
    f_min: the minimum input value
    f_max: the maximum input value
    bit_width: the bit width
  Returns:
    a tensor containing quantized values.
  """
  with tf.name_scope("FakeQuantizeWithMinMaxPy3Sym"):
    float_bit_width = tf.cast(bit_width, dtype=tf.float32, name="bit_width")
    bound = tf.math.pow(2.0, float_bit_width - 1)
    q_min = tf.math.negative(bound, name="q_min")
    if narrow_range:
      q_min = q_min + 1
    q_max = tf.math.subtract(bound, 1, name="q_max")
    scale = get_scale(f_min, f_max, q_min, q_max)
    quantized = py3_sym_quantize(inputs, scale, q_min, q_max)
    dequantized = sym_dequantize(quantized, scale, q_min, q_max)

    def grad_fn(dy):
      between_min_max = (inputs >= f_min) & (inputs <= f_max)
      below_min = (inputs < f_min)
      above_max = (inputs > f_max)
      ones = tf.ones_like(dy)
      zeros = tf.zeros_like(dy)
      grad_wrt_inputs = dy * tf.where(between_min_max, ones, zeros)
      grad_wrt_f_min = tf.reduce_sum(dy * tf.where(below_min, ones, zeros))
      grad_wrt_f_max = tf.reduce_sum(dy * tf.where(above_max, ones, zeros))
      return grad_wrt_inputs, grad_wrt_f_min, grad_wrt_f_max, None

    return dequantized, grad_fn


@tf.custom_gradient
def fake_quantize_with_min_max_py3_sym_perc(inputs, f_min, f_max, bit_width,
                                            reduce_dims):
  """The fake quantization operation kernel with py3 symmetry perc mode.

  Args:
    inputs: a tensor containing values to be quantized.
    f_min: the minimum input value
    f_max: the maximum input value
    bit_width: the bit width
    channel_axis: the axis of channel
  Returns:
    a tensor containing quantized values.
  """
  with tf.name_scope("FakeQuantizeWithMinMaxPy3SymPerC"):
    float_bit_width = tf.cast(bit_width, dtype=tf.float32, name="bit_width")
    bound = tf.math.pow(2.0, float_bit_width - 1)
    q_min = tf.math.negative(bound, name="q_min")
    if narrow_range:
      q_min = q_min + 1
    q_max = tf.math.subtract(bound, 1, name="q_max")
    scale = get_scale(f_min, f_max, q_min, q_max)
    quantized = py3_sym_quantize(inputs, scale, q_min, q_max)
    dequantized = sym_dequantize(quantized, scale, q_min, q_max)

    def grad_fn(dy):
      between_min_max = (inputs >= f_min) & (inputs <= f_max)
      below_min = (inputs < f_min)
      above_max = (inputs > f_max)
      ones = tf.ones_like(dy)
      zeros = tf.zeros_like(dy)
      grad_wrt_inputs = dy * tf.where(between_min_max, ones, zeros)
      grad_wrt_f_min = tf.reduce_sum(
          dy * tf.where(below_min, ones, zeros), reduce_dims, keepdims=True)
      grad_wrt_f_max = tf.reduce_sum(
          dy * tf.where(above_max, ones, zeros), reduce_dims, keepdims=True)
      return grad_wrt_inputs, grad_wrt_f_min, grad_wrt_f_max, None, None

    return dequantized, grad_fn


@tf.custom_gradient
def fake_quantize_with_quantize_pos_py3_sym(inputs, quantize_pos, bit_width):
  """The fake quantization operation kernel with py3 symmetry round mode.

  Args:
    inputs: a tensor containing values to be quantized.
    quantize_pos: the quantize postion
    bit_width: the bit width
  Returns:
    a tensor containing quantized values.
  """
  with tf.name_scope("FakeQuantizeWithQuantizePosPy3Sym"):
    bit_width = tf.cast(bit_width, dtype=tf.float32, name="bit_width")
    bound = tf.math.pow(2.0, bit_width - 1)
    q_min = tf.math.negative(bound, name="q_min")
    if narrow_range:
      q_min = q_min + 1
    q_max = tf.math.subtract(bound, 1, name="q_max")
    scale = tf.math.pow(2.0, quantize_pos, name="scale")
    quantized = py3_sym_quantize(inputs, scale, q_min, q_max)
    dequantized = sym_dequantize(quantized, scale, q_min, q_max)

    def grad_fn(dy):
      return dy, None, None

    return dequantized, grad_fn


@tf.custom_gradient
def fake_quantize_with_quantize_pos_py3_asym(inputs, quantize_pos, f_min,
                                             f_max, bit_width):
  """The fake quantization operation kernel with py3 asymmetry round mode.

  Args:
    inputs: a tensor containing values to be quantized.
    quantize_pos: the quantize postion
    bit_width: the bit width
  Returns:
    a tensor containing quantized values.
  """
  with tf.name_scope("FakeQuantizeWithQuantizePosPy3Asym"):
    bit_width = tf.cast(bit_width, dtype=tf.float32, name="bit_width")
    bound = tf.math.pow(2.0, bit_width - 1)
    q_min = tf.math.negative(bound, name="q_min")
    if narrow_range:
      q_min = q_min + 1
    q_max = tf.math.subtract(bound, 1, name="q_max")
    scale = tf.math.pow(2.0, quantize_pos, name="scale")
    q_zero_point, new_f_min, new_f_max = quantize_zero_point(
        scale, f_min, f_max, q_min, q_max)
    shift = new_f_min
    quantized = py3_asym_quantize(inputs, scale, shift, q_min, q_max)
    dequantized = asym_dequantize(quantized, scale, shift, q_min, q_max)

    def grad_fn(dy):
      return dy, None, None, None, None

    return dequantized, grad_fn


@tf.custom_gradient
def fake_quantize_with_quantize_pos_py3_sym_perc(inputs, quantize_pos,
                                                 bit_width, reduce_dims):
  """The fake quantization operation kernel with py3 symmetry round mode.

  Args:
    inputs: a tensor containing values to be quantized.
    quantize_pos: the quantize postion
    bit_width: the bit width
    reduce_dims: the dimensions to be reduces for per_channel quantization
  Returns:
    a tensor containing quantized values.
  """
  with tf.name_scope("FakeQuantizeWithQuantizePosPy3SymPerC"):
    float_bit_width = tf.cast(bit_width, dtype=tf.float32, name="bit_width")
    bound = tf.math.pow(2.0, float_bit_width - 1)
    q_min = tf.math.negative(bound, name="q_min")
    if narrow_range:
      q_min = q_min + 1
    q_max = tf.math.subtract(bound, 1, name="q_max")
    scale = tf.math.pow(2.0, quantize_pos, name="scale")
    quantized = py3_sym_quantize(inputs, scale, q_min, q_max)
    dequantized = sym_dequantize(quantized, scale, q_min, q_max)

    def grad_fn(dy):
      return dy, None, None, None

    return dequantized, grad_fn


@tf.custom_gradient
def fake_quantize_with_quantize_pos_py3_asym_perc(inputs, quantize_pos, f_min,
                                                  f_max, bit_width,
                                                  reduce_dims):
  """The fake quantization operation kernel with py3 asymmetry round mode.

  Args:
    inputs: a tensor containing values to be quantized.
    quantize_pos: the quantize postion
    bit_width: the bit width
    reduce_dims: the dimensions to be reduces for per_channel quantization
  Returns:
    a tensor containing quantized values.
  """
  with tf.name_scope("FakeQuantizeWithQuantizePosPy3AsymPerC"):
    float_bit_width = tf.cast(bit_width, dtype=tf.float32, name="bit_width")
    bound = tf.math.pow(2.0, float_bit_width - 1)
    q_min = tf.math.negative(bound, name="q_min")
    if narrow_range:
      q_min = q_min + 1
    q_max = tf.math.subtract(bound, 1, name="q_max")
    scale = tf.math.pow(2.0, quantize_pos, name="scale")
    q_zero_point, new_f_min, new_f_max = quantize_zero_point(
        scale, f_min, f_max, q_min, q_max)
    shift = new_f_min
    quantized = py3_asym_quantize(inputs, scale, shift, q_min, q_max)
    dequantized = asym_dequantize(quantized, scale, shift, q_min, q_max)

    def grad_fn(dy):
      return dy, None, None, None, None, None

    return dequantized, grad_fn


@tf.custom_gradient
def fake_quantize_with_quantize_pos_dpu_sym(inputs, quantize_pos, bit_width):
  """The fake quantization operation kernel with dpu symmetry round mode.

  Args:
    inputs: a tensor containing values to be quantized.
    quantize_pos: the quantize postion
    bit_width: the bit width
  Returns:
    a tensor containing quantized values.
  """
  with tf.name_scope("FakeQuantizeWithQuantizePosDpuSym"):
    bit_width = tf.cast(bit_width, dtype=tf.float32, name="bit_width")
    bound = tf.math.pow(2.0, bit_width - 1)
    q_min = tf.math.negative(bound, name="q_min")
    if narrow_range:
      q_min = q_min + 1
    q_max = tf.math.subtract(bound, 1, name="q_max")
    scale = tf.math.pow(2.0, quantize_pos, name="scale")
    quantized = dpu_sym_quantize(inputs, scale, q_min, q_max)
    dequantized = sym_dequantize(quantized, scale, q_min, q_max)

    def grad_fn(dy):
      return dy, None, None

    return dequantized, grad_fn


@tf.custom_gradient
def fake_quantize_with_quantize_pos_dpu_asym(inputs, quantize_pos, f_min,
                                             f_max, bit_width):
  """The fake quantization operation kernel with dpu asymmetry round mode.

  Args:
    inputs: a tensor containing values to be quantized.
    quantize_pos: the quantize postion
    bit_width: the bit width
  Returns:
    a tensor containing quantized values.
  """
  with tf.name_scope("FakeQuantizeWithQuantizePosDpuAsym"):
    bit_width = tf.cast(bit_width, dtype=tf.float32, name="bit_width")
    bound = tf.math.pow(2.0, bit_width - 1)
    q_min = tf.math.negative(bound, name="q_min")
    if narrow_range:
      q_min = q_min + 1
    q_max = tf.math.subtract(bound, 1, name="q_max")
    scale = tf.math.pow(2.0, quantize_pos, name="scale")
    q_zero_point, new_f_min, new_f_max = quantize_zero_point(
        scale, f_min, f_max, q_min, q_max)
    shift = new_f_min
    quantized = dpu_asym_quantize(inputs, scale, shift, q_min, q_max)
    dequantized = asym_dequantize(quantized, scale, shift, q_min, q_max)

    def grad_fn(dy):
      return dy, None, None, None, None

    return dequantized, grad_fn


@tf.custom_gradient
def fake_quantize_with_log_th_py3_sym(inputs, log_th, bit_width):
  """The fake quantization operation kernel with py3 symmetry round mode

  Args:
    inputs: a tensor containing values to be quantized.
    scale: the scaling factor
    bit_width: the bit width
  Returns:
    a tensor containing quantized values.
  """
  with tf.name_scope("FakeQuantizeWithLogThPy3Sym"):
    bit_width = tf.cast(bit_width, dtype=tf.float32, name="bit_width")
    bound = tf.math.pow(2.0, bit_width - 1)
    q_min = tf.math.negative(bound, name="q_min")
    if narrow_range:
      q_min = q_min + 1
    q_max = tf.math.subtract(bound, 1, name="q_max")
    quantize_pos = bit_width - 1 - tf.math.ceil(log_th)
    scale = tf.math.pow(2.0, quantize_pos, name="scale")
    quantized = py3_sym_quantize(inputs, scale, q_min, q_max)
    dequantized = sym_dequantize(quantized, scale, q_min, q_max)

    def grad_fn(dy):
      # grad_wrt_inputs = 1 if f_min < x < f_max else 0
      #                   [x * s] / s - x, if q_min < [x * s] < q_max
      # grad_wrt_log_th = ln2 * q_min / s, if [x * s] < f_min
      #                   q_max / s, if [x * s] > f_max
      scaled = inputs * scale
      rounded = py3_round(scaled)
      between_min_max = (rounded >= q_min) & (rounded <= q_max)
      ones = tf.ones_like(dy)
      zeros = tf.zeros_like(dy)
      grad_wrt_inputs = dy * tf.where(between_min_max, ones, zeros)
      grad_wrt_log_th = tf.reduce_sum(
          dy * tf.math.log(2.0) *
          tf.where(between_min_max, dequantized - inputs, quantized / scale))
      return grad_wrt_inputs, grad_wrt_log_th, None

    return dequantized, grad_fn


@tf.custom_gradient
def fake_quantize_with_log_th_dpu_sym(inputs, log_th, bit_width):
  """The fake quantization operation kernel with dpu symmetry round mode

  Args:
    inputs: a tensor containing values to be quantized.
    scale: the scaling factor
    bit_width: the bit width
  Returns:
    a tensor containing quantized values.
  """
  with tf.name_scope("FakeQuantizeWithLogTh"):
    bit_width = tf.cast(bit_width, dtype=tf.float32, name="bit_width")
    bound = tf.math.pow(2.0, bit_width - 1)
    q_min = tf.math.negative(bound, name="q_min")
    if narrow_range:
      q_min = q_min + 1
    q_max = tf.math.subtract(bound, 1, name="q_max")
    quantize_pos = bit_width - 1 - tf.math.ceil(log_th)
    scale = tf.math.pow(2.0, quantize_pos, name="scale")
    quantized = dpu_sym_quantize(inputs, scale, q_min, q_max)
    dequantized = sym_dequantize(quantized, scale, q_min, q_max)

    def grad_fn(dy):
      # grad_wrt_inputs = 1 if f_min < x < f_max else 0
      #                   [x * s] / s - x, if q_min < [x * s] < q_max
      # grad_wrt_log_th = ln2 * q_min / s, if [x * s] < f_min
      #                   q_max / s, if [x * s] > f_max
      scaled = inputs * scale
      rounded = dpu_round(scaled)
      between_min_max = (rounded >= q_min) & (rounded <= q_max)
      ones = tf.ones_like(dy)
      zeros = tf.zeros_like(dy)
      grad_wrt_inputs = dy * tf.where(between_min_max, ones, zeros)
      grad_wrt_log_th = tf.reduce_sum(
          dy * tf.math.log(2.0) *
          tf.where(between_min_max, dequantized - inputs, quantized / scale))
      return grad_wrt_inputs, grad_wrt_log_th, None

    return dequantized, grad_fn


_QUANTIZE_KERNEL_MAP = {
    'MIN_MAX_PY3_SYM': fake_quantize_with_min_max_py3_sym,
    'MIN_MAX_PY3_SYM_PERC': fake_quantize_with_min_max_py3_sym_perc,
    'MIN_MAX_PY3_ASYM': fake_quantize_with_min_max_py3_asym,
    'MIN_MAX_PY3_ASYM_PERC': fake_quantize_with_min_max_py3_asym_perc,
    'QUANTIZE_POS_PY3_SYM': fake_quantize_with_quantize_pos_py3_sym,
    'QUANTIZE_POS_PY3_ASYM': fake_quantize_with_quantize_pos_py3_asym,
    'QUANTIZE_POS_PY3_SYM_PERC': fake_quantize_with_quantize_pos_py3_sym_perc,
    'QUANTIZE_POS_PY3_ASYM_PERC': fake_quantize_with_quantize_pos_py3_asym_perc,
    'QUANTIZE_POS_DPU_SYM': fake_quantize_with_quantize_pos_dpu_sym,
    'QUANTIZE_POS_DPU_ASYM': fake_quantize_with_quantize_pos_dpu_asym,
    'LOG_TH_PY3_SYM': fake_quantize_with_log_th_py3_sym,
    'LOG_TH_DPU_SYM': fake_quantize_with_log_th_dpu_sym,
}


def get_quantize_kernel(kernel_type, round_mode, symmetry=True,
                        per_channel=False):
  key = kernel_type
  if round_mode == 0:
    key += '_PY3'
  elif round_mode == 1:
    key += '_DPU'
  elif round_mode == 2:
    key += '_STD'
  else:
    logger.error('Invalid round mode: {}'.format(round_mode))

  if symmetry:
    key += '_SYM'
  else:
    key += '_ASYM'

  if per_channel:
    key += '_PERC'

  if key not in _QUANTIZE_KERNEL_MAP:
    logger.error('Invalid quantize kernel {}'.format(key))
  return _QUANTIZE_KERNEL_MAP[key]


def get_quantize_pos_non_overflow_sym(inputs, f_min, f_max, q_min, q_max,
                                      per_channel, reduce_dims):
  """Get quantize pos which makes no value overflows."""
  with tf.name_scope("GetQuantizePosNonOverflow"):
    min_scale_inv = tf.math.divide(f_min, q_min)
    max_scale_inv = tf.math.divide(f_max, q_max)
    float_scale_inv = tf.math.maximum(min_scale_inv, max_scale_inv)
    # Avoid inf, using sys.float_info.epsilon, log2(epsilon) ~= 52
    float_scale_inv = tf.math.maximum(float_scale_inv, sys.float_info.epsilon)
    quantize_pos = -tf.math.log(float_scale_inv) / tf.math.log(2.0)
    quantize_pos = tf.math.floor(quantize_pos)
    return quantize_pos


def get_quantize_pos_non_overflow_asym(inputs, f_min, f_max, q_min, q_max,
                                       per_channel, reduce_dims):
  """Get quantize pos which makes no value overflows."""
  with tf.name_scope("GetQuantizePosNonOverflow"):
    float_scale_inv = (f_max - f_min) / (q_max - q_min)
    # Avoid inf, using sys.float_info.epsilon, log2(epsilon) ~= 52
    float_scale_inv = tf.math.maximum(float_scale_inv, sys.float_info.epsilon)
    quantize_pos = -tf.math.log(float_scale_inv) / tf.math.log(2.0)
    quantize_pos = tf.math.floor(quantize_pos)
    return quantize_pos


def get_quantize_pos_min_diffs_py3_sym(inputs, f_min, f_max, q_min, q_max,
                                       bit_width, per_channel, reduce_dims):
  """Get quantize pos which makes min difference between float and quantzed."""
  with tf.name_scope("GetQuantizePosMinDiffs"):
    non_overflow_pos = get_quantize_pos_non_overflow_sym(
        inputs, f_min, f_max, q_min, q_max, per_channel, reduce_dims)
    diffs = []
    for i in range(5):
      with tf.name_scope("FakeQuantizeWithScale_{}".format(i)):
        # fake quantize
        scale = tf.math.pow(2.0, non_overflow_pos + i, name="scale")
        quantized = py3_sym_quantize(inputs, scale, q_min, q_max)
        dequantized = sym_dequantize(quantized, scale, q_min, q_max)
        diff = tf.pow(inputs - dequantized, 2)
        diff = tf.reduce_sum(diff)
        diffs.append(diff)
    pos_offset = tf.argmin(diffs)
    quantize_pos = non_overflow_pos + tf.cast(pos_offset, tf.float32)
    return quantize_pos


def get_quantize_pos_min_diffs_dpu_sym(inputs, f_min, f_max, q_min, q_max,
                                       bit_width, per_channel, reduce_dims):
  """Get quantize pos which makes min difference between float and quantzed."""
  with tf.name_scope("GetQuantizePosMinDiffs"):
    non_overflow_pos = get_quantize_pos_non_overflow_sym(
        inputs, f_min, f_max, q_min, q_max, per_channel, reduce_dims)
    diffs = []
    for i in range(5):
      with tf.name_scope("FakeQuantizeWithScale_{}".format(i)):
        # fake quantize
        scale = tf.math.pow(2.0, non_overflow_pos + i, name="scale")
        quantized = dpu_sym_quantize(inputs, scale, q_min, q_max)
        dequantized = sym_dequantize(quantized, scale, q_min, q_max)
        diff = tf.pow(inputs - dequantized, 2)
        diff = tf.reduce_sum(diff)
        diffs.append(diff)
    pos_offset = tf.argmin(diffs)
    quantize_pos = non_overflow_pos + tf.cast(pos_offset, tf.float32)
    return quantize_pos


def get_quantize_pos(inputs, f_min, f_max, bit_width, method, round_mode,
                     per_channel, reduce_dims, symmetry):
  """Interface function to get quantize pos."""
  bit_width = tf.cast(bit_width, dtype=tf.float32, name="bit_width")
  bound = tf.math.pow(2.0, bit_width - 1)
  q_min = tf.math.negative(bound, name="q_min")
  if narrow_range:
    q_min = q_min + 1
  q_max = tf.math.subtract(bound, 1, name="q_max")
  with tf.name_scope("GetQuantizePos"):
    if not symmetry:
      return get_quantize_pos_non_overflow_asym(inputs, f_min, f_max, q_min,
                                                q_max, per_channel,
                                                reduce_dims)
    if method == 0:
      return get_quantize_pos_non_overflow_sym(inputs, f_min, f_max, q_min,
                                               q_max, per_channel, reduce_dims)
    elif method == 1 and round_mode == 0:
      return get_quantize_pos_min_diffs_py3_sym(inputs, f_min, f_max, q_min,
                                                q_max, bit_width, per_channel,
                                                reduce_dims)
    elif method == 1 and round_mode == 1:
      return get_quantize_pos_min_diffs_dpu_sym(inputs, f_min, f_max, q_min,
                                                q_max, bit_width, per_channel,
                                                reduce_dims)
    else:
      logger.error('', NotImplementedError)


def get_log_th_non_overflow(inputs, f_min, f_max, q_min, q_max):
  """Get log threshold which makes no value overflows."""
  with tf.name_scope("GetLogThNonOverflow"):
    f_min_abs = tf.math.abs(f_min)
    f_max_adj = f_max * tf.math.divide(-q_min, q_max)
    th = tf.math.maximum(f_min_abs, f_max_adj)
    th = tf.math.maximum(th, 1e-9)
    return tf.math.divide(tf.math.log(th), tf.math.log(2.))


def get_log_th(inputs, f_min, f_max, bit_width, method):
  """Interface function to get log threshold."""
  bit_width = tf.cast(bit_width, dtype=tf.float32, name="bit_width")
  bound = tf.math.pow(2.0, bit_width - 1)
  q_min = tf.math.negative(bound, name="q_min")
  if narrow_range:
    q_min = q_min + 1
  q_max = tf.math.subtract(bound, 1, name="q_max")
  with tf.name_scope("GetLogTh"):
    if method == 0:
      return get_log_th_non_overflow(inputs, f_min, f_max, q_min, q_max)
    elif method == 1:
      logger.error('Method 1 not implemented.', NotImplementedError)
    else:
      logger.error('Method {} not implemented.'.format(method),
                   NotImplementedError)


def get_reduce_dims(input_shape, channel_axis):
  """Helper function to convert channel_axis to reduce_dims."""
  input_dim = len(input_shape)
  if channel_axis < 0:
    channel_axis = input_dim + channel_axis
  reduce_dims = [i for i in range(input_dim) if i != channel_axis]
  return tf.constant(reduce_dims)


def LastValueMinMaxQuantize(inputs,
                            min_var,
                            max_var,
                            bit_width,
                            round_mode,
                            mode,
                            is_training,
                            symmetry,
                            per_channel,
                            channel_axis,
                            name_scope="LastValueMinMaxQuantize"):
  """Last value float scale quantize op.

  Args:
    inputs: Input values.
    min_var: Variable of minimum value of inputs.
    max_var: Variable of maximum value of inputs.
    bit_width: Int, bit width of quantized values.
    round_mode: Int, the mode of rounding function, 0 for PY3 round.
      Now only PY3 round is supported.
    mode: String, the mode of quantization, available modes are
      ['ANALYSE', 'QCB', 'QCBEV', 'QAT']
    is_training: Bool, whether in training phase.
    symmetry: Bool, whether to apply symmetry quantization.
    per_channel: Bool, whether to apply per_channel quantization. The last
      dimension is regarded as channel.
    channel_axis: The axis of the channel, used with per_channel enabled. The
      last dimension is regarded as channel axis and other dimension will be
      reduces by default.
  Return:
    Quantized inputs.
  """
  with tf.name_scope(name_scope):
    reduce_dims = None
    if per_channel:
      reduce_dims = get_reduce_dims(inputs.get_shape(), channel_axis)

    quantize_kernel = get_quantize_kernel(
        kernel_type='MIN_MAX',
        round_mode=round_mode,
        symmetry=symmetry,
        per_channel=per_channel)

    # ANALYSE branch
    if mode == 'ANALYSE':
      batch_min, batch_max = get_min_max(
          inputs,
          bit_width,
          symmetry=symmetry,
          per_channel=per_channel,
          reduce_dims=reduce_dims)
      assign_min = tf_compat.assign(min_var, batch_min, name='assign_min')
      assign_max = tf_compat.assign(max_var, batch_max, name='assign_max')
      return tf.identity(inputs, name='identity')

    if is_training or mode == 'QCB':
      # Training and calibration branch
      batch_min, batch_max = get_min_max(
          inputs,
          bit_width,
          symmetry=symmetry,
          per_channel=per_channel,
          reduce_dims=reduce_dims)
      assign_min = tf_compat.assign(min_var, batch_min, name='assign_min')
      assign_max = tf_compat.assign(max_var, batch_max, name='assign_max')
      if per_channel:
        return quantize_kernel(inputs, assign_min, assign_max, bit_width,
                               reduce_dims)
      else:
        return quantize_kernel(inputs, assign_min, assign_max, bit_width)
    else:
      # Evaluation branch
      if per_channel:
        return quantize_kernel(inputs, min_var, max_var, bit_width,
                               reduce_dims)
      else:
        return quantize_kernel(inputs, min_var, max_var, bit_width)


def MovingAvgMinMaxQuantize(inputs,
                            min_var,
                            max_var,
                            bit_width,
                            round_mode,
                            mode,
                            is_training,
                            per_channel,
                            channel_axis,
                            ema_decay=0.999,
                            name_scope="LastValueMinMaxQuantize"):
  """Moving average float scale quantize op.

  Args:
    inputs: Input values.
    min_var: Variable of minimum value of inputs.
    max_var: Variable of maximum value of inputs.
    bit_width: Int, bit width of quantized values.
    round_mode: Int, the mode of rounding function, 0 for PY3 round.
      Now only PY3 round is supported.
    mode: String, the mode of quantization, available modes are
      ['ANALYSE', 'QCB', 'QCBEV', 'QAT']
    is_training: Bool, whether in training phase.
    per_channel: Bool, whether to apply per_channel quantization. The last
      dimension is regarded as channel.
    channel_axis: The axis of the channel, used with per_channel enabled. The
      last dimension is regarded as channel axis and other dimension will be
      reduces by default.
    ema_decay: Float, EMA decay parameter.
  Return:
    Quantized inputs.
  """
  with tf.name_scope(name_scope):
    symmetry = False
    reduce_dims = None
    if per_channel:
      reduce_dims = get_reduce_dims(inputs.get_shape(), channel_axis)

    quantize_kernel = get_quantize_kernel(
        kernel_type='MIN_MAX',
        round_mode=round_mode,
        symmetry=symmetry,
        per_channel=per_channel)

    # ANALYSE branch
    if mode == 'ANALYSE':
      batch_min, batch_max = get_min_max(
          inputs,
          bit_width,
          symmetry=symmetry,
          per_channel=per_channel,
          reduce_dims=reduce_dims)
      assign_min = moving_averages.assign_moving_average(
          min_var, batch_min, ema_decay, zero_debias=False,
          name='assign_min_ema')
      assign_max = moving_averages.assign_moving_average(
          max_var, batch_max, ema_decay, zero_debias=False,
          name='assign_max_ema')
      return tf.identity(inputs, name='identity')

    if is_training or mode == 'QCB':
      # Training and calibration branch
      batch_min, batch_max = get_min_max(
          inputs,
          bit_width,
          symmetry=symmetry,
          per_channel=per_channel,
          reduce_dims=reduce_dims)
      assign_min = moving_averages.assign_moving_average(
          min_var, batch_min, ema_decay, zero_debias=True,
          name='assign_min_ema')
      assign_max = moving_averages.assign_moving_average(
          max_var, batch_max, ema_decay, zero_debias=True,
          name='assign_max_ema')
      if per_channel:
        return quantize_kernel(inputs, assign_min, assign_max, bit_width,
                               reduce_dims)
      else:
        return quantize_kernel(inputs, assign_min, assign_max, bit_width)
    else:
      # Evaluation branch
      if per_channel:
        return quantize_kernel(inputs, min_var, max_var, bit_width,
                               reduce_dims)
      else:
        return quantize_kernel(inputs, min_var, max_var, bit_width)


def LastValueQuantPosQuantize(inputs,
                              quant_pos_var,
                              min_var,
                              max_var,
                              bit_width,
                              method,
                              round_mode,
                              mode,
                              is_training,
                              symmetry,
                              per_channel,
                              channel_axis,
                              name_scope="LastValueQuantPosQuantize"):
  """Last value power of 2 quantize op with quantize position.

  Args:
    inputs: Input values.
    quant_pos_var: Variable of quantize position.
    min_var: Variable of minimum value of inputs.
    max_var: Variable of maximum value of inputs.
    bit_width: Int, bit width of quantized values.
    method: Int, method of how to get the quantize pos, 0 for non_overflow
      and 1 for min_diffs.
    round_mode: Int, the mode of rounding function, 0 for PY3 round, 1 for
      DPU round. By default, weights are quantized with PY3 round, inputs and
      activations are quantized with DPU round.
    mode: String, the mode of quantization, available modes are
      ['ANALYSE', 'QCB', 'QCBEV', 'QAT']
    is_training: Bool, whether in training phase.
    symmetry: Bool, whether to apply symmetry quantization.
    per_channel: Bool, whether to apply per_channel quantization. The last
      dimension is regarded as channel.
    channel_axis: The axis of the channel, used with per_channel enabled. The
      last dimension is regarded as channel axis and other dimension will be
      reduces by default.
  Return:
    Quantized inputs.
  """
  with tf.name_scope(name_scope):
    reduce_dims = None
    if per_channel:
      reduce_dims = get_reduce_dims(inputs.get_shape(), channel_axis)

    quantize_kernel = get_quantize_kernel(
        kernel_type='QUANTIZE_POS',
        round_mode=round_mode,
        symmetry=symmetry,
        per_channel=per_channel)

    # ANALYSE branch
    if mode == 'ANALYSE':
      batch_min, batch_max = get_min_max(
          inputs,
          bit_width,
          symmetry=symmetry,
          per_channel=per_channel,
          reduce_dims=reduce_dims)
      assign_min = tf_compat.assign(min_var, batch_min, name='assign_min')
      assign_max = tf_compat.assign(max_var, batch_max, name='assign_max')
      return tf.identity(inputs, name='identity')

    if is_training or mode == 'QCB':
      # Training and calibration branch
      batch_min, batch_max = get_min_max(
          inputs,
          bit_width,
          symmetry=symmetry,
          per_channel=per_channel,
          reduce_dims=reduce_dims)
      assign_min = tf_compat.assign(min_var, batch_min, name='assign_min')
      assign_max = tf_compat.assign(max_var, batch_max, name='assign_max')

      # Get quantize positions
      batch_quantize_pos = get_quantize_pos(inputs, assign_min, assign_max,
                                            bit_width, method, round_mode,
                                            per_channel, channel_axis,
                                            symmetry)
      assign_quantize_pos = tf_compat.assign(
          quant_pos_var, batch_quantize_pos, name="assign_quantize_pos")

      if per_channel:
        if symmetry:
          return quantize_kernel(inputs, assign_quantize_pos, bit_width,
                                 reduce_dims)
        else:
          return quantize_kernel(inputs, assign_quantize_pos, assign_min,
                                 assign_max, bit_width, reduce_dims)
      else:
        if symmetry:
          return quantize_kernel(inputs, assign_quantize_pos, bit_width)
        else:
          return quantize_kernel(inputs, assign_quantize_pos, assign_min,
                                 assign_max, bit_width)
    else:
      # Evaluation branch
      if per_channel:
        if symmetry:
          return quantize_kernel(inputs, quant_pos_var, bit_width,
                                 reduce_dims)
        else:
          return quantize_kernel(inputs, quant_pos_var, min_var, max_var,
                                 bit_width, reduce_dims)
      else:
        if symmetry:
          return quantize_kernel(inputs, quant_pos_var, bit_width)
        else:
          return quantize_kernel(inputs, quant_pos_var, min_var, max_var,
                                 bit_width)


def LastValueLogThQuantize(inputs,
                           log_th_var,
                           min_var,
                           max_var,
                           bit_width,
                           method,
                           round_mode,
                           mode,
                           is_training,
                           name_scope="LastValueLogThQuantize"):
  """Last value power of 2 quantize op with log threshold.

  Args:
    inputs: Input values.
    log_th_var: Variable of log threshold.
    min_var: Variable of minimum value of inputs.
    max_var: Variable of maximum value of inputs.
    bit_width: Int, bit width of quantized values.
    method: Int, method of how to get the initial log threshold, 0 for
      non_overflow and 1 for min_diffs.
    round_mode: Int, the mode of rounding function, 0 for PY3 round, 1 for
      DPU round, 2 for STD round.
    mode: String, the mode of quantization, available modes are
      ['ANALYSE', 'QCB', 'QCBEV', 'QAT']
    is_training: Bool, whether in training phase.
  Return:
    Quantized inputs.
  """
  with tf.name_scope(name_scope):
    quantize_kernel = get_quantize_kernel(
        kernel_type='LOG_TH', round_mode=round_mode)

    # ANALYSE branch
    if mode == 'ANALYSE':
      batch_min, batch_max = get_min_max(inputs, bit_width)
      assign_min = tf_compat.assign(min_var, batch_min, name='assign_min')
      assign_max = tf_compat.assign(max_var, batch_max, name='assign_max')
      return tf.identity(inputs, name='identity')

    if is_training or mode == 'QCB':
      # Training and calibration branch
      batch_min, batch_max = get_min_max(inputs, bit_width)
      assign_min = tf_compat.assign(min_var, batch_min, name='assign_min')
      assign_max = tf_compat.assign(max_var, batch_max, name='assign_max')

      if mode == 'QCB':
        batch_log_th = get_log_th(inputs, assign_min, assign_max, bit_width,
                                  method)
        assign_log_th = tf_compat.assign(
            log_th_var, batch_log_th, name="assign_log_th")
        return quantize_kernel(inputs, assign_log_th, bit_width)
      else:
        return quantize_kernel(inputs, log_th_var, bit_width)
    else:
      # Evaluation branch
      return quantize_kernel(inputs, log_th_var, bit_width)

avg_line_length: 36.620601
max_line_length: 108
alphanum_fraction: 0.657244
qsc_code_num_words_quality_signal: 6,171
qsc_code_num_chars_quality_signal: 42,663
qsc_code_mean_word_length_quality_signal: 4.248744
qsc_code_frac_words_unique_quality_signal: 0.053638
qsc_code_frac_chars_top_2grams_quality_signal: 0.044243
qsc_code_frac_chars_top_3grams_quality_signal: 0.014875
qsc_code_frac_chars_top_4grams_quality_signal: 0.018918
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.837294
qsc_code_frac_chars_dupe_6grams_quality_signal: 0.822877
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.808231
qsc_code_frac_chars_dupe_8grams_quality_signal: 0.786414
qsc_code_frac_chars_dupe_9grams_quality_signal: 0.758992
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.742324
qsc_code_frac_chars_replacement_symbols_quality_signal: 0
qsc_code_frac_chars_digital_quality_signal: 0.010643
qsc_code_frac_chars_whitespace_quality_signal: 0.246818
qsc_code_size_file_byte_quality_signal: 42,663
qsc_code_num_lines_quality_signal: 1,164
qsc_code_num_chars_line_max_quality_signal: 109
qsc_code_num_chars_line_mean_quality_signal: 36.652062
qsc_code_frac_chars_alphabet_quality_signal: 0.805309
qsc_code_frac_chars_comments_quality_signal: 0.230059
qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.718271
qsc_code_cate_autogen_quality_signal: 0
qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0.055845
qsc_code_frac_chars_long_word_length_quality_signal: 0.022586
qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0
qsc_code_frac_lines_prompt_comments_quality_signal: 0
qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1
qsc_codepython_frac_lines_func_ratio_quality_signal: 0.06834
qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0
qsc_codepython_frac_lines_import_quality_signal: 0.013947
qsc_codepython_frac_lines_simplefunc_quality_signal: 0.008368
qsc_codepython_score_lines_no_logic_quality_signal: 0.181311
qsc_codepython_frac_lines_print_quality_signal: 0.001395
qsc_code_num_words: 0
qsc_code_num_chars: 0
qsc_code_mean_word_length: 0
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: 0
qsc_code_frac_chars_top_3grams: 0
qsc_code_frac_chars_top_4grams: 0
qsc_code_frac_chars_dupe_5grams: 1
qsc_code_frac_chars_dupe_6grams: 1
qsc_code_frac_chars_dupe_7grams: 1
qsc_code_frac_chars_dupe_8grams: 1
qsc_code_frac_chars_dupe_9grams: 1
qsc_code_frac_chars_dupe_10grams: 1
qsc_code_frac_chars_replacement_symbols: 0
qsc_code_frac_chars_digital: 0
qsc_code_frac_chars_whitespace: 0
qsc_code_size_file_byte: 0
qsc_code_num_lines: 0
qsc_code_num_chars_line_max: 0
qsc_code_num_chars_line_mean: 0
qsc_code_frac_chars_alphabet: 0
qsc_code_frac_chars_comments: 0
qsc_code_cate_xml_start: 0
qsc_code_frac_lines_dupe_lines: 1
qsc_code_cate_autogen: 0
qsc_code_frac_lines_long_string: 0
qsc_code_frac_chars_string_length: 0
qsc_code_frac_chars_long_word_length: 0
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: 0
qsc_code_frac_chars_hex_words: 0
qsc_code_frac_lines_prompt_comments: 0
qsc_code_frac_lines_assert: 0
qsc_codepython_cate_ast: 0
qsc_codepython_frac_lines_func_ratio: 0
qsc_codepython_cate_var_zero: 0
qsc_codepython_frac_lines_pass: 0
qsc_codepython_frac_lines_import: 0
qsc_codepython_frac_lines_simplefunc: 0
qsc_codepython_score_lines_no_logic: 0
qsc_codepython_frac_lines_print: 0
effective: 0
hits: 7
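The record above defines three rounding conventions that differ only on exact .5 ties (std_round: half away from zero; py3_round: half to even; dpu_round: half up, i.e. floor(x + 0.5)). Below is a plain-Python mirror of those TensorFlow helpers, checked against the example values given in their docstrings; it is an illustrative re-expression, not code from the record:

import math

def std_round(x):
    # ROUND_HALF_AWAY_FROM_ZERO (C++ std::round): f(2.5) = 3, f(-2.5) = -3
    return math.copysign(math.floor(abs(x) + 0.5), x)

def py3_round(x):
    # ROUND_HALF_TO_EVEN (banker's rounding, Python 3): f(2.5) = 2, f(-2.5) = -2
    return float(round(x))

def dpu_round(x):
    # ROUND_HALF_UP: f(2.5) = 3, f(-2.5) = -2, i.e. floor(x + 0.5)
    return math.floor(x + 0.5)

# The tie cases from the docstrings in the record above:
for x in (2.3, 1.5, -1.5, 2.5, -2.5, -2.6):
    print(x, std_round(x), py3_round(x), dpu_round(x))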
Row 4 (record truncated; the content field below breaks off mid-string)

hexsha: cc2a3b3bf205302407afa91c762470726932dc69
size: 62,615
ext: py
lang: Python
max_stars_repo_path: pkgs/ops-pkg/src/genie/libs/ops/lisp/iosxe/tests/lisp_output.py
max_stars_repo_name: miott/genielibs
max_stars_repo_head_hexsha: 6464642cdd67aa2367bdbb12561af4bb060e5e62
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 94
max_stars_repo_stars_event_min_datetime: 2018-04-30T20:29:15.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-29T13:40:31.000Z
max_issues_repo_path: pkgs/ops-pkg/src/genie/libs/ops/lisp/iosxe/tests/lisp_output.py
max_issues_repo_name: miott/genielibs
max_issues_repo_head_hexsha: 6464642cdd67aa2367bdbb12561af4bb060e5e62
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: 67
max_issues_repo_issues_event_min_datetime: 2018-12-06T21:08:09.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-29T18:00:46.000Z
max_forks_repo_path: pkgs/ops-pkg/src/genie/libs/ops/lisp/iosxe/tests/lisp_output.py
max_forks_repo_name: miott/genielibs
max_forks_repo_head_hexsha: 6464642cdd67aa2367bdbb12561af4bb060e5e62
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 49
max_forks_repo_forks_event_min_datetime: 2018-06-29T18:59:03.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-10T02:07:59.000Z

content (line breaks inside the CLI output strings are reconstructed approximately; the original column alignment was lost in flattening):

''' Lisp Genie Ops Object Outputs for IOSXE. '''


class LispOutput(object):

    ############################################################################
    # LISP INFO OUTPUTS
    ############################################################################

    # --------------------------------------------------------------------------
    # 'show lisp all service <service> summary'
    # --------------------------------------------------------------------------

    # 'show lisp all service ipv4 summary'
    ShowLispServiceIpv4Summary = '''\
        202-XTR#show lisp all service ipv4 summary
        =====================================================
        Output for router lisp 0
        =====================================================
        Router-lisp ID: 0
        Instance count: 2
        Key: DB - Local EID Database entry count (@ - RLOC check pending
                  * - RLOC consistency problem),
             DB no route - Local EID DB entries with no matching RIB route,
             Cache - Remote EID mapping cache size, IID - Instance ID,
             Role - Configured Role

                       Interface    DB  DB no  Cache  Incom  Cache
        EID VRF name      (.IID)  size  route   size  plete   Idle  Role
        red            LISP0.101     1      0      2   0.0%   0.0%  ITR-ETR

        Number of eid-tables:                         2
        Total number of database entries:             2 (inactive 0)
        EID-tables with inconsistent locators:        0
        Total number of map-cache entries:            3
        EID-tables with incomplete map-cache entries: 0
        EID-tables pending map-cache update to FIB:   0
        '''

    # 'show lisp all service ipv6 summary'
    ShowLispServiceIpv6Summary = '''\
        202-XTR#show lisp all service ipv6 summary
        =====================================================
        Output for router lisp 0
        =====================================================
        Router-lisp ID: 0
        Instance count: 2
        Key: DB - Local EID Database entry count (@ - RLOC check pending
                  * - RLOC consistency problem),
             DB no route - Local EID DB entries with no matching RIB route,
             Cache - Remote EID mapping cache size, IID - Instance ID,
             Role - Configured Role

                       Interface    DB  DB no  Cache  Incom  Cache
        EID VRF name      (.IID)  size  route   size  plete   Idle  Role
        red            LISP0.101     1      0      2   0.0%   0.0%  ITR-ETR

        Number of eid-tables:                         1
        Total number of database entries:             1 (inactive 0)
        EID-tables with inconsistent locators:        0
        Total number of map-cache entries:            2
        EID-tables with incomplete map-cache entries: 0
        EID-tables pending map-cache update to FIB:   0
        '''

    # 'show lisp all service ethernet summary'
    ShowLispServiceEthernetSummary = '''
        202-XTR#show lisp all service ethernet summary
        =================================================
        Output for router lisp 0
        =================================================
        Router-lisp ID: 0
        Instance count: 69
        Key: DB - Local EID Database entry count (@ - RLOC check pending
                  * - RLOC consistency problem),
             DB no route - Local EID DB entries with no matching RIB route,
             Cache - Remote EID mapping cache size, IID - Instance ID,
             Role - Configured Role

                       Interface    DB  DB no  Cache  Incom  Cache
        EID VRF name      (.IID)  size  route   size  plete   Idle  Role
                       LISP0.101     2      0      4   0.0%   100%  NONE

        Number of eid-tables:                         2
        Total number of database entries:             4 (inactive 0)
        Maximum database entries:                     5120
        EID-tables with inconsistent locators:        0
        Total number of map-cache entries:            4
        Maximum map-cache entries:                    5120
        EID-tables with incomplete map-cache entries: 0
        EID-tables pending map-cache update to FIB:   0
        '''

    # --------------------------------------------------------------------------
    # 'show lisp all service <service>'
    # --------------------------------------------------------------------------

    # 'show lisp all service ipv4'
    ShowLispServiceIpv4 = '''\
        202-XTR#show lisp all service ipv4
        =================================================
        Output for router lisp 0
        =================================================
        Router-lisp ID: 0
        Locator table: default
        Ingress Tunnel Router (ITR): enabled
        Egress Tunnel Router (ETR): enabled
        Proxy-ITR Router (PITR): enabled RLOCs: 10.10.10.10
        Proxy-ETR Router (PETR): disabled
        NAT-traversal Router (NAT-RTR): disabled
        Mobility First-Hop Router: disabled
        Map Server (MS): disabled
        Map Resolver (MR): disabled
        Delegated Database Tree (DDT): disabled
        ITR Map-Resolver(s): 10.64.4.4, 10.166.13.13
        ETR Map-Server(s): 10.64.4.4, 10.166.13.13
        xTR-ID: 0x730E0861-0x12996F6D-0xEFEA2114-0xE1C951F7
        site-ID: unspecified
        ITR local RLOC (last resort): *** NOT FOUND ***
        ITR Solicit Map Request (SMR): accept and process
          Max SMRs per map-cache entry: 8 more specifics
          Multiple SMR suppression time: 20 secs
        ETR accept mapping data: disabled, verify disabled
        ETR map-cache TTL: 1d00h
        Locator Status Algorithms:
          RLOC-probe algorithm: disabled
          RLOC-probe on route change: N/A (periodic probing disabled)
          RLOC-probe on member change: disabled
          LSB reports: process
          IPv4 RLOC minimum mask length: /0
          IPv6 RLOC minimum mask length: /0
        Map-cache:
          Map-cache limit: 1000
          Map-cache activity check period: 60 secs
          Persistent map-cache: disabled
        Database:
          Dynamic database mapping limit: 1000
        '''

    # 'show lisp all service ipv6'
    ShowLispServiceIpv6 = '''\
        202-XTR#show lisp all service ipv6
        =================================================
        Output for router lisp 0
        =================================================
        Router-lisp ID: 0
        Locator table: default
        Ingress Tunnel Router (ITR): enabled
        Egress Tunnel Router (ETR): enabled
        Proxy-ITR Router (PITR): disabled
        Proxy-ETR Router (PETR): disabled
        NAT-traversal Router (NAT-RTR): disabled
        Mobility First-Hop Router: disabled
        Map Server (MS): disabled
        Map Resolver (MR): disabled
        Delegated Database Tree (DDT): disabled
        ITR Map-Resolver(s): 10.64.4.4, 10.166.13.13
        ETR Map-Server(s): 10.64.4.4, 10.166.13.13
        xTR-ID: 0x730E0861-0x12996F6D-0xEFEA2114-0xE1C951F7
        site-ID: unspecified
        ITR local RLOC (last resort): *** NOT FOUND ***
        ITR Solicit Map Request (SMR): accept and process
          Max SMRs per map-cache entry: 8 more specifics
          Multiple SMR suppression time: 20 secs
        ETR accept mapping data: disabled, verify disabled
        ETR map-cache TTL: 1d00h
        Locator Status Algorithms:
          RLOC-probe algorithm: disabled
          RLOC-probe on route change: N/A (periodic probing disabled)
          RLOC-probe on member change: disabled
          LSB reports: process
          IPv4 RLOC minimum mask length: /0
          IPv6 RLOC minimum mask length: /0
        Map-cache:
          Map-cache limit: 1000
          Map-cache activity check period: 60 secs
          Persistent map-cache: disabled
        Database:
          Dynamic database mapping limit: 1000
        '''

    # 'show lisp all service ethernet'
    ShowLispServiceEthernet = '''\
        OTT-LISP-C3K-3-xTR1#show lisp all service ethernet
        =================================================
        Output for router lisp 0
        =================================================
        Router-lisp ID: 0
        Locator table: default
        Ingress Tunnel Router (ITR): enabled
        Egress Tunnel Router (ETR): enabled
        Proxy-ITR Router (PITR): disabled
        Proxy-ETR Router (PETR): disabled
        NAT-traversal Router (NAT-RTR): disabled
        Mobility First-Hop Router: disabled
        Map Server (MS): disabled
        Map Resolver (MR): disabled
        Mr-use-petr: disabled
        Delegated Database Tree (DDT): disabled
        ITR Map-Resolver(s): 10.94.44.44
                             10.84.66.66
        ETR Map-Server(s): 10.94.44.44
                           10.84.66.66
        xTR-ID: 0x730E0861-0x12996F6D-0xEFEA2114-0xE1C951F7
        site-ID: unspecified
        ITR local RLOC (last resort): *** NOT FOUND ***
        ITR Solicit Map Request (SMR): accept and process
          Max SMRs per map-cache entry: 8 more specifics
          Multiple SMR suppression time: 20 secs
        ETR accept mapping data: disabled, verify disabled
        ETR map-cache TTL: 1d00h
        Locator Status Algorithms:
          RLOC-probe algorithm: disabled
          RLOC-probe on route change: N/A (periodic probing disabled)
          RLOC-probe on member change: disabled
          LSB reports: process
          IPv4 RLOC minimum mask length: /0
          IPv6 RLOC minimum mask length: /0
        Map-cache:
          Map-cache limit: 5120
          Map-cache activity check period: 60 secs
          Persistent map-cache: disabled
        Source locator configuration:
          Vlan100: 10.229.11.1 (Loopback0)
          Vlan101: 10.229.11.1 (Loopback0)
        Database:
          Dynamic database mapping limit: 5120
        '''

    # --------------------------------------------------------------------------
    # 'show lisp all instance-id <instance_id> <service>'
    # --------------------------------------------------------------------------

    # 'show lisp all instance-id 101 service ipv4'
    ShowLispInstance101ServiceIpv4 = '''
        202-XTR#show lisp all instance-id 101 ipv4
        =================================================
        Output for router lisp 0
        =================================================
        Instance ID: 101
        Router-lisp ID: 0
        Locator table: default
        EID table: vrf red
        Ingress Tunnel Router (ITR): enabled
        Egress Tunnel Router (ETR): enabled
        Proxy-ITR Router (PITR): enabled RLOCs: 10.10.10.10
        Proxy-ETR Router (PETR): disabled
        NAT-traversal Router (NAT-RTR): disabled
        Mobility First-Hop Router: disabled
        Map Server (MS): disabled
        Map Resolver (MR): disabled
        Delegated Database Tree (DDT): disabled
        Site Registration Limit: 0
        Map-Request source: derived from EID destination
        ITR Map-Resolver(s): 10.64.4.4, 10.166.13.13
        ETR Map-Server(s): 10.64.4.4 (17:49:58), 10.166.13.13 (00:00:35)
        xTR-ID: 0x730E0861-0x12996F6D-0xEFEA2114-0xE1C951F7
        site-ID: unspecified
        ITR local RLOC (last resort): 10.16.2.2
        ITR use proxy ETR RLOC(s): 10.10.10.10
        ITR Solicit Map Request (SMR): accept and process
          Max SMRs per map-cache entry: 8 more specifics
          Multiple SMR suppression time: 20 secs
        ETR accept mapping data: disabled, verify disabled
        ETR map-cache TTL: 1d00h
        Locator Status Algorithms:
          RLOC-probe algorithm: disabled
          RLOC-probe on route change: N/A (periodic probing disabled)
          RLOC-probe on member change: disabled
          LSB reports: process
          IPv4 RLOC minimum mask length: /0
          IPv6 RLOC minimum mask length: /0
        Map-cache:
          Static mappings configured: 0
          Map-cache size/limit: 2/1000
          Imported route count/limit: 0/1000
          Map-cache activity check period: 60 secs
          Map-cache FIB updates: established
          Persistent map-cache: disabled
        Database:
          Total database mapping size: 1
          static database size/limit: 1/65535
          dynamic database size/limit: 0/65535
          route-import database size/limit: 0/1000
          Inactive (deconfig/away) size: 0
        Encapsulation type: lisp
        '''

    # 'show lisp all instance-id 101 service ipv6'
    ShowLispInstance101ServiceIpv6 = '''\
        202-XTR#show lisp all instance-id 101 ipv6
        =================================================
        Output for router lisp 0
        =================================================
        Instance ID: 101
        Router-lisp ID: 0
        Locator table: default
        EID table: vrf red
        Ingress Tunnel Router (ITR): enabled
        Egress Tunnel Router (ETR): enabled
        Proxy-ITR Router (PITR): disabled
        Proxy-ETR Router (PETR): disabled
        NAT-traversal Router (NAT-RTR): disabled
        Mobility First-Hop Router: disabled
        Map Server (MS): disabled
        Map Resolver (MR): disabled
        Delegated Database Tree (DDT): disabled
        Site Registration Limit: 0
        Map-Request source: derived from EID destination
        ITR Map-Resolver(s): 10.100.5.5, 10.66.12.12
        ETR Map-Server(s): 10.100.5.5 (17:49:58), 10.66.12.12 (00:00:35)
xTR-ID: 0x730E0861-0x12996F6D-0xEFEA2114-0xE1C951F7 site-ID: unspecified ITR local RLOC (last resort): 10.16.2.2 ITR use proxy ETR RLOC(s): 10.10.10.10 ITR Solicit Map Request (SMR): accept and process Max SMRs per map-cache entry: 8 more specifics Multiple SMR suppression time: 20 secs ETR accept mapping data: disabled, verify disabled ETR map-cache TTL: 1d00h Locator Status Algorithms: RLOC-probe algorithm: disabled RLOC-probe on route change: N/A (periodic probing disabled) RLOC-probe on member change: disabled LSB reports: process IPv4 RLOC minimum mask length: /0 IPv6 RLOC minimum mask length: /0 Map-cache: Static mappings configured: 0 Map-cache size/limit: 2/1000 Imported route count/limit: 0/1000 Map-cache activity check period: 60 secs Map-cache FIB updates: established Persistent map-cache: disabled Database: Total database mapping size: 1 static database size/limit: 1/65535 dynamic database size/limit: 0/65535 route-import database size/limit: 0/1000 Inactive (deconfig/away) size: 0 Encapsulation type: lisp ''' # 'show lisp all instance-id 101 service ethernet' ShowLispInstance101ServiceEthernet = '''\ ''' # -------------------------------------------------------------------------- # 'show lisp all instance-id <instance_id> <service> server detail internal' # -------------------------------------------------------------------------- # 'show lisp all instance-id 101 service ipv4 server detail internal' ShowLispInstance101Ipv4ServerDetailInternal = '''\ 204-MSMR#show lisp all instance-id 101 ipv4 server detail internal ===================================================== Output for router lisp 0 ===================================================== LISP Site Registration Information Site name: provider Allowed configured locators: any Allowed EID-prefixes: Site name: xtr1_1 Allowed configured locators: any Allowed EID-prefixes: EID-prefix: 192.168.0.0/24 instance-id 101 First registered: 1w4d Last registered: 02:41:22 Routing table tag: 0 Origin: Configuration, accepting more specifics Merge active: No Proxy reply: No TTL: 00:00:00 State: unknown Registration errors: Authentication failures: 0 Allowed locators mismatch: 0 No registrations. 
EID-prefix: 192.168.0.1/32 instance-id 101 First registered: 01:12:41 Last registered: 01:12:41 Routing table tag: 0 Origin: Dynamic, more specific of 192.168.0.0/24 Merge active: No Proxy reply: Yes TTL: 1d00h State: complete Registration errors: Authentication failures: 0 Allowed locators mismatch: 0 ETR 10.16.2.2, last registered 01:12:41, proxy-reply, map-notify TTL 1d00h, no merge, hash-function sha1, nonce 0x70D18EF4-0x3A605D67 state complete, no security-capability xTR-ID 0x21EDD25F-0x7598784C-0x769C8E4E-0xC04926EC site-ID unspecified sourced by reliable transport Locator Local State Pri/Wgt Scope 10.16.2.2 yes up 50/50 IPv4 none Site name: xtr1_2 Allowed configured locators: any Allowed EID-prefixes: Site name: xtr2 Allowed configured locators: any Allowed EID-prefixes: EID-prefix: 192.168.9.0/24 instance-id 101 First registered: 01:55:47 Last registered: 01:55:47 Routing table tag: 0 Origin: Configuration Merge active: No Proxy reply: Yes TTL: 1d00h State: complete Registration errors: Authentication failures: 0 Allowed locators mismatch: 0 ETR 10.1.8.8, last registered 01:55:47, proxy-reply, map-notify TTL 1d00h, no merge, hash-function sha1, nonce 0xB06AE31D-0x6ADB0BA5 state complete, no security-capability xTR-ID 0x77200484-0xD134DC48-0x0FBAD9DC-0x4A46CA5D site-ID unspecified sourced by reliable transport Locator Local State Pri/Wgt Scope 10.1.8.8 yes up 50/50 IPv4 none ''' # 'show lisp all instance-id 101 service ipv6 server detail internal' ShowLispInstance101Ipv6ServerDetailInternal = '''\ ''' # 'show lisp all instance-id 101 service ethernet server detail internal' ShowLispInstance101EthernetServerDetailInternal = '''\ ''' # -------------------------------------------------------------------------- # 'show lisp all extranet <extranet> instance-id <instance_id>' # -------------------------------------------------------------------------- # 'show lisp all extranet ext1 instance-id 101' ShowLispExtranet101 = '''\ 204-MSMR#show lisp all extranet ext1 instance-id 101 Output for router lisp 0 ----------------------------------------------------- LISP Extranet table Home Instance ID: 101 Total entries: 6 Provider/Subscriber Inst ID EID prefix Provider 103 10.121.88.0/24 Provider 103 10.220.100.0/24 Provider 103 192.168.195.0/24 Subscriber 102 172.16.1.0/24 Subscriber 101 192.168.0.0/24 Subscriber 101 192.168.9.0/24 ''' # -------------------------------------------------------------------------- # 'show lisp all instance-id <instance_id> <service> statistics' # -------------------------------------------------------------------------- # 'show lisp all instance-id 101 ipv4 statistics' ShowLispInstance101Ipv4Stats = ''' 202-XTR#show lisp all instance-id 101 ipv4 statistics ===================================================== Output for router lisp 0 ===================================================== LISP EID Statistics for instance ID 101 - last cleared: never Control Packets: Map-Requests in/out: 0/4 Encapsulated Map-Requests in/out: 0/3 RLOC-probe Map-Requests in/out: 0/1 SMR-based Map-Requests in/out: 0/0 Map-Requests expired on-queue/no-reply: 0/0 Map-Resolver Map-Requests forwarded: 0 Map-Server Map-Requests forwarded: 0 Map-Reply records in/out: 2/1 Authoritative records in/out: 1/1 Non-authoritative records in/out: 1/0 Negative records in/out: 0/0 RLOC-probe records in/out: 1/1 Map-Server Proxy-Reply records out: 0 WLC Map-Subscribe records in/out: 0/1 Map-Subscribe failures in/out: 0/0 WLC Map-Unsubscribe records in/out: 0/0 Map-Unsubscribe failures in/out: 0/0 
Map-Register records in/out: 0/2857 Map-Server AF disabled: 0 Authentication failures: 0 WLC Map-Register records in/out: 0/0 WLC AP Map-Register in/out: 0/0 WLC Client Map-Register in/out: 0/0 WLC Map-Register failures in/out: 0/0 Map-Notify records in/out: 4/0 Authentication failures: 0 WLC Map-Notify records in/out: 0/0 WLC AP Map-Notify in/out: 0/0 WLC Client Map-Notify in/out: 0/0 WLC Map-Notify failures in/out: 0/0 Dropped control packets in input queue: 0 Deferred packet transmission: 0/0 DDT referral deferred/dropped: 0/0 DDT request deferred/dropped: 0/0 Map-Reply deferred/dropped: 0/0 MR negative Map-Reply deferred/dropped: 0/0 MR Map-Request fwd deferred/dropped: 0/0 MS Map-Request fwd deferred/dropped: 0/0 MS proxy Map-Reply deferred/dropped: 0/0 xTR mcast Map-Notify deferred/dropped: 0/0 MS Info-Reply deferred/dropped: 0/0 RTR Map-Register fwd deferred/dropped: 0/0 RTR Map-Notify fwd deferred/dropped: 0/0 ETR Info-Request deferred/dropped: 0/0 Errors: Map-Request invalid source rloc drops: 0 Map-Register invalid source rloc drops: 0 DDT ITR Map-Requests dropped: 0 (nonce-collision: 0, bad-xTR-nonce: 0) Cache Related: Cache entries created/deleted: 3/1 NSF CEF replay entry count 0 Number of EID-prefixes in map-cache: 2 Number of negative entries in map-cache: 1 Total number of RLOCs in map-cache: 1 Average RLOCs per EID-prefix: 1 Forwarding: Number of data signals processed: 1 (+ dropped 0) Number of reachability reports: 0 (+ dropped 0) ITR Map-Resolvers: Map-Resolver LastReply Metric ReqsSent Positive Negative No-Reply 10.64.4.4 03:13:58 4 1 1 0 0 10.166.13.13 03:13:58 26 2 0 0 1 LISP RLOC Statistics - last cleared: never Control Packets: RTR Map-Requests forwarded: 0 RTR Map-Notifies forwarded: 0 DDT-Map-Requests in/out: 0/0 DDT-Map-Referrals in/out: 0/0 Errors: Map-Request format errors: 0 Map-Reply format errors: 0 Map-Referral format errors: 0 Mapping record TTL alerts: 0 DDT Requests failed: 0 LISP Miscellaneous Statistics - last cleared: never Errors: Invalid IP version drops: 0 Invalid IP header drops: 0 Invalid IP proto field drops: 0 Invalid packet size dropss: 0 Invalid LISP control port drops: 0 Invalid LISP checksum drops: 0 Unsupported LISP packet type drops: 0 Unknown packet drops: 0 ''' # 'show lisp all instance-id 101 ipv6 statistics' ShowLispInstance101Ipv6Stats = ''' 202-XTR#show lisp all instance-id 101 ipv6 statistics ===================================================== Output for router lisp 0 ===================================================== LISP EID Statistics for instance ID 101 - last cleared: never Control Packets: Map-Requests in/out: 0/6 Encapsulated Map-Requests in/out: 0/5 RLOC-probe Map-Requests in/out: 0/1 SMR-based Map-Requests in/out: 0/0 Map-Requests expired on-queue/no-reply 0/1 Map-Resolver Map-Requests forwarded: 0 Map-Server Map-Requests forwarded: 0 Map-Reply records in/out: 2/1 Authoritative records in/out: 1/1 Non-authoritative records in/out: 1/0 Negative records in/out: 0/0 RLOC-probe records in/out: 1/1 Map-Server Proxy-Reply records out: 0 WLC Map-Subscribe records in/out: 0/2 Map-Subscribe failures in/out: 0/0 WLC Map-Unsubscribe records in/out: 0/0 Map-Unsubscribe failures in/out: 0/0 Map-Register records in/out: 0/52 Map-Server AF disabled: 0 Authentication failures: 0 WLC Map-Register records in/out: 0/0 WLC AP Map-Register in/out: 0/0 WLC Client Map-Register in/out: 0/0 WLC Map-Register failures in/out: 0/0 Map-Notify records in/out: 2/0 Authentication failures: 0 WLC Map-Notify records in/out: 0/0 WLC AP Map-Notify 
in/out: 0/0 WLC Client Map-Notify in/out: 0/0 WLC Map-Notify failures in/out: 0/0 Dropped control packets in input queue: 0 Deferred packet transmission: 0/0 DDT referral deferred/dropped: 0/0 DDT request deferred/dropped: 0/0 Map-Reply deferred/dropped: 0/0 MR negative Map-Reply deferred/dropped: 0/0 MR Map-Request fwd deferred/dropped: 0/0 MS Map-Request fwd deferred/dropped: 0/0 MS proxy Map-Reply deferred/dropped: 0/0 xTR mcast Map-Notify deferred/dropped: 0/0 MS Info-Reply deferred/dropped: 0/0 RTR Map-Register fwd deferred/dropped: 0/0 RTR Map-Notify fwd deferred/dropped: 0/0 ETR Info-Request deferred/dropped: 0/0 Errors: Map-Request invalid source rloc drops: 0 Map-Register invalid source rloc drops: 0 DDT ITR Map-Requests dropped: 0 (nonce-collision: 0, bad-xTR-nonce: 0) Cache Related: Cache entries created/deleted: 4/2 NSF CEF replay entry count 0 Number of EID-prefixes in map-cache: 2 Number of negative entries in map-cache: 1 Total number of RLOCs in map-cache: 1 Average RLOCs per EID-prefix: 1 Forwarding: Number of data signals processed: 2 (+ dropped 0) Number of reachability reports: 0 (+ dropped 0) ITR Map-Resolvers: Map-Resolver LastReply Metric ReqsSent Positive Negative No-Reply 10.64.4.4 00:15:36 19 2 1 0 1 10.166.13.13 00:17:11 31 3 0 0 2 LISP RLOC Statistics - last cleared: never Control Packets: RTR Map-Requests forwarded: 0 RTR Map-Notifies forwarded: 0 DDT-Map-Requests in/out: 0/0 DDT-Map-Referrals in/out: 0/0 Errors: Map-Request format errors: 0 Map-Reply format errors: 0 Map-Referral format errors: 0 Mapping record TTL alerts: 0 DDT Requests failed: 0 LISP Miscellaneous Statistics - last cleared: never Errors: Invalid IP version drops: 0 Invalid IP header drops: 0 Invalid IP proto field drops: 0 Invalid packet size dropss: 0 Invalid LISP control port drops: 0 Invalid LISP checksum drops: 0 Unsupported LISP packet type drops: 0 Unknown packet drops: 0 ''' # 'show lisp all instance-id 101 ethernet statistics' ShowLispInstance101EthernetStats = '''\ ''' # -------------------------------------------------------------------------- # 'show lisp all instance-id <instance_id> <service> server summary' # -------------------------------------------------------------------------- ShowLispInstance101Ipv4ServerSummary = ''' 204-MSMR#show lisp all instance-id 101 ipv4 server summary ===================================================== Output for router lisp 0 ===================================================== ----------- IPv4 ----------- Site name Configured Registered Incons xtr1_1 1 1 0 xtr2 1 1 0 Number of configured sites: 2 Number of registered sites: 2 Sites with inconsistent registrations: 0 IPv4 Number of configured EID prefixes: 2 Number of registered EID prefixes: 2 ''' ShowLispInstance101Ipv6ServerSummary = '''\ 204-MSMR#show lisp all instance-id 101 ipv6 server summary ===================================================== Output for router lisp 0 ===================================================== ----------- IPv6 ----------- Site name Configured Registered Incons xtr1_1 1 1 0 xtr2 1 1 0 Number of configured sites: 2 Number of registered sites: 2 Sites with inconsistent registrations: 0 IPv6 Number of configured EID prefixes: 2 Number of registered EID prefixes: 2 ''' ShowLispInstance101EthernetServerSummary = '''\ ''' # -------------------------------------------------------------------------- # 'show lisp all instance-id <instance-d> <service> map-cache' # -------------------------------------------------------------------------- # 'show lisp all instance-id 
101 ipv4 map-cache' ShowLispInstance101Ipv4MapCache= '''\ 202-XTR#show lisp all instance-id 101 ipv4 map-cache ===================================================== Output for router lisp 0 ===================================================== LISP IPv4 Mapping Cache for EID-table vrf red (IID 101), 2 entries 0.0.0.0/0, uptime: 15:23:50, expires: never, via static-send-map-request Negative cache entry, action: send-map-request 192.168.9.0/24, uptime: 00:04:02, expires: 23:55:57, via map-reply, complete Locator Uptime State Pri/Wgt Encap-IID 10.1.8.8 00:04:02 up 50/50 - ''' # 'show lisp all instance-id 101 ipv6 map-cache' ShowLispInstance101Ipv6MapCache = '''\ 202-XTR#show lisp all instance-id 101 ipv6 map-cache ===================================================== Output for router lisp 0 ===================================================== LISP IPv6 Mapping Cache for EID-table vrf red (IID 101), 2 entries ::/0, uptime: 00:11:28, expires: never, via static-send-map-request Negative cache entry, action: send-map-request 2001:192:168:9::/64, uptime: 00:06:51, expires: 23:53:08, via map-reply, complete Locator Uptime State Pri/Wgt Encap-IID 10.1.8.8 00:06:51 up 50/50 - 172.16.10.0/24, uptime: 00:00:00, expires: 23:59:59, via map-reply, complete Locator Uptime State Pri/Wgt 172.16.156.134 00:00:00 up 1/50 192.168.65.94 00:00:00 up 1/50 2001:DB8:BBED:2829::80DF:9C86 00:00:00 up 2/100 ''' # 'show lisp all instance-id 101 ethernet map-cache' ShowLispInstance101EthernetMapCache = '''\ ''' # -------------------------------------------------------------------------- # 'show lisp all instance-id <instance_id> <service> dabatase' # -------------------------------------------------------------------------- # 'show lisp all instance-id 101 ipv4 database' ShowLispInstance101Ipv4Database = '''\ 202-XTR#show lisp all instance-id 101 ipv4 database ===================================================== Output for router lisp 0 ===================================================== LISP ETR IPv4 Mapping Database for EID-table vrf red (IID 101), LSBs: 0x1 Entries total 1, no-route 0, inactive 0 192.168.0.0/24, locator-set RLOC Locator Pri/Wgt Source State 10.16.2.2 50/50 cfg-intf site-self, reachable ''' # 'show lisp all instance-id 101 ipv6 database' ShowLispInstance101Ipv6Database = '''\ 202-XTR#show lisp all instance-id 101 ipv6 database ===================================================== Output for router lisp 0 ===================================================== LISP ETR IPv6 Mapping Database for EID-table vrf red (IID 101), LSBs: 0x1 Entries total 1, no-route 0, inactive 0 2001:192:168::/64, locator-set RLOC Locator Pri/Wgt Source State 10.16.2.2 50/50 cfg-intf site-self, reachable ''' # 'show lisp all instance-id 101 ethernet database' ShowLispInstance101EthernetDatabase = ''' 202-XTR#show lisp all instance-id 101 ethernet database ================================================= Output for router lisp 0 ================================================= LISP ETR MAC Mapping Database for EID-table Vlan 101 (IID 101), LSBs: 0x1 Entries total 2, no-route 0, inactive 0 0050.56b0.6a0e/48, dynamic-eid Auto-L2-group-1, inherited from default locator-set RLOC Locator Pri/Wgt Source State 10.229.11.1 1/100 cfg-intf site-self, reachable cafe.cafe.cafe/48, dynamic-eid Auto-L2-group-1, inherited from default locator-set RLOC Locator Pri/Wgt Source State 10.229.11.1 1/100 cfg-intf site-self, reachable ''' ############################################################################ # LISP INFO STRUCTURE 
    ############################################################################

    LispInfo = {
        'lisp_router_instances': {
            0: {
                'lisp_router_id': {
                    'site_id': 'unspecified',
                    'xtr_id': '0x730E0861-0x12996F6D-0xEFEA2114-0xE1C951F7'},
                'lisp_router_instance_id': 0,
                'locator_sets': {
                    'RLOC': {'locator_set_name': 'RLOC'}},
                'service': {
                    'ethernet': {
                        'etr': {
                            'local_eids': {
                                '101': {
                                    'dynamic_eids': {
                                        '0050.56b0.6a0e/48': {
                                            'eid_address': {
                                                'address_type': 'ethernet',
                                                'vrf': '101'},
                                            'id': '0050.56b0.6a0e/48',
                                            'loopback_address': '10.229.11.1',
                                            'priority': 1,
                                            'rlocs': 'RLOC',
                                            'weight': 100},
                                        'cafe.cafe.cafe/48': {
                                            'eid_address': {
                                                'address_type': 'ethernet',
                                                'vrf': '101'},
                                            'id': 'cafe.cafe.cafe/48',
                                            'loopback_address': '10.229.11.1',
                                            'priority': 1,
                                            'rlocs': 'RLOC',
                                            'weight': 100}},
                                    'vni': '101'}}},
                        'service': 'ethernet',
                        'virtual_network_ids': {
                            '101': {'lisp_role': {'none': {'lisp_role_type': 'none'}}}}},
                    'ipv4': {
                        'etr': {
                            'enabled': True,
                            'encapsulation': 'lisp',
                            'local_eids': {
                                '101': {
                                    'eids': {
                                        '192.168.0.0/24': {
                                            'eid_address': {
                                                'address_type': 'ipv4',
                                                'vrf': 'red'},
                                            'id': '192.168.0.0/24',
                                            'loopback_address': '10.16.2.2',
                                            'priority': 50,
                                            'rlocs': 'RLOC',
                                            'weight': 50}},
                                    'use_petrs': {
                                        '10.10.10.10': {'use_petr': '10.10.10.10'}},
                                    'vni': '101'}},
                            'mapping_servers': {
                                '10.166.13.13': {'ms_address': '10.166.13.13'},
                                '10.64.4.4': {'ms_address': '10.64.4.4'}}},
                        'itr': {
                            'enabled': True,
                            'map_cache': {
                                '101': {
                                    'mappings': {
                                        '0.0.0.0/0': {
                                            'creation_time': '15:23:50',
                                            'eid': {
                                                'address_type': 'ipv4-afi',
                                                'ipv4': {'ipv4': '0.0.0.0/0'},
                                                'vrf': 'red'},
                                            'id': '0.0.0.0/0',
                                            'negative_mapping': {'map_reply_action': 'send-map-request'},
                                            'time_to_live': 'never'},
                                        '192.168.9.0/24': {
                                            'creation_time': '00:04:02',
                                            'eid': {
                                                'address_type': 'ipv4-afi',
                                                'ipv4': {'ipv4': '192.168.9.0/24'},
                                                'vrf': 'red'},
                                            'id': '192.168.9.0/24',
                                            'positive_mapping': {
                                                'rlocs': {
                                                    1: {
                                                        'id': '1',
                                                        'locator_address': {
                                                            'address_type': 'ipv4-afi',
                                                            'ipv4': {'ipv4': '10.1.8.8'},
                                                            'virtual_network_id': '101'},
                                                        'priority': 50,
                                                        'weight': 50}}},
                                            'time_to_live': '23:55:57'}},
                                    'vni': '101'}},
                            'map_resolvers': {
                                '10.166.13.13': {'map_resolver': '10.166.13.13'},
                                '10.64.4.4': {'map_resolver': '10.64.4.4'}},
                            'proxy_itrs': {
                                '10.10.10.10': {'proxy_etr_address': '10.10.10.10'}}},
                        'map_server': {
                            'enabled': False,
                            'sites': {
                                'provider': {'site_id': 'provider'},
                                'xtr1_1': {'site_id': 'xtr1_1'},
                                'xtr1_2': {'site_id': 'xtr1_2'},
                                'xtr2': {'site_id': 'xtr2'}},
                            'summary': {
                                'af_datum': {
                                    'ipv4-afi': {
                                        'address_type': 'ipv4-afi',
                                        'number_configured_eids': 2,
                                        'number_registered_eids': 2}},
                                'number_configured_sites': 2,
                                'number_registered_sites': 2},
                            'virtual_network_ids': {
                                '101': {
                                    'counters': {
                                        'map_notify_records_out': '0',
                                        'map_registers_in': '0',
                                        'map_registers_in_auth_failed': '0',
                                        'map_requests_forwarded_out': '0',
                                        'proxy_reply_records_out': '0'},
                                    'extranets': {
                                        'ext1': {
                                            'extranet': 'ext1',
                                            'home_instance_id': 101,
                                            'subscriber': {
                                                '192.168.0.0/24': {
                                                    'bidirectional': True,
                                                    'eid_record': '192.168.0.0/24'},
                                                '192.168.9.0/24': {
                                                    'bidirectional': True,
                                                    'eid_record': '192.168.9.0/24'}}}},
                                    'mappings': {
                                        '192.168.0.0/24': {
                                            'eid_address': {
                                                'address_type': 'ipv4-afi',
                                                'ipv4': {'ipv4': '192.168.0.0/24'},
                                                'virtual_network_id': '101'},
                                            'eid_id': '192.168.0.0/24',
                                            'more_specifics_accepted': True,
                                            'site_id': 'xtr1_1'},
                                        '192.168.0.1/32': {
                                            'eid_address': {
                                                'address_type': 'ipv4-afi',
                                                'ipv4': {'ipv4': '192.168.0.1/32'},
                                                'virtual_network_id': '101'},
                                            'eid_id': '192.168.0.1/32',
                                            'mapping_records': {
                                                '0x21EDD25F-0x7598784C-0x769C8E4E-0xC04926EC': {
                                                    'creation_time': '01:12:41',
                                                    'eid': {
                                                        'address_type': 'ipv4-afi',
                                                        'ipv4': {'ipv4': '192.168.0.1/32'},
                                                        'virtual_network_id': '101'},
                                                    'site_id': 'unspecified',
                                                    'time_to_live': 86400,
                                                    'xtr_id': '0x21EDD25F-0x7598784C-0x769C8E4E-0xC04926EC'}},
                                            'site_id': 'xtr1_1'},
                                        '192.168.9.0/24': {
                                            'eid_address': {
                                                'address_type': 'ipv4-afi',
                                                'ipv4': {'ipv4': '192.168.9.0/24'},
                                                'virtual_network_id': '101'},
                                            'eid_id': '192.168.9.0/24',
                                            'mapping_records': {
                                                '0x77200484-0xD134DC48-0x0FBAD9DC-0x4A46CA5D': {
                                                    'creation_time': '01:55:47',
                                                    'eid': {
                                                        'address_type': 'ipv4-afi',
                                                        'ipv4': {'ipv4': '192.168.9.0/24'},
                                                        'virtual_network_id': '101'},
                                                    'site_id': 'unspecified',
                                                    'time_to_live': 86400,
                                                    'xtr_id': '0x77200484-0xD134DC48-0x0FBAD9DC-0x4A46CA5D'}},
                                            'site_id': 'xtr2'}},
                                    'vni': '101'},
                                '102': {
                                    'extranets': {
                                        'ext1': {
                                            'extranet': 'ext1',
                                            'home_instance_id': 101,
                                            'subscriber': {
                                                '172.16.1.0/24': {
                                                    'bidirectional': True,
                                                    'eid_record': '172.16.1.0/24'}}}},
                                    'vni': '102'},
                                '103': {
                                    'extranets': {
                                        'ext1': {
                                            'extranet': 'ext1',
                                            'home_instance_id': 101,
                                            'provider': {
                                                '10.220.100.0/24': {
                                                    'bidirectional': True,
                                                    'eid_record': '10.220.100.0/24'},
                                                '192.168.195.0/24': {
                                                    'bidirectional': True,
                                                    'eid_record': '192.168.195.0/24'},
                                                '10.121.88.0/24': {
                                                    'bidirectional': True,
                                                    'eid_record': '10.121.88.0/24'}}}},
                                    'vni': '103'}}},
                        'service': 'ipv4',
                        'virtual_network_ids': {
                            '101': {'lisp_role': {'itr-etr': {'lisp_role_type': 'itr-etr'}}}}},
                    'ipv6': {
                        'etr': {
                            'enabled': True,
                            'encapsulation': 'lisp',
                            'local_eids': {
                                '101': {
                                    'eids': {
                                        '2001:192:168::/64': {
                                            'eid_address': {
                                                'address_type': 'ipv6',
                                                'vrf': 'red'},
                                            'id': '2001:192:168::/64',
                                            'loopback_address': '10.16.2.2',
                                            'priority': 50,
                                            'rlocs': 'RLOC',
                                            'weight': 50}},
                                    'use_petrs': {
                                        '10.10.10.10': {'use_petr': '10.10.10.10'}},
                                    'vni': '101'}},
                            'mapping_servers': {
                                '10.66.12.12': {'ms_address': '10.66.12.12'},
                                '10.100.5.5': {'ms_address': '10.100.5.5'}}},
                        'itr': {
                            'enabled': True,
                            'map_cache': {
                                '101': {
                                    'mappings': {
                                        '172.16.10.0/24': {
                                            'creation_time': '00:00:00',
                                            'eid': {
                                                'address_type': 'ipv4-afi',
                                                'ipv4': {'ipv4': '172.16.10.0/24'},
                                                'vrf': 'red'},
                                            'id': '172.16.10.0/24',
                                            'positive_mapping': {
                                                'rlocs': {
                                                    1: {
                                                        'id': '1',
                                                        'locator_address': {
                                                            'address_type': 'ipv4-afi',
                                                            'ipv4': {'ipv4': '172.16.156.134'},
                                                            'virtual_network_id': '101'},
                                                        'priority': 1,
                                                        'weight': 50},
                                                    2: {
                                                        'id': '2',
                                                        'locator_address': {
                                                            'address_type': 'ipv4-afi',
                                                            'ipv4': {'ipv4': '192.168.65.94'},
                                                            'virtual_network_id': '101'},
                                                        'priority': 1,
                                                        'weight': 50},
                                                    3: {
                                                        'id': '3',
                                                        'locator_address': {
                                                            'address_type': 'ipv6-afi',
                                                            'ipv6': {'ipv6': '2001:DB8:BBED:2829::80DF:9C86'},
                                                            'virtual_network_id': '101'},
                                                        'priority': 2,
                                                        'weight': 100}}},
                                            'time_to_live': '23:59:59'},
                                        '2001:192:168:9::/64': {
                                            'creation_time': '00:06:51',
                                            'eid': {
                                                'address_type': 'ipv6-afi',
                                                'vrf': 'red'},
                                            'id': '2001:192:168:9::/64',
                                            'positive_mapping': {
                                                'rlocs': {
                                                    1: {
                                                        'id': '1',
                                                        'locator_address': {
                                                            'address_type': 'ipv4-afi',
                                                            'ipv4': {'ipv4': '10.1.8.8'},
                                                            'virtual_network_id': '101'},
                                                        'priority': 50,
                                                        'weight': 50}}},
                                            'time_to_live': '23:53:08'},
                                        '::/0': {
                                            'creation_time': '00:11:28',
                                            'eid': {
                                                'address_type': 'ipv6-afi',
                                                'vrf': 'red'},
                                            'id': '::/0',
                                            'negative_mapping': {'map_reply_action': 'send-map-request'},
                                            'time_to_live': 'never'}},
                                    'vni': '101'}},
                            'map_resolvers': {
                                '10.66.12.12': {'map_resolver': '10.66.12.12'},
                                '10.100.5.5': {'map_resolver': '10.100.5.5'}}},
                        'map_server': {
                            'enabled': False,
                            'summary': {
                                'af_datum': {
                                    'ipv6-afi': {
                                        'address_type': 'ipv6-afi',
                                        'number_configured_eids': 2,
                                        'number_registered_eids': 2}},
                                'number_configured_sites': 2,
                                'number_registered_sites': 2},
                            'virtual_network_ids': {
                                '101': {
                                    'counters': {
                                        'map_notify_records_out': '0',
                                        'map_registers_in': '0',
                                        'map_registers_in_auth_failed': '0',
                                        'map_requests_forwarded_out': '0',
                                        'proxy_reply_records_out': '0'}}}},
                        'service': 'ipv6',
                        'virtual_network_ids': {
                            '101': {'lisp_role': {'itr-etr': {'lisp_role_type': 'itr-etr'}}}}}}}}}
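The class above pairs each canned CLI output with LispInfo, the structure a Genie Ops object is expected to build from it. As a rough illustration of how such golden outputs are typically exercised (a minimal sketch, not the actual Genie Ops test harness: parse_summary and the Mock device are hypothetical stand-ins), the command string is stubbed at the device boundary and the parsed result is asserted against known values:

import re
from unittest.mock import Mock

def parse_summary(output):
    # Pull a few scalar counters out of 'show lisp all service ... summary'.
    patterns = {
        'instance_count': r'Instance count:\s+(\d+)',
        'num_eid_tables': r'Number of eid-tables:\s+(\d+)',
        'total_map_cache_entries': r'Total number of map-cache entries:\s+(\d+)',
    }
    parsed = {}
    for key, pattern in patterns.items():
        match = re.search(pattern, output)
        if match:
            parsed[key] = int(match.group(1))
    return parsed

# Stub the device so execute() returns the golden output instead of
# talking to a router, then assert on the parsed structure.
device = Mock()
device.execute.return_value = LispOutput.ShowLispServiceIpv4Summary
parsed = parse_summary(device.execute('show lisp all service ipv4 summary'))
assert parsed == {'instance_count': 2,
                  'num_eid_tables': 2,
                  'total_map_cache_entries': 3}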
path: tests/test_observable/test_doaction.py
repo: mmpio/RxPY (head 4ed60bb5c04aa85de5210e5537a6adfe1b667d50)
hexsha: 0bcfdedafdd898a11c28f36ad3c198297beeafa8
size: 12,294 bytes | ext: py | lang: Python | licenses: [ "MIT" ]
stars: 4,342 (2015-01-06T09:00:23.000Z to 2022-03-28T15:05:50.000Z)
issues: 613 (2015-01-07T20:44:56.000Z to 2022-03-20T06:14:20.000Z)
forks: 420 (2015-01-07T14:30:30.000Z to 2022-03-11T22:47:46.000Z)
import unittest

import rx
from rx import operators as _
from rx.testing import TestScheduler, ReactiveTest

on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created


class TestDo(unittest.TestCase):

    def test_do_should_see_all_values(self):
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(
            on_next(150, 1), on_next(210, 2), on_next(220, 3),
            on_next(230, 4), on_next(240, 5), on_completed(250))
        i = [0]
        sum = [2 + 3 + 4 + 5]

        def create():
            def action(x):
                i[0] += 1
                sum[0] -= x
                return sum[0]
            return xs.pipe(_.do_action(action))

        scheduler.start(create)

        self.assertEqual(4, i[0])
        self.assertEqual(0, sum[0])

    def test_do_plain_action(self):
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(
            on_next(150, 1), on_next(210, 2), on_next(220, 3),
            on_next(230, 4), on_next(240, 5), on_completed(250))
        i = [0]

        def create():
            def action(x):
                i[0] += 1
                return i[0]
            return xs.pipe(_.do_action(action))

        scheduler.start(create)

        self.assertEqual(4, i[0])

    def test_do_next_completed(self):
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(
            on_next(150, 1), on_next(210, 2), on_next(220, 3),
            on_next(230, 4), on_next(240, 5), on_completed(250))
        i = [0]
        sum = [2 + 3 + 4 + 5]
        completed = [False]

        def create():
            def on_next(x):
                i[0] += 1
                sum[0] -= x

            def on_completed():
                completed[0] = True
            return xs.pipe(_.do_action(on_next=on_next, on_completed=on_completed))

        scheduler.start(create)

        self.assertEqual(4, i[0])
        self.assertEqual(0, sum[0])
        assert(completed[0])

    def test_do_next_completed_never(self):
        scheduler = TestScheduler()
        i = [0]
        completed = False

        def create():
            nonlocal completed

            def on_next(x):
                i[0] += 1

            def on_completed():
                nonlocal completed
                completed = True
            return rx.never().pipe(
                _.do_action(on_next=on_next, on_completed=on_completed),
            )

        scheduler.start(create)

        self.assertEqual(0, i[0])
        assert(not completed)

    def test_do_action_without_next(self):
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(
            on_next(150, 1), on_next(210, 2), on_completed(250))
        completed = [False]

        def create():
            def on_completed():
                completed[0] = True
            return xs.pipe(_.do_action(on_completed=on_completed))

        scheduler.start(create)

        assert(completed[0])

    # def test_do_next_error(self):
    #     ex = 'ex'
    #     scheduler = TestScheduler()
    #     xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_next(220, 3), on_next(230, 4), on_next(240, 5), on_error(250, ex))
    #     i = [0]
    #     sum = [2 + 3 + 4 + 5]
    #     saw_error = False
    #     scheduler.start(create)
    #     return xs.do_action(function (x) {
    #         i[0] += 1
    #         sum -= x
    #     }, function (e) {
    #         saw_error = e == ex
    #     self.assertEqual(4, i)
    #     self.assertEqual(0, sum)
    #     assert(saw_error)

    # def test_do_next_error_not(self):
    #     scheduler = TestScheduler()
    #     xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_next(220, 3), on_next(230, 4), on_next(240, 5), on_completed(250))
    #     i = [0]
    #     sum = [2 + 3 + 4 + 5]
    #     saw_error = False
    #     scheduler.start(create)
    #     return xs.do_action(function (x) {
    #         i[0] += 1
    #         sum -= x
    #     }, function (e) {
    #         saw_error = True
    #     self.assertEqual(4, i)
    #     self.assertEqual(0, sum)
    #     assert(not saw_error)

    # def test_do_next_error_completed(self):
    #     scheduler = TestScheduler()
    #     xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_next(220, 3), on_next(230, 4), on_next(240, 5), on_completed(250))
    #     i = [0]
    #     sum = [2 + 3 + 4 + 5]
    #     saw_error = False
    #     has_completed = False
    #     scheduler.start(create)
    #     return xs.do_action(function (x) {
    #         i[0] += 1
    #         sum -= x
    #     }, function (e) {
    #         saw_error = True
    #     }, function () {
    #         has_completed = True
    #     self.assertEqual(4, i)
    #     self.assertEqual(0, sum)
    #     assert(not saw_error)
    #     assert(has_completed)

    # def test_do_next_error_completed_error(self):
    #     ex = 'ex'
    #     scheduler = TestScheduler()
    #     xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_next(220, 3), on_next(230, 4), on_next(240, 5), on_error(250, ex))
    #     i = [0]
    #     sum = [2 + 3 + 4 + 5]
    #     saw_error = False
    #     has_completed = False
    #     scheduler.start(create)
    #     return xs.do_action(function (x) {
    #         i[0] += 1
    #         sum -= x
    #     }, function (e) {
    #         saw_error = ex == e
    #     }, function () {
    #         has_completed = True
    #     self.assertEqual(4, i)
    #     self.assertEqual(0, sum)
    #     assert(saw_error)
    #     assert(not has_completed)

    # def test_do_next_error_completed_never(self):
    #     scheduler = TestScheduler()
    #     i = [0]
    #     saw_error = False
    #     has_completed = False
    #     scheduler.start(create)
    #     return rx.never().do_action(function (x) {
    #         i[0] += 1
    #     }, function (e) {
    #         saw_error = True
    #     }, function () {
    #         has_completed = True
    #     self.assertEqual(0, i)
    #     assert(not saw_error)
    #     assert(not has_completed)

    # def test_Do_Observer_SomeDataWithError(self):
    #     ex = 'ex'
    #     scheduler = TestScheduler()
    #     xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_next(220, 3), on_next(230, 4), on_next(240, 5), on_error(250, ex))
    #     i = [0]
    #     sum = [2 + 3 + 4 + 5]
    #     saw_error = False
    #     has_completed = False
    #     scheduler.start(create)
    #     return xs.do_action(Observer.create(function (x) {
    #         i[0] += 1
    #         sum -= x
    #     }, function (e) {
    #         saw_error = e == ex
    #     }, function () {
    #         has_completed = True
    #     }))
    #     self.assertEqual(4, i)
    #     self.assertEqual(0, sum)
    #     assert(saw_error)
    #     assert(not has_completed)

    # def test_do_observer_some_data_with_error(self):
    #     scheduler = TestScheduler()
    #     xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_next(220, 3), on_next(230, 4), on_next(240, 5), on_completed(250))
    #     i = [0]
    #     sum = [2 + 3 + 4 + 5]
    #     saw_error = False
    #     has_completed = False
    #     scheduler.start(create)
    #     return xs.do_action(Observer.create(function (x) {
    #         i[0] += 1
    #         sum -= x
    #     }, function (e) {
    #         saw_error = True
    #     }, function () {
    #         has_completed = True
    #     }))
    #     self.assertEqual(4, i)
    #     self.assertEqual(0, sum)
    #     assert(not saw_error)
    #     assert(has_completed)

    # def test_do1422_next_next_throws(self):
    #     ex = 'ex'
    #     scheduler = TestScheduler()
    #     xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_completed(250))
    #     results = scheduler.start(create)
    #     return xs.do_action(function () {
    #         raise Exception(ex)
    #     assert results.messages == [on_error(210, ex)]

    # def test_do1422_next_completed_next_throws(self):
    #     ex = 'ex'
    #     scheduler = TestScheduler()
    #     xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_completed(250))
    #     results = scheduler.start(create)
    #     return xs.do_action(function () {
    #         throw ex
    #     }, _undefined, function () {
    #     assert results.messages == [on_error(210, ex)]

    # def test_do1422_next_completed_completed_throws(self):
    #     ex = 'ex'
    #     scheduler = TestScheduler()
    #     xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_completed(250))
    #     results = scheduler.start(create)
    #     return xs.do_action(function () { }, _undefined, function () {
    #         throw ex
    #     assert results.messages == [on_next(210, 2), on_error(250, ex)]

    # def test_do1422_next_error_next_throws(self):
    #     ex = 'ex'
    #     scheduler = TestScheduler()
    #     xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_completed(250))
    #     results = scheduler.start(create)
    #     return xs.do_action(function () {
    #         raise Exception(ex)
    #     }, function () {
    #     assert results.messages == [on_error(210, ex)]

    # def test_Do1422_NextError_NextThrows(self):
    #     var ex1, ex2, results, scheduler, xs
    #     ex1 = 'ex1'
    #     ex2 = 'ex2'
    #     scheduler = TestScheduler()
    #     xs = scheduler.create_hot_observable(on_next(150, 1), on_error(210, ex1))
    #     results = scheduler.start(create)
    #     return xs.do_action(function () { }, function () {
    #         raise Exception(ex)2
    #     assert results.messages == [on_error(210, ex2)]

    # def test_Do1422_NextErrorCompleted_NextThrows(self):
    #     var ex, results, scheduler, xs
    #     ex = 'ex'
    #     scheduler = TestScheduler()
    #     xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_completed(250))
    #     results = scheduler.start(create)
    #     return xs.do_action(function () {
    #         raise Exception(ex)
    #     }, function () { }, function () {
    #     assert results.messages == [on_error(210, ex)]

    # def test_do1422_next_error_completed_error_throws(self):
    #     var ex1, ex2, results, scheduler, xs
    #     ex1 = 'ex1'
    #     ex2 = 'ex2'
    #     scheduler = TestScheduler()
    #     xs = scheduler.create_hot_observable(on_next(150, 1), on_error(210, ex1))
    #     results = scheduler.start(create)
    #     return xs.do_action(function () { }, function () {
    #         raise Exception(ex)2
    #     }, function () {
    #     assert results.messages == [on_error(210, ex2)]

    # def test_do1422_next_error_completed_completed_throws(self):
    #     ex = 'ex'
    #     scheduler = TestScheduler()
    #     xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_completed(250))
    #     results = scheduler.start(create)
    #     return xs.do_action(function () { }, function () { }, function () {
    #         raise Exception(ex)
    #     assert results.messages == [on_next(210, 2), on_error(250, ex)]

    # def test_do1422_observer_next_throws(self):
    #     ex = 'ex'
    #     scheduler = TestScheduler()
    #     xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_completed(250))
    #     results = scheduler.start(create)
    #     return xs.do_action(Observer.create(function () {
    #         raise Exception(ex)
    #     }, function () { }, function () { }))
    #     assert results.messages == [on_error(210, ex)]

    # def test_do1422_observer_error_throws(self):
    #     var ex1, ex2, results, scheduler, xs
    #     ex1 = 'ex1'
    #     ex2 = 'ex2'
    #     scheduler = TestScheduler()
    #     xs = scheduler.create_hot_observable(on_next(150, 1), on_error(210, ex1))
    #     results = scheduler.start(create)
    #     return xs.do_action(Observer.create(function () { }, function () {
    #         raise Exception(ex)2
    #     }, function () { }))
    #     assert results.messages == [on_error(210, ex2)]

    # def test_do1422_observer_completed_throws(self):
    #     var ex, results, scheduler, xs
    #     ex = 'ex'
    #     scheduler = TestScheduler()
    #     xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_completed(250))
    #     results = scheduler.start(create)
    #     return xs.do_action(Observer.create(function () { }, function () { }, function () {
    #         raise Exception(ex)
    #     }))
    #     assert results.messages == [on_next(210, 2), on_error(250, ex)]
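For reference, the operator under test in a standalone setting. A minimal sketch assuming RxPY 3.x (the same rx/operators API these tests import): do_action taps the stream for side effects without changing what the subscriber receives.

import rx
from rx import operators as ops

source = rx.of(1, 2, 3)
source.pipe(
    ops.do_action(
        on_next=lambda value: print('side effect saw', value),
        on_completed=lambda: print('side effect: completed'),
    )
).subscribe(
    on_next=lambda value: print('subscriber got', value),
)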
path: app/server.py
repo: DanielOyeniyi/Mock1487 (head 3d2d81ce0bbafcd76af01f44a926d4d2fe13604a)
hexsha: 0be0c12097b17bea91fd6065cf44b5f3a53b141c
size: 60,828 bytes | ext: py | lang: Python | licenses: [ "MIT" ]
stars/issues/forks: not recorded (null)
import json import os import random import bottle from bottle import HTTPResponse @bottle.route("/") def index(): return "Your Battlesnake is alive!" @bottle.post("/ping") def ping(): """ Used by the Battlesnake Engine to make sure your snake is still working. """ return HTTPResponse(status=200) @bottle.post("/start") def start(): """ Called every time a new Battlesnake game starts and your snake is in it. Your response will control how your snake is displayed on the board. """ data = bottle.request.json print("START:", json.dumps(data)) response = {"color": "#00FF7F", "headType": "bendr", "tailType": "round-bum"} return HTTPResponse( status=200, headers={"Content-Type": "application/json"}, body=json.dumps(response), ) @bottle.post("/move") def move(): """ Called when the Battlesnake Engine needs to know your next move. The data parameter will contain information about the board. Your response must include your move of up, down, left, or right. """ data = bottle.request.json print("MOVE:", json.dumps(data)) move = next_move(data) # Shouts are messages sent to all the other snakes in the game. # Shouts are not displayed on the game board. shout = "I am a python snake!" response = {"move": move, "shout": shout} return HTTPResponse( status=200, headers={"Content-Type": "application/json"}, body=json.dumps(response), ) # dict -> string # takes a dict representing the game board and returns # a string representing the next move of the snake def next_move(data): food = closest_food(data) snakes = make_snakes(data) moves1 = safe_moves(data, snakes, data["you"]["body"][0]) moves2 = free_moves(data, make_enemy_heads(data), snakes, data["you"]["body"][0]) along_the_wall = along_wall(data, make_heads(data)) near_the_wall = near_wall(data, make_heads(data)) for move in moves1: if (move not in moves2): moves1.remove(move) if (len(moves1) == 0): moves1 = safe_moves(data, snakes, data["you"]["body"][0]) if (data["you"]["health"] < 20): return to_target(data, moves1, food) for head in along_the_wall: move = destroy(data, snakes, head) if (move != "no"): return move # for head in near_the_wall: # move = destroy2(data, snakes, head) # if (move != "no"): # return move return sensor_move(data) # dict -> string # deterimins the next move of the snake by using 8 directional sensors def sensor_move(data): head = data["you"]["body"][0] enemy_heads = make_enemy_heads(data) snakes = make_snakes(data) tmp_up = sensor(data, head, "up") tmp_up_right = sensor(data, head, "up_right") tmp_right = sensor(data, head, "right") tmp_down_right = sensor(data, head, "down_right") tmp_down = sensor(data, head, "down") tmp_down_left = sensor(data, head, "down_left") tmp_left = sensor(data, head, "left") tmp_up_left = sensor(data, head, "up_left") up_val = tmp_up + tmp_up_left + tmp_up_right up_right_val = tmp_up_right + tmp_up + tmp_right right_val = tmp_right + tmp_up_right + tmp_down_right down_right_val = tmp_down_right + tmp_right + tmp_down down_val = tmp_down + tmp_down_right + tmp_down_left down_left_val = tmp_down_left + tmp_down + tmp_left left_val = tmp_left + tmp_down_left + tmp_up_left up_left_val = tmp_up_left + tmp_left + tmp_up up = [up_val, 0] up_right = [up_right_val, 1] right = [right_val, 2] down_right = [down_right_val, 3] down = [down_val, 4] down_left = [down_left_val, 5] left = [left_val, 6] up_left = [up_left_val, 7] items = [up, up_right, right, down_right, down, down_left, left, up_left] if (is_enemy_head(data, enemy_heads, snakes, head, "up")): if (not is_enemy_head2(data, enemy_heads, head, "up")): 
enemy = threat(data, enemy_heads, head, "up") enemy_moves = to_avoid(data, enemy, snakes, "up") if (up_left in items and ("down" in enemy_moves or "right" in enemy_moves)): items.remove(up_left) if (up in items and "down" in enemy_moves): items.remove(up) if (up_right in items and ("down" in enemy_moves or "left" in enemy_moves)): items.remove(up_right) else: if (up_left in items): items.remove(up_left) if (up in items): items.remove(up) if (up_right in items): items.remove(up_right) if (is_enemy_head(data, enemy_heads, snakes, head, "up_right")): if (not is_enemy_head2(data, enemy_heads, head, "up_right")): enemy = threat(data, enemy_heads, head, "up_right") enemy_moves = to_avoid(data, enemy, snakes, "up_rigth") if (up in items and "down" in enemy_moves): items.remove(up) if (up_right in items and ("down" in enemy_moves or "left" in enemy_moves)): items.remove(up_right) if (up_left in items and ("down" in enemy_moves or "right" in enemy_moves)): items.remove(up_left) if (right in items and "left" in enemy_moves): items.remove(right) if (down_right in items and ("up" in enemy_moves or "left" in enemy_moves)): items.remove(down_right) else: if (up in items): items.remove(up) if (up_right in items): items.remove(up_right) if (up_left in items): items.remove(up_left) if (right in items): items.remove(right) if (down_right in items): items.remove(down_right) if (is_enemy_head(data, enemy_heads, snakes, head, "right")): if (not is_enemy_head2(data, enemy_heads, head, "right")): enemy = threat(data, enemy_heads, head, "right") enemy_moves = to_avoid(data, enemy, snakes, "right") if (up_right in items and ("down" in enemy_moves or "left" in enemy_moves)): items.remove(up_right) if (right in items and "left" in enemy_moves): items.remove(right) if (down_right in items and ("up" in enemy_moves or "left" in enemy_moves)): items.remove(down_right) else: if (up_right in items): items.remove(up_right) if (right in items): items.remove(right) if (down_right in items): items.remove(down_right) if (is_enemy_head(data, enemy_heads, snakes, head, "down_right")): if (not is_enemy_head2(data, enemy_heads, head, "down_right")): enemy = threat(data, enemy_heads, head, "down_right") enemy_moves = to_avoid(data, enemy, snakes, "down_right") if (right in items and "left" in enemy_moves): items.remove(right) if (down_right in items and ("up" in enemy_moves or "left" in enemy_moves)): items.remove(down_right) if (down in items and "up" in enemy_moves): items.remove(down) if (down_left in items and ("up" in enemy_moves or "right" in enemy_moves)): items.remove(down_left) if (up_right in items and ("down" in enemy_moves or "left" in enemy_moves)): items.remove(up_right) else: if (right in items): items.remove(right) if (down_right in items): items.remove(down_right) if (down in items): items.remove(down) if (down_left in items): items.remove(down_left) if (up_right in items): items.remove(up_right) if (is_enemy_head(data, enemy_heads, snakes, head, "down")): if (not is_enemy_head2(data, enemy_heads, head, "down")): enemy = threat(data, enemy_heads, head, "down") enemy_moves = to_avoid(data, enemy, snakes, "down") if (down_right in items and ("up" in enemy_moves or "left" in enemy_moves)): items.remove(down_right) if (down in items and "up" in enemy_moves): items.remove(down) if (down_left in items and ("up" in enemy_moves or "right" in enemy_moves)): items.remove(down_left) else: if (down_right in items): items.remove(down_right) if (down in items): items.remove(down) if (down_left in items): items.remove(down_left) 
if (is_enemy_head(data, enemy_heads, snakes, head, "down_left")): if (not is_enemy_head2(data, enemy_heads, head, "down_left")): enemy = threat(data, enemy_heads, head, "down_left") enemy_moves = to_avoid(data, enemy, snakes, "down_left") if (down in items and "up" in enemy_moves): items.remove(down) if (down_left in items and ("up" in enemy_moves or "right" in enemy_moves)): items.remove(down_left) if (left in items and "right" in enemy_moves): items.remove(left) if (up_left in items and ("down" in enemy_moves or "right" in enemy_moves)): items.remove(up_left) if (down_right in items and ("up" in enemy_moves or "left" in enemy_moves)): items.remove(down_right) else: if (down in items): items.remove(down) if (down_left in items): items.remove(down_left) if (left in items): items.remove(left) if (up_left in items): items.remove(up_left) if (down_right in items): items.remove(down_right) if (is_enemy_head(data, enemy_heads, snakes, head, "left")): if (not is_enemy_head2(data, enemy_heads, head, "left")): enemy = threat(data, enemy_heads, head, "left") enemy_moves = to_avoid(data, enemy, snakes, "left") if (down_left in items and ("up" in enemy_moves or "right" in enemy_moves)): items.remove(down_left) if (left in items and "right" in enemy_moves): items.remove(left) if (up_left in items and ("down" in enemy_moves or "right" in enemy_moves)): items.remove(up_left) else: if (down_left in items): items.remove(down_left) if (left in items): items.remove(left) if (up_left in items): items.remove(up_left) if (is_enemy_head(data, enemy_heads, snakes, head, "up_left")): if (not is_enemy_head2(data, enemy_heads, head, "up_left")): enemy = threat(data, enemy_heads, head, "up_left") enemy_moves = to_avoid(data, enemy, snakes, "up_left") if (left in items and "left" in enemy_moves): items.remove(left) if (up_left in items and ("down" in enemy_moves or "right" in enemy_moves)): items.remove(up_left) if (up in items and "down" in enemy_moves): items.remove(up) if (down_left in items and ("up" in enemy_moves or "right" in enemy_moves)): items.remove(down_left) if (up_right in items and ("down" in enemy_moves or "left" in enemy_moves)): items.remove(up_right) else: if (left in items): items.remove(left) if (up_left in items): items.remove(up_left) if (up in items): items.remove(up) if (down_left in items): items.remove(down_left) if (up_right in items): items.remove(up_right) if (tmp_up == 0): if (up in items): items.remove(up) if (tmp_right == 0): if (right in items): items.remove(right) if (tmp_down == 0): if (down in items): items.remove(down) if (tmp_left == 0): if (left in items): items.remove(left) if (len(items) == 0): enemy = closest_head(data, enemy_heads) if (enemy != {}): items = [up, up_right, right, down_right, down, down_left, left, up_left] if (is_enemy_head2(data, enemy_heads, head, "up")): if (up_left in items): items.remove(up_left) if (up in items): items.remove(up) if (up_right in items): items.remove(up_right) else: enemy = threat(data, enemy_heads, head, "up") enemy_moves = to_avoid(data, enemy, snakes, "up") if (up_left in items and ("down" in enemy_moves or "right" in enemy_moves)): items.remove(up_left) if (up in items and "down" in enemy_moves): items.remove(up) if (up_right in items and ("down" in enemy_moves or "left" in enemy_moves)): items.remove(up_right) if (is_enemy_head2(data, enemy_heads, head, "up_right")): if (up in items): items.remove(up) if (up_right in items): items.remove(up_right) if (up_left in items): items.remove(up_left) if (right in items): items.remove(right) 
if (down_right in items): items.remove(down_right) else: enemy = threat(data, enemy_heads, head, "up_right") enemy_moves = to_avoid(data, enemy, snakes, "up_rigth") if (up in items and "down" in enemy_moves): items.remove(up) if (up_right in items and ("down" in enemy_moves or "left" in enemy_moves)): items.remove(up_right) if (up_left in items and ("down" in enemy_moves or "right" in enemy_moves)): items.remove(up_left) if (right in items and "left" in enemy_moves): items.remove(right) if (down_right in items and ("up" in enemy_moves or "left" in enemy_moves)): items.remove(down_right) if (is_enemy_head2(data, enemy_heads, head, "right")): if (up_right in items): items.remove(up_right) if (right in items): items.remove(right) if (down_right in items): items.remove(down_right) else: enemy = threat(data, enemy_heads, head, "right") enemy_moves = to_avoid(data, enemy, snakes, "right") if (up_right in items and ("down" in enemy_moves or "left" in enemy_moves)): items.remove(up_right) if (right in items and "left" in enemy_moves): items.remove(right) if (down_right in items and ("up" in enemy_moves or "left" in enemy_moves)): items.remove(down_right) if (is_enemy_head2(data, enemy_heads, head, "down_right")): if (right in items): items.remove(right) if (down_right in items): items.remove(down_right) if (down in items): items.remove(down) if (down_left in items): items.remove(down_left) if (up_right in items): items.remove(up_right) else: enemy = threat(data, enemy_heads, head, "down_right") enemy_moves = to_avoid(data, enemy, snakes, "down_right") if (right in items and "left" in enemy_moves): items.remove(right) if (down_right in items and ("up" in enemy_moves or "left" in enemy_moves)): items.remove(down_right) if (down in items and "up" in enemy_moves): items.remove(down) if (down_left in items and ("up" in enemy_moves or "right" in enemy_moves)): items.remove(down_left) if (up_right in items and ("down" in enemy_moves or "left" in enemy_moves)): items.remove(up_right) if (is_enemy_head2(data, enemy_heads, head, "down")): if (down_right in items): items.remove(down_right) if (down in items): items.remove(down) if (down_left in items): items.remove(down_left) else: enemy = threat(data, enemy_heads, head, "down") enemy_moves = to_avoid(data, enemy, snakes, "down") if (down_right in items and ("up" in enemy_moves or "left" in enemy_moves)): items.remove(down_right) if (down in items and "up" in enemy_moves): items.remove(down) if (down_left in items and ("up" in enemy_moves or "right" in enemy_moves)): items.remove(down_left) if (is_enemy_head2(data, enemy_heads, head, "down_left")): if (down in items): items.remove(down) if (down_left in items): items.remove(down_left) if (left in items): items.remove(left) if (up_left in items): items.remove(up_left) if (down_right in items): items.remove(down_right) else: enemy = threat(data, enemy_heads, head, "down_left") enemy_moves = to_avoid(data, enemy, snakes, "down_left") if (down in items and "up" in enemy_moves): items.remove(down) if (down_left in items and ("up" in enemy_moves or "right" in enemy_moves)): items.remove(down_left) if (left in items and "right" in enemy_moves): items.remove(left) if (up_left in items and ("down" in enemy_moves or "right" in enemy_moves)): items.remove(up_left) if (down_right in items and ("up" in enemy_moves or "left" in enemy_moves)): items.remove(down_right) if (is_enemy_head2(data, enemy_heads, head, "left")): if (down_left in items): items.remove(down_left) if (left in items): items.remove(left) if (up_left in 
items): items.remove(up_left) else: enemy = threat(data, enemy_heads, head, "left") enemy_moves = to_avoid(data, enemy, snakes, "left") if (down_left in items and ("up" in enemy_moves or "right" in enemy_moves)): items.remove(down_left) if (left in items and "right" in enemy_moves): items.remove(left) if (up_left in items and ("down" in enemy_moves or "right" in enemy_moves)): items.remove(up_left) if (is_enemy_head2(data, enemy_heads, head, "up_left")): if (left in items): items.remove(left) if (up_left in items): items.remove(up_left) if (up in items): items.remove(up) if (down_left in items): items.remove(down_left) if (up_right in items): items.remove(up_right) else: enemy = threat(data, enemy_heads, head, "up_left") enemy_moves = to_avoid(data, enemy, snakes, "up_left") if (left in items and "left" in enemy_moves): items.remove(left) if (up_left in items and ("down" in enemy_moves or "right" in enemy_moves)): items.remove(up_left) if (up in items and "down" in enemy_moves): items.remove(up) if (down_left in items and ("up" in enemy_moves or "right" in enemy_moves)): items.remove(down_left) if (up_right in items and ("down" in enemy_moves or "left" in enemy_moves)): items.remove(up_right) if (tmp_up == 0): if (up in items): items.remove(up) if (tmp_right == 0): if (right in items): items.remove(right) if (tmp_down == 0): if (down in items): items.remove(down) if (tmp_left == 0): if (left in items): items.remove(left) if (len(items) != 0): vals = [] for item in items: vals.append(item[0]) max_val = max(vals) else: max_val = 0 right_block = {"x": head["x"] + 1, "y": head["y"]} left_block = {"x": head["x"] - 1, "y": head["y"]} down_block = {"x": head["x"], "y": head["y"] + 1} up_block = {"x": head["x"], "y": head["y"] - 1} if (is_dead_end(data, head, "up") or is_dead_end(data, head, "right") or is_dead_end(data, head, "down") or is_dead_end(data, head, "left")): print("here") best_paths = [] up_path = num_free(data, up_block) right_path = num_free(data, right_block) down_path = num_free(data, down_block) left_path = num_free(data, left_block) up1 = [up_path, 0] right1 = [right_path, 1] down1 = [down_path, 2] left1 = [left_path, 3] max_path = max(up_path, right_path, down_path, left_path) if (max_path == up1[0]): best_paths.append(up1) if (max_path == right1[0]): best_paths.append(right1) if (max_path == down1[0]): best_paths.append(down1) if (max_path == left1[0]): best_paths.append(left1) best_path = random.choice(best_paths) if (best_path == up1): best_paths.append(up1) return "up" if (best_path == right1): best_paths.append(right1) return "right" if (best_path == down1): best_paths.append(down1) return "down" if (best_path == left1): best_paths.append(left1) return "left" if (max_val != 0): max_items = [] for item in items: if (item[0] == max_val): max_items.append(item) max_item = random.choice(max_items) if (max_item == up and tmp_up != 0): return "up" if (max_item == up_right): if (tmp_right != 0 and tmp_up != 0): if (up_val > right_val): return "up" elif (up_val < right_val): return "rigth" else: return random.choice(["up", "right"]) if (tmp_right != 0): return "right" if (tmp_up != 0): return "up" if (max_item == right and tmp_right != 0): return "right" if (max_item == down_right): if (tmp_down != 0 and tmp_right != 0): if (down_val > right_val): return "down" elif (down_val < right_val): return "rigth" else: return random.choice(["down", "right"]) if (tmp_down != 0): return "down" if (tmp_right != 0): return "right" if (max_item == down and tmp_down != 0): return "down" if 
(max_item == down_left): if (tmp_down != 0 and tmp_left != 0): if (down_val > left_val): return "down" elif (down_val < left_val): return "left" else: return random.choice(["down", "left"]) if (tmp_down != 0): return "down" if (tmp_left != 0): return "left" if (max_item == left and tmp_left != 0): return "left" if (max_item == up_left): if (tmp_up != 0 and tmp_left != 0): if (up_val > left_val): return "up" elif (up_val < left_val): return "left" else: return random.choice(["up", "left"]) if (tmp_up != 0): return "up" if (tmp_left != 0): return "left" items = [up, right, down, left] if (tmp_up == 0): items.remove(up) if (tmp_right == 0): items.remove(right) if (tmp_down == 0): items.remove(down) if (tmp_left == 0): items.remove(left) vals = [] max_items = [] for item in items: vals.append(item[0]) if (len(vals) == 0): return "up" max_val = max(vals) for item in items: if (item[0] == max_val): max_items.append(item) max_item = random.choice(max_items) if (max_item == up): return "up" if (max_item == right): return "right" if (max_item == down): return "down" if (max_item == left): return "left" # dict, dict, string -> int # returns a count of all the available moves and soon to be available # moves in a given direction def sensor(data, pos, direction): tmp_snakes = make_tmp_snakes(data) return sensor_helper(data, tmp_snakes, pos, direction) # dict, list, dict, string -> int # returns a count of all the available moves and soon to be available # moves in a given direction def sensor_helper(data, tmp_snakes, pos, direction): new_pos = {"x": pos["x"], "y": pos["y"]} if (not is_free_tmp(data, tmp_snakes, pos) and pos != data["you"]["body"][0]): return -1 else: remove_tails(tmp_snakes) if (direction == "up"): new_pos["y"] -= 1 return sensor_helper(data, tmp_snakes, new_pos, direction) + 1 if (direction == "up_right"): remove_tails(tmp_snakes) new_pos["y"] -= 1 new_pos["x"] += 1 return sensor_helper(data, tmp_snakes, new_pos, direction) + 1 if (direction == "right"): new_pos["x"] += 1 return sensor_helper(data, tmp_snakes, new_pos, direction) + 1 if (direction == "down_right"): remove_tails(tmp_snakes) new_pos["y"] += 1 new_pos["x"] += 1 return sensor_helper(data, tmp_snakes, new_pos, direction) + 1 if (direction == "down"): new_pos["y"] += 1 return sensor_helper(data, tmp_snakes, new_pos, direction) + 1 if (direction == "down_left"): remove_tails(tmp_snakes) new_pos["y"] += 1 new_pos["x"] -= 1 return sensor_helper(data, tmp_snakes, new_pos, direction) + 1 if (direction == "left"): new_pos["x"] -= 1 return sensor_helper(data, tmp_snakes, new_pos, direction) + 1 if (direction == "up_left"): remove_tails(tmp_snakes) new_pos["y"] -= 1 new_pos["x"] -= 1 return sensor_helper(data, tmp_snakes, new_pos, direction) + 1 # dict, dict, string -> bool # checks if the given direction is a straight dead end with no turns # and returns True if it is def is_dead_end(data, pos, direction): tmp_snakes = make_tmp_snakes(data) state = is_dead_end_helper(data, pos, tmp_snakes, direction) if (state == 0 and is_free_tmp(data, tmp_snakes, pos)): return True return False # dict, dict, list, string -> int # checks if the given direction is a straight dead end with no turns # and returns a number representing the current state def is_dead_end_helper(data, pos, tmp_snakes, direction): new_pos = {"x": pos["x"], "y": pos["y"]} if (not is_free_tmp(data, tmp_snakes, pos) and pos != data["you"]["body"][0]): return 0 else: remove_tails(tmp_snakes) if (direction == "up"): new_pos["y"] -= 1 right = {"x":
new_pos["x"] + 1, "y": new_pos["y"]} left = {"x": new_pos["x"] - 1, "y": new_pos["y"]} if (is_free_tmp(data, tmp_snakes, right)): return 1 if (is_free_tmp(data, tmp_snakes, left)): return 1 return is_dead_end_helper(data, new_pos, tmp_snakes, direction) if (direction == "right"): new_pos["x"] += 1 up = {"x": new_pos["x"], "y": new_pos["y"] - 1} down = {"x": new_pos["x"], "y": new_pos["y"] + 1} if (is_free_tmp(data, tmp_snakes, up)): return 1 if (is_free_tmp(data, tmp_snakes, down)): return 1 return is_dead_end_helper(data, new_pos, tmp_snakes, direction) if (direction == "down"): new_pos["y"] += 1 right = {"x": new_pos["x"] + 1, "y": new_pos["y"]} left = {"x": new_pos["x"] - 1, "y": new_pos["y"]} if (is_free_tmp(data, tmp_snakes, right)): return 1 if (is_free_tmp(data, tmp_snakes, left)): return 1 return is_dead_end_helper(data, new_pos, tmp_snakes, direction) if (direction == "left"): new_pos["x"] -= 1 up = {"x": new_pos["x"], "y": new_pos["y"] - 1} down = {"x": new_pos["x"], "y": new_pos["y"] + 1} if (is_free_tmp(data, tmp_snakes, up)): return 1 if (is_free_tmp(data, tmp_snakes, down)): return 1 return is_dead_end_helper(data, new_pos, tmp_snakes, direction) # dict, dict -> int # checks all the free blocks conected to the input block # and return that number def num_free(data, block): checked = [] snakes = make_snakes(data) return num_free_helper(data, snakes, checked, block) # dict, dict, list, dict -> int # checks all the free blocks conected to the input block # and return that number def num_free_helper(data, snakes, checked, block): if (not is_free(data, snakes, block) or block in checked): return 0 else: checked.append(block) right_block = {"x": block["x"] + 1, "y": block["y"]} left_block = {"x": block["x"] - 1, "y": block["y"]} down_block = {"x": block["x"], "y": block["y"] + 1} up_block = {"x": block["x"], "y": block["y"] - 1} return (num_free_helper(data, snakes, checked, right_block) + num_free_helper(data, snakes, checked, left_block) + num_free_helper(data, snakes, checked, down_block) + num_free_helper(data, snakes, checked, up_block) + 1) # dict, list, dict -> bool # return true if the block is not in any snake body parts and # outside of the game board def is_free(data, snakes, pos): return not (pos in snakes or pos["x"] == data["board"]["height"] or pos["y"] == data["board"]["width"] or pos["x"] == -1 or pos["y"] == -1) # dict, list, list, dict, string -> bool # determins if there is an enemy head in the next 2 moves in the # given direction and returns a bool corresponding to that result def is_enemy_head(data, enemy_heads, bodies, pos, direction): new_pos = {"x": pos["x"], "y": pos["y"]} if (direction == "up"): new_pos["y"] -= 1 if (new_pos in enemy_heads): return True if (new_pos in bodies): return False new_pos["y"] -= 1 return new_pos in enemy_heads if (direction == "up_right"): new_pos["y"] -= 1 new_pos["x"] += 1 if (new_pos in enemy_heads): return True if (new_pos in bodies): return False new_pos["y"] -= 1 new_pos["x"] += 1 return new_pos in enemy_heads if (direction == "right"): new_pos["x"] += 1 if (new_pos in enemy_heads): return True if (new_pos in bodies): return False new_pos["x"] += 1 return new_pos in enemy_heads if (direction == "down_right"): new_pos["y"] += 1 new_pos["x"] += 1 if (new_pos in enemy_heads): return True if (new_pos in bodies): return False new_pos["y"] += 1 new_pos["x"] += 1 return new_pos in enemy_heads if (direction == "down"): new_pos["y"] += 1 if (new_pos in enemy_heads): return True if (new_pos in bodies): return False new_pos["y"] += 
1 return new_pos in enemy_heads if (direction == "down_left"): new_pos["y"] += 1 new_pos["x"] -= 1 if (new_pos in enemy_heads): return True if (new_pos in bodies): return False new_pos["y"] += 1 new_pos["x"] -= 1 return new_pos in enemy_heads if (direction == "left"): new_pos["x"] -= 1 if (new_pos in enemy_heads): return True if (new_pos in bodies): return False new_pos["x"] -= 1 return new_pos in enemy_heads if (direction == "up_left"): new_pos["y"] -= 1 new_pos["x"] -= 1 if (new_pos in enemy_heads): return True if (new_pos in bodies): return False new_pos["y"] -= 1 new_pos["x"] -= 1 return new_pos in enemy_heads # dict, list, dict, string -> dict # determines if there is an enemy head in the next 2 moves in the # given direction and returns the enemy head def threat(data, enemy_heads, pos, direction): new_pos = {"x": pos["x"], "y": pos["y"]} if (direction == "up"): new_pos["y"] -= 1 if (new_pos in enemy_heads): return new_pos new_pos["y"] -= 1 return new_pos if (direction == "up_right"): new_pos["y"] -= 1 new_pos["x"] += 1 if (new_pos in enemy_heads): return new_pos new_pos["y"] -= 1 new_pos["x"] += 1 return new_pos if (direction == "right"): new_pos["x"] += 1 if (new_pos in enemy_heads): return new_pos new_pos["x"] += 1 return new_pos if (direction == "down_right"): new_pos["y"] += 1 new_pos["x"] += 1 if (new_pos in enemy_heads): return new_pos new_pos["y"] += 1 new_pos["x"] += 1 return new_pos if (direction == "down"): new_pos["y"] += 1 if (new_pos in enemy_heads): return new_pos new_pos["y"] += 1 return new_pos if (direction == "down_left"): new_pos["y"] += 1 new_pos["x"] -= 1 if (new_pos in enemy_heads): return new_pos new_pos["y"] += 1 new_pos["x"] -= 1 return new_pos if (direction == "left"): new_pos["x"] -= 1 if (new_pos in enemy_heads): return new_pos new_pos["x"] -= 1 return new_pos if (direction == "up_left"): new_pos["y"] -= 1 new_pos["x"] -= 1 if (new_pos in enemy_heads): return new_pos new_pos["y"] -= 1 new_pos["x"] -= 1 return new_pos # dict, dict, list, string -> list # returns a list of moves that the enemy can make towards you def to_avoid(data, enemy, snakes, direction): enemy_moves = safe_moves(data, snakes, enemy) if (direction == "up"): if ("down" in enemy_moves): return ["down"] if (direction == "up_right"): if ("down" in enemy_moves and "left" in enemy_moves): return ["down", "left"] if ("down" in enemy_moves): return ["down"] if ("left" in enemy_moves): return ["left"] if (direction == "right"): if ("left" in enemy_moves): return ["left"] if (direction == "down_right"): if ("up" in enemy_moves and "left" in enemy_moves): return ["up", "left"] if ("up" in enemy_moves): return ["up"] if ("left" in enemy_moves): return ["left"] if (direction == "down"): if ("up" in enemy_moves): return ["up"] if (direction == "down_left"): if ("up" in enemy_moves and "right" in enemy_moves): return ["up", "right"] if ("up" in enemy_moves): return ["up"] if ("right" in enemy_moves): return ["right"] if (direction == "left"): if ("right" in enemy_moves): return ["right"] if (direction == "up_left"): if ("down" in enemy_moves and "right" in enemy_moves): return ["down", "right"] if ("down" in enemy_moves): return ["down"] if ("right" in enemy_moves): return ["right"] return [] # dict, list -> dict # returns the closest enemy head to our snake's head def closest_head(data, enemy_heads): own_head = data["you"]["body"][0] distance = 1000 closest = {} for head in enemy_heads: x = abs(head["x"] - own_head["x"]) y = abs(head["y"] - own_head["y"]) path = x + y if (path <= distance): closest =
head distance = path return closest # dict, list, dict, string -> bool # determines if there is an enemy head in the next move in the # given direction and returns a bool corresponding to that result def is_enemy_head2(data, enemy_heads, pos, direction): new_pos = {"x": pos["x"], "y": pos["y"]} if (direction == "up"): new_pos["y"] -= 1 return new_pos in enemy_heads if (direction == "up_right"): new_pos["y"] -= 1 new_pos["x"] += 1 return new_pos in enemy_heads if (direction == "right"): new_pos["x"] += 1 return new_pos in enemy_heads if (direction == "down_right"): new_pos["y"] += 1 new_pos["x"] += 1 return new_pos in enemy_heads if (direction == "down"): new_pos["y"] += 1 return new_pos in enemy_heads if (direction == "down_left"): new_pos["y"] += 1 new_pos["x"] -= 1 return new_pos in enemy_heads if (direction == "left"): new_pos["x"] -= 1 return new_pos in enemy_heads if (direction == "up_left"): new_pos["y"] -= 1 new_pos["x"] -= 1 return new_pos in enemy_heads # dict, list, list, dict -> list # returns a list of moves that aren't affected by the threat of # an enemy snake's head def free_moves(data, enemy_heads, snakes, pos): moves = ["right", "left", "down", "up"] if (is_enemy_head(data, enemy_heads, snakes, pos, "right")): moves.remove("right") if (is_enemy_head(data, enemy_heads, snakes, pos, "left")): moves.remove("left") if (is_enemy_head(data, enemy_heads, snakes, pos, "down")): moves.remove("down") if (is_enemy_head(data, enemy_heads, snakes, pos, "up")): moves.remove("up") if (is_enemy_head(data, enemy_heads, snakes, pos, "up_right")): if ("up" in moves): moves.remove("up") if ("right" in moves): moves.remove("right") if (is_enemy_head(data, enemy_heads, snakes, pos, "up_left")): if ("up" in moves): moves.remove("up") if ("left" in moves): moves.remove("left") if (is_enemy_head(data, enemy_heads, snakes, pos, "down_right")): if ("down" in moves): moves.remove("down") if ("right" in moves): moves.remove("right") if (is_enemy_head(data, enemy_heads, snakes, pos, "down_left")): if ("down" in moves): moves.remove("down") if ("left" in moves): moves.remove("left") return moves # dict, list, dict -> list # returns a list of moves that are available for a snake # head to make def safe_moves(data, snakes, pos): moves = [] right = {"x": pos["x"] + 1, "y": pos["y"]} left = {"x": pos["x"] - 1, "y": pos["y"]} down = {"x": pos["x"], "y": pos["y"] + 1} up = {"x": pos["x"], "y": pos["y"] - 1} if (is_free(data, snakes, right)): moves.append("right") if (is_free(data, snakes, left)): moves.append("left") if (is_free(data, snakes, up)): moves.append("up") if (is_free(data, snakes, down)): moves.append("down") return moves # dict, list, dict -> string # attempts to cut off an enemy snake if it is along a wall def destroy(data, snakes, pos): head = data["you"]["body"][0] target_moves = safe_moves(data, snakes, pos) enemy_heads = make_enemy_heads(data) moves1 = safe_moves(data, snakes, data["you"]["body"][0]) moves2 = free_moves(data, enemy_heads, snakes, data["you"]["body"][0]) head_moves = [] for move in moves1: if (move in moves2): head_moves.append(move) # top_wall if (pos["y"] == 0): y_distance = head["y"] - pos["y"] if ("right" in target_moves and "left" not in target_moves): x_distance = head["x"] - pos["x"] # if head is close enough and to the right of the enemy head if (y_distance <= 2 and x_distance >= 0): if (y_distance < x_distance and "up" in head_moves): return "up" elif (y_distance == x_distance and "up" in head_moves and pos not in enemy_heads): return "up" else: if ("right" in
head_moves): return "right" if ("left" in target_moves and "right" not in target_moves): x_distance = pos["x"] - head["x"] # if head is close enough and to the left the enemy head if (y_distance <= 2 and x_distance >= 0): if (y_distance < x_distance and "up" in head_moves): return "up" elif (y_distance == x_distance and "up" in head_moves and pos not in enemy_heads): return "up" else: if ("left" in head_moves): return "left" # right_wall if (pos["x"] == data["board"]["width"] - 1): x_distance = pos["x"] - head["x"] if ("up" in target_moves and "down" not in target_moves): y_distance = pos["y"] - head["y"] # if head is close enough and above the enemy head if (x_distance <= 2 and y_distance >= 0): if (x_distance < y_distance and "right" in head_moves): return "right" elif (x_distance == y_distance and "right" in head_moves and pos not in enemy_heads): return "right" else: if ("up" in head_moves): return "up" if ("down" in target_moves and "up" not in target_moves): y_distance = head["y"] - pos["y"] # if head is close enough and below the enemy head if (x_distance <= 2 and y_distance >= 0): if (x_distance < y_distance and "right" in head_moves): return "right" elif (x_distance == y_distance and "right" in head_moves and pos not in enemy_heads): return "right" else: if ("down" in head_moves): return "down" # bottom_wall if (pos["y"] == data["board"]["height"] - 1): y_distance = pos["y"] - head["y"] if ("right" in target_moves and "left" not in target_moves): x_distance = head["x"] - pos["x"] # if head is close enough and to the right the enemy head if (y_distance <= 2 and x_distance >= 0): if (y_distance < x_distance and "down" in head_moves): return "down" elif (y_distance == x_distance and "down" in head_moves and pos not in enemy_heads): return "down" else: if ("right" in head_moves): return "right" if ("left" in target_moves and "right" not in target_moves): x_distance = pos["x"] - head["x"] # if head is close enough and to the left the enemy head if (y_distance <= 2 and x_distance >= 0): if (y_distance < x_distance and "down" in head_moves): return "down" elif (y_distance == x_distance and "down" in head_moves and pos not in enemy_heads): return "down" else: if ("left" in head_moves): return "left" # left_wall if (pos["x"] == 0): x_distance = head["x"] - pos["x"] if ("up" in target_moves and "down" not in target_moves): y_distance = pos["y"] - head["y"] # if head is close enough and above the enemy head if (x_distance <= 2 and y_distance >= 0): if (x_distance < y_distance and "left" in head_moves): return "left" elif (x_distance == y_distance and "left" in head_moves and pos not in enemy_heads): return "left" else: if ("up" in head_moves): return "up" if ("down" in target_moves and "up" not in target_moves): y_distance = head["y"] - pos["y"] # if head is close enough and below the enemy head if (x_distance <= 2 and y_distance >= 0): if (x_distance < y_distance and "left" in head_moves): return "left" elif (x_distance == y_distance and "left" in head_moves and pos not in enemy_heads): return "left" else: if ("down" in head_moves): return "down" return "no" # dict, list, list, dict -> string # attempts to cut off an enemy snake if it along a wall def destroy2(data, snakes, pos): head = data["you"]["body"][0] target_moves = safe_moves(data, snakes, pos) enemy_heads = make_enemy_heads(data) moves1 = safe_moves(data, snakes, data["you"]["body"][0]) moves2 = free_moves(data, enemy_heads, snakes, data["you"]["body"][0]) head_moves = [] for move in moves1: if (move in moves2): 
head_moves.append(move) # top_wall if (pos["y"] == 1): y_distance = head["y"] - pos["y"] if ("right" in target_moves and "left" not in target_moves): x_distance = head["x"] - pos["x"] # if head is close enough and to the right the enemy head if (y_distance <= 2 and x_distance >= 0): if (y_distance < x_distance and "up" in head_moves): return "up" elif (y_distance == x_distance and "up" in head_moves and pos not in enemy_heads): return "up" else: if ("right" in head_moves): return "right" if ("left" in target_moves and "right" not in target_moves): x_distance = pos["x"] - head["x"] # if head is close enough and to the left the enemy head if (y_distance <= 2 and x_distance >= 0): if (y_distance < x_distance and "up" in head_moves): return "up" elif (y_distance == x_distance and "up" in head_moves and pos not in enemy_heads): return "up" else: if ("left" in head_moves): return "left" # right_wall if (pos["x"] == data["board"]["width"] - 2): x_distance = pos["x"] - head["x"] if ("up" in target_moves and "down" not in target_moves): y_distance = pos["y"] - head["y"] # if head is close enough and above the enemy head if (x_distance <= 2 and y_distance >= 0): if (x_distance < y_distance and "right" in head_moves): return "right" elif (x_distance == y_distance and "right" in head_moves and pos not in enemy_heads): return "right" else: if ("up" in head_moves): return "up" if ("down" in target_moves and "up" not in target_moves): y_distance = head["y"] - pos["y"] # if head is close enough and below the enemy head if (x_distance <= 2 and y_distance >= 0): if (x_distance < y_distance and "right" in head_moves): return "right" elif (x_distance == y_distance and "right" in head_moves and pos not in enemy_heads): return "right" else: if ("down" in head_moves): return "down" # bottom_wall if (pos["y"] == data["board"]["height"] - 2): y_distance = pos["y"] - head["y"] if ("right" in target_moves and "left" not in target_moves): x_distance = head["x"] - pos["x"] # if head is close enough and to the right the enemy head if (y_distance <= 2 and x_distance >= 0): if (y_distance < x_distance and "down" in head_moves): return "down" elif (y_distance == x_distance and "down" in head_moves and pos not in enemy_heads): return "down" else: if ("right" in head_moves): return "right" if ("left" in target_moves and "right" not in target_moves): x_distance = pos["x"] - head["x"] # if head is close enough and to the left the enemy head if (y_distance <= 2 and x_distance >= 0): if (y_distance < x_distance and "down" in head_moves): return "down" elif (y_distance == x_distance and "down" in head_moves and pos not in enemy_heads): return "down" else: if ("left" in head_moves): return "left" # left_wall if (pos["x"] == 1): x_distance = head["x"] - pos["x"] if ("up" in target_moves and "down" not in target_moves): y_distance = pos["y"] - head["y"] # if head is close enough and above the enemy head if (x_distance <= 2 and y_distance >= 0): if (x_distance < y_distance and "left" in head_moves): return "left" elif (x_distance == y_distance and "left" in head_moves and pos not in enemy_heads): return "left" else: if ("up" in head_moves): return "up" if ("down" in target_moves and "up" not in target_moves): y_distance = head["y"] - pos["y"] # if head is close enough and below the enemy head if (x_distance <= 2 and y_distance >= 0): if (x_distance < y_distance and "left" in head_moves): return "left" elif (x_distance == y_distance and "left" in head_moves and pos not in enemy_heads): return "left" else: if ("down" in head_moves): 
return "down" return "no" # dict -> list # returns a list of dicts corresponding to the x,y coordinates of # enemy snake heads def make_enemy_heads(data): enemies = [] for snake in data["board"]["snakes"]: if (data["you"]["body"][0] != snake["body"][0] and len(data["you"]["body"]) <= len(snake["body"])): enemies.append(snake["body"][0]) return enemies # dict, list -> list # returns a list of enemy snake head that are along the boundary of the game def along_wall(data, enemies): along_wall = [] for head in enemies: if (head["x"] == data["board"]["width"] - 1 or head["y"] == data["board"]["height"] -1 or head["x"] == 0 or head["y"] == 0): along_wall.append(head) return along_wall # dict, list -> list # returns a list of enemy snake head that are 1 move away from a boundary def near_wall(data, enemies): near_wall = [] for head in enemies: if (head["x"] == data["board"]["width"] - 2 or head["y"] == data["board"]["height"] - 2 or head["x"] == 1 or head["y"] == 1): near_wall.append(head) return near_wall # dict -> list # returns a list of dicts representing snake locations without tails # if they just ate food def make_snakes(data): snakes = [] for snake in data["board"]["snakes"]: for part in snake["body"]: snakes.append(part) if (len(snake) >= 2): if (snake["body"][-1] != snake["body"][-2]): snakes.remove(snake["body"][-1]) return snakes # dict -> list # makes a list of dicts representing snake x,y coordinates def make_tmp_snakes(data): tmp_snakes = [] for snake in data["board"]["snakes"]: tmp_snake = [] for part in snake["body"]: tmp_part = {"x": part["x"], "y": part["y"]} tmp_snake.append(tmp_part) tmp_snakes.append(tmp_snake) return tmp_snakes # list -> none # removes the tails of all the snakes in the given list def remove_tails(tmp_snakes): for tmp_snake in tmp_snakes: if (len(tmp_snake) != 0): tmp_snake.pop() # dict, list, dict -> bool # determins if the given position is in the given list of snake locations # or if it is outside of the game boarder def is_free_tmp(data, tmp_snakes, pos): for tmp_snake in tmp_snakes: if (pos in tmp_snake): return False return not(pos["x"] == data["board"]["height"] or pos["y"] == data["board"]["width"] or pos["x"] == -1 or pos["y"] == -1) # dict, list, dict -> directions # given a list of available directions and a target # it returns a direction that will bring the snake # closer to the target def to_target(data, directions, target): if (len(directions) == 0): return "up" head = data["you"]["body"][0] new_directions = [] if (target["x"] > head["x"] and "right" in directions): new_directions.append("right") if (target["x"] < head["x"] and "left" in directions): new_directions.append("left") if (target["y"] > head["y"] and "down" in directions): new_directions.append("down") if (target["y"] < head["y"] and "up" in directions): new_directions.append("up") if (len(new_directions) != 0): return random.choice(new_directions) else: return sensor_move(data) # dict -> list # makes a list of dicts representing snake x,y coordinates # with tails included def make_static_snakes(data): snakes = [] for snake in data["board"]["snakes"]: for part in snake["body"]: snakes.append(part) return snakes # dict -> list # makes a list of all snake heads in the game def make_heads(data): heads = [] for snake in data["board"]["snakes"]: if (snake != data["you"]): heads.append(snake["body"][0]) return heads # dict -> list # makes a list of all the snake body parts in the game # tailsexcluded def make_bodies(data): bodies = [] for snake in data["board"]["snakes"]: for part in 
snake["body"]: if (part != snake["body"][-1]): bodies.append(part) return bodies # dict -> dict # returns the closest food item to head def closest_food(data): closest = {} max = 100 for food in data["board"]["food"]: x = abs(data["you"]["body"][0]["x"] - food["x"]) y = abs(data["you"]["body"][0]["y"] - food["y"]) distance = x + y if (distance <= max): max = distance closest = food return closest @bottle.post("/end") def end(): """ Called every time a game with your snake in it ends. """ data = bottle.request.json print("END:", json.dumps(data)) return HTTPResponse(status=200) def main(): bottle.run( application, host=os.getenv("IP", "0.0.0.0"), port=os.getenv("PORT", "8080"), debug=os.getenv("DEBUG", True), ) # Expose WSGI app (so gunicorn can find it) application = bottle.default_app() if __name__ == "__main__": main()
37.781366
102
0.501923
7,647
60,828
3.813522
0.034
0.041527
0.051025
0.044441
0.829196
0.795213
0.770215
0.743227
0.716926
0.68891
0
0.009172
0.385217
60,828
1,610
103
37.781366
0.770644
0.079043
0
0.751192
0
0
0.053329
0
0
0
0
0
0
1
0.028617
false
0
0.003975
0.00159
0.193164
0.00318
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
041717fc66089fd259f95b970cda3573896b1a0b
196
py
Python
tests/integration/images/__init__.py
Rory-Finnegan/bootstrap-vz
e7dc9c26bd72185eacc5cb1954650903db880289
[ "Apache-2.0" ]
null
null
null
tests/integration/images/__init__.py
Rory-Finnegan/bootstrap-vz
e7dc9c26bd72185eacc5cb1954650903db880289
[ "Apache-2.0" ]
null
null
null
tests/integration/images/__init__.py
Rory-Finnegan/bootstrap-vz
e7dc9c26bd72185eacc5cb1954650903db880289
[ "Apache-2.0" ]
null
null
null
def initialize_image(manifest, build_server, bootstrap_info):
    if manifest.provider['name'] == 'virtualbox':
        import vbox
        return vbox.initialize_image(manifest, build_server, bootstrap_info)
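The function is a thin dispatch on the manifest's provider name. A hedged sketch of how a second provider would slot into the same pattern (the ec2 module name here is hypothetical, not part of bootstrap-vz's test suite):

    def initialize_image(manifest, build_server, bootstrap_info):
        provider = manifest.provider['name']
        if provider == 'virtualbox':
            import vbox
            return vbox.initialize_image(manifest, build_server, bootstrap_info)
        if provider == 'ec2':  # hypothetical second backend with the same interface
            import ec2
            return ec2.initialize_image(manifest, build_server, bootstrap_info)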
28
70
0.790816
24
196
6.208333
0.625
0.201342
0.308725
0.375839
0.630872
0.630872
0.630872
0
0
0
0
0
0.107143
196
6
71
32.666667
0.851429
0
0
0
0
0
0.072165
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.75
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
7
04297a008fac4b189c1cbf8164757ee6d44494fe
11,160
py
Python
data/data_load.py
Billy1900/Noise-Adaption-Layer
57b52dc4873f8eba7b8332db0ca3e593c2e3ffa8
[ "MIT" ]
5
2021-04-27T11:44:56.000Z
2022-03-10T02:35:21.000Z
data/data_load.py
Billy1900/Noise-Adaption-Layer
57b52dc4873f8eba7b8332db0ca3e593c2e3ffa8
[ "MIT" ]
null
null
null
data/data_load.py
Billy1900/Noise-Adaption-Layer
57b52dc4873f8eba7b8332db0ca3e593c2e3ffa8
[ "MIT" ]
null
null
null
import torch import numpy as np import torch.utils.data as Data import torchvision.transforms as transforms from torch.utils.data import DataLoader from PIL import Image from data.data_utils import dataset_split from parse_config import args class mnist_dataset(Data.Dataset): def __init__(self, train=True, transform=None, target_transform=None, noise_rate=0.5, split_per=0.9, random_seed=1, num_class=10): self.transform = transform self.target_transform = target_transform self.train = train original_images = np.load('data/mnist/train_images.npy') original_labels = np.load('data/mnist/train_labels.npy') self.train_data, self.val_data, self.train_labels, self.val_labels = dataset_split(original_images, original_labels, noise_rate, split_per, random_seed, num_class) # print(self.val_labels) pass def __getitem__(self, index): if self.train: img, label = self.train_data[index], self.train_labels[index] else: img, label = self.val_data[index], self.val_labels[index] img = Image.fromarray(img) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: label = self.target_transform(label) return img, label def __len__(self): if self.train: return len(self.train_data) else: return len(self.val_data) class mnist_test_dataset(Data.Dataset): def __init__(self, transform=None, target_transform=None): self.transform = transform self.target_transform = target_transform self.test_data = np.load('data/mnist/test_images.npy') self.test_labels = np.load('data/mnist/test_labels.npy') - 1 # 0-9 def __getitem__(self, index): img, label = self.test_data[index], self.test_labels[index] img = Image.fromarray(img) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: label = self.target_transform(label) return img, label def __len__(self): return len(self.test_data) class cifar10_dataset(Data.Dataset): def __init__(self, train=True, transform=None, target_transform=None, noise_rate=0.0, split_per=0.9, random_seed=1, num_class=10): self.transform = transform self.target_transform = target_transform self.train = train original_images = np.load('data/cifar10/train_images.npy') original_labels = np.load('data/cifar10/train_labels.npy') self.train_data, self.val_data, self.train_labels, self.val_labels = dataset_split(original_images, original_labels, noise_rate, split_per, random_seed, num_class) if self.train: self.train_data = self.train_data.reshape((45000, 3, 32, 32)) self.train_data = self.train_data.transpose((0, 2, 3, 1)) else: self.val_data = self.val_data.reshape((5000, 3, 32, 32)) self.val_data = self.val_data.transpose((0, 2, 3, 1)) def __getitem__(self, index): if self.train: img, label = self.train_data[index], self.train_labels[index] else: img, label = self.val_data[index], self.val_labels[index] img = Image.fromarray(img) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: label = self.target_transform(label) return img, label def __len__(self): if self.train: return len(self.train_data) else: return len(self.val_data) class cifar10_test_dataset(Data.Dataset): def __init__(self, transform=None, target_transform=None): self.transform = transform self.target_transform = target_transform self.test_data = np.load('data/cifar10/test_images.npy') self.test_labels = np.load('data/cifar10/test_labels.npy') self.test_data = self.test_data.reshape((10000, 3, 32, 32)) self.test_data = self.test_data.transpose((0, 2, 3, 1)) def __getitem__(self, index): img, label = self.test_data[index], 
self.test_labels[index] img = Image.fromarray(img) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: label = self.target_transform(label) return img, label def __len__(self): return len(self.test_data) class cifar100_dataset(Data.Dataset): def __init__(self, train=True, transform=None, target_transform=None, noise_rate=0.0, split_per=0.9, random_seed=1, num_class=100): self.transform = transform self.target_transform = target_transform self.train = train original_images = np.load('data/cifar100/train_images.npy') original_labels = np.load('data/cifar100/train_labels.npy') self.train_data, self.val_data, self.train_labels, self.val_labels = dataset_split(original_images, original_labels, noise_rate, split_per, random_seed, num_class) if self.train: self.train_data = self.train_data.reshape((45000, 3, 32, 32)) self.train_data = self.train_data.transpose((0, 2, 3, 1)) else: self.val_data = self.val_data.reshape((5000, 3, 32, 32)) self.val_data = self.val_data.transpose((0, 2, 3, 1)) def __getitem__(self, index): if self.train: img, label = self.train_data[index], self.train_labels[index] else: img, label = self.val_data[index], self.val_labels[index] img = Image.fromarray(img) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: label = self.target_transform(label) return img, label def __len__(self): if self.train: return len(self.train_data) else: return len(self.val_data) class cifar100_test_dataset(Data.Dataset): def __init__(self, transform=None, target_transform=None): self.transform = transform self.target_transform = target_transform self.test_data = np.load('data/cifar100/test_images.npy') self.test_labels = np.load('data/cifar100/test_labels.npy') self.test_data = self.test_data.reshape((10000, 3, 32, 32)) self.test_data = self.test_data.transpose((0, 2, 3, 1)) def __getitem__(self, index): img, label = self.test_data[index], self.test_labels[index] img = Image.fromarray(img) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: label = self.target_transform(label) return img, label def __len__(self): return len(self.test_data) def transform_train(dataset_name): if dataset_name == 'mnist': transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)), ]) else: transform = transforms.Compose([ transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) return transform def transform_test(dataset_name): if dataset_name == 'mnist': transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)), ]) else: transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) return transform def transform_target(label): label = np.array(label) target = torch.from_numpy(label).long() return target def DataLoad_MNIST_CIFAR(DatasetName, noise_rate=0.0): # dataset global train_data, test_data if DatasetName == 'mnist': train_data = mnist_dataset(True, transform=transform_train(DatasetName), target_transform=transform_target, noise_rate=noise_rate, random_seed=args.seed) test_data = mnist_test_dataset(transform=transform_test(DatasetName), target_transform=transform_target) args.n_epoch = 30 args.num_classes = 10 elif DatasetName == 'cifar10': train_data = cifar10_dataset(True, transform=transform_train(DatasetName), 
target_transform=transform_target, noise_rate=noise_rate, random_seed=args.seed) test_data = cifar10_test_dataset(transform=transform_test(DatasetName), target_transform=transform_target) args.n_epoch = 30 args.num_classes = 10 elif DatasetName == 'cifar100': train_data = cifar100_dataset(True, transform=transform_train(DatasetName), target_transform=transform_target, noise_rate=noise_rate, random_seed=args.seed) test_data = cifar100_test_dataset(transform=transform_test(DatasetName), target_transform=transform_target) args.n_epoch = 30 args.num_classes = 100 # data_loader train_loader = DataLoader(dataset=train_data, batch_size=args.batch_size, shuffle=True, num_workers=8) test_loader = DataLoader(dataset=test_data, batch_size=args.batch_size, shuffle=False, num_workers=8) return train_loader, test_loader
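A hedged usage sketch of the loader above; it assumes the preprocessed data/mnist/*.npy files are in place and that parse_config.args supplies seed and batch_size, exactly as the module itself expects:

    from data.data_load import DataLoad_MNIST_CIFAR

    # Build MNIST loaders with 30% label noise; per the dataset classes above,
    # MNIST batches come back as (B, 1, 28, 28) tensors after ToTensor/Normalize.
    train_loader, test_loader = DataLoad_MNIST_CIFAR('mnist', noise_rate=0.3)
    images, labels = next(iter(train_loader))
    print(images.shape, labels.shape)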
35.884244
120
0.560036
1,236
11,160
4.806634
0.084142
0.056051
0.057566
0.036358
0.876452
0.862313
0.862313
0.851204
0.832015
0.813331
0
0.03484
0.351882
11,160
310
121
36
0.786534
0.004122
0
0.756757
0
0
0.034077
0.031299
0
0
0
0
0
1
0.099099
false
0.004505
0.036036
0.013514
0.247748
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
f0e5f447cb8c95306439c7f13b82c8516c09a0ab
22,735
py
Python
arista/workspace/v1/services/gen_pb2_grpc.py
barryCrunch/cloudvision-python
bafb55a57743141ef419ce8b6f3adda31a18ca42
[ "Apache-2.0" ]
8
2020-10-22T13:19:00.000Z
2021-12-16T02:16:47.000Z
arista/workspace/v1/services/gen_pb2_grpc.py
barryCrunch/cloudvision-python
bafb55a57743141ef419ce8b6f3adda31a18ca42
[ "Apache-2.0" ]
6
2020-12-16T11:31:03.000Z
2021-11-19T10:00:37.000Z
arista/workspace/v1/services/gen_pb2_grpc.py
barryCrunch/cloudvision-python
bafb55a57743141ef419ce8b6f3adda31a18ca42
[ "Apache-2.0" ]
7
2020-12-04T01:30:34.000Z
2021-11-11T21:40:12.000Z
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc from arista.workspace.v1.services import gen_pb2 as arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2 class WorkspaceServiceStub(object): """Missing associated documentation comment in .proto file.""" def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.GetOne = channel.unary_unary( '/arista.workspace.v1.WorkspaceService/GetOne', request_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceRequest.SerializeToString, response_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceResponse.FromString, ) self.GetAll = channel.unary_stream( '/arista.workspace.v1.WorkspaceService/GetAll', request_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceStreamRequest.SerializeToString, response_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceStreamResponse.FromString, ) self.Subscribe = channel.unary_stream( '/arista.workspace.v1.WorkspaceService/Subscribe', request_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceStreamRequest.SerializeToString, response_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceStreamResponse.FromString, ) class WorkspaceServiceServicer(object): """Missing associated documentation comment in .proto file.""" def GetOne(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetAll(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Subscribe(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_WorkspaceServiceServicer_to_server(servicer, server): rpc_method_handlers = { 'GetOne': grpc.unary_unary_rpc_method_handler( servicer.GetOne, request_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceRequest.FromString, response_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceResponse.SerializeToString, ), 'GetAll': grpc.unary_stream_rpc_method_handler( servicer.GetAll, request_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceStreamRequest.FromString, response_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceStreamResponse.SerializeToString, ), 'Subscribe': grpc.unary_stream_rpc_method_handler( servicer.Subscribe, request_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceStreamRequest.FromString, response_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceStreamResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'arista.workspace.v1.WorkspaceService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. 
class WorkspaceService(object): """Missing associated documentation comment in .proto file.""" @staticmethod def GetOne(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/arista.workspace.v1.WorkspaceService/GetOne', arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceRequest.SerializeToString, arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetAll(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_stream(request, target, '/arista.workspace.v1.WorkspaceService/GetAll', arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceStreamRequest.SerializeToString, arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceStreamResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def Subscribe(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_stream(request, target, '/arista.workspace.v1.WorkspaceService/Subscribe', arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceStreamRequest.SerializeToString, arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceStreamResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) class WorkspaceBuildServiceStub(object): """Missing associated documentation comment in .proto file.""" def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. 
""" self.GetOne = channel.unary_unary( '/arista.workspace.v1.WorkspaceBuildService/GetOne', request_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceBuildRequest.SerializeToString, response_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceBuildResponse.FromString, ) self.GetAll = channel.unary_stream( '/arista.workspace.v1.WorkspaceBuildService/GetAll', request_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceBuildStreamRequest.SerializeToString, response_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceBuildStreamResponse.FromString, ) self.Subscribe = channel.unary_stream( '/arista.workspace.v1.WorkspaceBuildService/Subscribe', request_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceBuildStreamRequest.SerializeToString, response_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceBuildStreamResponse.FromString, ) class WorkspaceBuildServiceServicer(object): """Missing associated documentation comment in .proto file.""" def GetOne(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetAll(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Subscribe(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_WorkspaceBuildServiceServicer_to_server(servicer, server): rpc_method_handlers = { 'GetOne': grpc.unary_unary_rpc_method_handler( servicer.GetOne, request_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceBuildRequest.FromString, response_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceBuildResponse.SerializeToString, ), 'GetAll': grpc.unary_stream_rpc_method_handler( servicer.GetAll, request_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceBuildStreamRequest.FromString, response_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceBuildStreamResponse.SerializeToString, ), 'Subscribe': grpc.unary_stream_rpc_method_handler( servicer.Subscribe, request_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceBuildStreamRequest.FromString, response_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceBuildStreamResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'arista.workspace.v1.WorkspaceBuildService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. 
class WorkspaceBuildService(object): """Missing associated documentation comment in .proto file.""" @staticmethod def GetOne(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/arista.workspace.v1.WorkspaceBuildService/GetOne', arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceBuildRequest.SerializeToString, arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceBuildResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetAll(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_stream(request, target, '/arista.workspace.v1.WorkspaceBuildService/GetAll', arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceBuildStreamRequest.SerializeToString, arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceBuildStreamResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def Subscribe(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_stream(request, target, '/arista.workspace.v1.WorkspaceBuildService/Subscribe', arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceBuildStreamRequest.SerializeToString, arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceBuildStreamResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) class WorkspaceConfigServiceStub(object): """Missing associated documentation comment in .proto file.""" def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. 
""" self.GetOne = channel.unary_unary( '/arista.workspace.v1.WorkspaceConfigService/GetOne', request_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigRequest.SerializeToString, response_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigResponse.FromString, ) self.GetAll = channel.unary_stream( '/arista.workspace.v1.WorkspaceConfigService/GetAll', request_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigStreamRequest.SerializeToString, response_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigStreamResponse.FromString, ) self.Subscribe = channel.unary_stream( '/arista.workspace.v1.WorkspaceConfigService/Subscribe', request_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigStreamRequest.SerializeToString, response_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigStreamResponse.FromString, ) self.Set = channel.unary_unary( '/arista.workspace.v1.WorkspaceConfigService/Set', request_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigSetRequest.SerializeToString, response_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigSetResponse.FromString, ) self.Delete = channel.unary_unary( '/arista.workspace.v1.WorkspaceConfigService/Delete', request_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigDeleteRequest.SerializeToString, response_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigDeleteResponse.FromString, ) class WorkspaceConfigServiceServicer(object): """Missing associated documentation comment in .proto file.""" def GetOne(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetAll(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Subscribe(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Set(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Delete(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_WorkspaceConfigServiceServicer_to_server(servicer, server): rpc_method_handlers = { 'GetOne': grpc.unary_unary_rpc_method_handler( servicer.GetOne, request_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigRequest.FromString, response_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigResponse.SerializeToString, ), 'GetAll': grpc.unary_stream_rpc_method_handler( servicer.GetAll, request_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigStreamRequest.FromString, 
response_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigStreamResponse.SerializeToString, ), 'Subscribe': grpc.unary_stream_rpc_method_handler( servicer.Subscribe, request_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigStreamRequest.FromString, response_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigStreamResponse.SerializeToString, ), 'Set': grpc.unary_unary_rpc_method_handler( servicer.Set, request_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigSetRequest.FromString, response_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigSetResponse.SerializeToString, ), 'Delete': grpc.unary_unary_rpc_method_handler( servicer.Delete, request_deserializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigDeleteRequest.FromString, response_serializer=arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigDeleteResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'arista.workspace.v1.WorkspaceConfigService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class WorkspaceConfigService(object): """Missing associated documentation comment in .proto file.""" @staticmethod def GetOne(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/arista.workspace.v1.WorkspaceConfigService/GetOne', arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigRequest.SerializeToString, arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetAll(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_stream(request, target, '/arista.workspace.v1.WorkspaceConfigService/GetAll', arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigStreamRequest.SerializeToString, arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigStreamResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def Subscribe(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_stream(request, target, '/arista.workspace.v1.WorkspaceConfigService/Subscribe', arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigStreamRequest.SerializeToString, arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigStreamResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def Set(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/arista.workspace.v1.WorkspaceConfigService/Set', arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigSetRequest.SerializeToString, 
arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigSetResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def Delete(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/arista.workspace.v1.WorkspaceConfigService/Delete', arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigDeleteRequest.SerializeToString, arista_dot_workspace_dot_v1_dot_services_dot_gen__pb2.WorkspaceConfigDeleteResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
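A minimal client sketch against the generated stubs above. The endpoint is a placeholder and the empty WorkspaceRequest is an assumption (its fields are defined in gen_pb2, not shown here); real CloudVision deployments also require TLS and auth metadata:

    import grpc
    from arista.workspace.v1.services import gen_pb2, gen_pb2_grpc

    # Placeholder endpoint; substitute the real CloudVision address.
    channel = grpc.insecure_channel('localhost:9910')
    stub = gen_pb2_grpc.WorkspaceServiceStub(channel)

    # GetOne is unary-unary; GetAll and Subscribe return iterators of stream responses.
    response = stub.GetOne(gen_pb2.WorkspaceRequest())
    print(response)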
50.187638
142
0.70706
2,214
22,735
6.83243
0.054201
0.026972
0.079725
0.093012
0.959741
0.956105
0.953659
0.931976
0.931976
0.913797
0
0.00915
0.226083
22,735
452
143
50.298673
0.850583
0.068353
0
0.743094
1
0
0.084215
0.056668
0
0
0
0
0
1
0.077348
false
0
0.005525
0.030387
0.138122
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
9bd41b53dd0ef5b1001521e165a5493f1c32fdfb
152
py
Python
setup.py
steiza/pyshapefile
adf3810128cadf03b9c8592e056e981ad16bf632
[ "MIT" ]
null
null
null
setup.py
steiza/pyshapefile
adf3810128cadf03b9c8592e056e981ad16bf632
[ "MIT" ]
null
null
null
setup.py
steiza/pyshapefile
adf3810128cadf03b9c8592e056e981ad16bf632
[ "MIT" ]
null
null
null
import os if os.path.exists("paver-minilib.zip"): import sys sys.path.insert(0, "paver-minilib.zip") import paver.command paver.command.main()
19
43
0.717105
24
152
4.541667
0.541667
0.220183
0.275229
0.385321
0
0
0
0
0
0
0
0.007576
0.131579
152
7
44
21.714286
0.818182
0
0
0
0
0
0.223684
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
7
9be9ed9d7497e74a9e18a64f93b2d98a6e9f5ebf
91
py
Python
tests/test_A001220.py
danielsimonney/oeis
16c1dd6e058e49b629f695acb82ec55dd5f052f9
[ "MIT" ]
null
null
null
tests/test_A001220.py
danielsimonney/oeis
16c1dd6e058e49b629f695acb82ec55dd5f052f9
[ "MIT" ]
null
null
null
tests/test_A001220.py
danielsimonney/oeis
16c1dd6e058e49b629f695acb82ec55dd5f052f9
[ "MIT" ]
null
null
null
from oeis import A001220


def test_A001220():
    assert A001220(limit=2) == [1093, 3511]
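For context on the asserted values: A001220 is the sequence of Wieferich primes, primes p with 2^(p-1) ≡ 1 (mod p^2). A brute-force sketch, independent of the oeis package, reproduces the only two known terms (both below 10^4):

    def is_prime(n):
        if n < 2:
            return False
        i = 2
        while i * i <= n:
            if n % i == 0:
                return False
            i += 1
        return True

    # Wieferich condition: 2**(p-1) == 1 modulo p**2.
    wieferich = [p for p in range(2, 10000) if is_prime(p) and pow(2, p - 1, p * p) == 1]
    print(wieferich)  # [1093, 3511] -- matches the test's expected output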
15.166667
43
0.692308
13
91
4.769231
0.846154
0
0
0
0
0
0
0
0
0
0
0.364865
0.186813
91
5
44
18.2
0.472973
0
0
0
0
0
0
0
0
0
0
0
0.333333
1
0.333333
true
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
8
5042b388fc9c44e2336662ef1c21d63c23e5fb4f
33,820
py
Python
scripts/ssc/models/TopoAE_ext/config_libraries/swissroll.py
MrBellamonte/MT-VAEs-TDA
8881b5db607c673fb558f7b74ece27f244b16b77
[ "MIT" ]
null
null
null
scripts/ssc/models/TopoAE_ext/config_libraries/swissroll.py
MrBellamonte/MT-VAEs-TDA
8881b5db607c673fb558f7b74ece27f244b16b77
[ "MIT" ]
1
2020-09-22T13:04:58.000Z
2020-09-22T13:05:23.000Z
scripts/ssc/models/TopoAE_ext/config_libraries/swissroll.py
MrBellamonte/AEs-VAEs-TDA
8881b5db607c673fb558f7b74ece27f244b16b77
[ "MIT" ]
null
null
null
import random

import numpy as np

from src.datasets.datasets import SwissRoll
from src.evaluation.config import ConfigEval
from src.models.WitnessComplexAE.config import ConfigGrid_WCAE
from src.models.autoencoder.autoencoders import Autoencoder_MLP_topoae

swissroll_visualize128 = ConfigGrid_WCAE(
    learning_rate=[1/100], batch_size=[128], n_epochs=[1000], weight_decay=[0],
    early_stopping=[50], rec_loss_weight=[1], top_loss_weight=[8192],
    match_edges=['push_active'], k=[3], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=False, evaluate_on='test', eval_manifold=False,
                     save_eval_latent=True, save_train_latent=True,
                     online_visualization=True, quant_eval=False,
                     k_min=5, k_max=45, k_step=5)],
    uid=[''], toposig_kwargs=[dict()],
    method_args=dict(n_jobs=[1], normalize=[True], mu_push=[1.25], online_wc=[True],
                     wc_offline=[dict(path_to_data='/Users/simons/MT_data/sync/euler_sync/schsimo/MT/output/WitnessComplexes/SwissRoll/nonoise/SwissRoll-bs128-seed5310-d39df50c')]),
    experiment_dir='/Users/simons/PycharmProjects/MT-VAEs-TDA/output/WAE/online_visualize2',
    seed=5310, device='cpu', num_threads=1, verbose=True,
)

swissroll_visualize256 = ConfigGrid_WCAE(
    learning_rate=[1/100], batch_size=[256], n_epochs=[1000], weight_decay=[1e-6],
    early_stopping=[50], rec_loss_weight=[1], top_loss_weight=[8192],
    match_edges=['push_active'], k=[3], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=False, evaluate_on='test', eval_manifold=False,
                     save_eval_latent=True, save_train_latent=True,
                     online_visualization=True, quant_eval=False,
                     k_min=5, k_max=45, k_step=5)],
    uid=[''], toposig_kwargs=[dict()],
    method_args=dict(n_jobs=[1], normalize=[True], mu_push=[1.15], online_wc=[True],
                     wc_offline=[dict(path_to_data='/Users/simons/MT_data/sync/euler_sync/schsimo/MT/output/WitnessComplexes/SwissRoll/nonoise/SwissRoll-bs256-seed5310-b344784f')]),
    experiment_dir='/Users/simons/PycharmProjects/MT-VAEs-TDA/output/WAE/online_visualize',
    seed=5310, device='cpu', num_threads=1, verbose=True,
)

### SWISSROLL MULTISEED

k1_multiseed = [ConfigGrid_WCAE(
    learning_rate=[1/1000],
    batch_size=random.sample([int(i) for i in np.logspace(3, 9, num=7, base=2.0)], 7),
    n_epochs=[1000], weight_decay=[0], early_stopping=[30], rec_loss_weight=[1],
    top_loss_weight=[tlw], match_edges=['symmetric'], k=[1], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=15, k_step=5)],
    uid=[''], toposig_kwargs=[dict()], method_args=[dict(n_jobs=1)],
    experiment_dir='/cluster/home/schsimo/MT/output/WCTopoAE/SwissRoll/k1_multiseed_new',
    seed=seed, device='cpu', num_threads=1, verbose=False,
) for tlw, seed in zip(list(np.repeat([i for i in np.logspace(9, 13, num=5, base=2.0)], 9)),
                       [8, 12, 31, 39, 91, 102, 104, 309, 567]*5)]

k1_multiseed2 = [ConfigGrid_WCAE(
    learning_rate=[1/1000],
    batch_size=random.sample([int(i) for i in np.logspace(3, 9, num=7, base=2.0)], 7),
    n_epochs=[1000], weight_decay=[0], early_stopping=[30], rec_loss_weight=[1],
    top_loss_weight=[tlw], match_edges=['symmetric'], k=[1], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=15, k_step=5)],
    uid=[''], toposig_kwargs=[dict()], method_args=[dict(n_jobs=1)],
    experiment_dir='/cluster/home/schsimo/MT/output/WCTopoAE/SwissRoll/k1_multiseed_new',
    seed=seed, device='cpu', num_threads=1, verbose=False,
) for tlw, seed in zip(list(np.repeat([i for i in np.logspace(9, 13, num=5, base=2.0)], 9)),
                       [643, 666, 678, 789, 809, 1000, 1094, 1333, 1600]*5)]

k1_multiseed3 = [ConfigGrid_WCAE(
    learning_rate=[1/1000],
    batch_size=random.sample([int(i) for i in np.logspace(3, 9, num=7, base=2.0)], 7),
    n_epochs=[1000], weight_decay=[0], early_stopping=[30], rec_loss_weight=[1],
    top_loss_weight=[tlw], match_edges=['symmetric'], k=[1], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=15, k_step=5)],
    uid=[''], toposig_kwargs=[dict()], method_args=[dict(n_jobs=1)],
    experiment_dir='/cluster/home/schsimo/MT/output/WCTopoAE/SwissRoll/k1_multiseed_new',
    seed=seed, device='cpu', num_threads=1, verbose=False,
) for tlw, seed in zip(list(np.repeat([i for i in np.logspace(9, 13, num=5, base=2.0)], 9)),
                       [2643, 2666, 2678, 2789, 2809, 3000, 3094, 3333, 3600]*5)]

k1_multiseed4 = [ConfigGrid_WCAE(
    learning_rate=[1/1000],
    batch_size=random.sample([int(i) for i in np.logspace(3, 9, num=7, base=2.0)], 7),
    n_epochs=[1000], weight_decay=[0], early_stopping=[30], rec_loss_weight=[1],
    top_loss_weight=[tlw], match_edges=['symmetric'], k=[1], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=15, k_step=5)],
    uid=[''], toposig_kwargs=[dict()], method_args=[dict(n_jobs=1)],
    experiment_dir='/cluster/home/schsimo/MT/output/WCTopoAE/SwissRoll/k1_multiseed_new',
    seed=seed, device='cpu', num_threads=1, verbose=False,
) for tlw, seed in zip(list(np.repeat([i for i in np.logspace(9, 13, num=5, base=2.0)], 9)),
                       [3643, 3666, 3678, 3789, 3809, 4000, 4094, 4333, 4600]*5)]

### SWISSROLL KN-MULTISEED

kn_multiseed = [ConfigGrid_WCAE(
    learning_rate=[1/1000],
    batch_size=random.sample([int(i) for i in np.logspace(3, 9, num=7, base=2.0)], 7),
    n_epochs=[1000], weight_decay=[0], early_stopping=[30], rec_loss_weight=[1],
    top_loss_weight=[tlw], match_edges=['symmetric'], k=[2, 4, 8], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=15, k_step=5)],
    uid=[''], toposig_kwargs=[dict()], method_args=[dict(n_jobs=1)],
    experiment_dir='/cluster/home/schsimo/MT/output/WCTopoAE/SwissRoll/kn_multiseed',
    seed=seed, device='cpu', num_threads=1, verbose=False,
) for tlw, seed in zip(list(np.repeat([i for i in np.logspace(9, 13, num=5, base=2.0)], 9)),
                       [6019, 6023, 6187, 6199, 6203, 6205, 6207, 6213, 6271]*5)]

kn_multiseed_new = [ConfigGrid_WCAE(
    learning_rate=[1/1000],
    batch_size=random.sample([int(i) for i in np.logspace(3, 9, num=7, base=2.0)], 7),
    n_epochs=[1000], weight_decay=[0], early_stopping=[30], rec_loss_weight=[1],
    top_loss_weight=[tlw], match_edges=['symmetric'], k=[1, 2, 4, 8], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=15, k_step=5)],
    uid=[''], toposig_kwargs=[dict()], method_args=[dict(n_jobs=1)],
    experiment_dir='/cluster/home/schsimo/MT/output/WCTopoAE/SwissRoll/kn_multiseed_new',
    seed=seed, device='cpu', num_threads=1, verbose=False,
) for tlw, seed in zip(list(np.repeat([i for i in np.logspace(9, 13, num=5, base=2.0)], 9)),
                       [11, 23, 44, 65, 88, 102, 103, 200, 6199]*5)]

kn_multiseed_new2 = [ConfigGrid_WCAE(
    learning_rate=[1/1000],
    batch_size=random.sample([int(i) for i in np.logspace(3, 9, num=7, base=2.0)], 7),
    n_epochs=[1000], weight_decay=[0], early_stopping=[30], rec_loss_weight=[1],
    top_loss_weight=[tlw], match_edges=['symmetric'], k=[1, 2, 4, 8], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=15, k_step=5)],
    uid=[''], toposig_kwargs=[dict()], method_args=[dict(n_jobs=1)],
    experiment_dir='/cluster/home/schsimo/MT/output/WCTopoAE/SwissRoll/kn_multiseed_new',
    seed=seed, device='cpu', num_threads=1, verbose=False,
) for tlw, seed in zip(list(np.repeat([i for i in np.logspace(9, 13, num=5, base=2.0)], 9)),
                       [232, 247, 297, 331, 382, 354, 375, 376, 346]*5)]

kn_multiseed_new3 = [ConfigGrid_WCAE(
    learning_rate=[1/1000],
    batch_size=random.sample([int(i) for i in np.logspace(3, 9, num=7, base=2.0)], 7),
    n_epochs=[1000], weight_decay=[0], early_stopping=[30], rec_loss_weight=[1],
    top_loss_weight=[tlw], match_edges=['symmetric'], k=[1, 2, 4, 8], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=15, k_step=5)],
    uid=[''], toposig_kwargs=[dict()], method_args=[dict(n_jobs=1)],
    experiment_dir='/cluster/home/schsimo/MT/output/WCTopoAE/SwissRoll/kn_multiseed_new',
    seed=seed, device='cpu', num_threads=1, verbose=False,
) for tlw, seed in zip(list(np.repeat([i for i in np.logspace(9, 13, num=5, base=2.0)], 9)),
                       [468, 480, 427, 437, 523, 501, 503, 510, 608]*5)]

### SWISSROLL SEED 102

k1seed102 = [ConfigGrid_WCAE(
    learning_rate=[1/1000],
    batch_size=random.sample([int(i) for i in np.logspace(3, 9, num=7, base=2.0)], 7),
    n_epochs=[1000], weight_decay=[0], early_stopping=[15], rec_loss_weight=[1],
    top_loss_weight=[tlw], match_edges=['symmetric'], k=[1], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=10, k_max=30, k_step=5)],
    uid=[''], toposig_kwargs=[dict()], method_args=[dict(n_jobs=1)],
    experiment_dir='/cluster/home/schsimo/MT/output/WCTopoAE/SwissRoll/k1seed102',
    seed=1, device='cpu', num_threads=1, verbose=False,
) for tlw in [int(i) for i in np.logspace(1, 13, base=2, num=13)]]

### SWISSROLL - MULTISEED K1 ONLY (DO NOT CHANGE!)

swissroll_k1multiseed_parallel_batch1 = [ConfigGrid_WCAE(
    learning_rate=[1/1000],
    batch_size=random.sample([int(i) for i in np.logspace(3, 9, num=7, base=2.0)], 7),
    n_epochs=[1000], weight_decay=[0], early_stopping=[10], rec_loss_weight=[1],
    top_loss_weight=[tlw], match_edges=['symmetric'], k=[1], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=10, k_max=30, k_step=5)],
    uid=[''], toposig_kwargs=[dict()], method_args=[dict(n_jobs=1)],
    experiment_dir='/cluster/home/schsimo/MT/output/WCTopoAE/SwissRoll/k1_multiseed',
    seed=1, device='cpu', num_threads=1, verbose=False,
) for tlw, seed in zip(list(np.repeat([i for i in np.logspace(1, 12, num=12, base=2.0)], 4)),
                       [6, 34, 79, 102]*12)]

swissroll_k1multiseed_parallel_batch2 = [ConfigGrid_WCAE(
    learning_rate=[1/1000],
    batch_size=random.sample([int(i) for i in np.logspace(3, 9, num=7, base=2.0)], 7),
    n_epochs=[1000], weight_decay=[0], early_stopping=[10], rec_loss_weight=[1],
    top_loss_weight=[tlw], match_edges=['symmetric'], k=[1, 2, 4, 8], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=10, k_max=30, k_step=5)],
    uid=[''], toposig_kwargs=[dict()], method_args=[dict(n_jobs=1)],
    experiment_dir='/cluster/home/schsimo/MT/output/WCTopoAE/SwissRoll/k1_multiseed',
    seed=1, device='cpu', num_threads=1, verbose=False,
) for tlw, seed in zip(list(np.repeat([i for i in np.logspace(1, 12, num=12, base=2.0)], 4)),
                       [143, 157, 193, 265]*12)]

swissroll_k1multiseed_parallel_batch3 = [ConfigGrid_WCAE(
    learning_rate=[1/1000],
    batch_size=random.sample([int(i) for i in np.logspace(3, 9, num=7, base=2.0)], 7),
    n_epochs=[1000], weight_decay=[0], early_stopping=[10], rec_loss_weight=[1],
    top_loss_weight=[tlw], match_edges=['symmetric'], k=[1], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=10, k_max=30, k_step=5)],
    uid=[''], toposig_kwargs=[dict()], method_args=[dict(n_jobs=1)],
    experiment_dir='/cluster/home/schsimo/MT/output/WCTopoAE/SwissRoll/k1_multiseed',
    seed=1, device='cpu', num_threads=1, verbose=False,
) for tlw, seed in zip(list(np.repeat([i for i in np.logspace(1, 12, num=12, base=2.0)], 4)),
                       [293, 312, 376, 577]*12)]

swissroll_k1multiseed_parallel_batch4 = [ConfigGrid_WCAE(
    learning_rate=[1/1000],
    batch_size=random.sample([int(i) for i in np.logspace(3, 9, num=7, base=2.0)], 7),
    n_epochs=[1000], weight_decay=[0], early_stopping=[10], rec_loss_weight=[1],
    top_loss_weight=[tlw], match_edges=['symmetric'], k=[1], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=10, k_max=30, k_step=5)],
    uid=[''], toposig_kwargs=[dict()], method_args=[dict(n_jobs=1)],
    experiment_dir='/cluster/home/schsimo/MT/output/WCTopoAE/SwissRoll/k1_multiseed',
    seed=1, device='cpu', num_threads=1, verbose=False,
) for tlw, seed in zip(list(np.repeat([i for i in np.logspace(1, 12, num=12, base=2.0)], 4)),
                       [600, 654, 789, 872]*12)]

#######

euler_kn_seed1_parallel_push1 = [ConfigGrid_WCAE(
    learning_rate=[1/1000],
    batch_size=[int(i) for i in np.logspace(4, 9, base=2, num=6)],
    n_epochs=[1000], weight_decay=[0], early_stopping=[35], rec_loss_weight=[1],
    top_loss_weight=[tlw], match_edges=['push1'], k=[2, 4, 8, 16], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=80, k_step=25)],
    uid=[''], toposig_kwargs=[dict()], method_args=[dict(n_jobs=1)],
    experiment_dir='/cluster/home/schsimo/MT/output/WCTopoAE/SwissRoll/push1/kn_seed1',
    seed=1, device='cpu', num_threads=1, verbose=False,
) for tlw in [int(i) for i in np.logspace(0, 11, base=2, num=12)]]

euler_kn_seed1_parallel = [ConfigGrid_WCAE(
    learning_rate=[1/1000],
    batch_size=[int(i) for i in np.logspace(4, 9, base=2, num=6)],
    n_epochs=[1000], weight_decay=[0], early_stopping=[35], rec_loss_weight=[1],
    top_loss_weight=[int(i) for i in np.logspace(0, 9, base=2, num=10)],
    match_edges=['symmetric'], k=[n], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=80, k_step=25)],
    uid=[''], toposig_kwargs=[dict()], method_args=[dict(n_jobs=1)],
    experiment_dir='/cluster/home/schsimo/MT/output/WCTopoAE/SwissRoll/kn_seed1',
    seed=1, device='cpu', num_threads=1, verbose=False,
) for n in [2, 4, 8, 16]]

euler_k1_seed1 = ConfigGrid_WCAE(
    learning_rate=[1/1000],
    batch_size=[int(i) for i in np.logspace(4, 9, base=2, num=6)],
    n_epochs=[500], weight_decay=[0], early_stopping=[20], rec_loss_weight=[1],
    top_loss_weight=[int(i) for i in np.logspace(0, 9, base=2, num=10)],
    match_edges=['symmetric'], k=[1], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=80, k_step=25)],
    uid=[''], toposig_kwargs=[dict()], method_args=[dict(n_jobs=4)],
    experiment_dir='/cluster/home/schsimo/MT/output/WCTopoAE/SwissRoll/k1_seed1',
    seed=1, device='cpu', num_threads=1, verbose=False,
)

####

nonorm = ConfigGrid_WCAE(
    learning_rate=[1/100, 1/10], batch_size=random.sample([16, 256], 2),
    n_epochs=[500], weight_decay=[1e-2], early_stopping=[20], rec_loss_weight=[1],
    top_loss_weight=[2048], match_edges=['symmetric'], k=[2], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=15, k_step=5)],
    uid=[''], toposig_kwargs=[dict()],
    method_args=[dict(n_jobs=2, normalize=False)],
    experiment_dir='/output/TopoAE_ext/ver_nonorm',
    seed=1, device='cpu', num_threads=2, verbose=True,
)

nonorm_diffme = [ConfigGrid_WCAE(
    learning_rate=[1/100], batch_size=random.sample([256], 1),
    n_epochs=[500], weight_decay=[1e-2], early_stopping=[30], rec_loss_weight=[1],
    top_loss_weight=[2048], match_edges=[match], k=[2], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=15, k_step=5)],
    uid=[''], toposig_kwargs=[dict()],
    method_args=[dict(n_jobs=1, normalize=False)],
    experiment_dir='/output/TopoAE_ext/ver_nonorm',
    seed=1, device='cpu', num_threads=2, verbose=True,
) for match in ['symmetric', 'push', 'push_active']]

nonorm_mupush = [ConfigGrid_WCAE(
    learning_rate=[1/25, 1/10], batch_size=[256],
    n_epochs=[1000], weight_decay=[1e-7], early_stopping=[50], rec_loss_weight=[1],
    top_loss_weight=[8192], match_edges=['push_active'], k=[k], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=15, k_step=5)],
    uid=[''], toposig_kwargs=[dict()],
    method_args=[dict(n_jobs=1, normalize=True, mu_push=1.25)],
    experiment_dir='/output/TopoAE_ext/ver_nonorm3',
    seed=1, device='cpu', num_threads=2, verbose=True,
) for k in [6, 8, 10, 12, 14, 16]]

nonorm_mupush2 = [ConfigGrid_WCAE(
    learning_rate=[1/25, 1/10], batch_size=[128],
    n_epochs=[1000], weight_decay=[1e-7], early_stopping=[50], rec_loss_weight=[1],
    top_loss_weight=[8192], match_edges=['push_active'], k=[k], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=15, k_step=5)],
    uid=[''], toposig_kwargs=[dict()],
    method_args=[dict(n_jobs=1, normalize=True, mu_push=1.25),
                 dict(n_jobs=1, normalize=True, mu_push=1.5)],
    experiment_dir='/output/TopoAE_ext/ver_nonorm3',
    seed=1, device='cpu', num_threads=2, verbose=True,
) for k in [1, 2, 3, 4, 5, 6, 7, 8]]

### TEST

swissroll_testing = ConfigGrid_WCAE(
    learning_rate=[1/1000], batch_size=[64], n_epochs=[2], weight_decay=[0],
    early_stopping=[15], rec_loss_weight=[1],
    top_loss_weight=[32, 64, 128, 256, 512, 1024, 2048],
    match_edges=['symmetric'], k=[1], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [640]},  # 2560
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=80, k_step=25)],
    uid=[''], toposig_kwargs=[dict()],
    method_args=[dict(n_jobs=1, online_wc=True)],
    experiment_dir='/output/TopoAE_ext/test',
    seed=1, device='cpu', num_threads=1, verbose=False,
)

swissroll_testing_verification = ConfigGrid_WCAE(
    learning_rate=[1/1000], batch_size=[64], n_epochs=[100], weight_decay=[0],
    early_stopping=[15], rec_loss_weight=[1], top_loss_weight=[256],
    match_edges=['verification'], k=[1], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [640]},  # 2560
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=80, k_step=25)],
    uid=[''], toposig_kwargs=[dict()],
    method_args=[dict(n_jobs=1, verification=True)],
    experiment_dir='/output/TopoAE_ext/verification',
    seed=1, device='cpu', num_threads=2, verbose=False,
)

swissroll_testing2 = ConfigGrid_WCAE(
    learning_rate=[1/1000], batch_size=[16], n_epochs=[10], weight_decay=[0],
    early_stopping=[35], rec_loss_weight=[1], top_loss_weight=[384],
    match_edges=['push1'], k=[10], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=80, k_step=25)],
    uid=[''], toposig_kwargs=[dict()], method_args=[dict(n_jobs=8)],
    experiment_dir='/output/TopoAE_ext/verification',
    seed=1, device='cpu', num_threads=8, verbose=False,
)

debug = ConfigGrid_WCAE(
    learning_rate=[1/1000], batch_size=[16], n_epochs=[10], weight_decay=[0],
    early_stopping=[35], rec_loss_weight=[1], top_loss_weight=[384],
    match_edges=['push1'], k=[10], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [640]},  # 2560
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=15, k_step=5)],
    uid=[''], toposig_kwargs=[dict()], method_args=[dict(n_jobs=1)],
    experiment_dir='/output/TopoAE_ext/verification',
    seed=1, device='cpu', num_threads=1, verbose=False,
)

swissroll_testing_euler = ConfigGrid_WCAE(
    learning_rate=[1/1000], batch_size=[64], n_epochs=[5], weight_decay=[0],
    early_stopping=[35], rec_loss_weight=[1], top_loss_weight=[384],
    match_edges=['symmetric'], k=[10], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=80, k_step=25)],
    uid=[''], toposig_kwargs=[dict()],
    method_args=[dict(n_jobs=1, normalize=True)],
    experiment_dir='/cluster/home/schsimo/MT/output/WCTopoAE/SwissRoll/testing',
    seed=1, device='cpu', num_threads=1, verbose=False,
)

swissroll_testing_euler_multi = ConfigGrid_WCAE(
    learning_rate=[1/1000], batch_size=[64], n_epochs=[5], weight_decay=[0],
    early_stopping=[35], rec_loss_weight=[1], top_loss_weight=[384],
    match_edges=['symmetric'], k=[10], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=80, k_step=25)],
    uid=[''], toposig_kwargs=[dict()],
    method_args=[dict(n_jobs=2, normalize=True)],
    experiment_dir='/cluster/home/schsimo/MT/output/WCTopoAE/SwissRoll/testing',
    seed=1, device='cpu', num_threads=1, verbose=False,
)

swissroll_testing_euler_parallel = [ConfigGrid_WCAE(
    learning_rate=[1/1000], batch_size=[64], n_epochs=[5], weight_decay=[0],
    early_stopping=[35], rec_loss_weight=[i], top_loss_weight=[384],
    match_edges=['symmetric'], k=[10], r_max=[10],
    model_class=[Autoencoder_MLP_topoae],
    model_kwargs={'input_dim': [3], 'latent_dim': [2], 'size_hidden_layers': [[32, 32]]},
    dataset=[SwissRoll()], sampling_kwargs={'n_samples': [2560]},
    eval=[ConfigEval(active=True, evaluate_on='test', save_eval_latent=True,
                     save_train_latent=True, online_visualization=False,
                     k_min=5, k_max=80, k_step=25)],
    uid=[''], toposig_kwargs=[dict()],
    method_args=[dict(n_jobs=1, normalize=True)],
    experiment_dir='/cluster/home/schsimo/MT/output/WCTopoAE/SwissRoll/testing',
    seed=1, device='cpu', num_threads=1, verbose=False,
) for i in [1, 2]]
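Editor's note: every hyperparameter in these grids is a list of candidate values. Assuming ConfigGrid_WCAE behaves like a typical grid-search container that expands list-valued fields into a Cartesian product of concrete runs (an assumption; the real class lives in src.models.WitnessComplexAE.config and is not shown here), the expansion step could be sketched as follows, with `expand_grid` being a hypothetical helper:

# Hypothetical expansion helper, not part of the repository shown above.
from itertools import product


def expand_grid(**grid):
    """Yield one flat dict per combination of the list-valued axes."""
    keys = list(grid)
    for combo in product(*(grid[k] for k in keys)):
        yield dict(zip(keys, combo))


# e.g. 2 learning rates x 3 top-loss weights x 1 k value -> 6 run configs
runs = list(expand_grid(learning_rate=[1/100, 1/10],
                        top_loss_weight=[2048, 4096, 8192],
                        k=[2]))
assert len(runs) == 6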
27.8125
151
0.57469
4,344
33,820
4.207182
0.063306
0.031736
0.010834
0.012257
0.929251
0.92575
0.922138
0.922084
0.914095
0.906818
0
0.071489
0.26709
33,820
1,216
152
27.8125
0.665833
0.005736
0
0.865188
0
0.001706
0.108216
0.054257
0
0
0
0
0
1
0
false
0
0.005119
0
0.005119
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
505dd51d723bb4fea287cbc0ad7bdc83de8db09f
31
py
Python
python_dep_generator/__init__.py
ashishb/python_dep_generator
7c070075329ec5fe7330e895d6ef50e80a7c1081
[ "MIT" ]
11
2015-01-26T03:23:29.000Z
2021-01-03T04:42:36.000Z
python_dep_generator/__init__.py
ashishb/python_dep_generator
7c070075329ec5fe7330e895d6ef50e80a7c1081
[ "MIT" ]
null
null
null
python_dep_generator/__init__.py
ashishb/python_dep_generator
7c070075329ec5fe7330e895d6ef50e80a7c1081
[ "MIT" ]
3
2016-04-04T05:11:18.000Z
2018-09-21T05:09:05.000Z
from .generate_dep import main
15.5
30
0.83871
5
31
5
1
0
0
0
0
0
0
0
0
0
0
0
0.129032
31
1
31
31
0.925926
0
0
0
1
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
acb54b01db531dbe34d4280de036ec1b750513cc
17,851
py
Python
test/test_moler_test.py
AdamKlekowski/moler
9d032bad402d9863685b2a8624320566512c14cc
[ "BSD-3-Clause" ]
57
2018-02-20T08:16:47.000Z
2022-03-28T10:36:57.000Z
test/test_moler_test.py
Laymer/moler
2d7b89efdc2ca5e9975112b97934b396e24b5505
[ "BSD-3-Clause" ]
377
2018-07-19T11:56:27.000Z
2021-07-09T13:08:12.000Z
test/test_moler_test.py
Laymer/moler
2d7b89efdc2ca5e9975112b97934b396e24b5505
[ "BSD-3-Clause" ]
24
2018-04-14T20:49:40.000Z
2022-03-29T10:44:26.000Z
# -*- coding: utf-8 -*-
__author__ = 'Michal Ernst, Marcin Usielski'
__copyright__ = 'Copyright (C) 2018-2019, Nokia'
__email__ = 'michal.ernst@nokia.com, marcin.usielski@nokia.com'

import pytest

from moler.connection_observer import ConnectionObserver
from moler.exceptions import ExecutionException
from moler.util.moler_test import MolerTest


def test_moler_test_warn():
    ConnectionObserver.get_unraised_exceptions()
    MolerTest.warning("Warning test")
    ConnectionObserver.get_unraised_exceptions()


def test_moler_test_not_raise_exception_when_no_steps_end_for_global_method_twice():
    ConnectionObserver.get_unraised_exceptions()
    moler_test_not_raise_exception_when_no_steps_end_for_global_method_twice()
    ConnectionObserver.get_unraised_exceptions()


def test_moler_test_raise_exception_when_not_callable_passed():
    ConnectionObserver.get_unraised_exceptions()
    var = "no callable"
    with pytest.raises(ExecutionException):
        MolerTest._decorate(var)
    ConnectionObserver.get_unraised_exceptions()


def test_moler_test_wrapper():
    ConnectionObserver.get_unraised_exceptions()
    decorated = moler_test_raise_exception_when_no_steps_end_for_global_method
    ret = MolerTest._wrapper(decorated, False)
    assert decorated == ret
    ConnectionObserver.get_unraised_exceptions()


def test_moler_test_exception_no_exception():
    ConnectionObserver.get_unraised_exceptions()
    from moler.cmd.unix.ls import Ls
    cmd = Ls(connection=None)
    cmd.set_exception("wrong exception")
    cmd._is_done = True
    with pytest.raises(ExecutionException):
        moler_test_not_raise_exception_when_no_steps_end_for_global_method()
    ConnectionObserver.get_unraised_exceptions()


def test_moler_test_not_raise_exception_when_steps_end(moler_test_se):
    ConnectionObserver.get_unraised_exceptions()
    moler_test_se.test_not_raise_exception_when_steps_end()
    ConnectionObserver.get_unraised_exceptions()


def test_moler_test_test_raise_exception_when_not_call_steps_end(moler_test_se):
    ConnectionObserver.get_unraised_exceptions()
    with pytest.raises(ExecutionException):
        moler_test_se.test_raise_exception_when_not_call_steps_end()
    ConnectionObserver.get_unraised_exceptions()


def test_moler_test_raise_exception_when_log_error(moler_test_se):
    ConnectionObserver.get_unraised_exceptions()
    with pytest.raises(ExecutionException):
        moler_test_se.test_raise_exception_when_log_error()
    ConnectionObserver.get_unraised_exceptions()


def test_moler_test_raise_exception_when_log_error_raise_exception_set(moler_test_se):
    ConnectionObserver.get_unraised_exceptions()
    with pytest.raises(ExecutionException):
        moler_test_se.test_raise_exception_when_log_error_raise_exception_set()
    ConnectionObserver.get_unraised_exceptions()


def test_moler_test_not_raise_exception_when_no_steps_end(moler_test):
    ConnectionObserver.get_unraised_exceptions()
    moler_test.test_not_raise_exception_when_no_steps_end()
    ConnectionObserver.get_unraised_exceptions()


def test_moler_test_raise_exception_when_no_steps_end_for_global_method():
    with pytest.raises(ExecutionException):
        moler_test_raise_exception_when_no_steps_end_for_global_method()
    ConnectionObserver.get_unraised_exceptions()


def test_moler_test_not_raise_exception_when_no_steps_end_for_global_method():
    ConnectionObserver.get_unraised_exceptions()
    moler_test_not_raise_exception_when_no_steps_end_for_global_method()
    ConnectionObserver.get_unraised_exceptions()


# connection observer running in background thread may raise exception
# but such exception is not visible inside MainThread
# However, in all such cases connection observer stores exception via conn_obs.set_exception()

def test_exception_in_observer_is_raised_when_result_is_called_after_set_exception(do_nothing_connection_observer,
                                                                                   ObserverExceptionClass):
    exc = ObserverExceptionClass("some error inside observer")

    def function_using_observer():
        observer = do_nothing_connection_observer
        # for real usage observer should be started to run background thread that will set_exception()
        # but for unit tests we just call it (simulating background thread)
        observer.set_exception(exc)
        observer.result()

    with pytest.raises(ObserverExceptionClass) as err:
        function_using_observer()
    assert err.value == exc
    ConnectionObserver.get_unraised_exceptions()


def test_exception_in_observer_is_ignored_if_no_result_called_nor_decorator_on_function(do_nothing_connection_observer,
                                                                                        ObserverExceptionClass):
    def function_using_observer():
        observer = do_nothing_connection_observer
        observer.set_exception(ObserverExceptionClass("some error inside observer"))

    function_using_observer()  # should not raise so test should pass
    ConnectionObserver.get_unraised_exceptions()


def test_log_error_in_next_test_when_previous_set_exception(do_nothing_connection_observer,
                                                            ObserverExceptionClass):
    exc = ObserverExceptionClass("some error inside observer")

    def function_using_observer_and_set_exception():
        observer = do_nothing_connection_observer
        # for real usage observer should be started to run background thread that will set_exception()
        # but for unit tests we just call it (simulating background thread)
        observer.set_exception(exc)

    @MolerTest.raise_background_exceptions(check_steps_end=True)
    def function_using_observer():
        observer = do_nothing_connection_observer
        # for real usage observer should be started to run background thread that will set_exception()
        # but for unit tests we just call it (simulating background thread)
        observer.result()
        MolerTest.steps_end()

    function_using_observer_and_set_exception()
    with pytest.raises(ExecutionException) as err:
        function_using_observer()
    assert "some error inside observer" in str(err.value)
    ConnectionObserver.get_unraised_exceptions()


def test_exception_in_observer_is_raised_if_no_result_called_but_decorator_on_function(do_nothing_connection_observer,
                                                                                       ObserverExceptionClass):
    from moler.util.moler_test import MolerTest
    exc = ObserverExceptionClass("some error inside observer")

    @MolerTest.raise_background_exceptions()
    def function_using_observer():
        observer = do_nothing_connection_observer
        observer.set_exception(exc)

    with pytest.raises(ExecutionException) as err:
        function_using_observer()
    ConnectionObserver.get_unraised_exceptions()


def test_exception_in_observer_is_raised_if_no_result_called_but_parameterless_decorator_on_function(
        do_nothing_connection_observer, ObserverExceptionClass):
    from moler.util.moler_test import MolerTest
    exc = ObserverExceptionClass("some error inside observer")

    @MolerTest.raise_background_exceptions
    def function_using_observer():
        observer = do_nothing_connection_observer
        observer.set_exception(exc)

    with pytest.raises(ExecutionException) as err:
        function_using_observer()
    ConnectionObserver.get_unraised_exceptions()


def test_exception_in_observer_is_raised_if_no_result_called_but_decorator_on_method(do_nothing_connection_observer,
                                                                                     ObserverExceptionClass):
    from moler.util.moler_test import MolerTest
    exc = ObserverExceptionClass("some error inside observer")

    class MyTest(object):
        @MolerTest.raise_background_exceptions()
        # @MolerTest.raise_background_exceptions  # doesn't work since it is created by python and given class as first argument
        #
        # compare with syntax of @pytest.fixture @pytest.yield_fixture
        def method_using_observer(self):
            observer = do_nothing_connection_observer
            observer.set_exception(exc)

    with pytest.raises(ExecutionException) as err:
        MyTest().method_using_observer()
    ConnectionObserver.get_unraised_exceptions()


def test_exception_in_observer_is_raised_if_no_result_called_but_parameterless_decorator_on_method(
        do_nothing_connection_observer, ObserverExceptionClass):
    from moler.util.moler_test import MolerTest
    exc = ObserverExceptionClass("some error inside observer")

    class MyTest(object):
        @MolerTest.raise_background_exceptions
        def method_using_observer(self):
            observer = do_nothing_connection_observer
            observer.set_exception(exc)

    with pytest.raises(ExecutionException) as err:
        MyTest().method_using_observer()
    ConnectionObserver.get_unraised_exceptions()


def test_exception_in_observer_is_raised_if_no_result_called_but_decorator_on_classmethod(
        do_nothing_connection_observer, ObserverExceptionClass):
    from moler.util.moler_test import MolerTest
    exc = ObserverExceptionClass("some error inside observer")

    with pytest.raises(ExecutionException) as err:
        class MyTest(object):
            # TODO: Add later
            @MolerTest.raise_background_exceptions()
            @classmethod
            def method_using_observer(cls):
                observer = do_nothing_connection_observer
                observer.set_exception(exc)

        MyTest.method_using_observer()
    ConnectionObserver.get_unraised_exceptions()


def test_exception_in_observer_is_raised_if_no_result_called_but_parameterless_decorator_on_classmethod(
        do_nothing_connection_observer, ObserverExceptionClass):
    from moler.util.moler_test import MolerTest
    exc = ObserverExceptionClass("some error inside observer")

    with pytest.raises(ExecutionException) as err:
        class MyTest(object):
            # TODO: Add later support for decorating classmethod and staticmethod
            @MolerTest.raise_background_exceptions
            @classmethod
            def method_using_observer(cls):
                observer = do_nothing_connection_observer
                observer.set_exception(exc)

        MyTest.method_using_observer()
    ConnectionObserver.get_unraised_exceptions()


def test_exception_in_observer_is_raised_if_no_result_called_but_decorator_on_staticmethod(
        do_nothing_connection_observer, ObserverExceptionClass):
    from moler.util.moler_test import MolerTest
    exc = ObserverExceptionClass("some error inside observer")

    with pytest.raises(ExecutionException) as err:
        class MyTest(object):
            # TODO: Add later support for decorating classmethod and staticmethod
            @MolerTest.raise_background_exceptions()
            @staticmethod
            def method_using_observer():
                observer = do_nothing_connection_observer
                observer.set_exception(exc)

        MyTest.method_using_observer()
    ConnectionObserver.get_unraised_exceptions()


def test_exception_in_observer_is_raised_if_no_result_called_but_parameterless_decorator_on_staticmethod(
        do_nothing_connection_observer, ObserverExceptionClass):
    from moler.util.moler_test import MolerTest
    exc = ObserverExceptionClass("some error inside observer")

    with pytest.raises(ExecutionException) as err:
        class MyTest(object):
            # TODO: Add later
            @MolerTest.raise_background_exceptions
            @staticmethod
            def method_using_observer():
                observer = do_nothing_connection_observer
                observer.set_exception(exc)

        MyTest.method_using_observer()
    ConnectionObserver.get_unraised_exceptions()


def test_exception_in_observer_is_raised_if_no_result_called_but_decorator_on_class(do_nothing_connection_observer,
                                                                                    ObserverExceptionClass):
    from moler.util.moler_test import MolerTest
    exc = ObserverExceptionClass("some error inside observer")

    @MolerTest.raise_background_exceptions()
    class MyTest(object):
        def method_using_observer(self):
            observer = do_nothing_connection_observer
            observer.set_exception(exc)

    with pytest.raises(ExecutionException) as err:
        MyTest().method_using_observer()
    ConnectionObserver.get_unraised_exceptions()


def test_exception_in_observer_is_raised_if_no_result_called_but_parameterless_decorator_on_class(
        do_nothing_connection_observer, ObserverExceptionClass):
    from moler.util.moler_test import MolerTest
    exc = ObserverExceptionClass("some error inside observer")

    @MolerTest.raise_background_exceptions
    class MyTest(object):
        def method_using_observer(self):
            observer = do_nothing_connection_observer
            observer.set_exception(exc)

    with pytest.raises(ExecutionException) as err:
        MyTest().method_using_observer()
    ConnectionObserver.get_unraised_exceptions()


def test_exception_in_observer_is_raised_if_no_result_called_but_decorator_on_derived_class(
        do_nothing_connection_observer, ObserverExceptionClass):
    from moler.util.moler_test import MolerTest
    exc = ObserverExceptionClass("some error inside observer")

    class MyTestBase(object):
        def method_using_observer(self):
            observer = do_nothing_connection_observer
            observer.set_exception(exc)

    @MolerTest.raise_background_exceptions()
    class MyTest(MyTestBase):
        def method_of_derived_class(self):
            pass

    with pytest.raises(ExecutionException) as err:
        MyTest().method_using_observer()
    ConnectionObserver.get_unraised_exceptions()


def test_exception_in_observer_is_raised_if_no_result_called_but_parameterless_decorator_on_derived_class(
        do_nothing_connection_observer, ObserverExceptionClass):
    from moler.util.moler_test import MolerTest
    exc = ObserverExceptionClass("some error inside observer")

    class MyTestBase(object):
        def method_using_observer(self):
            observer = do_nothing_connection_observer
            observer.set_exception(exc)

    @MolerTest.raise_background_exceptions
    class MyTest(MyTestBase):
        def method_of_derived_class(self):
            pass

    with pytest.raises(ExecutionException) as err:
        MyTest().method_using_observer()
    ConnectionObserver.get_unraised_exceptions()


def test_info_with_dump():
    MolerTest.info("Testing info message", dump={'key': 'value'})


def test_warning_with_dump():
    MolerTest.warning("Testing warning message", dump={'key': 'value'})


def test_dump():
    test_dict = {'key': 'value'}
    test_string = MolerTest._dump(test_dict)
    assert test_string == "{'key': 'value'}"


def test_get_string_message():
    test_dict = {'key': 'value'}
    test_string = "This is sample message"
    msg = MolerTest._get_string_message(test_string, test_dict)
    assert msg == "This is sample message\n{'key': 'value'}"


# --------------------------- resources ---------------------------


@pytest.yield_fixture
def moler_test_se():
    from moler.util.moler_test import MolerTest

    @MolerTest.raise_background_exceptions(check_steps_end=True)
    class MolerTestExampleSE(object):
        def test_not_raise_exception_when_steps_end(self):
            MolerTest.info("Start MolerTest test with log and steps_end")
            MolerTest.steps_end()

        def test_raise_exception_when_not_call_steps_end(self):
            MolerTest.info("Start MolerTest test with log and without steps_end")

        def test_raise_exception_when_log_error(self):
            MolerTest.error("Start MolerTest test with log_error")

        def test_raise_exception_when_log_error_raise_exception_set(self):
            MolerTest.error("Start MolerTest test with log_error and raise_exception",
                            raise_exception=True)

    yield MolerTestExampleSE()


@pytest.yield_fixture
def moler_test():
    from moler.util.moler_test import MolerTest

    @MolerTest.raise_background_exceptions()
    class MolerTestExample(object):
        def test_not_raise_exception_when_no_steps_end(self):
            MolerTest.info("Start MolerTest test with log and steps_end")

    yield MolerTestExample()


@MolerTest.raise_background_exceptions(check_steps_end=True)
def moler_test_raise_exception_when_no_steps_end_for_global_method():
    MolerTest.info("Start global method with log and without steps_end")


@MolerTest.raise_background_exceptions
@MolerTest.raise_background_exceptions
def moler_test_not_raise_exception_when_no_steps_end_for_global_method_twice():
    MolerTest.info("Start global method with log and without steps_end")


@MolerTest.raise_background_exceptions
def moler_test_not_raise_exception_when_no_steps_end_for_global_method():
    MolerTest.info("Start global method with log and without steps_end")


@pytest.yield_fixture
def do_nothing_connection_observer():
    from moler.connection_observer import ConnectionObserver

    class DoNothingObserver(ConnectionObserver):
        def data_received(self, data, recv_time):  # we need to overwrite it since it is @abstractmethod
            pass  # ignore incoming data

    observer = DoNothingObserver()
    ConnectionObserver.get_unraised_exceptions()
    yield observer
    ConnectionObserver.get_unraised_exceptions()


@pytest.fixture
def ObserverExceptionClass():
    class ObserverException(Exception):
        pass
    return ObserverException
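Editor's note: the tests above exercise MolerTest.raise_background_exceptions both bare (@decorator) and called (@decorator(...)). A generic way to support both spellings, shown as an illustrative sketch rather than Moler's actual implementation:

import functools


def dual_mode_decorator(obj=None, *, check_steps_end=False):
    """Hypothetical decorator usable both as @dual_mode_decorator and as
    @dual_mode_decorator(check_steps_end=True).

    Illustrative only -- Moler's real decorator additionally walks classes
    and re-raises exceptions collected from background connection observers.
    """
    def decorate(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            # the real implementation would check steps_end here and
            # gather any unraised background exceptions
            return result
        return wrapper

    if obj is None:           # called with arguments: @dual_mode_decorator(...)
        return decorate
    return decorate(obj)      # used bare: @dual_mode_decorator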
38.471983
128
0.748305
2,021
17,851
6.182583
0.090549
0.033854
0.092837
0.12485
0.846339
0.837695
0.795038
0.779512
0.751661
0.719008
0
0.000623
0.191026
17,851
463
129
38.555076
0.864622
0.071873
0
0.713396
0
0
0.066002
0.002901
0
0
0
0.00216
0.015576
1
0.193146
false
0.015576
0.062305
0
0.308411
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
4a0fc4e68cca766f8615982bf9af6e6ab61ad00c
51,719
py
Python
services/data_generator.py
meredithmurfin/DynamicPlacementGenerator
60c0e3f1892c702ae46db78a89ff5e64f472a1b7
[ "MIT" ]
2
2021-01-14T12:53:13.000Z
2021-03-28T19:28:46.000Z
services/data_generator.py
EccRiley/DynamicPlacementGenerator
60c0e3f1892c702ae46db78a89ff5e64f472a1b7
[ "MIT" ]
null
null
null
services/data_generator.py
EccRiley/DynamicPlacementGenerator
60c0e3f1892c702ae46db78a89ff5e64f472a1b7
[ "MIT" ]
1
2020-04-29T19:24:35.000Z
2020-04-29T19:24:35.000Z
from util import data_util, reader_util, writer_util import data from itertools import combinations, combinations_with_replacement, permutations, chain, groupby from operator import sub import numpy as np import pprint, logging, copy def generate_all_possible_states(): all_states = {1: [], 2: [], 3: [], 4: [], 5: []} for num in range(1, 6): logging.info("Generating all possible states for " + str(num) + " engine(s) to use for the MDP...") sums = find_all_unique_sums_to_n(num) for s in sums: if len(s) == 1: new_states = generate_states_for_sum_length_1(s) all_states[num].extend(new_states) elif len(s) == 2: new_states = generate_states_for_sum_length_2(s) all_states[num].extend(new_states) elif len(s) == 3: new_states = generate_states_for_sum_length_3(s) all_states[num].extend(new_states) elif len(s) == 4: new_states = generate_states_for_sum_length_4(s) all_states[num].extend(new_states) elif len(s) == 5: new_states = generate_states_for_sum_length_5(s) all_states[num].extend(new_states) logging.info("States have been generated.") writer_util.export_all_possible_states(all_states) def generate_states_for_sum_length_1(s): states = [] state = [0, 0, 0, 0, 0, 0, 0] for i in range(7): current_state = state[:] current_state[i] = s[0] states.append(current_state) return states def generate_states_for_sum_length_2(s): states = [] state = [0, 0, 0, 0, 0, 0, 0] for i in range(7): for j in range(7): if indices_are_not_equal([i, j]): current_state = state[:] current_state[i] = s[0] current_state[j] = s[1] states.append(current_state) return states def generate_states_for_sum_length_3(s): states = [] state = [0, 0, 0, 0, 0, 0, 0] for i in range(7): for j in range(7): for k in range(7): if indices_are_not_equal([i, j, k]): current_state = state[:] current_state[i] = s[0] current_state[j] = s[1] current_state[k] = s[2] states.append(current_state) return states def generate_states_for_sum_length_4(s): states = [] state = [0, 0, 0, 0, 0, 0, 0] for i in range(7): for j in range(7): for k in range(7): for l in range(7): if indices_are_not_equal([i, j, k, l]): current_state = state[:] current_state[i] = s[0] current_state[j] = s[1] current_state[k] = s[2] current_state[l] = s[3] states.append(current_state) return states def generate_states_for_sum_length_5(s): states = [] state = [0, 0, 0, 0, 0, 0, 0] for i in range(7): for j in range(7): for k in range(7): for l in range(7): for m in range(7): if indices_are_not_equal([i, j, k, l, m]): current_state = state[:] current_state[i] = s[0] current_state[j] = s[1] current_state[k] = s[2] current_state[l] = s[3] current_state[m] = s[4] states.append(current_state) return states def generate_all_possible_actions(): for num in range(1, 6): logging.info("Generating all possible actions for " + str(num) + " engine(s) to use for the MDP...") all_actions = [] sums = find_all_unique_sums_to_n(num) for s in sums: if len(s) == 1: new_actions = generate_actions_for_sum_length_1(s) all_actions.extend(new_actions) elif len(s) == 2: new_actions = generate_actions_for_sum_length_2(s) all_actions.extend(new_actions) elif len(s) == 3: new_actions = generate_actions_for_sum_length_3(s) all_actions.extend(new_actions) elif len(s) == 4: new_actions = generate_actions_for_sum_length_4(s) all_actions.extend(new_actions) logging.info("Actions have been generated.") writer_util.export_all_possible_actions(num, all_actions) def generate_actions_for_sum_length_1(s): actions = [] action_row = [np.zeros(7)] * 7 action = np.array(action_row) for i in range(7): for j in range(7): 
current_action = copy.deepcopy(action) current_action[i][j] = s[0] actions.append(current_action) return actions def generate_actions_for_sum_length_2(s): actions = [] action_row = [np.zeros(7)] * 7 action = np.array(action_row) for i in range(7): for j in range(7): for k in range(7): for l in range(7): if index_pairs_are_not_equal([[i, j], [k, l]]): current_action = copy.deepcopy(action) current_action[i][j] = s[0] current_action[k][l] = s[1] actions.append(current_action) return actions def generate_actions_for_sum_length_3(s): actions = [] action_row = [np.zeros(7)] * 7 action = np.array(action_row) for i in range(7): for j in range(7): for k in range(7): for l in range(7): for m in range(7): for n in range(7): if index_pairs_are_not_equal([[i, j], [k, l], [m, n]]): current_action = copy.deepcopy(action) current_action[i][j] = s[0] current_action[k][l] = s[1] current_action[m][n] = s[2] actions.append(current_action) return actions def generate_actions_for_sum_length_4(s): actions = [] action_row = [np.zeros(7)] * 7 action = np.array(action_row) for i in range(7): for j in range(7): for k in range(7): for l in range(7): for m in range(7): for n in range(7): for o in range(7): for p in range(7): if index_pairs_are_not_equal([[i, j], [k, l], [m, n], [o, p]]): current_action = copy.deepcopy(action) current_action[i][j] = s[0] current_action[k][l] = s[1] current_action[m][n] = s[2] current_action[o][p] = s[3] actions.append(current_action) return actions def find_all_unique_sums_to_n(n): beginning, middle, end = [0], list(range(1, n)), [n] splits = (d for i in range(n) for d in combinations(middle, i)) list_of_sums = (list(map(sub, chain(split, end), chain(beginning, split))) for split in splits) unique_list_of_sums = get_unique_list_of_lists(list_of_sums) return unique_list_of_sums def get_unique_list_of_lists(a_list): unique_list_of_lists = [] for l in a_list: l.sort() if l not in unique_list_of_lists: unique_list_of_lists.append(l) return unique_list_of_lists def indices_are_not_equal(indices): if len(indices) != len(set(indices)): return False return True def index_pairs_are_not_equal(index_pairs): for pair in index_pairs: if index_pairs.count(pair) > 1: return False return True def generate_all_possible_removal_situations(engine_subtype): logging.info("Generating all possible removal situations for the " + engine_subtype + "...") removals_info = data.removals_info[engine_subtype] num_different_removals_non_hubs = removals_info['MAX_NUM_REMOVALS_MONTHLY_NON_HUBS'] assert num_different_removals_non_hubs in [0, 1, 2], "This program cannot handle generating all removal situations for non-hub locations having more than 2 total removals. Make sure MAX_NUM_REMOVALS_MONTHLY_NON_HUBS is set to 0, 1, or 2." assert removals_info['MAX_NUM_REMOVALS_MONTHLY_TOTAL'] <= 10, "This program cannot handle generating all removal situations for more than 10 total removals. Make sure MAX_NUM_REMOVALS_MONTHLY_TOTAL is set to a value between 1 and 10."
num_allowed_at_hubs = find_num_occurrences_of_max_removals_for_hubs([ removals_info['MAX_NUM_REMOVALS_MONTHLY_ATL'], removals_info['MAX_NUM_REMOVALS_MONTHLY_CVG'], removals_info['MAX_NUM_REMOVALS_MONTHLY_DTW'], removals_info['MAX_NUM_REMOVALS_MONTHLY_LAX'], removals_info['MAX_NUM_REMOVALS_MONTHLY_MSP'], removals_info['MAX_NUM_REMOVALS_MONTHLY_SEA'], removals_info['MAX_NUM_REMOVALS_MONTHLY_SLC']]) logging.info(engine_subtype + " monthly removal information:") logging.info("Expected AOS cost: " + str(data.aos_cost[engine_subtype])) max_num_removals_total = removals_info['MAX_NUM_REMOVALS_MONTHLY_TOTAL'] logging.info("Maximum total number of removals: " + str(max_num_removals_total)) max_removals_non_hubs = removals_info['MAX_NUM_REMOVALS_MONTHLY_NON_HUBS'] logging.info("Maximum number of removals by location: ATL: " + str(removals_info['MAX_NUM_REMOVALS_MONTHLY_ATL']) + ", CVG: " + str(removals_info['MAX_NUM_REMOVALS_MONTHLY_CVG']) + ", DTW: " + str(removals_info['MAX_NUM_REMOVALS_MONTHLY_DTW']) + ", LAX: " + str(removals_info['MAX_NUM_REMOVALS_MONTHLY_LAX']) + ", MSP: " + str(removals_info['MAX_NUM_REMOVALS_MONTHLY_MSP']) + ", SEA: " + str(removals_info['MAX_NUM_REMOVALS_MONTHLY_SEA']) + ", SLC: " + str(removals_info['MAX_NUM_REMOVALS_MONTHLY_SLC']) + ", NON-HUBS: " + str(max_removals_non_hubs)) num_different_removals_hubs = sum(num_allowed_at_hubs.values()) - (num_allowed_at_hubs[0] if 0 in num_allowed_at_hubs else 0) ranges = find_ranges_of_num_removal_values_valid_at_hubs(num_allowed_at_hubs) max_allowed = find_max_removals_allowed(num_allowed_at_hubs, max_removals_non_hubs) removal_sums = {} for num_removals in range(1, max_num_removals_total + 1): removal_sums[num_removals] = find_all_valid_sums_for_current_num_removals( num_removals=num_removals, num_allowed_at_hubs=num_allowed_at_hubs, num_different_removals_hubs=num_different_removals_hubs, num_different_removals_non_hubs=num_different_removals_non_hubs, max_removals_non_hubs=max_removals_non_hubs, ranges=ranges, max_allowed=max_allowed) logging.info("All combinations of values to generate possible removal situations have been found.") removals_generator = RemovalsGenerator(engine_subtype, removals_info, removal_sums, ranges) removals_generator.generate_all_removal_situations() def find_num_occurrences_of_max_removals_for_hubs(max_num_removals_at_hubs): assert max(max_num_removals_at_hubs) <= 10, "This program cannot handle generating all removal situations for more than 10 removals happening at any hub one location. Make sure MAX_NUM_REMOVALS_MONTHLY for each hub is set to a value between 0 and 10." 
max_num_removals_at_hubs_set = set(max_num_removals_at_hubs) unique_max_num_removals_at_hubs = list(max_num_removals_at_hubs_set) num_allowed_at_hubs = {} for value in unique_max_num_removals_at_hubs: num_allowed_at_hubs[value] = 0 for value in max_num_removals_at_hubs: num_allowed_at_hubs[value] += 1 return num_allowed_at_hubs def find_ranges_of_num_removal_values_valid_at_hubs(num_allowed_at_hubs): ranges = [] num_removals_at_hubs = find_possible_num_removals_at_hubs(num_allowed_at_hubs) if at_least_one_hub_never_has_removals(num_removals_at_hubs[0]): num_removals_at_hubs = num_removals_at_hubs[1:] current_min = 1 for num_removals in num_removals_at_hubs: ranges.append([current_min, num_removals]) current_min = num_removals + 1 ranges.reverse() return ranges def find_possible_num_removals_at_hubs(num_allowed_at_hubs): num_removals_at_hubs = list(num_allowed_at_hubs.keys()) num_removals_at_hubs.sort() return num_removals_at_hubs def at_least_one_hub_never_has_removals(lowest_num_removals_at_hubs): return (lowest_num_removals_at_hubs == 0) def find_max_removals_allowed(num_allowed_at_hubs, max_removals_non_hubs): max_allowed = max(num_allowed_at_hubs.keys()) if only_one_removal_can_happen_at_hubs_but_up_to_two_removals_can_happen_at_non_hubs(max_allowed, max_removals_non_hubs): max_allowed = 2 return max_allowed def only_one_removal_can_happen_at_hubs_but_up_to_two_removals_can_happen_at_non_hubs(max_allowed, max_removals_non_hubs): return (max_allowed == 1) and (max_removals_non_hubs == 2) def find_all_valid_sums_for_current_num_removals(num_removals, num_allowed_at_hubs, num_different_removals_hubs, num_different_removals_non_hubs, ranges, max_removals_non_hubs, max_allowed): all_sums = find_all_sums(num_removals) unique_sums_not_validated = get_unique_list_of_lists(all_sums) sums_validated = [] for values_to_sum in unique_sums_not_validated: if too_many_values_in_this_sum_than_possible_for_a_possible_removal_situation(values_to_sum, num_different_removals_hubs, num_different_removals_non_hubs): continue elif a_value_in_the_sum_exceeds_the_max_allowed(values_to_sum, num_allowed_at_hubs, max_removals_non_hubs, max_allowed): continue elif only_one_removal_is_allowed_anywhere(ranges, max_allowed): sums_validated.append(values_to_sum) elif values_in_sum_invalid_due_to_max_num_removals_possible(values_to_sum, ranges, num_allowed_at_hubs, max_removals_non_hubs, num_different_removals_hubs): continue else: sums_validated.append(values_to_sum) return sums_validated def find_all_sums(n): beginning, middle, end = [0], list(range(1, n)), [n] splits = (d for i in range(n) for d in combinations(middle, i)) return (list(map(sub, chain(split, end), chain(beginning, split))) for split in splits) def get_unique_list_of_lists(a_list): unique_list_of_lists = [] for l in a_list: l.sort() if l not in unique_list_of_lists: unique_list_of_lists.append(l) return unique_list_of_lists def too_many_values_in_this_sum_than_possible_for_a_possible_removal_situation(values_to_sum, num_different_removals_hubs, num_different_removals_non_hubs): return (len(values_to_sum) > (num_different_removals_hubs + num_different_removals_non_hubs)) def a_value_in_the_sum_exceeds_the_max_allowed(values_to_sum, num_allowed_at_hubs, max_removals_non_hubs, max_allowed): for value in values_to_sum: if value > max_allowed: return True return False def only_one_removal_is_allowed_anywhere(ranges, max_allowed): if only_one_range_of_values_to_search(ranges): if range_to_search_is_for_1_removal(ranges[0]): if max_allowed == 1: return True 
return False def only_one_range_of_values_to_search(ranges): return (len(ranges) == 1) def range_to_search_is_for_1_removal(range_to_search): return (range_to_search == [1, 1]) def values_in_sum_invalid_due_to_max_num_removals_possible(values_to_sum, ranges, num_allowed_at_hubs, max_removals_non_hubs, num_different_removals_hubs): num_allowed_in_each_range = get_num_allowed_in_each_range_to_edit(num_allowed_at_hubs) num_actually_in_each_range = get_num_actually_in_each_range_to_edit(num_allowed_in_each_range) num_allowed_in_each_range, num_actually_in_each_range, ranges = update_ranges_and_removals_allowed_to_reflect_values_to_sum( num_allowed_in_each_range=num_allowed_in_each_range, num_actually_in_each_range=num_actually_in_each_range, ranges=ranges, values_to_sum=values_to_sum, max_removals_non_hubs=max_removals_non_hubs, num_different_removals_hubs=num_different_removals_hubs) num_actually_in_each_range = update_num_actually_in_each_range_to_reflect_values_to_sum( num_actually_in_each_range=num_actually_in_each_range, ranges=ranges, values_to_sum=values_to_sum) if values_in_sum_invalid(num_allowed_in_each_range, num_actually_in_each_range): return True else: return False def get_num_allowed_in_each_range_to_edit(num_allowed_at_hubs): num_allowed_in_each_range = copy.deepcopy(num_allowed_at_hubs) if 0 in num_allowed_in_each_range: del num_allowed_in_each_range[0] return num_allowed_in_each_range def get_num_actually_in_each_range_to_edit(num_allowed_in_each_range): num_actually_in_each_range = copy.deepcopy(num_allowed_in_each_range) for max_removals, max_allowed in num_actually_in_each_range.items(): num_actually_in_each_range[max_removals] = 0 return num_actually_in_each_range def update_ranges_and_removals_allowed_to_reflect_values_to_sum(num_allowed_in_each_range, num_actually_in_each_range, ranges, values_to_sum, max_removals_non_hubs, num_different_removals_hubs): if max_removals_non_hubs == 2: if non_hubs_can_have_2_removals_and_values_to_sum_contain_2_1_1(values_to_sum): if having_2_removals_at_any_non_hub_is_not_possible(values_to_sum, num_different_removals_hubs): ranges = update_ranges_to_include_new_range(ranges, [1, 1]) else: ranges = update_ranges_to_include_new_range(ranges, [2, 2]) num_allowed_in_each_range, num_actually_in_each_range = update_data_to_include_2_removals(num_allowed_in_each_range, num_actually_in_each_range) num_allowed_in_each_range, num_actually_in_each_range = update_data_to_include_1_removal(num_allowed_in_each_range, num_actually_in_each_range, num_possible=2) elif non_hubs_can_have_2_removals_and_values_to_sum_contain_2_1(values_to_sum): ranges = update_ranges_to_include_new_range(ranges, [2, 2]) num_allowed_in_each_range, num_actually_in_each_range = update_data_to_include_2_removals(num_allowed_in_each_range, num_actually_in_each_range) num_allowed_in_each_range, num_actually_in_each_range = update_data_to_include_1_removal(num_allowed_in_each_range, num_actually_in_each_range, num_possible=1) elif non_hubs_can_have_2_removals_and_values_to_sum_contain_2(values_to_sum): ranges = update_ranges_to_include_new_range(ranges, [2, 2]) num_allowed_in_each_range, num_actually_in_each_range = update_data_to_include_2_removals(num_allowed_in_each_range, num_actually_in_each_range) elif non_hubs_can_have_2_removals_and_values_to_sum_contains_1_1(values_to_sum): ranges = update_ranges_to_include_new_range(ranges, [1, 1]) num_allowed_in_each_range, num_actually_in_each_range = update_data_to_include_1_removal(num_allowed_in_each_range, 
num_actually_in_each_range, num_possible=2) elif non_hubs_can_have_2_removals_and_values_to_sum_contains_1(values_to_sum): ranges = update_ranges_to_include_new_range(ranges, [1, 1]) num_allowed_in_each_range, num_actually_in_each_range = update_data_to_include_1_removal(num_allowed_in_each_range, num_actually_in_each_range, num_possible=1) else: if non_hubs_can_have_1_removal_and_values_to_sum_contains_1(values_to_sum): ranges = update_ranges_to_include_new_range(ranges, [1, 1]) num_allowed_in_each_range, num_actually_in_each_range = update_data_to_include_1_removal(num_allowed_in_each_range, num_actually_in_each_range, num_possible=1) return num_allowed_in_each_range, num_actually_in_each_range, ranges def update_ranges_to_include_new_range(ranges, new_r): new_ranges = ranges[:] if new_r not in ranges: new_r_min, new_r_max = new_r[0], new_r[1] for r in ranges: r_min, r_max = r[0], r[1] if r_min == (new_r_min - 1): if r_max == new_r_max: new_ranges.append(new_r) else: new_ranges.remove(r) new_ranges.append([new_r_min - 1, new_r_max - 1]) new_ranges.append(new_r) if r_max > new_r_max: new_ranges.append([new_r_min - 1, r_max]) return sort_and_reverse_list(new_ranges) elif r_min == new_r_min: new_ranges.remove(r) new_ranges.append(new_r) new_ranges.append([new_r_min + 1, r_max]) return sort_and_reverse_list(new_ranges) return sort_and_reverse_list(new_ranges) def update_data_to_include_2_removals(num_allowed_in_each_range, num_actually_in_each_range): if 2 not in num_allowed_in_each_range: num_allowed_in_each_range[2] = 1 num_actually_in_each_range[2] = 0 else: num_allowed_in_each_range[2] += 1 return num_allowed_in_each_range, num_actually_in_each_range def update_data_to_include_1_removal(num_allowed_in_each_range, num_actually_in_each_range, num_possible): if 1 not in num_allowed_in_each_range: num_allowed_in_each_range[1] = num_possible num_actually_in_each_range[1] = 0 else: num_allowed_in_each_range[1] += num_possible return num_allowed_in_each_range, num_actually_in_each_range def non_hubs_can_have_2_removals_and_values_to_sum_contain_2_1_1(values_to_sum): return (values_to_sum.count(2) > 0) and (values_to_sum.count(1) >= 2) def non_hubs_can_have_2_removals_and_values_to_sum_contain_2_1(values_to_sum): return (values_to_sum.count(2) > 0) and (values_to_sum.count(1) == 1) def non_hubs_can_have_2_removals_and_values_to_sum_contain_2(values_to_sum): return (values_to_sum.count(2) > 0) and (values_to_sum.count(1) == 0) def non_hubs_can_have_2_removals_and_values_to_sum_contains_1_1(values_to_sum): return (values_to_sum.count(1) >= 2) def non_hubs_can_have_2_removals_and_values_to_sum_contains_1(values_to_sum): return (values_to_sum.count(1) == 1) def non_hubs_can_have_1_removal_and_values_to_sum_contains_1(values_to_sum): return (values_to_sum.count(1) >= 1) def having_2_removals_at_any_non_hub_is_not_possible(values_to_sum, num_different_removals_hubs): return ((len(values_to_sum) - 1) > num_different_removals_hubs) def update_num_actually_in_each_range_to_reflect_values_to_sum(num_actually_in_each_range, ranges, values_to_sum): for current_range in ranges: current_min = current_range[0] current_max = current_range[1] for value in values_to_sum: if value_within_range(value, current_min, current_max): num_actually_in_each_range[current_max] += 1 return num_actually_in_each_range def values_in_sum_invalid(num_allowed_in_each_range, num_actually_in_each_range): all_max_num_removals = list(num_allowed_in_each_range.keys()) all_max_num_removals = sort_and_reverse_list(all_max_num_removals) 
count = 1 for num_removals in all_max_num_removals: if values_in_sum_exceed_removals_allowed_for_that_range(num_allowed_in_each_range, num_actually_in_each_range, num_removals): return True if all_max_num_removals_have_not_been_iterated_yet(count, all_max_num_removals): num_allowed_in_each_range = add_num_not_used_in_range_to_next_range_to_iterate(num_allowed_in_each_range, num_actually_in_each_range, num_removals, all_max_num_removals, count) count += 1 return False def values_in_sum_exceed_removals_allowed_for_that_range(num_allowed_in_each_range, num_actually_in_each_range, num_removals): return (num_actually_in_each_range[num_removals] > num_allowed_in_each_range[num_removals]) def all_max_num_removals_have_not_been_iterated_yet(count, all_max_num_removals): return (count != len(all_max_num_removals)) def add_num_not_used_in_range_to_next_range_to_iterate(num_allowed_in_each_range, num_actually_in_each_range, num_removals, all_max_num_removals, count): num_removals_leftover = (num_allowed_in_each_range[num_removals] - num_actually_in_each_range[num_removals]) num_allowed_in_each_range[all_max_num_removals[count]] += num_removals_leftover return num_allowed_in_each_range def sort_and_reverse_list(a_list): a_list.sort() a_list.reverse() return a_list def value_within_range(value, current_min, current_max): return ((value >= current_min) and (value <= current_max)) class RemovalsGenerator: def __init__(self, engine_subtype, removals_info, removal_sums, ranges): logging.info("Initializing RemovalsGenerator for the " + engine_subtype + " engine...") self.engine_subtype = engine_subtype self.max_removals_ATL = removals_info['MAX_NUM_REMOVALS_MONTHLY_ATL'] self.max_removals_CVG = removals_info['MAX_NUM_REMOVALS_MONTHLY_CVG'] self.max_removals_DTW = removals_info['MAX_NUM_REMOVALS_MONTHLY_DTW'] self.max_removals_LAX = removals_info['MAX_NUM_REMOVALS_MONTHLY_LAX'] self.max_removals_MSP = removals_info['MAX_NUM_REMOVALS_MONTHLY_MSP'] self.max_removals_SEA = removals_info['MAX_NUM_REMOVALS_MONTHLY_SEA'] self.max_removals_SLC = removals_info['MAX_NUM_REMOVALS_MONTHLY_SLC'] self.max_removals_hubs_dict = {'ATL': self.max_removals_ATL, 'CVG': self.max_removals_CVG, 'DTW': self.max_removals_DTW, 'LAX': self.max_removals_LAX, 'MSP': self.max_removals_MSP, 'SEA': self.max_removals_SEA, 'SLC': self.max_removals_SLC} self.max_removals_hubs_list = [self.max_removals_ATL, self.max_removals_CVG, self.max_removals_DTW, self.max_removals_LAX, self.max_removals_MSP, self.max_removals_SEA, self.max_removals_SLC] self.max_removals_non_hubs = removals_info['MAX_NUM_REMOVALS_MONTHLY_NON_HUBS'] self.max_different_removals_hubs = 7 - self.max_removals_hubs_list.count(0) self.max_removals_total = removals_info['MAX_NUM_REMOVALS_MONTHLY_TOTAL'] self.removal_sums = removal_sums self.ranges = ranges self.indices_where_removals_should_not_occur = [] self.find_indices_where_removals_should_not_occur() self.num_all = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52] self.num_hubs = [0, 1, 2, 3, 4, 5, 6] self.num_non_hubs = [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52] self.remove_indices_from_lists_where_removals_should_not_occur() self.valid_indices_for_ranges = {} self.find_indices_for_ranges() self.all_perms = [] self.zero_list_all = [0] 
* 53 self.num_all_2_removals = [] self.find_all_values_where_2_removals_can_occur() self.indices_to_iterate_for_current_values = [] self.indices_to_iterate_for_current_values_again = [] self.will_need_to_iterate_twice = False self.more_than_one_value_must_occur_at_non_hubs = False data.all_possible_removal_situations[self.engine_subtype] = [] def find_all_values_where_2_removals_can_occur(self): for r in self.ranges: current_min = r[0] current_max = r[1] if (2 >= current_min) and (2 <= current_max): self.num_all_2_removals = self.valid_indices_for_ranges[str(r)][:] self.num_all_2_removals.extend(self.num_non_hubs) def find_indices_where_removals_should_not_occur(self): for index in range(7): if self.max_removals_hubs_list[index] == 0: self.indices_where_removals_should_not_occur.append(index) def remove_indices_from_lists_where_removals_should_not_occur(self): for index in sorted(self.indices_where_removals_should_not_occur, reverse=True): del self.num_all[index] del self.num_hubs[index] def find_indices_for_ranges(self): for r in self.ranges: self.valid_indices_for_ranges[str(r)] = [] for i in range(len(self.max_removals_hubs_list)): hub_max_removals = self.max_removals_hubs_list[i] for r in self.ranges: current_min = r[0] if hub_max_removals >= current_min: self.valid_indices_for_ranges[str(r)].append(i) def generate_all_removal_situations(self): for num_removals, all_sums in self.removal_sums.items(): current_num_removals = num_removals for values in all_sums: self.find_indices_to_iterate(values) if len(values) == 1: self.one_value(values) elif len(values) == 2: self.two_values(values) elif len(values) == 3: self.three_values(values) elif len(values) == 4: self.four_values(values) elif len(values) == 5: self.five_values(values) elif len(values) == 6: self.six_values(values) elif len(values) == 7: self.seven_values(values) elif len(values) == 8: self.eight_values(values) elif len(values) == 9: self.nine_values(values) logging.info("All removal situations for " + self.engine_subtype + " have been generated.") writer_util.export_all_possible_removal_situations( filepath='data_to_read/' + self.engine_subtype + '/' + self.engine_subtype + '_all_possible_removal_situations.csv', engine_subtype=self.engine_subtype, all_possible_removal_situations=data.all_possible_removal_situations[self.engine_subtype]) def indices_not_equal(self, list_of_indices): set_of_indices = set(list_of_indices) list_of_set_of_indices = list(set_of_indices) if len(list_of_set_of_indices) != len(list_of_indices): return False return True def make_perms_unique_and_add_to_all_perms(self, perms): perms.sort() unique_perms_list = list(perm for perm,_ in groupby(perms)) data.all_possible_removal_situations[self.engine_subtype].extend(unique_perms_list) return unique_perms_list def one_value(self, values): perms = [] for i in self.indices_to_iterate_for_current_values[0]: current_list = self.zero_list_all[:] current_list[i] = values[0] perms.append(current_list) unique_perms_list = self.make_perms_unique_and_add_to_all_perms(perms) num_unique_perms = len(unique_perms_list) def two_values(self, values): perms = [] for i in self.indices_to_iterate_for_current_values[0]: for j in self.indices_to_iterate_for_current_values[1]: if self.indices_not_equal([i, j]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] perms.append(current_list) unique_perms_list = self.make_perms_unique_and_add_to_all_perms(perms) num_unique_perms = len(unique_perms_list) def three_values(self, values): perms = 
[] if self.will_need_to_iterate_twice: for i in self.indices_to_iterate_for_current_values[0]: for j in self.indices_to_iterate_for_current_values[1]: for k in self.indices_to_iterate_for_current_values[2]: if self.indices_not_equal([i, j, k]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] perms.append(current_list) for i in self.indices_to_iterate_for_current_values_again[0]: for j in self.indices_to_iterate_for_current_values_again[1]: for k in self.indices_to_iterate_for_current_values_again[2]: if self.indices_not_equal([i, j, k]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] perms.append(current_list) else: for i in self.indices_to_iterate_for_current_values[0]: for j in self.indices_to_iterate_for_current_values[1]: for k in self.indices_to_iterate_for_current_values[2]: if self.indices_not_equal([i, j, k]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] perms.append(current_list) unique_perms_list = self.make_perms_unique_and_add_to_all_perms(perms) num_unique_perms = len(unique_perms_list) def four_values(self, values): perms = [] if self.will_need_to_iterate_twice: for i in self.indices_to_iterate_for_current_values[0]: for j in self.indices_to_iterate_for_current_values[1]: for k in self.indices_to_iterate_for_current_values[2]: for l in self.indices_to_iterate_for_current_values[3]: if self.indices_not_equal([i, j, k, l]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] current_list[l] = values[3] perms.append(current_list) for i in self.indices_to_iterate_for_current_values_again[0]: for j in self.indices_to_iterate_for_current_values_again[1]: for k in self.indices_to_iterate_for_current_values_again[2]: for l in self.indices_to_iterate_for_current_values_again[3]: if self.indices_not_equal([i, j, k, l]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] current_list[l] = values[3] perms.append(current_list) else: for i in self.indices_to_iterate_for_current_values[0]: for j in self.indices_to_iterate_for_current_values[1]: for k in self.indices_to_iterate_for_current_values[2]: for l in self.indices_to_iterate_for_current_values[3]: if self.indices_not_equal([i, j, k, l]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] current_list[l] = values[3] perms.append(current_list) unique_perms_list = self.make_perms_unique_and_add_to_all_perms(perms) num_unique_perms = len(unique_perms_list) def five_values(self, values): perms = [] if self.will_need_to_iterate_twice: for i in self.indices_to_iterate_for_current_values[0]: for j in self.indices_to_iterate_for_current_values[1]: for k in self.indices_to_iterate_for_current_values[2]: for l in self.indices_to_iterate_for_current_values[3]: for m in self.indices_to_iterate_for_current_values[4]: if self.indices_not_equal([i, j, k, l, m]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] current_list[l] = values[3] current_list[m] = values[4] perms.append(current_list) for i in self.indices_to_iterate_for_current_values_again[0]: for j in self.indices_to_iterate_for_current_values_again[1]: for k in 
self.indices_to_iterate_for_current_values_again[2]: for l in self.indices_to_iterate_for_current_values_again[3]: for m in self.indices_to_iterate_for_current_values_again[4]: if self.indices_not_equal([i, j, k, l, m]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] current_list[l] = values[3] current_list[m] = values[4] perms.append(current_list) else: for i in self.indices_to_iterate_for_current_values[0]: for j in self.indices_to_iterate_for_current_values[1]: for k in self.indices_to_iterate_for_current_values[2]: for l in self.indices_to_iterate_for_current_values[3]: for m in self.indices_to_iterate_for_current_values[4]: if self.indices_not_equal([i, j, k, l, m]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] current_list[l] = values[3] current_list[m] = values[4] perms.append(current_list) unique_perms_list = self.make_perms_unique_and_add_to_all_perms(perms) num_unique_perms = len(unique_perms_list) def six_values(self, values): perms = [] if self.will_need_to_iterate_twice: for i in self.indices_to_iterate_for_current_values[0]: for j in self.indices_to_iterate_for_current_values[1]: for k in self.indices_to_iterate_for_current_values[2]: for l in self.indices_to_iterate_for_current_values[3]: for m in self.indices_to_iterate_for_current_values[4]: for n in self.indices_to_iterate_for_current_values[5]: if self.indices_not_equal([i, j, k, l, m, n]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] current_list[l] = values[3] current_list[m] = values[4] current_list[n] = values[5] perms.append(current_list) for i in self.indices_to_iterate_for_current_values_again[0]: for j in self.indices_to_iterate_for_current_values_again[1]: for k in self.indices_to_iterate_for_current_values_again[2]: for l in self.indices_to_iterate_for_current_values_again[3]: for m in self.indices_to_iterate_for_current_values_again[4]: for n in self.indices_to_iterate_for_current_values_again[5]: if self.indices_not_equal([i, j, k, l, m, n]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] current_list[l] = values[3] current_list[m] = values[4] current_list[n] = values[5] perms.append(current_list) else: for i in self.indices_to_iterate_for_current_values[0]: for j in self.indices_to_iterate_for_current_values[1]: for k in self.indices_to_iterate_for_current_values[2]: for l in self.indices_to_iterate_for_current_values[3]: for m in self.indices_to_iterate_for_current_values[4]: for n in self.indices_to_iterate_for_current_values[5]: if self.indices_not_equal([i, j, k, l, m, n]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] current_list[l] = values[3] current_list[m] = values[4] current_list[n] = values[5] perms.append(current_list) unique_perms_list = self.make_perms_unique_and_add_to_all_perms(perms) num_unique_perms = len(unique_perms_list) def seven_values(self, values): perms = [] if self.will_need_to_iterate_twice: for i in self.indices_to_iterate_for_current_values[0]: for j in self.indices_to_iterate_for_current_values[1]: for k in self.indices_to_iterate_for_current_values[2]: for l in self.indices_to_iterate_for_current_values[3]: for m in self.indices_to_iterate_for_current_values[4]: for n in 
self.indices_to_iterate_for_current_values[5]: for o in self.indices_to_iterate_for_current_values[6]: if self.indices_not_equal([i, j, k, l, m, n, o]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] current_list[l] = values[3] current_list[m] = values[4] current_list[n] = values[5] current_list[o] = values[6] perms.append(current_list) for i in self.indices_to_iterate_for_current_values_again[0]: for j in self.indices_to_iterate_for_current_values_again[1]: for k in self.indices_to_iterate_for_current_values_again[2]: for l in self.indices_to_iterate_for_current_values_again[3]: for m in self.indices_to_iterate_for_current_values_again[4]: for n in self.indices_to_iterate_for_current_values_again[5]: for o in self.indices_to_iterate_for_current_values_again[6]: if self.indices_not_equal([i, j, k, l, m, n, o]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] current_list[l] = values[3] current_list[m] = values[4] current_list[n] = values[5] current_list[o] = values[6] perms.append(current_list) else: for i in self.indices_to_iterate_for_current_values[0]: for j in self.indices_to_iterate_for_current_values[1]: for k in self.indices_to_iterate_for_current_values[2]: for l in self.indices_to_iterate_for_current_values[3]: for m in self.indices_to_iterate_for_current_values[4]: for n in self.indices_to_iterate_for_current_values[5]: for o in self.indices_to_iterate_for_current_values[6]: if self.indices_not_equal([i, j, k, l, m, n, o]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] current_list[l] = values[3] current_list[m] = values[4] current_list[n] = values[5] current_list[o] = values[6] perms.append(current_list) unique_perms_list = self.make_perms_unique_and_add_to_all_perms(perms) num_unique_perms = len(unique_perms_list) def eight_values(self, values): perms = [] if self.will_need_to_iterate_twice: for i in self.indices_to_iterate_for_current_values[0]: for j in self.indices_to_iterate_for_current_values[1]: for k in self.indices_to_iterate_for_current_values[2]: for l in self.indices_to_iterate_for_current_values[3]: for m in self.indices_to_iterate_for_current_values[4]: for n in self.indices_to_iterate_for_current_values[5]: for o in self.indices_to_iterate_for_current_values[6]: for p in self.indices_to_iterate_for_current_values[7]: if self.indices_not_equal([i, j, k, l, m, n, o, p]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] current_list[l] = values[3] current_list[m] = values[4] current_list[n] = values[5] current_list[o] = values[6] current_list[p] = values[7] perms.append(current_list) for i in self.indices_to_iterate_for_current_values_again[0]: for j in self.indices_to_iterate_for_current_values_again[1]: for k in self.indices_to_iterate_for_current_values_again[2]: for l in self.indices_to_iterate_for_current_values_again[3]: for m in self.indices_to_iterate_for_current_values_again[4]: for n in self.indices_to_iterate_for_current_values_again[5]: for o in self.indices_to_iterate_for_current_values_again[6]: for p in self.indices_to_iterate_for_current_values_again[7]: if self.indices_not_equal([i, j, k, l, m, n, o, p]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] current_list[l] = values[3] 
current_list[m] = values[4] current_list[n] = values[5] current_list[o] = values[6] current_list[p] = values[7] perms.append(current_list) else: for i in self.indices_to_iterate_for_current_values[0]: for j in self.indices_to_iterate_for_current_values[1]: for k in self.indices_to_iterate_for_current_values[2]: for l in self.indices_to_iterate_for_current_values[3]: for m in self.indices_to_iterate_for_current_values[4]: for n in self.indices_to_iterate_for_current_values[5]: for o in self.indices_to_iterate_for_current_values[6]: for p in self.indices_to_iterate_for_current_values[7]: if self.indices_not_equal([i, j, k, l, m, n, o, p]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] current_list[l] = values[3] current_list[m] = values[4] current_list[n] = values[5] current_list[o] = values[6] current_list[p] = values[7] perms.append(current_list) unique_perms_list = self.make_perms_unique_and_add_to_all_perms(perms) num_unique_perms = len(unique_perms_list) def nine_values(self, values): perms = [] if self.will_need_to_iterate_twice: for i in self.indices_to_iterate_for_current_values[0]: for j in self.indices_to_iterate_for_current_values[1]: for k in self.indices_to_iterate_for_current_values[2]: for l in self.indices_to_iterate_for_current_values[3]: for m in self.indices_to_iterate_for_current_values[4]: for n in self.indices_to_iterate_for_current_values[5]: for o in self.indices_to_iterate_for_current_values[6]: for p in self.indices_to_iterate_for_current_values[7]: for q in self.indices_to_iterate_for_current_values[8]: if self.indices_not_equal([i, j, k, l, m, n, o, p, q]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] current_list[l] = values[3] current_list[m] = values[4] current_list[n] = values[5] current_list[o] = values[6] current_list[p] = values[7] current_list[q] = values[8] perms.append(current_list) for i in self.indices_to_iterate_for_current_values_again[0]: for j in self.indices_to_iterate_for_current_values_again[1]: for k in self.indices_to_iterate_for_current_values_again[2]: for l in self.indices_to_iterate_for_current_values_again[3]: for m in self.indices_to_iterate_for_current_values_again[4]: for n in self.indices_to_iterate_for_current_values_again[5]: for o in self.indices_to_iterate_for_current_values_again[6]: for p in self.indices_to_iterate_for_current_values_again[7]: for q in self.indices_to_iterate_for_current_values_again[8]: if self.indices_not_equal([i, j, k, l, m, n, o, p, q]): current_list = self.zero_list_all[:] current_list[i] = values[0] current_list[j] = values[1] current_list[k] = values[2] current_list[l] = values[3] current_list[m] = values[4] current_list[n] = values[5] current_list[o] = values[6] current_list[p] = values[7] current_list[q] = values[8] perms.append(current_list) else: for i in self.indices_to_iterate_for_current_values[0]: for j in self.indices_to_iterate_for_current_values[1]: for k in self.indices_to_iterate_for_current_values[2]: for l in self.indices_to_iterate_for_current_values[3]: for m in self.indices_to_iterate_for_current_values[4]: for n in self.indices_to_iterate_for_current_values[5]: for o in self.indices_to_iterate_for_current_values[6]: for p in self.indices_to_iterate_for_current_values[7]: for q in self.indices_to_iterate_for_current_values[8]: if self.indices_not_equal([i, j, k, l, m, n, o, p, q]): current_list = self.zero_list_all[:] current_list[i] = 
values[0] current_list[j] = values[1] current_list[k] = values[2] current_list[l] = values[3] current_list[m] = values[4] current_list[n] = values[5] current_list[o] = values[6] current_list[p] = values[7] current_list[q] = values[8] perms.append(current_list) unique_perms_list = self.make_perms_unique_and_add_to_all_perms(perms) num_unique_perms = len(unique_perms_list) def reset_values_to_sum_variables(self): self.indices_to_iterate_for_current_values = [] self.indices_to_iterate_for_current_values_again = [] self.will_need_to_iterate_twice = False self.more_than_one_value_must_occur_at_non_hubs = False def more_than_one_value_must_occur_outside_of_hubs(self, values): return ((len(values) - 1) > self.max_different_removals_hubs) def values_to_sum_contain_at_least_two_ones(self, values): return (values.count(1) >= 2) def append_to_beginning_of_indices_to_iterate_for_current_values(self, list_of_lists_to_append): for a_list in list_of_lists_to_append: self.indices_to_iterate_for_current_values.append(a_list) def append_to_beginning_of_indices_to_iterate_for_current_values_again(self, list_of_lists_to_append): for a_list in list_of_lists_to_append: self.indices_to_iterate_for_current_values_again.append(a_list) def append_to_end_of_indices_to_iterate_for_current_values(self, values_to_edit): for value in values_to_edit: for r in self.ranges: current_min = r[0] current_max = r[1] if (value >= current_min) and (value <= current_max): self.indices_to_iterate_for_current_values.append(self.valid_indices_for_ranges[str(r)]) def append_to_end_of_indices_to_iterate_for_current_values_again(self, values_to_edit): for value in values_to_edit: for r in self.ranges: current_min = r[0] current_max = r[1] if (value >= current_min) and (value <= current_max): self.indices_to_iterate_for_current_values_again.append(self.valid_indices_for_ranges[str(r)]) def remove_values_for_which_index_lists_have_been_found(self, values_to_edit, values_to_remove): for value in values_to_remove: values_to_edit.remove(value) return values_to_edit def find_indices_to_iterate(self, values): self.reset_values_to_sum_variables() values_to_edit = values[:] if self.max_removals_non_hubs == 2: if self.more_than_one_value_must_occur_outside_of_hubs(values): self.more_than_one_value_must_occur_at_non_hubs = True assert self.values_to_sum_contain_at_least_two_ones(values), "Two values must occur at non-hubs to generate permutations for this sum." 
else: self.append_to_beginning_of_indices_to_iterate_for_current_values([self.num_non_hubs, self.num_non_hubs]) values_to_edit = self.remove_values_for_which_index_lists_have_been_found(values_to_edit, [1, 1]) self.append_to_end_of_indices_to_iterate_for_current_values(values_to_edit) else: if values.count(2) > 0: if values.count(1) >= 2: # 2 occurs at least once, 1 occurs at least twice self.will_need_to_iterate_twice = True # first iteration list_of_lists_to_append = [] list_of_ones_and_two = [] for i in range(values.count(1)): list_of_ones_and_two.append(1) list_of_lists_to_append.append(self.num_hubs) # all 1s iterate through hubs only list_of_ones_and_two.append(2) list_of_lists_to_append.append(self.num_non_hubs) # 2 iterates through non-hubs only self.append_to_beginning_of_indices_to_iterate_for_current_values(list_of_lists_to_append) values_to_edit = self.remove_values_for_which_index_lists_have_been_found(values_to_edit, list_of_ones_and_two) self.append_to_end_of_indices_to_iterate_for_current_values(values_to_edit) # second iteration values_to_edit = values[:] self.append_to_beginning_of_indices_to_iterate_for_current_values_again([self.num_all, self.num_all]) # two 1s iterate through everything values_to_edit = self.remove_values_for_which_index_lists_have_been_found(values_to_edit, [1, 1]) self.append_to_end_of_indices_to_iterate_for_current_values_again(values_to_edit) elif values.count(1) == 1: # 2 occurs at least once, 1 occurs only once self.will_need_to_iterate_twice = True # first iteration self.append_to_beginning_of_indices_to_iterate_for_current_values([self.num_hubs, self.num_non_hubs]) values_to_edit = self.remove_values_for_which_index_lists_have_been_found(values_to_edit, [1, 2]) self.append_to_end_of_indices_to_iterate_for_current_values(values_to_edit) # second iteration values_to_edit = values[:] self.append_to_beginning_of_indices_to_iterate_for_current_values_again([self.num_all]) values_to_edit = self.remove_values_for_which_index_lists_have_been_found(values_to_edit, [1]) self.append_to_end_of_indices_to_iterate_for_current_values_again(values_to_edit) elif values.count(1) == 0: # 2 occurs at least once, 1 never occurs self.append_to_beginning_of_indices_to_iterate_for_current_values([self.num_all_2_removals]) values_to_edit = self.remove_values_for_which_index_lists_have_been_found(values_to_edit, [2]) self.append_to_end_of_indices_to_iterate_for_current_values(values_to_edit) elif values.count(2) == 0: if values.count(1) >= 2: # 2 never occurs, 1 occurs at least twice self.append_to_beginning_of_indices_to_iterate_for_current_values([self.num_all, self.num_all]) values_to_edit = self.remove_values_for_which_index_lists_have_been_found(values_to_edit, [1, 1]) self.append_to_end_of_indices_to_iterate_for_current_values(values_to_edit) elif values.count(1) == 1: # 2 never occurs, 1 occurs once self.append_to_beginning_of_indices_to_iterate_for_current_values([self.num_all]) values_to_edit = self.remove_values_for_which_index_lists_have_been_found(values_to_edit, [1]) self.append_to_end_of_indices_to_iterate_for_current_values(values_to_edit) elif values.count(1) == 0: # 2 never occurs, 1 never occurs self.append_to_end_of_indices_to_iterate_for_current_values(values_to_edit) else: if values.count(1) > 0: self.append_to_beginning_of_indices_to_iterate_for_current_values([self.num_all]) values_to_edit = self.remove_values_for_which_index_lists_have_been_found(values_to_edit, [1]) self.append_to_end_of_indices_to_iterate_for_current_values(values_to_edit) if 
values.count(1) == 0: self.append_to_end_of_indices_to_iterate_for_current_values(values_to_edit)
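
Editor's note on the module above: its densest piece is find_all_sums, which enumerates the compositions of n (ordered sequences of positive integers summing to n) by choosing "cut points" inside {1, ..., n-1} and differencing consecutive cuts; get_unique_list_of_lists then sorts and de-duplicates those compositions down to integer partitions. A minimal, self-contained sketch of that behaviour for a small n (not part of the source file, shown only to illustrate the technique):

    from itertools import chain, combinations
    from operator import sub

    def find_all_sums(n):
        # Cut the interval [0, n] at every subset of {1, ..., n-1}; pairwise
        # differences of consecutive cut points yield one composition of n.
        beginning, middle, end = [0], list(range(1, n)), [n]
        splits = (d for i in range(n) for d in combinations(middle, i))
        return (list(map(sub, chain(split, end), chain(beginning, split)))
                for split in splits)

    print(list(find_all_sums(3)))  # [[3], [1, 2], [2, 1], [1, 1, 1]]
    # Sorting each list and de-duplicating (what get_unique_list_of_lists does)
    # leaves the integer partitions of 3: [[3], [1, 2], [1, 1, 1]].

As a design note, the nine near-identical methods one_value through nine_values could be collapsed into a single loop over itertools.product(*self.indices_to_iterate_for_current_values) that skips tuples with repeated indices; the explicit per-arity methods are kept above exactly as they appear in the source.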
46.426391
252
0.749222
8,563
51,719
4.063296
0.034217
0.045525
0.074955
0.087917
0.882738
0.836409
0.814451
0.749181
0.719406
0.684773
0
0.019221
0.161063
51,719
1,113
253
46.468104
0.782687
0.007657
0
0.620352
0
0.002935
0.040598
0.017541
0
0
0
0
0.003914
0
null
null
0
0.005871
null
null
0.000978
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
7
c59850ab429568e18ef584a3316c8d10cef81fdd
165
py
Python
netharn/output_shape_for.py
VIAME/netharn
c9491d655c5d91cb0ee6055f30e68282108e6b67
[ "Apache-2.0" ]
38
2018-06-18T07:47:31.000Z
2021-10-31T23:18:58.000Z
netharn/output_shape_for.py
Kitware/netharn
9ebc8ddb33c56fe890684f3a0a6369c52ebe4742
[ "Apache-2.0" ]
10
2018-06-08T01:21:58.000Z
2020-01-11T02:13:36.000Z
netharn/output_shape_for.py
Kitware/netharn
9ebc8ddb33c56fe890684f3a0a6369c52ebe4742
[ "Apache-2.0" ]
6
2018-04-17T22:06:02.000Z
2019-12-13T03:04:53.000Z
import warnings warnings.warn('Deprecated file. Use netharn.analytic.output_shape_for instead', UserWarning) from netharn.analytic.output_shape_for import * # NOQA
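
A small usage note on the shim above (not from the repository): because it emits a UserWarning at import time, a test suite can promote that warning to an error to locate any stale imports of the old path. A minimal sketch, demonstrated with a locally raised warning so it runs without netharn installed:

    import warnings

    # Promote the shim's deprecation warning to an error so stale imports of
    # the old module path fail loudly during a test run.
    warnings.filterwarnings('error', message='Deprecated file', category=UserWarning)

    try:
        warnings.warn('Deprecated file. Use netharn.analytic.output_shape_for instead', UserWarning)
    except UserWarning as e:
        print('caught:', e)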
41.25
92
0.830303
22
165
6.045455
0.681818
0.225564
0.315789
0.390977
0.43609
0
0
0
0
0
0
0
0.090909
165
3
93
55
0.886667
0.024242
0
0
0
0
0.389937
0.207547
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
7
c5e07f1da9b0eac0f5394163ee05b40d29114d37
23,896
py
Python
hyperion/deprec/keras1/layers/pooling.py
jsalt2019-diadet/hyperion
14a11436d62f3c15cd9b1f70bcce3eafbea2f753
[ "Apache-2.0" ]
9
2019-09-22T05:19:59.000Z
2022-03-05T18:03:37.000Z
hyperion/deprec/keras1/layers/pooling.py
jsalt2019-diadet/hyperion
14a11436d62f3c15cd9b1f70bcce3eafbea2f753
[ "Apache-2.0" ]
null
null
null
hyperion/deprec/keras1/layers/pooling.py
jsalt2019-diadet/hyperion
14a11436d62f3c15cd9b1f70bcce3eafbea2f753
[ "Apache-2.0" ]
4
2019-10-10T06:34:05.000Z
2022-03-05T18:03:56.000Z
from __future__ import absolute_import from __future__ import print_function from __future__ import division import numpy as np import keras.backend as K from keras.engine import InputSpec, Layer, Merge from keras.layers.pooling import _GlobalPooling1D from keras import activations, initializations, regularizers, constraints from ...hyp_defs import float_keras from .. import backend_addons as K2 from .. import constraints as hyp_constraints class GlobalMaskedAveragePooling1D(_GlobalPooling1D): def __init__(self,**kwargs): super(GlobalMaskedAveragePooling1D, self).__init__(**kwargs) self.supports_masking = True def call(self, x, mask=None): return K.mean(x[mask.nonzeros(),:],axis=1) class GlobalWeightedAveragePooling1D(_GlobalPooling1D): def __init__(self,**kwargs): super(GlobalWeightedAveragePooling1D, self).__init__(**kwargs) self.supports_masking = True def get_output_shape_for(self, input_shape): return (input_shape[0][0], input_shape[0][2]) def call(self, xw, mask=None): x, weights = xw return K.mean(x*weights,axis=1)/K.mean(weights,axis=1) def compute_mask(self, inputs, mask=None): return None class GlobalWeightedSumPooling1D(_GlobalPooling1D): def __init__(self,**kwargs): super(GlobalWeightedSumPooling1D, self).__init__(**kwargs) self.supports_masking = True def get_output_shape_for(self, input_shape): return (input_shape[0][0], input_shape[0][2]) def call(self, xw, mask=None): x, weights = xw return K.sum(x*weights,axis=1) def compute_mask(self, inputs, mask=None): return None class GlobalSumPooling1D(_GlobalPooling1D): def __init__(self,**kwargs): super(GlobalSumPooling1D, self).__init__(**kwargs) self.supports_masking = True def get_output_shape_for(self, input_shape): return (input_shape[0], input_shape[2]) def call(self, x, mask=None): return K.sum(x, axis=1) def compute_mask(self, inputs, mask=None): return None class GlobalSumWeights(_GlobalPooling1D): def __init__(self,**kwargs): super(GlobalSumWeights, self).__init__(**kwargs) self.supports_masking = True def get_output_shape_for(self, input_shape): return (input_shape[0], 1) def call(self, x, mask=None): return K.sum(x,axis=1) def compute_mask(self, inputs, mask=None): return None class GlobalProdRenormDiagNormalCommonCovStdPrior(_GlobalPooling1D): def __init__(self,**kwargs): super(GlobalProdRenormDiagNormalCommonCovStdPrior, self).__init__(**kwargs) self.supports_masking = True def get_output_shape_for(self, input_shape): output_shape=(input_shape[0][0], input_shape[0][2]) return [output_shape, output_shape] def call(self, xvw, mask=None): # input: mu_i/sigma2_i, log sigma2_i x, logvar_i, weights = xvw gamma = K.sum(x*weights,axis=1) N = K.sum(weights, axis=1) prec_i = K.exp(-logvar_i) #var_i = K.exp(logvar_i[:,0,:]) prec = 1 + N * (prec_i - 1) mu = gamma/prec logvar = - K.log(prec) return [mu, logvar] def compute_mask(self, inputs, mask=None): return [None, None] # class MultConstDiagCov(Layer): # def __init__(self, output_dim, # weights=None, # regularizer=None, # constraint=None, **kwargs): # self.output_dim = output_dim # self.input_dim = None # self.regularizer = regularizers.get(regularizer) # self.constraint = constraints.get(constraint) # self.initial_weights = weights # self.input_spec = [InputSpec(ndim='2+')] # if self.input_dim: # kwargs['input_shape'] = (self.input_dim,) # super(MultConstDiagCov, self).__init__(**kwargs) # self.supports_masking = True # def build(self, input_shape): # assert len(input_shape) >= 2 # input_dim = input_shape[-1] # self.input_dim = input_dim # self.input_spec = 
[InputSpec(dtype=K.floatx(), # ndim='2+')] # self.logvar = self.add_weight((self.output_dim,), # initializer='zero', # name='{}_logvar'.format(self.name), # regularizer=self.regularizer, # constraint=self.constraint) # if self.initial_weights is not None: # self.set_weights(self.initial_weights) # del self.initial_weights # self.built = True # def call(self, x, mask=None): # var = K.exp(self.logvar) # mu = x*var # tile_shape = list(K.shape(mu)) # tile_shape[-1] = 1 # logvar = K.tile(self.logvar, tuple(tile_shape)) # return [mu, logvar] # def get_output_shape_for(self, input_shape): # assert input_shape and len(input_shape) >= 2 # assert input_shape[-1] and input_shape[-1] == self.input_dim # return [input_shape, input_shape] # def get_config(self): # config = {'output_dim': self.output_dim, # 'regularizer': self.regularizer.get_config() if self.regularizer else None, # 'constraint': self.constraint.get_config() if self.constraint else None} # base_config = super(MultConstDiagCov, self).get_config() # return dict(list(base_config.items()) + list(config.items())) # def compute_mask(self, inputs, mask=None): # return [None, None] # class MultConstDiagCovStdPrior(Layer): # def __init__(self, output_dim, # weights=None, # regularizer=None, **kwargs): # self.output_dim = output_dim # self.input_dim = None # self.regularizer = regularizers.get(regularizer) # self.constraint = None #constraints.get('nonneg') # self.initial_weights = weights # self.input_spec = [InputSpec(ndim='2+')] # if self.input_dim: # kwargs['input_shape'] = (self.input_dim,) # super(MultConstDiagCovStdPrior, self).__init__(**kwargs) # self.supports_masking = True # def build(self, input_shape): # assert len(input_shape) >= 2 # input_dim = input_shape[-1] # self.input_dim = input_dim # self.input_spec = [InputSpec(dtype=K.floatx(), # ndim='2+')] # self.A = self.add_weight((self.output_dim,), # initializer='zero', # name='{}_A'.format(self.name), # regularizer=self.regularizer, # constraint=self.constraint) # if self.initial_weights is not None: # self.set_weights(self.initial_weights) # del self.initial_weights # self.built = True # def call(self, x, mask=None): # logvar = - K.log(1+K.exp(self.A)) # #logvar = - K.log(1+self.A) # var = K.exp(logvar) # mu = x*var # tile_shape = list(K.shape(mu)) # tile_shape[-1] = 1 # logvar = K.tile(logvar, tuple(tile_shape)) # return [mu, logvar] # def get_output_shape_for(self, input_shape): # assert input_shape and len(input_shape) >= 2 # assert input_shape[-1] and input_shape[-1] == self.input_dim # return [input_shape, input_shape] # def get_config(self): # config = {'output_dim': self.output_dim, # 'regularizer': self.regularizer.get_config() if self.regularizer else None} # base_config = super(MultConstDiagCovStdPrior, self).get_config() # return dict(list(base_config.items()) + list(config.items())) # def compute_mask(self, inputs, mask=None): # return [None, None] class GlobalProdRenormDiagNormalConstCovStdPrior(Layer): def __init__(self, output_dim, weights=None, regularizer=None, constraint=None, **kwargs): self.output_dim = output_dim self.input_dim = None self.regularizer = regularizers.get(regularizer) self.constraint = constraints.get(constraint) self.initial_weights = weights self.input_spec = [InputSpec(ndim=3)] if self.input_dim: kwargs['input_shape'] = (self.input_dim,) super(GlobalProdRenormDiagNormalConstCovStdPrior, self).__init__(**kwargs) self.supports_masking = True def build(self, input_shape): assert len(input_shape[0]) >= 2 input_dim = input_shape[0][-1] 
self.input_dim = input_dim self.input_spec = [InputSpec(dtype=K.floatx(), ndim='2+')] self.logvar = self.add_weight((self.output_dim,), initializer='zero', name='{}_logvar'.format(self.name), regularizer=self.regularizer, constraint=self.constraint) if self.initial_weights is not None: self.set_weights(self.initial_weights) del self.initial_weights self.built = True def call(self, xw, mask=None): x, weights = xw gamma = K.sum(x*weights, axis=1) N = K.sum(weights, axis=1) prec_1 = K.exp(-self.logvar) prec = 1 + N * (prec_1 - 1) logvar = - K.log(prec) mu = gamma*K.exp(logvar) return [mu, logvar] def get_output_shape_for(self, input_shape): assert input_shape[0] and len(input_shape[0]) >= 2 assert input_shape[0][-1] and input_shape[0][-1] == self.input_dim output_shape = (input_shape[0][0], input_shape[0][2]) return [output_shape, output_shape] def get_config(self): config = {'output_dim': self.output_dim, 'regularizer': self.regularizer.get_config() if self.regularizer else None, 'constraint': self.constraint.get_config() if self.constraint else None} base_config = super(GlobalProdRenormDiagNormalConstCovStdPrior, self).get_config() return dict(list(base_config.items()) + list(config.items())) def compute_mask(self, inputs, mask=None): return [None, None] class GlobalProdRenormDiagNormalConstCovStdPrior2(Layer): def __init__(self, output_dim, weights=None, regularizer=None, constraint=None, **kwargs): self.output_dim = output_dim self.input_dim = None self.regularizer = regularizers.get(regularizer) self.constraint = constraints.get(constraint) self.initial_weights = weights self.input_spec = [InputSpec(ndim=3)] if self.input_dim: kwargs['input_shape'] = (self.input_dim,) super(GlobalProdRenormDiagNormalConstCovStdPrior2, self).__init__(**kwargs) self.supports_masking = True def build(self, input_shape): assert len(input_shape[0]) >= 2 input_dim = input_shape[0][-1] self.input_dim = input_dim self.input_spec = [InputSpec(dtype=K.floatx(), ndim='2+')] self.prec_1 = self.add_weight((self.output_dim,), initializer='zero', name='{}_logvar'.format(self.name), regularizer=self.regularizer, constraint=self.constraint) if self.initial_weights is not None: self.set_weights(self.initial_weights) del self.initial_weights self.built = True def call(self, xw, mask=None): x, weights = xw gamma = K.sum(x*weights, axis=1) N = K.sum(weights, axis=1) prec_1 = K.relu(self.prec_1) prec = 1 + N * prec_1 logvar = - K.log(prec) mu = gamma*K.exp(logvar) return [mu, logvar] def get_output_shape_for(self, input_shape): assert input_shape[0] and len(input_shape[0]) >= 2 assert input_shape[0][-1] and input_shape[0][-1] == self.input_dim output_shape = (input_shape[0][0], input_shape[0][2]) return [output_shape, output_shape] def get_config(self): config = {'output_dim': self.output_dim, 'regularizer': self.regularizer.get_config() if self.regularizer else None, 'constraint': self.constraint.get_config() if self.constraint else None} base_config = super(GlobalProdRenormDiagNormalConstCovStdPrior2, self).get_config() return dict(list(base_config.items()) + list(config.items())) def compute_mask(self, inputs, mask=None): return [None, None] class GlobalProdRenormDiagNormalConstCovStdPrior3(Layer): def __init__(self, output_dim, weights=None, regularizer=None, constraint=None, **kwargs): self.output_dim = output_dim self.input_dim = None self.regularizer = regularizers.get(regularizer) self.constraint = constraints.get(constraint) self.initial_weights = weights self.input_spec = [InputSpec(ndim=3)] if self.input_dim: 
kwargs['input_shape'] = (self.input_dim,) super(GlobalProdRenormDiagNormalConstCovStdPrior3, self).__init__(**kwargs) self.supports_masking = True def build(self, input_shape): assert len(input_shape[0]) >= 2 input_dim = input_shape[0][-1] self.input_dim = input_dim self.input_spec = [InputSpec(dtype=K.floatx(), ndim='2+')] self.prec_1 = self.add_weight((self.output_dim,), initializer='normal', name='{}_prec'.format(self.name), regularizer=self.regularizer, constraint=self.constraint) self.b = self.add_weight((self.output_dim,), initializer='zero', name='{}_b'.format(self.name)) if self.initial_weights is not None: self.set_weights(self.initial_weights) del self.initial_weights self.built = True def call(self, xw, mask=None): x, weights = xw gamma = K.sum(x*weights, axis=1) N = K.sum(weights, axis=1) prec = 1 + K.relu(N * self.prec_1 + self.b) logvar = - K.log(prec) mu = gamma*K.exp(logvar) return [mu, logvar] def get_output_shape_for(self, input_shape): assert input_shape[0] and len(input_shape[0]) >= 2 assert input_shape[0][-1] and input_shape[0][-1] == self.input_dim output_shape = (input_shape[0][0], input_shape[0][2]) return [output_shape, output_shape] def get_config(self): config = {'output_dim': self.output_dim, 'regularizer': self.regularizer.get_config() if self.regularizer else None, 'constraint': self.constraint.get_config() if self.constraint else None} base_config = super(GlobalProdRenormDiagNormalConstCovStdPrior3, self).get_config() return dict(list(base_config.items()) + list(config.items())) def compute_mask(self, inputs, mask=None): return [None, None] class GlobalProdRenormDiagNormalConstCovStdPrior4(Layer): def __init__(self, output_dim, weights=None, regularizer=None, constraint=None, **kwargs): self.output_dim = output_dim self.input_dim = None self.regularizer = regularizers.get(regularizer) self.constraint = constraints.get(constraint) #self.constraint = constraints.get('nonneg') self.initial_weights = weights self.input_spec = [InputSpec(ndim=3)] if self.input_dim: kwargs['input_shape'] = (self.input_dim,) super(GlobalProdRenormDiagNormalConstCovStdPrior4, self).__init__(**kwargs) self.supports_masking = True def build(self, input_shape): assert len(input_shape[0]) >= 2 input_dim = input_shape[0][-1] self.input_dim = input_dim self.input_spec = [InputSpec(dtype=K.floatx(), ndim='2+')] self.prec_1 = self.add_weight((self.output_dim,), initializer='zero', name='{}_prec'.format(self.name), regularizer=self.regularizer, constraint=self.constraint) self.b = self.add_weight((self.output_dim,), initializer='zero', name='{}_b'.format(self.name)) if self.initial_weights is not None: self.set_weights(self.initial_weights) del self.initial_weights self.built = True def call(self, xw, mask=None): x, weights = xw gamma = K.sum(x*weights, axis=1) N = K.sum(weights, axis=1) #prec = 1 + K.relu(N * self.prec_1) #prec = 1 + N * K.exp(self.prec_1) prec = K.exp(self.b) + N * K.exp(self.prec_1) #prec = 1 + N * self.prec_1 * self.prec_1 logvar = - K.log(prec) mu = gamma*K.exp(logvar) return [mu, logvar] def get_output_shape_for(self, input_shape): assert input_shape[0] and len(input_shape[0]) >= 2 assert input_shape[0][-1] and input_shape[0][-1] == self.input_dim output_shape = (input_shape[0][0], input_shape[0][2]) return [output_shape, output_shape] def get_config(self): config = {'output_dim': self.output_dim, 'regularizer': self.regularizer.get_config() if self.regularizer else None, 'constraint': self.constraint.get_config() if self.constraint else None} base_config = 
super(GlobalProdRenormDiagNormalConstCovStdPrior4, self).get_config() return dict(list(base_config.items()) + list(config.items())) def compute_mask(self, inputs, mask=None): return [None, None] class GlobalProdRenormNormalCommonCovStdPrior(_GlobalPooling1D): def __init__(self,**kwargs): super(GlobalProdRenormNormalCommonCovStdPrior, self).__init__(**kwargs) self.supports_masking = True def get_output_shape_for(self, input_shape): output_shape=(input_shape[0][0], input_shape[0][2]) output_shape_chol=(input_shape[0][0], input_shape[0][2], input_shape[0][2]) return [output_shape, output_shape, output_shape_chol] def call(self, xvw, mask=None): # input: mu_i/sigma2_i, log sigma2_i x, logvar_i, chol_i, weights = xvw gamma = K.sum(x*weights,axis=1) N = K.sum(weights, axis=1) var_i = K.exp(logvar_i) cov_i = K.dot(chol_i.T,var_i*chol_i) prec_i = K2.matrix_inverse(cov_i) #var_i = K.exp(logvar_i[:,0,:]) I = K.eye(K.shape(x)[-1]) prec = I + N * (prec_i - I) cov = K2.matrix_inverse(prec) mu = K.dot(gamma,cov) var = K.diag(cov) chol = K2.cholesky(cov/var) logvar = K.log(var) return [mu, logvar, chol] def compute_mask(self, inputs, mask=None): return [None, None, None] class GlobalProdRenormNormalConstCovStdPrior(Layer): def __init__(self, output_dim, weights=None, D_regularizer=None, chol_regularizer=None, D_constraint=None, chol_constraint=None, **kwargs): self.output_dim = output_dim self.input_dim = None self.D_regularizer = regularizers.get(D_regularizer) self.chol_regularizer = regularizers.get(chol_regularizer) self.D_constraint = None #constraints.get('nonneg') self.chol_constraint = hyp_constraints.Triu(output_dim, 1) self.initial_weights = weights self.input_spec = [InputSpec(ndim='2+')] if self.input_dim: kwargs['input_shape'] = (self.input_dim,) super(GlobalProdRenormNormalConstCovStdPrior, self).__init__(**kwargs) self.supports_masking = True def get_output_shape_for(self, input_shape): output_shape=(input_shape[0][0], input_shape[0][2]) output_shape_chol=(input_shape[0][0], input_shape[0][2], input_shape[0][2]) return [output_shape, output_shape, output_shape_chol] def build(self, input_shape): assert len(input_shape) >= 2 input_dim = input_shape[-1] self.input_dim = input_dim self.input_spec = [InputSpec(dtype=K.floatx(), ndim='2+')] self.D = self.add_weight((self.output_dim,), initializer='zero', name='{}_D'.format(self.name), regularizer=self.D_regularizer, constraint=self.D_constraint) self.chol = self.add_weight((self.output_dim, self.output_dim), initializer='identity', name='{}_chol'.format(self.name), regularizer=self.chol_regularizer, constraint=self.chol_constraint) if self.initial_weights is not None: self.set_weights(self.initial_weights) del self.initial_weights self.built = True def call(self, xw, mask=None): # input: mu_i/sigma2_i x, weights = xw gamma = K.sum(x*weights,axis=1) N = K.sum(weights, axis=1, keepdims=True) var_i = K.exp(self.D) cov_i = K.dot(self.chol.T*var_i,self.chol) #var_i = K.expand_dims(K.exp(self.D), dim=-1) #cov_i = K.dot(self.chol, var_i*self.chol.T) prec_i = K.expand_dims(K2.matrix_inverse(cov_i), dim=0) I = K.expand_dims(K.eye(self.output_dim, dtype=float_keras()), dim=0) #prec = I + N * (prec_i - I) prec = I + N * prec_i fcov = lambda x: K2.matrix_inverse(x) cov = K.map_fn(fcov, prec) cov = 0.5*(cov + K.permute_dimensions(cov, [0, 2, 1])) mu = K.batch_dot(gamma, cov) fchol = lambda x: K2.cholesky(x, lower=False) chol = K.map_fn(fchol, cov) fdiag = lambda x: K2.diag(x) sigma = K.map_fn(fdiag, chol) chol = chol/K.expand_dims(sigma, dim=-1) logvar = 
2*K.log(sigma) return [mu, logvar, chol] def get_config(self): config = {'output_dim': self.output_dim, 'D_regularizer': self.D_regularizer.get_config() if self.D_regularizer else None, 'D_constraint': self.D_constraint.get_config() if self.D_constraint else None, 'chol_regularizer': self.chol_regularizer.get_config() if self.chol_regularizer else None } base_config = super(GlobalProdRenormNormalConstCovStdPrior, self).get_config() return dict(list(base_config.items()) + list(config.items())) def compute_mask(self, inputs, mask=None): return [None, None, None]
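
For readers unfamiliar with the Keras 1 internals above, the core of GlobalWeightedAveragePooling1D is just a weighted mean over the time axis. A minimal NumPy sketch of the same arithmetic (illustrative only; the shapes are made up):

    import numpy as np

    batch, time, feat = 2, 4, 3
    x = np.random.randn(batch, time, feat)   # frame-level features
    w = np.random.rand(batch, time, 1)       # per-frame weights

    # mean(x*w, axis=1) / mean(w, axis=1): the 1/time factors cancel, so this
    # equals the classic weighted average sum(x*w) / sum(w) over time.
    pooled = (x * w).mean(axis=1) / w.mean(axis=1)
    ref = (x * w).sum(axis=1) / w.sum(axis=1)
    assert np.allclose(pooled, ref)
    print(pooled.shape)  # (2, 3): one pooled vector per batch item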
33.943182
95
0.581185
2,785
23,896
4.760503
0.054578
0.072409
0.041484
0.019007
0.810605
0.788053
0.757957
0.754111
0.744004
0.737215
0
0.013425
0.304863
23,896
703
96
33.991465
0.784721
0.205934
0
0.722798
0
0
0.01755
0
0
0
0
0
0.033679
1
0.145078
false
0
0.028497
0.046632
0.305699
0.002591
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
a83d87cb88259811125055e33c6a0cad8c426074
117
py
Python
app/utils/string_helper.py
saury2013/Memento
dbb2031a5aff3064f40bcb5afe631de8724a547e
[ "MIT" ]
null
null
null
app/utils/string_helper.py
saury2013/Memento
dbb2031a5aff3064f40bcb5afe631de8724a547e
[ "MIT" ]
null
null
null
app/utils/string_helper.py
saury2013/Memento
dbb2031a5aff3064f40bcb5afe631de8724a547e
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- def trim_string(text): if len(text) > 300: return text[:300] + "..." return text
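
Illustrative usage of the helper above (not from the repository); note the truncated result is 303 characters, since the ellipsis is appended after the 300-character cut:

    def trim_string(text):
        # Keep at most 300 characters, marking longer strings with an ellipsis.
        if len(text) > 300:
            return text[:300] + "..."
        return text

    print(len(trim_string("a" * 500)))  # 303: the 300 kept characters plus "..."
    print(trim_string("short"))         # strings of 300 chars or fewer pass through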
19.5
32
0.504274
16
117
3.625
0.6875
0.206897
0.413793
0.517241
0
0
0
0
0
0
0
0.083333
0.282051
117
6
33
19.5
0.607143
0.179487
0
0
0
0
0.031579
0
0
0
0
0
0
1
0.25
false
0
0
0
0.75
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
7
a8980012b55a7083df5016f869808ddf75ee07d8
340
py
Python
lib/BucketLib/BucketOperations.py
sumedhpb/TAF
fc6f4cb8dc0b8234393f2e52a7b4a1aa723d9449
[ "Apache-2.0" ]
9
2019-02-19T05:55:00.000Z
2022-01-20T10:37:28.000Z
lib/BucketLib/BucketOperations.py
sumedhpb/TAF
fc6f4cb8dc0b8234393f2e52a7b4a1aa723d9449
[ "Apache-2.0" ]
2
2019-02-19T07:28:54.000Z
2019-06-18T11:22:29.000Z
lib/BucketLib/BucketOperations.py
sumedhpb/TAF
fc6f4cb8dc0b8234393f2e52a7b4a1aa723d9449
[ "Apache-2.0" ]
155
2018-11-13T14:57:07.000Z
2022-03-28T11:53:22.000Z
""" Created on Oct 24, 2017 @author: riteshagarwal """ import mode if mode.java: from BucketOperations_JavaSDK import BucketHelper as bucketlib elif mode.cli: from BucketOperations_CLI import BucketHelper as bucketlib else: from BucketOperations_Rest import BucketHelper as bucketlib class BucketHelper(bucketlib): pass
18.888889
66
0.782353
41
340
6.414634
0.560976
0.228137
0.228137
0.330798
0
0
0
0
0
0
0
0.021201
0.167647
340
17
67
20
0.908127
0.138235
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.111111
0.444444
0
0.555556
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
7
a8adfc73232ab9e58fc27f631be8043b88105bb1
11,165
py
Python
venv/lib/python3.8/site-packages/spaceone/api/identity/v1/role_pb2_grpc.py
choonho/plugin-prometheus-mon-webhook
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
[ "Apache-2.0" ]
null
null
null
venv/lib/python3.8/site-packages/spaceone/api/identity/v1/role_pb2_grpc.py
choonho/plugin-prometheus-mon-webhook
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
[ "Apache-2.0" ]
null
null
null
venv/lib/python3.8/site-packages/spaceone/api/identity/v1/role_pb2_grpc.py
choonho/plugin-prometheus-mon-webhook
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
[ "Apache-2.0" ]
null
null
null
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc

from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from spaceone.api.identity.v1 import role_pb2 as spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2


class RoleStub(object):
    """Missing associated documentation comment in .proto file."""

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.create = channel.unary_unary(
                '/spaceone.api.identity.v1.Role/create',
                request_serializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.CreateRoleRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RoleInfo.FromString,
                )
        self.update = channel.unary_unary(
                '/spaceone.api.identity.v1.Role/update',
                request_serializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.UpdateRoleRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RoleInfo.FromString,
                )
        self.delete = channel.unary_unary(
                '/spaceone.api.identity.v1.Role/delete',
                request_serializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RoleRequest.SerializeToString,
                response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
                )
        self.get = channel.unary_unary(
                '/spaceone.api.identity.v1.Role/get',
                request_serializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.GetRoleRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RoleInfo.FromString,
                )
        self.list = channel.unary_unary(
                '/spaceone.api.identity.v1.Role/list',
                request_serializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RoleQuery.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RolesInfo.FromString,
                )
        self.stat = channel.unary_unary(
                '/spaceone.api.identity.v1.Role/stat',
                request_serializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RoleStatQuery.SerializeToString,
                response_deserializer=google_dot_protobuf_dot_struct__pb2.Struct.FromString,
                )


class RoleServicer(object):
    """Missing associated documentation comment in .proto file."""

    def create(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def update(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def delete(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def get(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def list(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def stat(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_RoleServicer_to_server(servicer, server):
    rpc_method_handlers = {
            'create': grpc.unary_unary_rpc_method_handler(
                    servicer.create,
                    request_deserializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.CreateRoleRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RoleInfo.SerializeToString,
            ),
            'update': grpc.unary_unary_rpc_method_handler(
                    servicer.update,
                    request_deserializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.UpdateRoleRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RoleInfo.SerializeToString,
            ),
            'delete': grpc.unary_unary_rpc_method_handler(
                    servicer.delete,
                    request_deserializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RoleRequest.FromString,
                    response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            ),
            'get': grpc.unary_unary_rpc_method_handler(
                    servicer.get,
                    request_deserializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.GetRoleRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RoleInfo.SerializeToString,
            ),
            'list': grpc.unary_unary_rpc_method_handler(
                    servicer.list,
                    request_deserializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RoleQuery.FromString,
                    response_serializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RolesInfo.SerializeToString,
            ),
            'stat': grpc.unary_unary_rpc_method_handler(
                    servicer.stat,
                    request_deserializer=spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RoleStatQuery.FromString,
                    response_serializer=google_dot_protobuf_dot_struct__pb2.Struct.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'spaceone.api.identity.v1.Role', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))


 # This class is part of an EXPERIMENTAL API.
class Role(object):
    """Missing associated documentation comment in .proto file."""

    @staticmethod
    def create(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.identity.v1.Role/create',
            spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.CreateRoleRequest.SerializeToString,
            spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RoleInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def update(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.identity.v1.Role/update',
            spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.UpdateRoleRequest.SerializeToString,
            spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RoleInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def delete(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.identity.v1.Role/delete',
            spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RoleRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def get(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.identity.v1.Role/get',
            spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.GetRoleRequest.SerializeToString,
            spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RoleInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def list(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.identity.v1.Role/list',
            spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RoleQuery.SerializeToString,
            spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RolesInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def stat(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.identity.v1.Role/stat',
            spaceone_dot_api_dot_identity_dot_v1_dot_role__pb2.RoleStatQuery.SerializeToString,
            google_dot_protobuf_dot_struct__pb2.Struct.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
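For context, client code would typically drive the generated stub as sketched below. The channel target is an assumption, and the `CreateRoleRequest` fields are left at their defaults because the .proto definition is not shown here.

# Hypothetical client-side usage of the generated RoleStub.
import grpc
from spaceone.api.identity.v1 import role_pb2, role_pb2_grpc

channel = grpc.insecure_channel('localhost:50051')     # assumed endpoint
stub = role_pb2_grpc.RoleStub(channel)
role_info = stub.create(role_pb2.CreateRoleRequest())  # fields omitted; schema not shown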
47.713675
122
0.683117
1,191
11,165
5.984047
0.09152
0.03143
0.060895
0.073944
0.897994
0.894486
0.876947
0.821945
0.757121
0.741125
0
0.010319
0.244872
11,165
233
123
47.918455
0.835014
0.066547
0
0.541237
1
0
0.073888
0.044391
0
0
0
0
0
1
0.072165
false
0
0.020619
0.030928
0.139175
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
7648dc7d7499b59b864b7f9b11a35b97f201f7f5
89,393
py
Python
ietf/meeting/tests_views.py
ekr/ietfdb
8d936836b0b9ff31cda415b0a423e3f5b33ab695
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
ietf/meeting/tests_views.py
ekr/ietfdb
8d936836b0b9ff31cda415b0a423e3f5b33ab695
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
ietf/meeting/tests_views.py
ekr/ietfdb
8d936836b0b9ff31cda415b0a423e3f5b33ab695
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- import json import os import shutil import datetime import urlparse import random import debug # pyflakes:ignore from django.urls import reverse as urlreverse from django.conf import settings from django.contrib.auth.models import User from mock import patch from pyquery import PyQuery from StringIO import StringIO from ietf.doc.models import Document from ietf.group.models import Group, Role from ietf.meeting.helpers import can_approve_interim_request, can_view_interim_request from ietf.meeting.helpers import send_interim_approval_request from ietf.meeting.helpers import send_interim_cancellation_notice from ietf.meeting.helpers import send_interim_minutes_reminder, populate_important_dates from ietf.meeting.models import Session, TimeSlot, Meeting from ietf.meeting.test_data import make_meeting_test_data, make_interim_meeting from ietf.meeting.utils import finalize from ietf.name.models import SessionStatusName from ietf.utils.test_utils import TestCase, login_testing_unauthorized, unicontent from ietf.utils.mail import outbox from ietf.utils.text import xslugify from ietf.person.factories import PersonFactory from ietf.group.factories import GroupFactory, GroupEventFactory from ietf.meeting.factories import ( SessionFactory, SessionPresentationFactory, ScheduleFactory, MeetingFactory, FloorPlanFactory ) from ietf.doc.factories import DocumentFactory class MeetingTests(TestCase): def setUp(self): self.materials_dir = self.tempdir('materials') self.saved_agenda_path = settings.AGENDA_PATH settings.AGENDA_PATH = self.materials_dir def tearDown(self): settings.AGENDA_PATH = self.saved_agenda_path shutil.rmtree(self.materials_dir) def write_materials_file(self, meeting, doc, content): path = os.path.join(self.materials_dir, "%s/%s/%s" % (meeting.number, doc.type_id, doc.external_url)) dirname = os.path.dirname(path) if not os.path.exists(dirname): os.makedirs(dirname) with open(path, "w") as f: f.write(content) def write_materials_files(self, meeting, session): draft = Document.objects.filter(type="draft", group=session.group).first() self.write_materials_file(meeting, session.materials.get(type="agenda"), "1. WG status (15 minutes)\n\n2. Status of %s\n\n" % draft.name) self.write_materials_file(meeting, session.materials.get(type="minutes"), "1. More work items underway\n\n2. 
The draft will be finished before next meeting\n\n") self.write_materials_file(meeting, session.materials.filter(type="slides").exclude(states__type__slug='slides',states__slug='deleted').first(), "This is a slideshow") def test_meeting_agenda(self): meeting = make_meeting_test_data() session = Session.objects.filter(meeting=meeting, group__acronym="mars").first() slot = TimeSlot.objects.get(sessionassignments__session=session,sessionassignments__schedule=meeting.agenda) # self.write_materials_files(meeting, session) # future_year = datetime.date.today().year+1 future_num = (future_year-1984)*3 # valid for the mid-year meeting future_meeting = Meeting.objects.create(date=datetime.date(future_year, 7, 22), number=future_num, type_id='ietf', city="Panama City", country="PA", time_zone='America/Panama') # utc time_interval = "%s-%s" % (slot.utc_start_time().strftime("%H:%M").lstrip("0"), (slot.utc_start_time() + slot.duration).strftime("%H:%M").lstrip("0")) r = self.client.get(urlreverse("ietf.meeting.views.agenda", kwargs=dict(num=meeting.number,utc='-utc'))) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) agenda_content = q("#content").html() self.assertTrue(session.group.acronym in agenda_content) self.assertTrue(session.group.name in agenda_content) self.assertTrue(session.group.parent.acronym.upper() in agenda_content) self.assertTrue(slot.location.name in agenda_content) self.assertTrue(time_interval in agenda_content) # plain time_interval = "%s-%s" % (slot.time.strftime("%H:%M").lstrip("0"), (slot.time + slot.duration).strftime("%H:%M").lstrip("0")) r = self.client.get(urlreverse("ietf.meeting.views.agenda", kwargs=dict(num=meeting.number))) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) agenda_content = q("#content").html() self.assertTrue(session.group.acronym in agenda_content) self.assertTrue(session.group.name in agenda_content) self.assertTrue(session.group.parent.acronym.upper() in agenda_content) self.assertTrue(slot.location.name in agenda_content) self.assertTrue(time_interval in agenda_content) # Make sure there's a frame for the agenda and it points to the right place self.assertTrue(any([session.materials.get(type='agenda').href() in x.attrib["data-src"] for x in q('tr div.modal-body div.frame')])) # Make sure undeleted slides are present and deleted slides are not self.assertTrue(any([session.materials.filter(type='slides').exclude(states__type__slug='slides',states__slug='deleted').first().title in x.text for x in q('tr div.modal-body ul a')])) self.assertFalse(any([session.materials.filter(type='slides',states__type__slug='slides',states__slug='deleted').first().title in x.text for x in q('tr div.modal-body ul a')])) # future meeting, no agenda r = self.client.get(urlreverse("ietf.meeting.views.agenda", kwargs=dict(num=future_meeting.number))) self.assertEqual(r.status_code, 200) self.assertContains(r, u"There is no agenda available yet.") self.assertTemplateUsed(r, 'meeting/no-agenda.html') # text # the rest of the results don't have as nicely formatted times time_interval = time_interval.replace(":", "") r = self.client.get(urlreverse("ietf.meeting.views.agenda", kwargs=dict(num=meeting.number, ext=".txt"))) self.assertEqual(r.status_code, 200) agenda_content = r.content self.assertTrue(session.group.acronym in agenda_content) self.assertTrue(session.group.name in agenda_content) self.assertTrue(session.group.parent.acronym.upper() in agenda_content) self.assertTrue(slot.location.name in agenda_content) self.assertTrue(time_interval in 
agenda_content) r = self.client.get(urlreverse("ietf.meeting.views.agenda", kwargs=dict(num=meeting.number,name=meeting.unofficial_schedule.name,owner=meeting.unofficial_schedule.owner.email()))) self.assertEqual(r.status_code, 200) self.assertTrue('not the official schedule' in unicontent(r)) # future meeting, no agenda r = self.client.get(urlreverse("ietf.meeting.views.agenda", kwargs=dict(num=future_meeting.number, ext=".txt"))) self.assertEqual(r.status_code, 200) self.assertContains(r, "There is no agenda available yet.") self.assertTemplateUsed(r, 'meeting/no-agenda.txt') # CSV r = self.client.get(urlreverse("ietf.meeting.views.agenda", kwargs=dict(num=meeting.number, ext=".csv"))) self.assertEqual(r.status_code, 200) agenda_content = r.content self.assertTrue(session.group.acronym in agenda_content) self.assertTrue(session.group.name in agenda_content) self.assertTrue(session.group.parent.acronym.upper() in agenda_content) self.assertTrue(slot.location.name in agenda_content) self.assertTrue(session.materials.get(type='agenda').external_url in unicontent(r)) self.assertTrue(session.materials.filter(type='slides').exclude(states__type__slug='slides',states__slug='deleted').first().external_url in unicontent(r)) self.assertFalse(session.materials.filter(type='slides',states__type__slug='slides',states__slug='deleted').first().external_url in unicontent(r)) # iCal r = self.client.get(urlreverse("ietf.meeting.views.ical_agenda", kwargs=dict(num=meeting.number)) + "?" + session.group.parent.acronym.upper()) self.assertEqual(r.status_code, 200) agenda_content = r.content self.assertTrue(session.group.acronym in agenda_content) self.assertTrue(session.group.name in agenda_content) self.assertTrue(slot.location.name in agenda_content) self.assertTrue("BEGIN:VTIMEZONE" in agenda_content) self.assertTrue("END:VTIMEZONE" in agenda_content) self.assertTrue(session.agenda().href() in unicontent(r)) self.assertTrue(session.materials.filter(type='slides').exclude(states__type__slug='slides',states__slug='deleted').first().href() in unicontent(r)) # TODO - the ics view uses .all on a queryset in a view so it's showing the deleted slides. 
#self.assertFalse(session.materials.filter(type='slides',states__type__slug='slides',states__slug='deleted').first().get_absolute_url() in unicontent(r)) # week view r = self.client.get(urlreverse("ietf.meeting.views.week_view", kwargs=dict(num=meeting.number))) self.assertEqual(r.status_code, 200) agenda_content = r.content self.assertTrue(session.group.acronym in agenda_content) self.assertTrue(slot.location.name in agenda_content) def test_agenda_current_audio(self): date = datetime.date.today() meeting = MeetingFactory(type_id='ietf', date=date ) make_meeting_test_data(meeting=meeting) url = urlreverse("ietf.meeting.views.agenda", kwargs=dict(num=meeting.number)) r = self.client.get(url) self.assertTrue("Audio stream" in unicontent(r)) def test_agenda_by_room(self): meeting = make_meeting_test_data() url = urlreverse("ietf.meeting.views.agenda_by_room",kwargs=dict(num=meeting.number)) login_testing_unauthorized(self,"secretary",url) r = self.client.get(url) self.assertTrue(all([x in unicontent(r) for x in ['mars','IESG Breakfast','Test Room','Breakfast Room']])) url = urlreverse("ietf.meeting.views.agenda_by_room",kwargs=dict(num=meeting.number,name=meeting.unofficial_schedule.name,owner=meeting.unofficial_schedule.owner.email())) r = self.client.get(url) self.assertTrue(all([x in unicontent(r) for x in ['mars','Test Room',]])) self.assertFalse('IESG Breakfast' in unicontent(r)) def test_agenda_by_type(self): meeting = make_meeting_test_data() url = urlreverse("ietf.meeting.views.agenda_by_type",kwargs=dict(num=meeting.number)) login_testing_unauthorized(self,"secretary",url) r = self.client.get(url) self.assertTrue(all([x in unicontent(r) for x in ['mars','IESG Breakfast','Test Room','Breakfast Room']])) url = urlreverse("ietf.meeting.views.agenda_by_type",kwargs=dict(num=meeting.number,name=meeting.unofficial_schedule.name,owner=meeting.unofficial_schedule.owner.email())) r = self.client.get(url) self.assertTrue(all([x in unicontent(r) for x in ['mars','Test Room',]])) self.assertFalse('IESG Breakfast' in unicontent(r)) url = urlreverse("ietf.meeting.views.agenda_by_type",kwargs=dict(num=meeting.number,type='session')) r = self.client.get(url) self.assertTrue(all([x in unicontent(r) for x in ['mars','Test Room']])) self.assertFalse(any([x in unicontent(r) for x in ['IESG Breakfast','Breakfast Room']])) url = urlreverse("ietf.meeting.views.agenda_by_type",kwargs=dict(num=meeting.number,type='lead')) r = self.client.get(url) self.assertFalse(any([x in unicontent(r) for x in ['mars','Test Room']])) self.assertTrue(all([x in unicontent(r) for x in ['IESG Breakfast','Breakfast Room']])) url = urlreverse("ietf.meeting.views.agenda_by_type",kwargs=dict(num=meeting.number,type='lead',name=meeting.unofficial_schedule.name,owner=meeting.unofficial_schedule.owner.email())) r = self.client.get(url) self.assertFalse(any([x in unicontent(r) for x in ['IESG Breakfast','Breakfast Room']])) def test_agenda_room_view(self): meeting = make_meeting_test_data() url = urlreverse("ietf.meeting.views.room_view",kwargs=dict(num=meeting.number)) login_testing_unauthorized(self,"secretary",url) r = self.client.get(url) self.assertEqual(r.status_code,200) self.assertTrue(all([x in unicontent(r) for x in ['mars','IESG Breakfast','Test Room','Breakfast Room']])) url = urlreverse("ietf.meeting.views.room_view",kwargs=dict(num=meeting.number,name=meeting.unofficial_schedule.name,owner=meeting.unofficial_schedule.owner.email())) r = self.client.get(url) self.assertEqual(r.status_code,200) self.assertTrue(all([x in 
unicontent(r) for x in ['mars','Test Room','Breakfast Room']])) self.assertFalse('IESG Breakfast' in unicontent(r)) def test_agenda_week_view(self): meeting = make_meeting_test_data() url = urlreverse("ietf.meeting.views.week_view",kwargs=dict(num=meeting.number)) + "#farfut" r = self.client.get(url) self.assertEqual(r.status_code,200) self.assertTrue(all([x in unicontent(r) for x in ['var all_items', 'maximize', 'draw_calendar', ]])) def test_materials(self): meeting = make_meeting_test_data() session = Session.objects.filter(meeting=meeting, group__acronym="mars").first() self.do_test_materials(meeting, session) def test_interim_materials(self): make_meeting_test_data() group = Group.objects.get(acronym='mars') date = datetime.datetime.today() - datetime.timedelta(days=10) meeting = make_interim_meeting(group=group, date=date, status='sched') session = meeting.session_set.first() self.do_test_materials(meeting, session) def do_test_materials(self, meeting, session): self.write_materials_files(meeting, session) # session agenda r = self.client.get(urlreverse("ietf.meeting.views.materials_document", kwargs=dict(num=meeting.number, document=session.agenda()))) self.assertEqual(r.status_code, 200) self.assertTrue("1. WG status" in unicontent(r)) # session minutes r = self.client.get(urlreverse("ietf.meeting.views.materials_document", kwargs=dict(num=meeting.number, document=session.minutes()))) self.assertEqual(r.status_code, 200) self.assertTrue("1. More work items underway" in unicontent(r)) # test with explicit meeting number in url if meeting.number.isdigit(): r = self.client.get(urlreverse("ietf.meeting.views.materials", kwargs=dict(num=meeting.number))) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) row = q('#content #%s' % str(session.group.acronym)).closest("tr") self.assertTrue(row.find('a:contains("Agenda")')) self.assertTrue(row.find('a:contains("Minutes")')) self.assertTrue(row.find('a:contains("Slideshow")')) self.assertFalse(row.find("a:contains(\"Bad Slideshow\")")) # test with no meeting number in url r = self.client.get(urlreverse("ietf.meeting.views.materials", kwargs=dict())) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) row = q('#content #%s' % str(session.group.acronym)).closest("tr") self.assertTrue(row.find('a:contains("Agenda")')) self.assertTrue(row.find('a:contains("Minutes")')) self.assertTrue(row.find('a:contains("Slideshow")')) self.assertFalse(row.find("a:contains(\"Bad Slideshow\")")) # test with a loggged-in wg chair self.client.login(username="marschairman", password="marschairman+password") r = self.client.get(urlreverse("ietf.meeting.views.materials", kwargs=dict(num=meeting.number))) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) row = q('#content #%s' % str(session.group.acronym)).closest("tr") self.assertTrue(row.find('a:contains("Agenda")')) self.assertTrue(row.find('a:contains("Minutes")')) self.assertTrue(row.find('a:contains("Slideshow")')) self.assertFalse(row.find("a:contains(\"Bad Slideshow\")")) self.assertTrue(row.find('a:contains("Edit materials")')) # FIXME: missing tests of .pdf/.tar generation (some code can # probably be lifted from similar tests in iesg/tests.py) # document-specific urls for doc in session.materials.exclude(states__slug='deleted'): url = urlreverse('ietf.meeting.views.materials_document', kwargs=dict(num=meeting.number, document=doc.name)) r = self.client.get(url) self.assertEqual(unicontent(r), doc.text()) def test_materials_editable_groups(self): meeting = make_meeting_test_data() 
self.client.login(username="marschairman", password="marschairman+password") r = self.client.get(urlreverse("ietf.meeting.views.materials_editable_groups", kwargs={'num':meeting.number})) self.assertEqual(r.status_code, 200) self.assertTrue(meeting.number in unicontent(r)) self.assertTrue("mars" in unicontent(r)) self.assertFalse("No session requested" in unicontent(r)) self.client.login(username="ad", password="ad+password") r = self.client.get(urlreverse("ietf.meeting.views.materials_editable_groups", kwargs={'num':meeting.number})) self.assertEqual(r.status_code, 200) self.assertTrue(meeting.number in unicontent(r)) self.assertTrue("frfarea" in unicontent(r)) self.assertTrue("No session requested" in unicontent(r)) self.client.login(username="plain",password="plain+password") r = self.client.get(urlreverse("ietf.meeting.views.materials_editable_groups", kwargs={'num':meeting.number})) self.assertEqual(r.status_code, 200) self.assertTrue(meeting.number in unicontent(r)) self.assertTrue("You cannot manage the meeting materials for any groups" in unicontent(r)) def test_proceedings(self): meeting = make_meeting_test_data() session = Session.objects.filter(meeting=meeting, group__acronym="mars").first() GroupEventFactory(group=session.group,type='status_update') SessionPresentationFactory(document__type_id='recording',session=session) SessionPresentationFactory(document__type_id='recording',session=session,document__title="Audio recording for tests") self.write_materials_files(meeting, session) url = urlreverse("ietf.meeting.views.proceedings", kwargs=dict(num=meeting.number)) r = self.client.get(url) self.assertEqual(r.status_code, 200) def test_proceedings_acknowledgements(self): make_meeting_test_data() meeting = MeetingFactory(type_id='ietf', date=datetime.date(2016,7,14), number="96") meeting.acknowledgements = 'test acknowledgements' meeting.save() url = urlreverse('ietf.meeting.views.proceedings_acknowledgements',kwargs={'num':meeting.number}) response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertTrue('test acknowledgements' in response.content) @patch('urllib2.urlopen') def test_proceedings_attendees(self, mock_urlopen): mock_urlopen.return_value = StringIO('[{"LastName":"Smith","FirstName":"John","Company":"ABC","Country":"US"}]') make_meeting_test_data() meeting = MeetingFactory(type_id='ietf', date=datetime.date(2016,7,14), number="96") finalize(meeting) url = urlreverse('ietf.meeting.views.proceedings_attendees',kwargs={'num':96}) response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertTrue('Attendee List' in response.content) q = PyQuery(response.content) self.assertEqual(1,len(q("#id_attendees tbody tr"))) @patch('urllib2.urlopen') def test_proceedings_overview(self, mock_urlopen): '''Test proceedings IETF Overview page. Note: old meetings aren't supported so need to add a new meeting then test. 
''' mock_urlopen.return_value = StringIO('[{"LastName":"Smith","FirstName":"John","Company":"ABC","Country":"US"}]') make_meeting_test_data() meeting = MeetingFactory(type_id='ietf', date=datetime.date(2016,7,14), number="96") finalize(meeting) url = urlreverse('ietf.meeting.views.proceedings_overview',kwargs={'num':96}) response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertTrue('The Internet Engineering Task Force' in response.content) def test_proceedings_progress_report(self): make_meeting_test_data() MeetingFactory(type_id='ietf', date=datetime.date(2016,4,3), number="95") MeetingFactory(type_id='ietf', date=datetime.date(2016,7,14), number="96") url = urlreverse('ietf.meeting.views.proceedings_progress_report',kwargs={'num':96}) response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertTrue('Progress Report' in response.content) def test_feed(self): meeting = make_meeting_test_data() session = Session.objects.filter(meeting=meeting, group__acronym="mars").first() r = self.client.get("/feed/wg-proceedings/") self.assertEqual(r.status_code, 200) self.assertTrue("agenda" in unicontent(r)) self.assertTrue(session.group.acronym in unicontent(r)) def test_important_dates(self): meeting=MeetingFactory(type_id='ietf') populate_important_dates(meeting) url = urlreverse('ietf.meeting.views.important_dates',kwargs={'num':meeting.number}) r = self.client.get(url) self.assertEqual(r.status_code, 200) self.assertTrue(str(meeting.importantdate_set.first().date) in unicontent(r)) class EditTests(TestCase): def setUp(self): # make sure we have the colors of the area from ietf.group.colors import fg_group_colors, bg_group_colors area_upper = "FARFUT" fg_group_colors[area_upper] = "#333" bg_group_colors[area_upper] = "#aaa" def test_edit_agenda(self): meeting = make_meeting_test_data() self.client.login(username="secretary", password="secretary+password") r = self.client.get(urlreverse("ietf.meeting.views.edit_agenda", kwargs=dict(num=meeting.number))) self.assertEqual(r.status_code, 200) self.assertTrue("load_assignments" in unicontent(r)) def test_save_agenda_as_and_read_permissions(self): meeting = make_meeting_test_data() # try to get non-existing agenda url = urlreverse("ietf.meeting.views.edit_agenda", kwargs=dict(num=meeting.number, owner=meeting.agenda.owner_email(), name="foo")) r = self.client.get(url) self.assertEqual(r.status_code, 404) # save as new name (requires valid existing agenda) url = urlreverse("ietf.meeting.views.edit_agenda", kwargs=dict(num=meeting.number, owner=meeting.agenda.owner_email(), name=meeting.agenda.name)) self.client.login(username="ad", password="ad+password") r = self.client.post(url, { 'savename': "foo", 'saveas': "saveas", }) self.assertEqual(r.status_code, 302) # Verify that we actually got redirected to a new place. 
self.assertNotEqual(urlparse.urlparse(r.url).path, url) # get schedule = meeting.get_schedule_by_name("foo") url = urlreverse("ietf.meeting.views.edit_agenda", kwargs=dict(num=meeting.number, owner=schedule.owner_email(), name="foo")) r = self.client.get(url) self.assertEqual(r.status_code, 200) schedule.visible = True schedule.public = False schedule.save() # get as anonymous doesn't work self.client.logout() r = self.client.get(url) self.assertEqual(r.status_code, 403) # public, now anonymous works schedule.public = True schedule.save() r = self.client.get(url) self.assertEqual(r.status_code, 200) # Secretariat can always see it schedule.visible = False schedule.public = False schedule.save() self.client.login(username="secretary", password="secretary+password") r = self.client.get(url) self.assertEqual(r.status_code, 200) def test_save_agenda_broken_names(self): meeting = make_meeting_test_data() # save as new name (requires valid existing agenda) url = urlreverse("ietf.meeting.views.edit_agenda", kwargs=dict(num=meeting.number, owner=meeting.agenda.owner_email(), name=meeting.agenda.name)) self.client.login(username="ad", password="ad+password") r = self.client.post(url, { 'savename': "/no/this/should/not/work/it/is/too/long", 'saveas': "saveas", }) self.assertEqual(r.status_code, 302) self.assertEqual(urlparse.urlparse(r.url).path, url) # TODO: Verify that an error message was in fact returned. r = self.client.post(url, { 'savename': "/invalid/chars/", 'saveas': "saveas", }) # TODO: Verify that an error message was in fact returned. self.assertEqual(r.status_code, 302) self.assertEqual(urlparse.urlparse(r.url).path, url) # Non-ASCII alphanumeric characters r = self.client.post(url, { 'savename': u"f\u00E9ling", 'saveas': "saveas", }) # TODO: Verify that an error message was in fact returned. 
self.assertEqual(r.status_code, 302) self.assertEqual(urlparse.urlparse(r.url).path, url) def test_edit_timeslots(self): meeting = make_meeting_test_data() self.client.login(username="secretary", password="secretary+password") r = self.client.get(urlreverse("ietf.meeting.views.edit_timeslots", kwargs=dict(num=meeting.number))) self.assertEqual(r.status_code, 200) self.assertTrue(meeting.room_set.all().first().name in unicontent(r)) def test_slot_to_the_right(self): meeting = make_meeting_test_data() session = Session.objects.filter(meeting=meeting, group__acronym="mars").first() mars_scheduled = session.timeslotassignments.get(schedule__name='test-agenda') mars_slot = TimeSlot.objects.get(sessionassignments__session=session,sessionassignments__schedule__name='test-agenda') mars_ends = mars_slot.time + mars_slot.duration session = Session.objects.filter(meeting=meeting, group__acronym="ames").first() ames_slot_qs = TimeSlot.objects.filter(sessionassignments__session=session,sessionassignments__schedule__name='test-agenda') ames_slot_qs.update(time=mars_ends + datetime.timedelta(seconds=11 * 60)) self.assertTrue(not mars_slot.slot_to_the_right) self.assertTrue(not mars_scheduled.slot_to_the_right) ames_slot_qs.update(time=mars_ends + datetime.timedelta(seconds=10 * 60)) self.assertTrue(mars_slot.slot_to_the_right) self.assertTrue(mars_scheduled.slot_to_the_right) class SessionDetailsTests(TestCase): def test_session_details(self): group = GroupFactory.create(type_id='wg',state_id='active') session = SessionFactory.create(meeting__type_id='ietf',group=group, meeting__date=datetime.date.today()+datetime.timedelta(days=90)) SessionPresentationFactory.create(session=session,document__type_id='draft',rev=None) SessionPresentationFactory.create(session=session,document__type_id='minutes') SessionPresentationFactory.create(session=session,document__type_id='slides') SessionPresentationFactory.create(session=session,document__type_id='agenda') url = urlreverse('ietf.meeting.views.session_details', kwargs=dict(num=session.meeting.number, acronym=group.acronym)) r = self.client.get(url) self.assertTrue(all([x in unicontent(r) for x in ('slides','agenda','minutes','draft')])) self.assertFalse('deleted' in unicontent(r)) def test_add_session_drafts(self): group = GroupFactory.create(type_id='wg',state_id='active') group_chair = PersonFactory.create() group.role_set.create(name_id='chair',person = group_chair, email = group_chair.email()) session = SessionFactory.create(meeting__type_id='ietf',group=group, meeting__date=datetime.date.today()+datetime.timedelta(days=90)) SessionPresentationFactory.create(session=session,document__type_id='draft',rev=None) old_draft = session.sessionpresentation_set.filter(document__type='draft').first().document new_draft = DocumentFactory(type_id='draft') url = urlreverse('ietf.meeting.views.add_session_drafts', kwargs=dict(num=session.meeting.number, session_id=session.pk)) r = self.client.get(url) self.assertEqual(r.status_code, 404) self.client.login(username="plain",password="plain+password") r = self.client.get(url) self.assertEqual(r.status_code, 404) self.client.login(username=group_chair.user.username, password='%s+password'%group_chair.user.username) r = self.client.get(url) self.assertEqual(r.status_code, 200) self.assertTrue(old_draft.name in unicontent(r)) r = self.client.post(url,dict(drafts=[new_draft.name,old_draft.name])) self.assertTrue(r.status_code, 200) q = PyQuery(r.content) self.assertTrue("Already linked:" in q('form .alert-danger').text()) 
self.assertEqual(1,session.sessionpresentation_set.count()) r = self.client.post(url,dict(drafts=[new_draft.name,])) self.assertTrue(r.status_code, 302) self.assertEqual(2,session.sessionpresentation_set.count()) session.meeting.date -= datetime.timedelta(days=180) session.meeting.save() r = self.client.get(url) self.assertEqual(r.status_code,404) self.client.login(username='secretary',password='secretary+password') r = self.client.get(url) self.assertEqual(r.status_code,200) q = PyQuery(r.content) self.assertEqual(1,len(q(".alert-warning:contains('may affect published proceedings')"))) class EditScheduleListTests(TestCase): def setUp(self): self.mtg = MeetingFactory(type_id='ietf') ScheduleFactory(meeting=self.mtg,name='Empty-Schedule') def test_list_agendas(self): url = urlreverse('ietf.meeting.views.list_agendas',kwargs={'num':self.mtg.number}) login_testing_unauthorized(self,"secretary",url) r = self.client.get(url) self.assertTrue(r.status_code, 200) def test_delete_schedule(self): url = urlreverse('ietf.meeting.views.delete_schedule', kwargs={'num':self.mtg.number, 'owner':self.mtg.agenda.owner.email_address(), 'name':self.mtg.agenda.name, }) login_testing_unauthorized(self,"secretary",url) r = self.client.get(url) self.assertTrue(r.status_code, 403) r = self.client.post(url,{'save':1}) self.assertTrue(r.status_code, 403) self.assertEqual(self.mtg.schedule_set.count(),2) self.mtg.agenda=None self.mtg.save() r = self.client.get(url) self.assertTrue(r.status_code, 200) r = self.client.post(url,{'save':1}) self.assertTrue(r.status_code, 302) self.assertEqual(self.mtg.schedule_set.count(),1) def test_make_schedule_official(self): schedule = self.mtg.schedule_set.exclude(id=self.mtg.agenda.id).first() url = urlreverse('ietf.meeting.views.make_schedule_official', kwargs={'num':self.mtg.number, 'owner':schedule.owner.email_address(), 'name':schedule.name, }) login_testing_unauthorized(self,"secretary",url) r = self.client.get(url) self.assertTrue(r.status_code, 200) r = self.client.post(url,{'save':1}) self.assertTrue(r.status_code, 302) mtg = Meeting.objects.get(number=self.mtg.number) self.assertEqual(mtg.agenda,schedule) # ------------------------------------------------- # Interim Meeting Tests # ------------------------------------------------- class InterimTests(TestCase): def setUp(self): self.materials_dir = self.tempdir('materials') self.saved_agenda_path = settings.AGENDA_PATH settings.AGENDA_PATH = self.materials_dir def tearDown(self): settings.AGENDA_PATH = self.saved_agenda_path shutil.rmtree(self.materials_dir) def check_interim_tabs(self, url): '''Helper function to check interim meeting list tabs''' # no logged in - no tabs r = self.client.get(url) q = PyQuery(r.content) self.assertEqual(len(q("ul.nav-tabs")), 0) # plain user - no tabs username = "plain" self.client.login(username=username, password=username + "+password") r = self.client.get(url) q = PyQuery(r.content) self.assertEqual(len(q("ul.nav-tabs")), 0) self.client.logout() # privileged user username = "ad" self.client.login(username=username, password=username + "+password") r = self.client.get(url) q = PyQuery(r.content) self.assertEqual(len(q("a:contains('Pending')")), 1) self.assertEqual(len(q("a:contains('Announce')")), 0) self.client.logout() # secretariat username = "secretary" self.client.login(username=username, password=username + "+password") r = self.client.get(url) q = PyQuery(r.content) self.assertEqual(len(q("a:contains('Pending')")), 1) self.assertEqual(len(q("a:contains('Announce')")), 1) 
self.client.logout() def test_interim_announce(self): make_meeting_test_data() url = urlreverse("ietf.meeting.views.interim_announce") meeting = Meeting.objects.filter(type='interim', session__group__acronym='mars').first() session = meeting.session_set.first() session.status = SessionStatusName.objects.get(slug='scheda') session.save() login_testing_unauthorized(self, "secretary", url) r = self.client.get(url) self.assertEqual(r.status_code, 200) self.assertTrue(meeting.number in r.content) def test_interim_skip_announcement(self): make_meeting_test_data() group = Group.objects.get(acronym='irg') date = datetime.date.today() + datetime.timedelta(days=30) meeting = make_interim_meeting(group=group, date=date, status='scheda') url = urlreverse("ietf.meeting.views.interim_skip_announcement", kwargs={'number': meeting.number}) login_testing_unauthorized(self, "secretary", url) r = self.client.get(url) self.assertEqual(r.status_code, 200) # check post len_before = len(outbox) r = self.client.post(url) self.assertRedirects(r, urlreverse('ietf.meeting.views.interim_announce')) self.assertEqual(meeting.session_set.first().status.slug,'sched') self.assertEqual(len(outbox), len_before) def test_interim_send_announcement(self): make_meeting_test_data() meeting = Meeting.objects.filter(type='interim', session__status='apprw', session__group__acronym='mars').first() url = urlreverse("ietf.meeting.views.interim_send_announcement", kwargs={'number': meeting.number}) login_testing_unauthorized(self, "secretary", url) r = self.client.get(url) self.assertEqual(r.status_code, 200) initial = r.context['form'].initial # send announcement len_before = len(outbox) r = self.client.post(url, initial) self.assertRedirects(r, urlreverse('ietf.meeting.views.interim_announce')) self.assertEqual(len(outbox), len_before + 1) self.assertTrue('WG Virtual Meeting' in outbox[-1]['Subject']) def test_interim_approve_by_ad(self): make_meeting_test_data() meeting = Meeting.objects.filter(type='interim', session__status='apprw', session__group__acronym='mars').first() url = urlreverse('ietf.meeting.views.interim_request_details', kwargs={'number': meeting.number}) length_before = len(outbox) login_testing_unauthorized(self, "ad", url) r = self.client.post(url, {'approve': 'approve'}) self.assertRedirects(r, urlreverse('ietf.meeting.views.interim_pending')) for session in meeting.session_set.all(): self.assertEqual(session.status.slug, 'scheda') self.assertEqual(len(outbox), length_before + 1) self.assertTrue('ready for announcement' in outbox[-1]['Subject']) def test_interim_approve_by_secretariat(self): make_meeting_test_data() meeting = Meeting.objects.filter(type='interim', session__status='apprw', session__group__acronym='mars').first() url = urlreverse('ietf.meeting.views.interim_request_details', kwargs={'number': meeting.number}) login_testing_unauthorized(self, "secretary", url) r = self.client.post(url, {'approve': 'approve'}) self.assertRedirects(r, urlreverse('ietf.meeting.views.interim_send_announcement', kwargs={'number': meeting.number})) for session in meeting.session_set.all(): self.assertEqual(session.status.slug, 'scheda') def test_past(self): today = datetime.date.today() last_week = today - datetime.timedelta(days=7) ietf = SessionFactory(meeting__type_id='ietf',meeting__date=last_week,group__state_id='active',group__parent=GroupFactory(state_id='active')) interim = 
SessionFactory(meeting__type_id='interim',meeting__date=last_week,status_id='canceled',group__state_id='active',group__parent=GroupFactory(state_id='active')) url = urlreverse('ietf.meeting.views.past') r = self.client.get(url) self.assertEqual(r.status_code, 200) self.assertTrue('IETF - %02d'%int(ietf.meeting.number) in unicontent(r)) q = PyQuery(r.content) id="-%s" % interim.group.acronym self.assertTrue('CANCELLED' in q('[id*="'+id+'"]').text()) def test_upcoming(self): make_meeting_test_data() url = urlreverse("ietf.meeting.views.upcoming") r = self.client.get(url) self.assertEqual(r.status_code, 200) today = datetime.date.today() mars_interim = Meeting.objects.filter(date__gt=today, type='interim', session__group__acronym='mars', session__status='sched').first() ames_interim = Meeting.objects.filter(date__gt=today, type='interim', session__group__acronym='ames', session__status='canceled').first() self.assertTrue(mars_interim.number in r.content) self.assertTrue(ames_interim.number in r.content) self.assertTrue('IETF - 42' in r.content) # cancelled session q = PyQuery(r.content) self.assertTrue('CANCELLED' in q('[id*="-ames"]').text()) self.check_interim_tabs(url) def test_upcoming_ical(self): make_meeting_test_data() url = urlreverse("ietf.meeting.views.upcoming_ical") r = self.client.get(url) self.assertEqual(r.status_code, 200) self.assertEqual(r.get('Content-Type'), "text/calendar") self.assertEqual(r.content.count('UID'), 7) # check filtered output url = url + '?filters=mars' r = self.client.get(url) self.assertEqual(r.status_code, 200) self.assertEqual(r.get('Content-Type'), "text/calendar") # print r.content self.assertEqual(r.content.count('UID'), 2) def test_interim_request_permissions(self): '''Ensure only authorized users see link to request interim meeting''' make_meeting_test_data() # test unauthorized not logged in upcoming_url = urlreverse("ietf.meeting.views.upcoming") request_url = urlreverse("ietf.meeting.views.interim_request") r = self.client.get(upcoming_url) self.assertNotContains(r,'Request new interim meeting') # test unauthorized user login_testing_unauthorized(self,"plain",request_url) r = self.client.get(upcoming_url) self.assertNotContains(r,'Request new interim meeting') r = self.client.get(request_url) self.assertEqual(r.status_code, 403) self.client.logout() # test authorized for username in ('secretary','ad','marschairman','irtf-chair','irgchairman'): self.client.login(username=username, password= username + "+password") r = self.client.get(upcoming_url) self.assertContains(r,'Request new interim meeting') r = self.client.get(request_url) self.assertEqual(r.status_code, 200) self.client.logout() def test_interim_request_options(self): make_meeting_test_data() # secretariat can request for any group self.client.login(username="secretary", password="secretary+password") r = self.client.get("/meeting/interim/request/") self.assertEqual(r.status_code, 200) q = PyQuery(r.content) self.assertEqual(Group.objects.filter(type__in=('wg', 'rg'), state__in=('active', 'proposed')).count(), len(q("#id_group option")) - 1) # -1 for options placeholder self.client.logout() # wg chair self.client.login(username="marschairman", password="marschairman+password") r = self.client.get("/meeting/interim/request/") self.assertEqual(r.status_code, 200) q = PyQuery(r.content) user = User.objects.get(username='marschairman') person = user.person count = person.role_set.filter(name='chair',group__type__in=('wg', 'rg'), group__state__in=('active', 'proposed')).count() 
self.assertEqual(count, len(q("#id_group option")) - 1) # -1 for options placeholder # wg AND rg chair group = Group.objects.get(acronym='irg') Role.objects.create(name_id='chair',group=group,person=person,email=person.email()) r = self.client.get("/meeting/interim/request/") self.assertEqual(r.status_code, 200) q = PyQuery(r.content) count = person.role_set.filter(name='chair',group__type__in=('wg', 'rg'), group__state__in=('active', 'proposed')).count() self.assertEqual(count, len(q("#id_group option")) - 1) # -1 for options placeholder def test_interim_request_single_virtual(self): make_meeting_test_data() group = Group.objects.get(acronym='mars') date = datetime.date.today() + datetime.timedelta(days=30) time = datetime.datetime.now().time().replace(microsecond=0,second=0) dt = datetime.datetime.combine(date, time) duration = datetime.timedelta(hours=3) remote_instructions = 'Use webex' agenda = 'Intro. Slides. Discuss.' agenda_note = 'On second level' length_before = len(outbox) meeting_count = Meeting.objects.filter(number__contains='-%s-'%group.acronym, date__year=date.year).count() next_num = "%02d" % (meeting_count+1) self.client.login(username="marschairman", password="marschairman+password") data = {'group':group.pk, 'meeting_type':'single', 'city':'', 'country':'', 'time_zone':'UTC', 'session_set-0-date':date.strftime("%Y-%m-%d"), 'session_set-0-time':time.strftime('%H:%M'), 'session_set-0-requested_duration':'03:00:00', 'session_set-0-remote_instructions':remote_instructions, 'session_set-0-agenda':agenda, 'session_set-0-agenda_note':agenda_note, 'session_set-TOTAL_FORMS':1, 'session_set-INITIAL_FORMS':0, 'session_set-MIN_NUM_FORMS':0, 'session_set-MAX_NUM_FORMS':1000} r = self.client.post(urlreverse("ietf.meeting.views.interim_request"),data) self.assertRedirects(r,urlreverse('ietf.meeting.views.upcoming')) meeting = Meeting.objects.order_by('id').last() self.assertEqual(meeting.type_id,'interim') self.assertEqual(meeting.date,date) self.assertEqual(meeting.number,'interim-%s-%s-%s' % (date.year, group.acronym, next_num)) self.assertEqual(meeting.city,'') self.assertEqual(meeting.country,'') self.assertEqual(meeting.time_zone,'UTC') session = meeting.session_set.first() self.assertEqual(session.remote_instructions,remote_instructions) self.assertEqual(session.agenda_note,agenda_note) self.assertEqual(session.status.slug,'scheda') timeslot = session.official_timeslotassignment().timeslot self.assertEqual(timeslot.time,dt) self.assertEqual(timeslot.duration,duration) # ensure agenda document was created self.assertEqual(session.materials.count(),1) doc = session.materials.first() path = os.path.join(doc.get_file_path(),doc.filename_with_rev()) self.assertTrue(os.path.exists(path)) # check notice to secretariat self.assertEqual(len(outbox), length_before + 1) self.assertTrue('interim meeting ready for announcement' in outbox[-1]['Subject']) self.assertTrue('iesg-secretary@ietf.org' in outbox[-1]['To']) def test_interim_request_single_in_person(self): make_meeting_test_data() group = Group.objects.get(acronym='mars') date = datetime.date.today() + datetime.timedelta(days=30) time = datetime.datetime.now().time().replace(microsecond=0,second=0) dt = datetime.datetime.combine(date, time) duration = datetime.timedelta(hours=3) city = 'San Francisco' country = 'US' time_zone = 'US/Pacific' remote_instructions = 'Use webex' agenda = 'Intro. Slides. Discuss.' 
agenda_note = 'On second level' meeting_count = Meeting.objects.filter(number__contains='-%s-'%group.acronym, date__year=date.year).count() next_num = "%02d" % (meeting_count+1) self.client.login(username="secretary", password="secretary+password") data = {'group':group.pk, 'meeting_type':'single', 'city':city, 'country':country, 'time_zone':time_zone, 'session_set-0-date':date.strftime("%Y-%m-%d"), 'session_set-0-time':time.strftime('%H:%M'), 'session_set-0-requested_duration':'03:00:00', 'session_set-0-remote_instructions':remote_instructions, 'session_set-0-agenda':agenda, 'session_set-0-agenda_note':agenda_note, 'session_set-TOTAL_FORMS':1, 'session_set-INITIAL_FORMS':0} r = self.client.post(urlreverse("ietf.meeting.views.interim_request"),data) self.assertRedirects(r,urlreverse('ietf.meeting.views.upcoming')) meeting = Meeting.objects.order_by('id').last() self.assertEqual(meeting.type_id,'interim') self.assertEqual(meeting.date,date) self.assertEqual(meeting.number,'interim-%s-%s-%s' % (date.year, group.acronym, next_num)) self.assertEqual(meeting.city,city) self.assertEqual(meeting.country,country) self.assertEqual(meeting.time_zone,time_zone) session = meeting.session_set.first() self.assertEqual(session.remote_instructions,remote_instructions) self.assertEqual(session.agenda_note,agenda_note) timeslot = session.official_timeslotassignment().timeslot self.assertEqual(timeslot.time,dt) self.assertEqual(timeslot.duration,duration) def test_interim_request_multi_day(self): make_meeting_test_data() date = datetime.date.today() + datetime.timedelta(days=30) date2 = date + datetime.timedelta(days=1) time = datetime.datetime.now().time().replace(microsecond=0,second=0) dt = datetime.datetime.combine(date, time) dt2 = datetime.datetime.combine(date2, time) duration = datetime.timedelta(hours=3) group = Group.objects.get(acronym='mars') city = 'San Francisco' country = 'US' time_zone = 'US/Pacific' remote_instructions = 'Use webex' agenda = 'Intro. Slides. Discuss.' 
agenda_note = 'On second level' meeting_count = Meeting.objects.filter(number__contains='-%s-'%group.acronym, date__year=date.year).count() next_num = "%02d" % (meeting_count+1) self.client.login(username="secretary", password="secretary+password") data = {'group':group.pk, 'meeting_type':'multi-day', 'city':city, 'country':country, 'time_zone':time_zone, 'session_set-0-date':date.strftime("%Y-%m-%d"), 'session_set-0-time':time.strftime('%H:%M'), 'session_set-0-requested_duration':'03:00:00', 'session_set-0-remote_instructions':remote_instructions, 'session_set-0-agenda':agenda, 'session_set-0-agenda_note':agenda_note, 'session_set-1-date':date2.strftime("%Y-%m-%d"), 'session_set-1-time':time.strftime('%H:%M'), 'session_set-1-requested_duration':'03:00:00', 'session_set-1-remote_instructions':remote_instructions, 'session_set-1-agenda':agenda, 'session_set-1-agenda_note':agenda_note, 'session_set-TOTAL_FORMS':2, 'session_set-INITIAL_FORMS':0} r = self.client.post(urlreverse("ietf.meeting.views.interim_request"),data) self.assertRedirects(r,urlreverse('ietf.meeting.views.upcoming')) meeting = Meeting.objects.order_by('id').last() self.assertEqual(meeting.type_id,'interim') self.assertEqual(meeting.date,date) self.assertEqual(meeting.number,'interim-%s-%s-%s' % (date.year, group.acronym, next_num)) self.assertEqual(meeting.city,city) self.assertEqual(meeting.country,country) self.assertEqual(meeting.time_zone,time_zone) self.assertEqual(meeting.session_set.count(),2) # first sesstion session = meeting.session_set.all()[0] self.assertEqual(session.remote_instructions,remote_instructions) timeslot = session.official_timeslotassignment().timeslot self.assertEqual(timeslot.time,dt) self.assertEqual(timeslot.duration,duration) self.assertEqual(session.agenda_note,agenda_note) # second sesstion session = meeting.session_set.all()[1] self.assertEqual(session.remote_instructions,remote_instructions) timeslot = session.official_timeslotassignment().timeslot self.assertEqual(timeslot.time,dt2) self.assertEqual(timeslot.duration,duration) self.assertEqual(session.agenda_note,agenda_note) def test_interim_request_multi_day_non_consecutive(self): make_meeting_test_data() date = datetime.date.today() + datetime.timedelta(days=30) date2 = date + datetime.timedelta(days=2) time = datetime.datetime.now().time().replace(microsecond=0,second=0) group = Group.objects.get(acronym='mars') city = 'San Francisco' country = 'US' time_zone = 'US/Pacific' remote_instructions = 'Use webex' agenda = 'Intro. Slides. Discuss.' 
agenda_note = 'On second level' self.client.login(username="secretary", password="secretary+password") data = {'group':group.pk, 'meeting_type':'multi-day', 'city':city, 'country':country, 'time_zone':time_zone, 'session_set-0-date':date.strftime("%Y-%m-%d"), 'session_set-0-time':time.strftime('%H:%M'), 'session_set-0-requested_duration':'03:00:00', 'session_set-0-remote_instructions':remote_instructions, 'session_set-0-agenda':agenda, 'session_set-0-agenda_note':agenda_note, 'session_set-1-date':date2.strftime("%Y-%m-%d"), 'session_set-1-time':time.strftime('%H:%M'), 'session_set-1-requested_duration':'03:00:00', 'session_set-1-remote_instructions':remote_instructions, 'session_set-1-agenda':agenda, 'session_set-1-agenda_note':agenda_note, 'session_set-TOTAL_FORMS':2, 'session_set-INITIAL_FORMS':0} r = self.client.post(urlreverse("ietf.meeting.views.interim_request"),data) self.assertEqual(r.status_code, 200) self.assertTrue('days must be consecutive' in r.content) def test_interim_request_series(self): make_meeting_test_data() meeting_count_before = Meeting.objects.filter(type='interim').count() date = datetime.date.today() + datetime.timedelta(days=30) date2 = date + datetime.timedelta(days=1) time = datetime.datetime.now().time().replace(microsecond=0,second=0) dt = datetime.datetime.combine(date, time) dt2 = datetime.datetime.combine(date2, time) duration = datetime.timedelta(hours=3) group = Group.objects.get(acronym='mars') city = '' country = '' time_zone = 'US/Pacific' remote_instructions = 'Use webex' agenda = 'Intro. Slides. Discuss.' agenda_note = 'On second level' meeting_count = Meeting.objects.filter(number__contains='-%s-'%group.acronym, date__year=date.year).count() next_num = "%02d" % (meeting_count+1) next_num2 = "%02d" % (meeting_count+2) self.client.login(username="secretary", password="secretary+password") r = self.client.get(urlreverse("ietf.meeting.views.interim_request")) self.assertEqual(r.status_code, 200) data = {'group':group.pk, 'meeting_type':'series', 'city':city, 'country':country, 'time_zone':time_zone, 'session_set-0-date':date.strftime("%Y-%m-%d"), 'session_set-0-time':time.strftime('%H:%M'), 'session_set-0-requested_duration':'03:00:00', 'session_set-0-remote_instructions':remote_instructions, 'session_set-0-agenda':agenda, 'session_set-0-agenda_note':agenda_note, 'session_set-1-date':date2.strftime("%Y-%m-%d"), 'session_set-1-time':time.strftime('%H:%M'), 'session_set-1-requested_duration':'03:00:00', 'session_set-1-remote_instructions':remote_instructions, 'session_set-1-agenda':agenda, 'session_set-1-agenda_note':agenda_note, 'session_set-TOTAL_FORMS':2, 'session_set-INITIAL_FORMS':0} r = self.client.post(urlreverse("ietf.meeting.views.interim_request"),data) self.assertRedirects(r,urlreverse('ietf.meeting.views.upcoming')) meeting_count_after = Meeting.objects.filter(type='interim').count() self.assertEqual(meeting_count_after,meeting_count_before + 2) meetings = Meeting.objects.order_by('-id')[:2] # first meeting meeting = meetings[1] self.assertEqual(meeting.type_id,'interim') self.assertEqual(meeting.date,date) self.assertEqual(meeting.number,'interim-%s-%s-%s' % (date.year, group.acronym, next_num)) self.assertEqual(meeting.city,city) self.assertEqual(meeting.country,country) self.assertEqual(meeting.time_zone,time_zone) self.assertEqual(meeting.session_set.count(),1) session = meeting.session_set.first() self.assertEqual(session.remote_instructions,remote_instructions) timeslot = session.official_timeslotassignment().timeslot 
self.assertEqual(timeslot.time,dt) self.assertEqual(timeslot.duration,duration) self.assertEqual(session.agenda_note,agenda_note) # second meeting meeting = meetings[0] self.assertEqual(meeting.type_id,'interim') self.assertEqual(meeting.date,date2) self.assertEqual(meeting.number,'interim-%s-%s-%s' % (date2.year, group.acronym, next_num2)) self.assertEqual(meeting.city,city) self.assertEqual(meeting.country,country) self.assertEqual(meeting.time_zone,time_zone) self.assertEqual(meeting.session_set.count(),1) session = meeting.session_set.first() self.assertEqual(session.remote_instructions,remote_instructions) timeslot = session.official_timeslotassignment().timeslot self.assertEqual(timeslot.time,dt2) self.assertEqual(timeslot.duration,duration) self.assertEqual(session.agenda_note,agenda_note) def test_interim_pending(self): make_meeting_test_data() url = urlreverse('ietf.meeting.views.interim_pending') count = Meeting.objects.filter(type='interim',session__status='apprw').distinct().count() # unprivileged user login_testing_unauthorized(self,"plain",url) r = self.client.get(url) self.assertEqual(r.status_code, 403) # secretariat login_testing_unauthorized(self,"secretary",url) r = self.client.get(url) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) self.assertEqual(len(q("#pending-interim-meetings-table tr"))-1, count) self.client.logout() def test_can_approve_interim_request(self): make_meeting_test_data() # unprivileged user user = User.objects.get(username='plain') group = Group.objects.get(acronym='mars') meeting = Meeting.objects.filter(type='interim',session__status='apprw',session__group=group).first() self.assertFalse(can_approve_interim_request(meeting=meeting,user=user)) # Secretariat user = User.objects.get(username='secretary') self.assertTrue(can_approve_interim_request(meeting=meeting,user=user)) # related AD user = User.objects.get(username='ad') self.assertTrue(can_approve_interim_request(meeting=meeting,user=user)) # other AD user = User.objects.get(username='ops-ad') self.assertFalse(can_approve_interim_request(meeting=meeting,user=user)) # WG Chair user = User.objects.get(username='marschairman') self.assertFalse(can_approve_interim_request(meeting=meeting,user=user)) def test_can_view_interim_request(self): make_meeting_test_data() # unprivileged user user = User.objects.get(username='plain') group = Group.objects.get(acronym='mars') meeting = Meeting.objects.filter(type='interim',session__status='apprw',session__group=group).first() self.assertFalse(can_view_interim_request(meeting=meeting,user=user)) # Secretariat user = User.objects.get(username='secretary') self.assertTrue(can_view_interim_request(meeting=meeting,user=user)) # related AD user = User.objects.get(username='ad') self.assertTrue(can_view_interim_request(meeting=meeting,user=user)) # other AD user = User.objects.get(username='ops-ad') self.assertTrue(can_view_interim_request(meeting=meeting,user=user)) # WG Chair user = User.objects.get(username='marschairman') self.assertTrue(can_view_interim_request(meeting=meeting,user=user)) # Other WG Chair user = User.objects.get(username='ameschairman') self.assertFalse(can_view_interim_request(meeting=meeting,user=user)) def test_interim_request_details(self): make_meeting_test_data() meeting = Meeting.objects.filter(type='interim',session__status='apprw',session__group__acronym='mars').first() url = urlreverse('ietf.meeting.views.interim_request_details',kwargs={'number':meeting.number}) login_testing_unauthorized(self,"secretary",url) r = 
self.client.get(url) self.assertEqual(r.status_code, 200) def test_interim_request_details_announcement(self): '''Test access to Announce / Skip Announce features''' make_meeting_test_data() date = datetime.date.today() + datetime.timedelta(days=30) group = Group.objects.get(acronym='mars') meeting = make_interim_meeting(group=group, date=date, status='scheda') url = urlreverse('ietf.meeting.views.interim_request_details',kwargs={'number':meeting.number}) # Chair, no access self.client.login(username="marschairman", password="marschairman+password") r = self.client.get(url) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) self.assertEqual(len(q("a.btn:contains('Announce')")),0) # Secretariat has access self.client.login(username="secretary", password="secretary+password") r = self.client.get(url) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) self.assertEqual(len(q("a.btn:contains('Announce')")),2) def test_interim_request_disapprove(self): make_meeting_test_data() meeting = Meeting.objects.filter(type='interim',session__status='apprw',session__group__acronym='mars').first() url = urlreverse('ietf.meeting.views.interim_request_details',kwargs={'number':meeting.number}) login_testing_unauthorized(self,"secretary",url) r = self.client.post(url,{'disapprove':'Disapprove'}) self.assertRedirects(r, urlreverse('ietf.meeting.views.interim_pending')) for session in meeting.session_set.all(): self.assertEqual(session.status_id,'disappr') def test_interim_request_cancel(self): make_meeting_test_data() meeting = Meeting.objects.filter(type='interim', session__status='apprw', session__group__acronym='mars').first() url = urlreverse('ietf.meeting.views.interim_request_details', kwargs={'number': meeting.number}) # ensure no cancel button for unauthorized user self.client.login(username="ameschairman", password="ameschairman+password") r = self.client.get(url) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) self.assertEqual(len(q("a.btn:contains('Cancel')")), 0) # ensure cancel button for authorized user self.client.login(username="marschairman", password="marschairman+password") r = self.client.get(url) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) self.assertEqual(len(q("a.btn:contains('Cancel')")), 1) # ensure fail unauthorized url = urlreverse('ietf.meeting.views.interim_request_cancel', kwargs={'number': meeting.number}) comments = 'Bob cannot make it' self.client.login(username="ameschairman", password="ameschairman+password") r = self.client.post(url, {'comments': comments}) self.assertEqual(r.status_code, 403) # test cancelling before announcement self.client.login(username="marschairman", password="marschairman+password") length_before = len(outbox) r = self.client.post(url, {'comments': comments}) self.assertRedirects(r, urlreverse('ietf.meeting.views.upcoming')) for session in meeting.session_set.all(): self.assertEqual(session.status_id, 'canceledpa') self.assertEqual(session.agenda_note, comments) self.assertEqual(len(outbox), length_before) # no email notice # test cancelling after announcement meeting = Meeting.objects.filter(type='interim', session__status='sched', session__group__acronym='mars').first() url = urlreverse('ietf.meeting.views.interim_request_cancel', kwargs={'number': meeting.number}) r = self.client.post(url, {'comments': comments}) self.assertRedirects(r, urlreverse('ietf.meeting.views.upcoming')) for session in meeting.session_set.all(): self.assertEqual(session.status_id, 'canceled') 
self.assertEqual(session.agenda_note, comments) self.assertEqual(len(outbox), length_before + 1) self.assertTrue('Interim Meeting Cancelled' in outbox[-1]['Subject']) def test_interim_request_edit_no_notice(self): '''Edit a request. No notice should go out if it hasn't been announced yet''' make_meeting_test_data() meeting = Meeting.objects.filter(type='interim', session__status='apprw', session__group__acronym='mars').first() group = meeting.session_set.first().group url = urlreverse('ietf.meeting.views.interim_request_edit', kwargs={'number': meeting.number}) # test unauthorized access self.client.login(username="ameschairman", password="ameschairman+password") r = self.client.get(url) self.assertEqual(r.status_code, 403) # test authorized use login_testing_unauthorized(self, "secretary", url) r = self.client.get(url) self.assertEqual(r.status_code, 200) # post changes length_before = len(outbox) form_initial = r.context['form'].initial formset_initial = r.context['formset'].forms[0].initial new_time = formset_initial['time'] + datetime.timedelta(hours=1) data = {'group':group.pk, 'meeting_type':'single', 'session_set-0-id':meeting.session_set.first().id, 'session_set-0-date':formset_initial['date'].strftime('%Y-%m-%d'), 'session_set-0-time':new_time.strftime('%H:%M'), 'session_set-0-requested_duration':formset_initial['requested_duration'], 'session_set-0-remote_instructions':formset_initial['remote_instructions'], #'session_set-0-agenda':formset_initial['agenda'], 'session_set-0-agenda_note':formset_initial['agenda_note'], 'session_set-TOTAL_FORMS':1, 'session_set-INITIAL_FORMS':1} data.update(form_initial) r = self.client.post(url, data) self.assertRedirects(r, urlreverse('ietf.meeting.views.interim_request_details', kwargs={'number': meeting.number})) self.assertEqual(len(outbox),length_before) session = meeting.session_set.first() timeslot = session.official_timeslotassignment().timeslot self.assertEqual(timeslot.time,new_time) def test_interim_request_edit(self): '''Edit request. 
Send notice of change''' make_meeting_test_data() meeting = Meeting.objects.filter(type='interim', session__status='sched', session__group__acronym='mars').first() group = meeting.session_set.first().group url = urlreverse('ietf.meeting.views.interim_request_edit', kwargs={'number': meeting.number}) # test unauthorized access self.client.login(username="ameschairman", password="ameschairman+password") r = self.client.get(url) self.assertEqual(r.status_code, 403) # test authorized use login_testing_unauthorized(self, "secretary", url) r = self.client.get(url) self.assertEqual(r.status_code, 200) # post changes length_before = len(outbox) form_initial = r.context['form'].initial formset_initial = r.context['formset'].forms[0].initial new_time = formset_initial['time'] + datetime.timedelta(hours=1) new_duration = formset_initial['requested_duration'] + datetime.timedelta(hours=1) data = {'group':group.pk, 'meeting_type':'single', 'session_set-0-id':meeting.session_set.first().id, 'session_set-0-date':formset_initial['date'].strftime('%Y-%m-%d'), 'session_set-0-time':new_time.strftime('%H:%M'), 'session_set-0-requested_duration':self.strfdelta(new_duration, '{hours}:{minutes}'), 'session_set-0-remote_instructions':formset_initial['remote_instructions'], #'session_set-0-agenda':formset_initial['agenda'], 'session_set-0-agenda_note':formset_initial['agenda_note'], 'session_set-TOTAL_FORMS':1, 'session_set-INITIAL_FORMS':1} data.update(form_initial) r = self.client.post(url, data) self.assertRedirects(r, urlreverse('ietf.meeting.views.interim_request_details', kwargs={'number': meeting.number})) self.assertEqual(len(outbox),length_before+1) self.assertTrue('CHANGED' in outbox[-1]['Subject']) session = meeting.session_set.first() timeslot = session.official_timeslotassignment().timeslot self.assertEqual(timeslot.time,new_time) self.assertEqual(timeslot.duration,new_duration) def strfdelta(self, tdelta, fmt): d = {"days": tdelta.days} d["hours"], rem = divmod(tdelta.seconds, 3600) d["minutes"], d["seconds"] = divmod(rem, 60) return fmt.format(**d) def test_interim_request_details_permissions(self): make_meeting_test_data() meeting = Meeting.objects.filter(type='interim',session__status='apprw',session__group__acronym='mars').first() url = urlreverse('ietf.meeting.views.interim_request_details',kwargs={'number':meeting.number}) # unprivileged user login_testing_unauthorized(self,"plain",url) r = self.client.get(url) self.assertEqual(r.status_code, 403) def test_send_interim_approval_request(self): make_meeting_test_data() meeting = Meeting.objects.filter(type='interim',session__status='apprw',session__group__acronym='mars').first() length_before = len(outbox) send_interim_approval_request(meetings=[meeting]) self.assertEqual(len(outbox),length_before+1) self.assertTrue('New Interim Meeting Request' in outbox[-1]['Subject']) def test_send_interim_cancellation_notice(self): make_meeting_test_data() meeting = Meeting.objects.filter(type='interim',session__status='sched',session__group__acronym='mars').first() length_before = len(outbox) send_interim_cancellation_notice(meeting=meeting) self.assertEqual(len(outbox),length_before+1) self.assertTrue('Interim Meeting Cancelled' in outbox[-1]['Subject']) def test_send_interim_minutes_reminder(self): make_meeting_test_data() group = Group.objects.get(acronym='mars') date = datetime.datetime.today() - datetime.timedelta(days=10) meeting = make_interim_meeting(group=group, date=date, status='sched') length_before = len(outbox) 
send_interim_minutes_reminder(meeting=meeting) self.assertEqual(len(outbox),length_before+1) self.assertTrue('Action Required: Minutes' in outbox[-1]['Subject']) class AjaxTests(TestCase): def test_ajax_get_utc(self): # test bad queries url = urlreverse('ietf.meeting.views.ajax_get_utc') + "?date=2016-1-1&time=badtime&timezone=UTC" r = self.client.get(url) self.assertEqual(r.status_code, 200) data = json.loads(r.content) self.assertEqual(data["error"], True) url = urlreverse('ietf.meeting.views.ajax_get_utc') + "?date=2016-1-1&time=25:99&timezone=UTC" r = self.client.get(url) self.assertEqual(r.status_code, 200) data = json.loads(r.content) self.assertEqual(data["error"], True) url = urlreverse('ietf.meeting.views.ajax_get_utc') + "?date=2016-1-1&time=10:00am&timezone=UTC" r = self.client.get(url) self.assertEqual(r.status_code, 200) data = json.loads(r.content) self.assertEqual(data["error"], True) # test good query url = urlreverse('ietf.meeting.views.ajax_get_utc') + "?date=2016-1-1&time=12:00&timezone=US/Pacific" r = self.client.get(url) self.assertEqual(r.status_code, 200) data = json.loads(r.content) self.assertTrue('timezone' in data) self.assertTrue('time' in data) self.assertTrue('utc' in data) self.assertTrue('error' not in data) self.assertEqual(data['utc'], '20:00') class FloorPlanTests(TestCase): def setUp(self): pass def tearDown(self): pass def test_floor_plan_page(self): make_meeting_test_data() meeting = Meeting.objects.filter(type_id='ietf').order_by('id').last() floorplan = FloorPlanFactory.create(meeting=meeting) url = urlreverse('ietf.meeting.views.floor_plan') r = self.client.get(url) self.assertEqual(r.status_code, 200) url = urlreverse('ietf.meeting.views.floor_plan', kwargs={'floor': xslugify(floorplan.name)} ) r = self.client.get(url) self.assertEqual(r.status_code, 200) class IphoneAppJsonTests(TestCase): def setUp(self): pass def tearDown(self): pass def test_iphone_app_json(self): make_meeting_test_data() meeting = Meeting.objects.filter(type_id='ietf').order_by('id').last() floorplan = FloorPlanFactory.create(meeting=meeting) for room in meeting.room_set.all(): room.floorplan = floorplan room.x1 = random.randint(0,100) room.y1 = random.randint(0,100) room.x2 = random.randint(0,100) room.y2 = random.randint(0,100) room.save() url = urlreverse('ietf.meeting.views.json_agenda',kwargs={'num':meeting.number}) r = self.client.get(url) self.assertEqual(r.status_code,200) class FinalizeProceedingsTests(TestCase): @patch('urllib2.urlopen') def test_finalize_proceedings(self, mock_urlopen): mock_urlopen.return_value = StringIO('[{"LastName":"Smith","FirstName":"John","Company":"ABC","Country":"US"}]') make_meeting_test_data() meeting = Meeting.objects.filter(type_id='ietf').order_by('id').last() meeting.session_set.filter(group__acronym='mars').first().sessionpresentation_set.create(document=Document.objects.filter(type='draft').first(),rev=None) url = urlreverse('ietf.meeting.views.finalize_proceedings',kwargs={'num':meeting.number}) login_testing_unauthorized(self,"secretary",url) r = self.client.get(url) self.assertEqual(r.status_code, 200) self.assertEqual(meeting.proceedings_final,False) self.assertEqual(meeting.session_set.filter(group__acronym="mars").first().sessionpresentation_set.filter(document__type="draft").first().rev,None) r = self.client.post(url,{'finalize':1}) self.assertEqual(r.status_code, 302) meeting = Meeting.objects.get(pk=meeting.pk) self.assertEqual(meeting.proceedings_final,True) 
self.assertEqual(meeting.session_set.filter(group__acronym="mars").first().sessionpresentation_set.filter(document__type="draft").first().rev,'00') class MaterialsTests(TestCase): def setUp(self): self.materials_dir = self.tempdir('materials') if not os.path.exists(self.materials_dir): os.mkdir(self.materials_dir) self.saved_agenda_path = settings.AGENDA_PATH settings.AGENDA_PATH = self.materials_dir def tearDown(self): settings.AGENDA_PATH = self.saved_agenda_path shutil.rmtree(self.materials_dir) def test_upload_bluesheets(self): session = SessionFactory(meeting__type_id='ietf') url = urlreverse('ietf.meeting.views.upload_session_bluesheets',kwargs={'num':session.meeting.number,'session_id':session.id}) login_testing_unauthorized(self,"secretary",url) r = self.client.get(url) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) self.assertTrue('Upload' in unicode(q("title"))) self.assertFalse(session.sessionpresentation_set.exists()) test_file = StringIO(b'%PDF-1.4\n%âãÏÓ\nthis is some text for a test') test_file.name = "not_really.pdf" r = self.client.post(url,dict(file=test_file)) self.assertEqual(r.status_code, 302) bs_doc = session.sessionpresentation_set.filter(document__type_id='bluesheets').first().document self.assertEqual(bs_doc.rev,'00') r = self.client.get(url) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) self.assertTrue('Revise' in unicode(q("title"))) test_file = StringIO('%PDF-1.4\n%âãÏÓ\nthis is some different text for a test') test_file.name = "also_not_really.pdf" r = self.client.post(url,dict(file=test_file)) self.assertEqual(r.status_code, 302) bs_doc = Document.objects.get(pk=bs_doc.pk) self.assertEqual(bs_doc.rev,'01') def test_upload_bluesheets_chair_access(self): make_meeting_test_data() mars = Group.objects.get(acronym='mars') session=SessionFactory(meeting__type_id='ietf',group=mars) url = urlreverse('ietf.meeting.views.upload_session_bluesheets',kwargs={'num':session.meeting.number,'session_id':session.id}) self.client.login(username="marschairman", password="marschairman+password") r = self.client.get(url) self.assertEqual(r.status_code, 403) def test_upload_bluesheets_interim(self): session=SessionFactory(meeting__type_id='interim') url = urlreverse('ietf.meeting.views.upload_session_bluesheets',kwargs={'num':session.meeting.number,'session_id':session.id}) login_testing_unauthorized(self,"secretary",url) r = self.client.get(url) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) self.assertTrue('Upload' in unicode(q("title"))) self.assertFalse(session.sessionpresentation_set.exists()) test_file = StringIO(b'%PDF-1.4\n%âãÏÓ\nthis is some text for a test') test_file.name = "not_really.pdf" r = self.client.post(url,dict(file=test_file)) self.assertEqual(r.status_code, 302) bs_doc = session.sessionpresentation_set.filter(document__type_id='bluesheets').first().document self.assertEqual(bs_doc.rev,'00') def test_upload_bluesheets_interim_chair_access(self): make_meeting_test_data() mars = Group.objects.get(acronym='mars') session=SessionFactory(meeting__type_id='interim',group=mars) url = urlreverse('ietf.meeting.views.upload_session_bluesheets',kwargs={'num':session.meeting.number,'session_id':session.id}) self.client.login(username="marschairman", password="marschairman+password") r = self.client.get(url) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) self.assertTrue('Upload' in unicode(q("title"))) def test_upload_minutes_agenda(self): for doctype in ('minutes','agenda'): session = 
SessionFactory(meeting__type_id='ietf') if doctype == 'minutes': url = urlreverse('ietf.meeting.views.upload_session_minutes',kwargs={'num':session.meeting.number,'session_id':session.id}) else: url = urlreverse('ietf.meeting.views.upload_session_agenda',kwargs={'num':session.meeting.number,'session_id':session.id}) self.client.logout() login_testing_unauthorized(self,"secretary",url) r = self.client.get(url) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) self.assertTrue('Upload' in unicode(q("Title"))) self.assertFalse(session.sessionpresentation_set.exists()) self.assertFalse(q('form input[type="checkbox"]')) session2 = SessionFactory(meeting=session.meeting,group=session.group) r = self.client.get(url) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) self.assertTrue(q('form input[type="checkbox"]')) test_file = StringIO('this is some text for a test') test_file.name = "not_really.json" r = self.client.post(url,dict(file=test_file)) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) self.assertTrue(q('form .has-error')) test_file = StringIO('this is some text for a test'*1510000) test_file.name = "not_really.pdf" r = self.client.post(url,dict(file=test_file)) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) self.assertTrue(q('form .has-error')) test_file = StringIO('<html><frameset><frame src="foo.html"></frame><frame src="bar.html"></frame></frameset></html>') test_file.name = "not_really.html" r = self.client.post(url,dict(file=test_file)) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) self.assertTrue(q('form .has-error')) test_file = StringIO('this is some text for a test') test_file.name = "not_really.txt" r = self.client.post(url,dict(file=test_file,apply_to_all=False)) self.assertEqual(r.status_code, 302) doc = session.sessionpresentation_set.filter(document__type_id=doctype).first().document self.assertEqual(doc.rev,'00') self.assertFalse(session2.sessionpresentation_set.filter(document__type_id=doctype)) r = self.client.get(url) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) self.assertTrue('Revise' in unicode(q("Title"))) test_file = StringIO('this is some different text for a test') test_file.name = "also_not_really.txt" r = self.client.post(url,dict(file=test_file,apply_to_all=True)) self.assertEqual(r.status_code, 302) doc = Document.objects.get(pk=doc.pk) self.assertEqual(doc.rev,'01') self.assertTrue(session2.sessionpresentation_set.filter(document__type_id=doctype)) def test_upload_minutes_agenda_interim(self): session=SessionFactory(meeting__type_id='interim') for doctype in ('minutes','agenda'): if doctype=='minutes': url = urlreverse('ietf.meeting.views.upload_session_minutes',kwargs={'num':session.meeting.number,'session_id':session.id}) else: url = urlreverse('ietf.meeting.views.upload_session_agenda',kwargs={'num':session.meeting.number,'session_id':session.id}) self.client.logout() login_testing_unauthorized(self,"secretary",url) r = self.client.get(url) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) self.assertTrue('Upload' in unicode(q("title"))) self.assertFalse(session.sessionpresentation_set.filter(document__type_id=doctype)) test_file = StringIO('this is some text for a test') test_file.name = "not_really.txt" r = self.client.post(url,dict(file=test_file)) self.assertEqual(r.status_code, 302) doc = session.sessionpresentation_set.filter(document__type_id=doctype).first().document self.assertEqual(doc.rev,'00') def test_upload_slides(self): session1 = 
SessionFactory(meeting__type_id='ietf') session2 = SessionFactory(meeting=session1.meeting,group=session1.group) url = urlreverse('ietf.meeting.views.upload_session_slides',kwargs={'num':session1.meeting.number,'session_id':session1.id}) login_testing_unauthorized(self,"secretary",url) r = self.client.get(url) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) self.assertTrue('Upload' in unicode(q("title"))) self.assertFalse(session1.sessionpresentation_set.filter(document__type_id='slides')) test_file = StringIO('this is not really a slide') test_file.name = 'not_really.txt' r = self.client.post(url,dict(file=test_file,title='a test slide file',apply_to_all=True)) self.assertEqual(r.status_code, 302) self.assertEqual(session1.sessionpresentation_set.count(),1) self.assertEqual(session2.sessionpresentation_set.count(),1) sp = session2.sessionpresentation_set.first() self.assertEqual(sp.document.name, 'slides-%s-%s-a-test-slide-file' % (session1.meeting.number,session1.group.acronym ) ) self.assertEqual(sp.order,1) url = urlreverse('ietf.meeting.views.upload_session_slides',kwargs={'num':session2.meeting.number,'session_id':session2.id}) test_file = StringIO('some other thing still not slidelike') test_file.name = 'also_not_really.txt' r = self.client.post(url,dict(file=test_file,title='a different slide file',apply_to_all=False)) self.assertEqual(r.status_code, 302) self.assertEqual(session1.sessionpresentation_set.count(),1) self.assertEqual(session2.sessionpresentation_set.count(),2) sp = session2.sessionpresentation_set.get(document__name__endswith='-a-different-slide-file') self.assertEqual(sp.order,2) self.assertEqual(sp.rev,u'00') self.assertEqual(sp.document.rev,u'00') url = urlreverse('ietf.meeting.views.upload_session_slides',kwargs={'num':session2.meeting.number,'session_id':session2.id,'name':session2.sessionpresentation_set.get(order=2).document.name}) r = self.client.get(url) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) self.assertTrue('Revise' in unicode(q("title"))) test_file = StringIO('new content for the second slide deck') test_file.name = 'doesnotmatter.txt' r = self.client.post(url,dict(file=test_file,title='rename the presentation',apply_to_all=False)) self.assertEqual(r.status_code, 302) self.assertEqual(session1.sessionpresentation_set.count(),1) self.assertEqual(session2.sessionpresentation_set.count(),2) sp = session2.sessionpresentation_set.get(order=2) self.assertEqual(sp.rev,u'01') self.assertEqual(sp.document.rev,u'01') def test_remove_sessionpresentation(self): session = SessionFactory(meeting__type_id='ietf') doc = DocumentFactory(type_id='slides') session.sessionpresentation_set.create(document=doc) url = urlreverse('ietf.meeting.views.remove_sessionpresentation',kwargs={'num':session.meeting.number,'session_id':session.id,'name':'no-such-doc'}) response = self.client.get(url) self.assertEqual(response.status_code, 404) url = urlreverse('ietf.meeting.views.remove_sessionpresentation',kwargs={'num':session.meeting.number,'session_id':0,'name':doc.name}) response = self.client.get(url) self.assertEqual(response.status_code, 404) url = urlreverse('ietf.meeting.views.remove_sessionpresentation',kwargs={'num':session.meeting.number,'session_id':session.id,'name':doc.name}) login_testing_unauthorized(self,"secretary",url) response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertEqual(1,session.sessionpresentation_set.count()) response = self.client.post(url,{'remove_session':''}) 
self.assertEqual(response.status_code, 302) self.assertEqual(0,session.sessionpresentation_set.count()) self.assertEqual(2,doc.docevent_set.count())
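The POST bodies throughout these tests all follow Django's inline-formset convention: a management form (`session_set-TOTAL_FORMS`, `session_set-INITIAL_FORMS`) plus per-form fields keyed `session_set-<index>-<field>`. A minimal standalone sketch of that validation path, with a hypothetical SessionForm standing in for the datatracker's real form classes:

# Minimal sketch of the formset keys the tests above POST.
# SessionForm is hypothetical; only the key layout matches the tests.
import django
from django.conf import settings

settings.configure()   # bare settings so this runs outside a project
django.setup()

from django import forms
from django.forms import formset_factory

class SessionForm(forms.Form):
    date = forms.DateField()
    requested_duration = forms.DurationField()

SessionFormSet = formset_factory(SessionForm, extra=0)

data = {
    'session_set-TOTAL_FORMS': '2',     # management form: without these two
    'session_set-INITIAL_FORMS': '0',   # keys the whole POST is rejected
    'session_set-0-date': '2016-01-01',
    'session_set-0-requested_duration': '03:00:00',
    'session_set-1-date': '2016-01-02',
    'session_set-1-requested_duration': '03:00:00',
}
formset = SessionFormSet(data, prefix='session_set')
assert formset.is_valid()
print(formset.cleaned_data)   # two dicts with parsed date/timedelta values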
51.14016
199
0.66013
10,833
89,393
5.284316
0.059356
0.06446
0.02671
0.048598
0.830186
0.793851
0.754669
0.730841
0.706175
0.694087
0
0.011923
0.209088
89,393
1,747
200
51.169433
0.797743
0.030047
0
0.665278
0
0.001389
0.158112
0.07536
0
0
0
0.001145
0.293056
0
null
null
0.025694
0.024306
null
null
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
7
766d4055b66af4876a626bf33f4adab848d23a3d
82
py
Python
src/UnitTests/TestData/Grammar/FStringEquals.py
shayash22/python-language-server
b7982ca4889c40a06fd8ec8a557c985a49bc663f
[ "Apache-2.0" ]
695
2019-05-06T23:49:37.000Z
2022-03-30T01:56:00.000Z
src/UnitTests/TestData/Grammar/FStringEquals.py
shayash22/python-language-server
b7982ca4889c40a06fd8ec8a557c985a49bc663f
[ "Apache-2.0" ]
1,043
2019-05-07T02:24:11.000Z
2022-03-31T22:21:24.000Z
src/UnitTests/TestData/Grammar/FStringEquals.py
shayash22/python-language-server
b7982ca4889c40a06fd8ec8a557c985a49bc663f
[ "Apache-2.0" ]
131
2019-05-09T15:34:23.000Z
2022-03-23T17:52:35.000Z
f"{name=}" f"{name =}" f"{name = }" f"{foo.bar()=}" f'{user=!s} {delta.days=:,d}'
16.4
30
0.463415
15
82
2.533333
0.6
0.394737
0.473684
0.526316
0.421053
0
0
0
0
0
0
0
0.109756
82
5
30
16.4
0.520548
0
0
0.6
0
0
0.759036
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
7
76bdebebb2ab8dbae9dc06d832631594c8b04b2c
23,101
py
Python
swagger_client/api/notification_rules_api.py
chbndrhnns/finapi-client
259beda8b05e912c49d2dc4c3ed71205134e5d8a
[ "MIT" ]
2
2019-04-15T05:58:21.000Z
2021-11-15T18:26:37.000Z
swagger_client/api/notification_rules_api.py
chbndrhnns/finapi-client
259beda8b05e912c49d2dc4c3ed71205134e5d8a
[ "MIT" ]
1
2021-06-18T09:46:25.000Z
2021-06-18T20:12:41.000Z
swagger_client/api/notification_rules_api.py
chbndrhnns/finapi-client
259beda8b05e912c49d2dc4c3ed71205134e5d8a
[ "MIT" ]
2
2019-07-08T13:41:09.000Z
2020-12-07T12:10:04.000Z
# coding: utf-8 """ finAPI RESTful Services finAPI RESTful Services # noqa: E501 OpenAPI spec version: v1.42.1 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import re # noqa: F401 # python 2 and python 3 compatibility library import six from swagger_client.api_client import ApiClient class NotificationRulesApi(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_notification_rule(self, body, **kwargs): # noqa: E501 """Create a new notification rule # noqa: E501 Create a new notification rule for a specific user. Must pass the user's access_token.<br/><br/>Setting up notification rules for a user allows your client application to get notified about changes in the user's data, e.g. when new transactions were downloaded, an account's balance has changed, or the user's banking credentials are no longer correct. Note that currently, this feature is implemented only for finAPI's automatic batch update, i.e. notification rules are only relevant when the user has activated the automatic updates (and when the automatic batch update is activated in general for your client).<br/><br/>There are different kinds of notification rules. The kind of a rule is depicted by the 'triggerEvent'. The trigger event specifies what data you have to pass when creating a rule (specifically, the contents of the 'params' field), on which events finAPI will send notifications to your client application, as well as what data is contained in those notifications. The specifics of the different trigger events are documented in the following article on our Dev Portal: <a href='https://finapi.zendesk.com/hc/en-us/articles/232324608-How-to-create-notification-rules-and-receive-notifications'>How to create notification rules and receive notifications</a> # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_notification_rule(body, async=True) >>> result = thread.get() :param async bool :param NotificationRuleParams body: Notification rule parameters (required) :return: NotificationRule If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.create_notification_rule_with_http_info(body, **kwargs) # noqa: E501 else: (data) = self.create_notification_rule_with_http_info(body, **kwargs) # noqa: E501 return data def create_notification_rule_with_http_info(self, body, **kwargs): # noqa: E501 """Create a new notification rule # noqa: E501 Create a new notification rule for a specific user. Must pass the user's access_token.<br/><br/>Setting up notification rules for a user allows your client application to get notified about changes in the user's data, e.g. when new transactions were downloaded, an account's balance has changed, or the user's banking credentials are no longer correct. Note that currently, this feature is implemented only for finAPI's automatic batch update, i.e. notification rules are only relevant when the user has activated the automatic updates (and when the automatic batch update is activated in general for your client).<br/><br/>There are different kinds of notification rules. 
The kind of a rule is depicted by the 'triggerEvent'. The trigger event specifies what data you have to pass when creating a rule (specifically, the contents of the 'params' field), on which events finAPI will send notifications to your client application, as well as what data is contained in those notifications. The specifics of the different trigger events are documented in the following article on our Dev Portal: <a href='https://finapi.zendesk.com/hc/en-us/articles/232324608-How-to-create-notification-rules-and-receive-notifications'>How to create notification rules and receive notifications</a> # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_notification_rule_with_http_info(body, async=True) >>> result = thread.get() :param async bool :param NotificationRuleParams body: Notification rule parameters (required) :return: NotificationRule If the method is called asynchronously, returns the request thread. """ all_params = ['body'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_notification_rule" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'body' is set if ('body' not in params or params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_notification_rule`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # Authentication setting auth_settings = ['finapi_auth'] # noqa: E501 return self.api_client.call_api( '/api/v1/notificationRules', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='NotificationRule', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_all_notification_rules(self, **kwargs): # noqa: E501 """Delete all notification rules # noqa: E501 Delete all notification rules of the user that is authorized by the access_token. Must pass the user's access_token. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_all_notification_rules(async=True) >>> result = thread.get() :param async bool :return: IdentifierList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.delete_all_notification_rules_with_http_info(**kwargs) # noqa: E501 else: (data) = self.delete_all_notification_rules_with_http_info(**kwargs) # noqa: E501 return data def delete_all_notification_rules_with_http_info(self, **kwargs): # noqa: E501 """Delete all notification rules # noqa: E501 Delete all notification rules of the user that is authorized by the access_token. Must pass the user's access_token. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_all_notification_rules_with_http_info(async=True) >>> result = thread.get() :param async bool :return: IdentifierList If the method is called asynchronously, returns the request thread. """ all_params = [] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_all_notification_rules" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = ['finapi_auth'] # noqa: E501 return self.api_client.call_api( '/api/v1/notificationRules', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='IdentifierList', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_notification_rule(self, id, **kwargs): # noqa: E501 """Delete a notification rule # noqa: E501 Delete a single notification rule of the user that is authorized by the access_token. Must pass the notification rule's identifier and the user's access_token. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_notification_rule(id, async=True) >>> result = thread.get() :param async bool :param int id: Identifier of the notification rule to delete (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.delete_notification_rule_with_http_info(id, **kwargs) # noqa: E501 else: (data) = self.delete_notification_rule_with_http_info(id, **kwargs) # noqa: E501 return data def delete_notification_rule_with_http_info(self, id, **kwargs): # noqa: E501 """Delete a notification rule # noqa: E501 Delete a single notification rule of the user that is authorized by the access_token. Must pass the notification rule's identifier and the user's access_token. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_notification_rule_with_http_info(id, async=True) >>> result = thread.get() :param async bool :param int id: Identifier of the notification rule to delete (required) :return: None If the method is called asynchronously, returns the request thread. 
""" all_params = ['id'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_notification_rule" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'id' is set if ('id' not in params or params['id'] is None): raise ValueError("Missing the required parameter `id` when calling `delete_notification_rule`") # noqa: E501 if 'id' in params and not re.search('[\\d]+', params['id']): # noqa: E501 raise ValueError("Invalid value for parameter `id` when calling `delete_notification_rule`, must conform to the pattern `/[\\d]+/`") # noqa: E501 collection_formats = {} path_params = {} if 'id' in params: path_params['id'] = params['id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = ['finapi_auth'] # noqa: E501 return self.api_client.call_api( '/api/v1/notificationRules/{id}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_and_search_all_notification_rules(self, **kwargs): # noqa: E501 """Get and search all notification rules # noqa: E501 Get notification rules of the user that is authorized by the access_token. Must pass the user's access_token. You can set optional search criteria to get only those notification rules that you are interested in. If you do not specify any search criteria, then this service functions as a 'get all' service. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_and_search_all_notification_rules(async=True) >>> result = thread.get() :param async bool :param list[int] ids: A comma-separated list of notification rule identifiers. If specified, then only notification rules whose identifier match any of the given identifiers will be regarded. The maximum number of identifiers is 1000. :param str trigger_event: If specified, then only notification rules with given trigger event will be regarded. :param bool include_details: If specified, then only notification rules that include or not include details will be regarded. :return: NotificationRuleList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_and_search_all_notification_rules_with_http_info(**kwargs) # noqa: E501 else: (data) = self.get_and_search_all_notification_rules_with_http_info(**kwargs) # noqa: E501 return data def get_and_search_all_notification_rules_with_http_info(self, **kwargs): # noqa: E501 """Get and search all notification rules # noqa: E501 Get notification rules of the user that is authorized by the access_token. Must pass the user's access_token. You can set optional search criteria to get only those notification rules that you are interested in. If you do not specify any search criteria, then this service functions as a 'get all' service. 
# noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_and_search_all_notification_rules_with_http_info(async=True) >>> result = thread.get() :param async bool :param list[int] ids: A comma-separated list of notification rule identifiers. If specified, then only notification rules whose identifier match any of the given identifiers will be regarded. The maximum number of identifiers is 1000. :param str trigger_event: If specified, then only notification rules with given trigger event will be regarded. :param bool include_details: If specified, then only notification rules that include or not include details will be regarded. :return: NotificationRuleList If the method is called asynchronously, returns the request thread. """ all_params = ['ids', 'trigger_event', 'include_details'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_and_search_all_notification_rules" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'ids' in params: query_params.append(('ids', params['ids'])) # noqa: E501 collection_formats['ids'] = 'multi' # noqa: E501 if 'trigger_event' in params: query_params.append(('triggerEvent', params['trigger_event'])) # noqa: E501 if 'include_details' in params: query_params.append(('includeDetails', params['include_details'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = ['finapi_auth'] # noqa: E501 return self.api_client.call_api( '/api/v1/notificationRules', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='NotificationRuleList', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_notification_rule(self, id, **kwargs): # noqa: E501 """Get a notification rule # noqa: E501 Get a single notification rule of the user that is authorized by the access_token. Must pass the notification rule's identifier and the user's access_token. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_notification_rule(id, async=True) >>> result = thread.get() :param async bool :param int id: Identifier of requested notification rule (required) :return: NotificationRule If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_notification_rule_with_http_info(id, **kwargs) # noqa: E501 else: (data) = self.get_notification_rule_with_http_info(id, **kwargs) # noqa: E501 return data def get_notification_rule_with_http_info(self, id, **kwargs): # noqa: E501 """Get a notification rule # noqa: E501 Get a single notification rule of the user that is authorized by the access_token. Must pass the notification rule's identifier and the user's access_token. 
# noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_notification_rule_with_http_info(id, async=True) >>> result = thread.get() :param async bool :param int id: Identifier of requested notification rule (required) :return: NotificationRule If the method is called asynchronously, returns the request thread. """ all_params = ['id'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_notification_rule" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'id' is set if ('id' not in params or params['id'] is None): raise ValueError("Missing the required parameter `id` when calling `get_notification_rule`") # noqa: E501 if 'id' in params and not re.search('[\\d]+', params['id']): # noqa: E501 raise ValueError("Invalid value for parameter `id` when calling `get_notification_rule`, must conform to the pattern `/[\\d]+/`") # noqa: E501 collection_formats = {} path_params = {} if 'id' in params: path_params['id'] = params['id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = ['finapi_auth'] # noqa: E501 return self.api_client.call_api( '/api/v1/notificationRules/{id}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='NotificationRule', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
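Every method in this generated client follows the same convention its docstrings describe: call synchronously and get the deserialized model back, or pass async=True and get a thread whose .get() yields the result (async became a reserved word in Python 3.7, so this codegen output is Python 2 / pre-3.7 only). A usage sketch, assuming the standard swagger-codegen package layout, a valid finAPI user token, and an illustrative trigger event:

from swagger_client import ApiClient, Configuration
from swagger_client.api.notification_rules_api import NotificationRulesApi

config = Configuration()
config.access_token = 'the-users-access-token'   # placeholder token
api = NotificationRulesApi(ApiClient(config))

# Synchronous: returns a NotificationRuleList model directly.
rules = api.get_and_search_all_notification_rules(trigger_event='NEW_TRANSACTIONS')

# Asynchronous: returns a thread; .get() blocks until the response arrives.
thread = api.get_and_search_all_notification_rules(async=True)
rules = thread.get()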
47.04888
1,301
0.647504
2,842
23,101
5.082336
0.095355
0.038217
0.019385
0.024924
0.945029
0.938521
0.933536
0.919551
0.916228
0.915397
0
0.014763
0.272802
23,101
490
1,302
47.144898
0.84505
0.036319
0
0.746154
1
0
0.182001
0.063312
0
0
0
0
0
0
null
null
0
0.015385
null
null
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
1
0
0
0
0
1
1
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
10
4f4090ad766af0d1d174a6049f43be7f5842f8bf
11,070
py
Python
file_server.py
bopopescu/Lauecollect
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
[ "MIT" ]
null
null
null
file_server.py
bopopescu/Lauecollect
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
[ "MIT" ]
1
2019-10-22T21:28:31.000Z
2019-10-22T21:39:12.000Z
file_server.py
bopopescu/Lauecollect
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
[ "MIT" ]
2
2019-06-06T15:06:46.000Z
2020-07-20T02:03:22.000Z
#!/usr/bin/env python """Upload and download files across the network, from and to the FPGA timing system. Setup: A server program, named "file-server", needs to be running on the timing system (in "/home/timing_system"). Usage examples: wput("test\n","//id14timing3.cars.aps.anl.gov:2001/tmp/test.txt") wput("."*1000000,"//id14timing3.cars.aps.anl.gov:2001/tmp/test.dat") data = wget("//id14timing3.cars.aps.anl.gov:2001/tmp/test.dat") wdel("//id14timing3.cars.aps.anl.gov:2001/tmp/test.txt") Transfer speed: 8.2 MB/s upload, 8.1 MB/s download; 15 us per file upload, 8 ms per file download Friedrich Schotte, Nov 21, 2015 - Aug 28, 2017 """ __version__ = "1.4" # (ip_address,port) -> ip_address_and_port from logging import debug,info,warn,error from tcp_client import connection default_port_number = 2001 from thread import allocate_lock lock = allocate_lock() def wput(data,URL): """Upload a file across the network data: content of the file to upload. URL: e.g. "//id14timing3.cars.aps.anl.gov:2001/tmp/test.txt" """ ##debug("%s, %d bytes %r " % (URL,len(data),data[0:21])) with lock: # Allow only one thread at a time inside this function. import socket url = URL default_port = 80 if url.startswith("http:") else default_port_number url = url.replace("http:","") if url.startswith("//"): url = url[2:] ip_address_and_port = url.split("/")[0].split("@")[-1] if not ":" in ip_address_and_port: ip_address_and_port += ":"+str(default_port) pathname = "/"+"/".join(url.split("/")[1:]) s = "PUT %s\n" % pathname s += "Content-Length: %d\n" % len(data) s += "\n" s += data for attempt in range(0,2): try: c = connection(ip_address_and_port) if c is None: break c.sendall(s) except socket.error: continue break def wget(URL): """Download a file from the network URL: e.g. "//id14timing3.cars.aps.anl.gov:2001/tmp/test.txt" """ ##debug("wget %r queued" % URL) with lock: # Allow only one thread at a time inside this function. ##debug("wget %r..." % URL) import socket url = URL default_port = 80 if url.startswith("http:") else default_port_number url = url.replace("http:","") if url.startswith("//"): url = url[2:] ip_address_and_port = url.split("/")[0] if not ":" in ip_address_and_port: ip_address_and_port += ":"+str(default_port) pathname = "/"+"/".join(url.split("/")[1:]) s = "GET %s\n" % pathname s += "\n" data = "" for attempt in range(0,2): try: c = connection(ip_address_and_port) if c is None: break c.sendall(s) reply = "" while not "\n\n" in reply: r = c.recv(65536) if len(r) == 0: break reply += r if len(r) == 0: continue header_size = reply.find("\n\n")+2 keyword = "Content-Length: " if not keyword in reply: return "" start = reply.find(keyword)+len(keyword) end = start+reply[start:].find("\n") file_size = int(reply[start:end]) while len(reply) < header_size+file_size: r = c.recv(65536) if len(r) == 0: break reply += r if len(r) == 0: continue data = reply[header_size:] if len(data) != file_size: warn("file server %s: expecting %d,got %d bytes" % (ip_address_and_port,file_size,len(data))) except socket.error: continue break ##debug("wget %r: %-.20r" % (URL,data)) return data def wdel(URL): """Delete a file on the network URL: e.g. "//id14timing3.cars.aps.anl.gov:2001/tmp/test.txt" """ with lock: # Allow only one thread at a time inside this function. 
import socket url = URL default_port = 80 if url.startswith("http:") else default_port_number url = url.replace("http:","") if url.startswith("//"): url = url[2:] ip_address_and_port = url.split("/")[0] if not ":" in ip_address_and_port: ip_address_and_port += ":"+str(default_port) pathname = "/"+"/".join(url.split("/")[1:]) s = "DEL %s\n" % pathname s += "\n" for attempt in range(0,2): try: c = connection(ip_address_and_port) if c is None: break c.sendall(s) except socket.error: continue break def wexists(URL): """Check whether a file exists on the network url: e.g. "//id14timing3.cars.aps.anl.gov:2001/tmp/test.txt" """ with lock: # Allow only one thread at a time inside this function. import socket url = URL default_port = 80 if url.startswith("http:") else default_port_number url = url.replace("http:","") if url.startswith("//"): url = url[2:] ip_address_and_port = url.split("/")[0] if not ":" in ip_address_and_port: ip_address_and_port += ":"+str(default_port) pathname = "/"+"/".join(url.split("/")[1:]) s = "EXISTS %s\n" % pathname s += "\n" data = "" for attempt in range(0,2): try: c = connection(ip_address_and_port) if c is None: break c.sendall(s) reply = "" while not "\n\n" in reply: r = c.recv(65536) if len(r) == 0: break reply += r if len(r) == 0: continue header_size = reply.find("\n\n")+2 keyword = "Content-Length: " if not keyword in reply: return "" start = reply.find(keyword)+len(keyword) end = start+reply[start:].find("\n") file_size = int(reply[start:end]) while len(reply) < header_size+file_size: r = c.recv(65536) if len(r) == 0: break reply += r if len(r) == 0: continue data = reply[header_size:] if len(data) != file_size: warn("file server %s: expecting %d,got %d bytes" % (ip_address_and_port,file_size,len(data))) except socket.error: continue break return data == "True\n" def wdir(URL): """List directory contents on the network URL: e.g. "//id14timing3.cars.aps.anl.gov:2001/tmp/*" """ with lock: # Allow only one thread at a time inside this function. import socket url = URL default_port = 80 if url.startswith("http:") else default_port_number url = url.replace("http:","") if url.startswith("//"): url = url[2:] ip_address_and_port = url.split("/")[0] if not ":" in ip_address_and_port: ip_address_and_port += ":"+str(default_port) pathname = "/"+"/".join(url.split("/")[1:]) s = "DIR %s\n" % pathname s += "\n" data = "" for attempt in range(0,2): try: c = connection(ip_address_and_port) if c is None: break c.sendall(s) reply = "" while not "\n\n" in reply: r = c.recv(65536) if len(r) == 0: break reply += r if len(r) == 0: continue header_size = reply.find("\n\n")+2 keyword = "Content-Length: " if not keyword in reply: return "" start = reply.find(keyword)+len(keyword) end = start+reply[start:].find("\n") file_size = int(reply[start:end]) while len(reply) < header_size+file_size: r = c.recv(65536) if len(r) == 0: break reply += r if len(r) == 0: continue data = reply[header_size:] if len(data) != file_size: warn("file server %s: expecting %d,got %d bytes" % (ip_address_and_port,file_size,len(data))) except socket.error: continue break return data def wsize(URL): """Get the size of a file on the network URL: e.g. "//id14timing3.cars.aps.anl.gov:2001/tmp/test.txt" """ with lock: # Allow only one thread at a time inside this function. 
import socket url = URL default_port = 80 if url.startswith("http:") else default_port_number url = url.replace("http:","") if url.startswith("//"): url = url[2:] ip_address_and_port = url.split("/")[0] if not ":" in ip_address_and_port: ip_address_and_port += ":"+str(default_port) pathname = "/"+"/".join(url.split("/")[1:]) s = "SIZE %s\n" % pathname s += "\n" data = "" for attempt in range(0,2): try: c = connection(ip_address_and_port) if c is None: break c.sendall(s) reply = "" while not "\n\n" in reply: r = c.recv(65536) if len(r) == 0: break reply += r if len(r) == 0: continue header_size = reply.find("\n\n")+2 keyword = "Content-Length: " if not keyword in reply: return "" start = reply.find(keyword)+len(keyword) end = start+reply[start:].find("\n") file_size = int(reply[start:end]) while len(reply) < header_size+file_size: r = c.recv(65536) if len(r) == 0: break reply += r if len(r) == 0: continue data = reply[header_size:] if len(data) != file_size: warn("file server %s: expecting %d,got %d bytes" % (ip_address_and_port,file_size,len(data))) except socket.error: continue break data = data.strip() try: size = int(data) except: warn("file server %s: expecting integer, got %r" % (ip_address_and_port,data)) size = 0 return size if __name__ == "__main__": from pdb import pm from sys import argv,stderr if len(argv) != 3: stderr.write("Usage: %s test.txt http://id14timing3.cars.aps.anl.gov:2001/tmp/test.txt\n" % argv[0]) else: filename,URL = argv[1],argv[2] wput(file(filename).read(),URL) ##wput('22'.ljust(22)+'\n','id14timing3.cars.aps.anl.gov:2001/tmp/sequencer_fs/queue_max_repeat_count')
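All six helpers speak the same stripped-down, HTTP-like protocol: a verb-plus-pathname line, an optional Content-Length header, a blank line, then the payload, with replies framed the same way. A raw-socket sketch of what wget() does internally (Python 2 to match the module; host and path are illustrative, and the file-server is assumed to be listening):

import socket

# GET one file from the ad-hoc file server, without the wget() helper.
c = socket.create_connection(("id14timing3.cars.aps.anl.gov", 2001), timeout=5)
c.sendall("GET /tmp/test.txt\n\n")       # verb + pathname; blank line ends the header
reply = ""
while "\n\n" not in reply:               # read until the reply header is complete
    r = c.recv(65536)
    if not r: break
    reply += r
header, _, body = reply.partition("\n\n")
size = int(header.split("Content-Length: ")[1].split("\n")[0])
while len(body) < size:                  # then read the announced payload size
    r = c.recv(65536)
    if not r: break
    body += r
print(body)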
[per-file statistics and quality-signal metric values for the preceding record omitted]
art.py (repo ndoxx/drowned_phish, head a0f6e49944eda104ed2e727234caa8455efc35af)
file hexsha 4f55295f1873a0c1191faa72c5cbc2ad7126eddf, 85,913 bytes, Python (.py), license MIT
3 stars between 2022-03-08T15:59:04.000Z and 2022-03-09T07:10:37.000Z; issue and fork counts: null
def show_logo():
    # Prints the logo banner, one print() call per raster row. Each row is a
    # run of per-character 24-bit-colour ANSI escape cells.
    # [ANSI art payload omitted; the source dump is truncated mid-row.]
[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m') print('\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m.;;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;160;160m@\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m:;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;0;0m@\033[0m\033[1;38;2;160;0;0m\033[1;48;2;80;80;80mS\033[0m\033[1;38;2;0;0;0m\033[1;48;2;160;0;0m8\033[0m\033[1;38;2;160;0;0m\033[1;48;2;160;80;0m8\033[0m\033[1;38;2;160;80;0m\033[1;48;2;160;0;0m@\033[0m\033[1;38;2;80;80;80m\033[1;48;2;160;80;0m8\033[0m\033[1;38;2;240;80;80m\033[1;48;2;160;160;160m8\033[0m\033[1;38;2;240;240;80m\033[1;48;2;160;160;160mS\033[0m\033[1;38;2;160;0;0m\033[1;48;2;80;80;80m@\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m@\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;0;0m\033[1;48;2;80;80;80m@\033[0m\033[1;38;2;160;80;0m\033[1;48;2;80;80;80m@\033[0m\033[1;38;2;160;0;0m\033[1;48;2;80;80;80mX\033[0m\033[1;38;2;80;80;80m\033[1;48;2;160;80;0m@\033[0m\033[1;38;2;0;160;160m\033[1;48;2;160;160;160m8\033[0m\033[1;38;2;80;80;80m\033[1;48;2;160;160;160m@t\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m8:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m%\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;%\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240m@\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt%\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240m@\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m') 
print('\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m.\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;0;0m8\033[0m\033[1;38;2;80;240;240m\033[1;48;2;0;160;160mt\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m.: :\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mX\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240mt\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m;%\033[0m\033[1;38;2;0;160;0m\033[1;48;2;80;80;80m8\033[0m\033[1;38;2;80;240;240m\033[1;48;2;0;160;160m;\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m8\033[0m\033[1;38;2;80;80;80m\033[1;48;2;160;160;160m8\033[0m\033[1;38;2;0;160;160m\033[1;48;2;160;160;160m8\033[0m\033[1;38;2;240;240;80m\033[1;48;2;160;160;160mS\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;80;80m \033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m%\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mS\033[0m\033[1;38;2;80;240;240m\033[1;48;2;0;160;160mXX\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mS\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m%:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mX\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240mt\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m%;\033[0m\033[1;38;2;80;240;240m\033[1;48;2;0;160;160m%\033[0m\033[1;38;2;0;160;0m\033[1;48;2;80;80;80mX\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mS\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m ;:.\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;80;80m8\033[0m\033[1;38;2;0;0;0m\033[1;48;2;80;80;80mX\033[0m\033[1;38;2;80;240;240m\033[1;48;2;0;160;160mS\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m8\033[0m\033[1;38;2;0;160;160m\033[1;48;2;160;160;160m8\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240m \033[0m\033[1;38;2;0;160;160m\033[1;48;2;160;160;160m8\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m8\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;0;0m8\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240m;\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160mt; t\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240mt\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m;:\033[0m\033[1;38;2;80;240;240m\033[1;48;2;0;160;160mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;80;80;80mS\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240m%\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;80;80;80mX\033[0m\033[1;38;2;80;240;240m\033[1;48;2;0;160;160mS\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m::\033[0m\033[1;38;2;0;160;0m\033[1;48;2;80;80;80mX\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m%\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;160;160m8\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160mt\033[0m\033[1;38;2;80;240;240m\033[1;48;2;0;160;160m%\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;0m8\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;;\033[0m') print('\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m.\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m: 
\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m8%\033[0m\033[1;38;2;80;240;240m\033[1;48;2;0;160;160mt\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mS\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240m@\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;80;80;80mS\033[0m\033[1;38;2;160;80;0m\033[1;48;2;80;80;80m;\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;160;80;0mX\033[0m\033[1;38;2;80;80;80m\033[1;48;2;160;80;0m%\033[0m\033[1;38;2;0;160;160m\033[1;48;2;160;160;160m8\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m8\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m8\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240m8\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160mt;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;160;160m8\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m;\033[0m\033[1;38;2;80;240;240m\033[1;48;2;0;160;160m@\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m%\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mS\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240mS\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m.\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;160;160mS8\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;160;160m;\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;160;160mt8\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m%\033[0m\033[1;38;2;80;240;240m\033[1;48;2;0;160;160mS\033[0m\033[1;38;2;160;80;0m\033[1;48;2;80;80;80m \033[0m\033[1;38;2;80;240;240m\033[1;48;2;0;160;160m.\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;160;160m@\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mS\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mS\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mXt\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160mt.\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m@\033[0m\033[1;38;2;80;240;240m\033[1;48;2;0;160;160mX\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;160;160mS\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m \033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240mX\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m.\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m.\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;160;160m8\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;160;160m%S\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;80;240;240m\033[1;48;2;0;160;160mX\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m. 
\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m') print('\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m.;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m:\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240m.\033[0m\033[1;38;2;80;240;240m\033[1;48;2;0;160;160mS\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;160;160mS\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mS\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240m@\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;80;0m\033[1;48;2;80;80;80m8\033[0m\033[1;38;2;160;80;0m\033[1;48;2;160;0;0m8\033[0m\033[1;38;2;160;80;0m\033[1;48;2;80;80;80m;\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240m .\033[0m\033[1;38;2;0;160;160m\033[1;48;2;160;160;160m8\033[0m\033[1;38;2;0;0;0m\033[1;48;2;0;160;0m8\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;80;80m8\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240m:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;160;160mS\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m@\033[0m\033[1;38;2;80;240;240m\033[1;48;2;0;160;160mX\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m%\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m..\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;160;160m@\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240mt \033[0m\033[1;38;2;0;160;160m\033[1;48;2;0;160;0m8\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;0;0mX\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160mX\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240m \033[0m\033[1;38;2;80;240;80m\033[1;48;2;0;160;160m8\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m8\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;0;0mX\033[0m\033[1;38;2;80;240;240m\033[1;48;2;0;160;160mX\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m:\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240m 
\033[0m\033[1;38;2;80;240;80m\033[1;48;2;0;160;160m8\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;0;0m8\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mX\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;160;160mX\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240mX\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m.\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240mt;\033[0m\033[1;38;2;80;240;240m\033[1;48;2;0;160;160m%\033[0m\033[1;38;2;0;160;160m\033[1;48;2;0;160;0m8\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mS\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240m%\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160m:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;160;160m@\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m') print('\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m.\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mX%\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;0;0mX\033[0m\033[1;38;2;80;80;80m\033[1;48;2;160;80;0mX\033[0m\033[1;38;2;160;0;0m\033[1;48;2;160;80;0m8\033[0m\033[1;38;2;240;80;80m\033[1;48;2;160;80;0m88\033[0m\033[1;38;2;80;80;80m\033[1;48;2;160;160;160mX\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m@\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m..:\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m%\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m:::::\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m::\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mXt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m%\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m%%\033[0m\033[1;38;2;80;80;80m\033[1;48;2;0;160;160mt::::\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m8\033[0m\033[1;38;2;0;160;160m\033[1;48;2;80;240;240mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;.:::\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m:;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;:;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;:::\033[0m\033[
1;38;2;0;0;160m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m') print('\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m.\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;160;160m\033[1;48;2;80;80;80m@\033[0m\033[1;38;2;160;80;0m\033[1;48;2;160;160;160m@\033[0m\033[1;38;2;80;80;80m\033[1;48;2;160;160;160m8\033[0m\033[1;38;2;160;80;0m\033[1;48;2;80;80;80m8\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;;;;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m') 
print('\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m.\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;tS@%:\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m:\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m%\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;160;0;0m\033[1;48;2;0;0;0mt\033[0m\033[1;38;2;0;0;160m\033[1;48;2;0;0;0m;\033[0m\033[1;38;2;0;160;0m\033[1;48;2;0;0;0m;\033[0m')
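For reference, the escape grammar the generated script repeats per character is standard ANSI truecolor. A minimal sketch of the same pattern (the colored helper is hypothetical, not part of the file above):

def colored(ch, fg, bg):
    # Wrap one character in a bold 24-bit foreground escape, a 24-bit
    # background escape, then a reset, exactly as each cell above does.
    return '\033[1;38;2;%d;%d;%dm\033[1;48;2;%d;%d;%dm%s\033[0m' % (fg + bg + (ch,))

print(colored('t', (0, 160, 0), (0, 0, 0)))  # a green 't' on a black cell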
3,181.962963
3,876
0.659411
28,157
85,913
2.011969
0.002628
0.247974
0.281407
0.216978
0.996805
0.996805
0.996717
0.995728
0.995499
0.995322
0
0.590894
0.001211
85,913
27
3,877
3,181.962963
0.069305
0
0
0
0
0.962963
0.996473
0.995717
0
0
0
0
0
1
0.037037
true
0
0
0
0.037037
0.962963
0
0
0
null
1
1
1
1
1
1
1
1
1
0
1
0
0
0
1
1
1
0
0
0
0
1
1
1
null
0
0
0
0
0
0
1
0
0
0
0
1
0
18
96fccccbfd6cac788cb4a3469ddccc8606c17adf
258
py
Python
lizardanalysis/calculations/__init__.py
JojoReikun/ClimbingLizardDLCAnalysis
6cc38090217a3ffd4860ef6d06ba7967d3c10b7c
[ "MIT" ]
1
2021-03-09T19:12:44.000Z
2021-03-09T19:12:44.000Z
lizardanalysis/calculations/__init__.py
JojoReikun/ClimbingLizardDLCAnalysis
6cc38090217a3ffd4860ef6d06ba7967d3c10b7c
[ "MIT" ]
null
null
null
lizardanalysis/calculations/__init__.py
JojoReikun/ClimbingLizardDLCAnalysis
6cc38090217a3ffd4860ef6d06ba7967d3c10b7c
[ "MIT" ]
null
null
null
from lizardanalysis.calculations.read_in_files import analyze_files, initialize
from lizardanalysis.calculations import write_result_files
from lizardanalysis.calculations import lizard_morphometrics
from lizardanalysis.calculations import step_wise_summary
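These re-exports let callers pull the calculation entry points from the package root instead of naming each submodule. A one-line usage sketch, assuming the lizardanalysis package is installed:

from lizardanalysis.calculations import analyze_files, initialize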
51.6
79
0.910853
30
258
7.566667
0.533333
0.317181
0.528634
0.475771
0
0
0
0
0
0
0
0
0.065891
258
4
80
64.5
0.941909
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
4ffe82cedf9fd1e1ac34a93e87ec41aff028ced4
1,972
py
Python
simulators/2_big_simulations/plot_graphs_from_results.py
Arseni1919/dcop_simulator_3
a4e2f5b7b2ecf10b595d3cdd1df0c07c833cb15b
[ "MIT" ]
null
null
null
simulators/2_big_simulations/plot_graphs_from_results.py
Arseni1919/dcop_simulator_3
a4e2f5b7b2ecf10b595d3cdd1df0c07c833cb15b
[ "MIT" ]
null
null
null
simulators/2_big_simulations/plot_graphs_from_results.py
Arseni1919/dcop_simulator_3
a4e2f5b7b2ecf10b595d3cdd1df0c07c833cb15b
[ "MIT" ]
null
null
null
from simulators.plots.collisions_vs_iters import *
from simulators.plots.coverage_vs_iters import *


def main():
    # file_name = '25.07.2021-01:08:51_10T-20R_20Bi-8Si_50PRBLMS_complex.results'
    # file_name = '24.07.2021-15:19:18_10T-20R_20Bi-8Si_50PRBLMS_grid.results'
    # file_name = '26.08.2021-19:39:38_20T-30R_20Bi-8Si_50PRBLMS_targets_apart_grid.results'
    # file_name = '27.08.2021-12:44:18_20T-30R_20Bi-8Si_50PRBLMS_targets_apart_complex.results'
    # file_name = '2021.09.14-15:50:09_20T-30R_20Bi-8Si_50PRBLMS_targets_apart_complex.results'
    # file_name = '2021.09.14-16:07:23_20T-30R_20Bi-8Si_50PRBLMS_targets_apart_grid.results'
    # file_name = '2021.09.23-14:00:41_20T-30R_20Bi-8Si_50PRBLMS_targets_apart_grid.results'
    # file_name = '2021.09.23-14:22:40_20T-30R_20Bi-8Si_50PRBLMS_targets_apart_complex.results'
    # file_name = '2021.10.05-18:35:07_20T-30R_20Bi-8Si_50PRBLMS_targets_apart_complex_3_7.results'
    # file_name = '2021.10.06-10:49:35_20T-30R_20Bi-8Si_50PRBLMS_targets_apart_complex_8_12.results'
    # file_name = '2021.12.24-11:36:06_200_req_10T-30R_20Bi-8Si_50PRBLMS_targets_apart_grid.results'
    # file_name = '2021.12.24-11:36:06_200_req_10T-30R_20Bi-8Si_50PRBLMS_targets_apart_grid.results'
    # file_name = '2021.12.24-15:49:11_200_req_10T-30R_20Bi-8Si_50PRBLMS_targets_apart_complex_3_7.results'
    # file_name = '2021.12.25-10:35:46_400_req_5T-30R_20Bi-8Si_50PRBLMS_targets_apart_grid.results'
    file_name = '2021.12.25-12:37:43_400_req_5T-30R_20Bi-8Si_50PRBLMS_targets_apart_complex_3_7.results'
    # file_name = '2021.12.25-14:42:04_400_req_10T-30R_20Bi-8Si_50PRBLMS_targets_apart_grid.results'
    # file_name = '2021.12.25-19:15:54_400_req_10T-30R_20Bi-8Si_50PRBLMS_targets_apart_complex_3_7.results'
    file_name = f'results/{file_name}'
    print_t_test(file_name)
    plot_coverage_vs_iters(file_name)
    plot_collisions_vs_iters(file_name)


if __name__ == '__main__':
    main()
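The long run of commented-out file_name assignments is the script's manual switch between result files. A hypothetical alternative (the latest_results helper is mine, not part of the repository) would pick the newest .results file automatically:

from pathlib import Path

def latest_results(directory='results'):
    # Newest *.results file by modification time; assumes the same
    # 'results/' layout the script above prepends to file_name.
    candidates = sorted(Path(directory).glob('*.results'),
                        key=lambda p: p.stat().st_mtime)
    if not candidates:
        raise FileNotFoundError('no .results files in %r' % directory)
    return str(candidates[-1])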
38.666667
107
0.78144
352
1,972
3.911932
0.213068
0.127814
0.196078
0.196078
0.733479
0.700073
0.700073
0.700073
0.668845
0.65069
0
0.237585
0.10142
1,972
50
108
39.44
0.539503
0.735801
0
0
0
0
0.230612
0.17551
0
0
0
0
0
1
0.1
false
0
0.2
0
0.3
0.1
0
0
0
null
0
1
1
0
1
1
1
0
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
8b3d24a77aa7345c96f127bf0c90c051b983aff7
1,430
py
Python
SmartFoxServer_PRO_1.6.6/Server/lib/Lib/test/jser2_classes.py
ChisdealHD/DetlasWorldLinux
336465a4df1a48c9a273329fc7a09d8099c4e4d5
[ "MIT" ]
3
2015-07-29T02:31:52.000Z
2017-01-07T15:48:44.000Z
SmartFoxServer_PRO_1.6.6/Server/lib/Lib/test/jser2_classes.py
ChisdealHD/DetlasWorldLinux
336465a4df1a48c9a273329fc7a09d8099c4e4d5
[ "MIT" ]
4
2018-02-22T07:42:13.000Z
2021-12-13T10:53:09.000Z
SmartFoxServer_PRO_1.6.6/Server/lib/Lib/test/jser2_classes.py
ChisdealHD/DetlasWorldLinux
336465a4df1a48c9a273329fc7a09d8099c4e4d5
[ "MIT" ]
4
2015-09-09T11:54:37.000Z
2018-05-26T05:08:14.000Z
import java


class A:
    def __init__(self, a):
        self.a = a

    def __eq__(self, other):
        return self.__class__ == other.__class__ and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


class AJ(java.lang.Object, java.io.Serializable):
    def __init__(self, a):
        self.a = a

    def __eq__(self, other):
        return self.__class__ == other.__class__ and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


class N(object):
    def __init__(self, a):
        self.a = a

    def __eq__(self, other):
        return self.__class__ == other.__class__ and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


class NL(list):
    def __init__(self, a, *x):
        list.__init__(self, x)
        self.a = a

    def __eq__(self, other):
        return (self.__class__ == other.__class__ and
                list.__eq__(self, other) and
                self.__dict__ == other.__dict__)

    def __ne__(self, other):
        return not (self == other)


class NT(tuple):
    def __new__(typ, a, *x):
        nt = tuple.__new__(typ, x)
        nt.a = a
        return nt

    def __eq__(self, other):
        return (self.__class__ == other.__class__ and
                tuple.__eq__(self, other) and
                self.__dict__ == other.__dict__)

    def __ne__(self, other):
        return not (self == other)
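These classes are serialization-test fixtures: each defines value equality (same class, same __dict__, and for the list/tuple subclasses the same elements) so a round-tripped object can be compared to the original. A short illustration of those semantics, assuming the module is importable (under Jython, since the top-level import java requires it):

assert A(1) == A(1)    # same class and same attributes
assert A(1) != N(1)    # equal values but different classes
nl = NL('tag', 1, 2, 3)
assert list(nl) == [1, 2, 3] and nl.a == 'tag'
nt = NT('tag', 1, 2, 3)
assert tuple(nt) == (1, 2, 3) and nt.a == 'tag'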
29.791667
84
0.602098
189
1,430
3.730159
0.142857
0.217021
0.212766
0.099291
0.798582
0.798582
0.798582
0.798582
0.798582
0.798582
0
0
0.276923
1,430
47
85
30.425532
0.681818
0
0
0.658537
0
0
0
0
0
0
0
0
0
1
0.365854
false
0
0.02439
0.243902
0.780488
0
0
0
0
null
1
1
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
10
ba06988cc237ae2cd987d0ced292879646999719
20
py
Python
test/tokenize/t07.py
timmartin/skulpt
2e3a3fbbaccc12baa29094a717ceec491a8a6750
[ "MIT" ]
2,671
2015-01-03T08:23:25.000Z
2022-03-31T06:15:48.000Z
test/tokenize/t07.py
timmartin/skulpt
2e3a3fbbaccc12baa29094a717ceec491a8a6750
[ "MIT" ]
972
2015-01-05T08:11:00.000Z
2022-03-29T13:47:15.000Z
test/tokenize/t07.py
timmartin/skulpt
2e3a3fbbaccc12baa29094a717ceec491a8a6750
[ "MIT" ]
845
2015-01-03T19:53:36.000Z
2022-03-29T18:34:22.000Z
0xdeadc0de & 012345
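This one-line tokenizer fixture pairs a hexadecimal literal with a legacy octal literal: 012345 (a bare leading zero) is valid Python 2 octal but a SyntaxError in Python 3, which is what makes it a useful tokenize test. For comparison, the Python 3 spelling of the same expression:

value = 0xdeadc0de & 0o12345  # the 0o prefix replaces Python 2's bare leading zero
print(hex(value))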
10
19
0.8
2
20
8
1
0
0
0
0
0
0
0
0
0
0
0.470588
0.15
20
1
20
20
0.470588
0
0
0
0
0
0
0
0
0
0.5
0
0
0
null
null
0
0
null
null
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
1
0
0
1
0
0
0
0
0
0
0
0
7
e84115bffb4ff906c412275ce81f08144a7e1627
1,029
py
Python
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/keras/activations/__init__.py
Con-Mi/lambda-packs
b23a8464abdd88050b83310e1d0e99c54dac28ab
[ "MIT" ]
3
2019-04-01T11:03:04.000Z
2019-12-31T02:17:15.000Z
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/keras/activations/__init__.py
Con-Mi/lambda-packs
b23a8464abdd88050b83310e1d0e99c54dac28ab
[ "MIT" ]
1
2021-04-15T18:46:45.000Z
2021-04-15T18:46:45.000Z
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/keras/activations/__init__.py
Con-Mi/lambda-packs
b23a8464abdd88050b83310e1d0e99c54dac28ab
[ "MIT" ]
1
2021-09-23T13:43:07.000Z
2021-09-23T13:43:07.000Z
"""Imports for Python API. This file is MACHINE GENERATED! Do not edit. Generated by: tensorflow/tools/api/generator/create_python_api.py script. """ from tensorflow.python.keras._impl.keras.activations import deserialize from tensorflow.python.keras._impl.keras.activations import elu from tensorflow.python.keras._impl.keras.activations import get from tensorflow.python.keras._impl.keras.activations import hard_sigmoid from tensorflow.python.keras._impl.keras.activations import linear from tensorflow.python.keras._impl.keras.activations import relu from tensorflow.python.keras._impl.keras.activations import selu from tensorflow.python.keras._impl.keras.activations import serialize from tensorflow.python.keras._impl.keras.activations import sigmoid from tensorflow.python.keras._impl.keras.activations import softmax from tensorflow.python.keras._impl.keras.activations import softplus from tensorflow.python.keras._impl.keras.activations import softsign from tensorflow.python.keras._impl.keras.activations import tanh
57.166667
73
0.854227
141
1,029
6.120567
0.262411
0.210892
0.301275
0.376593
0.784473
0.784473
0.784473
0.784473
0.134415
0
0
0
0.067055
1,029
18
74
57.166667
0.898958
0.13897
0
0
1
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
1
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
10
fa211cdee62223fc273073f4106bbf68990b25cb
3,057
py
Python
package.py
dbeinke/OpenFormicaria
29717e5204e1fbf0620a2be1e958ff05080d4936
[ "BSD-4-Clause" ]
10
2020-11-05T11:16:47.000Z
2022-02-04T09:33:26.000Z
package.py
dbeinke/OpenFormicaria
29717e5204e1fbf0620a2be1e958ff05080d4936
[ "BSD-4-Clause" ]
4
2020-11-07T18:29:51.000Z
2021-11-03T14:10:36.000Z
package.py
dbeinke/OpenFormicaria
29717e5204e1fbf0620a2be1e958ff05080d4936
[ "BSD-4-Clause" ]
6
2020-11-05T23:41:34.000Z
2021-12-07T20:43:38.000Z
#!/usr/bin/python
from zipfile import ZipFile
import os
from os.path import split

EXPORT = "package/"

if not os.path.exists(EXPORT):
    os.makedirs(EXPORT)

# Connectors
with ZipFile(EXPORT + "connectors.zip", 'w') as zipObj:
    dir = "Connectors/STL/"
    zipObj.write("LICENSE")
    # Iterate over all the files in directory
    for folderName, subfolders, filenames in os.walk("docs/"):
        for filename in filenames:
            if filename != "CNAME":
                # create complete filepath of file in directory
                filePath = os.path.join(folderName, filename)
                # Add file to zip
                zipObj.write(filePath, filePath)
    for folderName, subfolders, filenames in os.walk(dir):
        for filename in filenames:
            # create complete filepath of file in directory
            filePath = os.path.join(folderName, filename)
            safename = filePath.replace(dir, "")
            # Add file to zip
            zipObj.write(filePath, safename)

# Modules
with ZipFile(EXPORT + "modules.zip", 'w') as zipObj:
    dir = "Modules/STL/"
    zipObj.write("LICENSE")
    # Iterate over all the files in directory
    for folderName, subfolders, filenames in os.walk("docs/"):
        for filename in filenames:
            if filename != "CNAME":
                # create complete filepath of file in directory
                filePath = os.path.join(folderName, filename)
                # Add file to zip
                zipObj.write(filePath, filePath)
    for folderName, subfolders, filenames in os.walk(dir):
        for filename in filenames:
            # create complete filepath of file in directory
            filePath = os.path.join(folderName, filename)
            safename = filePath.replace(dir, "")
            # Add file to zip
            zipObj.write(filePath, safename)

# Formicaria
with ZipFile(EXPORT + "formicaria.zip", 'w') as zipObj:
    dir = "Formicaria/STL/"
    zipObj.write("LICENSE")
    # Iterate over all the files in directory
    for folderName, subfolders, filenames in os.walk("docs/"):
        for filename in filenames:
            if filename != "CNAME":
                # create complete filepath of file in directory
                filePath = os.path.join(folderName, filename)
                # Add file to zip
                zipObj.write(filePath, filePath)
    for folderName, subfolders, filenames in os.walk(dir):
        for filename in filenames:
            # create complete filepath of file in directory
            filePath = os.path.join(folderName, filename)
            safename = filePath.replace(dir, "")
            # Add file to zip
            zipObj.write(filePath, safename)
    for folderName, subfolders, filenames in os.walk("Formicaria/Inserts/"):
        for filename in filenames:
            # create complete filepath of file in directory
            filePath = os.path.join(folderName, filename)
            safename = filePath.replace("Formicaria/", "")
            # Add file to zip
            zipObj.write(filePath, safename)
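The three with ZipFile(...) blocks above repeat the same walk-and-add loop almost verbatim. A hypothetical refactoring sketch (the add_tree helper and its signature are mine, not part of the repository) that would fold the duplicated loops into one function:

def add_tree(zip_obj, root, strip='', skip=('CNAME',)):
    # Walk `root`, adding each file to the archive; optionally strip a
    # leading prefix from the archive name and skip listed filenames.
    for folder_name, subfolders, filenames in os.walk(root):
        for filename in filenames:
            if filename in skip:
                continue
            file_path = os.path.join(folder_name, filename)
            arcname = file_path.replace(strip, '', 1) if strip else file_path
            zip_obj.write(file_path, arcname)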
39.701299
76
0.615636
348
3,057
5.408046
0.152299
0.058448
0.085547
0.119022
0.853348
0.829437
0.829437
0.808183
0.78746
0.78746
0
0
0.294079
3,057
77
77
39.701299
0.872104
0.193981
0
0.72
0
0
0.070786
0
0
0
0
0
0
1
0
false
0
0.06
0
0.06
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
d7665cf5e0c5562b61c10b01cb79b57bed2d5af9
43,554
py
Python
tccli/services/tbaas/tbaas_client.py
zqfan/tencentcloud-cli
b6ad9fced2a2b340087e4e5522121d405f68b615
[ "Apache-2.0" ]
null
null
null
tccli/services/tbaas/tbaas_client.py
zqfan/tencentcloud-cli
b6ad9fced2a2b340087e4e5522121d405f68b615
[ "Apache-2.0" ]
null
null
null
tccli/services/tbaas/tbaas_client.py
zqfan/tencentcloud-cli
b6ad9fced2a2b340087e4e5522121d405f68b615
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
import os
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli import __version__
from tccli.utils import Utils
from tccli.exceptions import ConfigurationError
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.tbaas.v20180416 import tbaas_client as tbaas_client_v20180416
from tencentcloud.tbaas.v20180416 import models as models_v20180416


def doInvokeBcosTrans(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.InvokeBcosTransRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.InvokeBcosTrans(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doGetBcosBlockByNumber(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetBcosBlockByNumberRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetBcosBlockByNumber(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doGetChaincodeLogForUser(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetChaincodeLogForUserRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetChaincodeLogForUser(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doGetBcosTransByHash(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetBcosTransByHashRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetBcosTransByHash(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doGetBlockTransactionListForUser(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetBlockTransactionListForUserRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetBlockTransactionListForUser(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doGetClusterListForUser(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetClusterListForUserRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetClusterListForUser(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doGetBcosBlockList(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetBcosBlockListRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetBcosBlockList(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doGetBlockListHandler(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetBlockListHandlerRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetBlockListHandler(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doDeployDynamicContractHandler(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DeployDynamicContractHandlerRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DeployDynamicContractHandler(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doInvoke(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.InvokeRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.Invoke(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doGetClusterSummary(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetClusterSummaryRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetClusterSummary(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doGetPeerLogForUser(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetPeerLogForUserRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetPeerLogForUser(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doDeployDynamicBcosContract(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DeployDynamicBcosContractRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DeployDynamicBcosContract(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doDownloadUserCert(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DownloadUserCertRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DownloadUserCert(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doCreateChaincodeAndInstallForUser(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateChaincodeAndInstallForUserRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.CreateChaincodeAndInstallForUser(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doGetLatesdTransactionList(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetLatesdTransactionListRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetLatesdTransactionList(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doApplyUserCert(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ApplyUserCertRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.ApplyUserCert(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doBlockByNumberHandler(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.BlockByNumberHandlerRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.BlockByNumberHandler(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doGetInvokeTx(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetInvokeTxRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetInvokeTx(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doGetChaincodeInitializeResultForUser(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetChaincodeInitializeResultForUserRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetChaincodeInitializeResultForUser(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doGetChannelListForUser(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.GetChannelListForUserRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.GetChannelListForUser(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doInitializeChaincodeForUser(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.InitializeChaincodeForUserRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.InitializeChaincodeForUser(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])


def doGetTransactionDetailForUser(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod =
CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.GetTransactionDetailForUserRequest() model.from_json_string(json.dumps(args)) rsp = client.GetTransactionDetailForUser(model) result = rsp.to_json_string() try: json_obj = json.loads(result) except TypeError as e: json_obj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doSrvInvoke(args, parsed_globals): g_param = parse_global_arg(parsed_globals) cred = credential.Credential( g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token] ) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.SrvInvokeRequest() model.from_json_string(json.dumps(args)) rsp = client.SrvInvoke(model) result = rsp.to_json_string() try: json_obj = json.loads(result) except TypeError as e: json_obj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doGetBcosTransList(args, parsed_globals): g_param = parse_global_arg(parsed_globals) cred = credential.Credential( g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token] ) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.GetBcosTransListRequest() model.from_json_string(json.dumps(args)) rsp = client.GetBcosTransList(model) result = rsp.to_json_string() try: json_obj = json.loads(result) except TypeError as e: json_obj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doTransByDynamicContractHandler(args, parsed_globals): g_param = parse_global_arg(parsed_globals) cred = credential.Credential( g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token] ) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.TransByDynamicContractHandlerRequest() model.from_json_string(json.dumps(args)) rsp = 
client.TransByDynamicContractHandler(model) result = rsp.to_json_string() try: json_obj = json.loads(result) except TypeError as e: json_obj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doSendTransactionHandler(args, parsed_globals): g_param = parse_global_arg(parsed_globals) cred = credential.Credential( g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token] ) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.SendTransactionHandlerRequest() model.from_json_string(json.dumps(args)) rsp = client.SendTransactionHandler(model) result = rsp.to_json_string() try: json_obj = json.loads(result) except TypeError as e: json_obj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doGetChaincodeCompileLogForUser(args, parsed_globals): g_param = parse_global_arg(parsed_globals) cred = credential.Credential( g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token] ) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.GetChaincodeCompileLogForUserRequest() model.from_json_string(json.dumps(args)) rsp = client.GetChaincodeCompileLogForUser(model) result = rsp.to_json_string() try: json_obj = json.loads(result) except TypeError as e: json_obj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doGetTransListHandler(args, parsed_globals): g_param = parse_global_arg(parsed_globals) cred = credential.Credential( g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token] ) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.GetTransListHandlerRequest() model.from_json_string(json.dumps(args)) rsp = client.GetTransListHandler(model) result = rsp.to_json_string() try: json_obj = json.loads(result) except TypeError as e: json_obj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def 
doGetBlockList(args, parsed_globals): g_param = parse_global_arg(parsed_globals) cred = credential.Credential( g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token] ) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.GetBlockListRequest() model.from_json_string(json.dumps(args)) rsp = client.GetBlockList(model) result = rsp.to_json_string() try: json_obj = json.loads(result) except TypeError as e: json_obj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doQuery(args, parsed_globals): g_param = parse_global_arg(parsed_globals) cred = credential.Credential( g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token] ) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.QueryRequest() model.from_json_string(json.dumps(args)) rsp = client.Query(model) result = rsp.to_json_string() try: json_obj = json.loads(result) except TypeError as e: json_obj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doGetTransByHashHandler(args, parsed_globals): g_param = parse_global_arg(parsed_globals) cred = credential.Credential( g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token] ) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.TbaasClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.GetTransByHashHandlerRequest() model.from_json_string(json.dumps(args)) rsp = client.GetTransByHashHandler(model) result = rsp.to_json_string() try: json_obj = json.loads(result) except TypeError as e: json_obj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) CLIENT_MAP = { "v20180416": tbaas_client_v20180416, } MODELS_MAP = { "v20180416": models_v20180416, } ACTION_MAP = { "InvokeBcosTrans": doInvokeBcosTrans, "GetBcosBlockByNumber": doGetBcosBlockByNumber, "GetChaincodeLogForUser": doGetChaincodeLogForUser, "GetBcosTransByHash": doGetBcosTransByHash, "GetBlockTransactionListForUser": doGetBlockTransactionListForUser, "GetClusterListForUser": 
doGetClusterListForUser, "GetBcosBlockList": doGetBcosBlockList, "GetBlockListHandler": doGetBlockListHandler, "DeployDynamicContractHandler": doDeployDynamicContractHandler, "Invoke": doInvoke, "GetClusterSummary": doGetClusterSummary, "GetPeerLogForUser": doGetPeerLogForUser, "DeployDynamicBcosContract": doDeployDynamicBcosContract, "DownloadUserCert": doDownloadUserCert, "CreateChaincodeAndInstallForUser": doCreateChaincodeAndInstallForUser, "GetLatesdTransactionList": doGetLatesdTransactionList, "ApplyUserCert": doApplyUserCert, "BlockByNumberHandler": doBlockByNumberHandler, "GetInvokeTx": doGetInvokeTx, "GetChaincodeInitializeResultForUser": doGetChaincodeInitializeResultForUser, "GetChannelListForUser": doGetChannelListForUser, "InitializeChaincodeForUser": doInitializeChaincodeForUser, "GetTransactionDetailForUser": doGetTransactionDetailForUser, "SrvInvoke": doSrvInvoke, "GetBcosTransList": doGetBcosTransList, "TransByDynamicContractHandler": doTransByDynamicContractHandler, "SendTransactionHandler": doSendTransactionHandler, "GetChaincodeCompileLogForUser": doGetChaincodeCompileLogForUser, "GetTransListHandler": doGetTransListHandler, "GetBlockList": doGetBlockList, "Query": doQuery, "GetTransByHashHandler": doGetTransByHashHandler, } AVAILABLE_VERSION_LIST = [ "v20180416", ] def action_caller(): return ACTION_MAP def parse_global_arg(parsed_globals): g_param = parsed_globals is_exist_profile = True if not parsed_globals["profile"]: is_exist_profile = False g_param["profile"] = "default" configure_path = os.path.join(os.path.expanduser("~"), ".tccli") is_conf_exist, conf_path = Utils.file_existed(configure_path, g_param["profile"] + ".configure") is_cred_exist, cred_path = Utils.file_existed(configure_path, g_param["profile"] + ".credential") conf = {} cred = {} if is_conf_exist: conf = Utils.load_json_msg(conf_path) if is_cred_exist: cred = Utils.load_json_msg(cred_path) if not (isinstance(conf, dict) and isinstance(cred, dict)): raise ConfigurationError( "file: %s or %s is not json format" % (g_param["profile"] + ".configure", g_param["profile"] + ".credential")) if OptionsDefine.Token not in cred: cred[OptionsDefine.Token] = None if not is_exist_profile: if os.environ.get(OptionsDefine.ENV_SECRET_ID) and os.environ.get(OptionsDefine.ENV_SECRET_KEY): cred[OptionsDefine.SecretId] = os.environ.get(OptionsDefine.ENV_SECRET_ID) cred[OptionsDefine.SecretKey] = os.environ.get(OptionsDefine.ENV_SECRET_KEY) cred[OptionsDefine.Token] = os.environ.get(OptionsDefine.ENV_TOKEN) if os.environ.get(OptionsDefine.ENV_REGION): conf[OptionsDefine.Region] = os.environ.get(OptionsDefine.ENV_REGION) for param in g_param.keys(): if g_param[param] is None: if param in [OptionsDefine.SecretKey, OptionsDefine.SecretId, OptionsDefine.Token]: if param in cred: g_param[param] = cred[param] else: raise ConfigurationError("%s is invalid" % param) elif param in [OptionsDefine.Region, OptionsDefine.Output]: if param in conf: g_param[param] = conf[param] else: raise ConfigurationError("%s is invalid" % param) try: if g_param[OptionsDefine.ServiceVersion]: g_param[OptionsDefine.Version] = "v" + g_param[OptionsDefine.ServiceVersion].replace('-', '') else: version = conf["tbaas"][OptionsDefine.Version] g_param[OptionsDefine.Version] = "v" + version.replace('-', '') if g_param[OptionsDefine.Endpoint] is None: g_param[OptionsDefine.Endpoint] = conf["tbaas"][OptionsDefine.Endpoint] except Exception as err: raise ConfigurationError("config file:%s error, %s" % (conf_path, str(err))) if g_param[OptionsDefine.Version] 
not in AVAILABLE_VERSION_LIST: raise Exception("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST)) return g_param
43.423729
105
0.728659
4,909
43,554
6.2253
0.044612
0.078927
0.2232
0.057003
0.82644
0.820517
0.818161
0.815805
0.812729
0.763253
0
0.009041
0.164485
43,554
1,002
106
43.467066
0.83075
0.007829
0
0.699317
0
0
0.042753
0.009079
0
0
0
0
0
1
0.038724
false
0
0.013667
0.001139
0.05467
0
0
0
0
null
0
1
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
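The record above repeats one handler per API action: each doXxx function differs only in the request class it instantiates and the client method it calls. A minimal refactoring sketch of that shared pattern, assuming the same imports and helpers as the record itself (credential, HttpProfile, ClientProfile, OptionsDefine, CLIENT_MAP, MODELS_MAP, parse_global_arg, FormatOutput); the name call_action is hypothetical and this is not the actual tccli source:

import json

def call_action(action, args, parsed_globals):
    # Resolve credentials and connection settings exactly as each doXxx does.
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    client = CLIENT_MAP[g_param[OptionsDefine.Version]].TbaasClient(
        cred, g_param[OptionsDefine.Region], profile)
    # The only per-action parts: "Invoke" -> models.InvokeRequest() and client.Invoke(model).
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = getattr(models, action + "Request")()
    model.from_json_string(json.dumps(args))
    rsp = getattr(client, action)(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # result arrives as bytes on some interpreters
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])

Each ACTION_MAP entry could then be functools.partial(call_action, action_name) instead of a hand-written copy of the same body.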
d77ccdf2dffd7804c032b13e14de97f9952d2684
120
py
Python
feapder/network/downloader/__init__.py
gyco/feapder
6d7f6f318b3dd93168cbd76d9ba165b04285a05e
[ "MIT" ]
null
null
null
feapder/network/downloader/__init__.py
gyco/feapder
6d7f6f318b3dd93168cbd76d9ba165b04285a05e
[ "MIT" ]
null
null
null
feapder/network/downloader/__init__.py
gyco/feapder
6d7f6f318b3dd93168cbd76d9ba165b04285a05e
[ "MIT" ]
null
null
null
from .base import Downloader
from ._requests import RequestsDownloader
from ._requests import RequestsSessionDownloader
30
48
0.875
12
120
8.583333
0.583333
0.23301
0.349515
0
0
0
0
0
0
0
0
0
0.1
120
3
49
40
0.953704
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
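This record's content is a package __init__ that re-exports downloader classes from private submodules, so callers import from the package root rather than from internal paths. A generic, hedged sketch of the same aggregation pattern with hypothetical module and class names (not feapder's actual layout beyond what the record shows):

# pkg/__init__.py (hypothetical package illustrating the re-export pattern)
from ._impl import BaseWorker       # hypothetical private submodule and class
from ._impl import SessionWorker    # hypothetical class

__all__ = ["BaseWorker", "SessionWorker"]

# Consumers then write `from pkg import SessionWorker` and stay insulated
# from the package's internal file layout.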
d77ce2a4695d36fb80bd26cd3ea13b0924a2ad93
7,822
py
Python
tests/testcases/rabbit/rabbitutil_tests.py
sashakames/esgf-pid
c78305c1a6c3b80f551008e8f7c35d52808a8234
[ "Apache-2.0" ]
null
null
null
tests/testcases/rabbit/rabbitutil_tests.py
sashakames/esgf-pid
c78305c1a6c3b80f551008e8f7c35d52808a8234
[ "Apache-2.0" ]
null
null
null
tests/testcases/rabbit/rabbitutil_tests.py
sashakames/esgf-pid
c78305c1a6c3b80f551008e8f7c35d52808a8234
[ "Apache-2.0" ]
null
null
null
import unittest
import logging
import esgfpid.defaults
import esgfpid.rabbit.rabbitutils as rutils

# Logging
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
LOGGER_TO_PASS = logging.getLogger('utils')
LOGGER_TO_PASS.addHandler(logging.NullHandler())

'''
Unit tests for esgfpid.rabbit.rabbitutils.

This module does not need any other module to function,
so we don't need to use or to mock any other objects.
'''

class RabbitUtilsTestCase(unittest.TestCase):

    def setUp(self):
        LOGGER.info('######## Next test (%s) ##########', __name__)

    def tearDown(self):
        LOGGER.info('#############################')

    #
    # Test getting routing key and string message
    #

    def test_get_message_and_routing_key_string_ok(self):

        # Test variables:
        passed_message = '{"bla":"foo", "ROUTING_KEY":"roukey"}'
        LOGGER.info('Message: %s' % passed_message)

        # Run code to be checked:
        received_key, received_message = rutils.get_routing_key_and_string_message_from_message_if_possible(passed_message)

        # Check result:
        received_message = received_message.replace("'", '"')
        expected_message = str(passed_message).replace("'", '"')
        self.assertEquals(received_key, 'roukey', 'Wrong routing key: %s' % received_key)
        self.assertEquals(expected_message, received_message, 'Wrong message.\nExpected: %s\nReceived: %s' % (expected_message, received_message))

    def test_get_message_and_routing_key_string_singlequtoes_ok(self):

        # Test variables:
        passed_message = "{'bla':'foo', 'ROUTING_KEY':'roukey'}"
        LOGGER.info('Message: %s' % passed_message)

        # Run code to be checked:
        received_key, received_message = rutils.get_routing_key_and_string_message_from_message_if_possible(passed_message)

        # Check result:
        received_message = received_message.replace("'", '"')
        expected_message = str(passed_message).replace("'", '"')
        self.assertEquals(received_key, 'roukey', 'Wrong routing key: %s' % received_key)
        self.assertEquals(expected_message, received_message, 'Wrong message.\nExpected: %s\nReceived: %s' % (expected_message, received_message))

    def test_get_message_and_routing_key_string_no_key(self):

        # Test variables:
        passed_message = '{"bla":"foo", "no_key":"no_key"}'
        LOGGER.info('Message: %s' % passed_message)

        # Run code to be checked:
        received_key, received_message = rutils.get_routing_key_and_string_message_from_message_if_possible(passed_message)

        # Check result:
        received_message = received_message.replace("'", '"')
        expected_message = str(passed_message).replace("'", '"')
        expected_key = esgfpid.utils.RABBIT_DEFAULT_ROUTING_KEY
        self.assertEquals(received_key, expected_key, 'Wrong routing key: %s' % received_key)
        self.assertEquals(expected_message, received_message, 'Wrong message.\nExpected: %s\nReceived: %s' % (expected_message, received_message))

    def test_get_message_and_routing_key_none_error(self):

        # Test variables:
        passed_message = None
        LOGGER.info('Message: %s' % passed_message)

        # Run code to be checked:
        with self.assertRaises(ValueError):
            received_key, received_message = rutils.get_routing_key_and_string_message_from_message_if_possible(passed_message)

    def test_get_message_and_routing_key_characters(self):

        # Test variables:
        passed_message = 'foo'
        LOGGER.info('Message: %s' % passed_message)

        # Run code to be checked:
        received_key, received_message = rutils.get_routing_key_and_string_message_from_message_if_possible(passed_message)

        # Check result:
        received_message = received_message.replace("'", '"')
        expected_message = str(passed_message).replace("'", '"')
        expected_key = esgfpid.utils.RABBIT_DEFAULT_ROUTING_KEY
        self.assertEquals(received_key, expected_key, 'Wrong routing key: %s' % received_key)
        self.assertEquals(expected_message, received_message, 'Wrong message.\nExpected: %s\nReceived: %s' % (expected_message, received_message))

    def test_get_message_and_routing_key_json_ok(self):

        # Test variables:
        passed_message = {"bla":"foo", "ROUTING_KEY":"roukey"}
        LOGGER.info('Message: %s' % passed_message)

        # Run code to be checked:
        received_key, received_message = rutils.get_routing_key_and_string_message_from_message_if_possible(passed_message)

        # Check result:
        received_message = received_message.replace("'", '"')
        expected_message = str(passed_message).replace("'", '"')
        self.assertEquals(received_key, 'roukey', 'Wrong routing key: %s' % received_key)
        self.assertEquals(expected_message, received_message, 'Wrong message.\nExpected: %s\nReceived: %s' % (expected_message, received_message))

    def test_get_message_and_routing_key_json_no_rk(self):

        # Test variables:
        passed_message = {"bla":"foo", "no_key":"no_key"}
        LOGGER.info('Message: %s' % passed_message)

        # Run code to be checked:
        received_key, received_message = rutils.get_routing_key_and_string_message_from_message_if_possible(passed_message)

        # Check result:
        received_message = received_message.replace("'", '"')
        expected_message = str(passed_message).replace("'", '"')
        expected_key = esgfpid.utils.RABBIT_DEFAULT_ROUTING_KEY
        self.assertEquals(received_key, expected_key, 'Wrong routing key: %s' % received_key)
        self.assertEquals(expected_message, received_message, 'Wrong message.\nExpected: %s\nReceived: %s' % (expected_message, received_message))

    def test_get_message_and_routing_key_list(self):

        # Test variables:
        passed_message = [345]
        LOGGER.info('Message: %s' % passed_message)

        # Run code to be checked:
        received_key, received_message = rutils.get_routing_key_and_string_message_from_message_if_possible(passed_message)

        # Check result:
        received_message = received_message.replace("'", '"')
        expected_message = str(passed_message).replace("'", '"')
        expected_key = esgfpid.utils.RABBIT_DEFAULT_ROUTING_KEY
        self.assertEquals(received_key, expected_key, 'Wrong routing key: %s' % received_key)
        self.assertEquals(expected_message, received_message, 'Wrong message.\nExpected: %s\nReceived: %s' % (expected_message, received_message))

    def test_get_message_and_routing_key_int(self):

        # Test variables:
        passed_message = 345
        LOGGER.info('Message: %s' % passed_message)

        # Run code to be checked:
        received_key, received_message = rutils.get_routing_key_and_string_message_from_message_if_possible(passed_message)

        # Check result:
        received_message = received_message.replace("'", '"')
        expected_message = str(passed_message).replace("'", '"')
        expected_key = esgfpid.utils.RABBIT_DEFAULT_ROUTING_KEY
        self.assertEquals(received_key, expected_key, 'Wrong routing key: %s' % received_key)
        self.assertEquals(expected_message, received_message, 'Wrong message.\nExpected: %s\nReceived: %s' % (expected_message, received_message))

    def test_get_message_and_routing_key_bogus(self):

        # Test variables:
        def foo():
            print('yeah')
        passed_message = foo
        LOGGER.info('Message: %s' % passed_message)

        # Run code to be checked:
        with self.assertRaises(ValueError):
            received_key, received_message = rutils.get_routing_key_and_string_message_from_message_if_possible(passed_message)
43.21547
146
0.692534
916
7,822
5.550218
0.10262
0.123918
0.103855
0.094414
0.892604
0.879622
0.879622
0.879622
0.873721
0.865264
0
0.000955
0.196881
7,822
180
147
43.455556
0.808341
0.071976
0
0.634409
0
0
0.125442
0.010619
0
0
0
0
0.193548
1
0.139785
false
0.430108
0.043011
0
0.193548
0.010753
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
7
d78b9b75848a3816d08307605a315865d09cfa9e
179
py
Python
mmdet/ops/roi_align/__init__.py
arthur801031/3d-multi-resolution-rcnn
8e5454a72f8daa174bf3eabfa5964152f04ab287
[ "Apache-2.0" ]
16
2021-03-02T07:41:01.000Z
2022-03-14T08:55:45.000Z
mmdet/ops/roi_align/__init__.py
arthur801031/3d-multi-resolution-rcnn
8e5454a72f8daa174bf3eabfa5964152f04ab287
[ "Apache-2.0" ]
2
2022-01-06T20:54:13.000Z
2022-02-24T03:50:51.000Z
mmdet/ops/roi_align/__init__.py
arthur801031/3d-multi-resolution-rcnn
8e5454a72f8daa174bf3eabfa5964152f04ab287
[ "Apache-2.0" ]
2
2021-05-26T19:23:35.000Z
2022-01-06T20:30:24.000Z
from .functions.roi_align import roi_align
from .modules.roi_align import RoIAlign
from .modules.roi_align_3d import RoIAlign3D

__all__ = ['roi_align', 'RoIAlign', 'RoIAlign3D']
29.833333
49
0.804469
25
179
5.36
0.4
0.298507
0.208955
0.283582
0
0
0
0
0
0
0
0.018634
0.100559
179
5
50
35.8
0.813665
0
0
0
0
0
0.150838
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
7
ad1fa16776de7de0c0e4fcb4b0a15fcbf2a72065
4,909
py
Python
tests/test_baking_baker.py
ludovicchabant/PieCrust2
89b2bf268bfdaae24ff6cf6d8c29c0b1239be739
[ "Apache-2.0" ]
43
2015-04-24T05:30:04.000Z
2022-02-03T17:47:35.000Z
tests/test_baking_baker.py
ludovicchabant/PieCrust2
89b2bf268bfdaae24ff6cf6d8c29c0b1239be739
[ "Apache-2.0" ]
54
2015-01-03T01:58:44.000Z
2021-05-06T21:56:26.000Z
tests/test_baking_baker.py
ludovicchabant/PieCrust2
89b2bf268bfdaae24ff6cf6d8c29c0b1239be739
[ "Apache-2.0" ]
8
2015-05-10T01:50:46.000Z
2016-12-26T20:53:15.000Z
import time
from .mockutil import get_mock_app, mock_fs, mock_fs_scope


def test_bake_and_add_post():
    fs = (mock_fs()
          .withConfig()
          .withPage('pages/_index.html',
                    {'layout': 'none', 'format': 'none'},
                    "{% for p in pagination.posts -%}\n"
                    "{{p.title}}\n"
                    "{% endfor %}")
          .withPage('posts/2017-01-01_first.html',
                    {'title': "First"},
                    "something"))
    with mock_fs_scope(fs):
        fs.runChef('bake')
        structure = fs.getStructure('kitchen/_counter')
        assert structure['index.html'] == 'First\n'

        time.sleep(1)
        fs.withPage('posts/2017-01-02_second.html',
                    {'title': "Second"},
                    "something else")
        fs.runChef('bake')
        structure = fs.getStructure('kitchen/_counter')
        assert structure['index.html'] == 'Second\nFirst\n'


def test_bake_four_times():
    fs = (mock_fs()
          .withConfig({'site': {
              'default_format': 'none',
              'default_page_layout': 'none',
              'default_post_layout': 'none',
          }})
          .withPage('pages/_index.html',
                    {'layout': 'none', 'format': 'none'},
                    "{% for p in pagination.posts -%}\n"
                    "{{p.title}}\n"
                    "{% endfor %}")
          .withPage('posts/2017-01-01_first.html',
                    {'title': "First"},
                    "something 1")
          .withPage('posts/2017-01-02_second.html',
                    {'title': "Second"},
                    "something 2"))
    with mock_fs_scope(fs):
        fs.runChef('bake')
        structure = fs.getStructure('kitchen/_counter')
        assert structure['index.html'] == 'Second\nFirst\n'
        assert structure['2017']['01']['01']['first.html'] == 'something 1'
        assert structure['2017']['01']['02']['second.html'] == 'something 2'

        print("\n\n\n")
        fs.runChef('bake')
        structure = fs.getStructure('kitchen/_counter')
        assert structure['index.html'] == 'Second\nFirst\n'
        assert structure['2017']['01']['01']['first.html'] == 'something 1'
        assert structure['2017']['01']['02']['second.html'] == 'something 2'

        print("\n\n\n")
        fs.runChef('bake')
        structure = fs.getStructure('kitchen/_counter')
        assert structure['index.html'] == 'Second\nFirst\n'
        assert structure['2017']['01']['01']['first.html'] == 'something 1'
        assert structure['2017']['01']['02']['second.html'] == 'something 2'

        print("\n\n\n")
        fs.runChef('bake')
        structure = fs.getStructure('kitchen/_counter')
        assert structure['index.html'] == 'Second\nFirst\n'
        assert structure['2017']['01']['01']['first.html'] == 'something 1'
        assert structure['2017']['01']['02']['second.html'] == 'something 2'


def test_bake_four_times_again():
    fs = (mock_fs()
          .withConfig({'site': {
              'default_format': 'none',
              'default_page_layout': 'none',
              'default_post_layout': 'none',
          }})
          .withPage('pages/_index.html',
                    {'layout': 'none', 'format': 'none'},
                    "{% for p in pagination.posts -%}\n"
                    "{{p.title}} : {{p.content}}\n"
                    "{% endfor %}")
          .withPage('posts/2017-01-01_first.html',
                    {'title': "First"},
                    "something 1")
          .withPage('posts/2017-01-02_second.html',
                    {'title': "Second"},
                    "something 2"))
    with mock_fs_scope(fs):
        fs.runChef('bake')
        structure = fs.getStructure('kitchen/_counter')
        assert structure['index.html'] == 'Second : something 2\nFirst : something 1\n'
        assert structure['2017']['01']['01']['first.html'] == 'something 1'
        assert structure['2017']['01']['02']['second.html'] == 'something 2'

        print("\n\n\n")
        fs.runChef('bake')
        structure = fs.getStructure('kitchen/_counter')
        assert structure['index.html'] == 'Second : something 2\nFirst : something 1\n'
        assert structure['2017']['01']['01']['first.html'] == 'something 1'
        assert structure['2017']['01']['02']['second.html'] == 'something 2'

        print("\n\n\n")
        fs.runChef('bake')
        structure = fs.getStructure('kitchen/_counter')
        assert structure['index.html'] == 'Second : something 2\nFirst : something 1\n'
        assert structure['2017']['01']['01']['first.html'] == 'something 1'
        assert structure['2017']['01']['02']['second.html'] == 'something 2'

        print("\n\n\n")
        fs.runChef('bake')
        structure = fs.getStructure('kitchen/_counter')
        assert structure['index.html'] == 'Second : something 2\nFirst : something 1\n'
        assert structure['2017']['01']['01']['first.html'] == 'something 1'
        assert structure['2017']['01']['02']['second.html'] == 'something 2'
43.061404
87
0.536973
539
4,909
4.790353
0.105751
0.151046
0.117738
0.130132
0.950039
0.934547
0.934547
0.934547
0.934547
0.934547
0
0.056787
0.264616
4,909
113
88
43.442478
0.658449
0
0
0.878788
0
0
0.366137
0.033619
0
0
0
0
0.262626
1
0.030303
false
0
0.020202
0
0.050505
0.060606
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
ad711603e1c21a48e99b8174488f8ac89ec8300d
12,097
py
Python
pomodoro_system/web_app/tests/test_projects_rest_endpoints.py
kamil559/pomodorr_v2
33b53db4f1e82025b186e551a4e55840ee75c740
[ "MIT" ]
null
null
null
pomodoro_system/web_app/tests/test_projects_rest_endpoints.py
kamil559/pomodorr_v2
33b53db4f1e82025b186e551a4e55840ee75c740
[ "MIT" ]
null
null
null
pomodoro_system/web_app/tests/test_projects_rest_endpoints.py
kamil559/pomodorr_v2
33b53db4f1e82025b186e551a4e55840ee75c740
[ "MIT" ]
null
null
null
import uuid import pytest from flask import testing from foundation.value_objects import Priority from pomodoros_infrastructure import ProjectModel from pony.orm import db_session class TestProjectsRestAPI: def test_get_project(self, client: testing.FlaskClient, project_owner_authorization_token, orm_project): response = client.get( f"projects/{orm_project.id}", headers={"Authorization": project_owner_authorization_token} ) assert response.status_code == 200 def test_get_project_with_non_existing_project_id( self, client: testing.FlaskClient, project_owner_authorization_token ): random_uuid = uuid.uuid4() response = client.get( f"projects/{str(random_uuid)}", headers={"Authorization": project_owner_authorization_token} ) assert response.status_code == 404 def test_get_project_with_non_authenticated_user(self, client: testing.FlaskClient, orm_project): response = client.get(f"projects/{orm_project.id}") assert response.status_code == 401 def test_get_project_with_non_authorized_user( self, client: testing.FlaskClient, random_project_owner_authorization_token, orm_project ): response = client.get( f"projects/{orm_project.id}", headers={"Authorization": random_project_owner_authorization_token} ) assert response.status_code == 403 def test_get_project_list( self, client: testing.FlaskClient, project_owner_authorization_token, orm_project, orm_second_project, orm_random_project, ): response = client.get("projects/", headers={"Authorization": project_owner_authorization_token}) assert response.status_code == 200 assert len(response.json) == 2 @pytest.mark.parametrize( "page_size, page, expected_length", [(1, 1, 1), (1, 2, 1), (2, 1, 2), (2, 2, 0), (1, 3, 0)] ) def test_get_paginated_project_list( self, page_size, page, expected_length, client: testing.FlaskClient, project_owner_authorization_token, orm_project, orm_second_project, orm_random_project, ): response = client.get( f"projects/?page_size={page_size}&page={page}", headers={"Authorization": project_owner_authorization_token}, ) assert response.status_code == 200 assert len(response.json) == expected_length @pytest.mark.parametrize( "page_size, page, expected_length", [ ("", 1, 2), (1, "", 2), ("", "", 2), ("xyz", "xyz", 2), (1, "null", 2), ("null", 1, 2), ], ) def test_get_paginated_project_list_with_wrong_params_returns_default_project_list( self, page_size, page, expected_length, client: testing.FlaskClient, project_owner_authorization_token, orm_project, orm_second_project, orm_random_project, ): response = client.get( f"projects/?page_size={page_size}&page={page}", headers={"Authorization": project_owner_authorization_token}, ) assert response.status_code == 200 assert len(response.json) == expected_length @pytest.mark.parametrize( "sort_field", [ "name", "-name", "ordering", "-ordering", "created_at", "-created_at", ], ) def test_get_sorted_project_list( self, sort_field, client: testing.FlaskClient, project_owner_authorization_token, orm_project, orm_second_project, ): response = client.get( f"projects/?sort={sort_field}", headers={"Authorization": project_owner_authorization_token}, ) clean_sort_field = sort_field.lstrip("-") first_response_project = response.json[0] first_sorted_project = sorted( [orm_project, orm_second_project], reverse=sort_field.startswith("-"), key=lambda project: getattr(project, clean_sort_field), )[0] assert response.status_code == 200 assert first_response_project["id"] == str(first_sorted_project.id) @pytest.mark.parametrize("sort_field", ["", 323, "xyz", b"xyz", "null"]) def 
test_get_sorted_project_list_with_wrong_fields_returns_default_project_list( self, sort_field, client: testing.FlaskClient, project_owner_authorization_token, orm_project, orm_second_project, ): response = client.get( f"projects/?sort={sort_field}", headers={"Authorization": project_owner_authorization_token}, ) default_sorted_project = sorted([orm_project, orm_second_project], key=lambda project: project.created_at) assert response.status_code == 200 assert response.json[0]["id"] == str(default_sorted_project[0].id) def test_get_project_list_with_non_authenticated_user( self, client: testing.FlaskClient, orm_project, orm_second_project, ): response = client.get("projects/", headers={}) assert response.status_code == 401 def test_create_project_with_valid_data( self, client: testing.FlaskClient, project_owner_authorization_token, project_data ): response = client.post( "projects/", headers={ "Authorization": project_owner_authorization_token, }, json=project_data, ) assert response.status_code == 201 def test_create_project_without_priority_key_saves_default_values( self, client: testing.FlaskClient, project_owner_authorization_token, project_data, ): project_data["priority"] = None response = client.post( "projects/", headers={ "Authorization": project_owner_authorization_token, }, json=project_data, ) expected_priority_data = {"color": Priority.color.hex, "priority_level": Priority.priority_level.value} assert response.status_code == 201 assert response.json["priority"] == expected_priority_data @db_session def test_create_project_saves_authenticated_user_as_owner( self, client: testing.FlaskClient, project_owner, random_project_owner, project_owner_authorization_token, project_data, ): project_data["owner_id"] = str(random_project_owner.id) response = client.post( "projects/", headers={ "Authorization": project_owner_authorization_token, }, json=project_data, ) fetched_project = ProjectModel[response.json["id"]] assert response.status_code == 201 assert fetched_project.owner.id == project_owner.id @pytest.mark.parametrize( "data_key, invalid_value", [ ("priority", {"color": "", "priority_level": ""}), ("priority", ""), ], ) def test_create_project_with_invalid_data( self, data_key, invalid_value, client: testing.FlaskClient, project_owner_authorization_token, project_data ): project_data[data_key] = invalid_value response = client.post( "projects/", headers={ "Authorization": project_owner_authorization_token, }, json=project_data, ) assert response.status_code == 400 assert response.json[data_key] def test_create_project_with_non_authenticated_user(self, client: testing.FlaskClient, project_data): response = client.post("projects/", json=project_data) assert response.status_code == 401 def test_update_project_with_valid_data( self, client: testing.FlaskClient, project_owner_authorization_token, orm_project, project_data ): response = client.patch( f"projects/{str(orm_project.id)}", headers={ "Authorization": project_owner_authorization_token, }, json=project_data, ) assert response.status_code == 200 @pytest.mark.parametrize( "data_key, invalid_value", [ ("priority", {"color": "", "priority_level": ""}), ("priority", ""), ], ) def test_update_project_with_invalid_data( self, data_key, invalid_value, client: testing.FlaskClient, project_owner_authorization_token, orm_project, project_data, ): project_data[data_key] = invalid_value response = client.patch( f"projects/{str(orm_project.id)}", headers={ "Authorization": project_owner_authorization_token, }, json=project_data, ) assert 
response.status_code == 400 assert response.json[data_key] def test_update_project_with_non_authenticated_user(self, client: testing.FlaskClient, orm_project, project_data): response = client.patch(f"projects/{str(orm_project.id)}", json=project_data) assert response.status_code == 401 def test_update_project_with_non_authorized_user( self, client: testing.FlaskClient, random_project_owner_authorization_token, orm_project, project_data ): response = client.patch( f"projects/{str(orm_project.id)}", headers={ "Authorization": random_project_owner_authorization_token, }, json=project_data, ) assert response.status_code == 403 @db_session def test_soft_delete_project( self, client: testing.FlaskClient, project_owner_authorization_token, orm_project, ): response = client.delete( f"projects/{str(orm_project.id)}", headers={ "Authorization": project_owner_authorization_token, }, ) fetched_project = ProjectModel[orm_project.id] assert response.status_code == 204 assert fetched_project.is_removed @db_session def test_delete_project_permanently( self, client: testing.FlaskClient, project_owner_authorization_token, orm_project, ): response = client.delete( f"projects/{str(orm_project.id)}?permanently=1", headers={ "Authorization": project_owner_authorization_token, }, ) fetched_project_exists = ProjectModel.exists(id=orm_project.id) assert response.status_code == 204 assert not fetched_project_exists def test_delete_project_with_non_existing_project_id( self, client: testing.FlaskClient, project_owner_authorization_token, ): random_uud = uuid.uuid4() response = client.delete( f"projects/{str(random_uud)}", headers={ "Authorization": project_owner_authorization_token, }, ) assert response.status_code == 404 def test_delete_project_with_non_authenticated_user(self, client: testing.FlaskClient, orm_project): response = client.delete( f"projects/{str(orm_project.id)}", ) assert response.status_code == 401 def test_delete_project_with_non_authorized_user( self, client: testing.FlaskClient, orm_project, random_project_owner_authorization_token, ): response = client.delete( f"projects/{str(orm_project.id)}", headers={ "Authorization": random_project_owner_authorization_token, }, ) assert response.status_code == 403
31.339378
118
0.620484
1,227
12,097
5.752241
0.09454
0.073109
0.134599
0.161519
0.838056
0.807594
0.751629
0.744262
0.708416
0.668745
0
0.012516
0.286683
12,097
385
119
31.420779
0.805424
0
0
0.606061
0
0
0.095396
0.043151
0
0
0
0
0.106061
1
0.072727
false
0
0.018182
0
0.093939
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
a8f2ad787681015be1914d60d90b1a48ef86429b
128
py
Python
G/__init__.py
xiangpengm/G
e1f532412a6513ca8aab001c688f77befd12e577
[ "MIT" ]
null
null
null
G/__init__.py
xiangpengm/G
e1f532412a6513ca8aab001c688f77befd12e577
[ "MIT" ]
null
null
null
G/__init__.py
xiangpengm/G
e1f532412a6513ca8aab001c688f77befd12e577
[ "MIT" ]
null
null
null
from .G import G
from .G import html_content
from .G import Request
from .G import redirect
from .G import response_with_headers
25.6
36
0.8125
23
128
4.391304
0.434783
0.247525
0.544554
0
0
0
0
0
0
0
0
0
0.148438
128
5
36
25.6
0.926606
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
d11f0eef0ad4a12173a7a274a24f94f9df87f277
22,957
py
Python
tb_rest_client/api/api_ce/event_controller_api.py
jernkuan/thingsboard-python-rest-client
3fb25272507494e6d494b27ca2380d3c543562e5
[ "Apache-2.0" ]
null
null
null
tb_rest_client/api/api_ce/event_controller_api.py
jernkuan/thingsboard-python-rest-client
3fb25272507494e6d494b27ca2380d3c543562e5
[ "Apache-2.0" ]
null
null
null
tb_rest_client/api/api_ce/event_controller_api.py
jernkuan/thingsboard-python-rest-client
3fb25272507494e6d494b27ca2380d3c543562e5
[ "Apache-2.0" ]
1
2021-11-26T11:24:56.000Z
2021-11-26T11:24:56.000Z
# coding: utf-8 """ ThingsBoard REST API For instructions how to authorize requests please visit <a href='http://thingsboard.io/docs/reference/rest-api/'>REST API documentation page</a>. # noqa: E501 OpenAPI spec version: 2.0 Contact: info@thingsboard.io Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import re # noqa: F401 # python 2 and python 3 compatibility library import six from tb_rest_client.api_client import ApiClient class EventControllerApi(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def get_events_using_get(self, entity_type, entity_id, event_type, tenant_id, page_size, page, **kwargs): # noqa: E501 """getEvents # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_events_using_get(entity_type, entity_id, event_type, tenant_id, page_size, page, async_req=True) >>> result = thread.get() :param async_req bool :param str entity_type: entityType (required) :param str entity_id: entityId (required) :param str event_type: eventType (required) :param str tenant_id: tenantId (required) :param int page_size: pageSize (required) :param int page: page (required) :param str text_search: textSearch :param str sort_property: sortProperty :param str sort_order: sortOrder :param int start_time: startTime :param int end_time: endTime :return: PageDataEvent If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_events_using_get_with_http_info(entity_type, entity_id, event_type, tenant_id, page_size, page, **kwargs) # noqa: E501 else: (data) = self.get_events_using_get_with_http_info(entity_type, entity_id, event_type, tenant_id, page_size, page, **kwargs) # noqa: E501 return data def get_events_using_get_with_http_info(self, entity_type, entity_id, event_type, tenant_id, page_size, page, **kwargs): # noqa: E501 """getEvents # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_events_using_get_with_http_info(entity_type, entity_id, event_type, tenant_id, page_size, page, async_req=True) >>> result = thread.get() :param async_req bool :param str entity_type: entityType (required) :param str entity_id: entityId (required) :param str event_type: eventType (required) :param str tenant_id: tenantId (required) :param int page_size: pageSize (required) :param int page: page (required) :param str text_search: textSearch :param str sort_property: sortProperty :param str sort_order: sortOrder :param int start_time: startTime :param int end_time: endTime :return: PageDataEvent If the method is called asynchronously, returns the request thread. 
""" all_params = ['entity_type', 'entity_id', 'event_type', 'tenant_id', 'page_size', 'page', 'text_search', 'sort_property', 'sort_order', 'start_time', 'end_time'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_events_using_get" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'entity_type' is set if ('entity_type' not in params or params['entity_type'] is None): raise ValueError("Missing the required parameter `entity_type` when calling `get_events_using_get`") # noqa: E501 # verify the required parameter 'entity_id' is set if ('entity_id' not in params or params['entity_id'] is None): raise ValueError("Missing the required parameter `entity_id` when calling `get_events_using_get`") # noqa: E501 # verify the required parameter 'event_type' is set if ('event_type' not in params or params['event_type'] is None): raise ValueError("Missing the required parameter `event_type` when calling `get_events_using_get`") # noqa: E501 # verify the required parameter 'tenant_id' is set if ('tenant_id' not in params or params['tenant_id'] is None): raise ValueError("Missing the required parameter `tenant_id` when calling `get_events_using_get`") # noqa: E501 # verify the required parameter 'page_size' is set if ('page_size' not in params or params['page_size'] is None): raise ValueError("Missing the required parameter `page_size` when calling `get_events_using_get`") # noqa: E501 # verify the required parameter 'page' is set if ('page' not in params or params['page'] is None): raise ValueError("Missing the required parameter `page` when calling `get_events_using_get`") # noqa: E501 collection_formats = {} path_params = {} if 'entity_type' in params: path_params['entityType'] = params['entity_type'] # noqa: E501 if 'entity_id' in params: path_params['entityId'] = params['entity_id'] # noqa: E501 if 'event_type' in params: path_params['eventType'] = params['event_type'] # noqa: E501 query_params = [] if 'tenant_id' in params: query_params.append(('tenantId', params['tenant_id'])) # noqa: E501 if 'page_size' in params: query_params.append(('pageSize', params['page_size'])) # noqa: E501 if 'page' in params: query_params.append(('page', params['page'])) # noqa: E501 if 'text_search' in params: query_params.append(('textSearch', params['text_search'])) # noqa: E501 if 'sort_property' in params: query_params.append(('sortProperty', params['sort_property'])) # noqa: E501 if 'sort_order' in params: query_params.append(('sortOrder', params['sort_order'])) # noqa: E501 if 'start_time' in params: query_params.append(('startTime', params['start_time'])) # noqa: E501 if 'end_time' in params: query_params.append(('endTime', params['end_time'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['*/*']) # noqa: E501 # Authentication setting auth_settings = ['X-Authorization'] # noqa: E501 return self.api_client.call_api( '/api/events/{entityType}/{entityId}/{eventType}{?tenantId,pageSize,page,textSearch,sortProperty,sortOrder,startTime,endTime}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, 
            response_type='PageDataEvent',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_events_using_get1(self, entity_type, entity_id, tenant_id, page_size, page, **kwargs):  # noqa: E501
        """getEvents  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_events_using_get1(entity_type, entity_id, tenant_id, page_size, page, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str entity_type: entityType (required)
        :param str entity_id: entityId (required)
        :param str tenant_id: tenantId (required)
        :param int page_size: pageSize (required)
        :param int page: page (required)
        :param str text_search: textSearch
        :param str sort_property: sortProperty
        :param str sort_order: sortOrder
        :param int start_time: startTime
        :param int end_time: endTime
        :return: PageDataEvent
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_events_using_get1_with_http_info(entity_type, entity_id, tenant_id, page_size, page, **kwargs)  # noqa: E501
        else:
            (data) = self.get_events_using_get1_with_http_info(entity_type, entity_id, tenant_id, page_size, page, **kwargs)  # noqa: E501
            return data

    def get_events_using_get1_with_http_info(self, entity_type, entity_id, tenant_id, page_size, page, **kwargs):  # noqa: E501
        """getEvents  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_events_using_get1_with_http_info(entity_type, entity_id, tenant_id, page_size, page, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str entity_type: entityType (required)
        :param str entity_id: entityId (required)
        :param str tenant_id: tenantId (required)
        :param int page_size: pageSize (required)
        :param int page: page (required)
        :param str text_search: textSearch
        :param str sort_property: sortProperty
        :param str sort_order: sortOrder
        :param int start_time: startTime
        :param int end_time: endTime
        :return: PageDataEvent
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['entity_type', 'entity_id', 'tenant_id', 'page_size', 'page', 'text_search', 'sort_property', 'sort_order', 'start_time', 'end_time']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_events_using_get1" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'entity_type' is set
        if ('entity_type' not in params or params['entity_type'] is None):
            raise ValueError("Missing the required parameter `entity_type` when calling `get_events_using_get1`")  # noqa: E501
        # verify the required parameter 'entity_id' is set
        if ('entity_id' not in params or params['entity_id'] is None):
            raise ValueError("Missing the required parameter `entity_id` when calling `get_events_using_get1`")  # noqa: E501
        # verify the required parameter 'tenant_id' is set
        if ('tenant_id' not in params or params['tenant_id'] is None):
            raise ValueError("Missing the required parameter `tenant_id` when calling `get_events_using_get1`")  # noqa: E501
        # verify the required parameter 'page_size' is set
        if ('page_size' not in params or params['page_size'] is None):
            raise ValueError("Missing the required parameter `page_size` when calling `get_events_using_get1`")  # noqa: E501
        # verify the required parameter 'page' is set
        if ('page' not in params or params['page'] is None):
            raise ValueError("Missing the required parameter `page` when calling `get_events_using_get1`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'entity_type' in params:
            path_params['entityType'] = params['entity_type']  # noqa: E501
        if 'entity_id' in params:
            path_params['entityId'] = params['entity_id']  # noqa: E501

        query_params = []
        if 'tenant_id' in params:
            query_params.append(('tenantId', params['tenant_id']))  # noqa: E501
        if 'page_size' in params:
            query_params.append(('pageSize', params['page_size']))  # noqa: E501
        if 'page' in params:
            query_params.append(('page', params['page']))  # noqa: E501
        if 'text_search' in params:
            query_params.append(('textSearch', params['text_search']))  # noqa: E501
        if 'sort_property' in params:
            query_params.append(('sortProperty', params['sort_property']))  # noqa: E501
        if 'sort_order' in params:
            query_params.append(('sortOrder', params['sort_order']))  # noqa: E501
        if 'start_time' in params:
            query_params.append(('startTime', params['start_time']))  # noqa: E501
        if 'end_time' in params:
            query_params.append(('endTime', params['end_time']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501

        return self.api_client.call_api(
            '/api/events/{entityType}/{entityId}{?tenantId,pageSize,page,textSearch,sortProperty,sortOrder,startTime,endTime}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='PageDataEvent',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_events_using_post(self, body, tenant_id, page_size, page, entity_type, entity_id, **kwargs):  # noqa: E501
        """getEvents  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_events_using_post(body, tenant_id, page_size, page, entity_type, entity_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param EventFilter body: eventFilter (required)
        :param str tenant_id: tenantId (required)
        :param int page_size: pageSize (required)
        :param int page: page (required)
        :param str entity_type: entityType (required)
        :param str entity_id: entityId (required)
        :param str text_search: textSearch
        :param str sort_property: sortProperty
        :param str sort_order: sortOrder
        :param int start_time: startTime
        :param int end_time: endTime
        :return: PageDataEvent
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_events_using_post_with_http_info(body, tenant_id, page_size, page, entity_type, entity_id, **kwargs)  # noqa: E501
        else:
            (data) = self.get_events_using_post_with_http_info(body, tenant_id, page_size, page, entity_type, entity_id, **kwargs)  # noqa: E501
            return data

    def get_events_using_post_with_http_info(self, body, tenant_id, page_size, page, entity_type, entity_id, **kwargs):  # noqa: E501
        """getEvents  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_events_using_post_with_http_info(body, tenant_id, page_size, page, entity_type, entity_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param EventFilter body: eventFilter (required)
        :param str tenant_id: tenantId (required)
        :param int page_size: pageSize (required)
        :param int page: page (required)
        :param str entity_type: entityType (required)
        :param str entity_id: entityId (required)
        :param str text_search: textSearch
        :param str sort_property: sortProperty
        :param str sort_order: sortOrder
        :param int start_time: startTime
        :param int end_time: endTime
        :return: PageDataEvent
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['body', 'tenant_id', 'page_size', 'page', 'entity_type', 'entity_id', 'text_search', 'sort_property', 'sort_order', 'start_time', 'end_time']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_events_using_post" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params or params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `get_events_using_post`")  # noqa: E501
        # verify the required parameter 'tenant_id' is set
        if ('tenant_id' not in params or params['tenant_id'] is None):
            raise ValueError("Missing the required parameter `tenant_id` when calling `get_events_using_post`")  # noqa: E501
        # verify the required parameter 'page_size' is set
        if ('page_size' not in params or params['page_size'] is None):
            raise ValueError("Missing the required parameter `page_size` when calling `get_events_using_post`")  # noqa: E501
        # verify the required parameter 'page' is set
        if ('page' not in params or params['page'] is None):
            raise ValueError("Missing the required parameter `page` when calling `get_events_using_post`")  # noqa: E501
        # verify the required parameter 'entity_type' is set
        if ('entity_type' not in params or params['entity_type'] is None):
            raise ValueError("Missing the required parameter `entity_type` when calling `get_events_using_post`")  # noqa: E501
        # verify the required parameter 'entity_id' is set
        if ('entity_id' not in params or params['entity_id'] is None):
            raise ValueError("Missing the required parameter `entity_id` when calling `get_events_using_post`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'entity_type' in params:
            path_params['entityType'] = params['entity_type']  # noqa: E501
        if 'entity_id' in params:
            path_params['entityId'] = params['entity_id']  # noqa: E501

        query_params = []
        if 'tenant_id' in params:
            query_params.append(('tenantId', params['tenant_id']))  # noqa: E501
        if 'page_size' in params:
            query_params.append(('pageSize', params['page_size']))  # noqa: E501
        if 'page' in params:
            query_params.append(('page', params['page']))  # noqa: E501
        if 'text_search' in params:
            query_params.append(('textSearch', params['text_search']))  # noqa: E501
        if 'sort_property' in params:
            query_params.append(('sortProperty', params['sort_property']))  # noqa: E501
        if 'sort_order' in params:
            query_params.append(('sortOrder', params['sort_order']))  # noqa: E501
        if 'start_time' in params:
            query_params.append(('startTime', params['start_time']))  # noqa: E501
        if 'end_time' in params:
            query_params.append(('endTime', params['end_time']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501

        return self.api_client.call_api(
            '/api/events/{entityType}/{entityId}{?tenantId,pageSize,page,textSearch,sortProperty,sortOrder,startTime,endTime}', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='PageDataEvent',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
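A minimal usage sketch of the two generated methods above. The package and class names (`tb_api_client`, `Configuration`, `ApiClient`, `EventControllerApi`) are assumptions in the usual swagger-codegen layout, not names confirmed by this file; only the method names, parameter order, and the `X-Authorization` auth setting come from the code above.

# Hedged sketch; package/class names below are hypothetical swagger-codegen output.
from tb_api_client import ApiClient, Configuration          # hypothetical package
from tb_api_client.api import EventControllerApi            # hypothetical module path

config = Configuration()
config.host = 'http://localhost:8080'
# The generated client authenticates via the X-Authorization header,
# matching auth_settings = ['X-Authorization'] in the methods above.
config.api_key['X-Authorization'] = 'Bearer <JWT>'

api = EventControllerApi(ApiClient(config))

# Synchronous call: returns a PageDataEvent directly.
page = api.get_events_using_get1('DEVICE', '<device-uuid>', '<tenant-uuid>',
                                 10, 0, sort_order='DESC')

# Asynchronous call: returns a thread-like handle; .get() blocks for the result.
thread = api.get_events_using_get1('DEVICE', '<device-uuid>', '<tenant-uuid>',
                                   10, 0, async_req=True)
page = thread.get()

# The POST variant additionally takes an EventFilter body (per its docstring):
# page = api.get_events_using_post(event_filter, '<tenant-uuid>', 10, 0,
#                                  'DEVICE', '<device-uuid>')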
46.94683
183
0.630701
2,785
22,957
4.951167
0.066068
0.046994
0.038581
0.03307
0.945899
0.938067
0.936399
0.929219
0.92349
0.91689
0
0.015728
0.271595
22,957
488
184
47.043033
0.808875
0.318116
0
0.775281
0
0.011236
0.273403
0.066974
0
0
0
0
0
1
0.026217
false
0
0.014981
0
0.078652
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
0f39d90bd34d1ebe1bee510f013ce8053127c132
4,856
py
Python
tests/sentry/web/frontend/test_2fa.py
JannKleen/sentry
8b29c8234bb51a81d5cab821a1f2ed4ea8e8bd88
[ "BSD-3-Clause" ]
1
2019-02-27T15:13:06.000Z
2019-02-27T15:13:06.000Z
tests/sentry/web/frontend/test_2fa.py
rmax/sentry
8b29c8234bb51a81d5cab821a1f2ed4ea8e8bd88
[ "BSD-3-Clause" ]
5
2020-07-17T11:20:41.000Z
2021-05-09T12:16:53.000Z
tests/sentry/web/frontend/test_2fa.py
zaasmi/codeerrorhelp
1ab8d3e314386b9b2d58dad9df45355bf6014ac9
[ "BSD-3-Clause" ]
2
2021-01-26T09:53:39.000Z
2022-03-22T09:01:47.000Z
from __future__ import absolute_import

from django.core.urlresolvers import reverse

from sentry.testutils import TestCase
from sentry.models import TotpInterface


class TwoFactorAuthTest(TestCase):
    def test_security_renders_without_2fa(self):
        user = self.create_user('foo@example.com')
        self.login_as(user)
        path = reverse('sentry-account-security')
        resp = self.client.get(path)
        assert resp.status_code == 200
        self.assertTemplateUsed('sentry/account/security.html')
        assert 'has_2fa' in resp.context
        assert resp.context['has_2fa'] is False
        self.assertContains(resp, 'Enable')

    def test_security_renders_with_2fa(self):
        user = self.create_user('foo@example.com')
        self.login_as(user)
        TotpInterface().enroll(user)
        path = reverse('sentry-account-security')
        resp = self.client.get(path)
        self.assertTemplateUsed('sentry/account/security.html')
        assert 'has_2fa' in resp.context
        assert resp.context['has_2fa'] is True
        self.assertContains(resp, 'Manage')

    def test_2fa_settings_render_without_2fa(self):
        user = self.create_user('foo@example.com')
        path = reverse('sentry-account-settings-2fa')
        self.login_as(user)
        resp = self.client.get(path)
        assert resp.status_code == 200
        self.assertTemplateUsed('sentry/account/twofactor.html')
        assert 'has_2fa' in resp.context
        assert resp.context['has_2fa'] is False
        self.assertContains(resp, 'Add</button>')
        self.assertContains(resp, 'this can only be managed if 2FA is enabled')
        self.assertNotContains(resp, '<span class="icon-trash">')

    def test_2fa_settings_render_with_2fa(self):
        user = self.create_user('foo@example.com')
        path = reverse('sentry-account-settings-2fa')
        self.login_as(user)
        TotpInterface().enroll(user)
        resp = self.client.get(path)
        assert resp.status_code == 200
        self.assertTemplateUsed('sentry/account/twofactor.html')
        assert 'has_2fa' in resp.context
        assert resp.context['has_2fa'] is True
        self.assertNotContains(resp, 'this can only be managed if 2FA is enabled')
        self.assertContains(resp, '<span class="icon-trash">')

    def test_add_2fa_SSO(self):
        user = self.create_user('foo@example.com')
        user.set_unusable_password()
        user.save()
        path = reverse('sentry-account-settings-2fa-totp')
        self.login_as(user)
        resp = self.client.post(path, data={'enroll': ''})
        assert resp.status_code == 200
        self.assertTemplateUsed('sentry/account/twofactor/enroll_totp.html')
        assert 'otp_form' in resp.context
        self.assertContains(resp, 'One-time password')
        self.assertContains(resp, 'Authenticator App')
        self.assertNotContains(resp, 'Sentry account password')

    def test_add_2fa_password(self):
        user = self.create_user('foo@example.com')
        path = reverse('sentry-account-settings-2fa-totp')
        self.login_as(user)
        resp = self.client.post(path, data={'enroll': ''})
        self.assertContains(resp, 'Scan the below QR code')
        self.assertContains(resp, 'Sentry account password')
        self.assertNotContains(resp, 'Method is currently not enabled')

    def test_totp_get_path_render(self):
        user = self.create_user('foo@example.com')
        path = reverse('sentry-account-settings-2fa-totp')
        self.login_as(user)
        resp = self.client.get(path)
        self.assertNotContains(resp, 'Scan the below QR code')
        self.assertNotContains(resp, 'Sentry account password')
        self.assertContains(resp, 'Method is currently not enabled')

    def test_remove_2fa_SSO(self):
        user = self.create_user('foo@example.com')
        user.set_unusable_password()
        user.save()
        TotpInterface().enroll(user)
        path = reverse('sentry-account-settings-2fa-totp')
        self.login_as(user)
        resp = self.client.post(path, data={'remove': ''})
        assert resp.status_code == 200
        self.assertTemplateUsed('sentry/account/twofactor/remove.html')
        self.assertContains(resp, 'Do you want to remove the method?')
        self.assertNotContains(resp, 'Sentry account password')

    def test_remove_2fa_password(self):
        user = self.create_user('foo@example.com')
        TotpInterface().enroll(user)
        path = reverse('sentry-account-settings-2fa-totp')
        self.login_as(user)
        resp = self.client.post(path, data={'remove': ''})
        assert resp.status_code == 200
        self.assertTemplateUsed('sentry/account/twofactor/remove.html')
        self.assertContains(resp, 'Do you want to remove the method?')
        self.assertContains(resp, 'Sentry account password')
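Note that `django.core.urlresolvers` is the pre-Django-2.0 import path; the same `reverse` lives at `django.urls` in current Django. For reference, a self-contained sketch of the same request/assert pattern with plain Django; the `account-security` URL name and the asserted copy are hypothetical, not Sentry's.

# Hedged sketch of the pattern used above, outside the Sentry test utilities.
from django.test import TestCase
from django.urls import reverse  # modern home of django.core.urlresolvers.reverse


class SecurityPageSmokeTest(TestCase):
    def test_security_page_renders(self):
        # 'account-security' is a hypothetical URL name for this sketch.
        resp = self.client.get(reverse('account-security'))
        self.assertEqual(resp.status_code, 200)
        self.assertContains(resp, 'Enable')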
42.973451
82
0.667628
596
4,856
5.310403
0.15604
0.086256
0.090363
0.051185
0.831912
0.81643
0.777251
0.73049
0.67425
0.672038
0
0.011802
0.214786
4,856
112
83
43.357143
0.818253
0
0
0.71
0
0
0.244852
0.100288
0
0
0
0
0.42
1
0.09
false
0.1
0.04
0
0.14
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
1
0
0
0
1
0
0
0
0
0
8
0f6c156da7d42d592b0a855b1914eb96acdaace8
62,970
py
Python
test/test_git_history_comparison.py
peterrosell/gocddash
c1004135dc66b0d8394d6025750b94e12650ab36
[ "MIT" ]
4
2016-01-20T19:53:06.000Z
2019-09-16T11:49:13.000Z
test/test_git_history_comparison.py
peterrosell/gocddash
c1004135dc66b0d8394d6025750b94e12650ab36
[ "MIT" ]
null
null
null
test/test_git_history_comparison.py
peterrosell/gocddash
c1004135dc66b0d8394d6025750b94e12650ab36
[ "MIT" ]
5
2016-07-13T10:41:20.000Z
2018-04-10T07:41:18.000Z
import unittest from unittest.mock import MagicMock from gocddash.console_parsers import git_history_comparison # noinspection PyPep8 git_history_html = """<!DOCTYPE HTML> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title> Compare Pipelines Page - Go </title> <link debug="false" href="/go/assets/application-a4883f8829c786a9ac744ef3e7261e87210209fd708426d9c042622b549feab9.css" media="all" rel="stylesheet" /> <link debug="false" href="/go/assets/patterns/application-6348f0a8b1b1b32e15371ea7fb2c6d108ef02d953359fe7d83fa4e6a0f8ec8c2.css" media="all" rel="stylesheet" /> <script debug="false" src="/go/assets/application-1c3052b5a6a5a931f22b19dc4fca679c4d802763ebb89df8d3d28f4c5bc1a268.js"></script> <![if !IE]> <script src="/go/assets/lib/d3-3.1.5.min-a8bc188bf658d35d44f7dfc030984253b60843821bffa82b54edd65740c8174b.js"></script> <![endif]> <!--[if gt IE 8]><!--> <script src="/go/assets/lib/d3-3.1.5.min-a8bc188bf658d35d44f7dfc030984253b60843821bffa82b54edd65740c8174b.js"></script> <!--<![endif]--> <link rel="shortcut icon" href="/go/assets/cruise-1592088ba651470e554a54371b6be6b1336462c2186e74fb24f54f177377b538.ico"/> </head> <body id="comparison" class="comparison"> <div id="body_bg"> <div id="header"> <div class="header clear_float"> <a href="/go/pipelines" id="application_logo">&nbsp;</a> <div class="application_nav"> <input id="server_timestamp" name="server_time" type="hidden" value="1471509939" /> <ul class="user"> <li class="help"> <a href="https://go.cd/help" target="_blank">Need Help?</a> </li> <li class="current_user icon"> <a href="#" class="current_user_name dropdown-arrow-icon">name</a> <ul class='enhanced_dropdown hidden'> <li> <a href="/go/tab/mycruise/user">Preferences</a> </li> <li class="logout"> <a class="sign_out" href="/go/auth/logout" id="nav-logout">Sign out</a> </li> </ul> </li> </ul> <ul class="tabs"> <li id='cruise-header-tab-pipelines' class=""> <a href="/go/pipelines">PIPELINES</a> </li> <li id='cruise-header-tab-environments' class=""> <a href="/go/environments">ENVIRONMENTS</a> </li> <li id='cruise-header-tab-agents' class=""> <a href="/go/agents">AGENTS</a> </li> <li id="cruise-header-tab-admin" class=""> <a class="dropdown-arrow-icon" data-toggle="dropdown" href="#">ADMIN</a> <ul class="dropdown-menu" role="menu"> <li role="presentation"> <a href="/go/admin/pipelines">Pipelines</a></li> <li role="presentation"> <a href="/go/admin/pipelines/snippet">Config XML</a></li> <li role="presentation"> <a href="/go/admin/plugins">Plugins</a></li> <li role="presentation"> <a href="/go/admin/package_repositories/new">Package Repositories</a></li> </ul> </li> </ul> <div class="error_messaging_counter"> <div id="cruise_message_counts" class="cruise_messages"> </div> <div id="cruise_message_body" style="display:none;" class="cruise_message_body"> </div> <script type="text/javascript"> Util.on_load(function() { new AjaxRefresher('/go/server/messages.json', null, { executeImmediately: true, afterRefresh: function(){ jQuery(document).trigger("server-health-messages-refresh-completed"); } }); }); </script> </div> </div> <div id="back_to_top" class='back_to_top' title="Scroll to Top">Top</div> </div> </div> <div id='body_content'> <div class="messaging_wrapper" id="messaging_wrapper"> <div class="flash" id="message_pane"> </div> </div> <div id="pipeline_header"> <div class="entity_status_wrapper page_header"> <ul class="entity_title"> <!--<li><a href="/go/pipelines">Pipelines</a></li>--> <li class="name"><a href="/go/tab/pipeline/history/big_repository">big_repository</a></li> <li 
class="last"><h1>Compare</h1> </li> </ul> </div> </div> <div class="content_wrapper_outer"><div class="content_wrapper_inner"> <div id="pipeline_status_bar" class="pipeline_flow"> <table> <tbody> <tr> <td width="360px" valign="top"><div class="compare_pipeline_page pipeline"> <div class="current_instance" id='compare_pipeline_from'> <input class="compare_pipeline_input" id="from_pipeline" name="from_pipeline" type="text" value="827" /> <div class="autocomplete"></div> <div class="enhanced_dropdown from hidden"> <div class="compare_search_instructions"> <p>Search for a pipeline instance by label, commiter, date, etc.</p> <p>or</p> <p><a class="more_pipelines" id="browse_timeline_link_from"> Browse the timeline </a></p> </div> </div> </div> <div class="selected_pipeline_from stages"> <div class="pipeline_details"> <div style='width: 48.0%' class="stage"> <div class="stage_bar_wrapper"> <a href="/go/pipelines/big_repository/827/build/1"> <div class="stage_bar Passed" title="build (Passed)"> </div> </a> </div> </div> <div style='width: 48.0%' class="stage"> <div class="stage_bar_wrapper"> <a href="/go/pipelines/big_repository/827/test/2"> <div class="stage_bar Failed" title="test (Failed)"> </div> </a> </div> </div> <div class="triggered_by"> <span class='label'>Automatically triggered</span>&nbsp;on&nbsp;<span class='time'>18 Aug, 2016 at 08:45:17 [+0200]</span></div> </div> </div> </div> <script type="text/javascript"> Util.on_load(function() { var pipelineSelector = "#from_pipeline"; jQuery(pipelineSelector).autocomplete("/go/compare/big_repository/list/compare_with/817", { minChars: 1, width: 500, scrollHeight: 500, matchContains: "word", selectFirst: false, autoFill: false, delay: 1000, cacheLength: 0, multiClickTrigger: false, formatItem: function(row, i, max) { return row; }, formatMatch: function(row, i, max) { return ""; }, formatResult: function(row) { return row.value; }, parse: function(data) { return data.html; }, dataType: 'json', highlight: function(value, term) { return value;//no-op } }); jQuery(pipelineSelector).result(function(event, data, formatted) { var dest = compare_path("from", formatted, "817"); if (formatted == -1) { // indicates no match, see list.json.erb resetField(event.target); } else { window.location.href = dest; } }); jQuery(pipelineSelector).blur(function(event) { var val = jQuery.trim(jQuery(event.target).val()); if (val == "" || val != '827') { resetField(event.target); } }); function resetField(field) { jQuery(field).val('827'); } ; function compare_path(suffix, counter, fixed_counter) { if (suffix == "from") { var from_counter = counter; var to_counter = fixed_counter; } else { var from_counter = fixed_counter; var to_counter = counter; } return "/go/compare/big_repository/" + from_counter + "/with/" + to_counter; } var instructionsPopup = new MicroContentPopup(jQuery('.enhanced_dropdown.from').get(0), new MicroContentPopup.NoOpHandler()); var instructionsPopupShower = new MicroContentPopup.ClickShower(instructionsPopup); jQuery(pipelineSelector).bind('keypress', function(event){ instructionsPopupShower.close(); }); instructionsPopupShower.bindShowButton( jQuery(pipelineSelector).get(0)); jQuery("#browse_timeline_link_from").click(function(event) { Modalbox.show('/go/compare/big_repository/timeline/1?other_pipeline_counter=817&amp;suffix=from', { overlayClose: false, title: "Select a pipeline to compare" }); }); }); </script></td> <td valign="top"><div class="compared_to">compared to</div></td> <td width="360px" valign="top"><div 
class="compare_pipeline_page pipeline"> <div class="current_instance" id='compare_pipeline_to'> <input class="compare_pipeline_input" id="to_pipeline" name="to_pipeline" type="text" value="817" /> <div class="autocomplete"></div> <div class="enhanced_dropdown to hidden"> <div class="compare_search_instructions"> <p>Search for a pipeline instance by label, commiter, date, etc.</p> <p>or</p> <p><a class="more_pipelines" id="browse_timeline_link_to"> Browse the timeline </a></p> </div> </div> </div> <div class="selected_pipeline_to stages"> <div class="pipeline_details"> <div style='width: 48.0%' class="stage"> <div class="stage_bar_wrapper"> <a href="/go/pipelines/big_repository/817/build/1"> <div class="stage_bar Passed" title="build (Passed)"> </div> </a> </div> </div> <div style='width: 48.0%' class="stage"> <div class="stage_bar_wrapper"> <a href="/go/pipelines/big_repository/817/test/1"> <div class="stage_bar Passed" title="test (Passed)"> </div> </a> </div> </div> <div class="triggered_by"> <span class='label'>Automatically triggered</span>&nbsp;on&nbsp;<span class='time'>16 Aug, 2016 at 13:59:34 [+0200]</span></div> </div> </div> </div> <script type="text/javascript"> Util.on_load(function() { var pipelineSelector = "#to_pipeline"; jQuery(pipelineSelector).autocomplete("/go/compare/big_repository/list/compare_with/827", { minChars: 1, width: 500, scrollHeight: 500, matchContains: "word", selectFirst: false, autoFill: false, delay: 1000, cacheLength: 0, multiClickTrigger: false, formatItem: function(row, i, max) { return row; }, formatMatch: function(row, i, max) { return ""; }, formatResult: function(row) { return row.value; }, parse: function(data) { return data.html; }, dataType: 'json', highlight: function(value, term) { return value;//no-op } }); jQuery(pipelineSelector).result(function(event, data, formatted) { var dest = compare_path("to", formatted, "827"); if (formatted == -1) { // indicates no match, see list.json.erb resetField(event.target); } else { window.location.href = dest; } }); jQuery(pipelineSelector).blur(function(event) { var val = jQuery.trim(jQuery(event.target).val()); if (val == "" || val != '817') { resetField(event.target); } }); function resetField(field) { jQuery(field).val('817'); } ; function compare_path(suffix, counter, fixed_counter) { if (suffix == "from") { var from_counter = counter; var to_counter = fixed_counter; } else { var from_counter = fixed_counter; var to_counter = counter; } return "/go/compare/big_repository/" + from_counter + "/with/" + to_counter; } var instructionsPopup = new MicroContentPopup(jQuery('.enhanced_dropdown.to').get(0), new MicroContentPopup.NoOpHandler()); var instructionsPopupShower = new MicroContentPopup.ClickShower(instructionsPopup); jQuery(pipelineSelector).bind('keypress', function(event){ instructionsPopupShower.close(); }); instructionsPopupShower.bindShowButton( jQuery(pipelineSelector).get(0)); jQuery("#browse_timeline_link_to").click(function(event) { Modalbox.show('/go/compare/big_repository/timeline/1?other_pipeline_counter=827&amp;suffix=to', { overlayClose: false, title: "Select a pipeline to compare" }); }); }); </script></td> </tr> </tbody> </table> </div> <script src="/go/gadgets/js/rpc.js?v=1.1-beta5" type="text/javascript"></script> <script type="text/javascript"> Util.on_load(function() { tw_gadget.init('/go/gadgets/ifr'); }); </script> <div class="clear-float"></div> <div class="sub_tab_container rounded-corner-for-tab-container"> <div class="sub_tabs_container"> <ul> <li class="checkins 
current_tab"> <a class="tab_button_body_match_text">checkins</a> <a>Changes</a> </li> <li class="card_activity"> <a class="tab_button_body_match_text">card_activity</a> <a>Card Activity</a> </li> </ul> </div> <div class="sub_tab_container_content"> <div id="tab-content-of-card_activity"> <div id="card_activity_gadget" class="gadget-container"> <div class="information">No mingle project configured for this pipeline. <a href="http://www.go.cd/documentation/user/current/integration/mingle_card_activity_gadget.html" target="_blank">More Information</a></div> <!-- gadget goes here --> </div> </div> <div id="tab-content-of-checkins" class="material_revision_diff"> <div style="padding: 1em;"> <div> <div class="material_title"> <strong> Git - URL: ssh://git@git/testonline/services/big_repository/app_server.git, Branch: master</strong> </div> <table class="list_table material_modifications"> <tr> <th class="revision">Revision</th> <th class="modified_by">Modified by</th> <th class="comment">Comment: </th> </tr> <tr class="change"> <td class="revision wrapped_word"> efe8f8d9a2e5aa87398e2d338246fd8e950df60d </td> <td class="modified_by"> <span class="wrapped_word"> ab &lt;ab@test.com&gt;</span> <br/> <span class="wrapped_word"> 2016-08-17T15:55:37+02:00</span> </td> <td class="comment"> <p><a href="https://test.atlassian.net" target="story_tracker"></a>testing</p> </td> </tr> </table> </div> <div> <div class="material_title"> <strong> Git - URL: ssh://git@git/testonline/services/big_repository/big_repository.git, Branch: master</strong> </div> <table class="list_table material_modifications"> <tr> <th class="revision">Revision</th> <th class="modified_by">Modified by</th> <th class="comment">Comment: </th> </tr> <tr class="change"> <td class="revision wrapped_word"> 9ca18056f017b5869bef325e8f30fa3c2b9c7198 </td> <td class="modified_by"> <span class="wrapped_word"> lg &lt;lg@test.com&gt;</span> <br/> <span class="wrapped_word"> 2016-08-18T08:39:43+02:00</span> </td> <td class="comment"> <p>asdf</p> </td> </tr> <tr class="change"> <td class="revision wrapped_word"> 6ec66540aa09abfda527630d8ab0ecfa651c11d2 </td> <td class="modified_by"> <span class="wrapped_word"> rm &lt;rm@test.com&gt;</span> <br/> <span class="wrapped_word"> 2016-08-18T08:27:41+02:00</span> </td> <td class="comment"> <p>qwerty</p> </td> </tr> <tr class="change"> <td class="revision wrapped_word"> f493dfcf50591e6efdd7578e461f977699cde1c2 </td> <td class="modified_by"> <span class="wrapped_word"> ab &lt;ab@test.com&gt;</span> <br/> <span class="wrapped_word"> 2016-08-17T17:35:51+02:00</span> </td> <td class="comment"> <p><a href="https://test.atlassian.net/" target="story_tracker"></a>hello</p> </td> </tr> <tr class="change"> <td class="revision wrapped_word"> 1bfdd417c37dd04d356fe1f6e75113afbdc300f5 </td> <td class="modified_by"> <span class="wrapped_word"> eb &lt;eb@test.com&gt;</span> <br/> <span class="wrapped_word"> 2016-08-17T16:44:42+02:00</span> </td> <td class="comment"> <p>debug</p> </td> </tr> <tr class="change"> <td class="revision wrapped_word"> e8f4dc265be0d0d20f9ad2f9fd74978d1d5ea07e </td> <td class="modified_by"> <span class="wrapped_word"> lg &lt;lg@test.com&gt;</span> <br/> <span class="wrapped_word"> 2016-08-17T16:33:41+02:00</span> </td> <td class="comment"> <p>ids</p> </td> </tr> <tr class="change"> <td class="revision wrapped_word"> ba7b91b6d45960e9130096139974a0a0f4cd2df2 </td> <td class="modified_by"> <span class="wrapped_word"> go-agent &lt;go-agent@test.com&gt;</span> <br/> <span class="wrapped_word"> 
2016-08-17T16:25:12+02:00</span> </td> <td class="comment"> <p>gomongo</p> </td> </tr> <tr class="change"> <td class="revision wrapped_word"> 32df04538efe309e3ea245fdcedd12e1c6ad773b </td> <td class="modified_by"> <span class="wrapped_word"> ja &lt;ja@test.com&gt;</span> <br/> <span class="wrapped_word"> 2016-08-17T15:31:38+02:00</span> </td> <td class="comment"> <p><a href="https://test.atlassian.net/" target="story_tracker">cherry </a>pick</p> </td> </tr> <tr class="change"> <td class="revision wrapped_word"> 944004c49589b276603d983871d1a531e9933837 </td> <td class="modified_by"> <span class="wrapped_word"> ja &lt;ja@test.com&gt;</span> <br/> <span class="wrapped_word"> 2016-08-17T15:30:43+02:00</span> </td> <td class="comment"> <p><a href="https://test.atlassian.net/" target="story_tracker"></a>cherry pick again</p> </td> </tr> <tr class="change"> <td class="revision wrapped_word"> fb543f0e1f9aeaaa70d6a9bc4bfa748d04093ed3 </td> <td class="modified_by"> <span class="wrapped_word"> ja &lt;ja@test.com&gt;</span> <br/> <span class="wrapped_word"> 2016-08-17T13:21:04+02:00</span> </td> <td class="comment"> <p><a href="https://test.atlassian.net/" target="story_tracker">testing</a> stuff</p> </td> </tr> </table> </div> <div> <div class="material_title"> <strong class="wrapped_word"> Pipeline - app_server</strong> </div> <table class="list_table dependency_material_modifications"> <tr> <th class="dmr revision">Revision</th> <th class="dmr label">Label</th> <th class="dmr completed_at">Completed at</th> </tr> <tr class="change"> <td class="revision"> <a href="/go/pipelines/app_server/59/build/1">app_server/59/build/1</a> </td> <td class="label"> <a href="/go/pipelines/value_stream_map/app_server/59">59</a> </td> <td class="completed_at wrapped_word"> 2016-08-17T15:57:20+02:00 </td> </tr> </table> </div> </div> </div> </div> </div> <script type="text/javascript"> new TabsManager(undefined, 'comparison_page', 'big_repository>', 'checkins'); </script> </div></div> </div> <div id='footer-new-foundation'> <footer class="footer"> <div class="row"> <div class="small-12 medium-6 large-8 columns"> <p class="copyright">Copyright &copy; 2016 <a href="https://www.thoughtworks.com/products" target='_blank'>ThoughtWorks, Inc.</a> Licensed under <a href="https://www.apache.org/licenses/LICENSE-2.0" target="_blank">Apache License, Version 2.0</a>.<br/> Go includes <a href="/go/NOTICE/cruise_notice_file.pdf" target="_blank">third-party software</a>. Go Version: 16.7.0 (3819-b0b9921bdea58101121cc181d697355177d2f197). 
</p> </div> <div class="small-12 medium-6 large-4 columns"> <span class="inline-list social"> <a href="https://twitter.com/goforcd" title="twitter" class="twitter"></a> <a href="https://github.com/gocd/gocd" title="github" class="github"></a> <a href="https://groups.google.com/d/forum/go-cd" title="forums" class="forums"></a> <a href="https://docs.go.cd/current" title="documentation" class="documentation"></a> <a href="https://www.go.cd/community/plugins.html" title="plugins" class="plugins"></a> <a href="https://api.go.cd/current" title="api" class="api"></a> <a href="/go/about" title="about" class="server-details"></a> <a href="/go/cctray.xml" title="cctray" class="cctray"></a> </span> </div> </div> </footer> <script type="text/javascript"> var updater = new VersionUpdater('http://go.test.local/go/api/version_infos/stale', 'http://go.test.local/go/api/version_infos/go_server'); updater.update(); </script> </div> </div> </body> </html> """ # noinspection PyPep8 material_revision_diff = """<!DOCTYPE HTML> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title> Compare Pipelines Page - Go </title> <link debug="false" href="/go/assets/application-a4883f8829c786a9ac744ef3e7261e87210209fd708426d9c042622b549feab9.css" media="all" rel="stylesheet" /> <link debug="false" href="/go/assets/patterns/application-6348f0a8b1b1b32e15371ea7fb2c6d108ef02d953359fe7d83fa4e6a0f8ec8c2.css" media="all" rel="stylesheet" /> <script debug="false" src="/go/assets/application-1c3052b5a6a5a931f22b19dc4fca679c4d802763ebb89df8d3d28f4c5bc1a268.js"></script> <![if !IE]> <script src="/go/assets/lib/d3-3.1.5.min-a8bc188bf658d35d44f7dfc030984253b60843821bffa82b54edd65740c8174b.js"></script> <![endif]> <!--[if gt IE 8]><!--> <script src="/go/assets/lib/d3-3.1.5.min-a8bc188bf658d35d44f7dfc030984253b60843821bffa82b54edd65740c8174b.js"></script> <!--<![endif]--> <link rel="shortcut icon" href="/go/assets/cruise-1592088ba651470e554a54371b6be6b1336462c2186e74fb24f54f177377b538.ico"/> </head> <body id="comparison" class="comparison"> <div id="body_bg"> <div id="header"> <div class="header clear_float"> <a href="/go/pipelines" id="application_logo">&nbsp;</a> <div class="application_nav"> <input id="server_timestamp" name="server_time" type="hidden" value="1471509939" /> <ul class="user"> <li class="help"> <a href="https://go.cd/help" target="_blank">Need Help?</a> </li> <li class="current_user icon"> <a href="#" class="current_user_name dropdown-arrow-icon">name</a> <ul class='enhanced_dropdown hidden'> <li> <a href="/go/tab/mycruise/user">Preferences</a> </li> <li class="logout"> <a class="sign_out" href="/go/auth/logout" id="nav-logout">Sign out</a> </li> </ul> </li> </ul> <ul class="tabs"> <li id='cruise-header-tab-pipelines' class=""> <a href="/go/pipelines">PIPELINES</a> </li> <li id='cruise-header-tab-environments' class=""> <a href="/go/environments">ENVIRONMENTS</a> </li> <li id='cruise-header-tab-agents' class=""> <a href="/go/agents">AGENTS</a> </li> <li id="cruise-header-tab-admin" class=""> <a class="dropdown-arrow-icon" data-toggle="dropdown" href="#">ADMIN</a> <ul class="dropdown-menu" role="menu"> <li role="presentation"> <a href="/go/admin/pipelines">Pipelines</a></li> <li role="presentation"> <a href="/go/admin/pipelines/snippet">Config XML</a></li> <li role="presentation"> <a href="/go/admin/plugins">Plugins</a></li> <li role="presentation"> <a href="/go/admin/package_repositories/new">Package Repositories</a></li> </ul> </li> </ul> <div class="error_messaging_counter"> <div id="cruise_message_counts" 
class="cruise_messages"> </div> <div id="cruise_message_body" style="display:none;" class="cruise_message_body"> </div> <script type="text/javascript"> Util.on_load(function() { new AjaxRefresher('/go/server/messages.json', null, { executeImmediately: true, afterRefresh: function(){ jQuery(document).trigger("server-health-messages-refresh-completed"); } }); }); </script> </div> </div> <div id="back_to_top" class='back_to_top' title="Scroll to Top">Top</div> </div> </div> <div id='body_content'> <div class="messaging_wrapper" id="messaging_wrapper"> <div class="flash" id="message_pane"> </div> </div> <div id="pipeline_header"> <div class="entity_status_wrapper page_header"> <ul class="entity_title"> <!--<li><a href="/go/pipelines">Pipelines</a></li>--> <li class="name"><a href="/go/tab/pipeline/history/big_repository">big_repository</a></li> <li class="last"><h1>Compare</h1> </li> </ul> </div> </div> <div class="content_wrapper_outer"><div class="content_wrapper_inner"> <div id="pipeline_status_bar" class="pipeline_flow"> <table> <tbody> <tr> <td width="360px" valign="top"><div class="compare_pipeline_page pipeline"> <div class="current_instance" id='compare_pipeline_from'> <input class="compare_pipeline_input" id="from_pipeline" name="from_pipeline" type="text" value="827" /> <div class="autocomplete"></div> <div class="enhanced_dropdown from hidden"> <div class="compare_search_instructions"> <p>Search for a pipeline instance by label, commiter, date, etc.</p> <p>or</p> <p><a class="more_pipelines" id="browse_timeline_link_from"> Browse the timeline </a></p> </div> </div> </div> <div class="selected_pipeline_from stages"> <div class="pipeline_details"> <div style='width: 48.0%' class="stage"> <div class="stage_bar_wrapper"> <a href="/go/pipelines/big_repository/827/build/1"> <div class="stage_bar Passed" title="build (Passed)"> </div> </a> </div> </div> <div style='width: 48.0%' class="stage"> <div class="stage_bar_wrapper"> <a href="/go/pipelines/big_repository/827/test/2"> <div class="stage_bar Failed" title="test (Failed)"> </div> </a> </div> </div> <div class="triggered_by"> <span class='label'>Automatically triggered</span>&nbsp;on&nbsp;<span class='time'>18 Aug, 2016 at 08:45:17 [+0200]</span></div> </div> </div> </div> <script type="text/javascript"> Util.on_load(function() { var pipelineSelector = "#from_pipeline"; jQuery(pipelineSelector).autocomplete("/go/compare/big_repository/list/compare_with/817", { minChars: 1, width: 500, scrollHeight: 500, matchContains: "word", selectFirst: false, autoFill: false, delay: 1000, cacheLength: 0, multiClickTrigger: false, formatItem: function(row, i, max) { return row; }, formatMatch: function(row, i, max) { return ""; }, formatResult: function(row) { return row.value; }, parse: function(data) { return data.html; }, dataType: 'json', highlight: function(value, term) { return value;//no-op } }); jQuery(pipelineSelector).result(function(event, data, formatted) { var dest = compare_path("from", formatted, "817"); if (formatted == -1) { // indicates no match, see list.json.erb resetField(event.target); } else { window.location.href = dest; } }); jQuery(pipelineSelector).blur(function(event) { var val = jQuery.trim(jQuery(event.target).val()); if (val == "" || val != '827') { resetField(event.target); } }); function resetField(field) { jQuery(field).val('827'); } ; function compare_path(suffix, counter, fixed_counter) { if (suffix == "from") { var from_counter = counter; var to_counter = fixed_counter; } else { var from_counter = fixed_counter; 
var to_counter = counter; } return "/go/compare/big_repository/" + from_counter + "/with/" + to_counter; } var instructionsPopup = new MicroContentPopup(jQuery('.enhanced_dropdown.from').get(0), new MicroContentPopup.NoOpHandler()); var instructionsPopupShower = new MicroContentPopup.ClickShower(instructionsPopup); jQuery(pipelineSelector).bind('keypress', function(event){ instructionsPopupShower.close(); }); instructionsPopupShower.bindShowButton( jQuery(pipelineSelector).get(0)); jQuery("#browse_timeline_link_from").click(function(event) { Modalbox.show('/go/compare/big_repository/timeline/1?other_pipeline_counter=817&amp;suffix=from', { overlayClose: false, title: "Select a pipeline to compare" }); }); }); </script></td> <td valign="top"><div class="compared_to">compared to</div></td> <td width="360px" valign="top"><div class="compare_pipeline_page pipeline"> <div class="current_instance" id='compare_pipeline_to'> <input class="compare_pipeline_input" id="to_pipeline" name="to_pipeline" type="text" value="817" /> <div class="autocomplete"></div> <div class="enhanced_dropdown to hidden"> <div class="compare_search_instructions"> <p>Search for a pipeline instance by label, commiter, date, etc.</p> <p>or</p> <p><a class="more_pipelines" id="browse_timeline_link_to"> Browse the timeline </a></p> </div> </div> </div> <div class="selected_pipeline_to stages"> <div class="pipeline_details"> <div style='width: 48.0%' class="stage"> <div class="stage_bar_wrapper"> <a href="/go/pipelines/big_repository/817/build/1"> <div class="stage_bar Passed" title="build (Passed)"> </div> </a> </div> </div> <div style='width: 48.0%' class="stage"> <div class="stage_bar_wrapper"> <a href="/go/pipelines/big_repository/817/test/1"> <div class="stage_bar Passed" title="test (Passed)"> </div> </a> </div> </div> <div class="triggered_by"> <span class='label'>Automatically triggered</span>&nbsp;on&nbsp;<span class='time'>16 Aug, 2016 at 13:59:34 [+0200]</span></div> </div> </div> </div> <script type="text/javascript"> Util.on_load(function() { var pipelineSelector = "#to_pipeline"; jQuery(pipelineSelector).autocomplete("/go/compare/big_repository/list/compare_with/827", { minChars: 1, width: 500, scrollHeight: 500, matchContains: "word", selectFirst: false, autoFill: false, delay: 1000, cacheLength: 0, multiClickTrigger: false, formatItem: function(row, i, max) { return row; }, formatMatch: function(row, i, max) { return ""; }, formatResult: function(row) { return row.value; }, parse: function(data) { return data.html; }, dataType: 'json', highlight: function(value, term) { return value;//no-op } }); jQuery(pipelineSelector).result(function(event, data, formatted) { var dest = compare_path("to", formatted, "827"); if (formatted == -1) { // indicates no match, see list.json.erb resetField(event.target); } else { window.location.href = dest; } }); jQuery(pipelineSelector).blur(function(event) { var val = jQuery.trim(jQuery(event.target).val()); if (val == "" || val != '817') { resetField(event.target); } }); function resetField(field) { jQuery(field).val('817'); } ; function compare_path(suffix, counter, fixed_counter) { if (suffix == "from") { var from_counter = counter; var to_counter = fixed_counter; } else { var from_counter = fixed_counter; var to_counter = counter; } return "/go/compare/big_repository/" + from_counter + "/with/" + to_counter; } var instructionsPopup = new MicroContentPopup(jQuery('.enhanced_dropdown.to').get(0), new MicroContentPopup.NoOpHandler()); var instructionsPopupShower = new 
MicroContentPopup.ClickShower(instructionsPopup); jQuery(pipelineSelector).bind('keypress', function(event){ instructionsPopupShower.close(); }); instructionsPopupShower.bindShowButton( jQuery(pipelineSelector).get(0)); jQuery("#browse_timeline_link_to").click(function(event) { Modalbox.show('/go/compare/big_repository/timeline/1?other_pipeline_counter=827&amp;suffix=to', { overlayClose: false, title: "Select a pipeline to compare" }); }); }); </script></td> </tr> </tbody> </table> </div> <script src="/go/gadgets/js/rpc.js?v=1.1-beta5" type="text/javascript"></script> <script type="text/javascript"> Util.on_load(function() { tw_gadget.init('/go/gadgets/ifr'); }); </script> <div class="clear-float"></div> <div class="sub_tab_container rounded-corner-for-tab-container"> <div class="sub_tabs_container"> <ul> <li class="checkins current_tab"> <a class="tab_button_body_match_text">checkins</a> <a>Changes</a> </li> <li class="card_activity"> <a class="tab_button_body_match_text">card_activity</a> <a>Card Activity</a> </li> </ul> </div> <div class="sub_tab_container_content"> <div id="tab-content-of-card_activity"> <div class="information"> <div class="message"> <span> This comparison involves a pipeline instance that was triggered with a non-sequential material revision. </span> <span class="prompt"> <a class="link_as_header_button" href="/go/compare/paysol/800/with/817?show_bisect=true">Continue</a> </span> </div> </div> <div id="card_activity_gadget" class="gadget-container"> <div class="information">No mingle project configured for this pipeline. <a href="http://www.go.cd/documentation/user/current/integration/mingle_card_activity_gadget.html" target="_blank">More Information</a></div> <!-- gadget goes here --> </div> </div> <div id="tab-content-of-checkins" class="material_revision_diff"> <div style="padding: 1em;"> <div> <div class="material_title"> <strong> Git - URL: ssh://git@git/testonline/services/big_repository/app_server.git, Branch: master</strong> </div> <table class="list_table material_modifications"> <tr> <th class="revision">Revision</th> <th class="modified_by">Modified by</th> <th class="comment">Comment: </th> </tr> <tr class="change"> <td class="revision wrapped_word"> efe8f8d9a2e5aa87398e2d338246fd8e950df60d </td> <td class="modified_by"> <span class="wrapped_word"> ab &lt;ab@test.com&gt;</span> <br/> <span class="wrapped_word"> 2016-08-17T15:55:37+02:00</span> </td> <td class="comment"> <p><a href="https://test.atlassian.net" target="story_tracker"></a>testing</p> </td> </tr> </table> </div> <div> <div class="material_title"> <strong> Git - URL: ssh://git@git/testonline/services/big_repository/big_repository.git, Branch: master</strong> </div> <table class="list_table material_modifications"> <tr> <th class="revision">Revision</th> <th class="modified_by">Modified by</th> <th class="comment">Comment: </th> </tr> <tr class="change"> <td class="revision wrapped_word"> 9ca18056f017b5869bef325e8f30fa3c2b9c7198 </td> <td class="modified_by"> <span class="wrapped_word"> lg &lt;lg@test.com&gt;</span> <br/> <span class="wrapped_word"> 2016-08-18T08:39:43+02:00</span> </td> <td class="comment"> <p>asdf</p> </td> </tr> <tr class="change"> <td class="revision wrapped_word"> 6ec66540aa09abfda527630d8ab0ecfa651c11d2 </td> <td class="modified_by"> <span class="wrapped_word"> rm &lt;rm@test.com&gt;</span> <br/> <span class="wrapped_word"> 2016-08-18T08:27:41+02:00</span> </td> <td class="comment"> <p>qwerty</p> </td> </tr> <tr class="change"> <td class="revision wrapped_word"> 
f493dfcf50591e6efdd7578e461f977699cde1c2 </td> <td class="modified_by"> <span class="wrapped_word"> ab &lt;ab@test.com&gt;</span> <br/> <span class="wrapped_word"> 2016-08-17T17:35:51+02:00</span> </td> <td class="comment"> <p><a href="https://test.atlassian.net/" target="story_tracker"></a>hello</p> </td> </tr> <tr class="change"> <td class="revision wrapped_word"> 1bfdd417c37dd04d356fe1f6e75113afbdc300f5 </td> <td class="modified_by"> <span class="wrapped_word"> eb &lt;eb@test.com&gt;</span> <br/> <span class="wrapped_word"> 2016-08-17T16:44:42+02:00</span> </td> <td class="comment"> <p>debug</p> </td> </tr> <tr class="change"> <td class="revision wrapped_word"> e8f4dc265be0d0d20f9ad2f9fd74978d1d5ea07e </td> <td class="modified_by"> <span class="wrapped_word"> lg &lt;lg@test.com&gt;</span> <br/> <span class="wrapped_word"> 2016-08-17T16:33:41+02:00</span> </td> <td class="comment"> <p>ids</p> </td> </tr> <tr class="change"> <td class="revision wrapped_word"> ba7b91b6d45960e9130096139974a0a0f4cd2df2 </td> <td class="modified_by"> <span class="wrapped_word"> go-agent &lt;go-agent@test.com&gt;</span> <br/> <span class="wrapped_word"> 2016-08-17T16:25:12+02:00</span> </td> <td class="comment"> <p>gomongo</p> </td> </tr> <tr class="change"> <td class="revision wrapped_word"> 32df04538efe309e3ea245fdcedd12e1c6ad773b </td> <td class="modified_by"> <span class="wrapped_word"> ja &lt;ja@test.com&gt;</span> <br/> <span class="wrapped_word"> 2016-08-17T15:31:38+02:00</span> </td> <td class="comment"> <p><a href="https://test.atlassian.net/" target="story_tracker">cherry </a>pick</p> </td> </tr> <tr class="change"> <td class="revision wrapped_word"> 944004c49589b276603d983871d1a531e9933837 </td> <td class="modified_by"> <span class="wrapped_word"> ja &lt;ja@test.com&gt;</span> <br/> <span class="wrapped_word"> 2016-08-17T15:30:43+02:00</span> </td> <td class="comment"> <p><a href="https://test.atlassian.net/" target="story_tracker"></a>cherry pick again</p> </td> </tr> <tr class="change"> <td class="revision wrapped_word"> fb543f0e1f9aeaaa70d6a9bc4bfa748d04093ed3 </td> <td class="modified_by"> <span class="wrapped_word"> ja &lt;ja@test.com&gt;</span> <br/> <span class="wrapped_word"> 2016-08-17T13:21:04+02:00</span> </td> <td class="comment"> <p><a href="https://test.atlassian.net/" target="story_tracker">testing</a> stuff</p> </td> </tr> </table> </div> <div> <div class="material_title"> <strong class="wrapped_word"> Pipeline - app_server</strong> </div> <table class="list_table dependency_material_modifications"> <tr> <th class="dmr revision">Revision</th> <th class="dmr label">Label</th> <th class="dmr completed_at">Completed at</th> </tr> <tr class="change"> <td class="revision"> <a href="/go/pipelines/app_server/59/build/1">app_server/59/build/1</a> </td> <td class="label"> <a href="/go/pipelines/value_stream_map/app_server/59">59</a> </td> <td class="completed_at wrapped_word"> 2016-08-17T15:57:20+02:00 </td> </tr> </table> </div> </div> </div> </div> </div> <script type="text/javascript"> new TabsManager(undefined, 'comparison_page', 'big_repository>', 'checkins'); </script> </div></div> </div> <div id='footer-new-foundation'> <footer class="footer"> <div class="row"> <div class="small-12 medium-6 large-8 columns"> <p class="copyright">Copyright &copy; 2016 <a href="https://www.thoughtworks.com/products" target='_blank'>ThoughtWorks, Inc.</a> Licensed under <a href="https://www.apache.org/licenses/LICENSE-2.0" target="_blank">Apache License, Version 2.0</a>.<br/> Go includes <a 
href="/go/NOTICE/cruise_notice_file.pdf" target="_blank">third-party software</a>. Go Version: 16.7.0 (3819-b0b9921bdea58101121cc181d697355177d2f197). </p> </div> <div class="small-12 medium-6 large-4 columns"> <span class="inline-list social"> <a href="https://twitter.com/goforcd" title="twitter" class="twitter"></a> <a href="https://github.com/gocd/gocd" title="github" class="github"></a> <a href="https://groups.google.com/d/forum/go-cd" title="forums" class="forums"></a> <a href="https://docs.go.cd/current" title="documentation" class="documentation"></a> <a href="https://www.go.cd/community/plugins.html" title="plugins" class="plugins"></a> <a href="https://api.go.cd/current" title="api" class="api"></a> <a href="/go/about" title="about" class="server-details"></a> <a href="/go/cctray.xml" title="cctray" class="cctray"></a> </span> </div> </div> </footer> <script type="text/javascript"> var updater = new VersionUpdater('http://go.test.local/go/api/version_infos/stale', 'http://go.test.local/go/api/version_infos/go_server'); updater.update(); </script> </div> </div> </body> </html>""" class TestGitBlame(unittest.TestCase): def test_git_blame(self): pipeline_name = "banana" current = "2029" comparison = "2029" class Stub: pass go_client = git_history_comparison.go_client git_history_comparison.go_client = Stub git_history_comparison.go_client.request_comparison_html = MagicMock(return_value=git_history_html) output = git_history_comparison.get_git_comparison(pipeline_name, current, comparison, 'banana') git_history_comparison.go_client = go_client self.assertEqual(output, [ ('app_server.git, Branch: master', [ ('efe8f8d9a2e5aa87398e2d338246fd8e950df60d', 'ab <ab@test.com> 2016-08-17T15:55:37+02:00', 'testing') ]), ('big_repository.git, Branch: master', [ ('9ca18056f017b5869bef325e8f30fa3c2b9c7198', 'lg <lg@test.com> 2016-08-18T08:39:43+02:00', 'asdf'), ('6ec66540aa09abfda527630d8ab0ecfa651c11d2', 'rm <rm@test.com> 2016-08-18T08:27:41+02:00', 'qwerty'), ('f493dfcf50591e6efdd7578e461f977699cde1c2', 'ab <ab@test.com> 2016-08-17T17:35:51+02:00', 'hello'), ('1bfdd417c37dd04d356fe1f6e75113afbdc300f5', 'eb <eb@test.com> 2016-08-17T16:44:42+02:00', 'debug'), ('e8f4dc265be0d0d20f9ad2f9fd74978d1d5ea07e', 'lg <lg@test.com> 2016-08-17T16:33:41+02:00', 'ids'), ('32df04538efe309e3ea245fdcedd12e1c6ad773b', 'ja <ja@test.com> 2016-08-17T15:31:38+02:00', 'cherry pick'), ('944004c49589b276603d983871d1a531e9933837', 'ja <ja@test.com> 2016-08-17T15:30:43+02:00', 'cherry pick again'), ('fb543f0e1f9aeaaa70d6a9bc4bfa748d04093ed3', 'ja <ja@test.com> 2016-08-17T13:21:04+02:00', 'testing stuff') ]) ]) def test_material_revision_diff(self): pipeline_name = "banana" current = "295" comparison = "294" class Stub: pass go_client = git_history_comparison.go_client git_history_comparison.go_client = Stub git_history_comparison.go_client.request_comparison_html = MagicMock(return_value=material_revision_diff) output = git_history_comparison.get_git_comparison(pipeline_name, current, comparison, 'banana') git_history_comparison.go_client = go_client self.assertEqual(output, None) if __name__ == '__main__': unittest.main()
49.660883
234
0.432349
5,336
62,970
4.991192
0.088456
0.02343
0.017234
0.030038
0.962453
0.957797
0.955732
0.948485
0.948485
0.948485
0
0.06433
0.448261
62,970
1,267
235
49.700079
0.702242
0.000619
0
0.917165
0
0.092229
0.972095
0.265128
0
0
0
0
0.001708
1
0.001708
false
0.006832
0.002562
0
0.027327
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
7e83431787289bd5e26b9df0d1100d765c7d3661
5,480
py
Python
loaddata/word_embedding_loader.py
garminwu/CNN
30888cece75b347eccca248a688c5233b95833b5
[ "Apache-2.0" ]
null
null
null
loaddata/word_embedding_loader.py
garminwu/CNN
30888cece75b347eccca248a688c5233b95833b5
[ "Apache-2.0" ]
null
null
null
loaddata/word_embedding_loader.py
garminwu/CNN
30888cece75b347eccca248a688c5233b95833b5
[ "Apache-2.0" ]
null
null
null
import torch
import numpy as np
import random

torch.manual_seed(233)
random.seed(233)


def vector_loader(text_field_words):
    # load word2vec_raw
    # path = 'word_embedding/glove.6B.300d.txt'
    path = 'word2vec/glove.sentiment.conj.pretrained.txt'
    words = []
    words_dict = {}
    file = open(path, 'rt', encoding='utf-8')
    lines = file.readlines()
    t = 300
    for line in lines:
        line_split = line.split(' ')
        word = line_split[0]
        nums = line_split[1:]
        nums = [float(e) for e in nums]
        # data.append(line_list)
        words.append(word)
        words_dict[word] = nums

    # match
    count_list2 = []
    count = 0
    dict_cat = []
    for word in text_field_words:
        if word in words_dict:
            count += 1
            dict_cat.append(words_dict[word])
        else:
            dict_cat.append([0.0] * t)
            count += 1
            count_list2.append(count - 1)
    count_data = len(text_field_words) - len(count_list2)

    # modify zero
    sum = []
    for j in range(t):
        sum_col = 0.0
        for i in range(len(dict_cat)):
            sum_col += dict_cat[i][j]
        sum_col = float(sum_col / count_data)
        sum_col = round(sum_col, 6)
        sum.append(sum_col)
    print("sum ", sum)
    # sum_none = []
    # for i in range(t):
    #     sum_total = sum[i] / (len(dict_cat) - len(count_list2))
    #     sum_total = round(sum_total, 6)
    #     sum_none.append(sum_total)
    #
    # print(sum_none)

    for i in range(len(count_list2)):
        dict_cat[count_list2[i]] = sum
    return dict_cat


def vector_loader_zero(text_field_words):
    # load word2vec_raw
    path = 'word_embedding/glove.6B.300d.txt'
    words = []
    words_dict = {}
    file = open(path, 'rt', encoding='utf-8')
    lines = file.readlines()
    t = 300
    for line in lines:
        line_split = line.split(' ')
        word = line_split[0]
        nums = line_split[1:]
        nums = [float(e) for e in nums]
        # data.append(line_list)
        words.append(word)
        words_dict[word] = nums

    # match
    count_list2 = []
    count = 0
    dict_cat = []
    for word in text_field_words:
        if word in words_dict:
            count += 1
            dict_cat.append(words_dict[word])
        else:
            dict_cat.append([0.0] * t)
            # count += 1
            # count_list2.append(count - 1)

    # # modify zero
    # sum = []
    # for j in range(t):
    #     sum_col = 0.0
    #     for i in range(len(dict_cat)):
    #         sum_col += dict_cat[i][j]
    #     sum_col = round(sum_col, 6)
    #
    #     sum.append(sum_col)
    #
    # sum_none = []
    # for i in range(t):
    #     sum_total = sum[i] / (len(sum) - len(count_list2))
    #     sum_total = round(sum_total, 6)
    #     sum_none.append(sum_total)
    #
    # print(sum_none)
    #
    # for i in range(len(count_list2)):
    #     dict_cat[count_list2[i]] = sum_none
    return dict_cat


def vector_loader_modify(text_field_words):
    # load word2vec_raw
    path = 'word_embedding/glove.6B.300d.txt'
    words = []
    words_dict = {}
    file = open(path, 'rt', encoding='utf-8')
    lines = file.readlines()
    t = 300
    for line in lines:
        line_split = line.split(' ')
        word = line_split[0]
        nums = line_split[1:]
        nums = [float(e) for e in nums]
        # data.append(line_list)
        words.append(word)
        words_dict[word] = nums

    uniform = np.random.uniform(-0.1, 0.1, t).round(6).tolist()  # uniform distribution U(a, b)

    # match
    count_list2 = []
    count = 0
    dict_cat = []
    for word in text_field_words:
        if word in words_dict:
            count += 1
            dict_cat.append(words_dict[word])
        else:
            # a = torch.normal(mean=0.0, std=torch.arange(0.09, 0, -0.09))
            dict_cat.append(uniform)
            count += 1
            count_list2.append(count - 1)

    # count_data = len(text_field_words) - len(count_list2)
    # # modify uniform
    # sum = []
    # for j in range(t):
    #     sum_col = 0.0
    #     for i in range(len(dict_cat)):
    #         sum_col += dict_cat[i][j]
    #     sum_col = float(sum_col / count_data)
    #     sum_col = round(sum_col, 6)
    #     sum.append(sum_col)
    # sum_none = []
    # for i in range(t):
    #     sum_total = sum[i] / (len(sum) - len(count_list2))
    #     sum_total = round(sum_total, 6)
    #     sum_none.append(sum_total)
    #
    # print(sum_none)
    #
    # for i in range(len(count_list2)):
    #     dict_cat[count_list2[i]] = sum_none
    return dict_cat


import torch
import numpy


def vector_loader_rand(text_field_words):
    t = 300
    # match
    text_words_size = len(text_field_words)
    dict_cat = torch.randn(text_words_size, t)
    dict_cat = dict_cat.numpy()
    dict_cat = dict_cat.tolist()
    return dict_cat

# {'ü', 'q', 'ó', 'á', '=', 'l', 'â', ':', 'i', 'ö', 'à', ',', '(', '4', 'û', 'b', 'n', 'e', 's', '`', "'", 'm', '1', 'c', '\\', '/', '.', 'h', ';', '&', 'f', '%', '$', 'ï', 'u', 'a', 'v', 'o', 'z', '#', 'ã', 'y', '0', '2', '7', '5', 'j', '?', '<unk>', '-', '<pad>', '*', '!', 'w', 'd', 'p', 'è', 'í', 'k', '8', 'é', '+', ')', 'r', '9', '3', 'ñ', 'æ', 'g', 'x', '6', 't'}
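A quick sketch exercising the one loader above that needs no embedding file; the vocabulary here is made up. The other three loaders additionally assume a GloVe-style text file ("word v1 v2 ... v300" per line) at their hard-coded paths.

# Hedged usage sketch for vector_loader_rand.
if __name__ == '__main__':
    vocab = ['<pad>', '<unk>', 'good', 'movie']
    vectors = vector_loader_rand(vocab)
    assert len(vectors) == len(vocab)   # one row per vocabulary entry
    assert len(vectors[0]) == 300       # t = 300 dimensions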
29.304813
372
0.513139
744
5,480
3.573925
0.170699
0.073712
0.052651
0.037232
0.805942
0.805942
0.789771
0.789771
0.789771
0.789771
0
0.030543
0.330839
5,480
186
373
29.462366
0.694573
0.338139
0
0.728155
0
0
0.04032
0.032019
0
0
0
0
0
1
0.038835
false
0
0.048544
0
0.126214
0.009709
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
7ea4cebae79abc2b2dc30649a25f65103ccdb01f
38,267
py
Python
pycity_calc/toolbox/analyze/save_load_profiles.py
RWTH-EBC/pyCity_calc
99fd0dab7f9a9030fd84ba4715753364662927ec
[ "MIT" ]
4
2020-06-22T14:14:25.000Z
2021-11-08T11:47:01.000Z
pycity_calc/toolbox/analyze/save_load_profiles.py
RWTH-EBC/pyCity_calc
99fd0dab7f9a9030fd84ba4715753364662927ec
[ "MIT" ]
4
2019-08-28T19:42:28.000Z
2019-08-28T19:43:44.000Z
pycity_calc/toolbox/analyze/save_load_profiles.py
RWTH-EBC/pyCity_calc
99fd0dab7f9a9030fd84ba4715753364662927ec
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Code extracts and saves load profiles of all buildings of city object
"""
from __future__ import division

import os
import warnings
import pickle
import numpy as np
import matplotlib.pyplot as plt

try:
    import openpyxl
except:
    msg = 'Could not import openpyxl. Which is required, if you want to' \
          ' save profiles directly into xlsx files. Please install via ' \
          'pip or set save_as_xlsx to False.'
    warnings.warn(msg)

import pycity_calc.visualization.city_visual as citvis
import pycity_calc.cities.scripts.city_generator.city_generator as citgen
import pycity_calc.toolbox.analyze.save_city_data as savcit


def gen_path_if_not_existent(dir):
    """
    Generate directory, if not existent

    Parameters
    ----------
    dir : str
        Directory path
    """
    if not os.path.exists(dir):
        os.makedirs(dir)


def extract_build_base_data(city, id, file_path, use_german=False):
    """
    Extract and save building base data to txt file

    Parameters
    ----------
    city : object
        City object
    id : int
        Building node id
    file_path : str
        Path to save file to (e.g. ...\building_data.txt)
    use_german : bool, optional
        Defines, if English or German language should be used
        (default: False). If False, uses English language.
    """
    # Building pointer
    build = city.nodes[id]['entity']

    if use_german:
        with open(file_path, mode='w') as f:
            f.write(u'Gebäude-ID: ' + str(id) + '\n')
            x_coord = city.nodes[id]['position'].x
            y_coord = city.nodes[id]['position'].y
            f.write('X-Koordinate in m: ' + str(int(x_coord)) + '\n')
            f.write('Y-Koordinate in m: ' + str(int(y_coord)) + '\n')
            if build.build_year is not None:
                build_year = int(build.build_year)
            else:
                build_year = None
            f.write('Baujahr: ' + str(build_year) + '\n')
            if build.mod_year is not None:
                mod_year = int(build.mod_year)
            else:
                mod_year = None
            f.write('Letztes Sanierungsjahr: ' + str(mod_year) + '\n')
            f.write(u'Nummer Gebäudetyp: ' + str(build.build_type) + '\n')
            build_name = citgen.conv_build_type_nb_to_name(build.build_type)
            f.write(u'(Engl.) Erläuterung Gebäudetyp: ' + str(build_name) + '\n')

            # Write building data to file
            f.write('Anzahl Zonen/Apartments: '
                    + str(len(build.apartments)) + '\n')
            f.write(u'Nutzbare PV-Fläche in m2: '
                    + str(build.roof_usabl_pv_area) + '\n')
            f.write(u'Nettogrundfläche in m2: '
                    + str(build.net_floor_area) + '\n')
            f.write(u'Bebaute Grundfläche in m2: '
                    + str(build.ground_area) + '\n')
            f.write(u'Mittlere Geschosshöhe in m: '
                    + str(build.height_of_floors) + '\n')
            f.write('Anzahl Geschosse: ' + str(build.nb_of_floors) + '\n')

            ann_th_sh_demand = build.get_annual_space_heat_demand()
            ann_el_demand = build.get_annual_el_demand()
            ann_dhw_demand = build.get_annual_dhw_demand()

            f.write(u'Jährlicher Nutzenergiebedarf für Raumwärme in kWh/a: '
                    + str(int(ann_th_sh_demand)) + '\n')
            f.write(u'Jährlicher, elektrischer Energiebedarf (ohne Warmwasser)'
                    ' in kWh/a: ' + str(int(ann_el_demand)) + '\n')
            f.write(u'Jährlicher Nutzenergiebedarf Warmwasser in kWh/a: '
                    + str(int(ann_dhw_demand)) + '\n')
            f.write('\n')

            if 'osm_id' in city.nodes[id]:
                f.write('openstreetmap id: '
                        + str(city.nodes[id]['osm_id']) + '\n')
            if 'name' in city.nodes[id]:
                f.write('OSM name: ' + str(city.nodes[id]['name']) + '\n')
            if 'addr_street' in city.nodes[id]:
                f.write('Street: ' + str(city.nodes[id]['addr_street']) + '\n')
            if 'addr_housenumber' in city.nodes[id]:
                f.write('Street nb.: '
                        + str(city.nodes[id]['addr_housenumber']) + '\n')
            if 'comment' in city.nodes[id]:
                f.write('OSM comment: '
                        + str(city.nodes[id]['comment']) + '\n')
            # print(vars(build))
            f.close()
    else:
        with open(file_path, mode='w') as f:
            f.write('Building node id: ' + str(id) + '\n')
            x_coord = city.nodes[id]['position'].x
            y_coord = city.nodes[id]['position'].y
            f.write('X-coordinate in m: ' + str(int(x_coord)) + '\n')
            f.write('Y-coordinate in m: ' + str(int(y_coord)) + '\n')
            if build.build_year is not None:
                build_year = int(build.build_year)
            else:
                build_year = None
            f.write('Year of construction: ' + str(build_year) + '\n')
            if build.mod_year is not None:
                mod_year = int(build.mod_year)
            else:
                mod_year = None
            f.write('Last year of modernization: ' + str(mod_year) + '\n')
            f.write('Building type number: ' + str(build.build_type) + '\n')
            build_name = citgen.conv_build_type_nb_to_name(build.build_type)
            f.write('Building type explanation: ' + str(build_name) + '\n')

            # Write building data to file
            f.write('Nb. of zones/apartments: '
                    + str(len(build.apartments)) + '\n')
            f.write('Usable PV roof area in m2: '
                    + str(build.roof_usabl_pv_area) + '\n')
            f.write('Net floor area (NFA) in m2: '
                    + str(build.net_floor_area) + '\n')
            f.write('Ground area in m2: ' + str(build.ground_area) + '\n')
            f.write('Height of single floor in m: '
                    + str(build.height_of_floors) + '\n')
            f.write('Number of floors: ' + str(build.nb_of_floors) + '\n')

            ann_th_sh_demand = build.get_annual_space_heat_demand()
            ann_el_demand = build.get_annual_el_demand()
            ann_dhw_demand = build.get_annual_dhw_demand()

            f.write('Annual net space heating energy demand in kWh/a: '
                    + str(int(ann_th_sh_demand)) + '\n')
            f.write('Annual electric energy demand in kWh/a: '
                    + str(int(ann_el_demand)) + '\n')
            f.write('Annual net hot water energy demand in kWh/a: '
                    + str(int(ann_dhw_demand)) + '\n')
            f.write('\n')

            if 'osm_id' in city.nodes[id]:
                f.write('openstreetmap id: '
                        + str(city.nodes[id]['osm_id']) + '\n')
            if 'name' in city.nodes[id]:
                f.write('OSM name: ' + str(city.nodes[id]['name']) + '\n')
            if 'addr_street' in city.nodes[id]:
                f.write('Street: ' + str(city.nodes[id]['addr_street']) + '\n')
            if 'addr_housenumber' in city.nodes[id]:
                f.write('Street nb.: '
                        + str(city.nodes[id]['addr_housenumber']) + '\n')
            if 'comment' in city.nodes[id]:
                f.write('OSM comment: '
                        + str(city.nodes[id]['comment']) + '\n')
            # print(vars(build))
            f.close()


def extract_build_profiles(city, id, file_path, do_plot=False,
                           use_german=False, save_tikz=False,
                           save_as_xlsx=True):
    """
    Extract and save building profiles to file

    Parameters
    ----------
    city : object
        City object
    id : int
        Building node id
    file_path : str
        Path to save file to (e.g. ...\building_data.txt)
    do_plot : bool, optional
        Defines, if profiles should be plotted (default: False)
    use_german : bool, optional
        Defines, if English or German language should be used
        (default: False). If False, uses English language.
save_tikz : bool, optional Define, if figure should be saved as tikz (default: False) save_as_xlsx : bool, optional Define, if load curves should also be saved as xlsx files (default: True) """ # Building pointer build = city.nodes[id]['entity'] # Get power curves sh_profile = build.get_space_heating_power_curve() el_profile = build.get_electric_power_curve() dhw_profile = build.get_dhw_power_curve() # Generate time array timestep = city.environment.timer.timeDiscretization year_in_seconds = 365 * 24 * 3600 time_array = np.arange(0, year_in_seconds, timestep) # Stack results together res_array = np.vstack((time_array, sh_profile)) res_array = np.vstack((res_array, el_profile)) res_array = np.vstack((res_array, dhw_profile)) # Transpose array res_array = np.transpose(res_array) # Define header if use_german: # Define header header = u'Zeit in Sekunden\tThermische Leistung Raumwärme in Watt\t' \ u'Elektrische Leistung in Watt' \ u'\tLeistung Warmwasser in Watt' else: header = 'Time in seconds\tNet space heating power in Watt\t' \ 'Electric power in Watt\tNet hot water power in Watt' # Save numpy array to txt np.savetxt(fname=file_path, X=res_array, delimiter='\t', header=header) if save_as_xlsx: # Get workbook wb = openpyxl.Workbook() # Get worksheet ws = wb.active if use_german: ws['A1'].value = 'Zeit in Sekunden' ws['B1'].value = u'Thermische Leistung Raumwärme in Watt' ws['C1'].value = u'Elektrische Leistung in Watt' ws['D1'].value = u'Leistung Warmwasser in Watt' xlsx_filename = str(id) + '_Lastgang.xlsx' else: ws['A1'].value = 'Time in seconds' ws['B1'].value = 'Net space heating power in Watt' ws['C1'].value = 'Electric power in Watt' ws['D1'].value = 'Net hot water power in Watt' xlsx_filename = str(id) + '_profiles.xlsx' # Loop over columns for j in range(len(res_array[0])): # Loop over rows for i in range(len(res_array)): ws.cell(row=i + 2, column=j + 1, value=res_array[i][j]) workbook_path = os.path.join(os.path.dirname(file_path), xlsx_filename) wb.save(workbook_path) if do_plot: try: import ebc_ues_plot.line_plots as uesline except: msg = 'Cannot import ebc_ues_plot / simple_plot package.' \ 'Thus, cannot perform plotting in EBC style!' 
raise AssertionError(msg) # Generate time array nb_timesteps = 365 * 24 * 3600 / timestep time_array = np.arange(0, nb_timesteps, timestep / 3600) plotdata = uesline.PlottingData() plotdata.add_data_entry(time_array, sh_profile / 1000) plotdata.add_data_entry(time_array, el_profile / 1000) plotdata.add_data_entry(time_array, dhw_profile / 1000) # Perform plotting if use_german: output_path = os.path.join(os.path.dirname(file_path), 'Lastgaenge') else: output_path = os.path.join(os.path.dirname(file_path), 'power_curves_graphics') uesline.plot_multi_language_multi_color(plot_data=plotdata, plot_sub=True, output_path=output_path, output_filename=str(id), show_plot=False, use_tight=True, title_engl=None, xlab_engl='Time in hours', ylab_engl='Power in kW', list_labels_engl=[ 'Space heating\npower in kW', 'Electric\npower in kW', 'Hot water\npower in kW'], title_dt=None, xlab_dt='Zeit in Stunden', ylab_dt='Leistung in kW', list_labels_dt=[ 'Heizleistung\nin kW', 'Elektrische\nLeistung in kW', 'Warmwasser-\nleistung in kW'], fontsize=12, fig_adjust='a4', legend_pos_within=True, put_leg='below', dpi=500, # linewidth=1, set_zero_point=True, set_x_limits=True, xmin=0, xmax=8760, set_y_limits=False, # ymin=ymin, ymax=ymax, use_grid=False, # input_path=input_path, save_tikz=save_tikz, # rotate_x_labels=rotate_x_labels, copy_py=True, copy_input=False, save_data_array=True, use_font='arial') def extract_city_base_data(city, out_file_path, do_plot=False, use_german=False, save_tikz=False, save_as_xlsx=True): """ Extract and save basic city data Parameters ---------- city : object City object of pyCity_calc out_file_path : str Path to save data to do_plot : bool, optional Defines, if profiles should be plotted (default: False) use_german : bool, optional Defines, if English or German language should be used (default: False). If False, uses English language. save_tikz : bool, optional Define, if figure should be saved as tikz (default: False) save_as_xlsx : bool, optional Define, if load curves should also be saved as xlsx files (default: True) """ # Extract basic city data to path (.txt) if use_german: with open(out_file_path, mode='w') as f: f.write('Anzahl Knoten: ' + str(len(city.nodes())) + '\n') f.write(u'(Z.b. Gebäude, Straßen etc.)\n') nb_build_entities = city.get_nb_of_building_entities() f.write(u'Anzahl Gebäude: ' + str(nb_build_entities) + '\n') list_ent = city.get_list_build_entity_node_ids() f.write(u'Liste mit Gebäude-IDs: ' + str(list_ent) + '\n') location = city.environment.location f.write( u'Längen-/Breitengrad der Stadt: ' + (str(location)) + '\n') altitude = city.environment.weather.altitude f.write(u'Höhe über NN: ' + str(altitude) + '\n') nb_occ = city.get_nb_occupants() f.write('Anzahl Bewohner: ' + str(nb_occ) + '\n') ann_th_sh_demand = city.get_annual_space_heating_demand() ann_el_demand = city.get_annual_el_demand() ann_dhw_demand = city.get_annual_dhw_demand() f.write(u'Jährlicher Nutzenergiebedarf für Raumwärme in kWh/a: ' + str(int(ann_th_sh_demand)) + '\n') f.write(u'Jährlicher, elektrischer Energiebedarf (ohne Warmwasser)' ' in kWh/a: ' + str(int(ann_el_demand)) + '\n') f.write(u'Jährlicher Nutzenergiebedarf Warmwasser in kWh/a: ' + str(int(ann_dhw_demand)) + '\n') f.write('\n') f.close() if do_plot: # Plot energy demands as bar plots try: import ebc_ues_plot.bar_plots as uesbar except: msg = 'Could not import ebc_ues_plot module.' 
raise AssertionError(msg) dataset = np.array([[ann_th_sh_demand], [ann_el_demand], [ann_dhw_demand]]) output_path = os.path.join(os.path.dirname(out_file_path), 'Stadt_Saulendiagramm_Energie') f_name = 'Stadt_Saulendiagramm_Energie' uesbar.plot_multi_language_multi_color_bar(dataset=dataset, output_path=output_path, output_filename=f_name, show_plot=False, use_tight=True, title_engl=None, xlab_engl=None, ylab_engl='Energy demands in kWh/a', list_labels_engl=[ 'Space heating', 'Electric energy', 'Hot water energy'], title_dt=None, xlab_dt=None, ylab_dt=u'Energiebedarf in kWh/a', list_labels_dt=[ u'Raumwärme', u'Elektr. Energie', u'Warmwasser'], fontsize=16, fig_adjust=None, dpi=300, copy_py=True, copy_input=False, input_path=None, save_data_array=True, save_tikz=save_tikz, list_labels_leg_engl=None, list_labels_leg_dt=None, use_autolabel=False, bar_width=0.7, set_ylimit=False, ymin=None, ymax=None, rotate_x_labels=False, use_font='arial', legend_pos='inside') else: with open(out_file_path, mode='w') as f: f.write('Number of nodes: ' + str(len(city.nodes())) + '\n') nb_build_entities = city.get_nb_of_building_entities() f.write('Number of buildings: ' + str(nb_build_entities) + '\n') list_ent = city.get_list_build_entity_node_ids() f.write('List of building ids: ' + str(list_ent) + '\n') location = city.environment.location f.write('Location (lat/long): ' + (str(location)) + '\n') altitude = city.environment.weather.altitude f.write('Altitude in m above NN: ' + str(altitude) + '\n') nb_occ = city.get_nb_occupants() f.write('Total number of occupants: ' + str(nb_occ) + '\n') ann_th_sh_demand = city.get_annual_space_heating_demand() ann_el_demand = city.get_annual_el_demand() ann_dhw_demand = city.get_annual_dhw_demand() f.write('Annual net space heating energy demand in kWh/a: ' + str(int(ann_th_sh_demand)) + '\n') f.write('Annual electric energy demand in kWh/a: ' + str(int(ann_el_demand)) + '\n') f.write('Annual net hot water energy demand in kWh/a: ' + str(int(ann_dhw_demand)) + '\n') f.write('\n') f.close() if do_plot: # Plot energy demands as bar plots try: import ebc_ues_plot.bar_plots as uesbar except: msg = 'Could not import ebc_ues_plot module.' raise AssertionError(msg) dataset = np.array([[ann_th_sh_demand], [ann_el_demand], [ann_dhw_demand]]) output_path = os.path.join(os.path.dirname(out_file_path), 'city_energy_bars') f_name = 'city_bar_plot' uesbar.plot_multi_language_multi_color_bar(dataset=dataset, output_path=output_path, output_filename=f_name, show_plot=False, use_tight=True, title_engl=None, xlab_engl=None, ylab_engl='Energy demands in kWh/a', list_labels_engl=[ 'Space heating', 'Electric energy', 'Hot water energy'], title_dt=None, xlab_dt=None, ylab_dt='Energiebedarf in kWh/a', list_labels_dt=[ u'Raumwärme', 'Elektr. Energie', 'Warmwasser'], fontsize=16, fig_adjust=None, dpi=300, copy_py=True, copy_input=False, input_path=None, save_data_array=True, save_tikz=save_tikz, list_labels_leg_engl=None, list_labels_leg_dt=None, use_autolabel=False, bar_width=0.7, set_ylimit=False, ymin=None, ymax=None, rotate_x_labels=False, use_font='arial', legend_pos='inside') def extract_city_profiles(city, city_path, do_plot, use_german=False, save_tikz=False, save_as_xlsx=True): """ Parameters ---------- city : object City object of pyCity_calc city_path : str Path to folder, where profiles should be saved do_plot : bool, optional Defines, if profiles should be plotted (default: False) use_german : bool, optional Defines, if English or German language should be used (default: False). 
If False, uses English language save_tikz : bool, optional Define, if figure should be saved as tikz (default: False) save_as_xlsx : bool, optional Define, if load curves should also be saved as xlsx files (default: True) """ # Get power curves sh_profile = city.get_aggr_space_h_power_curve() el_profile = city.get_aggr_el_power_curve() dhw_profile = city.get_aggr_dhw_power_curve() # Generate time array timestep = city.environment.timer.timeDiscretization year_in_seconds = 365 * 24 * 3600 time_array = np.arange(0, year_in_seconds, timestep) # Stack results together res_array = np.vstack((time_array, sh_profile)) res_array = np.vstack((res_array, el_profile)) res_array = np.vstack((res_array, dhw_profile)) # Transpose array res_array = np.transpose(res_array) if use_german: # Define header header = u'Zeit in Sekunden\tThermische Leistung Raumwärme in Watt\t' \ u'Elektrische Leistung in Watt' \ u'\tLeistung Warmwasser in Watt' data_f_name = 'Stadt_Profile.txt' else: # Define header header = 'Time in seconds\tNet space heating power in Watt\t' \ 'Electric power in Watt\tNet hot water power in Watt' data_f_name = 'city_profiles.txt' data_f_path = os.path.join(city_path, data_f_name) # Save numpy array to txt np.savetxt(fname=data_f_path, X=res_array, delimiter='\t', header=header) if save_as_xlsx: # Get workbook wb = openpyxl.Workbook() # Get worksheet ws = wb.active if use_german: ws['A1'].value = 'Zeit in Sekunden' ws['B1'].value = u'Thermische Leistung Raumwärme in Watt' ws['C1'].value = u'Elektrische Leistung in Watt' ws['D1'].value = u'Leistung Warmwasser in Watt' xlsx_filename = 'Stadt_Profile.xlsx' else: ws['A1'].value = 'Time in seconds' ws['B1'].value = 'Net space heating power in Watt' ws['C1'].value = 'Electric power in Watt' ws['D1'].value = 'Net hot water power in Watt' xlsx_filename = 'city_profiles.xlsx' # Loop over columns for j in range(len(res_array[0])): # Loop over rows for i in range(len(res_array)): ws.cell(row=i + 2, column=j + 1, value=res_array[i][j]) workbook_path = os.path.join(city_path, xlsx_filename) wb.save(workbook_path) if do_plot: # Plot city profiles to path try: import ebc_ues_plot.line_plots as uesline except: msg = 'Cannot import ebc_ues_plot / simple_plot package.' \ 'Thus, cannot perform plotting in EBC style!' 
raise AssertionError(msg) # Generate time array nb_timesteps = 365 * 24 * 3600 / timestep time_array = np.arange(0, nb_timesteps, timestep / 3600) plotdata = uesline.PlottingData() plotdata.add_data_entry(time_array, sh_profile / 1000) plotdata.add_data_entry(time_array, el_profile / 1000) plotdata.add_data_entry(time_array, dhw_profile / 1000) # Perform plotting if use_german: output_path = os.path.join(city_path, 'Stadt_Lastgaenge') output_filename = 'Stadt_Lastgaenge' else: output_path = os.path.join(city_path, 'city_power_curves') output_filename = 'city_power_curves' uesline.plot_multi_language_multi_color(plot_data=plotdata, plot_sub=True, output_path=output_path, output_filename=output_filename, show_plot=False, use_tight=True, title_engl=None, xlab_engl='Time in hours', ylab_engl='Power in kW', list_labels_engl=[ 'Space heating\npower in kW', 'Electric\npower in kW', 'Hot water\npower in kW'], title_dt=None, xlab_dt='Zeit in Stunden', ylab_dt='Leistung in kW', list_labels_dt=[ u'Heizleistung\nin kW', u'Elektrische\nLeistung in kW', u'Warmwasser-\nleistung in kW'], fontsize=12, fig_adjust='a4', legend_pos_within=True, put_leg='below', dpi=500, # linewidth=1, set_zero_point=True, set_x_limits=True, xmin=0, xmax=8760, set_y_limits=False, # ymin=ymin, ymax=ymax, use_grid=False, # input_path=input_path, save_tikz=save_tikz, # rotate_x_labels=rotate_x_labels, copy_py=True, copy_input=False, save_data_array=True, use_font='arial') def extract_city_data(city, out_path, do_plot=False, use_german=False, save_tikz=False, save_as_xlsx=True): """ Extract and save city data to file. Parameters ---------- city : object City object of pyCity_calc out_path : str Path to save city data to do_plot: bool, optional Defines, if load profiles should be plotted use_german : bool, optional Defines, if English or German language should be used (default: False). If False, uses English language. 
save_tikz : bool, optional Define, if figure should be saved as tikz (default: False) save_as_xlsx : bool, optional Define, if load curves should also be saved as xlsx files (default: True) """ if use_german: city_path = os.path.join(out_path, 'Stadt') gen_path_if_not_existent(city_path) city_out = 'Stadt_Daten.txt' data_file = os.path.join(city_path, city_out) else: city_path = os.path.join(out_path, 'city') gen_path_if_not_existent(city_path) city_out = 'city_data.txt' data_file = os.path.join(city_path, city_out) # Extract city base data extract_city_base_data(city=city, out_file_path=data_file, do_plot=do_plot, use_german=use_german, save_tikz=save_tikz) # Extract data into single file if use_german: save_path = os.path.join(city_path, 'Stadt_Gebaeudedaten.txt') x_label = 'X-Koordinate in m' y_label = 'Y-Koordinate in m' else: save_path = os.path.join(city_path, 'city_data_buildings.txt') x_label = 'x-coordinate in m' y_label = 'y-coordinate in m' savcit.save_city_data_to_file(city=city, save_path=save_path, use_german=use_german, save_as_xlsx=save_as_xlsx) # Generate plot with ids and save it to out_path citvis.plot_city_district(city=city, city_list=None, plot_buildings=True, plot_street=True, plot_lhn=False, plot_deg=False, plot_esys=False, offset=7, plot_build_labels=True, plot_str_labels=False, plot_heat_labels=False, equal_axis=False, font_size=16, plt_title=None, x_label=x_label, y_label=y_label, show_plot=False, fig_adjust=None, plot_elec_labels=False, save_plot=True, save_path=city_path, dpi=300, plot_color=True, plot_engl=not use_german, auto_close=True, plot_str_dist=150, node_size=50) # Extract and save city profiles extract_city_profiles(city=city, city_path=city_path, do_plot=do_plot, use_german=use_german, save_tikz=save_tikz, save_as_xlsx=save_as_xlsx) def extract_city_n_build_data(city, out_path, use_german=False, save_tikz=False, save_as_xlsx=True): """ Parameters ---------- city : object City object of pyCity_calc out_path : str Path to save profiles to use_german : bool, optional Defines, if English or German language should be used (default: False). If False, uses English language. 
save_tikz : bool, optional Define, if figure should be saved as tikz (default: False) save_as_xlsx : bool, optional Define, if load curves should also be saved as xlsx files (default: True) """ # Get all building nodes list_ids = city.get_list_build_entity_node_ids() # Extract city data extract_city_data(city=city, out_path=out_path, do_plot=True, use_german=use_german, save_tikz=save_tikz, save_as_xlsx=save_as_xlsx) # Extract building data for n in list_ids: # Generate folder with node id name if use_german: curr_path = os.path.join(out_path, 'Gebaeude', str(n)) else: curr_path = os.path.join(out_path, 'buildings', str(n)) gen_path_if_not_existent(curr_path) # Open txt file and add if use_german: data_f_name = str(n) + '_Daten.txt' else: data_f_name = str(n) + '_data.txt' data_f_path = os.path.join(curr_path, data_f_name) # Extract building base data and save them to file extract_build_base_data(city=city, id=n, file_path=data_f_path, use_german=use_german) # Open txt file and add if use_german: data_f_name = str(n) + '_Profile.txt' else: data_f_name = str(n) + '_profiles.txt' data_f_path = os.path.join(curr_path, data_f_name) extract_build_profiles(city=city, id=n, file_path=data_f_path, do_plot=True, use_german=use_german, save_tikz=save_tikz, save_as_xlsx=save_as_xlsx) if __name__ == '__main__': this_path = os.path.dirname(os.path.abspath(__file__)) city_f_name = 'city_3_buildings_mixed.pkl' input_path = os.path.join(this_path, 'input', city_f_name) out_name = city_f_name[:-4] out_path = os.path.join(this_path, 'output', 'extracted', out_name) use_german = False save_tikz = True save_as_xlsx = True # Make out_path, if not existent gen_path_if_not_existent(out_path) city = pickle.load(open(input_path, mode='rb')) if use_german == True and save_tikz == True: msg = 'Choose use_german=True. Thus, save_tikz is set to False,' \ ' due to possible utf-8 errors.' warnings.warn(msg) save_tikz = False extract_city_n_build_data(city=city, out_path=out_path, use_german=use_german, save_tikz=save_tikz, save_as_xlsx=save_as_xlsx)
42.518889
95
0.469543
4,031
38,267
4.220045
0.106425
0.022926
0.01111
0.015637
0.807125
0.779672
0.767621
0.742167
0.729293
0.710246
0
0.007231
0.447069
38,267
899
96
42.566185
0.79673
0.120234
0
0.70826
0
0
0.137138
0.007035
0
0
0
0
0.00703
1
0.012302
false
0
0.033392
0
0.045694
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
7d00f812caa84757d993e741ffa0fd87cd7f1ff1
2,106
py
Python
Day36_LSTMs_and_Convolutional_networks_NLP/imdb_review_with_LSTM.py
activatedbonkers/100-Days-of-Code-Challenge
a1a376e5373d8fc5fda5df4004115760aa92bfda
[ "MIT" ]
8
2020-07-12T22:45:30.000Z
2021-06-01T10:44:28.000Z
Day36_LSTMs_and_Convolutional_networks_NLP/imdb_review_with_LSTM.py
activatedbonkers/100-Days-of-Code-Challenge
a1a376e5373d8fc5fda5df4004115760aa92bfda
[ "MIT" ]
null
null
null
Day36_LSTMs_and_Convolutional_networks_NLP/imdb_review_with_LSTM.py
activatedbonkers/100-Days-of-Code-Challenge
a1a376e5373d8fc5fda5df4004115760aa92bfda
[ "MIT" ]
6
2020-06-29T18:36:27.000Z
2022-01-16T21:18:03.000Z
# Day 36: comparing four architectures on the IMDB reviews sentiment task.
# Fixed from the original notes: the tfds dataset name is 'imdb_reviews',
# the keyword argument is with_info (not "with_infor"), and the string
# quoting was mismatched. Imports and hyperparameters were missing; the
# values below are assumptions, chosen to be consistent with the trainable
# parameter counts quoted in the original comments.
import tensorflow as tf
import tensorflow_datasets as tfds

vocab_size = 10000
embedding_dim = 16
max_length = 120

# Without LSTM
imdb, info = tfds.load('imdb_reviews', with_info=True, as_supervised=True)
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(6, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
# Trainable parameters = 171,533 parameters

# With LSTM
imdb, info = tfds.load('imdb_reviews', with_info=True, as_supervised=True)
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
    tf.keras.layers.Dense(6, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
# Trainable parameters = 30,129

# Using GRU
imdb, info = tfds.load('imdb_reviews', with_info=True, as_supervised=True)
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    tf.keras.layers.Bidirectional(tf.keras.layers.GRU(32)),
    tf.keras.layers.Dense(6, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
# Trainable parameters = 169,997 parameters

# With Conv1D
imdb, info = tfds.load('imdb_reviews', with_info=True, as_supervised=True)
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    tf.keras.layers.Conv1D(128, 5, activation='relu'),
    tf.keras.layers.GlobalAveragePooling1D(),
    tf.keras.layers.Dense(6, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
# Trainable parameters = 171,149 parameters
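# A minimal, hypothetical continuation (not in the original file): the snippet
# above only builds and summarizes the models. Assuming the plain-text
# 'imdb_reviews' split loaded above, the data could be prepared roughly like
# this before calling model.fit (with numpy imported as np):
#
#     train_texts, train_labels = [], []
#     for text, label in tfds.as_numpy(imdb['train']):
#         train_texts.append(text.decode('utf-8'))
#         train_labels.append(int(label))
#     tokenizer = tf.keras.preprocessing.text.Tokenizer(
#         num_words=vocab_size, oov_token='<OOV>')
#     tokenizer.fit_on_texts(train_texts)
#     padded = tf.keras.preprocessing.sequence.pad_sequences(
#         tokenizer.texts_to_sequences(train_texts), maxlen=max_length)
#     model.fit(padded, np.array(train_labels), epochs=10)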
32.4
84
0.717474
273
2,106
5.417582
0.208791
0.108857
0.167005
0.097363
0.917512
0.899256
0.899256
0.899256
0.899256
0.899256
0
0.022926
0.130104
2,106
64
85
32.90625
0.784389
0.094967
0
0.864865
0
0
0.090669
0
0
0
0
0
0
0
null
null
0
0
null
null
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
8
adb8aa8c1e7b938666d5b065802a18cdbe2281fb
3,106
py
Python
ui/tests/testRestGame.py
ludek77/gow
f3609e7a70c953d83d2a56d871101d118b534bf5
[ "CC0-1.0" ]
null
null
null
ui/tests/testRestGame.py
ludek77/gow
f3609e7a70c953d83d2a56d871101d118b534bf5
[ "CC0-1.0" ]
null
null
null
ui/tests/testRestGame.py
ludek77/gow
f3609e7a70c953d83d2a56d871101d118b534bf5
[ "CC0-1.0" ]
null
null
null
from ui.tests.TestRest import TestRest
from django.core.management import call_command


class TestRestGame(TestRest):

    def setUp(self):
        call_command('loaddata', 'user', verbosity=0)
        call_command('loaddata', 'init', verbosity=0)
        call_command('loaddata', 'test/testWorld', verbosity=0)
        call_command('loaddata', 'test/testUnits', verbosity=0)

    def testRestGame(self):
        self.doMove1999()
        self.doMove2000()

    def doMove1999(self):
        print('---- test 1999 ----')
        # set russian commands
        self.loginRussia()
        self.doTestRest('ui/tests/rest/game/1/move0', 'index1')
        self.doTestRest('ui/tests/rest/game/1/move0/rus', 'unit_get:f=1')
        self.doTestRest('ui/tests/rest/game/1/move0/rus', 'unit_get:f=2')
        self.doTestRest('ui/tests/rest/game/1/move0/rus', 'unit_get:f=4')
        self.doTestRest('ui/tests/rest/game/1/move0/rus', 'unit_get:f=5')
        self.doTestRest('ui/tests/rest/game/1/move0/rus', 'unit_get:f=6')
        self.doTestRest('ui/tests/rest/game/1/move0/rus', 'unit_get:f=7')
        self.doTestRest('ui/tests/rest/game/1/move0/rus', 'unit_get:f=8')
        self.doTestRest('ui/tests/rest/game/1/move0/rus', 'unit_get:f=9')
        self.doTestRest('ui/tests/rest/game/1/move0/rus', 'unit_get:f=10')
        self.doTestRest('ui/tests/rest/game/1/move0/rus', 'unit_get:f=12')
        self.doTestRest('ui/tests/rest/game/1/move0/rus', 'unit_get:f=14')
        self.doTestRest('ui/tests/rest/game/1/move0/rus', 'unit_get:f=15')
        self.doTestRest('ui/tests/rest/game/1/move0/rus', 'unit_get:f=29')
        self.doTestRest('ui/tests/rest/game/1/move0/rus', 'unit_command:f=7&ct=2&args=5')  # 7 -> 5
        self.logout()
        # set spanish commands
        self.loginSpain()
        self.doTestRest('ui/tests/rest/game/1/move0/spa', 'unit_get:f=4')
        self.doTestRest('ui/tests/rest/game/1/move0/spa', 'unit_get:f=6')
        self.doTestRest('ui/tests/rest/game/1/move0/spa', 'unit_get:f=7')
        self.doTestRest('ui/tests/rest/game/1/move0/spa', 'unit_get:f=9')
        self.doTestRest('ui/tests/rest/game/1/move0/spa', 'unit_get:f=10')
        self.doTestRest('ui/tests/rest/game/1/move0/spa', 'unit_get:f=12')
        self.doTestRest('ui/tests/rest/game/1/move0/spa', 'unit_get:f=14')
        self.doTestRest('ui/tests/rest/game/1/move0/spa', 'unit_get:f=15')
        self.doTestRest('ui/tests/rest/game/1/move0/spa', 'unit_get:f=29')
        self.logout()
        # end move
        self.endMove('ui/tests/rest/game/1/move0', 'index2')
        # test units
        self.loginRussia()
        self.doTestRest('ui/tests/rest/game/1/move0/result', 'unit_get:f=7')
        self.doTestRest('ui/tests/rest/game/1/move0/result', 'unit_get:f=5')

    def doMove2000(self):
        print('---- test 2000 ----')
        # set russian commands
        self.loginRussia()
        self.logout()
        # set spanish commands
        self.loginSpain()
        self.logout()
        # end move
        #self.endMove('ui/tests/rest/game/1/move1', 'index1')
45.676471
98
0.620734
458
3,106
4.144105
0.146288
0.106955
0.162276
0.221286
0.82666
0.809273
0.744995
0.744995
0.700738
0.700738
0
0.050481
0.196394
3,106
67
99
46.358209
0.709936
0.055055
0
0.176471
0
0
0.428181
0.28591
0
0
0
0
0
1
0.078431
false
0
0.039216
0
0.137255
0.039216
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
adc343b3cbf3b8c2f62d0690d1016927b1d9ece2
456,997
py
Python
python/mxnet/symbol/gen_op.py
sneaxiy/NVIDIA-MxNet
ce30b18212fbf23f68c006a02cc034e417bb5518
[ "Apache-2.0" ]
null
null
null
python/mxnet/symbol/gen_op.py
sneaxiy/NVIDIA-MxNet
ce30b18212fbf23f68c006a02cc034e417bb5518
[ "Apache-2.0" ]
null
null
null
python/mxnet/symbol/gen_op.py
sneaxiy/NVIDIA-MxNet
ce30b18212fbf23f68c006a02cc034e417bb5518
[ "Apache-2.0" ]
3
2021-07-20T07:40:15.000Z
2021-08-03T08:39:17.000Z
# coding: utf-8# File content is auto-generated. Do not modify. # pylint: skip-file from ._internal import SymbolBase from ..base import _Null def Activation(data=None, act_type=_Null, name=None, attr=None, out=None, **kwargs): r"""Applies an activation function element-wise to the input. The following activation functions are supported: - `relu`: Rectified Linear Unit, :math:`y = max(x, 0)` - `sigmoid`: :math:`y = \frac{1}{1 + exp(-x)}` - `tanh`: Hyperbolic tangent, :math:`y = \frac{exp(x) - exp(-x)}{exp(x) + exp(-x)}` - `softrelu`: Soft ReLU, or SoftPlus, :math:`y = log(1 + exp(x))` - `softsign`: :math:`y = \frac{x}{1 + abs(x)}` Defined in ../src/operator/nn/activation.cc:L165 Parameters ---------- data : Symbol The input array. act_type : {'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required Activation function to be applied. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. Examples -------- A one-hidden-layer MLP with ReLU activation: >>> data = Variable('data') >>> mlp = FullyConnected(data=data, num_hidden=128, name='proj') >>> mlp = Activation(data=mlp, act_type='relu', name='activation') >>> mlp = FullyConnected(data=mlp, num_hidden=10, name='mlp') >>> mlp <Symbol mlp> ReLU activation >>> test_suites = [ ... ('relu', lambda x: np.maximum(x, 0)), ... ('sigmoid', lambda x: 1 / (1 + np.exp(-x))), ... ('tanh', lambda x: np.tanh(x)), ... ('softrelu', lambda x: np.log(1 + np.exp(x))) ... ] >>> x = test_utils.random_arrays((2, 3, 4)) >>> for act_type, numpy_impl in test_suites: ... op = Activation(act_type=act_type, name='act') ... y = test_utils.simple_forward(op, act_data=x) ... y_np = numpy_impl(x) ... print('%s: %s' % (act_type, test_utils.almost_equal(y, y_np))) relu: True sigmoid: True tanh: True softrelu: True """ return (0,) def BNStatsFinalize(sum=None, sum_squares=None, gamma=None, beta=None, moving_mean=None, moving_var=None, eps=_Null, momentum=_Null, fix_gamma=_Null, use_global_stats=_Null, output_mean_var=_Null, elem_count=_Null, name=None, attr=None, out=None, **kwargs): r"""Batch normalization stats finalize. This is an experimental operator designed to work in concert with the NormalizedConvolution op. Think of batchnorm as split into: 1) input data statistics generation (but now sum and sum_squares, not mean and variance) 2) statistics finalize (maps sum, sum_squares, beta and gamma to an equiv_scale and equiv_bias) 3) apply equiv_scale and equiv_bias to data With this picture, the NormalizedConvolution includes parts 1) and 3) from above: NormalizedConvolution == StatsApply -> Relu -> Convolution -> StatsGen What's left over from this NormalizedConvolution is BNStatsFinalize, which performs the mapping of part 2) above, plus the running mean, running variance state machine update of Batchnorm. Legacy description of Batchnorm: Normalizes a data batch by mean and variance, and applies a scale ``gamma`` as well as offset ``beta``. Assume the input has more than one dimension and we normalize along axis 1. We first compute the mean and variance along this axis: .. math:: data\_mean[i] = mean(data[:,i,:,...]) \\ data\_var[i] = var(data[:,i,:,...]) Then compute the normalized output, which has the same shape as input, as following: .. math:: out[:,i,:,...] = \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}} * gamma[i] + beta[i] Both *mean* and *var* returns a scalar by treating the input as a vector. Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` have shape *(k,)*. 
If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and the inverse of ``data_var``, which are needed for the backward pass. Note that gradient of these two outputs are blocked. Besides the inputs and the outputs, this operator accepts two auxiliary states, ``moving_mean`` and ``moving_var``, which are *k*-length vectors. They are global statistics for the whole dataset, which are updated by:: moving_mean = moving_mean * momentum + data_mean * (1 - momentum) moving_var = moving_var * momentum + data_var * (1 - momentum) If ``use_global_stats`` is set to be true, then ``moving_mean`` and ``moving_var`` are used instead of ``data_mean`` and ``data_var`` to compute the output. It is often used during inference. The parameter ``axis`` specifies which axis of the input shape denotes the 'channel' (separately normalized groups). The default is 1. Specifying -1 sets the channel axis to be the last item in the input shape. Both ``gamma`` and ``beta`` are learnable parameters. But if ``fix_gamma`` is true, then set ``gamma`` to 1 and its gradient to 0. .. Note:: When ``fix_gamma`` is set to True, no sparse support is provided. If ``fix_gamma is`` set to False, the sparse tensors will fallback. Defined in ../src/operator/nn/bn_stats_finalize.cc:L226 Parameters ---------- sum : Symbol sum of input data to be normalized sum_squares : Symbol sum of squares of input data to be normalized gamma : Symbol gamma array beta : Symbol beta array moving_mean : Symbol running mean of input moving_var : Symbol running variance of input eps : double, optional, default=0.0010000000474974513 Epsilon to prevent div 0. Must be no less than CUDNN_BN_MIN_EPSILON defined in cudnn.h when using cudnn (usually 1e-5) momentum : float, optional, default=0.899999976 Momentum for moving average fix_gamma : boolean, optional, default=1 Fix gamma while training use_global_stats : boolean, optional, default=0 Whether use global moving statistics instead of local batch-norm. This will force change batch-norm into a scale shift operator. output_mean_var : boolean, optional, default=0 Output the mean and inverse std elem_count : long (non-negative), required Number of elements accumulated in 'sum' and 'sum_squares'. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def BatchNorm(data=None, gamma=None, beta=None, moving_mean=None, moving_var=None, eps=_Null, momentum=_Null, fix_gamma=_Null, use_global_stats=_Null, output_mean_var=_Null, axis=_Null, cudnn_off=_Null, min_calib_range=_Null, max_calib_range=_Null, act_type=_Null, bn_group=_Null, xbuf_ptr=_Null, name=None, attr=None, out=None, **kwargs): r"""Batch normalization. Normalizes a data batch by mean and variance, and applies a scale ``gamma`` as well as offset ``beta``. Assume the input has more than one dimension and we normalize along axis 1. We first compute the mean and variance along this axis: .. math:: data\_mean[i] = mean(data[:,i,:,...]) \\ data\_var[i] = var(data[:,i,:,...]) Then compute the normalized output, which has the same shape as input, as following: .. math:: out[:,i,:,...] = \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}} * gamma[i] + beta[i] Both *mean* and *var* returns a scalar by treating the input as a vector. Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and the inverse of ``data_var``, which are needed for the backward pass. 
Note that gradient of these two outputs are blocked. Besides the inputs and the outputs, this operator accepts two auxiliary states, ``moving_mean`` and ``moving_var``, which are *k*-length vectors. They are global statistics for the whole dataset, which are updated by:: moving_mean = moving_mean * momentum + data_mean * (1 - momentum) moving_var = moving_var * momentum + data_var * (1 - momentum) If ``use_global_stats`` is set to be true, then ``moving_mean`` and ``moving_var`` are used instead of ``data_mean`` and ``data_var`` to compute the output. It is often used during inference. The parameter ``axis`` specifies which axis of the input shape denotes the 'channel' (separately normalized groups). The default is 1. Specifying -1 sets the channel axis to be the last item in the input shape. Both ``gamma`` and ``beta`` are learnable parameters. But if ``fix_gamma`` is true, then set ``gamma`` to 1 and its gradient to 0. .. Note:: When ``fix_gamma`` is set to True, no sparse support is provided. If ``fix_gamma is`` set to False, the sparse tensors will fallback. Defined in ../src/operator/nn/batch_norm.cc:L620 Parameters ---------- data : Symbol Input data to batch normalization gamma : Symbol gamma array beta : Symbol beta array moving_mean : Symbol running mean of input moving_var : Symbol running variance of input eps : double, optional, default=0.0010000000474974513 Epsilon to prevent div 0. Must be no less than CUDNN_BN_MIN_EPSILON defined in cudnn.h when using cudnn (usually 1e-5) momentum : float, optional, default=0.899999976 Momentum for moving average fix_gamma : boolean, optional, default=1 Fix gamma while training use_global_stats : boolean, optional, default=0 Whether use global moving statistics instead of local batch-norm. This will force change batch-norm into a scale shift operator. output_mean_var : boolean, optional, default=0 Output the mean and inverse std axis : int, optional, default='1' Specify which shape axis the channel is specified cudnn_off : boolean, optional, default=0 Do not select CUDNN operator, if available min_calib_range : float or None, optional, default=None The minimum scalar value in the form of float32 obtained through calibration. If present, it will be used to by quantized batch norm op to calculate primitive scale.Note: this calib_range is to calib bn output. max_calib_range : float or None, optional, default=None The maximum scalar value in the form of float32 obtained through calibration. If present, it will be used to by quantized batch norm op to calculate primitive scale.Note: this calib_range is to calib bn output. act_type : {None, 'relu', 'sigmoid', 'softrelu', 'tanh'},optional, default='None' Fused activation function to be applied. bn_group : int, optional, default='1' BN group xbuf_ptr : long (non-negative), optional, default=0 xbuf ptr name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def BatchNormAddRelu(data=None, gamma=None, beta=None, moving_mean=None, moving_var=None, addend=None, eps=_Null, momentum=_Null, fix_gamma=_Null, use_global_stats=_Null, output_mean_var=_Null, axis=_Null, cudnn_off=_Null, bn_group=_Null, xbuf_ptr=_Null, name=None, attr=None, out=None, **kwargs): r"""Batch normalization with built-in addition and ReLU Activation. Normalizes a data batch by mean and variance, and applies a scale ``gamma`` as well as offset ``beta``. 
This version is somewhat special purpose in that the usual normalized data output is then added to an additional data input, followed by ReLU activation. Assume the input has more than one dimension and we normalize along axis 1. We first compute the mean and variance along this axis: .. math:: data\_mean[i] = mean(data[:,i,:,...]) \\ data\_var[i] = var(data[:,i,:,...]) Then compute the normalized output, which has the same shape as input, as following: .. math:: out[:,i,:,...] = \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}} * gamma[i] + beta[i] Both *mean* and *var* returns a scalar by treating the input as a vector. Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and the inverse of ``data_var``, which are needed for the backward pass. Note that gradient of these two outputs are blocked. Besides the inputs and the outputs, this operator accepts two auxiliary states, ``moving_mean`` and ``moving_var``, which are *k*-length vectors. They are global statistics for the whole dataset, which are updated by:: moving_mean = moving_mean * momentum + data_mean * (1 - momentum) moving_var = moving_var * momentum + data_var * (1 - momentum) If ``use_global_stats`` is set to be true, then ``moving_mean`` and ``moving_var`` are used instead of ``data_mean`` and ``data_var`` to compute the output. It is often used during inference. The parameter ``axis`` specifies which axis of the input shape denotes the 'channel' (separately normalized groups). The default is 1. Specifying -1 sets the channel axis to be the last item in the input shape. Both ``gamma`` and ``beta`` are learnable parameters. But if ``fix_gamma`` is true, then set ``gamma`` to 1 and its gradient to 0. Note:: When fix_gamma is set to True, no sparse support is provided. If fix_gamma is set to False, the sparse tensors will fallback. Defined in ../src/operator/nn/batch_norm_add_relu.cc:L518 Parameters ---------- data : Symbol Input data to batch normalization gamma : Symbol gamma array beta : Symbol beta array moving_mean : Symbol running mean of input moving_var : Symbol running variance of input addend : Symbol input summed with BN output before relu eps : double, optional, default=0.0010000000474974513 Epsilon to prevent div 0. Must be no less than CUDNN_BN_MIN_EPSILON defined in cudnn.h when using cudnn (usually 1e-5) momentum : float, optional, default=0.899999976 Momentum for moving average fix_gamma : boolean, optional, default=1 Fix gamma while training use_global_stats : boolean, optional, default=0 Whether use global moving statistics instead of local batch-norm. This will force change batch-norm into a scale shift operator. output_mean_var : boolean, optional, default=0 Output the mean and inverse std axis : int, optional, default='1' Specify which shape axis the channel is specified cudnn_off : boolean, optional, default=0 Do not select CUDNN operator, if available bn_group : int, optional, default='1' BN group xbuf_ptr : long (non-negative), optional, default=0 xbuf ptr name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def BatchNorm_v1(data=None, gamma=None, beta=None, eps=_Null, momentum=_Null, fix_gamma=_Null, use_global_stats=_Null, output_mean_var=_Null, name=None, attr=None, out=None, **kwargs): r"""Batch normalization. This operator is DEPRECATED. Perform BatchNorm on the input. 
Normalizes a data batch by mean and variance, and applies a scale ``gamma`` as well as offset ``beta``. Assume the input has more than one dimension and we normalize along axis 1. We first compute the mean and variance along this axis: .. math:: data\_mean[i] = mean(data[:,i,:,...]) \\ data\_var[i] = var(data[:,i,:,...]) Then compute the normalized output, which has the same shape as input, as following: .. math:: out[:,i,:,...] = \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}} * gamma[i] + beta[i] Both *mean* and *var* returns a scalar by treating the input as a vector. Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and ``data_var`` as well, which are needed for the backward pass. Besides the inputs and the outputs, this operator accepts two auxiliary states, ``moving_mean`` and ``moving_var``, which are *k*-length vectors. They are global statistics for the whole dataset, which are updated by:: moving_mean = moving_mean * momentum + data_mean * (1 - momentum) moving_var = moving_var * momentum + data_var * (1 - momentum) If ``use_global_stats`` is set to be true, then ``moving_mean`` and ``moving_var`` are used instead of ``data_mean`` and ``data_var`` to compute the output. It is often used during inference. Both ``gamma`` and ``beta`` are learnable parameters. But if ``fix_gamma`` is true, then set ``gamma`` to 1 and its gradient to 0. There's no sparse support for this operator, and it will exhibit problematic behavior if used with sparse tensors. Defined in ../src/operator/batch_norm_v1.cc:L95 Parameters ---------- data : Symbol Input data to batch normalization gamma : Symbol gamma array beta : Symbol beta array eps : float, optional, default=0.00100000005 Epsilon to prevent div 0 momentum : float, optional, default=0.899999976 Momentum for moving average fix_gamma : boolean, optional, default=1 Fix gamma while training use_global_stats : boolean, optional, default=0 Whether use global moving statistics instead of local batch-norm. This will force change batch-norm into a scale shift operator. output_mean_var : boolean, optional, default=0 Output All,normal mean and var name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def BilinearSampler(data=None, grid=None, cudnn_off=_Null, name=None, attr=None, out=None, **kwargs): r"""Applies bilinear sampling to input feature map. Bilinear Sampling is the key of [NIPS2015] \"Spatial Transformer Networks\". The usage of the operator is very similar to remap function in OpenCV, except that the operator has the backward pass. Given :math:`data` and :math:`grid`, then the output is computed by .. math:: x_{src} = grid[batch, 0, y_{dst}, x_{dst}] \\ y_{src} = grid[batch, 1, y_{dst}, x_{dst}] \\ output[batch, channel, y_{dst}, x_{dst}] = G(data[batch, channel, y_{src}, x_{src}) :math:`x_{dst}`, :math:`y_{dst}` enumerate all spatial locations in :math:`output`, and :math:`G()` denotes the bilinear interpolation kernel. The out-boundary points will be padded with zeros.The shape of the output will be (data.shape[0], data.shape[1], grid.shape[2], grid.shape[3]). The operator assumes that :math:`data` has 'NCHW' layout and :math:`grid` has been normalized to [-1, 1]. BilinearSampler often cooperates with GridGenerator which generates sampling grids for BilinearSampler. GridGenerator supports two kinds of transformation: ``affine`` and ``warp``. 
If users want to design a CustomOp to manipulate :math:`grid`, please firstly refer to the code of GridGenerator. Example 1:: ## Zoom out data two times data = array([[[[1, 4, 3, 6], [1, 8, 8, 9], [0, 4, 1, 5], [1, 0, 1, 3]]]]) affine_matrix = array([[2, 0, 0], [0, 2, 0]]) affine_matrix = reshape(affine_matrix, shape=(1, 6)) grid = GridGenerator(data=affine_matrix, transform_type='affine', target_shape=(4, 4)) out = BilinearSampler(data, grid) out [[[[ 0, 0, 0, 0], [ 0, 3.5, 6.5, 0], [ 0, 1.25, 2.5, 0], [ 0, 0, 0, 0]]] Example 2:: ## shift data horizontally by -1 pixel data = array([[[[1, 4, 3, 6], [1, 8, 8, 9], [0, 4, 1, 5], [1, 0, 1, 3]]]]) warp_maxtrix = array([[[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]]) grid = GridGenerator(data=warp_matrix, transform_type='warp') out = BilinearSampler(data, grid) out [[[[ 4, 3, 6, 0], [ 8, 8, 9, 0], [ 4, 1, 5, 0], [ 0, 1, 3, 0]]] Defined in ../src/operator/bilinear_sampler.cc:L256 Parameters ---------- data : Symbol Input data to the BilinearsamplerOp. grid : Symbol Input grid to the BilinearsamplerOp.grid has two channels: x_src, y_src cudnn_off : boolean or None, optional, default=None whether to turn cudnn off name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def BlockGrad(data=None, name=None, attr=None, out=None, **kwargs): r"""Stops gradient computation. Stops the accumulated gradient of the inputs from flowing through this operator in the backward direction. In other words, this operator prevents the contribution of its inputs to be taken into account for computing gradients. Example:: v1 = [1, 2] v2 = [0, 1] a = Variable('a') b = Variable('b') b_stop_grad = stop_gradient(3 * b) loss = MakeLoss(b_stop_grad + a) executor = loss.simple_bind(ctx=cpu(), a=(1,2), b=(1,2)) executor.forward(is_train=True, a=v1, b=v2) executor.outputs [ 1. 5.] executor.backward() executor.grad_arrays [ 0. 0.] [ 1. 1.] Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L325 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def CTCLoss(data=None, label=None, data_lengths=None, label_lengths=None, use_data_lengths=_Null, use_label_lengths=_Null, blank_label=_Null, name=None, attr=None, out=None, **kwargs): r"""Connectionist Temporal Classification Loss. .. note:: The existing alias ``contrib_CTCLoss`` is deprecated. The shapes of the inputs and outputs: - **data**: `(sequence_length, batch_size, alphabet_size)` - **label**: `(batch_size, label_sequence_length)` - **out**: `(batch_size)` The `data` tensor consists of sequences of activation vectors (without applying softmax), with i-th channel in the last dimension corresponding to i-th label for i between 0 and alphabet_size-1 (i.e always 0-indexed). Alphabet size should include one additional value reserved for blank label. When `blank_label` is ``"first"``, the ``0``-th channel is be reserved for activation of blank label, or otherwise if it is "last", ``(alphabet_size-1)``-th channel should be reserved for blank label. ``label`` is an index matrix of integers. When `blank_label` is ``"first"``, the value 0 is then reserved for blank label, and should not be passed in this matrix. Otherwise, when `blank_label` is ``"last"``, the value `(alphabet_size-1)` is reserved for blank label. 
If a sequence of labels is shorter than *label_sequence_length*, use the special padding value at the end of the sequence to conform it to the correct length. The padding value is `0` when `blank_label` is ``"first"``, and `-1` otherwise. For example, suppose the vocabulary is `[a, b, c]`, and in one batch we have three sequences 'ba', 'cbb', and 'abac'. When `blank_label` is ``"first"``, we can index the labels as `{'a': 1, 'b': 2, 'c': 3}`, and we reserve the 0-th channel for blank label in data tensor. The resulting `label` tensor should be padded to be:: [[2, 1, 0, 0], [3, 2, 2, 0], [1, 2, 1, 3]] When `blank_label` is ``"last"``, we can index the labels as `{'a': 0, 'b': 1, 'c': 2}`, and we reserve the channel index 3 for blank label in data tensor. The resulting `label` tensor should be padded to be:: [[1, 0, -1, -1], [2, 1, 1, -1], [0, 1, 0, 2]] ``out`` is a list of CTC loss values, one per example in the batch. See *Connectionist Temporal Classification: Labelling Unsegmented Sequence Data with Recurrent Neural Networks*, A. Graves *et al*. for more information on the definition and the algorithm. Defined in ../src/operator/nn/ctc_loss.cc:L100 Parameters ---------- data : Symbol Input ndarray label : Symbol Ground-truth labels for the loss. data_lengths : Symbol Lengths of data for each of the samples. Only required when use_data_lengths is true. label_lengths : Symbol Lengths of labels for each of the samples. Only required when use_label_lengths is true. use_data_lengths : boolean, optional, default=0 Whether the data lenghts are decided by `data_lengths`. If false, the lengths are equal to the max sequence length. use_label_lengths : boolean, optional, default=0 Whether the label lenghts are decided by `label_lengths`, or derived from `padding_mask`. If false, the lengths are derived from the first occurrence of the value of `padding_mask`. The value of `padding_mask` is ``0`` when first CTC label is reserved for blank, and ``-1`` when last label is reserved for blank. See `blank_label`. blank_label : {'first', 'last'},optional, default='first' Set the label that is reserved for blank label.If "first", 0-th label is reserved, and label values for tokens in the vocabulary are between ``1`` and ``alphabet_size-1``, and the padding mask is ``-1``. If "last", last label value ``alphabet_size-1`` is reserved for blank label instead, and label values for tokens in the vocabulary are between ``0`` and ``alphabet_size-2``, and the padding mask is ``0``. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def Cast(data=None, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Casts all elements of the input to a new type. .. note:: ``Cast`` is deprecated. Use ``cast`` instead. Example:: cast([0.9, 1.3], dtype='int32') = [0, 1] cast([1e20, 11.1], dtype='float16') = [inf, 11.09375] cast([300, 11.1, 10.9, -1, -3], dtype='uint8') = [44, 11, 10, 255, 253] Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L664 Parameters ---------- data : Symbol The input. dtype : {'bfloat16', 'bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'int8', 'uint8'}, required Output data type. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def Concat(*data, **kwargs): r"""Joins input arrays along a given axis. .. note:: `Concat` is deprecated. Use `concat` instead. 
The dimensions of the input arrays should be the same except the axis along which they will be concatenated. The dimension of the output array along the concatenated axis will be equal to the sum of the corresponding dimensions of the input arrays. The storage type of ``concat`` output depends on storage types of inputs - concat(csr, csr, ..., csr, dim=0) = csr - otherwise, ``concat`` generates output with default storage Example:: x = [[1,1],[2,2]] y = [[3,3],[4,4],[5,5]] z = [[6,6], [7,7],[8,8]] concat(x,y,z,dim=0) = [[ 1., 1.], [ 2., 2.], [ 3., 3.], [ 4., 4.], [ 5., 5.], [ 6., 6.], [ 7., 7.], [ 8., 8.]] Note that you cannot concat x,y,z along dimension 1 since dimension 0 is not the same for all the input arrays. concat(y,z,dim=1) = [[ 3., 3., 6., 6.], [ 4., 4., 7., 7.], [ 5., 5., 8., 8.]] Defined in ../src/operator/nn/concat.cc:L385 This function support variable length of positional input. Parameters ---------- data : Symbol[] List of arrays to concatenate dim : int, optional, default='1' the dimension to be concated. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. Examples -------- Concat two (or more) inputs along a specific dimension: >>> a = Variable('a') >>> b = Variable('b') >>> c = Concat(a, b, dim=1, name='my-concat') >>> c <Symbol my-concat> >>> SymbolDoc.get_output_shape(c, a=(128, 10, 3, 3), b=(128, 15, 3, 3)) {'my-concat_output': (128L, 25L, 3L, 3L)} Note the shape should be the same except on the dimension that is being concatenated. """ return (0,) def Convolution(data=None, weight=None, bias=None, kernel=_Null, stride=_Null, dilate=_Null, pad=_Null, num_filter=_Null, num_group=_Null, workspace=_Null, no_bias=_Null, cudnn_tune=_Null, cudnn_off=_Null, cudnn_tensor_core=_Null, cudnn_tensor_core_only=_Null, layout=_Null, cudnn_algo_verbose=_Null, cudnn_algo_fwd=_Null, cudnn_algo_bwd_data=_Null, cudnn_algo_bwd_filter=_Null, cudnn_algo_fwd_prec=_Null, cudnn_algo_bwd_prec=_Null, name=None, attr=None, out=None, **kwargs): r"""Compute *N*-D convolution on *(N+2)*-D input. In the 2-D convolution, given input data with shape *(batch_size, channel, height, width)*, the output is computed by .. math:: out[n,i,:,:] = bias[i] + \sum_{j=0}^{channel} data[n,j,:,:] \star weight[i,j,:,:] where :math:`\star` is the 2-D cross-correlation operator. For general 2-D convolution, the shapes are - **data**: *(batch_size, channel, height, width)* - **weight**: *(num_filter, channel, kernel[0], kernel[1])* - **bias**: *(num_filter,)* - **out**: *(batch_size, num_filter, out_height, out_width)*. Define:: f(x,k,p,s,d) = floor((x+2*p-d*(k-1)-1)/s)+1 then we have:: out_height=f(height, kernel[0], pad[0], stride[0], dilate[0]) out_width=f(width, kernel[1], pad[1], stride[1], dilate[1]) If ``no_bias`` is set to be true, then the ``bias`` term is ignored. The default data ``layout`` is *NCHW*, namely *(batch_size, channel, height, width)*. We can choose other layouts such as *NWC*. If ``num_group`` is larger than 1, denoted by *g*, then split the input ``data`` evenly into *g* parts along the channel axis, and also evenly split ``weight`` along the first dimension. Next compute the convolution on the *i*-th part of the data with the *i*-th weight part. The output is obtained by concatenating all the *g* results. 1-D convolution does not have *height* dimension but only *width* in space. - **data**: *(batch_size, channel, width)* - **weight**: *(num_filter, channel, kernel[0])* - **bias**: *(num_filter,)* - **out**: *(batch_size, num_filter, out_width)*. 
3-D convolution adds an additional *depth* dimension besides *height* and *width*. The shapes are - **data**: *(batch_size, channel, depth, height, width)* - **weight**: *(num_filter, channel, kernel[0], kernel[1], kernel[2])* - **bias**: *(num_filter,)* - **out**: *(batch_size, num_filter, out_depth, out_height, out_width)*. Both ``weight`` and ``bias`` are learnable parameters. There are other options to tune the performance. - **cudnn_tune**: enabling this option leads to higher startup time but may give faster speed. Options are - **off**: no tuning - **limited_workspace**: run test and pick the fastest algorithm that doesn't exceed workspace limit. - **fastest**: pick the fastest algorithm and ignore workspace limit. - **None** (default): the behavior is determined by environment variable ``MXNET_CUDNN_AUTOTUNE_DEFAULT``. 0 for off, 1 for limited workspace (default), 2 for fastest. - **workspace**: A large number leads to more (GPU) memory usage but may improve the performance. Defined in ../src/operator/nn/convolution.cc:L498 Parameters ---------- data : Symbol Input data to the ConvolutionOp. weight : Symbol Weight matrix. bias : Symbol Bias parameter. kernel : Shape(tuple), required Convolution kernel size: (w,), (h, w) or (d, h, w) stride : Shape(tuple), optional, default=[] Convolution stride: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension. dilate : Shape(tuple), optional, default=[] Convolution dilate: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension. pad : Shape(tuple), optional, default=[] Zero pad for convolution: (w,), (h, w) or (d, h, w). Defaults to no padding. num_filter : int (non-negative), required Convolution filter(channel) number num_group : int (non-negative), optional, default=1 Number of group partitions. workspace : long (non-negative), optional, default=1024 Maximum temporary workspace allowed (MB) in convolution. This parameter has two usages. When CUDNN is not used, it determines the effective batch size of the convolution kernel. When CUDNN is used, it controls the maximum temporary storage used for tuning the best CUDNN kernel when `limited_workspace` strategy is used. no_bias : boolean, optional, default=0 Whether to disable bias parameter. cudnn_tune : {None, 'fastest', 'limited_workspace', 'off'},optional, default='None' Whether to pick convolution algo by running performance test. cudnn_off : boolean, optional, default=0 Turn off cudnn for this layer. cudnn_tensor_core : boolean or None, optional, default=None Allow Tensor Core math within the algos. cudnn_tensor_core_only : boolean, optional, default=0 Require Tensor Core math within the algos. layout : {None, 'NCDHW', 'NCHW', 'NCW', 'NDHWC', 'NHWC', 'NWC'},optional, default='None' Set layout for input, output and weight. Empty for default layout: NCW for 1d, NCHW for 2d and NCDHW for 3d. NHWC and NDHWC are only supported on GPU. cudnn_algo_verbose : boolean, optional, default=0 Verboseness of algo selection. 1 = output selection, 0 = no output cudnn_algo_fwd : int, optional, default='-1' Specified Forward Algorithm. cudnn_algo_bwd_data : int, optional, default='-1' Specified Backprop-to-Data Algorithm. cudnn_algo_bwd_filter : int, optional, default='-1' Specified Backprop-to-Filter Algorithm. cudnn_algo_fwd_prec : {'None', 'float16', 'float32', 'float64'},optional, default='None' Precision of the computation of the forward convolution kernel. Default is the tensor data type, or float32 if the tensor data type is float16.
cudnn_algo_bwd_prec : {'None', 'float16', 'float32', 'float64'},optional, default='None' Precision of the computation of the back-prop kernels. Default is the tensor data type, or float32 if the tensor data type is float16. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def Convolution_v1(data=None, weight=None, bias=None, kernel=_Null, stride=_Null, dilate=_Null, pad=_Null, num_filter=_Null, num_group=_Null, workspace=_Null, no_bias=_Null, cudnn_tune=_Null, cudnn_off=_Null, layout=_Null, name=None, attr=None, out=None, **kwargs): r"""This operator is DEPRECATED. Apply convolution to input then add a bias. Parameters ---------- data : Symbol Input data to the ConvolutionV1Op. weight : Symbol Weight matrix. bias : Symbol Bias parameter. kernel : Shape(tuple), required convolution kernel size: (h, w) or (d, h, w) stride : Shape(tuple), optional, default=[] convolution stride: (h, w) or (d, h, w) dilate : Shape(tuple), optional, default=[] convolution dilate: (h, w) or (d, h, w) pad : Shape(tuple), optional, default=[] pad for convolution: (h, w) or (d, h, w) num_filter : int (non-negative), required convolution filter(channel) number num_group : int (non-negative), optional, default=1 Number of group partitions. Equivalent to slicing the input into num_group partitions, applying convolution on each, then concatenating the results workspace : long (non-negative), optional, default=1024 Maximum temporary workspace allowed for convolution (MB). This parameter determines the effective batch size of the convolution kernel, which may be smaller than the given batch size. Also, the workspace will be automatically enlarged to make sure that we can run the kernel with batch_size=1 no_bias : boolean, optional, default=0 Whether to disable bias parameter. cudnn_tune : {None, 'fastest', 'limited_workspace', 'off'},optional, default='None' Whether to pick convolution algo by running performance test. Leads to higher startup time but may give faster speed. Options are: 'off': no tuning 'limited_workspace': run test and pick the fastest algorithm that doesn't exceed workspace limit. 'fastest': pick the fastest algorithm and ignore workspace limit. If set to None (default), behavior is determined by environment variable MXNET_CUDNN_AUTOTUNE_DEFAULT: 0 for off, 1 for limited workspace (default), 2 for fastest. cudnn_off : boolean, optional, default=0 Turn off cudnn for this layer. layout : {None, 'NCDHW', 'NCHW', 'NDHWC', 'NHWC'},optional, default='None' Set layout for input, output and weight. Empty for default layout: NCHW for 2d and NCDHW for 3d. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def Correlation(data1=None, data2=None, kernel_size=_Null, max_displacement=_Null, stride1=_Null, stride2=_Null, pad_size=_Null, is_multiply=_Null, name=None, attr=None, out=None, **kwargs): r"""Applies correlation to inputs. The correlation layer performs multiplicative patch comparisons between two feature maps. Given two multi-channel feature maps :math:`f_{1}, f_{2}`, with :math:`w`, :math:`h`, and :math:`c` being their width, height, and number of channels, the correlation layer lets the network compare each patch from :math:`f_{1}` with each patch from :math:`f_{2}`. For now we consider only a single comparison of two patches. The 'correlation' of two patches centered at :math:`x_{1}` in the first map and :math:`x_{2}` in the second map is then defined as:
.. math:: c(x_{1}, x_{2}) = \sum_{o \in [-k,k] \times [-k,k]} <f_{1}(x_{1} + o), f_{2}(x_{2} + o)> for a square patch of size :math:`K:=2k+1`. Note that the equation above is identical to one step of a convolution in neural networks, but instead of convolving data with a filter, it convolves data with other data. For this reason, it has no training weights. Computing :math:`c(x_{1}, x_{2})` involves :math:`c * K^{2}` multiplications. Comparing all patch combinations involves :math:`w^{2}*h^{2}` such computations. Given a maximum displacement :math:`d`, for each location :math:`x_{1}` it computes correlations :math:`c(x_{1}, x_{2})` only in a neighborhood of size :math:`D:=2d+1`, by limiting the range of :math:`x_{2}`. We use strides :math:`s_{1}, s_{2}`, to quantize :math:`x_{1}` globally and to quantize :math:`x_{2}` within the neighborhood centered around :math:`x_{1}`. The final output is defined by the following expression: .. math:: out[n, q, i, j] = c(x_{i, j}, x_{q}) where :math:`i` and :math:`j` enumerate spatial locations in :math:`f_{1}`, and :math:`q` denotes the :math:`q^{th}` neighborhood of :math:`x_{i,j}`. Defined in ../src/operator/correlation.cc:L198 Parameters ---------- data1 : Symbol Input data1 to the correlation. data2 : Symbol Input data2 to the correlation. kernel_size : int (non-negative), optional, default=1 kernel size for Correlation must be an odd number max_displacement : int (non-negative), optional, default=1 Max displacement of Correlation stride1 : int (non-negative), optional, default=1 stride1 quantizes data1 globally stride2 : int (non-negative), optional, default=1 stride2 quantizes data2 within the neighborhood centered around data1 pad_size : int (non-negative), optional, default=0 pad for Correlation is_multiply : boolean, optional, default=1 operation type is either multiplication or subtraction name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def Crop(*data, **kwargs): r""" .. note:: `Crop` is deprecated. Use `slice` instead. Crop the 2nd and 3rd dim of input data, with the corresponding size of h_w or with the width and height of the second input symbol, i.e., with one input, we need h_w to specify the crop height and width, otherwise the second input symbol's size will be used Defined in ../src/operator/crop.cc:L50 This function supports a variable number of positional inputs. Parameters ---------- data : Symbol or Symbol[] Tensor or List of Tensors, the second input will be used as crop_like shape reference offset : Shape(tuple), optional, default=[0,0] crop offset coordinate: (y, x) h_w : Shape(tuple), optional, default=[0,0] crop height and width: (h, w) center_crop : boolean, optional, default=0 If set to true, the center crop will be used; otherwise it will crop using the shape of crop_like name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def CuDNNBatchNorm(data=None, gamma=None, beta=None, moving_mean=None, moving_var=None, eps=_Null, momentum=_Null, fix_gamma=_Null, use_global_stats=_Null, output_mean_var=_Null, axis=_Null, cudnn_off=_Null, min_calib_range=_Null, max_calib_range=_Null, act_type=_Null, bn_group=_Null, xbuf_ptr=_Null, name=None, attr=None, out=None, **kwargs): r"""Apply batch normalization to input.
Parameters ---------- data : Symbol Input data to batch normalization gamma : Symbol gamma array beta : Symbol beta array moving_mean : Symbol running mean of input moving_var : Symbol running variance of input eps : double, optional, default=0.0010000000474974513 Epsilon to prevent div 0. Must be no less than CUDNN_BN_MIN_EPSILON defined in cudnn.h when using cudnn (usually 1e-5) momentum : float, optional, default=0.899999976 Momentum for moving average fix_gamma : boolean, optional, default=1 Fix gamma while training use_global_stats : boolean, optional, default=0 Whether to use global moving statistics instead of local batch-norm. This will force batch-norm into a scale-shift operator. output_mean_var : boolean, optional, default=0 Output the mean and inverse std axis : int, optional, default='1' Specify which shape axis denotes the channel cudnn_off : boolean, optional, default=0 Do not select the CUDNN operator, even if available min_calib_range : float or None, optional, default=None The minimum scalar value in the form of float32 obtained through calibration. If present, it will be used by the quantized batch norm op to calculate the primitive scale. Note: this calib_range is used to calibrate the bn output. max_calib_range : float or None, optional, default=None The maximum scalar value in the form of float32 obtained through calibration. If present, it will be used by the quantized batch norm op to calculate the primitive scale. Note: this calib_range is used to calibrate the bn output. act_type : {None, 'relu', 'sigmoid', 'softrelu', 'tanh'},optional, default='None' Fused activation function to be applied. bn_group : int, optional, default='1' BN group xbuf_ptr : long (non-negative), optional, default=0 xbuf ptr name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def Custom(*data, **kwargs): r"""Apply a custom operator implemented in a frontend language (like Python). Custom operators should override required methods like `forward` and `backward`. The custom operator must be registered before it can be used. Please check the tutorial here: https://mxnet.incubator.apache.org/api/faq/new_op Defined in ../src/operator/custom/custom.cc:L534 Parameters ---------- data : Symbol[] Input data for the custom operator. op_type : string Name of the custom operator. This is the name that is passed to `mx.operator.register` to register the operator. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def Deconvolution(data=None, weight=None, bias=None, kernel=_Null, stride=_Null, dilate=_Null, pad=_Null, adj=_Null, target_shape=_Null, num_filter=_Null, num_group=_Null, workspace=_Null, no_bias=_Null, cudnn_tune=_Null, cudnn_off=_Null, cudnn_tensor_core=_Null, cudnn_tensor_core_only=_Null, layout=_Null, cudnn_algo_verbose=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes 1D or 2D transposed convolution (aka fractionally strided convolution) of the input tensor. This operation can be seen as the gradient of the Convolution operation with respect to its input. Convolution usually reduces the size of the input. Transposed convolution works the other way, going from a smaller input to a larger output while preserving the connectivity pattern. Parameters ---------- data : Symbol Input tensor to the deconvolution operation. weight : Symbol Weights representing the kernel. bias : Symbol Bias added to the result after the deconvolution operation.
kernel : Shape(tuple), required Deconvolution kernel size: (w,), (h, w) or (d, h, w). This is the same as the kernel size used for the corresponding convolution stride : Shape(tuple), optional, default=[] The stride used for the corresponding convolution: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension. dilate : Shape(tuple), optional, default=[] Dilation factor for each dimension of the input: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension. pad : Shape(tuple), optional, default=[] The amount of implicit zero padding added during convolution for each dimension of the input: (w,), (h, w) or (d, h, w). ``(kernel-1)/2`` is usually a good choice. If `target_shape` is set, `pad` will be ignored and a padding that will generate the target shape will be used. Defaults to no padding. adj : Shape(tuple), optional, default=[] Adjustment for output shape: (w,), (h, w) or (d, h, w). If `target_shape` is set, `adj` will be ignored and computed accordingly. target_shape : Shape(tuple), optional, default=[] Shape of the output tensor: (w,), (h, w) or (d, h, w). num_filter : int (non-negative), required Number of output filters. num_group : int (non-negative), optional, default=1 Number of group partitions. workspace : long (non-negative), optional, default=512 Maximum temporary workspace allowed (MB) in deconvolution. This parameter has two usages. When CUDNN is not used, it determines the effective batch size of the deconvolution kernel. When CUDNN is used, it controls the maximum temporary storage used for tuning the best CUDNN kernel when `limited_workspace` strategy is used. no_bias : boolean, optional, default=1 Whether to disable bias parameter. cudnn_tune : {None, 'fastest', 'limited_workspace', 'off'},optional, default='None' Whether to pick convolution algorithm by running performance test. cudnn_off : boolean, optional, default=0 Turn off cudnn for this layer. cudnn_tensor_core : boolean or None, optional, default=None Allow Tensor Core math within the algos. cudnn_tensor_core_only : boolean, optional, default=0 Require Tensor Core math within the algos. layout : {None, 'NCDHW', 'NCHW', 'NCW', 'NDHWC', 'NHWC', 'NWC'},optional, default='None' Set layout for input, output and weight. Empty for default layout: NCW for 1d, NCHW for 2d and NCDHW for 3d. NWC, NHWC and NDHWC are only supported on GPU. cudnn_algo_verbose : boolean, optional, default=0 Verboseness of algo selection. 1 = output selection, 0 = no output name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def Dropout(data=None, p=_Null, mode=_Null, axes=_Null, cudnn_off=_Null, name=None, attr=None, out=None, **kwargs): r"""Applies dropout operation to input array. - During training, each element of the input is set to zero with probability p. The whole array is rescaled by :math:`1/(1-p)` to keep the expected sum of the input unchanged. - During testing, this operator does not change the input if mode is 'training'. If mode is 'always', the same computation as during training will be applied. Example:: random.seed(998) input_array = array([[3., 0.5, -0.5, 2., 7.], [2., -0.4, 7., 3., 0.2]]) a = symbol.Variable('a') dropout = symbol.Dropout(a, p = 0.2) executor = dropout.simple_bind(a = input_array.shape) ## If training executor.forward(is_train = True, a = input_array) executor.outputs [[ 3.75 0.625 -0. 2.5 8.75 ] [ 2.5 -0.5 8.75 3.75 0. ]] ## If testing executor.forward(is_train = False, a = input_array) executor.outputs [[ 3. 0.5 -0.5 2. 7. ] [ 2. -0.4 7. 3. 0.2 ]]
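The :math:`1/(1-p)` rescaling in the example above can be checked by hand; with p = 0.2 every surviving element is scaled by 1.25 (plain Python arithmetic, shown only as a sketch):

>>> p = 0.2
>>> round(3.0 / (1 - p), 3)  # the input value 3.0 becomes 3.75 in the training output
3.75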
Defined in ../src/operator/nn/dropout.cc:L96 Parameters ---------- data : Symbol Input array to which dropout will be applied. p : float, optional, default=0.5 Fraction of the input that gets dropped out during training time. mode : {'always', 'training'},optional, default='training' Whether to only turn on dropout during training or to also turn on for inference. axes : Shape(tuple), optional, default=[] Axes for variational dropout kernel. cudnn_off : boolean or None, optional, default=0 Whether to turn off cudnn in dropout operator. This option is ignored if axes is specified. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. Examples -------- Apply dropout, corrupting input elements to zero with probability 0.2: >>> data = Variable('data') >>> data_dp = Dropout(data=data, p=0.2) >>> shape = (100, 100) # take larger shapes to be more statistically stable >>> x = np.ones(shape) >>> op = Dropout(p=0.5, name='dp') >>> # dropout is identity during testing >>> y = test_utils.simple_forward(op, dp_data=x, is_train=False) >>> test_utils.almost_equal(x, y) True >>> y = test_utils.simple_forward(op, dp_data=x, is_train=True) >>> # expectation is (approximately) unchanged >>> np.abs(x.mean() - y.mean()) < 0.1 True >>> set(np.unique(y)) == set([0, 2]) True """ return (0,) def ElementWiseSum(*args, **kwargs): r"""Adds all input arguments element-wise. .. math:: add\_n(a_1, a_2, ..., a_n) = a_1 + a_2 + ... + a_n ``add_n`` is potentially more efficient than calling ``add`` by `n` times. The storage type of ``add_n`` output depends on storage types of inputs - add_n(row_sparse, row_sparse, ..) = row_sparse - add_n(default, csr, default) = default - add_n(any combination of more than 4 inputs with at least one default type) = default - otherwise, ``add_n`` falls back to default storage for all inputs and generates output with default storage Defined in ../src/operator/tensor/elemwise_sum.cc:L156 This function supports a variable number of positional inputs. Parameters ---------- args : Symbol[] Positional input arguments name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def Embedding(data=None, weight=None, input_dim=_Null, output_dim=_Null, dtype=_Null, sparse_grad=_Null, name=None, attr=None, out=None, **kwargs): r"""Maps integer indices to vector representations (embeddings). This operator maps words to real-valued vectors in a high-dimensional space, called word embeddings. These embeddings can capture semantic and syntactic properties of the words. For example, it has been noted that in the learned embedding spaces, similar words tend to be close to each other and dissimilar words far apart. For an input array of shape (d1, ..., dK), the shape of an output array is (d1, ..., dK, output_dim). All the input values should be integers in the range [0, input_dim). If the input_dim is ip0 and output_dim is op0, then the shape of the embedding weight matrix must be (ip0, op0). When "sparse_grad" is False, if any index mentioned is too large, it is replaced by the index that addresses the last vector in an embedding matrix. When "sparse_grad" is True, an error will be raised if invalid indices are found. Examples:: input_dim = 4 output_dim = 5 // Each row in weight matrix y represents a word. So, y = (w0,w1,w2,w3) y = [[ 0., 1., 2., 3., 4.], [ 5., 6., 7., 8., 9.], [ 10., 11., 12., 13., 14.], [ 15., 16., 17., 18., 19.]] // Input array x represents n-grams(2-gram).
So, x = [(w1,w3), (w0,w2)] x = [[ 1., 3.], [ 0., 2.]] // Mapped input x to its vector representation y. Embedding(x, y, 4, 5) = [[[ 5., 6., 7., 8., 9.], [ 15., 16., 17., 18., 19.]], [[ 0., 1., 2., 3., 4.], [ 10., 11., 12., 13., 14.]]] The storage type of weight can be either row_sparse or default. .. Note:: If "sparse_grad" is set to True, the storage type of gradient w.r.t weights will be "row_sparse". Only a subset of optimizers support sparse gradients, including SGD, AdaGrad and Adam. Note that by default lazy update is turned on, which may perform differently from standard updates. For more details, please check the Optimization API at: https://mxnet.incubator.apache.org/api/python/optimization/optimization.html Defined in ../src/operator/tensor/indexing_op.cc:L598 Parameters ---------- data : Symbol The input array to the embedding operator. weight : Symbol The embedding weight matrix. input_dim : int, required Vocabulary size of the input indices. output_dim : int, required Dimension of the embedding vectors. dtype : {'bfloat16', 'float16', 'float32', 'float64', 'int32', 'int64', 'int8', 'uint8'},optional, default='float32' Data type of weight. sparse_grad : boolean, optional, default=0 Compute row sparse gradient in the backward calculation. If set to True, the grad's storage type is row_sparse. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. Examples -------- Assume we want to map the 26 English alphabet letters to 16-dimensional vectorial representations. >>> vocabulary_size = 26 >>> embed_dim = 16 >>> seq_len, batch_size = (10, 64) >>> input = Variable('letters') >>> op = Embedding(data=input, input_dim=vocabulary_size, output_dim=embed_dim, ... name='embed') >>> SymbolDoc.get_output_shape(op, letters=(seq_len, batch_size)) {'embed_output': (10L, 64L, 16L)} >>> vocab_size, embed_dim = (26, 16) >>> batch_size = 12 >>> word_vecs = test_utils.random_arrays((vocab_size, embed_dim)) >>> op = Embedding(name='embed', input_dim=vocab_size, output_dim=embed_dim) >>> x = np.random.choice(vocab_size, batch_size) >>> y = test_utils.simple_forward(op, embed_data=x, embed_weight=word_vecs) >>> y_np = word_vecs[x] >>> test_utils.almost_equal(y, y_np) True """ return (0,) def Flatten(data=None, name=None, attr=None, out=None, **kwargs): r"""Flattens the input array into a 2-D array by collapsing the higher dimensions. .. note:: `Flatten` is deprecated. Use `flatten` instead. For an input array with shape ``(d1, d2, ..., dk)``, `flatten` operation reshapes the input array into an output array of shape ``(d1, d2*...*dk)``. Note that the behavior of this function is different from numpy.ndarray.flatten, which behaves similarly to mxnet.ndarray.reshape((-1,)). Example:: x = [[ [1,2,3], [4,5,6], [7,8,9] ], [ [1,2,3], [4,5,6], [7,8,9] ]], flatten(x) = [[ 1., 2., 3., 4., 5., 6., 7., 8., 9.], [ 1., 2., 3., 4., 5., 6., 7., 8., 9.]] Defined in ../src/operator/tensor/matrix_op.cc:L250 Parameters ---------- data : Symbol Input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol.
Examples -------- Flatten is usually applied before `FullyConnected`, to reshape the 4D tensor produced by convolutional layers to a 2D matrix: >>> data = Variable('data') # say this is 4D from some conv/pool >>> flatten = Flatten(data=data, name='flat') # now this is 2D >>> SymbolDoc.get_output_shape(flatten, data=(2, 3, 4, 5)) {'flat_output': (2L, 60L)} >>> test_dims = [(2, 3, 4, 5), (2, 3), (2,)] >>> op = Flatten(name='flat') >>> for dims in test_dims: ... x = test_utils.random_arrays(dims) ... y = test_utils.simple_forward(op, flat_data=x) ... y_np = x.reshape((dims[0], np.prod(dims[1:]).astype('int32'))) ... print('%s: %s' % (dims, test_utils.almost_equal(y, y_np))) (2, 3, 4, 5): True (2, 3): True (2,): True """ return (0,) def FullyConnected(data=None, weight=None, bias=None, num_hidden=_Null, no_bias=_Null, cublas_algo_verbose=_Null, cublas_off=_Null, cublas_tensor_core=_Null, cublas_algo_fwd=_Null, cublas_algo_bwd_data=_Null, cublas_algo_bwd_weights=_Null, cublas_algo_fwd_prec=_Null, cublas_algo_bwd_prec=_Null, flatten=_Null, name=None, attr=None, out=None, **kwargs): r"""Applies a linear transformation: :math:`Y = XW^T + b`. If ``flatten`` is set to be true, then the shapes are: - **data**: `(batch_size, x1, x2, ..., xn)` - **weight**: `(num_hidden, x1 * x2 * ... * xn)` - **bias**: `(num_hidden,)` - **out**: `(batch_size, num_hidden)` If ``flatten`` is set to be false, then the shapes are: - **data**: `(x1, x2, ..., xn, input_dim)` - **weight**: `(num_hidden, input_dim)` - **bias**: `(num_hidden,)` - **out**: `(x1, x2, ..., xn, num_hidden)` The learnable parameters include both ``weight`` and ``bias``. If ``no_bias`` is set to be true, then the ``bias`` term is ignored. .. Note:: The sparse support for FullyConnected is limited to forward evaluation with `row_sparse` weight and bias, where the length of `weight.indices` and `bias.indices` must be equal to `num_hidden`. This could be useful for model inference with `row_sparse` weights trained with importance sampling or noise contrastive estimation. To compute linear transformation with 'csr' sparse data, sparse.dot is recommended instead of sparse.FullyConnected. Defined in ../src/operator/nn/fully_connected.cc:L287 Parameters ---------- data : Symbol Input data. weight : Symbol Weight matrix. bias : Symbol Bias parameter. num_hidden : int, required Number of hidden nodes of the output. no_bias : boolean, optional, default=0 Whether to disable bias parameter. cublas_algo_verbose : boolean, optional, default=0 Verboseness of algo selection. true = output selection, false = no output cublas_off : boolean, optional, default=0 Turn off full-control cublas for this layer. cublas_tensor_core : boolean or None, optional, default=None Allow Tensor Core math for default-chosen algos. cublas_algo_fwd : int or None, optional, default='None' Specified Forward GEMM Algorithm. cublas_algo_bwd_data : int or None, optional, default='None' Specified Backprop-to-Data GEMM Algorithm. cublas_algo_bwd_weights : int or None, optional, default='None' Specified Backprop-to-Weights GEMM Algorithm. cublas_algo_fwd_prec : {'None', 'float16', 'float32', 'float64'},optional, default='None' Precision of the computation of the forward GEMM kernel. Default is the tensor data type, or float32 if the tensor data type is float16. cublas_algo_bwd_prec : {'None', 'float16', 'float32', 'float64'},optional, default='None' Precision of the computation of the back-prop kernels. Default is the tensor data type, or float32 if the tensor data type is float16.
flatten : boolean, optional, default=1 Whether to collapse all but the first axis of the input data tensor. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. Examples -------- Construct a fully connected operator with target dimension 512. >>> data = Variable('data') # or some constructed NN >>> op = FullyConnected(data=data, ... num_hidden=512, ... name='FC1') >>> op <Symbol FC1> >>> SymbolDoc.get_output_shape(op, data=(128, 100)) {'FC1_output': (128L, 512L)} A simple 3-layer MLP with ReLU activation: >>> net = Variable('data') >>> for i, dim in enumerate([128, 64]): ... net = FullyConnected(data=net, num_hidden=dim, name='FC%d' % i) ... net = Activation(data=net, act_type='relu', name='ReLU%d' % i) >>> # 10-class predictor (e.g. MNIST) >>> net = FullyConnected(data=net, num_hidden=10, name='pred') >>> net <Symbol pred> >>> dim_in, dim_out = (3, 4) >>> x, w, b = test_utils.random_arrays((10, dim_in), (dim_out, dim_in), (dim_out,)) >>> op = FullyConnected(num_hidden=dim_out, name='FC') >>> out = test_utils.simple_forward(op, FC_data=x, FC_weight=w, FC_bias=b) >>> # numpy implementation of FullyConnected >>> out_np = np.dot(x, w.T) + b >>> test_utils.almost_equal(out, out_np) True """ return (0,) def GridGenerator(data=None, transform_type=_Null, target_shape=_Null, name=None, attr=None, out=None, **kwargs): r"""Generates 2D sampling grid for bilinear sampling. Parameters ---------- data : Symbol Input data to the function. transform_type : {'affine', 'warp'}, required The type of transformation. For `affine`, input data should be an affine matrix of size (batch, 6). For `warp`, input data should be an optical flow of size (batch, 2, h, w). target_shape : Shape(tuple), optional, default=[0,0] Specifies the output shape (H, W). This is required if transformation type is `affine`. If transformation type is `warp`, this parameter is ignored. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def GroupNorm(data=None, gamma=None, beta=None, num_groups=_Null, eps=_Null, output_mean_var=_Null, name=None, attr=None, out=None, **kwargs): r"""Group normalization. The input channels are separated into ``num_groups`` groups, each containing ``num_channels / num_groups`` channels. The mean and standard-deviation are calculated separately over each group. .. math:: data = data.reshape((N, num_groups, C // num_groups, ...)) out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis) + \epsilon}} * gamma + beta Both ``gamma`` and ``beta`` are learnable parameters. Defined in ../src/operator/nn/group_norm.cc:L77 Parameters ---------- data : Symbol Input data gamma : Symbol gamma array beta : Symbol beta array num_groups : int, optional, default='1' Total number of groups. eps : float, optional, default=9.99999975e-06 An `epsilon` parameter to prevent division by 0. output_mean_var : boolean, optional, default=0 Output the mean and std calculated along the given axis. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def IdentityAttachKLSparseReg(data=None, sparseness_target=_Null, penalty=_Null, momentum=_Null, name=None, attr=None, out=None, **kwargs): r"""Apply a sparse regularization to the output of a sigmoid activation function. Parameters ---------- data : Symbol Input data.
sparseness_target : float, optional, default=0.100000001 The sparseness target penalty : float, optional, default=0.00100000005 The tradeoff parameter for the sparseness penalty momentum : float, optional, default=0.899999976 The momentum for running average name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def InstanceNorm(data=None, gamma=None, beta=None, eps=_Null, name=None, attr=None, out=None, **kwargs): r"""Applies instance normalization to the n-dimensional input array. This operator takes an n-dimensional input array (n > 2) and normalizes the input using the following formula: .. math:: out = \frac{x - mean[data]}{ \sqrt{Var[data]} + \epsilon} * gamma + beta This layer is similar to batch normalization layer (`BatchNorm`) with two differences: first, the normalization is carried out per example (instance), not over a batch. Second, the same normalization is applied both at test and train time. This operation is also known as `contrast normalization`. If the input data is of shape [batch, channel, spatial_dim1, spatial_dim2, ...], `gamma` and `beta` parameters must be vectors of shape [channel]. This implementation is based on this paper [1]_ .. [1] Instance Normalization: The Missing Ingredient for Fast Stylization, D. Ulyanov, A. Vedaldi, V. Lempitsky, 2016 (arXiv:1607.08022v2). Examples:: // Input of shape (2,1,2) x = [[[ 1.1, 2.2]], [[ 3.3, 4.4]]] // gamma parameter of length 1 gamma = [1.5] // beta parameter of length 1 beta = [0.5] // Instance normalization is calculated with the above formula InstanceNorm(x,gamma,beta) = [[[-0.997527 , 1.99752665]], [[-0.99752653, 1.99752724]]] Defined in ../src/operator/instance_norm.cc:L103 Parameters ---------- data : Symbol An n-dimensional input array (n > 2) of the form [batch, channel, spatial_dim1, spatial_dim2, ...]. gamma : Symbol A vector of length 'channel', which multiplies the normalized input. beta : Symbol A vector of length 'channel', which is added to the product of the normalized input and the weight. eps : float, optional, default=0.00100000005 An `epsilon` parameter to prevent division by 0. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def InstanceNormV2(data=None, gamma=None, beta=None, axis=_Null, eps=_Null, output_mean_var=_Null, act_type=_Null, xbuf_group=_Null, xbuf_ptr=_Null, name=None, attr=None, out=None, **kwargs): r"""Applies instance normalization to the n-dimensional input array. This operator takes an n-dimensional input array (n > 2) and normalizes the input using the following formula: .. math:: out = \frac{x - mean[data]}{ \sqrt{Var[data] + \epsilon}} * gamma + beta This layer is similar to batch normalization layer (`BatchNorm`) with two differences: first, the normalization is carried out per example (instance), not over a batch. Second, the same normalization is applied both at test and train time. This operation is also known as `contrast normalization`. If the input data is of shape [batch, channel, spatial_dim1, spatial_dim2, ...], `gamma` and `beta` parameters must be vectors of shape [channel]. This implementation is based on this paper [1]_ .. [1] Instance Normalization: The Missing Ingredient for Fast Stylization, D. Ulyanov, A. Vedaldi, V. Lempitsky, 2016 (arXiv:1607.08022v2).
Examples:: // Input of shape (2,1,2) x = [[[ 1.1, 2.2]], [[ 3.3, 4.4]]] // gamma parameter of length 1 gamma = [1.5] // beta parameter of length 1 beta = [0.5] // Instance normalization is calculated with the above formula InstanceNorm(x,gamma,beta) = [[[-0.997527 , 1.99752665]], [[-0.99752653, 1.99752724]]] Defined in ../src/operator/nn/instance_norm_v2.cc:L172 Parameters ---------- data : Symbol Input data to instance normalization gamma : Symbol gamma array beta : Symbol beta array axis : int, optional, default='1' The axis to perform instance normalization. Usually, this should be the axis of the channel dimension. Negative values mean indexing from right to left. eps : float, optional, default=0.00100000005 An `epsilon` parameter to prevent division by 0. output_mean_var : boolean, optional, default=0 Output the mean and std calculated along the given axis. act_type : {None, 'relu', 'sigmoid', 'softrelu', 'tanh'},optional, default='None' Fused activation function to be applied. xbuf_group : int, optional, default='1' exchange buffer group size xbuf_ptr : long (non-negative), optional, default=0 exchange buffer pointer name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def L2Normalization(data=None, eps=_Null, mode=_Null, name=None, attr=None, out=None, **kwargs): r"""Normalize the input array using the L2 norm. For 1-D NDArray, it computes:: out = data / sqrt(sum(data ** 2) + eps) For N-D NDArray, if the input array has shape (N, N, ..., N), with ``mode`` = ``instance``, it normalizes each instance in the multidimensional array by its L2 norm.:: for i in 0...N out[i,:,:,...,:] = data[i,:,:,...,:] / sqrt(sum(data[i,:,:,...,:] ** 2) + eps) with ``mode`` = ``channel``, it normalizes each channel in the array by its L2 norm.:: for i in 0...N out[:,i,:,...,:] = data[:,i,:,...,:] / sqrt(sum(data[:,i,:,...,:] ** 2) + eps) with ``mode`` = ``spatial``, it normalizes the cross channel norm for each position in the array by its L2 norm.:: for dim in 2...N for i in 0...N out[.....,i,...] = take(out, indices=i, axis=dim) / sqrt(sum(take(out, indices=i, axis=dim) ** 2) + eps) Example:: x = [[[1,2], [3,4]], [[2,2], [5,6]]] L2Normalization(x, mode='instance') =[[[ 0.18257418 0.36514837] [ 0.54772252 0.73029673]] [[ 0.24077171 0.24077171] [ 0.60192931 0.72231513]]] L2Normalization(x, mode='channel') =[[[ 0.31622776 0.44721359] [ 0.94868326 0.89442718]] [[ 0.37139067 0.31622776] [ 0.92847669 0.94868326]]] L2Normalization(x, mode='spatial') =[[[ 0.44721359 0.89442718] [ 0.60000002 0.80000001]] [[ 0.70710677 0.70710677] [ 0.6401844 0.76822126]]] Defined in ../src/operator/l2_normalization.cc:L196 Parameters ---------- data : Symbol Input array to normalize. eps : float, optional, default=1.00000001e-10 A small constant for numerical stability. mode : {'channel', 'instance', 'spatial'},optional, default='instance' Specify the dimension along which to compute L2 norm. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def LRN(data=None, alpha=_Null, beta=_Null, knorm=_Null, nsize=_Null, name=None, attr=None, out=None, **kwargs): r"""Applies local response normalization to the input. The local response normalization layer performs "lateral inhibition" by normalizing over local input regions. If :math:`a_{x,y}^{i}` is the activity of a neuron computed by applying kernel :math:`i` at position :math:`(x, y)` and then applying the ReLU nonlinearity, the response-normalized activity :math:`b_{x,y}^{i}` is given by the expression: .. math:: b_{x,y}^{i} = \frac{a_{x,y}^{i}}{\Bigg({k + \frac{\alpha}{n} \sum_{j=max(0, i-\frac{n}{2})}^{min(N-1, i+\frac{n}{2})} (a_{x,y}^{j})^{2}}\Bigg)^{\beta}} where the sum runs over :math:`n` "adjacent" kernel maps at the same spatial position, and :math:`N` is the total number of kernels in the layer.
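As a rough numeric check of the expression above, assume a single active channel so the sum reduces to :math:`(a_{x,y}^{i})^{2}` itself, and use the operator defaults k=2, alpha=1e-4, beta=0.75 with a window of n=5 (plain Python arithmetic, a sketch only, not an MXNet call):

>>> k, alpha, beta, n = 2.0, 1e-4, 0.75, 5
>>> a = 1.0
>>> round(a / (k + alpha / n * a**2) ** beta, 4)
0.5946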
Defined in ../src/operator/nn/lrn.cc:L158 Parameters ---------- data : Symbol Input data to LRN alpha : float, optional, default=9.99999975e-05 The variance scaling parameter :math:`\alpha` in the LRN expression. beta : float, optional, default=0.75 The power parameter :math:`\beta` in the LRN expression. knorm : float, optional, default=2 The parameter :math:`k` in the LRN expression. nsize : int (non-negative), required normalization window width in elements. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def LayerNorm(data=None, gamma=None, beta=None, axis=_Null, eps=_Null, output_mean_var=_Null, name=None, attr=None, out=None, **kwargs): r"""Layer normalization. Normalizes the channels of the input tensor by mean and variance, and applies a scale ``gamma`` as well as offset ``beta``. Assume the input has more than one dimension and we normalize along axis 1. We first compute the mean and variance along this axis and then compute the normalized output, which has the same shape as input, as follows: .. math:: out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis) + \epsilon}} * gamma + beta Both ``gamma`` and ``beta`` are learnable parameters. Unlike BatchNorm and InstanceNorm, the *mean* and *var* are computed along the channel dimension. Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` have shape *(k,)*. If ``output_mean_var`` is set to be true, then it outputs both ``data_mean`` and ``data_std``. Note that no gradient will be passed through these two outputs. The parameter ``axis`` specifies which axis of the input shape denotes the 'channel' (separately normalized groups). The default is -1, which sets the channel axis to be the last item in the input shape. Defined in ../src/operator/nn/layer_norm.cc:L159 Parameters ---------- data : Symbol Input data to layer normalization gamma : Symbol gamma array beta : Symbol beta array axis : int, optional, default='-1' The axis to perform layer normalization. Usually, this should be the axis of the channel dimension. Negative values mean indexing from right to left. eps : float, optional, default=9.99999975e-06 An `epsilon` parameter to prevent division by 0. output_mean_var : boolean, optional, default=0 Output the mean and std calculated along the given axis. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def LeakyReLU(data=None, gamma=None, act_type=_Null, slope=_Null, lower_bound=_Null, upper_bound=_Null, name=None, attr=None, out=None, **kwargs): r"""Applies Leaky rectified linear unit activation element-wise to the input. Leaky ReLUs attempt to fix the "dying ReLU" problem by allowing a small `slope` when the input is negative, while keeping a slope of one when the input is positive. The following modified ReLU Activation functions are supported: - *elu*: Exponential Linear Unit. `y = x > 0 ? x : slope * (exp(x)-1)` - *selu*: Scaled Exponential Linear Unit.
`y = lambda * (x > 0 ? x : alpha * (exp(x) - 1))` where *lambda = 1.0507009873554804934193349852946* and *alpha = 1.6732632423543772848170429916717*. - *leaky*: Leaky ReLU. `y = x > 0 ? x : slope * x` - *prelu*: Parametric ReLU. This is the same as *leaky* except that `slope` is learned during training. - *rrelu*: Randomized ReLU. Same as *leaky*, but the `slope` is uniformly and randomly chosen from *[lower_bound, upper_bound)* for training, while fixed to be *(lower_bound+upper_bound)/2* for inference. Defined in ../src/operator/leaky_relu.cc:L172 Parameters ---------- data : Symbol Input data to activation function. gamma : Symbol Input data to activation function. act_type : {'elu', 'gelu', 'leaky', 'prelu', 'rrelu', 'selu'},optional, default='leaky' Activation function to be applied. slope : float, optional, default=0.25 Init slope for the activation. (For leaky and elu only) lower_bound : float, optional, default=0.125 Lower bound of random slope. (For rrelu only) upper_bound : float, optional, default=0.333999991 Upper bound of random slope. (For rrelu only) name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def LinearRegressionOutput(data=None, label=None, grad_scale=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes and optimizes for squared loss during backward propagation. Just outputs ``data`` during forward propagation. If :math:`\hat{y}_i` is the predicted value of the i-th sample, and :math:`y_i` is the corresponding target value, then the squared loss estimated over :math:`n` samples is defined as :math:`\text{SquaredLoss}(\textbf{Y}, \hat{\textbf{Y}} ) = \frac{1}{n} \sum_{i=0}^{n-1} \lVert \textbf{y}_i - \hat{\textbf{y}}_i \rVert_2` .. note:: Use the LinearRegressionOutput as the final output layer of a net. The storage type of ``label`` can be ``default`` or ``csr`` - LinearRegressionOutput(default, default) = default - LinearRegressionOutput(default, csr) = default By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. Defined in ../src/operator/regression_output.cc:L92 Parameters ---------- data : Symbol Input data to the function. label : Symbol Input label to the function. grad_scale : float, optional, default=1 Scale the gradient by a float factor name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def LogisticRegressionOutput(data=None, label=None, grad_scale=_Null, name=None, attr=None, out=None, **kwargs): r"""Applies a logistic function to the input. The logistic function, also known as the sigmoid function, is computed as :math:`\frac{1}{1+exp(-\textbf{x})}`. Commonly, the sigmoid is used to squash the real-valued output of a linear model :math:`w^{T}x+b` into the [0,1] range so that it can be interpreted as a probability. It is suitable for binary classification or probability prediction tasks. .. note:: Use the LogisticRegressionOutput as the final output layer of a net. The storage type of ``label`` can be ``default`` or ``csr`` - LogisticRegressionOutput(default, default) = default - LogisticRegressionOutput(default, csr) = default The loss function used is the Binary Cross Entropy Loss: :math:`-{(y\log(p) + (1 - y)\log(1 - p))}` where `y` is the ground truth probability of positive outcome for a given example, and `p` the probability predicted by the model.
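The two formulas above can be checked numerically in plain Python (a sketch only, not part of the operator API): a zero pre-activation maps to p = 0.5, and the cross entropy of a positive label at p = 0.5 is log 2:

>>> import math
>>> p = 1 / (1 + math.exp(-0.0))  # sigmoid of a zero score
>>> p
0.5
>>> round(-(1 * math.log(p) + (1 - 1) * math.log(1 - p)), 4)  # BCE with y = 1
0.6931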
By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. Defined in ../src/operator/regression_output.cc:L152 Parameters ---------- data : Symbol Input data to the function. label : Symbol Input label to the function. grad_scale : float, optional, default=1 Scale the gradient by a float factor name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def MAERegressionOutput(data=None, label=None, grad_scale=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes mean absolute error of the input. MAE is a risk metric corresponding to the expected value of the absolute error. If :math:`\hat{y}_i` is the predicted value of the i-th sample, and :math:`y_i` is the corresponding target value, then the mean absolute error (MAE) estimated over :math:`n` samples is defined as :math:`\text{MAE}(\textbf{Y}, \hat{\textbf{Y}} ) = \frac{1}{n} \sum_{i=0}^{n-1} \lVert \textbf{y}_i - \hat{\textbf{y}}_i \rVert_1` .. note:: Use the MAERegressionOutput as the final output layer of a net. The storage type of ``label`` can be ``default`` or ``csr`` - MAERegressionOutput(default, default) = default - MAERegressionOutput(default, csr) = default By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. Defined in ../src/operator/regression_output.cc:L120 Parameters ---------- data : Symbol Input data to the function. label : Symbol Input label to the function. grad_scale : float, optional, default=1 Scale the gradient by a float factor name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def MakeLoss(data=None, grad_scale=_Null, valid_thresh=_Null, normalization=_Null, name=None, attr=None, out=None, **kwargs): r"""Make your own loss function in network construction. This operator accepts a customized loss function symbol as a terminal loss and the symbol should be an operator with no backward dependency. The output of this function is the gradient of loss with respect to the input data. For example, suppose you are making a cross entropy loss function. Assume ``out`` is the predicted output and ``label`` is the true label, then the cross entropy can be defined as:: cross_entropy = label * log(out) + (1 - label) * log(1 - out) loss = MakeLoss(cross_entropy) We will need to use ``MakeLoss`` when we are creating our own loss function or when we want to combine multiple loss functions. Also we may want to stop some variables' gradients from backpropagation. See more detail in ``BlockGrad`` or ``stop_gradient``. In addition, we can give a scale to the loss by setting ``grad_scale``, so that the gradient of the loss will be rescaled in the backpropagation. .. note:: This operator should be used as a Symbol instead of NDArray. Defined in ../src/operator/make_loss.cc:L71 Parameters ---------- data : Symbol Input array. grad_scale : float, optional, default=1 Gradient scale as a supplement to unary and binary operators valid_thresh : float, optional, default=0 Clip each element in the array to 0 when it is less than ``valid_thresh``. This is used when ``normalization`` is set to ``'valid'``.
normalization : {'batch', 'null', 'valid'},optional, default='null' If this is set to null, the output gradient will not be normalized. If this is set to batch, the output gradient will be divided by the batch size. If this is set to valid, the output gradient will be divided by the number of valid input elements. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def NormConvolution(data=None, sum=None, sum_squares=None, gamma=None, beta=None, moving_mean=None, moving_var=None, kernel=_Null, stride=_Null, dilate=_Null, pad=_Null, num_filter=_Null, num_group=_Null, no_norm=_Null, layout=_Null, act_type=_Null, eps=_Null, momentum=_Null, fix_gamma=_Null, use_global_stats=_Null, output_mean_var=_Null, output_equiv_scale_bias=_Null, name=None, attr=None, out=None, **kwargs): r"""Compute *N*-D normConvolution on *(N+2)*-D input. ******** Documentation not yet correct for this fused normalized convolution!! ************* In the 2-D normConvolution, given input data with shape *(batch_size, channel, height, width)*, the output is computed by .. math:: out[n,i,:,:] = bias[i] + \sum_{j=0}^{channel} data[n,j,:,:] \star weight[i,j,:,:] where :math:`\star` is the 2-D cross-correlation operator. For general 2-D normConvolution, the shapes are - **data**: *(batch_size, channel, height, width)* - **weight**: *(num_filter, channel, kernel[0], kernel[1])* - **bias**: *(num_filter,)* - **out**: *(batch_size, num_filter, out_height, out_width)*. Define:: f(x,k,p,s,d) = floor((x+2*p-d*(k-1)-1)/s)+1 then we have:: out_height=f(height, kernel[0], pad[0], stride[0], dilate[0]) out_width=f(width, kernel[1], pad[1], stride[1], dilate[1]) If ``no_bias`` is set to be true, then the ``bias`` term is ignored. The default data ``layout`` is *NCHW*, namely *(batch_size, channel, height, width)*. We can choose other layouts such as *NWC*. If ``num_group`` is larger than 1, denoted by *g*, then split the input ``data`` evenly into *g* parts along the channel axis, and also evenly split ``weight`` along the first dimension. Next compute the normConvolution on the *i*-th part of the data with the *i*-th weight part. The output is obtained by concatenating all the *g* results. 1-D normConvolution does not have *height* dimension but only *width* in space. - **data**: *(batch_size, channel, width)* - **weight**: *(num_filter, channel, kernel[0])* - **bias**: *(num_filter,)* - **out**: *(batch_size, num_filter, out_width)*. 3-D normConvolution adds an additional *depth* dimension besides *height* and *width*. The shapes are - **data**: *(batch_size, channel, depth, height, width)* - **weight**: *(num_filter, channel, kernel[0], kernel[1], kernel[2])* - **bias**: *(num_filter,)* - **out**: *(batch_size, num_filter, out_depth, out_height, out_width)*. Both ``weight`` and ``bias`` are learnable parameters. Defined in ../src/operator/nn/norm_convolution.cc:L423 Parameters ---------- data : Symbol Input data to the NormConvolutionOp. sum : Symbol sum of input data to be normalized sum_squares : Symbol sum of squares of input data to be normalized gamma : Symbol gamma array beta : Symbol beta array moving_mean : Symbol running mean of input moving_var : Symbol running variance of input kernel : Shape(tuple), required NormConvolution kernel size: (w,), (h, w) or (d, h, w) stride : Shape(tuple), optional, default=[] NormConvolution stride: (w,), (h, w) or (d, h, w). Default 1 for each dim. 
dilate : Shape(tuple), optional, default=[] NormConvolution dilate: (w,), (h, w) or (d, h, w). Default 1 for each dim. pad : Shape(tuple), optional, default=[] Zero pad for NormConvolution: (w,), (h, w) or (d, h, w). Default no padding. num_filter : int (non-negative), required NormConvolution filter(channel) number num_group : int (non-negative), optional, default=1 Number of group partitions. no_norm : boolean, optional, default=0 Whether to disable input normalization prior to the convolution. layout : {None, 'NCDHW', 'NCHW', 'NCW', 'NDHWC', 'NHWC'},optional, default='None' Set layout for input, output and weight. Empty for default layout: NCW for 1d, NCHW for 2d and NCDHW for 3d. NHWC and NDHWC are only supported on GPU. act_type : {None, 'relu', 'sigmoid', 'softrelu', 'tanh'},optional, default='None' Fused activation function to be applied. eps : double, optional, default=0.0010000000474974513 Epsilon to prevent div 0. Must be no less than CUDNN_BN_MIN_EPSILON defined in cudnn.h when using cudnn (usually 1e-5) momentum : float, optional, default=0.899999976 Momentum for moving average fix_gamma : boolean, optional, default=1 Fix gamma while training use_global_stats : boolean, optional, default=0 Whether to use global moving statistics instead of local batch-norm. This will force batch-norm into a scale-shift operator. output_mean_var : boolean, optional, default=0 Output the mean and inverse std. output_equiv_scale_bias : boolean, optional, default=0 Output the equiv_scale and equiv_bias (generally for testing). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def NormalizedConvolution(data=None, equiv_scale=None, equiv_bias=None, mean=None, var=None, gamma=None, beta=None, weight=None, kernel=_Null, stride=_Null, dilate=_Null, pad=_Null, num_filter=_Null, num_group=_Null, no_equiv_scale_bias=_Null, layout=_Null, act_type=_Null, name=None, attr=None, out=None, **kwargs): r"""Compute *N*-D normalizedConvolution on *(N+2)*-D input. ******** Documentation not yet correct for this fused normalized convolution!! ************* In the 2-D normalizedConvolution, given input data with shape *(batch_size, channel, height, width)*, the output is computed by .. math:: out[n,i,:,:] = bias[i] + \sum_{j=0}^{channel} data[n,j,:,:] \star weight[i,j,:,:] where :math:`\star` is the 2-D cross-correlation operator. For general 2-D normalizedConvolution, the shapes are - **data**: *(batch_size, channel, height, width)* - **weight**: *(num_filter, channel, kernel[0], kernel[1])* - **bias**: *(num_filter,)* - **out**: *(batch_size, num_filter, out_height, out_width)*. Define:: f(x,k,p,s,d) = floor((x+2*p-d*(k-1)-1)/s)+1 then we have:: out_height=f(height, kernel[0], pad[0], stride[0], dilate[0]) out_width=f(width, kernel[1], pad[1], stride[1], dilate[1]) If ``no_bias`` is set to be true, then the ``bias`` term is ignored. The default data ``layout`` is *NCHW*, namely *(batch_size, channel, height, width)*. We can choose other layouts such as *NWC*. If ``num_group`` is larger than 1, denoted by *g*, then split the input ``data`` evenly into *g* parts along the channel axis, and also evenly split ``weight`` along the first dimension. Next compute the normalizedConvolution on the *i*-th part of the data with the *i*-th weight part. The output is obtained by concatenating all the *g* results. 1-D normalizedConvolution does not have *height* dimension but only *width* in space.
- **data**: *(batch_size, channel, width)* - **weight**: *(num_filter, channel, kernel[0])* - **bias**: *(num_filter,)* - **out**: *(batch_size, num_filter, out_width)*. 3-D normalizedConvolution adds an additional *depth* dimension besides *height* and *width*. The shapes are - **data**: *(batch_size, channel, depth, height, width)* - **weight**: *(num_filter, channel, kernel[0], kernel[1], kernel[2])* - **bias**: *(num_filter,)* - **out**: *(batch_size, num_filter, out_depth, out_height, out_width)*. Both ``weight`` and ``bias`` are learnable parameters. Defined in ../src/operator/nn/normalized_convolution.cc:L397 Parameters ---------- data : Symbol Input data to the NormalizedConvolutionOp. equiv_scale : Symbol equivalent scale array equiv_bias : Symbol equivalent bias array mean : Symbol mean array var : Symbol array describing variance (actually an inverse std dev) gamma : Symbol gamma array (also known as 'scale') beta : Symbol beta array (also known as 'bias') weight : Symbol Weight matrix. kernel : Shape(tuple), required NormalizedConvolution kernel size: (w,), (h, w) or (d, h, w) stride : Shape(tuple), optional, default=[] NormalizedConvolution stride: (w,), (h, w) or (d, h, w). Default 1 for each dim. dilate : Shape(tuple), optional, default=[] NormalizedConvolution dilate: (w,), (h, w) or (d, h, w). Default 1 for each dim. pad : Shape(tuple), optional, default=[] Zero pad for NormalizedConvolution: (w,), (h, w) or (d, h, w). Default no padding. num_filter : int (non-negative), required NormalizedConvolution filter(channel) number num_group : int (non-negative), optional, default=1 Number of group partitions. no_equiv_scale_bias : boolean, optional, default=0 Whether to disable normalization equivalent-scale and equivalent-bias adjustments. layout : {None, 'NCDHW', 'NCHW', 'NCW', 'NDHWC', 'NHWC'},optional, default='None' Set layout for input, output and weight. Empty for default layout: NCW for 1d, NCHW for 2d and NCDHW for 3d. NHWC and NDHWC are only supported on GPU. act_type : {None, 'relu', 'sigmoid', 'softrelu', 'tanh'},optional, default='None' Fused activation function to be applied. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def Pad(data=None, mode=_Null, pad_width=_Null, constant_value=_Null, name=None, attr=None, out=None, **kwargs): r"""Pads an input array with a constant or edge values of the array. .. note:: `Pad` is deprecated. Use `pad` instead. .. note:: Current implementation only supports 4D and 5D input arrays with padding applied only on axes 1, 2 and 3. Expects axes 4 and 5 in `pad_width` to be zero. This operation pads an input array with either a `constant_value` or edge values along each axis of the input array. The amount of padding is specified by `pad_width`. `pad_width` is a tuple of integer padding widths for each axis of the format ``(before_1, after_1, ... , before_N, after_N)``. The `pad_width` should be of length ``2*N`` where ``N`` is the number of dimensions of the array. For dimension ``N`` of the input array, ``before_N`` and ``after_N`` indicate how many values to add before and after the elements of the array along dimension ``N``. The widths of the higher two dimensions ``before_1``, ``after_1``, ``before_2``, ``after_2`` must be 0. Example:: x = [[[[ 1. 2. 3.] [ 4. 5. 6.]] [[ 7. 8. 9.] [ 10. 11. 12.]]] [[[ 11. 12. 13.] [ 14. 15. 16.]] [[ 17. 18. 19.] [ 20. 21. 22.]]]] pad(x,mode="edge", pad_width=(0,0,0,0,1,1,1,1)) = [[[[ 1. 1. 2. 3. 3.] [ 1. 1. 2. 3. 3.] [ 4. 4. 5. 6.
6.] [ 4. 4. 5. 6. 6.]] [[ 7. 7. 8. 9. 9.] [ 7. 7. 8. 9. 9.] [ 10. 10. 11. 12. 12.] [ 10. 10. 11. 12. 12.]]] [[[ 11. 11. 12. 13. 13.] [ 11. 11. 12. 13. 13.] [ 14. 14. 15. 16. 16.] [ 14. 14. 15. 16. 16.]] [[ 17. 17. 18. 19. 19.] [ 17. 17. 18. 19. 19.] [ 20. 20. 21. 22. 22.] [ 20. 20. 21. 22. 22.]]]] pad(x, mode="constant", constant_value=0, pad_width=(0,0,0,0,1,1,1,1)) = [[[[ 0. 0. 0. 0. 0.] [ 0. 1. 2. 3. 0.] [ 0. 4. 5. 6. 0.] [ 0. 0. 0. 0. 0.]] [[ 0. 0. 0. 0. 0.] [ 0. 7. 8. 9. 0.] [ 0. 10. 11. 12. 0.] [ 0. 0. 0. 0. 0.]]] [[[ 0. 0. 0. 0. 0.] [ 0. 11. 12. 13. 0.] [ 0. 14. 15. 16. 0.] [ 0. 0. 0. 0. 0.]] [[ 0. 0. 0. 0. 0.] [ 0. 17. 18. 19. 0.] [ 0. 20. 21. 22. 0.] [ 0. 0. 0. 0. 0.]]]] Defined in ../src/operator/pad.cc:L766 Parameters ---------- data : Symbol An n-dimensional input array. mode : {'constant', 'edge', 'reflect'}, required Padding type to use. "constant" pads with `constant_value` "edge" pads using the edge values of the input array "reflect" pads by reflecting values with respect to the edges. pad_width : Shape(tuple), required Widths of the padding regions applied to the edges of each axis. It is a tuple of integer padding widths for each axis of the format ``(before_1, after_1, ... , before_N, after_N)``. It should be of length ``2*N`` where ``N`` is the number of dimensions of the array.This is equivalent to pad_width in numpy.pad, but flattened. constant_value : double, optional, default=0 The value used for padding when `mode` is "constant". name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def Pooling(data=None, kernel=_Null, pool_type=_Null, global_pool=_Null, cudnn_off=_Null, pooling_convention=_Null, stride=_Null, pad=_Null, p_value=_Null, count_include_pad=_Null, layout=_Null, name=None, attr=None, out=None, **kwargs): r"""Performs pooling on the input. The shapes for 1-D pooling are - **data** and **out**: *(batch_size, channel, width)* (NCW layout) or *(batch_size, width, channel)* (NWC layout), The shapes for 2-D pooling are - **data** and **out**: *(batch_size, channel, height, width)* (NCHW layout) or *(batch_size, height, width, channel)* (NHWC layout), out_height = f(height, kernel[0], pad[0], stride[0]) out_width = f(width, kernel[1], pad[1], stride[1]) The definition of *f* depends on ``pooling_convention``, which has two options: - **valid** (default):: f(x, k, p, s) = floor((x+2*p-k)/s)+1 - **full**, which is compatible with Caffe:: f(x, k, p, s) = ceil((x+2*p-k)/s)+1 When ``global_pool`` is set to be true, then global pooling is performed. It will reset ``kernel=(height, width)`` and set the appropiate padding to 0. Three pooling options are supported by ``pool_type``: - **avg**: average pooling - **max**: max pooling - **sum**: sum pooling - **lp**: Lp pooling For 3-D pooling, an additional *depth* dimension is added before *height*. Namely the input data and output will have shape *(batch_size, channel, depth, height, width)* (NCDHW layout) or *(batch_size, depth, height, width, channel)* (NDHWC layout). Notes on Lp pooling: Lp pooling was first introduced by this paper: https://arxiv.org/pdf/1204.3968.pdf. L-1 pooling is simply sum pooling, while L-inf pooling is simply max pooling. We can see that Lp pooling stands between those two, in practice the most common value for p is 2. 
For each window ``X``, the mathematical expression for Lp pooling is: :math:`f(X) = \sqrt[p]{\sum_{x}^{X} x^p}` Defined in ../src/operator/nn/pooling.cc:L431 Parameters ---------- data : Symbol Input data to the pooling operator. kernel : Shape(tuple), optional, default=[] Pooling kernel size: (y, x) or (d, y, x) pool_type : {'avg', 'lp', 'max', 'sum'},optional, default='max' Pooling type to be applied. global_pool : boolean, optional, default=0 Ignore kernel size, do global pooling based on current input feature map. cudnn_off : boolean, optional, default=0 Turn off cudnn pooling and use MXNet pooling operator. pooling_convention : {'full', 'same', 'valid'},optional, default='valid' Pooling convention to be applied. stride : Shape(tuple), optional, default=[] Stride: for pooling (y, x) or (d, y, x). Defaults to 1 for each dimension. pad : Shape(tuple), optional, default=[] Pad for pooling: (y, x) or (d, y, x). Defaults to no padding. p_value : int or None, optional, default='None' Value of p for Lp pooling, can be 1 or 2, required for Lp Pooling. count_include_pad : boolean or None, optional, default=None Only used for AvgPool, specify whether to count padding elements for average calculation. For example, with a 5*5 kernel on a 3*3 corner of an image, the sum of the 9 valid elements will be divided by 25 if this is set to true, or it will be divided by 9 if this is set to false. Defaults to true. layout : {None, 'NCDHW', 'NCHW', 'NCW', 'NDHWC', 'NHWC', 'NWC'},optional, default='None' Set layout for input and output. Empty for default layout: NCW for 1d, NCHW for 2d and NCDHW for 3d. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def Pooling_v1(data=None, kernel=_Null, pool_type=_Null, global_pool=_Null, pooling_convention=_Null, stride=_Null, pad=_Null, name=None, attr=None, out=None, **kwargs): r"""This operator is DEPRECATED. Perform pooling on the input. The shapes for 2-D pooling are - **data**: *(batch_size, channel, height, width)* - **out**: *(batch_size, num_filter, out_height, out_width)*, with:: out_height = f(height, kernel[0], pad[0], stride[0]) out_width = f(width, kernel[1], pad[1], stride[1]) The definition of *f* depends on ``pooling_convention``, which has two options: - **valid** (default):: f(x, k, p, s) = floor((x+2*p-k)/s)+1 - **full**, which is compatible with Caffe:: f(x, k, p, s) = ceil((x+2*p-k)/s)+1 When ``global_pool`` is set to true, a global pooling is performed, namely resetting ``kernel=(height, width)``. Three pooling options are supported by ``pool_type``: - **avg**: average pooling - **max**: max pooling - **sum**: sum pooling 1-D pooling is a special case of 2-D pooling with *width=1* and *kernel[1]=1*. For 3-D pooling, an additional *depth* dimension is added before *height*. Namely the input data will have shape *(batch_size, channel, depth, height, width)*. Defined in ../src/operator/pooling_v1.cc:L104 Parameters ---------- data : Symbol Input data to the pooling operator. kernel : Shape(tuple), optional, default=[] pooling kernel size: (y, x) or (d, y, x) pool_type : {'avg', 'max', 'sum'},optional, default='max' Pooling type to be applied. global_pool : boolean, optional, default=0 Ignore kernel size, do global pooling based on current input feature map. pooling_convention : {'full', 'valid'},optional, default='valid' Pooling convention to be applied.
stride : Shape(tuple), optional, default=[] stride: for pooling (y, x) or (d, y, x) pad : Shape(tuple), optional, default=[] pad for pooling: (y, x) or (d, y, x) name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def RNN(data=None, parameters=None, state=None, state_cell=None, sequence_length=None, state_size=_Null, num_layers=_Null, bidirectional=_Null, mode=_Null, p=_Null, state_outputs=_Null, projection_size=_Null, lstm_state_clip_min=_Null, lstm_state_clip_max=_Null, lstm_state_clip_nan=_Null, use_sequence_length=_Null, cudnn_algo_verbose=_Null, cudnn_algo=_Null, cudnn_tensor_core=_Null, name=None, attr=None, out=None, **kwargs): r"""Applies recurrent layers to input data. Currently, vanilla RNN, LSTM and GRU are implemented, with both multi-layer and bidirectional support. When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use pseudo-float16 (float32 math with float16 I/O) precision in order to use Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. **Vanilla RNN** Applies a single-gate recurrent layer to input X. Two kinds of activation function are supported: ReLU and Tanh. With ReLU activation function: .. math:: h_t = relu(W_{ih} * x_t + b_{ih} + W_{hh} * h_{(t-1)} + b_{hh}) With Tanh activation function: .. math:: h_t = \tanh(W_{ih} * x_t + b_{ih} + W_{hh} * h_{(t-1)} + b_{hh}) Reference paper: Finding structure in time - Elman, 1988. https://crl.ucsd.edu/~elman/Papers/fsit.pdf **LSTM** Long Short-Term Memory - Hochreiter, 1997. http://www.bioinf.jku.at/publications/older/2604.pdf .. math:: \begin{array}{ll} i_t = \mathrm{sigmoid}(W_{ii} x_t + b_{ii} + W_{hi} h_{(t-1)} + b_{hi}) \\ f_t = \mathrm{sigmoid}(W_{if} x_t + b_{if} + W_{hf} h_{(t-1)} + b_{hf}) \\ g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hc} h_{(t-1)} + b_{hg}) \\ o_t = \mathrm{sigmoid}(W_{io} x_t + b_{io} + W_{ho} h_{(t-1)} + b_{ho}) \\ c_t = f_t * c_{(t-1)} + i_t * g_t \\ h_t = o_t * \tanh(c_t) \end{array} With the projection size being set, LSTM could use the projection feature to reduce the parameter size and give some speedups without significant damage to the accuracy. Long Short-Term Memory Based Recurrent Neural Network Architectures for Large Vocabulary Speech Recognition - Sak et al. 2014. https://arxiv.org/abs/1402.1128 .. math:: \begin{array}{ll} i_t = \mathrm{sigmoid}(W_{ii} x_t + b_{ii} + W_{ri} r_{(t-1)} + b_{ri}) \\ f_t = \mathrm{sigmoid}(W_{if} x_t + b_{if} + W_{rf} r_{(t-1)} + b_{rf}) \\ g_t = \tanh(W_{ig} x_t + b_{ig} + W_{rc} r_{(t-1)} + b_{rg}) \\ o_t = \mathrm{sigmoid}(W_{io} x_t + b_{io} + W_{ro} r_{(t-1)} + b_{ro}) \\ c_t = f_t * c_{(t-1)} + i_t * g_t \\ h_t = o_t * \tanh(c_t) \\ r_t = W_{hr} h_t \end{array} **GRU** Gated Recurrent Unit - Cho et al. 2014. http://arxiv.org/abs/1406.1078 The definition of GRU here is slightly different from the paper but compatible with CUDNN. .. math:: \begin{array}{ll} r_t = \mathrm{sigmoid}(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\ z_t = \mathrm{sigmoid}(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\ n_t = \tanh(W_{in} x_t + b_{in} + r_t * (W_{hn} h_{(t-1)}+ b_{hn})) \\ h_t = (1 - z_t) * n_t + z_t * h_{(t-1)} \\ \end{array} Defined in ../src/operator/rnn.cc:L369 Parameters ---------- data : Symbol Input data to RNN parameters : Symbol Vector of all RNN trainable parameters concatenated state : Symbol initial hidden state of the RNN state_cell : Symbol initial cell state for LSTM networks (only for LSTM) sequence_length : Symbol Vector of valid sequence lengths for each element in batch. (Only used if use_sequence_length kwarg is True) state_size : int (non-negative), required size of the state for each layer num_layers : int (non-negative), required number of stacked layers bidirectional : boolean, optional, default=0 whether to use bidirectional recurrent layers mode : {'gru', 'lstm', 'rnn_relu', 'rnn_tanh'}, required the type of RNN to compute p : float, optional, default=0 drop rate of the dropout on the outputs of each RNN layer, except the last layer. state_outputs : boolean, optional, default=0 Whether to have the states as symbol outputs. projection_size : int or None, optional, default='None' size of the projection lstm_state_clip_min : double or None, optional, default=None Minimum clip value of LSTM states. This option must be used together with lstm_state_clip_max. lstm_state_clip_max : double or None, optional, default=None Maximum clip value of LSTM states. This option must be used together with lstm_state_clip_min. lstm_state_clip_nan : boolean, optional, default=0 Whether to stop NaN from propagating in state by clipping it to min/max. If clipping range is not specified, this option is ignored. use_sequence_length : boolean, optional, default=0 If set to true, this layer takes in an extra input parameter `sequence_length` to specify variable length sequence cudnn_algo_verbose : boolean, optional, default=0 Verboseness of algo selection. true = output selection, false = no output cudnn_algo : int, optional, default='-1' Specified RNN Algorithm. cudnn_tensor_core : boolean or None, optional, default=None Allow Tensor Core math for the algos. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def ROIPooling(data=None, rois=None, pooled_size=_Null, spatial_scale=_Null, name=None, attr=None, out=None, **kwargs): r"""Performs region of interest (ROI) pooling on the input array. ROI pooling is a variant of a max pooling layer, in which the output size is fixed and the region of interest is a parameter. Its purpose is to perform max pooling on inputs of non-uniform sizes to obtain fixed-size feature maps. ROI pooling is a neural-net layer mostly used in training a `Fast R-CNN` network for object detection. This operator takes a 4D feature map as an input array and region proposals as `rois`, then it pools over sub-regions of input and produces a fixed-sized output array regardless of the ROI size. To crop the feature map accordingly, you can resize the bounding box coordinates by changing the parameters `rois` and `spatial_scale`. The cropped feature maps are pooled by standard max pooling operation to a fixed size output indicated by a `pooled_size` parameter. batch_size will change to the number of region bounding boxes after `ROIPooling`.
The size of each region of interest doesn't have to be perfectly divisible by the number of pooling sections (`pooled_size`). Example:: x = [[[[ 0., 1., 2., 3., 4., 5.], [ 6., 7., 8., 9., 10., 11.], [ 12., 13., 14., 15., 16., 17.], [ 18., 19., 20., 21., 22., 23.], [ 24., 25., 26., 27., 28., 29.], [ 30., 31., 32., 33., 34., 35.], [ 36., 37., 38., 39., 40., 41.], [ 42., 43., 44., 45., 46., 47.]]]] // region of interest i.e. bounding box coordinates. y = [[0,0,0,4,4]] // returns array of shape (2,2) according to the given roi with max pooling. ROIPooling(x, y, (2,2), 1.0) = [[[[ 14., 16.], [ 26., 28.]]]] // region of interest is changed due to the change in `spatial_scale` parameter. ROIPooling(x, y, (2,2), 0.7) = [[[[ 7., 9.], [ 19., 21.]]]] Defined in ../src/operator/roi_pooling.cc:L225 Parameters ---------- data : Symbol The input array to the pooling operator, a 4D feature map rois : Symbol Bounding box coordinates, a 2D array of [[batch_index, x1, y1, x2, y2]], where (x1, y1) and (x2, y2) are top left and bottom right corners of designated region of interest. `batch_index` indicates the index of the corresponding image in the input array pooled_size : Shape(tuple), required ROI pooling output shape (h,w) spatial_scale : float, required Ratio of input feature map height (or w) to raw image height (or w). Equals the reciprocal of total stride in convolutional layers name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def Reshape(data=None, shape=_Null, reverse=_Null, target_shape=_Null, keep_highest=_Null, name=None, attr=None, out=None, **kwargs): r"""Reshapes the input array. .. note:: ``Reshape`` is deprecated, use ``reshape`` Given an array and a shape, this function returns a copy of the array in the new shape. The shape is a tuple of integers such as (2,3,4). The size of the new shape should be the same as the size of the input array. Example:: reshape([1,2,3,4], shape=(2,2)) = [[1,2], [3,4]] Some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}. The significance of each is explained below: - ``0`` copy this dimension from the input to the output shape. Example:: - input shape = (2,3,4), shape = (4,0,2), output shape = (4,3,2) - input shape = (2,3,4), shape = (2,0,0), output shape = (2,3,4) - ``-1`` infers the dimension of the output shape by using the remainder of the input dimensions keeping the size of the new array same as that of the input array. At most one dimension of shape can be -1. Example:: - input shape = (2,3,4), shape = (6,1,-1), output shape = (6,1,4) - input shape = (2,3,4), shape = (3,-1,8), output shape = (3,1,8) - input shape = (2,3,4), shape=(-1,), output shape = (24,) - ``-2`` copy all/remainder of the input dimensions to the output shape. Example:: - input shape = (2,3,4), shape = (-2,), output shape = (2,3,4) - input shape = (2,3,4), shape = (2,-2), output shape = (2,3,4) - input shape = (2,3,4), shape = (-2,1,1), output shape = (2,3,4,1,1) - ``-3`` use the product of two consecutive dimensions of the input shape as the output dimension. Example:: - input shape = (2,3,4), shape = (-3,4), output shape = (6,4) - input shape = (2,3,4,5), shape = (-3,-3), output shape = (6,20) - input shape = (2,3,4), shape = (0,-3), output shape = (2,12) - input shape = (2,3,4), shape = (-3,-2), output shape = (6,4) - ``-4`` split one dimension of the input into two dimensions passed subsequent to -4 in shape (can contain -1).
Example:: - input shape = (2,3,4), shape = (-4,1,2,-2), output shape = (1,2,3,4) - input shape = (2,3,4), shape = (2,-4,-1,3,-2), output shape = (2,1,3,4) If the argument `reverse` is set to 1, then the special values are inferred from right to left. Example:: - without reverse=1, for input shape = (10,5,4), shape = (-1,0), output shape would be (40,5) - with reverse=1, output shape will be (50,4). Defined in ../src/operator/tensor/matrix_op.cc:L175 Parameters ---------- data : Symbol Input data to reshape. shape : Shape(tuple), optional, default=[] The target shape reverse : boolean, optional, default=0 If true then the special values are inferred from right to left target_shape : Shape(tuple), optional, default=[] (Deprecated! Use ``shape`` instead.) Target new shape. One and only one dim can be 0, in which case it will be inferred from the rest of dims keep_highest : boolean, optional, default=0 (Deprecated! Use ``shape`` instead.) Whether to keep the highest dim unchanged. If set to true, then the first dim in target_shape is ignored, and always fixed as input name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def SVMOutput(data=None, label=None, margin=_Null, regularization_coefficient=_Null, use_linear=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes support vector machine based transformation of the input. This tutorial demonstrates using SVM as output layer for classification instead of softmax: https://github.com/dmlc/mxnet/tree/master/example/svm_mnist. Parameters ---------- data : Symbol Input data for SVM transformation. label : Symbol Class label for the input data. margin : float, optional, default=1 The loss function penalizes outputs that lie outside this margin. Default margin is 1. regularization_coefficient : float, optional, default=1 Regularization parameter for the SVM. This balances the tradeoff between coefficient size and error. use_linear : boolean, optional, default=0 Whether to use L1-SVM objective. L2-SVM objective is used by default. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def ScaleBiasAddRelu(dataX=None, dataZ=None, x_equiv_scale=None, x_equiv_bias=None, z_equiv_scale=None, z_equiv_bias=None, x_gamma=None, x_beta=None, x_mean=None, x_invvar=None, z_gamma=None, z_beta=None, z_mean=None, z_invvar=None, eps=_Null, dual_scale_bias=_Null, fused_add=_Null, fused_relu=_Null, layout=_Null, act_type=_Null, name=None, attr=None, out=None, **kwargs): r"""Compute ScaleBiasAddRelu. Defined in ../src/operator/nn/scale_bias_add_relu.cc:L272 Parameters ---------- dataX : Symbol Input data to the ScaleBiasAddReluOp. dataZ : Symbol Input data to the ScaleBiasAddReluOp. x_equiv_scale : Symbol equivalent scale array x_equiv_bias : Symbol equivalent bias array z_equiv_scale : Symbol equivalent scale array z_equiv_bias : Symbol equivalent bias array x_gamma : Symbol x_beta : Symbol x_mean : Symbol x_invvar : Symbol z_gamma : Symbol z_beta : Symbol z_mean : Symbol z_invvar : Symbol eps : double, optional, default=0.0010000000474974513 Epsilon to prevent div 0. Must be no less than CUDNN_BN_MIN_EPSILON defined in cudnn.h when using cudnn (usually 1e-5) dual_scale_bias : boolean, optional, default=1 dual scale bias fused_add : boolean, optional, default=1 fused add fused_relu : boolean, optional, default=1 fused relu layout : {None, 'NHWC'},optional, default='None' Set layout for input and output. NHWC is only supported on GPU.
act_type : {None, 'relu'},optional, default='None' Fused activation function to be applied. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def SequenceLast(data=None, sequence_length=None, use_sequence_length=_Null, axis=_Null, name=None, attr=None, out=None, **kwargs): r"""Takes the last element of a sequence. This function takes an n-dimensional input array of the form [max_sequence_length, batch_size, other_feature_dims] and returns a (n-1)-dimensional array of the form [batch_size, other_feature_dims]. Parameter `sequence_length` is used to handle variable-length sequences. `sequence_length` should be an input array of positive ints of dimension [batch_size]. To use this parameter, set `use_sequence_length` to `True`, otherwise each example in the batch is assumed to have the max sequence length. .. note:: Alternatively, you can also use `take` operator. Example:: x = [[[ 1., 2., 3.], [ 4., 5., 6.], [ 7., 8., 9.]], [[ 10., 11., 12.], [ 13., 14., 15.], [ 16., 17., 18.]], [[ 19., 20., 21.], [ 22., 23., 24.], [ 25., 26., 27.]]] // returns last sequence when sequence_length parameter is not used SequenceLast(x) = [[ 19., 20., 21.], [ 22., 23., 24.], [ 25., 26., 27.]] // sequence_length is used SequenceLast(x, sequence_length=[1,1,1], use_sequence_length=True) = [[ 1., 2., 3.], [ 4., 5., 6.], [ 7., 8., 9.]] // sequence_length is used SequenceLast(x, sequence_length=[1,2,3], use_sequence_length=True) = [[ 1., 2., 3.], [ 13., 14., 15.], [ 25., 26., 27.]] Defined in ../src/operator/sequence_last.cc:L106 Parameters ---------- data : Symbol n-dimensional input array of the form [max_sequence_length, batch_size, other_feature_dims] where n>2 sequence_length : Symbol vector of sequence lengths of the form [batch_size] use_sequence_length : boolean, optional, default=0 If set to true, this layer takes in an extra input parameter `sequence_length` to specify variable length sequence axis : int, optional, default='0' The sequence axis. Only values of 0 and 1 are currently supported. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def SequenceMask(data=None, sequence_length=None, use_sequence_length=_Null, value=_Null, axis=_Null, name=None, attr=None, out=None, **kwargs): r"""Sets all elements outside the sequence to a constant value. This function takes an n-dimensional input array of the form [max_sequence_length, batch_size, other_feature_dims] and returns an array of the same shape. Parameter `sequence_length` is used to handle variable-length sequences. `sequence_length` should be an input array of positive ints of dimension [batch_size]. To use this parameter, set `use_sequence_length` to `True`, otherwise each example in the batch is assumed to have the max sequence length and this operator works as the `identity` operator. 
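A minimal symbolic usage sketch (illustrative only; it assumes ``import mxnet as mx``, and the variable names are placeholders)::

    import mxnet as mx
    data = mx.sym.Variable('data')        # shape (max_sequence_length, batch_size, ...)
    seq_len = mx.sym.Variable('seq_len')  # shape (batch_size,)
    # Elements past each sequence's length are overwritten with `value`
    masked = mx.sym.SequenceMask(data=data, sequence_length=seq_len,
                                 use_sequence_length=True, value=0.0)

The element-wise effect of the mask is shown numerically in the example below.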
Example:: x = [[[ 1., 2., 3.], [ 4., 5., 6.]], [[ 7., 8., 9.], [ 10., 11., 12.]], [[ 13., 14., 15.], [ 16., 17., 18.]]] // Batch 1 B1 = [[ 1., 2., 3.], [ 7., 8., 9.], [ 13., 14., 15.]] // Batch 2 B2 = [[ 4., 5., 6.], [ 10., 11., 12.], [ 16., 17., 18.]] // works as identity operator when sequence_length parameter is not used SequenceMask(x) = [[[ 1., 2., 3.], [ 4., 5., 6.]], [[ 7., 8., 9.], [ 10., 11., 12.]], [[ 13., 14., 15.], [ 16., 17., 18.]]] // sequence_length [1,1] means 1 of each batch will be kept // and other rows are masked with default mask value = 0 SequenceMask(x, sequence_length=[1,1], use_sequence_length=True) = [[[ 1., 2., 3.], [ 4., 5., 6.]], [[ 0., 0., 0.], [ 0., 0., 0.]], [[ 0., 0., 0.], [ 0., 0., 0.]]] // sequence_length [2,3] means 2 of batch B1 and 3 of batch B2 will be kept // and other rows are masked with value = 1 SequenceMask(x, sequence_length=[2,3], use_sequence_length=True, value=1) = [[[ 1., 2., 3.], [ 4., 5., 6.]], [[ 7., 8., 9.], [ 10., 11., 12.]], [[ 1., 1., 1.], [ 16., 17., 18.]]] Defined in ../src/operator/sequence_mask.cc:L186 Parameters ---------- data : Symbol n-dimensional input array of the form [max_sequence_length, batch_size, other_feature_dims] where n>2 sequence_length : Symbol vector of sequence lengths of the form [batch_size] use_sequence_length : boolean, optional, default=0 If set to true, this layer takes in an extra input parameter `sequence_length` to specify variable length sequence value : float, optional, default=0 The value to be used as a mask. axis : int, optional, default='0' The sequence axis. Only values of 0 and 1 are currently supported. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def SequenceReverse(data=None, sequence_length=None, use_sequence_length=_Null, axis=_Null, name=None, attr=None, out=None, **kwargs): r"""Reverses the elements of each sequence. This function takes an n-dimensional input array of the form [max_sequence_length, batch_size, other_feature_dims] and returns an array of the same shape. Parameter `sequence_length` is used to handle variable-length sequences. `sequence_length` should be an input array of positive ints of dimension [batch_size]. To use this parameter, set `use_sequence_length` to `True`, otherwise each example in the batch is assumed to have the max sequence length. Example:: x = [[[ 1., 2., 3.], [ 4., 5., 6.]], [[ 7., 8., 9.], [ 10., 11., 12.]], [[ 13., 14., 15.], [ 16., 17., 18.]]] // Batch 1 B1 = [[ 1., 2., 3.], [ 7., 8., 9.], [ 13., 14., 15.]] // Batch 2 B2 = [[ 4., 5., 6.], [ 10., 11., 12.], [ 16., 17., 18.]] // returns reverse sequence when sequence_length parameter is not used SequenceReverse(x) = [[[ 13., 14., 15.], [ 16., 17., 18.]], [[ 7., 8., 9.], [ 10., 11., 12.]], [[ 1., 2., 3.], [ 4., 5., 6.]]] // sequence_length [2,2] means 2 rows of // both batch B1 and B2 will be reversed. SequenceReverse(x, sequence_length=[2,2], use_sequence_length=True) = [[[ 7., 8., 9.], [ 10., 11., 12.]], [[ 1., 2., 3.], [ 4., 5., 6.]], [[ 13., 14., 15.], [ 16., 17., 18.]]] // sequence_length [2,3] means 2 of batch B1 and 3 of batch B2 // will be reversed.
SequenceReverse(x, sequence_length=[2,3], use_sequence_length=True) = [[[ 7., 8., 9.], [ 16., 17., 18.]], [[ 1., 2., 3.], [ 10., 11., 12.]], [[ 13., 14., 15.], [ 4., 5., 6.]]] Defined in ../src/operator/sequence_reverse.cc:L122 Parameters ---------- data : Symbol n-dimensional input array of the form [max_sequence_length, batch_size, other dims] where n>2 sequence_length : Symbol vector of sequence lengths of the form [batch_size] use_sequence_length : boolean, optional, default=0 If set to true, this layer takes in an extra input parameter `sequence_length` to specify variable length sequence axis : int, optional, default='0' The sequence axis. Only 0 is currently supported. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def SliceChannel(data=None, num_outputs=_Null, axis=_Null, squeeze_axis=_Null, name=None, attr=None, out=None, **kwargs): r"""Splits an array along a particular axis into multiple sub-arrays. .. note:: ``SliceChannel`` is deprecated. Use ``split`` instead. **Note** that `num_outputs` should evenly divide the length of the axis along which to split the array. Example:: x = [[[ 1.] [ 2.]] [[ 3.] [ 4.]] [[ 5.] [ 6.]]] x.shape = (3, 2, 1) y = split(x, axis=1, num_outputs=2) // a list of 2 arrays with shape (3, 1, 1) y = [[[ 1.]] [[ 3.]] [[ 5.]]] [[[ 2.]] [[ 4.]] [[ 6.]]] y[0].shape = (3, 1, 1) z = split(x, axis=0, num_outputs=3) // a list of 3 arrays with shape (1, 2, 1) z = [[[ 1.] [ 2.]]] [[[ 3.] [ 4.]]] [[[ 5.] [ 6.]]] z[0].shape = (1, 2, 1) `squeeze_axis=1` removes the axis with length 1 from the shapes of the output arrays. **Note** that setting `squeeze_axis` to ``1`` removes axis with length 1 only along the `axis` on which it is split. Also `squeeze_axis` can be set to true only if ``input.shape[axis] == num_outputs``. Example:: z = split(x, axis=0, num_outputs=3, squeeze_axis=1) // a list of 3 arrays with shape (2, 1) z = [[ 1.] [ 2.]] [[ 3.] [ 4.]] [[ 5.] [ 6.]] z[0].shape = (2, 1) Defined in ../src/operator/slice_channel.cc:L107 Parameters ---------- data : Symbol The input num_outputs : int, required Number of splits. Note that this should evenly divide the length of the `axis`. axis : int, optional, default='1' Axis along which to split. squeeze_axis : boolean, optional, default=0 If true, removes the axis with length 1 from the shapes of the output arrays. **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1 only along the `axis` on which it is split. Also `squeeze_axis` can be set to ``true`` only if ``input.shape[axis] == num_outputs``. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def Softmax(data=None, label=None, grad_scale=_Null, ignore_label=_Null, multi_output=_Null, use_ignore=_Null, preserve_shape=_Null, normalization=_Null, out_grad=_Null, smooth_alpha=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the gradient of cross entropy loss with respect to softmax output. - This operator computes the gradient in two steps. The cross entropy loss does not actually need to be computed. - Applies softmax function on the input array. - Computes and returns the gradient of cross entropy loss w.r.t. the softmax output. - The softmax function, cross entropy loss and gradient are given by: - Softmax Function: .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} - Cross Entropy Function: .. math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) - The gradient of cross entropy loss w.r.t softmax output: .. math:: \text{gradient} = \text{output} - \text{label} - During forward propagation, the softmax function is computed for each instance in the input array. For a general *N*-D input array with shape :math:`(d_1, d_2, ..., d_n)`, the size is :math:`s = d_1 \cdot d_2 \cdots d_n`. We can use the parameters `preserve_shape` and `multi_output` to specify the way to compute softmax: - By default, `preserve_shape` is ``false``. This operator will reshape the input array into a 2-D array with shape :math:`(d_1, \frac{s}{d_1})` and then compute the softmax function for each row in the reshaped array, and afterwards reshape it back to the original shape :math:`(d_1, d_2, ..., d_n)`. - If `preserve_shape` is ``true``, the softmax function will be computed along the last axis (`axis` = ``-1``). - If `multi_output` is ``true``, the softmax function will be computed along the second axis (`axis` = ``1``). - During backward propagation, the gradient of cross-entropy loss w.r.t softmax output array is computed. The provided label can be a one-hot label array or a probability label array. - If the parameter `use_ignore` is ``true``, `ignore_label` can specify input instances with a particular label to be ignored during backward propagation. **This has no effect when softmax `output` has the same shape as `label`**. Example:: data = [[1,2,3,4],[2,2,2,2],[3,3,3,3],[4,4,4,4]] label = [1,0,2,3] ignore_label = 1 SoftmaxOutput(data=data, label = label,\ multi_output=true, use_ignore=true,\ ignore_label=ignore_label) ## forward softmax output [[ 0.0320586 0.08714432 0.23688284 0.64391428] [ 0.25 0.25 0.25 0.25 ] [ 0.25 0.25 0.25 0.25 ] [ 0.25 0.25 0.25 0.25 ]] ## backward gradient output [[ 0. 0. 0. 0. ] [-0.75 0.25 0.25 0.25] [ 0.25 0.25 -0.75 0.25] [ 0.25 0.25 0.25 -0.75]] ## notice that the first row is all 0 because label[0] is 1, which is equal to ignore_label. - The parameter `grad_scale` can be used to rescale the gradient, which is often used to give each loss function different weights. - This operator also supports various ways to normalize the gradient by `normalization`. The `normalization` is applied if the softmax output has a different shape than the labels. The `normalization` mode can be set to the following: - ``'null'``: do nothing. - ``'batch'``: divide the gradient by the batch size. - ``'valid'``: divide the gradient by the number of instances which are not ignored. Defined in ../src/operator/softmax_output.cc:L243 Parameters ---------- data : Symbol Input array. label : Symbol Ground truth label. grad_scale : float, optional, default=1 Scales the gradient by a float factor. ignore_label : float, optional, default=-1 The instances whose `labels` == `ignore_label` will be ignored during backward, if `use_ignore` is set to ``true``. multi_output : boolean, optional, default=0 If set to ``true``, the softmax function will be computed along axis ``1``. This is applied when the shape of input array differs from the shape of label array. use_ignore : boolean, optional, default=0 If set to ``true``, the `ignore_label` value will not contribute to the backward gradient. preserve_shape : boolean, optional, default=0 If set to ``true``, the softmax function will be computed along the last axis (``-1``). normalization : {'batch', 'null', 'valid'},optional, default='null' Normalizes the gradient.
out_grad : boolean, optional, default=0 Multiplies gradient with output gradient element-wise. smooth_alpha : float, optional, default=0 Constant for computing a label smoothed version of cross-entropy for the backwards pass. This constant gets subtracted from the one-hot encoding of the gold label and distributed uniformly to all other labels. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def SoftmaxActivation(data=None, mode=_Null, name=None, attr=None, out=None, **kwargs): r"""Applies softmax activation to input. This is intended for internal layers. .. note:: This operator has been deprecated, please use `softmax`. If `mode` = ``instance``, this operator will compute a softmax for each instance in the batch. This is the default mode. If `mode` = ``channel``, this operator will compute a k-class softmax at each position of each instance, where `k` = ``num_channel``. This mode can only be used when the input array has at least 3 dimensions. This can be used for `fully convolutional network`, `image segmentation`, etc. Example:: >>> input_array = mx.nd.array([[3., 0.5, -0.5, 2., 7.], >>> [2., -.4, 7., 3., 0.2]]) >>> softmax_act = mx.nd.SoftmaxActivation(input_array) >>> print(softmax_act.asnumpy()) [[ 1.78322066e-02 1.46375655e-03 5.38485940e-04 6.56010211e-03 9.73605454e-01] [ 6.56221947e-03 5.95310994e-04 9.73919690e-01 1.78379621e-02 1.08472735e-03]] Defined in ../src/operator/nn/softmax_activation.cc:L59 Parameters ---------- data : Symbol The input array. mode : {'channel', 'instance'},optional, default='instance' Specifies how to compute the softmax. If set to ``instance``, it computes softmax for each instance. If set to ``channel``, it computes cross channel softmax for each position of each instance. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def SoftmaxOutput(data=None, label=None, grad_scale=_Null, ignore_label=_Null, multi_output=_Null, use_ignore=_Null, preserve_shape=_Null, normalization=_Null, out_grad=_Null, smooth_alpha=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the gradient of cross entropy loss with respect to softmax output. - This operator computes the gradient in two steps. The cross entropy loss does not actually need to be computed. - Applies softmax function on the input array. - Computes and returns the gradient of cross entropy loss w.r.t. the softmax output. - The softmax function, cross entropy loss and gradient are given by: - Softmax Function: .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} - Cross Entropy Function: .. math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) - The gradient of cross entropy loss w.r.t softmax output: .. math:: \text{gradient} = \text{output} - \text{label} - During forward propagation, the softmax function is computed for each instance in the input array. For a general *N*-D input array with shape :math:`(d_1, d_2, ..., d_n)`, the size is :math:`s = d_1 \cdot d_2 \cdots d_n`. We can use the parameters `preserve_shape` and `multi_output` to specify the way to compute softmax: - By default, `preserve_shape` is ``false``. This operator will reshape the input array into a 2-D array with shape :math:`(d_1, \frac{s}{d_1})` and then compute the softmax function for each row in the reshaped array, and afterwards reshape it back to the original shape :math:`(d_1, d_2, ..., d_n)`.
- If `preserve_shape` is ``true``, the softmax function will be computed along the last axis (`axis` = ``-1``). - If `multi_output` is ``true``, the softmax function will be computed along the second axis (`axis` = ``1``). - During backward propagation, the gradient of cross-entropy loss w.r.t softmax output array is computed. The provided label can be a one-hot label array or a probability label array. - If the parameter `use_ignore` is ``true``, `ignore_label` can specify input instances with a particular label to be ignored during backward propagation. **This has no effect when softmax `output` has the same shape as `label`**. Example:: data = [[1,2,3,4],[2,2,2,2],[3,3,3,3],[4,4,4,4]] label = [1,0,2,3] ignore_label = 1 SoftmaxOutput(data=data, label = label,\ multi_output=true, use_ignore=true,\ ignore_label=ignore_label) ## forward softmax output [[ 0.0320586 0.08714432 0.23688284 0.64391428] [ 0.25 0.25 0.25 0.25 ] [ 0.25 0.25 0.25 0.25 ] [ 0.25 0.25 0.25 0.25 ]] ## backward gradient output [[ 0. 0. 0. 0. ] [-0.75 0.25 0.25 0.25] [ 0.25 0.25 -0.75 0.25] [ 0.25 0.25 0.25 -0.75]] ## notice that the first row is all 0 because label[0] is 1, which is equal to ignore_label. - The parameter `grad_scale` can be used to rescale the gradient, which is often used to give each loss function different weights. - This operator also supports various ways to normalize the gradient by `normalization`. The `normalization` is applied if the softmax output has a different shape than the labels. The `normalization` mode can be set to the following: - ``'null'``: do nothing. - ``'batch'``: divide the gradient by the batch size. - ``'valid'``: divide the gradient by the number of instances which are not ignored. Defined in ../src/operator/softmax_output.cc:L243 Parameters ---------- data : Symbol Input array. label : Symbol Ground truth label. grad_scale : float, optional, default=1 Scales the gradient by a float factor. ignore_label : float, optional, default=-1 The instances whose `labels` == `ignore_label` will be ignored during backward, if `use_ignore` is set to ``true``. multi_output : boolean, optional, default=0 If set to ``true``, the softmax function will be computed along axis ``1``. This is applied when the shape of input array differs from the shape of label array. use_ignore : boolean, optional, default=0 If set to ``true``, the `ignore_label` value will not contribute to the backward gradient. preserve_shape : boolean, optional, default=0 If set to ``true``, the softmax function will be computed along the last axis (``-1``). normalization : {'batch', 'null', 'valid'},optional, default='null' Normalizes the gradient. out_grad : boolean, optional, default=0 Multiplies gradient with output gradient element-wise. smooth_alpha : float, optional, default=0 Constant for computing a label smoothed version of cross-entropy for the backwards pass. This constant gets subtracted from the one-hot encoding of the gold label and distributed uniformly to all other labels. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol.
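Example (a minimal sketch, assuming ``import mxnet as mx`` and a hypothetical 10-class classifier; layer names are illustrative)::

    import mxnet as mx
    data = mx.sym.Variable('data')
    label = mx.sym.Variable('softmax_label')
    fc = mx.sym.FullyConnected(data=data, num_hidden=10, name='fc')
    # Forward pass applies softmax over fc's outputs; backward pass
    # propagates the cross-entropy gradient (output - label),
    # optionally rescaled by grad_scale.
    net = mx.sym.SoftmaxOutput(data=fc, label=label, grad_scale=1.0, name='softmax')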
""" return (0,) def SpatialParallelConvolution(data=None, weight=None, dummy=None, bias=None, kernel=_Null, stride=_Null, dilate=_Null, pad=_Null, num_filter=_Null, num_group=_Null, workspace=_Null, no_bias=_Null, cudnn_tune=_Null, cudnn_off=_Null, cudnn_tensor_core=_Null, cudnn_tensor_core_only=_Null, layout=_Null, cudnn_algo_verbose=_Null, cudnn_algo_fwd=_Null, cudnn_algo_bwd_data=_Null, cudnn_algo_bwd_filter=_Null, cudnn_algo_fwd_prec=_Null, cudnn_algo_bwd_prec=_Null, num_gpus=_Null, rank=_Null, nccl_unique_id=_Null, name=None, attr=None, out=None, **kwargs): r"""Compute *N*-D convolution on *(N+2)*-D input, using data stored on multiple GPUs, spatially divided in the outermost dimension. In the 2-D convolution, given input data with shape *(batch_size, channel, height, width)*, the output is computed by .. math:: out[n,i,:,:] = bias[i] + \sum_{j=0}^{channel} data[n,j,:,:] \star weight[i,j,:,:] where :math:`\star` is the 2-D cross-correlation operator. For general 2-D convolution, the shapes are - **data**: *(batch_size, channel, height, width)* - **weight**: *(num_filter, channel, kernel[0], kernel[1])* - **bias**: *(num_filter,)* - **out**: *(batch_size, num_filter, out_height, out_width)*. Define:: f(x,k,p,s,d) = floor((x+2*p-d*(k-1)-1)/s)+1 then we have:: out_height=f(height, kernel[0], pad[0], stride[0], dilate[0]) out_width=f(width, kernel[1], pad[1], stride[1], dilate[1]) If ``no_bias`` is set to be true, then the ``bias`` term is ignored. The default data ``layout`` is *NCHW*, namely *(batch_size, channel, height, width)*. We can choose other layouts such as *NWC*. If ``num_group`` is larger than 1, denoted by *g*, then split the input ``data`` evenly into *g* parts along the channel axis, and also evenly split ``weight`` along the first dimension. Next compute the convolution on the *i*-th part of the data with the *i*-th weight part. The output is obtained by concatenating all the *g* results. 1-D convolution does not have *height* dimension but only *width* in space. - **data**: *(batch_size, channel, width)* - **weight**: *(num_filter, channel, kernel[0])* - **bias**: *(num_filter,)* - **out**: *(batch_size, num_filter, out_width)*. 3-D convolution adds an additional *depth* dimension besides *height* and *width*. The shapes are - **data**: *(batch_size, channel, depth, height, width)* - **weight**: *(num_filter, channel, kernel[0], kernel[1], kernel[2])* - **bias**: *(num_filter,)* - **out**: *(batch_size, num_filter, out_depth, out_height, out_width)*. Both ``weight`` and ``bias`` are learnable parameters. There are other options to tune the performance. - **cudnn_tune**: enable this option leads to higher startup time but may give faster speed. Options are - **off**: no tuning - **limited_workspace**:run test and pick the fastest algorithm that doesn't exceed workspace limit. - **fastest**: pick the fastest algorithm and ignore workspace limit. - **None** (default): the behavior is determined by environment variable ``MXNET_CUDNN_AUTOTUNE_DEFAULT``. 0 for off, 1 for limited workspace (default), 2 for fastest. - **workspace**: A large number leads to more (GPU) memory usage but may improve the performance. Defined in ../src/operator/nn/spatial_parallel_convolution.cc:L436 Parameters ---------- data : Symbol Input data to the ConvolutionOp. weight : Symbol Weight matrix. dummy : Symbol Dummy parameter needed to make sure no 2 spatial parallel convolutions are performed at the same time. bias : Symbol Bias parameter. 
kernel : Shape(tuple), required Convolution kernel size: (w,), (h, w) or (d, h, w) stride : Shape(tuple), optional, default=[] Convolution stride: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension. dilate : Shape(tuple), optional, default=[] Convolution dilate: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension. pad : Shape(tuple), optional, default=[] Zero pad for convolution: (w,), (h, w) or (d, h, w). Defaults to no padding. num_filter : int (non-negative), required Convolution filter (channel) number num_group : int (non-negative), optional, default=1 Number of group partitions. workspace : long (non-negative), optional, default=1024 Maximum temporary workspace allowed (MB) in convolution. This parameter has two usages. When CUDNN is not used, it determines the effective batch size of the convolution kernel. When CUDNN is used, it controls the maximum temporary storage used for tuning the best CUDNN kernel when `limited_workspace` strategy is used. no_bias : boolean, optional, default=0 Whether to disable bias parameter. cudnn_tune : {None, 'fastest', 'limited_workspace', 'off'},optional, default='None' Whether to pick convolution algo by running performance test. cudnn_off : boolean, optional, default=0 Turn off cudnn for this layer. cudnn_tensor_core : boolean or None, optional, default=None Allow Tensor Core math within the algos. cudnn_tensor_core_only : boolean, optional, default=0 Require Tensor Core math within the algos. layout : {None, 'NCDHW', 'NCHW', 'NCW', 'NDHWC', 'NHWC', 'NWC'},optional, default='None' Set layout for input, output and weight. Empty for default layout: NCW for 1d, NCHW for 2d and NCDHW for 3d. NHWC and NDHWC are only supported on GPU. cudnn_algo_verbose : boolean, optional, default=0 Verboseness of algo selection. 1 = output selection, 0 = no output cudnn_algo_fwd : int, optional, default='-1' Specified Forward Algorithm. cudnn_algo_bwd_data : int, optional, default='-1' Specified Backprop-to-Data Algorithm. cudnn_algo_bwd_filter : int, optional, default='-1' Specified Backprop-to-Filter Algorithm. cudnn_algo_fwd_prec : {'None', 'float16', 'float32', 'float64'},optional, default='None' Precision of the computation of the forward convolution kernel. Default is the tensor data type, or float32 if the tensor data type is float16. cudnn_algo_bwd_prec : {'None', 'float16', 'float32', 'float64'},optional, default='None' Precision of the computation of the back-prop kernels. Default is the tensor data type, or float32 if the tensor data type is float16. num_gpus : int, required Number of GPUs per sample. rank : int, required Rank inside a group nccl_unique_id : long (non-negative), required NCCL unique ID name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def SpatialTransformer(data=None, loc=None, target_shape=_Null, transform_type=_Null, sampler_type=_Null, cudnn_off=_Null, name=None, attr=None, out=None, **kwargs): r"""Applies a spatial transformer to input feature map. Parameters ---------- data : Symbol Input data to the SpatialTransformerOp. loc : Symbol localisation net, the output dim should be 6 when transform_type is affine. You should initialize the weight and bias with the identity transform.
target_shape : Shape(tuple), optional, default=[0,0] output shape (h, w) of spatial transformer: (y, x) transform_type : {'affine'}, required transformation type sampler_type : {'bilinear'}, required sampling type cudnn_off : boolean or None, optional, default=None whether to turn cudnn off name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def SwapAxis(data=None, dim1=_Null, dim2=_Null, name=None, attr=None, out=None, **kwargs): r"""Interchanges two axes of an array. Examples:: x = [[1, 2, 3]] swapaxes(x, 0, 1) = [[ 1], [ 2], [ 3]] x = [[[ 0, 1], [ 2, 3]], [[ 4, 5], [ 6, 7]]] // (2,2,2) array swapaxes(x, 0, 2) = [[[ 0, 4], [ 2, 6]], [[ 1, 5], [ 3, 7]]] Defined in ../src/operator/swapaxis.cc:L70 Parameters ---------- data : Symbol Input array. dim1 : int, optional, default='0' the first axis to be swapped. dim2 : int, optional, default='0' the second axis to be swapped. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def UpSampling(*data, **kwargs): r"""Upsamples the given input data. Two algorithms (``sample_type``) are available for upsampling: - Nearest Neighbor - Bilinear **Nearest Neighbor Upsampling** Input data is expected to be NCHW. Example:: x = [[[[1. 1. 1.] [1. 1. 1.] [1. 1. 1.]]]] UpSampling(x, scale=2, sample_type='nearest') = [[[[1. 1. 1. 1. 1. 1.] [1. 1. 1. 1. 1. 1.] [1. 1. 1. 1. 1. 1.] [1. 1. 1. 1. 1. 1.] [1. 1. 1. 1. 1. 1.] [1. 1. 1. 1. 1. 1.]]]] **Bilinear Upsampling** Uses the `deconvolution` algorithm under the hood. You need to provide both the input data and the kernel. Input data is expected to be NCHW. `num_filter` is expected to be the same as the number of channels. Example:: x = [[[[1. 1. 1.] [1. 1. 1.] [1. 1. 1.]]]] w = [[[[1. 1. 1. 1.] [1. 1. 1. 1.] [1. 1. 1. 1.] [1. 1. 1. 1.]]]] UpSampling(x, w, scale=2, sample_type='bilinear', num_filter=1) = [[[[1. 2. 2. 2. 2. 1.] [2. 4. 4. 4. 4. 2.] [2. 4. 4. 4. 4. 2.] [2. 4. 4. 4. 4. 2.] [2. 4. 4. 4. 4. 2.] [1. 2. 2. 2. 2. 1.]]]] Defined in ../src/operator/nn/upsampling.cc:L173 This function supports a variable number of positional inputs. Parameters ---------- data : Symbol[] Array of tensors to upsample. For bilinear upsampling, there should be 2 inputs - 1 data and 1 weight. scale : int, required Up sampling scale num_filter : int, optional, default='0' Input filter. Only used by bilinear sample_type. Since bilinear upsampling uses deconvolution, num_filter is set to the number of channels. sample_type : {'bilinear', 'nearest'}, required upsampling method multi_input_mode : {'concat', 'sum'},optional, default='concat' How to handle multiple inputs. concat means concatenate upsampled images along the channel dimension. sum means add all images together, only available for nearest neighbor upsampling. workspace : long (non-negative), optional, default=512 Tmp workspace for deconvolution (MB) name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def abs(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise absolute value of the input. Example:: abs([-2, 0, 3]) = [2, 0, 3] The storage type of ``abs`` output depends upon the input storage type: - abs(default) = default - abs(row_sparse) = row_sparse - abs(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L720 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol.
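Example (a minimal sketch, assuming ``import mxnet as mx``; ``Symbol.eval`` is used here only to illustrate the numerical result)::

    import mxnet as mx
    x = mx.sym.Variable('x')
    y = mx.sym.abs(x)
    # eval binds the symbol and runs one forward pass, returning a list of NDArrays
    out = y.eval(ctx=mx.cpu(), x=mx.nd.array([-2, 0, 3]))[0]
    print(out.asnumpy())  # [2. 0. 3.]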
""" return (0,) def adam_update(weight=None, grad=None, mean=None, var=None, lr=_Null, beta1=_Null, beta2=_Null, epsilon=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, lazy_update=_Null, name=None, attr=None, out=None, **kwargs): r"""Update function for Adam optimizer. Adam is seen as a generalization of AdaGrad. Adam update consists of the following steps, where g represents gradient and m, v are 1st and 2nd order moment estimates (mean and variance). .. math:: g_t = \nabla J(W_{t-1})\\ m_t = \beta_1 m_{t-1} + (1 - \beta_1) g_t\\ v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2\\ W_t = W_{t-1} - \alpha \frac{ m_t }{ \sqrt{ v_t } + \epsilon } It updates the weights using:: m = beta1*m + (1-beta1)*grad v = beta2*v + (1-beta2)*(grad**2) w += - learning_rate * m / (sqrt(v) + epsilon) However, if grad's storage type is ``row_sparse``, ``lazy_update`` is True and the storage type of weight is the same as those of m and v, only the row slices whose indices appear in grad.indices are updated (for w, m and v):: for row in grad.indices: m[row] = beta1*m[row] + (1-beta1)*grad[row] v[row] = beta2*v[row] + (1-beta2)*(grad[row]**2) w[row] += - learning_rate * m[row] / (sqrt(v[row]) + epsilon) Defined in ../src/operator/optimizer_op.cc:L686 Parameters ---------- weight : Symbol Weight grad : Symbol Gradient mean : Symbol Moving mean var : Symbol Moving variance lr : float, required Learning rate beta1 : float, optional, default=0.899999976 The decay rate for the 1st moment estimates. beta2 : float, optional, default=0.999000013 The decay rate for the 2nd moment estimates. epsilon : float, optional, default=9.99999994e-09 A small constant for numerical stability. wd : float, optional, default=0 Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). lazy_update : boolean, optional, default=1 If true, lazy updates are applied if gradient's stype is row_sparse and all of w, m and v have the same stype name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def add_n(*args, **kwargs): r"""Adds all input arguments element-wise. .. math:: add\_n(a_1, a_2, ..., a_n) = a_1 + a_2 + ... + a_n ``add_n`` is potentially more efficient than calling ``add`` by `n` times. The storage type of ``add_n`` output depends on storage types of inputs - add_n(row_sparse, row_sparse, ..) = row_sparse - add_n(default, csr, default) = default - add_n(any input combinations longer than 4 (>4) with at least one default type) = default - otherwise, ``add_n`` falls all inputs back to default storage and generates default storage Defined in ../src/operator/tensor/elemwise_sum.cc:L156 This function support variable length of positional input. Parameters ---------- args : Symbol[] Positional input arguments name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. 
""" return (0,) def all_finite(data=None, init_output=_Null, name=None, attr=None, out=None, **kwargs): r"""Check if all the float numbers in the array are finite (used for AMP) Defined in ../src/operator/contrib/all_finite.cc:L101 Parameters ---------- data : NDArray Array init_output : boolean, optional, default=1 Initialize output to 1. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def amp_cast(data=None, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Cast function between low precision float/FP32 used by AMP. It casts only between low precision float/FP32 and does not do anything for other types. Defined in ../src/operator/tensor/amp_cast.cc:L135 Parameters ---------- data : Symbol The input. dtype : {'bfloat16', 'float16', 'float32', 'float64', 'int32', 'int64', 'int8', 'uint8'}, required Output data type. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def amp_multicast(*data, **kwargs): r"""Cast function used by AMP, that casts its inputs to the common widest type. It casts only between low precision float/FP32 and does not do anything for other types. Defined in ../src/operator/tensor/amp_cast.cc:L180 Parameters ---------- data : Symbol[] Weights num_outputs : int, required Number of input/output pairs to be casted to the widest type. cast_narrow : boolean, optional, default=0 Whether to cast to the narrowest type name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def arccos(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise inverse cosine of the input array. The input should be in range `[-1, 1]`. The output is in the closed interval :math:`[0, \pi]` .. math:: arccos([-1, -.707, 0, .707, 1]) = [\pi, 3\pi/4, \pi/2, \pi/4, 0] The storage type of ``arccos`` output is always dense Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L233 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def arccosh(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns the element-wise inverse hyperbolic cosine of the input array, \ computed element-wise. The storage type of ``arccosh`` output is always dense Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L535 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def arcsin(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise inverse sine of the input array. The input should be in the range `[-1, 1]`. The output is in the closed interval of [:math:`-\pi/2`, :math:`\pi/2`]. .. math:: arcsin([-1, -.707, 0, .707, 1]) = [-\pi/2, -\pi/4, 0, \pi/4, \pi/2] The storage type of ``arcsin`` output depends upon the input storage type: - arcsin(default) = default - arcsin(row_sparse) = row_sparse - arcsin(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L187 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def arcsinh(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns the element-wise inverse hyperbolic sine of the input array, \ computed element-wise. 
The storage type of ``arcsinh`` output depends upon the input storage type: - arcsinh(default) = default - arcsinh(row_sparse) = row_sparse - arcsinh(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L494 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def arctan(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise inverse tangent of the input array. The output is in the closed interval :math:`[-\pi/2, \pi/2]` .. math:: arctan([-1, 0, 1]) = [-\pi/4, 0, \pi/4] The storage type of ``arctan`` output depends upon the input storage type: - arctan(default) = default - arctan(row_sparse) = row_sparse - arctan(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L282 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def arctanh(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns the inverse hyperbolic tangent of the input array, computed element-wise. The storage type of ``arctanh`` output depends upon the input storage type: - arctanh(default) = default - arctanh(row_sparse) = row_sparse - arctanh(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L579 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def argmax(data=None, axis=_Null, keepdims=_Null, name=None, attr=None, out=None, **kwargs): r"""Returns indices of the maximum values along an axis. In the case of multiple occurrences of maximum values, the indices corresponding to the first occurrence are returned. Examples:: x = [[ 0., 1., 2.], [ 3., 4., 5.]] // argmax along axis 0 argmax(x, axis=0) = [ 1., 1., 1.] // argmax along axis 1 argmax(x, axis=1) = [ 2., 2.] // argmax along axis 1 keeping same dims as an input array argmax(x, axis=1, keepdims=True) = [[ 2.], [ 2.]] Defined in ../src/operator/tensor/broadcast_reduce_op_index.cc:L52 Parameters ---------- data : Symbol The input axis : int or None, optional, default='None' The axis along which to perform the reduction. Negative values mean indexing from right to left. ``Requires axis to be set as int, because global reduction is not supported yet.`` keepdims : boolean, optional, default=0 If this is set to `True`, the reduced axis is left in the result as a dimension with size one. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def argmax_channel(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns argmax indices of each channel from the input array. The result will be an NDArray of shape (num_channel,). In case of multiple occurrences of the maximum values, the indices corresponding to the first occurrence are returned. Examples:: x = [[ 0., 1., 2.], [ 3., 4., 5.]] argmax_channel(x) = [ 2., 2.] Defined in ../src/operator/tensor/broadcast_reduce_op_index.cc:L97 Parameters ---------- data : Symbol The input array name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def argmin(data=None, axis=_Null, keepdims=_Null, name=None, attr=None, out=None, **kwargs): r"""Returns indices of the minimum values along an axis.
In the case of multiple occurrences of minimum values, the indices corresponding to the first occurrence are returned. Examples:: x = [[ 0., 1., 2.], [ 3., 4., 5.]] // argmin along axis 0 argmin(x, axis=0) = [ 0., 0., 0.] // argmin along axis 1 argmin(x, axis=1) = [ 0., 0.] // argmin along axis 1 keeping same dims as an input array argmin(x, axis=1, keepdims=True) = [[ 0.], [ 0.]] Defined in ../src/operator/tensor/broadcast_reduce_op_index.cc:L77 Parameters ---------- data : Symbol The input axis : int or None, optional, default='None' The axis along which to perform the reduction. Negative values mean indexing from right to left. ``Requires axis to be set as int, because global reduction is not supported yet.`` keepdims : boolean, optional, default=0 If this is set to `True`, the reduced axis is left in the result as a dimension with size one. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def argsort(data=None, axis=_Null, is_ascend=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Returns the indices that would sort an input array along the given axis. This function performs sorting along the given axis and returns an array of indices having the same shape as the input array that index data in sorted order. Examples:: x = [[ 0.3, 0.2, 0.4], [ 0.1, 0.3, 0.2]] // sort along axis -1 argsort(x) = [[ 1., 0., 2.], [ 0., 2., 1.]] // sort along axis 0 argsort(x, axis=0) = [[ 1., 0., 1.], [ 0., 1., 0.]] // flatten and then sort argsort(x, axis=None) = [ 3., 1., 5., 0., 4., 2.] Defined in ../src/operator/tensor/ordering_op.cc:L185 Parameters ---------- data : Symbol The input array axis : int or None, optional, default='-1' Axis along which to sort the input tensor. If not given, the flattened array is used. Default is -1. is_ascend : boolean, optional, default=1 Whether to sort in ascending or descending order. dtype : {'float16', 'float32', 'float64', 'int32', 'int64', 'uint8'},optional, default='float32' DType of the output indices. It is only valid when ret_typ is "indices" or "both". An error will be raised if the selected data type cannot precisely represent the indices. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def batch_dot(lhs=None, rhs=None, transpose_a=_Null, transpose_b=_Null, forward_stype=_Null, name=None, attr=None, out=None, **kwargs): r"""Batchwise dot product. ``batch_dot`` is used to compute the dot product of ``x`` and ``y`` when ``x`` and ``y`` are batched data, namely N-D (N >= 3) arrays of shape `(B_0, ..., B_i, :, :)`. For example, given ``x`` with shape `(B_0, ..., B_i, N, M)` and ``y`` with shape `(B_0, ..., B_i, M, K)`, the result array will have shape `(B_0, ..., B_i, N, K)`, which is computed by:: batch_dot(x,y)[b_0, ..., b_i, :, :] = dot(x[b_0, ..., b_i, :, :], y[b_0, ..., b_i, :, :]) Defined in ../src/operator/tensor/dot.cc:L127 Parameters ---------- lhs : Symbol The first input rhs : Symbol The second input transpose_a : boolean, optional, default=0 If true then transpose the first input before dot. transpose_b : boolean, optional, default=0 If true then transpose the second input before dot.
forward_stype : {None, 'csr', 'default', 'row_sparse'},optional, default='None' The desired storage type of the forward output given by the user. If the combination of input storage types and this hint does not match any implemented ones, the dot operator will perform a fallback operation and still produce an output of the desired storage type. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def batch_take(a=None, indices=None, name=None, attr=None, out=None, **kwargs): r"""Takes elements from a data batch. .. note:: `batch_take` is deprecated. Use `pick` instead. Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be an output array of shape ``(i0,)`` with:: output[i] = input[i, indices[i]] Examples:: x = [[ 1., 2.], [ 3., 4.], [ 5., 6.]] // takes elements with specified indices batch_take(x, [0,1,0]) = [ 1. 4. 5.] Defined in ../src/operator/tensor/indexing_op.cc:L836 Parameters ---------- a : Symbol The input array indices : Symbol The index array name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_add(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise sum of the input arrays with broadcasting. `broadcast_plus` is an alias to the function `broadcast_add`. Example:: x = [[ 1., 1., 1.], [ 1., 1., 1.]] y = [[ 0.], [ 1.]] broadcast_add(x, y) = [[ 1., 1., 1.], [ 2., 2., 2.]] broadcast_plus(x, y) = [[ 1., 1., 1.], [ 2., 2., 2.]] Supported sparse operations: broadcast_add(csr, dense(1D)) = dense broadcast_add(dense(1D), csr) = dense Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L58 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_axes(data=None, axis=_Null, size=_Null, name=None, attr=None, out=None, **kwargs): r"""Broadcasts the input array over particular axes. Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. `broadcast_axes` is an alias to the function `broadcast_axis`. Example:: // given x of shape (1,2,1) x = [[[ 1.], [ 2.]]] // broadcast x on axis 2 broadcast_axis(x, axis=2, size=3) = [[[ 1., 1., 1.], [ 2., 2., 2.]]] // broadcast x on axes 0 and 2 broadcast_axis(x, axis=(0,2), size=(2,3)) = [[[ 1., 1., 1.], [ 2., 2., 2.]], [[ 1., 1., 1.], [ 2., 2., 2.]]] Defined in ../src/operator/tensor/broadcast_reduce_op_value.cc:L93 Parameters ---------- data : Symbol The input axis : Shape(tuple), optional, default=[] The axes to perform the broadcasting. size : Shape(tuple), optional, default=[] Target sizes of the broadcasting axes. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_axis(data=None, axis=_Null, size=_Null, name=None, attr=None, out=None, **kwargs): r"""Broadcasts the input array over particular axes. Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. `broadcast_axes` is an alias to the function `broadcast_axis`.
Example:: // given x of shape (1,2,1) x = [[[ 1.], [ 2.]]] // broadcast x on axis 2 broadcast_axis(x, axis=2, size=3) = [[[ 1., 1., 1.], [ 2., 2., 2.]]] // broadcast x on axes 0 and 2 broadcast_axis(x, axis=(0,2), size=(2,3)) = [[[ 1., 1., 1.], [ 2., 2., 2.]], [[ 1., 1., 1.], [ 2., 2., 2.]]] Defined in ../src/operator/tensor/broadcast_reduce_op_value.cc:L93 Parameters ---------- data : Symbol The input axis : Shape(tuple), optional, default=[] The axes to perform the broadcasting. size : Shape(tuple), optional, default=[] Target sizes of the broadcasting axes. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_div(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise division of the input arrays with broadcasting. Example:: x = [[ 6., 6., 6.], [ 6., 6., 6.]] y = [[ 2.], [ 3.]] broadcast_div(x, y) = [[ 3., 3., 3.], [ 2., 2., 2.]] Supported sparse operations: broadcast_div(csr, dense(1D)) = csr Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L187 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_equal(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Returns the result of element-wise **equal to** (==) comparison operation with broadcasting. Example:: x = [[ 1., 1., 1.], [ 1., 1., 1.]] y = [[ 0.], [ 1.]] broadcast_equal(x, y) = [[ 0., 0., 0.], [ 1., 1., 1.]] Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L46 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_greater(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Returns the result of element-wise **greater than** (>) comparison operation with broadcasting. Example:: x = [[ 1., 1., 1.], [ 1., 1., 1.]] y = [[ 0.], [ 1.]] broadcast_greater(x, y) = [[ 1., 1., 1.], [ 0., 0., 0.]] Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L82 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_greater_equal(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Returns the result of element-wise **greater than or equal to** (>=) comparison operation with broadcasting. Example:: x = [[ 1., 1., 1.], [ 1., 1., 1.]] y = [[ 0.], [ 1.]] broadcast_greater_equal(x, y) = [[ 1., 1., 1.], [ 1., 1., 1.]] Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L100 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_hypot(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r""" Returns the hypotenuse of a right-angled triangle, given its "legs" with broadcasting. It is equivalent to doing :math:`sqrt(x_1^2 + x_2^2)`.
Example:: x = [[ 3., 3., 3.]] y = [[ 4.], [ 4.]] broadcast_hypot(x, y) = [[ 5., 5., 5.], [ 5., 5., 5.]] z = [[ 0.], [ 4.]] broadcast_hypot(x, z) = [[ 3., 3., 3.], [ 5., 5., 5.]] Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L158 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_lesser(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Returns the result of element-wise **lesser than** (<) comparison operation with broadcasting. Example:: x = [[ 1., 1., 1.], [ 1., 1., 1.]] y = [[ 0.], [ 1.]] broadcast_lesser(x, y) = [[ 0., 0., 0.], [ 0., 0., 0.]] Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L118 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_lesser_equal(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Returns the result of element-wise **lesser than or equal to** (<=) comparison operation with broadcasting. Example:: x = [[ 1., 1., 1.], [ 1., 1., 1.]] y = [[ 0.], [ 1.]] broadcast_lesser_equal(x, y) = [[ 0., 0., 0.], [ 1., 1., 1.]] Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L136 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_like(lhs=None, rhs=None, lhs_axes=_Null, rhs_axes=_Null, name=None, attr=None, out=None, **kwargs): r"""Broadcasts lhs to have the same shape as rhs. Broadcasting is a mechanism that allows NDArrays to perform arithmetic operations with arrays of different shapes efficiently without creating multiple copies of arrays. Also see, `Broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_ for more explanation. Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. For example:: broadcast_like([[1,2,3]], [[5,6,7],[7,8,9]]) = [[ 1., 2., 3.], [ 1., 2., 3.]] broadcast_like([9], [1,2,3,4,5], lhs_axes=(0,), rhs_axes=(-1,)) = [9,9,9,9,9] Defined in ../src/operator/tensor/broadcast_reduce_op_value.cc:L179 Parameters ---------- lhs : Symbol First input. rhs : Symbol Second input. lhs_axes : Shape or None, optional, default=None Axes to perform broadcast on in the first input array rhs_axes : Shape or None, optional, default=None Axes to copy from the second input array name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_logical_and(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Returns the result of element-wise **logical and** with broadcasting. Example:: x = [[ 1., 1., 1.], [ 1., 1., 1.]] y = [[ 0.], [ 1.]] broadcast_logical_and(x, y) = [[ 0., 0., 0.], [ 1., 1., 1.]] Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L154 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol.
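A minimal symbolic usage sketch of the example above (illustrative, not part of the generated reference; it assumes ``mxnet`` is installed and uses ``Symbol.eval`` for a quick check)::

    import mxnet as mx
    x = mx.sym.Variable('x')
    y = mx.sym.Variable('y')
    out = mx.sym.broadcast_logical_and(x, y)
    # evaluate with the x/y values from the example above
    res = out.eval(ctx=mx.cpu(),
                   x=mx.nd.ones((2, 3)),
                   y=mx.nd.array([[0.], [1.]]))
    print(res[0].asnumpy())  # [[0. 0. 0.] [1. 1. 1.]]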
""" return (0,) def broadcast_logical_or(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Returns the result of element-wise **logical or** with broadcasting. Example:: x = [[ 1., 1., 0.], [ 1., 1., 0.]] y = [[ 1.], [ 0.]] broadcast_logical_or(x, y) = [[ 1., 1., 1.], [ 1., 1., 0.]] Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L172 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_logical_xor(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Returns the result of element-wise **logical xor** with broadcasting. Example:: x = [[ 1., 1., 0.], [ 1., 1., 0.]] y = [[ 1.], [ 0.]] broadcast_logical_xor(x, y) = [[ 0., 0., 1.], [ 1., 1., 0.]] Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L190 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_maximum(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise maximum of the input arrays with broadcasting. This function compares two input arrays and returns a new array having the element-wise maxima. Example:: x = [[ 1., 1., 1.], [ 1., 1., 1.]] y = [[ 0.], [ 1.]] broadcast_maximum(x, y) = [[ 1., 1., 1.], [ 1., 1., 1.]] Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L81 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_minimum(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise minimum of the input arrays with broadcasting. This function compares two input arrays and returns a new array having the element-wise minima. Example:: x = [[ 1., 1., 1.], [ 1., 1., 1.]] y = [[ 0.], [ 1.]] broadcast_maximum(x, y) = [[ 0., 0., 0.], [ 1., 1., 1.]] Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L117 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_minus(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise difference of the input arrays with broadcasting. `broadcast_minus` is an alias to the function `broadcast_sub`. Example:: x = [[ 1., 1., 1.], [ 1., 1., 1.]] y = [[ 0.], [ 1.]] broadcast_sub(x, y) = [[ 1., 1., 1.], [ 0., 0., 0.]] broadcast_minus(x, y) = [[ 1., 1., 1.], [ 0., 0., 0.]] Supported sparse operations: broadcast_sub/minus(csr, dense(1D)) = dense broadcast_sub/minus(dense(1D), csr) = dense Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L106 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_mod(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise modulo of the input arrays with broadcasting. 
Example:: x = [[ 8., 8., 8.], [ 8., 8., 8.]] y = [[ 2.], [ 3.]] broadcast_mod(x, y) = [[ 0., 0., 0.], [ 2., 2., 2.]] Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L222 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_mul(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise product of the input arrays with broadcasting. Example:: x = [[ 1., 1., 1.], [ 1., 1., 1.]] y = [[ 0.], [ 1.]] broadcast_mul(x, y) = [[ 0., 0., 0.], [ 1., 1., 1.]] Supported sparse operations: broadcast_mul(csr, dense(1D)) = csr Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L146 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_not_equal(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Returns the result of element-wise **not equal to** (!=) comparison operation with broadcasting. Example:: x = [[ 1., 1., 1.], [ 1., 1., 1.]] y = [[ 0.], [ 1.]] broadcast_not_equal(x, y) = [[ 1., 1., 1.], [ 0., 0., 0.]] Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L64 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_plus(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise sum of the input arrays with broadcasting. `broadcast_plus` is an alias to the function `broadcast_add`. Example:: x = [[ 1., 1., 1.], [ 1., 1., 1.]] y = [[ 0.], [ 1.]] broadcast_add(x, y) = [[ 1., 1., 1.], [ 2., 2., 2.]] broadcast_plus(x, y) = [[ 1., 1., 1.], [ 2., 2., 2.]] Supported sparse operations: broadcast_add(csr, dense(1D)) = dense broadcast_add(dense(1D), csr) = dense Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L58 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_power(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Returns result of first array elements raised to powers from second array, element-wise with broadcasting. Example:: x = [[ 2., 2., 2.], [ 2., 2., 2.]] y = [[ 1.], [ 2.]] broadcast_power(x, y) = [[ 2., 2., 2.], [ 4., 4., 4.]] Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L45 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_sub(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise difference of the input arrays with broadcasting. `broadcast_minus` is an alias to the function `broadcast_sub`.
Example:: x = [[ 1., 1., 1.], [ 1., 1., 1.]] y = [[ 0.], [ 1.]] broadcast_sub(x, y) = [[ 1., 1., 1.], [ 0., 0., 0.]] broadcast_minus(x, y) = [[ 1., 1., 1.], [ 0., 0., 0.]] Supported sparse operations: broadcast_sub/minus(csr, dense(1D)) = dense broadcast_sub/minus(dense(1D), csr) = dense Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L106 Parameters ---------- lhs : Symbol First input to the function rhs : Symbol Second input to the function name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def broadcast_to(data=None, shape=_Null, name=None, attr=None, out=None, **kwargs): r"""Broadcasts the input array to a new shape. Broadcasting is a mechanism that allows NDArrays to perform arithmetic operations with arrays of different shapes efficiently without creating multiple copies of arrays. Also see, `Broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_ for more explanation. Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. For example:: broadcast_to([[1,2,3]], shape=(2,3)) = [[ 1., 2., 3.], [ 1., 2., 3.]] The dimension which you do not want to change can also be kept as `0` which means copy the original value. So with `shape=(2,0)`, we will obtain the same result as in the above example. Defined in ../src/operator/tensor/broadcast_reduce_op_value.cc:L117 Parameters ---------- data : Symbol The input shape : Shape(tuple), optional, default=[] The shape of the desired array. We can set the dim to zero if it's the same as the original. E.g `A = broadcast_to(B, shape=(10, 0, 0))` has the same meaning as `A = broadcast_axis(B, axis=0, size=10)`. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def cast(data=None, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Casts all elements of the input to a new type. .. note:: ``Cast`` is deprecated. Use ``cast`` instead. Example:: cast([0.9, 1.3], dtype='int32') = [0, 1] cast([1e20, 11.1], dtype='float16') = [inf, 11.09375] cast([300, 11.1, 10.9, -1, -3], dtype='uint8') = [44, 11, 10, 255, 253] Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L664 Parameters ---------- data : Symbol The input. dtype : {'bfloat16', 'bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'int8', 'uint8'}, required Output data type. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def cast_storage(data=None, stype=_Null, name=None, attr=None, out=None, **kwargs): r"""Casts tensor storage type to the new type.
When an NDArray with default storage type is cast to csr or row_sparse storage, the result is compact, which means: - for csr, zero values will not be retained - for row_sparse, row slices of all zeros will not be retained The storage type of ``cast_storage`` output depends on stype parameter: - cast_storage(csr, 'default') = default - cast_storage(row_sparse, 'default') = default - cast_storage(default, 'csr') = csr - cast_storage(default, 'row_sparse') = row_sparse - cast_storage(csr, 'csr') = csr - cast_storage(row_sparse, 'row_sparse') = row_sparse Example:: dense = [[ 0., 1., 0.], [ 2., 0., 3.], [ 0., 0., 0.], [ 0., 0., 0.]] # cast to row_sparse storage type rsp = cast_storage(dense, 'row_sparse') rsp.indices = [0, 1] rsp.values = [[ 0., 1., 0.], [ 2., 0., 3.]] # cast to csr storage type csr = cast_storage(dense, 'csr') csr.indices = [1, 0, 2] csr.values = [ 1., 2., 3.] csr.indptr = [0, 1, 3, 3, 3] Defined in ../src/operator/tensor/cast_storage.cc:L71 Parameters ---------- data : Symbol The input. stype : {'csr', 'default', 'row_sparse'}, required Output storage type. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def cbrt(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise cube-root value of the input. .. math:: cbrt(x) = \sqrt[3]{x} Example:: cbrt([1, 8, -125]) = [1, 2, -5] The storage type of ``cbrt`` output depends upon the input storage type: - cbrt(default) = default - cbrt(row_sparse) = row_sparse - cbrt(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_pow.cc:L270 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def ceil(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise ceiling of the input. The ceil of the scalar x is the smallest integer i, such that i >= x. Example:: ceil([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-2., -1., 2., 2., 3.] The storage type of ``ceil`` output depends upon the input storage type: - ceil(default) = default - ceil(row_sparse) = row_sparse - ceil(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L815 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def choose_element_0index(data=None, index=None, axis=_Null, keepdims=_Null, mode=_Null, name=None, attr=None, out=None, **kwargs): r"""Picks elements from an input array according to the input indices along the given axis. Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be an output array of shape ``(i0,)`` with:: output[i] = input[i, indices[i]] By default, if any index mentioned is too large, it is replaced by the index that addresses the last element along an axis (the `clip` mode). This function supports n-dimensional input and (n-1)-dimensional indices arrays. Examples:: x = [[ 1., 2.], [ 3., 4.], [ 5., 6.]] // picks elements with specified indices along axis 0 pick(x, y=[0,1], 0) = [ 1., 4.] // picks elements with specified indices along axis 1 pick(x, y=[0,1,0], 1) = [ 1., 4., 5.] // picks elements with specified indices along axis 1 using 'wrap' mode // to place indices that would normally be out of bounds pick(x, y=[2,-1,-2], 1, mode='wrap') = [ 1., 4., 5.]
y = [[ 1.], [ 0.], [ 2.]] // picks elements with specified indices along axis 1 and dims are maintained pick(x, y, 1, keepdims=True) = [[ 2.], [ 3.], [ 6.]] Defined in ../src/operator/tensor/broadcast_reduce_op_index.cc:L151 Parameters ---------- data : Symbol The input array index : Symbol The index array axis : int or None, optional, default='-1' int or None. The axis along which to pick the elements. Negative values mean indexing from right to left. If `None`, the elements in the index w.r.t. the flattened input will be picked. keepdims : boolean, optional, default=0 If true, the axis where we pick the elements is left in the result as a dimension with size one. mode : {'clip', 'wrap'},optional, default='clip' Specify how out-of-bound indices behave. Default is "clip". "clip" means clip to the range. So, if all indices mentioned are too large, they are replaced by the index that addresses the last element along an axis. "wrap" means to wrap around. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def clip(data=None, a_min=_Null, a_max=_Null, name=None, attr=None, out=None, **kwargs): r"""Clips (limits) the values in an array. Given an interval, values outside the interval are clipped to the interval edges. Clipping ``x`` between `a_min` and `a_max` would be:: .. math:: clip(x, a_min, a_max) = \max(\min(x, a_max), a_min) Example:: x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] clip(x,1,8) = [ 1., 1., 2., 3., 4., 5., 6., 7., 8., 8.] The storage type of ``clip`` output depends on storage types of inputs and the a_min, a_max parameter values: - clip(default) = default - clip(row_sparse, a_min <= 0, a_max >= 0) = row_sparse - clip(csr, a_min <= 0, a_max >= 0) = csr - clip(row_sparse, a_min < 0, a_max < 0) = default - clip(row_sparse, a_min > 0, a_max > 0) = default - clip(csr, a_min < 0, a_max < 0) = csr - clip(csr, a_min > 0, a_max > 0) = csr Defined in ../src/operator/tensor/matrix_op.cc:L693 Parameters ---------- data : Symbol Input array. a_min : float, required Minimum value a_max : float, required Maximum value name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def col2im(data=None, output_size=_Null, kernel=_Null, stride=_Null, dilate=_Null, pad=_Null, name=None, attr=None, out=None, **kwargs): r"""Combines the output column matrix of im2col back into an image array. Like :class:`~mxnet.ndarray.im2col`, this operator is also used in the vanilla convolution implementation. Despite the name, col2im is not the reverse operation of im2col. Since there may be overlaps between neighbouring sliding blocks, the column elements cannot be directly put back into the image. Instead, they are accumulated (i.e., summed) in the input image just like the gradient computation, so col2im is the gradient of im2col and vice versa. Using the notation in im2col, given an input column array of shape :math:`(N, C \times \prod(\text{kernel}), W)`, this operator accumulates the column elements into output array of shape :math:`(N, C, \text{output_size}[0], \text{output_size}[1], \dots)`. Only 1-D, 2-D and 3-D spatial dimensions are supported by this operator. Defined in ../src/operator/nn/im2col.cc:L182 Parameters ---------- data : Symbol Input array to combine sliding blocks. output_size : Shape(tuple), required The spatial dimension of image array: (w,), (h, w) or (d, h, w). kernel : Shape(tuple), required Sliding kernel size: (w,), (h, w) or (d, h, w).
stride : Shape(tuple), optional, default=[] The stride between adjacent sliding blocks in spatial dimension: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension. dilate : Shape(tuple), optional, default=[] The spacing between adjacent kernel points: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension. pad : Shape(tuple), optional, default=[] The zero-value padding size on both sides of spatial dimension: (w,), (h, w) or (d, h, w). Defaults to no padding. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def concat(*data, **kwargs): r"""Joins input arrays along a given axis. .. note:: `Concat` is deprecated. Use `concat` instead. The dimensions of the input arrays should be the same except the axis along which they will be concatenated. The dimension of the output array along the concatenated axis will be equal to the sum of the corresponding dimensions of the input arrays. The storage type of ``concat`` output depends on storage types of inputs - concat(csr, csr, ..., csr, dim=0) = csr - otherwise, ``concat`` generates output with default storage Example:: x = [[1,1],[2,2]] y = [[3,3],[4,4],[5,5]] z = [[6,6], [7,7],[8,8]] concat(x,y,z,dim=0) = [[ 1., 1.], [ 2., 2.], [ 3., 3.], [ 4., 4.], [ 5., 5.], [ 6., 6.], [ 7., 7.], [ 8., 8.]] Note that you cannot concat x,y,z along dimension 1 since dimension 0 is not the same for all the input arrays. concat(y,z,dim=1) = [[ 3., 3., 6., 6.], [ 4., 4., 7., 7.], [ 5., 5., 8., 8.]] Defined in ../src/operator/nn/concat.cc:L385 This function supports a variable number of positional inputs. Parameters ---------- data : Symbol[] List of arrays to concatenate dim : int, optional, default='1' the dimension along which to concatenate. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def cos(data=None, name=None, attr=None, out=None, **kwargs): r"""Computes the element-wise cosine of the input array. The input should be in radians (:math:`2\pi` rad equals 360 degrees). .. math:: cos([0, \pi/4, \pi/2]) = [1, 0.707, 0] The storage type of ``cos`` output is always dense Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L90 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def cosh(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns the hyperbolic cosine of the input array, computed element-wise. .. math:: cosh(x) = 0.5\times(exp(x) + exp(-x)) The storage type of ``cosh`` output is always dense Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L409 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def crop(data=None, begin=_Null, end=_Null, step=_Null, name=None, attr=None, out=None, **kwargs): r"""Slices a region of the array. .. note:: ``crop`` is deprecated. Use ``slice`` instead. This function returns a sliced array between the indices given by `begin` and `end` with the corresponding `step`. For an input array of ``shape=(d_0, d_1, ..., d_n-1)``, slice operation with ``begin=(b_0, b_1...b_m-1)``, ``end=(e_0, e_1, ..., e_m-1)``, and ``step=(s_0, s_1, ..., s_m-1)``, where m <= n, results in an array with the shape ``(|e_0-b_0|/|s_0|, ..., |e_m-1-b_m-1|/|s_m-1|, d_m, ..., d_n-1)``.
The resulting array's *k*-th dimension contains elements from the *k*-th dimension of the input array starting from index ``b_k`` (inclusive) with step ``s_k`` until reaching ``e_k`` (exclusive). If the *k*-th elements are `None` in the sequence of `begin`, `end`, and `step`, the following rule will be used to set default values. If `s_k` is `None`, set `s_k=1`. If `s_k > 0`, set `b_k=0`, `e_k=d_k`; else, set `b_k=d_k-1`, `e_k=-1`. The storage type of ``slice`` output depends on storage types of inputs - slice(csr) = csr - otherwise, ``slice`` generates output with default storage .. note:: When input data storage type is csr, it only supports step=(), or step=(None,), or step=(1,) to generate a csr output. For other step parameter values, it falls back to slicing a dense tensor. Example:: x = [[ 1., 2., 3., 4.], [ 5., 6., 7., 8.], [ 9., 10., 11., 12.]] slice(x, begin=(0,1), end=(2,4)) = [[ 2., 3., 4.], [ 6., 7., 8.]] slice(x, begin=(None, 0), end=(None, 3), step=(-1, 2)) = [[9., 11.], [5., 7.], [1., 3.]] Defined in ../src/operator/tensor/matrix_op.cc:L498 Parameters ---------- data : Symbol Source input begin : Shape(tuple), required starting indices for the slice operation, supports negative indices. end : Shape(tuple), required ending indices for the slice operation, supports negative indices. step : Shape(tuple), optional, default=[] step for the slice operation, supports negative values. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def ctc_loss(data=None, label=None, data_lengths=None, label_lengths=None, use_data_lengths=_Null, use_label_lengths=_Null, blank_label=_Null, name=None, attr=None, out=None, **kwargs): r"""Connectionist Temporal Classification Loss. .. note:: The existing alias ``contrib_CTCLoss`` is deprecated. The shapes of the inputs and outputs: - **data**: `(sequence_length, batch_size, alphabet_size)` - **label**: `(batch_size, label_sequence_length)` - **out**: `(batch_size)` The `data` tensor consists of sequences of activation vectors (without applying softmax), with i-th channel in the last dimension corresponding to i-th label for i between 0 and alphabet_size-1 (i.e. always 0-indexed). Alphabet size should include one additional value reserved for blank label. When `blank_label` is ``"first"``, the ``0``-th channel is reserved for activation of the blank label; otherwise, if it is ``"last"``, the ``(alphabet_size-1)``-th channel is reserved for the blank label. ``label`` is an index matrix of integers. When `blank_label` is ``"first"``, the value 0 is then reserved for blank label, and should not be passed in this matrix. Otherwise, when `blank_label` is ``"last"``, the value `(alphabet_size-1)` is reserved for blank label. If a sequence of labels is shorter than *label_sequence_length*, use the special padding value at the end of the sequence to conform it to the correct length. The padding value is `0` when `blank_label` is ``"first"``, and `-1` otherwise. For example, suppose the vocabulary is `[a, b, c]`, and in one batch we have three sequences 'ba', 'cbb', and 'abac'. When `blank_label` is ``"first"``, we can index the labels as `{'a': 1, 'b': 2, 'c': 3}`, and we reserve the 0-th channel for blank label in data tensor. The resulting `label` tensor should be padded to be:: [[2, 1, 0, 0], [3, 2, 2, 0], [1, 2, 1, 3]] When `blank_label` is ``"last"``, we can index the labels as `{'a': 0, 'b': 1, 'c': 2}`, and we reserve the channel index 3 for blank label in data tensor.
The resulting `label` tensor should be padded to be:: [[1, 0, -1, -1], [2, 1, 1, -1], [0, 1, 0, 2]] ``out`` is a list of CTC loss values, one per example in the batch. See *Connectionist Temporal Classification: Labelling Unsegmented Sequence Data with Recurrent Neural Networks*, A. Graves *et al*. for more information on the definition and the algorithm. Defined in ../src/operator/nn/ctc_loss.cc:L100 Parameters ---------- data : Symbol Input ndarray label : Symbol Ground-truth labels for the loss. data_lengths : Symbol Lengths of data for each of the samples. Only required when use_data_lengths is true. label_lengths : Symbol Lengths of labels for each of the samples. Only required when use_label_lengths is true. use_data_lengths : boolean, optional, default=0 Whether the data lengths are decided by `data_lengths`. If false, the lengths are equal to the max sequence length. use_label_lengths : boolean, optional, default=0 Whether the label lengths are decided by `label_lengths`, or derived from `padding_mask`. If false, the lengths are derived from the first occurrence of the value of `padding_mask`. The value of `padding_mask` is ``0`` when first CTC label is reserved for blank, and ``-1`` when last label is reserved for blank. See `blank_label`. blank_label : {'first', 'last'},optional, default='first' Set the label that is reserved for blank label. If "first", 0-th label is reserved, and label values for tokens in the vocabulary are between ``1`` and ``alphabet_size-1``, and the padding mask is ``-1``. If "last", last label value ``alphabet_size-1`` is reserved for blank label instead, and label values for tokens in the vocabulary are between ``0`` and ``alphabet_size-2``, and the padding mask is ``0``. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def cumsum(a=None, axis=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Return the cumulative sum of the elements along a given axis. Defined in ../src/operator/numpy/np_cumsum.cc:L70 Parameters ---------- a : Symbol Input ndarray axis : int or None, optional, default='None' Axis along which the cumulative sum is computed. The default (None) is to compute the cumsum over the flattened array. dtype : {None, 'float16', 'float32', 'float64', 'int32', 'int64', 'int8'},optional, default='None' Type of the returned array and of the accumulator in which the elements are summed. If dtype is not specified, it defaults to the dtype of a, unless a has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def degrees(data=None, name=None, attr=None, out=None, **kwargs): r"""Converts each element of the input array from radians to degrees. .. math:: degrees([0, \pi/2, \pi, 3\pi/2, 2\pi]) = [0, 90, 180, 270, 360] The storage type of ``degrees`` output depends upon the input storage type: - degrees(default) = default - degrees(row_sparse) = row_sparse - degrees(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L332 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def depth_to_space(data=None, block_size=_Null, name=None, attr=None, out=None, **kwargs): r"""Rearranges (permutes) data from depth into blocks of spatial data.
Similar to ONNX DepthToSpace operator: https://github.com/onnx/onnx/blob/master/docs/Operators.md#DepthToSpace. The output is a new tensor where the values from depth dimension are moved in spatial blocks to height and width dimension. The reverse of this operation is ``space_to_depth``. .. math:: \begin{gather*} x \prime = reshape(x, [N, block\_size, block\_size, C / (block\_size ^ 2), H, W]) \\ x \prime \prime = transpose(x \prime, [0, 3, 4, 1, 5, 2]) \\ y = reshape(x \prime \prime, [N, C / (block\_size ^ 2), H * block\_size, W * block\_size]) \end{gather*} where :math:`x` is an input tensor with default layout as :math:`[N, C, H, W]`: [batch, channels, height, width] and :math:`y` is the output tensor of layout :math:`[N, C / (block\_size ^ 2), H * block\_size, W * block\_size]` Example:: x = [[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]], [[12, 13, 14], [15, 16, 17]], [[18, 19, 20], [21, 22, 23]]]] depth_to_space(x, 2) = [[[[0, 6, 1, 7, 2, 8], [12, 18, 13, 19, 14, 20], [3, 9, 4, 10, 5, 11], [15, 21, 16, 22, 17, 23]]]] Defined in ../src/operator/tensor/matrix_op.cc:L988 Parameters ---------- data : Symbol Input ndarray block_size : int, required Blocks of [block_size, block_size] are moved name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def diag(data=None, k=_Null, axis1=_Null, axis2=_Null, name=None, attr=None, out=None, **kwargs): r"""Extracts a diagonal or constructs a diagonal array. ``diag``'s behavior depends on the input array dimensions: - 1-D arrays: constructs a 2-D array with the input as its diagonal, all other elements are zero. - N-D arrays: extracts the diagonals of the sub-arrays with axes specified by ``axis1`` and ``axis2``. The output shape would be decided by removing the axes numbered ``axis1`` and ``axis2`` from the input shape and appending to the result a new axis with the size of the diagonals in question. For example, when the input shape is `(2, 3, 4, 5)`, ``axis1`` and ``axis2`` are 0 and 2 respectively and ``k`` is 0, the resulting shape would be `(3, 5, 2)`. Examples:: x = [[1, 2, 3], [4, 5, 6]] diag(x) = [1, 5] diag(x, k=1) = [2, 6] diag(x, k=-1) = [4] x = [1, 2, 3] diag(x) = [[1, 0, 0], [0, 2, 0], [0, 0, 3]] diag(x, k=1) = [[0, 1, 0], [0, 0, 2], [0, 0, 0]] diag(x, k=-1) = [[0, 0, 0], [1, 0, 0], [0, 2, 0]] x = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] diag(x) = [[1, 7], [2, 8]] diag(x, k=1) = [[3], [4]] diag(x, axis1=-2, axis2=-1) = [[1, 4], [5, 8]] Defined in ../src/operator/tensor/diag_op.cc:L87 Parameters ---------- data : Symbol Input ndarray k : int, optional, default='0' Diagonal in question. The default is 0. Use k>0 for diagonals above the main diagonal, and k<0 for diagonals below the main diagonal. If input has shape (S0, S1) k must be between -S0 and S1 axis1 : int, optional, default='0' The first axis of the sub-arrays of interest. Ignored when the input is a 1-D array. axis2 : int, optional, default='1' The second axis of the sub-arrays of interest. Ignored when the input is a 1-D array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def dot(lhs=None, rhs=None, transpose_a=_Null, transpose_b=_Null, forward_stype=_Null, name=None, attr=None, out=None, **kwargs): r"""Dot product of two arrays.
``dot``'s behavior depends on the input array dimensions: - 1-D arrays: inner product of vectors - 2-D arrays: matrix multiplication - N-D arrays: a sum product over the last axis of the first input and the first axis of the second input For example, given 3-D ``x`` with shape `(n,m,k)` and ``y`` with shape `(k,r,s)`, the result array will have shape `(n,m,r,s)`. It is computed by:: dot(x,y)[i,j,a,b] = sum(x[i,j,:]*y[:,a,b]) Example:: x = reshape([0,1,2,3,4,5,6,7], shape=(2,2,2)) y = reshape([7,6,5,4,3,2,1,0], shape=(2,2,2)) dot(x,y)[0,0,1,1] = 0 sum(x[0,0,:]*y[:,1,1]) = 0 The storage type of ``dot`` output depends on storage types of inputs, transpose option and forward_stype option for output storage type. Implemented sparse operations include: - dot(default, default, transpose_a=True/False, transpose_b=True/False) = default - dot(csr, default, transpose_a=True) = default or row_sparse (depending on ``forward_stype``) - dot(csr, default) = default - dot(csr, row_sparse) = default - dot(default, csr) = csr (CPU only) - dot(default, csr, forward_stype='default') = default - dot(default, csr, transpose_b=True, forward_stype='default') = default If the combination of input storage types and forward_stype does not match any of the above patterns, ``dot`` will fallback and generate output with default storage. .. Note:: If the storage type of the lhs is "csr", the storage type of gradient w.r.t rhs will be "row_sparse". Only a subset of optimizers support sparse gradients, including SGD, AdaGrad and Adam. Note that by default lazy updates are turned on, which may perform differently from standard updates. For more details, please check the Optimization API at: https://mxnet.incubator.apache.org/api/python/optimization/optimization.html Defined in ../src/operator/tensor/dot.cc:L77 Parameters ---------- lhs : Symbol The first input rhs : Symbol The second input transpose_a : boolean, optional, default=0 If true then transpose the first input before dot. transpose_b : boolean, optional, default=0 If true then transpose the second input before dot. forward_stype : {None, 'csr', 'default', 'row_sparse'},optional, default='None' The desired storage type of the forward output given by the user. If the combination of input storage types and this hint does not match any implemented ones, the dot operator will perform a fallback operation and still produce an output of the desired storage type. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def elemwise_add(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Adds arguments element-wise. The storage type of ``elemwise_add`` output depends on storage types of inputs - elemwise_add(row_sparse, row_sparse) = row_sparse - elemwise_add(csr, csr) = csr - elemwise_add(default, csr) = default - elemwise_add(csr, default) = default - elemwise_add(default, rsp) = default - elemwise_add(rsp, default) = default - otherwise, ``elemwise_add`` generates output with default storage Parameters ---------- lhs : Symbol first input rhs : Symbol second input name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def elemwise_div(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Divides arguments element-wise. The storage type of ``elemwise_div`` output is always dense Parameters ---------- lhs : Symbol first input rhs : Symbol second input name : string, optional. Name of the resulting symbol.
Returns ------- Symbol The result symbol. """ return (0,) def elemwise_mul(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Multiplies arguments element-wise. The storage type of ``elemwise_mul`` output depends on storage types of inputs - elemwise_mul(default, default) = default - elemwise_mul(row_sparse, row_sparse) = row_sparse - elemwise_mul(default, row_sparse) = row_sparse - elemwise_mul(row_sparse, default) = row_sparse - elemwise_mul(csr, csr) = csr - otherwise, ``elemwise_mul`` generates output with default storage Parameters ---------- lhs : Symbol first input rhs : Symbol second input name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def elemwise_sub(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Subtracts arguments element-wise. The storage type of ``elemwise_sub`` output depends on storage types of inputs - elemwise_sub(row_sparse, row_sparse) = row_sparse - elemwise_sub(csr, csr) = csr - elemwise_sub(default, csr) = default - elemwise_sub(csr, default) = default - elemwise_sub(default, rsp) = default - elemwise_sub(rsp, default) = default - otherwise, ``elemwise_sub`` generates output with default storage Parameters ---------- lhs : Symbol first input rhs : Symbol second input name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def erf(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise Gauss error function of the input. Example:: erf([0, -1., 10.]) = [0., -0.8427, 1.] Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L884 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def erfinv(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise inverse Gauss error function of the input. Example:: erfinv([0., 0.5, -1.]) = [0., 0.4769, -inf] Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L906 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def exp(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise exponential value of the input. .. math:: exp(x) = e^x \approx 2.718^x Example:: exp([0, 1, 2]) = [1., 2.71828175, 7.38905621] The storage type of ``exp`` output is always dense Defined in ../src/operator/tensor/elemwise_unary_op_logexp.cc:L64 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def expand_dims(data=None, axis=_Null, name=None, attr=None, out=None, **kwargs): r"""Inserts a new axis of size 1 into the array shape For example, given ``x`` with shape ``(2,3,4)``, then ``expand_dims(x, axis=1)`` will return a new array with shape ``(2,1,3,4)``. Defined in ../src/operator/tensor/matrix_op.cc:L411 Parameters ---------- data : Symbol Source input axis : int, required Position where new axis is to be inserted. Suppose that the input `NDArray`'s dimension is `ndim`, the range of the inserted axis is `[-ndim, ndim]` name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def expm1(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns ``exp(x) - 1`` computed element-wise on the input.
This function provides greater precision than ``exp(x) - 1`` for small values of ``x``. The storage type of ``expm1`` output depends upon the input storage type: - expm1(default) = default - expm1(row_sparse) = row_sparse - expm1(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_logexp.cc:L244 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def fill_element_0index(lhs=None, mhs=None, rhs=None, name=None, attr=None, out=None, **kwargs): r"""Fills one element of each line (row for Python, column for R/Julia) in lhs according to the index indicated by rhs and the values indicated by mhs. This function assumes rhs uses 0-based indexing. Parameters ---------- lhs : NDArray Left operand to the function. mhs : NDArray Middle operand to the function. rhs : NDArray Right operand to the function. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def fix(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns the element-wise rounded value to the nearest integer towards zero of the input. Example:: fix([-2.1, -1.9, 1.9, 2.1]) = [-2., -1., 1., 2.] The storage type of ``fix`` output depends upon the input storage type: - fix(default) = default - fix(row_sparse) = row_sparse - fix(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L872 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def flatten(data=None, name=None, attr=None, out=None, **kwargs): r"""Flattens the input array into a 2-D array by collapsing the higher dimensions. .. note:: `Flatten` is deprecated. Use `flatten` instead. For an input array with shape ``(d1, d2, ..., dk)``, `flatten` operation reshapes the input array into an output array of shape ``(d1, d2*...*dk)``. Note that the behavior of this function is different from numpy.ndarray.flatten, which behaves similarly to mxnet.ndarray.reshape((-1,)). Example:: x = [[ [1,2,3], [4,5,6], [7,8,9] ], [ [1,2,3], [4,5,6], [7,8,9] ]], flatten(x) = [[ 1., 2., 3., 4., 5., 6., 7., 8., 9.], [ 1., 2., 3., 4., 5., 6., 7., 8., 9.]] Defined in ../src/operator/tensor/matrix_op.cc:L250 Parameters ---------- data : Symbol Input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def flip(data=None, axis=_Null, name=None, attr=None, out=None, **kwargs): r"""Reverses the order of elements along given axis while preserving array shape. Note: reverse and flip are equivalent. We use reverse in the following examples. Examples:: x = [[ 0., 1., 2., 3., 4.], [ 5., 6., 7., 8., 9.]] reverse(x, axis=0) = [[ 5., 6., 7., 8., 9.], [ 0., 1., 2., 3., 4.]] reverse(x, axis=1) = [[ 4., 3., 2., 1., 0.], [ 9., 8., 7., 6., 5.]] Defined in ../src/operator/tensor/matrix_op.cc:L848 Parameters ---------- data : Symbol Input data array axis : Shape(tuple), required The axis along which to reverse elements. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def floor(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise floor of the input. The floor of the scalar x is the largest integer i, such that i <= x. Example:: floor([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-3., -2., 1., 1., 2.]
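A quick eager-mode check of the example above (illustrative, not part of the generated reference; it assumes ``mxnet`` is installed and uses the NDArray counterpart of this operator)::

    import mxnet as mx
    # floor is applied element-wise to the sample values from the example
    print(mx.nd.floor(mx.nd.array([-2.1, -1.9, 1.5, 1.9, 2.1])).asnumpy())
    # [-3. -2.  1.  1.  2.]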
The storage type of ``floor`` output depends upon the input storage type: - floor(default) = default - floor(row_sparse) = row_sparse - floor(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L834 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def ftml_update(weight=None, grad=None, d=None, v=None, z=None, lr=_Null, beta1=_Null, beta2=_Null, epsilon=_Null, t=_Null, wd=_Null, rescale_grad=_Null, clip_grad=_Null, name=None, attr=None, out=None, **kwargs): r"""The FTML optimizer described in *FTML - Follow the Moving Leader in Deep Learning*, available at http://proceedings.mlr.press/v70/zheng17a/zheng17a.pdf. .. math:: g_t = \nabla J(W_{t-1})\\ v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2\\ d_t = \frac{ 1 - \beta_1^t }{ \eta_t } (\sqrt{ \frac{ v_t }{ 1 - \beta_2^t } } + \epsilon) \sigma_t = d_t - \beta_1 d_{t-1} z_t = \beta_1 z_{ t-1 } + (1 - \beta_1^t) g_t - \sigma_t W_{t-1} W_t = - \frac{ z_t }{ d_t } Defined in ../src/operator/optimizer_op.cc:L638 Parameters ---------- weight : Symbol Weight grad : Symbol Gradient d : Symbol Internal state ``d_t`` v : Symbol Internal state ``v_t`` z : Symbol Internal state ``z_t`` lr : float, required Learning rate. beta1 : float, optional, default=0.600000024 Generally close to 0.5. beta2 : float, optional, default=0.999000013 Generally close to 1. epsilon : double, optional, default=9.9999999392252903e-09 Epsilon to prevent div 0. t : int, required Number of update. wd : float, optional, default=0 Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_grad : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def ftrl_update(weight=None, grad=None, z=None, n=None, lr=_Null, lamda1=_Null, beta=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, name=None, attr=None, out=None, **kwargs): r"""Update function for Ftrl optimizer. Referenced from *Ad Click Prediction: a View from the Trenches*, available at http://dl.acm.org/citation.cfm?id=2488200. 
It updates the weights using:: rescaled_grad = clip(grad * rescale_grad, clip_gradient) z += rescaled_grad - (sqrt(n + rescaled_grad**2) - sqrt(n)) * weight / learning_rate n += rescaled_grad**2 w = (sign(z) * lamda1 - z) / ((beta + sqrt(n)) / learning_rate + wd) * (abs(z) > lamda1) If w, z and n are all of ``row_sparse`` storage type, only the row slices whose indices appear in grad.indices are updated (for w, z and n):: for row in grad.indices: rescaled_grad[row] = clip(grad[row] * rescale_grad, clip_gradient) z[row] += rescaled_grad[row] - (sqrt(n[row] + rescaled_grad[row]**2) - sqrt(n[row])) * weight[row] / learning_rate n[row] += rescaled_grad[row]**2 w[row] = (sign(z[row]) * lamda1 - z[row]) / ((beta + sqrt(n[row])) / learning_rate + wd) * (abs(z[row]) > lamda1) Defined in ../src/operator/optimizer_op.cc:L945 Parameters ---------- weight : Symbol Weight grad : Symbol Gradient z : Symbol z n : Symbol Square of grad lr : float, required Learning rate lamda1 : float, optional, default=0.00999999978 The L1 regularization coefficient. beta : float, optional, default=1 Per-Coordinate Learning Rate beta. wd : float, optional, default=0 Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def gamma(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns the gamma function (extension of the factorial function \ to the reals), computed element-wise on the input array. The storage type of ``gamma`` output is always dense Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def gammaln(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise log of the absolute value of the gamma function \ of the input. The storage type of ``gammaln`` output is always dense Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def gather_nd(data=None, indices=None, name=None, attr=None, out=None, **kwargs): r"""Gather elements or slices from `data` and store them in a tensor whose shape is defined by `indices`. Given `data` with shape `(X_0, X_1, ..., X_{N-1})` and indices with shape `(M, Y_0, ..., Y_{K-1})`, the output will have shape `(Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1})`, where `M <= N`. If `M == N`, output shape will simply be `(Y_0, ..., Y_{K-1})`. The elements in the output are defined as follows:: output[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}] = data[indices[0, y_0, ..., y_{K-1}], ..., indices[M-1, y_0, ..., y_{K-1}], x_M, ..., x_{N-1}] Examples:: data = [[0, 1], [2, 3]] indices = [[1, 1, 0], [0, 1, 0]] gather_nd(data, indices) = [2, 3, 0] data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] indices = [[0, 1], [1, 0]] gather_nd(data, indices) = [[3, 4], [5, 6]] Parameters ---------- data : Symbol data indices : Symbol indices name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol.
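Example (a minimal end-to-end sketch appended here for orientation; it is illustrative only, assumes a working ``mxnet`` installation, and uses the imperative ``mx.nd`` twin of this operator)::

    >>> import mxnet as mx
    >>> data = mx.nd.array([[0, 1], [2, 3]])
    >>> indices = mx.nd.array([[1, 1, 0], [0, 1, 0]])
    >>> mx.nd.gather_nd(data, indices).asnumpy()   # matches the first example above
    array([2., 3., 0.], dtype=float32)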
""" return (0,) def hard_sigmoid(data=None, alpha=_Null, beta=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes hard sigmoid of x element-wise. .. math:: y = max(0, min(1, alpha * x + beta)) Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L161 Parameters ---------- data : Symbol The input array. alpha : float, optional, default=0.200000003 Slope of hard sigmoid beta : float, optional, default=0.5 Bias of hard sigmoid. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def identity(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns a copy of the input. From:../src/operator/tensor/elemwise_unary_op_basic.cc:244 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def im2col(data=None, kernel=_Null, stride=_Null, dilate=_Null, pad=_Null, name=None, attr=None, out=None, **kwargs): r"""Extract sliding blocks from input array. This operator is used in vanilla convolution implementation to transform the sliding blocks on image to column matrix, then the convolution operation can be computed by matrix multiplication between column and convolution weight. Due to the close relation between im2col and convolution, the concept of **kernel**, **stride**, **dilate** and **pad** in this operator are inherited from convolution operation. Given the input data of shape :math:`(N, C, *)`, where :math:`N` is the batch size, :math:`C` is the channel size, and :math:`*` is the arbitrary spatial dimension, the output column array is always with shape :math:`(N, C \times \prod(\text{kernel}), W)`, where :math:`C \times \prod(\text{kernel})` is the block size, and :math:`W` is the block number which is the spatial size of the convolution output with same input parameters. Only 1-D, 2-D and 3-D of spatial dimension is supported in this operator. Defined in ../src/operator/nn/im2col.cc:L100 Parameters ---------- data : Symbol Input array to extract sliding blocks. kernel : Shape(tuple), required Sliding kernel size: (w,), (h, w) or (d, h, w). stride : Shape(tuple), optional, default=[] The stride between adjacent sliding blocks in spatial dimension: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension. dilate : Shape(tuple), optional, default=[] The spacing between adjacent kernel points: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension. pad : Shape(tuple), optional, default=[] The zero-value padding size on both sides of spatial dimension: (w,), (h, w) or (d, h, w). Defaults to no padding. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def khatri_rao(*args, **kwargs): r"""Computes the Khatri-Rao product of the input matrices. Given a collection of :math:`n` input matrices, .. math:: A_1 \in \mathbb{R}^{M_1 \times M}, \ldots, A_n \in \mathbb{R}^{M_n \times N}, the (column-wise) Khatri-Rao product is defined as the matrix, .. math:: X = A_1 \otimes \cdots \otimes A_n \in \mathbb{R}^{(M_1 \cdots M_n) \times N}, where the :math:`k` th column is equal to the column-wise outer product :math:`{A_1}_k \otimes \cdots \otimes {A_n}_k` where :math:`{A_i}_k` is the kth column of the ith matrix. Example:: >>> A = mx.nd.array([[1, -1], >>> [2, -3]]) >>> B = mx.nd.array([[1, 4], >>> [2, 5], >>> [3, 6]]) >>> C = mx.nd.khatri_rao(A, B) >>> print(C.asnumpy()) [[ 1. -4.] [ 2. -5.] [ 3. -6.] [ 2. -12.] [ 4. -15.] [ 6. 
-18.]] Defined in ../src/operator/contrib/krprod.cc:L108 This function supports a variable number of positional inputs. Parameters ---------- args : Symbol[] Positional input matrices name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def lamb_update_phase1(weight=None, grad=None, mean=None, var=None, beta1=_Null, beta2=_Null, epsilon=_Null, t=_Null, bias_correction=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, name=None, attr=None, out=None, **kwargs): r"""Phase I of lamb update; it performs the following operations and returns g. Link to paper: https://arxiv.org/pdf/1904.00962.pdf .. math:: \begin{gather*} grad = grad * rescale_grad if (grad < -clip_gradient) then grad = -clip_gradient if (grad > clip_gradient) then grad = clip_gradient mean = beta1 * mean + (1 - beta1) * grad; variance = beta2 * variance + (1. - beta2) * grad ^ 2; if (bias_correction) then mean_hat = mean / (1. - beta1^t); var_hat = var / (1 - beta2^t); g = mean_hat / (var_hat^(1/2) + epsilon) + wd * weight; else g = mean / (var_data^(1/2) + epsilon) + wd * weight; \end{gather*} Defined in ../src/operator/optimizer_op.cc:L1022 Parameters ---------- weight : Symbol Weight grad : Symbol Gradient mean : Symbol Moving mean var : Symbol Moving variance beta1 : float, optional, default=0.899999976 The decay rate for the 1st moment estimates. beta2 : float, optional, default=0.999000013 The decay rate for the 2nd moment estimates. epsilon : float, optional, default=9.99999997e-07 A small constant for numerical stability. t : int, required Index update count. bias_correction : boolean, optional, default=1 Whether to use bias correction. wd : float, required Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def lamb_update_phase2(weight=None, g=None, r1=None, r2=None, lr=_Null, lower_bound=_Null, upper_bound=_Null, name=None, attr=None, out=None, **kwargs): r"""Phase II of lamb update; it performs the following operations and updates grad. Link to paper: https://arxiv.org/pdf/1904.00962.pdf .. math:: \begin{gather*} if (lower_bound >= 0) then r1 = max(r1, lower_bound) if (upper_bound >= 0) then r1 = min(r1, upper_bound) if (r1 == 0 or r2 == 0) then lr = lr else lr = lr * (r1/r2) weight = weight - lr * g \end{gather*} Defined in ../src/operator/optimizer_op.cc:L1061 Parameters ---------- weight : Symbol Weight g : Symbol Output of lamb_update_phase 1 r1 : Symbol r1 r2 : Symbol r2 lr : float, required Learning rate lower_bound : float, optional, default=-1 Lower limit of norm of weight. If lower_bound <= 0, the lower limit is not set upper_bound : float, optional, default=-1 Upper limit of norm of weight. If upper_bound <= 0, the upper limit is not set name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def lars_multi_mp_sgd_mom_update(*data, **kwargs): r"""Momentum update function for multi-precision Stochastic Gradient Descent (SGD) optimizer.
Momentum update has better convergence rates on neural networks. Mathematically it looks like below: .. math:: v_1 = \alpha * \nabla J(W_0)\\ v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ W_t = W_{t-1} + v_t It updates the weights using:: v = momentum * v - learning_rate * gradient weight += v Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. Defined in ../src/operator/lars_multi_sgd.cc:L203 Parameters ---------- data : Symbol[] Weights, gradients, momentums, learning rates and weight decays momentum : float, optional, default=0 The decay rate of momentum estimates at each epoch. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). num_weights : int, optional, default='1' Number of updated weights. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def lars_multi_mp_sgd_update(*data, **kwargs): r"""Update function for multi-precision Stochastic Gradient Descent (SGD) optimizer. It updates the weights using:: weight = weight - learning_rate * (gradient + wd * weight) Defined in ../src/operator/lars_multi_sgd.cc:L143 Parameters ---------- data : Symbol[] Weights, gradients, learning rates and weight decays rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). num_weights : int, optional, default='1' Number of updated weights. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def lars_multi_sgd_mom_update(*data, **kwargs): r"""Momentum update function for Stochastic Gradient Descent (SGD) optimizer. Momentum update has better convergence rates on neural networks. Mathematically it looks like below: .. math:: v_1 = \alpha * \nabla J(W_0)\\ v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ W_t = W_{t-1} + v_t It updates the weights using:: v = momentum * v - learning_rate * gradient weight += v Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. Defined in ../src/operator/lars_multi_sgd.cc:L94 Parameters ---------- data : Symbol[] Weights, gradients, momentum, learning rates and weight decays momentum : float, optional, default=0 The decay rate of momentum estimates at each epoch. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). num_weights : int, optional, default='1' Number of updated weights. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def lars_multi_sgd_update(*data, **kwargs): r"""Update function for Stochastic Gradient Descent (SGD) optimizer.
It updates the weights using:: weight = weight - learning_rate * (gradient + wd * weight) Defined in ../src/operator/lars_multi_sgd.cc:L45 Parameters ---------- data : Symbol[] Weights, gradients, learning rates and weight decays rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). num_weights : int, optional, default='1' Number of updated weights. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def linalg_det(A=None, name=None, attr=None, out=None, **kwargs): r"""Compute the determinant of a matrix. Input is a tensor *A* of dimension *n >= 2*. If *n=2*, *A* is a square matrix. We compute: *out* = *det(A)* If *n>2*, *det* is performed separately on the trailing two dimensions for all inputs (batch mode). .. note:: The operator supports float32 and float64 data types only. .. note:: No gradient is backpropagated when A is non-invertible (which is equivalent to det(A) = 0) because zero is rarely hit upon in floating point computation and Jacobi's formula for the determinant gradient is not computationally efficient when A is non-invertible. Examples:: Single matrix determinant A = [[1., 4.], [2., 3.]] det(A) = [-5.] Batch matrix determinant A = [[[1., 4.], [2., 3.]], [[2., 3.], [1., 4.]]] det(A) = [-5., 5.] Defined in ../src/operator/tensor/la_op.cc:L975 Parameters ---------- A : Symbol Tensor of square matrix name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def linalg_extractdiag(A=None, offset=_Null, name=None, attr=None, out=None, **kwargs): r"""Extracts the diagonal entries of a square matrix. Input is a tensor *A* of dimension *n >= 2*. If *n=2*, then *A* represents a single square matrix whose diagonal elements get extracted as a 1-dimensional tensor. If *n>2*, then *A* represents a batch of square matrices on the trailing two dimensions. The extracted diagonals are returned as an *n-1*-dimensional tensor. .. note:: The operator supports float32 and float64 data types only. Examples:: Single matrix diagonal extraction A = [[1.0, 2.0], [3.0, 4.0]] extractdiag(A) = [1.0, 4.0] extractdiag(A, 1) = [2.0] Batch matrix diagonal extraction A = [[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]] extractdiag(A) = [[1.0, 4.0], [5.0, 8.0]] Defined in ../src/operator/tensor/la_op.cc:L495 Parameters ---------- A : Symbol Tensor of square matrices offset : int, optional, default='0' Offset of the diagonal versus the main diagonal. 0 corresponds to the main diagonal, a negative/positive value to diagonals below/above the main diagonal. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def linalg_extracttrian(A=None, offset=_Null, lower=_Null, name=None, attr=None, out=None, **kwargs): r"""Extracts a triangular sub-matrix from a square matrix. Input is a tensor *A* of dimension *n >= 2*. If *n=2*, then *A* represents a single square matrix from which a triangular sub-matrix is extracted as a 1-dimensional tensor. If *n>2*, then *A* represents a batch of square matrices on the trailing two dimensions. The extracted triangular sub-matrices are returned as an *n-1*-dimensional tensor.
The *offset* and *lower* parameters determine the triangle to be extracted: - When *offset = 0* either the lower or upper triangle with respect to the main diagonal is extracted depending on the value of parameter *lower*. - When *offset = k > 0* the upper triangle with respect to the k-th diagonal above the main diagonal is extracted. - When *offset = k < 0* the lower triangle with respect to the k-th diagonal below the main diagonal is extracted. .. note:: The operator supports float32 and float64 data types only. Examples:: Single triangular extraction A = [[1.0, 2.0], [3.0, 4.0]] extracttrian(A) = [1.0, 3.0, 4.0] extracttrian(A, lower=False) = [1.0, 2.0, 4.0] extracttrian(A, 1) = [2.0] extracttrian(A, -1) = [3.0] Batch triangular extraction A = [[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]] extracttrian(A) = [[1.0, 3.0, 4.0], [5.0, 7.0, 8.0]] Defined in ../src/operator/tensor/la_op.cc:L605 Parameters ---------- A : Symbol Tensor of square matrices offset : int, optional, default='0' Offset of the diagonal versus the main diagonal. 0 corresponds to the main diagonal, a negative/positive value to diagonals below/above the main diagonal. lower : boolean, optional, default=1 Refer to the lower triangular matrix if lower=true, refer to the upper otherwise. Only relevant when offset=0 name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def linalg_gelqf(A=None, name=None, attr=None, out=None, **kwargs): r"""LQ factorization for general matrix. Input is a tensor *A* of dimension *n >= 2*. If *n=2*, we compute the LQ factorization (LAPACK *gelqf*, followed by *orglq*). *A* must have shape *(x, y)* with *x <= y*, and must have full rank *=x*. The LQ factorization consists of *L* with shape *(x, x)* and *Q* with shape *(x, y)*, so that: *A* = *L* \* *Q* Here, *L* is lower triangular (upper triangle equal to zero) with nonzero diagonal, and *Q* is row-orthonormal, meaning that *Q* \* *Q*\ :sup:`T` is equal to the identity matrix of shape *(x, x)*. If *n>2*, *gelqf* is performed separately on the trailing two dimensions for all inputs (batch mode). .. note:: The operator supports float32 and float64 data types only. Examples:: Single LQ factorization A = [[1., 2., 3.], [4., 5., 6.]] Q, L = gelqf(A) Q = [[-0.26726124, -0.53452248, -0.80178373], [0.87287156, 0.21821789, -0.43643578]] L = [[-3.74165739, 0.], [-8.55235974, 1.96396101]] Batch LQ factorization A = [[[1., 2., 3.], [4., 5., 6.]], [[7., 8., 9.], [10., 11., 12.]]] Q, L = gelqf(A) Q = [[[-0.26726124, -0.53452248, -0.80178373], [0.87287156, 0.21821789, -0.43643578]], [[-0.50257071, -0.57436653, -0.64616234], [0.7620735, 0.05862104, -0.64483142]]] L = [[[-3.74165739, 0.], [-8.55235974, 1.96396101]], [[-13.92838828, 0.], [-19.09768702, 0.52758934]]] Defined in ../src/operator/tensor/la_op.cc:L798 Parameters ---------- A : Symbol Tensor of input matrices to be factorized name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def linalg_gemm(A=None, B=None, C=None, transpose_a=_Null, transpose_b=_Null, alpha=_Null, beta=_Null, axis=_Null, name=None, attr=None, out=None, **kwargs): r"""Performs general matrix multiplication and accumulation. Input are tensors *A*, *B*, *C*, each of dimension *n >= 2* and having the same shape on the leading *n-2* dimensions.
If *n=2*, the BLAS3 function *gemm* is performed: *out* = *alpha* \* *op*\ (*A*) \* *op*\ (*B*) + *beta* \* *C* Here, *alpha* and *beta* are scalar parameters, and *op()* is either the identity or matrix transposition (depending on *transpose_a*, *transpose_b*). If *n>2*, *gemm* is performed separately for a batch of matrices. The column indices of the matrices are given by the last dimensions of the tensors, the row indices by the axis specified with the *axis* parameter. By default, the trailing two dimensions will be used for matrix encoding. For a non-default axis parameter, the operation performed is equivalent to a series of swapaxes/gemm/swapaxes calls. For example, let *A*, *B*, *C* be 5-dimensional tensors. Then gemm(*A*, *B*, *C*, axis=1) is equivalent to the following without the overhead of the additional swapaxes operations:: A1 = swapaxes(A, dim1=1, dim2=3) B1 = swapaxes(B, dim1=1, dim2=3) C = swapaxes(C, dim1=1, dim2=3) C = gemm(A1, B1, C) C = swapaxes(C, dim1=1, dim2=3) When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use pseudo-float16 (float32 math with float16 I/O) precision in order to use Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. .. note:: The operator supports float32 and float64 data types only. Examples:: Single matrix multiply-add A = [[1.0, 1.0], [1.0, 1.0]] B = [[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]] C = [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]] gemm(A, B, C, transpose_b=True, alpha=2.0, beta=10.0) = [[14.0, 14.0, 14.0], [14.0, 14.0, 14.0]] Batch matrix multiply-add A = [[[1.0, 1.0]], [[0.1, 0.1]]] B = [[[1.0, 1.0]], [[0.1, 0.1]]] C = [[[10.0]], [[0.01]]] gemm(A, B, C, transpose_b=True, alpha=2.0, beta=10.0) = [[[104.0]], [[0.14]]] Defined in ../src/operator/tensor/la_op.cc:L89 Parameters ---------- A : Symbol Tensor of input matrices B : Symbol Tensor of input matrices C : Symbol Tensor of input matrices transpose_a : boolean, optional, default=0 Multiply with the transpose of the first input (A). transpose_b : boolean, optional, default=0 Multiply with the transpose of the second input (B). alpha : double, optional, default=1 Scalar factor multiplied with A*B. beta : double, optional, default=1 Scalar factor multiplied with C. axis : int, optional, default='-2' Axis corresponding to the matrix rows. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def linalg_gemm2(A=None, B=None, transpose_a=_Null, transpose_b=_Null, alpha=_Null, axis=_Null, name=None, attr=None, out=None, **kwargs): r"""Performs general matrix multiplication. Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape on the leading *n-2* dimensions. If *n=2*, the BLAS3 function *gemm* is performed: *out* = *alpha* \* *op*\ (*A*) \* *op*\ (*B*) Here *alpha* is a scalar parameter and *op()* is either the identity or the matrix transposition (depending on *transpose_a*, *transpose_b*). If *n>2*, *gemm* is performed separately for a batch of matrices. The column indices of the matrices are given by the last dimensions of the tensors, the row indices by the axis specified with the *axis* parameter. By default, the trailing two dimensions will be used for matrix encoding. For a non-default axis parameter, the operation performed is equivalent to a series of swapaxes/gemm/swapaxes calls. For example, let *A*, *B* be 5-dimensional tensors.
Then gemm(*A*, *B*, axis=1) is equivalent to the following without the overhead of the additional swapaxes operations:: A1 = swapaxes(A, dim1=1, dim2=3) B1 = swapaxes(B, dim1=1, dim2=3) C = gemm2(A1, B1) C = swapaxes(C, dim1=1, dim2=3) When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use pseudo-float16 (float32 math with float16 I/O) precision in order to use Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. .. note:: The operator supports float32 and float64 data types only. Examples:: Single matrix multiply A = [[1.0, 1.0], [1.0, 1.0]] B = [[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]] gemm2(A, B, transpose_b=True, alpha=2.0) = [[4.0, 4.0, 4.0], [4.0, 4.0, 4.0]] Batch matrix multiply A = [[[1.0, 1.0]], [[0.1, 0.1]]] B = [[[1.0, 1.0]], [[0.1, 0.1]]] gemm2(A, B, transpose_b=True, alpha=2.0) = [[[4.0]], [[0.04]]] Defined in ../src/operator/tensor/la_op.cc:L163 Parameters ---------- A : Symbol Tensor of input matrices B : Symbol Tensor of input matrices transpose_a : boolean, optional, default=0 Multiply with the transpose of the first input (A). transpose_b : boolean, optional, default=0 Multiply with the transpose of the second input (B). alpha : double, optional, default=1 Scalar factor multiplied with A*B. axis : int, optional, default='-2' Axis corresponding to the matrix row indices. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def linalg_inverse(A=None, name=None, attr=None, out=None, **kwargs): r"""Compute the inverse of a matrix. Input is a tensor *A* of dimension *n >= 2*. If *n=2*, *A* is a square matrix. We compute: *out* = *A*\ :sup:`-1` If *n>2*, *inverse* is performed separately on the trailing two dimensions for all inputs (batch mode). .. note:: The operator supports float32 and float64 data types only. Examples:: Single matrix inverse A = [[1., 4.], [2., 3.]] inverse(A) = [[-0.6, 0.8], [0.4, -0.2]] Batch matrix inverse A = [[[1., 4.], [2., 3.]], [[1., 3.], [2., 4.]]] inverse(A) = [[[-0.6, 0.8], [0.4, -0.2]], [[-2., 1.5], [1., -0.5]]] Defined in ../src/operator/tensor/la_op.cc:L920 Parameters ---------- A : Symbol Tensor of square matrix name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def linalg_makediag(A=None, offset=_Null, name=None, attr=None, out=None, **kwargs): r"""Constructs a square matrix with the input as diagonal. Input is a tensor *A* of dimension *n >= 1*. If *n=1*, then *A* represents the diagonal entries of a single square matrix. This matrix will be returned as a 2-dimensional tensor. If *n>1*, then *A* represents a batch of diagonals of square matrices. The batch of diagonal matrices will be returned as an *n+1*-dimensional tensor. .. note:: The operator supports float32 and float64 data types only. Examples:: Single diagonal matrix construction A = [1.0, 2.0] makediag(A) = [[1.0, 0.0], [0.0, 2.0]] makediag(A, 1) = [[0.0, 1.0, 0.0], [0.0, 0.0, 2.0], [0.0, 0.0, 0.0]] Batch diagonal matrix construction A = [[1.0, 2.0], [3.0, 4.0]] makediag(A) = [[[1.0, 0.0], [0.0, 2.0]], [[3.0, 0.0], [0.0, 4.0]]] Defined in ../src/operator/tensor/la_op.cc:L547 Parameters ---------- A : Symbol Tensor of diagonal entries offset : int, optional, default='0' Offset of the diagonal versus the main diagonal.
0 corresponds to the main diagonal, a negative/positive value to diagonals below/above the main diagonal. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def linalg_maketrian(A=None, offset=_Null, lower=_Null, name=None, attr=None, out=None, **kwargs): r"""Constructs a square matrix with the input representing a specific triangular sub-matrix. This is basically the inverse of *linalg.extracttrian*. Input is a tensor *A* of dimension *n >= 1*. If *n=1*, then *A* represents the entries of a triangular matrix which is lower triangular if *offset<0* or *offset=0*, *lower=true*. The resulting matrix is derived by first constructing the square matrix with the entries outside the triangle set to zero and then adding *offset*-times an additional diagonal with zero entries to the square matrix. If *n>1*, then *A* represents a batch of triangular sub-matrices. The batch of corresponding square matrices is returned as an *n+1*-dimensional tensor. .. note:: The operator supports float32 and float64 data types only. Examples:: Single matrix construction A = [1.0, 2.0, 3.0] maketrian(A) = [[1.0, 0.0], [2.0, 3.0]] maketrian(A, lower=false) = [[1.0, 2.0], [0.0, 3.0]] maketrian(A, offset=1) = [[0.0, 1.0, 2.0], [0.0, 0.0, 3.0], [0.0, 0.0, 0.0]] maketrian(A, offset=-1) = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]] Batch matrix construction A = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]] maketrian(A) = [[[1.0, 0.0], [2.0, 3.0]], [[4.0, 0.0], [5.0, 6.0]]] maketrian(A, offset=1) = [[[0.0, 1.0, 2.0], [0.0, 0.0, 3.0], [0.0, 0.0, 0.0]], [[0.0, 4.0, 5.0], [0.0, 0.0, 6.0], [0.0, 0.0, 0.0]]] Defined in ../src/operator/tensor/la_op.cc:L673 Parameters ---------- A : Symbol Tensor of triangular matrices stored as vectors offset : int, optional, default='0' Offset of the diagonal versus the main diagonal. 0 corresponds to the main diagonal, a negative/positive value to diagonals below/above the main diagonal. lower : boolean, optional, default=1 Refer to the lower triangular matrix if lower=true, refer to the upper otherwise. Only relevant when offset=0 name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def linalg_potrf(A=None, name=None, attr=None, out=None, **kwargs): r"""Performs Cholesky factorization of a symmetric positive-definite matrix. Input is a tensor *A* of dimension *n >= 2*. If *n=2*, the Cholesky factor *B* of the symmetric, positive definite matrix *A* is computed. *B* is triangular (entries of upper or lower triangle are all zero), has positive diagonal entries, and: *A* = *B* \* *B*\ :sup:`T` if *lower* = *true* *A* = *B*\ :sup:`T` \* *B* if *lower* = *false* If *n>2*, *potrf* is performed separately on the trailing two dimensions for all inputs (batch mode). .. note:: The operator supports float32 and float64 data types only. Examples:: Single matrix factorization A = [[4.0, 1.0], [1.0, 4.25]] potrf(A) = [[2.0, 0], [0.5, 2.0]] Batch matrix factorization A = [[[4.0, 1.0], [1.0, 4.25]], [[16.0, 4.0], [4.0, 17.0]]] potrf(A) = [[[2.0, 0], [0.5, 2.0]], [[4.0, 0], [1.0, 4.0]]] Defined in ../src/operator/tensor/la_op.cc:L214 Parameters ---------- A : Symbol Tensor of input matrices to be decomposed name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def linalg_potri(A=None, name=None, attr=None, out=None, **kwargs): r"""Performs matrix inversion from a Cholesky factorization. Input is a tensor *A* of dimension *n >= 2*. 
If *n=2*, *A* is a triangular matrix (entries of upper or lower triangle are all zero) with positive diagonal. We compute: *out* = *A*\ :sup:`-T` \* *A*\ :sup:`-1` if *lower* = *true* *out* = *A*\ :sup:`-1` \* *A*\ :sup:`-T` if *lower* = *false* In other words, if *A* is the Cholesky factor of a symmetric positive definite matrix *B* (obtained by *potrf*), then *out* = *B*\ :sup:`-1` If *n>2*, *potri* is performed separately on the trailing two dimensions for all inputs (batch mode). .. note:: The operator supports float32 and float64 data types only. .. note:: Use this operator only if you are certain you need the inverse of *B*, and cannot use the Cholesky factor *A* (*potrf*), together with backsubstitution (*trsm*). The latter is numerically much safer, and also cheaper. Examples:: Single matrix inverse A = [[2.0, 0], [0.5, 2.0]] potri(A) = [[0.26563, -0.0625], [-0.0625, 0.25]] Batch matrix inverse A = [[[2.0, 0], [0.5, 2.0]], [[4.0, 0], [1.0, 4.0]]] potri(A) = [[[0.26563, -0.0625], [-0.0625, 0.25]], [[0.06641, -0.01562], [-0.01562, 0.0625]]] Defined in ../src/operator/tensor/la_op.cc:L275 Parameters ---------- A : Symbol Tensor of lower triangular matrices name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def linalg_slogdet(A=None, name=None, attr=None, out=None, **kwargs): r"""Compute the sign and log of the determinant of a matrix. Input is a tensor *A* of dimension *n >= 2*. If *n=2*, *A* is a square matrix. We compute: *sign* = *sign(det(A))* *logabsdet* = *log(abs(det(A)))* If *n>2*, *slogdet* is performed separately on the trailing two dimensions for all inputs (batch mode). .. note:: The operator supports float32 and float64 data types only. .. note:: The gradient is not properly defined for sign, so its gradient is not backpropagated. .. note:: No gradient is backpropagated when A is non-invertible. Please see the docs of the operator det for details. Examples:: Single matrix signed log determinant A = [[2., 3.], [1., 4.]] sign, logabsdet = slogdet(A) sign = [1.] logabsdet = [1.609438] Batch matrix signed log determinant A = [[[2., 3.], [1., 4.]], [[1., 2.], [2., 4.]], [[1., 2.], [4., 3.]]] sign, logabsdet = slogdet(A) sign = [1., 0., -1.] logabsdet = [1.609438, -inf, 1.609438] Defined in ../src/operator/tensor/la_op.cc:L1034 Parameters ---------- A : Symbol Tensor of square matrix name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def linalg_sumlogdiag(A=None, name=None, attr=None, out=None, **kwargs): r"""Computes the sum of the logarithms of the diagonal elements of a square matrix. Input is a tensor *A* of dimension *n >= 2*. If *n=2*, *A* must be square with positive diagonal entries. We sum the natural logarithms of the diagonal elements; the result has shape (1,). If *n>2*, *sumlogdiag* is performed separately on the trailing two dimensions for all inputs (batch mode). .. note:: The operator supports float32 and float64 data types only. Examples:: Single matrix reduction A = [[1.0, 1.0], [1.0, 7.0]] sumlogdiag(A) = [1.9459] Batch matrix reduction A = [[[1.0, 1.0], [1.0, 7.0]], [[3.0, 0], [0, 17.0]]] sumlogdiag(A) = [1.9459, 3.9318] Defined in ../src/operator/tensor/la_op.cc:L445 Parameters ---------- A : Symbol Tensor of square matrices name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol.
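Example (a short verification sketch; an illustration added here, not part of the upstream docs. It assumes ``mxnet`` and ``numpy`` are installed and that this operator is also exposed imperatively as ``mx.nd.linalg_sumlogdiag``)::

    >>> import mxnet as mx
    >>> import numpy as np
    >>> A = mx.nd.array([[1.0, 1.0], [1.0, 7.0]])
    >>> mx.nd.linalg_sumlogdiag(A)    # expected ~[1.9459]
    >>> np.log(1.0) + np.log(7.0)     # 1.9459..., the same quantity for comparison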
""" return (0,) def linalg_syrk(A=None, transpose=_Null, alpha=_Null, name=None, attr=None, out=None, **kwargs): r"""Multiplication of matrix with its transpose. Input is a tensor *A* of dimension *n >= 2*. If *n=2*, the operator performs the BLAS3 function *syrk*: *out* = *alpha* \* *A* \* *A*\ :sup:`T` if *transpose=False*, or *out* = *alpha* \* *A*\ :sup:`T` \ \* *A* if *transpose=True*. If *n>2*, *syrk* is performed separately on the trailing two dimensions for all inputs (batch mode). .. note:: The operator supports float32 and float64 data types only. Examples:: Single matrix multiply A = [[1., 2., 3.], [4., 5., 6.]] syrk(A, alpha=1., transpose=False) = [[14., 32.], [32., 77.]] syrk(A, alpha=1., transpose=True) = [[17., 22., 27.], [22., 29., 36.], [27., 36., 45.]] Batch matrix multiply A = [[[1., 1.]], [[0.1, 0.1]]] syrk(A, alpha=2., transpose=False) = [[[4.]], [[0.04]]] Defined in ../src/operator/tensor/la_op.cc:L730 Parameters ---------- A : Symbol Tensor of input matrices transpose : boolean, optional, default=0 Use transpose of input matrix. alpha : double, optional, default=1 Scalar factor to be applied to the result. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def linalg_trmm(A=None, B=None, transpose=_Null, rightside=_Null, lower=_Null, alpha=_Null, name=None, attr=None, out=None, **kwargs): r"""Performs multiplication with a lower triangular matrix. Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape on the leading *n-2* dimensions. If *n=2*, *A* must be triangular. The operator performs the BLAS3 function *trmm*: *out* = *alpha* \* *op*\ (*A*) \* *B* if *rightside=False*, or *out* = *alpha* \* *B* \* *op*\ (*A*) if *rightside=True*. Here, *alpha* is a scalar parameter, and *op()* is either the identity or the matrix transposition (depending on *transpose*). If *n>2*, *trmm* is performed separately on the trailing two dimensions for all inputs (batch mode). .. note:: The operator supports float32 and float64 data types only. Examples:: Single triangular matrix multiply A = [[1.0, 0], [1.0, 1.0]] B = [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]] trmm(A, B, alpha=2.0) = [[2.0, 2.0, 2.0], [4.0, 4.0, 4.0]] Batch triangular matrix multiply A = [[[1.0, 0], [1.0, 1.0]], [[1.0, 0], [1.0, 1.0]]] B = [[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]] trmm(A, B, alpha=2.0) = [[[2.0, 2.0, 2.0], [4.0, 4.0, 4.0]], [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]] Defined in ../src/operator/tensor/la_op.cc:L333 Parameters ---------- A : Symbol Tensor of lower triangular matrices B : Symbol Tensor of matrices transpose : boolean, optional, default=0 Use transposed of the triangular matrix rightside : boolean, optional, default=0 Multiply triangular matrix from the right to non-triangular one. lower : boolean, optional, default=1 True if the triangular matrix is lower triangular, false if it is upper triangular. alpha : double, optional, default=1 Scalar factor to be applied to the result. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def linalg_trsm(A=None, B=None, transpose=_Null, rightside=_Null, lower=_Null, alpha=_Null, name=None, attr=None, out=None, **kwargs): r"""Solves matrix equation involving a lower triangular matrix. Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape on the leading *n-2* dimensions. If *n=2*, *A* must be triangular. 
The operator performs the BLAS3 function *trsm*, solving for *out* in: *op*\ (*A*) \* *out* = *alpha* \* *B* if *rightside=False*, or *out* \* *op*\ (*A*) = *alpha* \* *B* if *rightside=True*. Here, *alpha* is a scalar parameter, and *op()* is either the identity or the matrix transposition (depending on *transpose*). If *n>2*, *trsm* is performed separately on the trailing two dimensions for all inputs (batch mode). .. note:: The operator supports float32 and float64 data types only. Examples:: Single matrix solve A = [[1.0, 0], [1.0, 1.0]] B = [[2.0, 2.0, 2.0], [4.0, 4.0, 4.0]] trsm(A, B, alpha=0.5) = [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]] Batch matrix solve A = [[[1.0, 0], [1.0, 1.0]], [[1.0, 0], [1.0, 1.0]]] B = [[[2.0, 2.0, 2.0], [4.0, 4.0, 4.0]], [[4.0, 4.0, 4.0], [8.0, 8.0, 8.0]]] trsm(A, B, alpha=0.5) = [[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], [[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]] Defined in ../src/operator/tensor/la_op.cc:L396 Parameters ---------- A : Symbol Tensor of lower triangular matrices B : Symbol Tensor of matrices transpose : boolean, optional, default=0 Use transposed of the triangular matrix rightside : boolean, optional, default=0 Multiply triangular matrix from the right to non-triangular one. lower : boolean, optional, default=1 True if the triangular matrix is lower triangular, false if it is upper triangular. alpha : double, optional, default=1 Scalar factor to be applied to the result. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def log(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise Natural logarithmic value of the input. The natural logarithm is logarithm in base *e*, so that ``log(exp(x)) = x`` The storage type of ``log`` output is always dense Defined in ../src/operator/tensor/elemwise_unary_op_logexp.cc:L77 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def log10(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise Base-10 logarithmic value of the input. ``10**log10(x) = x`` The storage type of ``log10`` output is always dense Defined in ../src/operator/tensor/elemwise_unary_op_logexp.cc:L94 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def log1p(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise ``log(1 + x)`` value of the input. This function is more accurate than ``log(1 + x)`` for small ``x`` so that :math:`1+x\approx 1` The storage type of ``log1p`` output depends upon the input storage type: - log1p(default) = default - log1p(row_sparse) = row_sparse - log1p(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_logexp.cc:L199 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def log2(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise Base-2 logarithmic value of the input. ``2**log2(x) = x`` The storage type of ``log2`` output is always dense Defined in ../src/operator/tensor/elemwise_unary_op_logexp.cc:L106 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. 
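Example (a small symbolic usage sketch; illustrative only, assuming a working ``mxnet`` install where ``Symbol.eval`` binds the given arrays and returns a list of output ``NDArray`` objects)::

    >>> import mxnet as mx
    >>> x = mx.sym.Variable('x')
    >>> y = mx.sym.log2(x)
    >>> y.eval(ctx=mx.cpu(), x=mx.nd.array([1., 2., 8.]))[0].asnumpy()
    array([0., 1., 3.], dtype=float32)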
""" return (0,) def log_softmax(data=None, axis=_Null, temperature=_Null, dtype=_Null, use_length=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the log softmax of the input. This is equivalent to computing softmax followed by log. Examples:: >>> x = mx.nd.array([1, 2, .1]) >>> mx.nd.log_softmax(x).asnumpy() array([-1.41702998, -0.41702995, -2.31702995], dtype=float32) >>> x = mx.nd.array( [[1, 2, .1],[.1, 2, 1]] ) >>> mx.nd.log_softmax(x, axis=0).asnumpy() array([[-0.34115392, -0.69314718, -1.24115396], [-1.24115396, -0.69314718, -0.34115392]], dtype=float32) Parameters ---------- data : Symbol The input array. axis : int, optional, default='-1' The axis along which to compute softmax. temperature : double or None, optional, default=None Temperature parameter in softmax dtype : {None, 'float16', 'float32', 'float64'},optional, default='None' DType of the output in case this can't be inferred. Defaults to the same as input's dtype if not defined (dtype=None). use_length : boolean or None, optional, default=0 Whether to use the length input as a mask over the data input. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def logical_not(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns the result of logical NOT (!) function Example: logical_not([-2., 0., 1.]) = [0., 1., 0.] Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def make_loss(data=None, name=None, attr=None, out=None, **kwargs): r"""Make your own loss function in network construction. This operator accepts a customized loss function symbol as a terminal loss and the symbol should be an operator with no backward dependency. The output of this function is the gradient of loss with respect to the input data. For example, if you are a making a cross entropy loss function. Assume ``out`` is the predicted output and ``label`` is the true label, then the cross entropy can be defined as:: cross_entropy = label * log(out) + (1 - label) * log(1 - out) loss = make_loss(cross_entropy) We will need to use ``make_loss`` when we are creating our own loss function or we want to combine multiple loss functions. Also we may want to stop some variables' gradients from backpropagation. See more detail in ``BlockGrad`` or ``stop_gradient``. The storage type of ``make_loss`` output depends upon the input storage type: - make_loss(default) = default - make_loss(row_sparse) = row_sparse Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L358 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def max(data=None, axis=_Null, keepdims=_Null, exclude=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the max of array elements over given axes. Defined in ../src/operator/tensor/./broadcast_reduce_op.h:L32 Parameters ---------- data : Symbol The input axis : Shape or None, optional, default=None The axis or axes along which to perform the reduction. The default, `axis=()`, will compute over all elements into a scalar array with shape `(1,)`. If `axis` is int, a reduction is performed on a particular axis. If `axis` is a tuple of ints, a reduction is performed on all the axes specified in the tuple. If `exclude` is true, reduction will be performed on the axes that are NOT in axis instead. 
Negative values mean indexing from right to left. keepdims : boolean, optional, default=0 If this is set to `True`, the reduced axes are left in the result as dimension with size one. exclude : boolean, optional, default=0 Whether to perform reduction on axes that are NOT in axis instead. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def max_axis(data=None, axis=_Null, keepdims=_Null, exclude=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the max of array elements over given axes. Defined in ../src/operator/tensor/./broadcast_reduce_op.h:L32 Parameters ---------- data : Symbol The input axis : Shape or None, optional, default=None The axis or axes along which to perform the reduction. The default, `axis=()`, will compute over all elements into a scalar array with shape `(1,)`. If `axis` is int, a reduction is performed on a particular axis. If `axis` is a tuple of ints, a reduction is performed on all the axes specified in the tuple. If `exclude` is true, reduction will be performed on the axes that are NOT in axis instead. Negative values mean indexing from right to left. keepdims : boolean, optional, default=0 If this is set to `True`, the reduced axes are left in the result as dimension with size one. exclude : boolean, optional, default=0 Whether to perform reduction on axes that are NOT in axis instead. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def mean(data=None, axis=_Null, keepdims=_Null, exclude=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the mean of array elements over given axes. Defined in ../src/operator/tensor/./broadcast_reduce_op.h:L84 Parameters ---------- data : Symbol The input axis : Shape or None, optional, default=None The axis or axes along which to perform the reduction. The default, `axis=()`, will compute over all elements into a scalar array with shape `(1,)`. If `axis` is int, a reduction is performed on a particular axis. If `axis` is a tuple of ints, a reduction is performed on all the axes specified in the tuple. If `exclude` is true, reduction will be performed on the axes that are NOT in axis instead. Negative values mean indexing from right to left. keepdims : boolean, optional, default=0 If this is set to `True`, the reduced axes are left in the result as dimension with size one. exclude : boolean, optional, default=0 Whether to perform reduction on axes that are NOT in axis instead. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def min(data=None, axis=_Null, keepdims=_Null, exclude=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the min of array elements over given axes. Defined in ../src/operator/tensor/./broadcast_reduce_op.h:L47 Parameters ---------- data : Symbol The input axis : Shape or None, optional, default=None The axis or axes along which to perform the reduction. The default, `axis=()`, will compute over all elements into a scalar array with shape `(1,)`. If `axis` is int, a reduction is performed on a particular axis. If `axis` is a tuple of ints, a reduction is performed on all the axes specified in the tuple. If `exclude` is true, reduction will be performed on the axes that are NOT in axis instead. Negative values mean indexing from right to left. keepdims : boolean, optional, default=0 If this is set to `True`, the reduced axes are left in the result as dimension with size one.
exclude : boolean, optional, default=0 Whether to perform reduction on axes that are NOT in axis instead. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def min_axis(data=None, axis=_Null, keepdims=_Null, exclude=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the min of array elements over given axes. Defined in ../src/operator/tensor/./broadcast_reduce_op.h:L47 Parameters ---------- data : Symbol The input axis : Shape or None, optional, default=None The axis or axes along which to perform the reduction. The default, `axis=()`, will compute over all elements into a scalar array with shape `(1,)`. If `axis` is int, a reduction is performed on a particular axis. If `axis` is a tuple of ints, a reduction is performed on all the axes specified in the tuple. If `exclude` is true, reduction will be performed on the axes that are NOT in axis instead. Negative values mean indexing from right to left. keepdims : boolean, optional, default=0 If this is set to `True`, the reduced axes are left in the result as dimension with size one. exclude : boolean, optional, default=0 Whether to perform reduction on axes that are NOT in axis instead. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def moments(data=None, axes=_Null, keepdims=_Null, name=None, attr=None, out=None, **kwargs): r""" Calculate the mean and variance of `data`. The mean and variance are calculated by aggregating the contents of data across axes. If x is 1-D and axes = [0] this is just the mean and variance of a vector. Example: x = [[1, 2, 3], [4, 5, 6]] mean, var = moments(data=x, axes=[0]) mean = [2.5, 3.5, 4.5] var = [2.25, 2.25, 2.25] mean, var = moments(data=x, axes=[1]) mean = [2.0, 5.0] var = [0.66666667, 0.66666667] mean, var = moments(data=x, axes=[0, 1]) mean = [3.5] var = [2.9166667] Defined in ../src/operator/nn/moments.cc:L54 Parameters ---------- data : Symbol Input ndarray axes : Shape or None, optional, default=None Array of ints. Axes along which to compute mean and variance. keepdims : boolean, optional, default=0 produce moments with the same dimensionality as the input. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def mp_lamb_update_phase1(weight=None, grad=None, mean=None, var=None, weight32=None, beta1=_Null, beta2=_Null, epsilon=_Null, t=_Null, bias_correction=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, name=None, attr=None, out=None, **kwargs): r"""Mixed Precision version of Phase I of lamb update; it performs the following operations and returns g. Link to paper: https://arxiv.org/pdf/1904.00962.pdf .. math:: \begin{gather*} grad32 = grad(float16) * rescale_grad if (grad < -clip_gradient) then grad = -clip_gradient if (grad > clip_gradient) then grad = clip_gradient mean = beta1 * mean + (1 - beta1) * grad; variance = beta2 * variance + (1. - beta2) * grad ^ 2; if (bias_correction) then mean_hat = mean / (1. - beta1^t); var_hat = var / (1 - beta2^t); g = mean_hat / (var_hat^(1/2) + epsilon) + wd * weight32; else g = mean / (var_data^(1/2) + epsilon) + wd * weight32; \end{gather*} Defined in ../src/operator/optimizer_op.cc:L1102 Parameters ---------- weight : Symbol Weight grad : Symbol Gradient mean : Symbol Moving mean var : Symbol Moving variance weight32 : Symbol Weight32 beta1 : float, optional, default=0.899999976 The decay rate for the 1st moment estimates.
beta2 : float, optional, default=0.999000013 The decay rate for the 2nd moment estimates. epsilon : float, optional, default=9.99999997e-07 A small constant for numerical stability. t : int, required Index update count. bias_correction : boolean, optional, default=1 Whether to use bias correction. wd : float, required Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def mp_lamb_update_phase2(weight=None, g=None, r1=None, r2=None, weight32=None, lr=_Null, lower_bound=_Null, upper_bound=_Null, name=None, attr=None, out=None, **kwargs): r"""Mixed Precision version of Phase II of lamb update; it performs the following operations and updates grad. Link to paper: https://arxiv.org/pdf/1904.00962.pdf .. math:: \begin{gather*} if (lower_bound >= 0) then r1 = max(r1, lower_bound) if (upper_bound >= 0) then r1 = min(r1, upper_bound) if (r1 == 0 or r2 == 0) then lr = lr else lr = lr * (r1/r2) weight32 = weight32 - lr * g weight(float16) = weight32 \end{gather*} Defined in ../src/operator/optimizer_op.cc:L1144 Parameters ---------- weight : Symbol Weight g : Symbol Output of mp_lamb_update_phase 1 r1 : Symbol r1 r2 : Symbol r2 weight32 : Symbol Weight32 lr : float, required Learning rate lower_bound : float, optional, default=-1 Lower limit of norm of weight. If lower_bound <= 0, the lower limit is not set upper_bound : float, optional, default=-1 Upper limit of norm of weight. If upper_bound <= 0, the upper limit is not set name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def mp_nag_mom_update(name=None, attr=None, out=None, **kwargs): r""" Parameters ---------- name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def mp_sgd_mom_update(weight=None, grad=None, mom=None, weight32=None, lr=_Null, momentum=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, lazy_update=_Null, name=None, attr=None, out=None, **kwargs): r"""Updater function for multi-precision SGD optimizer. Parameters ---------- weight : Symbol Weight grad : Symbol Gradient mom : Symbol Momentum weight32 : Symbol Weight32 lr : float, required Learning rate momentum : float, optional, default=0 The decay rate of momentum estimates at each epoch. wd : float, optional, default=0 Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). lazy_update : boolean, optional, default=1 If true, lazy updates are applied if gradient's stype is row_sparse and both weight and momentum have the same stype name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol.
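As a reading aid, a NumPy reference sketch of the update rule described above (an interpretation assembled from the parameter docs, not the MXNet implementation; the function name and in-place convention are illustrative)::

    import numpy as np

    def mp_sgd_mom_ref(weight16, grad16, mom, weight32, lr,
                       momentum=0.0, wd=0.0, rescale_grad=1.0,
                       clip_gradient=-1.0):
        # All arithmetic happens on the float32 master copy (weight32).
        grad = rescale_grad * grad16.astype(np.float32)
        if clip_gradient > 0:
            grad = np.clip(grad, -clip_gradient, clip_gradient)
        mom[:] = momentum * mom - lr * (grad + wd * weight32)
        weight32 += mom
        weight16[:] = weight32.astype(np.float16)  # keep the low-precision copy in sync
        return weight16, mom, weight32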
""" return (0,) def mp_sgd_update(weight=None, grad=None, weight32=None, lr=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, lazy_update=_Null, name=None, attr=None, out=None, **kwargs): r"""Updater function for multi-precision sgd optimizer Parameters ---------- weight : Symbol Weight grad : Symbol gradient weight32 : Symbol Weight32 lr : float, required Learning rate wd : float, optional, default=0 Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). lazy_update : boolean, optional, default=1 If true, lazy updates are applied if gradient's stype is row_sparse. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def multi_all_finite(*data, **kwargs): r"""Check if all the float numbers in all the arrays are finite (used for AMP) Defined in ../src/operator/contrib/all_finite.cc:L133 Parameters ---------- data : Symbol[] Arrays num_arrays : int, optional, default='1' Number of arrays. init_output : boolean, optional, default=1 Initialize output to 1. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def multi_lars(lrs=None, weights_sum_sq=None, grads_sum_sq=None, wds=None, eta=_Null, eps=_Null, rescale_grad=_Null, name=None, attr=None, out=None, **kwargs): r"""Compute the LARS coefficients of multiple weights and grads from their sums of square" Defined in ../src/operator/contrib/multi_lars.cc:L37 Parameters ---------- lrs : Symbol Learning rates to scale by LARS coefficient weights_sum_sq : Symbol sum of square of weights arrays grads_sum_sq : Symbol sum of square of gradients arrays wds : Symbol weight decays eta : float, required LARS eta eps : float, required LARS eps rescale_grad : float, optional, default=1 Gradient rescaling factor name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def multi_mp_nag_mom_update(*data, **kwargs): r"""Update function for multi-precision Nesterov Accelerated Gradient( NAG) optimizer. Defined in ../src/operator/optimizer_op.cc:L793 Parameters ---------- data : Symbol[] Weights lrs : tuple of <float>, required Learning rates. wds : tuple of <float>, required Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. momentum : float, optional, default=0 The decay rate of momentum estimates at each epoch. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). num_weights : int, optional, default='1' Number of updated weights. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def multi_mp_sgd_mom_update(*data, **kwargs): r"""Momentum update function for multi-precision Stochastic Gradient Descent (SGD) optimizer. 
Momentum update has better convergence rates on neural networks. Mathematically it looks like below: .. math:: v_1 = \nabla J(W_0)\\ v_t = \gamma v_{t-1} - \nabla J(W_{t-1})\\ W_t = W_{t-1} + \alpha * v_t It updates the weights using:: v = momentum * v - gradient weight += learning_rate * v Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. Defined in ../src/operator/optimizer_op.cc:L470 Parameters ---------- data : Symbol[] Weights lrs : tuple of <float>, required Learning rates. wds : tuple of <float>, required Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. momentum : float, optional, default=0 The decay rate of momentum estimates at each epoch. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). num_weights : int, optional, default='1' Number of updated weights. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def multi_mp_sgd_update(*data, **kwargs): r"""Update function for multi-precision Stochastic Gradient Descent (SGD) optimizer. It updates the weights using:: weight = weight - learning_rate * (gradient + wd * weight) Defined in ../src/operator/optimizer_op.cc:L415 Parameters ---------- data : Symbol[] Weights lrs : tuple of <float>, required Learning rates. wds : tuple of <float>, required Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). num_weights : int, optional, default='1' Number of updated weights. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def multi_nag_mom_update(*data, **kwargs): r"""Update function for Nesterov Accelerated Gradient (NAG) optimizer. It updates the weights using the following formula, .. math:: v_t = \gamma v_{t-1} + \eta * \nabla J(W_{t-1} - \gamma v_{t-1})\\ W_t = W_{t-1} - v_t Where :math:`\eta` is the learning rate of the optimizer, :math:`\gamma` is the decay rate of the momentum estimate, :math:`v_t` is the update vector at time step `t`, and :math:`W_t` is the weight vector at time step `t`. Defined in ../src/operator/optimizer_op.cc:L754 Parameters ---------- data : Symbol[] Weights, gradients and momentum lrs : tuple of <float>, required Learning rates. wds : tuple of <float>, required Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. momentum : float, optional, default=0 The decay rate of momentum estimates at each epoch. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off.
grad = max(min(grad, clip_gradient), -clip_gradient). num_weights : int, optional, default='1' Number of updated weights. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def multi_sgd_mom_update(*data, **kwargs): r"""Momentum update function for Stochastic Gradient Descent (SGD) optimizer. Momentum update has better convergence rates on neural networks. Mathematically it looks like below: .. math:: v_1 = \nabla J(W_0)\\ v_t = \gamma v_{t-1} - \nabla J(W_{t-1})\\ W_t = W_{t-1} + \alpha * v_t It updates the weights using:: v = momentum * v - gradient weight += learning_rate * v Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. Defined in ../src/operator/optimizer_op.cc:L372 Parameters ---------- data : Symbol[] Weights, gradients and momentum lrs : tuple of <float>, required Learning rates. wds : tuple of <float>, required Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. momentum : float, optional, default=0 The decay rate of momentum estimates at each epoch. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). num_weights : int, optional, default='1' Number of updated weights. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def multi_sgd_update(*data, **kwargs): r"""Update function for Stochastic Gradient Descent (SGD) optimizer. It updates the weights using:: weight = weight - learning_rate * (gradient + wd * weight) Defined in ../src/operator/optimizer_op.cc:L327 Parameters ---------- data : Symbol[] Weights lrs : tuple of <float>, required Learning rates. wds : tuple of <float>, required Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). num_weights : int, optional, default='1' Number of updated weights. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def multi_sum_sq(*data, **kwargs): r"""Compute the sums of squares of multiple arrays. Defined in ../src/operator/contrib/multi_sum_sq.cc:L36 Parameters ---------- data : Symbol[] Arrays num_arrays : int, required number of input arrays. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def nag_mom_update(weight=None, grad=None, mom=None, lr=_Null, momentum=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, name=None, attr=None, out=None, **kwargs): r"""Update function for Nesterov Accelerated Gradient (NAG) optimizer. It updates the weights using the following formula, ..
math:: v_t = \gamma v_{t-1} + \eta * \nabla J(W_{t-1} - \gamma v_{t-1})\\ W_t = W_{t-1} - v_t Where :math:`\eta` is the learning rate of the optimizer, :math:`\gamma` is the decay rate of the momentum estimate, :math:`v_t` is the update vector at time step `t`, and :math:`W_t` is the weight vector at time step `t`. Defined in ../src/operator/optimizer_op.cc:L724 Parameters ---------- weight : Symbol Weight grad : Symbol Gradient mom : Symbol Momentum lr : float, required Learning rate momentum : float, optional, default=0 The decay rate of momentum estimates at each epoch. wd : float, optional, default=0 Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def nanprod(data=None, axis=_Null, keepdims=_Null, exclude=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the product of array elements over given axes treating Not a Numbers (``NaN``) as one. Defined in ../src/operator/tensor/broadcast_reduce_prod_value.cc:L47 Parameters ---------- data : Symbol The input axis : Shape or None, optional, default=None The axis or axes along which to perform the reduction. The default, `axis=()`, will compute over all elements into a scalar array with shape `(1,)`. If `axis` is int, a reduction is performed on a particular axis. If `axis` is a tuple of ints, a reduction is performed on all the axes specified in the tuple. If `exclude` is true, reduction will be performed on the axes that are NOT in axis instead. Negative values mean indexing from right to left. keepdims : boolean, optional, default=0 If this is set to `True`, the reduced axes are left in the result as dimensions with size one. exclude : boolean, optional, default=0 Whether to perform reduction on axes that are NOT in `axis` instead. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def nansum(data=None, axis=_Null, keepdims=_Null, exclude=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the sum of array elements over given axes treating Not a Numbers (``NaN``) as zero. Defined in ../src/operator/tensor/broadcast_reduce_sum_value.cc:L102 Parameters ---------- data : Symbol The input axis : Shape or None, optional, default=None The axis or axes along which to perform the reduction. The default, `axis=()`, will compute over all elements into a scalar array with shape `(1,)`. If `axis` is int, a reduction is performed on a particular axis. If `axis` is a tuple of ints, a reduction is performed on all the axes specified in the tuple. If `exclude` is true, reduction will be performed on the axes that are NOT in axis instead. Negative values mean indexing from right to left. keepdims : boolean, optional, default=0 If this is set to `True`, the reduced axes are left in the result as dimensions with size one. exclude : boolean, optional, default=0 Whether to perform reduction on axes that are NOT in `axis` instead. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol.
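Example (a minimal usage sketch, not part of the generated documentation; it assumes this operator is exposed as ``mx.sym.nansum`` and that ``Symbol.eval`` is available for eager evaluation)::

    import mxnet as mx
    x = mx.sym.Variable('x')
    s = mx.sym.nansum(x, axis=1)  # NaN entries contribute zero to the sum
    out = s.eval(x=mx.nd.array([[1., float('nan')], [2., 3.]]))[0]
    # expected: [1., 5.], since the NaN in row 0 is treated as zero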
""" return (0,) def negative(data=None, name=None, attr=None, out=None, **kwargs): r"""Numerical negative of the argument, element-wise. The storage type of ``negative`` output depends upon the input storage type: - negative(default) = default - negative(row_sparse) = row_sparse - negative(csr) = csr Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def norm(data=None, ord=_Null, axis=_Null, out_dtype=_Null, keepdims=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the norm on an NDArray. This operator computes the norm on an NDArray with the specified axis, depending on the value of the ord parameter. By default, it computes the L2 norm on the entire array. Currently only ord=2 supports sparse ndarrays. Examples:: x = [[[1, 2], [3, 4]], [[2, 2], [5, 6]]] norm(x, ord=2, axis=1) = [[3.1622777 4.472136 ] [5.3851647 6.3245554]] norm(x, ord=1, axis=1) = [[4., 6.], [7., 8.]] rsp = x.cast_storage('row_sparse') norm(rsp) = [5.47722578] csr = x.cast_storage('csr') norm(csr) = [5.47722578] Defined in ../src/operator/tensor/broadcast_reduce_norm_value.cc:L89 Parameters ---------- data : Symbol The input ord : int, optional, default='2' Order of the norm. Currently ord=1 and ord=2 is supported. axis : Shape or None, optional, default=None The axis or axes along which to perform the reduction. The default, `axis=()`, will compute over all elements into a scalar array with shape `(1,)`. If `axis` is int, a reduction is performed on a particular axis. If `axis` is a 2-tuple, it specifies the axes that hold 2-D matrices, and the matrix norms of these matrices are computed. out_dtype : {None, 'float16', 'float32', 'float64', 'int32', 'int64', 'int8'},optional, default='None' The data type of the output. keepdims : boolean, optional, default=0 If this is set to `True`, the reduced axis is left in the result as dimension with size one. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def normal(loc=_Null, scale=_Null, shape=_Null, ctx=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Draw random samples from a normal (Gaussian) distribution. .. note:: The existing alias ``normal`` is deprecated. Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* (standard deviation). Example:: normal(loc=0, scale=1, shape=(2,2)) = [[ 1.89171135, -1.16881478], [-1.23474145, 1.55807114]] Defined in ../src/operator/random/sample_op.cc:L113 Parameters ---------- loc : float, optional, default=0 Mean of the distribution. scale : float, optional, default=1 Standard deviation of the distribution. shape : Shape(tuple), optional, default=None Shape of the output. ctx : string, optional, default='' Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. dtype : {'None', 'float16', 'float32', 'float64'},optional, default='None' DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def one_hot(indices=None, depth=_Null, on_value=_Null, off_value=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Returns a one-hot array. The locations represented by `indices` take value `on_value`, while all other locations take value `off_value`. 
`one_hot` operation with `indices` of shape ``(i0, i1)`` and `depth` of ``d`` would result in an output array of shape ``(i0, i1, d)`` with:: output[i,j,:] = off_value output[i,j,indices[i,j]] = on_value Examples:: one_hot([1,0,2,0], 3) = [[ 0. 1. 0.] [ 1. 0. 0.] [ 0. 0. 1.] [ 1. 0. 0.]] one_hot([1,0,2,0], 3, on_value=8, off_value=1, dtype='int32') = [[1 8 1] [8 1 1] [1 1 8] [8 1 1]] one_hot([[1,0],[1,0],[2,0]], 3) = [[[ 0. 1. 0.] [ 1. 0. 0.]] [[ 0. 1. 0.] [ 1. 0. 0.]] [[ 0. 0. 1.] [ 1. 0. 0.]]] Defined in ../src/operator/tensor/indexing_op.cc:L883 Parameters ---------- indices : Symbol array of locations where to set on_value depth : int, required Depth of the one hot dimension. on_value : double, optional, default=1 The value assigned to the locations represented by indices. off_value : double, optional, default=0 The value assigned to the locations not represented by indices. dtype : {'bfloat16', 'float16', 'float32', 'float64', 'int32', 'int64', 'int8', 'uint8'},optional, default='float32' DType of the output name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def ones_like(data=None, name=None, attr=None, out=None, **kwargs): r"""Return an array of ones with the same shape and type as the input array. Examples:: x = [[ 0., 0., 0.], [ 0., 0., 0.]] ones_like(x) = [[ 1., 1., 1.], [ 1., 1., 1.]] Parameters ---------- data : Symbol The input name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def pad(data=None, mode=_Null, pad_width=_Null, constant_value=_Null, name=None, attr=None, out=None, **kwargs): r"""Pads an input array with a constant or edge values of the array. .. note:: `Pad` is deprecated. Use `pad` instead. .. note:: Current implementation only supports 4D and 5D input arrays with padding applied only on axes 1, 2 and 3. Expects axes 4 and 5 in `pad_width` to be zero. This operation pads an input array with either a `constant_value` or edge values along each axis of the input array. The amount of padding is specified by `pad_width`. `pad_width` is a tuple of integer padding widths for each axis of the format ``(before_1, after_1, ... , before_N, after_N)``. The `pad_width` should be of length ``2*N`` where ``N`` is the number of dimensions of the array. For dimension ``N`` of the input array, ``before_N`` and ``after_N`` indicates how many values to add before and after the elements of the array along dimension ``N``. The widths of the higher two dimensions ``before_1``, ``after_1``, ``before_2``, ``after_2`` must be 0. Example:: x = [[[[ 1. 2. 3.] [ 4. 5. 6.]] [[ 7. 8. 9.] [ 10. 11. 12.]]] [[[ 11. 12. 13.] [ 14. 15. 16.]] [[ 17. 18. 19.] [ 20. 21. 22.]]]] pad(x,mode="edge", pad_width=(0,0,0,0,1,1,1,1)) = [[[[ 1. 1. 2. 3. 3.] [ 1. 1. 2. 3. 3.] [ 4. 4. 5. 6. 6.] [ 4. 4. 5. 6. 6.]] [[ 7. 7. 8. 9. 9.] [ 7. 7. 8. 9. 9.] [ 10. 10. 11. 12. 12.] [ 10. 10. 11. 12. 12.]]] [[[ 11. 11. 12. 13. 13.] [ 11. 11. 12. 13. 13.] [ 14. 14. 15. 16. 16.] [ 14. 14. 15. 16. 16.]] [[ 17. 17. 18. 19. 19.] [ 17. 17. 18. 19. 19.] [ 20. 20. 21. 22. 22.] [ 20. 20. 21. 22. 22.]]]] pad(x, mode="constant", constant_value=0, pad_width=(0,0,0,0,1,1,1,1)) = [[[[ 0. 0. 0. 0. 0.] [ 0. 1. 2. 3. 0.] [ 0. 4. 5. 6. 0.] [ 0. 0. 0. 0. 0.]] [[ 0. 0. 0. 0. 0.] [ 0. 7. 8. 9. 0.] [ 0. 10. 11. 12. 0.] [ 0. 0. 0. 0. 0.]]] [[[ 0. 0. 0. 0. 0.] [ 0. 11. 12. 13. 0.] [ 0. 14. 15. 16. 0.] [ 0. 0. 0. 0. 0.]] [[ 0. 0. 0. 0. 0.] [ 0. 17. 18. 19. 0.] [ 0. 20. 21. 22. 0.] [ 0. 0. 0. 0. 
0.]]]] Defined in ../src/operator/pad.cc:L766 Parameters ---------- data : Symbol An n-dimensional input array. mode : {'constant', 'edge', 'reflect'}, required Padding type to use. "constant" pads with `constant_value` "edge" pads using the edge values of the input array "reflect" pads by reflecting values with respect to the edges. pad_width : Shape(tuple), required Widths of the padding regions applied to the edges of each axis. It is a tuple of integer padding widths for each axis of the format ``(before_1, after_1, ... , before_N, after_N)``. It should be of length ``2*N`` where ``N`` is the number of dimensions of the array. This is equivalent to pad_width in numpy.pad, but flattened. constant_value : double, optional, default=0 The value used for padding when `mode` is "constant". name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def pick(data=None, index=None, axis=_Null, keepdims=_Null, mode=_Null, name=None, attr=None, out=None, **kwargs): r"""Picks elements from an input array according to the input indices along the given axis. Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be an output array of shape ``(i0,)`` with:: output[i] = input[i, indices[i]] By default, if any index mentioned is too large, it is replaced by the index that addresses the last element along an axis (the `clip` mode). This function supports n-dimensional input and (n-1)-dimensional indices arrays. Examples:: x = [[ 1., 2.], [ 3., 4.], [ 5., 6.]] // picks elements with specified indices along axis 0 pick(x, y=[0,1], 0) = [ 1., 4.] // picks elements with specified indices along axis 1 pick(x, y=[0,1,0], 1) = [ 1., 4., 5.] // picks elements with specified indices along axis 1 using 'wrap' mode // to place indices that would normally be out of bounds pick(x, y=[2,-1,-2], 1, mode='wrap') = [ 1., 4., 5.] y = [[ 1.], [ 0.], [ 2.]] // picks elements with specified indices along axis 1 and dims are maintained pick(x, y, 1, keepdims=True) = [[ 2.], [ 3.], [ 6.]] Defined in ../src/operator/tensor/broadcast_reduce_op_index.cc:L151 Parameters ---------- data : Symbol The input array index : Symbol The index array axis : int or None, optional, default='-1' int or None. The axis along which to pick the elements. Negative values mean indexing from right to left. If `None`, the elements in the index w.r.t. the flattened input will be picked. keepdims : boolean, optional, default=0 If true, the axis where we pick the elements is left in the result as a dimension with size one. mode : {'clip', 'wrap'}, optional, default='clip' Specify how out-of-bound indices behave. Default is "clip". "clip" means clip to the range. So, if all indices mentioned are too large, they are replaced by the index that addresses the last element along an axis. "wrap" means to wrap around. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def prod(data=None, axis=_Null, keepdims=_Null, exclude=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the product of array elements over given axes. Defined in ../src/operator/tensor/./broadcast_reduce_op.h:L31 Parameters ---------- data : Symbol The input axis : Shape or None, optional, default=None The axis or axes along which to perform the reduction. The default, `axis=()`, will compute over all elements into a scalar array with shape `(1,)`. If `axis` is int, a reduction is performed on a particular axis.
If `axis` is a tuple of ints, a reduction is performed on all the axes specified in the tuple. If `exclude` is true, reduction will be performed on the axes that are NOT in axis instead. Negative values mean indexing from right to left. keepdims : boolean, optional, default=0 If this is set to `True`, the reduced axes are left in the result as dimensions with size one. exclude : boolean, optional, default=0 Whether to perform reduction on axes that are NOT in `axis` instead. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def radians(data=None, name=None, attr=None, out=None, **kwargs): r"""Converts each element of the input array from degrees to radians. .. math:: radians([0, 90, 180, 270, 360]) = [0, \pi/2, \pi, 3\pi/2, 2\pi] The storage type of ``radians`` output depends upon the input storage type: - radians(default) = default - radians(row_sparse) = row_sparse - radians(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L351 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def random_exponential(lam=_Null, shape=_Null, ctx=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Draw random samples from an exponential distribution. Samples are distributed according to an exponential distribution parametrized by *lambda* (rate). Example:: exponential(lam=4, shape=(2,2)) = [[ 0.0097189 , 0.08999364], [ 0.04146638, 0.31715935]] Defined in ../src/operator/random/sample_op.cc:L137 Parameters ---------- lam : float, optional, default=1 Lambda parameter (rate) of the exponential distribution. shape : Shape(tuple), optional, default=None Shape of the output. ctx : string, optional, default='' Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. dtype : {'None', 'float16', 'float32', 'float64'},optional, default='None' DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def random_gamma(alpha=_Null, beta=_Null, shape=_Null, ctx=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Draw random samples from a gamma distribution. Samples are distributed according to a gamma distribution parametrized by *alpha* (shape) and *beta* (scale). Example:: gamma(alpha=9, beta=0.5, shape=(2,2)) = [[ 7.10486984, 3.37695289], [ 3.91697288, 3.65933681]] Defined in ../src/operator/random/sample_op.cc:L125 Parameters ---------- alpha : float, optional, default=1 Alpha parameter (shape) of the gamma distribution. beta : float, optional, default=1 Beta parameter (scale) of the gamma distribution. shape : Shape(tuple), optional, default=None Shape of the output. ctx : string, optional, default='' Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. dtype : {'None', 'float16', 'float32', 'float64'},optional, default='None' DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def random_generalized_negative_binomial(mu=_Null, alpha=_Null, shape=_Null, ctx=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Draw random samples from a generalized negative binomial distribution.
Samples are distributed according to a generalized negative binomial distribution parametrized by *mu* (mean) and *alpha* (dispersion). *alpha* is defined as *1/k* where *k* is the failure limit of the number of unsuccessful experiments (generalized to real numbers). Samples will always be returned as a floating point data type. Example:: generalized_negative_binomial(mu=2.0, alpha=0.3, shape=(2,2)) = [[ 2., 1.], [ 6., 4.]] Defined in ../src/operator/random/sample_op.cc:L179 Parameters ---------- mu : float, optional, default=1 Mean of the negative binomial distribution. alpha : float, optional, default=1 Alpha (dispersion) parameter of the negative binomial distribution. shape : Shape(tuple), optional, default=None Shape of the output. ctx : string, optional, default='' Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. dtype : {'None', 'float16', 'float32', 'float64'},optional, default='None' DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def random_negative_binomial(k=_Null, p=_Null, shape=_Null, ctx=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Draw random samples from a negative binomial distribution. Samples are distributed according to a negative binomial distribution parametrized by *k* (limit of unsuccessful experiments) and *p* (failure probability in each experiment). Samples will always be returned as a floating point data type. Example:: negative_binomial(k=3, p=0.4, shape=(2,2)) = [[ 4., 7.], [ 2., 5.]] Defined in ../src/operator/random/sample_op.cc:L164 Parameters ---------- k : int, optional, default='1' Limit of unsuccessful experiments. p : float, optional, default=1 Failure probability in each experiment. shape : Shape(tuple), optional, default=None Shape of the output. ctx : string, optional, default='' Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. dtype : {'None', 'float16', 'float32', 'float64'},optional, default='None' DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def random_normal(loc=_Null, scale=_Null, shape=_Null, ctx=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Draw random samples from a normal (Gaussian) distribution. .. note:: The existing alias ``normal`` is deprecated. Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* (standard deviation). Example:: normal(loc=0, scale=1, shape=(2,2)) = [[ 1.89171135, -1.16881478], [-1.23474145, 1.55807114]] Defined in ../src/operator/random/sample_op.cc:L113 Parameters ---------- loc : float, optional, default=0 Mean of the distribution. scale : float, optional, default=1 Standard deviation of the distribution. shape : Shape(tuple), optional, default=None Shape of the output. ctx : string, optional, default='' Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. dtype : {'None', 'float16', 'float32', 'float64'},optional, default='None' DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. 
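Example (a minimal usage sketch, not part of the generated documentation; it assumes this operator is exposed as ``mx.sym.random_normal``)::

    import mxnet as mx
    # a (3, 4) block of draws from N(loc=0, scale=2); values differ per run
    noise = mx.sym.random_normal(loc=0.0, scale=2.0, shape=(3, 4))
    data = mx.sym.Variable('data')
    noisy = data + noise  # e.g. additive Gaussian noise on an input symbol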
""" return (0,) def random_pdf_dirichlet(sample=None, alpha=None, is_log=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the value of the PDF of *sample* of Dirichlet distributions with parameter *alpha*. The shape of *alpha* must match the leftmost subshape of *sample*. That is, *sample* can have the same shape as *alpha*, in which case the output contains one density per distribution, or *sample* can be a tensor of tensors with that shape, in which case the output is a tensor of densities such that the densities at index *i* in the output are given by the samples at index *i* in *sample* parameterized by the value of *alpha* at index *i*. Examples:: random_pdf_dirichlet(sample=[[1,2],[2,3],[3,4]], alpha=[2.5, 2.5]) = [38.413498, 199.60245, 564.56085] sample = [[[1, 2, 3], [10, 20, 30], [100, 200, 300]], [[0.1, 0.2, 0.3], [0.01, 0.02, 0.03], [0.001, 0.002, 0.003]]] random_pdf_dirichlet(sample=sample, alpha=[0.1, 0.4, 0.9]) = [[2.3257459e-02, 5.8420084e-04, 1.4674458e-05], [9.2589635e-01, 3.6860607e+01, 1.4674468e+03]] Defined in ../src/operator/random/pdf_op.cc:L315 Parameters ---------- sample : Symbol Samples from the distributions. alpha : Symbol Concentration parameters of the distributions. is_log : boolean, optional, default=0 If set, compute the density of the log-probability instead of the probability. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def random_pdf_exponential(sample=None, lam=None, is_log=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the value of the PDF of *sample* of exponential distributions with parameters *lam* (rate). The shape of *lam* must match the leftmost subshape of *sample*. That is, *sample* can have the same shape as *lam*, in which case the output contains one density per distribution, or *sample* can be a tensor of tensors with that shape, in which case the output is a tensor of densities such that the densities at index *i* in the output are given by the samples at index *i* in *sample* parameterized by the value of *lam* at index *i*. Examples:: random_pdf_exponential(sample=[[1, 2, 3]], lam=[1]) = [[0.36787945, 0.13533528, 0.04978707]] sample = [[1,2,3], [1,2,3], [1,2,3]] random_pdf_exponential(sample=sample, lam=[1,0.5,0.25]) = [[0.36787945, 0.13533528, 0.04978707], [0.30326533, 0.18393973, 0.11156508], [0.1947002, 0.15163267, 0.11809164]] Defined in ../src/operator/random/pdf_op.cc:L304 Parameters ---------- sample : Symbol Samples from the distributions. lam : Symbol Lambda (rate) parameters of the distributions. is_log : boolean, optional, default=0 If set, compute the density of the log-probability instead of the probability. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def random_pdf_gamma(sample=None, alpha=None, beta=None, is_log=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the value of the PDF of *sample* of gamma distributions with parameters *alpha* (shape) and *beta* (rate). *alpha* and *beta* must have the same shape, which must match the leftmost subshape of *sample*. 
That is, *sample* can have the same shape as *alpha* and *beta*, in which case the output contains one density per distribution, or *sample* can be a tensor of tensors with that shape, in which case the output is a tensor of densities such that the densities at index *i* in the output are given by the samples at index *i* in *sample* parameterized by the values of *alpha* and *beta* at index *i*. Examples:: random_pdf_gamma(sample=[[1,2,3,4,5]], alpha=[5], beta=[1]) = [[0.01532831, 0.09022352, 0.16803136, 0.19536681, 0.17546739]] sample = [[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] random_pdf_gamma(sample=sample, alpha=[5,6,7], beta=[1,1,1]) = [[0.01532831, 0.09022352, 0.16803136, 0.19536681, 0.17546739], [0.03608941, 0.10081882, 0.15629345, 0.17546739, 0.16062315], [0.05040941, 0.10419563, 0.14622283, 0.16062315, 0.14900276]] Defined in ../src/operator/random/pdf_op.cc:L301 Parameters ---------- sample : Symbol Samples from the distributions. alpha : Symbol Alpha (shape) parameters of the distributions. is_log : boolean, optional, default=0 If set, compute the density of the log-probability instead of the probability. beta : Symbol Beta (scale) parameters of the distributions. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def random_pdf_generalized_negative_binomial(sample=None, mu=None, alpha=None, is_log=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the value of the PDF of *sample* of generalized negative binomial distributions with parameters *mu* (mean) and *alpha* (dispersion). This can be understood as a reparameterization of the negative binomial, where *k* = *1 / alpha* and *p* = *1 / (mu \* alpha + 1)*. *mu* and *alpha* must have the same shape, which must match the leftmost subshape of *sample*. That is, *sample* can have the same shape as *mu* and *alpha*, in which case the output contains one density per distribution, or *sample* can be a tensor of tensors with that shape, in which case the output is a tensor of densities such that the densities at index *i* in the output are given by the samples at index *i* in *sample* parameterized by the values of *mu* and *alpha* at index *i*. Examples:: random_pdf_generalized_negative_binomial(sample=[[1, 2, 3, 4]], alpha=[1], mu=[1]) = [[0.25, 0.125, 0.0625, 0.03125]] sample = [[1,2,3,4], [1,2,3,4]] random_pdf_generalized_negative_binomial(sample=sample, alpha=[1, 0.6666], mu=[1, 1.5]) = [[0.25, 0.125, 0.0625, 0.03125 ], [0.26517063, 0.16573331, 0.09667706, 0.05437994]] Defined in ../src/operator/random/pdf_op.cc:L311 Parameters ---------- sample : Symbol Samples from the distributions. mu : Symbol Means of the distributions. is_log : boolean, optional, default=0 If set, compute the density of the log-probability instead of the probability. alpha : Symbol Alpha (dispersion) parameters of the distributions. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def random_pdf_negative_binomial(sample=None, k=None, p=None, is_log=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the value of the PDF of samples of negative binomial distributions with parameters *k* (failure limit) and *p* (failure probability). *k* and *p* must have the same shape, which must match the leftmost subshape of *sample*. 
That is, *sample* can have the same shape as *k* and *p*, in which case the output contains one density per distribution, or *sample* can be a tensor of tensors with that shape, in which case the output is a tensor of densities such that the densities at index *i* in the output are given by the samples at index *i* in *sample* parameterized by the values of *k* and *p* at index *i*. Examples:: random_pdf_negative_binomial(sample=[[1,2,3,4]], k=[1], p=[0.5]) = [[0.25, 0.125, 0.0625, 0.03125]] # Note that k may be real-valued sample = [[1,2,3,4], [1,2,3,4]] random_pdf_negative_binomial(sample=sample, k=[1, 1.5], p=[0.5, 0.5]) = [[0.25, 0.125, 0.0625, 0.03125 ], [0.26516506, 0.16572815, 0.09667476, 0.05437956]] Defined in ../src/operator/random/pdf_op.cc:L308 Parameters ---------- sample : Symbol Samples from the distributions. k : Symbol Limits of unsuccessful experiments. is_log : boolean, optional, default=0 If set, compute the density of the log-probability instead of the probability. p : Symbol Failure probabilities in each experiment. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def random_pdf_normal(sample=None, mu=None, sigma=None, is_log=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the value of the PDF of *sample* of normal distributions with parameters *mu* (mean) and *sigma* (standard deviation). *mu* and *sigma* must have the same shape, which must match the leftmost subshape of *sample*. That is, *sample* can have the same shape as *mu* and *sigma*, in which case the output contains one density per distribution, or *sample* can be a tensor of tensors with that shape, in which case the output is a tensor of densities such that the densities at index *i* in the output are given by the samples at index *i* in *sample* parameterized by the values of *mu* and *sigma* at index *i*. Examples:: sample = [[-2, -1, 0, 1, 2]] random_pdf_normal(sample=sample, mu=[0], sigma=[1]) = [[0.05399097, 0.24197073, 0.3989423, 0.24197073, 0.05399097]] random_pdf_normal(sample=sample*2, mu=[0,0], sigma=[1,2]) = [[0.05399097, 0.24197073, 0.3989423, 0.24197073, 0.05399097], [0.12098537, 0.17603266, 0.19947115, 0.17603266, 0.12098537]] Defined in ../src/operator/random/pdf_op.cc:L299 Parameters ---------- sample : Symbol Samples from the distributions. mu : Symbol Means of the distributions. is_log : boolean, optional, default=0 If set, compute the density of the log-probability instead of the probability. sigma : Symbol Standard deviations of the distributions. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def random_pdf_poisson(sample=None, lam=None, is_log=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the value of the PDF of *sample* of Poisson distributions with parameters *lam* (rate). The shape of *lam* must match the leftmost subshape of *sample*. That is, *sample* can have the same shape as *lam*, in which case the output contains one density per distribution, or *sample* can be a tensor of tensors with that shape, in which case the output is a tensor of densities such that the densities at index *i* in the output are given by the samples at index *i* in *sample* parameterized by the value of *lam* at index *i*.
Examples:: random_pdf_poisson(sample=[[0,1,2,3]], lam=[1]) = [[0.36787945, 0.36787945, 0.18393973, 0.06131324]] sample = [[0,1,2,3], [0,1,2,3], [0,1,2,3]] random_pdf_poisson(sample=sample, lam=[1,2,3]) = [[0.36787945, 0.36787945, 0.18393973, 0.06131324], [0.13533528, 0.27067056, 0.27067056, 0.18044704], [0.04978707, 0.14936121, 0.22404182, 0.22404182]] Defined in ../src/operator/random/pdf_op.cc:L306 Parameters ---------- sample : Symbol Samples from the distributions. lam : Symbol Lambda (rate) parameters of the distributions. is_log : boolean, optional, default=0 If set, compute the density of the log-probability instead of the probability. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def random_pdf_uniform(sample=None, low=None, high=None, is_log=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the value of the PDF of *sample* of uniform distributions on the intervals given by *[low,high)*. *low* and *high* must have the same shape, which must match the leftmost subshape of *sample*. That is, *sample* can have the same shape as *low* and *high*, in which case the output contains one density per distribution, or *sample* can be a tensor of tensors with that shape, in which case the output is a tensor of densities such that the densities at index *i* in the output are given by the samples at index *i* in *sample* parameterized by the values of *low* and *high* at index *i*. Examples:: random_pdf_uniform(sample=[[1,2,3,4]], low=[0], high=[10]) = [0.1, 0.1, 0.1, 0.1] sample = [[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]] low = [[0, 0], [0, 0]] high = [[ 5, 10], [15, 20]] random_pdf_uniform(sample=sample, low=low, high=high) = [[[0.2, 0.2, 0.2 ], [0.1, 0.1, 0.1 ]], [[0.06667, 0.06667, 0.06667], [0.05, 0.05, 0.05 ]]] Defined in ../src/operator/random/pdf_op.cc:L297 Parameters ---------- sample : Symbol Samples from the distributions. low : Symbol Lower bounds of the distributions. is_log : boolean, optional, default=0 If set, compute the density of the log-probability instead of the probability. high : Symbol Upper bounds of the distributions. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def random_poisson(lam=_Null, shape=_Null, ctx=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Draw random samples from a Poisson distribution. Samples are distributed according to a Poisson distribution parametrized by *lambda* (rate). Samples will always be returned as a floating point data type. Example:: poisson(lam=4, shape=(2,2)) = [[ 5., 2.], [ 4., 6.]] Defined in ../src/operator/random/sample_op.cc:L150 Parameters ---------- lam : float, optional, default=1 Lambda parameter (rate) of the Poisson distribution. shape : Shape(tuple), optional, default=None Shape of the output. ctx : string, optional, default='' Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. dtype : {'None', 'float16', 'float32', 'float64'},optional, default='None' DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def random_randint(low=_Null, high=_Null, shape=_Null, ctx=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Draw random samples from a discrete uniform distribution. 
Samples are uniformly distributed over the half-open interval *[low, high)* (includes *low*, but excludes *high*). Example:: randint(low=0, high=5, shape=(2,2)) = [[ 0, 2], [ 3, 1]] Defined in ../src/operator/random/sample_op.cc:L194 Parameters ---------- low : long, required Lower bound of the distribution. high : long, required Upper bound of the distribution. shape : Shape(tuple), optional, default=None Shape of the output. ctx : string, optional, default='' Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. dtype : {'None', 'int32', 'int64'},optional, default='None' DType of the output in case this can't be inferred. Defaults to int32 if not defined (dtype=None). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def random_uniform(low=_Null, high=_Null, shape=_Null, ctx=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Draw random samples from a uniform distribution. .. note:: The existing alias ``uniform`` is deprecated. Samples are uniformly distributed over the half-open interval *[low, high)* (includes *low*, but excludes *high*). Example:: uniform(low=0, high=1, shape=(2,2)) = [[ 0.60276335, 0.85794562], [ 0.54488319, 0.84725171]] Defined in ../src/operator/random/sample_op.cc:L96 Parameters ---------- low : float, optional, default=0 Lower bound of the distribution. high : float, optional, default=1 Upper bound of the distribution. shape : Shape(tuple), optional, default=None Shape of the output. ctx : string, optional, default='' Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. dtype : {'None', 'float16', 'float32', 'float64'},optional, default='None' DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def ravel_multi_index(data=None, shape=_Null, name=None, attr=None, out=None, **kwargs): r"""Converts a batch of index arrays into an array of flat indices. The operator follows numpy conventions so a single multi index is given by a column of the input matrix. The leading dimension may be left unspecified by using -1 as placeholder. Examples:: A = [[3,6,6],[4,5,1]] ravel(A, shape=(7,6)) = [22,41,37] ravel(A, shape=(-1,6)) = [22,41,37] Defined in ../src/operator/tensor/ravel.cc:L42 Parameters ---------- data : Symbol Batch of multi-indices shape : Shape(tuple), optional, default=None Shape of the array into which the multi-indices apply. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def rcbrt(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise inverse cube-root value of the input. .. math:: rcbrt(x) = 1/\sqrt[3]{x} Example:: rcbrt([1,8,-125]) = [1.0, 0.5, -0.2] Defined in ../src/operator/tensor/elemwise_unary_op_pow.cc:L323 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def reciprocal(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns the reciprocal of the argument, element-wise. Calculates 1/x. Example:: reciprocal([-2, 1, 3, 1.6, 0.2]) = [-0.5, 1.0, 0.33333334, 0.625, 5.0] Defined in ../src/operator/tensor/elemwise_unary_op_pow.cc:L43 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. 
Returns ------- Symbol The result symbol. """ return (0,) def relu(data=None, name=None, attr=None, out=None, **kwargs): r"""Computes rectified linear activation. .. math:: max(features, 0) The storage type of ``relu`` output depends upon the input storage type: - relu(default) = default - relu(row_sparse) = row_sparse - relu(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L85 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def repeat(data=None, repeats=_Null, axis=_Null, name=None, attr=None, out=None, **kwargs): r"""Repeats elements of an array. By default, ``repeat`` flattens the input array into 1-D and then repeats the elements:: x = [[ 1, 2], [ 3, 4]] repeat(x, repeats=2) = [ 1., 1., 2., 2., 3., 3., 4., 4.] The parameter ``axis`` specifies the axis along which to perform repeat:: repeat(x, repeats=2, axis=1) = [[ 1., 1., 2., 2.], [ 3., 3., 4., 4.]] repeat(x, repeats=2, axis=0) = [[ 1., 2.], [ 1., 2.], [ 3., 4.], [ 3., 4.]] repeat(x, repeats=2, axis=-1) = [[ 1., 1., 2., 2.], [ 3., 3., 4., 4.]] Defined in ../src/operator/tensor/matrix_op.cc:L760 Parameters ---------- data : Symbol Input data array repeats : int, required The number of repetitions for each element. axis : int or None, optional, default='None' The axis along which to repeat values. The negative numbers are interpreted counting from the backward. By default, use the flattened input array, and return a flat output array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def reset_arrays(*data, **kwargs): r"""Set to zero multiple arrays Defined in ../src/operator/contrib/reset_arrays.cc:L36 Parameters ---------- data : Symbol[] Arrays num_arrays : int, required number of input arrays. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def reshape(data=None, shape=_Null, reverse=_Null, target_shape=_Null, keep_highest=_Null, name=None, attr=None, out=None, **kwargs): r"""Reshapes the input array. .. note:: ``Reshape`` is deprecated, use ``reshape`` Given an array and a shape, this function returns a copy of the array in the new shape. The shape is a tuple of integers such as (2,3,4). The size of the new shape should be same as the size of the input array. Example:: reshape([1,2,3,4], shape=(2,2)) = [[1,2], [3,4]] Some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}. The significance of each is explained below: - ``0`` copy this dimension from the input to the output shape. Example:: - input shape = (2,3,4), shape = (4,0,2), output shape = (4,3,2) - input shape = (2,3,4), shape = (2,0,0), output shape = (2,3,4) - ``-1`` infers the dimension of the output shape by using the remainder of the input dimensions keeping the size of the new array same as that of the input array. At most one dimension of shape can be -1. Example:: - input shape = (2,3,4), shape = (6,1,-1), output shape = (6,1,4) - input shape = (2,3,4), shape = (3,-1,8), output shape = (3,1,8) - input shape = (2,3,4), shape=(-1,), output shape = (24,) - ``-2`` copy all/remainder of the input dimensions to the output shape. 
Example:: - input shape = (2,3,4), shape = (-2,), output shape = (2,3,4) - input shape = (2,3,4), shape = (2,-2), output shape = (2,3,4) - input shape = (2,3,4), shape = (-2,1,1), output shape = (2,3,4,1,1) - ``-3`` use the product of two consecutive dimensions of the input shape as the output dimension. Example:: - input shape = (2,3,4), shape = (-3,4), output shape = (6,4) - input shape = (2,3,4,5), shape = (-3,-3), output shape = (6,20) - input shape = (2,3,4), shape = (0,-3), output shape = (2,12) - input shape = (2,3,4), shape = (-3,-2), output shape = (6,4) - ``-4`` split one dimension of the input into two dimensions passed subsequent to -4 in shape (can contain -1). Example:: - input shape = (2,3,4), shape = (-4,1,2,-2), output shape =(1,2,3,4) - input shape = (2,3,4), shape = (2,-4,-1,3,-2), output shape = (2,1,3,4) If the argument `reverse` is set to 1, then the special values are inferred from right to left. Example:: - without reverse=1, for input shape = (10,5,4), shape = (-1,0), output shape would be (40,5) - with reverse=1, output shape will be (50,4). Defined in ../src/operator/tensor/matrix_op.cc:L175 Parameters ---------- data : Symbol Input data to reshape. shape : Shape(tuple), optional, default=[] The target shape reverse : boolean, optional, default=0 If true then the special values are inferred from right to left target_shape : Shape(tuple), optional, default=[] (Deprecated! Use ``shape`` instead.) Target new shape. One and only one dim can be 0, in which case it will be inferred from the rest of dims keep_highest : boolean, optional, default=0 (Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged.If set to true, then the first dim in target_shape is ignored,and always fixed as input name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def reshape_like(lhs=None, rhs=None, lhs_begin=_Null, lhs_end=_Null, rhs_begin=_Null, rhs_end=_Null, name=None, attr=None, out=None, **kwargs): r"""Reshape some or all dimensions of `lhs` to have the same shape as some or all dimensions of `rhs`. Returns a **view** of the `lhs` array with a new shape without altering any data. Example:: x = [1, 2, 3, 4, 5, 6] y = [[0, -4], [3, 2], [2, 2]] reshape_like(x, y) = [[1, 2], [3, 4], [5, 6]] More precise control over how dimensions are inherited is achieved by specifying \ slices over the `lhs` and `rhs` array dimensions. Only the sliced `lhs` dimensions \ are reshaped to the `rhs` sliced dimensions, with the non-sliced `lhs` dimensions staying the same. Examples:: - lhs shape = (30,7), rhs shape = (15,2,4), lhs_begin=0, lhs_end=1, rhs_begin=0, rhs_end=2, output shape = (15,2,7) - lhs shape = (3, 5), rhs shape = (1,15,4), lhs_begin=0, lhs_end=2, rhs_begin=1, rhs_end=2, output shape = (15) Negative indices are supported, and `None` can be used for either `lhs_end` or `rhs_end` to indicate the end of the range. Example:: - lhs shape = (30, 12), rhs shape = (4, 2, 2, 3), lhs_begin=-1, lhs_end=None, rhs_begin=1, rhs_end=None, output shape = (30, 2, 2, 3) Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L511 Parameters ---------- lhs : Symbol First input. rhs : Symbol Second input. lhs_begin : int or None, optional, default='None' Defaults to 0. The beginning index along which the lhs dimensions are to be reshaped. Supports negative indices. lhs_end : int or None, optional, default='None' Defaults to None. The ending index along which the lhs dimensions are to be used for reshaping. 
Supports negative indices. rhs_begin : int or None, optional, default='None' Defaults to 0. The beginning index along which the rhs dimensions are to be used for reshaping. Supports negative indices. rhs_end : int or None, optional, default='None' Defaults to None. The ending index along which the rhs dimensions are to be used for reshaping. Supports negative indices. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def reverse(data=None, axis=_Null, name=None, attr=None, out=None, **kwargs): r"""Reverses the order of elements along given axis while preserving array shape. Note: reverse and flip are equivalent. We use reverse in the following examples. Examples:: x = [[ 0., 1., 2., 3., 4.], [ 5., 6., 7., 8., 9.]] reverse(x, axis=0) = [[ 5., 6., 7., 8., 9.], [ 0., 1., 2., 3., 4.]] reverse(x, axis=1) = [[ 4., 3., 2., 1., 0.], [ 9., 8., 7., 6., 5.]] Defined in ../src/operator/tensor/matrix_op.cc:L848 Parameters ---------- data : Symbol Input data array axis : Shape(tuple), required The axis along which to reverse elements. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def rint(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise rounded value to the nearest integer of the input. .. note:: - For input ``n.5`` ``rint`` returns ``n`` while ``round`` returns ``n+1``. - For input ``-n.5`` both ``rint`` and ``round`` return ``-n-1``. Example:: rint([-1.5, 1.5, -1.9, 1.9, 2.1]) = [-2., 1., -2., 2., 2.] The storage type of ``rint`` output depends upon the input storage type: - rint(default) = default - rint(row_sparse) = row_sparse - rint(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L796 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def rmsprop_update(weight=None, grad=None, n=None, lr=_Null, gamma1=_Null, epsilon=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, clip_weights=_Null, name=None, attr=None, out=None, **kwargs): r"""Update function for `RMSProp` optimizer. `RMSProp` is a variant of stochastic gradient descent where the gradients are divided by a cache which grows with the sum of squares of recent gradients. `RMSProp` is similar to `AdaGrad`, a popular variant of `SGD` which adaptively tunes the learning rate of each parameter. `AdaGrad` lowers the learning rate for each parameter monotonically over the course of training. While this is analytically motivated for convex optimizations, it may not be ideal for non-convex problems. `RMSProp` deals with this heuristically by allowing the learning rates to rebound as the denominator decays over time. Define the Root Mean Square (RMS) error criterion of the gradient as :math:`RMS[g]_t = \sqrt{E[g^2]_t + \epsilon}`, where :math:`g` represents gradient and :math:`E[g^2]_t` is the decaying average over past squared gradient. The :math:`E[g^2]_t` is given by: .. math:: E[g^2]_t = \gamma * E[g^2]_{t-1} + (1-\gamma) * g_t^2 The update step is .. math:: \theta_{t+1} = \theta_t - \frac{\eta}{RMS[g]_t} g_t The RMSProp code follows the version in http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf Tieleman & Hinton, 2012. Hinton suggests the momentum term :math:`\gamma` to be 0.9 and the learning rate :math:`\eta` to be 0.001.
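Example (a minimal usage sketch, not part of the generated documentation; it assumes this operator is exposed as ``mx.sym.rmsprop_update`` and uses illustrative variable names)::

    import mxnet as mx
    weight = mx.sym.Variable('weight')
    grad   = mx.sym.Variable('grad')
    n      = mx.sym.Variable('n')  # running average of squared gradients, E[g^2]
    update = mx.sym.rmsprop_update(weight, grad, n,
                                   lr=0.001, gamma1=0.9, epsilon=1e-8)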
Defined in ../src/operator/optimizer_op.cc:L866 Parameters ---------- weight : Symbol Weight grad : Symbol Gradient n : Symbol n lr : float, required Learning rate gamma1 : float, optional, default=0.949999988 The decay rate of momentum estimates. epsilon : float, optional, default=9.99999994e-09 A small constant for numerical stability. wd : float, optional, default=0 Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). clip_weights : float, optional, default=-1 Clip weights to the range of [-clip_weights, clip_weights] If clip_weights <= 0, weight clipping is turned off. weights = max(min(weights, clip_weights), -clip_weights). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def rmspropalex_update(weight=None, grad=None, n=None, g=None, delta=None, lr=_Null, gamma1=_Null, gamma2=_Null, epsilon=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, clip_weights=_Null, name=None, attr=None, out=None, **kwargs): r"""Update function for RMSPropAlex optimizer. `RMSPropAlex` is non-centered version of `RMSProp`. Define :math:`E[g^2]_t` is the decaying average over past squared gradient and :math:`E[g]_t` is the decaying average over past gradient. .. math:: E[g^2]_t = \gamma_1 * E[g^2]_{t-1} + (1 - \gamma_1) * g_t^2\\ E[g]_t = \gamma_1 * E[g]_{t-1} + (1 - \gamma_1) * g_t\\ \Delta_t = \gamma_2 * \Delta_{t-1} - \frac{\eta}{\sqrt{E[g^2]_t - E[g]_t^2 + \epsilon}} g_t\\ The update step is .. math:: \theta_{t+1} = \theta_t + \Delta_t The RMSPropAlex code follows the version in http://arxiv.org/pdf/1308.0850v5.pdf Eq(38) - Eq(45) by Alex Graves, 2013. Graves suggests the momentum term :math:`\gamma_1` to be 0.95, :math:`\gamma_2` to be 0.9 and the learning rate :math:`\eta` to be 0.0001. Defined in ../src/operator/optimizer_op.cc:L905 Parameters ---------- weight : Symbol Weight grad : Symbol Gradient n : Symbol n g : Symbol g delta : Symbol delta lr : float, required Learning rate gamma1 : float, optional, default=0.949999988 Decay rate. gamma2 : float, optional, default=0.899999976 Decay rate. epsilon : float, optional, default=9.99999994e-09 A small constant for numerical stability. wd : float, optional, default=0 Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). clip_weights : float, optional, default=-1 Clip weights to the range of [-clip_weights, clip_weights] If clip_weights <= 0, weight clipping is turned off. weights = max(min(weights, clip_weights), -clip_weights). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. 
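A hedged usage sketch (assuming the imperative NDArray twin ``mx.nd.rmspropalex_update``; ``n``, ``g`` and ``delta`` are the per-weight state arrays for :math:`E[g^2]_t`, :math:`E[g]_t` and :math:`\Delta_t`, all updated in place)::

    import mxnet as mx

    weight = mx.nd.ones((3,))
    grad = mx.nd.array([0.1, -0.2, 0.3])
    n = mx.nd.zeros_like(weight)      # E[g^2] state
    g = mx.nd.zeros_like(weight)      # E[g] state
    delta = mx.nd.zeros_like(weight)  # Delta state

    # One RMSPropAlex step with the constants suggested by Graves (2013).
    mx.nd.rmspropalex_update(weight, grad, n, g, delta, out=weight,
                             lr=0.0001, gamma1=0.95, gamma2=0.9,
                             epsilon=1e-8)
    print(weight.asnumpy())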
""" return (0,) def round(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise rounded value to the nearest integer of the input. Example:: round([-1.5, 1.5, -1.9, 1.9, 2.1]) = [-2., 2., -2., 2., 2.] The storage type of ``round`` output depends upon the input storage type: - round(default) = default - round(row_sparse) = row_sparse - round(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L775 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def rsqrt(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise inverse square-root value of the input. .. math:: rsqrt(x) = 1/\sqrt{x} Example:: rsqrt([4,9,16]) = [0.5, 0.33333334, 0.25] The storage type of ``rsqrt`` output is always dense Defined in ../src/operator/tensor/elemwise_unary_op_pow.cc:L221 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def sample_exponential(lam=None, shape=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Concurrent sampling from multiple exponential distributions with parameters lambda (rate). The parameters of the distributions are provided as an input array. Let *[s]* be the shape of the input array, *n* be the dimension of *[s]*, *[t]* be the shape specified as the parameter of the operator, and *m* be the dimension of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. For any valid *n*-dimensional index *i* with respect to the input array, *output[i]* will be an *m*-dimensional array that holds randomly drawn samples from the distribution which is parameterized by the input value at index *i*. If the shape parameter of the operator is not set, then one sample will be drawn per distribution and the output array has the same shape as the input array. Examples:: lam = [ 1.0, 8.5 ] // Draw a single sample for each distribution sample_exponential(lam) = [ 0.51837951, 0.09994757] // Draw a vector containing two samples for each distribution sample_exponential(lam, shape=(2)) = [[ 0.51837951, 0.19866663], [ 0.09994757, 0.50447971]] Defined in ../src/operator/random/multisample_op.cc:L283 Parameters ---------- lam : Symbol Lambda (rate) parameters of the distributions. shape : Shape(tuple), optional, default=[] Shape to be sampled from each random distribution. dtype : {'None', 'float16', 'float32', 'float64'},optional, default='None' DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def sample_gamma(alpha=None, beta=None, shape=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Concurrent sampling from multiple gamma distributions with parameters *alpha* (shape) and *beta* (scale). The parameters of the distributions are provided as input arrays. Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* be the shape specified as the parameter of the operator, and *m* be the dimension of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. 
For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* will be an *m*-dimensional array that holds randomly drawn samples from the distribution which is parameterized by the input values at index *i*. If the shape parameter of the operator is not set, then one sample will be drawn per distribution and the output array has the same shape as the input arrays. Examples:: alpha = [ 0.0, 2.5 ] beta = [ 1.0, 0.7 ] // Draw a single sample for each distribution sample_gamma(alpha, beta) = [ 0. , 2.25797319] // Draw a vector containing two samples for each distribution sample_gamma(alpha, beta, shape=(2)) = [[ 0. , 0. ], [ 2.25797319, 1.70734084]] Defined in ../src/operator/random/multisample_op.cc:L280 Parameters ---------- alpha : Symbol Alpha (shape) parameters of the distributions. shape : Shape(tuple), optional, default=[] Shape to be sampled from each random distribution. dtype : {'None', 'float16', 'float32', 'float64'},optional, default='None' DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). beta : Symbol Beta (scale) parameters of the distributions. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def sample_generalized_negative_binomial(mu=None, alpha=None, shape=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Concurrent sampling from multiple generalized negative binomial distributions with parameters *mu* (mean) and *alpha* (dispersion). The parameters of the distributions are provided as input arrays. Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* be the shape specified as the parameter of the operator, and *m* be the dimension of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* will be an *m*-dimensional array that holds randomly drawn samples from the distribution which is parameterized by the input values at index *i*. If the shape parameter of the operator is not set, then one sample will be drawn per distribution and the output array has the same shape as the input arrays. Samples will always be returned as a floating point data type. Examples:: mu = [ 2.0, 2.5 ] alpha = [ 1.0, 0.1 ] // Draw a single sample for each distribution sample_generalized_negative_binomial(mu, alpha) = [ 0., 3.] // Draw a vector containing two samples for each distribution sample_generalized_negative_binomial(mu, alpha, shape=(2)) = [[ 0., 3.], [ 3., 1.]] Defined in ../src/operator/random/multisample_op.cc:L290 Parameters ---------- mu : Symbol Means of the distributions. shape : Shape(tuple), optional, default=[] Shape to be sampled from each random distribution. dtype : {'None', 'float16', 'float32', 'float64'},optional, default='None' DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). alpha : Symbol Alpha (dispersion) parameters of the distributions. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def sample_multinomial(data=None, shape=_Null, get_prob=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Concurrent sampling from multiple multinomial distributions. *data* is an *n* dimensional array whose last dimension has length *k*, where *k* is the number of possible outcomes of each multinomial distribution. 
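Before the precise semantics below, a hedged usage sketch (assuming the imperative NDArray twin ``mx.nd.sample_multinomial``; each row of the input must sum to 1)::

    import mxnet as mx

    probs = mx.nd.array([[0.0, 0.1, 0.2, 0.3, 0.4],
                         [0.4, 0.3, 0.2, 0.1, 0.0]])

    samples = mx.nd.sample_multinomial(probs)  # one draw per distribution
    samples, log_p = mx.nd.sample_multinomial(probs, get_prob=True)
    print(samples.asnumpy(), log_p.asnumpy())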
This operator will draw *shape* samples from each distribution. If shape is empty one sample will be drawn from each distribution. If *get_prob* is true, a second array containing log likelihood of the drawn samples will also be returned. This is usually used for reinforcement learning where you can provide reward as head gradient for this array to estimate gradient. Note that the input distribution must be normalized, i.e. *data* must sum to 1 along its last axis. Examples:: probs = [[0, 0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1, 0]] // Draw a single sample for each distribution sample_multinomial(probs) = [3, 0] // Draw a vector containing two samples for each distribution sample_multinomial(probs, shape=(2)) = [[4, 2], [0, 0]] // requests log likelihood sample_multinomial(probs, get_prob=True) = [2, 1], [0.2, 0.3] Parameters ---------- data : Symbol Distribution probabilities. Must sum to one on the last axis. shape : Shape(tuple), optional, default=[] Shape to be sampled from each random distribution. get_prob : boolean, optional, default=0 Whether to also return the log probability of sampled result. This is usually used for differentiating through stochastic variables, e.g. in reinforcement learning. dtype : {'float16', 'float32', 'float64', 'int32', 'uint8'},optional, default='int32' DType of the output in case this can't be inferred. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def sample_negative_binomial(k=None, p=None, shape=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Concurrent sampling from multiple negative binomial distributions with parameters *k* (failure limit) and *p* (failure probability). The parameters of the distributions are provided as input arrays. Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* be the shape specified as the parameter of the operator, and *m* be the dimension of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* will be an *m*-dimensional array that holds randomly drawn samples from the distribution which is parameterized by the input values at index *i*. If the shape parameter of the operator is not set, then one sample will be drawn per distribution and the output array has the same shape as the input arrays. Samples will always be returned as a floating point data type. Examples:: k = [ 20, 49 ] p = [ 0.4 , 0.77 ] // Draw a single sample for each distribution sample_negative_binomial(k, p) = [ 15., 16.] // Draw a vector containing two samples for each distribution sample_negative_binomial(k, p, shape=(2)) = [[ 15., 50.], [ 16., 12.]] Defined in ../src/operator/random/multisample_op.cc:L287 Parameters ---------- k : Symbol Limits of unsuccessful experiments. shape : Shape(tuple), optional, default=[] Shape to be sampled from each random distribution. dtype : {'None', 'float16', 'float32', 'float64'},optional, default='None' DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). p : Symbol Failure probabilities in each experiment. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def sample_normal(mu=None, sigma=None, shape=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Concurrent sampling from multiple normal distributions with parameters *mu* (mean) and *sigma* (standard deviation). 
The parameters of the distributions are provided as input arrays. Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* be the shape specified as the parameter of the operator, and *m* be the dimension of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* will be an *m*-dimensional array that holds randomly drawn samples from the distribution which is parameterized by the input values at index *i*. If the shape parameter of the operator is not set, then one sample will be drawn per distribution and the output array has the same shape as the input arrays. Examples:: mu = [ 0.0, 2.5 ] sigma = [ 1.0, 3.7 ] // Draw a single sample for each distribution sample_normal(mu, sigma) = [-0.56410581, 0.95934606] // Draw a vector containing two samples for each distribution sample_normal(mu, sigma, shape=(2)) = [[-0.56410581, 0.2928229 ], [ 0.95934606, 4.48287058]] Defined in ../src/operator/random/multisample_op.cc:L278 Parameters ---------- mu : Symbol Means of the distributions. shape : Shape(tuple), optional, default=[] Shape to be sampled from each random distribution. dtype : {'None', 'float16', 'float32', 'float64'},optional, default='None' DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). sigma : Symbol Standard deviations of the distributions. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def sample_poisson(lam=None, shape=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Concurrent sampling from multiple Poisson distributions with parameters lambda (rate). The parameters of the distributions are provided as an input array. Let *[s]* be the shape of the input array, *n* be the dimension of *[s]*, *[t]* be the shape specified as the parameter of the operator, and *m* be the dimension of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. For any valid *n*-dimensional index *i* with respect to the input array, *output[i]* will be an *m*-dimensional array that holds randomly drawn samples from the distribution which is parameterized by the input value at index *i*. If the shape parameter of the operator is not set, then one sample will be drawn per distribution and the output array has the same shape as the input array. Samples will always be returned as a floating point data type. Examples:: lam = [ 1.0, 8.5 ] // Draw a single sample for each distribution sample_poisson(lam) = [ 0., 13.] // Draw a vector containing two samples for each distribution sample_poisson(lam, shape=(2)) = [[ 0., 4.], [ 13., 8.]] Defined in ../src/operator/random/multisample_op.cc:L285 Parameters ---------- lam : Symbol Lambda (rate) parameters of the distributions. shape : Shape(tuple), optional, default=[] Shape to be sampled from each random distribution. dtype : {'None', 'float16', 'float32', 'float64'},optional, default='None' DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def sample_uniform(low=None, high=None, shape=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Concurrent sampling from multiple uniform distributions on the intervals given by *[low,high)*. The parameters of the distributions are provided as input arrays. 
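A minimal runnable sketch (assuming the imperative NDArray twin ``mx.nd.sample_uniform``)::

    import mxnet as mx

    low = mx.nd.array([0.0, 2.5])
    high = mx.nd.array([1.0, 3.7])

    out = mx.nd.sample_uniform(low=low, high=high, shape=(2,))
    print(out.shape)  # (2, 2): two samples from each [low_i, high_i) interval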
Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* be the shape specified as the parameter of the operator, and *m* be the dimension of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* will be an *m*-dimensional array that holds randomly drawn samples from the distribution which is parameterized by the input values at index *i*. If the shape parameter of the operator is not set, then one sample will be drawn per distribution and the output array has the same shape as the input arrays. Examples:: low = [ 0.0, 2.5 ] high = [ 1.0, 3.7 ] // Draw a single sample for each distribution sample_uniform(low, high) = [ 0.40451524, 3.18687344] // Draw a vector containing two samples for each distribution sample_uniform(low, high, shape=(2)) = [[ 0.40451524, 0.18017688], [ 3.18687344, 3.68352246]] Defined in ../src/operator/random/multisample_op.cc:L276 Parameters ---------- low : Symbol Lower bounds of the distributions. shape : Shape(tuple), optional, default=[] Shape to be sampled from each random distribution. dtype : {'None', 'float16', 'float32', 'float64'},optional, default='None' DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). high : Symbol Upper bounds of the distributions. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def scatter_nd(data=None, indices=None, shape=_Null, name=None, attr=None, out=None, **kwargs): r"""Scatters data into a new tensor according to indices. Given `data` with shape `(Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1})` and indices with shape `(M, Y_0, ..., Y_{K-1})`, the output will have shape `(X_0, X_1, ..., X_{N-1})`, where `M <= N`. If `M == N`, data shape should simply be `(Y_0, ..., Y_{K-1})`. The elements in output is defined as follows:: output[indices[0, y_0, ..., y_{K-1}], ..., indices[M-1, y_0, ..., y_{K-1}], x_M, ..., x_{N-1}] = data[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}] all other entries in output are 0. .. warning:: If the indices have duplicates, the result will be non-deterministic and the gradient of `scatter_nd` will not be correct!! Examples:: data = [2, 3, 0] indices = [[1, 1, 0], [0, 1, 0]] shape = (2, 2) scatter_nd(data, indices, shape) = [[0, 0], [2, 3]] data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] indices = [[0, 1], [1, 1]] shape = (2, 2, 2, 2) scatter_nd(data, indices, shape) = [[[[0, 0], [0, 0]], [[1, 2], [3, 4]]], [[[0, 0], [0, 0]], [[5, 6], [7, 8]]]] Parameters ---------- data : Symbol data indices : Symbol indices shape : Shape(tuple), required Shape of output. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def sgd_mom_update(weight=None, grad=None, mom=None, lr=_Null, momentum=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, lazy_update=_Null, name=None, attr=None, out=None, **kwargs): r"""Momentum update function for Stochastic Gradient Descent (SGD) optimizer. Momentum update has better convergence rates on neural networks. Mathematically it looks like below: .. math:: v_1 = \nabla J(W_0)\\ v_t = \gamma v_{t-1} - \nabla J(W_{t-1})\\ W_t = W_{t-1} + \alpha * v_t It updates the weights using:: v = momentum * v - gradient weight += learning_rate * v Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. 
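A hedged sketch of one momentum-SGD step (assuming the imperative NDArray twin ``mx.nd.sgd_mom_update``; ``mom`` is the velocity state, updated in place)::

    import mxnet as mx

    weight = mx.nd.ones((3,))
    grad = mx.nd.array([0.1, -0.2, 0.3])
    mom = mx.nd.zeros_like(weight)  # velocity state

    mx.nd.sgd_mom_update(weight, grad, mom, out=weight,
                         lr=0.1, momentum=0.9, wd=0.0)
    print(weight.asnumpy())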
However, if grad's storage type is ``row_sparse``, ``lazy_update`` is True and weight's storage type is the same as momentum's storage type, only the row slices whose indices appear in grad.indices are updated (for both weight and momentum):: for row in gradient.indices: v[row] = momentum[row] * v[row] - gradient[row] weight[row] += learning_rate * v[row] Defined in ../src/operator/optimizer_op.cc:L563 Parameters ---------- weight : Symbol Weight grad : Symbol Gradient mom : Symbol Momentum lr : float, required Learning rate momentum : float, optional, default=0 The decay rate of momentum estimates at each epoch. wd : float, optional, default=0 Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). lazy_update : boolean, optional, default=1 If true, lazy updates are applied if gradient's stype is row_sparse and both weight and momentum have the same stype name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def sgd_update(weight=None, grad=None, lr=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, lazy_update=_Null, name=None, attr=None, out=None, **kwargs): r"""Update function for Stochastic Gradient Descent (SGD) optimizer. It updates the weights using:: weight = weight - learning_rate * (gradient + wd * weight) However, if gradient is of ``row_sparse`` storage type and ``lazy_update`` is True, only the row slices whose indices appear in grad.indices are updated:: for row in gradient.indices: weight[row] = weight[row] - learning_rate * (gradient[row] + wd * weight[row]) Defined in ../src/operator/optimizer_op.cc:L522 Parameters ---------- weight : Symbol Weight grad : Symbol Gradient lr : float, required Learning rate wd : float, optional, default=0 Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). lazy_update : boolean, optional, default=1 If true, lazy updates are applied if gradient's stype is row_sparse. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def shape_array(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns a 1D int64 array containing the shape of data. Example:: shape_array([[1,2,3,4], [5,6,7,8]]) = [2,4] Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L573 Parameters ---------- data : Symbol Input Array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def shuffle(data=None, name=None, attr=None, out=None, **kwargs): r"""Randomly shuffle the elements. This shuffles the array along the first axis. The order of the elements in each subarray does not change. 
For example, if a 2D array is given, the order of the rows randomly changes, but the order of the elements in each row does not change. Parameters ---------- data : Symbol Data to be shuffled. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def sigmoid(data=None, name=None, attr=None, out=None, **kwargs): r"""Computes sigmoid of x element-wise. .. math:: y = 1 / (1 + exp(-x)) The storage type of ``sigmoid`` output is always dense Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L119 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def sign(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise sign of the input. Example:: sign([-2, 0, 3]) = [-1, 0, 1] The storage type of ``sign`` output depends upon the input storage type: - sign(default) = default - sign(row_sparse) = row_sparse - sign(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L758 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def signsgd_update(weight=None, grad=None, lr=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, name=None, attr=None, out=None, **kwargs): r"""Update function for SignSGD optimizer. .. math:: g_t = \nabla J(W_{t-1})\\ W_t = W_{t-1} - \eta_t \text{sign}(g_t) It updates the weights using:: weight = weight - learning_rate * sign(gradient) .. note:: - sparse ndarray not supported for this optimizer yet. Defined in ../src/operator/optimizer_op.cc:L63 Parameters ---------- weight : Symbol Weight grad : Symbol Gradient lr : float, required Learning rate wd : float, optional, default=0 Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def signum_update(weight=None, grad=None, mom=None, lr=_Null, momentum=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, wd_lh=_Null, name=None, attr=None, out=None, **kwargs): r"""SIGN momentUM (Signum) optimizer. .. math:: g_t = \nabla J(W_{t-1})\\ m_t = \beta m_{t-1} + (1 - \beta) g_t\\ W_t = W_{t-1} - \eta_t \text{sign}(m_t) It updates the weights using:: state = momentum * state + (1-momentum) * gradient weight = weight - learning_rate * sign(state) Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. .. note:: - sparse ndarray not supported for this optimizer yet. Defined in ../src/operator/optimizer_op.cc:L92 Parameters ---------- weight : Symbol Weight grad : Symbol Gradient mom : Symbol Momentum lr : float, required Learning rate momentum : float, optional, default=0 The decay rate of momentum estimates at each epoch. wd : float, optional, default=0 Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. 
rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). wd_lh : float, optional, default=0 The amount of weight decay that does not go into gradient/momentum calculations; otherwise, weight decay is applied algorithmically only. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def sin(data=None, name=None, attr=None, out=None, **kwargs): r"""Computes the element-wise sine of the input array. The input should be in radians (:math:`2\pi` rad equals 360 degrees). .. math:: sin([0, \pi/4, \pi/2]) = [0, 0.707, 1] The storage type of ``sin`` output depends upon the input storage type: - sin(default) = default - sin(row_sparse) = row_sparse - sin(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L47 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def sinh(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns the hyperbolic sine of the input array, computed element-wise. .. math:: sinh(x) = 0.5\times(exp(x) - exp(-x)) The storage type of ``sinh`` output depends upon the input storage type: - sinh(default) = default - sinh(row_sparse) = row_sparse - sinh(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L371 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def size_array(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns a 1D int64 array containing the size of data. Example:: size_array([[1,2,3,4], [5,6,7,8]]) = [8] Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L624 Parameters ---------- data : Symbol Input Array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def slice(data=None, begin=_Null, end=_Null, step=_Null, name=None, attr=None, out=None, **kwargs): r"""Slices a region of the array. .. note:: ``crop`` is deprecated. Use ``slice`` instead. This function returns a sliced array between the indices given by `begin` and `end` with the corresponding `step`. For an input array of ``shape=(d_0, d_1, ..., d_n-1)``, slice operation with ``begin=(b_0, b_1...b_m-1)``, ``end=(e_0, e_1, ..., e_m-1)``, and ``step=(s_0, s_1, ..., s_m-1)``, where m <= n, results in an array with the shape ``(|e_0-b_0|/|s_0|, ..., |e_m-1-b_m-1|/|s_m-1|, d_m, ..., d_n-1)``. The resulting array's *k*-th dimension contains elements from the *k*-th dimension of the input array starting from index ``b_k`` (inclusive) with step ``s_k`` until reaching ``e_k`` (exclusive). If the *k*-th element is `None` in `begin`, `end`, or `step`, the following rules are used to set the default values: if `s_k` is `None`, set `s_k=1`; if `s_k > 0`, set `b_k=0`, `e_k=d_k`; else, set `b_k=d_k-1`, `e_k=-1`. The storage type of ``slice`` output depends on storage types of inputs - slice(csr) = csr - otherwise, ``slice`` generates output with default storage .. note:: When input data storage type is csr, it only supports step=(), or step=(None,), or step=(1,) to generate a csr output. For other step parameter values, it falls back to slicing a dense tensor.
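Before the symbolic examples below, a runnable hedged sketch (assuming the imperative NDArray twin ``mx.nd.slice``)::

    import mxnet as mx

    x = mx.nd.arange(12).reshape((3, 4))

    print(mx.nd.slice(x, begin=(0, 1), end=(2, 4)).asnumpy())
    # A negative step reverses the traversal of axis 0.
    print(mx.nd.slice(x, begin=(None, 0), end=(None, 3), step=(-1, 2)).asnumpy())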
Example:: x = [[ 1., 2., 3., 4.], [ 5., 6., 7., 8.], [ 9., 10., 11., 12.]] slice(x, begin=(0,1), end=(2,4)) = [[ 2., 3., 4.], [ 6., 7., 8.]] slice(x, begin=(None, 0), end=(None, 3), step=(-1, 2)) = [[9., 11.], [5., 7.], [1., 3.]] Defined in ../src/operator/tensor/matrix_op.cc:L498 Parameters ---------- data : Symbol Source input begin : Shape(tuple), required starting indices for the slice operation, supports negative indices. end : Shape(tuple), required ending indices for the slice operation, supports negative indices. step : Shape(tuple), optional, default=[] step for the slice operation, supports negative values. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def slice_axis(data=None, axis=_Null, begin=_Null, end=_Null, name=None, attr=None, out=None, **kwargs): r"""Slices along a given axis. Returns an array slice along a given `axis` starting from the `begin` index to the `end` index. Examples:: x = [[ 1., 2., 3., 4.], [ 5., 6., 7., 8.], [ 9., 10., 11., 12.]] slice_axis(x, axis=0, begin=1, end=3) = [[ 5., 6., 7., 8.], [ 9., 10., 11., 12.]] slice_axis(x, axis=1, begin=0, end=2) = [[ 1., 2.], [ 5., 6.], [ 9., 10.]] slice_axis(x, axis=1, begin=-3, end=-1) = [[ 2., 3.], [ 6., 7.], [ 10., 11.]] Defined in ../src/operator/tensor/matrix_op.cc:L587 Parameters ---------- data : Symbol Source input axis : int, required Axis along which to be sliced, supports negative indexes. begin : int, required The beginning index along the axis to be sliced, supports negative indexes. end : int or None, required The ending index along the axis to be sliced, supports negative indexes. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def slice_like(data=None, shape_like=None, axes=_Null, name=None, attr=None, out=None, **kwargs): r"""Slices a region of the array like the shape of another array. This function is similar to ``slice``; however, the `begin` indices are always `0` and the `end` indices of specific axes are inferred from the second input `shape_like`. Given a second `shape_like` input of ``shape=(d_0, d_1, ..., d_n-1)``, a ``slice_like`` operator with the default empty `axes` performs the following operation: `` out = slice(input, begin=(0, 0, ..., 0), end=(d_0, d_1, ..., d_n-1))``. When `axes` is not empty, it is used to specify which axes are being sliced. Given a 4-d input data, ``slice_like`` operator with ``axes=(0, 2, -1)`` will perform the following operation: `` out = slice(input, begin=(0, 0, 0, 0), end=(d_0, None, d_2, d_3))``. Note that the first and second inputs are allowed to have different dimensions; however, you have to make sure the `axes` are specified and do not exceed the dimension limits. For example, given `input_1` with ``shape=(2,3,4,5)`` and `input_2` with ``shape=(1,2,3)``, it is not allowed to use: `` out = slice_like(a, b)`` because ndim of `input_1` is 4, and ndim of `input_2` is 3. The following is allowed in this situation: `` out = slice_like(a, b, axes=(0, 2))`` Example:: x = [[ 1., 2., 3., 4.], [ 5., 6., 7., 8.], [ 9., 10., 11., 12.]] y = [[ 0., 0., 0.], [ 0., 0., 0.]] slice_like(x, y) = [[ 1., 2., 3.] [ 5., 6., 7.]] slice_like(x, y, axes=(0, 1)) = [[ 1., 2., 3.] [ 5., 6., 7.]] slice_like(x, y, axes=(0)) = [[ 1., 2., 3., 4.] [ 5., 6., 7., 8.]] slice_like(x, y, axes=(-1)) = [[ 1., 2., 3.] [ 5., 6., 7.]
[ 9., 10., 11.]] Defined in ../src/operator/tensor/matrix_op.cc:L641 Parameters ---------- data : Symbol Source input shape_like : Symbol Shape like input axes : Shape(tuple), optional, default=[] List of axes on which input data will be sliced according to the corresponding size of the second input. By default will slice on all axes. Negative axes are supported. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def smooth_l1(data=None, scalar=_Null, name=None, attr=None, out=None, **kwargs): r"""Calculate Smooth L1 Loss(lhs, scalar) by summing .. math:: f(x) = \begin{cases} (\sigma x)^2/2,& \text{if }x < 1/\sigma^2\\ |x|-0.5/\sigma^2,& \text{otherwise} \end{cases} where :math:`x` is an element of the tensor *lhs* and :math:`\sigma` is the scalar. Example:: smooth_l1([1, 2, 3, 4]) = [0.5, 1.5, 2.5, 3.5] smooth_l1([1, 2, 3, 4], scalar=1) = [0.5, 1.5, 2.5, 3.5] Defined in ../src/operator/tensor/elemwise_binary_scalar_op_extended.cc:L109 Parameters ---------- data : Symbol source input scalar : float scalar input name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def softmax(data=None, length=None, axis=_Null, temperature=_Null, dtype=_Null, use_length=_Null, name=None, attr=None, out=None, **kwargs): r"""Applies the softmax function. The resulting array contains elements in the range (0,1) and the elements along the given axis sum up to 1. .. math:: softmax(\mathbf{z/t})_j = \frac{e^{z_j/t}}{\sum_{k=1}^K e^{z_k/t}} for :math:`j = 1, ..., K` t is the temperature parameter in softmax function. By default, t equals 1.0 Example:: x = [[ 1. 1. 1.] [ 1. 1. 1.]] softmax(x,axis=0) = [[ 0.5 0.5 0.5] [ 0.5 0.5 0.5]] softmax(x,axis=1) = [[ 0.33333334, 0.33333334, 0.33333334], [ 0.33333334, 0.33333334, 0.33333334]] Defined in ../src/operator/nn/softmax.cc:L136 Parameters ---------- data : Symbol The input array. length : Symbol The length array. axis : int, optional, default='-1' The axis along which to compute softmax. temperature : double or None, optional, default=None Temperature parameter in softmax dtype : {None, 'float16', 'float32', 'float64'},optional, default='None' DType of the output in case this can't be inferred. Defaults to the same as input's dtype if not defined (dtype=None). use_length : boolean or None, optional, default=0 Whether to use the length input as a mask over the data input. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def softmax_cross_entropy(data=None, label=None, name=None, attr=None, out=None, **kwargs): r"""Calculate cross entropy of softmax output and one-hot label. - This operator computes the cross entropy in two steps: - Applies softmax function on the input array. - Computes and returns the cross entropy loss between the softmax output and the labels. - The softmax function and cross entropy loss is given by: - Softmax Function: .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} - Cross Entropy Function: .. math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) Example:: x = [[1, 2, 3], [11, 7, 5]] label = [2, 0] softmax(x) = [[0.09003057, 0.24472848, 0.66524094], [0.97962922, 0.01794253, 0.00242826]] softmax_cross_entropy(data, label) = - log(0.66524084) - log(0.97962922) = 0.4281871 Defined in ../src/operator/loss_binary_op.cc:L59 Parameters ---------- data : Symbol Input data label : Symbol Input label name : string, optional. 
Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def softmin(data=None, axis=_Null, temperature=_Null, dtype=_Null, use_length=_Null, name=None, attr=None, out=None, **kwargs): r"""Applies the softmin function. The resulting array contains elements in the range (0,1) and the elements along the given axis sum up to 1. .. math:: softmin(\mathbf{z/t})_j = \frac{e^{-z_j/t}}{\sum_{k=1}^K e^{-z_k/t}} for :math:`j = 1, ..., K` t is the temperature parameter in the softmin function. By default, t equals 1.0 Example:: x = [[ 1. 2. 3.] [ 3. 2. 1.]] softmin(x,axis=0) = [[ 0.88079703, 0.5, 0.11920292], [ 0.11920292, 0.5, 0.88079703]] softmin(x,axis=1) = [[ 0.66524094, 0.24472848, 0.09003057], [ 0.09003057, 0.24472848, 0.66524094]] Defined in ../src/operator/nn/softmin.cc:L57 Parameters ---------- data : Symbol The input array. axis : int, optional, default='-1' The axis along which to compute softmin. temperature : double or None, optional, default=None Temperature parameter in softmin dtype : {None, 'float16', 'float32', 'float64'},optional, default='None' DType of the output in case this can't be inferred. Defaults to the same as input's dtype if not defined (dtype=None). use_length : boolean or None, optional, default=0 Whether to use the length input as a mask over the data input. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def softsign(data=None, name=None, attr=None, out=None, **kwargs): r"""Computes softsign of x element-wise. .. math:: y = x / (1 + abs(x)) The storage type of ``softsign`` output is always dense Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L191 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def sort(data=None, axis=_Null, is_ascend=_Null, name=None, attr=None, out=None, **kwargs): r"""Returns a sorted copy of an input array along the given axis. Examples:: x = [[ 1, 4], [ 3, 1]] // sorts along the last axis sort(x) = [[ 1., 4.], [ 1., 3.]] // flattens and then sorts sort(x, axis=None) = [ 1., 1., 3., 4.] // sorts along the first axis sort(x, axis=0) = [[ 1., 1.], [ 3., 4.]] // in descending order sort(x, is_ascend=0) = [[ 4., 1.], [ 3., 1.]] Defined in ../src/operator/tensor/ordering_op.cc:L133 Parameters ---------- data : Symbol The input array axis : int or None, optional, default='-1' Axis along which to sort the input tensor. If not given, the flattened array is used. Default is -1. is_ascend : boolean, optional, default=1 Whether to sort in ascending or descending order. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def space_to_depth(data=None, block_size=_Null, name=None, attr=None, out=None, **kwargs): r"""Rearranges (permutes) blocks of spatial data into depth. Similar to ONNX SpaceToDepth operator: https://github.com/onnx/onnx/blob/master/docs/Operators.md#SpaceToDepth The output is a new tensor where the values from height and width dimension are moved to the depth dimension. The reverse of this operation is ``depth_to_space``. .. math:: \begin{gather*} x \prime = reshape(x, [N, C, H / block\_size, block\_size, W / block\_size, block\_size]) \\ x \prime \prime = transpose(x \prime, [0, 3, 5, 1, 2, 4]) \\ y = reshape(x \prime \prime, [N, C * (block\_size ^ 2), H / block\_size, W / block\_size]) \end{gather*} where :math:`x` is an input tensor with default layout as :math:`[N, C, H, W]`: [batch, channels, height, width] and :math:`y` is the output tensor of layout :math:`[N, C * (block\_size ^ 2), H / block\_size, W / block\_size]` Example:: x = [[[[0, 6, 1, 7, 2, 8], [12, 18, 13, 19, 14, 20], [3, 9, 4, 10, 5, 11], [15, 21, 16, 22, 17, 23]]]] space_to_depth(x, 2) = [[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]], [[12, 13, 14], [15, 16, 17]], [[18, 19, 20], [21, 22, 23]]]] Defined in ../src/operator/tensor/matrix_op.cc:L1035 Parameters ---------- data : Symbol Input ndarray block_size : int, required Blocks of [block_size, block_size] are moved name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def split(data=None, num_outputs=_Null, axis=_Null, squeeze_axis=_Null, name=None, attr=None, out=None, **kwargs): r"""Splits an array along a particular axis into multiple sub-arrays. .. note:: ``SliceChannel`` is deprecated. Use ``split`` instead. **Note** that `num_outputs` should evenly divide the length of the axis along which to split the array. Example:: x = [[[ 1.] [ 2.]] [[ 3.] [ 4.]] [[ 5.] [ 6.]]] x.shape = (3, 2, 1) y = split(x, axis=1, num_outputs=2) // a list of 2 arrays with shape (3, 1, 1) y = [[[ 1.]] [[ 3.]] [[ 5.]]] [[[ 2.]] [[ 4.]] [[ 6.]]] y[0].shape = (3, 1, 1) z = split(x, axis=0, num_outputs=3) // a list of 3 arrays with shape (1, 2, 1) z = [[[ 1.] [ 2.]]] [[[ 3.] [ 4.]]] [[[ 5.] [ 6.]]] z[0].shape = (1, 2, 1) `squeeze_axis=1` removes the axis with length 1 from the shapes of the output arrays. **Note** that setting `squeeze_axis` to ``1`` removes axis with length 1 only along the `axis` which it is split. Also `squeeze_axis` can be set to true only if ``input.shape[axis] == num_outputs``. Example:: z = split(x, axis=0, num_outputs=3, squeeze_axis=1) // a list of 3 arrays with shape (2, 1) z = [[ 1.] [ 2.]] [[ 3.] [ 4.]] [[ 5.] [ 6.]] z[0].shape = (2, 1) Defined in ../src/operator/slice_channel.cc:L107 Parameters ---------- data : Symbol The input num_outputs : int, required Number of splits. Note that this should evenly divide the length of the `axis`. axis : int, optional, default='1' Axis along which to split. squeeze_axis : boolean, optional, default=0 If true, removes the axis with length 1 from the shapes of the output arrays. **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1 only along the `axis` which it is split. Also `squeeze_axis` can be set to ``true`` only if ``input.shape[axis] == num_outputs``. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def sqrt(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise square-root value of the input. .. math:: \textrm{sqrt}(x) = \sqrt{x} Example:: sqrt([4, 9, 16]) = [2, 3, 4] The storage type of ``sqrt`` output depends upon the input storage type: - sqrt(default) = default - sqrt(row_sparse) = row_sparse - sqrt(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_pow.cc:L170 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol.
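A small runnable sketch of the storage-type rules above (assuming the imperative NDArray twin ``mx.nd.sqrt`` and the ``NDArray.tostype`` conversion)::

    import mxnet as mx

    x = mx.nd.array([[4, 0, 0], [0, 9, 16]]).tostype('row_sparse')
    y = mx.nd.sqrt(x)
    print(y.stype)      # 'row_sparse': sparsity is preserved
    print(y.asnumpy())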
""" return (0,) def square(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise squared value of the input. .. math:: square(x) = x^2 Example:: square([2, 3, 4]) = [4, 9, 16] The storage type of ``square`` output depends upon the input storage type: - square(default) = default - square(row_sparse) = row_sparse - square(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_pow.cc:L119 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def squeeze(data=None, axis=_Null, name=None, attr=None, out=None, **kwargs): r"""Remove single-dimensional entries from the shape of an array. Same behavior of defining the output tensor shape as numpy.squeeze for the most of cases. See the following note for exception. Examples:: data = [[[0], [1], [2]]] squeeze(data) = [0, 1, 2] squeeze(data, axis=0) = [[0], [1], [2]] squeeze(data, axis=2) = [[0, 1, 2]] squeeze(data, axis=(0, 2)) = [0, 1, 2] .. Note:: The output of this operator will keep at least one dimension not removed. For example, squeeze([[[4]]]) = [4], while in numpy.squeeze, the output will become a scalar. Parameters ---------- data : Symbol data to squeeze axis : Shape or None, optional, default=None Selects a subset of the single-dimensional entries in the shape. If an axis is selected with shape entry greater than one, an error is raised. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def stack(*data, **kwargs): r"""Join a sequence of arrays along a new axis. The axis parameter specifies the index of the new axis in the dimensions of the result. For example, if axis=0 it will be the first dimension and if axis=-1 it will be the last dimension. Examples:: x = [1, 2] y = [3, 4] stack(x, y) = [[1, 2], [3, 4]] stack(x, y, axis=1) = [[1, 3], [2, 4]] This function support variable length of positional input. Parameters ---------- data : Symbol[] List of arrays to stack axis : int, optional, default='0' The axis in the result array along which the input arrays are stacked. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def stop_gradient(data=None, name=None, attr=None, out=None, **kwargs): r"""Stops gradient computation. Stops the accumulated gradient of the inputs from flowing through this operator in the backward direction. In other words, this operator prevents the contribution of its inputs to be taken into account for computing gradients. Example:: v1 = [1, 2] v2 = [0, 1] a = Variable('a') b = Variable('b') b_stop_grad = stop_gradient(3 * b) loss = MakeLoss(b_stop_grad + a) executor = loss.simple_bind(ctx=cpu(), a=(1,2), b=(1,2)) executor.forward(is_train=True, a=v1, b=v2) executor.outputs [ 1. 5.] executor.backward() executor.grad_arrays [ 0. 0.] [ 1. 1.] Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L325 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def sum(data=None, axis=_Null, keepdims=_Null, exclude=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the sum of array elements over given axes. .. Note:: `sum` and `sum_axis` are equivalent. For ndarray of csr storage type summation along axis 0 and axis 1 is supported. Setting keepdims or exclude to True will cause a fallback to dense operator. 
Example:: data = [[[1, 2], [2, 3], [1, 3]], [[1, 4], [4, 3], [5, 2]], [[7, 1], [7, 2], [7, 3]]] sum(data, axis=1) [[ 4. 8.] [ 10. 9.] [ 21. 6.]] sum(data, axis=[1,2]) [ 12. 19. 27.] data = [[1, 2, 0], [3, 0, 1], [4, 1, 0]] csr = cast_storage(data, 'csr') sum(csr, axis=0) [ 8. 3. 1.] sum(csr, axis=1) [ 3. 4. 5.] Defined in ../src/operator/tensor/broadcast_reduce_sum_value.cc:L67 Parameters ---------- data : Symbol The input axis : Shape or None, optional, default=None The axis or axes along which to perform the reduction. The default, `axis=()`, will compute over all elements into a scalar array with shape `(1,)`. If `axis` is int, a reduction is performed on a particular axis. If `axis` is a tuple of ints, a reduction is performed on all the axes specified in the tuple. If `exclude` is true, reduction will be performed on the axes that are NOT in axis instead. Negative values means indexing from right to left. keepdims : boolean, optional, default=0 If this is set to `True`, the reduced axes are left in the result as dimension with size one. exclude : boolean, optional, default=0 Whether to perform reduction on axis that are NOT in axis instead. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def sum_axis(data=None, axis=_Null, keepdims=_Null, exclude=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes the sum of array elements over given axes. .. Note:: `sum` and `sum_axis` are equivalent. For ndarray of csr storage type summation along axis 0 and axis 1 is supported. Setting keepdims or exclude to True will cause a fallback to dense operator. Example:: data = [[[1, 2], [2, 3], [1, 3]], [[1, 4], [4, 3], [5, 2]], [[7, 1], [7, 2], [7, 3]]] sum(data, axis=1) [[ 4. 8.] [ 10. 9.] [ 21. 6.]] sum(data, axis=[1,2]) [ 12. 19. 27.] data = [[1, 2, 0], [3, 0, 1], [4, 1, 0]] csr = cast_storage(data, 'csr') sum(csr, axis=0) [ 8. 3. 1.] sum(csr, axis=1) [ 3. 4. 5.] Defined in ../src/operator/tensor/broadcast_reduce_sum_value.cc:L67 Parameters ---------- data : Symbol The input axis : Shape or None, optional, default=None The axis or axes along which to perform the reduction. The default, `axis=()`, will compute over all elements into a scalar array with shape `(1,)`. If `axis` is int, a reduction is performed on a particular axis. If `axis` is a tuple of ints, a reduction is performed on all the axes specified in the tuple. If `exclude` is true, reduction will be performed on the axes that are NOT in axis instead. Negative values means indexing from right to left. keepdims : boolean, optional, default=0 If this is set to `True`, the reduced axes are left in the result as dimension with size one. exclude : boolean, optional, default=0 Whether to perform reduction on axis that are NOT in axis instead. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def swapaxes(data=None, dim1=_Null, dim2=_Null, name=None, attr=None, out=None, **kwargs): r"""Interchanges two axes of an array. Examples:: x = [[1, 2, 3]]) swapaxes(x, 0, 1) = [[ 1], [ 2], [ 3]] x = [[[ 0, 1], [ 2, 3]], [[ 4, 5], [ 6, 7]]] // (2,2,2) array swapaxes(x, 0, 2) = [[[ 0, 4], [ 2, 6]], [[ 1, 5], [ 3, 7]]] Defined in ../src/operator/swapaxis.cc:L70 Parameters ---------- data : Symbol Input array. dim1 : int, optional, default='0' the first axis to be swapped. dim2 : int, optional, default='0' the second axis to be swapped. name : string, optional. Name of the resulting symbol. 
Returns ------- Symbol The result symbol. """ return (0,) def take(a=None, indices=None, axis=_Null, mode=_Null, name=None, attr=None, out=None, **kwargs): r"""Takes elements from an input array along the given axis. This function slices the input array along a particular axis with the provided indices. Given data tensor of rank r >= 1, and indices tensor of rank q, gather entries of the axis dimension of data (by default outer-most one as axis=0) indexed by indices, and concatenates them in an output tensor of rank q + (r - 1). Examples:: x = [4. 5. 6.] // Trivial case, take the second element along the first axis. take(x, [1]) = [ 5. ] // The other trivial case, axis=-1, take the third element along the first axis take(x, [3], axis=-1, mode='clip') = [ 6. ] x = [[ 1., 2.], [ 3., 4.], [ 5., 6.]] // In this case we will get rows 0 and 1, then 1 and 2. Along axis 0 take(x, [[0,1],[1,2]]) = [[[ 1., 2.], [ 3., 4.]], [[ 3., 4.], [ 5., 6.]]] // In this case we will get rows 0 and 1, then 1 and 2 (calculated by wrapping around). // Along axis 1 take(x, [[0, 3], [-1, -2]], axis=1, mode='wrap') = [[[ 1. 2.] [ 2. 1.]] [[ 3. 4.] [ 4. 3.]] [[ 5. 6.] [ 6. 5.]]] The storage type of ``take`` output depends upon the input storage type: - take(default, default) = default - take(csr, default, axis=0) = csr Defined in ../src/operator/tensor/indexing_op.cc:L777 Parameters ---------- a : Symbol The input array. indices : Symbol The indices of the values to be extracted. axis : int, optional, default='0' The axis of input array to be taken. For input tensor of rank r, it could be in the range of [-r, r-1] mode : {'clip', 'raise', 'wrap'},optional, default='clip' Specify how out-of-bound indices behave. Default is "clip". "clip" means clip to the range. So, if all indices mentioned are too large, they are replaced by the index that addresses the last element along an axis. "wrap" means to wrap around. "raise" means to raise an error when an index is out of range. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def tan(data=None, name=None, attr=None, out=None, **kwargs): r"""Computes the element-wise tangent of the input array. The input should be in radians (:math:`2\pi` rad equals 360 degrees). .. math:: tan([0, \pi/4, \pi/2]) = [0, 1, -inf] The storage type of ``tan`` output depends upon the input storage type: - tan(default) = default - tan(row_sparse) = row_sparse - tan(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L140 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def tanh(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns the hyperbolic tangent of the input array, computed element-wise. .. math:: tanh(x) = sinh(x) / cosh(x) The storage type of ``tanh`` output depends upon the input storage type: - tanh(default) = default - tanh(row_sparse) = row_sparse - tanh(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L451 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def tile(data=None, reps=_Null, name=None, attr=None, out=None, **kwargs): r"""Repeats the whole array multiple times. If ``reps`` has length *d* and the input array has dimension *n*, there are three cases: - **n=d**.
Repeat *i*-th dimension of the input by ``reps[i]`` times:: x = [[1, 2], [3, 4]] tile(x, reps=(2,3)) = [[ 1., 2., 1., 2., 1., 2.], [ 3., 4., 3., 4., 3., 4.], [ 1., 2., 1., 2., 1., 2.], [ 3., 4., 3., 4., 3., 4.]] - **n>d**. ``reps`` is promoted to length *n* by pre-pending 1's to it. Thus for an input shape ``(2,3)``, ``reps=(2,)`` is treated as ``(1,2)``:: tile(x, reps=(2,)) = [[ 1., 2., 1., 2.], [ 3., 4., 3., 4.]] - **n<d**. The input is promoted to be d-dimensional by prepending new axes. So a shape ``(2,2)`` array is promoted to ``(1,2,2)`` for 3-D replication:: tile(x, reps=(2,2,3)) = [[[ 1., 2., 1., 2., 1., 2.], [ 3., 4., 3., 4., 3., 4.], [ 1., 2., 1., 2., 1., 2.], [ 3., 4., 3., 4., 3., 4.]], [[ 1., 2., 1., 2., 1., 2.], [ 3., 4., 3., 4., 3., 4.], [ 1., 2., 1., 2., 1., 2.], [ 3., 4., 3., 4., 3., 4.]]] Defined in ../src/operator/tensor/matrix_op.cc:L812 Parameters ---------- data : Symbol Input data array reps : Shape(tuple), required The number of times for repeating the tensor a. Each dim size of reps must be a positive integer. If reps has length d, the result will have dimension of max(d, a.ndim); If a.ndim < d, a is promoted to be d-dimensional by prepending new axes. If a.ndim > d, reps is promoted to a.ndim by pre-pending 1's to it. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def topk(data=None, axis=_Null, k=_Null, ret_typ=_Null, is_ascend=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Returns the indices of the top *k* elements in an input array along the given axis (by default). If ret_typ is set to 'value', returns the values of the top *k* elements (instead of indices). In case of ret_typ = 'both', both values and indices are returned. The returned elements will be sorted. Examples:: x = [[ 0.3, 0.2, 0.4], [ 0.1, 0.3, 0.2]] // returns an index of the largest element on last axis topk(x) = [[ 2.], [ 1.]] // returns the value of top-2 largest elements on last axis topk(x, ret_typ='value', k=2) = [[ 0.4, 0.3], [ 0.3, 0.2]] // returns the value of top-2 smallest elements on last axis topk(x, ret_typ='value', k=2, is_ascend=1) = [[ 0.2 , 0.3], [ 0.1 , 0.2]] // returns the value of top-2 largest elements on axis 0 topk(x, axis=0, ret_typ='value', k=2) = [[ 0.3, 0.3, 0.4], [ 0.1, 0.2, 0.2]] // flattens and then returns list of both values and indices topk(x, ret_typ='both', k=2) = [[[ 0.4, 0.3], [ 0.3, 0.2]] , [[ 2., 0.], [ 1., 2.]]] Defined in ../src/operator/tensor/ordering_op.cc:L68 Parameters ---------- data : Symbol The input array axis : int or None, optional, default='-1' Axis along which to choose the top k indices. If not given, the flattened array is used. Default is -1. k : int, optional, default='1' Number of top elements to select; should always be smaller than or equal to the element number in the given axis. A global sort is performed if k < 1. ret_typ : {'both', 'indices', 'mask', 'value'},optional, default='indices' The return type. "value" means to return the top k values, "indices" means to return the indices of the top k values, "mask" means to return a mask array containing 0 and 1. 1 means the top k values. "both" means to return a list of both values and indices of top k elements. is_ascend : boolean, optional, default=0 Whether to choose k largest or k smallest elements. Top K largest elements will be chosen if set to false.
dtype : {'float16', 'float32', 'float64', 'int32', 'int64', 'uint8'}, optional, default='float32' DType of the output indices when ret_typ is "indices" or "both". An error will be raised if the selected data type cannot precisely represent the indices. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def transpose(data=None, axes=_Null, name=None, attr=None, out=None, **kwargs): r"""Permutes the dimensions of an array. Examples:: x = [[ 1, 2], [ 3, 4]] transpose(x) = [[ 1., 3.], [ 2., 4.]] x = [[[ 1., 2.], [ 3., 4.]], [[ 5., 6.], [ 7., 8.]]] transpose(x) = [[[ 1., 5.], [ 3., 7.]], [[ 2., 6.], [ 4., 8.]]] transpose(x, axes=(1,0,2)) = [[[ 1., 2.], [ 5., 6.]], [[ 3., 4.], [ 7., 8.]]] Defined in ../src/operator/tensor/matrix_op.cc:L343 Parameters ---------- data : Symbol Source input axes : Shape(tuple), optional, default=[] Target axis order. By default the axes will be inverted. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def trunc(data=None, name=None, attr=None, out=None, **kwargs): r"""Return the element-wise truncated value of the input. The truncated value of the scalar x is the nearest integer i which is closer to zero than x is. In short, the fractional part of the signed number x is discarded. Example:: trunc([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-2., -1., 1., 1., 2.] The storage type of ``trunc`` output depends upon the input storage type: - trunc(default) = default - trunc(row_sparse) = row_sparse - trunc(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L854 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def uniform(low=_Null, high=_Null, shape=_Null, ctx=_Null, dtype=_Null, name=None, attr=None, out=None, **kwargs): r"""Draw random samples from a uniform distribution. .. note:: The existing alias ``uniform`` is deprecated. Samples are uniformly distributed over the half-open interval *[low, high)* (includes *low*, but excludes *high*). Example:: uniform(low=0, high=1, shape=(2,2)) = [[ 0.60276335, 0.85794562], [ 0.54488319, 0.84725171]] Defined in ../src/operator/random/sample_op.cc:L96 Parameters ---------- low : float, optional, default=0 Lower bound of the distribution. high : float, optional, default=1 Upper bound of the distribution. shape : Shape(tuple), optional, default=None Shape of the output. ctx : string, optional, default='' Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. dtype : {'None', 'float16', 'float32', 'float64'}, optional, default='None' DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def unravel_index(data=None, shape=_Null, name=None, attr=None, out=None, **kwargs): r"""Converts an array of flat indices into a batch of index arrays. The operator follows numpy conventions so a single multi-index is given by a column of the output matrix. The leading dimension may be left unspecified by using -1 as a placeholder. 
Examples:: A = [22,41,37] unravel(A, shape=(7,6)) = [[3,6,6],[4,5,1]] unravel(A, shape=(-1,6)) = [[3,6,6],[4,5,1]] Defined in ../src/operator/tensor/ravel.cc:L68 Parameters ---------- data : Symbol Array of flat indices shape : Shape(tuple), optional, default=None Shape of the array into which the multi-indices apply. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def where(condition=None, x=None, y=None, name=None, attr=None, out=None, **kwargs): r"""Return the elements, either from x or y, depending on the condition. Given three ndarrays, condition, x, and y, return an ndarray with the elements from x or y, depending on whether the elements from condition are true or false. x and y must have the same shape. If condition has the same shape as x, each element in the output array is from x if the corresponding element in the condition is true, and from y if false. If condition does not have the same shape as x, it must be a 1D array whose size is the same as x's first dimension size. Each row of the output array is from x's row if the corresponding element from condition is true, and from y's row if false. Note that all non-zero values are interpreted as ``True`` in condition. Examples:: x = [[1, 2], [3, 4]] y = [[5, 6], [7, 8]] cond = [[0, 1], [-1, 0]] where(cond, x, y) = [[5, 2], [3, 8]] csr_cond = cast_storage(cond, 'csr') where(csr_cond, x, y) = [[5, 2], [3, 8]] Defined in ../src/operator/tensor/control_flow_op.cc:L57 Parameters ---------- condition : Symbol condition array x : Symbol y : Symbol name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def zeros_like(data=None, name=None, attr=None, out=None, **kwargs): r"""Return an array of zeros with the same shape, type and storage type as the input array. The storage type of ``zeros_like`` output depends on the storage type of the input - zeros_like(row_sparse) = row_sparse - zeros_like(csr) = csr - zeros_like(default) = default Examples:: x = [[ 1., 1., 1.], [ 1., 1., 1.]] zeros_like(x) = [[ 0., 0., 0.], [ 0., 0., 0.]] Parameters ---------- data : Symbol The input name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. 
""" return (0,) __all__ = ['Activation', 'BNStatsFinalize', 'BatchNorm', 'BatchNormAddRelu', 'BatchNorm_v1', 'BilinearSampler', 'BlockGrad', 'CTCLoss', 'Cast', 'Concat', 'Convolution', 'Convolution_v1', 'Correlation', 'Crop', 'CuDNNBatchNorm', 'Custom', 'Deconvolution', 'Dropout', 'ElementWiseSum', 'Embedding', 'Flatten', 'FullyConnected', 'GridGenerator', 'GroupNorm', 'IdentityAttachKLSparseReg', 'InstanceNorm', 'InstanceNormV2', 'L2Normalization', 'LRN', 'LayerNorm', 'LeakyReLU', 'LinearRegressionOutput', 'LogisticRegressionOutput', 'MAERegressionOutput', 'MakeLoss', 'NormConvolution', 'NormalizedConvolution', 'Pad', 'Pooling', 'Pooling_v1', 'RNN', 'ROIPooling', 'Reshape', 'SVMOutput', 'ScaleBiasAddRelu', 'SequenceLast', 'SequenceMask', 'SequenceReverse', 'SliceChannel', 'Softmax', 'SoftmaxActivation', 'SoftmaxOutput', 'SpatialParallelConvolution', 'SpatialTransformer', 'SwapAxis', 'UpSampling', 'abs', 'adam_update', 'add_n', 'all_finite', 'amp_cast', 'amp_multicast', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh', 'argmax', 'argmax_channel', 'argmin', 'argsort', 'batch_dot', 'batch_take', 'broadcast_add', 'broadcast_axes', 'broadcast_axis', 'broadcast_div', 'broadcast_equal', 'broadcast_greater', 'broadcast_greater_equal', 'broadcast_hypot', 'broadcast_lesser', 'broadcast_lesser_equal', 'broadcast_like', 'broadcast_logical_and', 'broadcast_logical_or', 'broadcast_logical_xor', 'broadcast_maximum', 'broadcast_minimum', 'broadcast_minus', 'broadcast_mod', 'broadcast_mul', 'broadcast_not_equal', 'broadcast_plus', 'broadcast_power', 'broadcast_sub', 'broadcast_to', 'cast', 'cast_storage', 'cbrt', 'ceil', 'choose_element_0index', 'clip', 'col2im', 'concat', 'cos', 'cosh', 'crop', 'ctc_loss', 'cumsum', 'degrees', 'depth_to_space', 'diag', 'dot', 'elemwise_add', 'elemwise_div', 'elemwise_mul', 'elemwise_sub', 'erf', 'erfinv', 'exp', 'expand_dims', 'expm1', 'fill_element_0index', 'fix', 'flatten', 'flip', 'floor', 'ftml_update', 'ftrl_update', 'gamma', 'gammaln', 'gather_nd', 'hard_sigmoid', 'identity', 'im2col', 'khatri_rao', 'lamb_update_phase1', 'lamb_update_phase2', 'lars_multi_mp_sgd_mom_update', 'lars_multi_mp_sgd_update', 'lars_multi_sgd_mom_update', 'lars_multi_sgd_update', 'linalg_det', 'linalg_extractdiag', 'linalg_extracttrian', 'linalg_gelqf', 'linalg_gemm', 'linalg_gemm2', 'linalg_inverse', 'linalg_makediag', 'linalg_maketrian', 'linalg_potrf', 'linalg_potri', 'linalg_slogdet', 'linalg_sumlogdiag', 'linalg_syrk', 'linalg_trmm', 'linalg_trsm', 'log', 'log10', 'log1p', 'log2', 'log_softmax', 'logical_not', 'make_loss', 'max', 'max_axis', 'mean', 'min', 'min_axis', 'moments', 'mp_lamb_update_phase1', 'mp_lamb_update_phase2', 'mp_nag_mom_update', 'mp_sgd_mom_update', 'mp_sgd_update', 'multi_all_finite', 'multi_lars', 'multi_mp_nag_mom_update', 'multi_mp_sgd_mom_update', 'multi_mp_sgd_update', 'multi_nag_mom_update', 'multi_sgd_mom_update', 'multi_sgd_update', 'multi_sum_sq', 'nag_mom_update', 'nanprod', 'nansum', 'negative', 'norm', 'normal', 'one_hot', 'ones_like', 'pad', 'pick', 'prod', 'radians', 'random_exponential', 'random_gamma', 'random_generalized_negative_binomial', 'random_negative_binomial', 'random_normal', 'random_pdf_dirichlet', 'random_pdf_exponential', 'random_pdf_gamma', 'random_pdf_generalized_negative_binomial', 'random_pdf_negative_binomial', 'random_pdf_normal', 'random_pdf_poisson', 'random_pdf_uniform', 'random_poisson', 'random_randint', 'random_uniform', 'ravel_multi_index', 'rcbrt', 'reciprocal', 'relu', 'repeat', 'reset_arrays', 'reshape', 
'reshape_like', 'reverse', 'rint', 'rmsprop_update', 'rmspropalex_update', 'round', 'rsqrt', 'sample_exponential', 'sample_gamma', 'sample_generalized_negative_binomial', 'sample_multinomial', 'sample_negative_binomial', 'sample_normal', 'sample_poisson', 'sample_uniform', 'scatter_nd', 'sgd_mom_update', 'sgd_update', 'shape_array', 'shuffle', 'sigmoid', 'sign', 'signsgd_update', 'signum_update', 'sin', 'sinh', 'size_array', 'slice', 'slice_axis', 'slice_like', 'smooth_l1', 'softmax', 'softmax_cross_entropy', 'softmin', 'softsign', 'sort', 'space_to_depth', 'split', 'sqrt', 'square', 'squeeze', 'stack', 'stop_gradient', 'sum', 'sum_axis', 'swapaxes', 'take', 'tan', 'tanh', 'tile', 'topk', 'transpose', 'trunc', 'uniform', 'unravel_index', 'where', 'zeros_like']
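The stubs above only document signatures; the real kernels live in the C++ sources referenced by each "Defined in" line. As a minimal usage sketch (assumptions: an MXNet 1.x install importable as ``mxnet``, CPU context), the documented ``take`` and ``topk`` symbols can be built from variables and checked with ``Symbol.eval``::

    import mxnet as mx

    x = mx.sym.Variable('x')
    idx = mx.sym.Variable('idx')

    # take: gather rows (0,1) and (1,2) along axis 0, as in the docstring example
    rows = mx.sym.take(x, idx)
    out = rows.eval(ctx=mx.cpu(),
                    x=mx.nd.array([[1., 2.], [3., 4.], [5., 6.]]),
                    idx=mx.nd.array([[0, 1], [1, 2]]))[0]
    print(out.asnumpy())  # shape (2, 2, 2)

    # topk: values of the two largest entries along the last axis
    top = mx.sym.topk(x, ret_typ='value', k=2)
    print(top.eval(ctx=mx.cpu(),
                   x=mx.nd.array([[0.3, 0.2, 0.4], [0.1, 0.3, 0.2]]))[0].asnumpy())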
34.104254
4,305
0.602096
63,301
456,997
4.277784
0.039446
0.013036
0.009306
0.022667
0.786087
0.759805
0.739952
0.719504
0.702276
0.689728
0
0.03857
0.27535
456,997
13,400
4,305
34.104254
0.779115
0.82434
0
0.332143
1
0
0.075775
0.015327
0
0
0
0
0
1
0.332143
false
0
0.002381
0
0.666667
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
10
adf620c208159f895a90bdebab4bea7337f88133
6,161
py
Python
src/cms/contacts/migrations/0001_initial.py
UniversitaDellaCalabria/uniCMS
b0af4e1a767867f0a9b3c135a5c84587e713cb71
[ "Apache-2.0" ]
6
2021-01-26T17:22:53.000Z
2022-02-15T10:09:03.000Z
src/cms/contacts/migrations/0001_initial.py
UniversitaDellaCalabria/uniCMS
b0af4e1a767867f0a9b3c135a5c84587e713cb71
[ "Apache-2.0" ]
5
2020-12-24T14:29:23.000Z
2021-08-10T10:32:18.000Z
src/cms/contacts/migrations/0001_initial.py
UniversitaDellaCalabria/uniCMS
b0af4e1a767867f0a9b3c135a5c84587e713cb71
[ "Apache-2.0" ]
2
2020-12-24T14:13:39.000Z
2020-12-30T16:48:52.000Z
# Generated by Django 3.2.5 on 2021-09-09 07:45 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('cmsmedias', '0008_alter_media_file'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Contact', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('modified', models.DateTimeField(auto_now=True)), ('is_active', models.BooleanField(default=False)), ('name', models.CharField(max_length=160)), ('contact_type', models.CharField(choices=[('person', 'Person'), ('structure', 'Structure')], max_length=10)), ('description', models.TextField(blank=True, default='', max_length=2048)), ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='contact_created_by', to=settings.AUTH_USER_MODEL)), ('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='cmsmedias.media')), ('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='contact_modified_by', to=settings.AUTH_USER_MODEL)), ], options={ 'verbose_name_plural': 'Contacts', 'ordering': ['name'], }, ), migrations.CreateModel( name='ContactInfo', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('modified', models.DateTimeField(auto_now=True)), ('order', models.IntegerField(blank=True, default=10, null=True)), ('is_active', models.BooleanField(default=False)), ('info_type', models.CharField(choices=[('email', 'Email'), ('location', 'Location'), ('phone', 'Phone'), ('website', 'Website')], max_length=15)), ('name', models.CharField(max_length=160)), ('value', models.CharField(max_length=160)), ('contact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cmscontacts.contact')), ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='contactinfo_created_by', to=settings.AUTH_USER_MODEL)), ('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='contactinfo_modified_by', to=settings.AUTH_USER_MODEL)), ], options={ 'verbose_name_plural': 'Contact extra infos', }, ), migrations.CreateModel( name='ContactLocalization', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('modified', models.DateTimeField(auto_now=True)), ('order', models.IntegerField(blank=True, default=10, null=True)), ('is_active', models.BooleanField(default=False)), ('language', models.CharField(choices=[('ar', 'Arabic'), ('en', 'English'), ('es', 'Spanish'), ('fr', 'French'), ('it', 'Italian'), ('pt', 'Portuguese')], default='en', max_length=12)), ('name', models.CharField(blank=True, default='', max_length=160)), ('description', models.TextField(blank=True, default='', max_length=2048)), ('contact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cmscontacts.contact')), ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='contactlocalization_created_by', to=settings.AUTH_USER_MODEL)), ('modified_by', 
models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='contactlocalization_modified_by', to=settings.AUTH_USER_MODEL)), ], options={ 'verbose_name_plural': 'Contact Localization', }, ), migrations.CreateModel( name='ContactInfoLocalization', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ('modified', models.DateTimeField(auto_now=True)), ('order', models.IntegerField(blank=True, default=10, null=True)), ('is_active', models.BooleanField(default=False)), ('language', models.CharField(choices=[('ar', 'Arabic'), ('en', 'English'), ('es', 'Spanish'), ('fr', 'French'), ('it', 'Italian'), ('pt', 'Portuguese')], default='en', max_length=12)), ('name', models.CharField(blank=True, default='', max_length=160)), ('value', models.CharField(blank=True, default='', max_length=160)), ('contact_info', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cmscontacts.contactinfo')), ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='contactinfolocalization_created_by', to=settings.AUTH_USER_MODEL)), ('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='contactinfolocalization_modified_by', to=settings.AUTH_USER_MODEL)), ], options={ 'verbose_name_plural': 'Contact Info Localizations', }, ), ]
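For reference, a hypothetical sketch of the ``Contact`` model implied by the ``CreateModel('Contact')`` operation above, reconstructed from the migration fields only (uniCMS's actual models module may use additional abstract bases and helpers)::

    from django.conf import settings
    from django.db import models

    class Contact(models.Model):
        created = models.DateTimeField(auto_now_add=True)
        modified = models.DateTimeField(auto_now=True)
        is_active = models.BooleanField(default=False)
        name = models.CharField(max_length=160)
        contact_type = models.CharField(
            max_length=10,
            choices=[('person', 'Person'), ('structure', 'Structure')])
        description = models.TextField(blank=True, default='', max_length=2048)
        created_by = models.ForeignKey(
            settings.AUTH_USER_MODEL, blank=True, null=True,
            on_delete=models.SET_NULL, related_name='contact_created_by')
        image = models.ForeignKey(
            'cmsmedias.Media', blank=True, null=True,
            on_delete=models.PROTECT)
        modified_by = models.ForeignKey(
            settings.AUTH_USER_MODEL, blank=True, null=True,
            on_delete=models.SET_NULL, related_name='contact_modified_by')

        class Meta:
            ordering = ['name']
            verbose_name_plural = 'Contacts'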
64.852632
201
0.619867
652
6,161
5.665644
0.173313
0.041419
0.049269
0.077423
0.812128
0.812128
0.783433
0.764483
0.752842
0.723064
0
0.012346
0.224314
6,161
94
202
65.542553
0.760619
0.007304
0
0.563218
1
0
0.173373
0.039581
0
0
0
0
0
1
0
false
0
0.034483
0
0.08046
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
bc0605fef81119b33bbcf575b6d9bf0f608e5457
152,226
py
Python
mailslurp_client/api/inbox_controller_api.py
mailslurp/mailslurp-client-python
a1e9fdc6eb06e192909fd57a64813beb32419594
[ "MIT" ]
6
2020-04-30T07:47:42.000Z
2022-03-24T20:58:58.000Z
mailslurp_client/api/inbox_controller_api.py
mailslurp/mailslurp-client-python
a1e9fdc6eb06e192909fd57a64813beb32419594
[ "MIT" ]
1
2020-09-20T19:58:21.000Z
2020-11-29T16:49:19.000Z
mailslurp_client/api/inbox_controller_api.py
mailslurp/mailslurp-client-python
a1e9fdc6eb06e192909fd57a64813beb32419594
[ "MIT" ]
1
2019-08-09T14:55:50.000Z
2019-08-09T14:55:50.000Z
# coding: utf-8 """ MailSlurp API MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://www.mailslurp.com/docs/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501 The version of the OpenAPI document: 6.5.2 Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import re # noqa: F401 # python 2 and python 3 compatibility library import six from mailslurp_client.api_client import ApiClient from mailslurp_client.exceptions import ( # noqa: F401 ApiTypeError, ApiValueError ) class InboxControllerApi(object): """NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech Do not edit the class manually. """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_inbox(self, **kwargs): # noqa: E501 """Create an inbox email address. An inbox has a real email address and can send and receive emails. Inboxes can be either `SMTP` or `HTTP` inboxes. # noqa: E501 Create a new inbox with a randomized email address to send and receive from. Pass the emailAddress parameter if you wish to use a specific email address. Creating an inbox is required before sending or receiving emails. If writing tests it is recommended that you create a new inbox during each test method so that it is unique and empty. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_inbox(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param bool allow_team_access: DEPRECATED (team access is always true). Grant team access to this inbox and the emails that belong to it for team members of your organization. :param str description: Optional description of the inbox for labelling purposes. Is shown in the dashboard and can be used with :param str email_address: A custom email address to use with the inbox. Defaults to null. When null MailSlurp will assign a random email address to the inbox such as `123@mailslurp.com`. If you use the `useDomainPool` option when the email address is null it will generate an email address with a more varied domain ending such as `123@mailslurp.info` or `123@mailslurp.biz`. When a custom email address is provided the address is split into a domain and the domain is queried against your user. If you have created the domain in the MailSlurp dashboard and verified it you can use any email address that ends with the domain. Note domain types must match the inbox type - so `SMTP` inboxes will only work with `SMTP` type domains. Avoid `SMTP` inboxes if you need to send emails as they can only receive. Send an email to this address and the inbox will receive and store it for you. To retrieve the email use the Inbox and Email Controller endpoints with the inbox ID. :param datetime expires_at: Optional inbox expiration date. If null then this inbox is permanent and the emails in it won't be deleted. If an expiration date is provided or is required by your plan the inbox will be closed when the expiration time is reached. 
Expired inboxes still contain their emails but can no longer send or receive emails. An ExpiredInboxRecord is created when an inbox expires; the email address and inbox ID are recorded. The expiresAt property is a timestamp string in ISO DateTime Format yyyy-MM-dd'T'HH:mm:ss.SSSXXX. :param int expires_in: Number of milliseconds that the inbox should exist for :param bool favourite: Is the inbox a favorite. Marking an inbox as a favorite is typically done in the dashboard for quick access or filtering :param str inbox_type: HTTP (default) or SMTP inbox type. HTTP inboxes are the default and the best solution for most cases. SMTP inboxes are more reliable for public inbound email consumption (but do not support sending emails). When using custom domains the domain type must match the inbox type. HTTP inboxes are processed by AWS SES while SMTP inboxes use a custom mail server running at `mx.mailslurp.com`. :param str name: Optional name of the inbox. Displayed in the dashboard for easier search and used as the sender name when sending emails. :param list[str] tags: Tags that the inbox has been tagged with. Tags can be added to inboxes to group different inboxes within an account. You can also search for inboxes by tag in the dashboard UI. :param bool use_domain_pool: Use the MailSlurp domain name pool with this inbox when creating the email address. Defaults to null. If enabled the inbox will be an email address with a domain randomly chosen from a list of the MailSlurp domains. This is useful when the default `@mailslurp.com` email addresses used with inboxes are blocked or considered spam by a provider or receiving service. When domain pool is enabled an email address will be generated ending in `@mailslurp.{world,info,xyz,...}`. This means a TLD is randomly selected from a list of `.biz`, `.info`, `.xyz` etc. to add variance to the generated email addresses. When null or false MailSlurp uses the default behavior of `@mailslurp.com` or the custom email address provided by the emailAddress field. Note this feature is only available for `HTTP` inbox types. :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Inbox If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.create_inbox_with_http_info(**kwargs) # noqa: E501 def create_inbox_with_http_info(self, **kwargs): # noqa: E501 """Create an inbox email address. An inbox has a real email address and can send and receive emails. Inboxes can be either `SMTP` or `HTTP` inboxes. # noqa: E501 Create a new inbox with a randomized email address to send and receive from. Pass the emailAddress parameter if you wish to use a specific email address. Creating an inbox is required before sending or receiving emails. If writing tests it is recommended that you create a new inbox during each test method so that it is unique and empty. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_inbox_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param bool allow_team_access: DEPRECATED (team access is always true). 
Grant team access to this inbox and the emails that belong to it for team members of your organization. :param str description: Optional description of the inbox for labelling purposes. Is shown in the dashboard and can be used with :param str email_address: A custom email address to use with the inbox. Defaults to null. When null MailSlurp will assign a random email address to the inbox such as `123@mailslurp.com`. If you use the `useDomainPool` option when the email address is null it will generate an email address with a more varied domain ending such as `123@mailslurp.info` or `123@mailslurp.biz`. When a custom email address is provided the address is split into a domain and the domain is queried against your user. If you have created the domain in the MailSlurp dashboard and verified it you can use any email address that ends with the domain. Note domain types must match the inbox type - so `SMTP` inboxes will only work with `SMTP` type domains. Avoid `SMTP` inboxes if you need to send emails as they can only receive. Send an email to this address and the inbox will receive and store it for you. To retrieve the email use the Inbox and Email Controller endpoints with the inbox ID. :param datetime expires_at: Optional inbox expiration date. If null then this inbox is permanent and the emails in it won't be deleted. If an expiration date is provided or is required by your plan the inbox will be closed when the expiration time is reached. Expired inboxes still contain their emails but can no longer send or receive emails. An ExpiredInboxRecord is created when an inbox expires; the email address and inbox ID are recorded. The expiresAt property is a timestamp string in ISO DateTime Format yyyy-MM-dd'T'HH:mm:ss.SSSXXX. :param int expires_in: Number of milliseconds that the inbox should exist for :param bool favourite: Is the inbox a favorite. Marking an inbox as a favorite is typically done in the dashboard for quick access or filtering :param str inbox_type: HTTP (default) or SMTP inbox type. HTTP inboxes are the default and the best solution for most cases. SMTP inboxes are more reliable for public inbound email consumption (but do not support sending emails). When using custom domains the domain type must match the inbox type. HTTP inboxes are processed by AWS SES while SMTP inboxes use a custom mail server running at `mx.mailslurp.com`. :param str name: Optional name of the inbox. Displayed in the dashboard for easier search and used as the sender name when sending emails. :param list[str] tags: Tags that the inbox has been tagged with. Tags can be added to inboxes to group different inboxes within an account. You can also search for inboxes by tag in the dashboard UI. :param bool use_domain_pool: Use the MailSlurp domain name pool with this inbox when creating the email address. Defaults to null. If enabled the inbox will be an email address with a domain randomly chosen from a list of the MailSlurp domains. This is useful when the default `@mailslurp.com` email addresses used with inboxes are blocked or considered spam by a provider or receiving service. When domain pool is enabled an email address will be generated ending in `@mailslurp.{world,info,xyz,...}`. This means a TLD is randomly selected from a list of `.biz`, `.info`, `.xyz` etc. to add variance to the generated email addresses. When null or false MailSlurp uses the default behavior of `@mailslurp.com` or the custom email address provided by the emailAddress field. Note this feature is only available for `HTTP` inbox types. 
:param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(Inbox, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = [ 'allow_team_access', 'description', 'email_address', 'expires_at', 'expires_in', 'favourite', 'inbox_type', 'name', 'tags', 'use_domain_pool' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method create_inbox" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'allow_team_access' in local_var_params and local_var_params['allow_team_access'] is not None: # noqa: E501 query_params.append(('allowTeamAccess', local_var_params['allow_team_access'])) # noqa: E501 if 'description' in local_var_params and local_var_params['description'] is not None: # noqa: E501 query_params.append(('description', local_var_params['description'])) # noqa: E501 if 'email_address' in local_var_params and local_var_params['email_address'] is not None: # noqa: E501 query_params.append(('emailAddress', local_var_params['email_address'])) # noqa: E501 if 'expires_at' in local_var_params and local_var_params['expires_at'] is not None: # noqa: E501 query_params.append(('expiresAt', local_var_params['expires_at'])) # noqa: E501 if 'expires_in' in local_var_params and local_var_params['expires_in'] is not None: # noqa: E501 query_params.append(('expiresIn', local_var_params['expires_in'])) # noqa: E501 if 'favourite' in local_var_params and local_var_params['favourite'] is not None: # noqa: E501 query_params.append(('favourite', local_var_params['favourite'])) # noqa: E501 if 'inbox_type' in local_var_params and local_var_params['inbox_type'] is not None: # noqa: E501 query_params.append(('inboxType', local_var_params['inbox_type'])) # noqa: E501 if 'name' in local_var_params and local_var_params['name'] is not None: # noqa: E501 query_params.append(('name', local_var_params['name'])) # noqa: E501 if 'tags' in local_var_params and local_var_params['tags'] is not None: # noqa: E501 query_params.append(('tags', local_var_params['tags'])) # noqa: E501 collection_formats['tags'] = 'multi' # noqa: E501 if 'use_domain_pool' in local_var_params and local_var_params['use_domain_pool'] is not None: # noqa: E501 query_params.append(('useDomainPool', local_var_params['use_domain_pool'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['API_KEY'] # noqa: E501 return self.api_client.call_api( '/inboxes', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Inbox', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), 
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def create_inbox_ruleset(self, inbox_id, create_inbox_ruleset_options, **kwargs): # noqa: E501 """Create an inbox ruleset # noqa: E501 Create a new inbox rule for forwarding, blocking, and allowing emails when sending and receiving # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_inbox_ruleset(inbox_id, create_inbox_ruleset_options, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str inbox_id: inboxId (required) :param CreateInboxRulesetOptions create_inbox_ruleset_options: createInboxRulesetOptions (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: InboxRulesetDto If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.create_inbox_ruleset_with_http_info(inbox_id, create_inbox_ruleset_options, **kwargs) # noqa: E501 def create_inbox_ruleset_with_http_info(self, inbox_id, create_inbox_ruleset_options, **kwargs): # noqa: E501 """Create an inbox ruleset # noqa: E501 Create a new inbox rule for forwarding, blocking, and allowing emails when sending and receiving # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_inbox_ruleset_with_http_info(inbox_id, create_inbox_ruleset_options, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str inbox_id: inboxId (required) :param CreateInboxRulesetOptions create_inbox_ruleset_options: createInboxRulesetOptions (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(InboxRulesetDto, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. 
""" local_var_params = locals() all_params = [ 'inbox_id', 'create_inbox_ruleset_options' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method create_inbox_ruleset" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'inbox_id' is set if self.api_client.client_side_validation and ('inbox_id' not in local_var_params or # noqa: E501 local_var_params['inbox_id'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `inbox_id` when calling `create_inbox_ruleset`") # noqa: E501 # verify the required parameter 'create_inbox_ruleset_options' is set if self.api_client.client_side_validation and ('create_inbox_ruleset_options' not in local_var_params or # noqa: E501 local_var_params['create_inbox_ruleset_options'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `create_inbox_ruleset_options` when calling `create_inbox_ruleset`") # noqa: E501 collection_formats = {} path_params = {} if 'inbox_id' in local_var_params: path_params['inboxId'] = local_var_params['inbox_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'create_inbox_ruleset_options' in local_var_params: body_params = local_var_params['create_inbox_ruleset_options'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['API_KEY'] # noqa: E501 return self.api_client.call_api( '/inboxes/{inboxId}/rulesets', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='InboxRulesetDto', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def create_inbox_with_defaults(self, **kwargs): # noqa: E501 """Create an inbox with default options. Uses MailSlurp domain pool address and is private. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_inbox_with_defaults(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Inbox If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.create_inbox_with_defaults_with_http_info(**kwargs) # noqa: E501 def create_inbox_with_defaults_with_http_info(self, **kwargs): # noqa: E501 """Create an inbox with default options. Uses MailSlurp domain pool address and is private. 
# noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_inbox_with_defaults_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(Inbox, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = [ ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method create_inbox_with_defaults" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['API_KEY'] # noqa: E501 return self.api_client.call_api( '/inboxes/withDefaults', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Inbox', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def create_inbox_with_options(self, create_inbox_dto, **kwargs): # noqa: E501 """Create an inbox with options. Extended options for inbox creation. # noqa: E501 Additional endpoint that allows inbox creation with request body options. Can be more flexible than other methods for some clients. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_inbox_with_options(create_inbox_dto, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param CreateInboxDto create_inbox_dto: createInboxDto (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Inbox If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.create_inbox_with_options_with_http_info(create_inbox_dto, **kwargs) # noqa: E501 def create_inbox_with_options_with_http_info(self, create_inbox_dto, **kwargs): # noqa: E501 """Create an inbox with options. Extended options for inbox creation. # noqa: E501 Additional endpoint that allows inbox creation with request body options. 
Can be more flexible than other methods for some clients. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_inbox_with_options_with_http_info(create_inbox_dto, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param CreateInboxDto create_inbox_dto: createInboxDto (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(Inbox, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = [ 'create_inbox_dto' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method create_inbox_with_options" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'create_inbox_dto' is set if self.api_client.client_side_validation and ('create_inbox_dto' not in local_var_params or # noqa: E501 local_var_params['create_inbox_dto'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `create_inbox_dto` when calling `create_inbox_with_options`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'create_inbox_dto' in local_var_params: body_params = local_var_params['create_inbox_dto'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['API_KEY'] # noqa: E501 return self.api_client.call_api( '/inboxes/withOptions', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Inbox', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def delete_all_inboxes(self, **kwargs): # noqa: E501 """Delete all inboxes # noqa: E501 Permanently delete all inboxes and associated email addresses. This will also delete all emails within the inboxes. Be careful as inboxes cannot be recovered once deleted. Note: deleting inboxes will not impact your usage limits. Monthly inbox creation limits are based on how many inboxes were created in the last 30 days, not how many inboxes you currently have. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_all_inboxes(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.delete_all_inboxes_with_http_info(**kwargs) # noqa: E501 def delete_all_inboxes_with_http_info(self, **kwargs): # noqa: E501 """Delete all inboxes # noqa: E501 Permanently delete all inboxes and associated email addresses. This will also delete all emails within the inboxes. Be careful as inboxes cannot be recovered once deleted. Note: deleting inboxes will not impact your usage limits. Monthly inbox creation limits are based on how many inboxes were created in the last 30 days, not how many inboxes you currently have. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_all_inboxes_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = [ ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method delete_all_inboxes" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = ['API_KEY'] # noqa: E501 return self.api_client.call_api( '/inboxes', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def delete_inbox(self, inbox_id, **kwargs): # noqa: E501 """Delete inbox # noqa: E501 Permanently delete an inbox and associated email address as well as all emails within the given inbox. This action cannot be undone. Note: deleting an inbox will not affect your account usage. Monthly inbox usage is based on how many inboxes you create within 30 days, not how many exist at time of request. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_inbox(inbox_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str inbox_id: inboxId (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.delete_inbox_with_http_info(inbox_id, **kwargs) # noqa: E501 def delete_inbox_with_http_info(self, inbox_id, **kwargs): # noqa: E501 """Delete inbox # noqa: E501 Permanently delete an inbox and associated email address as well as all emails within the given inbox. This action cannot be undone. Note: deleting an inbox will not affect your account usage. Monthly inbox usage is based on how many inboxes you create within 30 days, not how many exist at time of request. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_inbox_with_http_info(inbox_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str inbox_id: inboxId (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread. 
""" local_var_params = locals() all_params = [ 'inbox_id' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method delete_inbox" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'inbox_id' is set if self.api_client.client_side_validation and ('inbox_id' not in local_var_params or # noqa: E501 local_var_params['inbox_id'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `inbox_id` when calling `delete_inbox`") # noqa: E501 collection_formats = {} path_params = {} if 'inbox_id' in local_var_params: path_params['inboxId'] = local_var_params['inbox_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = ['API_KEY'] # noqa: E501 return self.api_client.call_api( '/inboxes/{inboxId}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def does_inbox_exist(self, email_address, **kwargs): # noqa: E501 """Does inbox exist # noqa: E501 Check if inboxes exist by email address. Useful if you are sending emails to mailslurp addresses # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.does_inbox_exist(email_address, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str email_address: Email address (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: InboxExistsDto If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.does_inbox_exist_with_http_info(email_address, **kwargs) # noqa: E501 def does_inbox_exist_with_http_info(self, email_address, **kwargs): # noqa: E501 """Does inbox exist # noqa: E501 Check if inboxes exist by email address. Useful if you are sending emails to mailslurp addresses # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.does_inbox_exist_with_http_info(email_address, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str email_address: Email address (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. 
It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(InboxExistsDto, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = [ 'email_address' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method does_inbox_exist" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'email_address' is set if self.api_client.client_side_validation and ('email_address' not in local_var_params or # noqa: E501 local_var_params['email_address'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `email_address` when calling `does_inbox_exist`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] if 'email_address' in local_var_params and local_var_params['email_address'] is not None: # noqa: E501 query_params.append(('emailAddress', local_var_params['email_address'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['API_KEY'] # noqa: E501 return self.api_client.call_api( '/inboxes/exists', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='InboxExistsDto', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def flush_expired(self, **kwargs): # noqa: E501 """Remove expired inboxes # noqa: E501 Remove any expired inboxes for your account (instead of waiting for scheduled removal on server) # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.flush_expired(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param datetime before: Optional expired at before flag to flush expired inboxes that have expired before the given time :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: FlushExpiredInboxesResult If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.flush_expired_with_http_info(**kwargs) # noqa: E501 def flush_expired_with_http_info(self, **kwargs): # noqa: E501 """Remove expired inboxes # noqa: E501 Remove any expired inboxes for your account (instead of waiting for scheduled removal on server) # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.flush_expired_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param datetime before: Optional expired at before flag to flush expired inboxes that have expired before the given time :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(FlushExpiredInboxesResult, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = [ 'before' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method flush_expired" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'before' in local_var_params and local_var_params['before'] is not None: # noqa: E501 query_params.append(('before', local_var_params['before'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['API_KEY'] # noqa: E501 return self.api_client.call_api( '/inboxes/expired', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='FlushExpiredInboxesResult', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def get_all_inboxes(self, **kwargs): # noqa: E501 """List All Inboxes Paginated # noqa: E501 List inboxes in paginated form. The results are available on the `content` property of the returned object. This method allows for page index (zero based), page size (how many results to return), and a sort direction (based on createdAt time). You can also filter by whether an inbox is favorited or use an email address pattern. This method is the recommended way to query inboxes. The alternative `getInboxes` method returns a full list of inboxes but is limited to 100 results. # noqa: E501 This method makes a synchronous HTTP request by default. 
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_all_inboxes(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param datetime before: Optional filter by created before given date time
        :param bool favourite: Optionally filter results for favourites only
        :param int page: Optional page index in list pagination
        :param str search: Optionally filter by search words partial matching ID, tags, name, and email address
        :param datetime since: Optional filter by created after given date time
        :param int size: Optional page size in list pagination
        :param str sort: Optional createdAt sort direction ASC or DESC
        :param str tag: Optionally filter by tags. Will return inboxes that include given tags
        :param bool team_access: DEPRECATED. Optionally filter by team access.
        :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
        :return: PageInboxProjection
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_all_inboxes_with_http_info(**kwargs)  # noqa: E501

    def get_all_inboxes_with_http_info(self, **kwargs):  # noqa: E501
        """List All Inboxes Paginated  # noqa: E501

        List inboxes in paginated form. The results are available on the `content` property of the returned object. This method allows for page index (zero based), page size (how many results to return), and a sort direction (based on createdAt time). You can also filter by whether an inbox is favorited or by an email address pattern. This method is the recommended way to query inboxes. The alternative `getInboxes` method returns a full list of inboxes but is limited to 100 results.  # noqa: E501
        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_all_inboxes_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param datetime before: Optional filter by created before given date time
        :param bool favourite: Optionally filter results for favourites only
        :param int page: Optional page index in list pagination
        :param str search: Optionally filter by search words partial matching ID, tags, name, and email address
        :param datetime since: Optional filter by created after given date time
        :param int size: Optional page size in list pagination
        :param str sort: Optional createdAt sort direction ASC or DESC
        :param str tag: Optionally filter by tags. Will return inboxes that include given tags
        :param bool team_access: DEPRECATED. Optionally filter by team access.
        :param _return_http_data_only: response data without status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
        :return: tuple(PageInboxProjection, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
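
        Example (illustrative only; assumes a configured controller instance named ``api``):

        >>> page, status, _ = api.get_all_inboxes_with_http_info(page=0, size=20, sort='DESC')
        >>> first_inbox = page.content[0]
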
""" local_var_params = locals() all_params = [ 'before', 'favourite', 'page', 'search', 'since', 'size', 'sort', 'tag', 'team_access' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method get_all_inboxes" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'before' in local_var_params and local_var_params['before'] is not None: # noqa: E501 query_params.append(('before', local_var_params['before'])) # noqa: E501 if 'favourite' in local_var_params and local_var_params['favourite'] is not None: # noqa: E501 query_params.append(('favourite', local_var_params['favourite'])) # noqa: E501 if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501 query_params.append(('page', local_var_params['page'])) # noqa: E501 if 'search' in local_var_params and local_var_params['search'] is not None: # noqa: E501 query_params.append(('search', local_var_params['search'])) # noqa: E501 if 'since' in local_var_params and local_var_params['since'] is not None: # noqa: E501 query_params.append(('since', local_var_params['since'])) # noqa: E501 if 'size' in local_var_params and local_var_params['size'] is not None: # noqa: E501 query_params.append(('size', local_var_params['size'])) # noqa: E501 if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501 query_params.append(('sort', local_var_params['sort'])) # noqa: E501 if 'tag' in local_var_params and local_var_params['tag'] is not None: # noqa: E501 query_params.append(('tag', local_var_params['tag'])) # noqa: E501 if 'team_access' in local_var_params and local_var_params['team_access'] is not None: # noqa: E501 query_params.append(('teamAccess', local_var_params['team_access'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['API_KEY'] # noqa: E501 return self.api_client.call_api( '/inboxes/paginated', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='PageInboxProjection', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def get_emails(self, inbox_id, **kwargs): # noqa: E501 """Get emails in an Inbox. This method is not idempotent as it allows retries and waits if you want certain conditions to be met before returning. For simple listing and sorting of known emails use the email controller instead. # noqa: E501 List emails that an inbox has received. Only emails that are sent to the inbox's email address will appear in the inbox. It may take several seconds for any email you send to an inbox's email address to appear in the inbox. To make this endpoint wait for a minimum number of emails use the `minCount` parameter. 
        The server will retry the inbox database until the `minCount` is satisfied or the `retryTimeout` is reached  # noqa: E501
        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_emails(inbox_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str inbox_id: ID of the inbox that the emails belong to (required)
        :param datetime before: Exclude emails received after this ISO 8601 date time
        :param int delay_timeout: delayTimeout
        :param int limit: Limit the result set, ordered by received date time sort direction. Maximum 100. For more listing options see the email controller
        :param int min_count: Minimum acceptable email count. Will cause request to hang (and retry) until minCount is satisfied or retryTimeout is reached.
        :param int retry_timeout: Maximum milliseconds to spend retrying inbox database until minCount emails are returned
        :param datetime since: Exclude emails received before this ISO 8601 date time
        :param int size: Alias for limit. Assessed first before assessing any passed limit.
        :param str sort: Sort the results by received date and direction ASC or DESC
        :param bool unread_only: unreadOnly
        :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
        :return: list[EmailPreview]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_emails_with_http_info(inbox_id, **kwargs)  # noqa: E501

    def get_emails_with_http_info(self, inbox_id, **kwargs):  # noqa: E501
        """Get emails in an Inbox. This method is not idempotent as it allows retries and waits if you want certain conditions to be met before returning. For simple listing and sorting of known emails use the email controller instead.  # noqa: E501

        List emails that an inbox has received. Only emails that are sent to the inbox's email address will appear in the inbox. It may take several seconds for any email you send to an inbox's email address to appear in the inbox. To make this endpoint wait for a minimum number of emails use the `minCount` parameter. The server will retry the inbox database until the `minCount` is satisfied or the `retryTimeout` is reached  # noqa: E501
        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_emails_with_http_info(inbox_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str inbox_id: ID of the inbox that the emails belong to (required)
        :param datetime before: Exclude emails received after this ISO 8601 date time
        :param int delay_timeout: delayTimeout
        :param int limit: Limit the result set, ordered by received date time sort direction. Maximum 100. For more listing options see the email controller
        :param int min_count: Minimum acceptable email count. Will cause request to hang (and retry) until minCount is satisfied or retryTimeout is reached.
        :param int retry_timeout: Maximum milliseconds to spend retrying inbox database until minCount emails are returned
        :param datetime since: Exclude emails received before this ISO 8601 date time
        :param int size: Alias for limit. Assessed first before assessing any passed limit.
        :param str sort: Sort the results by received date and direction ASC or DESC
        :param bool unread_only: unreadOnly
        :param _return_http_data_only: response data without status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
        :return: tuple(list[EmailPreview], status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
            'inbox_id',
            'before',
            'delay_timeout',
            'limit',
            'min_count',
            'retry_timeout',
            'since',
            'size',
            'sort',
            'unread_only'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_emails" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'inbox_id' is set
        if self.api_client.client_side_validation and ('inbox_id' not in local_var_params or  # noqa: E501
                                                       local_var_params['inbox_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `inbox_id` when calling `get_emails`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'inbox_id' in local_var_params:
            path_params['inboxId'] = local_var_params['inbox_id']  # noqa: E501

        query_params = []
        if 'before' in local_var_params and local_var_params['before'] is not None:  # noqa: E501
            query_params.append(('before', local_var_params['before']))  # noqa: E501
        if 'delay_timeout' in local_var_params and local_var_params['delay_timeout'] is not None:  # noqa: E501
            query_params.append(('delayTimeout', local_var_params['delay_timeout']))  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'min_count' in local_var_params and local_var_params['min_count'] is not None:  # noqa: E501
            query_params.append(('minCount', local_var_params['min_count']))  # noqa: E501
        if 'retry_timeout' in local_var_params and local_var_params['retry_timeout'] is not None:  # noqa: E501
            query_params.append(('retryTimeout', local_var_params['retry_timeout']))  # noqa: E501
        if 'since' in local_var_params and local_var_params['since'] is not None:  # noqa: E501
            query_params.append(('since', local_var_params['since']))  # noqa: E501
        if 'size' in local_var_params and local_var_params['size'] is not None:  # noqa: E501
            query_params.append(('size', local_var_params['size']))  # noqa: E501
        if 'sort' in local_var_params and local_var_params['sort'] is not None:  # noqa: E501
            query_params.append(('sort', local_var_params['sort']))  # noqa: E501
        if 'unread_only' in local_var_params and local_var_params['unread_only'] is not None:  # noqa: E501
            query_params.append(('unreadOnly', local_var_params['unread_only']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['API_KEY']  # noqa: E501

        return self.api_client.call_api(
            '/inboxes/{inboxId}/emails', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[EmailPreview]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_inbox(self, inbox_id, **kwargs):  # noqa: E501
        """Get Inbox. Returns properties of an inbox.  # noqa: E501

        Returns an inbox's properties, including its email address and ID.  # noqa: E501
        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_inbox(inbox_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str inbox_id: inboxId (required)
        :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
        :return: Inbox
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_inbox_with_http_info(inbox_id, **kwargs)  # noqa: E501

    def get_inbox_with_http_info(self, inbox_id, **kwargs):  # noqa: E501
        """Get Inbox. Returns properties of an inbox.  # noqa: E501

        Returns an inbox's properties, including its email address and ID.  # noqa: E501
        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_inbox_with_http_info(inbox_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str inbox_id: inboxId (required)
        :param _return_http_data_only: response data without status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
        :return: tuple(Inbox, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
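
        Example (illustrative only; assumes ``api`` and an existing ``inbox_id``):

        >>> inbox, status, _ = api.get_inbox_with_http_info(inbox_id)
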
""" local_var_params = locals() all_params = [ 'inbox_id' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method get_inbox" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'inbox_id' is set if self.api_client.client_side_validation and ('inbox_id' not in local_var_params or # noqa: E501 local_var_params['inbox_id'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `inbox_id` when calling `get_inbox`") # noqa: E501 collection_formats = {} path_params = {} if 'inbox_id' in local_var_params: path_params['inboxId'] = local_var_params['inbox_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['API_KEY'] # noqa: E501 return self.api_client.call_api( '/inboxes/{inboxId}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Inbox', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def get_inbox_emails_paginated(self, inbox_id, **kwargs): # noqa: E501 """Get inbox emails paginated # noqa: E501 Get a paginated list of emails in an inbox. Does not hold connections open. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_inbox_emails_paginated(inbox_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str inbox_id: Id of inbox that emails belongs to (required) :param datetime before: Optional filter by received before given date time :param int page: Optional page index in inbox emails list pagination :param datetime since: Optional filter by received after given date time :param int size: Optional page size in inbox emails list pagination :param str sort: Optional createdAt sort direction ASC or DESC :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: PageEmailPreview If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.get_inbox_emails_paginated_with_http_info(inbox_id, **kwargs) # noqa: E501 def get_inbox_emails_paginated_with_http_info(self, inbox_id, **kwargs): # noqa: E501 """Get inbox emails paginated # noqa: E501 Get a paginated list of emails in an inbox. Does not hold connections open. # noqa: E501 This method makes a synchronous HTTP request by default. 
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_inbox_emails_paginated_with_http_info(inbox_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str inbox_id: ID of the inbox that the emails belong to (required)
        :param datetime before: Optional filter by received before given date time
        :param int page: Optional page index in inbox emails list pagination
        :param datetime since: Optional filter by received after given date time
        :param int size: Optional page size in inbox emails list pagination
        :param str sort: Optional createdAt sort direction ASC or DESC
        :param _return_http_data_only: response data without status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
        :return: tuple(PageEmailPreview, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
            'inbox_id',
            'before',
            'page',
            'since',
            'size',
            'sort'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_inbox_emails_paginated" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'inbox_id' is set
        if self.api_client.client_side_validation and ('inbox_id' not in local_var_params or  # noqa: E501
                                                       local_var_params['inbox_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `inbox_id` when calling `get_inbox_emails_paginated`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'inbox_id' in local_var_params:
            path_params['inboxId'] = local_var_params['inbox_id']  # noqa: E501

        query_params = []
        if 'before' in local_var_params and local_var_params['before'] is not None:  # noqa: E501
            query_params.append(('before', local_var_params['before']))  # noqa: E501
        if 'page' in local_var_params and local_var_params['page'] is not None:  # noqa: E501
            query_params.append(('page', local_var_params['page']))  # noqa: E501
        if 'since' in local_var_params and local_var_params['since'] is not None:  # noqa: E501
            query_params.append(('since', local_var_params['since']))  # noqa: E501
        if 'size' in local_var_params and local_var_params['size'] is not None:  # noqa: E501
            query_params.append(('size', local_var_params['size']))  # noqa: E501
        if 'sort' in local_var_params and local_var_params['sort'] is not None:  # noqa: E501
            query_params.append(('sort', local_var_params['sort']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['API_KEY']  # noqa: E501

        return self.api_client.call_api(
            '/inboxes/{inboxId}/emails/paginated', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='PageEmailPreview',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_inbox_sent_emails(self, inbox_id, **kwargs):  # noqa: E501
        """Get Inbox Sent Emails  # noqa: E501

        Returns an inbox's sent email receipts. Call individual sent email endpoints for more details. Note for privacy reasons the full body of sent emails is never stored. An MD5 hash hex is available for comparison instead.  # noqa: E501
        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_inbox_sent_emails(inbox_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str inbox_id: inboxId (required)
        :param datetime before: Optional filter by sent before given date time
        :param int page: Optional page index in inbox sent email list pagination
        :param str search_filter: Optional sent email search
        :param datetime since: Optional filter by sent after given date time
        :param int size: Optional page size in inbox sent email list pagination
        :param str sort: Optional createdAt sort direction ASC or DESC
        :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
        :return: PageSentEmailProjection
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_inbox_sent_emails_with_http_info(inbox_id, **kwargs)  # noqa: E501

    def get_inbox_sent_emails_with_http_info(self, inbox_id, **kwargs):  # noqa: E501
        """Get Inbox Sent Emails  # noqa: E501

        Returns an inbox's sent email receipts. Call individual sent email endpoints for more details. Note for privacy reasons the full body of sent emails is never stored. An MD5 hash hex is available for comparison instead.  # noqa: E501
        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_inbox_sent_emails_with_http_info(inbox_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str inbox_id: inboxId (required)
        :param datetime before: Optional filter by sent before given date time
        :param int page: Optional page index in inbox sent email list pagination
        :param str search_filter: Optional sent email search
        :param datetime since: Optional filter by sent after given date time
        :param int size: Optional page size in inbox sent email list pagination
        :param str sort: Optional createdAt sort direction ASC or DESC
        :param _return_http_data_only: response data without status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
        :return: tuple(PageSentEmailProjection, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
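
        Example (illustrative only; assumes ``api`` and an existing ``inbox_id``):

        >>> sent_page, status, _ = api.get_inbox_sent_emails_with_http_info(inbox_id, page=0, size=10)
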
""" local_var_params = locals() all_params = [ 'inbox_id', 'before', 'page', 'search_filter', 'since', 'size', 'sort' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method get_inbox_sent_emails" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'inbox_id' is set if self.api_client.client_side_validation and ('inbox_id' not in local_var_params or # noqa: E501 local_var_params['inbox_id'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `inbox_id` when calling `get_inbox_sent_emails`") # noqa: E501 collection_formats = {} path_params = {} if 'inbox_id' in local_var_params: path_params['inboxId'] = local_var_params['inbox_id'] # noqa: E501 query_params = [] if 'before' in local_var_params and local_var_params['before'] is not None: # noqa: E501 query_params.append(('before', local_var_params['before'])) # noqa: E501 if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501 query_params.append(('page', local_var_params['page'])) # noqa: E501 if 'search_filter' in local_var_params and local_var_params['search_filter'] is not None: # noqa: E501 query_params.append(('searchFilter', local_var_params['search_filter'])) # noqa: E501 if 'since' in local_var_params and local_var_params['since'] is not None: # noqa: E501 query_params.append(('since', local_var_params['since'])) # noqa: E501 if 'size' in local_var_params and local_var_params['size'] is not None: # noqa: E501 query_params.append(('size', local_var_params['size'])) # noqa: E501 if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501 query_params.append(('sort', local_var_params['sort'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['API_KEY'] # noqa: E501 return self.api_client.call_api( '/inboxes/{inboxId}/sent', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='PageSentEmailProjection', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def get_inbox_tags(self, **kwargs): # noqa: E501 """Get inbox tags # noqa: E501 Get all inbox tags # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_inbox_tags(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: list[str] If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True return self.get_inbox_tags_with_http_info(**kwargs) # noqa: E501 def get_inbox_tags_with_http_info(self, **kwargs): # noqa: E501 """Get inbox tags # noqa: E501 Get all inbox tags # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_inbox_tags_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(list[str], status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = [ ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method get_inbox_tags" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['API_KEY'] # noqa: E501 return self.api_client.call_api( '/inboxes/tags', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='list[str]', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def get_inboxes(self, **kwargs): # noqa: E501 """List Inboxes and email addresses # noqa: E501 List the inboxes you have created. Note use of the more advanced `getAllEmails` is recommended and allows paginated access using a limit and sort parameter. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_inboxes(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param datetime before: Optional filter by created before given date time :param datetime since: Optional filter by created after given date time :param int size: Optional result size limit. Note an automatic limit of 100 results is applied. See the paginated `getAllEmails` for larger queries. :param str sort: Optional createdAt sort direction ASC or DESC :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. 
        :return: list[Inbox]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_inboxes_with_http_info(**kwargs)  # noqa: E501

    def get_inboxes_with_http_info(self, **kwargs):  # noqa: E501
        """List Inboxes and email addresses  # noqa: E501

        List the inboxes you have created. Note use of the more advanced `getAllInboxes` is recommended and allows paginated access using a limit and sort parameter.  # noqa: E501
        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_inboxes_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param datetime before: Optional filter by created before given date time
        :param datetime since: Optional filter by created after given date time
        :param int size: Optional result size limit. Note an automatic limit of 100 results is applied. See the paginated `getAllInboxes` for larger queries.
        :param str sort: Optional createdAt sort direction ASC or DESC
        :param _return_http_data_only: response data without status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
        :return: tuple(list[Inbox], status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
            'before',
            'since',
            'size',
            'sort'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_inboxes" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'before' in local_var_params and local_var_params['before'] is not None:  # noqa: E501
            query_params.append(('before', local_var_params['before']))  # noqa: E501
        if 'since' in local_var_params and local_var_params['since'] is not None:  # noqa: E501
            query_params.append(('since', local_var_params['since']))  # noqa: E501
        if 'size' in local_var_params and local_var_params['size'] is not None:  # noqa: E501
            query_params.append(('size', local_var_params['size']))  # noqa: E501
        if 'sort' in local_var_params and local_var_params['sort'] is not None:  # noqa: E501
            query_params.append(('sort', local_var_params['sort']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['API_KEY']  # noqa: E501

        return self.api_client.call_api(
            '/inboxes', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[Inbox]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_organization_inboxes(self, **kwargs):  # noqa: E501
        """List Organization Inboxes Paginated  # noqa: E501

        List organization inboxes in paginated form. These are inboxes created with the `allowTeamAccess` flag enabled. Organization inboxes are `readOnly` for non-admin users. The results are available on the `content` property of the returned object. This method allows for page index (zero based), page size (how many results to return), and a sort direction (based on createdAt time).  # noqa: E501
        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_organization_inboxes(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param datetime before: Optional filter by created before given date time
        :param int page: Optional page index in list pagination
        :param str search_filter: Optional search filter
        :param datetime since: Optional filter by created after given date time
        :param int size: Optional page size in list pagination
        :param str sort: Optional createdAt sort direction ASC or DESC
        :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
        :return: PageOrganizationInboxProjection
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_organization_inboxes_with_http_info(**kwargs)  # noqa: E501

    def get_organization_inboxes_with_http_info(self, **kwargs):  # noqa: E501
        """List Organization Inboxes Paginated  # noqa: E501

        List organization inboxes in paginated form. These are inboxes created with the `allowTeamAccess` flag enabled. Organization inboxes are `readOnly` for non-admin users. The results are available on the `content` property of the returned object. This method allows for page index (zero based), page size (how many results to return), and a sort direction (based on createdAt time).  # noqa: E501
        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_organization_inboxes_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param datetime before: Optional filter by created before given date time
        :param int page: Optional page index in list pagination
        :param str search_filter: Optional search filter
        :param datetime since: Optional filter by created after given date time
        :param int size: Optional page size in list pagination
        :param str sort: Optional createdAt sort direction ASC or DESC
        :param _return_http_data_only: response data without status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
        :return: tuple(PageOrganizationInboxProjection, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
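
        Example (illustrative only; assumes a configured ``api`` instance):

        >>> org_page, status, _ = api.get_organization_inboxes_with_http_info(page=0, size=20)
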
""" local_var_params = locals() all_params = [ 'before', 'page', 'search_filter', 'since', 'size', 'sort' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method get_organization_inboxes" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'before' in local_var_params and local_var_params['before'] is not None: # noqa: E501 query_params.append(('before', local_var_params['before'])) # noqa: E501 if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501 query_params.append(('page', local_var_params['page'])) # noqa: E501 if 'search_filter' in local_var_params and local_var_params['search_filter'] is not None: # noqa: E501 query_params.append(('searchFilter', local_var_params['search_filter'])) # noqa: E501 if 'since' in local_var_params and local_var_params['since'] is not None: # noqa: E501 query_params.append(('since', local_var_params['since'])) # noqa: E501 if 'size' in local_var_params and local_var_params['size'] is not None: # noqa: E501 query_params.append(('size', local_var_params['size'])) # noqa: E501 if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501 query_params.append(('sort', local_var_params['sort'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['API_KEY'] # noqa: E501 return self.api_client.call_api( '/inboxes/organization', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='PageOrganizationInboxProjection', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def list_inbox_rulesets(self, inbox_id, **kwargs): # noqa: E501 """List inbox rulesets # noqa: E501 List all rulesets attached to an inbox # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_inbox_rulesets(inbox_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str inbox_id: inboxId (required) :param datetime before: Optional filter by created before given date time :param int page: Optional page index in inbox ruleset list pagination :param str search_filter: Optional search filter :param datetime since: Optional filter by created after given date time :param int size: Optional page size in inbox ruleset list pagination :param str sort: Optional createdAt sort direction ASC or DESC :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. 
        :return: PageInboxRulesetDto
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.list_inbox_rulesets_with_http_info(inbox_id, **kwargs)  # noqa: E501

    def list_inbox_rulesets_with_http_info(self, inbox_id, **kwargs):  # noqa: E501
        """List inbox rulesets  # noqa: E501

        List all rulesets attached to an inbox  # noqa: E501
        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_inbox_rulesets_with_http_info(inbox_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str inbox_id: inboxId (required)
        :param datetime before: Optional filter by created before given date time
        :param int page: Optional page index in inbox ruleset list pagination
        :param str search_filter: Optional search filter
        :param datetime since: Optional filter by created after given date time
        :param int size: Optional page size in inbox ruleset list pagination
        :param str sort: Optional createdAt sort direction ASC or DESC
        :param _return_http_data_only: response data without status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
        :return: tuple(PageInboxRulesetDto, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
            'inbox_id',
            'before',
            'page',
            'search_filter',
            'since',
            'size',
            'sort'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_inbox_rulesets" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'inbox_id' is set
        if self.api_client.client_side_validation and ('inbox_id' not in local_var_params or  # noqa: E501
                                                       local_var_params['inbox_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `inbox_id` when calling `list_inbox_rulesets`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'inbox_id' in local_var_params:
            path_params['inboxId'] = local_var_params['inbox_id']  # noqa: E501

        query_params = []
        if 'before' in local_var_params and local_var_params['before'] is not None:  # noqa: E501
            query_params.append(('before', local_var_params['before']))  # noqa: E501
        if 'page' in local_var_params and local_var_params['page'] is not None:  # noqa: E501
            query_params.append(('page', local_var_params['page']))  # noqa: E501
        if 'search_filter' in local_var_params and local_var_params['search_filter'] is not None:  # noqa: E501
            query_params.append(('searchFilter', local_var_params['search_filter']))  # noqa: E501
        if 'since' in local_var_params and local_var_params['since'] is not None:  # noqa: E501
            query_params.append(('since', local_var_params['since']))  # noqa: E501
        if 'size' in local_var_params and local_var_params['size'] is not None:  # noqa: E501
            query_params.append(('size', local_var_params['size']))  # noqa: E501
        if 'sort' in local_var_params and local_var_params['sort'] is not None:  # noqa: E501
            query_params.append(('sort', local_var_params['sort']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['API_KEY']  # noqa: E501

        return self.api_client.call_api(
            '/inboxes/{inboxId}/rulesets', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='PageInboxRulesetDto',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def list_inbox_tracking_pixels(self, inbox_id, **kwargs):  # noqa: E501
        """List inbox tracking pixels  # noqa: E501

        List all tracking pixels sent from an inbox  # noqa: E501
        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_inbox_tracking_pixels(inbox_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str inbox_id: inboxId (required)
        :param datetime before: Optional filter by created before given date time
        :param int page: Optional page index in inbox tracking pixel list pagination
        :param str search_filter: Optional search filter
        :param datetime since: Optional filter by created after given date time
        :param int size: Optional page size in inbox tracking pixel list pagination
        :param str sort: Optional createdAt sort direction ASC or DESC
        :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
        :return: PageTrackingPixelProjection
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.list_inbox_tracking_pixels_with_http_info(inbox_id, **kwargs)  # noqa: E501

    def list_inbox_tracking_pixels_with_http_info(self, inbox_id, **kwargs):  # noqa: E501
        """List inbox tracking pixels  # noqa: E501

        List all tracking pixels sent from an inbox  # noqa: E501
        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_inbox_tracking_pixels_with_http_info(inbox_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str inbox_id: inboxId (required)
        :param datetime before: Optional filter by created before given date time
        :param int page: Optional page index in inbox tracking pixel list pagination
        :param str search_filter: Optional search filter
        :param datetime since: Optional filter by created after given date time
        :param int size: Optional page size in inbox tracking pixel list pagination
        :param str sort: Optional createdAt sort direction ASC or DESC
        :param _return_http_data_only: response data without status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request.
                                 If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
        :return: tuple(PageTrackingPixelProjection, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
            'inbox_id',
            'before',
            'page',
            'search_filter',
            'since',
            'size',
            'sort'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_inbox_tracking_pixels" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'inbox_id' is set
        if self.api_client.client_side_validation and ('inbox_id' not in local_var_params or  # noqa: E501
                                                       local_var_params['inbox_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `inbox_id` when calling `list_inbox_tracking_pixels`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'inbox_id' in local_var_params:
            path_params['inboxId'] = local_var_params['inbox_id']  # noqa: E501

        query_params = []
        if 'before' in local_var_params and local_var_params['before'] is not None:  # noqa: E501
            query_params.append(('before', local_var_params['before']))  # noqa: E501
        if 'page' in local_var_params and local_var_params['page'] is not None:  # noqa: E501
            query_params.append(('page', local_var_params['page']))  # noqa: E501
        if 'search_filter' in local_var_params and local_var_params['search_filter'] is not None:  # noqa: E501
            query_params.append(('searchFilter', local_var_params['search_filter']))  # noqa: E501
        if 'since' in local_var_params and local_var_params['since'] is not None:  # noqa: E501
            query_params.append(('since', local_var_params['since']))  # noqa: E501
        if 'size' in local_var_params and local_var_params['size'] is not None:  # noqa: E501
            query_params.append(('size', local_var_params['size']))  # noqa: E501
        if 'sort' in local_var_params and local_var_params['sort'] is not None:  # noqa: E501
            query_params.append(('sort', local_var_params['sort']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['API_KEY']  # noqa: E501

        return self.api_client.call_api(
            '/inboxes/{inboxId}/tracking-pixels', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='PageTrackingPixelProjection',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def send_email(self, inbox_id, **kwargs):  # noqa: E501
        """Send Email  # noqa: E501

        Send an email from an inbox's email address. The request body should contain the `SendEmailOptions` that include recipients, attachments, body etc. See `SendEmailOptions` for all available properties. Note the `inboxId` refers to the inbox's id, not the inbox's email address. See https://www.mailslurp.com/guides/ for more information on how to send emails. This method does not return a sent email entity due to legacy reasons.
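
        A minimal sending sketch (illustrative only; the recipient address, the ``SendEmailOptions`` field names, and the ``api`` instance are assumptions, and ``SendEmailOptions`` is assumed to be importable from the client package):

        >>> options = SendEmailOptions(to=['test@example.com'], subject='Hello', body='Hi there')
        >>> api.send_email(inbox_id, send_email_options=options)
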
        To send and get a sent email as the returned response use the sister method `sendEmailAndConfirm`.  # noqa: E501
        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.send_email(inbox_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str inbox_id: ID of the inbox you want to send the email from (required)
        :param SendEmailOptions send_email_options: Options for the email
        :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.send_email_with_http_info(inbox_id, **kwargs)  # noqa: E501

    def send_email_with_http_info(self, inbox_id, **kwargs):  # noqa: E501
        """Send Email  # noqa: E501

        Send an email from an inbox's email address. The request body should contain the `SendEmailOptions` that include recipients, attachments, body etc. See `SendEmailOptions` for all available properties. Note the `inboxId` refers to the inbox's id, not the inbox's email address. See https://www.mailslurp.com/guides/ for more information on how to send emails. This method does not return a sent email entity due to legacy reasons. To send and get a sent email as the returned response use the sister method `sendEmailAndConfirm`.  # noqa: E501
        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.send_email_with_http_info(inbox_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str inbox_id: ID of the inbox you want to send the email from (required)
        :param SendEmailOptions send_email_options: Options for the email
        :param _return_http_data_only: response data without status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
        :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
""" local_var_params = locals() all_params = [ 'inbox_id', 'send_email_options' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method send_email" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'inbox_id' is set if self.api_client.client_side_validation and ('inbox_id' not in local_var_params or # noqa: E501 local_var_params['inbox_id'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `inbox_id` when calling `send_email`") # noqa: E501 collection_formats = {} path_params = {} if 'inbox_id' in local_var_params: path_params['inboxId'] = local_var_params['inbox_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'send_email_options' in local_var_params: body_params = local_var_params['send_email_options'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['API_KEY'] # noqa: E501 return self.api_client.call_api( '/inboxes/{inboxId}', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def send_email_and_confirm(self, inbox_id, **kwargs): # noqa: E501 """Send email and return sent confirmation # noqa: E501 Sister method for standard `sendEmail` method with the benefit of returning a `SentEmail` entity confirming the successful sending of the email with a link to the sent object created for it. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.send_email_and_confirm(inbox_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str inbox_id: ID of the inbox you want to send the email from (required) :param SendEmailOptions send_email_options: Options for the email :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: SentEmailDto If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.send_email_and_confirm_with_http_info(inbox_id, **kwargs) # noqa: E501 def send_email_and_confirm_with_http_info(self, inbox_id, **kwargs): # noqa: E501 """Send email and return sent confirmation # noqa: E501 Sister method for standard `sendEmail` method with the benefit of returning a `SentEmail` entity confirming the successful sending of the email with a link to the sent object created for it. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.send_email_and_confirm_with_http_info(inbox_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str inbox_id: ID of the inbox you want to send the email from (required) :param SendEmailOptions send_email_options: Options for the email :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(SentEmailDto, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = [ 'inbox_id', 'send_email_options' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method send_email_and_confirm" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'inbox_id' is set if self.api_client.client_side_validation and ('inbox_id' not in local_var_params or # noqa: E501 local_var_params['inbox_id'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `inbox_id` when calling `send_email_and_confirm`") # noqa: E501 collection_formats = {} path_params = {} if 'inbox_id' in local_var_params: path_params['inboxId'] = local_var_params['inbox_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'send_email_options' in local_var_params: body_params = local_var_params['send_email_options'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['API_KEY'] # noqa: E501 return self.api_client.call_api( '/inboxes/{inboxId}/confirm', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SentEmailDto', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def send_test_email(self, inbox_id, **kwargs): # noqa: E501 """Send a test email to inbox # noqa: E501 Send an inbox a test email to test email receiving is working # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.send_test_email(inbox_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str inbox_id: inboxId (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. 
:param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.send_test_email_with_http_info(inbox_id, **kwargs) # noqa: E501 def send_test_email_with_http_info(self, inbox_id, **kwargs): # noqa: E501 """Send a test email to inbox # noqa: E501 Send an inbox a test email to test email receiving is working # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.send_test_email_with_http_info(inbox_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str inbox_id: inboxId (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = [ 'inbox_id' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method send_test_email" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'inbox_id' is set if self.api_client.client_side_validation and ('inbox_id' not in local_var_params or # noqa: E501 local_var_params['inbox_id'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `inbox_id` when calling `send_test_email`") # noqa: E501 collection_formats = {} path_params = {} if 'inbox_id' in local_var_params: path_params['inboxId'] = local_var_params['inbox_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = ['API_KEY'] # noqa: E501 return self.api_client.call_api( '/inboxes/{inboxId}/send-test-email', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def set_inbox_favourited(self, inbox_id, set_inbox_favourited_options, **kwargs): # noqa: E501 """Set inbox favourited state # noqa: E501 Set and return new favourite state for an inbox # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.set_inbox_favourited(inbox_id, set_inbox_favourited_options, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str inbox_id: inboxId (required) :param SetInboxFavouritedOptions set_inbox_favourited_options: setInboxFavouritedOptions (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Inbox If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.set_inbox_favourited_with_http_info(inbox_id, set_inbox_favourited_options, **kwargs) # noqa: E501 def set_inbox_favourited_with_http_info(self, inbox_id, set_inbox_favourited_options, **kwargs): # noqa: E501 """Set inbox favourited state # noqa: E501 Set and return new favourite state for an inbox # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.set_inbox_favourited_with_http_info(inbox_id, set_inbox_favourited_options, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str inbox_id: inboxId (required) :param SetInboxFavouritedOptions set_inbox_favourited_options: setInboxFavouritedOptions (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(Inbox, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. 
""" local_var_params = locals() all_params = [ 'inbox_id', 'set_inbox_favourited_options' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method set_inbox_favourited" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'inbox_id' is set if self.api_client.client_side_validation and ('inbox_id' not in local_var_params or # noqa: E501 local_var_params['inbox_id'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `inbox_id` when calling `set_inbox_favourited`") # noqa: E501 # verify the required parameter 'set_inbox_favourited_options' is set if self.api_client.client_side_validation and ('set_inbox_favourited_options' not in local_var_params or # noqa: E501 local_var_params['set_inbox_favourited_options'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `set_inbox_favourited_options` when calling `set_inbox_favourited`") # noqa: E501 collection_formats = {} path_params = {} if 'inbox_id' in local_var_params: path_params['inboxId'] = local_var_params['inbox_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'set_inbox_favourited_options' in local_var_params: body_params = local_var_params['set_inbox_favourited_options'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['API_KEY'] # noqa: E501 return self.api_client.call_api( '/inboxes/{inboxId}/favourite', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Inbox', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def update_inbox(self, inbox_id, update_inbox_options, **kwargs): # noqa: E501 """Update Inbox. Change name and description. Email address is not editable. # noqa: E501 Update editable fields on an inbox # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.update_inbox(inbox_id, update_inbox_options, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str inbox_id: inboxId (required) :param UpdateInboxOptions update_inbox_options: updateInboxOptions (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Inbox If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True return self.update_inbox_with_http_info(inbox_id, update_inbox_options, **kwargs) # noqa: E501 def update_inbox_with_http_info(self, inbox_id, update_inbox_options, **kwargs): # noqa: E501 """Update Inbox. Change name and description. Email address is not editable. # noqa: E501 Update editable fields on an inbox # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.update_inbox_with_http_info(inbox_id, update_inbox_options, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str inbox_id: inboxId (required) :param UpdateInboxOptions update_inbox_options: updateInboxOptions (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(Inbox, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = [ 'inbox_id', 'update_inbox_options' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method update_inbox" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'inbox_id' is set if self.api_client.client_side_validation and ('inbox_id' not in local_var_params or # noqa: E501 local_var_params['inbox_id'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `inbox_id` when calling `update_inbox`") # noqa: E501 # verify the required parameter 'update_inbox_options' is set if self.api_client.client_side_validation and ('update_inbox_options' not in local_var_params or # noqa: E501 local_var_params['update_inbox_options'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `update_inbox_options` when calling `update_inbox`") # noqa: E501 collection_formats = {} path_params = {} if 'inbox_id' in local_var_params: path_params['inboxId'] = local_var_params['inbox_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'update_inbox_options' in local_var_params: body_params = local_var_params['update_inbox_options'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['API_KEY'] # noqa: E501 return self.api_client.call_api( '/inboxes/{inboxId}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Inbox', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), 
_request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
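The docstrings above show both calling conventions for this generated client: a plain synchronous call, and `async_req=True`, which returns a thread to join with `.get()`. Below is a minimal usage sketch, assuming the client is packaged as `mailslurp_client` with the usual OpenAPI-generated `Configuration`/`ApiClient` plumbing and an `InboxControllerApi` wrapper; the `x-api-key` header name and the `SendEmailOptions` field names are likewise assumptions, not taken from this file.

```python
# Usage sketch only, not verified against the SDK: the module name
# mailslurp_client, the InboxControllerApi class, the 'x-api-key' entry for
# the API_KEY auth setting, and the SendEmailOptions fields are assumptions.
import mailslurp_client

configuration = mailslurp_client.Configuration()
configuration.api_key['x-api-key'] = 'YOUR_API_KEY'  # assumed header name

api_client = mailslurp_client.ApiClient(configuration)
api = mailslurp_client.InboxControllerApi(api_client)

options = mailslurp_client.SendEmailOptions(
    to=['recipient@example.com'], subject='Hello', body='Test body')

# send_email returns None for legacy reasons; send_email_and_confirm
# returns a SentEmailDto describing the sent message instead.
sent = api.send_email_and_confirm('my-inbox-id', send_email_options=options)

# Asynchronous variant, exactly as the docstrings show: pass async_req=True
# and join the returned thread with .get().
thread = api.send_email('my-inbox-id', send_email_options=options, async_req=True)
thread.get()
```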
51.742352
978
0.61948
18,142
152,226
4.982472
0.033072
0.043632
0.069232
0.0229
0.969267
0.964809
0.959488
0.954399
0.944619
0.938059
0
0.014143
0.311616
152,226
2,941
979
51.759946
0.848459
0.508632
0
0.755274
0
0
0.17481
0.040769
0
0
0
0
0
1
0.033052
false
0
0.003516
0
0.06962
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
bc27506c24fc4223a952ab56f442a07ed64adefa
7,586
py
Python
haychecker/_test/chc/freshness_test.py
fruttasecca/hay_checker
2bbf4e8e90e0abc590dd74080fb6e4f445056354
[ "MIT" ]
2
2019-05-22T08:24:38.000Z
2020-12-04T13:36:30.000Z
haychecker/_test/chc/freshness_test.py
fruttasecca/hay_checker
2bbf4e8e90e0abc590dd74080fb6e4f445056354
[ "MIT" ]
null
null
null
haychecker/_test/chc/freshness_test.py
fruttasecca/hay_checker
2bbf4e8e90e0abc590dd74080fb6e4f445056354
[ "MIT" ]
3
2018-09-15T13:40:40.000Z
2021-06-29T23:31:18.000Z
import datetime
import time as timelib
import unittest

import numpy as np
import pandas as pd

from haychecker.chc.metrics import freshness


def to_datetime_cached(s, format):
    """
    Transform a series of strings (dates) to datetimes, with a dict to cache results.

    :param s:
    :param format:
    :return:
    """
    dates = {date: pd.to_datetime(date, errors="coerce", format=format)
             for date in s.dropna().unique()}
    dates[np.NaN] = None
    return s.map(dates)


class TestFreshness(unittest.TestCase):
    def test_empty(self):
        df = pd.DataFrame()
        df["c1"] = []
        df["c2"] = []

        r1, r2 = freshness(["c1", "c2"], dateFormat="%d:%M:%y", df=df)
        self.assertEqual("None days", r1)
        self.assertEqual("None days", r2)

        r1, r2 = freshness(["c1", "c2"], timeFormat="%d:%M:%y", df=df)
        self.assertEqual("None seconds", r1)
        self.assertEqual("None seconds", r2)

    def test_allnull(self):
        df = pd.DataFrame()
        df["c1"] = [None for _ in range(100)]
        df["c2"] = [np.NaN for _ in range(100)]
        df["c1"] = df["c1"].astype(str)
        df["c2"] = df["c2"].astype(str)

        r1, r2 = freshness(["c1", "c2"], dateFormat="%d:%M:%y", df=df)
        self.assertEqual(r1, "nan days")
        self.assertEqual(r2, "nan days")

        r1, r2 = freshness(["c1", "c2"], timeFormat="%d:%M:%y", df=df)
        self.assertEqual(r1, "nan seconds")
        self.assertEqual(r2, "nan seconds")

    def test_dateformat(self):
        format = "%Y-%m-%d %H:%M:%S"
        now = str(datetime.datetime.now())[:19]

        # test wrong type of column
        df = pd.DataFrame()
        dates = [i for i in range(100)]
        df["c1"] = dates

        # test correct type
        df = pd.DataFrame()
        dates = [now for _ in range(100)]
        df["c1"] = dates
        df["c2"] = pd.to_datetime(df["c1"], errors="coerce", format=format)
        df["c3"] = pd.to_datetime(df["c1"], errors="coerce", format=format)

        r1, r2, r3 = freshness(["c1", "c2", "c3"], dateFormat=format, df=df)
        self.assertEqual(r1, "0.0 days")
        self.assertEqual(r2, "0.0 days")
        self.assertEqual(r3, "0.0 days")

        df = pd.DataFrame()
        dates = [now for _ in range(100)]
        for i in range(20):
            dates[-(i + 1)] = None
        df["c1"] = dates
        df["c2"] = pd.to_datetime(df["c1"], errors="coerce", format=format)
        df["c3"] = pd.to_datetime(df["c1"], errors="coerce", format=format)

        r1, r2, r3 = freshness(["c1", "c2", "c3"], dateFormat=format, df=df)
        self.assertEqual(r1, "0.0 days")
        self.assertEqual(r2, "0.0 days")
        self.assertEqual(r3, "0.0 days")

    def test_timeformat_nodate(self):
        format = "%H:%M:%S"
        now = str(datetime.datetime.now())[11:19]

        # test wrong type of column
        df = pd.DataFrame()
        times = [i for i in range(100)]
        df["c1"] = times
        with self.assertRaises(SystemExit) as cm:
            r1 = freshness(["c1"], timeFormat=format, df=df)

        # test correct type
        df = pd.DataFrame()
        times = [now for _ in range(100)]
        df["c1"] = times
        df["c2"] = pd.to_datetime(df["c1"], errors="coerce", format=format)
        df["c3"] = pd.to_datetime(df["c1"], errors="coerce", format=format)

        r1, r2, r3 = freshness(["c1", "c2", "c3"], timeFormat=format, df=df)
        r1 = float(r1.split(" ")[0])
        r2 = float(r2.split(" ")[0])
        r3 = float(r3.split(" ")[0])
        self.assertLessEqual(r1, 10.0)
        self.assertLessEqual(r2, 10.0)
        self.assertLessEqual(r3, 10.0)

        df = pd.DataFrame()
        times = [now for _ in range(100)]
        for i in range(20):
            times[-(i + 1)] = None
        df["c1"] = times
        df["c2"] = pd.to_datetime(df["c1"], errors="coerce", format=format)
        df["c3"] = pd.to_datetime(df["c1"], errors="coerce", format=format)

        r1, r2, r3 = freshness(["c1", "c2", "c3"], timeFormat=format, df=df)
        r1 = float(r1.split(" ")[0])
        r2 = float(r2.split(" ")[0])
        r3 = float(r3.split(" ")[0])
        self.assertLessEqual(r1, 10.0)
        self.assertLessEqual(r2, 10.0)
        self.assertLessEqual(r3, 10.0)

    def test_timeformat_nodate_dateincolumns(self):
        format = "%H:%M:%S"
        now = str(datetime.datetime.now())[11:19]

        # test wrong type of column
        df = pd.DataFrame()
        times = [i for i in range(100)]
        df["c1"] = times
        with self.assertRaises(SystemExit) as cm:
            r1 = freshness(["c1"], timeFormat=format, df=df)

        # test correct type
        df = pd.DataFrame()
        times = [now for _ in range(100)]
        df["c1"] = times
        df["c2"] = pd.to_datetime(df["c1"], errors="coerce", format=format)
        df["c3"] = pd.to_datetime(df["c1"], errors="coerce", format=format)

        r1, r2, r3 = freshness(["c1", "c2", "c3"], timeFormat=format, df=df)
        r1 = float(r1.split(" ")[0])
        r2 = float(r2.split(" ")[0])
        r3 = float(r3.split(" ")[0])
        self.assertLessEqual(r1, 10.0)
        self.assertLessEqual(r2, 10.0)
        self.assertLessEqual(r3, 10.0)

        df = pd.DataFrame()
        times = [now for _ in range(100)]
        for i in range(20):
            times[-(i + 1)] = ""
        df["c1"] = times
        df["c2"] = pd.to_datetime(df["c1"], errors="coerce", format=format)
        df["c3"] = pd.to_datetime(df["c1"], errors="coerce", format=format)

        r1, r2, r3 = freshness(["c1", "c2", "c3"], timeFormat=format, df=df)
        r1 = float(r1.split(" ")[0])
        r2 = float(r2.split(" ")[0])
        r3 = float(r3.split(" ")[0])
        self.assertLessEqual(r1, 10.0)
        self.assertLessEqual(r2, 10.0)
        self.assertLessEqual(r3, 10.0)

    def test_timeformat_withdate(self):
        format = "%Y-%m-%d %H:%M:%S"
        time = str(datetime.datetime.now())[11:19]
        time = "1970-01-01 " + time

        # test wrong type of column
        df = pd.DataFrame()
        times = [i for i in range(100)]
        df["c1"] = times
        with self.assertRaises(SystemExit) as cm:
            r1 = freshness(["c1"], timeFormat=format, df=df)

        # test correct type
        df = pd.DataFrame()
        times = [time for _ in range(100)]
        df["c1"] = times
        df["c2"] = pd.to_datetime(df["c1"], errors="coerce", format=format)
        df["c3"] = pd.to_datetime(df["c1"], errors="coerce", format=format)

        # seconds from 1970 plus 10 seconds for computation time
        seconds = int(timelib.time()) + 10
        r1, r2, r3 = freshness(["c1", "c2", "c3"], timeFormat=format, df=df)
        r1 = float(r1.split(" ")[0])
        r2 = float(r2.split(" ")[0])
        r3 = float(r3.split(" ")[0])
        self.assertLessEqual(r1, seconds)
        self.assertLessEqual(r2, seconds)
        self.assertLessEqual(r3, seconds)

        df = pd.DataFrame()
        times = [time for _ in range(100)]
        for i in range(20):
            times[-(i + 40)] = np.NaN
        df["c1"] = times
        df["c2"] = pd.to_datetime(df["c1"], errors="coerce", format=format)
        df["c3"] = pd.to_datetime(df["c1"], errors="coerce", format=format)

        r1, r2, r3 = freshness(["c1", "c2", "c3"], timeFormat=format, df=df)
        r1 = float(r1.split(" ")[0])
        r2 = float(r2.split(" ")[0])
        r3 = float(r3.split(" ")[0])
        self.assertLessEqual(r1, seconds)
        self.assertLessEqual(r2, seconds)
        self.assertLessEqual(r3, seconds)
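The `to_datetime_cached` helper above is a memoized parse: each distinct string is converted exactly once, and the cached results are mapped back onto the full series. A small, hypothetical demonstration of the same pattern (the sample data is made up):

```python
import numpy as np
import pandas as pd

# Demonstration of the caching pattern used by to_datetime_cached above:
# parse each distinct string once, then map the cached results back onto a
# series that may repeat those strings thousands of times.
s = pd.Series(["2022-03-08", "2022-03-09", np.nan] * 1000)
dates = {d: pd.to_datetime(d, errors="coerce", format="%Y-%m-%d")
         for d in s.dropna().unique()}
dates[np.nan] = None  # mirror the helper: route missing values through the cache
parsed = s.map(dates)
print(parsed.head(3))
```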
35.12037
104
0.540469
1,021
7,586
3.977473
0.096964
0.031519
0.050234
0.100468
0.818271
0.812115
0.78626
0.78626
0.765575
0.735041
0
0.06226
0.278012
7,586
215
105
35.283721
0.679204
0.045742
0
0.754601
0
0
0.071538
0
0
0
0
0
0.214724
1
0.042945
false
0
0.03681
0
0.092025
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
bc345f0a5eb0ae1ac1124f1b01dcde7c49ebd177
1,750
py
Python
migrations/versions/902908e7b753_initial_migration.py
LemmyMwaura/make_your_pitch
465d4777c3c67cb03a02d46d516efd1ac96f256e
[ "MIT" ]
null
null
null
migrations/versions/902908e7b753_initial_migration.py
LemmyMwaura/make_your_pitch
465d4777c3c67cb03a02d46d516efd1ac96f256e
[ "MIT" ]
null
null
null
migrations/versions/902908e7b753_initial_migration.py
LemmyMwaura/make_your_pitch
465d4777c3c67cb03a02d46d516efd1ac96f256e
[ "MIT" ]
1
2022-03-15T07:50:08.000Z
2022-03-15T07:50:08.000Z
"""Initial migration Revision ID: 902908e7b753 Revises: Create Date: 2022-03-08 19:20:52.202395 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '902908e7b753' down_revision = None branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('users', 'firstname', existing_type=sa.VARCHAR(length=80), nullable=False) op.alter_column('users', 'lastname', existing_type=sa.VARCHAR(length=80), nullable=False) op.alter_column('users', 'email', existing_type=sa.VARCHAR(length=80), nullable=False) op.alter_column('users', 'username', existing_type=sa.VARCHAR(length=80), nullable=False) op.alter_column('users', 'password', existing_type=sa.VARCHAR(length=200), nullable=False) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('users', 'password', existing_type=sa.VARCHAR(length=200), nullable=True) op.alter_column('users', 'username', existing_type=sa.VARCHAR(length=80), nullable=True) op.alter_column('users', 'email', existing_type=sa.VARCHAR(length=80), nullable=True) op.alter_column('users', 'lastname', existing_type=sa.VARCHAR(length=80), nullable=True) op.alter_column('users', 'firstname', existing_type=sa.VARCHAR(length=80), nullable=True) # ### end Alembic commands ###
30.701754
65
0.599429
193
1,750
5.316062
0.295337
0.068226
0.126706
0.175439
0.732943
0.732943
0.729045
0.729045
0.725146
0.725146
0
0.048819
0.274286
1,750
56
66
31.25
0.759055
0.164
0
0.789474
0
0
0.096774
0
0
0
0
0
0
1
0.052632
false
0.052632
0.052632
0
0.105263
0
0
0
0
null
0
0
1
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
8
70c85807c38325e69bdc54c517d982e82c0ae0cb
24,075
py
Python
sdk/python/pulumi_azure/digitaltwins/endpoint_servicebus.py
henriktao/pulumi-azure
f1cbcf100b42b916da36d8fe28be3a159abaf022
[ "ECL-2.0", "Apache-2.0" ]
109
2018-06-18T00:19:44.000Z
2022-02-20T05:32:57.000Z
sdk/python/pulumi_azure/digitaltwins/endpoint_servicebus.py
henriktao/pulumi-azure
f1cbcf100b42b916da36d8fe28be3a159abaf022
[ "ECL-2.0", "Apache-2.0" ]
663
2018-06-18T21:08:46.000Z
2022-03-31T20:10:11.000Z
sdk/python/pulumi_azure/digitaltwins/endpoint_servicebus.py
henriktao/pulumi-azure
f1cbcf100b42b916da36d8fe28be3a159abaf022
[ "ECL-2.0", "Apache-2.0" ]
41
2018-07-19T22:37:38.000Z
2022-03-14T10:56:26.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = ['EndpointServicebusArgs', 'EndpointServicebus'] @pulumi.input_type class EndpointServicebusArgs: def __init__(__self__, *, digital_twins_id: pulumi.Input[str], servicebus_primary_connection_string: pulumi.Input[str], servicebus_secondary_connection_string: pulumi.Input[str], dead_letter_storage_secret: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a EndpointServicebus resource. :param pulumi.Input[str] digital_twins_id: The ID of the Digital Twins Instance. Changing this forces a new Digital Twins Service Bus Endpoint to be created. :param pulumi.Input[str] servicebus_primary_connection_string: The primary connection string of the Service Bus Topic Authorization Rule with a minimum of `send` permission. . :param pulumi.Input[str] servicebus_secondary_connection_string: The secondary connection string of the Service Bus Topic Authorization Rule with a minimum of `send` permission. :param pulumi.Input[str] dead_letter_storage_secret: The storage secret of the dead-lettering, whose format is `https://<storageAccountname>.blob.core.windows.net/<containerName>?<SASToken>`. When an endpoint can't deliver an event within a certain time period or after trying to deliver the event a certain number of times, it can send the undelivered event to a storage account. :param pulumi.Input[str] name: The name which should be used for this Digital Twins Service Bus Endpoint. Changing this forces a new Digital Twins Service Bus Endpoint to be created. """ pulumi.set(__self__, "digital_twins_id", digital_twins_id) pulumi.set(__self__, "servicebus_primary_connection_string", servicebus_primary_connection_string) pulumi.set(__self__, "servicebus_secondary_connection_string", servicebus_secondary_connection_string) if dead_letter_storage_secret is not None: pulumi.set(__self__, "dead_letter_storage_secret", dead_letter_storage_secret) if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter(name="digitalTwinsId") def digital_twins_id(self) -> pulumi.Input[str]: """ The ID of the Digital Twins Instance. Changing this forces a new Digital Twins Service Bus Endpoint to be created. """ return pulumi.get(self, "digital_twins_id") @digital_twins_id.setter def digital_twins_id(self, value: pulumi.Input[str]): pulumi.set(self, "digital_twins_id", value) @property @pulumi.getter(name="servicebusPrimaryConnectionString") def servicebus_primary_connection_string(self) -> pulumi.Input[str]: """ The primary connection string of the Service Bus Topic Authorization Rule with a minimum of `send` permission. . """ return pulumi.get(self, "servicebus_primary_connection_string") @servicebus_primary_connection_string.setter def servicebus_primary_connection_string(self, value: pulumi.Input[str]): pulumi.set(self, "servicebus_primary_connection_string", value) @property @pulumi.getter(name="servicebusSecondaryConnectionString") def servicebus_secondary_connection_string(self) -> pulumi.Input[str]: """ The secondary connection string of the Service Bus Topic Authorization Rule with a minimum of `send` permission. 
""" return pulumi.get(self, "servicebus_secondary_connection_string") @servicebus_secondary_connection_string.setter def servicebus_secondary_connection_string(self, value: pulumi.Input[str]): pulumi.set(self, "servicebus_secondary_connection_string", value) @property @pulumi.getter(name="deadLetterStorageSecret") def dead_letter_storage_secret(self) -> Optional[pulumi.Input[str]]: """ The storage secret of the dead-lettering, whose format is `https://<storageAccountname>.blob.core.windows.net/<containerName>?<SASToken>`. When an endpoint can't deliver an event within a certain time period or after trying to deliver the event a certain number of times, it can send the undelivered event to a storage account. """ return pulumi.get(self, "dead_letter_storage_secret") @dead_letter_storage_secret.setter def dead_letter_storage_secret(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "dead_letter_storage_secret", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name which should be used for this Digital Twins Service Bus Endpoint. Changing this forces a new Digital Twins Service Bus Endpoint to be created. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @pulumi.input_type class _EndpointServicebusState: def __init__(__self__, *, dead_letter_storage_secret: Optional[pulumi.Input[str]] = None, digital_twins_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, servicebus_primary_connection_string: Optional[pulumi.Input[str]] = None, servicebus_secondary_connection_string: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering EndpointServicebus resources. :param pulumi.Input[str] dead_letter_storage_secret: The storage secret of the dead-lettering, whose format is `https://<storageAccountname>.blob.core.windows.net/<containerName>?<SASToken>`. When an endpoint can't deliver an event within a certain time period or after trying to deliver the event a certain number of times, it can send the undelivered event to a storage account. :param pulumi.Input[str] digital_twins_id: The ID of the Digital Twins Instance. Changing this forces a new Digital Twins Service Bus Endpoint to be created. :param pulumi.Input[str] name: The name which should be used for this Digital Twins Service Bus Endpoint. Changing this forces a new Digital Twins Service Bus Endpoint to be created. :param pulumi.Input[str] servicebus_primary_connection_string: The primary connection string of the Service Bus Topic Authorization Rule with a minimum of `send` permission. . :param pulumi.Input[str] servicebus_secondary_connection_string: The secondary connection string of the Service Bus Topic Authorization Rule with a minimum of `send` permission. 
""" if dead_letter_storage_secret is not None: pulumi.set(__self__, "dead_letter_storage_secret", dead_letter_storage_secret) if digital_twins_id is not None: pulumi.set(__self__, "digital_twins_id", digital_twins_id) if name is not None: pulumi.set(__self__, "name", name) if servicebus_primary_connection_string is not None: pulumi.set(__self__, "servicebus_primary_connection_string", servicebus_primary_connection_string) if servicebus_secondary_connection_string is not None: pulumi.set(__self__, "servicebus_secondary_connection_string", servicebus_secondary_connection_string) @property @pulumi.getter(name="deadLetterStorageSecret") def dead_letter_storage_secret(self) -> Optional[pulumi.Input[str]]: """ The storage secret of the dead-lettering, whose format is `https://<storageAccountname>.blob.core.windows.net/<containerName>?<SASToken>`. When an endpoint can't deliver an event within a certain time period or after trying to deliver the event a certain number of times, it can send the undelivered event to a storage account. """ return pulumi.get(self, "dead_letter_storage_secret") @dead_letter_storage_secret.setter def dead_letter_storage_secret(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "dead_letter_storage_secret", value) @property @pulumi.getter(name="digitalTwinsId") def digital_twins_id(self) -> Optional[pulumi.Input[str]]: """ The ID of the Digital Twins Instance. Changing this forces a new Digital Twins Service Bus Endpoint to be created. """ return pulumi.get(self, "digital_twins_id") @digital_twins_id.setter def digital_twins_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "digital_twins_id", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name which should be used for this Digital Twins Service Bus Endpoint. Changing this forces a new Digital Twins Service Bus Endpoint to be created. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="servicebusPrimaryConnectionString") def servicebus_primary_connection_string(self) -> Optional[pulumi.Input[str]]: """ The primary connection string of the Service Bus Topic Authorization Rule with a minimum of `send` permission. . """ return pulumi.get(self, "servicebus_primary_connection_string") @servicebus_primary_connection_string.setter def servicebus_primary_connection_string(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "servicebus_primary_connection_string", value) @property @pulumi.getter(name="servicebusSecondaryConnectionString") def servicebus_secondary_connection_string(self) -> Optional[pulumi.Input[str]]: """ The secondary connection string of the Service Bus Topic Authorization Rule with a minimum of `send` permission. 
""" return pulumi.get(self, "servicebus_secondary_connection_string") @servicebus_secondary_connection_string.setter def servicebus_secondary_connection_string(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "servicebus_secondary_connection_string", value) class EndpointServicebus(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, dead_letter_storage_secret: Optional[pulumi.Input[str]] = None, digital_twins_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, servicebus_primary_connection_string: Optional[pulumi.Input[str]] = None, servicebus_secondary_connection_string: Optional[pulumi.Input[str]] = None, __props__=None): """ Manages a Digital Twins Service Bus Endpoint. ## Example Usage ```python import pulumi import pulumi_azure as azure example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe") example_instance = azure.digitaltwins.Instance("exampleInstance", resource_group_name=example_resource_group.name, location=example_resource_group.location) example_namespace = azure.servicebus.Namespace("exampleNamespace", location=example_resource_group.location, resource_group_name=example_resource_group.name, sku="Standard") example_topic = azure.servicebus.Topic("exampleTopic", namespace_name=example_namespace.name, resource_group_name=example_resource_group.name) example_topic_authorization_rule = azure.servicebus.TopicAuthorizationRule("exampleTopicAuthorizationRule", namespace_name=example_namespace.name, resource_group_name=example_resource_group.name, topic_name=example_topic.name, listen=False, send=True, manage=False) example_endpoint_servicebus = azure.digitaltwins.EndpointServicebus("exampleEndpointServicebus", digital_twins_id=example_instance.id, servicebus_primary_connection_string=example_topic_authorization_rule.primary_connection_string, servicebus_secondary_connection_string=example_topic_authorization_rule.secondary_connection_string) ``` ## Import Digital Twins Service Bus Endpoints can be imported using the `resource id`, e.g. ```sh $ pulumi import azure:digitaltwins/endpointServicebus:EndpointServicebus example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.DigitalTwins/digitalTwinsInstances/dt1/endpoints/ep1 ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] dead_letter_storage_secret: The storage secret of the dead-lettering, whose format is `https://<storageAccountname>.blob.core.windows.net/<containerName>?<SASToken>`. When an endpoint can't deliver an event within a certain time period or after trying to deliver the event a certain number of times, it can send the undelivered event to a storage account. :param pulumi.Input[str] digital_twins_id: The ID of the Digital Twins Instance. Changing this forces a new Digital Twins Service Bus Endpoint to be created. :param pulumi.Input[str] name: The name which should be used for this Digital Twins Service Bus Endpoint. Changing this forces a new Digital Twins Service Bus Endpoint to be created. :param pulumi.Input[str] servicebus_primary_connection_string: The primary connection string of the Service Bus Topic Authorization Rule with a minimum of `send` permission. . 
:param pulumi.Input[str] servicebus_secondary_connection_string: The secondary connection string of the Service Bus Topic Authorization Rule with a minimum of `send` permission. """ ... @overload def __init__(__self__, resource_name: str, args: EndpointServicebusArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Manages a Digital Twins Service Bus Endpoint. ## Example Usage ```python import pulumi import pulumi_azure as azure example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe") example_instance = azure.digitaltwins.Instance("exampleInstance", resource_group_name=example_resource_group.name, location=example_resource_group.location) example_namespace = azure.servicebus.Namespace("exampleNamespace", location=example_resource_group.location, resource_group_name=example_resource_group.name, sku="Standard") example_topic = azure.servicebus.Topic("exampleTopic", namespace_name=example_namespace.name, resource_group_name=example_resource_group.name) example_topic_authorization_rule = azure.servicebus.TopicAuthorizationRule("exampleTopicAuthorizationRule", namespace_name=example_namespace.name, resource_group_name=example_resource_group.name, topic_name=example_topic.name, listen=False, send=True, manage=False) example_endpoint_servicebus = azure.digitaltwins.EndpointServicebus("exampleEndpointServicebus", digital_twins_id=example_instance.id, servicebus_primary_connection_string=example_topic_authorization_rule.primary_connection_string, servicebus_secondary_connection_string=example_topic_authorization_rule.secondary_connection_string) ``` ## Import Digital Twins Service Bus Endpoints can be imported using the `resource id`, e.g. ```sh $ pulumi import azure:digitaltwins/endpointServicebus:EndpointServicebus example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.DigitalTwins/digitalTwinsInstances/dt1/endpoints/ep1 ``` :param str resource_name: The name of the resource. :param EndpointServicebusArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(EndpointServicebusArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, dead_letter_storage_secret: Optional[pulumi.Input[str]] = None, digital_twins_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, servicebus_primary_connection_string: Optional[pulumi.Input[str]] = None, servicebus_secondary_connection_string: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = EndpointServicebusArgs.__new__(EndpointServicebusArgs) __props__.__dict__["dead_letter_storage_secret"] = dead_letter_storage_secret if digital_twins_id is None and not opts.urn: raise TypeError("Missing required property 'digital_twins_id'") __props__.__dict__["digital_twins_id"] = digital_twins_id __props__.__dict__["name"] = name if servicebus_primary_connection_string is None and not opts.urn: raise TypeError("Missing required property 'servicebus_primary_connection_string'") __props__.__dict__["servicebus_primary_connection_string"] = servicebus_primary_connection_string if servicebus_secondary_connection_string is None and not opts.urn: raise TypeError("Missing required property 'servicebus_secondary_connection_string'") __props__.__dict__["servicebus_secondary_connection_string"] = servicebus_secondary_connection_string super(EndpointServicebus, __self__).__init__( 'azure:digitaltwins/endpointServicebus:EndpointServicebus', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, dead_letter_storage_secret: Optional[pulumi.Input[str]] = None, digital_twins_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, servicebus_primary_connection_string: Optional[pulumi.Input[str]] = None, servicebus_secondary_connection_string: Optional[pulumi.Input[str]] = None) -> 'EndpointServicebus': """ Get an existing EndpointServicebus resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] dead_letter_storage_secret: The storage secret of the dead-lettering, whose format is `https://<storageAccountname>.blob.core.windows.net/<containerName>?<SASToken>`. When an endpoint can't deliver an event within a certain time period or after trying to deliver the event a certain number of times, it can send the undelivered event to a storage account. :param pulumi.Input[str] digital_twins_id: The ID of the Digital Twins Instance. Changing this forces a new Digital Twins Service Bus Endpoint to be created. 
:param pulumi.Input[str] name: The name which should be used for this Digital Twins Service Bus Endpoint. Changing this forces a new Digital Twins Service Bus Endpoint to be created. :param pulumi.Input[str] servicebus_primary_connection_string: The primary connection string of the Service Bus Topic Authorization Rule with a minimum of `send` permission. . :param pulumi.Input[str] servicebus_secondary_connection_string: The secondary connection string of the Service Bus Topic Authorization Rule with a minimum of `send` permission. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _EndpointServicebusState.__new__(_EndpointServicebusState) __props__.__dict__["dead_letter_storage_secret"] = dead_letter_storage_secret __props__.__dict__["digital_twins_id"] = digital_twins_id __props__.__dict__["name"] = name __props__.__dict__["servicebus_primary_connection_string"] = servicebus_primary_connection_string __props__.__dict__["servicebus_secondary_connection_string"] = servicebus_secondary_connection_string return EndpointServicebus(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="deadLetterStorageSecret") def dead_letter_storage_secret(self) -> pulumi.Output[Optional[str]]: """ The storage secret of the dead-lettering, whose format is `https://<storageAccountname>.blob.core.windows.net/<containerName>?<SASToken>`. When an endpoint can't deliver an event within a certain time period or after trying to deliver the event a certain number of times, it can send the undelivered event to a storage account. """ return pulumi.get(self, "dead_letter_storage_secret") @property @pulumi.getter(name="digitalTwinsId") def digital_twins_id(self) -> pulumi.Output[str]: """ The ID of the Digital Twins Instance. Changing this forces a new Digital Twins Service Bus Endpoint to be created. """ return pulumi.get(self, "digital_twins_id") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name which should be used for this Digital Twins Service Bus Endpoint. Changing this forces a new Digital Twins Service Bus Endpoint to be created. """ return pulumi.get(self, "name") @property @pulumi.getter(name="servicebusPrimaryConnectionString") def servicebus_primary_connection_string(self) -> pulumi.Output[str]: """ The primary connection string of the Service Bus Topic Authorization Rule with a minimum of `send` permission. . """ return pulumi.get(self, "servicebus_primary_connection_string") @property @pulumi.getter(name="servicebusSecondaryConnectionString") def servicebus_secondary_connection_string(self) -> pulumi.Output[str]: """ The secondary connection string of the Service Bus Topic Authorization Rule with a minimum of `send` permission. """ return pulumi.get(self, "servicebus_secondary_connection_string")
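The docstrings above already show construction and CLI import; the static `get()` method additionally lets a program adopt an existing endpoint by ID. A hypothetical sketch of that lookup, with the resource ID copied from the placeholder in the docstring's import example rather than a real subscription:

```python
# Hypothetical use of the static get() method defined above; the resource ID
# is the placeholder from the docstring import example, not a real resource.
import pulumi
import pulumi_azure as azure

existing = azure.digitaltwins.EndpointServicebus.get(
    "existing-endpoint",
    id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1"
       "/providers/Microsoft.DigitalTwins/digitalTwinsInstances/dt1/endpoints/ep1")

# Properties come back as pulumi Outputs, e.g. the primary connection string.
pulumi.export("servicebusPrimary", existing.servicebus_primary_connection_string)
```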
58.012048
388
0.715971
2,848
24,075
5.797402
0.077247
0.083338
0.056811
0.047968
0.894252
0.883169
0.880383
0.866816
0.860457
0.851675
0
0.003718
0.206729
24,075
414
389
58.152174
0.860823
0.463219
0
0.653659
1
0
0.157064
0.113444
0
0
0
0
0
1
0.156098
false
0.004878
0.02439
0
0.273171
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
cb0719f0e0b23b3db06b73aaeb952172972608d4
214
py
Python
coollib/__init__.py
xdaTq/coollib
a10a4485edf11dc9af3ec77200d36b06ded97895
[ "MIT" ]
1
2021-04-08T10:43:37.000Z
2021-04-08T10:43:37.000Z
coollib/__init__.py
xdaTq/coollib
a10a4485edf11dc9af3ec77200d36b06ded97895
[ "MIT" ]
null
null
null
coollib/__init__.py
xdaTq/coollib
a10a4485edf11dc9af3ec77200d36b06ded97895
[ "MIT" ]
null
null
null
def add_numbers(num1, num2):
    return num1 + num2


def sub_numbers(num1, num2):
    return num1 - num2


def multi_numbers(num1, num2):
    return num1 * num2


def div_numbers(num1, num2):
    return num1 / num2
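A hypothetical usage note for the helpers above; worth flagging that `div_numbers` performs true division and raises `ZeroDivisionError` when `num2` is 0:

```python
# Hypothetical usage of coollib's arithmetic helpers (defined in __init__.py,
# so they import directly from the package).
from coollib import add_numbers, div_numbers

print(add_numbers(2, 3))  # 5
print(div_numbers(7, 2))  # 3.5 -- true division, so ints yield floats
# div_numbers(1, 0) would raise ZeroDivisionError
```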
16.461538
30
0.682243
32
214
4.4375
0.28125
0.450704
0.422535
0.591549
0.880282
0.880282
0.676056
0
0
0
0
0.096386
0.224299
214
12
31
17.833333
0.759036
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
0
0
0
null
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
8
cb09841fc2f7ed289db9c8ccb0136745c4cd44ba
3,309
gyp
Python
third_party/android_platform/relocation_packer.gyp
jason-simmons/flutter_buildroot
1c9494e60378bd119d910d530344077fc091b3a5
[ "BSD-3-Clause" ]
1
2021-06-12T00:47:11.000Z
2021-06-12T00:47:11.000Z
third_party/android_platform/relocation_packer.gyp
jason-simmons/flutter_buildroot
1c9494e60378bd119d910d530344077fc091b3a5
[ "BSD-3-Clause" ]
null
null
null
third_party/android_platform/relocation_packer.gyp
jason-simmons/flutter_buildroot
1c9494e60378bd119d910d530344077fc091b3a5
[ "BSD-3-Clause" ]
null
null
null
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

{
  'variables': {
    # These file lists are shared with the GN build.
    'relocation_packer_sources': [
      'bionic/tools/relocation_packer/src/debug.cc',
      'bionic/tools/relocation_packer/src/delta_encoder.cc',
      'bionic/tools/relocation_packer/src/elf_file.cc',
      'bionic/tools/relocation_packer/src/packer.cc',
      'bionic/tools/relocation_packer/src/sleb128.cc',
    ],
    'relocation_packer_main_source': [
      'bionic/tools/relocation_packer/src/main.cc',
    ],
    'relocation_packer_test_sources': [
      'bionic/tools/relocation_packer/src/debug_unittest.cc',
      'bionic/tools/relocation_packer/src/delta_encoder_unittest.cc',
      'bionic/tools/relocation_packer/src/elf_file_unittest.cc',
      'bionic/tools/relocation_packer/src/packer_unittest.cc',
      'bionic/tools/relocation_packer/src/sleb128_unittest.cc',
      'bionic/tools/relocation_packer/src/run_all_unittests.cc',
    ],
  },
  'targets': [
    {
      # GN: //third_party/android_platform:android_lib_relocation_packer
      'target_name': 'android_lib_relocation_packer',
      'toolsets': ['host'],
      'type': 'static_library',
      'dependencies': [
        '../../third_party/elfutils/elfutils.gyp:libelf',
      ],
      'sources': [ '<@(relocation_packer_sources)' ],
    },
    {
      # GN: //third_party/android_platform:android_relocation_packer
      'target_name': 'android_relocation_packer',
      'toolsets': ['host'],
      'type': 'executable',
      'dependencies': [
        '../../third_party/elfutils/elfutils.gyp:libelf',
        'android_lib_relocation_packer',
      ],
      'sources': [ '<@(relocation_packer_main_source)' ],
    },
    {
      # TODO(GN)
      'target_name': 'android_relocation_packer_unittests',
      'toolsets': ['host'],
      'type': 'executable',
      'dependencies': [
        '../../testing/gtest.gyp:gtest',
        'android_lib_relocation_packer',
      ],
      'include_dirs': [
        '../..',
      ],
      'sources': [ '<@(relocation_packer_test_sources)' ],
      'copies': [
        {
          'destination': '<(PRODUCT_DIR)',
          'files': [
            'bionic/tools/relocation_packer/test_data/elf_file_unittest_relocs_arm32.so',
            'bionic/tools/relocation_packer/test_data/elf_file_unittest_relocs_arm32_packed.so',
            'bionic/tools/relocation_packer/test_data/elf_file_unittest_relocs_arm64.so',
            'bionic/tools/relocation_packer/test_data/elf_file_unittest_relocs_arm64_packed.so',
            'bionic/tools/relocation_packer/test_data/elf_file_unittest_relocs_ia32.so',
            'bionic/tools/relocation_packer/test_data/elf_file_unittest_relocs_ia32_packed.so',
            'bionic/tools/relocation_packer/test_data/elf_file_unittest_relocs_x64.so',
            'bionic/tools/relocation_packer/test_data/elf_file_unittest_relocs_x64_packed.so',
            'bionic/tools/relocation_packer/test_data/elf_file_unittest_relocs_mips32.so',
            'bionic/tools/relocation_packer/test_data/elf_file_unittest_relocs_mips32_packed.so',
          ],
        },
      ],
    },
  ],
}
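Because GYP files are plain Python literals (comments and trailing commas included), the target list above can be inspected without a GYP checkout; a hypothetical sketch using only the standard library, with a placeholder file path:

```python
# Hypothetical inspection of the .gyp file above. GYP files parse as Python
# literals, so ast.literal_eval handles them (comments are discarded by the
# tokenizer); the path below is a placeholder.
import ast

with open('relocation_packer.gyp') as f:
    spec = ast.literal_eval(f.read())

for target in spec['targets']:
    print(target['target_name'], target['type'])
# android_lib_relocation_packer static_library
# android_relocation_packer executable
# android_relocation_packer_unittests executable
```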
37.602273
97
0.659414
367
3,309
5.577657
0.239782
0.273571
0.225696
0.290181
0.74597
0.628236
0.595017
0.397655
0.316561
0.316561
0
0.011455
0.208522
3,309
87
98
38.034483
0.770141
0.102146
0
0.35443
0
0
0.695241
0.613905
0
0
0
0.011494
0
1
0
true
0
0
0
0
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
null
0
0
1
0
0
0
1
0
0
0
0
0
0
7
cb4528939e812a8c77abe6226a52cb88b1158046
90,348
py
Python
venv/lib/python3.7/site-packages/tests/test_swagger.py
bfraz/python-flask-swagger-k8s
8381a172644d92c1fcbf0691340ea8545624b7e8
[ "MIT" ]
1
2021-03-06T05:07:42.000Z
2021-03-06T05:07:42.000Z
tests/test_swagger.py
aaronbenz/flask-restplus
15d9e872f260658f0d2c7cb44ea4c3148e000b50
[ "MIT" ]
null
null
null
tests/test_swagger.py
aaronbenz/flask-restplus
15d9e872f260658f0d2c7cb44ea4c3148e000b50
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import unicode_literals from textwrap import dedent from flask import url_for, Blueprint from werkzeug.datastructures import FileStorage import flask_restplus as restplus from . import TestCase class ApiMixin(object): def build_api(self, **kwargs): bpkwargs = {} if 'prefix' in kwargs: bpkwargs['url_prefix'] = kwargs.pop('prefix') if 'subdomain' in kwargs: bpkwargs['subdomain'] = kwargs.pop('subdomain') blueprint = Blueprint('api', __name__, **bpkwargs) api = restplus.Api(blueprint, **kwargs) self.app.register_blueprint(blueprint) return api class SwaggerTests(ApiMixin, TestCase): def test_specs_endpoint(self): api = restplus.Api() api.init_app(self.app) data = self.get_specs('') self.assertEqual(data['swagger'], '2.0') self.assertEqual(data['basePath'], '/') self.assertEqual(data['produces'], ['application/json']) self.assertEqual(data['consumes'], ['application/json']) self.assertEqual(data['paths'], {}) self.assertIn('info', data) def test_specs_endpoint_with_prefix(self): self.build_api(prefix='/api') data = self.get_specs('/api') self.assertEqual(data['swagger'], '2.0') self.assertEqual(data['basePath'], '/api') self.assertEqual(data['produces'], ['application/json']) self.assertEqual(data['consumes'], ['application/json']) self.assertEqual(data['paths'], {}) self.assertIn('info', data) def test_specs_endpoint_produces(self): api = self.build_api() def output_xml(data, code, headers=None): pass api.representations['application/xml'] = output_xml data = self.get_specs() self.assertEqual(len(data['produces']), 2) self.assertIn('application/json', data['produces']) self.assertIn('application/xml', data['produces']) def test_specs_endpoint_info(self): api = restplus.Api(version='1.0', title='My API', description='This is a testing API', terms_url='http://somewhere.com/terms/', contact='Support', contact_url='http://support.somewhere.com', contact_email='contact@somewhere.com', license='Apache 2.0', license_url='http://www.apache.org/licenses/LICENSE-2.0.html' ) api.init_app(self.app) data = self.get_specs() self.assertEqual(data['swagger'], '2.0') self.assertEqual(data['basePath'], '/') self.assertEqual(data['produces'], ['application/json']) self.assertEqual(data['paths'], {}) self.assertIn('info', data) self.assertEqual(data['info']['title'], 'My API') self.assertEqual(data['info']['version'], '1.0') self.assertEqual(data['info']['description'], 'This is a testing API') self.assertEqual(data['info']['termsOfService'], 'http://somewhere.com/terms/') self.assertEqual(data['info']['contact'], { 'name': 'Support', 'url': 'http://support.somewhere.com', 'email': 'contact@somewhere.com', }) self.assertEqual(data['info']['license'], { 'name': 'Apache 2.0', 'url': 'http://www.apache.org/licenses/LICENSE-2.0.html', }) def test_specs_endpoint_info_delayed(self): api = restplus.Api(version='1.0') api.init_app(self.app, title='My API', description='This is a testing API', terms_url='http://somewhere.com/terms/', contact='Support', contact_url='http://support.somewhere.com', contact_email='contact@somewhere.com', license='Apache 2.0', license_url='http://www.apache.org/licenses/LICENSE-2.0.html' ) data = self.get_specs() self.assertEqual(data['swagger'], '2.0') self.assertEqual(data['basePath'], '/') self.assertEqual(data['produces'], ['application/json']) self.assertEqual(data['paths'], {}) self.assertIn('info', data) self.assertEqual(data['info']['title'], 'My API') self.assertEqual(data['info']['version'], '1.0') self.assertEqual(data['info']['description'], 

    def test_specs_endpoint_info_delayed(self):
        api = restplus.Api(version='1.0')
        api.init_app(self.app,
                     title='My API',
                     description='This is a testing API',
                     terms_url='http://somewhere.com/terms/',
                     contact='Support',
                     contact_url='http://support.somewhere.com',
                     contact_email='contact@somewhere.com',
                     license='Apache 2.0',
                     license_url='http://www.apache.org/licenses/LICENSE-2.0.html')

        data = self.get_specs()
        self.assertEqual(data['swagger'], '2.0')
        self.assertEqual(data['basePath'], '/')
        self.assertEqual(data['produces'], ['application/json'])
        self.assertEqual(data['paths'], {})
        self.assertIn('info', data)
        self.assertEqual(data['info']['title'], 'My API')
        self.assertEqual(data['info']['version'], '1.0')
        self.assertEqual(data['info']['description'], 'This is a testing API')
        self.assertEqual(data['info']['termsOfService'], 'http://somewhere.com/terms/')
        self.assertEqual(data['info']['contact'], {
            'name': 'Support',
            'url': 'http://support.somewhere.com',
            'email': 'contact@somewhere.com',
        })
        self.assertEqual(data['info']['license'], {
            'name': 'Apache 2.0',
            'url': 'http://www.apache.org/licenses/LICENSE-2.0.html',
        })

    def test_specs_endpoint_info_callable(self):
        api = restplus.Api(version=lambda: '1.0',
                           title=lambda: 'My API',
                           description=lambda: 'This is a testing API',
                           terms_url=lambda: 'http://somewhere.com/terms/',
                           contact=lambda: 'Support',
                           contact_url=lambda: 'http://support.somewhere.com',
                           contact_email=lambda: 'contact@somewhere.com',
                           license=lambda: 'Apache 2.0',
                           license_url=lambda: 'http://www.apache.org/licenses/LICENSE-2.0.html')
        api.init_app(self.app)

        data = self.get_specs()
        self.assertEqual(data['swagger'], '2.0')
        self.assertEqual(data['basePath'], '/')
        self.assertEqual(data['produces'], ['application/json'])
        self.assertEqual(data['paths'], {})
        self.assertIn('info', data)
        self.assertEqual(data['info']['title'], 'My API')
        self.assertEqual(data['info']['version'], '1.0')
        self.assertEqual(data['info']['description'], 'This is a testing API')
        self.assertEqual(data['info']['termsOfService'], 'http://somewhere.com/terms/')
        self.assertEqual(data['info']['contact'], {
            'name': 'Support',
            'url': 'http://support.somewhere.com',
            'email': 'contact@somewhere.com',
        })
        self.assertEqual(data['info']['license'], {
            'name': 'Apache 2.0',
            'url': 'http://www.apache.org/licenses/LICENSE-2.0.html',
        })

    def test_specs_endpoint_no_host(self):
        restplus.Api(self.app)

        data = self.get_specs('')
        self.assertNotIn('host', data)

    def test_specs_endpoint_host(self):
        self.app.config['SERVER_NAME'] = 'api.restplus.org'
        restplus.Api(self.app)

        data = self.get_specs('')
        self.assertEqual(data['host'], 'api.restplus.org')

    def test_specs_endpoint_host_and_subdomain(self):
        self.app.config['SERVER_NAME'] = 'restplus.org'
        blueprint = Blueprint('api', __name__, subdomain='api')
        restplus.Api(blueprint)
        self.app.register_blueprint(blueprint)

        data = self.get_specs(base_url='http://api.restplus.org')
        self.assertEqual(data['host'], 'api.restplus.org')

    def test_specs_endpoint_tags_short(self):
        restplus.Api(self.app, tags=['tag-1', 'tag-2', 'tag-3'])

        data = self.get_specs('')
        self.assertEqual(data['tags'], [
            {'name': 'tag-1'},
            {'name': 'tag-2'},
            {'name': 'tag-3'},
            {'name': 'default', 'description': 'Default namespace'},
        ])

    def test_specs_endpoint_tags_tuple(self):
        restplus.Api(self.app, tags=[
            ('tag-1', 'Tag 1'),
            ('tag-2', 'Tag 2'),
            ('tag-3', 'Tag 3'),
        ])

        data = self.get_specs('')
        self.assertEqual(data['tags'], [
            {'name': 'tag-1', 'description': 'Tag 1'},
            {'name': 'tag-2', 'description': 'Tag 2'},
            {'name': 'tag-3', 'description': 'Tag 3'},
            {'name': 'default', 'description': 'Default namespace'},
        ])

    def test_specs_endpoint_tags_dict(self):
        restplus.Api(self.app, tags=[
            {'name': 'tag-1', 'description': 'Tag 1'},
            {'name': 'tag-2', 'description': 'Tag 2'},
            {'name': 'tag-3', 'description': 'Tag 3'},
        ])

        data = self.get_specs('')
        self.assertEqual(data['tags'], [
            {'name': 'tag-1', 'description': 'Tag 1'},
            {'name': 'tag-2', 'description': 'Tag 2'},
            {'name': 'tag-3', 'description': 'Tag 3'},
            {'name': 'default', 'description': 'Default namespace'},
        ])

    def test_specs_endpoint_tags_namespaces(self):
        api = restplus.Api(self.app, tags=['ns', 'tag'])
        api.namespace('ns', 'Description')

        data = self.get_specs('')
        self.assertEqual(data['tags'], [
            {'name': 'ns', 'description': 'Description'},
            {'name': 'tag'},
            {'name': 'default', 'description': 'Default namespace'},
        ])
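
    # A tag entry missing the mandatory 'name' key should make the specs
    # endpoint fail loudly (HTTP 500) rather than emit an invalid document,
    # while 'authorizations' maps straight to Swagger securityDefinitions.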

    def test_specs_endpoint_invalid_tags(self):
        restplus.Api(self.app, tags=[
            {'description': 'Tag 1'}
        ])
        self.get_specs('', status=500)

    def test_specs_authorizations(self):
        authorizations = {
            'apikey': {
                'type': 'apiKey',
                'in': 'header',
                'name': 'X-API'
            }
        }
        restplus.Api(self.app, authorizations=authorizations)

        data = self.get_specs()
        self.assertIn('securityDefinitions', data)
        self.assertEqual(data['securityDefinitions'], authorizations)

    def test_minimal_documentation(self):
        api = self.build_api(prefix='/api')
        ns = api.namespace('ns', 'Test namespace')

        @ns.route('/', endpoint='test')
        class TestResource(restplus.Resource):
            def get(self):
                return {}

        data = self.get_specs('/api')
        paths = data['paths']
        self.assertEqual(len(paths.keys()), 1)
        self.assertIn('/ns/', paths)
        self.assertIn('get', paths['/ns/'])

        op = paths['/ns/']['get']
        self.assertEqual(op['tags'], ['ns'])
        self.assertEqual(op['operationId'], 'get_test_resource')
        self.assertNotIn('parameters', op)
        self.assertNotIn('summary', op)
        self.assertNotIn('description', op)
        self.assertEqual(op['responses'], {
            '200': {
                'description': 'Success',
            }
        })

        with self.context():
            self.assertEqual(url_for('api.test'), '/api/ns/')

    def test_default_ns_resource_documentation(self):
        api = self.build_api(prefix='/api', version='1.0')

        @api.route('/test/', endpoint='test')
        class TestResource(restplus.Resource):
            def get(self):
                return {}

        data = self.get_specs('/api')
        paths = data['paths']
        self.assertEqual(len(paths.keys()), 1)
        self.assertIn('/test/', paths)
        self.assertIn('get', paths['/test/'])

        op = paths['/test/']['get']
        self.assertEqual(op['tags'], ['default'])
        self.assertEqual(op['responses'], {
            '200': {
                'description': 'Success',
            }
        })

        self.assertEqual(len(data['tags']), 1)
        tag = data['tags'][0]
        self.assertEqual(tag['name'], 'default')
        self.assertEqual(tag['description'], 'Default namespace')

        with self.context():
            self.assertEqual(url_for('api.test'), '/api/test/')

    def test_default_ns_resource_documentation_with_override(self):
        api = self.build_api(default='site', default_label='Site namespace')

        @api.route('/test/', endpoint='test')
        class TestResource(restplus.Resource):
            def get(self):
                return {}

        data = self.get_specs()
        paths = data['paths']
        self.assertEqual(len(paths.keys()), 1)
        self.assertIn('/test/', paths)
        self.assertIn('get', paths['/test/'])

        op = paths['/test/']['get']
        self.assertEqual(op['tags'], ['site'])
        self.assertEqual(op['responses'], {
            '200': {
                'description': 'Success',
            }
        })

        self.assertEqual(len(data['tags']), 1)
        tag = data['tags'][0]
        self.assertEqual(tag['name'], 'site')
        self.assertEqual(tag['description'], 'Site namespace')

        with self.context():
            self.assertEqual(url_for('api.test'), '/test/')

    def test_ns_resource_documentation(self):
        api = self.build_api(prefix='/api')
        ns = api.namespace('ns', 'Test namespace')

        @ns.route('/', endpoint='test')
        class TestResource(restplus.Resource):
            def get(self):
                return {}

        data = self.get_specs('/api')
        paths = data['paths']
        self.assertEqual(len(paths.keys()), 1)
        self.assertIn('/ns/', paths)
        self.assertIn('get', paths['/ns/'])

        op = paths['/ns/']['get']
        self.assertEqual(op['tags'], ['ns'])
        self.assertEqual(op['responses'], {
            '200': {
                'description': 'Success',
            }
        })
        self.assertNotIn('parameters', op)

        self.assertEqual(len(data['tags']), 2)
        tag = data['tags'][-1]
        self.assertEqual(tag['name'], 'ns')
        self.assertEqual(tag['description'], 'Test namespace')

        with self.context():
            self.assertEqual(url_for('api.test'), '/api/ns/')
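
    # Namespaces declared before init_app() must still end up in the spec,
    # and the first docstring line of each method should feed the Swagger
    # per-operation summary.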

    def test_ns_resource_documentation_lazy(self):
        api = restplus.Api()
        ns = api.namespace('ns', 'Test namespace')

        @ns.route('/', endpoint='test')
        class TestResource(restplus.Resource):
            def get(self):
                return {}

        api.init_app(self.app)

        data = self.get_specs()
        paths = data['paths']
        self.assertEqual(len(paths.keys()), 1)
        self.assertIn('/ns/', paths)
        self.assertIn('get', paths['/ns/'])

        op = paths['/ns/']['get']
        self.assertEqual(op['tags'], ['ns'])
        self.assertEqual(op['responses'], {
            '200': {
                'description': 'Success',
            }
        })

        self.assertEqual(len(data['tags']), 2)
        tag = data['tags'][-1]
        self.assertEqual(tag['name'], 'ns')
        self.assertEqual(tag['description'], 'Test namespace')

        with self.context():
            self.assertEqual(url_for('test'), '/ns/')

    def test_methods_docstring_to_summary(self):
        api = self.build_api()

        @api.route('/test/', endpoint='test')
        class TestResource(restplus.Resource):
            def get(self):
                '''
                GET operation
                '''
                return {}

            def post(self):
                '''POST operation.

                Should be ignored
                '''
                return {}

            def put(self):
                '''PUT operation. Should be ignored'''
                return {}

            def delete(self):
                '''
                DELETE operation.

                Should be ignored.
                '''
                return {}

        data = self.get_specs()
        path = data['paths']['/test/']
        self.assertEqual(len(path.keys()), 4)
        for method in path.keys():
            operation = path[method]
            self.assertIn(method, ('get', 'post', 'put', 'delete'))
            self.assertEqual(operation['summary'], '{0} operation'.format(method.upper()))
            self.assertEqual(operation['operationId'], '{0}_test_resource'.format(method.lower()))
            # self.assertEqual(operation['parameters'], [])

    def test_path_parameter_no_type(self):
        api = self.build_api()

        @api.route('/id/<id>/', endpoint='by-id')
        class ByIdResource(restplus.Resource):
            def get(self, id):
                return {}

        data = self.get_specs()
        self.assertIn('/id/{id}/', data['paths'])

        path = data['paths']['/id/{id}/']
        self.assertEqual(len(path['parameters']), 1)

        parameter = path['parameters'][0]
        self.assertEqual(parameter['name'], 'id')
        self.assertEqual(parameter['type'], 'string')
        self.assertEqual(parameter['in'], 'path')
        self.assertEqual(parameter['required'], True)

    def test_path_parameter_with_type(self):
        api = self.build_api()

        @api.route('/name/<int:age>/', endpoint='by-name')
        class ByNameResource(restplus.Resource):
            def get(self, age):
                return {}

        data = self.get_specs()
        self.assertIn('/name/{age}/', data['paths'])

        path = data['paths']['/name/{age}/']
        self.assertEqual(len(path['parameters']), 1)

        parameter = path['parameters'][0]
        self.assertEqual(parameter['name'], 'age')
        self.assertEqual(parameter['type'], 'integer')
        self.assertEqual(parameter['in'], 'path')
        self.assertEqual(parameter['required'], True)

    def test_path_parameter_with_explicit_details(self):
        api = self.build_api()

        @api.route('/name/<int:age>/', endpoint='by-name', doc={
            'params': {
                'age': {'description': 'An age'}
            }
        })
        class ByNameResource(restplus.Resource):
            def get(self, age):
                return {}

        data = self.get_specs()
        self.assertIn('/name/{age}/', data['paths'])

        path = data['paths']['/name/{age}/']
        self.assertEqual(len(path['parameters']), 1)

        parameter = path['parameters'][0]
        self.assertEqual(parameter['name'], 'age')
        self.assertEqual(parameter['type'], 'integer')
        self.assertEqual(parameter['in'], 'path')
        self.assertEqual(parameter['required'], True)
        self.assertEqual(parameter['description'], 'An age')
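
    # reqparse-based parameter documentation: parsers attached through
    # @api.expect(), whether per method, on the resource class, or targeted
    # at a single method via @api.doc(get={...}).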

    def test_path_parameter_with_decorator_details(self):
        api = self.build_api()

        @api.route('/name/<int:age>/')
        @api.param('age', 'An age')
        class ByNameResource(restplus.Resource):
            def get(self, age):
                return {}

        data = self.get_specs()
        self.assertIn('/name/{age}/', data['paths'])

        path = data['paths']['/name/{age}/']
        self.assertEqual(len(path['parameters']), 1)

        parameter = path['parameters'][0]
        self.assertEqual(parameter['name'], 'age')
        self.assertEqual(parameter['type'], 'integer')
        self.assertEqual(parameter['in'], 'path')
        self.assertEqual(parameter['required'], True)
        self.assertEqual(parameter['description'], 'An age')

    def test_expect_parser(self):
        api = self.build_api()
        parser = api.parser()
        parser.add_argument('param', type=int, help='Some param')

        @api.route('/with-parser/', endpoint='with-parser')
        class WithParserResource(restplus.Resource):
            @api.expect(parser)
            def get(self):
                return {}

        data = self.get_specs()
        self.assertIn('/with-parser/', data['paths'])

        op = data['paths']['/with-parser/']['get']
        self.assertEqual(len(op['parameters']), 1)

        parameter = op['parameters'][0]
        self.assertEqual(parameter['name'], 'param')
        self.assertEqual(parameter['type'], 'integer')
        self.assertEqual(parameter['in'], 'query')
        self.assertEqual(parameter['description'], 'Some param')

    def test_expect_parser_on_class(self):
        api = self.build_api()
        parser = api.parser()
        parser.add_argument('param', type=int, help='Some param')

        @api.route('/with-parser/', endpoint='with-parser')
        @api.expect(parser)
        class WithParserResource(restplus.Resource):
            def get(self):
                return {}

        data = self.get_specs()
        self.assertIn('/with-parser/', data['paths'])

        path = data['paths']['/with-parser/']
        self.assertEqual(len(path['parameters']), 1)

        parameter = path['parameters'][0]
        self.assertEqual(parameter['name'], 'param')
        self.assertEqual(parameter['type'], 'integer')
        self.assertEqual(parameter['in'], 'query')
        self.assertEqual(parameter['description'], 'Some param')

    def test_method_parser_on_class(self):
        api = self.build_api()
        parser = api.parser()
        parser.add_argument('param', type=int, help='Some param')

        @api.route('/with-parser/', endpoint='with-parser')
        @api.doc(get={'expect': parser})
        class WithParserResource(restplus.Resource):
            def get(self):
                return {}

            def post(self):
                return {}

        data = self.get_specs()
        self.assertIn('/with-parser/', data['paths'])

        op = data['paths']['/with-parser/']['get']
        self.assertEqual(len(op['parameters']), 1)

        parameter = op['parameters'][0]
        self.assertEqual(parameter['name'], 'param')
        self.assertEqual(parameter['type'], 'integer')
        self.assertEqual(parameter['in'], 'query')
        self.assertEqual(parameter['description'], 'Some param')

        op = data['paths']['/with-parser/']['post']
        self.assertNotIn('parameters', op)

    def test_parser_parameters_override(self):
        api = self.build_api()
        parser = api.parser()
        parser.add_argument('param', type=int, help='Some param')

        @api.route('/with-parser/', endpoint='with-parser')
        class WithParserResource(restplus.Resource):
            @api.expect(parser)
            @api.doc(params={'param': {'description': 'New description'}})
            def get(self):
                return {}

        data = self.get_specs()
        self.assertIn('/with-parser/', data['paths'])

        op = data['paths']['/with-parser/']['get']
        self.assertEqual(len(op['parameters']), 1)

        parameter = op['parameters'][0]
        self.assertEqual(parameter['name'], 'param')
        self.assertEqual(parameter['type'], 'integer')
        self.assertEqual(parameter['in'], 'query')
        self.assertEqual(parameter['description'], 'New description')

    def test_parser_parameter_in_form(self):
        api = self.build_api()
        parser = api.parser()
        parser.add_argument('param', type=int, help='Some param', location='form')

        @api.route('/with-parser/', endpoint='with-parser')
        class WithParserResource(restplus.Resource):
            @api.expect(parser)
            def get(self):
                return {}

        data = self.get_specs()
        self.assertIn('/with-parser/', data['paths'])

        op = data['paths']['/with-parser/']['get']
        self.assertEqual(len(op['parameters']), 1)

        parameter = op['parameters'][0]
        self.assertEqual(parameter['name'], 'param')
        self.assertEqual(parameter['type'], 'integer')
        self.assertEqual(parameter['in'], 'formData')
        self.assertEqual(parameter['description'], 'Some param')

        self.assertEqual(op['consumes'], ['application/x-www-form-urlencoded', 'multipart/form-data'])

    def test_parser_parameter_in_files(self):
        api = self.build_api()
        parser = api.parser()
        parser.add_argument('in_files', type=FileStorage, location='files')

        @api.route('/with-parser/', endpoint='with-parser')
        class WithParserResource(restplus.Resource):
            @api.expect(parser)
            def get(self):
                return {}

        data = self.get_specs()
        self.assertIn('/with-parser/', data['paths'])

        op = data['paths']['/with-parser/']['get']
        self.assertEqual(len(op['parameters']), 1)

        parameter = op['parameters'][0]
        self.assertEqual(parameter['name'], 'in_files')
        self.assertEqual(parameter['type'], 'file')
        self.assertEqual(parameter['in'], 'formData')

        self.assertEqual(op['consumes'], ['multipart/form-data'])

    def test_explicit_parameters(self):
        api = self.build_api()

        @api.route('/name/<int:age>/', endpoint='by-name')
        class ByNameResource(restplus.Resource):
            @api.doc(params={
                'q': {
                    'type': 'string',
                    'in': 'query',
                    'description': 'A query string',
                }
            })
            def get(self, age):
                return {}

        data = self.get_specs()
        self.assertIn('/name/{age}/', data['paths'])

        path = data['paths']['/name/{age}/']
        self.assertEqual(len(path['parameters']), 1)

        parameter = path['parameters'][0]
        self.assertEqual(parameter['name'], 'age')
        self.assertEqual(parameter['type'], 'integer')
        self.assertEqual(parameter['in'], 'path')
        self.assertEqual(parameter['required'], True)

        op = path['get']
        self.assertEqual(len(op['parameters']), 1)

        parameter = op['parameters'][0]
        self.assertEqual(parameter['name'], 'q')
        self.assertEqual(parameter['type'], 'string')
        self.assertEqual(parameter['in'], 'query')
        self.assertEqual(parameter['description'], 'A query string')

    def test_explicit_parameters_with_decorator(self):
        api = self.build_api()

        @api.route('/name/')
        class ByNameResource(restplus.Resource):
            @api.param('q', 'A query string', type='string', _in='formData')
            def get(self, age):
                return {}

        data = self.get_specs()
        self.assertIn('/name/', data['paths'])

        op = data['paths']['/name/']['get']
        self.assertEqual(len(op['parameters']), 1)

        parameter = op['parameters'][0]
        self.assertEqual(parameter['name'], 'q')
        self.assertEqual(parameter['type'], 'string')
        self.assertEqual(parameter['in'], 'formData')
        self.assertEqual(parameter['description'], 'A query string')

    def test_class_explicit_parameters(self):
        api = self.build_api()

        @api.route('/name/<int:age>/', endpoint='by-name', doc={
            'params': {
                'q': {
                    'type': 'string',
                    'in': 'query',
                    'description': 'A query string',
                }
            }
        })
        class ByNameResource(restplus.Resource):
            def get(self, age):
                return {}

        data = self.get_specs()
        self.assertIn('/name/{age}/', data['paths'])

        path = data['paths']['/name/{age}/']
        self.assertEqual(len(path['parameters']), 2)

        by_name = dict((p['name'], p) for p in path['parameters'])

        parameter = by_name['age']
        self.assertEqual(parameter['name'], 'age')
        self.assertEqual(parameter['type'], 'integer')
        self.assertEqual(parameter['in'], 'path')
        self.assertEqual(parameter['required'], True)

        parameter = by_name['q']
        self.assertEqual(parameter['name'], 'q')
        self.assertEqual(parameter['type'], 'string')
        self.assertEqual(parameter['in'], 'query')
        self.assertEqual(parameter['description'], 'A query string')
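
    # Explicit 'params' documentation is merged across levels: route-level
    # entries should be completed or overridden by method-level @api.doc()
    # entries, as the override tests below verify.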

    def test_explicit_parameters_override(self):
        api = self.build_api()

        @api.route('/name/<int:age>/', endpoint='by-name', doc={
            'params': {
                'q': {
                    'type': 'string',
                    'in': 'query',
                    'description': 'Overridden description',
                },
                'age': {
                    'description': 'An age'
                }
            }
        })
        class ByNameResource(restplus.Resource):
            @api.doc(params={'q': {'description': 'A query string'}})
            def get(self, age):
                return {}

        data = self.get_specs()
        self.assertIn('/name/{age}/', data['paths'])

        path = data['paths']['/name/{age}/']
        self.assertEqual(len(path['parameters']), 2)

        by_name = dict((p['name'], p) for p in path['parameters'])

        parameter = by_name['age']
        self.assertEqual(parameter['name'], 'age')
        self.assertEqual(parameter['type'], 'integer')
        self.assertEqual(parameter['in'], 'path')
        self.assertEqual(parameter['required'], True)
        self.assertEqual(parameter['description'], 'An age')

        parameter = by_name['q']
        self.assertEqual(parameter['name'], 'q')
        self.assertEqual(parameter['type'], 'string')
        self.assertEqual(parameter['in'], 'query')
        self.assertEqual(parameter['description'], 'Overridden description')

        op = data['paths']['/name/{age}/']['get']
        self.assertEqual(len(op['parameters']), 1)

        parameter = op['parameters'][0]
        self.assertEqual(parameter['name'], 'q')
        self.assertEqual(parameter['type'], 'string')
        self.assertEqual(parameter['in'], 'query')
        self.assertEqual(parameter['description'], 'A query string')

    def test_explicit_parameters_override_by_method(self):
        api = self.build_api()

        @api.route('/name/<int:age>/', endpoint='by-name', doc={
            'get': {
                'params': {
                    'q': {
                        'type': 'string',
                        'in': 'query',
                        'description': 'A query string',
                    }
                }
            },
            'params': {
                'age': {
                    'description': 'An age'
                }
            }
        })
        class ByNameResource(restplus.Resource):
            @api.doc(params={'age': {'description': 'Overridden'}})
            def get(self, age):
                return {}

            def post(self, age):
                return {}

        data = self.get_specs()
        self.assertIn('/name/{age}/', data['paths'])

        path = data['paths']['/name/{age}/']
        self.assertEqual(len(path['parameters']), 1)

        parameter = path['parameters'][0]
        self.assertEqual(parameter['name'], 'age')
        self.assertEqual(parameter['type'], 'integer')
        self.assertEqual(parameter['in'], 'path')
        self.assertEqual(parameter['required'], True)
        self.assertEqual(parameter['description'], 'An age')

        op = path['get']
        self.assertEqual(len(op['parameters']), 2)

        by_name = dict((p['name'], p) for p in op['parameters'])

        parameter = by_name['age']
        self.assertEqual(parameter['name'], 'age')
        self.assertEqual(parameter['type'], 'integer')
        self.assertEqual(parameter['in'], 'path')
        self.assertEqual(parameter['required'], True)
        self.assertEqual(parameter['description'], 'Overridden')

        parameter = by_name['q']
        self.assertEqual(parameter['name'], 'q')
        self.assertEqual(parameter['type'], 'string')
        self.assertEqual(parameter['in'], 'query')
        self.assertEqual(parameter['description'], 'A query string')

        self.assertNotIn('parameters', path['post'])

    def test_explicit_parameters_description_shortcut(self):
        api = self.build_api()

        @api.route('/name/<int:age>/', endpoint='by-name', doc={
            'get': {
                'params': {
                    'q': 'A query string',
                }
            },
            'params': {
                'age': 'An age'
            }
        })
        class ByNameResource(restplus.Resource):
            @api.doc(params={'age': 'Overridden'})
            def get(self, age):
                return {}

            def post(self, age):
                return {}

        data = self.get_specs()
        self.assertIn('/name/{age}/', data['paths'])

        path = data['paths']['/name/{age}/']
        self.assertEqual(len(path['parameters']), 1)

        parameter = path['parameters'][0]
        self.assertEqual(parameter['name'], 'age')
        self.assertEqual(parameter['type'], 'integer')
        self.assertEqual(parameter['in'], 'path')
        self.assertEqual(parameter['required'], True)
        self.assertEqual(parameter['description'], 'An age')

        op = path['get']
        self.assertEqual(len(op['parameters']), 2)

        by_name = dict((p['name'], p) for p in op['parameters'])

        parameter = by_name['age']
        self.assertEqual(parameter['name'], 'age')
        self.assertEqual(parameter['type'], 'integer')
        self.assertEqual(parameter['in'], 'path')
        self.assertEqual(parameter['required'], True)
        self.assertEqual(parameter['description'], 'Overridden')

        parameter = by_name['q']
        self.assertEqual(parameter['name'], 'q')
        self.assertEqual(parameter['type'], 'string')
        self.assertEqual(parameter['in'], 'query')
        self.assertEqual(parameter['description'], 'A query string')

        self.assertNotIn('parameters', path['post'])

    def test_explicit_parameters_native_types(self):
        api = self.build_api()

        @api.route('/types/', endpoint='native')
        class NativeTypesResource(restplus.Resource):
            @api.doc(params={
                'int': {
                    'type': int,
                    'in': 'query',
                },
                'bool': {
                    'type': bool,
                    'in': 'query',
                },
                'str': {
                    'type': str,
                    'in': 'query',
                },
                'int-array': {
                    'type': [int],
                    'in': 'query',
                },
                'bool-array': {
                    'type': [bool],
                    'in': 'query',
                },
                'str-array': {
                    'type': [str],
                    'in': 'query',
                }
            })
            def get(self, age):
                return {}

        data = self.get_specs()
        op = data['paths']['/types/']['get']
        parameters = dict((p['name'], p) for p in op['parameters'])
        self.assertEqual(parameters['int']['type'], 'integer')
        self.assertEqual(parameters['str']['type'], 'string')
        self.assertEqual(parameters['bool']['type'], 'boolean')

        self.assertEqual(parameters['int-array']['type'], 'array')
        self.assertEqual(parameters['int-array']['items']['type'], 'integer')

        self.assertEqual(parameters['str-array']['type'], 'array')
        self.assertEqual(parameters['str-array']['items']['type'], 'string')

        self.assertEqual(parameters['bool-array']['type'], 'array')
        self.assertEqual(parameters['bool-array']['items']['type'], 'boolean')

    def test_response_on_method(self):
        api = self.build_api()
        api.model('ErrorModel', {
            'message': restplus.fields.String,
        })

        @api.route('/test/')
        class ByNameResource(restplus.Resource):
            @api.doc(responses={
                404: 'Not found',
                405: ('Some message', 'ErrorModel'),
            })
            def get(self):
                return {}

        data = self.get_specs('')
        paths = data['paths']
        self.assertEqual(len(paths.keys()), 1)

        op = paths['/test/']['get']
        self.assertEqual(op['tags'], ['default'])
        self.assertEqual(op['responses'], {
            '404': {
                'description': 'Not found',
            },
            '405': {
                'description': 'Some message',
                'schema': {
                    '$ref': '#/definitions/ErrorModel',
                }
            }
        })

        self.assertIn('definitions', data)
        self.assertIn('ErrorModel', data['definitions'])

    def test_api_response(self):
        api = self.build_api()

        @api.route('/test/')
        class TestResource(restplus.Resource):
            @api.response(200, 'Success')
            def get(self):
                pass

        data = self.get_specs('')
        paths = data['paths']

        op = paths['/test/']['get']
        self.assertEqual(op['responses'], {
            '200': {
                'description': 'Success',
            }
        })

    def test_api_response_multiple(self):
        api = self.build_api()

        @api.route('/test/')
        class TestResource(restplus.Resource):
            @api.response(200, 'Success')
            @api.response(400, 'Validation error')
            def get(self):
                pass

        data = self.get_specs('')
        paths = data['paths']

        op = paths['/test/']['get']
        self.assertEqual(op['responses'], {
            '200': {
                'description': 'Success',
            },
            '400': {
                'description': 'Validation error',
            }
        })
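
    # @api.response() declarations accumulate per method and may carry a
    # model reference; @api.header() documents header parameters in the
    # same operation object.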

    def test_api_response_with_model(self):
        api = self.build_api()
        model = api.model('SomeModel', {
            'message': restplus.fields.String,
        })

        @api.route('/test/')
        class TestResource(restplus.Resource):
            @api.response(200, 'Success', model)
            def get(self):
                pass

        data = self.get_specs('')
        paths = data['paths']

        op = paths['/test/']['get']
        self.assertEqual(op['responses'], {
            '200': {
                'description': 'Success',
                'schema': {
                    '$ref': '#/definitions/SomeModel',
                }
            }
        })

        self.assertIn('SomeModel', data['definitions'])

    def test_api_response_default(self):
        api = self.build_api()

        @api.route('/test/')
        class TestResource(restplus.Resource):
            @api.response('default', 'Error')
            def get(self):
                pass

        data = self.get_specs('')
        paths = data['paths']

        op = paths['/test/']['get']
        self.assertEqual(op['responses'], {
            'default': {
                'description': 'Error',
            }
        })

    def test_api_header(self):
        api = self.build_api()

        @api.route('/test/')
        class TestResource(restplus.Resource):
            @api.header('X-HEADER', 'A required header', required=True)
            def get(self):
                pass

            @api.header('X-HEADER-2', 'Another header', type=[int], collectionFormat='csv')
            def post(self):
                pass

            @api.header('X-HEADER-3', type=int)
            def put(self):
                pass

            @api.header('X-HEADER-4', type='boolean')
            def delete(self):
                pass

        data = self.get_specs('')
        paths = data['paths']

        def param_for(method):
            return paths['/test/'][method]['parameters'][0]

        parameter = param_for('get')
        self.assertEqual(parameter['name'], 'X-HEADER')
        self.assertEqual(parameter['type'], 'string')
        self.assertEqual(parameter['in'], 'header')
        self.assertEqual(parameter['required'], True)
        self.assertEqual(parameter['description'], 'A required header')

        parameter = param_for('post')
        self.assertEqual(parameter['name'], 'X-HEADER-2')
        self.assertEqual(parameter['type'], 'array')
        self.assertEqual(parameter['in'], 'header')
        self.assertEqual(parameter['items']['type'], 'integer')
        self.assertEqual(parameter['description'], 'Another header')
        self.assertEqual(parameter['collectionFormat'], 'csv')

        parameter = param_for('put')
        self.assertEqual(parameter['name'], 'X-HEADER-3')
        self.assertEqual(parameter['type'], 'integer')
        self.assertEqual(parameter['in'], 'header')

        parameter = param_for('delete')
        self.assertEqual(parameter['name'], 'X-HEADER-4')
        self.assertEqual(parameter['type'], 'boolean')
        self.assertEqual(parameter['in'], 'header')

    def test_description(self):
        api = self.build_api()

        @api.route('/description/', endpoint='description', doc={
            'description': 'Parent description.',
            'delete': {'description': 'A delete operation'},
        })
        class ResourceWithDescription(restplus.Resource):
            @api.doc(description='Some details')
            def get(self):
                return {}

            def post(self):
                '''
                Do something.

                Extra description
                '''
                return {}

            def put(self):
                '''No description (only summary)'''

            def delete(self):
                '''No description (only summary)'''

        @api.route('/descriptionless/', endpoint='descriptionless')
        class ResourceWithoutDescription(restplus.Resource):
            def get(self):
                '''No description (only summary)'''
                return {}

        data = self.get_specs()

        description = lambda m: data['paths']['/description/'][m]['description']

        self.assertEqual(description('get'), dedent('''\
            Parent description.
            Some details'''))
        self.assertEqual(description('post'), dedent('''\
            Parent description.
            Extra description'''))
        self.assertEqual(description('delete'), dedent('''\
            Parent description.
            A delete operation'''))
        self.assertEqual(description('put'), 'Parent description.')

        self.assertNotIn('description', data['paths']['/descriptionless/']['get'])

    def test_operation_id(self):
        api = self.build_api()

        @api.route('/test/', endpoint='test')
        class TestResource(restplus.Resource):
            @api.doc(id='get_objects')
            def get(self):
                return {}

            def post(self):
                return {}

        data = self.get_specs()
        path = data['paths']['/test/']

        self.assertEqual(path['get']['operationId'], 'get_objects')
        self.assertEqual(path['post']['operationId'], 'post_test_resource')

    def test_operation_id_shortcut(self):
        api = self.build_api()

        @api.route('/test/', endpoint='test')
        class TestResource(restplus.Resource):
            @api.doc('get_objects')
            def get(self):
                return {}

        data = self.get_specs()
        path = data['paths']['/test/']

        self.assertEqual(path['get']['operationId'], 'get_objects')

    def test_custom_default_operation_id(self):
        def default_id(resource, method):
            return '{0}{1}'.format(method, resource)

        api = self.build_api(default_id=default_id)

        @api.route('/test/', endpoint='test')
        class TestResource(restplus.Resource):
            @api.doc(id='get_objects')
            def get(self):
                return {}

            def post(self):
                return {}

        data = self.get_specs()
        path = data['paths']['/test/']

        self.assertEqual(path['get']['operationId'], 'get_objects')
        self.assertEqual(path['post']['operationId'], 'postTestResource')

    def test_model_primitive_types(self):
        api = self.build_api()

        @api.route('/model-int/')
        class ModelInt(restplus.Resource):
            @api.doc(model=int)
            def get(self):
                return {}

        data = self.get_specs()

        self.assertNotIn('definitions', data)
        self.assertEqual(data['paths']['/model-int/']['get']['responses'], {
            '200': {
                'description': 'Success',
                'schema': {
                    'type': 'integer'
                }
            }
        })

    def test_model_as_flat_dict(self):
        api = self.build_api()

        fields = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'birthdate': restplus.fields.DateTime,
        })

        @api.route('/model-as-dict/')
        class ModelAsDict(restplus.Resource):
            @api.doc(model=fields)
            def get(self):
                return {}

            @api.doc(model='Person')
            def post(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])

        path = data['paths']['/model-as-dict/']
        self.assertEqual(path['get']['responses']['200']['schema']['$ref'], '#/definitions/Person')
        self.assertEqual(path['post']['responses']['200']['schema']['$ref'], '#/definitions/Person')

    def test_model_as_nested_dict(self):
        api = self.build_api()

        address_fields = api.model('Address', {
            'road': restplus.fields.String,
        })

        fields = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'birthdate': restplus.fields.DateTime,
            'address': restplus.fields.Nested(address_fields)
        })

        @api.route('/model-as-dict/')
        class ModelAsDict(restplus.Resource):
            @api.doc(model=fields)
            def get(self):
                return {}

            @api.doc(model='Person')
            def post(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])
        self.assertIn('Address', data['definitions'].keys())
        self.assertEqual(data['definitions']['Address'], {
            'properties': {
                'road': {
                    'type': 'string'
                },
            },
            'type': 'object'
        })

        path = data['paths']['/model-as-dict/']
        self.assertEqual(path['get']['responses']['200']['schema']['$ref'], '#/definitions/Person')
        self.assertEqual(path['post']['responses']['200']['schema']['$ref'], '#/definitions/Person')
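
    # The marshalling decorators below should register the model as the
    # response schema in addition to driving serialization of the payload.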

    def test_model_as_flat_dict_with_marshal_decorator(self):
        api = self.build_api()

        fields = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'birthdate': restplus.fields.DateTime,
        })

        @api.route('/model-as-dict/')
        class ModelAsDict(restplus.Resource):
            @api.marshal_with(fields)
            def get(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])

        responses = data['paths']['/model-as-dict/']['get']['responses']
        self.assertEqual(responses, {
            '200': {
                'description': 'Success',
                'schema': {
                    '$ref': '#/definitions/Person'
                }
            }
        })

    def test_marshal_decorator_with_code(self):
        api = self.build_api()

        fields = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'birthdate': restplus.fields.DateTime,
        })

        @api.route('/model-as-dict/')
        class ModelAsDict(restplus.Resource):
            @api.marshal_with(fields, code=204)
            def delete(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])

        responses = data['paths']['/model-as-dict/']['delete']['responses']
        self.assertEqual(responses, {
            '204': {
                'description': 'Success',
                'schema': {
                    '$ref': '#/definitions/Person'
                }
            }
        })

    def test_marshal_decorator_with_description(self):
        api = self.build_api()

        person = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'birthdate': restplus.fields.DateTime,
        })

        @api.route('/model-as-dict/')
        class ModelAsDict(restplus.Resource):
            @api.marshal_with(person, description='Some details')
            def get(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])

        responses = data['paths']['/model-as-dict/']['get']['responses']
        self.assertEqual(responses, {
            '200': {
                'description': 'Some details',
                'schema': {
                    '$ref': '#/definitions/Person'
                }
            }
        })

    def test_model_as_flat_dict_with_marshal_decorator_list(self):
        api = self.build_api()

        fields = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'birthdate': restplus.fields.DateTime,
        })

        @api.route('/model-as-dict/')
        class ModelAsDict(restplus.Resource):
            @api.marshal_with(fields, as_list=True)
            def get(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])
        self.assertEqual(data['definitions']['Person'], {
            'properties': {
                'name': {
                    'type': 'string'
                },
                'age': {
                    'type': 'integer'
                },
                'birthdate': {
                    'type': 'string',
                    'format': 'date-time'
                }
            },
            'type': 'object'
        })

        path = data['paths']['/model-as-dict/']
        self.assertEqual(path['get']['responses']['200']['schema'], {
            'type': 'array',
            'items': {'$ref': '#/definitions/Person'},
        })

    def test_model_as_flat_dict_with_marshal_decorator_list_alt(self):
        api = self.build_api()

        fields = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'birthdate': restplus.fields.DateTime,
        })

        @api.route('/model-as-dict/')
        class ModelAsDict(restplus.Resource):
            @api.marshal_list_with(fields)
            def get(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])

        path = data['paths']['/model-as-dict/']
        self.assertEqual(path['get']['responses']['200']['schema'], {
            'type': 'array',
            'items': {'$ref': '#/definitions/Person'},
        })

    def test_model_as_flat_dict_with_marshal_decorator_list_kwargs(self):
        api = self.build_api()

        fields = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'birthdate': restplus.fields.DateTime,
        })

        @api.route('/model-as-dict/')
        class ModelAsDict(restplus.Resource):
            @api.marshal_list_with(fields, code=201, description='Some details')
            def get(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])

        path = data['paths']['/model-as-dict/']
        self.assertEqual(path['get']['responses'], {
            '201': {
                'description': 'Some details',
                'schema': {
                    'type': 'array',
                    'items': {'$ref': '#/definitions/Person'},
                }
            }
        })

    def test_model_as_dict_with_list(self):
        api = self.build_api()

        fields = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'tags': restplus.fields.List(restplus.fields.String),
        })

        @api.route('/model-with-list/')
        class ModelAsDict(restplus.Resource):
            @api.doc(model=fields)
            def get(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])
        self.assertEqual(data['definitions']['Person'], {
            'properties': {
                'name': {
                    'type': 'string'
                },
                'age': {
                    'type': 'integer'
                },
                'tags': {
                    'type': 'array',
                    'items': {
                        'type': 'string'
                    }
                }
            },
            'type': 'object'
        })

        path = data['paths']['/model-with-list/']
        self.assertEqual(path['get']['responses']['200']['schema'], {'$ref': '#/definitions/Person'})

    def test_model_as_nested_dict_with_list(self):
        api = self.build_api()

        address = api.model('Address', {
            'road': restplus.fields.String,
        })

        person = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'birthdate': restplus.fields.DateTime,
            'addresses': restplus.fields.List(restplus.fields.Nested(address))
        })

        @api.route('/model-with-list/')
        class ModelAsDict(restplus.Resource):
            @api.doc(model=person)
            def get(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])
        self.assertIn('Address', data['definitions'])

    def test_model_list_of_primitive_types(self):
        api = self.build_api()

        @api.route('/model-list/')
        class ModelAsDict(restplus.Resource):
            @api.doc(model=[int])
            def get(self):
                return {}

            @api.doc(model=[str])
            def post(self):
                return {}

        data = self.get_specs()

        self.assertNotIn('definitions', data)

        path = data['paths']['/model-list/']
        self.assertEqual(path['get']['responses']['200']['schema'], {
            'type': 'array',
            'items': {'type': 'integer'},
        })
        self.assertEqual(path['post']['responses']['200']['schema'], {
            'type': 'array',
            'items': {'type': 'string'},
        })

    def test_model_list_as_flat_dict(self):
        api = self.build_api()

        fields = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'birthdate': restplus.fields.DateTime,
        })

        @api.route('/model-as-dict/')
        class ModelAsDict(restplus.Resource):
            @api.doc(model=[fields])
            def get(self):
                return {}

            @api.doc(model=['Person'])
            def post(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])

        path = data['paths']['/model-as-dict/']
        for method in 'get', 'post':
            self.assertEqual(path[method]['responses']['200']['schema'], {
                'type': 'array',
                'items': {'$ref': '#/definitions/Person'},
            })

    def test_model_doc_on_class(self):
        api = self.build_api()

        fields = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'birthdate': restplus.fields.DateTime,
        })

        @api.route('/model-as-dict/')
        @api.doc(model=fields)
        class ModelAsDict(restplus.Resource):
            def get(self):
                return {}

            def post(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])

        path = data['paths']['/model-as-dict/']
        for method in 'get', 'post':
            self.assertEqual(path[method]['responses']['200']['schema'], {'$ref': '#/definitions/Person'})

    def test_model_doc_for_method_on_class(self):
        api = self.build_api()

        fields = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'birthdate': restplus.fields.DateTime,
        })

        @api.route('/model-as-dict/')
        @api.doc(get={'model': fields})
        class ModelAsDict(restplus.Resource):
            def get(self):
                return {}

            def post(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])

        path = data['paths']['/model-as-dict/']
        self.assertEqual(path['get']['responses']['200']['schema'], {'$ref': '#/definitions/Person'})
        self.assertNotIn('schema', path['post']['responses']['200'])

    def test_model_with_discriminator(self):
        api = self.build_api()

        fields = api.model('Person', {
            'name': restplus.fields.String(discriminator=True),
            'age': restplus.fields.Integer,
        })

        @api.route('/model-with-discriminator/')
        class ModelAsDict(restplus.Resource):
            @api.marshal_with(fields)
            def get(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])
        self.assertEqual(data['definitions']['Person'], {
            'properties': {
                'name': {'type': 'string'},
                'age': {'type': 'integer'},
            },
            'discriminator': 'name',
            'required': ['name'],
            'type': 'object'
        })

    def test_model_with_discriminator_override_require(self):
        api = self.build_api()

        fields = api.model('Person', {
            'name': restplus.fields.String(discriminator=True, required=False),
            'age': restplus.fields.Integer,
        })

        @api.route('/model-with-discriminator/')
        class ModelAsDict(restplus.Resource):
            @api.marshal_with(fields)
            def get(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])
        self.assertEqual(data['definitions']['Person'], {
            'properties': {
                'name': {'type': 'string'},
                'age': {'type': 'integer'},
            },
            'discriminator': 'name',
            'required': ['name'],
            'type': 'object'
        })

    def test_model_not_found(self):
        api = self.build_api()

        @api.route('/model-not-found/')
        class ModelAsDict(restplus.Resource):
            @api.doc(model='NotFound')
            def get(self):
                return {}

        self.get_specs(status=500)

    def test_clone(self):
        api = self.build_api()

        parent = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'birthdate': restplus.fields.DateTime,
        })

        child = api.clone('Child', parent, {
            'extra': restplus.fields.String,
        })

        @api.route('/extend/')
        class ModelAsDict(restplus.Resource):
            @api.doc(model=child)
            def get(self):
                return {}

            @api.doc(model='Child')
            def post(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertNotIn('Person', data['definitions'])
        self.assertIn('Child', data['definitions'])

        path = data['paths']['/extend/']
        self.assertEqual(path['get']['responses']['200']['schema']['$ref'], '#/definitions/Child')
        self.assertEqual(path['post']['responses']['200']['schema']['$ref'], '#/definitions/Child')
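
    # api.inherit() is expected to emit an 'allOf' composition that keeps
    # the parent definition in the spec, unlike api.clone() above, which
    # produces a flat copy and drops the parent.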

    def test_inherit(self):
        api = self.build_api()

        parent = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
        })

        child = api.inherit('Child', parent, {
            'extra': restplus.fields.String,
        })

        @api.route('/inherit/')
        class ModelAsDict(restplus.Resource):
            @api.marshal_with(child)
            def get(self):
                return {
                    'name': 'John',
                    'age': 42,
                    'extra': 'test',
                }

            @api.doc(model='Child')
            def post(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])
        self.assertIn('Child', data['definitions'])
        self.assertEqual(data['definitions']['Person'], {
            'properties': {
                'name': {'type': 'string'},
                'age': {'type': 'integer'},
            },
            'type': 'object'
        })
        self.assertEqual(data['definitions']['Child'], {
            'allOf': [{
                '$ref': '#/definitions/Person'
            }, {
                'properties': {
                    'extra': {'type': 'string'}
                },
                'type': 'object'
            }]
        })

        path = data['paths']['/inherit/']
        self.assertEqual(path['get']['responses']['200']['schema']['$ref'], '#/definitions/Child')
        self.assertEqual(path['post']['responses']['200']['schema']['$ref'], '#/definitions/Child')

        data = self.get_json('/inherit/')
        self.assertEqual(data, {
            'name': 'John',
            'age': 42,
            'extra': 'test',
        })

    def test_inherit_inline(self):
        api = self.build_api()

        parent = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
        })

        child = api.inherit('Child', parent, {
            'extra': restplus.fields.String,
        })

        output = api.model('Output', {
            'child': restplus.fields.Nested(child),
            'children': restplus.fields.List(restplus.fields.Nested(child))
        })

        @api.route('/inherit/')
        class ModelAsDict(restplus.Resource):
            @api.marshal_with(output)
            def get(self):
                return {
                    'child': {
                        'name': 'John',
                        'age': 42,
                        'extra': 'test',
                    },
                    'children': [{
                        'name': 'John',
                        'age': 42,
                        'extra': 'test',
                    }, {
                        'name': 'Doe',
                        'age': 33,
                        'extra': 'test2',
                    }]
                }

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])
        self.assertIn('Child', data['definitions'])

        data = self.get_json('/inherit/')
        self.assertEqual(data, {
            'child': {
                'name': 'John',
                'age': 42,
                'extra': 'test',
            },
            'children': [{
                'name': 'John',
                'age': 42,
                'extra': 'test',
            }, {
                'name': 'Doe',
                'age': 33,
                'extra': 'test2',
            }]
        })

    def test_polymorph_inherit(self):
        api = self.build_api()

        class Child1:
            pass

        class Child2:
            pass

        parent = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
        })

        child1 = api.inherit('Child1', parent, {
            'extra1': restplus.fields.String,
        })

        child2 = api.inherit('Child2', parent, {
            'extra2': restplus.fields.String,
        })

        mapping = {
            Child1: child1,
            Child2: child2,
        }

        output = api.model('Output', {
            'child': restplus.fields.Polymorph(mapping)
        })

        @api.route('/polymorph/')
        class ModelAsDict(restplus.Resource):
            @api.marshal_with(output)
            def get(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])
        self.assertIn('Child1', data['definitions'])
        self.assertIn('Child2', data['definitions'])
        self.assertIn('Output', data['definitions'])

        path = data['paths']['/polymorph/']
        self.assertEqual(path['get']['responses']['200']['schema']['$ref'], '#/definitions/Output')

    def test_polymorph_inherit_list(self):
        api = self.build_api()

        class Child1:
            name = 'Child1'
            extra1 = 'extra1'

        class Child2:
            name = 'Child2'
            extra2 = 'extra2'

        parent = api.model('Person', {
            'name': restplus.fields.String,
        })

        child1 = api.inherit('Child1', parent, {
            'extra1': restplus.fields.String,
        })

        child2 = api.inherit('Child2', parent, {
            'extra2': restplus.fields.String,
        })

        mapping = {
            Child1: child1,
            Child2: child2,
        }

        output = api.model('Output', {
            'children': restplus.fields.List(restplus.fields.Polymorph(mapping))
        })

        @api.route('/polymorph/')
        class ModelAsDict(restplus.Resource):
            @api.marshal_with(output)
            def get(self):
                return {
                    'children': [Child1(), Child2()]
                }

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])
        self.assertIn('Child1', data['definitions'])
        self.assertIn('Child2', data['definitions'])
        self.assertIn('Output', data['definitions'])

        path = data['paths']['/polymorph/']
        self.assertEqual(path['get']['responses']['200']['schema']['$ref'], '#/definitions/Output')

        data = self.get_json('/polymorph/')
        self.assertEqual(data, {
            'children': [{
                'name': 'Child1',
                'extra1': 'extra1',
            }, {
                'name': 'Child2',
                'extra2': 'extra2',
            }]
        })

    def test_expect_model(self):
        api = self.build_api()

        person = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'birthdate': restplus.fields.DateTime,
        })

        @api.route('/model-as-dict/')
        class ModelAsDict(restplus.Resource):
            @api.expect(person)
            def post(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])
        self.assertEqual(data['definitions']['Person'], {
            'properties': {
                'name': {
                    'type': 'string'
                },
                'age': {
                    'type': 'integer'
                },
                'birthdate': {
                    'type': 'string',
                    'format': 'date-time'
                }
            },
            'type': 'object'
        })

        op = data['paths']['/model-as-dict/']['post']
        self.assertEqual(len(op['parameters']), 1)

        parameter = op['parameters'][0]
        self.assertEqual(parameter, {
            'name': 'payload',
            'in': 'body',
            'required': True,
            'schema': {
                '$ref': '#/definitions/Person'
            }
        })
        self.assertNotIn('description', parameter)

    def test_body_model_shortcut(self):
        api = self.build_api()

        fields = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'birthdate': restplus.fields.DateTime,
        })

        @api.route('/model-as-dict/')
        class ModelAsDict(restplus.Resource):
            @api.doc(model='Person')
            @api.expect(fields)
            def post(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])
        self.assertEqual(data['definitions']['Person'], {
            'properties': {
                'name': {
                    'type': 'string'
                },
                'age': {
                    'type': 'integer'
                },
                'birthdate': {
                    'type': 'string',
                    'format': 'date-time'
                }
            },
            'type': 'object'
        })

        op = data['paths']['/model-as-dict/']['post']
        self.assertEqual(op['responses']['200']['schema']['$ref'], '#/definitions/Person')
        self.assertEqual(len(op['parameters']), 1)

        parameter = op['parameters'][0]
        self.assertEqual(parameter, {
            'name': 'payload',
            'in': 'body',
            'required': True,
            'schema': {
                '$ref': '#/definitions/Person'
            }
        })
        self.assertNotIn('description', parameter)

    def test_expect_model_list(self):
        api = self.build_api()

        model = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'birthdate': restplus.fields.DateTime,
        })

        @api.route('/model-list/')
        class ModelAsDict(restplus.Resource):
            @api.expect([model])
            def post(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])
        self.assertEqual(data['definitions']['Person'], {
            'properties': {
                'name': {
                    'type': 'string'
                },
                'age': {
                    'type': 'integer'
                },
                'birthdate': {
                    'type': 'string',
                    'format': 'date-time'
                }
            },
            'type': 'object'
        })

        op = data['paths']['/model-list/']['post']
        parameter = op['parameters'][0]
        self.assertEqual(parameter, {
            'name': 'payload',
            'in': 'body',
            'required': True,
            'schema': {
                'type': 'array',
                'items': {'$ref': '#/definitions/Person'},
            }
        })

    def test_both_model_and_parser_from_expect(self):
        api = self.build_api()

        parser = api.parser()
        parser.add_argument('param', type=int, help='Some param')

        person = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'birthdate': restplus.fields.DateTime,
        })

        @api.route('/with-parser/', endpoint='with-parser')
        class WithParserResource(restplus.Resource):
            @api.expect(parser, person)
            def get(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])
        self.assertEqual(data['definitions']['Person'], {
            'properties': {
                'name': {
                    'type': 'string'
                },
                'age': {
                    'type': 'integer'
                },
                'birthdate': {
                    'type': 'string',
                    'format': 'date-time'
                }
            },
            'type': 'object'
        })

        self.assertIn('/with-parser/', data['paths'])

        op = data['paths']['/with-parser/']['get']
        self.assertEqual(len(op['parameters']), 2)

        parameters = dict((p['in'], p) for p in op['parameters'])

        parameter = parameters['query']
        self.assertEqual(parameter['name'], 'param')
        self.assertEqual(parameter['type'], 'integer')
        self.assertEqual(parameter['in'], 'query')
        self.assertEqual(parameter['description'], 'Some param')

        parameter = parameters['body']
        self.assertEqual(parameter, {
            'name': 'payload',
            'in': 'body',
            'required': True,
            'schema': {
                '$ref': '#/definitions/Person'
            }
        })

    def test_expect_primitive_list(self):
        api = self.build_api()

        @api.route('/model-list/')
        class ModelAsDict(restplus.Resource):
            @api.expect([restplus.fields.String])
            def post(self):
                return {}

        data = self.get_specs()

        op = data['paths']['/model-list/']['post']
        parameter = op['parameters'][0]
        self.assertEqual(parameter, {
            'name': 'payload',
            'in': 'body',
            'required': True,
            'schema': {
                'type': 'array',
                'items': {'type': 'string'},
            }
        })

    def test_body_model_list(self):
        api = self.build_api()

        fields = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'birthdate': restplus.fields.DateTime,
        })

        @api.route('/model-list/')
        class ModelAsDict(restplus.Resource):
            @api.expect([fields])
            def post(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])
        self.assertEqual(data['definitions']['Person'], {
            'properties': {
                'name': {
                    'type': 'string'
                },
                'age': {
                    'type': 'integer'
                },
                'birthdate': {
                    'type': 'string',
                    'format': 'date-time'
                }
            },
            'type': 'object'
        })

        op = data['paths']['/model-list/']['post']
        parameter = op['parameters'][0]
        self.assertEqual(parameter, {
            'name': 'payload',
            'in': 'body',
            'required': True,
            'schema': {
                'type': 'array',
                'items': {'$ref': '#/definitions/Person'},
            }
        })

    def test_expect_model_with_description(self):
        api = self.build_api()

        person = api.model('Person', {
            'name': restplus.fields.String,
            'age': restplus.fields.Integer,
            'birthdate': restplus.fields.DateTime,
        })

        @api.route('/model-as-dict/')
        class ModelAsDict(restplus.Resource):
            @api.expect((person, 'Body description'))
            def post(self):
                return {}

        data = self.get_specs()

        self.assertIn('definitions', data)
        self.assertIn('Person', data['definitions'])
        self.assertEqual(data['definitions']['Person'], {
            'properties': {
                'name': {
                    'type': 'string'
                },
                'age': {
                    'type': 'integer'
                },
                'birthdate': {
                    'type': 'string',
                    'format': 'date-time'
                }
            },
            'type': 'object'
        })

        op = data['paths']['/model-as-dict/']['post']
        self.assertEqual(len(op['parameters']), 1)

        parameter = op['parameters'][0]
        self.assertEqual(parameter, {
            'name': 'payload',
            'in': 'body',
            'required': True,
            'description': 'Body description',
            'schema': {
                '$ref': '#/definitions/Person'
            }
        })

    def test_authorizations(self):
        restplus.Api(self.app, authorizations={
            'apikey': {
                'type': 'apiKey',
                'in': 'header',
                'name': 'X-API'
            }
        })

        # @api.route('/authorizations/')
        # class ModelAsDict(restplus.Resource):
        #     def get(self):
        #         return {}
        #
        #     def post(self):
        #         return {}

        data = self.get_specs()
        self.assertIn('securityDefinitions', data)
        self.assertNotIn('security', data)

        # path = data['paths']['/authorizations/']
        # self.assertNotIn('security', path['get'])
        # self.assertEqual(path['post']['security'], {'apikey': []})
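
    # Root-level 'security' accepts a single string, a scope mapping, or a
    # list of both; operations should inherit it and only carry their own
    # 'security' key when they override or clear it.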

    def test_single_root_security_string(self):
        api = restplus.Api(self.app, security='apikey', authorizations={
            'apikey': {
                'type': 'apiKey',
                'in': 'header',
                'name': 'X-API'
            }
        })

        @api.route('/authorizations/')
        class ModelAsDict(restplus.Resource):
            def post(self):
                return {}

        data = self.get_specs()
        self.assertEqual(data['securityDefinitions'], {
            'apikey': {
                'type': 'apiKey',
                'in': 'header',
                'name': 'X-API'
            }
        })
        self.assertEqual(data['security'], [{'apikey': []}])

        op = data['paths']['/authorizations/']['post']
        self.assertNotIn('security', op)

    def test_single_root_security_object(self):
        security_definitions = {
            'oauth2': {
                'type': 'oauth2',
                'flow': 'accessCode',
                'tokenUrl': 'https://somewhere.com/token',
                'scopes': {
                    'read': 'Grant read-only access',
                    'write': 'Grant read-write access',
                }
            },
            'implicit': {
                'type': 'oauth2',
                'flow': 'implicit',
                'tokenUrl': 'https://somewhere.com/token',
                'scopes': {
                    'read': 'Grant read-only access',
                    'write': 'Grant read-write access',
                }
            }
        }
        api = restplus.Api(self.app,
                           security={
                               'oauth2': 'read',
                               'implicit': ['read', 'write']
                           },
                           authorizations=security_definitions)

        @api.route('/authorizations/')
        class ModelAsDict(restplus.Resource):
            def post(self):
                return {}

        data = self.get_specs()
        self.assertEqual(data['securityDefinitions'], security_definitions)
        self.assertEqual(data['security'], [{
            'oauth2': ['read'],
            'implicit': ['read', 'write']
        }])

        op = data['paths']['/authorizations/']['post']
        self.assertNotIn('security', op)

    def test_root_security_as_list(self):
        security_definitions = {
            'apikey': {
                'type': 'apiKey',
                'in': 'header',
                'name': 'X-API'
            },
            'oauth2': {
                'type': 'oauth2',
                'flow': 'accessCode',
                'tokenUrl': 'https://somewhere.com/token',
                'scopes': {
                    'read': 'Grant read-only access',
                    'write': 'Grant read-write access',
                }
            }
        }
        api = restplus.Api(self.app,
                           security=['apikey', {'oauth2': 'read'}],
                           authorizations=security_definitions)

        @api.route('/authorizations/')
        class ModelAsDict(restplus.Resource):
            def post(self):
                return {}

        data = self.get_specs()
        self.assertEqual(data['securityDefinitions'], security_definitions)
        self.assertEqual(data['security'], [{'apikey': []}, {'oauth2': ['read']}])

        op = data['paths']['/authorizations/']['post']
        self.assertNotIn('security', op)

    def test_method_security(self):
        api = restplus.Api(self.app, authorizations={
            'apikey': {
                'type': 'apiKey',
                'in': 'header',
                'name': 'X-API'
            }
        })

        @api.route('/authorizations/')
        class ModelAsDict(restplus.Resource):
            @api.doc(security=['apikey'])
            def get(self):
                return {}

            @api.doc(security='apikey')
            def post(self):
                return {}

        data = self.get_specs()
        self.assertEqual(data['securityDefinitions'], {
            'apikey': {
                'type': 'apiKey',
                'in': 'header',
                'name': 'X-API'
            }
        })
        self.assertNotIn('security', data)

        path = data['paths']['/authorizations/']
        for method in 'get', 'post':
            self.assertEqual(path[method]['security'], [{'apikey': []}])

    def test_security_override(self):
        security_definitions = {
            'apikey': {
                'type': 'apiKey',
                'in': 'header',
                'name': 'X-API'
            },
            'oauth2': {
                'type': 'oauth2',
                'flow': 'accessCode',
                'tokenUrl': 'https://somewhere.com/token',
                'scopes': {
                    'read': 'Grant read-only access',
                    'write': 'Grant read-write access',
                }
            }
        }
        api = restplus.Api(self.app,
                           security=['apikey', {'oauth2': 'read'}],
                           authorizations=security_definitions)

        @api.route('/authorizations/')
        class ModelAsDict(restplus.Resource):
            @api.doc(security=[{'oauth2': ['read', 'write']}])
            def get(self):
                return {}

        data = self.get_specs()
        self.assertEqual(data['securityDefinitions'], security_definitions)

        op = data['paths']['/authorizations/']['get']
        self.assertEqual(op['security'], [{'oauth2': ['read', 'write']}])

    def test_security_nullify(self):
        security_definitions = {
            'apikey': {
                'type': 'apiKey',
                'in': 'header',
                'name': 'X-API'
            },
            'oauth2': {
                'type': 'oauth2',
                'flow': 'accessCode',
                'tokenUrl': 'https://somewhere.com/token',
                'scopes': {
                    'read': 'Grant read-only access',
                    'write': 'Grant read-write access',
                }
            }
        }
        api = restplus.Api(self.app,
                           security=['apikey', {'oauth2': 'read'}],
                           authorizations=security_definitions)

        @api.route('/authorizations/')
        class ModelAsDict(restplus.Resource):
            @api.doc(security=[])
            def get(self):
                return {}

            @api.doc(security=None)
            def post(self):
                return {}

        data = self.get_specs()
        self.assertEqual(data['securityDefinitions'], security_definitions)

        path = data['paths']['/authorizations/']
        for method in 'get', 'post':
            self.assertEqual(path[method]['security'], [])

    def test_hidden_resource(self):
        api = self.build_api()

        @api.route('/test/', endpoint='test', doc=False)
        class TestResource(restplus.Resource):
            def get(self):
                '''
                GET operation
                '''
                return {}

        @api.hide
        @api.route('/test2/', endpoint='test2')
        class TestResource2(restplus.Resource):
            def get(self):
                '''
                GET operation
                '''
                return {}

        @api.doc(False)
        @api.route('/test3/', endpoint='test3')
        class TestResource3(restplus.Resource):
            def get(self):
                '''
                GET operation
                '''
                return {}

        data = self.get_specs()
        for path in '/test/', '/test2/', '/test3/':
            self.assertNotIn(path, data['paths'])

            with self.app.test_client() as client:
                resp = client.get(path)
                self.assertEqual(resp.status_code, 200)

    def test_hidden_resource_from_namespace(self):
        api = self.build_api()
        ns = api.namespace('ns')

        @ns.route('/test/', endpoint='test', doc=False)
        class TestResource(restplus.Resource):
            def get(self):
                '''
                GET operation
                '''
                return {}

        data = self.get_specs()
        self.assertNotIn('/ns/test/', data['paths'])

        with self.app.test_client() as client:
            resp = client.get('/ns/test/')
            self.assertEqual(resp.status_code, 200)
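
    # Hiding also works per method; deprecation and HTTP method restrictions
    # round out the operation-level documentation controls below.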
Should be ignored''' return {} def delete(self): return {} data = self.get_specs() path = data['paths']['/test/'] self.assertIn('get', path) self.assertNotIn('post', path) self.assertNotIn('put', path) for method in 'GET', 'POST', 'PUT': with self.app.test_client() as client: resp = client.open('/test/', method=method) self.assertEqual(resp.status_code, 200) def test_deprecated_resource(self): api = self.build_api() @api.deprecated @api.route('/test/', endpoint='test') class TestResource(restplus.Resource): def get(self): pass def post(self): pass data = self.get_specs() resource = data['paths']['/test/'] for operation in resource.values(): self.assertIn('deprecated', operation) self.assertTrue(operation['deprecated']) def test_deprecated_method(self): api = self.build_api() @api.route('/test/', endpoint='test') class TestResource(restplus.Resource): def get(self): pass @api.deprecated def post(self): pass data = self.get_specs() get_operation = data['paths']['/test/']['get'] self.assertNotIn('deprecated', get_operation) post_operation = data['paths']['/test/']['post'] self.assertIn('deprecated', post_operation) self.assertTrue(post_operation['deprecated']) def test_method_restrictions(self): api = self.build_api() @api.route('/foo/bar', endpoint='foo') @api.route('/bar', methods=['GET'], endpoint='bar') class TestResource(restplus.Resource): def get(self): pass def post(self): pass data = self.get_specs() path = data['paths']['/foo/bar'] self.assertIn('get', path) self.assertIn('post', path) path = data['paths']['/bar'] self.assertIn('get', path) self.assertNotIn('post', path) class SwaggerDeprecatedTest(ApiMixin, TestCase): def test_doc_parser_parameters(self): api = self.build_api() parser = api.parser() parser.add_argument('param', type=int, help='Some param') with self.assert_warning(DeprecationWarning): @api.route('/with-parser/') class WithParserResource(restplus.Resource): @api.doc(parser=parser) def get(self): return {} self.assertNotIn('parser', WithParserResource.get.__apidoc__) self.assertIn('expect', WithParserResource.get.__apidoc__) doc_parser = WithParserResource.get.__apidoc__['expect'][0] self.assertEqual(doc_parser.__schema__, parser.__schema__) def test_doc_method_parser_on_class(self): api = self.build_api() parser = api.parser() parser.add_argument('param', type=int, help='Some param') with self.assert_warning(DeprecationWarning): @api.route('/with-parser/') @api.doc(get={'parser': parser}) class WithParserResource(restplus.Resource): def get(self): return {} def post(self): return {} self.assertNotIn('parser', WithParserResource.__apidoc__['get']) self.assertIn('expect', WithParserResource.__apidoc__['get']) doc_parser = WithParserResource.__apidoc__['get']['expect'][0] self.assertEqual(doc_parser.__schema__, parser.__schema__) def test_doc_body_as_tuple(self): api = self.build_api() fields = api.model('Person', { 'name': restplus.fields.String, 'age': restplus.fields.Integer, 'birthdate': restplus.fields.DateTime, }) with self.assert_warning(DeprecationWarning): @api.route('/model-as-dict/') class ModelAsDict(restplus.Resource): @api.doc(body=(fields, 'Body description')) def post(self): return {} self.assertNotIn('body', ModelAsDict.post.__apidoc__) self.assertEqual(ModelAsDict.post.__apidoc__['expect'], [(fields, 'Body description')])
31.689933
114
0.506065
8,356
90,348
5.388822
0.037458
0.108264
0.069289
0.031624
0.860223
0.827955
0.7928
0.768416
0.745075
0.720647
0
0.006411
0.33185
90,348
2,850
115
31.701053
0.739522
0.008124
0
0.75122
0
0
0.183562
0.002894
0
0
0
0
0.207095
1
0.093126
false
0.007539
0.002661
0.037694
0.178714
0.003104
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
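The record above pins down how flask-restplus merges root-level and per-method security into the generated Swagger spec. A minimal non-test sketch of the same wiring, under the assumption that `restplus` is the `flask_restplus` package and with an illustrative app name and route:

# Hedged sketch of flask-restplus security wiring, mirroring what the
# record's tests assert; the app name and '/things/' route are made up.
from flask import Flask
import flask_restplus as restplus

app = Flask(__name__)
authorizations = {
    'apikey': {'type': 'apiKey', 'in': 'header', 'name': 'X-API'}
}
# Root-level security applies to every operation by default.
api = restplus.Api(app, security='apikey', authorizations=authorizations)

@api.route('/things/')
class Things(restplus.Resource):
    @api.doc(security=[])  # opt this one operation out of the root default
    def get(self):
        return {}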
cb6bf6f2f823217f5fcb689a559f9713535165c9
404
py
Python
bukdjango_captcha/recaptcha_v3/secrets.py
bukdjango/captcha
5105477d00b43b2a6cc2315e8458bd755fa45801
[ "MIT" ]
null
null
null
bukdjango_captcha/recaptcha_v3/secrets.py
bukdjango/captcha
5105477d00b43b2a6cc2315e8458bd755fa45801
[ "MIT" ]
null
null
null
bukdjango_captcha/recaptcha_v3/secrets.py
bukdjango/captcha
5105477d00b43b2a6cc2315e8458bd755fa45801
[ "MIT" ]
null
null
null
import os


def RECAPTCHA_V3_SITE():
    return os.environ['BUKDJANGO_CAPTCHA_RECAPTCHA_V3_SITE']


def RECAPTCHA_V3_SECRET():
    return os.environ['BUKDJANGO_CAPTCHA_RECAPTCHA_V3_SECRET']


def RECAPTCHA_V3_SCORE():
    return float(os.environ.get('BUKDJANGO_CAPTCHA_RECAPTCHA_V3_SCORE', "0.5"))


def RECAPTCHA_V3_DISABLE():
    return int(os.environ.get('BUKDJANGO_CAPTCHA_RECAPTCHA_V3_DISABLE', 0))
22.444444
79
0.784653
57
404
5.140351
0.315789
0.300341
0.191126
0.368601
0.552901
0.552901
0.552901
0
0
0
0
0.030556
0.108911
404
17
80
23.764706
0.783333
0
0
0
0
0
0.368812
0.361386
0
0
0
0
0
1
0.444444
true
0
0.111111
0.444444
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
1
1
0
0
7
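The record above stores each reCAPTCHA setting as a zero-argument callable rather than a module-level constant, so the environment is read lazily at call time. A standalone sketch of that pattern (the accessor below copies one function from the record; the example values are assumptions):

# Lazy env lookup: the setting is resolved when called, not at import time,
# so callers (or tests) can change os.environ without reloading the module.
import os

def RECAPTCHA_V3_SCORE():
    return float(os.environ.get('BUKDJANGO_CAPTCHA_RECAPTCHA_V3_SCORE', "0.5"))

print(RECAPTCHA_V3_SCORE())  # 0.5 -- default, nothing set yet
os.environ['BUKDJANGO_CAPTCHA_RECAPTCHA_V3_SCORE'] = '0.9'
print(RECAPTCHA_V3_SCORE())  # 0.9 -- picked up on the next call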
cb85a7a317ee8e0075a9357cddc0979f31495e27
104
py
Python
tests/runtime-trace-tests/cases/slice_str_types.py
jaydeetay/pxt
aad1beaf15edc46e1327806367298cbc942dcbc1
[ "MIT" ]
977
2019-05-06T23:12:55.000Z
2022-03-29T19:11:44.000Z
tests/runtime-trace-tests/cases/slice_str_types.py
jaydeetay/pxt
aad1beaf15edc46e1327806367298cbc942dcbc1
[ "MIT" ]
3,980
2019-05-09T20:48:14.000Z
2022-03-28T20:33:07.000Z
tests/runtime-trace-tests/cases/slice_str_types.py
jaydeetay/pxt
aad1beaf15edc46e1327806367298cbc942dcbc1
[ "MIT" ]
306
2016-04-09T05:28:07.000Z
2019-05-02T14:23:29.000Z
a = [[2,3], [4,5,6], [7]]
b = a[1]
print(len(b))
c = "test"[1]
print(len(c))
d = str(45)
print(len(d))
11.555556
25
0.471154
25
104
1.96
0.64
0.489796
0.367347
0
0
0
0
0
0
0
0
0.116279
0.173077
104
9
26
11.555556
0.453488
0
0
0
0
0
0.038095
0
0
0
0
0
0
1
0
false
0
0
0
0
0.428571
1
0
1
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
7
cbe108530a9da3cbfe97357429424e2c9d662df9
3,423
py
Python
Fileset/add_exclusion_to_fileset.py
hbuter-rubrik/rubrik-scripts-for-python
0e434854b778ff0f857425173e5cb7d6b83dddec
[ "MIT" ]
5
2019-10-04T18:09:24.000Z
2020-08-25T04:46:01.000Z
Fileset/add_exclusion_to_fileset.py
hbuter-rubrik/rubrik-scripts-for-python
0e434854b778ff0f857425173e5cb7d6b83dddec
[ "MIT" ]
2
2020-01-07T18:25:11.000Z
2021-10-14T11:48:27.000Z
Fileset/add_exclusion_to_fileset.py
hbuter-rubrik/rubrik-scripts-for-python
0e434854b778ff0f857425173e5cb7d6b83dddec
[ "MIT" ]
6
2019-04-25T10:26:30.000Z
2021-11-18T08:20:50.000Z
#
# Title: add_exclusion_to_fileset.py
#
# Description: This script will take all fileset templates of a given type on a Rubrik system, and update the exclusion
# list to include an array of additional file types.
#
# Author: Stew Parkin (Assured DP), Tim Hynes (Rubrik)
#
import requests
import json

requests.packages.urllib3.disable_warnings()

## Variables to modify here
username = "admin"
password = "Rubrik123!"
rubrik_ip = "rubrik.demo.com"
exclude_list = ["*.mp3"]
fileset_type = "SMB"

## Should not need to modify anything beyond this point
clusterUrl = "https://" + rubrik_ip + "/api/v1"
fileseturl = clusterUrl + "/fileset_template?share_type=" + fileset_type

# Get our session token
tokenurl = clusterUrl + "/session"
session = requests.post(tokenurl, verify=False, auth=(username, password))
session = session.json()
token = 'Bearer ' + session['token']

# Get all fileset templates
filesets = requests.get(fileseturl, headers={
    'Accept': 'application/json',
    'Authorization': token
}, verify=False, stream=True)
filesets = filesets.json()

# For each fileset template we will add the new file exclusion types, and patch it using the REST API
for fileset in filesets['data']:
    # Copy the base exclusion list so each fileset starts fresh rather than
    # accumulating every earlier fileset's excludes through list aliasing
    excludes = list(exclude_list)
    filesetId = fileset['id']
    for exclude in fileset['excludes']:
        excludes.append(str(exclude))
    data = {"id": filesetId, "excludes": excludes}
    print(data)
    editUrl = clusterUrl + "/fileset_template/" + filesetId
    addExclude = requests.patch(editUrl, data=json.dumps(data), headers={
        'Accept': 'application/json',
        'Authorization': token
    }, verify=False, stream=True)
    print(addExclude)
    print(addExclude.text)
40.270588
157
0.711072
422
3,423
5.7109
0.258294
0.037344
0.031535
0.046473
0.965975
0.965975
0.965975
0.965975
0.965975
0.965975
0
0.010526
0.167397
3,423
84
158
40.75
0.835088
0.298276
0
0.892857
0
0
0.183533
0.024871
0
0
0
0
0
0
null
null
0.071429
0.071429
null
null
0.107143
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
9
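The only substantive difference between the two sides of the merge conflict resolved in the record above is re-binding `excludes` inside the loop; without a copy, every fileset shares one list and accumulates all previous filesets' excludes. A standalone sketch of the aliasing pitfall (the variable names below are illustrative):

# Assignment aliases a Python list, it does not copy it, so appends
# through the alias mutate the shared object.
base = ["*.mp3"]

alias = base            # both names point at the same list object
alias.append("*.wav")
print(base)             # ['*.mp3', '*.wav'] -- base was mutated too

fresh = list(base)      # a shallow copy starts each iteration clean
fresh.append("*.tmp")
print(base)             # ['*.mp3', '*.wav'] -- unchanged this time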
1dd87cbb3822444054a7c091d674fa2df5369f75
2,442
py
Python
test/test_group.py
mergermarket/snyk-access
663caed22a58bf113e24236f75876b2a19a1a846
[ "MIT" ]
null
null
null
test/test_group.py
mergermarket/snyk-access
663caed22a58bf113e24236f75876b2a19a1a846
[ "MIT" ]
91
2019-08-21T09:46:01.000Z
2022-03-25T07:17:28.000Z
test/test_group.py
mergermarket/snyk-access
663caed22a58bf113e24236f75876b2a19a1a846
[ "MIT" ]
null
null
null
import json
import unittest

import httpretty

import snyk


class TestCreateOrg(unittest.TestCase):
    def setUp(self):
        httpretty.enable(allow_net_connect=False)
        self.base_url = 'http://snyk'
        httpretty.register_uri(
            httpretty.POST, self.base_url + '/group/1/org',
            body=json.dumps({
                'id': 'aaaaaaaa-4438-cccc-a3ba-eeeeeeeeeeee',
                'name': 'bar',
                'created': '2019-07-16T16:08:47.648Z',
            }),
            adding_headers={
                'Content-Type': 'text/plain; charset=utf-8',
            },
        )
        self.client = snyk.HTTPClient(self.base_url, 'token', 1)
        self.group = snyk.Group(self.client, 'foo', '1')
        self.org = self.group.create_org('bar')

    def tearDown(self):
        httpretty.disable()

    def test_has_correct_id(self):
        assert self.org.id == 'aaaaaaaa-4438-cccc-a3ba-eeeeeeeeeeee'

    def test_has_correct_name(self):
        assert self.org.name == 'bar'

    def test_has_the_correct_group_assigned(self):
        assert self.org.group is self.group


class TestCreateOrgFromSource(unittest.TestCase):
    def setUp(self):
        httpretty.enable(allow_net_connect=False)
        self.base_url = 'http://snyk'
        httpretty.register_uri(
            httpretty.POST, self.base_url + '/group/1/org',
            body=json.dumps({
                'id': 'aaaaaaaa-4438-cccc-a3ba-eeeeeeeeeeee',
                'name': 'bar',
                'created': '2019-07-16T16:08:47.648Z',
            }),
            adding_headers={
                'Content-Type': 'text/plain; charset=utf-8',
            },
        )
        self.client = snyk.HTTPClient(self.base_url, 'token', 1)
        self.group = snyk.Group(self.client, 'foo', '1')
        self.source_org = snyk.Org(self.client, 'foo', '1', self.group)
        self.org = self.group.create_org('bar', self.source_org)

    def tearDown(self):
        httpretty.disable()

    def test_passes_source_org(self):
        request = httpretty.last_request()
        assert request.parsed_body['sourceOrgId'] == self.source_org.id

    def test_has_correct_id(self):
        assert self.org.id == 'aaaaaaaa-4438-cccc-a3ba-eeeeeeeeeeee'

    def test_has_correct_name(self):
        assert self.org.name == 'bar'

    def test_has_the_correct_group_assigned(self):
        assert self.org.group is self.group
30.525
71
0.590909
293
2,442
4.767918
0.245734
0.040086
0.047244
0.073014
0.839656
0.826772
0.826772
0.740157
0.740157
0.740157
0
0.035735
0.278051
2,442
79
72
30.911392
0.756665
0
0
0.741935
0
0
0.159296
0.078624
0
0
0
0
0.112903
1
0.177419
false
0.016129
0.064516
0
0.274194
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
3839f9042397a45f34ee6d99b59ab805f609d5f3
42,618
py
Python
app/DryBones.py
EngineerNerd/starter-snake-python
4b54f241c0383fd0ed1a2c2ee55ea76bcbc1113c
[ "MIT" ]
null
null
null
app/DryBones.py
EngineerNerd/starter-snake-python
4b54f241c0383fd0ed1a2c2ee55ea76bcbc1113c
[ "MIT" ]
null
null
null
app/DryBones.py
EngineerNerd/starter-snake-python
4b54f241c0383fd0ed1a2c2ee55ea76bcbc1113c
[ "MIT" ]
null
null
null
import json
import os
import random
import bottle
import time

from api import ping_response, start_response, move_response, end_response


@bottle.route('/')
def index():
    return '''
    Battlesnake documentation can be found at
       <a href="https://docs.battlesnake.io">https://docs.battlesnake.io</a>.
    '''


@bottle.route('/static/<path:path>')
def static(path):
    """
    Given a path, return the static file located relative
    to the static folder.

    This can be used to return the snake head URL in an API response.
    """
    return bottle.static_file(path, root='static/')


@bottle.post('/ping')
def ping():
    """
    A keep-alive endpoint used to prevent cloud application platforms,
    such as Heroku, from sleeping the application instance.
    """
    return ping_response()


@bottle.post('/start')
def start():
    data = bottle.request.json
    """
    TODO: If you intend to have a stateful snake AI,
    initialize your snake state here using the
    request's data if necessary.
    """
    color = "#C0C0C0"
    return start_response(color)


@bottle.post('/move')
def move():
    starttime = time.time()
    jsonData = bottle.request.json
    print("\n\n" + str(jsonData["turn"]))

    my_head_x_component = jsonData["you"]["body"][0]["x"]
    my_head_y_component = jsonData["you"]["body"][0]["y"]

    Direction = choose_direction(jsonData, my_head_x_component, my_head_y_component)

    number_of_simulations_to_run = 20

    def run_rollout(first_direction, chooser):
        # Roll the snake forward up to number_of_simulations_to_run turns:
        # one step in first_direction, then steps picked by chooser.
        # Returns (turns survived, last head location).
        current, status = simulate(
            snake_location_copyjsonData(jsonData, my_head_x_component, my_head_y_component),
            first_direction, jsonData)
        if status == "Simulation snake is dead":
            return 0, [current[0][0], current[0][1]]
        turns = 1
        while turns < number_of_simulations_to_run:
            current, status = simulate(current, chooser(current, jsonData), jsonData)
            if status == "Simulation snake is dead":
                break
            turns += 1
        return turns, [current[0][0], current[0][1]]

    # Six candidate rollouts: the heuristic choice followed with the close-
    # and far-precedence choosers, then each literal direction followed with
    # the close-precedence chooser.
    candidates = [
        (Direction, choose_direction_simulation_close_precedence),
        (Direction, choose_direction_simulation_far_precedence),
        ("left", choose_direction_simulation_close_precedence),
        ("up", choose_direction_simulation_close_precedence),
        ("right", choose_direction_simulation_close_precedence),
        ("down", choose_direction_simulation_close_precedence),
    ]

    best_direction = Direction
    last_location = None
    number_of_simulations_survived = -1
    for i, (first_direction, chooser) in enumerate(candidates):
        survived, location = run_rollout(first_direction, chooser)
        # The first rollout sets the baseline; a later rollout only wins if
        # it ends somewhere new and survives strictly longer.
        if i == 0 or (location != last_location and survived > number_of_simulations_survived):
            best_direction = first_direction
            last_location = location
            number_of_simulations_survived = survived
        print("Time " + str(i + 1) + ": " + str(time.time() - starttime))

    print("Best Direction: " + str(best_direction) +
          "\nLast Location: " + str(last_location) +
          "\n Number of simulations survived: " + str(number_of_simulations_survived))
    Direction = best_direction
    print("Direction is: " + str(Direction))

    endtime = time.time()
    print("Final time: " + str(endtime - starttime))
    if endtime - starttime > .250:
        print("Overtime!")
    return move_response(Direction)


def choose_direction(jsonData, my_head_x_component, my_head_y_component):
    # Head toward the first food item along the axis it is farthest away on,
    # then fall back through progressively less preferred but safe choices.
    food = find_food_location_relative_to_me(jsonData, my_head_x_component, my_head_y_component)
    if abs(food[0]) > abs(food[1]):
        Direction = "right" if food[0] > 0 else "left"
    else:
        Direction = "up" if food[1] > 0 else "down"
    if am_i_about_to_kill_my_self(jsonData, my_head_x_component, my_head_y_component, Direction):
        # Try the other axis toward the food first.
        if Direction in ("right", "left"):
            Direction = "up" if food[1] > 0 else "down"
        else:
            Direction = "right" if food[0] > 0 else "left"
        if am_i_about_to_kill_my_self(jsonData, my_head_x_component, my_head_y_component, Direction):
            Direction = switch_direction(Direction)
            if am_i_about_to_kill_my_self(jsonData, my_head_x_component, my_head_y_component, Direction):
                Direction = go_any_safe_direction(jsonData, my_head_x_component, my_head_y_component)
    return Direction


def _choose_direction_simulation(simulation_data, jsonData, prefer_far_axis):
    # Shared body of the close-/far-precedence choosers: they differ only in
    # which axis comparison picks the first move toward the food.
    food = find_food_location_relative_to_me_simulation(simulation_data, jsonData)
    if prefer_far_axis:
        move_along_x = abs(food[0]) < abs(food[1])
    else:
        move_along_x = abs(food[0]) > abs(food[1])
    Direction = "up"
    if move_along_x:
        if food[0] > 0:
            Direction = "right"
        elif food[0] < 0:
            Direction = "left"
    else:
        if food[1] > 0:
            Direction = "up"
        elif food[1] < 0:
            Direction = "down"
    if am_i_about_to_kill_my_self_simulation(simulation_data, jsonData, Direction):
        if Direction in ("right", "left"):
            Direction = "up" if food[1] > 0 else "down"
        else:
            Direction = "right" if food[0] > 0 else "left"
        if am_i_about_to_kill_my_self_simulation(simulation_data, jsonData, Direction):
            Direction = switch_direction(Direction)
            if am_i_about_to_kill_my_self_simulation(simulation_data, jsonData, Direction):
                Direction = go_any_safe_direction_simulation(simulation_data, jsonData)
    return Direction


def choose_direction_simulation_close_precedence(simulation_data, jsonData):
    return _choose_direction_simulation(simulation_data, jsonData, prefer_far_axis=False)


def choose_direction_simulation_far_precedence(simulation_data, jsonData):
    return _choose_direction_simulation(simulation_data, jsonData, prefer_far_axis=True)


def go_any_safe_direction(jsonData, my_head_x_component, my_head_y_component):
    # Take the first direction that does not kill the snake outright.
    for candidate in ("left", "right", "up", "down"):
        if not am_i_about_to_kill_my_self(jsonData, my_head_x_component, my_head_y_component, candidate):
            return candidate
    return "Nonesafe"


def go_any_safe_direction_simulation(simulation_data, jsonData):
    for candidate in ("left", "right", "up", "down"):
        if not am_i_about_to_kill_my_self_simulation(simulation_data, jsonData, candidate):
            return candidate
    return "Nonesafe"


def switch_direction(Direction):
    return {"right": "left", "left": "right", "up": "down", "down": "up"}[Direction]


def find_food_location_relative_to_me(jsonData, my_head_x_component, my_head_y_component):
    # Offset to the first food item: +x is right, +y is up (board y grows
    # downward, hence the inverted subtraction for the y component).
    for fud in jsonData["board"]["food"]:
        return [fud["x"] - my_head_x_component, my_head_y_component - fud["y"]]


def find_food_location_relative_to_me_simulation(simulation_data, jsonData):
    my_head_x_component = simulation_data[0][0]
    my_head_y_component = simulation_data[0][1]
    for fud in jsonData["board"]["food"]:
        return [fud["x"] - my_head_x_component, my_head_y_component - fud["y"]]


def look_for_food_y_precedence(jsonData, my_head_x_component, my_head_y_component):
    # Pick up/down toward the first food item.
    for fud in jsonData["board"]["food"]:
        if my_head_y_component < fud["y"]:
            temp_direction = "down"
        if my_head_y_component > fud["y"]:
            temp_direction = "up"
        return temp_direction


def look_for_food_x_precedence(jsonData, my_head_x_component, my_head_y_component):
    # Pick left/right toward the first food item.
    for fud in jsonData["board"]["food"]:
        if my_head_x_component < fud["x"]:
            temp_direction = "right"
        if my_head_x_component > fud["x"]:
            temp_direction = "left"
        return temp_direction


def am_i_about_to_kill_my_self(jsonData, my_head_x_component, my_head_y_component, Direction):
    # True if one step in Direction lands on the snake's own body or leaves
    # the board.
    nextsquare_x = my_head_x_component
    nextsquare_y = my_head_y_component
    if Direction == "left":
        nextsquare_x -= 1
    if Direction == "right":
        nextsquare_x += 1
    if Direction == "up":
        nextsquare_y -= 1
    if Direction == "down":
        nextsquare_y += 1
    for bodypart in jsonData["you"]["body"]:
        if nextsquare_x == bodypart["x"] and nextsquare_y == bodypart["y"]:
            return True
    if nextsquare_x == -1 and Direction == "left":
        return True
    if nextsquare_x == jsonData["board"]["width"] and Direction == "right":
        return True
    if nextsquare_y == -1 and Direction == "up":
        return True
    if nextsquare_y == jsonData["board"]["height"] and Direction == "down":
        return True
    return False


def am_i_about_to_kill_my_self_simulation(simulation_data, jsonData, Direction):
    # Same check as above, but against a simulated body ([x, y] pairs).
    my_head_x_component = simulation_data[0][0]
    my_head_y_component = simulation_data[0][1]
    nextsquare_x = my_head_x_component
    nextsquare_y = my_head_y_component
    if Direction == "left":
        nextsquare_x -= 1
    if Direction == "right":
        nextsquare_x += 1
    if Direction == "up":
        nextsquare_y -= 1
    if Direction == "down":
        nextsquare_y += 1
    for segment in simulation_data:
        if nextsquare_x == segment[0] and nextsquare_y == segment[1]:
            return True
    if nextsquare_x == -1 and Direction == "left":
        return True
    if nextsquare_x == jsonData["board"]["width"] and Direction == "right":
        return True
    if nextsquare_y == -1 and Direction == "up":
        return True
    if nextsquare_y == jsonData["board"]["height"] and Direction == "down":
        return True
    return False


def snake_location_copyjsonData(jsonData, my_head_x_component, my_head_y_component):
    # Copy the snake body out of the request JSON as a list of [x, y] pairs.
    return [[bodypart["x"], bodypart["y"]] for bodypart in jsonData["you"]["body"]]


def simulate(jsonData_copy, Direction, jsonData):
    # Advance the body one turn in Direction. Returns (new body, status);
    # on death the original body is returned unchanged.
    if Direction == "Nonesafe":
        return (jsonData_copy, "Simulation snake is dead")
    index = len(jsonData_copy) - 1
    x_direction = 0
    y_direction = 0
    if Direction == "left":
        x_direction = -1
    if Direction == "right":
        x_direction = 1
    if Direction == "up":
        y_direction = -1
    if Direction == "down":
        y_direction = 1
    # Move the head one square and shift every body segment forward.
    simulation = [[-1, -1] for _ in range(index + 1)]
    count = index
    while count >= 0:
        if count == 0:
            simulation[count][0] = jsonData_copy[count][0] + x_direction
            simulation[count][1] = jsonData_copy[count][1] + y_direction
        else:
            simulation[count][0] = jsonData_copy[count - 1][0]
            simulation[count][1] = jsonData_copy[count - 1][1]
        count -= 1
    # Any overlap between segments, or a segment off the board, is death.
    count = index
    while count >= 0:
        y = index
        while y >= 0:
            if simulation[count][0] == simulation[y][0] and simulation[count][1] == simulation[y][1] and count != y:
                return (jsonData_copy, "Simulation snake is dead")
            if simulation[count][0] < 0 or simulation[count][1] < 0:
                return (jsonData_copy, "Simulation snake is dead")
            if simulation[count][0] > (jsonData["board"]["width"] - 1) or simulation[count][1] > (jsonData["board"]["height"] - 1):
                return (jsonData_copy, "Simulation snake is dead")
            y -= 1
        count -= 1
    return (simulation, "Simulation snake is alive")


@bottle.post('/end')
def end():
    data = bottle.request.json
    """
    TODO: If your snake AI was stateful,
    clean up any stateful objects here.
    """
    return end_response()


# Expose WSGI app (so gunicorn can find it)
application = bottle.default_app()

if __name__ == '__main__':
    bottle.run(
        application,
        host=os.getenv('IP', '0.0.0.0'),
        port=os.getenv('PORT', '8080'),
        debug=os.getenv('DEBUG', True)
    )
46.67908
171
0.644423
5,279
42,618
4.863232
0.044895
0.037861
0.022085
0.050481
0.876018
0.857282
0.845051
0.837495
0.833171
0.826861
0
0.015839
0.226688
42,618
912
172
46.730263
0.763146
0.350087
0
0.688034
0
0.002137
0.053548
0
0
0
0
0.002193
0
0
null
null
0
0.012821
null
null
0.023504
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
7
69b4cc13ba0387d4a46a2105bb1d147d9498634c
119
py
Python
amfeti/local_problems/__init__.py
AppliedMechanics/AMfe
be209dffe4d170aca735f1e912fd5cb448502119
[ "BSD-3-Clause" ]
21
2017-06-01T15:55:33.000Z
2022-03-13T08:43:31.000Z
amfeti/local_problems/__init__.py
AppliedMechanics/AMfeti
be209dffe4d170aca735f1e912fd5cb448502119
[ "BSD-3-Clause" ]
1
2022-01-08T07:20:15.000Z
2022-01-13T23:56:33.000Z
amfeti/local_problems/__init__.py
AppliedMechanics/AMfeti
be209dffe4d170aca735f1e912fd5cb448502119
[ "BSD-3-Clause" ]
10
2018-01-11T23:48:55.000Z
2022-01-12T15:58:54.000Z
from .static_local_problems import *
from .dynamic_local_problems import *
from .integrator_base import IntegratorBase
29.75
43
0.857143
15
119
6.466667
0.6
0.268041
0.391753
0.474227
0
0
0
0
0
0
0
0
0.10084
119
3
44
39.666667
0.906542
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
69bcae9c9c0044b0f67d8c3606b822b1b2a2375a
3,480
py
Python
Testes/Turtle_Circles.py
Juan-Kuhne/Python
52de96f60670e97203e203b20238133fc9b5ac02
[ "MIT" ]
null
null
null
Testes/Turtle_Circles.py
Juan-Kuhne/Python
52de96f60670e97203e203b20238133fc9b5ac02
[ "MIT" ]
null
null
null
Testes/Turtle_Circles.py
Juan-Kuhne/Python
52de96f60670e97203e203b20238133fc9b5ac02
[ "MIT" ]
null
null
null
import turtle as tt


def ext_circles(line = 1):
    st.color('black')
    line = 1 + ((line - 1) * 2)
    st.up()
    st.goto(0,0)
    # top circle
    st.seth(0)
    st.lt(90)
    st.fd(51.96 * line)
    st.rt(90)
    st.down()
    st.circle(51.96)
    st.up()
    st.goto(0,0)
    st.seth(90)
    st.left(60)
    st.fd(51.96 * line)
    st.rt(90)
    st.down()
    st.circle(51.96)
    st.up()
    st.seth(270)
    st.goto(0,0)
    st.right(60)
    st.fd(51.96 * line)
    st.rt(90)
    st.down()
    st.circle(51.96)
    st.up()
    st.goto(0,0)
    st.seth(270)
    st.fd(51.96 * line)
    st.rt(90)
    st.down()
    st.circle(51.96)
    st.up()
    st.goto(0,0)
    st.seth(270)
    st.lt(60)
    st.fd(51.96 * line)
    st.rt(90)
    st.down()
    st.circle(51.96)
    st.up()
    st.goto(0,0)
    st.seth(90)
    st.rt(60)
    st.fd(51.96 * line)
    st.rt(90)
    st.down()
    st.circle(51.96)
    st.up()
    st.goto(0,0)
    st.seth(90)


def ent_circles(line = 1):
    line = 1
    st.lt(60)
    st.fd(51.96 * 2)
    st.rt(60)
    st.fd(51.96)
    st.seth(0)
    st.down()
    st.circle(51.96)
    st.up()
    st.seth(90)
    st.goto(0,0)
    st.rt(60)
    st.fd(51.96 * 2)
    st.lt(60)
    st.fd(51.96)
    st.seth(0)
    st.down()
    st.circle(51.96)
    st.up()
    st.seth(270)
    st.goto(0,0)
    st.rt(60)
    st.fd(51.96 * 2)
    st.lt(60)
    st.fd(51.96)
    st.seth(180)
    st.down()
    st.circle(51.96)
    st.up()
    st.seth(270)
    st.goto(0,0)
    st.lt(60)
    st.fd(51.96 * 2)
    st.rt(60)
    st.fd(51.96)
    st.seth(180)
    st.down()
    st.circle(51.96)
    st.up()
    st.seth(90)
    st.goto(0,0)
    st.lt(60)
    st.fd(51.96*4)
    st.seth(270)
    st.fd(51.96)
    st.seth(180)
    st.down()
    st.circle(51.96)
    st.up()
    st.seth(90)
    st.goto(0,0)
    st.rt(60)
    st.fd(51.96*4)
    st.seth(270)
    st.fd(51.96)
    st.seth(180)
    st.down()
    st.circle(51.96)
    st.up()
    st.seth(90)
    st.goto(0,0)


def Wow():
    st.up()
    st.goto(0,0)
    st.pencolor('red')
    st.rt(60)
    st.fd(51.96*4)
    st.lt(120)
    st.down()
    st.fd(51.96*4)
    st.lt(60)
    st.fd(51.96*4)
    st.lt(60)
    st.fd(51.96*4)
    st.lt(60)
    st.fd(51.96*4)
    st.lt(60)
    st.fd(51.96*4)
    st.lt(60)
    st.fd(51.96*4)
    st.lt(120)
    st.fd(51.96*8)
    st.seth(90)
    st.fd(51.96*4)
    st.rt(120)
    st.fd(51.96*8)
    st.backward(51.96*4)
    st.seth(270)
    st.fd(51.96*4)
    st.bk(51.96*8)
    st.fd(51.96*2)
    st.lt(60)
    st.fd(51.96*2)
    st.rt(60)
    st.fd(51.96*2)
    st.rt(60)
    st.fd(51.96*2)
    st.rt(60)
    st.fd(51.96*2)
    st.rt(60)
    st.fd(51.96*2)
    st.rt(60)
    st.fd(51.96*2)
    st.fd(51.96*2)
    st.seth(270)
    st.fd(51.96*6)
    st.seth(90)
    st.lt(60)
    st.fd(51.96*6)
    st.rt(120)
    st.fd(51.96*2)
    st.seth(270)
    st.fd(51.96*4)
    st.bk(51.96*6)
    st.lt(60)
    st.fd(51.96*6)
    st.rt(120)
    st.fd(51.96*6)


tl = tt.Screen()
st = tt.RawTurtle(tl)
st.width(5)
st.color('red')
st.speed(0)
st.ht()
cont = 0
ps = [0, 1, 2, 3, 4, 5]
st.up()
st.goto(-60, -(207.85/2))
for i in range(1,7):
    st.fd(60)
    #st.write(cont)
    ps[cont] = st.pos()
    cont += 1
    st.fd(60)
    st.left(60)
print(ps)
st.color('black')
#st.speed(2)
st.speed(0)
st.goto(ps[0])
# center circle
st.left(90)
st.fd(51.96)
st.rt(90)
st.down()
st.circle(51.96)
st.up()
ext_circles(1)
ext_circles(2)
ent_circles(1)
Wow()
ext_circles(3)
tt.exitonclick()
15.263158
35
0.493966
720
3,480
2.379167
0.088889
0.140105
0.154116
0.205487
0.751313
0.742557
0.728546
0.702277
0.702277
0.699942
0
0.198871
0.287644
3,480
227
36
15.330396
0.492134
0.016092
0
0.843902
0
0
0.00468
0
0
0
0
0
0
1
0.014634
false
0
0.004878
0
0.019512
0.004878
0
0
0
null
0
0
1
0
1
1
1
1
1
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
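A note on the constant 51.96 that saturates the record above: it appears to be 60·sin 60° ≈ 51.9615, the circle radius that matches the 60-unit hexagonal walk the script draws, and the initial vertical offset 207.85 is consistent with 4 × 51.9615. A quick check of that reading (the geometric interpretation is an inference, not stated in the record):

import math

radius = 60 * math.sin(math.radians(60))
print(round(radius, 2))      # 51.96
print(round(4 * radius, 2))  # 207.85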
69d276ee7d345cc6a524db8c4109f2fde4a12c77
7,514
py
Python
tests/xml/test_xml_utils.py
virtualcell/Biosimulators_utils
1b34e1e0a9ace706d245e9d515d0fae1e55a248d
[ "MIT" ]
null
null
null
tests/xml/test_xml_utils.py
virtualcell/Biosimulators_utils
1b34e1e0a9ace706d245e9d515d0fae1e55a248d
[ "MIT" ]
null
null
null
tests/xml/test_xml_utils.py
virtualcell/Biosimulators_utils
1b34e1e0a9ace706d245e9d515d0fae1e55a248d
[ "MIT" ]
null
null
null
from biosimulators_utils.xml import utils
from lxml import etree
import os
import unittest


class XmlUtilsTestCase(unittest.TestCase):
    XML_FILENAME = os.path.join(os.path.dirname(__file__), '..', 'fixtures', 'BIOMD0000000297.xml')
    MULTIPLE_NAMESPACES_XML_FILENAME = os.path.join(
        os.path.dirname(__file__), '..', 'fixtures', 'sbml-fbc-textbook.xml')

    def test_get_namespaces_for_xml_doc(self):
        et = etree.parse(self.XML_FILENAME)
        self.assertEqual(utils.get_namespaces_for_xml_doc(et),
                         {'sbml': 'http://www.sbml.org/sbml/level2/version4'})

    def test_get_attributes_of_xpaths(self):
        ids = utils.get_attributes_of_xpaths(self.XML_FILENAME, [
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='BE']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='BUD']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Clb2']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@name='Clb2']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Clg']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='SBF_a']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Sic1']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Cdc20_a']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Cdh1_a']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Clb2']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='IE_a']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='PSwe1M']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Swe1M']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Swe1']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Swe1_total']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='M_0']",
            "/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id='BUD']",
            "/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id='not_exist']",
            "/invalid:target",
            "--invalid--",
        ], 'id')
        expected_ids = {
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='BE']": ['BE'],
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='PSwe1M']": ['PSwe1M'],
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Swe1M']": ['Swe1M'],
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Swe1']": ['Swe1'],
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='BUD']": [],
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Clb2']": [],
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Clg']": ['Clg'],
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='SBF_a']": [],
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Sic1']": [],
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Cdc20_a']": [],
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Cdh1_a']": [],
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='IE_a']": [],
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Swe1_total']": [],
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='M_0']": [],
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@name='Clb2']": ['Clb'],
            "/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id='BUD']": ['BUD'],
            "/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id='not_exist']": [],
            "/invalid:target": [],
            "--invalid--": [],
        }
        for target, target_ids in ids.items():
            self.assertEqual(target_ids, expected_ids[target], target)

        ids = utils.get_attributes_of_xpaths(self.XML_FILENAME, [
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species",
        ], 'id')
        self.assertEqual(len(ids["/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species"]), 19)

    def test_get_attributes_of_xpaths_in_namespaces(self):
        ids = utils.get_attributes_of_xpaths(self.MULTIPLE_NAMESPACES_XML_FILENAME, [
            "/sbml:sbml/sbml:model/fbc:listOfObjectives/fbc:objective[@fbc:id='obj']",
            "/sbml:sbml/sbml:model/fbc:listOfObjectives/fbc:objective[@fbc:id='inactive_obj']",
            "/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id='R_ACALD']",
            "/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id='R_ACALD']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='M_13dpg_c']",
        ], 'id')
        expected_ids = {
            "/sbml:sbml/sbml:model/fbc:listOfObjectives/fbc:objective[@fbc:id='obj']": [None],
            "/sbml:sbml/sbml:model/fbc:listOfObjectives/fbc:objective[@fbc:id='inactive_obj']": [],
            "/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id='R_ACALD']": ['R_ACALD'],
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='M_13dpg_c']": ['M_13dpg_c'],
        }
        self.assertEqual(ids, expected_ids)

        ids = utils.get_attributes_of_xpaths(self.MULTIPLE_NAMESPACES_XML_FILENAME, [
            "/sbml:sbml/sbml:model/fbc:listOfObjectives/fbc:objective[@fbc:id='obj']",
            "/sbml:sbml/sbml:model/fbc:listOfObjectives/fbc:objective[@fbc:id='inactive_obj']",
            "/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id='R_ACALD']",
            "/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id='R_ACALD']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='M_13dpg_c']",
        ], {'namespace': 'fbc', 'name': 'id'})
        expected_ids = {
            "/sbml:sbml/sbml:model/fbc:listOfObjectives/fbc:objective[@fbc:id='obj']": ['obj'],
            "/sbml:sbml/sbml:model/fbc:listOfObjectives/fbc:objective[@fbc:id='inactive_obj']": [],
            "/sbml:sbml/sbml:model/sbml:listOfReactions/sbml:reaction[@id='R_ACALD']": [None],
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='M_13dpg_c']": [None],
        }
        self.assertEqual(ids, expected_ids)

    def test_validate_xpaths_ref_to_unique_objects(self):
        utils.validate_xpaths_ref_to_unique_objects(self.XML_FILENAME, [
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='BE']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='PSwe1M']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Swe1M']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Swe1']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Clg']",
            "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@name='Clb2']",
            "/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id='BUD']",
        ], 'id')

        with self.assertRaises(ValueError):
            utils.validate_xpaths_ref_to_unique_objects(self.XML_FILENAME, [
                "/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id='not_exist']"
            ], 'id')

        with self.assertRaises(ValueError):
            utils.validate_xpaths_ref_to_unique_objects(self.XML_FILENAME, [
                '/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species'
            ], 'id')
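A minimal standalone sketch (not part of the test file above) of the namespace-aware lxml XPath evaluation that utilities like get_attributes_of_xpaths build on; the fixture filename and the 'sbml' prefix mapping mirror the tests, and the local path is an assumption:

# Hypothetical example; assumes a local copy of the BIOMD0000000297.xml fixture.
from lxml import etree

doc = etree.parse('BIOMD0000000297.xml')
namespaces = {'sbml': 'http://www.sbml.org/sbml/level2/version4'}
# prefixes in the XPath are resolved through the explicit namespaces mapping
species = doc.xpath(
    "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='BE']",
    namespaces=namespaces)
print([element.attrib.get('id') for element in species])  # expected: ['BE']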
63.142857
121
0.638009
923
7,514
5.062839
0.102925
0.219131
0.164348
0.232827
0.91269
0.88915
0.877167
0.867323
0.865611
0.854697
0
0.008693
0.173277
7,514
118
122
63.677966
0.743561
0
0
0.448598
0
0.570093
0.607266
0.579319
0
0
0
0
0.065421
1
0.037383
false
0
0.037383
0
0.102804
0
0
0
0
null
1
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
11
69d83c3ea2bdfc0a5df3dfa3c9510b78ce27320e
40,222
py
Python
code/scut/Mv2attn.py
fei-hdu/FaceAttract
19352db65756d9976b085a3cb0d1804eac844f97
[ "MIT" ]
9
2020-07-29T22:32:44.000Z
2022-01-12T02:09:09.000Z
code/scut/Mv2attn.py
fei-hdu/FaceAttract
19352db65756d9976b085a3cb0d1804eac844f97
[ "MIT" ]
2
2021-03-03T09:51:38.000Z
2021-10-10T07:52:51.000Z
code/scut/Mv2attn.py
fei-hdu/FaceAttract
19352db65756d9976b085a3cb0d1804eac844f97
[ "MIT" ]
3
2020-07-20T16:20:18.000Z
2021-12-21T03:20:10.000Z
import torch
import torch.nn as nn
import math


### V2 <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

class ImageAttention(nn.Module):
    """Soft attention over the 7x7 spatial grid of MobileNetV2 features."""

    def __init__(self):
        super(ImageAttention, self).__init__()
        # self.add_module('resnet', resnet(resnetBlock, in_channel=3, baseline=False))
        self.L1 = nn.Linear(in_features=1280, out_features=1280)
        self.L2 = nn.Linear(in_features=1280, out_features=1)
        self.L3 = nn.Linear(in_features=1280, out_features=1)
        # self.L4 = nn.Linear(in_features=512, out_features=1)

    def forward(self, x):
        # out = self.resnet(x)
        out = x
        out = torch.transpose(out, 1, 3).contiguous()   # N x 7 x 7 x 1280
        data = out.view(-1, 49, 1280)                   # N x 49 x 1280
        out = out.view(-1, 1280)                        # (N*49) x 1280
        out = self.L1(out)
        out = torch.tanh(out)                           # (N*49) x 1280
        out = self.L2(out)                              # (N*49) x 1
        out = out.view(-1, 49)                          # N x 49
        out = nn.functional.softmax(out, dim=1)         # attention weights over the 49 cells
        out = torch.unsqueeze(out, -1)                  # N x 49 x 1
        out = torch.sum(out * data, 1)                  # N x 1280 attention-weighted features
        out = self.L3(out)                              # N x 1 score
        # out = nn.functional.relu(out)
        # out = self.L4(out)
        return out


def conv_bn(inp, oup, stride):
    return nn.Sequential(
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True)
    )


def conv_1x1_bn(inp, oup):
    return nn.Sequential(
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True)
    )


class InvertedResidual(nn.Module):
    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        self.use_res_connect = self.stride == 1 and inp == oup

        self.conv = nn.Sequential(
            # pw
            nn.Conv2d(inp, inp * expand_ratio, 1, 1, 0, bias=False),
            nn.BatchNorm2d(inp * expand_ratio),
            nn.ReLU6(inplace=True),
            # dw
            nn.Conv2d(inp * expand_ratio, inp * expand_ratio, 3, stride, 1,
                      groups=inp * expand_ratio, bias=False),
            nn.BatchNorm2d(inp * expand_ratio),
            nn.ReLU6(inplace=True),
            # pw-linear
            nn.Conv2d(inp * expand_ratio, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        )

    def forward(self, x):
        if self.use_res_connect:
            return x + self.conv(x)
        else:
            return self.conv(x)


class MV2attn(nn.Module):
    def __init__(self, n_class=1000, input_size=224, width_mult=1.):
        super(MV2attn, self).__init__()
        # setting of inverted residual blocks
        self.interverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]

        # building first layer
        input_channel = int(32 * width_mult)
        self.last_channel = int(1280 * width_mult) if width_mult > 1.0 else 1280
        self.features = [conv_bn(3, input_channel, 2)]
        # building inverted residual blocks
        for t, c, n, s in self.interverted_residual_setting:
            output_channel = int(c * width_mult)
            for i in range(n):
                if i == 0:
                    self.features.append(InvertedResidual(input_channel, output_channel, s, t))
                else:
                    self.features.append(InvertedResidual(input_channel, output_channel, 1, t))
                input_channel = output_channel
        # building last several layers
        self.features.append(conv_1x1_bn(input_channel, self.last_channel))
        # self.features.append(nn.AvgPool2d(input_size / 32))
        # make it nn.Sequential
        self.features = nn.Sequential(*self.features)

        # building classifier
        # self.classifier = nn.Sequential(
        #     nn.Dropout(),
        #     nn.Linear(self.last_channel, n_class),
        # )
        self.attention = ImageAttention()

    def forward(self, x):
        x = self.features(x)        # N x 1280 x 7 x 7
        # x = self.AvgPool(x)
        # x = x.view(-1, self.last_channel)
        # x = self.classifier(x)
        x = self.attention(x)
        return x


def load_weights(net, finenet):
    """Copy the pretrained backbone parameters from `finenet` into `net`.

    Both models are wrapped in nn.DataParallel. Within `features`, modules '0'
    (conv_bn) and '18' (conv_1x1_bn) hold a Conv2d at index 0 and a BatchNorm2d
    at index 1; modules '1'..'17' are InvertedResidual blocks whose `conv`
    Sequential holds Conv2d layers at indices 0, 3, 6 and BatchNorm2d layers
    at indices 1, 4, 7.
    """
    def copy_bn(dst, src):
        # batch norm carries affine parameters plus running statistics
        dst.weight = src.weight
        dst.bias = src.bias
        dst.running_mean = src.running_mean
        dst.running_var = src.running_var

    dst_feats = net.module.features._modules
    src_feats = finenet.module.features._modules

    for key in ('0', '18'):
        dst_feats[key]._modules['0'].weight = src_feats[key]._modules['0'].weight
        copy_bn(dst_feats[key]._modules['1'], src_feats[key]._modules['1'])

    for i in range(1, 18):
        dst_conv = dst_feats[str(i)].conv._modules
        src_conv = src_feats[str(i)].conv._modules
        for conv_idx, bn_idx in (('0', '1'), ('3', '4'), ('6', '7')):
            dst_conv[conv_idx].weight = src_conv[conv_idx].weight
            copy_bn(dst_conv[bn_idx], src_conv[bn_idx])

    return net
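A minimal alternative sketch (an assumption, not code from this repository): the same backbone transfer can usually be expressed by filtering state dicts, which avoids per-attribute assignments. `load_weights_via_state_dict`, `model`, and `pretrained` are hypothetical names; both models are assumed to be wrapped in nn.DataParallel like `net`/`finenet`, so their parameter keys start with 'module.features'.

def load_weights_via_state_dict(model, pretrained):
    # copy every backbone tensor (conv weights, BN affine parameters, and BN
    # running statistics) whose key and shape match between the two models
    src = pretrained.state_dict()
    dst = model.state_dict()
    shared = {k: v for k, v in src.items()
              if k.startswith('module.features') and k in dst and v.shape == dst[k].shape}
    dst.update(shared)
    model.load_state_dict(dst)
    return model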
98.825553
143
0.710631
5,558
40,222
4.899424
0.027168
0.272484
0.408725
0.233557
0.933311
0.928501
0.92839
0.913628
0.618303
0.007932
0
0.041067
0.082219
40,222
406
144
99.068966
0.6966
0.023544
0
0.045455
0
0
0.033393
0
0
0
0
0
0
1
0.025568
false
0
0.008523
0.005682
0.0625
0
0
0
0
null
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
69da846375977d4fae6eaa4387dad14e51ca4bca
46,145
py
Python
test/unit/testvars.py
jgough/opensearch-curator
e8d7eb4d969eac551db9f99bd021d0c05e28dc35
[ "Apache-2.0" ]
null
null
null
test/unit/testvars.py
jgough/opensearch-curator
e8d7eb4d969eac551db9f99bd021d0c05e28dc35
[ "Apache-2.0" ]
null
null
null
test/unit/testvars.py
jgough/opensearch-curator
e8d7eb4d969eac551db9f99bd021d0c05e28dc35
[ "Apache-2.0" ]
null
null
null
import copy

import opensearchpy

fake_fail = Exception('Simulated Failure')
four_oh_one = opensearchpy.TransportError(401, "simulated error")
four_oh_four = opensearchpy.TransportError(404, "simulated error")
get_alias_fail = opensearchpy.NotFoundError(404, "simulated error")
named_index = 'index_name'
named_indices = ["index-2015.01.01", "index-2015.02.01"]
open_index = {'metadata': {'indices': {named_index: {'state': 'open'}}}}
closed_index = {'metadata': {'indices': {named_index: {'state': 'close'}}}}
cat_open_index = [{'status': 'open'}]
cat_closed_index = [{'status': 'close'}]
open_indices = {'metadata': {'indices': {'index1': {'state': 'open'}, 'index2': {'state': 'open'}}}}
closed_indices = {'metadata': {'indices': {'index1': {'state': 'close'}, 'index2': {'state': 'close'}}}}
named_alias = 'alias_name'
alias_retval = {"pre_aliased_index": {"aliases": {named_alias: {}}}}
rollable_alias = {"index-000001": {"aliases": {named_alias: {}}}}
rollover_conditions = {'conditions': {'max_age': '1s'}}
dry_run_rollover = {
    "acknowledged": True, "shards_acknowledged": True,
    "old_index": "index-000001", "new_index": "index-000002",
    "rolled_over": False, "dry_run": True,
    "conditions": {"max_age": "1s"},
}
aliases_retval = {
    "index1": {"aliases": {named_alias: {}}},
    "index2": {"aliases": {named_alias: {}}},
}
alias_one_add = [{'add': {'alias': 'alias', 'index': 'index_name'}}]
alias_one_add_with_extras = [
    {'add': {
        'alias': 'alias', 'index': 'index_name',
        'filter': {'term': {'user': 'kimchy'}},
    }},
]
alias_one_rm = [{'remove': {'alias': 'my_alias', 'index': named_index}}]
alias_one_body = {"actions": [
    {'remove': {'alias': 'alias', 'index': 'index_name'}},
    {'add': {'alias': 'alias', 'index': 'index_name'}},
]}
alias_two_add = [
    {'add': {'alias': 'alias', 'index': 'index-2016.03.03'}},
    {'add': {'alias': 'alias', 'index': 'index-2016.03.04'}},
]
alias_two_rm = [
    {'remove': {'alias': 'my_alias', 'index': 'index-2016.03.03'}},
    {'remove': {'alias': 'my_alias', 'index': 'index-2016.03.04'}},
]
alias_success = {"acknowledged": True}
allocation_in = {named_index: {'settings': {'index': {'routing': {'allocation': {'require': {'foo': 'bar'}}}}}}}
allocation_out = {named_index: {'settings': {'index': {'routing': {'allocation': {'require': {'not': 'foo'}}}}}}}
indices_space = {'indices': {
    'index1': {'index': {'primary_size_in_bytes': 1083741824}},
    'index2': {'index': {'primary_size_in_bytes': 1083741824}},
}}
snap_name = 'snap_name'
repo_name = 'repo_name'
test_repo = {repo_name: {'type': 'fs', 'settings': {'compress': 'true', 'location': '/tmp/repos/repo_name'}}}
test_repos = {
    'TESTING': {'type': 'fs', 'settings': {'compress': 'true', 'location': '/tmp/repos/TESTING'}},
    repo_name: {'type': 'fs', 'settings': {'compress': 'true', 'location': '/rmp/repos/repo_name'}},
}
snap_running = {'snapshots': ['running']}
nosnap_running = {'snapshots': []}


def _snap_entry(state, name, start_time, end_time, start_time_in_millis):
    # one entry of a snapshot listing; every test snapshot shares these shard counts
    return {
        'duration_in_millis': 60000, 'start_time': start_time,
        'shards': {'successful': 4, 'failed': 0, 'total': 4},
        'end_time_in_millis': 0, 'state': state, 'snapshot': name,
        'end_time': end_time, 'indices': named_indices, 'failures': [],
        'start_time_in_millis': start_time_in_millis,
    }


_FEB = ('2015-02-01T00:00:00.000Z', '2015-02-01T00:00:01.000Z', 1422748800)
_MAR = ('2015-03-01T00:00:02.000Z', '2015-03-01T00:00:03.000Z', 1425168002)

snapshot = {'snapshots': [_snap_entry('SUCCESS', snap_name, *_FEB)]}
oneinprogress = {'snapshots': [_snap_entry('IN_PROGRESS', snap_name, *_MAR)]}
partial = {'snapshots': [_snap_entry('PARTIAL', snap_name, *_FEB)]}
failed = {'snapshots': [_snap_entry('FAILED', snap_name, *_FEB)]}
othersnap = {'snapshots': [_snap_entry('SOMETHINGELSE', snap_name, *_FEB)]}
snapshots = {'snapshots': [
    _snap_entry('SUCCESS', snap_name, *_FEB),
    _snap_entry('SUCCESS', 'snapshot-2015.03.01', *_MAR),
]}
inprogress = {'snapshots': [
    _snap_entry('SUCCESS', snap_name, *_FEB),
    _snap_entry('IN_PROGRESS', 'snapshot-2015.03.01', *_MAR),
]}
highly_unlikely = {'snapshots': [
    _snap_entry('IN_PROGRESS', snap_name, *_FEB),
    _snap_entry('IN_PROGRESS', 'snapshot-2015.03.01', *_MAR),
]}
snap_body_all = {
    "ignore_unavailable": False,
    "include_global_state": True,
    "partial": False,
    "indices": "_all",
}
snap_body = {
    "ignore_unavailable": False,
    "include_global_state": True,
    "partial": False,
    "indices": "index-2015.01.01,index-2015.02.01",
}
verified_nodes = {'nodes': {'nodeid1': {'name': 'node1'}, 'nodeid2': {'name': 'node2'}}}
synced_pass = {
    "_shards": {"total": 1, "successful": 1, "failed": 0},
    "index_name": {
        "total": 1, "successful": 1, "failed": 0,
        "failures": [],
    },
}
synced_fail = {
    "_shards": {"total": 1, "successful": 0, "failed": 1},
    "index_name": {
        "total": 1, "successful": 0, "failed": 1,
        "failures": [
            {"shard": 0, "reason": "pending operations",
             "routing": {"state": "STARTED", "primary": True, "node": "nodeid1",
                         "relocating_node": None, "shard": 0, "index": "index_name"}},
        ],
    },
}
sync_conflict = opensearchpy.ConflictError(
    409,
    '{"_shards":{"total":1,"successful":0,"failed":1},"index_name":{"total":1,"successful":0,"failed":1,"failures":[{"shard":0,"reason":"pending operations","routing":{"state":"STARTED","primary":true,"node":"nodeid1","relocating_node":null,"shard":0,"index":"index_name"}}]}})',
    synced_fail,
)
synced_fails = {
    "_shards": {"total": 2, "successful": 1, "failed": 1},
    "index1": {
        "total": 1, "successful": 0, "failed": 1,
        "failures": [
            {"shard": 0, "reason": "pending operations",
             "routing": {"state": "STARTED", "primary": True, "node": "nodeid1",
                         "relocating_node": None, "shard": 0, "index": "index_name"}},
        ],
    },
    "index2": {
        "total": 1, "successful": 1, "failed": 0,
        "failures": [],
    },
}


def _index_settings(uuid, shards, creation_date, tag, state='open'):
    # index metadata as found in the cluster state; creation_date=None omits the key
    index = {
        'number_of_replicas': '1', 'uuid': uuid, 'number_of_shards': shards,
        'routing': {'allocation': {'include': {'tag': tag}}},
        'version': {'created': '2020099'}, 'refresh_interval': '5s',
    }
    if creation_date is not None:
        index['creation_date'] = creation_date
    return {'state': state, 'aliases': ['my_alias'], 'mappings': {},
            'settings': {'index': index}}


settings_one = {named_index: _index_settings('random_uuid_string_here', '2', '1456963200172', 'foo')}
settings_1_get_aliases = {named_index: {"aliases": {'my_alias': {}}}}
settings_two = {
    'index-2016.03.03': _index_settings('random_uuid_string_here', '5', '1456963200172', 'foo'),
    'index-2016.03.04': _index_settings('another_random_uuid_string', '5', '1457049600812', 'bar'),
}
settings_2_get_aliases = {
    "index-2016.03.03": {"aliases": {'my_alias': {}}},
    "index-2016.03.04": {"aliases": {'my_alias': {}}},
}
settings_2_closed = {
    'index-2016.03.03': _index_settings('random_uuid_string_here', '5', '1456963200172', 'foo', state='close'),
    'index-2016.03.04': _index_settings('another_random_uuid_string', '5', '1457049600812', 'bar'),
}
settings_two_no_cd = {
    'index-2016.03.03': _index_settings('random_uuid_string_here', '5', '1456963200172', 'foo'),
    'index-2016.03.04': _index_settings('another_random_uuid_string', '5', None, 'bar'),
}
settings_four = {
    'a-2016.03.03': _index_settings('random_uuid_string_here', '5', '1456963200172', 'foo'),
    'b-2016.03.04': _index_settings('another_random_uuid_string', '5', '1457049600812', 'bar'),
    'c-2016.03.05': _index_settings('random_uuid_string_here', '5', '1457136000933', 'foo', state='close'),
    'd-2016.03.06': _index_settings('another_random_uuid_string', '5', '1457222400527', 'bar'),
}
settings_named = {
    'index-2015.01.01': _index_settings('random_uuid_string_here', '5', '1456963200172', 'foo'),
    'index-2015.02.01': _index_settings('another_random_uuid_string', '5', '1457049600812', 'bar'),
}
clu_state_one = {'metadata': {'indices': settings_one}}
clu_state_two = {'metadata': {'indices': settings_two}}
cs_two_closed = {'metadata': {'indices': settings_2_closed}}
clu_state_two_no_cd = {'metadata': {'indices': settings_two_no_cd}}
clu_state_four = {'metadata': {'indices': settings_four}}


def _index_stats(docs, size_in_bytes, primary_docs, primary_size_in_bytes):
    return {
        'total': {
            'docs': {'count': docs, 'deleted': 0},
            'store': {'size_in_bytes': size_in_bytes, 'throttle_time_in_millis': 0},
        },
        'primaries': {
            'docs': {'count': primary_docs, 'deleted': 0},
            'store': {'size_in_bytes': primary_size_in_bytes, 'throttle_time_in_millis': 0},
        },
    }


stats_one = {'indices': {named_index: _index_stats(6374962, 1115219663, 3187481, 557951789)}}
stats_two = {'indices': {
    'index-2016.03.03': _index_stats(6374962, 1115219663, 3187481, 557951789),
    'index-2016.03.04': _index_stats(6377544, 1120891046, 3188772, 560677114),
}}
stats_four = {'indices': {
    'a-2016.03.03': _index_stats(6374962, 1115219663, 3187481, 557951789),
    'b-2016.03.04': _index_stats(6377544, 1120891046, 3188772, 560677114),
    # c-2016.03.05 is CLOSED, ergo, not present
    # (it would be _index_stats(6266434, 1120882166, 3133217, 560441083))
    'd-2016.03.06': _index_stats(6266436, 1120882168, 3133218, 560441084),
}}


def _field_stats(min_as_string, min_value, max_as_string, max_value, doc_count, sum_doc_freq):
    return {'fields': {'timestamp': {
        'density': 100,
        'min_value_as_string': min_as_string, 'min_value': min_value,
        'max_value_as_string': max_as_string, 'max_value': max_value,
        'max_doc': doc_count, 'doc_count': doc_count,
        'sum_total_term_freq': -1, 'sum_doc_freq': sum_doc_freq,
    }}}


_FS_0303 = _field_stats('2016-03-03T00:00:06.189Z', 1456963206189,
                        '2016-03-03T23:59:59.152Z', 1457049599152, 415651, 1662604)
_FS_0304 = _field_stats('2016-03-04T00:00:00.812Z', 1457049600812,
                        '2016-03-04T23:59:59.223Z', 1457135999223, 426762, 1673715)
_FS_0306 = _field_stats('2016-03-04T00:00:00.812Z', 1457222400567,
                        '2016-03-04T23:59:59.223Z', 1457308799223, 426762, 1673715)

fieldstats_one = {'indices': {named_index: _FS_0303}}
fieldstats_two = {'indices': {
    'index-2016.03.03': _FS_0303,
    'index-2016.03.04': _FS_0304,
}}
fieldstats_four = {'indices': {
    'a-2016.03.03': _FS_0303,
    'b-2016.03.04': _FS_0304,
    'd-2016.03.06': _FS_0306,
}}
fieldstats_query = {'aggregations': {
    'min': {'value_as_string': '2016-03-03T00:00:06.189Z', 'value': 1456963206189},
    'max': {'value': 1457049599152, 'value_as_string': '2016-03-03T23:59:59.152Z'},
}}
shards = {'indices': {named_index: {'shards': {
    '0': [{'num_search_segments': 15}, {'num_search_segments': 21}],
    '1': [{'num_search_segments': 19}, {'num_search_segments': 16}],
}}}}
fm_shards = {'indices': {named_index: {'shards': {
    '0': [{'num_search_segments': 1}, {'num_search_segments': 1}],
    '1': [{'num_search_segments': 1}, {'num_search_segments': 1}],
}}}}
loginfo = {"loglevel": "INFO", "logfile": None, "logformat": "default"}
default_format = '%(asctime)s %(levelname)-9s %(message)s'
debug_format = '%(asctime)s %(levelname)-9s %(name)22s %(funcName)22s:%(lineno)-4d %(message)s'

yamlconfig = '''
---
# Remember, leave a key empty to use the default value.  None will be a string,
# not a Python "NoneType"
client:
  hosts: localhost
  port: 9200
  url_prefix:
  use_ssl: False
  certificate:
  client_cert:
  client_key:
  ssl_no_validate: False
  http_auth:
  timeout: 30
  master_only: False

options:
  dry_run: False
  loglevel: DEBUG
  logfile:
  logformat: default
  quiet: False
'''

pattern_ft = '''
---
actions:
  1:
    description: open all matching indices
    action: open
    options:
      continue_if_exception: False
      disable_action: False
    filters:
      - filtertype: pattern
        kind: prefix
        value: a
        exclude: False
'''
age_ft = '''
---
actions:
  1:
    description: open all matching indices
    action: open
    options:
      continue_if_exception: False
      disable_action: False
    filters:
      - filtertype: age
        source: name
        direction: older
        timestring: '%Y.%m.%d'
        unit: seconds
        unit_count: 0
        epoch: 1456963201
'''
space_ft = '''
---
actions:
  1:
    description: open all matching indices
    action: open
    options:
      continue_if_exception: False
      disable_action: False
    filters:
      - filtertype: space
        disk_space: 2.1
        source: name
        use_age: True
        timestring: '%Y.%m.%d'
'''
forcemerge_ft = '''
---
actions:
  1:
    description: open all matching indices
    action: open
    options:
      continue_if_exception: False
      disable_action: False
    filters:
      - filtertype: forcemerged
        max_num_segments: 2
'''
allocated_ft = '''
---
actions:
  1:
    description: open all matching indices
    action: open
    options:
      continue_if_exception: False
      disable_action: False
    filters:
      - filtertype: allocated
        key: tag
        value: foo
        allocation_type: include
'''
kibana_ft = '''
---
actions:
  1:
    description: open all matching indices
    action: open
    options:
      continue_if_exception: False
      disable_action: False
    filters:
      - filtertype: kibana
'''
opened_ft = '''
---
actions:
  1:
    description: open all matching indices
    action: open
    options:
      continue_if_exception: False
      disable_action: False
    filters:
      - filtertype: opened
'''
closed_ft = '''
---
actions:
  1:
    description: open all matching indices
    action: open
    options:
      continue_if_exception: False
      disable_action: False
    filters:
      - filtertype: closed
'''
none_ft = '''
---
actions:
  1:
    description: open all matching indices
    action: open
    options:
      continue_if_exception: False
      disable_action: False
    filters:
      - filtertype: none
'''
invalid_ft = '''
---
actions:
  1:
    description: open all matching indices
    action: open
    options:
      continue_if_exception: False
      disable_action: False
    filters:
      - filtertype: sir_not_appearing_in_this_film
'''
snap_age_ft = '''
---
actions:
  1:
    description: test
    action: delete_snapshots
    options:
      continue_if_exception: False
      disable_action: False
    filters:
      - filtertype: age
        direction: older
        unit: days
        unit_count: 1
'''
snap_pattern_ft = '''
---
actions:
  1:
    description: test
    action: delete_snapshots
    options:
      continue_if_exception: False
      disable_action: False
    filters:
      - filtertype: pattern
        kind: prefix
        value: sna
'''
snap_none_ft = '''
---
actions:
  1:
    description: test
    action: delete_snapshots
    options:
      continue_if_exception: False
      disable_action: False
    filters:
      - filtertype: none
'''
size_ft = '''
---
actions:
  1:
    description: open all matching indices
    action: open
    options:
      continue_if_exception: False
      disable_action: False
    filters:
      - filtertype: size
        size_threshold: 1.04
        size_behavior: total
        threshold_behavior: less_than
'''
not_rollable_name = {'index': {'aliases': {'foo': {}}}}
not_rollable_multiple = {'index-a': {'aliases': {'foo': {}}}, 'index-b': {'aliases': {'foo': {}}}}
not_rollable_non_numeric = {'index-a': {'aliases': {'foo': {}}}}
is_rollable_2digits = {'index-00001': {'aliases': {'foo': {}}}}
is_rollable_hypenated = {'index-2017.03.07-1': {'aliases': {'foo': {}}}}
generic_task = {'task': 'I0ekFjMhSPCQz7FUs1zJOg:54510686'}
incomplete_task = {
    'completed': False,
    'task': {
        'node': 'I0ekFjMhSPCQz7FUs1zJOg',
        'status': {'retries': {'bulk': 0, 'search': 0}, 'updated': 0, 'batches': 3647,
                   'throttled_until_millis': 0, 'throttled_millis': 0, 'noops': 0,
                   'created': 3646581, 'deleted': 0, 'requests_per_second': -1.0,
                   'version_conflicts': 0, 'total': 3646581},
        'description': 'UNIT TEST',
        'running_time_in_nanos': 1637039537721, 'cancellable': True,
        'action': 'indices:data/write/reindex', 'type': 'transport',
        'id': 54510686, 'start_time_in_millis': 1489695981997,
    },
    'response': {'retries': {'bulk': 0, 'search': 0}, 'updated': 0, 'batches': 3647,
                 'throttled_until_millis': 0, 'throttled_millis': 0, 'noops': 0,
                 'created': 3646581, 'deleted': 0, 'took': 1636917,
                 'requests_per_second': -1.0, 'timed_out': False, 'failures': [],
                 'version_conflicts': 0, 'total': 3646581},
}
# completed_task is identical to incomplete_task except that 'completed' is True
completed_task = copy.deepcopy(incomplete_task)
completed_task['completed'] = True
completed_task_zero_total = {
    'completed': True,
    'task': {
        'node': 'I0ekFjMhSPCQz7FUs1zJOg',
        'status': {'retries': {'bulk': 0, 'search': 0}, 'updated': 0, 'batches': 0,
                   'throttled_until_millis': 0, 'throttled_millis': 0, 'noops': 0,
                   'created': 0, 'deleted': 0, 'requests_per_second': -1.0,
                   'version_conflicts': 0, 'total': 0},
        'description': 'UNIT TEST',
        'running_time_in_nanos': 1637039537721, 'cancellable': True,
        'action': 'indices:data/write/reindex', 'type': 'transport',
        'id': 54510686,
u'start_time_in_millis': 1489695981997}, u'response': {u'retries': {u'bulk': 0, u'search': 0}, u'updated': 0, u'batches': 0, u'throttled_until_millis': 0, u'throttled_millis': 0, u'noops': 0, u'created': 0, u'deleted': 0, u'took': 1636917, u'requests_per_second': -1.0, u'timed_out': False, u'failures': [], u'version_conflicts': 0, u'total': 0}} recovery_output = {'index-2015.01.01': {'shards' : [{'stage':'DONE'}]}, 'index-2015.02.01': {'shards' : [{'stage':'DONE'}]}} unrecovered_output = {'index-2015.01.01': {'shards' : [{'stage':'INDEX'}]}, 'index-2015.02.01': {'shards' : [{'stage':'INDEX'}]}} cluster_health = { "cluster_name": "unit_test", "status": "green", "timed_out": False, "number_of_nodes": 7, "number_of_data_nodes": 3, "active_primary_shards": 235, "active_shards": 471, "relocating_shards": 0, "initializing_shards": 0, "unassigned_shards": 0, "delayed_unassigned_shards": 0, "number_of_pending_tasks": 0, "task_max_waiting_in_queue_millis": 0, "active_shards_percent_as_number": 100} reindex_basic = { 'source': { 'index': named_index }, 'dest': { 'index': 'other_index' } } reindex_replace = { 'source': { 'index': 'REINDEX_SELECTION' }, 'dest': { 'index': 'other_index' } } reindex_migration = { 'source': { 'index': named_index }, 'dest': { 'index': 'MIGRATION' } } index_list_966 = ['indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d'] recovery_966 = {u'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d': {u'shards': [{u'total_time': u'10.1m', u'index': {u'files': {u'reused': 0, u'total': 15, u'percent': u'100.0%', u'recovered': 15}, u'total_time': u'10.1m', u'target_throttle_time': u'-1', u'total_time_in_millis': 606577, u'source_throttle_time_in_millis': 0, u'source_throttle_time': u'-1', u'target_throttle_time_in_millis': 0, u'size': {u'recovered_in_bytes': 3171596177, u'reused': u'0b', u'total_in_bytes': 3171596177, u'percent': u'100.0%', u'reused_in_bytes': 0, u'total': u'2.9gb', u'recovered': u'2.9gb'}}, u'verify_index': {u'total_time': u'0s', u'total_time_in_millis': 0, u'check_index_time_in_millis': 0, u'check_index_time': u'0s'}, u'target': {u'ip': u'x.x.x.7', u'host': u'x.x.x.7', u'transport_address': u'x.x.x.7:9300', u'id': u'K4xQPaOFSWSPLwhb0P47aQ', u'name': u'staging-es5-forcem'}, u'source': {u'index': u'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d', u'version': u'5.1.1', u'snapshot': u'force-merge', u'repository': u'force-merge'}, u'translog': {u'total_time': u'45ms', u'percent': u'100.0%', u'total_time_in_millis': 45, u'total_on_start': 0, u'total': 0, u'recovered': 0}, u'start_time': u'2017-05-16T11:54:48.183Z', u'primary': True, u'total_time_in_millis': 606631, u'stop_time_in_millis': 1494936294815, u'stop_time': u'2017-05-16T12:04:54.815Z', u'stage': u'DONE', u'type': u'SNAPSHOT', u'id': 1, u'start_time_in_millis': 1494935688183}, {u'total_time': u'10m', u'index': {u'files': {u'reused': 0, u'total': 15, u'percent': u'100.0%', u'recovered': 15}, u'total_time': u'10m', u'target_throttle_time': u'-1', u'total_time_in_millis': 602302, u'source_throttle_time_in_millis': 0, u'source_throttle_time': u'-1', u'target_throttle_time_in_millis': 0, u'size': {u'recovered_in_bytes': 3162299781, u'reused': u'0b', u'total_in_bytes': 3162299781, u'percent': u'100.0%', u'reused_in_bytes': 0, u'total': u'2.9gb', u'recovered': u'2.9gb'}}, u'verify_index': {u'total_time': u'0s', u'total_time_in_millis': 0, u'check_index_time_in_millis': 0, u'check_index_time': u'0s'}, u'target': {u'ip': u'x.x.x.7', u'host': u'x.x.x.7', u'transport_address': u'x.x.x.7:9300', u'id': 
u'K4xQPaOFSWSPLwhb0P47aQ', u'name': u'staging-es5-forcem'}, u'source': {u'index': u'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d', u'version': u'5.1.1', u'snapshot': u'force-merge', u'repository': u'force-merge'}, u'translog': {u'total_time': u'389ms', u'percent': u'100.0%', u'total_time_in_millis': 389, u'total_on_start': 0, u'total': 0, u'recovered': 0}, u'start_time': u'2017-05-16T12:04:51.606Z', u'primary': True, u'total_time_in_millis': 602698, u'stop_time_in_millis': 1494936894305, u'stop_time': u'2017-05-16T12:14:54.305Z', u'stage': u'DONE', u'type': u'SNAPSHOT', u'id': 5, u'start_time_in_millis': 1494936291606}, {u'total_time': u'10.1m', u'index': {u'files': {u'reused': 0, u'total': 15, u'percent': u'100.0%', u'recovered': 15}, u'total_time': u'10.1m', u'target_throttle_time': u'-1', u'total_time_in_millis': 606692, u'source_throttle_time_in_millis': 0, u'source_throttle_time': u'-1', u'target_throttle_time_in_millis': 0, u'size': {u'recovered_in_bytes': 3156050994, u'reused': u'0b', u'total_in_bytes': 3156050994, u'percent': u'100.0%', u'reused_in_bytes': 0, u'total': u'2.9gb', u'recovered': u'2.9gb'}}, u'verify_index': {u'total_time': u'0s', u'total_time_in_millis': 0, u'check_index_time_in_millis': 0, u'check_index_time': u'0s'}, u'target': {u'ip': u'x.x.x.7', u'host': u'x.x.x.7', u'transport_address': u'x.x.x.7:9300', u'id': u'K4xQPaOFSWSPLwhb0P47aQ', u'name': u'staging-es5-forcem'}, u'source': {u'index': u'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d', u'version': u'5.1.1', u'snapshot': u'force-merge', u'repository': u'force-merge'}, u'translog': {u'total_time': u'38ms', u'percent': u'100.0%', u'total_time_in_millis': 38, u'total_on_start': 0, u'total': 0, u'recovered': 0}, u'start_time': u'2017-05-16T11:54:48.166Z', u'primary': True, u'total_time_in_millis': 606737, u'stop_time_in_millis': 1494936294904, u'stop_time': u'2017-05-16T12:04:54.904Z', u'stage': u'DONE', u'type': u'SNAPSHOT', u'id': 3, u'start_time_in_millis': 1494935688166}, {u'total_time': u'10m', u'index': {u'files': {u'reused': 0, u'total': 15, u'percent': u'100.0%', u'recovered': 15}, u'total_time': u'10m', u'target_throttle_time': u'-1', u'total_time_in_millis': 602010, u'source_throttle_time_in_millis': 0, u'source_throttle_time': u'-1', u'target_throttle_time_in_millis': 0, u'size': {u'recovered_in_bytes': 3153017440, u'reused': u'0b', u'total_in_bytes': 3153017440, u'percent': u'100.0%', u'reused_in_bytes': 0, u'total': u'2.9gb', u'recovered': u'2.9gb'}}, u'verify_index': {u'total_time': u'0s', u'total_time_in_millis': 0, u'check_index_time_in_millis': 0, u'check_index_time': u'0s'}, u'target': {u'ip': u'x.x.x.7', u'host': u'x.x.x.7', u'transport_address': u'x.x.x.7:9300', u'id': u'K4xQPaOFSWSPLwhb0P47aQ', u'name': u'staging-es5-forcem'}, u'source': {u'index': u'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d', u'version': u'5.1.1', u'snapshot': u'force-merge', u'repository': u'force-merge'}, u'translog': {u'total_time': u'558ms', u'percent': u'100.0%', u'total_time_in_millis': 558, u'total_on_start': 0, u'total': 0, u'recovered': 0}, u'start_time': u'2017-05-16T12:04:51.369Z', u'primary': True, u'total_time_in_millis': 602575, u'stop_time_in_millis': 1494936893944, u'stop_time': u'2017-05-16T12:14:53.944Z', u'stage': u'DONE', u'type': u'SNAPSHOT', u'id': 4, u'start_time_in_millis': 1494936291369}, {u'total_time': u'10m', u'index': {u'files': {u'reused': 0, u'total': 15, u'percent': u'100.0%', u'recovered': 15}, u'total_time': u'10m', u'target_throttle_time': u'-1', 
u'total_time_in_millis': 600492, u'source_throttle_time_in_millis': 0, u'source_throttle_time': u'-1', u'target_throttle_time_in_millis': 0, u'size': {u'recovered_in_bytes': 3153347402, u'reused': u'0b', u'total_in_bytes': 3153347402, u'percent': u'100.0%', u'reused_in_bytes': 0, u'total': u'2.9gb', u'recovered': u'2.9gb'}}, u'verify_index': {u'total_time': u'0s', u'total_time_in_millis': 0, u'check_index_time_in_millis': 0, u'check_index_time': u'0s'}, u'target': {u'ip': u'x.x.x.7', u'host': u'x.x.x.7', u'transport_address': u'x.x.x.7:9300', u'id': u'K4xQPaOFSWSPLwhb0P47aQ', u'name': u'staging-es5-forcem'}, u'source': {u'index': u'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d', u'version': u'5.1.1', u'snapshot': u'force-merge', u'repository': u'force-merge'}, u'translog': {u'total_time': u'445ms', u'percent': u'100.0%', u'total_time_in_millis': 445, u'total_on_start': 0, u'total': 0, u'recovered': 0}, u'start_time': u'2017-05-16T12:04:54.817Z', u'primary': True, u'total_time_in_millis': 600946, u'stop_time_in_millis': 1494936895764, u'stop_time': u'2017-05-16T12:14:55.764Z', u'stage': u'DONE', u'type': u'SNAPSHOT', u'id': 6, u'start_time_in_millis': 1494936294817}, {u'total_time': u'10m', u'index': {u'files': {u'reused': 0, u'total': 15, u'percent': u'100.0%', u'recovered': 15}, u'total_time': u'10m', u'target_throttle_time': u'-1', u'total_time_in_millis': 603194, u'source_throttle_time_in_millis': 0, u'source_throttle_time': u'-1', u'target_throttle_time_in_millis': 0, u'size': {u'recovered_in_bytes': 3148003580, u'reused': u'0b', u'total_in_bytes': 3148003580, u'percent': u'100.0%', u'reused_in_bytes': 0, u'total': u'2.9gb', u'recovered': u'2.9gb'}}, u'verify_index': {u'total_time': u'0s', u'total_time_in_millis': 0, u'check_index_time_in_millis': 0, u'check_index_time': u'0s'}, u'target': {u'ip': u'x.x.x.7', u'host': u'x.x.x.7', u'transport_address': u'x.x.x.7:9300', u'id': u'K4xQPaOFSWSPLwhb0P47aQ', u'name': u'staging-es5-forcem'}, u'source': {u'index': u'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d', u'version': u'5.1.1', u'snapshot': u'force-merge', u'repository': u'force-merge'}, u'translog': {u'total_time': u'225ms', u'percent': u'100.0%', u'total_time_in_millis': 225, u'total_on_start': 0, u'total': 0, u'recovered': 0}, u'start_time': u'2017-05-16T11:54:48.173Z', u'primary': True, u'total_time_in_millis': 603429, u'stop_time_in_millis': 1494936291602, u'stop_time': u'2017-05-16T12:04:51.602Z', u'stage': u'DONE', u'type': u'SNAPSHOT', u'id': 2, u'start_time_in_millis': 1494935688173}, {u'total_time': u'10m', u'index': {u'files': {u'reused': 0, u'total': 15, u'percent': u'100.0%', u'recovered': 15}, u'total_time': u'10m', u'target_throttle_time': u'-1', u'total_time_in_millis': 601453, u'source_throttle_time_in_millis': 0, u'source_throttle_time': u'-1', u'target_throttle_time_in_millis': 0, u'size': {u'recovered_in_bytes': 3168132171, u'reused': u'0b', u'total_in_bytes': 3168132171, u'percent': u'100.0%', u'reused_in_bytes': 0, u'total': u'2.9gb', u'recovered': u'2.9gb'}}, u'verify_index': {u'total_time': u'0s', u'total_time_in_millis': 0, u'check_index_time_in_millis': 0, u'check_index_time': u'0s'}, u'target': {u'ip': u'x.x.x.7', u'host': u'x.x.x.7', u'transport_address': u'x.x.x.7:9300', u'id': u'K4xQPaOFSWSPLwhb0P47aQ', u'name': u'staging-es5-forcem'}, u'source': {u'index': u'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d', u'version': u'5.1.1', u'snapshot': u'force-merge', u'repository': u'force-merge'}, u'translog': {u'total_time': u'43ms', 
u'percent': u'100.0%', u'total_time_in_millis': 43, u'total_on_start': 0, u'total': 0, u'recovered': 0}, u'start_time': u'2017-05-16T12:04:54.905Z', u'primary': True, u'total_time_in_millis': 601503, u'stop_time_in_millis': 1494936896408, u'stop_time': u'2017-05-16T12:14:56.408Z', u'stage': u'DONE', u'type': u'SNAPSHOT', u'id': 7, u'start_time_in_millis': 1494936294905}, {u'total_time': u'10m', u'index': {u'files': {u'reused': 0, u'total': 15, u'percent': u'100.0%', u'recovered': 15}, u'total_time': u'10m', u'target_throttle_time': u'-1', u'total_time_in_millis': 602897, u'source_throttle_time_in_millis': 0, u'source_throttle_time': u'-1', u'target_throttle_time_in_millis': 0, u'size': {u'recovered_in_bytes': 3153750393, u'reused': u'0b', u'total_in_bytes': 3153750393, u'percent': u'100.0%', u'reused_in_bytes': 0, u'total': u'2.9gb', u'recovered': u'2.9gb'}}, u'verify_index': {u'total_time': u'0s', u'total_time_in_millis': 0, u'check_index_time_in_millis': 0, u'check_index_time': u'0s'}, u'target': {u'ip': u'x.x.x.7', u'host': u'x.x.x.7', u'transport_address': u'x.x.x.7:9300', u'id': u'K4xQPaOFSWSPLwhb0P47aQ', u'name': u'staging-es5-forcem'}, u'source': {u'index': u'indexv0.2_2017-02-12_536a9247f9fa4fc7a7942ad46ea14e0d', u'version': u'5.1.1', u'snapshot': u'force-merge', u'repository': u'force-merge'}, u'translog': {u'total_time': u'271ms', u'percent': u'100.0%', u'total_time_in_millis': 271, u'total_on_start': 0, u'total': 0, u'recovered': 0}, u'start_time': u'2017-05-16T11:54:48.191Z', u'primary': True, u'total_time_in_millis': 603174, u'stop_time_in_millis': 1494936291366, u'stop_time': u'2017-05-16T12:04:51.366Z', u'stage': u'DONE', u'type': u'SNAPSHOT', u'id': 0, u'start_time_in_millis': 1494935688191}]}} no_snap_tasks = {u'nodes': {u'node1': {u'tasks': {u'task1': {u'action': u'cluster:monitor/tasks/lists[n]'}}}}} snap_task = {u'nodes': {u'node1': {u'tasks': {u'task1': {u'action': u'cluster:admin/snapshot/delete'}}}}} watermark_persistent = {u'persistent':{u'cluster':{u'routing':{u'allocation':{u'disk':{u'watermark':{u'low':u'11%',u'high':u'60gb'}}}}}}} watermark_transient = {u'transient':{u'cluster':{u'routing':{u'allocation':{u'disk':{u'watermark':{u'low':u'9%',u'high':u'50gb'}}}}}}} watermark_both = { u'persistent': {u'cluster':{u'routing':{u'allocation':{u'disk':{u'watermark':{u'low':u'11%',u'high':u'60gb'}}}}}}, u'transient': {u'cluster':{u'routing':{u'allocation':{u'disk':{u'watermark':{u'low':u'9%',u'high':u'50gb'}}}}}}, } empty_cluster_settings = {u'persistent':{},u'transient':{}} data_only_node_role = ['data'] master_data_node_role = ['data','master']
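# Example (added for illustration; not part of the original fixture set):
# dicts like clu_state_four mirror Elasticsearch API responses, so tests
# typically attach them to a mocked client.  A minimal sketch, assuming an
# elasticsearch-py style client interface, guarded so it only runs when this
# module is executed directly.
if __name__ == '__main__':
    from unittest.mock import Mock

    client = Mock()
    client.cluster.state.return_value = clu_state_four
    client.indices.stats.return_value = stats_four

    # Any code path that calls client.cluster.state() now sees the canned
    # metadata above, so index-filter logic can run without a live cluster.
    state = client.cluster.state(metric='metadata')
    assert set(state['metadata']['indices']) == {
        u'a-2016.03.03', u'b-2016.03.04', u'c-2016.03.05', u'd-2016.03.06'}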
53.844807
11,002
0.561686
6,213
46,145
3.96797
0.086754
0.014603
0.05403
0.030057
0.834381
0.810206
0.784732
0.766438
0.732122
0.714964
0
0.116106
0.250796
46,145
856
11,003
53.90771
0.596986
0.007823
0
0.56691
0
0.002433
0.491873
0.098466
0
0
0
0
0
1
0
false
0.001217
0.001217
0
0.001217
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
3874da59d1a2241e86da6c255ad9af281cc57657
16,067
py
Python
tests/jacobian_test.py
ByzanTine/AutoHOOT
007bb423bfc8eefa64e4d1b0f8dad80b440bcf7a
[ "Apache-2.0" ]
null
null
null
tests/jacobian_test.py
ByzanTine/AutoHOOT
007bb423bfc8eefa64e4d1b0f8dad80b440bcf7a
[ "Apache-2.0" ]
null
null
null
tests/jacobian_test.py
ByzanTine/AutoHOOT
007bb423bfc8eefa64e4d1b0f8dad80b440bcf7a
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import autodiff as ad
import backend as T


def test_add_jacobian(backendopt):
    for datatype in backendopt:
        T.set_backend(datatype)

        x1 = ad.Variable(name="x1", shape=[2, 2])
        x2 = ad.Variable(name="x2", shape=[2, 2])
        y = x1 + x2

        jacobian_x2, = ad.jacobians(y, [x2])
        executor = ad.Executor([y, jacobian_x2])

        x1_val = T.tensor([[1, 1], [1, 1]])
        x2_val = T.tensor([[1, 1], [1, 1]])
        y_val, jacobian_x2_val = executor.run(feed_dict={x1: x1_val, x2: x2_val})

        I = T.identity(2)
        expected_jacobian_x2_val = T.einsum("ac,bd->abcd", I, I)

        assert isinstance(y, ad.Node)
        assert isinstance(jacobian_x2, ad.Node)
        assert T.array_equal(y_val, x1_val + x2_val)
        assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)


def test_add_jacobian_scalar(backendopt):
    for datatype in backendopt:
        T.set_backend(datatype)

        x1 = ad.Variable(name="x1", shape=[])
        x2 = ad.Variable(name="x2", shape=[])
        y = x1 + x2

        jacobian_x2, = ad.jacobians(y, [x2])
        executor = ad.Executor([y, jacobian_x2])

        x1_val = T.tensor(1.)
        x2_val = T.tensor(1.)
        y_val, jacobian_x2_val = executor.run(feed_dict={x1: x1_val, x2: x2_val})

        expected_jacobian_x2_val = T.tensor(1.)

        assert isinstance(y, ad.Node)
        assert isinstance(jacobian_x2, ad.Node)
        assert T.array_equal(y_val, x1_val + x2_val)
        assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)


def test_chainjacobian(backendopt):
    for datatype in backendopt:
        T.set_backend(datatype)

        x1 = ad.Variable(name="x1", shape=[2, 2, 2])
        x2 = ad.Variable(name="x2", shape=[2, 2, 2])
        x1.set_in_indices_length(1)
        x2.set_in_indices_length(2)
        y = ad.chainjacobian(x1, x2)
        executor = ad.Executor([y])

        x1_val = T.tensor([[[1, 1], [1, 1]], [[1, 1], [1, 1]]])
        x2_val = T.tensor([[[1, 1], [1, 1]], [[1, 1], [1, 1]]])
        y_val, = executor.run(feed_dict={x1: x1_val, x2: x2_val})

        expected_y_val = T.einsum("abc,bcd->ad", x1_val, x2_val)

        assert isinstance(y, ad.Node)
        assert T.array_equal(y_val, expected_y_val)


def test_add_jacobian_w_chain(backendopt):
    for datatype in backendopt:
        T.set_backend(datatype)

        x1 = ad.Variable(name="x1", shape=[2, 2])
        x2 = ad.Variable(name="x2", shape=[2, 2])
        x3 = ad.Variable(name="x3", shape=[2, 2])
        y = x1 + x2
        z = y + x3

        jacobian_x2, = ad.jacobians(z, [x2])
        executor = ad.Executor([z, jacobian_x2])

        x1_val = T.tensor([[1, 1], [1, 1]])
        x2_val = T.tensor([[1, 1], [1, 1]])
        x3_val = T.tensor([[1, 1], [1, 1]])
        z_val, jacobian_x2_val = executor.run(feed_dict={
            x1: x1_val,
            x2: x2_val,
            x3: x3_val
        })

        I = T.identity(2)
        # jacobian_z_y = T.einsum("ae,bf->abef", I, I)
        # jacobian_y_x2 = T.einsum("ec,fd->efcd", I, I)
        # jacobian_z_x2 = T.einsum("abef,efcd->abcd", jacobian_z_y, jacobian_y_x2)
        #              = T.einsum("ae,bf,ec,fd->abcd", I, I, I, I)
        #              = T.einsum("ac,bd->abcd", I, I)
        expected_jacobian_x2_val = T.einsum("ac,bd->abcd", I, I)

        assert isinstance(z, ad.Node)
        assert isinstance(jacobian_x2, ad.Node)
        assert T.array_equal(z_val, x1_val + x2_val + x3_val)
        assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)


def test_add_jacobian_scalar_w_chain(backendopt):
    for datatype in backendopt:
        T.set_backend(datatype)

        x1 = ad.Variable(name="x1", shape=[])
        x2 = ad.Variable(name="x2", shape=[])
        x3 = ad.Variable(name="x3", shape=[])
        y = x1 + x2
        z = y + x3

        jacobian_x2, = ad.jacobians(z, [x2])
        executor = ad.Executor([z, jacobian_x2])

        x1_val = T.tensor(1.)
        x2_val = T.tensor(1.)
        x3_val = T.tensor(1.)
        z_val, jacobian_x2_val = executor.run(feed_dict={
            x1: x1_val,
            x2: x2_val,
            x3: x3_val
        })

        expected_jacobian_x2_val = T.tensor(1.)

        assert isinstance(z, ad.Node)
        assert isinstance(jacobian_x2, ad.Node)
        assert T.array_equal(z_val, x1_val + x2_val + x3_val)
        assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)


def test_sub_jacobian(backendopt):
    for datatype in backendopt:
        T.set_backend(datatype)

        x1 = ad.Variable(name="x1", shape=[2, 2])
        x2 = ad.Variable(name="x2", shape=[2, 2])
        y = x1 - x2

        jacobian_x1, jacobian_x2 = ad.jacobians(y, [x1, x2])
        executor = ad.Executor([y, jacobian_x1, jacobian_x2])

        x1_val = T.tensor([[1, 1], [1, 1]])
        x2_val = T.tensor([[1, 1], [1, 1]])
        y_val, jacobian_x1_val, jacobian_x2_val = executor.run(feed_dict={
            x1: x1_val,
            x2: x2_val
        })

        I = T.identity(2)
        expected_jacobian_x1_val = T.einsum("ac,bd->abcd", I, I)
        expected_jacobian_x2_val = -T.einsum("ac,bd->abcd", I, I)

        assert isinstance(y, ad.Node)
        assert isinstance(jacobian_x2, ad.Node)
        assert T.array_equal(y_val, x1_val - x2_val)
        assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)
        assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)


def test_sub_jacobian_w_chain(backendopt):
    for datatype in backendopt:
        T.set_backend(datatype)

        x1 = ad.Variable(name="x1", shape=[2, 2])
        x2 = ad.Variable(name="x2", shape=[2, 2])
        x3 = ad.Variable(name="x3", shape=[2, 2])
        y = x1 - x2
        z = x3 - y

        jacobian_x2, = ad.jacobians(z, [x2])
        executor = ad.Executor([z, jacobian_x2])

        x1_val = T.tensor([[1, 1], [1, 1]])
        x2_val = T.tensor([[1, 1], [1, 1]])
        x3_val = T.tensor([[1, 1], [1, 1]])
        z_val, jacobian_x2_val = executor.run(feed_dict={
            x1: x1_val,
            x2: x2_val,
            x3: x3_val
        })

        I = T.identity(2)
        expected_jacobian_x2_val = T.einsum("ac,bd->abcd", I, I)

        assert isinstance(z, ad.Node)
        assert isinstance(jacobian_x2, ad.Node)
        assert T.array_equal(z_val, x3_val - x1_val + x2_val)
        assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)


def test_mul_jacobian(backendopt):
    for datatype in backendopt:
        T.set_backend(datatype)

        x1 = ad.Variable(name="x1", shape=[2, 2])
        x2 = ad.Variable(name="x2", shape=[2, 2])
        y = x1 * x2

        jacobian_x1, jacobian_x2 = ad.jacobians(y, [x1, x2])
        executor = ad.Executor([y, jacobian_x1, jacobian_x2])

        x1_val = T.tensor([[1., 2.], [3., 4.]])
        x2_val = T.tensor([[5., 6.], [7., 8.]])
        y_val, jacobian_x1_val, jacobian_x2_val = executor.run(feed_dict={
            x1: x1_val,
            x2: x2_val
        })

        I = T.identity(2)
        expected_jacobian_x1_val = T.einsum("ai,bj,ij->abij", I, I, x2_val)
        expected_jacobian_x2_val = T.einsum("ai,bj,ij->abij", I, I, x1_val)

        assert isinstance(y, ad.Node)
        assert T.array_equal(y_val, x1_val * x2_val)
        assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)
        assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)


def test_three_mul_jacobian(backendopt):
    for datatype in backendopt:
        T.set_backend(datatype)

        x1 = ad.Variable(name="x1", shape=[2, 2])
        x2 = ad.Variable(name="x2", shape=[2, 2])
        x3 = ad.Variable(name="x3", shape=[2, 2])
        y = x1 * x2 * x3

        jacobian_x1, = ad.jacobians(y, [x1])
        executor = ad.Executor([y, jacobian_x1])

        x1_val = T.tensor([[1., 2.], [3., 4.]])
        x2_val = T.tensor([[5., 6.], [7., 8.]])
        x3_val = T.tensor([[9., 10.], [11., 12.]])
        y_val, jacobian_x1_val = executor.run(feed_dict={
            x1: x1_val,
            x2: x2_val,
            x3: x3_val
        })

        I = T.identity(2)
        expected_jacobian_x1_val = T.einsum("ai,bj,ij,ij->abij", I, I, x2_val,
                                            x3_val)

        assert isinstance(y, ad.Node)
        assert T.array_equal(y_val, x1_val * x2_val * x3_val)
        assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)


def test_three_mul_jacobian_scalars(backendopt):
    for datatype in backendopt:
        T.set_backend(datatype)

        x1 = ad.Variable(name="x1", shape=[])
        x2 = ad.Variable(name="x2", shape=[])
        x3 = ad.Variable(name="x3", shape=[])
        y = x1 * x2 * x3

        jacobian_x1, = ad.jacobians(y, [x1])
        executor = ad.Executor([y, jacobian_x1])

        x1_val = T.tensor(1.)
        x2_val = T.tensor(2.)
        x3_val = T.tensor(3.)
        y_val, jacobian_x1_val = executor.run(feed_dict={
            x1: x1_val,
            x2: x2_val,
            x3: x3_val
        })

        expected_jacobian_x1_val = x2_val * x3_val

        assert isinstance(y, ad.Node)
        assert T.array_equal(y_val, x1_val * x2_val * x3_val)
        assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)


def test_mul_jacobian_scalars(backendopt):
    for datatype in backendopt:
        T.set_backend(datatype)

        x1 = ad.Variable(name="x1", shape=[])
        x2 = ad.Variable(name="x2", shape=[])
        y = x1 * x2

        jacobian_x1, jacobian_x2 = ad.jacobians(y, [x1, x2])
        executor = ad.Executor([y, jacobian_x1, jacobian_x2])

        x1_val = T.tensor(1.)
        x2_val = T.tensor(2.)
        y_val, jacobian_x1_val, jacobian_x2_val = executor.run(feed_dict={
            x1: x1_val,
            x2: x2_val
        })

        expected_jacobian_x1_val = x2_val
        expected_jacobian_x2_val = x1_val

        assert isinstance(y, ad.Node)
        assert T.array_equal(y_val, x1_val * x2_val)
        assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)
        assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)


def test_mul_jacobian_one_scalar(backendopt):
    for datatype in backendopt:
        T.set_backend(datatype)

        x1 = ad.Variable(name="x1", shape=[])
        x2 = ad.Variable(name="x2", shape=[2, 2])

        # test both cases of left and right multiply a scalar
        for y in [x1 * x2, x2 * x1]:
            jacobian_x1, jacobian_x2 = ad.jacobians(y, [x1, x2])
            executor = ad.Executor([y, jacobian_x1, jacobian_x2])

            x1_val = T.tensor(2.)
            x2_val = T.tensor([[5., 6.], [7., 8.]])
            y_val, jacobian_x1_val, jacobian_x2_val = executor.run(feed_dict={
                x1: x1_val,
                x2: x2_val
            })

            I = T.identity(2)
            expected_jacobian_x1_val = T.einsum("ai,bj,ij->ab", I, I, x2_val)
            expected_jacobian_x2_val = x1_val * T.einsum("ai,bj->abij", I, I)

            assert isinstance(y, ad.Node)
            assert T.array_equal(y_val, x1_val * x2_val)
            assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)
            assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)


def test_mul_const_jacobian(backendopt):
    for datatype in backendopt:
        T.set_backend(datatype)

        x1 = ad.Variable(name="x2", shape=[2, 2])

        jacobian_x1, = ad.jacobians(2 * x1, [x1])
        executor = ad.Executor([jacobian_x1])

        x1_val = T.tensor([[5., 6.], [7., 8.]])
        jacobian_x1_val, = executor.run(feed_dict={x1: x1_val})

        I = T.identity(2)
        expected_jacobian_x1_val = 2 * T.einsum("ai,bj->abij", I, I)

        assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)


def test_jacobian_einsum(backendopt):
    for datatype in backendopt:
        T.set_backend(datatype)

        x1 = ad.Variable(name="x1", shape=[3, 3, 3])
        x2 = ad.Variable(name="x2", shape=[3, 3, 3])
        y = ad.einsum("ikl,jkl->ijk", x1, x2)

        jacobian_x1, jacobian_x2 = ad.jacobians(y, [x1, x2])
        executor = ad.Executor([y, jacobian_x1, jacobian_x2])

        x1_val = T.random((3, 3, 3))
        x2_val = T.random((3, 3, 3))
        y_val, jacobian_x1_val, jacobian_x2_val = executor.run(feed_dict={
            x1: x1_val,
            x2: x2_val,
        })

        I = T.identity(3)
        expected_jacobian_x1_val = T.einsum("im,kn,jno->ijkmno", I, I, x2_val)
        expected_jacobian_x2_val = T.einsum("jm,kn,ino->ijkmno", I, I, x1_val)

        assert isinstance(y, ad.Node)
        assert T.array_equal(y_val, T.einsum("ikl,jkl->ijk", x1_val, x2_val))
        assert T.array_equal(jacobian_x1_val, expected_jacobian_x1_val)
        assert T.array_equal(jacobian_x2_val, expected_jacobian_x2_val)


def test_jacobian_summation_einsum(backendopt):
    for datatype in backendopt:
        T.set_backend(datatype)

        x = ad.Variable(name="x", shape=[2, 2])
        x_sum = ad.einsum('ij->', x)

        grad_x, = ad.jacobians(x_sum, [x])
        executor = ad.Executor([x_sum, grad_x])

        x_val = T.tensor([[1., 2.], [3., 4.]])
        x_sum_val, grad_x_val = executor.run(feed_dict={x: x_val})

        expected_x_sum_val = T.sum(x_val)
        expected_grad_x_val = T.ones_like(x_val)

        assert T.array_equal(x_sum_val, expected_x_sum_val)
        assert T.array_equal(grad_x_val, expected_grad_x_val)


def test_jacobian_summation_einsum_2(backendopt):
    for datatype in backendopt:
        T.set_backend(datatype)

        x = ad.Variable(name="x", shape=[2, 2])
        y = ad.Variable(name="y", shape=[2, 2])
        out = ad.einsum('ij,ab->ab', x, y)

        grad_x, = ad.jacobians(out, [x])
        executor = ad.Executor([out, grad_x])

        x_val = T.tensor([[1., 2.], [3., 4.]])
        y_val = T.tensor([[5., 6.], [7., 8.]])
        out_val, grad_x_val = executor.run(feed_dict={x: x_val, y: y_val})

        expected_out_val = T.einsum('ij,ab->ab', x_val, y_val)
        expected_grad_x_val = T.einsum('ij,ab->abij', T.ones(x_val.shape),
                                       y_val)

        assert T.array_equal(out_val, expected_out_val)
        assert T.array_equal(grad_x_val, expected_grad_x_val)


def test_jacobian_trace_einsum(backendopt):
    for datatype in backendopt:
        T.set_backend(datatype)

        x = ad.Variable(name="x", shape=[2, 2])
        trace = ad.einsum('ii->', x)

        grad_x, = ad.jacobians(trace, [x])
        executor = ad.Executor([trace, grad_x])

        x_val = T.tensor([[1., 2.], [3., 4.]])
        trace_val, grad_x_val = executor.run(feed_dict={x: x_val})

        expected_trace_val = T.einsum('ii->', x_val)
        expected_grad_x_val = T.identity(2)

        assert T.array_equal(trace_val, expected_trace_val)
        assert T.array_equal(grad_x_val, expected_grad_x_val)


def test_hessian_quadratic(backendopt):
    for datatype in backendopt:
        T.set_backend(datatype)

        x = ad.Variable(name="x", shape=[3])
        H = ad.Variable(name="H", shape=[3, 3])
        y = ad.einsum("i,ij,j->", x, H, x)

        hessian = ad.hessian(y, [x])
        executor = ad.Executor([hessian[0][0]])

        x_val = T.random([3])
        H_val = T.random((3, 3))
        hessian_val, = executor.run(feed_dict={x: x_val, H: H_val})

        assert T.array_equal(hessian_val, H_val + T.transpose(H_val))
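# Added illustration (not from the original test suite): the expected
# Jacobians above all follow from the identity that, for elementwise
# y = x1 * x2, dy[a, b]/dx1[i, j] = I[a, i] * I[b, j] * x2[i, j].  The
# sketch below checks that einsum expression against a finite-difference
# approximation using plain NumPy, independently of the ad/backend modules.
if __name__ == '__main__':
    import numpy as np

    rng = np.random.default_rng(0)
    x1 = rng.standard_normal((2, 2))
    x2 = rng.standard_normal((2, 2))
    I = np.eye(2)

    analytic = np.einsum("ai,bj,ij->abij", I, I, x2)

    # Finite-difference Jacobian of y = x1 * x2 with respect to x1.
    eps = 1e-6
    numeric = np.zeros((2, 2, 2, 2))
    for i in range(2):
        for j in range(2):
            bump = np.zeros((2, 2))
            bump[i, j] = eps
            numeric[:, :, i, j] = ((x1 + bump) * x2 - x1 * x2) / eps

    assert np.allclose(analytic, numeric, atol=1e-4)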
31.627953
82
0.587975
2,414
16,067
3.669843
0.06918
0.047974
0.058697
0.07292
0.851451
0.820973
0.7993
0.789254
0.775257
0.770064
0
0.049747
0.273106
16,067
507
83
31.690335
0.708794
0.054024
0
0.71261
0
0
0.021214
0
0
0
0
0
0.167155
1
0.052786
false
0
0.005865
0
0.058651
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
38b53449577e2fd8a609e50973db67e500b48497
13,567
py
Python
consensus_and_scoring/test/test_holistic_eval.py
Ericwimsatt/pe-consensus-scoring
f519a0c02fb069b821a2c890988bf9aff6b44c89
[ "Apache-2.0" ]
1
2021-02-26T08:53:05.000Z
2021-02-26T08:53:05.000Z
consensus_and_scoring/test/test_holistic_eval.py
Ericwimsatt/pe-consensus-scoring
f519a0c02fb069b821a2c890988bf9aff6b44c89
[ "Apache-2.0" ]
3
2020-05-14T21:20:33.000Z
2021-04-22T23:21:23.000Z
consensus_and_scoring/test/test_holistic_eval.py
Ericwimsatt/pe-consensus-scoring
f519a0c02fb069b821a2c890988bf9aff6b44c89
[ "Apache-2.0" ]
3
2020-10-11T17:01:53.000Z
2021-12-28T18:38:59.000Z
import test_utils
from filegen_utils import *
from holistic_eval import *


def test_he_low_info_true_low_counts(config):
    tua_path = test_utils.make_test_directory(config, 'he_tua_input_low_info_true_low_counts')
    scoring_path = test_utils.make_test_directory(config, 'he_scoring_input_low_info_true_low_counts')
    # out_path = test_utils.make_test_directory(config, 'out_he_low_info_true_low_counts')
    pa = point_assignment(out_folder=scoring_path, article_num='520',
                          source_task_id='practice_makes+[perfect')
    pa.add_row({'namespace': 'Covid2_Reasoning_2020_09_20', 'Answer_Number': 3, 'points': 5,
                'Question_Number': 5, 'agreement_score': 1,
                'highlighted_indices': test_utils.make_highlight_indices(10, 30)})
    new_tua = tua(out_folder=tua_path, article_num='520', source_task_id='tua_task_id')
    new_tua.add_row({'topic_name': 'argument', 'start_pos': 10, 'end_pos': 30, 'tua_uuid': 'test1'})
    hol_dep = dep_iaa(out_folder=scoring_path, source_task_id='doesnt matter', article_num='520')  # scientific discovery
    hol_dep.add_row({'namespace': 'Covid2_Holistic_2020_09_20', 'agreed_Answer': 5,
                     'question_Number': 1, 'agreement_score': 1, 'tua_uuid': 'test1'})
    hol_dep.export()
    points = eval_triage_scoring(new_tua.df, pa.df, scoring_path)
    # points.to_csv(out_path + '/AssessedPoints.csv', encoding='utf-8')
    assert len(points) == 2
    assert points['points'].sum() == 3


def test_he_low_info_true_many_assert(config):
    tua_path = test_utils.make_test_directory(config, 'test_he_low_info_true_many_assert')
    scoring_path = test_utils.make_test_directory(config, 'test_he_low_info_true_many_assert')
    out_path = test_utils.make_test_directory(config, 'out_he_low_info_true_many_assert')
    pa = point_assignment(out_folder=scoring_path, article_num='520',
                          source_task_id='practice_makes+[perfect')
    pa.add_row({'namespace': 'Covid2_Reasoning_2020_09_20', 'Answer_Number': 3, 'points': 5,
                'Question_Number': 5, 'agreement_score': 1,
                'highlighted_indices': test_utils.make_highlight_indices(10, 30)})
    new_tua = tua(out_folder=tua_path, article_num='520', source_task_id='tua_task_id')
    new_tua.add_row({'topic_name': 'Assertions', 'start_pos': 10, 'end_pos': 30, 'tua_uuid': 'test1'})
    new_tua.add_row({'topic_name': 'Assertions', 'start_pos': 40, 'end_pos': 80, 'tua_uuid': 'test3'})
    new_tua.add_row({'topic_name': 'Assertions', 'start_pos': 40, 'end_pos': 80, 'tua_uuid': 'test5'})
    new_tua.add_row({'topic_name': 'Quoted Sources', 'start_pos': 20, 'end_pos': 80, 'tua_uuid': 'test8'})
    new_tua.add_row({'topic_name': 'Quoted Sources', 'start_pos': 40, 'end_pos': 60, 'tua_uuid': 'test7'})
    hol_dep = dep_iaa(out_folder=scoring_path, source_task_id='doesnt matter', article_num='520')  # scientific discovery
    hol_dep.add_row({'namespace': 'Covid2_Holistic_2020_09_20', 'agreed_Answer': 5,
                     'question_Number': 1, 'agreement_score': 1, 'tua_uuid': 'test1'})
    hol_dep.export()
    points = eval_triage_scoring(new_tua.df, pa.df, scoring_path)
    points.to_csv(out_path + '/AssessedPoints.csv', encoding='utf-8')
    assert len(points) == 4
    assert points['points'].sum() == -1


def test_he_low_info_true_many_assert_news(config):
    tua_path = test_utils.make_test_directory(config, 'test_he_low_info_true_many_assert_news')
    scoring_path = test_utils.make_test_directory(config, 'test_he_low_info_true_many_assert_news')
    out_path = test_utils.make_test_directory(config, 'out_he_low_info_true_many_assert_news')
    pa = point_assignment(out_folder=scoring_path, article_num='520',
                          source_task_id='practice_makes+[perfect')
    pa.add_row({'namespace': 'Covid2_Reasoning_2020_09_20', 'Answer_Number': 3, 'points': 5,
                'Question_Number': 5, 'agreement_score': 1,
                'highlighted_indices': test_utils.make_highlight_indices(10, 30)})
    new_tua = tua(out_folder=tua_path, article_num='520', source_task_id='tua_task_id')
    new_tua.add_row({'topic_name': 'Assertions', 'start_pos': 10, 'end_pos': 30, 'tua_uuid': 'test1'})
    new_tua.add_row({'topic_name': 'Assertions', 'start_pos': 40, 'end_pos': 80, 'tua_uuid': 'test3'})
    new_tua.add_row({'topic_name': 'Assertions', 'start_pos': 40, 'end_pos': 80, 'tua_uuid': 'test5'})
    new_tua.add_row({'topic_name': 'Quoted Sources', 'start_pos': 20, 'end_pos': 80, 'tua_uuid': 'test8'})
    new_tua.add_row({'topic_name': 'Quoted Sources', 'start_pos': 40, 'end_pos': 60, 'tua_uuid': 'test7'})
    hol_dep = dep_iaa(out_folder=scoring_path, source_task_id='doesnt matter', article_num='520')  # scientific discovery
    hol_dep.add_row({'namespace': 'Covid2_Holistic_2020_09_20', 'agreed_Answer': 1,
                     'question_Number': 1, 'agreement_score': 1, 'tua_uuid': 'test1'})
    hol_dep.export()
    points = eval_triage_scoring(new_tua.df, pa.df, scoring_path)
    points.to_csv(out_path + '/AssessedPoints.csv', encoding='utf-8')
    assert len(points) == 4
    assert points['points'].sum() == -4


def test_he_low_info_false_many_assert(config):
    tua_path = test_utils.make_test_directory(config, 'test_he_low_info_false_many_assert')
    scoring_path = test_utils.make_test_directory(config, 'test_he_low_info_false_many_assert')
    out_path = test_utils.make_test_directory(config, 'out_he_low_info_false_many_assert')
    pa = point_assignment(out_folder=scoring_path, article_num='520',
                          source_task_id='practice_makes+[perfect')
    pa.add_row({'namespace': 'Covid2_Reasoning_2020_09_20', 'Answer_Number': 3, 'points': 5,
                'Question_Number': 5, 'agreement_score': 1,
                'highlighted_indices': test_utils.make_highlight_indices(10, 30)})
    new_tua = tua(out_folder=tua_path, article_num='520', source_task_id='tua_task_id')
    new_tua.add_row({'topic_name': 'Assertions', 'start_pos': 10, 'end_pos': 30, 'tua_uuid': 'test1'})
    new_tua.add_row({'topic_name': 'Assertions', 'start_pos': 40, 'end_pos': 80, 'tua_uuid': 'test3'})
    new_tua.add_row({'topic_name': 'Assertions', 'start_pos': 40, 'end_pos': 80, 'tua_uuid': 'test5'})
    new_tua.add_row({'topic_name': 'Quoted Sources', 'start_pos': 20, 'end_pos': 80, 'tua_uuid': 'test8'})
    new_tua.add_row({'topic_name': 'Quoted Sources', 'start_pos': 40, 'end_pos': 60, 'tua_uuid': 'test7'})
    new_tua.add_row({'topic_name': 'Evidence', 'start_pos': 20, 'end_pos': 80, 'tua_uuid': 'test9'})
    new_tua.add_row({'topic_name': 'Evidence', 'start_pos': 40, 'end_pos': 60, 'tua_uuid': 'test10'})
    new_tua.add_row({'topic_name': 'Evidence', 'start_pos': 20, 'end_pos': 80, 'tua_uuid': 'test11'})
    new_tua.add_row({'topic_name': 'Evidence', 'start_pos': 40, 'end_pos': 60, 'tua_uuid': 'test12'})
    hol_dep = dep_iaa(out_folder=scoring_path, source_task_id='doesnt matter', article_num='520')  # scientific discovery
    hol_dep.add_row({'namespace': 'Covid2_Holistic_2020_09_20', 'agreed_Answer': 5,
                     'question_Number': 1, 'agreement_score': 1, 'tua_uuid': 'test1'})
    hol_dep.export()
    points = eval_triage_scoring(new_tua.df, pa.df, scoring_path)
    points.to_csv(out_path + '/AssessedPoints.csv', encoding='utf-8')
    assert len(points) == 1
    assert points['points'].sum() == 5


def test_he_vague_sources_true(config):
    tua_path = test_utils.make_test_directory(config, 'he_tua_vague_sources_true')
    scoring_path = test_utils.make_test_directory(config, 'he_scoring_vague_sources_true')
    out_path = test_utils.make_test_directory(config, 'out_he_vague_sources_true')
    # 2800 is considered a standard article; the threshold for scoring is
    # 4 vague sources per 2800 characters
    pa = point_assignment(out_folder=scoring_path, article_num='520',
                          source_task_id='practice_makes+[perfect', article_text_length=2700)
    pa.add_row({'namespace': 'Covid2_Reasoning_2020_09_20', 'Answer_Number': 3, 'points': 0,
                'Question_Number': 5, 'agreement_score': 1,
                'highlighted_indices': test_utils.make_highlight_indices(10, 30)})
    pa.export()
    src_dep = dep_iaa(out_folder=scoring_path, source_task_id='qs1', article_num='520',
                      article_text_length=2700)  # scientific discovery
    src_dep.add_row({'namespace': 'Covid2_Sources_2002_09_20', 'agreed_Answer': 5,
                     'question_Number': 2, 'agreement_score': 1,
                     'highlighted_indices': test_utils.make_highlight_indices(10, 30),
                     'tua_uuid': 'tua3'})
    src_dep.export()
    src_dep = dep_iaa(out_folder=scoring_path, source_task_id='qs2', article_num='520',
                      article_text_length=2700)
    src_dep.add_row({'namespace': 'Covid2_Sources_2002_09_20', 'agreed_Answer': 6,
                     'question_Number': 2, 'agreement_score': 1,
                     'highlighted_indices': test_utils.make_highlight_indices(15, 38),
                     'tua_uuid': 'tua3'})
    src_dep.add_row({'namespace': 'Covid2_Sources_2002_09_20', 'agreed_Answer': 8,
                     'question_Number': 5, 'agreement_score': 1,
                     'highlighted_indices': test_utils.make_highlight_indices(7, 27),
                     'tua_uuid': 'tua3'})
    src_dep.export()
    src_dep = dep_iaa(out_folder=scoring_path, source_task_id='qs3', article_num='520',
                      article_text_length=2700)
    src_dep.add_row({'namespace': 'Covid2_Sources_2002_09_20', 'agreed_Answer': 7,
                     'question_Number': 5, 'agreement_score': 1,
                     'highlighted_indices': test_utils.make_highlight_indices(15, 38),
                     'tua_uuid': 'tua3'})
    src_dep.export()
    new_tua = tua(out_folder=tua_path, article_num='520', source_task_id='tua_task_id',
                  article_text_length=2700)
    new_tua.add_row({'topic_name': 'Quoted Sources', 'start_pos': 10, 'end_pos': 30, 'tua_uuid': 'tua1'})
    new_tua.add_row({'topic_name': 'Quoted Sources', 'start_pos': 50, 'end_pos': 120, 'tua_uuid': 'tua2'})
    new_tua.add_row({'topic_name': 'Quoted Sources', 'start_pos': 900, 'end_pos': 1020, 'tua_uuid': 'tua3'})
    new_tua.export()
    points = eval_triage_scoring(new_tua.df, pa.df, scoring_path)
    points.to_csv(out_path + '/AssessedPoints.csv', encoding='utf-8')
    assert points['points'].sum() == -14
    assert len(points) == 4
    hl = points[points['points'] == -2]['highlighted_indices'].iloc[0]
    assert all([str(i) in hl for i in range(900, 1020)])


def test_he_vague_sources_false(config):
    tua_path = test_utils.make_test_directory(config, 'he_tua_vague_sources_false')
    scoring_path = test_utils.make_test_directory(config, 'he_scoring_vague_sources_false')
    out_path = test_utils.make_test_directory(config, 'out_he_vague_sources_false')
    # 2800 is considered a standard article; the threshold for scoring is
    # 4 vague sources per 2800 characters
    pa = point_assignment(out_folder=scoring_path, article_num='520',
                          source_task_id='practice_makes+[perfect', article_text_length=2900)
    pa.add_row({'namespace': 'Covid2_Reasoning_2020_09_20', 'Answer_Number': 3, 'points': 0,
                'Question_Number': 5, 'agreement_score': 1,
                'highlighted_indices': test_utils.make_highlight_indices(10, 30)})
    pa.export()
    src_dep = dep_iaa(out_folder=scoring_path, source_task_id='qs1', article_num='520',
                      article_text_length=2900)  # scientific discovery
    src_dep.add_row({'namespace': 'Covid2_Sources_2002_09_20', 'agreed_Answer': 5,
                     'question_Number': 2, 'agreement_score': 1,
                     'highlighted_indices': test_utils.make_highlight_indices(10, 30),
                     'tua_uuid': 'tua3'})
    src_dep.export()
    src_dep = dep_iaa(out_folder=scoring_path, source_task_id='qs2', article_num='520',
                      article_text_length=2900)
    src_dep.add_row({'namespace': 'Covid2_Sources_2002_09_20', 'agreed_Answer': 6,
                     'question_Number': 2, 'agreement_score': 1,
                     'highlighted_indices': test_utils.make_highlight_indices(15, 38),
                     'tua_uuid': 'tua3'})
    src_dep.add_row({'namespace': 'Covid2_Sources_2002_09_20', 'agreed_Answer': 8,
                     'question_Number': 5, 'agreement_score': 1,
                     'highlighted_indices': test_utils.make_highlight_indices(7, 27),
                     'tua_uuid': 'tua3'})
    src_dep.export()
    src_dep = dep_iaa(out_folder=scoring_path, source_task_id='qs3', article_num='520',
                      article_text_length=2900)
    src_dep.add_row({'namespace': 'Covid2_Sources_2002_09_20', 'agreed_Answer': 7,
                     'question_Number': 5, 'agreement_score': 1,
                     'highlighted_indices': test_utils.make_highlight_indices(15, 38),
                     'tua_uuid': 'tua3'})
    src_dep.export()
    new_tua = tua(out_folder=tua_path, article_num='520', source_task_id='tua_task_id',
                  article_text_length=2900)
    new_tua.add_row({'topic_name': 'Quoted Sources', 'start_pos': 10, 'end_pos': 30, 'tua_uuid': 'tua1'})
    new_tua.add_row({'topic_name': 'Quoted Sources', 'start_pos': 50, 'end_pos': 120, 'tua_uuid': 'tua2'})
    new_tua.add_row({'topic_name': 'Quoted Sources', 'start_pos': 900, 'end_pos': 1020, 'tua_uuid': 'tua3'})
    new_tua.export()
    points = eval_triage_scoring(new_tua.df, pa.df, scoring_path)
    points.to_csv(out_path + '/AssessedPoints.csv', encoding='utf-8')
    assert points['points'].sum() == -4
    assert len(points) == 3
    hl = points[points['points'] == -2]['highlighted_indices'].iloc[0]
    assert all([str(i) in hl for i in range(900, 1020)])
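# Added illustration (hypothetical helper, not from the source): the two
# vague-sources tests above hinge on a proportional threshold that the
# comments describe as roughly 4 vague sources per 2800 characters of
# article text.  The real rule lives inside eval_triage_scoring (not shown
# here); this sketch only spells out the arithmetic the tests rely on.
def _vague_source_threshold(article_text_length, per_chars=2800, base=4):
    """Scale the allowed number of vague sources with article length."""
    return base * article_text_length / per_chars


# With 4 vague sources flagged, a 2700-char article exceeds its allowance
# (~3.86) and is penalised, while a 2900-char article stays under its
# allowance (~4.14) -- matching the True/False pair of tests above.
assert 4 > _vague_source_threshold(2700)
assert 4 < _vague_source_threshold(2900)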
65.541063
117
0.69271
1,953
13,567
4.392729
0.073733
0.030773
0.048491
0.036368
0.97389
0.968411
0.957571
0.954074
0.947779
0.944283
0
0.052391
0.161495
13,567
206
118
65.859223
0.701741
0.034127
0
0.769231
0
0
0.325212
0.088215
0
0
0
0
0.207101
1
0.035503
false
0
0.017751
0
0.053254
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
38d07bdb33d49b045b710a59313445131a3746cc
11,961
py
Python
src/svm/utils.py
aramis-lab/pac2019
200681eb0441baa69a386ef1ac8cf6f0d2ab01e0
[ "MIT" ]
3
2020-11-22T11:56:03.000Z
2021-05-04T22:24:12.000Z
src/svm/utils.py
aramis-lab/PAC2019
200681eb0441baa69a386ef1ac8cf6f0d2ab01e0
[ "MIT" ]
null
null
null
src/svm/utils.py
aramis-lab/PAC2019
200681eb0441baa69a386ef1ac8cf6f0d2ab01e0
[ "MIT" ]
8
2020-06-02T08:41:15.000Z
2022-03-15T11:58:49.000Z
"""Utility tools.""" import numpy as np import pandas as pd from sklearn.base import BaseEstimator, RegressorMixin from sklearn.metrics import mean_absolute_error from sklearn.model_selection import KFold from scipy.stats import spearmanr import matplotlib.pyplot as plt def plot_results(y_trues, y_preds, marker='o', ms=5, fillstyle=None, linestyle='None', output_file=None): """Plot the results for each fold. Parameters ---------- y_trues : list of arrays List of arrays with the true ages. y_preds : list of arrays List of arrays with the predicted ages. marker : str (default = 'o') Marker style. ms : float (default = 5) Marker size. fillstyle : None or str (default = None) Fillstyle. linestyle : None or str (default = 'None') Linestyle. output_file : None or str (default = None) If str, the plot is saved with the given name. """ n_folds = len(y_trues) plt.figure(figsize=(6, 6 * n_folds)) for i, (y_true, y_pred) in enumerate(zip(y_trues, y_preds)): plt.subplot(n_folds, 1, i + 1) # Plot each point plt.plot(y_true, y_pred, marker=marker, ms=ms, fillstyle=fillstyle, linestyle=linestyle, color='C0') # Plot the perfect line min_age = np.min(np.r_[y_true, y_pred]) max_age = np.max(np.r_[y_true, y_pred]) plt.plot([min_age, max_age], [min_age, max_age], color='C1') # Compute the MAE mae = mean_absolute_error(y_true, y_pred) r, _ = spearmanr(y_true, np.abs(y_true - y_pred)) # Add a title plt.title("Fold {0}\nMAE={1:0.3f} - r={2:0.3f}" .format(i + 1, mae, r), fontsize=16) plt.xlabel("True age", fontsize=12) plt.ylabel("Predicted age", fontsize=12) plt.subplots_adjust(hspace=0.45) if output_file is not None: plt.savefig(output_file) plt.show() class GridSearchCVRBFKernel(BaseEstimator, RegressorMixin): """GridSearchCV adapted for precomputed RBF kernel. Parameters ---------- estimator : estimator Regressor. param_grid : dict Grid of parameters. cv : None, int or KFold instance (default = None) Cross validation iterable. age_range : None or tuple (default = None) Age range. If tuple, it will be used to clip the predictions. Attributes ---------- best_estimator_ : estimator Best estimator cv_results_ : DataFrame Dataframe with all the cross validation results. """ def __init__(self, estimator, param_grid, cv=None, age_range=None): self.estimator = estimator self.param_grid = param_grid self.cv = cv self.age_range = age_range def fit(self, X, y, train_index): """Find the best combination of values. Parameters ---------- X : array Input values. y : array Target values. 
""" if self.cv is None: kfold = KFold(n_splits=5, shuffle=True) elif isinstance(self.cv, (int, np.integer)): kfold = KFold(n_splits=self.cv, shuffle=True) elif isinstance(self.cv, KFold): kfold = self.cv else: raise ValueError( "'cv' must be None, an integer or a KFold instance " "(got {0})".format(self.cv) ) self._train_index = train_index gamma_values = [] C_values = [] mae_val_values = [] mean_mae_val_values = [] y_train = y[train_index] for gamma in self.param_grid['gamma']: X_rbf = np.exp(-gamma * X) X_train = X_rbf[train_index[:, None], train_index] for C in self.param_grid['C']: self.estimator.set_params(C=C) mae_val_split = [] for train_train_index, train_val_index in kfold.split( X_train, y_train ): X_train_train = X_train[train_train_index[:, None], train_train_index] X_train_val = X_train[train_val_index[:, None], train_train_index] y_train_train = y_train[train_train_index] y_train_val = y_train[train_val_index] self.estimator.fit(X_train_train, y_train_train) y_pred = self.estimator.predict(X_train_val) if self.age_range is not None: y_pred = np.clip(y_pred, *self.age_range) score = mean_absolute_error(y_train_val, y_pred) mae_val_split.append(score) gamma_values.append(gamma) C_values.append(C) mae_val_values.append(mae_val_split) mean_mae_val_values.append(np.mean(mae_val_split)) idx = np.argmin(mean_mae_val_values) best_C = C_values[idx] best_gamma = gamma_values[idx] self.best_params_ = {'C': best_C, 'gamma': best_gamma} C_values = np.asarray(C_values).reshape(-1, 1) gamma_values = np.asarray(gamma_values).reshape(-1, 1) mae_val_values = np.asarray(mae_val_values).reshape( -1, kfold.get_n_splits()) mean_mae_val_values = np.asarray(mean_mae_val_values).reshape(-1, 1) cv_results = np.c_[C_values, gamma_values, np.round(mae_val_values, 4), np.round(mean_mae_val_values, 4)] columns = ['C', 'gamma'] columns += ['test_score_split{0}'.format(i) for i in range(mae_val_values.shape[1])] columns += ['mean_test_score'] cv_results = pd.DataFrame(cv_results, columns=columns) self.cv_results_ = cv_results self._X_rbf = np.exp(- best_gamma * X) self._y = y self.best_estimator_ = self.estimator self.best_estimator_.set_params(C=best_C) self.best_estimator_.fit(self._X_rbf[train_index[:, None], train_index], y_train) def predict(self, test_index): """Predict. Parameters ---------- test_index : array Indices for the test set. Returns ------- y_pred : array Predicted values. """ X_test = self._X_rbf[test_index[:, None], self._train_index] y_pred = self.best_estimator_.predict(X_test) if self.age_range is not None: y_pred = np.clip(y_pred, *self.age_range) return y_pred def score(self, test_index): """Predict and compute the mean absolute error. Parameters ---------- test_index : array Indices for the test set. Returns ------- mae : float Mean absolute error on the test set. """ y_pred = self.predict(test_index) mae = mean_absolute_error(self._y[test_index], y_pred) return mae class GridSearchCVLinearKernel(BaseEstimator, RegressorMixin): """GridSearchCV adapted for precomputed linear kernel. Parameters ---------- estimator : estimator Regressor. param_grid : dict Grid of parameters. cv : None, int or KFold instance (default = None) Cross validation iterable. age_range : None or tuple (default = None) Age range. If tuple, it will be used to clip the predictions. Attributes ---------- best_estimator_ : estimator Best estimator cv_results_ : DataFrame Dataframe with all the cross validation results. 
""" def __init__(self, estimator, param_grid, cv=None, age_range=None): self.estimator = estimator self.param_grid = param_grid self.cv = cv self.age_range = age_range def fit(self, X, y, train_index): """Find the best combination of values. Parameters ---------- X : array Input values. y : array Target values. """ if self.cv is None: kfold = KFold(n_splits=5, shuffle=True) elif isinstance(self.cv, (int, np.integer)): kfold = KFold(n_splits=self.cv, shuffle=True) elif isinstance(self.cv, KFold): kfold = self.cv else: raise ValueError( "'cv' must be None, an integer or a KFold instance " "(got {0})".format(self.cv) ) self._train_index = train_index C_values = [] mae_val_values = [] mean_mae_val_values = [] y_train = y[train_index] X_train = X[train_index[:, None], train_index] for C in self.param_grid['C']: self.estimator.set_params(C=C) mae_val_split = [] for train_train_index, train_val_index in kfold.split( X_train, y_train ): X_train_train = X_train[train_train_index[:, None], train_train_index] X_train_val = X_train[train_val_index[:, None], train_train_index] y_train_train = y_train[train_train_index] y_train_val = y_train[train_val_index] self.estimator.fit(X_train_train, y_train_train) y_pred = self.estimator.predict(X_train_val) if self.age_range is not None: y_pred = np.clip(y_pred, *self.age_range) score = mean_absolute_error(y_train_val, y_pred) mae_val_split.append(score) C_values.append(C) mae_val_values.append(mae_val_split) mean_mae_val_values.append(np.mean(mae_val_split)) idx = np.argmin(mean_mae_val_values) best_C = C_values[idx] self.best_params_ = {'C': best_C} C_values = np.asarray(C_values).reshape(-1, 1) mae_val_values = np.asarray(mae_val_values).reshape( -1, kfold.get_n_splits()) mean_mae_val_values = np.asarray(mean_mae_val_values).reshape(-1, 1) cv_results = np.c_[C_values, np.round(mae_val_values, 4), np.round(mean_mae_val_values, 4)] columns = ['C'] columns += ['test_score_split{0}'.format(i) for i in range(mae_val_values.shape[1])] columns += ['mean_test_score'] cv_results = pd.DataFrame(cv_results, columns=columns) self.cv_results_ = cv_results self._X = X self._y = y self.best_estimator_ = self.estimator self.best_estimator_.set_params(C=best_C) self.best_estimator_.fit(self._X[train_index[:, None], train_index], y_train) def predict(self, test_index): """Predict. Parameters ---------- test_index : array Indices for the test set. Returns ------- y_pred : array Predicted values. """ X_test = self._X[test_index[:, None], self._train_index] y_pred = self.best_estimator_.predict(X_test) if self.age_range is not None: y_pred = np.clip(y_pred, *self.age_range) return y_pred def score(self, test_index): """Predict and compute the mean absolute error. Parameters ---------- test_index : array Indices for the test set. Returns ------- mae : float Mean absolute error on the test set. """ y_pred = self.predict(test_index) mae = mean_absolute_error(self._y[test_index], y_pred) return mae
30.512755
76
0.562746
1,498
11,961
4.219626
0.127503
0.030375
0.045562
0.030375
0.80019
0.790698
0.767442
0.766334
0.748774
0.741022
0
0.006183
0.33743
11,961
391
77
30.590793
0.79142
0.203913
0
0.719577
0
0.005291
0.030974
0
0
0
0
0
0
1
0.047619
false
0
0.037037
0
0.116402
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
38db8d4899c6ea177fd7d93fdbf045bab3f5806f
92
py
Python
proper_forms/__init__.py
jpsca/pforms
77c9da93e5224e79bb147aa873f28951e972bb21
[ "MIT" ]
2
2019-10-11T03:13:10.000Z
2019-11-12T10:31:54.000Z
proper_forms/__init__.py
jpsca/hyperform
d5c450ad8684a853fed26f8c2606877151125a9e
[ "MIT" ]
2
2021-11-18T18:01:28.000Z
2021-11-18T18:03:29.000Z
proper_forms/__init__.py
jpsca/hyperform
d5c450ad8684a853fed26f8c2606877151125a9e
[ "MIT" ]
null
null
null
from .fields import *  # noqa
from .form import *  # noqa
from .validators import *  # noqa
23
33
0.673913
12
92
5.166667
0.5
0.483871
0.451613
0
0
0
0
0
0
0
0
0
0.228261
92
3
34
30.666667
0.873239
0.152174
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
2a83bd812dc615e783a34e93cd075085cedd1906
8,006
py
Python
test/test_utils.py
luke-goddard/ssh-audit
e447c42a79df49841d5269eacde6dbeb811a6be4
[ "MIT" ]
1
2020-12-23T18:27:03.000Z
2020-12-23T18:27:03.000Z
test/test_utils.py
FRooter/ssh-audit
e447c42a79df49841d5269eacde6dbeb811a6be4
[ "MIT" ]
null
null
null
test/test_utils.py
FRooter/ssh-audit
e447c42a79df49841d5269eacde6dbeb811a6be4
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys

import pytest


# pylint: disable=attribute-defined-outside-init
class TestUtils(object):
    @pytest.fixture(autouse=True)
    def init(self, ssh_audit):
        self.utils = ssh_audit.Utils
        self.PY3 = sys.version_info >= (3,)

    def test_to_bytes_py2(self):
        if self.PY3:
            return
        # binary_type (native str, bytes as str)
        assert self.utils.to_bytes('fran\xc3\xa7ais') == 'fran\xc3\xa7ais'
        assert self.utils.to_bytes(b'fran\xc3\xa7ais') == 'fran\xc3\xa7ais'
        # text_type (unicode)
        assert self.utils.to_bytes(u'fran\xe7ais') == 'fran\xc3\xa7ais'
        # other
        with pytest.raises(TypeError):
            self.utils.to_bytes(123)

    def test_to_bytes_py3(self):
        if not self.PY3:
            return
        # binary_type (bytes)
        assert self.utils.to_bytes(b'fran\xc3\xa7ais') == b'fran\xc3\xa7ais'
        # text_type (native str as unicode, unicode)
        assert self.utils.to_bytes('fran\xe7ais') == b'fran\xc3\xa7ais'
        assert self.utils.to_bytes(u'fran\xe7ais') == b'fran\xc3\xa7ais'
        # other
        with pytest.raises(TypeError):
            self.utils.to_bytes(123)

    def test_to_utext_py2(self):
        if self.PY3:
            return
        # binary_type (native str, bytes as str)
        assert self.utils.to_utext('fran\xc3\xa7ais') == u'fran\xe7ais'
        assert self.utils.to_utext(b'fran\xc3\xa7ais') == u'fran\xe7ais'
        # text_type (unicode)
        assert self.utils.to_utext(u'fran\xe7ais') == u'fran\xe7ais'
        # other
        with pytest.raises(TypeError):
            self.utils.to_utext(123)

    def test_to_utext_py3(self):
        if not self.PY3:
            return
        # binary_type (bytes)
        assert self.utils.to_utext(b'fran\xc3\xa7ais') == u'fran\xe7ais'
        # text_type (native str as unicode, unicode)
        assert self.utils.to_utext('fran\xe7ais') == 'fran\xe7ais'
        assert self.utils.to_utext(u'fran\xe7ais') == u'fran\xe7ais'
        # other
        with pytest.raises(TypeError):
            self.utils.to_utext(123)

    def test_to_ntext_py2(self):
        if self.PY3:
            return
        # str (native str, bytes as str)
        assert self.utils.to_ntext('fran\xc3\xa7ais') == 'fran\xc3\xa7ais'
        assert self.utils.to_ntext(b'fran\xc3\xa7ais') == 'fran\xc3\xa7ais'
        # text_type (unicode)
        assert self.utils.to_ntext(u'fran\xe7ais') == 'fran\xc3\xa7ais'
        # other
        with pytest.raises(TypeError):
            self.utils.to_ntext(123)

    def test_to_ntext_py3(self):
        if not self.PY3:
            return
        # str (native str)
        assert self.utils.to_ntext('fran\xc3\xa7ais') == 'fran\xc3\xa7ais'
        assert self.utils.to_ntext(u'fran\xe7ais') == 'fran\xe7ais'
        # binary_type (bytes)
        assert self.utils.to_ntext(b'fran\xc3\xa7ais') == 'fran\xe7ais'
        # other
        with pytest.raises(TypeError):
            self.utils.to_ntext(123)

    def test_is_ascii_py2(self):
        if self.PY3:
            return
        # text_type (unicode)
        assert self.utils.is_ascii(u'francais') is True
        assert self.utils.is_ascii(u'fran\xe7ais') is False
        # str
        assert self.utils.is_ascii('francais') is True
        assert self.utils.is_ascii('fran\xc3\xa7ais') is False
        # other
        assert self.utils.is_ascii(123) is False

    def test_is_ascii_py3(self):
        if not self.PY3:
            return
        # text_type (str)
        assert self.utils.is_ascii('francais') is True
        assert self.utils.is_ascii(u'francais') is True
        assert self.utils.is_ascii('fran\xe7ais') is False
        assert self.utils.is_ascii(u'fran\xe7ais') is False
        # other
        assert self.utils.is_ascii(123) is False

    def test_to_ascii_py2(self):
        if self.PY3:
            return
        # text_type (unicode)
        assert self.utils.to_ascii(u'francais') == 'francais'
        assert self.utils.to_ascii(u'fran\xe7ais') == 'fran?ais'
        assert self.utils.to_ascii(u'fran\xe7ais', 'ignore') == 'franais'
        # str
        assert self.utils.to_ascii('francais') == 'francais'
        assert self.utils.to_ascii('fran\xc3\xa7ais') == 'fran??ais'
        assert self.utils.to_ascii('fran\xc3\xa7ais', 'ignore') == 'franais'
        with pytest.raises(TypeError):
            self.utils.to_ascii(123)

    def test_to_ascii_py3(self):
        if not self.PY3:
            return
        # text_type (str)
        assert self.utils.to_ascii('francais') == 'francais'
        assert self.utils.to_ascii(u'francais') == 'francais'
        assert self.utils.to_ascii('fran\xe7ais') == 'fran?ais'
        assert self.utils.to_ascii('fran\xe7ais', 'ignore') == 'franais'
        assert self.utils.to_ascii(u'fran\xe7ais') == 'fran?ais'
        assert self.utils.to_ascii(u'fran\xe7ais', 'ignore') == 'franais'
        with pytest.raises(TypeError):
            self.utils.to_ascii(123)

    def test_is_print_ascii_py2(self):
        if self.PY3:
            return
        # text_type (unicode)
        assert self.utils.is_print_ascii(u'francais') is True
        assert self.utils.is_print_ascii(u'francais\n') is False
        assert self.utils.is_print_ascii(u'fran\xe7ais') is False
        assert self.utils.is_print_ascii(u'fran\xe7ais\n') is False
        # str
        assert self.utils.is_print_ascii('francais') is True
        assert self.utils.is_print_ascii('francais\n') is False
        assert self.utils.is_print_ascii('fran\xc3\xa7ais') is False
        # other
        assert self.utils.is_print_ascii(123) is False

    def test_is_print_ascii_py3(self):
        if not self.PY3:
            return
        # text_type (str)
        assert self.utils.is_print_ascii('francais') is True
        assert self.utils.is_print_ascii('francais\n') is False
        assert self.utils.is_print_ascii(u'francais') is True
        assert self.utils.is_print_ascii(u'francais\n') is False
        assert self.utils.is_print_ascii('fran\xe7ais') is False
        assert self.utils.is_print_ascii(u'fran\xe7ais') is False
        # other
        assert self.utils.is_print_ascii(123) is False

    def test_to_print_ascii_py2(self):
        if self.PY3:
            return
        # text_type (unicode)
        assert self.utils.to_print_ascii(u'francais') == 'francais'
        assert self.utils.to_print_ascii(u'francais\n') == 'francais?'
        assert self.utils.to_print_ascii(u'fran\xe7ais') == 'fran?ais'
        assert self.utils.to_print_ascii(u'fran\xe7ais\n') == 'fran?ais?'
        assert self.utils.to_print_ascii(u'fran\xe7ais', 'ignore') == 'franais'
        assert self.utils.to_print_ascii(u'fran\xe7ais\n', 'ignore') == 'franais'
        # str
        assert self.utils.to_print_ascii('francais') == 'francais'
        assert self.utils.to_print_ascii('francais\n') == 'francais?'
        assert self.utils.to_print_ascii('fran\xc3\xa7ais') == 'fran??ais'
        assert self.utils.to_print_ascii('fran\xc3\xa7ais\n') == 'fran??ais?'
        assert self.utils.to_print_ascii('fran\xc3\xa7ais', 'ignore') == 'franais'
        assert self.utils.to_print_ascii('fran\xc3\xa7ais\n', 'ignore') == 'franais'
        with pytest.raises(TypeError):
            self.utils.to_print_ascii(123)

    def test_to_print_ascii_py3(self):
        if not self.PY3:
            return
        # text_type (str)
        assert self.utils.to_print_ascii('francais') == 'francais'
        assert self.utils.to_print_ascii('francais\n') == 'francais?'
        assert self.utils.to_print_ascii(u'francais') == 'francais'
        assert self.utils.to_print_ascii(u'francais\n') == 'francais?'
        assert self.utils.to_print_ascii('fran\xe7ais') == 'fran?ais'
        assert self.utils.to_print_ascii('fran\xe7ais\n') == 'fran?ais?'
        assert self.utils.to_print_ascii('fran\xe7ais', 'ignore') == 'franais'
        assert self.utils.to_print_ascii('fran\xe7ais\n', 'ignore') == 'franais'
        assert self.utils.to_print_ascii(u'fran\xe7ais') == 'fran?ais'
        assert self.utils.to_print_ascii(u'fran\xe7ais\n') == 'fran?ais?'
        assert self.utils.to_print_ascii(u'fran\xe7ais', 'ignore') == 'franais'
        assert self.utils.to_print_ascii(u'fran\xe7ais\n', 'ignore') == 'franais'
        with pytest.raises(TypeError):
            self.utils.to_print_ascii(123)

    def test_ctoi(self):
        assert self.utils.ctoi(123) == 123
        assert self.utils.ctoi('ABC') == 65

    def test_parse_int(self):
        assert self.utils.parse_int(123) == 123
        assert self.utils.parse_int('123') == 123
        assert self.utils.parse_int(-123) == -123
        assert self.utils.parse_int('-123') == -123
        assert self.utils.parse_int('abc') == 0

    def test_unique_seq(self):
        assert self.utils.unique_seq((1, 2, 2, 3, 3, 3)) == (1, 2, 3)
        assert self.utils.unique_seq((3, 3, 3, 2, 2, 1)) == (3, 2, 1)
        assert self.utils.unique_seq([1, 2, 2, 3, 3, 3]) == [1, 2, 3]
        assert self.utils.unique_seq([3, 3, 3, 2, 2, 1]) == [3, 2, 1]
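The round-trips asserted above all reduce to one UTF-8 fact; a standalone check, independent of ssh-audit, shown only to make the byte values concrete:

# U+00E7 ('ç') encodes to the byte pair 0xC3 0xA7 in UTF-8, which is why
# u'fran\xe7ais' corresponds to b'fran\xc3\xa7ais' throughout these tests.
assert 'fran\xe7ais'.encode('utf-8') == b'fran\xc3\xa7ais'
assert b'fran\xc3\xa7ais'.decode('utf-8') == 'fran\xe7ais'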
36.557078
78
0.708344
1,297
8,006
4.209715
0.060139
0.166484
0.247253
0.168132
0.943956
0.932601
0.917033
0.902015
0.889377
0.859524
0
0.034493
0.134524
8,006
218
79
36.724771
0.7535
0.080065
0
0.604938
0
0
0.200464
0
0
0
0
0
0.555556
1
0.111111
false
0
0.012346
0
0.216049
0.277778
0
0
0
null
0
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
9
aa543460f48a5a5d9d71cfac64f9cf3f60c9a424
160
py
Python
admin_tools/dashboard/__init__.py
glic3rinu/django-admin-tools
f758e39c6b3f30ace6d43d243b006e1b650928b3
[ "MIT" ]
null
null
null
admin_tools/dashboard/__init__.py
glic3rinu/django-admin-tools
f758e39c6b3f30ace6d43d243b006e1b650928b3
[ "MIT" ]
1
2015-06-20T08:46:21.000Z
2015-06-20T08:46:21.000Z
admin_tools/dashboard/__init__.py
glic3rinu/django-admin-tools
f758e39c6b3f30ace6d43d243b006e1b650928b3
[ "MIT" ]
null
null
null
from admin_tools.dashboard.dashboards import *
from admin_tools.dashboard.registry import *

default_app_config = 'admin_tools.dashboard.apps.DashboardConfig'
26.666667
65
0.84375
20
160
6.5
0.6
0.230769
0.438462
0.353846
0
0
0
0
0
0
0
0
0.08125
160
5
66
32
0.884354
0
0
0
0
0
0.264151
0.264151
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
7
aa9be026882090a9058741c95f9e239e4bbaaa19
24,172
py
Python
tests/test_create_schema.py
roehling/jello
1073355e2bbfcd4f92af2584c9e539ce34859fc0
[ "MIT" ]
null
null
null
tests/test_create_schema.py
roehling/jello
1073355e2bbfcd4f92af2584c9e539ce34859fc0
[ "MIT" ]
null
null
null
tests/test_create_schema.py
roehling/jello
1073355e2bbfcd4f92af2584c9e539ce34859fc0
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 import unittest import os from jello.lib import opts, Schema class MyTests(unittest.TestCase): def setUp(self): # initialize options opts.initialize = None opts.version_info = None opts.helpme = None opts.compact = None opts.nulls = None opts.raw = None opts.lines = None opts.mono = None opts.schema = None opts.types = None opts.keyname_color = None opts.keyword_color = None opts.number_color = None opts.string_color = None # initialize schema_lists self.schema = Schema() # initialize JELLO_COLORS env variable os.environ['JELLO_COLORS'] = 'default,default,default,default' # set the colors self.schema.set_colors() # create samples self.dict_sample = { 'string': 'string\nwith newline\ncharacters in it', 'true': True, 'false': False, 'null': None, 'int': 42, 'float': 3.14, 'array': [ 'string\nwith newline\ncharacters in it', True, False, None, 42, 3.14 ] } self.list_sample = [ 'string\nwith newline\ncharacters in it', True, False, None, 42, 3.14 ] self.list_of_dicts_sample = [ { 'string': 'string\nwith newline\ncharacters in it', 'true': True, 'false': False, 'null': None, 'int': 42, 'float': 3.14, 'array': [ 'string\nwith newline\ncharacters in it', True, False, None, 42, 3.14 ] }, { 'string': 'another string\nwith newline\ncharacters in it', 'true': True, 'false': False, 'null': None, 'int': 10001, 'float': -400.45, 'array': [ 'string\nwith newline\ncharacters in it', True, False, None, -6000034, 999999.854321 ] } ] self.list_of_lists_sample = [ [ 'string\nwith newline\ncharacters in it', True, False, None, 42, 3.14 ], [ 'another string\nwith newline\ncharacters in it', True, False, None, 42001, -3.14 ] ] # ------------ Tests ------------ # # Naked True # def test_true(self): """ Test True """ self.data_in = True self.expected = '. = \x1b[90mtrue\x1b[39m;' output = self.schema.create_schema(self.data_in) self.assertEqual(self.schema.color_output(output), self.expected) def test_true_m(self): """ Test True -m """ self.data_in = True self.expected = '. = true;' self.assertEqual(self.schema.create_schema(self.data_in), self.expected) # # Naked False # def test_false(self): """ Test False """ self.data_in = False self.expected = '. = \x1b[90mfalse\x1b[39m;' output = self.schema.create_schema(self.data_in) self.assertEqual(self.schema.color_output(output), self.expected) def test_false_m(self): """ Test False -m """ self.data_in = False self.expected = '. = false;' self.assertEqual(self.schema.create_schema(self.data_in), self.expected) # # Naked null # def test_null(self): """ Test None """ self.data_in = None self.expected = '. = \x1b[90mnull\x1b[39m;' output = self.schema.create_schema(self.data_in) self.assertEqual(self.schema.color_output(output), self.expected) def test_null_m(self): """ Test None -m """ self.data_in = None self.expected = '. = null;' self.assertEqual(self.schema.create_schema(self.data_in), self.expected) # # naked int # def test_int(self): """ Test int """ self.data_in = 42 self.expected = '. = \x1b[35m42\x1b[39m;' output = self.schema.create_schema(self.data_in) self.assertEqual(self.schema.color_output(output), self.expected) def test_int_m(self): """ Test int -m """ self.data_in = 42 self.expected = '. = 42;' self.assertEqual(self.schema.create_schema(self.data_in), self.expected) # # naked float # def test_float(self): """ Test float """ self.data_in = 3.14 self.expected = '. 
= \x1b[35m3.14\x1b[39m;' output = self.schema.create_schema(self.data_in) self.assertEqual(self.schema.color_output(output), self.expected) def test_float_m(self): """ Test float -m """ self.data_in = 3.14 self.expected = '. = 3.14;' self.assertEqual(self.schema.create_schema(self.data_in), self.expected) # # naked string # def test_string(self): """ Test string """ self.data_in = '"string with\\nnewline char"' self.expected = '. = \x1b[32m"\\"string with\\\\nnewline char\\""\x1b[39m;' output = self.schema.create_schema(self.data_in) self.assertEqual(self.schema.color_output(output), self.expected) def test_string_m(self): """ Test string -m """ self.data_in = '"string with\\nnewline char"' self.expected = '. = "\\"string with\\\\nnewline char\\"";' self.assertEqual(self.schema.create_schema(self.data_in), self.expected) # # Naked Dict # def test_dict(self): """ Test self.dict_sample """ self.data_in = self.dict_sample self.expected = '.\x1b[34;01mstring\x1b[39;00m = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m;\n.\x1b[90mtrue\x1b[39m = \x1b[90mtrue\x1b[39m;\n.\x1b[90mfalse\x1b[39m = \x1b[90mfalse\x1b[39m;\n.\x1b[90mnull\x1b[39m = \x1b[90mnull\x1b[39m;\n.\x1b[90mint\x1b[39m = \x1b[35m42\x1b[39m;\n.\x1b[90mfloat\x1b[39m = \x1b[35m3.14\x1b[39m;\n.\x1b[34;01marray\x1b[39;00m[\x1b[35m0\x1b[39m] = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m;\n.\x1b[34;01marray\x1b[39;00m[\x1b[35m1\x1b[39m] = \x1b[90mtrue\x1b[39m;\n.\x1b[34;01marray\x1b[39;00m[\x1b[35m2\x1b[39m] = \x1b[90mfalse\x1b[39m;\n.\x1b[34;01marray\x1b[39;00m[\x1b[35m3\x1b[39m] = \x1b[90mnull\x1b[39m;\n.\x1b[34;01marray\x1b[39;00m[\x1b[35m4\x1b[39m] = \x1b[35m42\x1b[39m;\n.\x1b[34;01marray\x1b[39;00m[\x1b[35m5\x1b[39m] = \x1b[35m3.14\x1b[39m;' output = self.schema.create_schema(self.data_in) self.assertEqual(self.schema.color_output(output), self.expected) def test_dict_t(self): """ Test self.dict_sample -t """ opts.types = True self.data_in = self.dict_sample self.expected = '.\x1b[34;01mstring\x1b[39;00m = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m; // (string)\n.\x1b[90mtrue\x1b[39m = \x1b[90mtrue\x1b[39m; // (boolean)\n.\x1b[90mfalse\x1b[39m = \x1b[90mfalse\x1b[39m; // (boolean)\n.\x1b[90mnull\x1b[39m = \x1b[90mnull\x1b[39m; // (null)\n.\x1b[90mint\x1b[39m = \x1b[35m42\x1b[39m; // (number)\n.\x1b[90mfloat\x1b[39m = \x1b[35m3.14\x1b[39m; // (number)\n.\x1b[34;01marray\x1b[39;00m[\x1b[35m0\x1b[39m] = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m; // (string)\n.\x1b[34;01marray\x1b[39;00m[\x1b[35m1\x1b[39m] = \x1b[90mtrue\x1b[39m; // (boolean)\n.\x1b[34;01marray\x1b[39;00m[\x1b[35m2\x1b[39m] = \x1b[90mfalse\x1b[39m; // (boolean)\n.\x1b[34;01marray\x1b[39;00m[\x1b[35m3\x1b[39m] = \x1b[90mnull\x1b[39m; // (null)\n.\x1b[34;01marray\x1b[39;00m[\x1b[35m4\x1b[39m] = \x1b[35m42\x1b[39m; // (number)\n.\x1b[34;01marray\x1b[39;00m[\x1b[35m5\x1b[39m] = \x1b[35m3.14\x1b[39m; // (number)' output = self.schema.create_schema(self.data_in) self.assertEqual(self.schema.color_output(output), self.expected) def test_dict_m(self): """ Test self.dict_sample -m """ self.data_in = self.dict_sample self.expected = '.string = "string\\nwith newline\\ncharacters in it";\n.true = true;\n.false = false;\n.null = null;\n.int = 42;\n.float = 3.14;\n.array[0] = "string\\nwith newline\\ncharacters in it";\n.array[1] = true;\n.array[2] = false;\n.array[3] = null;\n.array[4] = 42;\n.array[5] = 3.14;' self.assertEqual(self.schema.create_schema(self.data_in), self.expected) def test_dict_mt(self): """ Test 
self.dict_sample -mt """ opts.types = True self.data_in = self.dict_sample self.expected = '.string = "string\\nwith newline\\ncharacters in it"; // (string)\n.true = true; // (boolean)\n.false = false; // (boolean)\n.null = null; // (null)\n.int = 42; // (number)\n.float = 3.14; // (number)\n.array[0] = "string\\nwith newline\\ncharacters in it"; // (string)\n.array[1] = true; // (boolean)\n.array[2] = false; // (boolean)\n.array[3] = null; // (null)\n.array[4] = 42; // (number)\n.array[5] = 3.14; // (number)' self.assertEqual(self.schema.create_schema(self.data_in), self.expected) # # true in a list # def test_list_true(self): """ Test [True] """ self.data_in = [True] self.expected = '.[\x1b[35m0\x1b[39m] = \x1b[90mtrue\x1b[39m;' output = self.schema.create_schema(self.data_in) self.assertEqual(self.schema.color_output(output), self.expected) def test_list_true_m(self): """ Test [True] -m """ self.data_in = [True] self.expected = '.[0] = true;' self.assertEqual(self.schema.create_schema(self.data_in), self.expected) # # false in a list # def test_list_false(self): """ Test [False] """ self.data_in = [False] self.expected = '.[\x1b[35m0\x1b[39m] = \x1b[90mfalse\x1b[39m;' output = self.schema.create_schema(self.data_in) self.assertEqual(self.schema.color_output(output), self.expected) def test_list_false_m(self): """ Test [False] -m """ self.data_in = [False] self.expected = '.[0] = false;' self.assertEqual(self.schema.create_schema(self.data_in), self.expected) # # null in a list # def test_list_null(self): """ Test [None] """ self.data_in = [None] self.expected = '.[\x1b[35m0\x1b[39m] = \x1b[90mnull\x1b[39m;' output = self.schema.create_schema(self.data_in) self.assertEqual(self.schema.color_output(output), self.expected) def test_list_null_m(self): """ Test [None] -m """ self.data_in = [None] self.expected = '.[0] = null;' self.assertEqual(self.schema.create_schema(self.data_in), self.expected) # # Int in a list # def test_list_int(self): """ Test [42] """ self.data_in = [42] self.expected = '.[\x1b[35m0\x1b[39m] = \x1b[35m42\x1b[39m;' output = self.schema.create_schema(self.data_in) self.assertEqual(self.schema.color_output(output), self.expected) def test_list_int_m(self): """ Test [42] -m """ self.data_in = [42] self.expected = '.[0] = 42;' self.assertEqual(self.schema.create_schema(self.data_in), self.expected) # # Float in a list # def test_list_float(self): """ Test [3.14] """ self.data_in = [3.14] self.expected = '.[\x1b[35m0\x1b[39m] = \x1b[35m3.14\x1b[39m;' output = self.schema.create_schema(self.data_in) self.assertEqual(self.schema.color_output(output), self.expected) def test_list_float_m(self): """ Test [3.14] -m """ self.data_in = [3.14] self.expected = '.[0] = 3.14;' self.assertEqual(self.schema.create_schema(self.data_in), self.expected) # # String in a list # def test_list_str(self): """ Test ['string with spaces\nand newline\ncharacters'] """ self.data_in = ['string with spaces\nand newline\ncharacters'] self.expected = '.[\x1b[35m0\x1b[39m] = \x1b[32m"string with spaces\\nand newline\\ncharacters"\x1b[39m;' output = self.schema.create_schema(self.data_in) self.assertEqual(self.schema.color_output(output), self.expected) def test_list_str_m(self): """ Test ['string with spaces\nand newline\ncharacters'] -m """ self.data_in = ['string with spaces\nand newline\ncharacters'] self.expected = '.[0] = "string with spaces\\nand newline\\ncharacters";' self.assertEqual(self.schema.create_schema(self.data_in), self.expected) # # List with different types of elements # def 
test_list_sample(self): """ Test self.list_sample """ self.data_in = self.list_sample self.expected = '.[\x1b[35m0\x1b[39m] = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m;\n.[\x1b[35m1\x1b[39m] = \x1b[90mtrue\x1b[39m;\n.[\x1b[35m2\x1b[39m] = \x1b[90mfalse\x1b[39m;\n.[\x1b[35m3\x1b[39m] = \x1b[90mnull\x1b[39m;\n.[\x1b[35m4\x1b[39m] = \x1b[35m42\x1b[39m;\n.[\x1b[35m5\x1b[39m] = \x1b[35m3.14\x1b[39m;' output = self.schema.create_schema(self.data_in) self.assertEqual(self.schema.color_output(output), self.expected) def test_list_sample_m(self): """ Test self.list_sample -m """ self.data_in = self.list_sample self.expected = '.[0] = "string\\nwith newline\\ncharacters in it";\n.[1] = true;\n.[2] = false;\n.[3] = null;\n.[4] = 42;\n.[5] = 3.14;' self.assertEqual(self.schema.create_schema(self.data_in), self.expected) # # Dicts in a list # def test_list_dict(self): """ Test self.list_of_dicts_sample """ self.data_in = self.list_of_dicts_sample self.expected = '.[\x1b[35m0\x1b[39m].\x1b[34;01mstring\x1b[39;00m = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m;\n.[\x1b[35m0\x1b[39m].\x1b[90mtrue\x1b[39m = \x1b[90mtrue\x1b[39m;\n.[\x1b[35m0\x1b[39m].\x1b[90mfalse\x1b[39m = \x1b[90mfalse\x1b[39m;\n.[\x1b[35m0\x1b[39m].\x1b[90mnull\x1b[39m = \x1b[90mnull\x1b[39m;\n.[\x1b[35m0\x1b[39m].\x1b[90mint\x1b[39m = \x1b[35m42\x1b[39m;\n.[\x1b[35m0\x1b[39m].\x1b[90mfloat\x1b[39m = \x1b[35m3.14\x1b[39m;\n.[\x1b[35m0\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m0\x1b[39m] = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m;\n.[\x1b[35m0\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m1\x1b[39m] = \x1b[90mtrue\x1b[39m;\n.[\x1b[35m0\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m2\x1b[39m] = \x1b[90mfalse\x1b[39m;\n.[\x1b[35m0\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m3\x1b[39m] = \x1b[90mnull\x1b[39m;\n.[\x1b[35m0\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m4\x1b[39m] = \x1b[35m42\x1b[39m;\n.[\x1b[35m0\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m5\x1b[39m] = \x1b[35m3.14\x1b[39m;\n.[\x1b[35m1\x1b[39m].\x1b[34;01mstring\x1b[39;00m = \x1b[32m"another string\\nwith newline\\ncharacters in it"\x1b[39m;\n.[\x1b[35m1\x1b[39m].\x1b[90mtrue\x1b[39m = \x1b[90mtrue\x1b[39m;\n.[\x1b[35m1\x1b[39m].\x1b[90mfalse\x1b[39m = \x1b[90mfalse\x1b[39m;\n.[\x1b[35m1\x1b[39m].\x1b[90mnull\x1b[39m = \x1b[90mnull\x1b[39m;\n.[\x1b[35m1\x1b[39m].\x1b[90mint\x1b[39m = \x1b[35m10001\x1b[39m;\n.[\x1b[35m1\x1b[39m].\x1b[90mfloat\x1b[39m = -\x1b[35m400.45\x1b[39m;\n.[\x1b[35m1\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m0\x1b[39m] = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m;\n.[\x1b[35m1\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m1\x1b[39m] = \x1b[90mtrue\x1b[39m;\n.[\x1b[35m1\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m2\x1b[39m] = \x1b[90mfalse\x1b[39m;\n.[\x1b[35m1\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m3\x1b[39m] = \x1b[90mnull\x1b[39m;\n.[\x1b[35m1\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m4\x1b[39m] = -\x1b[35m6000034\x1b[39m;\n.[\x1b[35m1\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m5\x1b[39m] = \x1b[35m999999.854321\x1b[39m;' output = self.schema.create_schema(self.data_in) self.assertEqual(self.schema.color_output(output), self.expected) def test_list_dict_m(self): """ Test self.list_of_dicts_sample -m """ self.data_in = self.list_of_dicts_sample self.expected = '.[0].string = "string\\nwith newline\\ncharacters in it";\n.[0].true = true;\n.[0].false = false;\n.[0].null = null;\n.[0].int = 42;\n.[0].float = 3.14;\n.[0].array[0] = "string\\nwith newline\\ncharacters in 
it";\n.[0].array[1] = true;\n.[0].array[2] = false;\n.[0].array[3] = null;\n.[0].array[4] = 42;\n.[0].array[5] = 3.14;\n.[1].string = "another string\\nwith newline\\ncharacters in it";\n.[1].true = true;\n.[1].false = false;\n.[1].null = null;\n.[1].int = 10001;\n.[1].float = -400.45;\n.[1].array[0] = "string\\nwith newline\\ncharacters in it";\n.[1].array[1] = true;\n.[1].array[2] = false;\n.[1].array[3] = null;\n.[1].array[4] = -6000034;\n.[1].array[5] = 999999.854321;' self.assertEqual(self.schema.create_schema(self.data_in), self.expected) # # lists in list # def test_list_list(self): """ Test self.list_of_lists_sample """ self.data_in = self.list_of_lists_sample self.expected = '.[\x1b[35m0\x1b[39m].[\x1b[32m\'string\\nwith newline\\ncharacters in it\'\x1b[39m, \x1b[34;01mTrue\x1b[39;00m, \x1b[34;01mFalse\x1b[39;00m, \x1b[34;01mNone\x1b[39;00m, \x1b[35m42\x1b[39m, \x1b[35m3.14\x1b[39m][\x1b[35m0\x1b[39m] = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m;\n.[\x1b[35m0\x1b[39m].[\x1b[32m\'string\\nwith newline\\ncharacters in it\'\x1b[39m, \x1b[34;01mTrue\x1b[39;00m, \x1b[34;01mFalse\x1b[39;00m, \x1b[34;01mNone\x1b[39;00m, \x1b[35m42\x1b[39m, \x1b[35m3.14\x1b[39m][\x1b[35m1\x1b[39m] = \x1b[90mtrue\x1b[39m;\n.[\x1b[35m0\x1b[39m].[\x1b[32m\'string\\nwith newline\\ncharacters in it\'\x1b[39m, \x1b[34;01mTrue\x1b[39;00m, \x1b[34;01mFalse\x1b[39;00m, \x1b[34;01mNone\x1b[39;00m, \x1b[35m42\x1b[39m, \x1b[35m3.14\x1b[39m][\x1b[35m2\x1b[39m] = \x1b[90mfalse\x1b[39m;\n.[\x1b[35m0\x1b[39m].[\x1b[32m\'string\\nwith newline\\ncharacters in it\'\x1b[39m, \x1b[34;01mTrue\x1b[39;00m, \x1b[34;01mFalse\x1b[39;00m, \x1b[34;01mNone\x1b[39;00m, \x1b[35m42\x1b[39m, \x1b[35m3.14\x1b[39m][\x1b[35m3\x1b[39m] = \x1b[90mnull\x1b[39m;\n.[\x1b[35m0\x1b[39m].[\x1b[32m\'string\\nwith newline\\ncharacters in it\'\x1b[39m, \x1b[34;01mTrue\x1b[39;00m, \x1b[34;01mFalse\x1b[39;00m, \x1b[34;01mNone\x1b[39;00m, \x1b[35m42\x1b[39m, \x1b[35m3.14\x1b[39m][\x1b[35m4\x1b[39m] = \x1b[35m42\x1b[39m;\n.[\x1b[35m0\x1b[39m].[\x1b[32m\'string\\nwith newline\\ncharacters in it\'\x1b[39m, \x1b[34;01mTrue\x1b[39;00m, \x1b[34;01mFalse\x1b[39;00m, \x1b[34;01mNone\x1b[39;00m, \x1b[35m42\x1b[39m, \x1b[35m3.14\x1b[39m][\x1b[35m5\x1b[39m] = \x1b[35m3.14\x1b[39m;\n.[\x1b[35m1\x1b[39m].[\x1b[32m\'another string\\nwith newline\\ncharacters in it\'\x1b[39m, \x1b[34;01mTrue\x1b[39;00m, \x1b[34;01mFalse\x1b[39;00m, \x1b[34;01mNone\x1b[39;00m, \x1b[35m42001\x1b[39m, -\x1b[35m3.14\x1b[39m][\x1b[35m0\x1b[39m] = \x1b[32m"another string\\nwith newline\\ncharacters in it"\x1b[39m;\n.[\x1b[35m1\x1b[39m].[\x1b[32m\'another string\\nwith newline\\ncharacters in it\'\x1b[39m, \x1b[34;01mTrue\x1b[39;00m, \x1b[34;01mFalse\x1b[39;00m, \x1b[34;01mNone\x1b[39;00m, \x1b[35m42001\x1b[39m, -\x1b[35m3.14\x1b[39m][\x1b[35m1\x1b[39m] = \x1b[90mtrue\x1b[39m;\n.[\x1b[35m1\x1b[39m].[\x1b[32m\'another string\\nwith newline\\ncharacters in it\'\x1b[39m, \x1b[34;01mTrue\x1b[39;00m, \x1b[34;01mFalse\x1b[39;00m, \x1b[34;01mNone\x1b[39;00m, \x1b[35m42001\x1b[39m, -\x1b[35m3.14\x1b[39m][\x1b[35m2\x1b[39m] = \x1b[90mfalse\x1b[39m;\n.[\x1b[35m1\x1b[39m].[\x1b[32m\'another string\\nwith newline\\ncharacters in it\'\x1b[39m, \x1b[34;01mTrue\x1b[39;00m, \x1b[34;01mFalse\x1b[39;00m, \x1b[34;01mNone\x1b[39;00m, \x1b[35m42001\x1b[39m, -\x1b[35m3.14\x1b[39m][\x1b[35m3\x1b[39m] = \x1b[90mnull\x1b[39m;\n.[\x1b[35m1\x1b[39m].[\x1b[32m\'another string\\nwith newline\\ncharacters in it\'\x1b[39m, \x1b[34;01mTrue\x1b[39;00m, \x1b[34;01mFalse\x1b[39;00m, \x1b[34;01mNone\x1b[39;00m, 
\x1b[35m42001\x1b[39m, -\x1b[35m3.14\x1b[39m][\x1b[35m4\x1b[39m] = \x1b[35m42001\x1b[39m;\n.[\x1b[35m1\x1b[39m].[\x1b[32m\'another string\\nwith newline\\ncharacters in it\'\x1b[39m, \x1b[34;01mTrue\x1b[39;00m, \x1b[34;01mFalse\x1b[39;00m, \x1b[34;01mNone\x1b[39;00m, \x1b[35m42001\x1b[39m, -\x1b[35m3.14\x1b[39m][\x1b[35m5\x1b[39m] = -\x1b[35m3.14\x1b[39m;' output = self.schema.create_schema(self.data_in) self.assertEqual(self.schema.color_output(output), self.expected) def test_list_list_m(self): """ Test self.list_of_lists_sample -m """ self.data_in = self.list_of_lists_sample self.expected = '.[0].[\'string\\nwith newline\\ncharacters in it\', True, False, None, 42, 3.14][0] = "string\\nwith newline\\ncharacters in it";\n.[0].[\'string\\nwith newline\\ncharacters in it\', True, False, None, 42, 3.14][1] = true;\n.[0].[\'string\\nwith newline\\ncharacters in it\', True, False, None, 42, 3.14][2] = false;\n.[0].[\'string\\nwith newline\\ncharacters in it\', True, False, None, 42, 3.14][3] = null;\n.[0].[\'string\\nwith newline\\ncharacters in it\', True, False, None, 42, 3.14][4] = 42;\n.[0].[\'string\\nwith newline\\ncharacters in it\', True, False, None, 42, 3.14][5] = 3.14;\n.[1].[\'another string\\nwith newline\\ncharacters in it\', True, False, None, 42001, -3.14][0] = "another string\\nwith newline\\ncharacters in it";\n.[1].[\'another string\\nwith newline\\ncharacters in it\', True, False, None, 42001, -3.14][1] = true;\n.[1].[\'another string\\nwith newline\\ncharacters in it\', True, False, None, 42001, -3.14][2] = false;\n.[1].[\'another string\\nwith newline\\ncharacters in it\', True, False, None, 42001, -3.14][3] = null;\n.[1].[\'another string\\nwith newline\\ncharacters in it\', True, False, None, 42001, -3.14][4] = 42001;\n.[1].[\'another string\\nwith newline\\ncharacters in it\', True, False, None, 42001, -3.14][5] = -3.14;' self.assertEqual(self.schema.create_schema(self.data_in), self.expected) if __name__ == '__main__': unittest.main()
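A compact sketch of the Schema API these tests exercise, inferred from the monochrome (-m) cases above; the exact output string is an assumption extrapolated from the '.int = 42;' assertions:

from jello.lib import Schema

schema = Schema()
# With no color options set, create_schema returns plain schema lines:
print(schema.create_schema({'answer': 42}))   # expected: .answer = 42;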
50.781513
3,242
0.566482
3,355
24,172
4.006855
0.038152
0.0973
0.093729
0.052369
0.890575
0.869151
0.856133
0.834114
0.805252
0.763818
0
0.141051
0.253847
24,172
475
3,243
50.888421
0.604291
0.0453
0
0.533333
0
0.133333
0.516165
0.278895
0
0
0
0
0.133333
1
0.137255
false
0
0.011765
0
0.152941
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
aab93d15d743dda6a0c3d059b81b5e3ecde8a663
113
py
Python
pfdo_mgz2image/__init__.py
arushivyas/pfdo_mgz2image
9b6158dab4029842578e782bd5347a44f4dd50c3
[ "Apache-2.0" ]
null
null
null
pfdo_mgz2image/__init__.py
arushivyas/pfdo_mgz2image
9b6158dab4029842578e782bd5347a44f4dd50c3
[ "Apache-2.0" ]
null
null
null
pfdo_mgz2image/__init__.py
arushivyas/pfdo_mgz2image
9b6158dab4029842578e782bd5347a44f4dd50c3
[ "Apache-2.0" ]
1
2020-11-12T21:41:52.000Z
2020-11-12T21:41:52.000Z
# Support both package-relative and direct (script-style) imports
try:
    from .pfdo_mgz2image import pfdo_mgz2image
except ImportError:
    from pfdo_mgz2image import pfdo_mgz2image
22.6
49
0.752212
14
113
5.785714
0.428571
0.641975
0.419753
0.567901
0.888889
0.888889
0
0
0
0
0
0.045455
0.221239
113
4
50
28.25
0.875
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
1
1
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
9
2ab7678ec6616bee0b974815bd746e17afca7958
1,531
py
Python
IO.py
DdOtzen/espCarStuff
eab9573408e321661b5aa59a4bfdfbb8d7d55874
[ "MIT" ]
null
null
null
IO.py
DdOtzen/espCarStuff
eab9573408e321661b5aa59a4bfdfbb8d7d55874
[ "MIT" ]
null
null
null
IO.py
DdOtzen/espCarStuff
eab9573408e321661b5aa59a4bfdfbb8d7d55874
[ "MIT" ]
null
null
null
from machine import Pin
import time

led = Pin(2, Pin.OUT)

# Blink the on-board LED at 1 Hz: the original file repeats this
# four-line on/off cycle verbatim 24 times; a loop expresses the
# same behavior without the duplication.
for _ in range(24):
    led.on()
    time.sleep_ms(500)
    led.off()
    time.sleep_ms(500)
14.864078
24
0.670803
299
1,531
3.274247
0.043478
0.441267
0.539326
0.686415
0.956078
0.956078
0.956078
0.956078
0.956078
0.956078
0
0.109848
0.137818
1,531
102
25
15.009804
0.631818
0
0
0.969697
0
0
0
0
0
0
0
0
0
1
0
false
0
0.020202
0
0.020202
0
0
0
0
null
1
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
10
2ade255310d00510a10ee488a26f84704d4e02cc
31,486
py
Python
prep_input.py
Arif-PhyChem/AIQD_FMO
00e153b19dd96cc672372c8df704586ff88d1825
[ "MIT" ]
null
null
null
prep_input.py
Arif-PhyChem/AIQD_FMO
00e153b19dd96cc672372c8df704586ff88d1825
[ "MIT" ]
null
null
null
prep_input.py
Arif-PhyChem/AIQD_FMO
00e153b19dd96cc672372c8df704586ff88d1825
[ "MIT" ]
null
null
null
import pandas as pd import numpy as np import glob import os import re # path to the directory where the training trajectories are stored all_files = {} j = 0 datapath="/mnt/partition-2/data/quantum_HEOM/FMO/fmo_data/training_data" for files in glob.glob(datapath+'/*.np[yz]'): file_name = os.path.basename(files) all_files[file_name] = np.load(files) j = j + 1 file_count = j print("number of files = ", file_count) # create empty list gamma = np.zeros((file_count), dtype=float) lamb = np.zeros((file_count), dtype=float) temp = np.zeros((file_count), dtype=float) initial = np.zeros((file_count), dtype=int) j = 0 for files in glob.glob(datapath+'/*.np[yz]'): # # extract the values of gamma, lambda and temperature from the file name # file_name = os.path.basename(files) x = re.split(r'_', file_name) y = re.split(r'-', x[1]) initial[j] = y[1] y = re.split(r'-', x[2]) # extracting value of gamma gamma[j] = y[1] y = re.split(r'-', x[3]) # extract value of lambda lamb[j] = y[1] y = re.split(r'-', x[4]) x = re.split(r'.npy', y[1]) # extract value of temperature temp[j] = x[0] j = j + 1 # # Define logistic function # def logistic(x,c): a=1 b=15 d=1 f= a/(1 + b * np.exp(-(x-c)/d)) return f # We have seven sites and as a label we use ... # values between 0 and 1 # states = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7] # # Normalize values range of gamma, lambda and temperature ... # by dividing each value by the maximum value in that range # j=0 for i in lamb: lamb[j] = i/310 j=j+1 j=0 for i in gamma: gamma[j] = i/300 j=j+1 j=0 for i in temp: temp[j] = i/310 j=j+1 tmp_M = np.zeros((file_count,len(states)), dtype=int) t_M = np.zeros((file_count), dtype=int) # # The gradient comparison scheme # threshold = 1*10**(-10) print("threshold = ", threshold) labels = [1, 9, 17, 25, 33, 41, 49] i = 0 for files in glob.glob(datapath+'/*.np[yz]'): file_name = os.path.basename(files) df = all_files[file_name] k = 0 for lab in labels: site_data = df[:,lab].real for j in range(0, len(site_data)-10): a1 = (site_data[j+1]-site_data[j])/0.005 a2 = (site_data[j+2]-site_data[j+1])/0.005 a3 = (site_data[j+3]-site_data[j+2])/0.005 a4 = (site_data[j+4]-site_data[j+3])/0.005 a5 = (site_data[j+5]-site_data[j+4])/0.005 a6 = (site_data[j+6]-site_data[j+5])/0.005 a7 = (site_data[j+7]-site_data[j+6])/0.005 a8 = (site_data[j+8]-site_data[j+7])/0.005 a9 = (site_data[j+9]-site_data[j+8])/0.005 a10 =(site_data[j+10]-site_data[j+9])/0.005 if abs(a1) < threshold and abs(a2) < threshold: if abs(a3) < threshold and abs(a4) < threshold: if abs(a5) < threshold and abs(a6) < threshold: if abs(a7) < threshold and abs(a8) < threshold: if abs(a9) < threshold and abs(a10) < threshold: tmp_M[i,k] = j # store the t_M value if all the a1--10 values were less than the threshold k = k + 1 break t_M[i] = np.max(tmp_M[i,:]) i = i + 1 # # find the total number of training points # m = 0 f = 0 for files in glob.glob(datapath+'/*.np[yz]'): file_name = os.path.basename(files) print(file_name, t_M[f]) # print trajectory and the corresponding time-length for it for lab in labels: tt_M = t_M[f] if tt_M <= 201: for i in range(0,tt_M): m = m + 1 if tt_M > 201 and tt_M <= 301: for i in range(0,201): m = m + 1 for i in range(202,tt_M, 2): m = m + 1 if tt_M > 301 and tt_M <= 501: for i in range(0,201): m = m + 1 for i in range(202,301, 2): m = m + 1 for i in range(305,tt_M, 5): m = m + 1 if tt_M > 501 and tt_M <= 1001: for i in range(0,201): m = m + 1 for i in range(202,301, 2): m = m + 1 for i in range(305,501, 5): m = m + 1 for i in range(510,tt_M, 10): m = m + 1 if tt_M > 
1001 and tt_M <= 5001: for i in range(0,201): m = m + 1 for i in range(202,301, 2): m = m + 1 for i in range(305,501, 5): m = m + 1 for i in range(510,1001, 10): m = m + 1 for i in range(1020,tt_M, 20): m = m + 1 if tt_M > 5001 and tt_M <= 10001: for i in range(0,201): m = m + 1 for i in range(202,301, 2): m = m + 1 for i in range(305,501, 5): m = m + 1 for i in range(510,1001, 10): m = m + 1 for i in range(1020,5001, 20): m = m + 1 for i in range(5040,tt_M, 40): m = m + 1 if tt_M > 10001 and tt_M <= 50001: for i in range(0,201): m = m + 1 for i in range(202,301, 2): m = m + 1 for i in range(305,501, 5): m = m + 1 for i in range(510,1001, 10): m = m + 1 for i in range(1020,5001, 20): m = m + 1 for i in range(5040,10001, 40): m = m + 1 for i in range(10100,tt_M, 100): m = m + 1 if tt_M > 50001: for i in range(0,201): m = m + 1 for i in range(202,301, 2): m = m + 1 for i in range(305,501, 5): m = m + 1 for i in range(510,1001, 10): m = m + 1 for i in range(1020,5001, 20): m = m + 1 for i in range(5040,10001, 40): m = m + 1 for i in range(10100,50001, 100): m = m + 1 for i in range(50200,tt_M, 200): m = m + 1 f = f + 1 m = m + (7*file_count) red_times = 100 # m is the total number of training points t = np.arange(0,100001)*0.005 # 500ps tt = np.zeros((len(t), red_times), dtype=float) x = np.zeros((m, 5+red_times), dtype=float) y = np.zeros((m,13), dtype=float) # # normalizing the time feature using logistic function # u=0 for i in t: c = -1.0 for j in range(0, red_times): tt[u,j]=logistic(i,c) c = c + 5 u=u + 1 m=0 f = 0 for files in glob.glob(datapath+'/*.np[yz]'): file_name = os.path.basename(files) df = all_files[file_name] s = 0 l = 0 if initial[f] == 1: init_index = 0 # use 0 as a label for initial excitation on site-1 else: init_index = 1 # use 1 as a label for initial excitation on site-6 for lab in range(0, 7): site = df[:, 1+l:8+l] tt_M = t_M[f] if tt_M <= 201: for i in range(0,tt_M): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 if tt_M > 201 and tt_M <= 301: for i in range(0,201): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(202,tt_M, 2): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 if tt_M > 301 and tt_M <= 501: for i in range(0,201): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(202,301, 2): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(305,tt_M, 5): x[m,0] = init_index x[m,1] = states[s] 
x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 if tt_M > 501 and tt_M <= 1001: for i in range(0,201): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(202,301, 2): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(305,501, 5): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(510,tt_M, 10): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 if tt_M > 1001 and tt_M <= 5001: for i in range(0,201): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(202,301, 2): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(305,501, 5): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(510,1001, 10): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(1020,tt_M, 20): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 if tt_M > 5001 and tt_M <= 10001: for i in range(0,201): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(202,301, 2): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 
for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(305,501, 5): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(510,1001, 10): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(1020,5001, 20): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(5040,tt_M, 40): # 200fs x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 if tt_M > 10001 and tt_M <= 50001: for i in range(0,201): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(202,301, 2): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(305,501, 5): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(510,1001, 10): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(1020,5001, 20): x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(5040,10001, 40): # 200fs x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(10100,tt_M, 100): #500fs x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = 
q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 if tt_M > 50001: for i in range(0,201): #5fs x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(202,301, 2): #10fs x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(305,501, 5): #25fs x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(510,1001, 10): #50fs x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(1020,5001, 20): #100fs x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(5040,10001, 40): # 200fs x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(10100,50001, 100): #500fs x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 for i in range(50200,tt_M, 200): # 1ps x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = tt[i,:] q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 m = m + 1 x[m,0] = init_index x[m,1] = states[s] x[m,2] = gamma[f] x[m,3] = lamb[f] x[m,4] = temp[f] x[m,5:x.shape[1]] = 1.0 q = 0 for p in range(0, 7): if p == s: y[m, q] = site[i,s].real q = q + 1 else: y[m, q] = site[i,p].real q = q + 1 y[m, q] = site[i,p].imag q = q + 1 l = l + 7 m = m + 1 s = s + 1 f = f + 1 filex = "x.npy" filey = "y.npy" np.save(filex, x) # the input is saved in x_10.dat np.save(filey, y) # the target values are saved in y_10.dat
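The script's time feature is a bank of shifted logistic curves; a self-contained sketch of just that normalization step, with the constants (a=1, b=15, d=1, c starting at -1.0 in steps of 5) copied from the code above and the per-element loop vectorized:

import numpy as np

def logistic(x, c, a=1, b=15, d=1):
    return a / (1 + b * np.exp(-(x - c) / d))

t = np.arange(0, 100001) * 0.005        # time grid (500 ps in 0.005 steps)
red_times = 100
tt = np.zeros((len(t), red_times))
for j in range(red_times):
    tt[:, j] = logistic(t, c=-1.0 + 5 * j)   # one shifted curve per column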
34.714443
124
0.300483
4,449
31,486
2.093055
0.046303
0.04768
0.03576
0.083441
0.801439
0.789197
0.764927
0.764927
0.751074
0.747745
0
0.102434
0.566855
31,486
906
125
34.752759
0.580364
0.031157
0
0.89306
0
0
0.005089
0.002003
0
0
0
0
0
1
0.001138
false
0
0.005688
0
0.007964
0.003413
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
1
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
6315752120c343354d087b001195c4f890db74ef
5,344
py
Python
tests/end_to_end/cli/test_create.py
scherroman/mvgen
0e79079b3fb71e94c67d48fd5599b6c43602d3b5
[ "MIT" ]
9
2016-11-28T00:54:57.000Z
2016-12-22T21:21:17.000Z
tests/end_to_end/cli/test_create.py
scherroman/mvgen
0e79079b3fb71e94c67d48fd5599b6c43602d3b5
[ "MIT" ]
null
null
null
tests/end_to_end/cli/test_create.py
scherroman/mvgen
0e79079b3fb71e94c67d48fd5599b6c43602d3b5
[ "MIT" ]
null
null
null
import os
import subprocess
from subprocess import CalledProcessError

import pytest

from mugen import Audio, MusicVideo, VideoSegment
from tests import (
    NO_BEAT_AUDIO_PATH,
    SPECIAL_CHARACTERS_VIDEO_PATH,
    TRACKING_SHOT_VIDEO_PATH,
    TWO_BEATS_AUDIO_PATH,
)


def test_create__creates_music_video_successfully(tmp_path):
    audio_path = TWO_BEATS_AUDIO_PATH

    try:
        subprocess.run(
            [
                "mugen",
                "--output-directory",
                tmp_path,
                "create",
                "--audio-source",
                audio_path,
                "--video-sources",
                TRACKING_SHOT_VIDEO_PATH,
                "--video-dimensions",
                "1500",
                "600",
                "--video-codec",
                "libx265",
            ],
            check=True,
            timeout=180,
            capture_output=True,
            text=True,
        )
    except CalledProcessError as error:
        print(error.stdout)
        print(error.stderr)
        raise error

    music_video_path_base = os.path.join(tmp_path, "music_video_0", "music_video_0")
    music_video_path = f"{music_video_path_base}.mkv"
    music_video_save_file_path = f"{music_video_path_base}.pickle"

    # Check that output files exist
    assert os.path.isfile(music_video_path)
    assert os.path.isfile(music_video_save_file_path)

    music_video_segment = VideoSegment(music_video_path)
    loaded_music_video = MusicVideo.load(music_video_save_file_path)
    audio = Audio(audio_path)

    # Check duration
    assert len(loaded_music_video.segments) == 3
    assert music_video_segment.duration == pytest.approx(audio.duration, 0.1)
    assert loaded_music_video.duration == pytest.approx(audio.duration, 0.1)
    assert music_video_segment.duration == pytest.approx(
        loaded_music_video.duration, 0.1
    )

    # Check dimensions and codec
    assert music_video_segment.video_stream["width"] == 1500
    assert music_video_segment.video_stream["height"] == 600
    assert music_video_segment.video_stream["codec_name"] == "hevc"

    # Check video, audio and subtitle tracks
    assert len(music_video_segment.video_streams) == 1
    assert len(music_video_segment.audio_streams) == 1
    assert len(music_video_segment.subtitle_streams) == 1
    assert len(music_video_segment.streams) == 3
    assert music_video_segment.subtitle_streams[0]["tags"]["title"] == "events"
    assert len(music_video_segment.get_subtitle_stream_content(0)) > 0


def test_create__works_with_files_with_special_characters(tmp_path):
    try:
        subprocess.run(
            [
                "mugen",
                "--output-directory",
                tmp_path,
                "create",
                "--audio-source",
                NO_BEAT_AUDIO_PATH,
                "--video-sources",
                SPECIAL_CHARACTERS_VIDEO_PATH,
                "--exclude-video-filters",
                "not_is_repeat",
            ],
            check=True,
            timeout=180,
            capture_output=True,
            text=True,
        )
    except CalledProcessError as error:
        print(error.stdout)
        print(error.stderr)
        raise error

    music_video_path_base = os.path.join(tmp_path, "music_video_0", "music_video_0")
    music_video_path = f"{music_video_path_base}.mkv"
    music_video_save_file_path = f"{music_video_path_base}.pickle"

    # Check that output files exist
    assert os.path.isfile(music_video_path)
    assert os.path.isfile(music_video_save_file_path)


def test_create__preserves_original_audio_when_option_is_passed(tmp_path):
    audio_path = TWO_BEATS_AUDIO_PATH

    try:
        subprocess.run(
            [
                "mugen",
                "--output-directory",
                tmp_path,
                "create",
                "--audio-source",
                audio_path,
                "--video-sources",
                TRACKING_SHOT_VIDEO_PATH,
                "--use-original-audio",
            ],
            check=True,
            timeout=180,
            capture_output=True,
            text=True,
        )
    except CalledProcessError as error:
        print(error.stdout)
        print(error.stderr)
        raise error

    music_video_path_base = os.path.join(tmp_path, "music_video_0", "music_video_0")
    music_video_path = f"{music_video_path_base}.mkv"
    music_video_save_file_path = f"{music_video_path_base}.pickle"

    # Check that output files exist
    assert os.path.isfile(music_video_path)
    assert os.path.isfile(music_video_save_file_path)

    music_video_segment = VideoSegment(music_video_path)
    loaded_music_video = MusicVideo.load(music_video_save_file_path)

    # Check duration
    assert len(loaded_music_video.segments) == 3
    assert music_video_segment.duration == pytest.approx(
        loaded_music_video.duration, 0.1
    )

    # Check video, audio and subtitle tracks
    assert len(music_video_segment.video_streams) == 1
    assert len(music_video_segment.audio_streams) == 1
    assert len(music_video_segment.subtitle_streams) == 1
    assert len(music_video_segment.streams) == 3
    assert music_video_segment.subtitle_streams[0]["tags"]["title"] == "events"
    assert len(music_video_segment.get_subtitle_stream_content(0)) > 0
32.987654
84
0.639409
634
5,344
5.025237
0.159306
0.185185
0.106717
0.059636
0.826428
0.819837
0.787822
0.787822
0.770245
0.770245
0
0.014388
0.271707
5,344
161
85
33.192547
0.804214
0.041916
0
0.75
0
0
0.113285
0.037957
0
0
0
0
0.204545
1
0.022727
false
0.007576
0.045455
0
0.068182
0.045455
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
633191f35331ec6490c9ac5b84f716c5060d6a2d
176
py
Python
demosys/geometry/__init__.py
Contraz/demosys-py
0479e0f3b0a3901f601bffd2d11e155f97b47555
[ "0BSD" ]
70
2017-03-31T12:01:41.000Z
2022-01-05T06:30:57.000Z
demosys/geometry/__init__.py
Contraz/demosys-py
0479e0f3b0a3901f601bffd2d11e155f97b47555
[ "0BSD" ]
69
2017-06-18T22:37:46.000Z
2020-01-23T04:02:22.000Z
demosys/geometry/__init__.py
Contraz/demosys-py
0479e0f3b0a3901f601bffd2d11e155f97b47555
[ "0BSD" ]
9
2017-05-13T21:13:02.000Z
2020-10-01T18:09:49.000Z
from .cube import *  # noqa
from .points import *  # noqa
from .quad import *  # noqa
from .plane import *  # noqa
from .sphere import *  # noqa
from .bbox import bbox  # noqa
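# --- hedged usage note, not part of the original file ---
# These wildcard imports re-export the geometry helpers at the package level,
# so callers can write `from demosys import geometry` and reach everything
# through that one namespace (e.g. geometry.bbox). Only `bbox` is named
# explicitly above; the remaining helper names come from the starred modules.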
25.142857
30
0.664773
25
176
4.68
0.36
0.42735
0.598291
0
0
0
0
0
0
0
0
0
0.238636
176
6
31
29.333333
0.873134
0.164773
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
2da8d6d9b39d6a44489b315060cc0bb065255a9e
111
py
Python
example.py
dmsmiley/git_github_training
d2c8170ef592f618f6f11decd6094f574824883e
[ "Apache-2.0" ]
null
null
null
example.py
dmsmiley/git_github_training
d2c8170ef592f618f6f11decd6094f574824883e
[ "Apache-2.0" ]
1
2021-05-18T10:15:40.000Z
2021-05-18T10:15:40.000Z
example.py
dmsmiley/git_github_training
d2c8170ef592f618f6f11decd6094f574824883e
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python3


def add(a, b):
    return a + b


def subtract(a, b):
    return a - b


"""Add a multiply function"""
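# --- hedged sketch, not part of the original file ---
# The trailing docstring above is a TODO asking for a multiply function.
# A minimal version mirroring the style of add/subtract might look like this:
def multiply(a, b):
    return a * b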
11.1
29
0.630631
21
111
3.333333
0.52381
0.114286
0.228571
0.257143
0.285714
0
0
0
0
0
0
0.010989
0.18018
111
9
30
12.333333
0.758242
0.153153
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
7
2db1f2b6174e6c8a96c6cf7772b585b5ae2ec944
9,034
py
Python
test_ws/devel/lib/python2.7/dist-packages/test/srv/_display_image_new.py
ruslanjabari/factory_sim
f768bd170a9dea61a87d17ca9222ecf50b9369e7
[ "MIT" ]
null
null
null
test_ws/devel/lib/python2.7/dist-packages/test/srv/_display_image_new.py
ruslanjabari/factory_sim
f768bd170a9dea61a87d17ca9222ecf50b9369e7
[ "MIT" ]
null
null
null
test_ws/devel/lib/python2.7/dist-packages/test/srv/_display_image_new.py
ruslanjabari/factory_sim
f768bd170a9dea61a87d17ca9222ecf50b9369e7
[ "MIT" ]
1
2020-09-12T21:30:32.000Z
2020-09-12T21:30:32.000Z
# This Python file uses the following encoding: utf-8 """autogenerated by genpy from test/display_image_newRequest.msg. Do not edit.""" import sys python3 = True if sys.hexversion > 0x03000000 else False import genpy import struct class display_image_newRequest(genpy.Message): _md5sum = "1ca26e28adfc59a4ed4a4edca51b7548" _type = "test/display_image_newRequest" _has_header = False #flag to mark the presence of a Header object _full_text = """int32 width int32 height char[] data int32 format """ __slots__ = ['width','height','data','format'] _slot_types = ['int32','int32','char[]','int32'] def __init__(self, *args, **kwds): """ Constructor. Any message fields that are implicitly/explicitly set to None will be assigned a default value. The recommend use is keyword arguments as this is more robust to future message changes. You cannot mix in-order arguments and keyword arguments. The available fields are: width,height,data,format :param args: complete set of field values, in .msg order :param kwds: use keyword arguments corresponding to message field names to set specific fields. """ if args or kwds: super(display_image_newRequest, self).__init__(*args, **kwds) #message fields cannot be None, assign default values for those that are if self.width is None: self.width = 0 if self.height is None: self.height = 0 if self.data is None: self.data = b'' if self.format is None: self.format = 0 else: self.width = 0 self.height = 0 self.data = b'' self.format = 0 def _get_types(self): """ internal API method """ return self._slot_types def serialize(self, buff): """ serialize message into buffer :param buff: buffer, ``StringIO`` """ try: _x = self buff.write(_get_struct_2i().pack(_x.width, _x.height)) _x = self.data length = len(_x) # - if encoded as a list instead, serialize as bytes instead of string if type(_x) in [list, tuple]: buff.write(struct.pack('<I%sB'%length, length, *_x)) else: buff.write(struct.pack('<I%ss'%length, length, _x)) buff.write(_get_struct_i().pack(self.format)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize(self, str): """ unpack serialized message in str into this message instance :param str: byte array of serialized message, ``str`` """ try: end = 0 _x = self start = end end += 8 (_x.width, _x.height,) = _get_struct_2i().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length self.data = str[start:end] start = end end += 4 (self.format,) = _get_struct_i().unpack(str[start:end]) return self except struct.error as e: raise genpy.DeserializationError(e) #most likely buffer underfill def serialize_numpy(self, buff, numpy): """ serialize message with numpy array types into buffer :param buff: buffer, ``StringIO`` :param numpy: numpy python module """ try: _x = self buff.write(_get_struct_2i().pack(_x.width, _x.height)) _x = self.data length = len(_x) # - if encoded as a list instead, serialize as bytes instead of string if type(_x) in [list, tuple]: buff.write(struct.pack('<I%sB'%length, length, *_x)) else: buff.write(struct.pack('<I%ss'%length, length, _x)) buff.write(_get_struct_i().pack(self.format)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: 
self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize_numpy(self, str, numpy): """ unpack serialized message in str into this message instance using numpy for array types :param str: byte array of serialized message, ``str`` :param numpy: numpy python module """ try: end = 0 _x = self start = end end += 8 (_x.width, _x.height,) = _get_struct_2i().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length self.data = str[start:end] start = end end += 4 (self.format,) = _get_struct_i().unpack(str[start:end]) return self except struct.error as e: raise genpy.DeserializationError(e) #most likely buffer underfill _struct_I = genpy.struct_I def _get_struct_I(): global _struct_I return _struct_I _struct_i = None def _get_struct_i(): global _struct_i if _struct_i is None: _struct_i = struct.Struct("<i") return _struct_i _struct_2i = None def _get_struct_2i(): global _struct_2i if _struct_2i is None: _struct_2i = struct.Struct("<2i") return _struct_2i # This Python file uses the following encoding: utf-8 """autogenerated by genpy from test/display_image_newResponse.msg. Do not edit.""" import sys python3 = True if sys.hexversion > 0x03000000 else False import genpy import struct class display_image_newResponse(genpy.Message): _md5sum = "062bd6ec8c99fd70e30cc24256f9226a" _type = "test/display_image_newResponse" _has_header = False #flag to mark the presence of a Header object _full_text = """uint64 ir """ __slots__ = ['ir'] _slot_types = ['uint64'] def __init__(self, *args, **kwds): """ Constructor. Any message fields that are implicitly/explicitly set to None will be assigned a default value. The recommend use is keyword arguments as this is more robust to future message changes. You cannot mix in-order arguments and keyword arguments. The available fields are: ir :param args: complete set of field values, in .msg order :param kwds: use keyword arguments corresponding to message field names to set specific fields. 
""" if args or kwds: super(display_image_newResponse, self).__init__(*args, **kwds) #message fields cannot be None, assign default values for those that are if self.ir is None: self.ir = 0 else: self.ir = 0 def _get_types(self): """ internal API method """ return self._slot_types def serialize(self, buff): """ serialize message into buffer :param buff: buffer, ``StringIO`` """ try: buff.write(_get_struct_Q().pack(self.ir)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize(self, str): """ unpack serialized message in str into this message instance :param str: byte array of serialized message, ``str`` """ try: end = 0 start = end end += 8 (self.ir,) = _get_struct_Q().unpack(str[start:end]) return self except struct.error as e: raise genpy.DeserializationError(e) #most likely buffer underfill def serialize_numpy(self, buff, numpy): """ serialize message with numpy array types into buffer :param buff: buffer, ``StringIO`` :param numpy: numpy python module """ try: buff.write(_get_struct_Q().pack(self.ir)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize_numpy(self, str, numpy): """ unpack serialized message in str into this message instance using numpy for array types :param str: byte array of serialized message, ``str`` :param numpy: numpy python module """ try: end = 0 start = end end += 8 (self.ir,) = _get_struct_Q().unpack(str[start:end]) return self except struct.error as e: raise genpy.DeserializationError(e) #most likely buffer underfill _struct_I = genpy.struct_I def _get_struct_I(): global _struct_I return _struct_I _struct_Q = None def _get_struct_Q(): global _struct_Q if _struct_Q is None: _struct_Q = struct.Struct("<Q") return _struct_Q class display_image_new(object): _type = 'test/display_image_new' _md5sum = '80a036f354960d09033ab0f8d6dffcf7' _request_class = display_image_newRequest _response_class = display_image_newResponse
32.850909
145
0.652203
1,257
9,034
4.506762
0.137629
0.02842
0.019417
0.026831
0.841306
0.841306
0.836717
0.832127
0.832127
0.832127
0
0.018378
0.229024
9,034
274
146
32.970803
0.794975
0.283153
0
0.745763
1
0
0.088578
0.029088
0
0
0.003287
0
0
1
0.096045
false
0
0.033898
0
0.299435
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
2df1fe5d5f5780e0bfd22a5c4fc8421574fd4af4
15,699
py
Python
cotoha_api_python3.py
ice-github/CoARiJAndCOTOHA
31e562d6ba14e8fa6c6c9e4c076bf3d410c4d1fa
[ "MIT" ]
null
null
null
cotoha_api_python3.py
ice-github/CoARiJAndCOTOHA
31e562d6ba14e8fa6c6c9e4c076bf3d410c4d1fa
[ "MIT" ]
null
null
null
cotoha_api_python3.py
ice-github/CoARiJAndCOTOHA
31e562d6ba14e8fa6c6c9e4c076bf3d410c4d1fa
[ "MIT" ]
null
null
null
# -*- coding:utf-8 -*- # this code is based on https://qiita.com/gossy5454/items/83072418fb0c5f3e269f by @gossy5454 import os import urllib.request import json import configparser import codecs # COTOHA API操作用クラス class CotohaApi: # 初期化 def __init__(self, client_id, client_secret, developer_api_base_url, access_token_publish_url): self.client_id = client_id self.client_secret = client_secret self.developer_api_base_url = developer_api_base_url self.access_token_publish_url = access_token_publish_url self.getAccessToken() # アクセストークン取得 def getAccessToken(self): # アクセストークン取得URL指定 url = self.access_token_publish_url # ヘッダ指定 headers = { "Content-Type": "application/json;charset=UTF-8" } # リクエストボディ指定 data = { "grantType": "client_credentials", "clientId": self.client_id, "clientSecret": self.client_secret } # リクエストボディ指定をJSONにエンコード data = json.dumps(data).encode() # リクエスト生成 req = urllib.request.Request(url, data, headers) # リクエストを送信し、レスポンスを受信 res = urllib.request.urlopen(req) # レスポンスボディ取得 res_body = res.read() # レスポンスボディをJSONからデコード res_body = json.loads(res_body) # レスポンスボディからアクセストークンを取得 self.access_token = res_body["access_token"] # 構文解析API def parse(self, sentence): # 構文解析API URL指定 url = self.developer_api_base_url + "v1/parse" # ヘッダ指定 headers = { "Authorization": "Bearer " + self.access_token, "Content-Type": "application/json;charset=UTF-8", } # リクエストボディ指定 data = { "sentence": sentence } # リクエストボディ指定をJSONにエンコード data = json.dumps(data).encode() # リクエスト生成 req = urllib.request.Request(url, data, headers) # リクエストを送信し、レスポンスを受信 try: res = urllib.request.urlopen(req) # リクエストでエラーが発生した場合の処理 except urllib.request.HTTPError as e: # ステータスコードが401 Unauthorizedならアクセストークンを取得し直して再リクエスト if e.code == 401: print("get access token") self.access_token = self.getAccessToken(self.client_id, self.client_secret) headers["Authorization"] = "Bearer " + self.access_token req = urllib.request.Request(url, data, headers) res = urllib.request.urlopen(req) # 401以外のエラーなら原因を表示 else: print("<Error: parse> " + e.reason) return json.loads('{}') # レスポンスボディ取得 res_body = res.read() # レスポンスボディをJSONからデコード res_body = json.loads(res_body) # レスポンスボディから解析結果を取得 return res_body # 固有表現抽出API def ne(self, sentence): # 固有表現抽出API URL指定 url = self.developer_api_base_url + "v1/ne" # ヘッダ指定 headers = { "Authorization": "Bearer " + self.access_token, "Content-Type": "application/json;charset=UTF-8", } # リクエストボディ指定 data = { "sentence": sentence } # リクエストボディ指定をJSONにエンコード data = json.dumps(data).encode() # リクエスト生成 req = urllib.request.Request(url, data, headers) # リクエストを送信し、レスポンスを受信 try: res = urllib.request.urlopen(req) # リクエストでエラーが発生した場合の処理 except urllib.request.HTTPError as e: # ステータスコードが401 Unauthorizedならアクセストークンを取得し直して再リクエスト if e.code == 401: print("get access token") self.access_token = self.getAccessToken(self.client_id, self.client_secret) headers["Authorization"] = "Bearer " + self.access_token req = urllib.request.Request(url, data, headers) res = urllib.request.urlopen(req) # 401以外のエラーなら原因を表示 else: print("<Error: ne> " + e.reason) return json.loads('{}') # レスポンスボディ取得 res_body = res.read() # レスポンスボディをJSONからデコード res_body = json.loads(res_body) # レスポンスボディから解析結果を取得 return res_body # 照応解析API def coreference(self, document): # 照応解析API 取得URL指定 url = self.developer_api_base_url + "beta/coreference" # ヘッダ指定 headers = { "Authorization": "Bearer " + self.access_token, "Content-Type": "application/json;charset=UTF-8", } # リクエストボディ指定 data = { "document": document } # リクエストボディ指定をJSONにエンコード data = json.dumps(data).encode() # リクエスト生成 req = 
urllib.request.Request(url, data, headers) # リクエストを送信し、レスポンスを受信 try: res = urllib.request.urlopen(req) # リクエストでエラーが発生した場合の処理 except urllib.request.HTTPError as e: # ステータスコードが401 Unauthorizedならアクセストークンを取得し直して再リクエスト if e.code == 401: print("get access token") self.access_token = self.getAccessToken(self.client_id, self.client_secret) headers["Authorization"] = "Bearer " + self.access_token req = urllib.request.Request(url, data, headers) res = urllib.request.urlopen(req) # 401以外のエラーなら原因を表示 else: print("<Error: coreference> " + e.reason) return json.loads('{}') # レスポンスボディ取得 res_body = res.read() # レスポンスボディをJSONからデコード res_body = json.loads(res_body) # レスポンスボディから解析結果を取得 return res_body # キーワード抽出API def keyword(self, document): # キーワード抽出API URL指定 url = self.developer_api_base_url + "v1/keyword" # ヘッダ指定 headers = { "Authorization": "Bearer " + self.access_token, "Content-Type": "application/json;charset=UTF-8", } # リクエストボディ指定 data = { "document": document } # リクエストボディ指定をJSONにエンコード data = json.dumps(data).encode() # リクエスト生成 req = urllib.request.Request(url, data, headers) # リクエストを送信し、レスポンスを受信 try: res = urllib.request.urlopen(req) # リクエストでエラーが発生した場合の処理 except urllib.request.HTTPError as e: # ステータスコードが401 Unauthorizedならアクセストークンを取得し直して再リクエスト if e.code == 401: print("get access token") self.access_token = self.getAccessToken(self.client_id, self.client_secret) headers["Authorization"] = "Bearer " + self.access_token req = urllib.request.Request(url, data, headers) res = urllib.request.urlopen(req) # 401以外のエラーなら原因を表示 else: print("<Error: keyword> " + e.reason) return json.loads('{}') # レスポンスボディ取得 res_body = res.read() # レスポンスボディをJSONからデコード res_body = json.loads(res_body) # レスポンスボディから解析結果を取得 return res_body # 類似度算出API def similarity(self, s1, s2): # 類似度算出API URL指定 url = self.developer_api_base_url + "v1/similarity" # ヘッダ指定 headers = { "Authorization": "Bearer " + self.access_token, "Content-Type": "application/json;charset=UTF-8", } # リクエストボディ指定 data = { "s1": s1, "s2": s2 } # リクエストボディ指定をJSONにエンコード data = json.dumps(data).encode() # リクエスト生成 req = urllib.request.Request(url, data, headers) # リクエストを送信し、レスポンスを受信 try: res = urllib.request.urlopen(req) # リクエストでエラーが発生した場合の処理 except urllib.request.HTTPError as e: # ステータスコードが401 Unauthorizedならアクセストークンを取得し直して再リクエスト if e.code == 401: print("get access token") self.access_token = self.getAccessToken(self.client_id, self.client_secret) headers["Authorization"] = "Bearer " + self.access_token req = urllib.request.Request(url, data, headers) res = urllib.request.urlopen(req) # 401以外のエラーなら原因を表示 else: print("<Error: similarity> " + e.reason) return json.loads('{}') # レスポンスボディ取得 res_body = res.read() # レスポンスボディをJSONからデコード res_body = json.loads(res_body) # レスポンスボディから解析結果を取得 return res_body # 文タイプ判定API def sentenceType(self, sentence): # 文タイプ判定API URL指定 url = self.developer_api_base_url + "v1/sentence_type" # ヘッダ指定 headers = { "Authorization": "Bearer " + self.access_token, "Content-Type": "application/json;charset=UTF-8", } # リクエストボディ指定 data = { "sentence": sentence } # リクエストボディ指定をJSONにエンコード data = json.dumps(data).encode() # リクエスト生成 req = urllib.request.Request(url, data, headers) # リクエストを送信し、レスポンスを受信 try: res = urllib.request.urlopen(req) # リクエストでエラーが発生した場合の処理 except urllib.request.HTTPError as e: # ステータスコードが401 Unauthorizedならアクセストークンを取得し直して再リクエスト if e.code == 401: print("get access token") self.access_token = self.getAccessToken(self.client_id, self.client_secret) headers["Authorization"] = "Bearer " + self.access_token req = urllib.request.Request(url, data, 
headers) res = urllib.request.urlopen(req) # 401以外のエラーなら原因を表示 else: print("<Error: sentenceType> " + e.reason) return json.loads('{}') # レスポンスボディ取得 res_body = res.read() # レスポンスボディをJSONからデコード res_body = json.loads(res_body) # レスポンスボディから解析結果を取得 return res_body # ユーザ属性推定API def userAttribute(self, document): # ユーザ属性推定API URL指定 url = self.developer_api_base_url + "beta/user_attribute" # ヘッダ指定 headers = { "Authorization": "Bearer " + self.access_token, "Content-Type": "application/json;charset=UTF-8", } # リクエストボディ指定 data = { "document": document } # リクエストボディ指定をJSONにエンコード data = json.dumps(data).encode() # リクエスト生成 req = urllib.request.Request(url, data, headers) # リクエストを送信し、レスポンスを受信 try: res = urllib.request.urlopen(req) # リクエストでエラーが発生した場合の処理 except urllib.request.HTTPError as e: # ステータスコードが401 Unauthorizedならアクセストークンを取得し直して再リクエスト if e.code == 401: print("get access token") self.access_token = self.getAccessToken(self.client_id, self.client_secret) headers["Authorization"] = "Bearer " + self.access_token req = urllib.request.Request(url, data, headers) res = urllib.request.urlopen(req) # 401以外のエラーなら原因を表示 else: print("<Error: userAttribute> " + e.reason) return json.loads('{}') # レスポンスボディ取得 res_body = res.read() # レスポンスボディをJSONからデコード res_body = json.loads(res_body) # レスポンスボディから解析結果を取得 return res_body # 感情分析API def sentiment(self, sentence): # 照応解析API 取得URL指定 url = self.developer_api_base_url + "v1/sentiment" # ヘッダ指定 headers = { "Authorization": "Bearer " + self.access_token, "Content-Type": "application/json;charset=UTF-8", } # リクエストボディ指定 data = { "sentence": sentence } # リクエストボディ指定をJSONにエンコード data = json.dumps(data).encode() # リクエスト生成 req = urllib.request.Request(url, data, headers) # リクエストを送信し、レスポンスを受信 try: res = urllib.request.urlopen(req) # リクエストでエラーが発生した場合の処理 except urllib.request.HTTPError as e: # ステータスコードが401 Unauthorizedならアクセストークンを取得し直して再リクエスト if e.code == 401: print("get access token") self.access_token = self.getAccessToken(self.client_id, self.client_secret) headers["Authorization"] = "Bearer " + self.access_token req = urllib.request.Request(url, data, headers) res = urllib.request.urlopen(req) # 401以外のエラーなら原因を表示 else: print("<Error: sentiment> " + e.reason) return json.loads('{}') # レスポンスボディ取得 res_body = res.read() # レスポンスボディをJSONからデコード res_body = json.loads(res_body) # レスポンスボディから解析結果を取得 return res_body # 要約API def summary(self, document, sent_len): # 照応解析API 取得URL指定 url = self.developer_api_base_url + "beta/summary" # ヘッダ指定 headers = { "Authorization": "Bearer " + self.access_token, "Content-Type": "application/json;charset=UTF-8", } # リクエストボディ指定 data = { "document": document, "sent_len": sent_len } # リクエストボディ指定をJSONにエンコード data = json.dumps(data).encode() # リクエスト生成 req = urllib.request.Request(url, data, headers) # リクエストを送信し、レスポンスを受信 try: res = urllib.request.urlopen(req) # リクエストでエラーが発生した場合の処理 except urllib.request.HTTPError as e: # ステータスコードが401 Unauthorizedならアクセストークンを取得し直して再リクエスト if e.code == 401: print("get access token") self.access_token = self.getAccessToken(self.client_id, self.client_secret) headers["Authorization"] = "Bearer " + self.access_token req = urllib.request.Request(url, data, headers) res = urllib.request.urlopen(req) # 401以外のエラーなら原因を表示 else: print("<Error: summary> " + e.reason) return json.loads('{}') # レスポンスボディ取得 res_body = res.read() # レスポンスボディをJSONからデコード res_body = json.loads(res_body) # レスポンスボディから解析結果を取得 return res_body if __name__ == '__main__': # ソースファイルの場所取得 APP_ROOT = os.path.dirname(os.path.abspath(__file__)) + "/" # 設定値取得 config = 
configparser.ConfigParser() config.read(APP_ROOT + "config.ini") CLIENT_ID = config.get("COTOHA API", "Developer Client id") CLIENT_SECRET = config.get("COTOHA API", "Developer Client secret") DEVELOPER_API_BASE_URL = config.get("COTOHA API", "Developer API Base URL") ACCESS_TOKEN_PUBLISH_URL = config.get("COTOHA API", "Access Token Publish URL") # COTOHA APIインスタンス生成 cotoha_api = CotohaApi(CLIENT_ID, CLIENT_SECRET, DEVELOPER_API_BASE_URL, ACCESS_TOKEN_PUBLISH_URL) # 解析対象文 sentence = "すもももももももものうち" # 構文解析API実行 result = cotoha_api.parse(sentence) # 出力結果を見やすく整形 result_formated = json.dumps(result, indent=4, separators=(',', ': ')) print(codecs.decode(result_formated, 'unicode-escape'))
34.05423
103
0.550799
1,412
15,699
5.991501
0.102691
0.073759
0.053191
0.051655
0.845745
0.832861
0.812057
0.807683
0.778723
0.762293
0
0.012549
0.355373
15,699
460
104
34.128261
0.823419
0.151411
0
0.700361
0
0
0.120245
0.023577
0
0
0
0
0
1
0.039711
false
0
0.018051
0
0.126354
0.068592
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
2dfbbf8edc79a3d2fd3a66134d65228ddbc51509
21,409
py
Python
sdk/python/pulumi_newrelic/dashboard.py
pulumi/pulumi-newrelic
cd9a882f3524883ed155f87ff26c4c17cd048c9a
[ "ECL-2.0", "Apache-2.0" ]
6
2019-09-17T20:41:26.000Z
2022-01-13T23:54:14.000Z
sdk/python/pulumi_newrelic/dashboard.py
pulumi/pulumi-newrelic
cd9a882f3524883ed155f87ff26c4c17cd048c9a
[ "ECL-2.0", "Apache-2.0" ]
136
2019-04-29T21:34:57.000Z
2022-03-30T17:07:03.000Z
sdk/python/pulumi_newrelic/dashboard.py
pulumi/pulumi-newrelic
cd9a882f3524883ed155f87ff26c4c17cd048c9a
[ "ECL-2.0", "Apache-2.0" ]
3
2019-10-05T10:33:59.000Z
2021-06-15T16:37:49.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities from . import outputs from ._inputs import * __all__ = ['DashboardArgs', 'Dashboard'] @pulumi.input_type class DashboardArgs: def __init__(__self__, *, title: pulumi.Input[str], editable: Optional[pulumi.Input[str]] = None, filter: Optional[pulumi.Input['DashboardFilterArgs']] = None, grid_column_count: Optional[pulumi.Input[int]] = None, icon: Optional[pulumi.Input[str]] = None, visibility: Optional[pulumi.Input[str]] = None, widgets: Optional[pulumi.Input[Sequence[pulumi.Input['DashboardWidgetArgs']]]] = None): """ The set of arguments for constructing a Dashboard resource. :param pulumi.Input[str] title: The title of the dashboard. :param pulumi.Input[str] editable: Determines who can edit the dashboard in an account. Valid values are all, editable_by_all, editable_by_owner, or read_only. Defaults to editable_by_all. :param pulumi.Input['DashboardFilterArgs'] filter: A nested block that describes a dashboard filter. Exactly one nested filter block is allowed. :param pulumi.Input[int] grid_column_count: New Relic One supports a 3 column grid or a 12 column grid. New Relic Insights supports a 3 column grid. :param pulumi.Input[str] icon: The icon for the dashboard. :param pulumi.Input[str] visibility: Determines who can see the dashboard in an account. Valid values are all or owner. Defaults to all. :param pulumi.Input[Sequence[pulumi.Input['DashboardWidgetArgs']]] widgets: A nested block that describes a visualization. Up to 300 widget blocks are allowed in a dashboard definition. """ pulumi.set(__self__, "title", title) if editable is not None: pulumi.set(__self__, "editable", editable) if filter is not None: pulumi.set(__self__, "filter", filter) if grid_column_count is not None: pulumi.set(__self__, "grid_column_count", grid_column_count) if icon is not None: pulumi.set(__self__, "icon", icon) if visibility is not None: pulumi.set(__self__, "visibility", visibility) if widgets is not None: pulumi.set(__self__, "widgets", widgets) @property @pulumi.getter def title(self) -> pulumi.Input[str]: """ The title of the dashboard. """ return pulumi.get(self, "title") @title.setter def title(self, value: pulumi.Input[str]): pulumi.set(self, "title", value) @property @pulumi.getter def editable(self) -> Optional[pulumi.Input[str]]: """ Determines who can edit the dashboard in an account. Valid values are all, editable_by_all, editable_by_owner, or read_only. Defaults to editable_by_all. """ return pulumi.get(self, "editable") @editable.setter def editable(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "editable", value) @property @pulumi.getter def filter(self) -> Optional[pulumi.Input['DashboardFilterArgs']]: """ A nested block that describes a dashboard filter. Exactly one nested filter block is allowed. """ return pulumi.get(self, "filter") @filter.setter def filter(self, value: Optional[pulumi.Input['DashboardFilterArgs']]): pulumi.set(self, "filter", value) @property @pulumi.getter(name="gridColumnCount") def grid_column_count(self) -> Optional[pulumi.Input[int]]: """ New Relic One supports a 3 column grid or a 12 column grid. New Relic Insights supports a 3 column grid. 
""" return pulumi.get(self, "grid_column_count") @grid_column_count.setter def grid_column_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "grid_column_count", value) @property @pulumi.getter def icon(self) -> Optional[pulumi.Input[str]]: """ The icon for the dashboard. """ return pulumi.get(self, "icon") @icon.setter def icon(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "icon", value) @property @pulumi.getter def visibility(self) -> Optional[pulumi.Input[str]]: """ Determines who can see the dashboard in an account. Valid values are all or owner. Defaults to all. """ return pulumi.get(self, "visibility") @visibility.setter def visibility(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "visibility", value) @property @pulumi.getter def widgets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DashboardWidgetArgs']]]]: """ A nested block that describes a visualization. Up to 300 widget blocks are allowed in a dashboard definition. """ return pulumi.get(self, "widgets") @widgets.setter def widgets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DashboardWidgetArgs']]]]): pulumi.set(self, "widgets", value) @pulumi.input_type class _DashboardState: def __init__(__self__, *, dashboard_url: Optional[pulumi.Input[str]] = None, editable: Optional[pulumi.Input[str]] = None, filter: Optional[pulumi.Input['DashboardFilterArgs']] = None, grid_column_count: Optional[pulumi.Input[int]] = None, icon: Optional[pulumi.Input[str]] = None, title: Optional[pulumi.Input[str]] = None, visibility: Optional[pulumi.Input[str]] = None, widgets: Optional[pulumi.Input[Sequence[pulumi.Input['DashboardWidgetArgs']]]] = None): """ Input properties used for looking up and filtering Dashboard resources. :param pulumi.Input[str] dashboard_url: The URL for viewing the dashboard. :param pulumi.Input[str] editable: Determines who can edit the dashboard in an account. Valid values are all, editable_by_all, editable_by_owner, or read_only. Defaults to editable_by_all. :param pulumi.Input['DashboardFilterArgs'] filter: A nested block that describes a dashboard filter. Exactly one nested filter block is allowed. :param pulumi.Input[int] grid_column_count: New Relic One supports a 3 column grid or a 12 column grid. New Relic Insights supports a 3 column grid. :param pulumi.Input[str] icon: The icon for the dashboard. :param pulumi.Input[str] title: The title of the dashboard. :param pulumi.Input[str] visibility: Determines who can see the dashboard in an account. Valid values are all or owner. Defaults to all. :param pulumi.Input[Sequence[pulumi.Input['DashboardWidgetArgs']]] widgets: A nested block that describes a visualization. Up to 300 widget blocks are allowed in a dashboard definition. """ if dashboard_url is not None: pulumi.set(__self__, "dashboard_url", dashboard_url) if editable is not None: pulumi.set(__self__, "editable", editable) if filter is not None: pulumi.set(__self__, "filter", filter) if grid_column_count is not None: pulumi.set(__self__, "grid_column_count", grid_column_count) if icon is not None: pulumi.set(__self__, "icon", icon) if title is not None: pulumi.set(__self__, "title", title) if visibility is not None: pulumi.set(__self__, "visibility", visibility) if widgets is not None: pulumi.set(__self__, "widgets", widgets) @property @pulumi.getter(name="dashboardUrl") def dashboard_url(self) -> Optional[pulumi.Input[str]]: """ The URL for viewing the dashboard. 
""" return pulumi.get(self, "dashboard_url") @dashboard_url.setter def dashboard_url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "dashboard_url", value) @property @pulumi.getter def editable(self) -> Optional[pulumi.Input[str]]: """ Determines who can edit the dashboard in an account. Valid values are all, editable_by_all, editable_by_owner, or read_only. Defaults to editable_by_all. """ return pulumi.get(self, "editable") @editable.setter def editable(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "editable", value) @property @pulumi.getter def filter(self) -> Optional[pulumi.Input['DashboardFilterArgs']]: """ A nested block that describes a dashboard filter. Exactly one nested filter block is allowed. """ return pulumi.get(self, "filter") @filter.setter def filter(self, value: Optional[pulumi.Input['DashboardFilterArgs']]): pulumi.set(self, "filter", value) @property @pulumi.getter(name="gridColumnCount") def grid_column_count(self) -> Optional[pulumi.Input[int]]: """ New Relic One supports a 3 column grid or a 12 column grid. New Relic Insights supports a 3 column grid. """ return pulumi.get(self, "grid_column_count") @grid_column_count.setter def grid_column_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "grid_column_count", value) @property @pulumi.getter def icon(self) -> Optional[pulumi.Input[str]]: """ The icon for the dashboard. """ return pulumi.get(self, "icon") @icon.setter def icon(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "icon", value) @property @pulumi.getter def title(self) -> Optional[pulumi.Input[str]]: """ The title of the dashboard. """ return pulumi.get(self, "title") @title.setter def title(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "title", value) @property @pulumi.getter def visibility(self) -> Optional[pulumi.Input[str]]: """ Determines who can see the dashboard in an account. Valid values are all or owner. Defaults to all. """ return pulumi.get(self, "visibility") @visibility.setter def visibility(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "visibility", value) @property @pulumi.getter def widgets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DashboardWidgetArgs']]]]: """ A nested block that describes a visualization. Up to 300 widget blocks are allowed in a dashboard definition. """ return pulumi.get(self, "widgets") @widgets.setter def widgets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DashboardWidgetArgs']]]]): pulumi.set(self, "widgets", value) class Dashboard(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, editable: Optional[pulumi.Input[str]] = None, filter: Optional[pulumi.Input[pulumi.InputType['DashboardFilterArgs']]] = None, grid_column_count: Optional[pulumi.Input[int]] = None, icon: Optional[pulumi.Input[str]] = None, title: Optional[pulumi.Input[str]] = None, visibility: Optional[pulumi.Input[str]] = None, widgets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DashboardWidgetArgs']]]]] = None, __props__=None): """ New Relic legacy Dashboards reached end of life Wednesday July 28, 2021. **This resource has been removed.** For more information, [click here](https://discuss.newrelic.com/t/important-insights-dashboard-api-end-of-life/149357) :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] editable: Determines who can edit the dashboard in an account. 
Valid values are all, editable_by_all, editable_by_owner, or read_only. Defaults to editable_by_all. :param pulumi.Input[pulumi.InputType['DashboardFilterArgs']] filter: A nested block that describes a dashboard filter. Exactly one nested filter block is allowed. :param pulumi.Input[int] grid_column_count: New Relic One supports a 3 column grid or a 12 column grid. New Relic Insights supports a 3 column grid. :param pulumi.Input[str] icon: The icon for the dashboard. :param pulumi.Input[str] title: The title of the dashboard. :param pulumi.Input[str] visibility: Determines who can see the dashboard in an account. Valid values are all or owner. Defaults to all. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DashboardWidgetArgs']]]] widgets: A nested block that describes a visualization. Up to 300 widget blocks are allowed in a dashboard definition. """ ... @overload def __init__(__self__, resource_name: str, args: DashboardArgs, opts: Optional[pulumi.ResourceOptions] = None): """ New Relic legacy Dashboards reached end of life Wednesday July 28, 2021. **This resource has been removed.** For more information, [click here](https://discuss.newrelic.com/t/important-insights-dashboard-api-end-of-life/149357) :param str resource_name: The name of the resource. :param DashboardArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(DashboardArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, editable: Optional[pulumi.Input[str]] = None, filter: Optional[pulumi.Input[pulumi.InputType['DashboardFilterArgs']]] = None, grid_column_count: Optional[pulumi.Input[int]] = None, icon: Optional[pulumi.Input[str]] = None, title: Optional[pulumi.Input[str]] = None, visibility: Optional[pulumi.Input[str]] = None, widgets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DashboardWidgetArgs']]]]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = DashboardArgs.__new__(DashboardArgs) __props__.__dict__["editable"] = editable __props__.__dict__["filter"] = filter __props__.__dict__["grid_column_count"] = grid_column_count __props__.__dict__["icon"] = icon if title is None and not opts.urn: raise TypeError("Missing required property 'title'") __props__.__dict__["title"] = title __props__.__dict__["visibility"] = visibility __props__.__dict__["widgets"] = widgets __props__.__dict__["dashboard_url"] = None super(Dashboard, __self__).__init__( 'newrelic:index/dashboard:Dashboard', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, dashboard_url: Optional[pulumi.Input[str]] = None, editable: Optional[pulumi.Input[str]] = None, filter: 
Optional[pulumi.Input[pulumi.InputType['DashboardFilterArgs']]] = None, grid_column_count: Optional[pulumi.Input[int]] = None, icon: Optional[pulumi.Input[str]] = None, title: Optional[pulumi.Input[str]] = None, visibility: Optional[pulumi.Input[str]] = None, widgets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DashboardWidgetArgs']]]]] = None) -> 'Dashboard': """ Get an existing Dashboard resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] dashboard_url: The URL for viewing the dashboard. :param pulumi.Input[str] editable: Determines who can edit the dashboard in an account. Valid values are all, editable_by_all, editable_by_owner, or read_only. Defaults to editable_by_all. :param pulumi.Input[pulumi.InputType['DashboardFilterArgs']] filter: A nested block that describes a dashboard filter. Exactly one nested filter block is allowed. :param pulumi.Input[int] grid_column_count: New Relic One supports a 3 column grid or a 12 column grid. New Relic Insights supports a 3 column grid. :param pulumi.Input[str] icon: The icon for the dashboard. :param pulumi.Input[str] title: The title of the dashboard. :param pulumi.Input[str] visibility: Determines who can see the dashboard in an account. Valid values are all or owner. Defaults to all. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DashboardWidgetArgs']]]] widgets: A nested block that describes a visualization. Up to 300 widget blocks are allowed in a dashboard definition. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _DashboardState.__new__(_DashboardState) __props__.__dict__["dashboard_url"] = dashboard_url __props__.__dict__["editable"] = editable __props__.__dict__["filter"] = filter __props__.__dict__["grid_column_count"] = grid_column_count __props__.__dict__["icon"] = icon __props__.__dict__["title"] = title __props__.__dict__["visibility"] = visibility __props__.__dict__["widgets"] = widgets return Dashboard(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="dashboardUrl") def dashboard_url(self) -> pulumi.Output[str]: """ The URL for viewing the dashboard. """ return pulumi.get(self, "dashboard_url") @property @pulumi.getter def editable(self) -> pulumi.Output[Optional[str]]: """ Determines who can edit the dashboard in an account. Valid values are all, editable_by_all, editable_by_owner, or read_only. Defaults to editable_by_all. """ return pulumi.get(self, "editable") @property @pulumi.getter def filter(self) -> pulumi.Output[Optional['outputs.DashboardFilter']]: """ A nested block that describes a dashboard filter. Exactly one nested filter block is allowed. """ return pulumi.get(self, "filter") @property @pulumi.getter(name="gridColumnCount") def grid_column_count(self) -> pulumi.Output[Optional[int]]: """ New Relic One supports a 3 column grid or a 12 column grid. New Relic Insights supports a 3 column grid. """ return pulumi.get(self, "grid_column_count") @property @pulumi.getter def icon(self) -> pulumi.Output[Optional[str]]: """ The icon for the dashboard. """ return pulumi.get(self, "icon") @property @pulumi.getter def title(self) -> pulumi.Output[str]: """ The title of the dashboard. 
""" return pulumi.get(self, "title") @property @pulumi.getter def visibility(self) -> pulumi.Output[Optional[str]]: """ Determines who can see the dashboard in an account. Valid values are all or owner. Defaults to all. """ return pulumi.get(self, "visibility") @property @pulumi.getter def widgets(self) -> pulumi.Output[Optional[Sequence['outputs.DashboardWidget']]]: """ A nested block that describes a visualization. Up to 300 widget blocks are allowed in a dashboard definition. """ return pulumi.get(self, "widgets")
44.976891
211
0.650848
2,567
21,409
5.24815
0.075964
0.093082
0.090261
0.060422
0.863495
0.84516
0.821779
0.812203
0.804855
0.794314
0
0.004594
0.247559
21,409
475
212
45.071579
0.831709
0.341072
0
0.764286
1
0
0.099778
0.006126
0
0
0
0
0
1
0.160714
false
0.003571
0.025
0
0.282143
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
9311d8f07908044e4a5d3de54cb3093c7ecaf229
87
py
Python
services/active.py
ojhermann/property_bids
588df29122cec402e065a3ca3f346cad4becb105
[ "MIT" ]
null
null
null
services/active.py
ojhermann/property_bids
588df29122cec402e065a3ca3f346cad4becb105
[ "MIT" ]
4
2021-07-07T12:43:12.000Z
2021-07-15T13:56:10.000Z
services/active.py
ojhermann/property_bids
588df29122cec402e065a3ca3f346cad4becb105
[ "MIT" ]
null
null
null
from services.check_ok.v0.service import router as check_ok, TAGS_METADATA as CHECK_OK
43.5
86
0.850575
16
87
4.375
0.6875
0.3
0.257143
0
0
0
0
0
0
0
0
0.012821
0.103448
87
1
87
87
0.884615
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
932bb774345abb3af9947a316129ffbbd02f8355
171
py
Python
django_comments_ink/tests/test_init.py
comments-ink/django-comments-ink
25255d2ca60d5e2dfb8116e3ba4702dcde4e0a69
[ "BSD-2-Clause" ]
3
2022-03-26T23:53:14.000Z
2022-03-28T19:20:53.000Z
django_comments_ink/tests/test_init.py
comments-ink/django-comments-ink
25255d2ca60d5e2dfb8116e3ba4702dcde4e0a69
[ "BSD-2-Clause" ]
null
null
null
django_comments_ink/tests/test_init.py
comments-ink/django-comments-ink
25255d2ca60d5e2dfb8116e3ba4702dcde4e0a69
[ "BSD-2-Clause" ]
null
null
null
from django.urls import reverse

from django_comments_ink import get_form_target


def test_get_form_target():
    assert get_form_target() == reverse("comments-ink-post")
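# --- hedged sketch, not part of the original test module ---
# Since get_form_target() resolves to reverse("comments-ink-post"), a
# hypothetical companion check could assert the result is a URL path,
# assuming the default script prefix in the test settings:
def test_get_form_target_is_a_path():
    assert get_form_target().startswith("/")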
24.428571
60
0.80117
26
171
4.923077
0.538462
0.164063
0.304688
0
0
0
0
0
0
0
0
0
0.116959
171
6
61
28.5
0.847682
0
0
0
0
0
0.099415
0
0
0
0
0
0.25
1
0.25
true
0
0.5
0
0.75
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
7
934fd6ef54907c612c7b2194dd142d1fb5409d89
133
py
Python
lift/__init__.py
dragonrobotics/2018-PowerUp
0fb6be22420b1488ca3d6abb04588e8564d768b9
[ "MIT" ]
2
2018-02-08T23:29:21.000Z
2018-12-27T22:45:12.000Z
lift/__init__.py
dragonrobotics/2018-PowerUp
0fb6be22420b1488ca3d6abb04588e8564d768b9
[ "MIT" ]
2
2018-02-10T20:25:16.000Z
2018-02-20T12:47:33.000Z
lift/__init__.py
dragonrobotics/2018-PowerUp
0fb6be22420b1488ca3d6abb04588e8564d768b9
[ "MIT" ]
8
2018-01-15T14:53:52.000Z
2018-02-14T22:34:30.000Z
from .lift import ManualControlLift  # noqa: F401
from .rd4b_lift import RD4BLift  # noqa: F401
from .claw import Claw  # noqa: F401
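# --- hedged usage note, not part of the original file ---
# With these re-exports (the `# noqa: F401` markers suppress the unused-import
# warning), consumers can import directly from the package, e.g.:
#     from lift import ManualControlLift, RD4BLift, Claw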
33.25
49
0.75188
19
133
5.210526
0.473684
0.242424
0.242424
0
0
0
0
0
0
0
0
0.100917
0.180451
133
3
50
44.333333
0.807339
0.240602
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
93842602f45d1cf03647e589a6b85cff0915a2ce
68,580
py
Python
benchmarks/SimResults/combinations_splash_heteroFair/oldstuff/cmp_choleskybarnesfftfmm/power.py
TugberkArkose/MLScheduler
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
[ "Unlicense" ]
null
null
null
benchmarks/SimResults/combinations_splash_heteroFair/oldstuff/cmp_choleskybarnesfftfmm/power.py
TugberkArkose/MLScheduler
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
[ "Unlicense" ]
null
null
null
benchmarks/SimResults/combinations_splash_heteroFair/oldstuff/cmp_choleskybarnesfftfmm/power.py
TugberkArkose/MLScheduler
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
[ "Unlicense" ]
null
null
null
power = {'BUSES': {'Area': 1.33155, 'Bus/Area': 1.33155, 'Bus/Gate Leakage': 0.00662954, 'Bus/Peak Dynamic': 0.0, 'Bus/Runtime Dynamic': 0.0, 'Bus/Subthreshold Leakage': 0.0691322, 'Bus/Subthreshold Leakage with power gating': 0.0259246, 'Gate Leakage': 0.00662954, 'Peak Dynamic': 0.0, 'Runtime Dynamic': 0.0, 'Subthreshold Leakage': 0.0691322, 'Subthreshold Leakage with power gating': 0.0259246}, 'Core': [{'Area': 32.6082, 'Execution Unit/Area': 8.2042, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 0.156295, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.32545, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 0.850535, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.122718, 'Execution Unit/Instruction Scheduler/Area': 2.17927, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.492469, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996, 'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.852779, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.489093, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.83434, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.356387, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 7.10312, 
'Execution Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.160684, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0178524, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.187346, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.132029, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.34803, 'Execution Unit/Register Files/Runtime Dynamic': 0.149882, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.495523, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.23555, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155, 'Execution Unit/Runtime Dynamic': 3.9506, 'Execution Unit/Subthreshold Leakage': 1.83518, 'Execution Unit/Subthreshold Leakage with power gating': 0.709678, 'Gate Leakage': 0.372997, 'Instruction Fetch Unit/Area': 5.86007, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00178201, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00178201, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00155137, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch 
Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000600145, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00189661, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00701201, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0171129, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0590479, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.126923, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.365382, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.431089, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 8.96874, 'Instruction Fetch Unit/Runtime Dynamic': 0.947518, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932587, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0633669, 'L2/Runtime 
Dynamic': 0.0132071, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80969, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 5.74381, 'Load Store Unit/Data Cache/Runtime Dynamic': 2.17319, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0351387, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.145802, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.145802, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 6.43513, 'Load Store Unit/Runtime Dynamic': 3.03804, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.359524, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.719047, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store Unit/Subthreshold Leakage': 0.591622, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283406, 'Memory Management Unit/Area': 0.434579, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.127596, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.128543, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00813591, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.399995, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0599114, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.779083, 'Memory Management Unit/Runtime Dynamic': 0.188455, 'Memory Management Unit/Subthreshold Leakage': 0.0769113, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462, 'Peak Dynamic': 27.9111, 'Renaming Unit/Area': 0.369768, 'Renaming Unit/FP Front End RAT/Area': 0.168486, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.560592, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925, 'Renaming Unit/Free List/Area': 0.0414755, 'Renaming Unit/Free List/Gate Leakage': 4.15911e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0401324, 'Renaming Unit/Free List/Runtime Dynamic': 0.0319279, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987, 'Renaming Unit/Gate Leakage': 0.00863632, 'Renaming Unit/Int Front End RAT/Area': 0.114751, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.246659, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897, 'Renaming 
Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781, 'Renaming Unit/Peak Dynamic': 4.56169, 'Renaming Unit/Runtime Dynamic': 0.839179, 'Renaming Unit/Subthreshold Leakage': 0.070483, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779, 'Runtime Dynamic': 8.977, 'Subthreshold Leakage': 6.21877, 'Subthreshold Leakage with power gating': 2.58311}, {'Area': 32.0201, 'Execution Unit/Area': 7.68434, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 0.0670909, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.255385, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 0.36072, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.120359, 'Execution Unit/Instruction Scheduler/Area': 1.66526, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.252021, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223, 'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.406501, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.205188, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.863711, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.232937, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 4.91526, 'Execution 
Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0681477, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0105709, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.101624, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0781784, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.169772, 'Execution Unit/Register Files/Runtime Dynamic': 0.0887493, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.230869, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.562243, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543, 'Execution Unit/Runtime Dynamic': 2.17547, 'Execution Unit/Subthreshold Leakage': 1.79543, 'Execution Unit/Subthreshold Leakage with power gating': 0.688821, 'Gate Leakage': 0.368936, 'Instruction Fetch Unit/Area': 5.85939, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.0017392, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.0017392, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00153522, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch 
Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000605451, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00112304, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00613666, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0159474, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0589979, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0751549, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.7805, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.225957, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.25526, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 7.23102, 'Instruction Fetch Unit/Runtime Dynamic': 0.578455, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932286, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0268359, 'L2/Runtime Dynamic': 
0.00695469, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80901, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 3.67324, 'Load Store Unit/Data Cache/Runtime Dynamic': 1.1742, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0350888, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.0788142, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.0788143, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 4.04541, 'Load Store Unit/Runtime Dynamic': 1.6417, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.194342, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.388685, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store Unit/Subthreshold Leakage': 0.591321, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283293, 'Memory Management Unit/Area': 0.4339, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.0689727, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.069375, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00808595, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.297234, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0370447, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.571826, 'Memory Management Unit/Runtime Dynamic': 0.10642, 'Memory Management Unit/Subthreshold Leakage': 0.0766103, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333, 'Peak Dynamic': 20.3798, 'Renaming Unit/Area': 0.303608, 'Renaming Unit/FP Front End RAT/Area': 0.131045, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.179265, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885, 'Renaming Unit/Free List/Area': 0.0340654, 'Renaming Unit/Free List/Gate Leakage': 2.5481e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0306032, 'Renaming Unit/Free List/Runtime Dynamic': 0.0135521, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064, 'Renaming Unit/Gate Leakage': 0.00708398, 'Renaming Unit/Int Front End RAT/Area': 0.0941223, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.125178, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488, 'Renaming Unit/Int 
Front End RAT/Subthreshold Leakage with power gating': 0.00248228, 'Renaming Unit/Peak Dynamic': 3.58947, 'Renaming Unit/Runtime Dynamic': 0.317996, 'Renaming Unit/Subthreshold Leakage': 0.0552466, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461, 'Runtime Dynamic': 4.82699, 'Subthreshold Leakage': 6.16288, 'Subthreshold Leakage with power gating': 2.55328}, {'Area': 32.0201, 'Execution Unit/Area': 7.68434, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 0.111651, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.290384, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 0.592109, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.120359, 'Execution Unit/Instruction Scheduler/Area': 1.66526, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.245507, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223, 'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.395993, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.199884, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.841384, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.190009, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 5.24424, 'Execution Unit/Register 
Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.111862, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0102977, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.116706, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0761575, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.228568, 'Execution Unit/Register Files/Runtime Dynamic': 0.0864551, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.273786, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.59163, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543, 'Execution Unit/Runtime Dynamic': 2.21523, 'Execution Unit/Subthreshold Leakage': 1.79543, 'Execution Unit/Subthreshold Leakage with power gating': 0.688821, 'Gate Leakage': 0.368936, 'Instruction Fetch Unit/Area': 5.85939, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00124651, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00124651, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00112734, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch Predictor/L1_Local 
Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000459177, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00109401, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00471438, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0104643, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0589979, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0732121, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.65692, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.202698, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.248661, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 7.10144, 'Instruction Fetch Unit/Runtime Dynamic': 0.53975, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932286, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0205811, 'L2/Runtime Dynamic': 0.00573576, 
'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80901, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 2.67462, 'Load Store Unit/Data Cache/Runtime Dynamic': 0.700598, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0350888, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.0465065, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.0465066, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 2.89423, 'Load Store Unit/Runtime Dynamic': 0.97646, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.114677, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.229355, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store Unit/Subthreshold Leakage': 0.591321, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283293, 'Memory Management Unit/Area': 0.4339, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.0406993, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0410071, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00808595, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.28955, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0332332, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.515573, 'Memory Management Unit/Runtime Dynamic': 0.0742403, 'Memory Management Unit/Subthreshold Leakage': 0.0766103, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333, 'Peak Dynamic': 19.3655, 'Renaming Unit/Area': 0.303608, 'Renaming Unit/FP Front End RAT/Area': 0.131045, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.294258, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885, 'Renaming Unit/Free List/Area': 0.0340654, 'Renaming Unit/Free List/Gate Leakage': 2.5481e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0306032, 'Renaming Unit/Free List/Runtime Dynamic': 0.0146576, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064, 'Renaming Unit/Gate Leakage': 0.00708398, 'Renaming Unit/Int Front End RAT/Area': 0.0941223, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.118843, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488, 'Renaming Unit/Int Front End 
RAT/Subthreshold Leakage with power gating': 0.00248228, 'Renaming Unit/Peak Dynamic': 3.58947, 'Renaming Unit/Runtime Dynamic': 0.427759, 'Renaming Unit/Subthreshold Leakage': 0.0552466, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461, 'Runtime Dynamic': 4.23918, 'Subthreshold Leakage': 6.16288, 'Subthreshold Leakage with power gating': 2.55328}, {'Area': 32.0201, 'Execution Unit/Area': 7.68434, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 0.0917114, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.274723, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 0.516454, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.120359, 'Execution Unit/Instruction Scheduler/Area': 1.66526, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.268603, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223, 'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.433247, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.218688, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.920539, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.228024, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 5.167, 'Execution Unit/Register Files/Area': 
0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0975692, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0112664, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.114944, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0833221, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.212513, 'Execution Unit/Register Files/Runtime Dynamic': 0.0945885, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.265088, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.590687, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543, 'Execution Unit/Runtime Dynamic': 2.28591, 'Execution Unit/Subthreshold Leakage': 1.79543, 'Execution Unit/Subthreshold Leakage with power gating': 0.688821, 'Gate Leakage': 0.368936, 'Instruction Fetch Unit/Area': 5.85939, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00155245, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00155245, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00139096, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold 
Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000559673, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00119693, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00569279, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0134993, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0589979, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0800997, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.09503, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.240794, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.272055, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 7.56082, 'Instruction Fetch Unit/Runtime Dynamic': 0.61214, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932286, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0235177, 'L2/Runtime Dynamic': 0.00903124, 'L2/Subthreshold Leakage': 
0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80901, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 2.71512, 'Load Store Unit/Data Cache/Runtime Dynamic': 0.72897, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0350888, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.0478169, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.0478169, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 2.94092, 'Load Store Unit/Runtime Dynamic': 1.0126, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.117908, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.235817, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store Unit/Subthreshold Leakage': 0.591321, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283293, 'Memory Management Unit/Area': 0.4339, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.041846, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0421984, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00808595, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.31679, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.039477, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.544783, 'Memory Management Unit/Runtime Dynamic': 0.0816753, 'Memory Management Unit/Subthreshold Leakage': 0.0766103, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333, 'Peak Dynamic': 19.8265, 'Renaming Unit/Area': 0.303608, 'Renaming Unit/FP Front End RAT/Area': 0.131045, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.25666, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885, 'Renaming Unit/Free List/Area': 0.0340654, 'Renaming Unit/Free List/Gate Leakage': 2.5481e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0306032, 'Renaming Unit/Free List/Runtime Dynamic': 0.0152421, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064, 'Renaming Unit/Gate Leakage': 0.00708398, 'Renaming Unit/Int Front End RAT/Area': 0.0941223, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.131902, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage with 
power gating': 0.00248228, 'Renaming Unit/Peak Dynamic': 3.58947, 'Renaming Unit/Runtime Dynamic': 0.403804, 'Renaming Unit/Subthreshold Leakage': 0.0552466, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461, 'Runtime Dynamic': 4.40517, 'Subthreshold Leakage': 6.16288, 'Subthreshold Leakage with power gating': 2.55328}], 'DRAM': {'Area': 0, 'Gate Leakage': 0, 'Peak Dynamic': 1.6845215567164933, 'Runtime Dynamic': 1.6845215567164933, 'Subthreshold Leakage': 4.252, 'Subthreshold Leakage with power gating': 4.252}, 'L3': [{'Area': 61.9075, 'Gate Leakage': 0.0484137, 'Peak Dynamic': 0.112407, 'Runtime Dynamic': 0.0693572, 'Subthreshold Leakage': 6.80085, 'Subthreshold Leakage with power gating': 3.32364}], 'Processor': {'Area': 191.908, 'Gate Leakage': 1.53485, 'Peak Dynamic': 87.5954, 'Peak Power': 120.708, 'Runtime Dynamic': 22.5177, 'Subthreshold Leakage': 31.5774, 'Subthreshold Leakage with power gating': 13.9484, 'Total Cores/Area': 128.669, 'Total Cores/Gate Leakage': 1.4798, 'Total Cores/Peak Dynamic': 87.483, 'Total Cores/Runtime Dynamic': 22.4483, 'Total Cores/Subthreshold Leakage': 24.7074, 'Total Cores/Subthreshold Leakage with power gating': 10.2429, 'Total L3s/Area': 61.9075, 'Total L3s/Gate Leakage': 0.0484137, 'Total L3s/Peak Dynamic': 0.112407, 'Total L3s/Runtime Dynamic': 0.0693572, 'Total L3s/Subthreshold Leakage': 6.80085, 'Total L3s/Subthreshold Leakage with power gating': 3.32364, 'Total Leakage': 33.1122, 'Total NoCs/Area': 1.33155, 'Total NoCs/Gate Leakage': 0.00662954, 'Total NoCs/Peak Dynamic': 0.0, 'Total NoCs/Runtime Dynamic': 0.0, 'Total NoCs/Subthreshold Leakage': 0.0691322, 'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
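The per-core dicts in this record flatten McPAT's component hierarchy into 'Component/Sub-component/Metric' keys. As a minimal sketch (the `core_stats` excerpt below is a hypothetical three-key sample shaped like the real records, not the full data), the flat keys can be folded back into a nested tree for easier lookup:

# Rebuild the nested component tree from flattened 'A/B/Metric' keys.
# `core_stats` is a small hypothetical excerpt of one per-core dict above.
core_stats = {
    'Runtime Dynamic': 8.977,
    'Load Store Unit/Runtime Dynamic': 3.03804,
    'Load Store Unit/Data Cache/Runtime Dynamic': 2.17319,
}

def to_tree(flat):
    """Turn {'a/b/metric': value} pairs into nested dicts."""
    tree = {}
    for key, value in flat.items():
        *path, metric = key.split('/')
        node = tree
        for part in path:
            node = node.setdefault(part, {})
        node[metric] = value
    return tree

tree = to_tree(core_stats)
print(tree['Load Store Unit']['Data Cache']['Runtime Dynamic'])  # 2.17319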
75.032823
124
0.681948
8,082
68,580
5.780747
0.067681
0.12363
0.113014
0.093493
0.939683
0.931229
0.918429
0.885873
0.86342
0.842894
0
0.131475
0.224424
68,580
914
125
75.032823
0.746903
0
0
0.642232
0
0
0.657689
0.048118
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
fa930e770e04f0495ab030a16d16f754a5ee72e1
2,493
py
Python
API/main/migrations/0011_auto_20200419_1214.py
Ju99ernaut/grapeflowAPI
0d6599775e5b666ad735160b65262624fea0bf99
[ "MIT" ]
null
null
null
API/main/migrations/0011_auto_20200419_1214.py
Ju99ernaut/grapeflowAPI
0d6599775e5b666ad735160b65262624fea0bf99
[ "MIT" ]
null
null
null
API/main/migrations/0011_auto_20200419_1214.py
Ju99ernaut/grapeflowAPI
0d6599775e5b666ad735160b65262624fea0bf99
[ "MIT" ]
null
null
null
# Generated by Django 3.0.3 on 2020-04-19 10:14

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0010_auto_20200416_1650'),
    ]

    operations = [
        migrations.AddField(
            model_name='block',
            name='assets',
            field=models.TextField(blank=True, default='[]'),
        ),
        migrations.AddField(
            model_name='block',
            name='components',
            field=models.TextField(blank=True, default='[]'),
        ),
        migrations.AddField(
            model_name='block',
            name='styles',
            field=models.TextField(blank=True, default='[]'),
        ),
        migrations.AlterField(
            model_name='block',
            name='css',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='block',
            name='description',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='block',
            name='html',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='block',
            name='script',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='logic',
            name='description',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='logic',
            name='script',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='page',
            name='assets',
            field=models.TextField(blank=True, default='[]'),
        ),
        migrations.AlterField(
            model_name='page',
            name='components',
            field=models.TextField(blank=True, default='[]'),
        ),
        migrations.AlterField(
            model_name='page',
            name='css',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='page',
            name='html',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='page',
            name='styles',
            field=models.TextField(blank=True, default='[]'),
        ),
    ]
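The migration stores only field changes; the model it implies is not part of this record. Purely as a hypothetical sketch of what main/models.py's Block could look like after this migration runs inside a configured Django project (field names and defaults read straight off the operations above; everything else is assumed):

# Hypothetical reconstruction of the Block model after migration 0011;
# the real grapeflowAPI models.py may differ in fields not touched here.
from django.db import models

class Block(models.Model):
    description = models.TextField(blank=True, default='')
    html = models.TextField(blank=True, default='')
    css = models.TextField(blank=True, default='')
    script = models.TextField(blank=True, default='')
    # Added by the AddField operations; '[]' is an empty JSON list stored as text.
    assets = models.TextField(blank=True, default='[]')
    components = models.TextField(blank=True, default='[]')
    styles = models.TextField(blank=True, default='[]')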
29.678571
61
0.513839
211
2,493
5.990521
0.208531
0.099684
0.221519
0.276899
0.883703
0.883703
0.858386
0.858386
0.820411
0.79193
0
0.019053
0.347373
2,493
83
62
30.036145
0.757837
0.018051
0
0.909091
1
0
0.080131
0.009403
0
0
0
0
0
1
0
false
0
0.012987
0
0.051948
0
0
0
0
null
0
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
10
87c0873529354436218d73b2c698721fcf71072a
140
py
Python
sutils/core/runsubmit.py
t-mertz/slurm_utils
6fc9709f62e2bca1387ea9c7a5975f0f0be5d0dd
[ "MIT" ]
null
null
null
sutils/core/runsubmit.py
t-mertz/slurm_utils
6fc9709f62e2bca1387ea9c7a5975f0f0be5d0dd
[ "MIT" ]
null
null
null
sutils/core/runsubmit.py
t-mertz/slurm_utils
6fc9709f62e2bca1387ea9c7a5975f0f0be5d0dd
[ "MIT" ]
null
null
null
#import ssubmit
import core
import sys

if __name__ == "__main__":
    # ssubmit.main('submit', *sys.argv)
    core.main('submit', *sys.argv)
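The script delegates everything to core.main('submit', *sys.argv), so argv[0] (the script name) rides along as the first extra argument. The dispatcher below is a hypothetical stand-in for the real sutils core module, shown only to illustrate that calling convention:

# Hypothetical sketch of a core.main(command, *argv) dispatcher; the real
# sutils/core implementation is not part of this record.
import sys

def main(command, *argv):
    """Dispatch a sub-command; argv mirrors sys.argv (argv[0] = script name)."""
    handlers = {
        'submit': lambda args: print('would submit with args:', args),
    }
    handler = handlers.get(command)
    if handler is None:
        sys.exit(f'unknown command: {command!r}')
    handler(argv[1:])  # drop the script name, keep the user arguments

if __name__ == '__main__':
    main('submit', *sys.argv)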
20
38
0.678571
19
140
4.578947
0.473684
0.229885
0.298851
0.390805
0
0
0
0
0
0
0
0
0.157143
140
7
39
20
0.737288
0.364286
0
0
0
0
0.159091
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
7
87d7db1635935db3f7d388c66b33281997f34f14
16,083
py
Python
tests/unit/preprocessor/_trend/test_trend.py
markelg/ESMValCore
b2f7ffc3232f174dd5ebc50ad20b4a02d3517c2c
[ "Apache-2.0" ]
26
2019-06-07T07:50:07.000Z
2022-03-22T21:04:01.000Z
tests/unit/preprocessor/_trend/test_trend.py
markelg/ESMValCore
b2f7ffc3232f174dd5ebc50ad20b4a02d3517c2c
[ "Apache-2.0" ]
1,370
2019-06-06T09:03:07.000Z
2022-03-31T04:37:20.000Z
tests/unit/preprocessor/_trend/test_trend.py
zklaus/ESMValCore
5656fb8b546eeb4d750a424de7ed56a237edfabb
[ "Apache-2.0" ]
26
2019-07-03T13:08:48.000Z
2022-03-02T16:08:47.000Z
"""Unit tests for :mod:`esmvalcore.preprocessor._trend`.""" import dask.array as da import iris import iris.coord_categorisation import numpy as np import pytest from cf_units import Unit from esmvalcore.preprocessor._trend import linear_trend, linear_trend_stderr def assert_masked_array_equal(arr_1, arr_2): """Check equality of two masked arrays.""" arr_1 = np.ma.array(arr_1) arr_2 = np.ma.array(arr_2) mask_1 = np.ma.getmaskarray(arr_1) mask_2 = np.ma.getmaskarray(arr_2) np.testing.assert_allclose(mask_1, mask_2) data_1 = arr_1.filled(np.nan) data_2 = arr_2.filled(np.nan) np.testing.assert_allclose(data_1, data_2) def get_cube(times=None, time_units=None): """Create cube.""" lats = iris.coords.DimCoord([0.0, 20.0], standard_name='latitude', units='m') lons = iris.coords.DimCoord([500.0, 600.0], standard_name='longitude', units='m') aux_coord = iris.coords.AuxCoord([0.0, 0.0], var_name='aux') if times is None: cube = iris.cube.Cube([[1.0, 2.0], [3.0, 4.0]], var_name='x', long_name='X', units='kg', dim_coords_and_dims=[(lats, 0), (lons, 1)], aux_coords_and_dims=[(aux_coord, 0)]) return cube if time_units is None: time_units = Unit('days since 1850-01-01 00:00:00') times = iris.coords.DimCoord(times, standard_name='time', units=time_units) cube_data = np.arange(4 * times.shape[0]).reshape(times.shape[0], 2, 2) cube = iris.cube.Cube(cube_data.astype('float32'), var_name='x', long_name='X', units='kg', dim_coords_and_dims=[(times, 0), (lats, 1), (lons, 2)], aux_coords_and_dims=[(aux_coord, 1)]) return cube @pytest.fixture def cube_no_time(): """Cube with no time dimension.""" return get_cube() @pytest.fixture def cube_1_time(): """Cube with single time point.""" return get_cube(times=[0.0]) @pytest.fixture def cube_3_time(): """Cube with three time points.""" return get_cube(times=[0.0, 1.0, 2.0]) @pytest.fixture def cube_3_time_years(): """Cube with three years.""" return get_cube(times=[0.0, 1.0, 2.0], time_units='year') def test_linear_trend_coord_not_found(cube_no_time): """Test calculation of linear trend when dimension is not available.""" with pytest.raises(iris.exceptions.CoordinateNotFoundError) as err: linear_trend(cube_no_time) assert 'time' in str(err.value) with pytest.raises(iris.exceptions.CoordinateNotFoundError) as err: linear_trend(cube_no_time, coordinate='time') assert 'time' in str(err.value) with pytest.raises(iris.exceptions.CoordinateNotFoundError) as err: linear_trend(cube_no_time, coordinate='aux') assert 'aux' in str(err.value) def test_linear_trend_1_time(cube_1_time): """Test calculation of linear trend with single time point.""" cube_trend = linear_trend(cube_1_time) assert cube_trend.shape == (2, 2) assert_masked_array_equal(cube_trend.data, np.ma.masked_equal([[0.0, 0.0], [0.0, 0.0]], 0.0)) assert not cube_trend.coords('time', dim_coords=True) assert cube_trend.coords('latitude', dim_coords=True) assert cube_trend.coords('longitude', dim_coords=True) assert cube_trend.units == 'kg day-1' assert (iris.coords.CellMethod('trend', coords=('time',)) in cube_trend.cell_methods) def test_linear_trend_3_time(cube_3_time): """Test calculation of linear trend with three time points.""" cube_3_time.data[0, 0, 0] = 1.0 cube_trend = linear_trend(cube_3_time) assert cube_trend.shape == (2, 2) assert_masked_array_equal(cube_trend.data, [[3.5, 4.0], [4.0, 4.0]]) assert not cube_trend.coords('time', dim_coords=True) assert cube_trend.coords('latitude', dim_coords=True) assert cube_trend.coords('longitude', dim_coords=True) assert cube_trend.units == 'kg day-1' assert 
        (iris.coords.CellMethod('trend', coords=('time',)) in cube_trend.cell_methods)


def test_linear_trend_3_time_lazy(cube_3_time):
    """Test lazy calculation of linear trend with three time points."""
    cube_3_time.data = -2.0 * da.arange(3 * 2 * 2).reshape(3, 2, 2)
    assert cube_3_time.has_lazy_data()
    cube_trend = linear_trend(cube_3_time)
    assert cube_trend.shape == (2, 2)
    assert_masked_array_equal(cube_trend.data, [[-8.0, -8.0], [-8.0, -8.0]])
    assert not cube_trend.coords('time', dim_coords=True)
    assert cube_trend.coords('latitude', dim_coords=True)
    assert cube_trend.coords('longitude', dim_coords=True)
    assert cube_trend.units == 'kg day-1'
    assert (iris.coords.CellMethod('trend', coords=('time',))
            in cube_trend.cell_methods)


def test_linear_trend_3_time_no_metadata(cube_3_time):
    """Test calculation of trend with three time points and no metadata."""
    cube_3_time.units = None
    cube_trend = linear_trend(cube_3_time)
    assert cube_trend.shape == (2, 2)
    assert_masked_array_equal(cube_trend.data, [[4.0, 4.0], [4.0, 4.0]])
    assert cube_trend.units == Unit('unknown')
    assert (iris.coords.CellMethod('trend', coords=('time',))
            in cube_trend.cell_methods)

    # Cube with unknown units
    cube_3_time.units = Unit('unknown')
    cube_trend = linear_trend(cube_3_time)
    assert cube_trend.shape == (2, 2)
    assert_masked_array_equal(cube_trend.data, [[4.0, 4.0], [4.0, 4.0]])
    assert cube_trend.units == Unit('unknown')
    assert (iris.coords.CellMethod('trend', coords=('time',))
            in cube_trend.cell_methods)

    # Cube with no units
    cube_3_time.units = Unit('no unit')
    cube_trend = linear_trend(cube_3_time)
    assert cube_trend.shape == (2, 2)
    assert_masked_array_equal(cube_trend.data, [[4.0, 4.0], [4.0, 4.0]])
    assert cube_trend.units == Unit('no unit')
    assert (iris.coords.CellMethod('trend', coords=('time',))
            in cube_trend.cell_methods)

    # Time with unknown units
    cube_3_time.units = 'kg'
    cube_3_time.coord('time').units = Unit('unknown')
    cube_trend = linear_trend(cube_3_time)
    assert cube_trend.shape == (2, 2)
    assert_masked_array_equal(cube_trend.data, [[4.0, 4.0], [4.0, 4.0]])
    assert cube_trend.units == Unit('unknown')
    assert (iris.coords.CellMethod('trend', coords=('time',))
            in cube_trend.cell_methods)

    # Time with no units
    cube_3_time.coord('time').units = Unit('no unit')
    cube_trend = linear_trend(cube_3_time)
    assert cube_trend.shape == (2, 2)
    assert_masked_array_equal(cube_trend.data, [[4.0, 4.0], [4.0, 4.0]])
    assert cube_trend.units == Unit('kg')
    assert (iris.coords.CellMethod('trend', coords=('time',))
            in cube_trend.cell_methods)


def test_linear_trend_3_time_years(cube_3_time_years):
    """Test calculation of linear trend with three years."""
    cube_trend = linear_trend(cube_3_time_years)
    assert cube_trend.shape == (2, 2)
    assert_masked_array_equal(cube_trend.data, [[4.0, 4.0], [4.0, 4.0]])
    assert cube_trend.units == 'kg yr-1'
    assert (iris.coords.CellMethod('trend', coords=('time',))
            in cube_trend.cell_methods)


def test_linear_trend_latitude(cube_3_time):
    """Test calculation of linear trend along latitude coordinate."""
    cube_3_time.data[0, 0, 0] = np.nan
    cube_3_time.data = np.ma.masked_invalid(cube_3_time.data)
    cube_trend = linear_trend(cube_3_time, coordinate='latitude')
    assert cube_trend.shape == (3, 2)
    assert_masked_array_equal(
        cube_trend.data,
        np.ma.masked_invalid([[np.nan, 0.1], [0.1, 0.1], [0.1, 0.1]]))
    assert cube_trend.coords('time', dim_coords=True)
    assert not cube_trend.coords('latitude', dim_coords=True)
    assert cube_trend.coords('longitude', dim_coords=True)
    assert cube_trend.units == 'kg m-1'
    assert (iris.coords.CellMethod('trend', coords=('latitude',))
            in cube_trend.cell_methods)


def test_linear_trend_longitude(cube_3_time):
    """Test calculation of linear trend along longitude coordinate."""
    cube_3_time.data[1, 0, 0] = np.nan
    cube_3_time.data = np.ma.masked_invalid(cube_3_time.data)
    cube_trend = linear_trend(cube_3_time, coordinate='longitude')
    assert cube_trend.shape == (3, 2)
    assert_masked_array_equal(
        cube_trend.data,
        np.ma.masked_invalid([[0.01, 0.01], [np.nan, 0.01], [0.01, 0.01]]))
    assert cube_trend.coords('time', dim_coords=True)
    assert cube_trend.coords('latitude', dim_coords=True)
    assert not cube_trend.coords('longitude', dim_coords=True)
    assert cube_trend.units == 'kg m-1'
    assert (iris.coords.CellMethod('trend', coords=('longitude',))
            in cube_trend.cell_methods)


def test_linear_trend_stderr_coord_not_found(cube_no_time):
    """Test calculation of trend stderr when dimension is not available."""
    with pytest.raises(iris.exceptions.CoordinateNotFoundError) as err:
        linear_trend_stderr(cube_no_time)
    assert 'time' in str(err.value)
    with pytest.raises(iris.exceptions.CoordinateNotFoundError) as err:
        linear_trend_stderr(cube_no_time, coordinate='time')
    assert 'time' in str(err.value)
    with pytest.raises(iris.exceptions.CoordinateNotFoundError) as err:
        linear_trend_stderr(cube_no_time, coordinate='aux')
    assert 'aux' in str(err.value)


def test_linear_trend_stderr_1_time(cube_1_time):
    """Test calculation of trend stderr with single time point."""
    cube_stderr = linear_trend_stderr(cube_1_time)
    assert cube_stderr.shape == (2, 2)
    assert_masked_array_equal(
        cube_stderr.data,
        np.ma.masked_equal([[0.0, 0.0], [0.0, 0.0]], 0.0))
    assert not cube_stderr.coords('time', dim_coords=True)
    assert cube_stderr.coords('latitude', dim_coords=True)
    assert cube_stderr.coords('longitude', dim_coords=True)
    assert cube_stderr.units == 'kg day-1'
    assert (iris.coords.CellMethod('trend_stderr', coords=('time',))
            in cube_stderr.cell_methods)


def test_linear_trend_stderr_3_time(cube_3_time):
    """Test calculation of trend stderr with three time points."""
    cube_3_time.data[0, 0, 0] = 1.0
    cube_stderr = linear_trend_stderr(cube_3_time)
    assert cube_stderr.shape == (2, 2)
    assert_masked_array_equal(cube_stderr.data,
                              [[0.28867513459482086, 0.0], [0.0, 0.0]])
    assert not cube_stderr.coords('time', dim_coords=True)
    assert cube_stderr.coords('latitude', dim_coords=True)
    assert cube_stderr.coords('longitude', dim_coords=True)
    assert cube_stderr.units == 'kg day-1'
    assert (iris.coords.CellMethod('trend_stderr', coords=('time',))
            in cube_stderr.cell_methods)


def test_linear_trend_stderr_3_time_lazy(cube_3_time):
    """Test lazy calculation of trend stderr with three time points."""
    cube_3_time.data = da.array([[[1.0, 1.0], [2.0, 3.0]],
                                 [[4.0, 5.0], [6.0, 7.0]],
                                 [[8.0, 9.0], [10.0, 11.0]]])
    assert cube_3_time.has_lazy_data()
    cube_stderr = linear_trend_stderr(cube_3_time)
    assert cube_stderr.shape == (2, 2)
    assert_masked_array_equal(cube_stderr.data,
                              [[0.28867513459482086, 0.0], [0.0, 0.0]])
    assert not cube_stderr.coords('time', dim_coords=True)
    assert cube_stderr.coords('latitude', dim_coords=True)
    assert cube_stderr.coords('longitude', dim_coords=True)
    assert cube_stderr.units == 'kg day-1'
    assert (iris.coords.CellMethod('trend_stderr', coords=('time',))
            in cube_stderr.cell_methods)


def test_linear_trend_stderr_3_time_no_metadata(cube_3_time):
    """Test calculation of trend stderr with no metadata."""
    cube_3_time.units = None
    cube_stderr = linear_trend_stderr(cube_3_time)
    assert cube_stderr.shape == (2, 2)
    assert_masked_array_equal(cube_stderr.data, [[0.0, 0.0], [0.0, 0.0]])
    assert cube_stderr.units == Unit('unknown')
    assert (iris.coords.CellMethod('trend_stderr', coords=('time',))
            in cube_stderr.cell_methods)

    # Cube with unknown units
    cube_3_time.units = Unit('unknown')
    cube_stderr = linear_trend_stderr(cube_3_time)
    assert cube_stderr.shape == (2, 2)
    assert_masked_array_equal(cube_stderr.data, [[0.0, 0.0], [0.0, 0.0]])
    assert cube_stderr.units == Unit('unknown')
    assert (iris.coords.CellMethod('trend_stderr', coords=('time',))
            in cube_stderr.cell_methods)

    # Cube with no units
    cube_3_time.units = Unit('no unit')
    cube_stderr = linear_trend_stderr(cube_3_time)
    assert cube_stderr.shape == (2, 2)
    assert_masked_array_equal(cube_stderr.data, [[0.0, 0.0], [0.0, 0.0]])
    assert cube_stderr.units == Unit('no unit')
    assert (iris.coords.CellMethod('trend_stderr', coords=('time',))
            in cube_stderr.cell_methods)

    # Time with unknown units
    cube_3_time.units = 'kg'
    cube_3_time.coord('time').units = Unit('unknown')
    cube_stderr = linear_trend_stderr(cube_3_time)
    assert cube_stderr.shape == (2, 2)
    assert_masked_array_equal(cube_stderr.data, [[0.0, 0.0], [0.0, 0.0]])
    assert cube_stderr.units == Unit('unknown')
    assert (iris.coords.CellMethod('trend_stderr', coords=('time',))
            in cube_stderr.cell_methods)

    # Time with no units
    cube_3_time.coord('time').units = Unit('no unit')
    cube_stderr = linear_trend_stderr(cube_3_time)
    assert cube_stderr.shape == (2, 2)
    assert_masked_array_equal(cube_stderr.data, [[0.0, 0.0], [0.0, 0.0]])
    assert cube_stderr.units == Unit('kg')
    assert (iris.coords.CellMethod('trend_stderr', coords=('time',))
            in cube_stderr.cell_methods)


def test_linear_trend_stderr_3_time_years(cube_3_time_years):
    """Test calculation of trend stderr with three years."""
    cube_3_time_years.data[1, 1, 1] = 1.0
    cube_stderr = linear_trend_stderr(cube_3_time_years)
    assert cube_stderr.shape == (2, 2)
    assert_masked_array_equal(cube_stderr.data,
                              [[0.0, 0.0], [0.0, 3.464101615137754]])
    assert cube_stderr.units == 'kg yr-1'
    assert (iris.coords.CellMethod('trend_stderr', coords=('time',))
            in cube_stderr.cell_methods)


def test_linear_trend_stderr_latitude(cube_3_time):
    """Test calculation of trend stderr along latitude coordinate."""
    cube_3_time.data[0, 0, 0] = np.nan
    cube_3_time.data = np.ma.masked_invalid(cube_3_time.data)
    cube_stderr = linear_trend_stderr(cube_3_time, coordinate='latitude')
    assert cube_stderr.shape == (3, 2)
    assert_masked_array_equal(
        cube_stderr.data,
        np.ma.masked_invalid([[np.nan, 0.0], [0.0, 0.0], [0.0, 0.0]]))
    assert cube_stderr.coords('time', dim_coords=True)
    assert not cube_stderr.coords('latitude', dim_coords=True)
    assert cube_stderr.coords('longitude', dim_coords=True)
    assert cube_stderr.units == 'kg m-1'
    assert (iris.coords.CellMethod('trend_stderr', coords=('latitude',))
            in cube_stderr.cell_methods)


def test_linear_trend_stderr_longitude(cube_3_time):
    """Test calculation of trend stderr along longitude coordinate."""
    cube_3_time.data[1, 0, 0] = np.nan
    cube_3_time.data = np.ma.masked_invalid(cube_3_time.data)
    cube_stderr = linear_trend_stderr(cube_3_time, coordinate='longitude')
    assert cube_stderr.shape == (3, 2)
    assert_masked_array_equal(
        cube_stderr.data,
        np.ma.masked_invalid([[0.0, 0.0], [np.nan, 0.0], [0.0, 0.0]]))
    assert cube_stderr.coords('time', dim_coords=True)
    assert cube_stderr.coords('latitude', dim_coords=True)
    assert not cube_stderr.coords('longitude', dim_coords=True)
    assert cube_stderr.units == 'kg m-1'
    assert (iris.coords.CellMethod('trend_stderr', coords=('longitude',))
            in cube_stderr.cell_methods)
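For orientation, the numbers these tests assert follow from an ordinary least-squares fit along the time axis. A minimal sketch, assuming evenly spaced time points at days [0, 1, 2] and using numpy/scipy purely for illustration (the linear_trend and linear_trend_stderr implementations under test are not shown in this file):

import numpy as np
from scipy import stats

time = np.array([0.0, 1.0, 2.0])

# Per-cell slope for the lazy-trend test data, -2.0 * arange(12):
data = -2.0 * np.arange(3 * 2 * 2).reshape(3, 2, 2)
slopes = np.polyfit(time, data.reshape(3, -1), deg=1)[0].reshape(2, 2)
print(slopes)  # [[-8. -8.] [-8. -8.]], the asserted 'kg day-1' trend

# Standard error of the slope for cell (0, 0) of the lazy-stderr test,
# whose time series is [1.0, 4.0, 8.0]:
print(stats.linregress(time, [1.0, 4.0, 8.0]).stderr)
# ~0.2887, i.e. sqrt(1/12), matching the asserted 0.28867513459482086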
(quality-signal metrics for the file above omitted: line-length, word/character-composition, and n-gram duplication values)
87e983a9ca568da133bd8897b47f19acaf46eee0
11,211
py
Python
mlearner/preprocessing/reduce_feature.py
jaisenbe58r/MLearner
e768a4cad150b35fb5bf543ab28aa23764af51d9
[ "MIT" ]
6
2020-04-16T22:36:14.000Z
2020-04-25T14:34:47.000Z
mlearner/preprocessing/reduce_feature.py
jaisenbe58r/MLearner
e768a4cad150b35fb5bf543ab28aa23764af51d9
[ "MIT" ]
9
2020-04-16T18:25:37.000Z
2020-05-03T17:24:36.000Z
mlearner/preprocessing/reduce_feature.py
jaisenbe58r/MLearner
e768a4cad150b35fb5bf543ab28aa23764af51d9
[ "MIT" ]
1
2020-04-18T17:29:42.000Z
2020-04-18T17:29:42.000Z
"""Jaime Sendra Berenguer-2020. MLearner Machine Learning Library Extensions Author:Jaime Sendra Berenguer<www.linkedin.com/in/jaisenbe> License: MIT """ import numpy as np import pandas as pd from sklearn.base import BaseEstimator, TransformerMixin from sklearn.decomposition import PCA from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.preprocessing import StandardScaler class PCA_selector(BaseEstimator, TransformerMixin): def __init__(self, columns=None, n_components=2, random_state=99): """Init log PCA_selector.""" if columns is not None: if isinstance(columns, list) or isinstance(columns, tuple): self.columns = columns else: raise TypeError("Invalid type {}".format(type(columns))) else: self.columns = columns if n_components is not None: if isinstance(n_components, int): self.n_components = n_components else: raise TypeError("Invalid type {}".format(type(n_components))) else: self.n_components = n_components self.random_state = random_state def fit(self, X, y=None): """Selecting PCA columns from the dataset. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe, where n_samples is the number of samples and n_features is the number of features. Returns -------- self """ if self.columns is None: self.columns = X.select_dtypes(exclude=["object"]).columns if isinstance(X, pd.core.frame.DataFrame): try: _test = X[self.columns].astype(np.float32) del(_test) except ValueError: raise NameError("Null or categorical variables are not allowed: {}".format(X.dtypes)) else: raise NameError("Invalid type {}".format(type(X))) self._fitted = True return self def transform(self, X): """Trransformer applies PCA. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe of samples, where n_samples is the number of samples and n_features is the number of features. Returns ------- X_transform : {DAtaframe}, shape = [n_samples, n_features] A copy of the input Dataframe with the columns centered. """ if not hasattr(self, "_fitted"): raise AttributeError("PCA_selector has not been fitted, yet.") if not isinstance(X, pd.core.frame.DataFrame): raise NameError("Invalid type {}".format(type(X))) self.X_std = StandardScaler().fit_transform(X[self.columns]) return PCA(n_components=self.n_components, random_state=self.random_state).fit_transform(self.X_std) class LDA_selector(BaseEstimator, TransformerMixin): def __init__(self, columns=None, random_state=99): """Init log LDA_selector.""" if columns is not None: if isinstance(columns, list) or isinstance(columns, tuple): self.columns = columns else: raise TypeError("Invalid type {}".format(type(columns))) else: self.columns = columns self.random_state = random_state def fit(self, X, y): """Selecting LDA columns from the dataset. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe, where n_samples is the number of samples and n_features is the number of features. 
Returns -------- self """ if self.columns is None: self.columns = X.select_dtypes(exclude=["object"]).columns if isinstance(X, pd.core.frame.DataFrame): try: _test = X[self.columns].astype(np.float32) del(_test) except ValueError: raise NameError("Null or categorical variables are not allowed: {}".format(X.dtypes)) else: raise NameError("Invalid type {}".format(type(X))) if not isinstance(y, pd.core.frame.DataFrame) and not isinstance(y, pd.core.series.Series): raise NameError("Invalid type {}".format(type(y))) self.LDA = LinearDiscriminantAnalysis().fit(X[self.columns], y) return self def transform(self, X): """Trransformer applies LDA. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe of samples, where n_samples is the number of samples and n_features is the number of features. Returns ------- X_transform : {DAtaframe}, shape = [n_samples, n_features] A copy of the input Dataframe with the columns centered. """ if not hasattr(self, "LDA"): raise AttributeError("LDA_selector has not been fitted, yet.") if not isinstance(X, pd.core.frame.DataFrame): raise NameError("Invalid type {}".format(type(X))) return self.LDA.transform(X[self.columns]) class PCA_add(BaseEstimator, TransformerMixin): def __init__(self, columns=None, n_components=2, PCA_name=None, random_state=99): """Init log PCA_add.""" if columns is not None: if isinstance(columns, list) or isinstance(columns, tuple): self.columns = columns else: raise TypeError("Invalid type {}".format(type(columns))) else: self.columns = columns if n_components is not None: if isinstance(n_components, int): self.n_components = n_components else: raise TypeError("Invalid type {}".format(type(n_components))) else: self.n_components = n_components self.PCA_name = PCA_name self.random_state = random_state def fit(self, X, y=None): """Selecting PCA columns from the dataset. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe, where n_samples is the number of samples and n_features is the number of features. Returns -------- self """ if self.columns is None: self.columns = X.select_dtypes(exclude=["object"]).columns if isinstance(X, pd.core.frame.DataFrame): try: _test = X[self.columns].astype(np.float32) del(_test) except ValueError: raise NameError("Null or categorical variables are not allowed: {}".format(X.dtypes)) else: raise NameError("Invalid type {}".format(type(X))) self._fitted = True return self def _add_dataframe(self, Y): df = pd.DataFrame() for i in range(Y.shape[1]): nombre = str(self.PCA_name) + "_PCA_" + str(i+1) df[nombre] = Y[:, i] return df def _concat_dataframe(self, X_transf, df): for i in df.columns.tolist(): X_transf[i] = df[i].values return X_transf def transform(self, X): """Trransformer applies PCA. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe of samples, where n_samples is the number of samples and n_features is the number of features. Returns ------- X_transform : {DAtaframe}, shape = [n_samples, n_features] A copy of the input Dataframe with the columns centered. 
""" if not hasattr(self, "_fitted"): raise AttributeError("PCA_selector has not been fitted, yet.") if not isinstance(X, pd.core.frame.DataFrame): raise NameError("Invalid type {}".format(type(X))) X_transform = X.copy() self.X_std = StandardScaler().fit_transform(X_transform[self.columns]) X_PCA = PCA(n_components=self.n_components, random_state=self.random_state).fit_transform(self.X_std) df_PCA = self._add_dataframe(X_PCA) return self._concat_dataframe(X_transform, df_PCA) class LDA_add(BaseEstimator, TransformerMixin): def __init__(self, columns=None, LDA_name=None, random_state=99): """Init log LDA_selector.""" if columns is not None: if isinstance(columns, list) or isinstance(columns, tuple): self.columns = columns else: raise TypeError("Invalid type {}".format(type(columns))) else: self.columns = columns self.LDA_name = LDA_name self.random_state = random_state def fit(self, X, y=None): """Selecting LDA columns from the dataset. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe, where n_samples is the number of samples and n_features is the number of features. Returns -------- self """ if self.columns is None: self.columns = X.select_dtypes(exclude=["object"]).columns if isinstance(X, pd.core.frame.DataFrame): try: _test = X[self.columns].astype(np.float32) del(_test) except ValueError: raise NameError("Null or categorical variables are not allowed: {}".format(X.dtypes)) else: raise NameError("Invalid type {}".format(type(X))) if not isinstance(y, pd.core.frame.DataFrame) and not isinstance(y, pd.core.series.Series): raise NameError("Invalid type {}".format(type(y))) self.LDA = LinearDiscriminantAnalysis().fit(X[self.columns], y) return self def _add_dataframe_LDA(self, Y): df_LDA = pd.DataFrame() for i in range(Y.shape[1]): nombre = str(self.LDA_name) + "_LDA_" + str(i+1) df_LDA[nombre] = Y[:, i] return df_LDA def _concat_dataframe(self, X_transf, df_LDA): for i in df_LDA.columns.tolist(): X_transf[i] = df_LDA[i].values return X_transf def transform(self, X): """Trransformer applies LDA. Parameters ---------- X : {Dataframe}, shape = [n_samples, n_features] Dataframe of samples, where n_samples is the number of samples and n_features is the number of features. Returns ------- X_transform : {DAtaframe}, shape = [n_samples, n_features] A copy of the input Dataframe with the columns centered. """ if not hasattr(self, "X_LDA"): raise AttributeError("LDA_selector has not been fitted, yet.") if not isinstance(X, pd.core.frame.DataFrame): raise NameError("Invalid type {}".format(type(X))) X_transform = X.copy() X_LDA = self.LDA.transform(X_transform) df_LDA = self._add_dataframe_LDA(X_LDA) return self._concat_dataframe(X_transform, df_LDA)
(quality-signal metrics for the file above omitted: line-length, word/character-composition, and n-gram duplication values)
ea0cf0adaa23c29d60ca11f55e56a05074780879
92
py
Python
or_suite/experiment/__init__.py
JasmineSamadi/ORSuite
e2b2b0a5b497ea6566e794dcef1f176081fca4ce
[ "MIT" ]
4
2021-12-01T10:56:17.000Z
2022-02-06T17:07:43.000Z
or_suite/experiment/__init__.py
JasmineSamadi/ORSuite
e2b2b0a5b497ea6566e794dcef1f176081fca4ce
[ "MIT" ]
2
2021-08-11T13:25:01.000Z
2022-03-20T19:23:23.000Z
or_suite/experiment/__init__.py
JasmineSamadi/ORSuite
e2b2b0a5b497ea6566e794dcef1f176081fca4ce
[ "MIT" ]
3
2021-04-02T20:24:25.000Z
2021-04-10T23:53:28.000Z
from or_suite.experiment.experiment import *
from or_suite.experiment.sb_experiment import *
(quality-signal metrics for the file above omitted: line-length, word/character-composition, and n-gram duplication values)
ea2e79c0663d9c11a92be553c433430b7f5742d1
2,102
py
Python
secureme/__init__.py
jainamoswal/SecureMe
b14a0b20cefaaad30e462d0267b559b1c9393932
[ "MIT" ]
6
2020-12-23T16:51:46.000Z
2022-02-06T06:24:51.000Z
secureme/__init__.py
jainamoswal/SecureMe
b14a0b20cefaaad30e462d0267b559b1c9393932
[ "MIT" ]
2
2020-12-23T16:43:55.000Z
2022-03-25T10:36:36.000Z
secureme/__init__.py
jainamoswal/SecureMe
b14a0b20cefaaad30e462d0267b559b1c9393932
[ "MIT" ]
3
2020-12-21T14:12:11.000Z
2021-02-18T17:19:27.000Z
import datetime
import subprocess
import sys


class top():

    _alphabet = r"abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890`~!@#$%^&*()_+-=,./;'[]<>?:\"{}|₹"

    @staticmethod
    def HWID():
        # Windows-only: sums the digits of the machine UUID reported by wmic.
        string = subprocess.check_output(
            'wmic csproduct get uuid').decode().split('\n')[1].strip()
        HW_ID = 0
        for i in string:
            if i.isdigit():
                HW_ID += int(i)  # the original incremented an undefined `HW`
        return HW_ID

    @staticmethod
    def encrypt(Text, Method="Length", Password="22"):
        args = ["HWID", "Length", "Password", "Date", "Month", "Year", "Hour"]
        if Method not in args:
            print(f"You have to pass one from {args} to encrypt or leave it blank.")
            sys.exit()
        if Method == "HWID":
            key = top.HWID()  # the original bare HWID() call raised NameError
        if Method == "Length":
            key = len(Text)
        if Method == "Password":
            key = int(Password)  # the shift key must be numeric
        if Method == "Date":
            key = datetime.datetime.now().day
        if Method == "Month":
            key = datetime.datetime.now().month
        if Method == "Year":
            key = datetime.datetime.now().year
        if Method == "Hour":
            key = datetime.datetime.now().hour
        encrypted = ''
        for i in Text:
            # Caesar-style forward shift within the alphabet.
            encrypted += top._alphabet[
                (top._alphabet.find(i) + key) % len(top._alphabet)]
        return encrypted

    @staticmethod
    def decrypt(Text, Method="Length", Password="22"):
        args = ["HWID", "Length", "Password", "Date", "Month", "Year", "Hour"]
        if Method not in args:
            print(f"You have to pass one from {args} to decrypt or leave it blank.")
            sys.exit()
        if Method == "HWID":
            key = top.HWID()
        if Method == "Length":
            key = len(Text)
        if Method == "Password":
            key = int(Password)
        if Method == "Date":
            key = datetime.datetime.now().day
        if Method == "Month":
            key = datetime.datetime.now().month
        if Method == "Year":
            key = datetime.datetime.now().year
        if Method == "Hour":
            key = datetime.datetime.now().hour
        decrypted = ''
        for i in Text:
            # Shift back by the key; the original duplicated encrypt() and
            # re-applied the forward shift, so decryption never inverted it.
            decrypted += top._alphabet[
                (top._alphabet.find(i) - key) % len(top._alphabet)]
        return decrypted
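A round-trip check of the corrected pair (a minimal sketch; with the default Length method both sides key the shift on the text length, which the one-character-per-character substitution preserves):

msg = "hello world"
token = top.encrypt(msg)    # shift each character forward by len(msg)
plain = top.decrypt(token)  # len(token) == len(msg), so the shift inverts
assert plain == msg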
(quality-signal metrics for the file above omitted: line-length, word/character-composition, and n-gram duplication values)
ea76ff68ede03617ecc5da3559ebf9c4282f5551
81,199
py
Python
tests/test_symmetry.py
rosenbrockc/phonon-enumeration
a7878814e58eb6bcd993bec416cae72bb4585d69
[ "MIT-0" ]
1
2022-01-13T02:57:55.000Z
2022-01-13T02:57:55.000Z
tests/test_symmetry.py
skphy/phonon-enumeration
a7878814e58eb6bcd993bec416cae72bb4585d69
[ "MIT-0" ]
null
null
null
tests/test_symmetry.py
skphy/phonon-enumeration
a7878814e58eb6bcd993bec416cae72bb4585d69
[ "MIT-0" ]
1
2021-11-30T02:35:26.000Z
2021-11-30T02:35:26.000Z
"""Methods for testing the subroutines in the symmetry module.""" import unittest as ut import numpy as np gpath = "tests/symmetry/" def _read_float_3D(fname): array = [] parray = [] lc = 0 dc = 0 with open(fname,"r") as f1: for line in f1: lc +=1 if lc == 2: d1 = int(line.strip().split()[1]) d2 = int(line.strip().split()[2]) d3 = int(line.strip().split()[3]) elif lc > 3: if "#" not in line: dc +=1 parray.append([float(i) for i in line.strip().split()]) if dc == 3: array.append(list(map(list,zip(*parray)))) parray = [] dc = 0 array2 = [] for i in range(d3): array2.append(list(map(list,zip(*[array[0][i],array[1][i],array[2][i]])))) return array2 def _read_float_2D(fname): array = [] with open(fname,"r") as f1: for line in f1: if "#" not in line: array.append([float(i) for i in line.strip().split()]) return array def _read_float_1D(fname): array = [] with open(fname,"r") as f1: for line in f1: if "#" not in line: array = [float(i) for i in line.strip().split()] return array def _read_int_1D(fname): array = [] with open(fname,"r") as f1: for line in f1: if "#" not in line: array = [int(i) for i in line.strip().split()] return array def _read_int(fname): with open(fname,"r") as f1: line = f1.readline() if "#" in line: line = f1.readline() val = int(line.strip()) return val def _read_float(fname): with open(fname,"r") as f1: line = f1.readline() if "#" in line: line = f1.readline() val = float(line.strip()) return val def _read_logical(fname): with open(fname,"r") as f1: line = f1.readline() if "#" in line: line = f1.readline() if "t" in line.lower(): val = True else: val = False return val def _read_output(test): values = [] with open("tests/symmetry/"+test) as f: for line in f: values.append(eval(line)) return values def _read_spaceGroup(case): sg_ops = _read_float_3D(gpath+"get_spaceGroup_sg_op.out."+str(case)) sg_fracts = list(map(list,zip(*_read_float_2D(gpath+"get_spaceGroup_sg_fract.out."+str(case))))) return [sg_ops,sg_fracts] class TestGetConcsForSize(ut.TestCase): """Tests of the get_concs_for_size subroutine.""" def test1(self): from phenum.symmetry import get_concs_for_size size = 4 nspecies = 2 nB = 3 res_concs = True concs = [[1, 1, 4], [1, 1, 4]] out = [] self.assertEqual(get_concs_for_size(size,nspecies,res_concs,nB,concs),out) def test2(self): from phenum.symmetry import get_concs_for_size size = 15 nspecies = 3 nB = 2 res_concs = True concs = [[2, 8, 15], [5, 5, 15], [4, 8, 15]] out = [[4, 10, 16], [5, 10, 15], [6, 10, 14], [7, 10, 13], [8, 10, 12], [9, 10, 11], [10, 10, 10], [11, 10, 9], [12, 10, 8]] self.assertEqual(get_concs_for_size(size,nspecies,res_concs,nB,concs),out) def test3(self): from phenum.symmetry import get_concs_for_size size = 18 nspecies = 4 nB = 2 res_concs = True concs = [[5, 5, 18], [6, 13, 18], [4, 16, 18], [5, 8, 18]] out = [] self.assertEqual(get_concs_for_size(size,nspecies,res_concs,nB,concs),out) def test4(self): from phenum.symmetry import get_concs_for_size size = 16 nspecies = 2 nB = 3 res_concs = False concs = [] out = [[0,16],[1, 15], [2, 14], [3, 13], [4, 12], [5, 11], [6, 10], [7, 9], [8, 8], [9, 7], [10, 6], [11, 5], [12, 4], [13, 3], [14, 2], [15, 1],[16,0]] self.assertEqual(get_concs_for_size(size,nspecies,res_concs,nB,concs),out) def test5(self): from phenum.symmetry import get_concs_for_size size = 13 nspecies = 3 nB = 2 res_concs = True concs = [[3, 7, 13], [5, 12, 13], [1, 3, 13]] out = [[6, 14, 6], [6, 15, 5], [6, 16, 4], [6, 17, 3], [6, 18, 2], [7, 13, 6], [7, 14, 5], [7, 15, 4], [7, 16, 3], [7, 17, 2], [8, 12, 6], [8, 13, 5], [8, 
14, 4], [8, 15, 3], [8, 16, 2], [9, 11, 6], [9, 12, 5], [9, 13, 4], [9, 14, 3], [9, 15, 2], [10, 10, 6], [10, 11, 5], [10, 12, 4], [10, 13, 3], [10, 14, 2], [11, 10, 5], [11, 11, 4], [11, 12, 3], [11, 13, 2], [12, 10, 4], [12, 11, 3], [12, 12, 2], [13, 10, 3], [13, 11, 2], [14, 10, 2]] self.assertEqual(get_concs_for_size(size,nspecies,res_concs,nB,concs),out) def test6(self): from phenum.symmetry import get_concs_for_size size = 13 nspecies = 4 nB = 3 res_concs = True concs = [[2, 5, 13], [5, 12, 13], [3, 4, 13], [4, 6, 13]] out = [] self.assertEqual(get_concs_for_size(size,nspecies,res_concs,nB,concs),out) def test7(self): from phenum.symmetry import get_concs_for_size size = 16 nspecies = 3 nB = 2 res_concs = False concs = [] out = [[0, 0, 16], [0, 1, 15], [0, 2, 14], [0, 3, 13], [0, 4, 12], [0, 5, 11], [0, 6, 10], [0, 7, 9], [0, 8, 8], [0, 9, 7], [0, 10, 6], [0, 11, 5], [0, 12, 4], [0, 13, 3], [0, 14, 2], [0, 15, 1], [0, 16, 0], [1, 0, 15], [1, 1, 14], [1, 2, 13], [1, 3, 12], [1, 4, 11], [1, 5, 10], [1, 6, 9], [1, 7, 8], [1, 8, 7], [1, 9, 6], [1, 10, 5], [1, 11, 4], [1, 12, 3], [1, 13, 2], [1, 14, 1], [1, 15, 0], [2, 0, 14], [2, 1, 13], [2, 2, 12], [2, 3, 11], [2, 4, 10], [2, 5, 9], [2, 6, 8], [2, 7, 7], [2, 8, 6], [2, 9, 5], [2, 10, 4], [2, 11, 3], [2, 12, 2], [2, 13, 1], [2, 14, 0], [3, 0, 13], [3, 1, 12], [3, 2, 11], [3, 3, 10], [3, 4, 9], [3, 5, 8], [3, 6, 7], [3, 7, 6], [3, 8, 5], [3, 9, 4], [3, 10, 3], [3, 11, 2], [3, 12, 1], [3, 13, 0], [4, 0, 12], [4, 1, 11], [4, 2, 10], [4, 3, 9], [4, 4, 8], [4, 5, 7], [4, 6, 6], [4, 7, 5], [4, 8, 4], [4, 9, 3], [4, 10, 2], [4, 11, 1], [4, 12, 0], [5, 0, 11], [5, 1, 10], [5, 2, 9], [5, 3, 8], [5, 4, 7], [5, 5, 6], [5, 6, 5], [5, 7, 4], [5, 8, 3], [5, 9, 2], [5, 10, 1], [5, 11, 0], [6, 0, 10], [6, 1, 9], [6, 2, 8], [6, 3, 7], [6, 4, 6], [6, 5, 5], [6, 6, 4], [6, 7, 3], [6, 8, 2], [6, 9, 1], [6, 10, 0], [7, 0, 9], [7, 1, 8], [7, 2, 7], [7, 3, 6], [7, 4, 5], [7, 5, 4], [7, 6, 3], [7, 7, 2], [7, 8, 1], [7, 9, 0], [8, 0, 8], [8, 1, 7], [8, 2, 6], [8, 3, 5], [8, 4, 4], [8, 5, 3], [8, 6, 2], [8, 7, 1], [8, 8, 0], [9, 0, 7], [9, 1, 6], [9, 2, 5], [9, 3, 4], [9, 4, 3], [9, 5, 2], [9, 6, 1], [9, 7, 0], [10, 0, 6], [10, 1, 5], [10, 2, 4], [10, 3, 3], [10, 4, 2], [10, 5, 1], [10, 6, 0], [11, 0, 5], [11, 1, 4], [11, 2, 3], [11, 3, 2], [11, 4, 1], [11, 5, 0], [12, 0, 4], [12, 1, 3], [12, 2, 2], [12, 3, 1], [12, 4, 0], [13, 0, 3], [13, 1, 2], [13, 2, 1], [13, 3, 0], [14, 0, 2], [14, 1, 1], [14, 2, 0], [15, 0, 1], [15, 1, 0], [16, 0, 0]] self.assertEqual(get_concs_for_size(size,nspecies,res_concs,nB,concs),out) def test8(self): from phenum.symmetry import get_concs_for_size size = 18 nspecies = 3 nB = 4 res_concs = True concs = [[3, 7, 18], [1, 8, 18], [6, 7, 18]] out = [[12, 32, 28], [13, 31, 28], [13, 32, 27], [14, 30, 28], [14, 31, 27], [14, 32, 26], [15, 29, 28], [15, 30, 27], [15, 31, 26], [15, 32, 25], [16, 28, 28], [16, 29, 27], [16, 30, 26], [16, 31, 25], [16, 32, 24], [17, 27, 28], [17, 28, 27], [17, 29, 26], [17, 30, 25], [17, 31, 24], [18, 26, 28], [18, 27, 27], [18, 28, 26], [18, 29, 25], [18, 30, 24], [19, 25, 28], [19, 26, 27], [19, 27, 26], [19, 28, 25], [19, 29, 24], [20, 24, 28], [20, 25, 27], [20, 26, 26], [20, 27, 25], [20, 28, 24], [21, 23, 28], [21, 24, 27], [21, 25, 26], [21, 26, 25], [21, 27, 24], [22, 22, 28], [22, 23, 27], [22, 24, 26], [22, 25, 25], [22, 26, 24], [23, 21, 28], [23, 22, 27], [23, 23, 26], [23, 24, 25], [23, 25, 24], [24, 20, 28], [24, 21, 27], [24, 22, 26], [24, 23, 25], [24, 24, 24], [25, 19, 28], [25, 20, 27], [25, 
21, 26], [25, 22, 25], [25, 23, 24], [26, 18, 28], [26, 19, 27], [26, 20, 26], [26, 21, 25], [26, 22, 24], [27, 17, 28], [27, 18, 27], [27, 19, 26], [27, 20, 25], [27, 21, 24], [28, 16, 28], [28, 17, 27], [28, 18, 26], [28, 19, 25], [28, 20, 24]] self.assertEqual(get_concs_for_size(size,nspecies,res_concs,nB,concs),out) def test9(self): from phenum.symmetry import get_concs_for_size size = 15 nspecies = 2 nB = 3 res_concs = False concs = [] out = [[0, 15], [1, 14], [2, 13], [3, 12], [4, 11], [5, 10], [6, 9], [7, 8], [8, 7], [9, 6], [10, 5], [11, 4], [12, 3], [13, 2], [14, 1], [15, 0]] self.assertEqual(get_concs_for_size(size,nspecies,res_concs,nB,concs),out) def test10(self): from phenum.symmetry import get_concs_for_size size = 14 nspecies = 2 nB = 4 res_concs = True concs = [[3, 3, 14], [2, 13, 14]] out = [[12, 44]] self.assertEqual(get_concs_for_size(size,nspecies,res_concs,nB,concs),out) class TestGetSpaceGroup(ut.TestCase): """Tests of the get_spaceGroup subroutine.""" def _compare_space_group(self,out1,out2): ops1 = out1[0] ops2 = out2[0] fract1 = out1[1] fract2 = out2[1] if len(ops1) == len(ops2): for i in range(len(ops1)): for j in range(3): for k in range(3): self.assertAlmostEqual(ops1[i][j][k],ops2[i][j][k],places=12) else: self.assertEqual(len(ops1),len(ops2)) if len(fract1) == len(fract2): for i in range(len(ops1)): for j in range(3): self.assertAlmostEqual(fract1[i][j],fract2[i][j],places=12) else: self.assertEqual(len(fract1),len(fract2)) def test1(self): from phenum.symmetry import get_spaceGroup par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] atomType = [1] bas_vecs = [[0.0, 0.0, 0.0]] eps = 1e-10 lattcoords = False out = ([[[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]], [[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, -1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]], [[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, -1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, -1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, -1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, -1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, 
1.0, 0.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]], [[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, -1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]], [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) self.assertEqual(get_spaceGroup(par_lat,atomType,bas_vecs,eps,lattcoords),out) def test2(self): from phenum.symmetry import get_spaceGroup par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] atomType = [1] bas_vecs = [[0.0, 0.0, 0.0]] eps = 1e-10 lattcoords = False out = ([[[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]], [[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, -1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]], [[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, -1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, -1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, -1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, -1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], 
[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]], [[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, -1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]], [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) self.assertEqual(get_spaceGroup(par_lat,atomType,bas_vecs,eps,lattcoords),out) def test3(self): from phenum.symmetry import get_spaceGroup par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] atomType = [1] bas_vecs = [[0.0, 0.0, 0.0]] eps = 1e-10 lattcoords = False out = ([[[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]], [[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, -1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]], [[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, -1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, -1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, -1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, -1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0], [1.0, 0.0, 
0.0]], [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]], [[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, -1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]], [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) self.assertEqual(get_spaceGroup(par_lat,atomType,bas_vecs,eps,lattcoords),out) def test4(self): from phenum.symmetry import get_spaceGroup par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] atomType = [1] bas_vecs = [[0.0, 0.0, 0.0]] eps = 1e-10 lattcoords = False out = ([[[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]], [[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, -1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]], [[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, -1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, -1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, -1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, -1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, 
-1.0, 0.0]], [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]], [[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, -1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]], [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) self.assertEqual(get_spaceGroup(par_lat,atomType,bas_vecs,eps,lattcoords),out) def test5(self): from phenum.symmetry import get_spaceGroup par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] atomType = [1, 1] bas_vecs = [[0.0, 0.0, 0.0], [0.25, 0.25, 0.75]] eps = 1e-10 lattcoords = False out = ([[[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]], [[0.0, -1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, -1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], [[0.25, 0.25, 0.75], [0.25, 0.25, 0.75], [0.25, 0.25, 0.75], [0.25, 0.25, 0.75], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.25, 0.25, 0.75], [0.25, 0.25, 0.75], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) self.assertEqual(get_spaceGroup(par_lat,atomType,bas_vecs,eps,lattcoords),out) def test6(self): from phenum.symmetry import get_spaceGroup par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] atomType = [1, 1] bas_vecs = [[0.0, 0.0, 0.0], [0.25, 0.25, 0.75]] eps = 1e-10 lattcoords = 
False out = ([[[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]], [[0.0, -1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, -1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], [[0.25, 0.25, 0.75], [0.25, 0.25, 0.75], [0.25, 0.25, 0.75], [0.25, 0.25, 0.75], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.25, 0.25, 0.75], [0.25, 0.25, 0.75], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) self.assertEqual(get_spaceGroup(par_lat,atomType,bas_vecs,eps,lattcoords),out) def test7(self): from phenum.symmetry import get_spaceGroup par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] atomType = [1, 1, 1] bas_vecs = [[0.0, 0.0, 0.0], [0.25, 0.25, 0.75], [0.5, 0.5, 0.25]] eps = 1e-10 lattcoords = False out = ([[[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) self.assertEqual(get_spaceGroup(par_lat,atomType,bas_vecs,eps,lattcoords),out) def test8(self): from phenum.symmetry import get_spaceGroup par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] atomType = [1, 1, 1] bas_vecs = [[0.0, 0.0, 0.0], [0.25, 0.25, 0.75], [0.5, 0.5, 0.25]] eps = 1e-10 lattcoords = False out = ([[[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) self.assertEqual(get_spaceGroup(par_lat,atomType,bas_vecs,eps,lattcoords),out) def test9(self): from phenum.symmetry import get_spaceGroup par_lat = [[0.0, 0.5, 0.5], [0.5, 0.5, 0.0], [0.5, 0.0, 0.5]] atomType = [1] bas_vecs = [[0.0, 0.0, 0.0]] eps = 1e-10 lattcoords = False out = ([[[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, -1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, -1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, 0.0, -1.0], [0.0, -1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]], [[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]], [[0.0, 
1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]], [[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) self.assertEqual(get_spaceGroup(par_lat,atomType,bas_vecs,eps,lattcoords),out) def test10(self): from phenum.symmetry import get_spaceGroup par_lat = [[0.0, 0.5, 0.5], [0.5, 0.5, 0.0], [0.5, 0.0, 0.5]] atomType = [1] bas_vecs = [[0.0, 0.0, 0.0]] eps = 1e-10 lattcoords = False out = ([[[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, -1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, -1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, 0.0, -1.0], [0.0, -1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]], [[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]], 
[[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]], [[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]], [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, -1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]], [[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]], [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], [[-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]], [[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) self.assertEqual(get_spaceGroup(par_lat,atomType,bas_vecs,eps,lattcoords),out) def test_getsg11(self): from phenum.symmetry import get_spaceGroup case = 1 par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_spaceGroup_aVecs.in."+str(case))))) atomType = _read_int_1D(gpath+"get_spaceGroup_atomType.in."+str(case)) bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_spaceGroup_input_pos.in."+str(case))))) eps = _read_float(gpath+"get_spaceGroup_eps.in."+str(case)) lattcoords = _read_logical(gpath+"get_spaceGroup_lattcoords.in."+str(case)) out = _read_spaceGroup(case) ops, fract = get_spaceGroup(par_lat,atomType,bas_vecs,eps,lattcoords) self._compare_space_group(out,[ops,fract]) def test_getsg12(self): from phenum.symmetry import get_spaceGroup case = 2 par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_spaceGroup_aVecs.in."+str(case))))) atomType = _read_int_1D(gpath+"get_spaceGroup_atomType.in."+str(case)) bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_spaceGroup_input_pos.in."+str(case))))) eps = _read_float(gpath+"get_spaceGroup_eps.in."+str(case)) lattcoords = _read_logical(gpath+"get_spaceGroup_lattcoords.in."+str(case)) out = _read_spaceGroup(case) ops, fract = get_spaceGroup(par_lat,atomType,bas_vecs,eps,lattcoords) self._compare_space_group(out,[ops,fract]) def test_getsg13(self): from phenum.symmetry import get_spaceGroup case = 3 
par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_spaceGroup_aVecs.in."+str(case))))) atomType = _read_int_1D(gpath+"get_spaceGroup_atomType.in."+str(case)) bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_spaceGroup_input_pos.in."+str(case))))) eps = _read_float(gpath+"get_spaceGroup_eps.in."+str(case)) lattcoords = _read_logical(gpath+"get_spaceGroup_lattcoords.in."+str(case)) out = _read_spaceGroup(case) ops, fract = get_spaceGroup(par_lat,atomType,bas_vecs,eps,lattcoords) self._compare_space_group(out,[ops,fract]) def test_getsg14(self): from phenum.symmetry import get_spaceGroup case = 4 par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_spaceGroup_aVecs.in."+str(case))))) atomType = _read_int_1D(gpath+"get_spaceGroup_atomType.in."+str(case)) bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_spaceGroup_input_pos.in."+str(case))))) eps = _read_float(gpath+"get_spaceGroup_eps.in."+str(case)) lattcoords = _read_logical(gpath+"get_spaceGroup_lattcoords.in."+str(case)) out = _read_spaceGroup(case) ops, fract = get_spaceGroup(par_lat,atomType,bas_vecs,eps,lattcoords) self._compare_space_group(out,[ops,fract]) def test_getsg15(self): from phenum.symmetry import get_spaceGroup case = 5 par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_spaceGroup_aVecs.in."+str(case))))) atomType = _read_int_1D(gpath+"get_spaceGroup_atomType.in."+str(case)) bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_spaceGroup_input_pos.in."+str(case))))) eps = _read_float(gpath+"get_spaceGroup_eps.in."+str(case)) lattcoords = _read_logical(gpath+"get_spaceGroup_lattcoords.in."+str(case)) out = _read_spaceGroup(case) ops, fract = get_spaceGroup(par_lat,atomType,bas_vecs,eps,lattcoords) self._compare_space_group(out,[ops,fract]) def test_getsg16(self): from phenum.symmetry import get_spaceGroup case = 6 par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_spaceGroup_aVecs.in."+str(case))))) atomType = _read_int_1D(gpath+"get_spaceGroup_atomType.in."+str(case)) bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_spaceGroup_input_pos.in."+str(case))))) eps = _read_float(gpath+"get_spaceGroup_eps.in."+str(case)) lattcoords = _read_logical(gpath+"get_spaceGroup_lattcoords.in."+str(case)) out = _read_spaceGroup(case) ops, fract = get_spaceGroup(par_lat,atomType,bas_vecs,eps,lattcoords) self._compare_space_group(out,[ops,fract]) def test_getsg17(self): from phenum.symmetry import get_spaceGroup case = 7 par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_spaceGroup_aVecs.in."+str(case))))) atomType = _read_int_1D(gpath+"get_spaceGroup_atomType.in."+str(case)) bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_spaceGroup_input_pos.in."+str(case))))) eps = _read_float(gpath+"get_spaceGroup_eps.in."+str(case)) lattcoords = _read_logical(gpath+"get_spaceGroup_lattcoords.in."+str(case)) out = _read_spaceGroup(case) ops, fract = get_spaceGroup(par_lat,atomType,bas_vecs,eps=eps,lattcoords=lattcoords) self._compare_space_group(out,[ops,fract]) def test_getsg18(self): from phenum.symmetry import get_spaceGroup case = 8 par_lat = list(map(list,zip(*_read_float_2D(gpath+"get_spaceGroup_aVecs.in."+str(case))))) atomType = _read_int_1D(gpath+"get_spaceGroup_atomType.in."+str(case)) bas_vecs = list(map(list,zip(*_read_float_2D(gpath+"get_spaceGroup_input_pos.in."+str(case))))) eps = _read_float(gpath+"get_spaceGroup_eps.in."+str(case)) lattcoords = _read_logical(gpath+"get_spaceGroup_lattcoords.in."+str(case)) out = _read_spaceGroup(case) ops, fract = 
get_spaceGroup(par_lat, atomType, bas_vecs, eps, lattcoords)
        self._compare_space_group(out, [ops, fract])

    def test_getsg19(self):
        from phenum.symmetry import get_spaceGroup
        case = 9
        par_lat = list(map(list, zip(*_read_float_2D(gpath+"get_spaceGroup_aVecs.in."+str(case)))))
        atomType = _read_int_1D(gpath+"get_spaceGroup_atomType.in."+str(case))
        bas_vecs = list(map(list, zip(*_read_float_2D(gpath+"get_spaceGroup_input_pos.in."+str(case)))))
        eps = _read_float(gpath+"get_spaceGroup_eps.in."+str(case))
        lattcoords = _read_logical(gpath+"get_spaceGroup_lattcoords.in."+str(case))
        out = _read_spaceGroup(case)
        ops, fract = get_spaceGroup(par_lat, atomType, bas_vecs, eps, lattcoords)
        self._compare_space_group(out, [ops, fract])

    def test_getsg20(self):
        # The original imported numpy.testing.assert_allclose and numpy.array
        # here but never used them; the dead imports are dropped.
        from phenum.symmetry import get_spaceGroup
        case = 10
        par_lat = list(map(list, zip(*_read_float_2D(gpath+"get_spaceGroup_aVecs.in."+str(case)))))
        atomType = _read_int_1D(gpath+"get_spaceGroup_atomType.in."+str(case))
        bas_vecs = list(map(list, zip(*_read_float_2D(gpath+"get_spaceGroup_input_pos.in."+str(case)))))
        eps = _read_float(gpath+"get_spaceGroup_eps.in."+str(case))
        lattcoords = _read_logical(gpath+"get_spaceGroup_lattcoords.in."+str(case))
        out = _read_spaceGroup(case)
        ops, fract = get_spaceGroup(par_lat, atomType, bas_vecs, eps, lattcoords)
        self._compare_space_group(out, [ops, fract])

    def test_getsg21(self):
        from phenum.symmetry import get_spaceGroup, _get_transformations
        par_lat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
        (prim_to_cart, cart_to_prim) = _get_transformations(par_lat)
        atomType = [1, 1, 1]
        bas_vecs = [[0.0, 0.0, 0.0], [0.25, 0.25, 0.75], [0.5, 0.5, 0.25]]
        bas_vecs = [np.matmul(cart_to_prim, i).tolist() for i in bas_vecs]
        eps = 1e-10
        lattcoords = True
        out = ([[[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]],
                [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]],
               [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
        self.assertEqual(get_spaceGroup(par_lat, atomType, bas_vecs, eps, lattcoords), out)


class TestBringIntoCell(ut.TestCase):
    """Tests of the bring_into_cell subroutine.

    The original suite spelled these cases out as copy-paste methods
    test1-test19, two of which were both named `test11` so that the second
    silently shadowed the first. The same inputs and expected outputs are
    kept, grouped into table-driven tests.
    """

    def _check(self, bas_vecs, cart_to_latt, latt_to_cart, eps, out):
        from phenum.symmetry import bring_into_cell
        self.assertEqual(bring_into_cell(bas_vecs, cart_to_latt, latt_to_cart, eps), out)

    def test_identity_cell(self):
        # Original tests 1-13: identity transformations, so points already
        # inside the unit cell must come back unchanged.
        identity = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
        for point in ([0.0, 0.0, 0.0], [0.25, 0.25, 0.75], [0.5, 0.5, 0.25]):
            with self.subTest(point=point):
                self._check(point, identity, identity, 1e-10, point)

    def test_fcc_cell(self):
        # Original tests 14-15: an fcc-like cell; the origin maps to itself.
        cart_to_latt = [[-1.0, 1.0, 1.0], [1.0, 1.0, -1.0], [1.0, -1.0, 1.0]]
        latt_to_cart = [[0.0, 0.5, 0.5], [0.5, 0.5, 0.0], [0.5, 0.0, 0.5]]
        self._check([0.0, 0.0, 0.0], cart_to_latt, latt_to_cart, 1e-10, [0.0, 0.0, 0.0])

    def test_skewed_cell(self):
        # Original tests 16-18: a skewed cell with a loose eps of 1e-3; note
        # the floating-point drift expected in the last case.
        cart_to_latt = [[1.0, -1.0, 0.1], [1.0, 1.0, -0.1], [-1.0, 1.0, 0.1]]
        latt_to_cart = [[0.5, 0.5, 0.0], [0.0, 0.5, 0.5], [5.0, 0.0, 5.0]]
        cases = [([1.0, 0.0, 1.0], [1.0, 0.0, 1.0]),
                 ([2.0, 0.0, 2.0], [2.0, 0.0, 2.0]),
                 ([3.0, 0.0, 3.0], [3.0000000000000004, 0.0, 3.0000000000000004])]
        for point, expected in cases:
            with self.subTest(point=point):
                self._check(point, cart_to_latt, latt_to_cart, 1e-3, expected)

    def test_hexagonal_cell(self):
        # Original test 19: a hexagonal cell where the input point lies just
        # outside the home cell and is wrapped back in.
        bas_vecs = [0.0, 0.5779502399999998, 1.6329931599999998]
        cart_to_latt = [[1.0, 0.0, 0.0],
                        [-0.5773502717125849, 1.1547005434251698, 0.0],
                        [0.0, 0.0, 0.6123724213915893]]
        latt_to_cart = [[1.0, 0.0, 0.0], [0.5, 0.8660254, 0.0], [0.0, 0.0, 1.6329932]]
        out = [1.0000000000000000, 0.57795023999999980, -4.0000000416390380E-008]
        self._check(bas_vecs, cart_to_latt, latt_to_cart, 1e-3, out)

class TestGetLatticePointGroup(ut.TestCase):
    """Tests of the _get_lattice_pointGroup subroutine.

    The original twenty copy-paste methods (test_getpg1-test_getpg20) are
    collapsed into one table-driven test over the same stored cases.
    """

    def test_getpg_cases(self):
        from phenum.symmetry import _get_lattice_pointGroup
        for case in range(1, 21):
            with self.subTest(case=case):
                avecs = _read_float_2D(gpath+"get_lattice_pointGroup_aVecs.in."+str(case))
                eps = _read_float(gpath+"get_lattice_pointGroup_eps.in."+str(case))
                out = _read_float_3D(gpath+"get_lattice_pointGroup_lattpg_op.out."+str(case))
                self.assertEqual(_get_lattice_pointGroup(avecs, eps), out)


class TestGetTransformations(ut.TestCase):
    """Tests of the _get_transformations subroutine (stored cases 1-20,
    originally written out as test1-test20)."""

    def _compare_outputs(self, out1, out2):
        ptc1, ctp1 = out1
        ptc2, ctp2 = out2
        for i in range(3):
            for j in range(3):
                self.assertAlmostEqual(ptc1[i][j], ptc2[i][j])
                self.assertAlmostEqual(ctp1[i][j], ctp2[i][j])

    def _trans_out(self, case):
        ctp = _read_float_2D(gpath+"get_transformations_cart_to_prim.out."+str(case))
        ptc = _read_float_2D(gpath+"get_transformations_prim_to_cart.out."+str(case))
        return (ptc, ctp)

    def test_cases(self):
        from phenum.symmetry import _get_transformations
        for case in range(1, 21):
            with self.subTest(case=case):
                aVecs = _read_float_2D(gpath+"get_transformations_aVecs.in."+str(case))
                out = self._trans_out(case)
                self._compare_outputs(_get_transformations(aVecs), out)


class TestDoesMappingExist(ut.TestCase):
    """Tests of the _does_mapping_exist subroutine (stored cases 1-20,
    originally written out as test1-test20)."""

    def test_cases(self):
        from phenum.symmetry import _does_mapping_exist
        for case in range(1, 21):
            with self.subTest(case=case):
                v = _read_float_1D(gpath+"does_mapping_exist_v.in."+str(case))
                this_type = _read_int(gpath+"does_mapping_exist_this_type.in."+str(case))
                atom_pos = list(map(list, zip(*_read_float_2D(gpath+"does_mapping_exist_atom_pos.in."+str(case)))))
                atomType = _read_int_1D(gpath+"does_mapping_exist_atomType.in."+str(case))
                eps = _read_float(gpath+"does_mapping_exist_eps.in."+str(case))
                out = _read_logical(gpath+"does_mapping_exist_mapped.out."+str(case))
                self.assertEqual(_does_mapping_exist(v, this_type, atom_pos, atomType, eps), out)
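# Companion sketch (hypothetical helper, not part of phenum): the cases above
# pin down bring_into_cell's observable behaviour as "map to lattice
# coordinates, wrap into the home cell within eps, map back to Cartesian".
# A minimal reference version under that assumption, using the row-vector
# convention the test matrices appear to follow:
import numpy as np

def bring_into_cell_sketch(point, cart_to_latt, latt_to_cart, eps):
    latt = np.dot(point, cart_to_latt)           # Cartesian -> lattice coordinates
    latt = latt - np.floor(latt + eps)           # wrap each component into [0, 1)
    return np.dot(latt, latt_to_cart).tolist()   # lattice -> Cartesian coordinates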
64.597454
3,451
0.541645
16,775
81,199
2.470939
0.014605
0.305911
0.389747
0.429144
0.922099
0.918384
0.909578
0.904897
0.871942
0.808034
0
0.174219
0.208352
81,199
1,256
3,452
64.648885
0.470606
0.003387
0
0.868522
0
0
0.092863
0.092307
0
0
0
0
0.084453
1
0.118042
false
0
0.110365
0
0.243762
0
0
0
0
null
1
1
1
1
1
1
1
1
1
0
0
0
0
0
1
0
1
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
12
ea8233d0933c1e17a579935d2845f691d1c3290b
4,322
py
Python
tests/datetime/test_strings.py
rileyjohngibbs/pendulum
f1df7dc3f838bd4ab1075ba25c8b6ce5d8141995
[ "MIT" ]
null
null
null
tests/datetime/test_strings.py
rileyjohngibbs/pendulum
f1df7dc3f838bd4ab1075ba25c8b6ce5d8141995
[ "MIT" ]
null
null
null
tests/datetime/test_strings.py
rileyjohngibbs/pendulum
f1df7dc3f838bd4ab1075ba25c8b6ce5d8141995
[ "MIT" ]
null
null
null
import pytest
import pendulum

def test_to_string():
    d = pendulum.datetime(1975, 12, 25, 0, 0, 0, 0, tz="local")
    assert str(d) == d.to_iso8601_string()
    d = pendulum.datetime(1975, 12, 25, 0, 0, 0, 123456, tz="local")
    assert str(d) == d.to_iso8601_string()

def test_to_date_string():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16)
    assert "1975-12-25" == d.to_date_string()

def test_to_formatted_date_string():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16)
    assert "Dec 25, 1975" == d.to_formatted_date_string()

def test_to_timestring():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16)
    assert "14:15:16" == d.to_time_string()

def test_to_atom_string():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, tz="local")
    assert d.to_atom_string() == "1975-12-25T14:15:16-05:00"

def test_to_cookie_string():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, tz="local")
    assert d.to_cookie_string() == "Thursday, 25-Dec-1975 14:15:16 EST"

def test_to_iso8601_string():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, tz="local")
    assert d.to_iso8601_string() == "1975-12-25T14:15:16-05:00"

def test_to_iso8601_string_utc():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16)
    assert d.to_iso8601_string() == "1975-12-25T14:15:16Z"

def test_to_iso8601_extended_string():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, 123456, tz="local")
    assert d.to_iso8601_string() == "1975-12-25T14:15:16.123456-05:00"

def test_to_rfc822_string():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, tz="local")
    assert d.to_rfc822_string() == "Thu, 25 Dec 75 14:15:16 -0500"

def test_to_rfc850_string():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, tz="local")
    assert d.to_rfc850_string() == "Thursday, 25-Dec-75 14:15:16 EST"

def test_to_rfc1036_string():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, tz="local")
    assert d.to_rfc1036_string() == "Thu, 25 Dec 75 14:15:16 -0500"

def test_to_rfc1123_string():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, tz="local")
    assert d.to_rfc1123_string() == "Thu, 25 Dec 1975 14:15:16 -0500"

def test_to_rfc2822_string():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, tz="local")
    assert d.to_rfc2822_string() == "Thu, 25 Dec 1975 14:15:16 -0500"

def test_to_rfc3339_string():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, tz="local")
    assert d.to_rfc3339_string() == "1975-12-25T14:15:16-05:00"

def test_to_rfc3339_extended_string():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, 123456, tz="local")
    assert d.to_rfc3339_string() == "1975-12-25T14:15:16.123456-05:00"

def test_to_rss_string():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, tz="local")
    assert d.to_rss_string() == "Thu, 25 Dec 1975 14:15:16 -0500"

def test_to_w3c_string():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, tz="local")
    assert d.to_w3c_string() == "1975-12-25T14:15:16-05:00"

def test_to_string_invalid():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, tz="local")
    with pytest.raises(ValueError):
        d._to_string("invalid")

def test_repr():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, tz="local")
    expected = "DateTime(1975, 12, 25, 14, 15, 16, tzinfo={})".format(repr(d.tzinfo))
    assert repr(d) == expected
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, 123456, tz="local")
    expected = "DateTime(1975, 12, 25, 14, 15, 16, 123456, tzinfo={})".format(
        repr(d.tzinfo)
    )
    assert repr(d) == expected

def test_format_with_locale():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, tz="local")
    expected = "jeudi 25e jour de décembre 1975 02:15:16 PM -05:00"
    assert d.format("dddd Do [jour de] MMMM YYYY hh:mm:ss A Z", locale="fr") == expected

def test_strftime():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, tz="local")
    assert d.strftime("%d") == "25"

def test_for_json():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, tz="local")
    assert d.for_json() == "1975-12-25T14:15:16-05:00"

def test_format():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, tz="Europe/Paris")
    assert f"{d}" == "1975-12-25T14:15:16+01:00"
    assert f"{d:YYYY}" == "1975"
    assert f"{d:%Y}" == "1975"
    assert f"{d:%H:%M %d.%m.%Y}" == "14:15 25.12.1975"
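# Quick companion illustration (not from the suite): pendulum's token-based
# format(), exercised by test_format above, alongside stdlib strftime()
# rendering the same instant.
def _format_tokens_example():
    d = pendulum.datetime(1975, 12, 25, 14, 15, 16, tz="UTC")
    assert d.format("YYYY-MM-DD HH:mm:ss") == "1975-12-25 14:15:16"  # pendulum tokens
    assert d.strftime("%Y-%m-%d %H:%M:%S") == "1975-12-25 14:15:16"  # strftime codes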
30.43662
88
0.64137
739
4,322
3.603518
0.113667
0.064589
0.076605
0.168231
0.772437
0.747653
0.741645
0.728877
0.726624
0.648517
0
0.234313
0.181398
4,322
141
89
30.652482
0.518372
0
0
0.310345
0
0
0.198519
0.049514
0
0
0
0
0.321839
1
0.275862
false
0
0.022989
0
0.298851
0
0
0
0
null
0
0
1
0
1
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
8
57945345cb69273cc1ea5c56d5774e69e5752715
12,794
py
Python
WHI_long_term_make_SP2_GC_comparison_table-v2.py
annahs/atmos_research
b5853c9b12e327492f8f8ba5069bca3fd2e981c8
[ "MIT" ]
2
2018-08-17T15:25:26.000Z
2019-04-17T16:50:00.000Z
WHI_long_term_make_SP2_GC_comparison_table-v2.py
annahs/atmos_research
b5853c9b12e327492f8f8ba5069bca3fd2e981c8
[ "MIT" ]
null
null
null
WHI_long_term_make_SP2_GC_comparison_table-v2.py
annahs/atmos_research
b5853c9b12e327492f8f8ba5069bca3fd2e981c8
[ "MIT" ]
null
null
null
import matplotlib.pyplot as plt import sys import os import numpy as np from pprint import pprint from datetime import datetime from datetime import timedelta import mysql.connector import pickle import math import calendar from math import log10, floor GC_error = True test_case = 'Van'#'default' #default, Van, wet_scav, no_bb, all_together RH_of_interest = 90 #101 = no threshold sig_figs_SP2 = 3 sig_figs_gc = 4 def round_to_n(x,n): return round(x, -int(floor(log10(x))) + (n - 1)) #database connection cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon') cursor = cnx.cursor() data= [] red_list = [] blue_list = [] clusters = ['all','NPac','SPac','Cont','LRT'] GC_row_no = 2 cursor.execute(('SELECT 10th_percentile_mass_conc, 50th_percentile_mass_conc, 90th_percentile_mass_conc, mean_mass_conc, rel_err, data_source, test_scenario,cluster from whi_gc_and_sp2_stats_on_6h_clustered_ft_data where RH_threshold = %s and cluster = %s and test_scenario = %s '),(RH_of_interest,'all','default')) data_raw = cursor.fetchall() cursor.execute(('SELECT 10th_percentile_mass_conc, 50th_percentile_mass_conc, 90th_percentile_mass_conc, mean_mass_conc, rel_err, data_source, test_scenario,cluster from whi_gc_and_sp2_stats_on_6h_clustered_ft_data where RH_threshold = %s and cluster = %s and test_scenario = %s '),(RH_of_interest,'all',test_case)) wet_scav_data = cursor.fetchall() data_raw.append(wet_scav_data[0]) for row in data_raw: data_source = row[5] case= row[6] if data_source == 'SP2': p10_sp2 = row[0] p50_sp2 = row[1] p90_sp2 = row[2] mean_sp2 = row[3] rel_err_sp2 = row[4] if data_source == 'GEOS-Chem' and case == 'default': p10_gc = row[0] p50_gc = row[1] p90_gc = row[2] mean_gc = row[3] if GC_error == True: rel_err_gc = row[4] else: rel_err_gc = 0 if data_source == 'GEOS-Chem' and case == test_case: p10_gc_ws = row[0] p50_gc_ws = row[1] p90_gc_ws = row[2] mean_gc_ws = row[3] if GC_error == True: rel_err_gc_ws = row[4] else: rel_err_gc_ws = 0 SP2_10 = str(round_to_n(p10_sp2,sig_figs_SP2)) + u'\u00B1' + str(round_to_n(p10_sp2*rel_err_sp2,sig_figs_SP2)) SP2_50 = str(round_to_n(p50_sp2,sig_figs_SP2)) + u'\u00B1' + str(round_to_n(p50_sp2*rel_err_sp2,sig_figs_SP2)) SP2_90 = str(round_to_n(p90_sp2,sig_figs_SP2)) + u'\u00B1' + str(round_to_n(p90_sp2*rel_err_sp2,sig_figs_SP2)) SP2_mean = str(round_to_n(mean_sp2,sig_figs_SP2)) + u'\u00B1' + str(round_to_n(mean_sp2*rel_err_sp2,sig_figs_SP2)) if GC_error == True: GC_10 = str(round_to_n(p10_gc,sig_figs_gc)) + u'\u00B1' + str(round_to_n(p10_gc*rel_err_gc,sig_figs_gc)) + '\n(' + str(round_to_n(p10_gc/p10_sp2,3)) + ')' GC_50 = str(round_to_n(p50_gc,sig_figs_gc)) + u'\u00B1' + str(round_to_n(p50_gc*rel_err_gc,sig_figs_gc)) + '\n(' + str(round_to_n(p50_gc/p50_sp2,3)) + ')' GC_90 = str(round_to_n(p90_gc,sig_figs_gc)) + u'\u00B1' + str(round_to_n(p90_gc*rel_err_gc,sig_figs_gc)) + '\n(' + str(round_to_n(p90_gc/p90_sp2,3)) + ')' GC_mean = str(round_to_n(mean_gc,sig_figs_gc)) + u'\u00B1' + str(round_to_n(mean_gc*rel_err_gc,sig_figs_gc)) + '\n(' + str(round_to_n(mean_gc/mean_sp2,3)) + ')' GC_10_ws = str(round_to_n(p10_gc_ws,sig_figs_gc)) + u'\u00B1' + str(round_to_n(p10_gc_ws*rel_err_gc_ws,sig_figs_gc)) + '\n(' + str(round_to_n(p10_gc_ws/p10_sp2,3)) + ')' GC_50_ws = str(round_to_n(p50_gc_ws,sig_figs_gc)) + u'\u00B1' + str(round_to_n(p50_gc_ws*rel_err_gc_ws,sig_figs_gc)) + '\n(' + str(round_to_n(p50_gc_ws/p50_sp2,3)) + ')' GC_90_ws = str(round_to_n(p90_gc_ws,sig_figs_gc)) + u'\u00B1' + 
str(round_to_n(p90_gc_ws*rel_err_gc_ws,sig_figs_gc)) + '\n(' + str(round_to_n(p90_gc_ws/p90_sp2,3)) + ')' GC_mean_ws = str(round_to_n(mean_gc_ws,sig_figs_gc)) + u'\u00B1' + str(round_to_n(mean_gc_ws*rel_err_gc_ws,sig_figs_gc)) + '\n(' + str(round_to_n(mean_gc_ws/mean_sp2,3)) + ')' else: GC_10 = str(round_to_n(p10_gc,sig_figs_gc)) + '\n(' + str(round_to_n(p10_gc/p10_sp2,3)) + ')' GC_50 = str(round_to_n(p50_gc,sig_figs_gc)) + '\n(' + str(round_to_n(p50_gc/p50_sp2,3)) + ')' GC_90 = str(round_to_n(p90_gc,sig_figs_gc)) + '\n(' + str(round_to_n(p90_gc/p90_sp2,3)) + ')' GC_mean = str(round_to_n(mean_gc,sig_figs_gc)) + '\n(' + str(round_to_n(mean_gc/mean_sp2,3)) + ')' GC_10_ws = str(round_to_n(p10_gc_ws,sig_figs_gc)) + '\n(' + str(round_to_n(p10_gc_ws/p10_sp2,3)) + ')' GC_50_ws = str(round_to_n(p50_gc_ws,sig_figs_gc)) + '\n(' + str(round_to_n(p50_gc_ws/p50_sp2,3)) + ')' GC_90_ws = str(round_to_n(p90_gc_ws,sig_figs_gc)) + '\n(' + str(round_to_n(p90_gc_ws/p90_sp2,3)) + ')' GC_mean_ws = str(round_to_n(mean_gc_ws,sig_figs_gc)) + '\n(' + str(round_to_n(mean_gc_ws/mean_sp2,3)) + ')' GC_list = [p10_gc, p50_gc, p90_gc, mean_gc] GC_list_ws = [p10_gc_ws, p50_gc_ws, p90_gc_ws, mean_gc_ws] SP2_list = [p10_sp2, p50_sp2, p90_sp2, mean_sp2] i = 0 for value in GC_list: if (value - value*rel_err_gc) > (SP2_list[i]+ SP2_list[i]*rel_err_sp2): red_list.append((2,i+1)) if (value + value*rel_err_gc) < (SP2_list[i]- SP2_list[i]*rel_err_sp2): blue_list.append((2,i+1)) i+=1 i = 0 for value in GC_list_ws: if (value - value*rel_err_gc_ws) > (SP2_list[i]+ SP2_list[i]*rel_err_sp2): red_list.append((3,i+1)) if (value + value*rel_err_gc_ws) < (SP2_list[i]- SP2_list[i]*rel_err_sp2): blue_list.append((3,i+1)) i+=1 table_row_SP2 = ['Measurement',SP2_10,SP2_50,SP2_90,SP2_mean] table_row_GC = ['GEOS-Chem\ndefault scenario', GC_10,GC_50,GC_90,GC_mean] table_row_GC_ws = ['GEOS-Chem\n' + str(test_case), GC_10_ws,GC_50_ws,GC_90_ws,GC_mean_ws] data.append(table_row_SP2) data.append(table_row_GC) data.append(table_row_GC_ws) colLabels=('data source','10th ptile', '50th ptile', '90th ptile', 'mean') fig=plt.figure() ax = fig.add_subplot(111) ax.axis('off') #do the table the_table = ax.table(cellText=data, colLabels=colLabels, loc='center') table_props=the_table.properties() table_cells=table_props['child_artists'] i=0 for cell in table_cells: ht = cell.get_height() wd = cell.get_width() cell.set_width(wd*1) cell.set_height(ht*2.2) cell.set_fontsize(14) #if i in [1,3,5,7]: # cell.set_linewidth(4) i+=1 cellDict = the_table.get_celld() for cell in red_list: cellDict[cell]._text.set_color('r') for cell in blue_list: cellDict[cell]._text.set_color('b') os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/GOES-Chem/') plt.savefig('GC default v10 vs SP2 by cluster for WHI - ' + 'all' + ' - ' + str(RH_of_interest) + '% RH threshold - ' + str(test_case) + '.png',bbox_inches='tight') plt.show() ####################### data= [] red_list = [] blue_list = [] clusters = ['NPac','SPac','Cont','LRT'] GC_row_no = 2 for cluster in clusters: cursor.execute(('SELECT 10th_percentile_mass_conc, 50th_percentile_mass_conc, 90th_percentile_mass_conc, mean_mass_conc, rel_err, data_source, test_scenario,cluster from whi_gc_and_sp2_stats_on_6h_clustered_ft_data where RH_threshold = %s and cluster = %s and test_scenario = %s '),(RH_of_interest,cluster,'default')) data_raw = cursor.fetchall() cursor.execute(('SELECT 10th_percentile_mass_conc, 50th_percentile_mass_conc, 90th_percentile_mass_conc, mean_mass_conc, rel_err, data_source, test_scenario,cluster 
from whi_gc_and_sp2_stats_on_6h_clustered_ft_data where RH_threshold = %s and cluster = %s and test_scenario = %s '),(RH_of_interest,cluster,test_case))
    wet_scav_data = cursor.fetchall()
    data_raw.append(wet_scav_data[0])
    pprint(data_raw)
    for row in data_raw:
        print(row)  # was the Python 2 statement `print row`
        data_source = row[5]
        case = row[6]
        if data_source == 'SP2':
            p10_sp2 = row[0]
            p50_sp2 = row[1]
            p90_sp2 = row[2]
            mean_sp2 = row[3]
            rel_err_sp2 = row[4]
        if data_source == 'GEOS-Chem' and case == 'default':
            p10_gc = row[0]
            p50_gc = row[1]
            p90_gc = row[2]
            mean_gc = row[3]
            if GC_error == True:
                rel_err_gc = row[4]
            else:
                rel_err_gc = 0
        if data_source == 'GEOS-Chem' and case == test_case:
            p10_gc_ws = row[0]
            p50_gc_ws = row[1]
            p90_gc_ws = row[2]
            mean_gc_ws = row[3]
            # Bug fix: this branch originally assigned rel_err_gc, leaving
            # rel_err_gc_ws stale from the all-cluster table above even though
            # the _ws table cells and significance checks below depend on it.
            if GC_error == True:
                rel_err_gc_ws = row[4]
            else:
                rel_err_gc_ws = 0

    SP2_10 = str(round_to_n(p10_sp2,sig_figs_SP2)) + u'\u00B1' + str(round_to_n(p10_sp2*rel_err_sp2,sig_figs_SP2))
    SP2_50 = str(round_to_n(p50_sp2,sig_figs_SP2)) + u'\u00B1' + str(round_to_n(p50_sp2*rel_err_sp2,sig_figs_SP2))
    SP2_90 = str(round_to_n(p90_sp2,sig_figs_SP2)) + u'\u00B1' + str(round_to_n(p90_sp2*rel_err_sp2,sig_figs_SP2))
    SP2_mean = str(round_to_n(mean_sp2,sig_figs_SP2)) + u'\u00B1' + str(round_to_n(mean_sp2*rel_err_sp2,sig_figs_SP2))
    if GC_error == True:
        GC_10 = str(round_to_n(p10_gc,sig_figs_gc)) + u'\u00B1' + str(round_to_n(p10_gc*rel_err_gc,sig_figs_gc)) + '\n(' + str(round_to_n(p10_gc/p10_sp2,3)) + ')'
        GC_50 = str(round_to_n(p50_gc,sig_figs_gc)) + u'\u00B1' + str(round_to_n(p50_gc*rel_err_gc,sig_figs_gc)) + '\n(' + str(round_to_n(p50_gc/p50_sp2,3)) + ')'
        GC_90 = str(round_to_n(p90_gc,sig_figs_gc)) + u'\u00B1' + str(round_to_n(p90_gc*rel_err_gc,sig_figs_gc)) + '\n(' + str(round_to_n(p90_gc/p90_sp2,3)) + ')'
        GC_mean = str(round_to_n(mean_gc,sig_figs_gc)) + u'\u00B1' + str(round_to_n(mean_gc*rel_err_gc,sig_figs_gc)) + '\n(' + str(round_to_n(mean_gc/mean_sp2,3)) + ')'
        GC_10_ws = str(round_to_n(p10_gc_ws,sig_figs_gc)) + u'\u00B1' + str(round_to_n(p10_gc_ws*rel_err_gc_ws,sig_figs_gc)) + '\n(' + str(round_to_n(p10_gc_ws/p10_sp2,3)) + ')'
        GC_50_ws = str(round_to_n(p50_gc_ws,sig_figs_gc)) + u'\u00B1' + str(round_to_n(p50_gc_ws*rel_err_gc_ws,sig_figs_gc)) + '\n(' + str(round_to_n(p50_gc_ws/p50_sp2,3)) + ')'
        GC_90_ws = str(round_to_n(p90_gc_ws,sig_figs_gc)) + u'\u00B1' + str(round_to_n(p90_gc_ws*rel_err_gc_ws,sig_figs_gc)) + '\n(' + str(round_to_n(p90_gc_ws/p90_sp2,3)) + ')'
        GC_mean_ws = str(round_to_n(mean_gc_ws,sig_figs_gc)) + u'\u00B1' + str(round_to_n(mean_gc_ws*rel_err_gc_ws,sig_figs_gc)) + '\n(' + str(round_to_n(mean_gc_ws/mean_sp2,3)) + ')'
    else:
        GC_10 = str(round_to_n(p10_gc,sig_figs_gc)) + '\n(' + str(round_to_n(p10_gc/p10_sp2,3)) + ')'
        GC_50 = str(round_to_n(p50_gc,sig_figs_gc)) + '\n(' + str(round_to_n(p50_gc/p50_sp2,3)) + ')'
        GC_90 = str(round_to_n(p90_gc,sig_figs_gc)) + '\n(' + str(round_to_n(p90_gc/p90_sp2,3)) + ')'
        GC_mean = str(round_to_n(mean_gc,sig_figs_gc)) + '\n(' + str(round_to_n(mean_gc/mean_sp2,3)) + ')'
        GC_10_ws = str(round_to_n(p10_gc_ws,sig_figs_gc)) + '\n(' + str(round_to_n(p10_gc_ws/p10_sp2,3)) + ')'
        GC_50_ws = str(round_to_n(p50_gc_ws,sig_figs_gc)) + '\n(' + str(round_to_n(p50_gc_ws/p50_sp2,3)) + ')'
        GC_90_ws = str(round_to_n(p90_gc_ws,sig_figs_gc)) + '\n(' + str(round_to_n(p90_gc_ws/p90_sp2,3)) + ')'
        GC_mean_ws = str(round_to_n(mean_gc_ws,sig_figs_gc)) + '\n(' + str(round_to_n(mean_gc_ws/mean_sp2,3)) + ')'
    GC_list = [p10_gc, p50_gc, p90_gc, mean_gc]
    GC_list_ws = [p10_gc_ws, p50_gc_ws, p90_gc_ws, mean_gc_ws]
    SP2_list = [p10_sp2, p50_sp2, p90_sp2, mean_sp2]
    i = 0
    for value in GC_list:
        if (value - value*rel_err_gc) > (SP2_list[i] + SP2_list[i]*rel_err_sp2):
            red_list.append((GC_row_no,i+2))
        if (value + value*rel_err_gc) < (SP2_list[i] - SP2_list[i]*rel_err_sp2):
            blue_list.append((GC_row_no,i+2))
        i += 1
    i = 0
    for value in GC_list_ws:
        if (value - value*rel_err_gc_ws) > (SP2_list[i] + SP2_list[i]*rel_err_sp2):
            red_list.append((GC_row_no+1,i+2))
        if (value + value*rel_err_gc_ws) < (SP2_list[i] - SP2_list[i]*rel_err_sp2):
            blue_list.append((GC_row_no+1,i+2))
        i += 1
    table_row_SP2 = [cluster, 'Measurement',SP2_10,SP2_50,SP2_90,SP2_mean]
    table_row_GC = ['','GEOS-Chem\ndefault scenario', GC_10,GC_50,GC_90,GC_mean]
    table_row_GC_ws = ['','GEOS-Chem\n' + str(test_case), GC_10_ws,GC_50_ws,GC_90_ws,GC_mean_ws]
    data.append(table_row_SP2)
    data.append(table_row_GC)
    data.append(table_row_GC_ws)
    GC_row_no += 3

colLabels = ('cluster','data source','10th ptile', '50th ptile', '90th ptile', 'mean')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.axis('off')
#do the table
the_table = ax.table(cellText=data, colLabels=colLabels, loc='center')
table_props = the_table.properties()
table_cells = table_props['child_artists']
i = 0
for cell in table_cells:
    ht = cell.get_height()
    wd = cell.get_width()
    cell.set_width(wd*1.3)
    cell.set_height(ht*3)
    cell.set_fontsize(14)
    #if i in [1,3,5,7]:
    #    cell.set_linewidth(4)
    i += 1
cellDict = the_table.get_celld()
for cell in red_list:
    cellDict[cell]._text.set_color('r')
for cell in blue_list:
    cellDict[cell]._text.set_color('b')
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/GOES-Chem/')
plt.savefig('GC default v10 vs SP2 by cluster for WHI - ' + 'by cluster' + ' - ' + str(RH_of_interest) + '% RH threshold - '+str(test_case)+'.png',bbox_inches='tight')
plt.show()
cnx.close()
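# Worked example of the significant-figure helper defined near the top of this
# script (hypothetical inputs, not WHI data): round_to_n keeps n significant
# figures, which is what the "value±error" table cells rely on.
#
#     round_to_n(0.012345, 3)  ->  0.0123   (round(x, 4), since floor(log10(x)) = -2)
#     round_to_n(987.6, 2)     ->  990.0    (round(x, -1), since floor(log10(x)) = 2)
#
# Note that log10 of a non-positive x raises ValueError, so zero-valued cells
# would need guarding before calling it.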
39.856698
318
0.708692
2,531
12,794
3.168708
0.072303
0.084663
0.096758
0.131671
0.922693
0.914713
0.905985
0.905736
0.898504
0.898504
0
0.068206
0.126778
12,794
320
319
39.98125
0.64966
0.01532
0
0.766667
0
0.016667
0.159105
0.049347
0
0
0
0
0
0
null
null
0.004167
0.05
null
null
0.0125
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
8
57e985c16bc1ac23733569a4030cd3e743259bec
9,155
py
Python
src/genie/libs/parser/nxos/tests/ShowIpOspfMplsLdpInterface/cli/equal/golden_output_1_expected.py
balmasea/genieparser
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
[ "Apache-2.0" ]
204
2018-06-27T00:55:27.000Z
2022-03-06T21:12:18.000Z
src/genie/libs/parser/nxos/tests/ShowIpOspfMplsLdpInterface/cli/equal/golden_output_1_expected.py
balmasea/genieparser
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
[ "Apache-2.0" ]
468
2018-06-19T00:33:18.000Z
2022-03-31T23:23:35.000Z
src/genie/libs/parser/nxos/tests/ShowIpOspfMplsLdpInterface/cli/equal/golden_output_1_expected.py
balmasea/genieparser
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
[ "Apache-2.0" ]
309
2019-01-16T20:21:07.000Z
2022-03-30T12:56:41.000Z
expected_output = { 'vrf': {'VRF1': {'address_family': {'ipv4': {'instance': {'1': {'areas': {'0.0.0.1': {'mpls': {'ldp': {'autoconfig': False, 'autoconfig_area_id': '0.0.0.1', 'igp_sync': False, 'required': False}}, 'interfaces': {'Ethernet2/1': {'area': '0.0.0.1', 'interface_type': 'broadcast', 'mpls': {'ldp': {'autoconfig': False, 'autoconfig_area_id': '0.0.0.1', 'igp_sync': False, 'required': False}}, 'name': 'Ethernet2/1', 'state': 'bdr'}}, 'sham_links': {'10.151.22.22 10.229.11.11': {'area': '0.0.0.1', 'interface_type': 'point_to_point', 'mpls': {'ldp': {'autoconfig': False, 'autoconfig_area_id': '0.0.0.1', 'igp_sync': False, 'required': False}}, 'name': '10.151.22.22 10.229.11.11', 'state': 'point_to_point'}, '10.151.22.22 10.21.33.33': {'area': '0.0.0.1', 'interface_type': 'point_to_point', 'mpls': {'ldp': {'autoconfig': False, 'autoconfig_area_id': '0.0.0.1', 'igp_sync': False, 'required': False}}, 'name': '10.151.22.22 ' '10.21.33.33', 'state': 'point_to_point'}}}}}}}}}, 'default': {'address_family': {'ipv4': {'instance': {'1': {'areas': {'0.0.0.0': {'mpls': {'ldp': {'autoconfig': False, 'autoconfig_area_id': '0.0.0.0', 'igp_sync': False, 'required': False}}, 'interfaces': {'Ethernet2/2': {'area': '0.0.0.0', 'interface_type': 'broadcast', 'mpls': {'ldp': {'autoconfig': False, 'autoconfig_area_id': '0.0.0.0', 'igp_sync': False, 'required': False}}, 'name': 'Ethernet2/2', 'state': 'bdr'}, 'Ethernet2/3': {'area': '0.0.0.0', 'interface_type': 'broadcast', 'mpls': {'ldp': {'autoconfig': False, 'autoconfig_area_id': '0.0.0.0', 'igp_sync': False, 'required': False}}, 'name': 'Ethernet2/3', 'state': 'bdr'}, 'Ethernet2/4': {'area': '0.0.0.0', 'interface_type': 'broadcast', 'mpls': {'ldp': {'autoconfig': False, 'autoconfig_area_id': '0.0.0.0', 'igp_sync': False, 'required': False}}, 'name': 'Ethernet2/4', 'state': 'bdr'}, 'loopback0': {'area': '0.0.0.0', 'interface_type': 'loopback', 'mpls': {'ldp': {'autoconfig': False, 'autoconfig_area_id': '0.0.0.0', 'igp_sync': False, 'required': False}}, 'name': 'loopback0', 'state': 'loopback'}}}}}}}}}, 'VRF2': {'address_family': {'ipv4': {'instance': {'1': {'areas': {'0.0.1.1': {'mpls': {'ldp': {'autoconfig': True, 'autoconfig_area_id': '0.0.1.1', 'igp_sync': False, 'required': False}}, 'interfaces': {'port-channel4001': {'area': '0.0.1.1', 'interface_type': 'point_to_point', 'mpls': {'ldp': {'autoconfig': True, 'autoconfig_area_id': '0.0.1.1', 'igp_sync': False, 'required': False}}, 'name': 'port-channel4001', 'state': 'point_to_point'}, 'port-channel4002': {'area': '0.0.1.1', 'interface_type': 'point_to_point', 'mpls': {'ldp': {'autoconfig': True, 'autoconfig_area_id': '0.0.1.1', 'igp_sync': False, 'required': False}}, 'name': 'port-channel4002', 'state': 'point_to_point'}} }}}}}}} }}
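# Small consumer sketch (hypothetical, not part of the genieparser test
# harness, which diffs parsed CLI output against golden structures like the
# one above): the walker below illustrates the nesting by pulling out the one
# VRF/area pair with LDP autoconfig enabled.
def _ldp_autoconfig_areas(output):
    pairs = []
    for vrf, vrf_data in output['vrf'].items():
        for af in vrf_data['address_family'].values():
            for instance in af['instance'].values():
                for area_id, area in instance['areas'].items():
                    if area['mpls']['ldp']['autoconfig']:
                        pairs.append((vrf, area_id))
    return pairs

assert _ldp_autoconfig_areas(expected_output) == [('VRF2', '0.0.1.1')]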
61.033333
96
0.1858
406
9,155
4.027094
0.123153
0.063609
0.051376
0.124771
0.839755
0.839755
0.839755
0.787156
0.737615
0.696024
0
0.08189
0.722556
9,155
149
97
61.442953
0.561811
0
0
0.761905
0
0
0.1771
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
1
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
aa01ee5e1277059bfe1d6097ffd9847dffd31939
12,261
py
Python
src/cnn_models/w2v_cnn.py
bgshin/doc-classify-multi-gpu
fe3712c3c3fe410b09cf3ef7f460eaa62d696d67
[ "Apache-2.0" ]
null
null
null
src/cnn_models/w2v_cnn.py
bgshin/doc-classify-multi-gpu
fe3712c3c3fe410b09cf3ef7f460eaa62d696d67
[ "Apache-2.0" ]
null
null
null
src/cnn_models/w2v_cnn.py
bgshin/doc-classify-multi-gpu
fe3712c3c3fe410b09cf3ef7f460eaa62d696d67
[ "Apache-2.0" ]
null
null
null
import tensorflow as tf import numpy as np class W2V_CNN(object): """ A CNN for text classification. Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer. """ def __init__( self, sequence_length, num_classes, embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0): # Placeholders for input, output and dropout self.input_x = tf.placeholder(tf.float32, [None, sequence_length, embedding_size], name="input_x") self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y") self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob") # Keeping track of l2 regularization loss (optional) l2_loss = tf.constant(0.0) l1_loss = tf.constant(0.0) # Embedding layer with tf.name_scope("embedding"): # W = tf.Variable( # tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0), # name="W") # self.embedded_chars = tf.nn.embedding_lookup(W, self.input_x) self.embedded_chars = self.input_x self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1) print self.embedded_chars_expanded # Create a convolution + maxpool layer for each filter size pooled_outputs = [] for i, filter_size in enumerate(filter_sizes): with tf.name_scope("conv-maxpool-%s" % filter_size): # Convolution Layer filter_shape = [filter_size, embedding_size, 1, num_filters] W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W") b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b") # l2_loss += tf.nn.l2_loss(W)/1000 # l2_loss += tf.nn.l2_loss(b)/1000 conv = tf.nn.conv2d( self.embedded_chars_expanded, W, strides=[1, 1, 1, 1], padding="VALID", name="conv") # Apply nonlinearity h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu") # Maxpooling over the outputs pooled = tf.nn.max_pool( h, ksize=[1, sequence_length - filter_size + 1, 1, 1], strides=[1, 1, 1, 1], padding='VALID', name="pool") pooled_outputs.append(pooled) # Combine all the pooled features num_filters_total = num_filters * len(filter_sizes) print 'num_filters_total', num_filters_total print 'pooled_outputs', pooled_outputs self.h_pool = tf.concat(pooled_outputs, 3) self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total]) # Add dropout with tf.name_scope("dropout"): self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob) # Final (unnormalized) scores and predictions with tf.name_scope("output"): W = tf.get_variable( "W", shape=[num_filters_total, num_classes], initializer=tf.contrib.layers.xavier_initializer()) b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b") l2_loss += tf.nn.l2_loss(W)/30 l2_loss += tf.nn.l2_loss(b)/30 self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores") self.predictions = tf.argmax(self.scores, 1, name="predictions") # CalculateMean cross-entropy loss with tf.name_scope("loss"): losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y) self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss # Accuracy with tf.name_scope("accuracy"): self.golds = tf.argmax(self.input_y, 1, name="golds") correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1)) self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy") with tf.name_scope("avg_f1"): self.golds = tf.argmax(self.input_y, 1, name="golds") self.preds = self.predictions # positive recall pos_gold_sel = tf.equal(self.golds, 2) # positive_gold posg_golds = tf.boolean_mask(self.golds, pos_gold_sel) posg_preds = tf.boolean_mask(self.preds, pos_gold_sel) correct_predictions_pr = 
tf.equal(posg_golds, posg_preds)
            pos_r = tf.reduce_mean(tf.cast(correct_predictions_pr, "float"), name="pos_recall")

            # positive precision
            pos_pred_sel = tf.equal(self.preds, 2)  # positive_pred
            posp_golds = tf.boolean_mask(self.golds, pos_pred_sel)
            posp_preds = tf.boolean_mask(self.preds, pos_pred_sel)
            correct_predictions_pp = tf.equal(posp_golds, posp_preds)
            pos_p = tf.reduce_mean(tf.cast(correct_predictions_pp, "float"), name="pos_precision")

            # negative recall
            neg_gold_sel = tf.equal(self.golds, 0)  # negative_gold (the original comment said "positive_gold")
            negg_golds = tf.boolean_mask(self.golds, neg_gold_sel)
            negg_preds = tf.boolean_mask(self.preds, neg_gold_sel)
            correct_predictions_nr = tf.equal(negg_golds, negg_preds)
            self.neg_r = tf.reduce_mean(tf.cast(correct_predictions_nr, "float"), name="neg_recall")

            # negative precision
            neg_pred_sel = tf.equal(self.preds, 0)  # negative_pred (the original comment said "positive_pred")
            negp_golds = tf.boolean_mask(self.golds, neg_pred_sel)
            negp_preds = tf.boolean_mask(self.preds, neg_pred_sel)
            correct_predictions_np = tf.equal(negp_golds, negp_preds)
            self.neg_p = tf.reduce_mean(tf.cast(correct_predictions_np, "float"), name="neg_precision")

            self.f1_neg = 2 * self.neg_p * self.neg_r / (self.neg_p + self.neg_r) * 100
            self.f1_pos = 2 * pos_p * pos_r / (pos_p + pos_r) * 100
            self.avg_f1 = (self.f1_neg + self.f1_pos) / 2


class W2V_CNN_TOWER(object):
    """
    A CNN for text classification.
    Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
    """
    def __init__(self, sequence_length, num_classes, embedding_size,
                 filter_sizes, num_filters, l2_reg_lambda=0.0):
        # Placeholders for input, output and dropout
        self.input_x = tf.placeholder(tf.float32, [None, sequence_length, embedding_size], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        self.l2_loss = tf.constant(0.0)
        self.filter_sizes = filter_sizes
        self.num_filters = num_filters
        self.embedding_size = embedding_size
        self.num_classes = num_classes
        self.l2_reg_lambda = l2_reg_lambda
        # Bug fix: tower_loss() below reads self.sequence_length, but the
        # original __init__ never stored it.
        self.sequence_length = sequence_length

    def tower_loss(self, scope):
        # Embedding layer
        with tf.name_scope("embedding"):
            self.embedded_chars_expanded = tf.expand_dims(self.input_x, -1)
            print(self.embedded_chars_expanded)  # was a Python 2 print statement

        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        for i, filter_size in enumerate(self.filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, self.embedding_size, 1, self.num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[self.num_filters]), name="b")
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, self.sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs.append(pooled)

        # Combine all the pooled features
        num_filters_total = self.num_filters * len(self.filter_sizes)
        print('num_filters_total', num_filters_total)  # was a Python 2 print statement
        print('pooled_outputs', pooled_outputs)
        self.h_pool = tf.concat(pooled_outputs, 3)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)

        # Final (unnormalized) scores and predictions
        with 
tf.name_scope("output"): W = tf.get_variable( "W", shape=[num_filters_total, self.num_classes], initializer=tf.contrib.layers.xavier_initializer()) b = tf.Variable(tf.constant(0.1, shape=[self.num_classes]), name="b") self.l2_loss += tf.nn.l2_loss(W) / 30 self.l2_loss += tf.nn.l2_loss(b) / 30 self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores") self.predictions = tf.argmax(self.scores, 1, name="predictions") # CalculateMean cross-entropy loss with tf.name_scope("loss"): _ = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y) losses = tf.get_collection('losses', scope) self.loss = tf.reduce_mean(losses) + self.l2_reg_lambda * self.l2_loss return self.loss def acc(self): # Accuracy with tf.name_scope("accuracy"): self.golds = tf.argmax(self.input_y, 1, name="golds") correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1)) self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy") with tf.name_scope("avg_f1"): self.golds = tf.argmax(self.input_y, 1, name="golds") self.preds = self.predictions # positive recall pos_gold_sel = tf.equal(self.golds, 2) # positive_gold posg_golds = tf.boolean_mask(self.golds, pos_gold_sel) posg_preds = tf.boolean_mask(self.preds, pos_gold_sel) correct_predictions_pr = tf.equal(posg_golds, posg_preds) pos_r = tf.reduce_mean(tf.cast(correct_predictions_pr, "float"), name="pos_recall") # positive precision pos_pred_sel = tf.equal(self.preds, 2) # positive_pred posp_golds = tf.boolean_mask(self.golds, pos_pred_sel) posp_preds = tf.boolean_mask(self.preds, pos_pred_sel) correct_predictions_pp = tf.equal(posp_golds, posp_preds) pos_p = tf.reduce_mean(tf.cast(correct_predictions_pp, "float"), name="pos_precision") # negative recall neg_gold_sel = tf.equal(self.golds, 0) # positive_gold negg_golds = tf.boolean_mask(self.golds, neg_gold_sel) negg_preds = tf.boolean_mask(self.preds, neg_gold_sel) correct_predictions_nr = tf.equal(negg_golds, negg_preds) self.neg_r = tf.reduce_mean(tf.cast(correct_predictions_nr, "float"), name="neg_recall") # negative precision neg_pred_sel = tf.equal(self.preds, 0) # positive_pred negp_golds = tf.boolean_mask(self.golds, neg_pred_sel) negp_preds = tf.boolean_mask(self.preds, neg_pred_sel) correct_predictions_np = tf.equal(negp_golds, negp_preds) self.neg_p = tf.reduce_mean(tf.cast(correct_predictions_np, "float"), name="neg_precision") self.f1_neg = 2 * self.neg_p * self.neg_r / (self.neg_p + self.neg_r) * 100 self.f1_pos = 2 * pos_p * pos_r / (pos_p + pos_r) * 100 self.avg_f1 = (self.f1_neg + self.f1_pos) / 2
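As a minimal usage sketch (not part of the source file; the batch shapes, hyperparameters, and names such as batch_x and batch_y are illustrative assumptions), the single-tower model above could be trained under TF 1.x graph mode roughly like this:

# Hypothetical training-loop sketch for W2V_CNN (TF 1.x). All values are assumptions.
import numpy as np
import tensorflow as tf

cnn = W2V_CNN(sequence_length=50, num_classes=3, embedding_size=300,
              filter_sizes=[3, 4, 5], num_filters=100, l2_reg_lambda=0.1)
train_op = tf.train.AdamOptimizer(1e-3).minimize(cnn.loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch_x = np.random.rand(32, 50, 300)              # pre-trained word vectors
    batch_y = np.eye(3)[np.random.randint(0, 3, 32)]   # one-hot labels (0=neg, 2=pos)
    _, loss, acc = sess.run(
        [train_op, cnn.loss, cnn.accuracy],
        feed_dict={cnn.input_x: batch_x, cnn.input_y: batch_y,
                   cnn.dropout_keep_prob: 0.5})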
45.921348
106
0.607618
1,625
12,261
4.321846
0.107692
0.011961
0.029617
0.03873
0.922683
0.917414
0.899616
0.885234
0.870568
0.863449
0
0.018337
0.283908
12,261
266
107
46.093985
0.781549
0.097871
0
0.765714
0
0
0.046919
0
0
0
0
0
0
0
null
null
0
0.011429
null
null
0.034286
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
8
a4d9493ae008eb18866fd00b9c7f52ce33a12fb8
18,052
py
Python
tests/cli_test.py
BMeu/Aerarium
119946cead727ef68b5ecea339990d982c006391
[ "MIT" ]
null
null
null
tests/cli_test.py
BMeu/Aerarium
119946cead727ef68b5ecea339990d982c006391
[ "MIT" ]
139
2018-12-26T07:54:31.000Z
2021-06-01T23:14:45.000Z
tests/cli_test.py
BMeu/Aerarium
119946cead727ef68b5ecea339990d982c006391
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

from time import sleep
from unittest import TestCase
from unittest.mock import call
from unittest.mock import MagicMock
from unittest.mock import patch

from app import cli
from app import create_app
from app.configuration import TestConfiguration


class CLITest(TestCase):

    def setUp(self):
        """
            Initialize the test cases.
        """
        self.app = create_app(TestConfiguration)
        cli.register(self.app)
        self.cli = self.app.test_cli_runner()

    def assert_hash_calls(self, mock_hasher: MagicMock, rounds_range):
        """
            Assert that the hasher was called exactly once per value in rounds_range, in order.
        """
        self.assertEqual(len(rounds_range), mock_hasher.call_count)
        for index, rounds in enumerate(rounds_range):
            self.assertTupleEqual(call('Aerarium', rounds), mock_hasher.call_args_list[index])

    @patch('app.cli.generate_password_hash')
    def test_pw_hash_rounds_minimum_too_long(self, mock_hasher: MagicMock):
        """
            Test that the minimum value is returned if the requested maximum time is too short for it.

            Expected result: The output suggests the value 4 and informs the user that the minimum
            value must be used.
        """

        def _hasher_side_effect(_password, rounds):
            """
                Sleep slightly longer than the number of rounds in milliseconds.
            """
            sleep((rounds + 1) / 1000.0)

        mock_hasher.side_effect = _hasher_side_effect

        response = self.cli.invoke(args=['pw_hash_rounds', '4'])
        output = response.output

        mock_hasher.assert_called_once_with('Aerarium', 4)

        self.assertIn('The minimum number of rounds took more time than allowed.', output)
        self.assertIn('However, the number of rounds must be at least: 4', output)
        self.assertNotIn('Found suiting number of hashing rounds', output)
        self.assertIn('BCRYPT_LOG_ROUNDS=4', output)
        self.assertEqual(0, response.exit_code)

    @patch('app.cli.generate_password_hash')
    def test_pw_hash_rounds_minimum_ok(self, mock_hasher: MagicMock):
        """
            Test that the minimum value is returned if the requested maximum time fits.

            Expected result: The output suggests the value 4 and informs the user that the minimum
            value is fitting the requirements.
        """

        def _hasher_side_effect(_password, rounds):
            """
                Sleep slightly shorter than the number of rounds in milliseconds.
            """
            sleep((rounds - 1) / 1000.0)

        mock_hasher.side_effect = _hasher_side_effect

        response = self.cli.invoke(args=['pw_hash_rounds', '4'])
        output = response.output

        self.assert_hash_calls(mock_hasher, range(4, 6))

        self.assertNotIn('| Duration', output)
        self.assertNotIn('Reached maximum number of hashing rounds.', output)
        self.assertNotIn('The minimum number of rounds took more time than allowed.', output)
        self.assertNotIn('However, the number of rounds must be at least', output)
        self.assertIn('Found suiting number of hashing rounds: 4', output)
        self.assertIn('BCRYPT_LOG_ROUNDS=4', output)
        self.assertEqual(0, response.exit_code)

    @patch('app.cli.generate_password_hash')
    def test_pw_hash_rounds_medium_value_verbose(self, mock_hasher: MagicMock):
        """
            Test that a medium value is returned if the requested maximum time fits.

            Expected result: The output suggests the value 12 and displays verbose information.
        """

        def _hasher_side_effect(_password, rounds):
            """
                Sleep slightly shorter than the number of rounds in milliseconds.
            """
            sleep((rounds - 1) / 1000.0)

        mock_hasher.side_effect = _hasher_side_effect

        response = self.cli.invoke(args=['pw_hash_rounds', '-v', '12'])
        output = response.output

        self.assert_hash_calls(mock_hasher, range(4, 14))

        self.assertIn('| Duration', output)
        self.assertNotIn('Reached maximum number of hashing rounds.', output)
        self.assertNotIn('The minimum number of rounds took more time than allowed.', output)
        self.assertNotIn('However, the number of rounds must be at least', output)
        self.assertIn('Found suiting number of hashing rounds: 12', output)
        self.assertIn('BCRYPT_LOG_ROUNDS=12', output)
        self.assertEqual(0, response.exit_code)

    @patch('app.cli.generate_password_hash')
    def test_pw_hash_rounds_maximum_value(self, mock_hasher: MagicMock):
        """
            Test that the maximum value is returned if the requested maximum time is too long.

            Expected result: The output suggests the value 31.
        """

        def _hasher_side_effect(_password, rounds):
            """
                Sleep slightly shorter than the number of rounds in milliseconds.
            """
            sleep((rounds - 1) / 1000.0)

        mock_hasher.side_effect = _hasher_side_effect

        response = self.cli.invoke(args=['pw_hash_rounds', '32'])
        output = response.output

        self.assert_hash_calls(mock_hasher, range(4, 32))

        self.assertNotIn('| Duration', output)
        self.assertNotIn('Reached maximum number of hashing rounds.', output)
        self.assertNotIn('The minimum number of rounds took more time than allowed.', output)
        self.assertNotIn('However, the number of rounds must be at least', output)
        self.assertIn('Found suiting number of hashing rounds: 31', output)
        self.assertIn('BCRYPT_LOG_ROUNDS=31', output)
        self.assertEqual(0, response.exit_code)

    @patch('app.cli.generate_password_hash')
    def test_pw_hash_rounds_maximum_value_verbose(self, mock_hasher: MagicMock):
        """
            Test that the maximum value is returned if the requested maximum time is too long,
            and test verbose output.

            Expected result: The output suggests the value 31 and informs the user about the
            maximum number, since verbose output is enabled.
        """

        def _hasher_side_effect(_password, rounds):
            """
                Sleep slightly shorter than the number of rounds in milliseconds.
            """
            sleep((rounds - 1) / 1000.0)

        mock_hasher.side_effect = _hasher_side_effect

        response = self.cli.invoke(args=['pw_hash_rounds', '-v', '32'])
        output = response.output

        self.assert_hash_calls(mock_hasher, range(4, 32))

        self.assertIn('| Duration', output)
        self.assertIn('Reached maximum number of hashing rounds.', output)
        self.assertNotIn('The minimum number of rounds took more time than allowed.', output)
        self.assertNotIn('However, the number of rounds must be at least', output)
        self.assertIn('Found suiting number of hashing rounds: 31', output)
        self.assertIn('BCRYPT_LOG_ROUNDS=31', output)
        self.assertEqual(0, response.exit_code)

    @patch('app.cli.os.system')
    def test_translate_compile_success(self, mock_system: MagicMock):
        """
            Test the Babel compile call.

            Expected result: Babel is instructed to compile the translations.
        """
        mock_system.return_value = 0

        response = self.cli.invoke(args=['translate', 'compile'])

        mock_system.assert_called_once()
        self.assertIn('babel compile', str(mock_system.call_args))
        self.assertEqual(0, response.exit_code)

    @patch('app.cli.os.system')
    def test_translate_compile_failure(self, mock_system: MagicMock):
        """
            Test the Babel compile call.

            Expected result: Babel is instructed to compile the translations, but fails.
        """
        mock_system.return_value = 1

        response = self.cli.invoke(args=['translate', 'compile'])

        mock_system.assert_called_once()
        self.assertIn('babel compile', str(mock_system.call_args))
        self.assertIn('Compilation failed', response.output)
        self.assertEqual(1, response.exit_code)

    @patch('app.cli.os.remove')
    @patch('app.cli.os.system')
    def test_translate_extract_failure(self, mock_system: MagicMock, mock_remove: MagicMock):
        """
            Test the Babel extract call. This can only be tested via a CLI call since the extract
            function is defined within the register function.

            Expected result: Babel is instructed to extract the translations, but fails.
        """

        def _system_return_value(value):
            """
                Only fail the extract command.
            """
            if 'extract' in value:
                return 1
            return 0

        mock_system.side_effect = _system_return_value

        response = self.cli.invoke(args=['translate', 'init', 'de'])

        mock_remove.assert_not_called()
        mock_system.assert_called_once()
        self.assertIn('babel extract', str(mock_system.call_args))
        self.assertIn('Extraction failed', response.output)
        self.assertEqual(1, response.exit_code)

    @patch('app.cli.os.remove')
    @patch('app.cli.os.system')
    def test_translate_init_success(self, mock_system: MagicMock, mock_remove: MagicMock):
        """
            Test the Babel init call.

            Expected result: Babel is instructed to extract and initialize the translations.
        """
        mock_system.return_value = 0

        response = self.cli.invoke(args=['translate', 'init', 'de'])

        mock_remove.assert_called_once()
        mock_system.assert_called()
        self.assertEqual(2, mock_system.call_count)
        self.assertIn('babel extract', str(mock_system.call_args_list[0]))
        self.assertIn('babel init', str(mock_system.call_args_list[1]))
        self.assertIn('-l de', str(mock_system.call_args_list[1]))
        self.assertEqual(0, response.exit_code)

    @patch('app.cli.os.remove')
    @patch('app.cli.os.system')
    def test_translate_init_failure(self, mock_system: MagicMock, mock_remove: MagicMock):
        """
            Test the Babel init call.

            Expected result: Babel is instructed to initialize the translations, but fails.
        """

        def _system_return_value(value):
            """
                Don't fail the extract command.
            """
            if 'extract' in value:
                return 0
            return 1

        mock_system.side_effect = _system_return_value

        response = self.cli.invoke(args=['translate', 'init', 'de'])

        mock_remove.assert_not_called()
        mock_system.assert_called()
        self.assertIn('babel extract', str(mock_system.call_args_list[0]))
        self.assertIn('babel init', str(mock_system.call_args_list[1]))
        self.assertIn('-l de', str(mock_system.call_args_list[1]))
        self.assertIn('Language initialization failed', response.output)
        self.assertEqual(1, response.exit_code)

    @patch('app.cli.os.remove')
    @patch('app.cli.os.system')
    def test_translate_update_success(self, mock_system: MagicMock, mock_remove: MagicMock):
        """
            Test the Babel update call.

            Expected result: Babel is instructed to extract and update the translations.
        """
        mock_system.return_value = 0

        response = self.cli.invoke(args=['translate', 'update'])

        mock_remove.assert_called_once()
        mock_system.assert_called()
        self.assertEqual(2, mock_system.call_count)
        self.assertIn('babel extract', str(mock_system.call_args_list[0]))
        self.assertIn('babel update', str(mock_system.call_args_list[1]))
        self.assertEqual(0, response.exit_code)

    @patch('app.cli.os.remove')
    @patch('app.cli.os.system')
    def test_translate_update_failure(self, mock_system: MagicMock, mock_remove: MagicMock):
        """
            Test the Babel update call.

            Expected result: Babel is instructed to update the translations, but fails.
        """

        def _system_return_value(value):
            """
                Don't fail the extract command.
            """
            if 'extract' in value:
                return 0
            return 1

        mock_system.side_effect = _system_return_value

        response = self.cli.invoke(args=['translate', 'update'])

        mock_remove.assert_not_called()
        mock_system.assert_called()
        self.assertIn('babel extract', str(mock_system.call_args_list[0]))
        self.assertIn('babel update', str(mock_system.call_args_list[1]))
        self.assertIn('Update failed', response.output)
        self.assertEqual(1, response.exit_code)
44.905473
119
0.67023
2,278
18,052
5.101844
0.079456
0.071416
0.080537
0.187231
0.926605
0.918
0.912321
0.903201
0.891843
0.866202
0
0.023045
0.218757
18,052
401
120
45.017456
0.801035
0.125914
0
0.810127
0
0
0.157947
0.010022
0
0
0
0
0.607595
1
0.088608
false
0.042194
0.033755
0
0.151899
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
9
10a0f0bb9352288eb3aed26a631977bf3967c26e
19
py
Python
intrepyd/iec611312py/__init__.py
bobosoft/intrepyd
13f0912b31f86f9bcc50f52ef4ad870e33f0cf65
[ "BSD-3-Clause" ]
2
2021-04-25T17:38:03.000Z
2022-03-20T20:48:50.000Z
intrepyd/iec611312py/__init__.py
bobosoft/intrepyd
13f0912b31f86f9bcc50f52ef4ad870e33f0cf65
[ "BSD-3-Clause" ]
1
2016-11-30T22:25:00.000Z
2017-01-16T22:43:39.000Z
intrepyd/iec611312py/__init__.py
bobosoft/intrepyd
13f0912b31f86f9bcc50f52ef4ad870e33f0cf65
[ "BSD-3-Clause" ]
null
null
null
""" Iec611312py """
6.333333
11
0.578947
1
19
11
1
0
0
0
0
0
0
0
0
0
0
0.352941
0.105263
19
3
12
6.333333
0.294118
0.578947
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
7
10a51f3bb39aadc236d0b44ffadd0c76e22dbe9b
5,643
py
Python
doink/migrations/0001_initial.py
tolomea/report_builder_problems
aa965252a56dbe90a5522391aa9f7a87cbc69aed
[ "MIT" ]
null
null
null
doink/migrations/0001_initial.py
tolomea/report_builder_problems
aa965252a56dbe90a5522391aa9f7a87cbc69aed
[ "MIT" ]
null
null
null
doink/migrations/0001_initial.py
tolomea/report_builder_problems
aa965252a56dbe90a5522391aa9f7a87cbc69aed
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-04-13 13:11
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Both',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
                ('text_x', models.TextField()),
                ('both', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.Both')),
                ('both_x', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.Both')),
            ],
        ),
        migrations.CreateModel(
            name='InnerClass',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
                ('text_x', models.TextField()),
                ('both', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.Both')),
                ('both_x', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.Both')),
                ('inner_class', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.InnerClass')),
                ('inner_class_x', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.InnerClass')),
            ],
        ),
        migrations.CreateModel(
            name='Neither',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
                ('both', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.Both')),
                ('inner_class', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.InnerClass')),
                ('neither', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.Neither')),
            ],
        ),
        migrations.CreateModel(
            name='Settings',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
                ('both', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.Both')),
                ('inner_class', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.InnerClass')),
                ('neither', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.Neither')),
                ('settings', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.Settings')),
            ],
        ),
        migrations.AddField(
            model_name='neither',
            name='settings',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.Settings'),
        ),
        migrations.AddField(
            model_name='innerclass',
            name='neither',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.Neither'),
        ),
        migrations.AddField(
            model_name='innerclass',
            name='neither_x',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.Neither'),
        ),
        migrations.AddField(
            model_name='innerclass',
            name='settings',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.Settings'),
        ),
        migrations.AddField(
            model_name='innerclass',
            name='settings_x',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.Settings'),
        ),
        migrations.AddField(
            model_name='both',
            name='inner_class',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.InnerClass'),
        ),
        migrations.AddField(
            model_name='both',
            name='inner_class_x',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.InnerClass'),
        ),
        migrations.AddField(
            model_name='both',
            name='neither',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.Neither'),
        ),
        migrations.AddField(
            model_name='both',
            name='neither_x',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.Neither'),
        ),
        migrations.AddField(
            model_name='both',
            name='settings',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.Settings'),
        ),
        migrations.AddField(
            model_name='both',
            name='settings_x',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='doink.Settings'),
        ),
    ]
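For orientation, a models.py along the following lines would produce a migration of this shape. This is a speculative reconstruction (the field names match the migration, but the actual source may differ):

# Hypothetical models.py consistent with the migration above (reconstruction, not the original).
from django.db import models


class Both(models.Model):
    text = models.TextField()
    text_x = models.TextField()
    both = models.ForeignKey('doink.Both', on_delete=models.CASCADE, related_name='+')
    both_x = models.ForeignKey('doink.Both', on_delete=models.CASCADE, related_name='+')
    inner_class = models.ForeignKey('doink.InnerClass', on_delete=models.CASCADE, related_name='+')
    inner_class_x = models.ForeignKey('doink.InnerClass', on_delete=models.CASCADE, related_name='+')
    neither = models.ForeignKey('doink.Neither', on_delete=models.CASCADE, related_name='+')
    neither_x = models.ForeignKey('doink.Neither', on_delete=models.CASCADE, related_name='+')
    settings = models.ForeignKey('doink.Settings', on_delete=models.CASCADE, related_name='+')
    settings_x = models.ForeignKey('doink.Settings', on_delete=models.CASCADE, related_name='+')

# InnerClass, Neither, and Settings would follow the same pattern, with the field
# combinations listed in the migration's CreateModel and AddField operations.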
48.646552
139
0.599504
585
5,643
5.625641
0.095727
0.063203
0.106351
0.167122
0.8976
0.8976
0.8976
0.8976
0.891522
0.891522
0
0.003764
0.246677
5,643
115
140
49.069565
0.770407
0.011696
0
0.831776
1
0
0.122354
0
0
0
0
0
0
1
0
false
0
0.028037
0
0.065421
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
9
eabd18e28d97f8db1be65f400ff24ff58a8d6cd9
92
py
Python
parameters_8000.py
mbeorlegui/web2py_metereological_station
992b154010a8e6eb8d7470e573b38d67f131ad1b
[ "BSD-3-Clause" ]
null
null
null
parameters_8000.py
mbeorlegui/web2py_metereological_station
992b154010a8e6eb8d7470e573b38d67f131ad1b
[ "BSD-3-Clause" ]
null
null
null
parameters_8000.py
mbeorlegui/web2py_metereological_station
992b154010a8e6eb8d7470e573b38d67f131ad1b
[ "BSD-3-Clause" ]
null
null
null
password="pbkdf2(1000,20,sha512)$8439c33fc2223fae$83a49fd2a5fd68db1912b2a984693a2924203f06"
46
91
0.891304
7
92
11.714286
1
0
0
0
0
0
0
0
0
0
0
0.527473
0.01087
92
1
92
92
0.373626
0
0
0
0
0
0.869565
0.869565
0
0
0
0
0
1
0
false
1
0
0
0
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
1
0
0
0
0
0
8
eae6eb9df897817b56db8e813c5c0d860ab98197
7,171
py
Python
python/model_utils.py
cdaube/sharedFunctionalFeatures
3b7e8b17973a7fef195626a34bed54517cfd3915
[ "MIT" ]
1
2021-11-18T18:13:39.000Z
2021-11-18T18:13:39.000Z
python/model_utils.py
cdaube/sharedFunctionalFeatures
3b7e8b17973a7fef195626a34bed54517cfd3915
[ "MIT" ]
null
null
null
python/model_utils.py
cdaube/sharedFunctionalFeatures
3b7e8b17973a7fef195626a34bed54517cfd3915
[ "MIT" ]
null
null
null
'''
model_utils.py contains custom blocks, etc. for building models.

created by shadySource, additions by cdaube

THE UNLICENSE
'''
import tensorflow as tf
from keras.layers import (InputLayer, Conv2D, Conv2DTranspose,
                          BatchNormalization, LeakyReLU, ReLU, MaxPool2D,
                          UpSampling2D, Add, Reshape,
                          GlobalAveragePooling2D, Layer)
import keras


class ConvBnLRelu(object):
    def __init__(self, filters, kernelSize, strides=1):
        self.filters = filters
        self.kernelSize = kernelSize
        self.strides = strides

    # return conv + bn + leaky_relu block
    def __call__(self, net, training=None):
        net = Conv2D(self.filters, self.kernelSize, strides=self.strides, padding='same')(net)
        net = BatchNormalization()(net, training=training)
        net = LeakyReLU()(net)
        return net


class ConvBnRelu(object):
    def __init__(self, filters, kernelSize, strides=1):
        self.filters = filters
        self.kernelSize = kernelSize
        self.strides = strides

    # return conv + bn + relu block
    def __call__(self, net, training=None):
        net = Conv2D(self.filters, self.kernelSize, strides=self.strides, padding='same')(net)
        net = BatchNormalization()(net, training=training)
        net = ReLU()(net)
        return net


class convBasicBlock(object):
    '''resnet skip block that has a conv layer at the shortcut

    # Arguments
        inputTensor: input tensor
        kernelSize: default 3, the kernel size of the middle conv layer at the main path
        filters: the number of filters of the conv layers at the main path
        stage: integer, current stage label, used for generating layer names
        block: 'a', 'b'..., current block label, used for generating layer names

    Note that from stage 3, the first conv layer at the main path has strides=(2, 2),
    and the shortcut has strides=(2, 2) as well.
    '''

    def __init__(self, filters, kernelSize, stage, block, strides=(2, 2), bnMode=0, bnAxis=3):
        self.filters = filters
        self.kernelSize = kernelSize
        self.strides = strides
        self.stage = stage
        self.block = block
        self.bnMode = bnMode
        self.bnAxis = bnAxis

    def __call__(self, inputTensor):
        convNameBase = 'res' + str(self.stage) + self.block + '_branch'
        bnNameBase = 'bn' + str(self.stage) + self.block + '_branch'

        x = Conv2D(self.filters, self.kernelSize, padding='same', strides=self.strides,
                   name=convNameBase + '2a')(inputTensor)
        x = BatchNormalization(axis=self.bnAxis, name=bnNameBase + '2a')(x)
        x = ReLU()(x)
        x = Conv2D(self.filters, self.kernelSize, padding='same', name=convNameBase + '2b')(x)
        x = BatchNormalization(axis=self.bnAxis, name=bnNameBase + '2b')(x)

        # projection shortcut: a strided 1x1 conv so the shapes match for the addition
        shortcut = Conv2D(self.filters, (1, 1), strides=self.strides, name=convNameBase + '1')(inputTensor)
        shortcut = BatchNormalization(axis=self.bnAxis, name=bnNameBase + '1')(shortcut)

        x = Add()([x, shortcut])
        x = ReLU()(x)
        return x


class basicblock(object):
    '''resnet skip block that has no conv layer at the shortcut

    # Arguments
        inputTensor: input tensor
        kernelSize: default 3, the kernel size of the middle conv layer at the main path
        filters: the number of filters of the conv layers at the main path
        stage: integer, current stage label, used for generating layer names
        block: 'a', 'b'..., current block label, used for generating layer names
    '''

    def __init__(self, filters, kernelSize, stage, block, strides=(1, 1), bnMode=0, bnAxis=3):
        self.filters = filters
        self.kernelSize = kernelSize
        self.strides = strides
        self.stage = stage
        self.block = block
        self.bnMode = bnMode
        self.bnAxis = bnAxis

    def __call__(self, inputTensor):
        convNameBase = 'res' + str(self.stage) + self.block + '_branch'
        bnNameBase = 'bn' + str(self.stage) + self.block + '_branch'

        x = Conv2D(self.filters, self.kernelSize, padding='same', strides=self.strides,
                   name=convNameBase + '2a')(inputTensor)
        x = BatchNormalization(axis=self.bnAxis, name=bnNameBase + '2a')(x)
        x = ReLU()(x)
        x = Conv2D(self.filters, self.kernelSize, padding='same', name=convNameBase + '2b')(x)
        x = BatchNormalization(axis=self.bnAxis, name=bnNameBase + '2b')(x)

        # identity shortcut: the input is added back unchanged
        x = Add()([x, inputTensor])
        x = ReLU()(x)
        return x


class convBottleneck(object):
    '''like convBasicBlock, but with 1 more conv/bn/relu'''

    def __init__(self, filters, kernelSize, stage, block, strides=(2, 2), bnMode=0, bnAxis=3):
        self.filters = filters
        self.kernelSize = kernelSize
        self.strides = strides
        self.stage = stage
        self.block = block
        self.bnMode = bnMode
        self.bnAxis = bnAxis

    def __call__(self, inputTensor):
        convNameBase = 'res' + str(self.stage) + self.block + '_branch'
        bnNameBase = 'bn' + str(self.stage) + self.block + '_branch'

        x = Conv2D(self.filters, self.kernelSize, padding='same', strides=self.strides,
                   name=convNameBase + '2a')(inputTensor)
        x = BatchNormalization(axis=self.bnAxis, name=bnNameBase + '2a')(x)
        x = ReLU()(x)
        x = Conv2D(self.filters, self.kernelSize, padding='same', name=convNameBase + '2b')(x)
        x = BatchNormalization(axis=self.bnAxis, name=bnNameBase + '2b')(x)
        x = ReLU()(x)
        x = Conv2D(self.filters, 1, name=convNameBase + '2c')(x)
        x = BatchNormalization(axis=self.bnAxis, name=bnNameBase + '2c')(x)

        # projection shortcut: a strided 1x1 conv so the shapes match for the addition
        shortcut = Conv2D(self.filters, (1, 1), strides=self.strides, name=convNameBase + '1')(inputTensor)
        shortcut = BatchNormalization(axis=self.bnAxis, name=bnNameBase + '1')(shortcut)

        x = Add()([x, shortcut])
        x = ReLU()(x)
        return x


class bottleneck(object):
    '''like basicblock, but with 1 more conv/bn/relu'''

    def __init__(self, filters, kernelSize, stage, block, strides=(1, 1), bnMode=0, bnAxis=3):
        self.filters = filters
        self.kernelSize = kernelSize
        self.strides = strides
        self.stage = stage
        self.block = block
        self.bnMode = bnMode
        self.bnAxis = bnAxis

    def __call__(self, inputTensor):
        convNameBase = 'res' + str(self.stage) + self.block + '_branch'
        bnNameBase = 'bn' + str(self.stage) + self.block + '_branch'

        x = Conv2D(self.filters, self.kernelSize, padding='same', strides=self.strides,
                   name=convNameBase + '2a')(inputTensor)
        x = BatchNormalization(axis=self.bnAxis, name=bnNameBase + '2a')(x)
        x = ReLU()(x)
        x = Conv2D(self.filters, self.kernelSize, padding='same', name=convNameBase + '2b')(x)
        x = BatchNormalization(axis=self.bnAxis, name=bnNameBase + '2b')(x)
        x = ReLU()(x)
        x = Conv2D(self.filters, 1, name=convNameBase + '2c')(x)
        x = BatchNormalization(axis=self.bnAxis, name=bnNameBase + '2c')(x)

        # identity shortcut: the input is added back unchanged
        x = Add()([x, inputTensor])
        x = ReLU()(x)
        return x
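A small usage sketch (my own; the input shape and block parameters are arbitrary assumptions) wiring these blocks into a Keras model:

# Hypothetical usage of the blocks above (shapes and hyperparameters are assumptions).
from keras.layers import Input
from keras.models import Model

inputs = Input(shape=(64, 64, 3))
net = ConvBnRelu(filters=64, kernelSize=3)(inputs)
net = convBasicBlock(filters=64, kernelSize=3, stage=2, block='a')(net)  # strided projection block
net = basicblock(filters=64, kernelSize=3, stage=2, block='b')(net)      # identity block
net = GlobalAveragePooling2D()(net)
model = Model(inputs, net)
model.summary()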
38.972826
126
0.627249
859
7,171
5.16298
0.140861
0.064487
0.075761
0.086584
0.870349
0.866065
0.864938
0.864938
0.864938
0.84239
0
0.014487
0.249198
7,171
184
127
38.972826
0.80925
0.165946
0
0.886957
0
0
0.02729
0
0
0
0
0
0
1
0.104348
false
0
0.026087
0
0.234783
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7