hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7f9a3a81ecc1f916f1da23203f267f583b3c374 | 887 | py | Python | obsolete/tests/test_init.py | telegrambotdev/telegram.email.notify | d16880819b2f1887b0e0f0b9841de2a122d81dd6 | [
"MIT"
] | null | null | null | obsolete/tests/test_init.py | telegrambotdev/telegram.email.notify | d16880819b2f1887b0e0f0b9841de2a122d81dd6 | [
"MIT"
] | null | null | null | obsolete/tests/test_init.py | telegrambotdev/telegram.email.notify | d16880819b2f1887b0e0f0b9841de2a122d81dd6 | [
"MIT"
] | null | null | null | # python tests.py ../source test.test_modules.test_init
from modules import remove_line_with, remove_new_lines
from . import TestCaseModule
class TestCaseInit(TestCaseModule):
    """Tests for the text helpers exported by the ``modules`` package."""

    def test_remove_line_with(self):
        """remove_line_with() drops the line containing the needle.

        NOTE(review): the last two assertions expect the input back
        unchanged even though " text" and " two" do occur in the first
        and last lines -- presumably the helper never removes the first
        or the last line; confirm against its implementation.
        """
        source = "Test text\nLine one\nLine two"
        result = "Test text\nLine two"
        self.assertEqual(remove_line_with(source, " one"), result)
        # Needle absent: input is returned untouched.
        self.assertEqual(remove_line_with(source, "xxx"), source)
        self.assertEqual(remove_line_with(source, " text"), source)
        self.assertEqual(remove_line_with(source, " two"), source)

    def test_remove_new_lines(self):
        """remove_new_lines() normalizes blank lines.

        Per the fixture: a leading newline is dropped and a triple
        newline is reduced to a double one, while an existing double
        newline is preserved as-is.
        """
        source = "\nTest text\n\n\nLine one\nLine two"
        result = "Test text\n\nLine one\nLine two"
        self.assertEqual(remove_new_lines(source), result)
        # Already-normalized text passes through unchanged.
        source = "Test text\n\nLine one\nLine two"
        self.assertEqual(remove_new_lines(source), source)
| 36.958333 | 67 | 0.70124 |
from modules import remove_line_with, remove_new_lines
from . import TestCaseModule
class TestCaseInit(TestCaseModule):
def test_remove_line_with(self):
source = "Test text\nLine one\nLine two"
result = "Test text\nLine two"
self.assertEqual(remove_line_with(source, " one"), result)
self.assertEqual(remove_line_with(source, "xxx"), source)
self.assertEqual(remove_line_with(source, " text"), source)
self.assertEqual(remove_line_with(source, " two"), source)
def test_remove_new_lines(self):
source = "\nTest text\n\n\nLine one\nLine two"
result = "Test text\n\nLine one\nLine two"
self.assertEqual(remove_new_lines(source), result)
source = "Test text\n\nLine one\nLine two"
self.assertEqual(remove_new_lines(source), source)
| true | true |
f7f9a3f1b6cd85d0b636ba241e8d6f1cbca779e6 | 10,772 | py | Python | tests/neurst/layers/decoders/transformer_decoder_test.py | ishine/neurst | 2ba322393fcfed4261b33f4a657e12bbe321baaa | [
"Apache-2.0"
] | 208 | 2020-11-12T03:56:41.000Z | 2022-03-27T07:01:27.000Z | tests/neurst/layers/decoders/transformer_decoder_test.py | ishine/neurst | 2ba322393fcfed4261b33f4a657e12bbe321baaa | [
"Apache-2.0"
] | 16 | 2021-02-20T07:57:03.000Z | 2022-01-27T07:36:31.000Z | tests/neurst/layers/decoders/transformer_decoder_test.py | ishine/neurst | 2ba322393fcfed4261b33f4a657e12bbe321baaa | [
"Apache-2.0"
] | 33 | 2020-11-12T04:44:50.000Z | 2022-03-23T09:22:29.000Z | # Copyright 2020 ByteDance Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
import tensorflow as tf
from neurst.layers.decoders.transformer_decoder import TransformerDecoder
def test_transformer_decoder():
    """Golden-value regression test for TransformerDecoder.

    Builds a tiny single-layer decoder, pins every trainable kernel to
    fixed values, then checks (a) the full teacher-forced forward pass
    and (b) one incremental inference step -- including the cached
    self-attention keys/values -- against precomputed outputs.
    """
    dmodel = 4
    batch_size = 2
    num_layers = 1
    num_self_attention_heads = 2
    hidden_size = dmodel
    filter_size = 16
    # Dropout rates are declared but have no effect below because every
    # call passes is_training=False, keeping the outputs deterministic.
    self_attention_dropout_rate = 0.1
    ffn_dropout_rate = 0.1
    layer_postprocess_dropout_rate = 0.1
    # max_len = 4
    # max_decoder_len = 3
    decoder = TransformerDecoder(
        num_layers=num_layers,
        num_attention_heads=num_self_attention_heads,
        hidden_size=hidden_size,
        filter_size=filter_size,
        attention_dropout_rate=self_attention_dropout_rate,
        ffn_dropout_rate=ffn_dropout_rate,
        layer_postprocess_dropout_rate=layer_postprocess_dropout_rate)
    # Fixed encoder states: [batch=2, src_len=4, dmodel=4].
    encoder_outputs = tf.convert_to_tensor(
        [[[-0.37282175, 0.62301564, -2.0221813, -0.00875833],
          [0.31516594, -1.117763, -1.0697726, 0.80373234],
          [-0.717022, 0.3300997, -0.44306225, 1.550383],
          [-1.5516962, 0.6025011, 1.8262954, 0.42469704]],
         [[-0.98617625, 2.2856202, -1.3063533, 0.4174998],
          [1.5724765, 1.2201295, 1.1479746, 0.7810888],
          [0.8343642, -1.073388, 1.2718492, -0.7290778],
          [-1.4126722, 1.8000795, -2.118672, -0.1366007]]], dtype=tf.float32)
    # 1.0 marks padded source positions (the second sample has length 2).
    encoder_inputs_padding = tf.convert_to_tensor(
        [[0, 0, 0, 0], [0, 0, 1., 1.]], dtype=tf.float32)
    # Teacher-forced target embeddings: [batch=2, tgt_len=3, dmodel=4].
    decoder_inputs = tf.convert_to_tensor(
        [[[8.6675537e-01, 2.2135425e-01, 1.4054185e+00, -4.2268831e-01],
          [1.9606155e+00, -1.8318410e+00, -1.8158482e+00, -3.7030798e-01],
          [-1.1357157e-03, 5.5629879e-01, 6.6107117e-02, -1.7330967e+00]],
         [[-1.1870812e+00, -5.4499257e-01, -8.6622888e-01, -7.4098641e-01],
          [2.2233427e-01, 5.3582352e-01, 3.0567116e-01, 1.0201423e-01],
          [-1.8053315e+00, 7.2125041e-01, 1.0072237e+00, -2.0333264e+00]]], dtype=tf.float32)
    # test for training
    cache = decoder.create_decoding_internal_cache(
        encoder_outputs, encoder_inputs_padding, is_inference=False)
    # A first dummy call forces variable creation so the kernels below exist.
    _ = decoder(decoder_inputs, cache, is_training=False)
    # Overwrite every kernel with deterministic values; biases keep their
    # zero initialization, so the golden outputs are fully reproducible.
    for w in decoder.trainable_weights:
        if "layer_0/self_attention_prepost_wrapper/self_attention/output_transform/kernel" in w.name:
            tf.compat.v1.assign(w, tf.convert_to_tensor(
                [[0.39332086, -0.3676856, -0.50203305, 0.6782059],
                 [-0.41239128, -0.15406412, 0.3964849, -0.79016757],
                 [0.6749844, -0.09548753, 0.16253561, -0.0560202],
                 [-0.4699119, 0.82842, 0.35657936, -0.45770356]],
                dtype=tf.float32))
        elif "layer_0/self_attention_prepost_wrapper/self_attention/qkv_transform/kernel" in w.name:
            tf.compat.v1.assign(w, tf.convert_to_tensor(
                [[0.03949255, 0.32946128, 0.38817757, 0.47047406, 0.07609951,
                  0.03131855, 0.15958023, 0.3292094, 0.42809182, 0.27969742,
                  0.39156157, -0.604576],
                 [0.4869359, -0.590637, 0.3092571, 0.10321742, 0.45608515,
                  0.27015948, 0.2959339, 0.32079375, 0.480197, -0.35878542,
                  0.04467481, 0.467416],
                 [-0.40064478, -0.05089319, -0.0999378, -0.6048573, 0.4379304,
                  0.3692366, 0.39103013, 0.24920046, -0.37060317, -0.03119427,
                  0.25101495, -0.21076846],
                 [0.42842942, 0.48276085, -0.2498649, -0.0978691, -0.01024461,
                  -0.04072392, -0.43499938, -0.09718102, 0.18174142, 0.07100755,
                  -0.6075252, -0.3018506]],
                dtype=tf.float32))
        elif "layer_0/encdec_attention_prepost_wrapper/encdec_attention/output_transform/kernel" in w.name:
            tf.compat.v1.assign(w, tf.convert_to_tensor(
                [[-0.31871676, 0.46451026, -0.32600254, -0.42110354],
                 [0.45953768, -0.52176374, -0.47615638, -0.7818449],
                 [0.7724063, -0.25975162, -0.49630436, 0.4681155],
                 [0.7189149, 0.25591546, 0.2100411, -0.3439259]],
                dtype=tf.float32))
        elif "layer_0/encdec_attention_prepost_wrapper/encdec_attention/q_transform/kernel" in w.name:
            tf.compat.v1.assign(w, tf.convert_to_tensor(
                [[0.27346164, -0.12056953, 0.4617111, 0.3126462],
                 [-0.65311253, 0.24505383, 0.56249744, -0.5582411],
                 [-0.47464705, -0.60553044, 0.3019113, 0.33609575],
                 [-0.24644238, -0.16026068, -0.0945828, -0.05111927]],
                dtype=tf.float32))
        elif "layer_0/encdec_attention_prepost_wrapper/encdec_attention/kv_transform/kernel" in w.name:
            tf.compat.v1.assign(w, tf.convert_to_tensor(
                [[-0.4204824, -0.23150605, 0.12045383, -0.6538836, 0.29070246,
                  -0.38376695, 0.65055054, -0.51375425],
                 [0.67025226, 0.0928542, -0.56662744, 0.12781924, -0.6193744,
                  -0.61801594, 0.07964879, 0.16530299],
                 [-0.06940353, -0.08732289, 0.24984497, 0.18489975, 0.5354368,
                  -0.07608587, -0.5801205, -0.17658263],
                 [0.54784423, -0.39817223, -0.11673075, 0.14106786, -0.1637184,
                  0.00750518, -0.44365695, -0.38458544]],
                dtype=tf.float32))
        elif "layer_0/ffn_prepost_wrapper/ffn/dense1/kernel" in w.name:
            tf.compat.v1.assign(w, tf.convert_to_tensor(
                [[-2.9522404e-01, -1.1858380e-01, 1.3743329e-01, -3.3782017e-01,
                  -3.8876867e-01, 4.8396683e-01, 1.5062505e-01, -3.7749952e-01,
                  -2.9512924e-01, -1.6212821e-02, -1.8608570e-04, -4.1960135e-01,
                  5.3800035e-01, 2.7734953e-01, 5.5179596e-03, -3.4055352e-02],
                 [2.1051055e-01, 3.6151302e-01, 3.1045640e-01, -1.1510965e-01,
                  4.6738219e-01, 1.2504590e-01, -1.9454169e-01, 4.1786206e-01,
                  -3.7045652e-01, 3.3854598e-01, -5.0978750e-01, 5.2220762e-01,
                  1.6077441e-01, -3.9631999e-01, 2.1259248e-01, 2.3286474e-01],
                 [-1.0005751e-01, -5.0858349e-01, 3.6911082e-01, -5.1783592e-02,
                  7.1038425e-02, -1.1148521e-01, -5.3392905e-01, 3.6009926e-01,
                  7.9382658e-02, 1.0371411e-01, -5.0254786e-01, 1.7596281e-01,
                  -9.2926025e-03, -6.4194202e-04, -1.4125884e-02, 4.7321141e-01],
                 [2.8647327e-01, 2.6127762e-01, 4.5843053e-01, 4.9775457e-01,
                  3.8056010e-01, -4.0995055e-01, 3.6980593e-01, 3.3520699e-02,
                  -1.8056035e-03, 1.6578972e-02, 1.6026449e-01, -2.4952739e-01,
                  -3.1434530e-01, -1.3158950e-01, 7.9998970e-03, 1.1293548e-01]],
                dtype=tf.float32))
        elif "layer_0/ffn_prepost_wrapper/ffn/dense2/kernel" in w.name:
            tf.compat.v1.assign(w, tf.convert_to_tensor(
                [[0.2794218, 0.29263318, 0.42604703, -0.24461824],
                 [0.32469118, -0.2654639, 0.17872995, 0.06222689],
                 [-0.07604656, -0.29360557, -0.462821, 0.3731665],
                 [0.27989155, 0.53663385, -0.12042063, 0.34913152],
                 [-0.50028926, 0.08958912, 0.50753117, -0.03860039],
                 [0.12980306, -0.47548878, 0.5443562, -0.41777247],
                 [0.16824102, -0.5271052, -0.18454444, 0.2987221],
                 [0.22610295, -0.3761598, 0.4983195, 0.31664205],
                 [-0.36606842, -0.3778124, 0.01393354, 0.23516071],
                 [0.26510388, -0.47218412, 0.42749757, 0.22174352],
                 [0.4139307, 0.09682184, -0.1447433, -0.07231569],
                 [0.01711905, -0.18132755, 0.03224993, 0.2071482],
                 [0.12195373, -0.52764714, 0.48840046, -0.21843264],
                 [0.12467605, -0.45452338, 0.05892056, -0.2852741],
                 [-0.5464495, -0.4856094, -0.29271287, 0.10828984],
                 [0.37080926, 0.01543814, 0.10875225, -0.2678996]],
                dtype=tf.float32))
    # Training-style forward pass must match the precomputed golden output.
    assert numpy.sum((decoder(decoder_inputs, cache, is_training=False).numpy()
                      - numpy.array([[[0.4727962, -0.6863654, 1.387909, -1.1743398],
                                      [1.4770155, -1.2802002, 0.18456227, -0.38137752],
                                      [0.6776164, -0.4934968, 1.1886327, -1.3727522]],
                                     [[-1.6973993, 0.26954588, 0.59817475, 0.82967865],
                                      [-1.6315649, -0.0030859, 0.7861572, 0.8484935],
                                      [-1.4942819, 0.42606276, 1.246516, -0.17829692]]])) ** 2) < 1e-9
    # for inference
    cache = decoder.create_decoding_internal_cache(
        encoder_outputs, encoder_inputs_padding, is_inference=True)
    # One decoding step: the input is a single token embedding per sample.
    decoder_inputs = tf.convert_to_tensor(
        [[1.9606155e+00, -1.8318410e+00, -1.8158482e+00, -3.7030798e-01],
         [-1.1357157e-03, 5.5629879e-01, 6.6107117e-02, -1.7330967e+00]], dtype=tf.float32)
    assert numpy.sum(
        (decoder(decoder_inputs, cache, is_training=False).numpy()
         - numpy.array([[1.4581295, -1.3640043, -0.1138487, 0.01972346],
                        [-0.06228875, -1.0514979, 1.6223053, -0.5085185]])) ** 2) < 1e-9
    # After the step, the cache must hold the step-1 self-attention keys and
    # values, shaped [batch, 1, num_heads, hidden_size // num_heads].
    assert numpy.sum(
        (cache["decoding_states"]["layer_0"]["self_attention"]["keys"].numpy()
         - numpy.array(numpy.reshape([[[-0.63596207, -0.49432975, -0.36614707, 0.03477353]],
                                      [[0.6539597, 0.4846998, 1.2206339, 0.67560077]]],
                                     [batch_size, 1, num_self_attention_heads,
                                      hidden_size // num_self_attention_heads]))) ** 2) < 1e-9
    assert numpy.sum(
        (cache["decoding_states"]["layer_0"]["self_attention"]["values"].numpy()
         - numpy.array(numpy.reshape([[[0.6045396, 0.78576076, 0.3205938, -1.2158906]],
                                      [[0.14660448, -0.38737938, 1.2869109, 0.6795136]]],
                                     [batch_size, 1, num_self_attention_heads,
                                      hidden_size // num_self_attention_heads]))) ** 2) < 1e-9
if __name__ == "__main__":
    test_transformer_decoder()
| 57.913978 | 107 | 0.583457 |
import numpy
import tensorflow as tf
from neurst.layers.decoders.transformer_decoder import TransformerDecoder
def test_transformer_decoder():
dmodel = 4
batch_size = 2
num_layers = 1
num_self_attention_heads = 2
hidden_size = dmodel
filter_size = 16
self_attention_dropout_rate = 0.1
ffn_dropout_rate = 0.1
layer_postprocess_dropout_rate = 0.1
decoder = TransformerDecoder(
num_layers=num_layers,
num_attention_heads=num_self_attention_heads,
hidden_size=hidden_size,
filter_size=filter_size,
attention_dropout_rate=self_attention_dropout_rate,
ffn_dropout_rate=ffn_dropout_rate,
layer_postprocess_dropout_rate=layer_postprocess_dropout_rate)
encoder_outputs = tf.convert_to_tensor(
[[[-0.37282175, 0.62301564, -2.0221813, -0.00875833],
[0.31516594, -1.117763, -1.0697726, 0.80373234],
[-0.717022, 0.3300997, -0.44306225, 1.550383],
[-1.5516962, 0.6025011, 1.8262954, 0.42469704]],
[[-0.98617625, 2.2856202, -1.3063533, 0.4174998],
[1.5724765, 1.2201295, 1.1479746, 0.7810888],
[0.8343642, -1.073388, 1.2718492, -0.7290778],
[-1.4126722, 1.8000795, -2.118672, -0.1366007]]], dtype=tf.float32)
encoder_inputs_padding = tf.convert_to_tensor(
[[0, 0, 0, 0], [0, 0, 1., 1.]], dtype=tf.float32)
decoder_inputs = tf.convert_to_tensor(
[[[8.6675537e-01, 2.2135425e-01, 1.4054185e+00, -4.2268831e-01],
[1.9606155e+00, -1.8318410e+00, -1.8158482e+00, -3.7030798e-01],
[-1.1357157e-03, 5.5629879e-01, 6.6107117e-02, -1.7330967e+00]],
[[-1.1870812e+00, -5.4499257e-01, -8.6622888e-01, -7.4098641e-01],
[2.2233427e-01, 5.3582352e-01, 3.0567116e-01, 1.0201423e-01],
[-1.8053315e+00, 7.2125041e-01, 1.0072237e+00, -2.0333264e+00]]], dtype=tf.float32)
cache = decoder.create_decoding_internal_cache(
encoder_outputs, encoder_inputs_padding, is_inference=False)
_ = decoder(decoder_inputs, cache, is_training=False)
for w in decoder.trainable_weights:
if "layer_0/self_attention_prepost_wrapper/self_attention/output_transform/kernel" in w.name:
tf.compat.v1.assign(w, tf.convert_to_tensor(
[[0.39332086, -0.3676856, -0.50203305, 0.6782059],
[-0.41239128, -0.15406412, 0.3964849, -0.79016757],
[0.6749844, -0.09548753, 0.16253561, -0.0560202],
[-0.4699119, 0.82842, 0.35657936, -0.45770356]],
dtype=tf.float32))
elif "layer_0/self_attention_prepost_wrapper/self_attention/qkv_transform/kernel" in w.name:
tf.compat.v1.assign(w, tf.convert_to_tensor(
[[0.03949255, 0.32946128, 0.38817757, 0.47047406, 0.07609951,
0.03131855, 0.15958023, 0.3292094, 0.42809182, 0.27969742,
0.39156157, -0.604576],
[0.4869359, -0.590637, 0.3092571, 0.10321742, 0.45608515,
0.27015948, 0.2959339, 0.32079375, 0.480197, -0.35878542,
0.04467481, 0.467416],
[-0.40064478, -0.05089319, -0.0999378, -0.6048573, 0.4379304,
0.3692366, 0.39103013, 0.24920046, -0.37060317, -0.03119427,
0.25101495, -0.21076846],
[0.42842942, 0.48276085, -0.2498649, -0.0978691, -0.01024461,
-0.04072392, -0.43499938, -0.09718102, 0.18174142, 0.07100755,
-0.6075252, -0.3018506]],
dtype=tf.float32))
elif "layer_0/encdec_attention_prepost_wrapper/encdec_attention/output_transform/kernel" in w.name:
tf.compat.v1.assign(w, tf.convert_to_tensor(
[[-0.31871676, 0.46451026, -0.32600254, -0.42110354],
[0.45953768, -0.52176374, -0.47615638, -0.7818449],
[0.7724063, -0.25975162, -0.49630436, 0.4681155],
[0.7189149, 0.25591546, 0.2100411, -0.3439259]],
dtype=tf.float32))
elif "layer_0/encdec_attention_prepost_wrapper/encdec_attention/q_transform/kernel" in w.name:
tf.compat.v1.assign(w, tf.convert_to_tensor(
[[0.27346164, -0.12056953, 0.4617111, 0.3126462],
[-0.65311253, 0.24505383, 0.56249744, -0.5582411],
[-0.47464705, -0.60553044, 0.3019113, 0.33609575],
[-0.24644238, -0.16026068, -0.0945828, -0.05111927]],
dtype=tf.float32))
elif "layer_0/encdec_attention_prepost_wrapper/encdec_attention/kv_transform/kernel" in w.name:
tf.compat.v1.assign(w, tf.convert_to_tensor(
[[-0.4204824, -0.23150605, 0.12045383, -0.6538836, 0.29070246,
-0.38376695, 0.65055054, -0.51375425],
[0.67025226, 0.0928542, -0.56662744, 0.12781924, -0.6193744,
-0.61801594, 0.07964879, 0.16530299],
[-0.06940353, -0.08732289, 0.24984497, 0.18489975, 0.5354368,
-0.07608587, -0.5801205, -0.17658263],
[0.54784423, -0.39817223, -0.11673075, 0.14106786, -0.1637184,
0.00750518, -0.44365695, -0.38458544]],
dtype=tf.float32))
elif "layer_0/ffn_prepost_wrapper/ffn/dense1/kernel" in w.name:
tf.compat.v1.assign(w, tf.convert_to_tensor(
[[-2.9522404e-01, -1.1858380e-01, 1.3743329e-01, -3.3782017e-01,
-3.8876867e-01, 4.8396683e-01, 1.5062505e-01, -3.7749952e-01,
-2.9512924e-01, -1.6212821e-02, -1.8608570e-04, -4.1960135e-01,
5.3800035e-01, 2.7734953e-01, 5.5179596e-03, -3.4055352e-02],
[2.1051055e-01, 3.6151302e-01, 3.1045640e-01, -1.1510965e-01,
4.6738219e-01, 1.2504590e-01, -1.9454169e-01, 4.1786206e-01,
-3.7045652e-01, 3.3854598e-01, -5.0978750e-01, 5.2220762e-01,
1.6077441e-01, -3.9631999e-01, 2.1259248e-01, 2.3286474e-01],
[-1.0005751e-01, -5.0858349e-01, 3.6911082e-01, -5.1783592e-02,
7.1038425e-02, -1.1148521e-01, -5.3392905e-01, 3.6009926e-01,
7.9382658e-02, 1.0371411e-01, -5.0254786e-01, 1.7596281e-01,
-9.2926025e-03, -6.4194202e-04, -1.4125884e-02, 4.7321141e-01],
[2.8647327e-01, 2.6127762e-01, 4.5843053e-01, 4.9775457e-01,
3.8056010e-01, -4.0995055e-01, 3.6980593e-01, 3.3520699e-02,
-1.8056035e-03, 1.6578972e-02, 1.6026449e-01, -2.4952739e-01,
-3.1434530e-01, -1.3158950e-01, 7.9998970e-03, 1.1293548e-01]],
dtype=tf.float32))
elif "layer_0/ffn_prepost_wrapper/ffn/dense2/kernel" in w.name:
tf.compat.v1.assign(w, tf.convert_to_tensor(
[[0.2794218, 0.29263318, 0.42604703, -0.24461824],
[0.32469118, -0.2654639, 0.17872995, 0.06222689],
[-0.07604656, -0.29360557, -0.462821, 0.3731665],
[0.27989155, 0.53663385, -0.12042063, 0.34913152],
[-0.50028926, 0.08958912, 0.50753117, -0.03860039],
[0.12980306, -0.47548878, 0.5443562, -0.41777247],
[0.16824102, -0.5271052, -0.18454444, 0.2987221],
[0.22610295, -0.3761598, 0.4983195, 0.31664205],
[-0.36606842, -0.3778124, 0.01393354, 0.23516071],
[0.26510388, -0.47218412, 0.42749757, 0.22174352],
[0.4139307, 0.09682184, -0.1447433, -0.07231569],
[0.01711905, -0.18132755, 0.03224993, 0.2071482],
[0.12195373, -0.52764714, 0.48840046, -0.21843264],
[0.12467605, -0.45452338, 0.05892056, -0.2852741],
[-0.5464495, -0.4856094, -0.29271287, 0.10828984],
[0.37080926, 0.01543814, 0.10875225, -0.2678996]],
dtype=tf.float32))
assert numpy.sum((decoder(decoder_inputs, cache, is_training=False).numpy()
- numpy.array([[[0.4727962, -0.6863654, 1.387909, -1.1743398],
[1.4770155, -1.2802002, 0.18456227, -0.38137752],
[0.6776164, -0.4934968, 1.1886327, -1.3727522]],
[[-1.6973993, 0.26954588, 0.59817475, 0.82967865],
[-1.6315649, -0.0030859, 0.7861572, 0.8484935],
[-1.4942819, 0.42606276, 1.246516, -0.17829692]]])) ** 2) < 1e-9
cache = decoder.create_decoding_internal_cache(
encoder_outputs, encoder_inputs_padding, is_inference=True)
decoder_inputs = tf.convert_to_tensor(
[[1.9606155e+00, -1.8318410e+00, -1.8158482e+00, -3.7030798e-01],
[-1.1357157e-03, 5.5629879e-01, 6.6107117e-02, -1.7330967e+00]], dtype=tf.float32)
assert numpy.sum(
(decoder(decoder_inputs, cache, is_training=False).numpy()
- numpy.array([[1.4581295, -1.3640043, -0.1138487, 0.01972346],
[-0.06228875, -1.0514979, 1.6223053, -0.5085185]])) ** 2) < 1e-9
assert numpy.sum(
(cache["decoding_states"]["layer_0"]["self_attention"]["keys"].numpy()
- numpy.array(numpy.reshape([[[-0.63596207, -0.49432975, -0.36614707, 0.03477353]],
[[0.6539597, 0.4846998, 1.2206339, 0.67560077]]],
[batch_size, 1, num_self_attention_heads,
hidden_size // num_self_attention_heads]))) ** 2) < 1e-9
assert numpy.sum(
(cache["decoding_states"]["layer_0"]["self_attention"]["values"].numpy()
- numpy.array(numpy.reshape([[[0.6045396, 0.78576076, 0.3205938, -1.2158906]],
[[0.14660448, -0.38737938, 1.2869109, 0.6795136]]],
[batch_size, 1, num_self_attention_heads,
hidden_size // num_self_attention_heads]))) ** 2) < 1e-9
if __name__ == "__main__":
test_transformer_decoder()
| true | true |
f7f9a62a9d60761d21925599218368c0cfde97af | 3,225 | py | Python | python/excel_writer_xlwt.py | extrabacon/pyspreadsheet | e8f83eaff1e5cb7dfbd774fa1764dd28326a6a5f | [
"MIT",
"Unlicense"
] | 24 | 2015-01-06T01:49:50.000Z | 2021-05-17T13:47:59.000Z | python/excel_writer_xlwt.py | extrabacon/pyspreadsheet | e8f83eaff1e5cb7dfbd774fa1764dd28326a6a5f | [
"MIT",
"Unlicense"
] | 17 | 2015-01-14T17:57:34.000Z | 2019-11-17T23:17:48.000Z | python/excel_writer_xlwt.py | extrabacon/pyspreadsheet | e8f83eaff1e5cb7dfbd774fa1764dd28326a6a5f | [
"MIT",
"Unlicense"
] | 10 | 2015-04-03T19:46:54.000Z | 2017-09-04T03:20:33.000Z | import sys, json, datetime, xlwt
from xlwt import *
def create_workbook(self, options = None):
    """Create a fresh in-memory xlwt Workbook on this writer.

    `options["properties"]` may carry workbook metadata; only "owner"
    is honoured so far.
    """
    self.workbook = Workbook()
    self.sheet_count = 0
    # Emit an "open" record so the consuming process knows which file
    # this writer is producing.
    self.dump_record("open", self.filename)
    if options and "properties" in options:
        # TODO: add support for more properties, xlwt has a ton of them
        prop = options["properties"]
        if "owner" in prop:
            self.workbook.owner = prop["owner"]
def add_sheet(self, name = None):
    """Append a worksheet to the workbook and make it the current sheet.

    When *name* is omitted, a default name ("Sheet1", "Sheet2", ...) is
    derived from the running sheet counter.
    """
    # `is None` rather than `== None`: identity test is the correct and
    # idiomatic check for the missing-argument sentinel.
    if name is None:
        name = "Sheet" + str(self.sheet_count + 1)
    self.current_sheet = self.workbook.add_sheet(name)
    self.sheet_count += 1
def write(self, row, col, data, format_name = None):
    """Write a scalar, a row (list), or a matrix (list of lists) at (row, col).

    `format_name` selects a style previously registered via format();
    when omitted, the sheet's default formatting applies.
    """
    # `is not None` instead of `!= None` -- identity check is the
    # idiomatic test and avoids any custom __eq__ surprises.
    style = self.formats[format_name] if format_name is not None else None
    def write_one(row, col, val):
        # Only pass the style argument when one was resolved, so the
        # backend's own default is used otherwise.
        if style is not None:
            self.current_sheet.write(row, col, val, style)
        else:
            self.current_sheet.write(row, col, val)
    if isinstance(data, list):
        row_index = row
        col_index = col
        for v1 in data:
            if isinstance(v1, list):
                # Nested list: lay out one full row, then advance down.
                col_index = col
                for v2 in v1:
                    write_one(row_index, col_index, v2)
                    col_index += 1
                row_index += 1
            else:
                # Flat list: values laid out along a single row.
                write_one(row_index, col_index, v1)
                col_index += 1
    else:
        write_one(row, col, data)
def format(self, name, properties):
    """Register a named cell style built from a property dictionary.

    Supported keys: "font" (name, size, color, bold, italic, underline,
    strikeout, superscript, subscript) and "numberFormat". The resulting
    xlwt XFStyle is stored in self.formats under *name* for later use
    by write().
    """
    style = XFStyle()
    if "font" in properties:
        style.font = Font()
        font = properties["font"]
        if "name" in font:
            style.font.name = font["name"]
        if "size" in font:
            style.font.size = font["size"]
        if "color" in font:
            # TODO: need to convert color codes
            style.font.colour_index = font["color"]
        if font.get("bold", False):
            style.font.bold = True
        if font.get("italic", False):
            style.font.italic = True
        if "underline" in font:
            # True is accepted as shorthand for "single"; the equality
            # comparison (not `is True`) is deliberate so both the
            # boolean and the string spellings work.
            if font["underline"] == True or font["underline"] == "single":
                style.font.underline = Font.UNDERLINE_SINGLE
            elif font["underline"] == "double":
                style.font.underline = Font.UNDERLINE_DOUBLE
            elif font["underline"] == "single accounting":
                style.font.underline = Font.UNDERLINE_SINGLE_ACC
            elif font["underline"] == "double accounting":
                style.font.underline = Font.UNDERLINE_DOUBLE_ACC
        if font.get("strikeout", False):
            style.font.struck_out = True
        # Superscript wins over subscript when both flags are set.
        if font.get("superscript", False):
            style.font.escapement = Font.ESCAPEMENT_SUPERSCRIPT
        elif font.get("subscript", False):
            style.font.escapement = Font.ESCAPEMENT_SUBSCRIPT
    if "numberFormat" in properties:
        style.num_format_str = properties["numberFormat"]
    # TODO: locked
    # TODO: hidden
    # TODO: alignment
    # TODO: textWrap
    # TODO: rotation
    # TODO: indent
    # TODO: shrinkToFit
    # TODO: justifyLastText
    # TODO: fill
    # TODO: borders
    self.formats[name] = style
def activate_sheet(self, id):
    """Switch the active sheet (not supported by this xlwt backend)."""
    # TODO: implement
    raise Exception("not implemented")
def set_sheet_settings(self, id, settings = None):
    """Apply per-sheet settings (not supported by this xlwt backend)."""
    # TODO: implement
    raise Exception("not implemented")
def set_row(self, index, settings):
    """Apply row-level settings (not supported by this xlwt backend)."""
    # TODO: implement
    raise Exception("not implemented")
def set_column(self, index, settings):
    """Apply column-level settings (not supported by this xlwt backend)."""
    # TODO: implement
    raise Exception("not implemented")
def close(self):
    """Serialize the workbook to self.filename, finishing the write."""
    self.workbook.save(self.filename)
| 28.289474 | 68 | 0.656434 | import sys, json, datetime, xlwt
from xlwt import *
def create_workbook(self, options = None):
self.workbook = Workbook()
self.sheet_count = 0
self.dump_record("open", self.filename)
if options and "properties" in options:
prop = options["properties"]
if "owner" in prop:
self.workbook.owner = prop["owner"]
def add_sheet(self, name = None):
if name == None:
name = "Sheet" + str(self.sheet_count + 1)
self.current_sheet = self.workbook.add_sheet(name)
self.sheet_count += 1
def write(self, row, col, data, format_name = None):
style = self.formats[format_name] if format_name != None else None
def write_one(row, col, val):
if style != None:
self.current_sheet.write(row, col, val, style)
else:
self.current_sheet.write(row, col, val)
if isinstance(data, list):
row_index = row
col_index = col
for v1 in data:
if isinstance(v1, list):
col_index = col
for v2 in v1:
write_one(row_index, col_index, v2)
col_index += 1
row_index += 1
else:
write_one(row_index, col_index, v1)
col_index += 1
else:
write_one(row, col, data)
def format(self, name, properties):
style = XFStyle()
if "font" in properties:
style.font = Font()
font = properties["font"]
if "name" in font:
style.font.name = font["name"]
if "size" in font:
style.font.size = font["size"]
if "color" in font:
style.font.colour_index = font["color"]
if font.get("bold", False):
style.font.bold = True
if font.get("italic", False):
style.font.italic = True
if "underline" in font:
if font["underline"] == True or font["underline"] == "single":
style.font.underline = Font.UNDERLINE_SINGLE
elif font["underline"] == "double":
style.font.underline = Font.UNDERLINE_DOUBLE
elif font["underline"] == "single accounting":
style.font.underline = Font.UNDERLINE_SINGLE_ACC
elif font["underline"] == "double accounting":
style.font.underline = Font.UNDERLINE_DOUBLE_ACC
if font.get("strikeout", False):
style.font.struck_out = True
if font.get("superscript", False):
style.font.escapement = Font.ESCAPEMENT_SUPERSCRIPT
elif font.get("subscript", False):
style.font.escapement = Font.ESCAPEMENT_SUBSCRIPT
if "numberFormat" in properties:
style.num_format_str = properties["numberFormat"]
self.formats[name] = style
def activate_sheet(self, id):
raise Exception("not implemented")
def set_sheet_settings(self, id, settings = None):
raise Exception("not implemented")
def set_row(self, index, settings):
raise Exception("not implemented")
def set_column(self, index, settings):
raise Exception("not implemented")
def close(self):
self.workbook.save(self.filename)
| true | true |
f7f9a63350d1c14b46a67404c9a69e03c1e36d10 | 13,202 | py | Python | django/utils/html.py | kkoralsky/django | 924af638e4d4fb8eb46a19ac0cafcb2e83480cf3 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | django/utils/html.py | kkoralsky/django | 924af638e4d4fb8eb46a19ac0cafcb2e83480cf3 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | django/utils/html.py | kkoralsky/django | 924af638e4d4fb8eb46a19ac0cafcb2e83480cf3 | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2020-02-06T10:31:51.000Z | 2020-02-06T10:31:51.000Z | """HTML utilities suitable for global use."""
import re
from urllib.parse import (
parse_qsl, quote, unquote, urlencode, urlsplit, urlunsplit,
)
from django.utils.encoding import force_text
from django.utils.functional import keep_lazy, keep_lazy_text
from django.utils.http import RFC3986_GENDELIMS, RFC3986_SUBDELIMS
from django.utils.safestring import SafeData, SafeText, mark_safe
from django.utils.text import normalize_newlines
from .html_parser import HTMLParseError, HTMLParser
# Configuration for urlize() function.
TRAILING_PUNCTUATION_RE = re.compile(
    '^'           # Beginning of word
    '(.*?)'       # The URL in word
    '([.,:;!]+)'  # Allowed non-wrapping, trailing punctuation
    '$'           # End of word
)
# Paired punctuation that may wrap a URL. The dump this file came through
# had HTML-entity-decoded the fourth pair into a duplicate ('<', '>');
# restored to the escaped forms ('&lt;', '&gt;') as in upstream Django so
# already-escaped text is handled.
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('&lt;', '&gt;'), ('"', '"'), ('\'', '\'')]

# List of possible strings used for bullets in bulleted lists. Entity
# forms restored (they had been decoded into duplicate bullet characters).
DOTS = ['&middot;', '*', '\u2022', '&#149;', '&bull;', '&#8226;']

# '&' not already starting a named or numeric character reference.
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'''([\s<>"']+)''')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)($|/.*)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
@keep_lazy(str, SafeText)
def escape(text):
    """
    Returns the given text with ampersands, quotes and angle brackets encoded
    for use in HTML.

    This function always escapes its input, even if it's already escaped and
    marked as such. This may result in double-escaping. If this is a concern,
    use conditional_escape() instead.
    """
    # Bug fix: the replacement strings had been entity-decoded into
    # self-replacements (e.g. .replace('&', '&')), making escape() a
    # no-op. Restored the entity forms. '&' must be replaced first so
    # the ampersands introduced by the other substitutions are not
    # themselves escaped.
    return mark_safe(
        force_text(text).replace('&', '&amp;').replace('<', '&lt;')
        .replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')
    )
# Translation table mapping characters that could terminate or alter a
# JavaScript string context to their \uXXXX escape sequences; applied in
# a single pass by escapejs() via str.translate.
_js_escapes = {
    ord('\\'): '\\u005C',
    ord('\''): '\\u0027',
    ord('"'): '\\u0022',
    ord('>'): '\\u003E',
    ord('<'): '\\u003C',
    ord('&'): '\\u0026',
    ord('='): '\\u003D',
    ord('-'): '\\u002D',
    ord(';'): '\\u003B',
    # U+2028/U+2029 are line terminators in JavaScript source (though not
    # in JSON), so they must be escaped as well.
    ord('\u2028'): '\\u2028',
    ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
@keep_lazy(str, SafeText)
def escapejs(value):
    """Hex encodes characters for use in JavaScript strings."""
    # str.translate applies the whole _js_escapes table in one pass.
    return mark_safe(force_text(value).translate(_js_escapes))
def conditional_escape(text):
    """
    Similar to escape(), except that it doesn't operate on pre-escaped strings.

    This function relies on the __html__ convention used both by Django's
    SafeData class and by third-party libraries like markupsafe.
    """
    # Objects advertising __html__ are considered already safe and are
    # rendered through that hook instead of being escaped again.
    html_method = getattr(text, '__html__', None)
    if html_method is not None:
        return html_method()
    return escape(text)
def format_html(format_string, *args, **kwargs):
    """
    Similar to str.format, but passes all arguments through conditional_escape,
    and calls 'mark_safe' on the result. This function should be used instead
    of str.format or % interpolation to build up small HTML fragments.
    """
    escaped_args = [conditional_escape(arg) for arg in args]
    escaped_kwargs = {key: conditional_escape(val) for key, val in kwargs.items()}
    return mark_safe(format_string.format(*escaped_args, **escaped_kwargs))
def format_html_join(sep, format_string, args_generator):
    """
    A wrapper of format_html, for the common case of a group of arguments that
    need to be formatted using the same format string, and then joined using
    'sep'. 'sep' is also passed through conditional_escape.

    'args_generator' should be an iterator that returns the sequence of 'args'
    that will be passed to format_html.

    Example:

      format_html_join('\n', "<li>{} {}</li>", ((u.first_name, u.last_name)
                                                  for u in users))
    """
    safe_sep = conditional_escape(sep)
    fragments = (format_html(format_string, *args) for args in args_generator)
    return mark_safe(safe_sep.join(fragments))
@keep_lazy_text
def linebreaks(value, autoescape=False):
    """Converts newlines into <p> and <br />s."""
    text = normalize_newlines(force_text(value))
    rendered = []
    # Two or more consecutive newlines separate paragraphs; single
    # newlines inside a paragraph become <br /> tags.
    for para in re.split('\n{2,}', text):
        if autoescape:
            para = escape(para)
        rendered.append('<p>%s</p>' % para.replace('\n', '<br />'))
    return '\n\n'.join(rendered)
class MLStripper(HTMLParser):
    """HTMLParser subclass that keeps text and entity/character references
    but discards the tags themselves."""

    def __init__(self):
        HTMLParser.__init__(self)
        self.reset()
        self.fed = []

    def get_data(self):
        """Return everything collected so far as one string."""
        return ''.join(self.fed)

    def handle_data(self, d):
        self.fed.append(d)

    def handle_entityref(self, name):
        # Re-emit named references (e.g. &amp;) verbatim.
        self.fed.append('&%s;' % name)

    def handle_charref(self, name):
        # Re-emit numeric references (e.g. &#38;) verbatim.
        self.fed.append('&#%s;' % name)
def _strip_once(value):
    """
    Internal tag stripping utility used by strip_tags.
    """
    s = MLStripper()
    try:
        s.feed(value)
    except HTMLParseError:
        # Markup too malformed to even consume: give up and return the
        # input unchanged.
        return value
    try:
        s.close()
    except HTMLParseError:
        # close() can choke on an unfinished construct at EOF; keep what
        # was parsed plus the raw tail still sitting in the buffer.
        return s.get_data() + s.rawdata
    else:
        return s.get_data()
@keep_lazy_text
def strip_tags(value):
    """Returns the given HTML with all tags stripped."""
    # A single _strip_once pass handles the typical case; re-running only
    # matters when removing one layer of markup exposes another.
    value = force_text(value)
    while '<' in value and '>' in value:
        stripped = _strip_once(value)
        if len(stripped) < len(value):
            value = stripped
        else:
            # Nothing was removed -- no further tags detectable, so stop.
            break
    return value
@keep_lazy_text
def strip_spaces_between_tags(value):
    """Return the given HTML with whitespace runs between tags removed."""
    text = force_text(value)
    return re.sub(r'>\s+<', '><', text)
def smart_urlquote(url):
    """Quote a URL if it isn't already quoted.

    Splits the URL, IDNA-encodes the host, re-encodes the query string, and
    unquote-then-requotes path and fragment so already-quoted input is not
    double-quoted.
    """
    def unquote_quote(segment):
        # Fully unquote first so a pre-quoted segment is not quoted twice.
        segment = unquote(segment)
        # Tilde is part of RFC3986 Unreserved Characters
        # http://tools.ietf.org/html/rfc3986#section-2.3
        # See also http://bugs.python.org/issue16285
        segment = quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + '~')
        return force_text(segment)
    # Handle IDN before quoting.
    try:
        scheme, netloc, path, query, fragment = urlsplit(url)
    except ValueError:
        # invalid IPv6 URL (normally square brackets in hostname part).
        return unquote_quote(url)
    try:
        netloc = netloc.encode('idna').decode('ascii')  # IDN -> ACE
    except UnicodeError:  # invalid domain part
        return unquote_quote(url)
    if query:
        # Separately unquoting key/value, so as to not mix querystring separators
        # included in query values. See #22267.
        query_parts = [(unquote(q[0]), unquote(q[1]))
                       for q in parse_qsl(query, keep_blank_values=True)]
        # urlencode will take care of quoting
        query = urlencode(query_parts)
    path = unquote_quote(path)
    fragment = unquote_quote(fragment)
    return urlunsplit((scheme, netloc, path, query, fragment))
@keep_lazy_text
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
    """
    Converts any URLs in text into clickable links.

    Works on http://, https://, www. links, and also on links ending in one of
    the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
    Links can have trailing punctuation (periods, commas, close-parens) and
    leading punctuation (opening parens) and it'll still do the right thing.

    If trim_url_limit is not None, the URLs in the link text longer than this
    limit will be truncated to trim_url_limit-3 characters and appended with
    an ellipsis.

    If nofollow is True, the links will get a rel="nofollow" attribute.

    If autoescape is True, the link text and URLs will be autoescaped.
    """
    safe_input = isinstance(text, SafeData)

    def trim_url(x, limit=trim_url_limit):
        # Shorten overly long display text, leaving room for the ellipsis.
        if limit is None or len(x) <= limit:
            return x
        return '%s...' % x[:max(0, limit - 3)]

    def unescape(text, trail):
        """
        If input URL is HTML-escaped, unescape it so as we can safely feed it to
        smart_urlquote. For example:
        http://example.com?x=1&amp;y=&lt;2&gt; => http://example.com?x=1&y=<2>
        """
        # BUGFIX: the source strings of these replacements must be the HTML
        # entities ('&amp;', '&lt;', ...), not the bare characters; the
        # previous (entity-decoded) version performed no-op replacements and
        # contained broken syntax.
        unescaped = (text + trail).replace(
            '&amp;', '&').replace('&lt;', '<').replace(
            '&gt;', '>').replace('&quot;', '"').replace('&#39;', "'")
        if trail and unescaped.endswith(trail):
            # Remove trail for unescaped if it was not consumed by unescape
            unescaped = unescaped[:-len(trail)]
        elif trail == ';':
            # Trail was consumed by unescape (as end-of-entity marker),
            # move it to text
            text += trail
            trail = ''
        return text, unescaped, trail

    def trim_punctuation(lead, middle, trail):
        """
        Trim trailing and wrapping punctuation from `middle`. Return the items
        of the new state.
        """
        # Continue trimming until middle remains unchanged.
        trimmed_something = True
        while trimmed_something:
            trimmed_something = False
            # Trim trailing punctuation.
            match = TRAILING_PUNCTUATION_RE.match(middle)
            if match:
                middle = match.group(1)
                trail = match.group(2) + trail
                trimmed_something = True
            # Trim wrapping punctuation.
            for opening, closing in WRAPPING_PUNCTUATION:
                if middle.startswith(opening):
                    middle = middle[len(opening):]
                    lead += opening
                    trimmed_something = True
                # Keep parentheses at the end only if they're balanced.
                if (middle.endswith(closing) and
                        middle.count(closing) == middle.count(opening) + 1):
                    middle = middle[:-len(closing)]
                    trail = closing + trail
                    trimmed_something = True
        return lead, middle, trail

    words = word_split_re.split(force_text(text))
    for i, word in enumerate(words):
        if '.' in word or '@' in word or ':' in word:
            # lead: Current punctuation trimmed from the beginning of the word.
            # middle: Current state of the word.
            # trail: Current punctuation trimmed from the end of the word.
            lead, middle, trail = '', word, ''
            # Deal with punctuation.
            lead, middle, trail = trim_punctuation(lead, middle, trail)
            # Make URL we want to point to.
            url = None
            nofollow_attr = ' rel="nofollow"' if nofollow else ''
            if simple_url_re.match(middle):
                middle, middle_unescaped, trail = unescape(middle, trail)
                url = smart_urlquote(middle_unescaped)
            elif simple_url_2_re.match(middle):
                middle, middle_unescaped, trail = unescape(middle, trail)
                url = smart_urlquote('http://%s' % middle_unescaped)
            elif ':' not in middle and simple_email_re.match(middle):
                local, domain = middle.rsplit('@', 1)
                try:
                    domain = domain.encode('idna').decode('ascii')
                except UnicodeError:
                    continue
                url = 'mailto:%s@%s' % (local, domain)
                nofollow_attr = ''
            # Make link.
            if url:
                trimmed = trim_url(middle)
                if autoescape and not safe_input:
                    lead, trail = escape(lead), escape(trail)
                    trimmed = escape(trimmed)
                middle = '<a href="%s"%s>%s</a>' % (escape(url), nofollow_attr, trimmed)
                words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
            else:
                if safe_input:
                    words[i] = mark_safe(word)
                elif autoescape:
                    words[i] = escape(word)
        elif safe_input:
            words[i] = mark_safe(word)
        elif autoescape:
            words[i] = escape(word)
    return ''.join(words)
def avoid_wrapping(value):
    """
    Keep a phrase on one line by swapping every ordinary space for a
    non-breaking space (U+00A0).
    """
    return "\xa0".join(value.split(" "))
def html_safe(klass):
    """
    Class decorator that defines __html__ (delegating to str()) so that
    non-Django template engines can detect classes whose __str__ returns
    SafeText. Rejects classes that already define __html__ or that lack
    their own __str__.
    """
    cls_dict = klass.__dict__
    if '__html__' in cls_dict:
        raise ValueError(
            "can't apply @html_safe to %s because it defines "
            "__html__()." % klass.__name__
        )
    if '__str__' not in cls_dict:
        raise ValueError(
            "can't apply @html_safe to %s because it doesn't "
            "define __str__()." % klass.__name__
        )
    original_str = klass.__str__
    klass.__str__ = lambda self: mark_safe(original_str(self))
    klass.__html__ = lambda self: str(self)
    return klass
| 35.489247 | 110 | 0.601348 |
import re
from urllib.parse import (
parse_qsl, quote, unquote, urlencode, urlsplit, urlunsplit,
)
from django.utils.encoding import force_text
from django.utils.functional import keep_lazy, keep_lazy_text
from django.utils.http import RFC3986_GENDELIMS, RFC3986_SUBDELIMS
from django.utils.safestring import SafeData, SafeText, mark_safe
from django.utils.text import normalize_newlines
from .html_parser import HTMLParseError, HTMLParser
# Regexes and punctuation tables used by urlize() below.
TRAILING_PUNCTUATION_RE = re.compile(
    '^'           # start of string
    '(.*?)'       # everything before trailing punctuation (non-greedy)
    '([.,:;!]+)'  # the trailing punctuation to strip off
    '$'
)
# (opening, closing) pairs that may wrap a URL. BUGFIX: restore the
# entity-escaped pair ('&lt;', '&gt;') and the entity forms in DOTS, which
# were corrupted by HTML-entity decoding (the list had '<'/'>' twice and
# literal bullet characters instead).
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('&lt;', '&gt;'),
                        ('"', '"'), ('\'', '\'')]
DOTS = ['&middot;', '*', '\u2022', '&#149;', '&bull;', '&#8226;']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'''([\s<>"']+)''')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)($|/.*)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
@keep_lazy(str, SafeText)
def escape(text):
    """
    Return the given text with ampersands, quotes and angle brackets encoded
    for use in HTML. This always escapes its input; to avoid double-escaping
    already-safe markup, use conditional_escape() instead.
    """
    # BUGFIX: the replacement targets must be the HTML entities; the previous
    # (entity-decoded) version performed identity replacements and ended in
    # broken syntax.
    return mark_safe(
        force_text(text).replace('&', '&amp;').replace('<', '&lt;')
        .replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')
    )
# Translation table for escapejs(): characters that could break out of a
# JavaScript string literal, mapped to their \uXXXX escape sequences.
_js_escapes = {
    ord('\\'): '\\u005C',
    ord('\''): '\\u0027',
    ord('"'): '\\u0022',
    ord('>'): '\\u003E',
    ord('<'): '\\u003C',
    ord('&'): '\\u0026',
    ord('='): '\\u003D',
    ord('-'): '\\u002D',
    ord(';'): '\\u003B',
    # U+2028/U+2029 act as line terminators in JavaScript source, even
    # inside string literals, so they must be escaped too.
    ord('\u2028'): '\\u2028',
    ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
@keep_lazy(str, SafeText)
def escapejs(value):
    """Hex-escape characters so *value* is safe inside JS string literals."""
    translated = force_text(value).translate(_js_escapes)
    return mark_safe(translated)
def conditional_escape(text):
    """
    Escape *text* for HTML unless it already advertises itself as safe by
    providing an __html__() method (the cross-framework safe-string
    protocol), in which case that markup is returned unchanged.
    """
    if hasattr(text, '__html__'):
        return text.__html__()
    return escape(text)
def format_html(format_string, *args, **kwargs):
    """
    Build a small HTML fragment: escape all arguments via
    conditional_escape, format them with str.format semantics, and mark
    the result safe.
    """
    safe_args = tuple(conditional_escape(a) for a in args)
    safe_kwargs = dict((k, conditional_escape(v)) for k, v in kwargs.items())
    return mark_safe(format_string.format(*safe_args, **safe_kwargs))
def format_html_join(sep, format_string, args_generator):
    """
    Apply format_html to every argument tuple yielded by *args_generator*
    and join the fragments with the (escaped) separator *sep*.
    """
    joined = conditional_escape(sep).join(
        format_html(format_string, *tuple(args))
        for args in args_generator)
    return mark_safe(joined)
@keep_lazy_text
def linebreaks(value, autoescape=False):
    """Wrap paragraphs in <p> tags and render single newlines as <br />."""
    text = normalize_newlines(force_text(value))
    rendered = []
    for para in re.split('\n{2,}', text):
        if autoescape:
            # escape() leaves '\n' alone, so the <br /> substitution below
            # still works on the escaped text.
            para = escape(para)
        rendered.append('<p>%s</p>' % para.replace('\n', '<br />'))
    return '\n\n'.join(rendered)
class MLStripper(HTMLParser):
    """HTML parser that accumulates text content and entity references
    while discarding the tags themselves (used by _strip_once below)."""
    def __init__(self):
        HTMLParser.__init__(self)
        self.reset()
        # Fragments of non-tag content collected so far; joined by get_data().
        self.fed = []
    def handle_data(self, d):
        # Plain text between tags is kept verbatim.
        self.fed.append(d)
    def handle_entityref(self, name):
        # Re-emit named entity references (e.g. &amp;) unchanged.
        self.fed.append('&%s;' % name)
    def handle_charref(self, name):
        # Re-emit numeric character references (e.g. &#38;) unchanged.
        self.fed.append('&#%s;' % name)
    def get_data(self):
        """Return everything collected so far as a single string."""
        return ''.join(self.fed)
def _strip_once(value):
    """Strip one level of HTML tags from *value*; helper for strip_tags."""
    parser = MLStripper()
    try:
        parser.feed(value)
    except HTMLParseError:
        # Not parseable at all -- return the input untouched.
        return value
    try:
        parser.close()
    except HTMLParseError:
        # Close failed: keep the collected data plus the unconsumed tail.
        return parser.get_data() + parser.rawdata
    return parser.get_data()
@keep_lazy_text
def strip_tags(value):
    """Return *value* with every HTML tag removed."""
    # One pass of _strip_once is typical; looping handles inputs where
    # removing one layer of tags exposes another. Stop as soon as a pass
    # fails to shrink the string.
    text = force_text(value)
    while '<' in text and '>' in text:
        reduced = _strip_once(text)
        if len(reduced) >= len(text):
            break
        text = reduced
    return text
@keep_lazy_text
def strip_spaces_between_tags(value):
    """Collapse whitespace runs that sit between consecutive HTML tags."""
    text = force_text(value)
    return re.sub(r'>\s+<', '><', text)
def smart_urlquote(url):
    """Quote a URL if it isn't already quoted.

    Splits the URL, IDNA-encodes the host, re-encodes the query string, and
    unquote-then-requotes path and fragment so pre-quoted input is not
    quoted twice.
    """
    def unquote_quote(segment):
        # Fully unquote first so an already-quoted segment is not re-quoted.
        segment = unquote(segment)
        # Tilde is part of RFC3986 Unreserved Characters
        # http://tools.ietf.org/html/rfc3986#section-2.3
        # See also http://bugs.python.org/issue16285
        segment = quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + '~')
        return force_text(segment)
    # Handle IDN before quoting.
    try:
        scheme, netloc, path, query, fragment = urlsplit(url)
    except ValueError:
        # invalid IPv6 URL (normally square brackets in hostname part).
        return unquote_quote(url)
    try:
        netloc = netloc.encode('idna').decode('ascii')  # IDN -> ACE
    except UnicodeError:  # invalid domain part
        return unquote_quote(url)
    if query:
        # Separately unquoting key/value, so as to not mix querystring separators
        # included in query values. See #22267.
        query_parts = [(unquote(q[0]), unquote(q[1]))
                       for q in parse_qsl(query, keep_blank_values=True)]
        # urlencode will take care of quoting
        query = urlencode(query_parts)
    path = unquote_quote(path)
    fragment = unquote_quote(fragment)
    return urlunsplit((scheme, netloc, path, query, fragment))
@keep_lazy_text
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
    """
    Converts any URLs in text into clickable links.

    Works on http://, https://, www. links, and also on links ending in one of
    the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
    Links can have trailing punctuation (periods, commas, close-parens) and
    leading punctuation (opening parens) and it'll still do the right thing.

    If trim_url_limit is not None, the URLs in the link text longer than this
    limit will be truncated to trim_url_limit-3 characters and appended with
    an ellipsis.

    If nofollow is True, the links will get a rel="nofollow" attribute.

    If autoescape is True, the link text and URLs will be autoescaped.
    """
    safe_input = isinstance(text, SafeData)

    def trim_url(x, limit=trim_url_limit):
        # Shorten overly long display text, leaving room for the ellipsis.
        if limit is None or len(x) <= limit:
            return x
        return '%s...' % x[:max(0, limit - 3)]

    def unescape(text, trail):
        """
        If input URL is HTML-escaped, unescape it so as we can safely feed it to
        smart_urlquote. For example:
        http://example.com?x=1&amp;y=&lt;2&gt; => http://example.com?x=1&y=<2>
        """
        # BUGFIX: this statement was truncated/garbled by HTML-entity
        # decoding; restored so each HTML entity is mapped back to its
        # plain character.
        unescaped = (text + trail).replace(
            '&amp;', '&').replace('&lt;', '<').replace(
            '&gt;', '>').replace('&quot;', '"').replace('&#39;', "'")
        if trail and unescaped.endswith(trail):
            # Remove trail for unescaped if it was not consumed by unescape
            unescaped = unescaped[:-len(trail)]
        elif trail == ';':
            # Trail was consumed by unescape (as end-of-entity marker),
            # move it to text
            text += trail
            trail = ''
        return text, unescaped, trail

    def trim_punctuation(lead, middle, trail):
        """
        Trim trailing and wrapping punctuation from `middle`. Return the items
        of the new state.
        """
        # Continue trimming until middle remains unchanged.
        trimmed_something = True
        while trimmed_something:
            trimmed_something = False
            # Trim trailing punctuation.
            match = TRAILING_PUNCTUATION_RE.match(middle)
            if match:
                middle = match.group(1)
                trail = match.group(2) + trail
                trimmed_something = True
            # Trim wrapping punctuation.
            for opening, closing in WRAPPING_PUNCTUATION:
                if middle.startswith(opening):
                    middle = middle[len(opening):]
                    lead += opening
                    trimmed_something = True
                # Keep parentheses at the end only if they're balanced.
                if (middle.endswith(closing) and
                        middle.count(closing) == middle.count(opening) + 1):
                    middle = middle[:-len(closing)]
                    trail = closing + trail
                    trimmed_something = True
        return lead, middle, trail

    words = word_split_re.split(force_text(text))
    for i, word in enumerate(words):
        if '.' in word or '@' in word or ':' in word:
            # lead: Current punctuation trimmed from the beginning of the word.
            # middle: Current state of the word.
            # trail: Current punctuation trimmed from the end of the word.
            lead, middle, trail = '', word, ''
            # Deal with punctuation.
            lead, middle, trail = trim_punctuation(lead, middle, trail)
            # Make URL we want to point to.
            url = None
            nofollow_attr = ' rel="nofollow"' if nofollow else ''
            if simple_url_re.match(middle):
                middle, middle_unescaped, trail = unescape(middle, trail)
                url = smart_urlquote(middle_unescaped)
            elif simple_url_2_re.match(middle):
                middle, middle_unescaped, trail = unescape(middle, trail)
                url = smart_urlquote('http://%s' % middle_unescaped)
            elif ':' not in middle and simple_email_re.match(middle):
                local, domain = middle.rsplit('@', 1)
                try:
                    domain = domain.encode('idna').decode('ascii')
                except UnicodeError:
                    continue
                url = 'mailto:%s@%s' % (local, domain)
                nofollow_attr = ''
            # Make link.
            if url:
                trimmed = trim_url(middle)
                if autoescape and not safe_input:
                    lead, trail = escape(lead), escape(trail)
                    trimmed = escape(trimmed)
                middle = '<a href="%s"%s>%s</a>' % (escape(url), nofollow_attr, trimmed)
                words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
            else:
                if safe_input:
                    words[i] = mark_safe(word)
                elif autoescape:
                    words[i] = escape(word)
        elif safe_input:
            words[i] = mark_safe(word)
        elif autoescape:
            words[i] = escape(word)
    return ''.join(words)
def avoid_wrapping(value):
    """Replace ordinary spaces with non-breaking spaces (U+00A0) so the
    phrase is never wrapped mid-text."""
    return value.replace(" ", "\N{NO-BREAK SPACE}")
def html_safe(klass):
    """
    Class decorator adding an __html__ method (delegating to str()) and
    wrapping __str__ so its result is marked safe. Raises ValueError for
    classes that already define __html__ or that do not define their own
    __str__.
    """
    if '__html__' in klass.__dict__:
        raise ValueError(
            "can't apply @html_safe to %s because it defines "
            "__html__()." % klass.__name__
        )
    if '__str__' not in klass.__dict__:
        raise ValueError(
            "can't apply @html_safe to %s because it doesn't "
            "define __str__()." % klass.__name__
        )
    str_method = klass.__str__
    klass.__str__ = lambda self: mark_safe(str_method(self))
    klass.__html__ = lambda self: str(self)
    return klass
| true | true |
f7f9a682d5806e5a26e6939c37218f09f20fe12e | 1,873 | py | Python | src/anime_search.py | SparXFusion/Anime-DL-Bot | 94fb942168c31df8bc548f48c6ea3e2b6306c242 | [
"MIT"
] | null | null | null | src/anime_search.py | SparXFusion/Anime-DL-Bot | 94fb942168c31df8bc548f48c6ea3e2b6306c242 | [
"MIT"
] | null | null | null | src/anime_search.py | SparXFusion/Anime-DL-Bot | 94fb942168c31df8bc548f48c6ea3e2b6306c242 | [
"MIT"
] | null | null | null | # Copyright © 2021 BaraniARR
# Encoding = 'utf-8'
# Licensed under MIT License
# Special Thanks for gogoanime
from pyrogram import *
from pyrogram.types import *
from requests_html import HTMLSession
from bs4 import BeautifulSoup
# Searching anime by regex pattern "/search <space> Anime Name"
def anime_search(client, message):
    """Handle the "/search <anime name>" command.

    Scrapes the gogoanime search page for the query and replies with an
    inline keyboard of matching titles; with no query, replies with usage
    help.

    ``client`` is the Pyrogram client (unused directly); ``message`` is the
    incoming command message.
    """
    # Drop the "/search" token itself; re-join the rest as the query.
    # (Avoids shadowing the builtin `str`, which the original did.)
    parts = message.text.split()
    query = " ".join(parts[1:])
    if not query:
        # No query string was given: show the expected command format.
        message.reply_animation(
            "https://media.tenor.com/images/cfe564edcb140705ce45aeeca8183812/tenor.gif",
            caption=f"""**Your Query should be in This format:**
 `/search <space> Name of the Anime you want to Search.`""",
            parse_mode="markdown")
        return
    # NOTE(review): the query is interpolated unencoded; consider
    # urllib.parse.quote_plus if queries with special characters misbehave.
    url = f"https://www1.gogoanime.pe//search.html?keyword={query}"
    session = HTMLSession()
    response = session.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    animes = soup.find("ul", {"class": "items"}).find_all("li")
    keyb = []
    for anime in animes:  # one button per search result
        title = anime.a["title"]
        # The slug is the third path segment of the result link.
        slug = anime.a["href"].split('/')[2]
        keyb.append([InlineKeyboardButton(
            "{}".format(title), callback_data="dt_{}".format(slug))])
    if not keyb:
        # Empty result list: tell the user instead of sending a blank keyboard.
        message.reply_text("No results found, Check your Spelling and Search Again...")
    else:
        rep = InlineKeyboardMarkup(keyb)
        message.reply_text(text=f"Your Search results for **{query}**",
                           reply_markup=rep, parse_mode="markdown")
| 39.851064 | 123 | 0.61559 |
from pyrogram import *
from pyrogram.types import *
from requests_html import HTMLSession
from bs4 import BeautifulSoup
def anime_search(client, message):
    """Handle the "/search <anime name>" command.

    Scrapes the gogoanime search page for the query and replies with an
    inline keyboard of matching titles; with no query, replies with usage
    help.
    """
    # Drop the "/search" token itself; re-join the rest as the query.
    # (Avoids shadowing the builtin `str`, which the original did.)
    parts = message.text.split()
    query = " ".join(parts[1:])
    if not query:
        # No query string was given: show the expected command format.
        message.reply_animation(
            "https://media.tenor.com/images/cfe564edcb140705ce45aeeca8183812/tenor.gif",
            caption=f"""**Your Query should be in This format:**
 `/search <space> Name of the Anime you want to Search.`""",
            parse_mode="markdown")
        return
    # NOTE(review): the query is interpolated unencoded; consider
    # urllib.parse.quote_plus if queries with special characters misbehave.
    url = f"https://www1.gogoanime.pe//search.html?keyword={query}"
    session = HTMLSession()
    response = session.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    animes = soup.find("ul", {"class": "items"}).find_all("li")
    keyb = []
    for anime in animes:  # one button per search result
        title = anime.a["title"]
        # The slug is the third path segment of the result link.
        slug = anime.a["href"].split('/')[2]
        keyb.append([InlineKeyboardButton(
            "{}".format(title), callback_data="dt_{}".format(slug))])
    if not keyb:
        message.reply_text("No results found, Check your Spelling and Search Again...")
    else:
        rep = InlineKeyboardMarkup(keyb)
        message.reply_text(text=f"Your Search results for **{query}**",
                           reply_markup=rep, parse_mode="markdown")
| true | true |
f7f9a704da9eb36e8a4c4758cd9c7d4a60880097 | 569 | py | Python | charts/random_coord.py | taimur1871/plotly_examples | 04cccbc1c60963c4a6b5405614d9136de93846f6 | [
"MIT"
] | null | null | null | charts/random_coord.py | taimur1871/plotly_examples | 04cccbc1c60963c4a6b5405614d9136de93846f6 | [
"MIT"
] | null | null | null | charts/random_coord.py | taimur1871/plotly_examples | 04cccbc1c60963c4a6b5405614d9136de93846f6 | [
"MIT"
] | 1 | 2022-01-19T03:15:52.000Z | 2022-01-19T03:15:52.000Z | import numpy as np
import pandas as pd
# set coordinate range
max_lat = 41.986046
min_lat = 41.056583
max_long = -89.766294
min_long = -92.238217
# random data
fake_data = []
for i in range(10):
rand_lat = np.random.uniform(min_lat, max_lat)
rand_long = np.random.uniform(min_long, max_long)
rand_dist = np.random.uniform(2500.00, 5500.00)
well_name = 'well' + str(i)
fake_data.append((well_name, rand_long, rand_lat, rand_dist))
df = pd.DataFrame(fake_data)
df.rename({0:'Well', 1:'Longitude', 2:'Latitude', 3:'Distance'}, axis=1, inplace=True) | 27.095238 | 86 | 0.706503 | import numpy as np
import numpy as np
import pandas as pd

# Bounding box for the generated coordinates (fixed lat/long window).
max_lat = 41.986046
min_lat = 41.056583
max_long = -89.766294
min_long = -92.238217

# Build ten fake well records: (name, longitude, latitude, distance).
fake_data = []
for i in range(10):
    rand_lat = np.random.uniform(min_lat, max_lat)
    rand_long = np.random.uniform(min_long, max_long)
    rand_dist = np.random.uniform(2500.00, 5500.00)
    well_name = 'well' + str(i)
    fake_data.append((well_name, rand_long, rand_lat, rand_dist))

# Name the columns at construction time instead of renaming afterwards.
df = pd.DataFrame(fake_data, columns=['Well', 'Longitude', 'Latitude', 'Distance'])
f7f9a7572e4eb73ebe22a3e5d8af7b4ddccfcf01 | 24,385 | py | Python | relate/checks.py | romer8/relate | d25e3479f7746cf77d45fe0d34aff495dea9bd84 | [
"Unlicense"
] | null | null | null | relate/checks.py | romer8/relate | d25e3479f7746cf77d45fe0d34aff495dea9bd84 | [
"Unlicense"
] | 6 | 2015-08-18T00:13:40.000Z | 2018-01-31T05:55:13.000Z | relate/checks.py | davis68/relate | eb40c8c17d4a724a60de3caa3334521a833bad5c | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import division
__copyright__ = "Copyright (C) 2017 Dong Zhuang"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
from django.conf import settings
from django.core.checks import Critical, Warning, register
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import import_string
# Message templates for check errors; filled via %-style named interpolation.
REQUIRED_CONF_ERROR_PATTERN = (
    "You must configure %(location)s for RELATE to run properly.")
INSTANCE_ERROR_PATTERN = "%(location)s must be an instance of %(types)s."
GENERIC_ERROR_PATTERN = "Error in '%(location)s': %(error_type)s: %(error_str)s"
# Names of the Django/RELATE settings inspected by the checks below, kept as
# module constants so check ids and messages spell them consistently.
# (sic: "CUTOMIZED" typo is preserved -- the setting name itself carries it;
# confirm against settings files before renaming.)
USE_I18N = "USE_I18N"
LANGUAGES = "LANGUAGES"
RELATE_SITE_NAME = "RELATE_SITE_NAME"
RELATE_CUTOMIZED_SITE_NAME = "RELATE_CUTOMIZED_SITE_NAME"
RELATE_OVERRIDE_TEMPLATES_DIRS = "RELATE_OVERRIDE_TEMPLATES_DIRS"
EMAIL_CONNECTIONS = "EMAIL_CONNECTIONS"
RELATE_BASE_URL = "RELATE_BASE_URL"
RELATE_FACILITIES = "RELATE_FACILITIES"
RELATE_MAINTENANCE_MODE_EXCEPTIONS = "RELATE_MAINTENANCE_MODE_EXCEPTIONS"
RELATE_SESSION_RESTART_COOLDOWN_SECONDS = "RELATE_SESSION_RESTART_COOLDOWN_SECONDS"
RELATE_TICKET_MINUTES_VALID_AFTER_USE = "RELATE_TICKET_MINUTES_VALID_AFTER_USE"
GIT_ROOT = "GIT_ROOT"
RELATE_BULK_STORAGE = "RELATE_BULK_STORAGE"
# NOTE(review): the *_TAG values below look like Django check-framework
# registration tags -- confirm against the register() calls elsewhere.
RELATE_STARTUP_CHECKS = "RELATE_STARTUP_CHECKS"
RELATE_STARTUP_CHECKS_EXTRA = "RELATE_STARTUP_CHECKS_EXTRA"
RELATE_STARTUP_CHECKS_TAG = "start_up_check"
RELATE_STARTUP_CHECKS_EXTRA_TAG = "startup_checks_extra"
RELATE_DISABLE_CODEHILITE_MARKDOWN_EXTENSION = (
    "RELATE_DISABLE_CODEHILITE_MARKDOWN_EXTENSION")
RELATE_CUSTOM_PAGE_TYPES_REMOVED_DEADLINE = (
    "RELATE_CUSTOM_PAGE_TYPES_REMOVED_DEADLINE")
class RelateCriticalCheckMessage(Critical):
    """Critical check message whose ``obj`` defaults to the name of
    ImproperlyConfigured when the caller supplies none."""
    def __init__(self, *args, **kwargs):
        super(RelateCriticalCheckMessage, self).__init__(*args, **kwargs)
        if not self.obj:
            self.obj = ImproperlyConfigured.__name__
class DeprecatedException(Exception):
    """Exception type for deprecated-usage errors.

    NOTE(review): no raise site is visible in this chunk; confirm intended
    usage against the rest of the codebase.
    """
    pass
def get_ip_network(ip_range):
    """Parse *ip_range* (any str()-convertible value) into an
    ipaddress IPv4Network/IPv6Network; raises ValueError for bad input."""
    import ipaddress
    normalized = str(ip_range)
    return ipaddress.ip_network(normalized)
def check_relate_settings(app_configs, **kwargs):
errors = []
# {{{ check RELATE_BASE_URL
relate_base_url = getattr(settings, RELATE_BASE_URL, None)
if relate_base_url is None:
errors.append(RelateCriticalCheckMessage(
msg=REQUIRED_CONF_ERROR_PATTERN % {"location": RELATE_BASE_URL},
id="relate_base_url.E001"
))
elif not isinstance(relate_base_url, str):
errors.append(RelateCriticalCheckMessage(
msg=(INSTANCE_ERROR_PATTERN
% {"location": RELATE_BASE_URL, "types": "str"}),
id="relate_base_url.E002"
))
elif not relate_base_url.strip():
errors.append(RelateCriticalCheckMessage(
msg="%(location)s should not be an empty string"
% {"location": RELATE_BASE_URL},
id="relate_base_url.E003"
))
# }}}
from accounts.utils import relate_user_method_settings
# check RELATE_EMAIL_APPELLATION_PRIORITY_LIST
errors.extend(
relate_user_method_settings.check_email_appellation_priority_list())
# check RELATE_CSV_SETTINGS
errors.extend(relate_user_method_settings.check_custom_full_name_method())
# check RELATE_USER_PROFILE_MASK_METHOD
errors.extend(relate_user_method_settings.check_user_profile_mask_method())
# {{{ check EMAIL_CONNECTIONS
email_connections = getattr(settings, EMAIL_CONNECTIONS, None)
if email_connections is not None:
if not isinstance(email_connections, dict):
errors.append(RelateCriticalCheckMessage(
msg=(
INSTANCE_ERROR_PATTERN
% {"location": EMAIL_CONNECTIONS,
"types": "dict"}),
id="email_connections.E001"
))
else:
for label, c in email_connections.items():
if not isinstance(c, dict):
errors.append(RelateCriticalCheckMessage(
msg=(
INSTANCE_ERROR_PATTERN
% {"location": "'%s' in '%s'"
% (label, EMAIL_CONNECTIONS),
"types": "dict"}),
id="email_connections.E002"
))
else:
if "backend" in c:
try:
import_string(c["backend"])
except ImportError as e:
errors.append(RelateCriticalCheckMessage(
msg=(
GENERIC_ERROR_PATTERN
% {
"location":
"'%s' in %s"
% (label, RELATE_FACILITIES),
"error_type": type(e).__name__,
"error_str": str(e)
}),
id="email_connections.E003")
)
# }}}
# {{{ check RELATE_FACILITIES
relate_facilities_conf = getattr(settings, RELATE_FACILITIES, None)
if relate_facilities_conf is not None:
from course.utils import get_facilities_config
try:
facilities = get_facilities_config()
except Exception as e:
errors.append(RelateCriticalCheckMessage(
msg=(
GENERIC_ERROR_PATTERN
% {
"location": RELATE_FACILITIES,
"error_type": type(e).__name__,
"error_str": str(e)
}),
id="relate_facilities.E001")
)
else:
if not isinstance(facilities, dict):
errors.append(RelateCriticalCheckMessage(
msg=(
"'%(location)s' must either be or return a dictionary"
% {"location": RELATE_FACILITIES}),
id="relate_facilities.E002")
)
else:
for facility, conf in facilities.items():
if not isinstance(conf, dict):
errors.append(RelateCriticalCheckMessage(
msg=(
INSTANCE_ERROR_PATTERN
% {"location":
"Facility `%s` in %s"
% (facility, RELATE_FACILITIES),
"types": "dict"}),
id="relate_facilities.E003")
)
else:
ip_ranges = conf.get("ip_ranges", [])
if ip_ranges:
if not isinstance(ip_ranges, (list, tuple)):
errors.append(RelateCriticalCheckMessage(
msg=(
INSTANCE_ERROR_PATTERN
% {"location":
"'ip_ranges' in facility `%s` in %s"
% (facilities, RELATE_FACILITIES),
"types": "list or tuple"}),
id="relate_facilities.E004")
)
else:
for ip_range in ip_ranges:
try:
get_ip_network(ip_range)
except Exception as e:
errors.append(RelateCriticalCheckMessage(
msg=(
GENERIC_ERROR_PATTERN
% {
"location":
"'ip_ranges' in "
"facility `%s` in %s"
% (facility,
RELATE_FACILITIES),
"error_type": type(e).__name__,
"error_str": str(e)
}),
id="relate_facilities.E005")
)
else:
if not callable(relate_facilities_conf):
errors.append(Warning(
msg=(
"Faclity `%s` in %s is an open facility "
"as it has no configured `ip_ranges`"
% (facility, RELATE_FACILITIES)
),
id="relate_facilities.W001"
))
# }}}
# {{{ check RELATE_MAINTENANCE_MODE_EXCEPTIONS
relate_maintenance_mode_exceptions = getattr(
settings, RELATE_MAINTENANCE_MODE_EXCEPTIONS, None)
if relate_maintenance_mode_exceptions is not None:
if not isinstance(relate_maintenance_mode_exceptions, (list, tuple)):
errors.append(RelateCriticalCheckMessage(
msg=(INSTANCE_ERROR_PATTERN
% {"location": RELATE_MAINTENANCE_MODE_EXCEPTIONS,
"types": "list or tuple"}),
id="relate_maintenance_mode_exceptions.E001")
)
else:
for ip in relate_maintenance_mode_exceptions:
try:
get_ip_network(ip)
except Exception as e:
errors.append(RelateCriticalCheckMessage(
msg=(
GENERIC_ERROR_PATTERN
% {"location":
"ip/ip_ranges '%s' in %s"
% (ip, RELATE_FACILITIES),
"error_type": type(e).__name__,
"error_str": str(e)
}),
id="relate_maintenance_mode_exceptions.E002")
)
# }}}
# {{{ check RELATE_SESSION_RESTART_COOLDOWN_SECONDS
relate_session_restart_cooldown_seconds = getattr(
settings, RELATE_SESSION_RESTART_COOLDOWN_SECONDS, None)
if relate_session_restart_cooldown_seconds is not None:
if not isinstance(relate_session_restart_cooldown_seconds, (int, float)):
errors.append(RelateCriticalCheckMessage(
msg=(INSTANCE_ERROR_PATTERN
% {"location": RELATE_SESSION_RESTART_COOLDOWN_SECONDS,
"types": "int or float"}),
id="relate_session_restart_cooldown_seconds.E001")
)
else:
if relate_session_restart_cooldown_seconds < 0:
errors.append(RelateCriticalCheckMessage(
msg=(
"%(location)s must be a positive number, "
"got %(value)s instead"
% {"location": RELATE_SESSION_RESTART_COOLDOWN_SECONDS,
"value": relate_session_restart_cooldown_seconds}),
id="relate_session_restart_cooldown_seconds.E002")
)
# }}}
# {{{ check RELATE_TICKET_MINUTES_VALID_AFTER_USE
relate_ticket_minutes_valid_after_use = getattr(
settings, RELATE_TICKET_MINUTES_VALID_AFTER_USE, None)
if relate_ticket_minutes_valid_after_use is not None:
if not isinstance(relate_ticket_minutes_valid_after_use, (int, float)):
errors.append(RelateCriticalCheckMessage(
msg=(INSTANCE_ERROR_PATTERN
% {"location": RELATE_TICKET_MINUTES_VALID_AFTER_USE,
"types": "int or float"}),
id="relate_ticket_minutes_valid_after_use.E001")
)
else:
if relate_ticket_minutes_valid_after_use < 0:
errors.append(RelateCriticalCheckMessage(
msg=(
"%(location)s must be a positive number, "
"got %(value)s instead"
% {"location": RELATE_TICKET_MINUTES_VALID_AFTER_USE,
"value": relate_ticket_minutes_valid_after_use}),
id="relate_ticket_minutes_valid_after_use.E002")
)
# }}}
# {{{ check GIT_ROOT
git_root = getattr(settings, GIT_ROOT, None)
if git_root is None:
errors.append(RelateCriticalCheckMessage(
msg=REQUIRED_CONF_ERROR_PATTERN % {"location": GIT_ROOT},
id="git_root.E001"
))
elif not isinstance(git_root, str):
errors.append(RelateCriticalCheckMessage(
msg=INSTANCE_ERROR_PATTERN % {"location": GIT_ROOT, "types": "str"},
id="git_root.E002"
))
else:
if not os.path.isdir(git_root):
errors.append(RelateCriticalCheckMessage(
msg=("`%(path)s` configured in %(location)s is not a valid path"
% {"path": git_root, "location": GIT_ROOT}),
id="git_root.E003"
))
else:
if not os.access(git_root, os.W_OK):
errors.append(RelateCriticalCheckMessage(
msg=("`%(path)s` configured in %(location)s is not writable "
"by RELATE"
% {"path": git_root, "location": GIT_ROOT}),
id="git_root.E004"
))
if not os.access(git_root, os.R_OK):
errors.append(RelateCriticalCheckMessage(
msg=("`%(path)s` configured in %(location)s is not readable "
"by RELATE"
% {"path": git_root, "location": GIT_ROOT}),
id="git_root.E005"
))
# }}}
# {{{ check RELATE_BULK_STORAGE
bulk_storage = getattr(settings, RELATE_BULK_STORAGE, None)
from django.core.files.storage import Storage
if bulk_storage is None:
errors.append(RelateCriticalCheckMessage(
msg=REQUIRED_CONF_ERROR_PATTERN % {
"location": RELATE_BULK_STORAGE},
id="bulk_storage.E001"
))
elif not isinstance(bulk_storage, Storage):
errors.append(RelateCriticalCheckMessage(
msg=INSTANCE_ERROR_PATTERN % {
"location": RELATE_BULK_STORAGE, "types": "Storage"},
id="bulk_storage.E002"
))
# }}}
# {{{ check RELATE_DISABLE_CODEHILITE_MARKDOWN_EXTENSION
relate_disable_codehilite_markdown_extension = getattr(
settings, RELATE_DISABLE_CODEHILITE_MARKDOWN_EXTENSION, None)
if relate_disable_codehilite_markdown_extension is not None:
if not isinstance(relate_disable_codehilite_markdown_extension, bool):
errors.append(
Warning(
msg="%(location)s is not a Boolean value: `%(value)s`, "
"assuming True"
% {"location":
RELATE_DISABLE_CODEHILITE_MARKDOWN_EXTENSION,
"value":
repr(relate_disable_codehilite_markdown_extension)},
id="relate_disable_codehilite_markdown_extension.W001"))
elif not relate_disable_codehilite_markdown_extension:
errors.append(
Warning(
msg="%(location)s is set to False "
"(with 'markdown.extensions.codehilite' enabled'), "
"noticing that some pages with code fence markdown "
"might crash"
% {"location":
RELATE_DISABLE_CODEHILITE_MARKDOWN_EXTENSION,
},
id="relate_disable_codehilite_markdown_extension.W002"))
# }}}
# {{{ check LANGUAGES, why this is not done in django?
languages = settings.LANGUAGES
from django.utils.itercompat import is_iterable
if (isinstance(languages, str)
or not is_iterable(languages)):
errors.append(RelateCriticalCheckMessage(
msg=(INSTANCE_ERROR_PATTERN
% {"location": LANGUAGES,
"types": "an iterable (e.g., a list or tuple)."}),
id="relate_languages.E001")
)
else:
if any(isinstance(choice, str)
or not is_iterable(choice) or len(choice) != 2
for choice in languages):
errors.append(RelateCriticalCheckMessage(
msg=("'%s' must be an iterable containing "
"(language code, language description) tuples, just "
"like the format of LANGUAGES setting ("
"https://docs.djangoproject.com/en/dev/ref/settings/"
"#languages)" % LANGUAGES),
id="relate_languages.E002")
)
else:
from collections import OrderedDict
options_dict = OrderedDict(tuple(settings.LANGUAGES))
all_lang_codes = [lang_code for lang_code, lang_descr
in tuple(settings.LANGUAGES)]
for lang_code in options_dict.keys():
if all_lang_codes.count(lang_code) > 1:
errors.append(Warning(
msg=(
"Duplicate language entries were found in "
"settings.LANGUAGES for '%s', '%s' will be used "
"as its language_description"
% (lang_code, options_dict[lang_code])),
id="relate_languages.W001"
))
# }}}
# {{{ check RELATE_SITE_NAME
try:
site_name = settings.RELATE_SITE_NAME
if site_name is None:
errors.append(
RelateCriticalCheckMessage(
msg=("%s must not be None" % RELATE_SITE_NAME),
id="relate_site_name.E002")
)
else:
if not isinstance(site_name, str):
errors.append(RelateCriticalCheckMessage(
msg=(INSTANCE_ERROR_PATTERN
% {"location": "%s/%s" % (RELATE_SITE_NAME,
RELATE_CUTOMIZED_SITE_NAME),
"types": "string"}),
id="relate_site_name.E003"))
elif not site_name.strip():
errors.append(RelateCriticalCheckMessage(
msg=("%s must not be an empty string" % RELATE_SITE_NAME),
id="relate_site_name.E004"))
except AttributeError:
# This happens when RELATE_SITE_NAME is DELETED from settings.
errors.append(
RelateCriticalCheckMessage(
msg=(REQUIRED_CONF_ERROR_PATTERN
% {"location": RELATE_SITE_NAME}),
id="relate_site_name.E001")
)
# }}}
# {{{ check RELATE_OVERRIDE_TEMPLATES_DIRS
relate_override_templates_dirs = getattr(settings,
RELATE_OVERRIDE_TEMPLATES_DIRS, None)
if relate_override_templates_dirs is not None:
if (isinstance(relate_override_templates_dirs, str)
or not is_iterable(relate_override_templates_dirs)):
errors.append(RelateCriticalCheckMessage(
msg=(INSTANCE_ERROR_PATTERN
% {"location": RELATE_OVERRIDE_TEMPLATES_DIRS,
"types": "an iterable (e.g., a list or tuple)."}),
id="relate_override_templates_dirs.E001"))
else:
if any(not isinstance(directory, str)
for directory in relate_override_templates_dirs):
errors.append(RelateCriticalCheckMessage(
msg=("'%s' must contain only string of paths."
% RELATE_OVERRIDE_TEMPLATES_DIRS),
id="relate_override_templates_dirs.E002"))
else:
for directory in relate_override_templates_dirs:
if not os.path.isdir(directory):
errors.append(
Warning(
msg=(
"Invalid Templates Dirs item '%s' in '%s', "
"it will be ignored."
% (directory, RELATE_OVERRIDE_TEMPLATES_DIRS)),
id="relate_override_templates_dirs.W001"
))
# }}}
# {{{ check RELATE_CUSTOM_PAGE_TYPES_REMOVED_DEADLINE
relate_custom_page_types_removed_deadline = getattr(
settings, RELATE_CUSTOM_PAGE_TYPES_REMOVED_DEADLINE, None)
if relate_custom_page_types_removed_deadline is not None:
from datetime import datetime
if not isinstance(relate_custom_page_types_removed_deadline, datetime):
errors.append(RelateCriticalCheckMessage(
msg=(INSTANCE_ERROR_PATTERN
% {"location": RELATE_CUSTOM_PAGE_TYPES_REMOVED_DEADLINE,
"types": "datetime.datetime"}),
id="relate_custom_page_types_removed_deadline.E001"))
# }}}
return errors
def register_startup_checks():
    """Register RELATE's built-in settings check with Django's system
    check framework, under the startup-check tag."""
    register(check_relate_settings, RELATE_STARTUP_CHECKS_TAG)
def register_startup_checks_extra():
    """Register user-supplied startup checks.

    Configuration problems found while importing the user's checks must be
    reported by raising ``ImproperlyConfigured`` directly: system checks
    (including ``check_relate_settings``) only run after
    ``AppConfig.ready()`` has finished.
    """
    extra_checks = getattr(settings, RELATE_STARTUP_CHECKS_EXTRA, None)
    if extra_checks is None:
        return
    if not isinstance(extra_checks, (list, tuple)):
        raise ImproperlyConfigured(
            INSTANCE_ERROR_PATTERN
            % {"location": RELATE_STARTUP_CHECKS_EXTRA,
               "types": "list or tuple"
               }
        )
    for dotted_path in extra_checks:
        try:
            check_func = import_string(dotted_path)
        except Exception as exc:
            raise ImproperlyConfigured(
                GENERIC_ERROR_PATTERN
                % {
                    "location": RELATE_STARTUP_CHECKS_EXTRA,
                    "error_type": type(exc).__name__,
                    "error_str": str(exc)
                })
        else:
            register(check_func, RELATE_STARTUP_CHECKS_EXTRA_TAG)
# vim: foldmethod=marker
| 43.779174 | 83 | 0.536518 |
from __future__ import division
__copyright__ = "Copyright (C) 2017 Dong Zhuang"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
from django.conf import settings
from django.core.checks import Critical, Warning, register
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import import_string
REQUIRED_CONF_ERROR_PATTERN = (
"You must configure %(location)s for RELATE to run properly.")
INSTANCE_ERROR_PATTERN = "%(location)s must be an instance of %(types)s."
GENERIC_ERROR_PATTERN = "Error in '%(location)s': %(error_type)s: %(error_str)s"
USE_I18N = "USE_I18N"
LANGUAGES = "LANGUAGES"
RELATE_SITE_NAME = "RELATE_SITE_NAME"
RELATE_CUTOMIZED_SITE_NAME = "RELATE_CUTOMIZED_SITE_NAME"
RELATE_OVERRIDE_TEMPLATES_DIRS = "RELATE_OVERRIDE_TEMPLATES_DIRS"
EMAIL_CONNECTIONS = "EMAIL_CONNECTIONS"
RELATE_BASE_URL = "RELATE_BASE_URL"
RELATE_FACILITIES = "RELATE_FACILITIES"
RELATE_MAINTENANCE_MODE_EXCEPTIONS = "RELATE_MAINTENANCE_MODE_EXCEPTIONS"
RELATE_SESSION_RESTART_COOLDOWN_SECONDS = "RELATE_SESSION_RESTART_COOLDOWN_SECONDS"
RELATE_TICKET_MINUTES_VALID_AFTER_USE = "RELATE_TICKET_MINUTES_VALID_AFTER_USE"
GIT_ROOT = "GIT_ROOT"
RELATE_BULK_STORAGE = "RELATE_BULK_STORAGE"
RELATE_STARTUP_CHECKS = "RELATE_STARTUP_CHECKS"
RELATE_STARTUP_CHECKS_EXTRA = "RELATE_STARTUP_CHECKS_EXTRA"
RELATE_STARTUP_CHECKS_TAG = "start_up_check"
RELATE_STARTUP_CHECKS_EXTRA_TAG = "startup_checks_extra"
RELATE_DISABLE_CODEHILITE_MARKDOWN_EXTENSION = (
"RELATE_DISABLE_CODEHILITE_MARKDOWN_EXTENSION")
RELATE_CUSTOM_PAGE_TYPES_REMOVED_DEADLINE = (
"RELATE_CUSTOM_PAGE_TYPES_REMOVED_DEADLINE")
class RelateCriticalCheckMessage(Critical):
def __init__(self, *args, **kwargs):
super(RelateCriticalCheckMessage, self).__init__(*args, **kwargs)
self.obj = self.obj or ImproperlyConfigured.__name__
class DeprecatedException(Exception):
pass
def get_ip_network(ip_range):
import ipaddress
return ipaddress.ip_network(str(ip_range))
def check_relate_settings(app_configs, **kwargs):
errors = []
relate_base_url = getattr(settings, RELATE_BASE_URL, None)
if relate_base_url is None:
errors.append(RelateCriticalCheckMessage(
msg=REQUIRED_CONF_ERROR_PATTERN % {"location": RELATE_BASE_URL},
id="relate_base_url.E001"
))
elif not isinstance(relate_base_url, str):
errors.append(RelateCriticalCheckMessage(
msg=(INSTANCE_ERROR_PATTERN
% {"location": RELATE_BASE_URL, "types": "str"}),
id="relate_base_url.E002"
))
elif not relate_base_url.strip():
errors.append(RelateCriticalCheckMessage(
msg="%(location)s should not be an empty string"
% {"location": RELATE_BASE_URL},
id="relate_base_url.E003"
))
from accounts.utils import relate_user_method_settings
errors.extend(
relate_user_method_settings.check_email_appellation_priority_list())
errors.extend(relate_user_method_settings.check_custom_full_name_method())
errors.extend(relate_user_method_settings.check_user_profile_mask_method())
email_connections = getattr(settings, EMAIL_CONNECTIONS, None)
if email_connections is not None:
if not isinstance(email_connections, dict):
errors.append(RelateCriticalCheckMessage(
msg=(
INSTANCE_ERROR_PATTERN
% {"location": EMAIL_CONNECTIONS,
"types": "dict"}),
id="email_connections.E001"
))
else:
for label, c in email_connections.items():
if not isinstance(c, dict):
errors.append(RelateCriticalCheckMessage(
msg=(
INSTANCE_ERROR_PATTERN
% {"location": "'%s' in '%s'"
% (label, EMAIL_CONNECTIONS),
"types": "dict"}),
id="email_connections.E002"
))
else:
if "backend" in c:
try:
import_string(c["backend"])
except ImportError as e:
errors.append(RelateCriticalCheckMessage(
msg=(
GENERIC_ERROR_PATTERN
% {
"location":
"'%s' in %s"
% (label, RELATE_FACILITIES),
"error_type": type(e).__name__,
"error_str": str(e)
}),
id="email_connections.E003")
)
relate_facilities_conf = getattr(settings, RELATE_FACILITIES, None)
if relate_facilities_conf is not None:
from course.utils import get_facilities_config
try:
facilities = get_facilities_config()
except Exception as e:
errors.append(RelateCriticalCheckMessage(
msg=(
GENERIC_ERROR_PATTERN
% {
"location": RELATE_FACILITIES,
"error_type": type(e).__name__,
"error_str": str(e)
}),
id="relate_facilities.E001")
)
else:
if not isinstance(facilities, dict):
errors.append(RelateCriticalCheckMessage(
msg=(
"'%(location)s' must either be or return a dictionary"
% {"location": RELATE_FACILITIES}),
id="relate_facilities.E002")
)
else:
for facility, conf in facilities.items():
if not isinstance(conf, dict):
errors.append(RelateCriticalCheckMessage(
msg=(
INSTANCE_ERROR_PATTERN
% {"location":
"Facility `%s` in %s"
% (facility, RELATE_FACILITIES),
"types": "dict"}),
id="relate_facilities.E003")
)
else:
ip_ranges = conf.get("ip_ranges", [])
if ip_ranges:
if not isinstance(ip_ranges, (list, tuple)):
errors.append(RelateCriticalCheckMessage(
msg=(
INSTANCE_ERROR_PATTERN
% {"location":
"'ip_ranges' in facility `%s` in %s"
% (facilities, RELATE_FACILITIES),
"types": "list or tuple"}),
id="relate_facilities.E004")
)
else:
for ip_range in ip_ranges:
try:
get_ip_network(ip_range)
except Exception as e:
errors.append(RelateCriticalCheckMessage(
msg=(
GENERIC_ERROR_PATTERN
% {
"location":
"'ip_ranges' in "
"facility `%s` in %s"
% (facility,
RELATE_FACILITIES),
"error_type": type(e).__name__,
"error_str": str(e)
}),
id="relate_facilities.E005")
)
else:
if not callable(relate_facilities_conf):
errors.append(Warning(
msg=(
"Faclity `%s` in %s is an open facility "
"as it has no configured `ip_ranges`"
% (facility, RELATE_FACILITIES)
),
id="relate_facilities.W001"
))
relate_maintenance_mode_exceptions = getattr(
settings, RELATE_MAINTENANCE_MODE_EXCEPTIONS, None)
if relate_maintenance_mode_exceptions is not None:
if not isinstance(relate_maintenance_mode_exceptions, (list, tuple)):
errors.append(RelateCriticalCheckMessage(
msg=(INSTANCE_ERROR_PATTERN
% {"location": RELATE_MAINTENANCE_MODE_EXCEPTIONS,
"types": "list or tuple"}),
id="relate_maintenance_mode_exceptions.E001")
)
else:
for ip in relate_maintenance_mode_exceptions:
try:
get_ip_network(ip)
except Exception as e:
errors.append(RelateCriticalCheckMessage(
msg=(
GENERIC_ERROR_PATTERN
% {"location":
"ip/ip_ranges '%s' in %s"
% (ip, RELATE_FACILITIES),
"error_type": type(e).__name__,
"error_str": str(e)
}),
id="relate_maintenance_mode_exceptions.E002")
)
relate_session_restart_cooldown_seconds = getattr(
settings, RELATE_SESSION_RESTART_COOLDOWN_SECONDS, None)
if relate_session_restart_cooldown_seconds is not None:
if not isinstance(relate_session_restart_cooldown_seconds, (int, float)):
errors.append(RelateCriticalCheckMessage(
msg=(INSTANCE_ERROR_PATTERN
% {"location": RELATE_SESSION_RESTART_COOLDOWN_SECONDS,
"types": "int or float"}),
id="relate_session_restart_cooldown_seconds.E001")
)
else:
if relate_session_restart_cooldown_seconds < 0:
errors.append(RelateCriticalCheckMessage(
msg=(
"%(location)s must be a positive number, "
"got %(value)s instead"
% {"location": RELATE_SESSION_RESTART_COOLDOWN_SECONDS,
"value": relate_session_restart_cooldown_seconds}),
id="relate_session_restart_cooldown_seconds.E002")
)
relate_ticket_minutes_valid_after_use = getattr(
settings, RELATE_TICKET_MINUTES_VALID_AFTER_USE, None)
if relate_ticket_minutes_valid_after_use is not None:
if not isinstance(relate_ticket_minutes_valid_after_use, (int, float)):
errors.append(RelateCriticalCheckMessage(
msg=(INSTANCE_ERROR_PATTERN
% {"location": RELATE_TICKET_MINUTES_VALID_AFTER_USE,
"types": "int or float"}),
id="relate_ticket_minutes_valid_after_use.E001")
)
else:
if relate_ticket_minutes_valid_after_use < 0:
errors.append(RelateCriticalCheckMessage(
msg=(
"%(location)s must be a positive number, "
"got %(value)s instead"
% {"location": RELATE_TICKET_MINUTES_VALID_AFTER_USE,
"value": relate_ticket_minutes_valid_after_use}),
id="relate_ticket_minutes_valid_after_use.E002")
)
git_root = getattr(settings, GIT_ROOT, None)
if git_root is None:
errors.append(RelateCriticalCheckMessage(
msg=REQUIRED_CONF_ERROR_PATTERN % {"location": GIT_ROOT},
id="git_root.E001"
))
elif not isinstance(git_root, str):
errors.append(RelateCriticalCheckMessage(
msg=INSTANCE_ERROR_PATTERN % {"location": GIT_ROOT, "types": "str"},
id="git_root.E002"
))
else:
if not os.path.isdir(git_root):
errors.append(RelateCriticalCheckMessage(
msg=("`%(path)s` configured in %(location)s is not a valid path"
% {"path": git_root, "location": GIT_ROOT}),
id="git_root.E003"
))
else:
if not os.access(git_root, os.W_OK):
errors.append(RelateCriticalCheckMessage(
msg=("`%(path)s` configured in %(location)s is not writable "
"by RELATE"
% {"path": git_root, "location": GIT_ROOT}),
id="git_root.E004"
))
if not os.access(git_root, os.R_OK):
errors.append(RelateCriticalCheckMessage(
msg=("`%(path)s` configured in %(location)s is not readable "
"by RELATE"
% {"path": git_root, "location": GIT_ROOT}),
id="git_root.E005"
))
bulk_storage = getattr(settings, RELATE_BULK_STORAGE, None)
from django.core.files.storage import Storage
if bulk_storage is None:
errors.append(RelateCriticalCheckMessage(
msg=REQUIRED_CONF_ERROR_PATTERN % {
"location": RELATE_BULK_STORAGE},
id="bulk_storage.E001"
))
elif not isinstance(bulk_storage, Storage):
errors.append(RelateCriticalCheckMessage(
msg=INSTANCE_ERROR_PATTERN % {
"location": RELATE_BULK_STORAGE, "types": "Storage"},
id="bulk_storage.E002"
))
relate_disable_codehilite_markdown_extension = getattr(
settings, RELATE_DISABLE_CODEHILITE_MARKDOWN_EXTENSION, None)
if relate_disable_codehilite_markdown_extension is not None:
if not isinstance(relate_disable_codehilite_markdown_extension, bool):
errors.append(
Warning(
msg="%(location)s is not a Boolean value: `%(value)s`, "
"assuming True"
% {"location":
RELATE_DISABLE_CODEHILITE_MARKDOWN_EXTENSION,
"value":
repr(relate_disable_codehilite_markdown_extension)},
id="relate_disable_codehilite_markdown_extension.W001"))
elif not relate_disable_codehilite_markdown_extension:
errors.append(
Warning(
msg="%(location)s is set to False "
"(with 'markdown.extensions.codehilite' enabled'), "
"noticing that some pages with code fence markdown "
"might crash"
% {"location":
RELATE_DISABLE_CODEHILITE_MARKDOWN_EXTENSION,
},
id="relate_disable_codehilite_markdown_extension.W002"))
# }}}
# {{{ check LANGUAGES, why this is not done in django?
languages = settings.LANGUAGES
from django.utils.itercompat import is_iterable
if (isinstance(languages, str)
or not is_iterable(languages)):
errors.append(RelateCriticalCheckMessage(
msg=(INSTANCE_ERROR_PATTERN
% {"location": LANGUAGES,
"types": "an iterable (e.g., a list or tuple)."}),
id="relate_languages.E001")
)
else:
if any(isinstance(choice, str)
or not is_iterable(choice) or len(choice) != 2
for choice in languages):
errors.append(RelateCriticalCheckMessage(
msg=("'%s' must be an iterable containing "
"(language code, language description) tuples, just "
"like the format of LANGUAGES setting ("
"https://docs.djangoproject.com/en/dev/ref/settings/"
"#languages)" % LANGUAGES),
id="relate_languages.E002")
)
else:
from collections import OrderedDict
options_dict = OrderedDict(tuple(settings.LANGUAGES))
all_lang_codes = [lang_code for lang_code, lang_descr
in tuple(settings.LANGUAGES)]
for lang_code in options_dict.keys():
if all_lang_codes.count(lang_code) > 1:
errors.append(Warning(
msg=(
"Duplicate language entries were found in "
"settings.LANGUAGES for '%s', '%s' will be used "
"as its language_description"
% (lang_code, options_dict[lang_code])),
id="relate_languages.W001"
))
# }}}
# {{{ check RELATE_SITE_NAME
try:
site_name = settings.RELATE_SITE_NAME
if site_name is None:
errors.append(
RelateCriticalCheckMessage(
msg=("%s must not be None" % RELATE_SITE_NAME),
id="relate_site_name.E002")
)
else:
if not isinstance(site_name, str):
errors.append(RelateCriticalCheckMessage(
msg=(INSTANCE_ERROR_PATTERN
% {"location": "%s/%s" % (RELATE_SITE_NAME,
RELATE_CUTOMIZED_SITE_NAME),
"types": "string"}),
id="relate_site_name.E003"))
elif not site_name.strip():
errors.append(RelateCriticalCheckMessage(
msg=("%s must not be an empty string" % RELATE_SITE_NAME),
id="relate_site_name.E004"))
except AttributeError:
# This happens when RELATE_SITE_NAME is DELETED from settings.
errors.append(
RelateCriticalCheckMessage(
msg=(REQUIRED_CONF_ERROR_PATTERN
% {"location": RELATE_SITE_NAME}),
id="relate_site_name.E001")
)
# }}}
# {{{ check RELATE_OVERRIDE_TEMPLATES_DIRS
relate_override_templates_dirs = getattr(settings,
RELATE_OVERRIDE_TEMPLATES_DIRS, None)
if relate_override_templates_dirs is not None:
if (isinstance(relate_override_templates_dirs, str)
or not is_iterable(relate_override_templates_dirs)):
errors.append(RelateCriticalCheckMessage(
msg=(INSTANCE_ERROR_PATTERN
% {"location": RELATE_OVERRIDE_TEMPLATES_DIRS,
"types": "an iterable (e.g., a list or tuple)."}),
id="relate_override_templates_dirs.E001"))
else:
if any(not isinstance(directory, str)
for directory in relate_override_templates_dirs):
errors.append(RelateCriticalCheckMessage(
msg=("'%s' must contain only string of paths."
% RELATE_OVERRIDE_TEMPLATES_DIRS),
id="relate_override_templates_dirs.E002"))
else:
for directory in relate_override_templates_dirs:
if not os.path.isdir(directory):
errors.append(
Warning(
msg=(
"Invalid Templates Dirs item '%s' in '%s', "
"it will be ignored."
% (directory, RELATE_OVERRIDE_TEMPLATES_DIRS)),
id="relate_override_templates_dirs.W001"
))
# }}}
# {{{ check RELATE_CUSTOM_PAGE_TYPES_REMOVED_DEADLINE
relate_custom_page_types_removed_deadline = getattr(
settings, RELATE_CUSTOM_PAGE_TYPES_REMOVED_DEADLINE, None)
if relate_custom_page_types_removed_deadline is not None:
from datetime import datetime
if not isinstance(relate_custom_page_types_removed_deadline, datetime):
errors.append(RelateCriticalCheckMessage(
msg=(INSTANCE_ERROR_PATTERN
% {"location": RELATE_CUSTOM_PAGE_TYPES_REMOVED_DEADLINE,
"types": "datetime.datetime"}),
id="relate_custom_page_types_removed_deadline.E001"))
# }}}
return errors
def register_startup_checks():
register(check_relate_settings, RELATE_STARTUP_CHECKS_TAG)
def register_startup_checks_extra():
startup_checks_extra = getattr(settings, RELATE_STARTUP_CHECKS_EXTRA, None)
if startup_checks_extra is not None:
if not isinstance(startup_checks_extra, (list, tuple)):
raise ImproperlyConfigured(
INSTANCE_ERROR_PATTERN
% {"location": RELATE_STARTUP_CHECKS_EXTRA,
"types": "list or tuple"
}
)
for c in startup_checks_extra:
try:
check_item = import_string(c)
except Exception as e:
raise ImproperlyConfigured(
GENERIC_ERROR_PATTERN
% {
"location": RELATE_STARTUP_CHECKS_EXTRA,
"error_type": type(e).__name__,
"error_str": str(e)
})
else:
register(check_item, RELATE_STARTUP_CHECKS_EXTRA_TAG)
# vim: foldmethod=marker
| true | true |
f7f9a8288fcf6d5609e2107fcf8ce75c377db132 | 1,000 | py | Python | src/split_train_test.py | ibraheem-moosa/protein-asa-prediction | d9e49ec70008a41387739651a1e66798c0c2d5a6 | [
"MIT"
] | null | null | null | src/split_train_test.py | ibraheem-moosa/protein-asa-prediction | d9e49ec70008a41387739651a1e66798c0c2d5a6 | [
"MIT"
] | null | null | null | src/split_train_test.py | ibraheem-moosa/protein-asa-prediction | d9e49ec70008a41387739651a1e66798c0c2d5a6 | [
"MIT"
] | null | null | null | import random
from itertools import chain
def split_train_test_without_chaining(proteins, asas, train_ratio):
    """Randomly partition the parallel protein/ASA lists into train and test
    splits, keeping each protein's sequences nested (un-flattened).

    Returns ``(train_proteins, train_asas, test_proteins, test_asas)``; the
    first ``train_ratio`` fraction of a shuffled index order goes to training.
    """
    order = list(range(len(proteins)))
    random.shuffle(order)
    cut = int(train_ratio * len(order))
    train_idx, test_idx = order[:cut], order[cut:]
    return ([proteins[i] for i in train_idx],
            [asas[i] for i in train_idx],
            [proteins[i] for i in test_idx],
            [asas[i] for i in test_idx])
def split_train_test(proteins, asas, train_ratio):
    """Randomly split the parallel per-protein sequences into train/test
    sets, then flatten each of the four resulting lists to residue level."""
    parts = split_train_test_without_chaining(proteins, asas, train_ratio)
    # chain(*seqs) walks every element of every per-protein sequence in order
    return tuple(list(chain(*part)) for part in parts)
| 41.666667 | 121 | 0.753 | import random
from itertools import chain
def split_train_test_without_chaining(proteins, asas, train_ratio):
indices = list(range(len(proteins)))
random.shuffle(indices)
train_indices = indices[:int(train_ratio * len(indices))]
test_indices = indices[int(train_ratio * len(indices)):]
train_proteins = [proteins[i] for i in train_indices]
train_asas = [asas[i] for i in train_indices]
test_proteins = [proteins[i] for i in test_indices]
test_asas = [asas[i] for i in test_indices]
return train_proteins, train_asas, test_proteins, test_asas
def split_train_test(proteins, asas, train_ratio):
train_proteins, train_asas, test_proteins, test_asas = split_train_test_without_chaining(proteins, asas, train_ratio)
train_proteins = list(chain(*train_proteins))
train_asas = list(chain(*train_asas))
test_proteins = list(chain(*test_proteins))
test_asas = list(chain(*test_asas))
return train_proteins, train_asas, test_proteins, test_asas
| true | true |
f7f9a88d99d63b1b98b730f1b0d9d97ffdb62133 | 3,207 | py | Python | whiskyton/models.py | gabrielusvicente/whiskyton | 2c3781120c44aced68ef0770f44c880491ff8cf0 | [
"MIT"
] | null | null | null | whiskyton/models.py | gabrielusvicente/whiskyton | 2c3781120c44aced68ef0770f44c880491ff8cf0 | [
"MIT"
] | 1 | 2021-03-26T00:32:58.000Z | 2021-03-26T00:32:58.000Z | whiskyton/models.py | gabrielusvicente/whiskyton | 2c3781120c44aced68ef0770f44c880491ff8cf0 | [
"MIT"
] | null | null | null | # coding: utf-8
from re import compile
from whiskyton import app, db
class Whisky(db.Model):
    """A single-malt distillery together with its taste-profile scores."""

    id = db.Column(db.Integer, primary_key=True)
    distillery = db.Column(db.String(64), index=True, unique=True)
    slug = db.Column(db.String(64), index=True, unique=True)
    body = db.Column(db.Integer)
    sweetness = db.Column(db.Integer)
    smoky = db.Column(db.Integer)
    medicinal = db.Column(db.Integer)
    tobacco = db.Column(db.Integer)
    honey = db.Column(db.Integer)
    spicy = db.Column(db.Integer)
    winey = db.Column(db.Integer)
    nutty = db.Column(db.Integer)
    malty = db.Column(db.Integer)
    fruity = db.Column(db.Integer)
    floral = db.Column(db.Integer)
    postcode = db.Column(db.String(16))
    latitude = db.Column(db.Integer)
    longitude = db.Column(db.Integer)
    views = db.Column(db.Integer)

    def __repr__(self):
        return '<Distillery: {}>'.format(self.distillery)

    def get_tastes(self):
        """Return this whisky's taste scores as strings, in the order given
        by ``app.config['TASTES']`` (missing attributes become ``'None'``).

        :return: (list of strings) tastes of the whisky
        """
        return [str(getattr(self, taste_name, None))
                for taste_name in app.config['TASTES']]

    def get_slug(self):
        """Return a slug: the distillery name lower-cased with every
        non-letter character removed.

        :return: (string) slugified distillery name
        """
        return compile('[^a-z]+').sub('', self.distillery.lower())

    def get_correlation(self, comparison):
        """Return the ids of both whiskies and their correlation index.

        :param comparison: (whiskyton.models.Whisky) whisky to compare with
        :return: (dict) ids of reference/whisky and their Pearson ``r``
        """
        r_value = self.__pearson_r(self.get_tastes(), comparison.get_tastes())
        return {'reference': self.id, 'whisky': comparison.id, 'r': r_value}

    @staticmethod
    def __pearson_r(x, y):
        """Pearson correlation coefficient of two equal-length taste vectors
        given as sequences of numeric strings; 0 when undefined.

        :param x: (list of strings) tastes of a whisky
        :param y: (list of strings) tastes of a whisky
        :return: (float) index of correlation
        """
        xs = [float(value) for value in x]
        ys = [float(value) for value in y]
        n = len(xs)
        sum_x, sum_y = sum(xs), sum(ys)
        covariance = sum(a * b for a, b in zip(xs, ys)) - (sum_x * sum_y) / n
        spread_x = sum(a ** 2 for a in xs) - (sum_x ** 2) / n
        spread_y = sum(b ** 2 for b in ys) - (sum_y ** 2) / n
        try:
            return covariance / ((spread_x * spread_y) ** 0.5)
        except ZeroDivisionError:
            return 0
class Correlation(db.Model):
    """Cached pairwise Pearson correlation between two whiskies."""

    id = db.Column(db.Integer, primary_key=True)
    # id of the whisky taken as the reference side of the comparison
    reference = db.Column(db.Integer, index=True)
    # id of the other whisky in the pair (FK to whisky.id)
    whisky = db.Column(db.Integer, db.ForeignKey('whisky.id'))
    # Pearson correlation coefficient between the two taste profiles
    r = db.Column(db.Float, index=True)

    def __repr__(self):
        return '<Correlation: {}>'.format(self.r)
| 33.40625 | 78 | 0.598067 |
from re import compile
from whiskyton import app, db
class Whisky(db.Model):
id = db.Column(db.Integer, primary_key=True)
distillery = db.Column(db.String(64), index=True, unique=True)
slug = db.Column(db.String(64), index=True, unique=True)
body = db.Column(db.Integer)
sweetness = db.Column(db.Integer)
smoky = db.Column(db.Integer)
medicinal = db.Column(db.Integer)
tobacco = db.Column(db.Integer)
honey = db.Column(db.Integer)
spicy = db.Column(db.Integer)
winey = db.Column(db.Integer)
nutty = db.Column(db.Integer)
malty = db.Column(db.Integer)
fruity = db.Column(db.Integer)
floral = db.Column(db.Integer)
postcode = db.Column(db.String(16))
latitude = db.Column(db.Integer)
longitude = db.Column(db.Integer)
views = db.Column(db.Integer)
def __repr__(self):
return '<Distillery: {}>'.format(self.distillery)
def get_tastes(self):
tastes = app.config['TASTES']
return [str(getattr(self, taste, None)) for taste in tastes]
def get_slug(self):
regex = compile('[^a-z]+')
return regex.sub('', self.distillery.lower())
def get_correlation(self, comparison):
return {
'reference': self.id,
'whisky': comparison.id,
'r': self.__pearson_r(self.get_tastes(), comparison.get_tastes())
}
@staticmethod
def __pearson_r(x, y):
x = [float(n) for n in x]
y = [float(n) for n in y]
n = len(x)
sum_x = sum(x)
sum_y = sum(y)
sum_x_sq = sum(i ** 2 for i in x)
sum_y_sq = sum(i ** 2 for i in y)
p_sum = sum(i * j for i, j in zip(x, y))
num = p_sum - ((sum_x * sum_y) / n)
multiplier_1 = sum_x_sq - ((sum_x ** 2) / n)
multiplier_2 = sum_y_sq - ((sum_y ** 2) / n)
den = (multiplier_1 * multiplier_2) ** 0.5
try:
return num / den
except ZeroDivisionError:
return 0
class Correlation(db.Model):
id = db.Column(db.Integer, primary_key=True)
reference = db.Column(db.Integer, index=True)
whisky = db.Column(db.Integer, db.ForeignKey('whisky.id'))
r = db.Column(db.Float, index=True)
def __repr__(self):
return '<Correlation: {}>'.format(self.r)
| true | true |
f7f9a9e1f30286d4b9732e44006a015568c7fa31 | 640 | py | Python | tests/utils/test_utils_udf.py | annelhote/fonduer | bd5b1feebfb2860286ae8b5a520b24baa023b445 | [
"MIT"
] | 379 | 2018-03-29T10:06:02.000Z | 2022-03-30T22:51:43.000Z | tests/utils/test_utils_udf.py | annelhote/fonduer | bd5b1feebfb2860286ae8b5a520b24baa023b445 | [
"MIT"
] | 328 | 2018-03-29T03:34:46.000Z | 2021-09-02T21:24:41.000Z | tests/utils/test_utils_udf.py | annelhote/fonduer | bd5b1feebfb2860286ae8b5a520b24baa023b445 | [
"MIT"
] | 86 | 2018-03-30T12:56:33.000Z | 2022-01-12T09:10:48.000Z | """Fonduer UDF utils' unit tests."""
import logging
import numpy as np
from fonduer.utils.utils_udf import shift_label_matrix, unshift_label_matrix
def test_shift_label_matrix(caplog):
    """Round-trip a label matrix through shift/unshift and check sparsity."""
    caplog.set_level(logging.INFO)
    # Dense label matrix with ABSTAIN encoded as -1:
    #   -1  0
    #    1 -1
    dense = np.array([[-1, 0], [1, -1]])
    # After shifting, ABSTAIN becomes 0 (sparse-friendly encoding):
    #    0  1
    #    2  0
    sparse = shift_label_matrix(dense)
    # Unshifting must restore the original dense encoding exactly.
    assert np.array_equal(dense, unshift_label_matrix(sparse))
    # Only the two non-ABSTAIN entries remain non-zero.
    assert sparse.count_nonzero() == 2
| 23.703704 | 76 | 0.651563 | import logging
import numpy as np
from fonduer.utils.utils_udf import shift_label_matrix, unshift_label_matrix
def test_shift_label_matrix(caplog):
caplog.set_level(logging.INFO)
L = np.array([[-1, 0], [1, -1]])
L_sparse = shift_label_matrix(L)
assert np.array_equal(L, unshift_label_matrix(L_sparse))
assert L_sparse.count_nonzero() == 2
| true | true |
f7f9a9e58ded44b7fc6c7e8ce65bb85b7f475e31 | 1,455 | py | Python | src/transformers/models/__init__.py | ArneBinder/transformers | ddaafd78fb9c98d4f7b5009fb1998deff4c3d6f1 | [
"Apache-2.0"
] | 309 | 2020-02-07T23:09:27.000Z | 2022-03-31T08:01:53.000Z | src/transformers/models/__init__.py | ArneBinder/transformers | ddaafd78fb9c98d4f7b5009fb1998deff4c3d6f1 | [
"Apache-2.0"
] | 93 | 2020-02-22T05:56:28.000Z | 2022-03-27T08:43:38.000Z | src/transformers/models/__init__.py | ArneBinder/transformers | ddaafd78fb9c98d4f7b5009fb1998deff4c3d6f1 | [
"Apache-2.0"
] | 148 | 2020-02-14T22:16:11.000Z | 2022-03-22T17:08:04.000Z | # flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import (
albert,
auto,
bart,
barthez,
bert,
bert_generation,
bert_japanese,
bertweet,
blenderbot,
blenderbot_small,
camembert,
convbert,
ctrl,
deberta,
dialogpt,
distilbert,
dpr,
electra,
encoder_decoder,
flaubert,
fsmt,
funnel,
gpt2,
herbert,
layoutlm,
led,
longformer,
lxmert,
marian,
mbart,
mmbt,
mobilebert,
mpnet,
mt5,
openai,
pegasus,
phobert,
prophetnet,
rag,
reformer,
retribert,
roberta,
squeezebert,
t5,
tapas,
transfo_xl,
wav2vec2,
xlm,
xlm_roberta,
xlnet,
)
| 20.492958 | 77 | 0.656357 |
# module, but to preserve other warnings. So, don't check this module at all.
from . import (
albert,
auto,
bart,
barthez,
bert,
bert_generation,
bert_japanese,
bertweet,
blenderbot,
blenderbot_small,
camembert,
convbert,
ctrl,
deberta,
dialogpt,
distilbert,
dpr,
electra,
encoder_decoder,
flaubert,
fsmt,
funnel,
gpt2,
herbert,
layoutlm,
led,
longformer,
lxmert,
marian,
mbart,
mmbt,
mobilebert,
mpnet,
mt5,
openai,
pegasus,
phobert,
prophetnet,
rag,
reformer,
retribert,
roberta,
squeezebert,
t5,
tapas,
transfo_xl,
wav2vec2,
xlm,
xlm_roberta,
xlnet,
)
| true | true |
f7f9aa1264295e425b199cd156872887672fee2f | 1,126 | py | Python | lib/coginvasion/dna/DNALoader.py | theclashingfritz/Cog-Invasion-Online-Dump | 2561abbacb3e2e288e06f3f04b935b5ed589c8f8 | [
"Apache-2.0"
] | 1 | 2020-03-12T16:44:10.000Z | 2020-03-12T16:44:10.000Z | lib/coginvasion/dna/DNALoader.py | theclashingfritz/Cog-Invasion-Online-Dump | 2561abbacb3e2e288e06f3f04b935b5ed589c8f8 | [
"Apache-2.0"
] | null | null | null | lib/coginvasion/dna/DNALoader.py | theclashingfritz/Cog-Invasion-Online-Dump | 2561abbacb3e2e288e06f3f04b935b5ed589c8f8 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.dna.DNALoader
from direct.stdpy import threading
from libpandadna import *
class DNABulkLoader:
    """Loads a batch of DNA files into a shared DNAStorage.

    NOTE(review): this is decompiled (uncompyle6) Python 2 source -- the
    ``print`` statements below are Python-2-only syntax.
    """

    def __init__(self, storage, files):
        # DNAStorage instance that accumulates data from every file
        self.dnaStorage = storage
        # iterable of DNA file paths to be loaded
        self.dnaFiles = files

    def loadDNAFiles(self):
        # Load every queued DNA file into the shared storage, then drop the
        # references so the loader does not keep the storage alive.
        for file in self.dnaFiles:
            print 'Reading DNA file...', file
            loadDNABulk(self.dnaStorage, file)
        del self.dnaStorage
        del self.dnaFiles
def loadDNABulk(dnaStorage, file):
    """Load a single DNA file into *dnaStorage*, discarding the loader's result."""
    loader = DNALoader()
    loader.loadDNAFile(dnaStorage, file)
def loadDNAFile(dnaStorage, file):
    """Load *file* into *dnaStorage* and return the loaded node, if any.

    Returns node.node() (presumably the underlying PandaNode -- confirm against
    libpandadna) only when the loader produced a non-empty node with at least
    one child; otherwise returns None.
    """
    print 'Reading DNA file...', file
    dnaLoader = DNALoader()
    node = dnaLoader.loadDNAFile(dnaStorage, file)
    if not node.isEmpty():
        if node.node().getNumChildren() > 0:
            return node.node()
    # Empty or childless result: fall through and return None.
    return
def loadDNAFileAI(dnaStorage, file):
    """Load *file* for the AI side and return the loader's raw result.

    Unlike loadDNAFile() this performs no scene-graph post-processing.
    """
    # BUGFIX: the original return line had dataset-export residue fused onto it
    # ("return data | 27.463415 | 104 | 0.674067 |"), which is a syntax error.
    dnaLoader = DNALoader()
    data = dnaLoader.loadDNAFileAI(dnaStorage, file)
    return data
from direct.stdpy import threading
from libpandadna import *
class DNABulkLoader:
def __init__(self, storage, files):
self.dnaStorage = storage
self.dnaFiles = files
def loadDNAFiles(self):
for file in self.dnaFiles:
print 'Reading DNA file...', file
loadDNABulk(self.dnaStorage, file)
del self.dnaStorage
del self.dnaFiles
def loadDNABulk(dnaStorage, file):
dnaLoader = DNALoader()
dnaLoader.loadDNAFile(dnaStorage, file)
def loadDNAFile(dnaStorage, file):
print 'Reading DNA file...', file
dnaLoader = DNALoader()
node = dnaLoader.loadDNAFile(dnaStorage, file)
if not node.isEmpty():
if node.node().getNumChildren() > 0:
return node.node()
return
def loadDNAFileAI(dnaStorage, file):
dnaLoader = DNALoader()
data = dnaLoader.loadDNAFileAI(dnaStorage, file)
return data | false | true |
f7f9ab3b2346c9ea8b3904cf6065a172b03d0ea9 | 2,794 | py | Python | api/tests/test_repo_data.py | elisabettai/osparc-simcore | ad7b6e05111b50fe95e49306a992170490a7247f | [
"MIT"
] | null | null | null | api/tests/test_repo_data.py | elisabettai/osparc-simcore | ad7b6e05111b50fe95e49306a992170490a7247f | [
"MIT"
] | 1 | 2021-11-29T13:38:09.000Z | 2021-11-29T13:38:09.000Z | api/tests/test_repo_data.py | mrnicegyu11/osparc-simcore | b6fa6c245dbfbc18cc74a387111a52de9b05d1f4 | [
"MIT"
] | null | null | null | """ Keeps up-to-date all mock data in repo with schemas
"""
# pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
import json
from pathlib import Path
import jsonschema
import pytest
import yaml
from utils import current_repo_dir
# Each schema is kept in two synchronized flavours, one per suffix below.
SYNCED_VERSIONS_SUFFIX = [
    ".json",  # json-schema specs file
    "-converted.yaml",  # equivalent openapi specs file (see scripts/json-schema-to-openapi-schema)
]
# Add here paths to files containing project's data that can be validated with projects schema
PROJECTS_NAMES = [
    "fake-project.json",
    "fake-template-projects.hack08.notebooks.json",
    "fake-template-projects.isan.2dplot.json",
    "fake-template-projects.isan.matward.json",
    "fake-template-projects.isan.paraview.json",
    "fake-template-projects.isan.ucdavis.json",
    "fake-template-projects.sleepers.json",
]
# Paths relative to the repository root; resolved in the test via this_repo_root_dir.
PROJECTS_PATHS = [f"services/web/server/tests/data/{name}" for name in PROJECTS_NAMES]
def _load_data(fpath: Path):
with open(fpath) as fh:
try:
data = json.load(fh)
except json.JSONDecodeError:
fh.seek(0)
data = yaml.safe_load(fh)
return data
@pytest.fixture(
    scope="module",
    params=[
        # One fixture instance per schema flavour found under any schemas/ dir
        # (both the .json and the -converted.yaml variants are validated).
        str(schema_path)
        for suffix in SYNCED_VERSIONS_SUFFIX
        for schema_path in current_repo_dir.rglob(f"schemas/project*{suffix}")
    ],
)
def project_schema(request, api_specs_dir):
    # Loads the parametrized project schema file (JSON or YAML).
    # NOTE(review): api_specs_dir is not used in the body -- presumably kept to
    # force fixture setup/ordering; confirm before removing.
    schema_path = Path(request.param)
    return _load_data(schema_path)
# TESTS --------------------------------------------------
@pytest.mark.parametrize("data_path", PROJECTS_PATHS)
def test_project_against_schema(data_path, project_schema, this_repo_root_dir):
    """
    Both projects and workbench datasets are tested against the project schema
    """
    loaded = _load_data(this_repo_root_dir / data_path)

    # Workbench-only fixtures carry no project envelope, so embed them inside a
    # minimal fake project skeleton before validating.
    if "workbench" in data_path:
        # TODO: Ideally project is faked to a schema.
        # NOTE: tried already `faker-schema` but it does not do the job right
        loaded = {
            "uuid": "eiusmod",
            "name": "minim",
            "description": "ad",
            "prjOwner": "ullamco eu voluptate",
            "creationDate": "8715-11-30T9:1:51.388Z",
            "lastChangeDate": "0944-02-31T5:1:7.795Z",
            "thumbnail": "labore incid",
            "accessRights": {},
            "workbench": loaded["workbench"],
            "ui": {},
            "dev": {},
        }

    assert isinstance(loaded, (list, dict))
    # Normalize to a list of projects, then validate each one.
    projects = loaded if isinstance(loaded, list) else [loaded]
    for project_data in projects:
        jsonschema.validate(project_data, project_schema)
| 29.723404 | 99 | 0.641374 |
import json
from pathlib import Path
import jsonschema
import pytest
import yaml
from utils import current_repo_dir
SYNCED_VERSIONS_SUFFIX = [
".json",
"-converted.yaml",
]
PROJECTS_NAMES = [
"fake-project.json",
"fake-template-projects.hack08.notebooks.json",
"fake-template-projects.isan.2dplot.json",
"fake-template-projects.isan.matward.json",
"fake-template-projects.isan.paraview.json",
"fake-template-projects.isan.ucdavis.json",
"fake-template-projects.sleepers.json",
]
PROJECTS_PATHS = [f"services/web/server/tests/data/{name}" for name in PROJECTS_NAMES]
def _load_data(fpath: Path):
with open(fpath) as fh:
try:
data = json.load(fh)
except json.JSONDecodeError:
fh.seek(0)
data = yaml.safe_load(fh)
return data
@pytest.fixture(
scope="module",
params=[
str(schema_path)
for suffix in SYNCED_VERSIONS_SUFFIX
for schema_path in current_repo_dir.rglob(f"schemas/project*{suffix}")
],
)
def project_schema(request, api_specs_dir):
schema_path = Path(request.param)
return _load_data(schema_path)
# TESTS --------------------------------------------------
@pytest.mark.parametrize("data_path", PROJECTS_PATHS)
def test_project_against_schema(data_path, project_schema, this_repo_root_dir):
data = _load_data(this_repo_root_dir / data_path)
# Adapts workbench-only data: embedds data within a fake project skeleton
if "workbench" in data_path:
# TODO: Ideally project is faked to a schema.
# NOTE: tried already `faker-schema` but it does not do the job right
prj = {
"uuid": "eiusmod",
"name": "minim",
"description": "ad",
"prjOwner": "ullamco eu voluptate",
"creationDate": "8715-11-30T9:1:51.388Z",
"lastChangeDate": "0944-02-31T5:1:7.795Z",
"thumbnail": "labore incid",
"accessRights": {},
"workbench": data["workbench"],
"ui": {},
"dev": {},
}
data = prj
assert any(isinstance(data, _type) for _type in [list, dict])
if isinstance(data, dict):
data = [
data,
]
for project_data in data:
jsonschema.validate(project_data, project_schema)
| true | true |
f7f9abd526cb81b8ef6749da0f96f8ad4f10b3cc | 24,783 | py | Python | chia/farmer/farmer_api.py | nim-lang-cn/chia-blockchain | 02eb24b8d3ebfc65dfa602d88a72a946e97edb70 | [
"Apache-2.0"
] | null | null | null | chia/farmer/farmer_api.py | nim-lang-cn/chia-blockchain | 02eb24b8d3ebfc65dfa602d88a72a946e97edb70 | [
"Apache-2.0"
] | null | null | null | chia/farmer/farmer_api.py | nim-lang-cn/chia-blockchain | 02eb24b8d3ebfc65dfa602d88a72a946e97edb70 | [
"Apache-2.0"
] | null | null | null | import json
import time
from typing import Callable, Optional, List, Any, Dict
import aiohttp
from blspy import AugSchemeMPL, G2Element, PrivateKey
import chia.server.ws_connection as ws
from chia.consensus.network_type import NetworkType
from chia.consensus.pot_iterations import calculate_iterations_quality, calculate_sp_interval_iters
from chia.farmer.farmer import Farmer
from chia.protocols import farmer_protocol, harvester_protocol
from chia.protocols.harvester_protocol import PoolDifficulty
from chia.protocols.pool_protocol import (
get_current_authentication_token,
PoolErrorCode,
PostPartialRequest,
PostPartialPayload,
)
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.server.outbound_message import NodeType, make_msg
from chia.server.server import ssl_context_for_root
from chia.ssl.create_ssl import get_mozilla_ca_crt
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.util.api_decorators import api_request, peer_required
from chia.util.ints import uint32, uint64
class FarmerAPI:
    """Message handlers for the farmer's harvester- and full-node-facing protocols."""

    # The Farmer service instance that every handler operates on.
    farmer: Farmer

    def __init__(self, farmer: Farmer) -> None:
        self.farmer = farmer

    def _set_state_changed_callback(self, callback: Callable):
        """Store *callback* on the farmer as its state_changed_callback."""
        self.farmer.state_changed_callback = callback
@api_request
@peer_required
async def new_proof_of_space(
self, new_proof_of_space: harvester_protocol.NewProofOfSpace, peer: ws.WSChiaConnection
):
"""
This is a response from the harvester, for a NewChallenge. Here we check if the proof
of space is sufficiently good, and if so, we ask for the whole proof.
"""
if new_proof_of_space.sp_hash not in self.farmer.number_of_responses:
self.farmer.number_of_responses[new_proof_of_space.sp_hash] = 0
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
max_pos_per_sp = 5
if self.farmer.constants.NETWORK_TYPE != NetworkType.MAINNET:
# This is meant to make testnets more stable, when difficulty is very low
if self.farmer.number_of_responses[new_proof_of_space.sp_hash] > max_pos_per_sp:
self.farmer.log.info(
f"Surpassed {max_pos_per_sp} PoSpace for one SP, no longer submitting PoSpace for signage point "
f"{new_proof_of_space.sp_hash}"
)
return None
if new_proof_of_space.sp_hash not in self.farmer.sps:
self.farmer.log.warning(
f"Received response for a signage point that we do not have {new_proof_of_space.sp_hash}"
)
return None
sps = self.farmer.sps[new_proof_of_space.sp_hash]
for sp in sps:
computed_quality_string = new_proof_of_space.proof.verify_and_get_quality_string(
self.farmer.constants,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
)
if computed_quality_string is None:
self.farmer.log.error(f"Invalid proof of space {new_proof_of_space.proof}")
return None
self.farmer.number_of_responses[new_proof_of_space.sp_hash] += 1
required_iters: uint64 = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
sp.difficulty,
new_proof_of_space.sp_hash,
)
# If the iters are good enough to make a block, proceed with the block making flow
if required_iters < calculate_sp_interval_iters(self.farmer.constants, sp.sub_slot_iters):
# Proceed at getting the signatures for this PoSpace
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[sp.challenge_chain_sp, sp.reward_chain_sp],
)
if new_proof_of_space.sp_hash not in self.farmer.proofs_of_space:
self.farmer.proofs_of_space[new_proof_of_space.sp_hash] = []
self.farmer.proofs_of_space[new_proof_of_space.sp_hash].append(
(
new_proof_of_space.plot_identifier,
new_proof_of_space.proof,
)
)
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
self.farmer.quality_str_to_identifiers[computed_quality_string] = (
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
peer.peer_node_id,
)
self.farmer.cache_add_time[computed_quality_string] = uint64(int(time.time()))
await peer.send_message(make_msg(ProtocolMessageTypes.request_signatures, request))
p2_singleton_puzzle_hash = new_proof_of_space.proof.pool_contract_puzzle_hash
if p2_singleton_puzzle_hash is not None:
# Otherwise, send the proof of space to the pool
# When we win a block, we also send the partial to the pool
if p2_singleton_puzzle_hash not in self.farmer.pool_state:
self.farmer.log.info(f"Did not find pool info for {p2_singleton_puzzle_hash}")
return
pool_state_dict: Dict = self.farmer.pool_state[p2_singleton_puzzle_hash]
pool_url = pool_state_dict["pool_config"].pool_url
if pool_url == "":
return
if pool_state_dict["current_difficulty"] is None:
self.farmer.log.warning(
f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
f"check communication with the pool, skipping this partial to {pool_url}."
)
return
required_iters = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
pool_state_dict["current_difficulty"],
new_proof_of_space.sp_hash,
)
if required_iters >= calculate_sp_interval_iters(
self.farmer.constants, self.farmer.constants.POOL_SUB_SLOT_ITERS
):
self.farmer.log.info(
f"Proof of space not good enough for pool {pool_url}: {pool_state_dict['current_difficulty']}"
)
return
authentication_token_timeout = pool_state_dict["authentication_token_timeout"]
if authentication_token_timeout is None:
self.farmer.log.warning(
f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
f", check communication with the pool."
)
return
# Submit partial to pool
is_eos = new_proof_of_space.signage_point_index == 0
payload = PostPartialPayload(
pool_state_dict["pool_config"].launcher_id,
get_current_authentication_token(authentication_token_timeout),
new_proof_of_space.proof,
new_proof_of_space.sp_hash,
is_eos,
peer.peer_node_id,
)
# The plot key is 2/2 so we need the harvester's half of the signature
m_to_sign = payload.get_hash()
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[m_to_sign],
)
response: Any = await peer.request_signatures(request)
if not isinstance(response, harvester_protocol.RespondSignatures):
self.farmer.log.error(f"Invalid response from harvester: {response}")
return
assert len(response.message_signatures) == 1
plot_signature: Optional[G2Element] = None
for sk in self.farmer.get_private_keys():
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, True)
assert agg_pk == new_proof_of_space.proof.plot_public_key
sig_farmer = AugSchemeMPL.sign(sk, m_to_sign, agg_pk)
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
taproot_sig: G2Element = AugSchemeMPL.sign(taproot_sk, m_to_sign, agg_pk)
plot_signature = AugSchemeMPL.aggregate(
[sig_farmer, response.message_signatures[0][1], taproot_sig]
)
assert AugSchemeMPL.verify(agg_pk, m_to_sign, plot_signature)
authentication_pk = pool_state_dict["pool_config"].authentication_public_key
if bytes(authentication_pk) is None:
self.farmer.log.error(f"No authentication sk for {authentication_pk}")
return
authentication_sk: PrivateKey = self.farmer.authentication_keys[bytes(authentication_pk)]
authentication_signature = AugSchemeMPL.sign(authentication_sk, m_to_sign)
assert plot_signature is not None
agg_sig: G2Element = AugSchemeMPL.aggregate([plot_signature, authentication_signature])
post_partial_request: PostPartialRequest = PostPartialRequest(payload, agg_sig)
self.farmer.log.info(
f"Submitting partial for {post_partial_request.payload.launcher_id.hex()} to {pool_url}"
)
pool_state_dict["points_found_since_start"] += pool_state_dict["current_difficulty"]
pool_state_dict["points_found_24h"].append((time.time(), pool_state_dict["current_difficulty"]))
try:
async with aiohttp.ClientSession() as session:
async with session.post(
f"{pool_url}/partial",
data=post_partial_body,
headers=headers,
ssl=ssl_context_for_root(get_mozilla_ca_crt()),
) as resp:
if resp.ok:
pool_response: Dict = json.loads(await resp.text())
self.farmer.log.info(f"Pool response: {pool_response}")
if "error_code" in pool_response:
self.farmer.log.error(
f"Error in pooling: "
f"{pool_response['error_code'], pool_response['error_message']}"
)
pool_state_dict["pool_errors_24h"].append(pool_response)
if pool_response["error_code"] == PoolErrorCode.PROOF_NOT_GOOD_ENOUGH.value:
self.farmer.log.error(
"Partial not good enough, forcing pool farmer update to "
"get our current difficulty."
)
pool_state_dict["next_farmer_update"] = 0
await self.farmer.update_pool_state()
else:
new_difficulty = pool_response["new_difficulty"]
pool_state_dict["points_acknowledged_since_start"] += new_difficulty
pool_state_dict["points_acknowledged_24h"].append((time.time(), new_difficulty))
pool_state_dict["current_difficulty"] = new_difficulty
else:
self.farmer.log.error(f"Error sending partial to {pool_url}, {resp.status}")
except Exception as e:
self.farmer.log.error(f"Error connecting to pool: {e}")
return
return
    @api_request
    async def respond_signatures(self, response: harvester_protocol.RespondSignatures):
        """
        There are two cases: receiving signatures for sps, or receiving signatures for the block.
        """
        if response.sp_hash not in self.farmer.sps:
            # NOTE(review): the message mentions challenge hash but the lookup key is
            # sp_hash -- confirm which identifier should be logged.
            self.farmer.log.warning(f"Do not have challenge hash {response.challenge_hash}")
            return None
        is_sp_signatures: bool = False
        sps = self.farmer.sps[response.sp_hash]
        signage_point_index = sps[0].signage_point_index
        found_sp_hash_debug = False
        # Classify the response: if the first signed message is the sp hash itself and a
        # cached sp's reward_chain_sp matches the second, these are signage-point sigs.
        # NOTE(review): the outer condition does not use sp_candidate -- presumably
        # intentional (flag flips if any candidate matches); confirm.
        for sp_candidate in sps:
            if response.sp_hash == response.message_signatures[0][0]:
                found_sp_hash_debug = True
                if sp_candidate.reward_chain_sp == response.message_signatures[1][0]:
                    is_sp_signatures = True
        if found_sp_hash_debug:
            assert is_sp_signatures
        # Recover the proof of space this signature round belongs to.
        pospace = None
        for plot_identifier, candidate_pospace in self.farmer.proofs_of_space[response.sp_hash]:
            if plot_identifier == response.plot_identifier:
                pospace = candidate_pospace
        assert pospace is not None
        # Pooled plots (pool contract puzzle hash present) use a taproot-tweaked plot key.
        include_taproot: bool = pospace.pool_contract_puzzle_hash is not None
        computed_quality_string = pospace.verify_and_get_quality_string(
            self.farmer.constants, response.challenge_hash, response.sp_hash
        )
        if computed_quality_string is None:
            self.farmer.log.warning(f"Have invalid PoSpace {pospace}")
            return None
        if is_sp_signatures:
            # Case 1: signage-point signatures -> build and broadcast DeclareProofOfSpace.
            (
                challenge_chain_sp,
                challenge_chain_sp_harv_sig,
            ) = response.message_signatures[0]
            reward_chain_sp, reward_chain_sp_harv_sig = response.message_signatures[1]
            for sk in self.farmer.get_private_keys():
                pk = sk.get_g1()
                if pk == response.farmer_pk:
                    agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, include_taproot)
                    assert agg_pk == pospace.plot_public_key
                    if include_taproot:
                        taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
                        taproot_share_cc_sp: G2Element = AugSchemeMPL.sign(taproot_sk, challenge_chain_sp, agg_pk)
                        taproot_share_rc_sp: G2Element = AugSchemeMPL.sign(taproot_sk, reward_chain_sp, agg_pk)
                    else:
                        # No taproot: the identity element leaves the aggregate unchanged.
                        taproot_share_cc_sp = G2Element()
                        taproot_share_rc_sp = G2Element()
                    # 2/2 (+taproot) aggregate over the challenge chain sp.
                    farmer_share_cc_sp = AugSchemeMPL.sign(sk, challenge_chain_sp, agg_pk)
                    agg_sig_cc_sp = AugSchemeMPL.aggregate(
                        [challenge_chain_sp_harv_sig, farmer_share_cc_sp, taproot_share_cc_sp]
                    )
                    assert AugSchemeMPL.verify(agg_pk, challenge_chain_sp, agg_sig_cc_sp)
                    # This means it passes the sp filter
                    farmer_share_rc_sp = AugSchemeMPL.sign(sk, reward_chain_sp, agg_pk)
                    agg_sig_rc_sp = AugSchemeMPL.aggregate(
                        [reward_chain_sp_harv_sig, farmer_share_rc_sp, taproot_share_rc_sp]
                    )
                    assert AugSchemeMPL.verify(agg_pk, reward_chain_sp, agg_sig_rc_sp)
                    if pospace.pool_public_key is not None:
                        # Old-style (OG) pooling: sign the pool target with the pool key.
                        assert pospace.pool_contract_puzzle_hash is None
                        pool_pk = bytes(pospace.pool_public_key)
                        if pool_pk not in self.farmer.pool_sks_map:
                            self.farmer.log.error(
                                f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}"
                            )
                            return None
                        pool_target: Optional[PoolTarget] = PoolTarget(self.farmer.pool_target, uint32(0))
                        assert pool_target is not None
                        pool_target_signature: Optional[G2Element] = AugSchemeMPL.sign(
                            self.farmer.pool_sks_map[pool_pk], bytes(pool_target)
                        )
                    else:
                        # Pool-contract plots carry no explicit pool target here.
                        assert pospace.pool_contract_puzzle_hash is not None
                        pool_target = None
                        pool_target_signature = None
                    request = farmer_protocol.DeclareProofOfSpace(
                        response.challenge_hash,
                        challenge_chain_sp,
                        signage_point_index,
                        reward_chain_sp,
                        pospace,
                        agg_sig_cc_sp,
                        agg_sig_rc_sp,
                        self.farmer.farmer_target,
                        pool_target,
                        pool_target_signature,
                    )
                    self.farmer.state_changed("proof", {"proof": request, "passed_filter": True})
                    msg = make_msg(ProtocolMessageTypes.declare_proof_of_space, request)
                    await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
                    return None
        else:
            # This is a response with block signatures
            # Case 2: foliage signatures -> aggregate and return SignedValues to full nodes.
            for sk in self.farmer.get_private_keys():
                (
                    foliage_block_data_hash,
                    foliage_sig_harvester,
                ) = response.message_signatures[0]
                (
                    foliage_transaction_block_hash,
                    foliage_transaction_block_sig_harvester,
                ) = response.message_signatures[1]
                pk = sk.get_g1()
                if pk == response.farmer_pk:
                    agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, include_taproot)
                    assert agg_pk == pospace.plot_public_key
                    if include_taproot:
                        taproot_sk = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
                        foliage_sig_taproot: G2Element = AugSchemeMPL.sign(taproot_sk, foliage_block_data_hash, agg_pk)
                        foliage_transaction_block_sig_taproot: G2Element = AugSchemeMPL.sign(
                            taproot_sk, foliage_transaction_block_hash, agg_pk
                        )
                    else:
                        foliage_sig_taproot = G2Element()
                        foliage_transaction_block_sig_taproot = G2Element()
                    foliage_sig_farmer = AugSchemeMPL.sign(sk, foliage_block_data_hash, agg_pk)
                    foliage_transaction_block_sig_farmer = AugSchemeMPL.sign(sk, foliage_transaction_block_hash, agg_pk)
                    foliage_agg_sig = AugSchemeMPL.aggregate(
                        [foliage_sig_harvester, foliage_sig_farmer, foliage_sig_taproot]
                    )
                    foliage_block_agg_sig = AugSchemeMPL.aggregate(
                        [
                            foliage_transaction_block_sig_harvester,
                            foliage_transaction_block_sig_farmer,
                            foliage_transaction_block_sig_taproot,
                        ]
                    )
                    assert AugSchemeMPL.verify(agg_pk, foliage_block_data_hash, foliage_agg_sig)
                    assert AugSchemeMPL.verify(agg_pk, foliage_transaction_block_hash, foliage_block_agg_sig)
                    request_to_nodes = farmer_protocol.SignedValues(
                        computed_quality_string,
                        foliage_agg_sig,
                        foliage_block_agg_sig,
                    )
                    msg = make_msg(ProtocolMessageTypes.signed_values, request_to_nodes)
                    await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
"""
FARMER PROTOCOL (FARMER <-> FULL NODE)
"""
@api_request
async def new_signage_point(self, new_signage_point: farmer_protocol.NewSignagePoint):
pool_difficulties: List[PoolDifficulty] = []
for p2_singleton_puzzle_hash, pool_dict in self.farmer.pool_state.items():
if pool_dict["pool_config"].pool_url == "":
# Self pooling
continue
if pool_dict["current_difficulty"] is None:
self.farmer.log.warning(
f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
f"check communication with the pool, skipping this signage point, pool: "
f"{pool_dict['pool_config'].pool_url} "
)
continue
pool_difficulties.append(
PoolDifficulty(
pool_dict["current_difficulty"],
self.farmer.constants.POOL_SUB_SLOT_ITERS,
p2_singleton_puzzle_hash,
)
)
message = harvester_protocol.NewSignagePointHarvester(
new_signage_point.challenge_hash,
new_signage_point.difficulty,
new_signage_point.sub_slot_iters,
new_signage_point.signage_point_index,
new_signage_point.challenge_chain_sp,
pool_difficulties,
)
msg = make_msg(ProtocolMessageTypes.new_signage_point_harvester, message)
await self.farmer.server.send_to_all([msg], NodeType.HARVESTER)
if new_signage_point.challenge_chain_sp not in self.farmer.sps:
self.farmer.sps[new_signage_point.challenge_chain_sp] = []
if new_signage_point in self.farmer.sps[new_signage_point.challenge_chain_sp]:
self.farmer.log.debug(f"Duplicate signage point {new_signage_point.signage_point_index}")
return
self.farmer.sps[new_signage_point.challenge_chain_sp].append(new_signage_point)
self.farmer.cache_add_time[new_signage_point.challenge_chain_sp] = uint64(int(time.time()))
self.farmer.state_changed("new_signage_point", {"sp_hash": new_signage_point.challenge_chain_sp})
@api_request
async def request_signed_values(self, full_node_request: farmer_protocol.RequestSignedValues):
if full_node_request.quality_string not in self.farmer.quality_str_to_identifiers:
self.farmer.log.error(f"Do not have quality string {full_node_request.quality_string}")
return None
(plot_identifier, challenge_hash, sp_hash, node_id) = self.farmer.quality_str_to_identifiers[
full_node_request.quality_string
]
request = harvester_protocol.RequestSignatures(
plot_identifier,
challenge_hash,
sp_hash,
[full_node_request.foliage_block_data_hash, full_node_request.foliage_transaction_block_hash],
)
msg = make_msg(ProtocolMessageTypes.request_signatures, request)
await self.farmer.server.send_to_specific([msg], node_id)
@api_request
async def farming_info(self, request: farmer_protocol.FarmingInfo):
self.farmer.state_changed(
"new_farming_info",
{
"farming_info": {
"challenge_hash": request.challenge_hash,
"signage_point": request.sp_hash,
"passed_filter": request.passed,
"proofs": request.proofs,
"total_plots": request.total_plots,
"timestamp": request.timestamp,
}
},
)
    @api_request
    async def respond_plots(self, _: harvester_protocol.RespondPlots):
        # NOTE(review): presumably the plot list is fetched via a direct request
        # elsewhere, so an unsolicited RespondPlots arriving here is ignored -- confirm.
        self.farmer.log.warning("Respond plots came too late")
| 50.269777 | 120 | 0.589315 | import json
import time
from typing import Callable, Optional, List, Any, Dict
import aiohttp
from blspy import AugSchemeMPL, G2Element, PrivateKey
import chia.server.ws_connection as ws
from chia.consensus.network_type import NetworkType
from chia.consensus.pot_iterations import calculate_iterations_quality, calculate_sp_interval_iters
from chia.farmer.farmer import Farmer
from chia.protocols import farmer_protocol, harvester_protocol
from chia.protocols.harvester_protocol import PoolDifficulty
from chia.protocols.pool_protocol import (
get_current_authentication_token,
PoolErrorCode,
PostPartialRequest,
PostPartialPayload,
)
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.server.outbound_message import NodeType, make_msg
from chia.server.server import ssl_context_for_root
from chia.ssl.create_ssl import get_mozilla_ca_crt
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.util.api_decorators import api_request, peer_required
from chia.util.ints import uint32, uint64
class FarmerAPI:
farmer: Farmer
def __init__(self, farmer) -> None:
self.farmer = farmer
def _set_state_changed_callback(self, callback: Callable):
self.farmer.state_changed_callback = callback
@api_request
@peer_required
async def new_proof_of_space(
self, new_proof_of_space: harvester_protocol.NewProofOfSpace, peer: ws.WSChiaConnection
):
if new_proof_of_space.sp_hash not in self.farmer.number_of_responses:
self.farmer.number_of_responses[new_proof_of_space.sp_hash] = 0
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
max_pos_per_sp = 5
if self.farmer.constants.NETWORK_TYPE != NetworkType.MAINNET:
if self.farmer.number_of_responses[new_proof_of_space.sp_hash] > max_pos_per_sp:
self.farmer.log.info(
f"Surpassed {max_pos_per_sp} PoSpace for one SP, no longer submitting PoSpace for signage point "
f"{new_proof_of_space.sp_hash}"
)
return None
if new_proof_of_space.sp_hash not in self.farmer.sps:
self.farmer.log.warning(
f"Received response for a signage point that we do not have {new_proof_of_space.sp_hash}"
)
return None
sps = self.farmer.sps[new_proof_of_space.sp_hash]
for sp in sps:
computed_quality_string = new_proof_of_space.proof.verify_and_get_quality_string(
self.farmer.constants,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
)
if computed_quality_string is None:
self.farmer.log.error(f"Invalid proof of space {new_proof_of_space.proof}")
return None
self.farmer.number_of_responses[new_proof_of_space.sp_hash] += 1
required_iters: uint64 = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
sp.difficulty,
new_proof_of_space.sp_hash,
)
if required_iters < calculate_sp_interval_iters(self.farmer.constants, sp.sub_slot_iters):
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[sp.challenge_chain_sp, sp.reward_chain_sp],
)
if new_proof_of_space.sp_hash not in self.farmer.proofs_of_space:
self.farmer.proofs_of_space[new_proof_of_space.sp_hash] = []
self.farmer.proofs_of_space[new_proof_of_space.sp_hash].append(
(
new_proof_of_space.plot_identifier,
new_proof_of_space.proof,
)
)
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
self.farmer.quality_str_to_identifiers[computed_quality_string] = (
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
peer.peer_node_id,
)
self.farmer.cache_add_time[computed_quality_string] = uint64(int(time.time()))
await peer.send_message(make_msg(ProtocolMessageTypes.request_signatures, request))
p2_singleton_puzzle_hash = new_proof_of_space.proof.pool_contract_puzzle_hash
if p2_singleton_puzzle_hash is not None:
if p2_singleton_puzzle_hash not in self.farmer.pool_state:
self.farmer.log.info(f"Did not find pool info for {p2_singleton_puzzle_hash}")
return
pool_state_dict: Dict = self.farmer.pool_state[p2_singleton_puzzle_hash]
pool_url = pool_state_dict["pool_config"].pool_url
if pool_url == "":
return
if pool_state_dict["current_difficulty"] is None:
self.farmer.log.warning(
f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
f"check communication with the pool, skipping this partial to {pool_url}."
)
return
required_iters = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
pool_state_dict["current_difficulty"],
new_proof_of_space.sp_hash,
)
if required_iters >= calculate_sp_interval_iters(
self.farmer.constants, self.farmer.constants.POOL_SUB_SLOT_ITERS
):
self.farmer.log.info(
f"Proof of space not good enough for pool {pool_url}: {pool_state_dict['current_difficulty']}"
)
return
authentication_token_timeout = pool_state_dict["authentication_token_timeout"]
if authentication_token_timeout is None:
self.farmer.log.warning(
f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
f", check communication with the pool."
)
return
is_eos = new_proof_of_space.signage_point_index == 0
payload = PostPartialPayload(
pool_state_dict["pool_config"].launcher_id,
get_current_authentication_token(authentication_token_timeout),
new_proof_of_space.proof,
new_proof_of_space.sp_hash,
is_eos,
peer.peer_node_id,
)
m_to_sign = payload.get_hash()
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[m_to_sign],
)
response: Any = await peer.request_signatures(request)
if not isinstance(response, harvester_protocol.RespondSignatures):
self.farmer.log.error(f"Invalid response from harvester: {response}")
return
assert len(response.message_signatures) == 1
plot_signature: Optional[G2Element] = None
for sk in self.farmer.get_private_keys():
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, True)
assert agg_pk == new_proof_of_space.proof.plot_public_key
sig_farmer = AugSchemeMPL.sign(sk, m_to_sign, agg_pk)
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
taproot_sig: G2Element = AugSchemeMPL.sign(taproot_sk, m_to_sign, agg_pk)
plot_signature = AugSchemeMPL.aggregate(
[sig_farmer, response.message_signatures[0][1], taproot_sig]
)
assert AugSchemeMPL.verify(agg_pk, m_to_sign, plot_signature)
authentication_pk = pool_state_dict["pool_config"].authentication_public_key
if bytes(authentication_pk) is None:
self.farmer.log.error(f"No authentication sk for {authentication_pk}")
return
authentication_sk: PrivateKey = self.farmer.authentication_keys[bytes(authentication_pk)]
authentication_signature = AugSchemeMPL.sign(authentication_sk, m_to_sign)
assert plot_signature is not None
agg_sig: G2Element = AugSchemeMPL.aggregate([plot_signature, authentication_signature])
post_partial_request: PostPartialRequest = PostPartialRequest(payload, agg_sig)
self.farmer.log.info(
f"Submitting partial for {post_partial_request.payload.launcher_id.hex()} to {pool_url}"
)
pool_state_dict["points_found_since_start"] += pool_state_dict["current_difficulty"]
pool_state_dict["points_found_24h"].append((time.time(), pool_state_dict["current_difficulty"]))
try:
async with aiohttp.ClientSession() as session:
async with session.post(
f"{pool_url}/partial",
data=post_partial_body,
headers=headers,
ssl=ssl_context_for_root(get_mozilla_ca_crt()),
) as resp:
if resp.ok:
pool_response: Dict = json.loads(await resp.text())
self.farmer.log.info(f"Pool response: {pool_response}")
if "error_code" in pool_response:
self.farmer.log.error(
f"Error in pooling: "
f"{pool_response['error_code'], pool_response['error_message']}"
)
pool_state_dict["pool_errors_24h"].append(pool_response)
if pool_response["error_code"] == PoolErrorCode.PROOF_NOT_GOOD_ENOUGH.value:
self.farmer.log.error(
"Partial not good enough, forcing pool farmer update to "
"get our current difficulty."
)
pool_state_dict["next_farmer_update"] = 0
await self.farmer.update_pool_state()
else:
new_difficulty = pool_response["new_difficulty"]
pool_state_dict["points_acknowledged_since_start"] += new_difficulty
pool_state_dict["points_acknowledged_24h"].append((time.time(), new_difficulty))
pool_state_dict["current_difficulty"] = new_difficulty
else:
self.farmer.log.error(f"Error sending partial to {pool_url}, {resp.status}")
except Exception as e:
self.farmer.log.error(f"Error connecting to pool: {e}")
return
return
@api_request
async def respond_signatures(self, response: harvester_protocol.RespondSignatures):
    """Handle signature shares returned by a harvester.

    Two cases, distinguished by which messages were signed:
    * signage-point hashes -> complete the aggregate signatures and
      declare a proof of space to all connected full nodes;
    * foliage hashes -> complete the aggregate signatures and send the
      signed block values to all connected full nodes.
    """
    # The response can arrive after the signage point has expired out of
    # the farmer's cache; in that case there is nothing useful to do.
    if response.sp_hash not in self.farmer.sps:
        self.farmer.log.warning(f"Do not have challenge hash {response.challenge_hash}")
        return None
    is_sp_signatures: bool = False
    sps = self.farmer.sps[response.sp_hash]
    signage_point_index = sps[0].signage_point_index
    found_sp_hash_debug = False
    for sp_candidate in sps:
        # message_signatures[0][0] is the first message that was signed;
        # when it equals sp_hash the harvester signed signage-point data.
        if response.sp_hash == response.message_signatures[0][0]:
            found_sp_hash_debug = True
            if sp_candidate.reward_chain_sp == response.message_signatures[1][0]:
                is_sp_signatures = True
    if found_sp_hash_debug:
        # Sanity check: a signed sp_hash must pair with the reward chain
        # sp of one of the cached signage points.
        assert is_sp_signatures
    # Recover the proof of space this response refers to, by plot id.
    pospace = None
    for plot_identifier, candidate_pospace in self.farmer.proofs_of_space[response.sp_hash]:
        if plot_identifier == response.plot_identifier:
            pospace = candidate_pospace
    assert pospace is not None
    # Plots bound to a pool contract need an extra taproot signature share.
    include_taproot: bool = pospace.pool_contract_puzzle_hash is not None

    computed_quality_string = pospace.verify_and_get_quality_string(
        self.farmer.constants, response.challenge_hash, response.sp_hash
    )
    if computed_quality_string is None:
        self.farmer.log.warning(f"Have invalid PoSpace {pospace}")
        return None

    if is_sp_signatures:
        (
            challenge_chain_sp,
            challenge_chain_sp_harv_sig,
        ) = response.message_signatures[0]
        reward_chain_sp, reward_chain_sp_harv_sig = response.message_signatures[1]
        # Find the farmer key matching this plot and add the farmer (and
        # optionally taproot) shares to the harvester's signature shares.
        for sk in self.farmer.get_private_keys():
            pk = sk.get_g1()
            if pk == response.farmer_pk:
                agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, True)
                assert agg_pk == pospace.plot_public_key
                if include_taproot:
                    taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
                    taproot_share_cc_sp: G2Element = AugSchemeMPL.sign(taproot_sk, challenge_chain_sp, agg_pk)
                    taproot_share_rc_sp: G2Element = AugSchemeMPL.sign(taproot_sk, reward_chain_sp, agg_pk)
                else:
                    # Identity element: contributes nothing to the aggregate.
                    taproot_share_cc_sp = G2Element()
                    taproot_share_rc_sp = G2Element()
                farmer_share_cc_sp = AugSchemeMPL.sign(sk, challenge_chain_sp, agg_pk)
                agg_sig_cc_sp = AugSchemeMPL.aggregate(
                    [challenge_chain_sp_harv_sig, farmer_share_cc_sp, taproot_share_cc_sp]
                )
                assert AugSchemeMPL.verify(agg_pk, challenge_chain_sp, agg_sig_cc_sp)

                # This means it passes the sp filter
                farmer_share_rc_sp = AugSchemeMPL.sign(sk, reward_chain_sp, agg_pk)
                agg_sig_rc_sp = AugSchemeMPL.aggregate(
                    [reward_chain_sp_harv_sig, farmer_share_rc_sp, taproot_share_rc_sp]
                )
                assert AugSchemeMPL.verify(agg_pk, reward_chain_sp, agg_sig_rc_sp)

                if pospace.pool_public_key is not None:
                    # Plot assigned directly to a pool key: the farmer must
                    # hold that key and sign the pool target with it.
                    assert pospace.pool_contract_puzzle_hash is None
                    pool_pk = bytes(pospace.pool_public_key)
                    if pool_pk not in self.farmer.pool_sks_map:
                        self.farmer.log.error(
                            f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}"
                        )
                        return None
                    pool_target: Optional[PoolTarget] = PoolTarget(self.farmer.pool_target, uint32(0))
                    assert pool_target is not None
                    pool_target_signature: Optional[G2Element] = AugSchemeMPL.sign(
                        self.farmer.pool_sks_map[pool_pk], bytes(pool_target)
                    )
                else:
                    # Pool-contract plot: no explicit pool target is declared.
                    assert pospace.pool_contract_puzzle_hash is not None
                    pool_target = None
                    pool_target_signature = None

                request = farmer_protocol.DeclareProofOfSpace(
                    response.challenge_hash,
                    challenge_chain_sp,
                    signage_point_index,
                    reward_chain_sp,
                    pospace,
                    agg_sig_cc_sp,
                    agg_sig_rc_sp,
                    self.farmer.farmer_target,
                    pool_target,
                    pool_target_signature,
                )
                self.farmer.state_changed("proof", {"proof": request, "passed_filter": True})
                msg = make_msg(ProtocolMessageTypes.declare_proof_of_space, request)
                await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
                return None

    else:
        # Foliage signatures: complete the aggregate signatures over both
        # foliage hashes and return the signed values to full nodes.
        for sk in self.farmer.get_private_keys():
            (
                foliage_block_data_hash,
                foliage_sig_harvester,
            ) = response.message_signatures[0]
            (
                foliage_transaction_block_hash,
                foliage_transaction_block_sig_harvester,
            ) = response.message_signatures[1]
            pk = sk.get_g1()
            if pk == response.farmer_pk:
                agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, include_taproot)
                assert agg_pk == pospace.plot_public_key
                if include_taproot:
                    taproot_sk = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
                    foliage_sig_taproot: G2Element = AugSchemeMPL.sign(taproot_sk, foliage_block_data_hash, agg_pk)
                    foliage_transaction_block_sig_taproot: G2Element = AugSchemeMPL.sign(
                        taproot_sk, foliage_transaction_block_hash, agg_pk
                    )
                else:
                    # Identity element: contributes nothing to the aggregate.
                    foliage_sig_taproot = G2Element()
                    foliage_transaction_block_sig_taproot = G2Element()

                foliage_sig_farmer = AugSchemeMPL.sign(sk, foliage_block_data_hash, agg_pk)
                foliage_transaction_block_sig_farmer = AugSchemeMPL.sign(sk, foliage_transaction_block_hash, agg_pk)

                foliage_agg_sig = AugSchemeMPL.aggregate(
                    [foliage_sig_harvester, foliage_sig_farmer, foliage_sig_taproot]
                )
                foliage_block_agg_sig = AugSchemeMPL.aggregate(
                    [
                        foliage_transaction_block_sig_harvester,
                        foliage_transaction_block_sig_farmer,
                        foliage_transaction_block_sig_taproot,
                    ]
                )
                assert AugSchemeMPL.verify(agg_pk, foliage_block_data_hash, foliage_agg_sig)
                assert AugSchemeMPL.verify(agg_pk, foliage_transaction_block_hash, foliage_block_agg_sig)

                request_to_nodes = farmer_protocol.SignedValues(
                    computed_quality_string,
                    foliage_agg_sig,
                    foliage_block_agg_sig,
                )

                msg = make_msg(ProtocolMessageTypes.signed_values, request_to_nodes)
                await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
@api_request
async def new_signage_point(self, new_signage_point: farmer_protocol.NewSignagePoint):
    """Forward a new signage point to all harvesters and cache it locally."""
    # Collect per-pool difficulty overrides for plots farming to a pool.
    difficulties: List[PoolDifficulty] = []
    for p2_singleton_puzzle_hash, pool_dict in self.farmer.pool_state.items():
        if pool_dict["pool_config"].pool_url == "":
            continue
        if pool_dict["current_difficulty"] is None:
            self.farmer.log.warning(
                f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
                f"check communication with the pool, skipping this signage point, pool: "
                f"{pool_dict['pool_config'].pool_url} "
            )
            continue
        difficulties.append(
            PoolDifficulty(
                pool_dict["current_difficulty"],
                self.farmer.constants.POOL_SUB_SLOT_ITERS,
                p2_singleton_puzzle_hash,
            )
        )

    # Broadcast the signage point (with pool difficulties) to every harvester.
    harvester_message = harvester_protocol.NewSignagePointHarvester(
        new_signage_point.challenge_hash,
        new_signage_point.difficulty,
        new_signage_point.sub_slot_iters,
        new_signage_point.signage_point_index,
        new_signage_point.challenge_chain_sp,
        difficulties,
    )
    await self.farmer.server.send_to_all(
        [make_msg(ProtocolMessageTypes.new_signage_point_harvester, harvester_message)],
        NodeType.HARVESTER,
    )

    # Cache the signage point under its challenge chain sp, skipping duplicates.
    sp_hash = new_signage_point.challenge_chain_sp
    sp_list = self.farmer.sps.setdefault(sp_hash, [])
    if new_signage_point in sp_list:
        self.farmer.log.debug(f"Duplicate signage point {new_signage_point.signage_point_index}")
        return
    sp_list.append(new_signage_point)
    self.farmer.cache_add_time[sp_hash] = uint64(int(time.time()))
    self.farmer.state_changed("new_signage_point", {"sp_hash": sp_hash})
@api_request
async def request_signed_values(self, full_node_request: farmer_protocol.RequestSignedValues):
    """Ask the harvester that found this proof to sign the foliage hashes."""
    quality = full_node_request.quality_string
    if quality not in self.farmer.quality_str_to_identifiers:
        self.farmer.log.error(f"Do not have quality string {full_node_request.quality_string}")
        return None

    # Map the quality string back to the plot/harvester that produced it.
    plot_identifier, challenge_hash, sp_hash, node_id = self.farmer.quality_str_to_identifiers[quality]
    signature_request = harvester_protocol.RequestSignatures(
        plot_identifier,
        challenge_hash,
        sp_hash,
        [full_node_request.foliage_block_data_hash, full_node_request.foliage_transaction_block_hash],
    )
    await self.farmer.server.send_to_specific(
        [make_msg(ProtocolMessageTypes.request_signatures, signature_request)], node_id
    )
@api_request
async def farming_info(self, request: farmer_protocol.FarmingInfo):
    """Relay harvester farming statistics to state-change subscribers."""
    info = {
        "challenge_hash": request.challenge_hash,
        "signage_point": request.sp_hash,
        "passed_filter": request.passed,
        "proofs": request.proofs,
        "total_plots": request.total_plots,
        "timestamp": request.timestamp,
    }
    self.farmer.state_changed("new_farming_info", {"farming_info": info})
@api_request
async def respond_plots(self, _: harvester_protocol.RespondPlots):
    """Plot responses are not expected via this path; log and ignore them."""
    log = self.farmer.log
    log.warning("Respond plots came too late")
| true | true |
f7f9ac9e1e3038e4b1b2b7b257d085f0922391fb | 269 | py | Python | anki_helpers/models/AddNoteRequest.py | AndrewDang-Tran/AnkiConnectClient | f3c1cbbcd45b7e3391d9fe5b77dccbaa5c70ad69 | [
"MIT"
] | null | null | null | anki_helpers/models/AddNoteRequest.py | AndrewDang-Tran/AnkiConnectClient | f3c1cbbcd45b7e3391d9fe5b77dccbaa5c70ad69 | [
"MIT"
] | null | null | null | anki_helpers/models/AddNoteRequest.py | AndrewDang-Tran/AnkiConnectClient | f3c1cbbcd45b7e3391d9fe5b77dccbaa5c70ad69 | [
"MIT"
] | null | null | null | from typing import Dict, List
from dataclasses import dataclass
from .AddNoteOptions import AddNoteOptions
@dataclass(frozen=True)
class AddNoteRequest:
    """Immutable payload describing a note to add via AnkiConnect."""

    # Target deck and note model names.
    deckName: str
    modelName: str
    # Mapping of note field name -> field content.
    fields: Dict[str, str]
    tags: List[str]
    options: AddNoteOptions
| 20.692308 | 42 | 0.747212 | from typing import Dict, List
from dataclasses import dataclass
from .AddNoteOptions import AddNoteOptions
@dataclass(frozen = True)
class AddNoteRequest:
deckName: str
modelName: str
fields: Dict[str, str]
tags: List[str]
options: AddNoteOptions
| true | true |
f7f9ad62c91089b5561b759d0e7310ef4bc56565 | 46 | py | Python | hatspil/reports/__init__.py | dodomorandi/hatspil | 99c4d255b3f9836b32506636c84b16b3456bd74c | [
"MIT"
] | 2 | 2018-12-20T08:54:17.000Z | 2019-10-19T18:35:33.000Z | hatspil/reports/__init__.py | dodomorandi/hatspil | 99c4d255b3f9836b32506636c84b16b3456bd74c | [
"MIT"
] | null | null | null | hatspil/reports/__init__.py | dodomorandi/hatspil | 99c4d255b3f9836b32506636c84b16b3456bd74c | [
"MIT"
] | null | null | null | """A package to handle report generation."""
| 23 | 45 | 0.717391 | true | true | |
f7f9ad8bf6c5f7536d6708d7d63bcf239c8c2b15 | 11,362 | py | Python | tests/util.py | themanifold/pyinfra | 5b2eef0b22fa6b1750d595d7eb3e258ce350a693 | [
"MIT"
] | 1 | 2021-09-25T09:21:37.000Z | 2021-09-25T09:21:37.000Z | tests/util.py | themanifold/pyinfra | 5b2eef0b22fa6b1750d595d7eb3e258ce350a693 | [
"MIT"
] | null | null | null | tests/util.py | themanifold/pyinfra | 5b2eef0b22fa6b1750d595d7eb3e258ce350a693 | [
"MIT"
] | null | null | null | import json
import os
from datetime import datetime
from io import open
from os import listdir, path
import six
from mock import patch
from pyinfra.api import Config, Inventory
from pyinfra.api.util import get_kwargs_str
from . import logger
def get_command_string(command):
    """Return the command's raw value, or ``[raw, masked]`` when they differ."""
    raw = command.get_raw_value()
    masked = command.get_masked_value()
    return raw if raw == masked else [raw, masked]
def make_inventory(hosts=('somehost', 'anotherhost'), **kwargs):
    """Build a small test Inventory with a preset group and SSH user."""
    # "somehost" is also placed in a group carrying some group data.
    group_hosts = [
        'somehost',
    ]
    group_data = {
        'group_data': 'hello world',
    }
    return Inventory(
        (hosts, {}),
        test_group=(group_hosts, group_data),
        ssh_user='vagrant',
        **kwargs
    )
class FakeState(object):
    """Minimal stand-in for pyinfra's State object, used by the unit tests."""

    # Class-level flags mirroring the attributes operations read off State.
    active = True
    deploy_dir = '/'
    in_op = True
    in_deploy = True
    pipelining = False
    deploy_name = None
    deploy_kwargs = None

    def __init__(self):
        # Real (but empty) inventory and default config so code under test
        # can access them without mocking.
        self.inventory = Inventory(([], {}))
        self.config = Config()
def get_temp_filename(*args):
    """Deterministic temp-file name stub; ignores all arguments."""
    return '_tempfile_'
def parse_fact(fact):
    '''
    Convert JSON types to more complex Python types because JSON is lacking.
    '''
    # Strings prefixed "datetime:" encode an ISO timestamp.
    if isinstance(fact, six.string_types):
        if fact.startswith('datetime:'):
            return datetime.strptime(fact[9:], '%Y-%m-%dT%H:%M:%S')
        return fact

    if isinstance(fact, list):
        # Lists starting with the "_set" marker encode sets.
        if len(fact) > 1 and fact[0] == '_set':
            return set(parse_fact(item) for item in fact[1:])
        return [parse_fact(item) for item in fact]

    if isinstance(fact, dict):
        return {
            key: parse_fact(value)
            for key, value in six.iteritems(fact)
        }

    return fact
class FakeFact(object):
    """Wraps one fact's fixture data, mimicking pyinfra's fact access.

    Iteration, attribute access, indexing and comparison are delegated
    straight to the parsed underlying data; calling the instance emulates
    a parameterised fact lookup.
    """

    def __init__(self, data):
        # Normalise JSON fixture data into richer Python types.
        self.data = parse_fact(data)

    def __iter__(self):
        return iter(self.data)

    def __getattr__(self, key):
        # Fall back to the wrapped data's own attributes/methods.
        return getattr(self.data, key)

    def __getitem__(self, key):
        return self.data[key]

    def __setitem__(self, key, value):
        self.data[key] = value

    def __contains__(self, key):
        return key in self.data

    def __call__(self, *args, **kwargs):
        # Each positional argument keys one level deeper into the data.
        item = self.data

        for arg in args:
            if arg is None:
                continue

            # Support for non-JSON-able fact arguments by turning them into JSON!
            if isinstance(arg, list):
                arg = json.dumps(arg)

            item = item.get(arg)

        return item

    def __str__(self):
        return str(self.data)

    def __unicode__(self):
        return self.data

    def __eq__(self, other_thing):
        # Compare the underlying data directly against the other object.
        return self.data == other_thing

    def __ne__(self, other_thing):
        return self.data != other_thing

    def get(self, key, default=None):
        # dict.get-style access that also works when data is not a dict.
        if key in self.data:
            return self.data[key]
        return default
class FakeFacts(object):
    """Container mapping fact keys to FakeFact instances for a FakeHost."""

    def __init__(self, facts):
        self.facts = {
            key: FakeFact(value)
            for key, value in facts.items()
        }

    def __getattr__(self, key):
        # Missing facts resolve to None rather than raising AttributeError.
        return self.facts.get(key)

    def __setitem__(self, key, value):
        self.facts[key] = value

    def _create(self, key, data=None, args=None):
        # Store fact data under the first fact argument (test helper).
        self.facts[key][args[0]] = data

    def _delete(self, key, args=None):
        # Drop fact data for the first fact argument, if present.
        self.facts[key].pop(args[0], None)
# TODO: remove after python2 removal, as only required because of different default ordering in 2/3
def _sort_kwargs_str(string):
return ', '.join(sorted(string.split(', ')))
class FakeHost(object):
    """Stand-in for pyinfra's Host, backed by static fact fixture data."""

    # Description recorded by the last .noop() call (None until then).
    noop_description = None

    def __init__(self, name, facts, data):
        self.name = name
        self.fact = FakeFacts(facts)
        self.data = data
        self.connector_data = {}

    @property
    def print_prefix(self):
        # Real hosts prefix their log lines; the tests don't need one.
        return ''

    def noop(self, description):
        # Record the no-op description so tests can assert on it.
        self.noop_description = description

    @staticmethod
    def _get_fact_key(fact_cls):
        # Fixture key format: "<last module segment>.<FactClassName>".
        return '{0}.{1}'.format(fact_cls.__module__.split('.')[-1], fact_cls.__name__)

    def get_fact(self, fact_cls, **kwargs):
        """Look up fixture data for a fact class, optionally per-kwargs."""
        fact_key = self._get_fact_key(fact_cls)
        fact = getattr(self.fact, fact_key, None)
        if fact is None:
            raise KeyError('Missing test fact data: {0}'.format(fact_key))

        if kwargs:
            # Fixture keys are kwargs strings; sort both sides so lookups
            # are independent of keyword-argument ordering.
            fact_ordered_keys = {
                _sort_kwargs_str(key): value
                for key, value in fact.items()
            }
            kwargs_str = _sort_kwargs_str(get_kwargs_str(kwargs))
            if kwargs_str not in fact:
                logger.info('Possible missing fact key: {0}'.format(kwargs_str))
            return fact_ordered_keys.get(kwargs_str)

        return fact

    def create_fact(self, fact_cls, data, kwargs):
        """Insert fixture data for a fact class + kwargs combination."""
        try:
            fact = self.get_fact(fact_cls)
        except KeyError:
            fact_key = self._get_fact_key(fact_cls)
            fact = self.fact[fact_key] = {}
        fact[_sort_kwargs_str(get_kwargs_str(kwargs))] = data

    def delete_fact(self, fact_cls, kwargs):
        """Remove fixture data for a fact class + kwargs combination."""
        try:
            fact = self.get_fact(fact_cls)
        except KeyError:
            return

        ordered_kwargs = _sort_kwargs_str(get_kwargs_str(kwargs))
        # Match on the order-normalised key, then pop the stored key.
        for key in fact.keys():
            ordered_key = _sort_kwargs_str(key)
            if ordered_key == ordered_kwargs:
                fact.pop(key)
                break
class FakeFile(object):
    """In-memory file stand-in: yields its data once, then reads empty.

    When constructed without data, it serves the placeholder
    '_test_data_' on the first read.
    """

    _read = False
    _data = None

    def __init__(self, name, data=None):
        self._name = name
        self._data = data

    def read(self, *args, **kwargs):
        if self._read:
            # Already consumed; behave like a file at EOF.
            return ''
        self._read = True
        return self._data if self._data else '_test_data_'

    def readlines(self, *args, **kwargs):
        if self._read:
            return []
        self._read = True
        return self._data.split() if self._data else ['_test_data_']

    def seek(self, *args, **kwargs):
        pass

    def close(self, *args, **kwargs):
        pass

    def __enter__(self, *args, **kwargs):
        return self

    def __exit__(self, *args, **kwargs):
        pass
class patch_files(object):
    """Context manager faking the local filesystem for operation tests.

    Takes a nested ``{'files': {...}, 'dirs': {...}}`` fixture and patches
    the filesystem helpers used by pyinfra's file operations so they
    resolve against the fixture instead of the real disk.
    """

    def __init__(self, local_files):
        directories, files, files_data = self._parse_local_files(local_files)
        self._files = files
        self._files_data = files_data
        self._directories = directories

    @staticmethod
    def _parse_local_files(local_files, prefix=FakeState.deploy_dir):
        # Recursively flatten the fixture into: full file paths, a map of
        # path -> file contents, and a map of dir path -> child names.
        files = []
        files_data = {}
        directories = {}

        prefix = path.normpath(prefix)

        for filename, file_data in local_files.get('files', {}).items():
            filepath = path.join(prefix, filename)
            files.append(filepath)
            files_data[filepath] = file_data

        for dirname, dir_files in local_files.get('dirs', {}).items():
            sub_dirname = path.join(prefix, dirname)
            sub_directories, sub_files, sub_files_data = patch_files._parse_local_files(
                dir_files,
                sub_dirname,
            )
            files.extend(sub_files)
            files_data.update(sub_files_data)
            directories[sub_dirname] = {
                'files': list(dir_files['files'].keys()),
                'dirs': list(dir_files['dirs'].keys()),
            }
            directories.update(sub_directories)

        return directories, files, files_data

    def __enter__(self):
        # Patch every filesystem entry point the operations use.
        self.patches = [
            patch('pyinfra.operations.files.os_path.exists', self.exists),
            patch('pyinfra.operations.files.os_path.isfile', self.isfile),
            patch('pyinfra.operations.files.os_path.isdir', self.isdir),
            patch('pyinfra.operations.files.walk', self.walk),
            patch('pyinfra.operations.files.makedirs', lambda path: True),
            patch('pyinfra.api.util.stat', self.stat),

            # Builtin patches
            patch('pyinfra.operations.files.open', self.get_file, create=True),
            patch('pyinfra.operations.server.open', self.get_file, create=True),
            patch('pyinfra.api.util.open', self.get_file, create=True),
        ]

        for patched in self.patches:
            patched.start()

    def __exit__(self, type_, value, traceback):
        for patched in self.patches:
            patched.stop()

    def get_file(self, filename, *args):
        # open() replacement: returns an in-memory FakeFile for known paths.
        if self.isfile(filename):
            normalized_path = path.normpath(filename)
            return FakeFile(normalized_path, self._files_data.get(normalized_path))
        raise IOError('Missing FakeFile: {0}'.format(filename))

    def exists(self, filename, *args):
        return self.isfile(filename) or self.isdir(filename)

    def isfile(self, filename, *args):
        normalized_path = path.normpath(filename)
        return normalized_path in self._files

    def isdir(self, dirname, *args):
        normalized_path = path.normpath(dirname)
        return normalized_path in self._directories

    def stat(self, pathname):
        # Fabricate a stat result with just the mode bits populated.
        if self.isfile(pathname):
            mode_int = 33188  # 644 file
        elif self.isdir(pathname):
            mode_int = 16877  # 755 directory
        else:
            raise IOError('No such file or directory: {0}'.format(pathname))
        return os.stat_result((mode_int, 0, 0, 0, 0, 0, 0, 0, 0, 0))

    def walk(self, dirname, topdown=True, onerror=None, followlinks=False):
        # os.walk replacement over the fixture tree.
        if not self.isdir(dirname):
            return

        normalized_path = path.normpath(dirname)
        dir_definition = self._directories[normalized_path]
        child_dirs = dir_definition.get('dirs', [])
        child_files = dir_definition.get('files', [])

        yield dirname, child_dirs, child_files

        for child in child_dirs:
            full_child = path.join(dirname, child)
            for recursive_return in self.walk(full_child, topdown, onerror, followlinks):
                yield recursive_return
def create_host(name=None, facts=None, data=None):
    '''
    Creates a FakeHost object with attached fact data.
    '''
    # Bug fix: the original looped `for name, fact_data in ...`, which
    # shadowed the `name` parameter — with a non-empty facts dict, the host
    # was named after the last fact key instead of the given name. A plain
    # dict copy both avoids the shadowing and preserves the fact mapping.
    real_facts = dict(facts) if facts else {}
    return FakeHost(name, facts=real_facts, data=data)
class JsonTest(type):
    """Metaclass that generates one test method per JSON fixture file.

    Classes using it must define ``jsontest_files`` (fixture directory),
    optionally ``jsontest_prefix`` (default ``'test_'``), and a
    ``jsontest_function(test_name, test_data)`` method.
    """

    def __new__(cls, name, bases, attrs):
        # Get the JSON files
        files = listdir(attrs['jsontest_files'])
        files = [f for f in files if f.endswith('.json')]

        test_prefix = attrs.get('jsontest_prefix', 'test_')

        def gen_test(test_name, filename):
            # Factory avoids the late-binding closure pitfall: each
            # generated test captures its own test_name/filename.
            def test(self):
                test_data = json.loads(open(
                    path.join(attrs['jsontest_files'], filename),
                    encoding='utf-8',
                ).read())
                self.jsontest_function(test_name, test_data)
            return test

        # Loop them and create class methods to call the jsontest_function
        for filename in files:
            # Fixture name without the ".json" suffix.
            test_name = filename[:-5]

            # Attach the method
            method_name = '{0}{1}'.format(test_prefix, test_name)
            attrs[method_name] = gen_test(test_name, filename)

        return type.__new__(cls, name, bases, attrs)
| 27.985222 | 99 | 0.594878 | import json
import os
from datetime import datetime
from io import open
from os import listdir, path
import six
from mock import patch
from pyinfra.api import Config, Inventory
from pyinfra.api.util import get_kwargs_str
from . import logger
def get_command_string(command):
value = command.get_raw_value()
masked_value = command.get_masked_value()
if value == masked_value:
return value
else:
return [value, masked_value]
def make_inventory(hosts=('somehost', 'anotherhost'), **kwargs):
return Inventory(
(hosts, {}),
test_group=([
'somehost',
], {
'group_data': 'hello world',
}),
ssh_user='vagrant',
**kwargs
)
class FakeState(object):
active = True
deploy_dir = '/'
in_op = True
in_deploy = True
pipelining = False
deploy_name = None
deploy_kwargs = None
def __init__(self):
self.inventory = Inventory(([], {}))
self.config = Config()
def get_temp_filename(*args):
return '_tempfile_'
def parse_fact(fact):
if isinstance(fact, six.string_types) and fact.startswith('datetime:'):
return datetime.strptime(fact[9:], '%Y-%m-%dT%H:%M:%S')
elif isinstance(fact, list):
if len(fact) > 1 and fact[0] == '_set':
return set(parse_fact(value) for value in fact[1:])
return [parse_fact(value) for value in fact]
elif isinstance(fact, dict):
return {
key: parse_fact(value)
for key, value in six.iteritems(fact)
}
return fact
class FakeFact(object):
def __init__(self, data):
self.data = parse_fact(data)
def __iter__(self):
return iter(self.data)
def __getattr__(self, key):
return getattr(self.data, key)
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def __contains__(self, key):
return key in self.data
def __call__(self, *args, **kwargs):
item = self.data
for arg in args:
if arg is None:
continue
if isinstance(arg, list):
arg = json.dumps(arg)
item = item.get(arg)
return item
def __str__(self):
return str(self.data)
def __unicode__(self):
return self.data
def __eq__(self, other_thing):
return self.data == other_thing
def __ne__(self, other_thing):
return self.data != other_thing
def get(self, key, default=None):
if key in self.data:
return self.data[key]
return default
class FakeFacts(object):
def __init__(self, facts):
self.facts = {
key: FakeFact(value)
for key, value in facts.items()
}
def __getattr__(self, key):
return self.facts.get(key)
def __setitem__(self, key, value):
self.facts[key] = value
def _create(self, key, data=None, args=None):
self.facts[key][args[0]] = data
def _delete(self, key, args=None):
self.facts[key].pop(args[0], None)
def _sort_kwargs_str(string):
return ', '.join(sorted(string.split(', ')))
class FakeHost(object):
noop_description = None
def __init__(self, name, facts, data):
self.name = name
self.fact = FakeFacts(facts)
self.data = data
self.connector_data = {}
@property
def print_prefix(self):
return ''
def noop(self, description):
self.noop_description = description
@staticmethod
def _get_fact_key(fact_cls):
return '{0}.{1}'.format(fact_cls.__module__.split('.')[-1], fact_cls.__name__)
def get_fact(self, fact_cls, **kwargs):
fact_key = self._get_fact_key(fact_cls)
fact = getattr(self.fact, fact_key, None)
if fact is None:
raise KeyError('Missing test fact data: {0}'.format(fact_key))
if kwargs:
fact_ordered_keys = {
_sort_kwargs_str(key): value
for key, value in fact.items()
}
kwargs_str = _sort_kwargs_str(get_kwargs_str(kwargs))
if kwargs_str not in fact:
logger.info('Possible missing fact key: {0}'.format(kwargs_str))
return fact_ordered_keys.get(kwargs_str)
return fact
def create_fact(self, fact_cls, data, kwargs):
try:
fact = self.get_fact(fact_cls)
except KeyError:
fact_key = self._get_fact_key(fact_cls)
fact = self.fact[fact_key] = {}
fact[_sort_kwargs_str(get_kwargs_str(kwargs))] = data
def delete_fact(self, fact_cls, kwargs):
try:
fact = self.get_fact(fact_cls)
except KeyError:
return
ordered_kwargs = _sort_kwargs_str(get_kwargs_str(kwargs))
for key in fact.keys():
ordered_key = _sort_kwargs_str(key)
if ordered_key == ordered_kwargs:
fact.pop(key)
break
class FakeFile(object):
_read = False
_data = None
def __init__(self, name, data=None):
self._name = name
self._data = data
def read(self, *args, **kwargs):
if self._read is False:
self._read = True
if self._data:
return self._data
else:
return '_test_data_'
return ''
def readlines(self, *args, **kwargs):
if self._read is False:
self._read = True
if self._data:
return self._data.split()
else:
return ['_test_data_']
return []
def seek(self, *args, **kwargs):
pass
def close(self, *args, **kwargs):
pass
def __enter__(self, *args, **kwargs):
return self
def __exit__(self, *args, **kwargs):
pass
class patch_files(object):
def __init__(self, local_files):
directories, files, files_data = self._parse_local_files(local_files)
self._files = files
self._files_data = files_data
self._directories = directories
@staticmethod
def _parse_local_files(local_files, prefix=FakeState.deploy_dir):
files = []
files_data = {}
directories = {}
prefix = path.normpath(prefix)
for filename, file_data in local_files.get('files', {}).items():
filepath = path.join(prefix, filename)
files.append(filepath)
files_data[filepath] = file_data
for dirname, dir_files in local_files.get('dirs', {}).items():
sub_dirname = path.join(prefix, dirname)
sub_directories, sub_files, sub_files_data = patch_files._parse_local_files(
dir_files,
sub_dirname,
)
files.extend(sub_files)
files_data.update(sub_files_data)
directories[sub_dirname] = {
'files': list(dir_files['files'].keys()),
'dirs': list(dir_files['dirs'].keys()),
}
directories.update(sub_directories)
return directories, files, files_data
def __enter__(self):
self.patches = [
patch('pyinfra.operations.files.os_path.exists', self.exists),
patch('pyinfra.operations.files.os_path.isfile', self.isfile),
patch('pyinfra.operations.files.os_path.isdir', self.isdir),
patch('pyinfra.operations.files.walk', self.walk),
patch('pyinfra.operations.files.makedirs', lambda path: True),
patch('pyinfra.api.util.stat', self.stat),
patch('pyinfra.operations.files.open', self.get_file, create=True),
patch('pyinfra.operations.server.open', self.get_file, create=True),
patch('pyinfra.api.util.open', self.get_file, create=True),
]
for patched in self.patches:
patched.start()
def __exit__(self, type_, value, traceback):
for patched in self.patches:
patched.stop()
def get_file(self, filename, *args):
if self.isfile(filename):
normalized_path = path.normpath(filename)
return FakeFile(normalized_path, self._files_data.get(normalized_path))
raise IOError('Missing FakeFile: {0}'.format(filename))
def exists(self, filename, *args):
return self.isfile(filename) or self.isdir(filename)
def isfile(self, filename, *args):
normalized_path = path.normpath(filename)
return normalized_path in self._files
def isdir(self, dirname, *args):
normalized_path = path.normpath(dirname)
return normalized_path in self._directories
def stat(self, pathname):
if self.isfile(pathname):
mode_int = 33188
elif self.isdir(pathname):
mode_int = 16877
else:
raise IOError('No such file or directory: {0}'.format(pathname))
return os.stat_result((mode_int, 0, 0, 0, 0, 0, 0, 0, 0, 0))
def walk(self, dirname, topdown=True, onerror=None, followlinks=False):
if not self.isdir(dirname):
return
normalized_path = path.normpath(dirname)
dir_definition = self._directories[normalized_path]
child_dirs = dir_definition.get('dirs', [])
child_files = dir_definition.get('files', [])
yield dirname, child_dirs, child_files
for child in child_dirs:
full_child = path.join(dirname, child)
for recursive_return in self.walk(full_child, topdown, onerror, followlinks):
yield recursive_return
def create_host(name=None, facts=None, data=None):
real_facts = {}
facts = facts or {}
for name, fact_data in six.iteritems(facts):
real_facts[name] = fact_data
return FakeHost(name, facts=real_facts, data=data)
class JsonTest(type):
def __new__(cls, name, bases, attrs):
files = listdir(attrs['jsontest_files'])
files = [f for f in files if f.endswith('.json')]
test_prefix = attrs.get('jsontest_prefix', 'test_')
def gen_test(test_name, filename):
def test(self):
test_data = json.loads(open(
path.join(attrs['jsontest_files'], filename),
encoding='utf-8',
).read())
self.jsontest_function(test_name, test_data)
return test
for filename in files:
test_name = filename[:-5]
method_name = '{0}{1}'.format(test_prefix, test_name)
attrs[method_name] = gen_test(test_name, filename)
return type.__new__(cls, name, bases, attrs)
| true | true |
f7f9ae1d606adaf47619f3f0c44e0a87b8b3ca94 | 783 | py | Python | tests/_01_docs/_overview_routes_3.py | qedalab/spira | 32e4d2096e298b9fcc5952abd654312dc232a259 | [
"MIT"
] | 10 | 2018-07-13T09:46:21.000Z | 2021-06-22T13:34:50.000Z | tests/_01_docs/_overview_routes_3.py | qedalab/spira | 32e4d2096e298b9fcc5952abd654312dc232a259 | [
"MIT"
] | 8 | 2018-09-09T11:32:40.000Z | 2019-10-08T07:47:31.000Z | tests/_01_docs/_overview_routes_3.py | qedalab/spira | 32e4d2096e298b9fcc5952abd654312dc232a259 | [
"MIT"
] | 7 | 2019-01-17T18:50:17.000Z | 2022-01-13T20:27:52.000Z | import spira.all as spira
class RouteExample(spira.Cell):
    """Example cell routing a Manhattan path between two fixed ports."""

    layer = spira.LayerParameter(default=spira.RDD.PLAYER.M1.METAL, doc='Layer to be used when creating the route object.')

    @spira.cache()
    def get_io_ports(self):
        """Return the two terminal ports of the route (cached)."""
        start = spira.Port(name='P1', midpoint=(0,0), orientation=180, process=self.layer.process)
        end = spira.Port(name='P2', midpoint=(20,10), orientation=0, process=self.layer.process)
        return [start, end]

    def create_elements(self, elems):
        # Route between the cached IO ports on the configured layer.
        io_ports = self.get_io_ports()
        elems += spira.RouteManhattan(ports=io_ports, layer=self.layer)
        return elems

    def create_ports(self, ports):
        # Expose the same IO ports on the cell itself.
        ports += self.get_io_ports()
        return ports
# Build the example cell, preview it, then export it to GDSII.
route_cell = RouteExample()
route_cell.gdsii_view()
route_cell.gdsii_output(file_name='Route')
| 27.964286 | 123 | 0.666667 | import spira.all as spira
class RouteExample(spira.Cell):
layer = spira.LayerParameter(default=spira.RDD.PLAYER.M1.METAL, doc='Layer to be used when creating the route object.')
@spira.cache()
def get_io_ports(self):
p1 = spira.Port(name='P1', midpoint=(0,0), orientation=180, process=self.layer.process)
p2 = spira.Port(name='P2', midpoint=(20,10), orientation=0, process=self.layer.process)
return [p1, p2]
def create_elements(self, elems):
ports = self.get_io_ports()
elems += spira.RouteManhattan(ports=ports, layer=self.layer)
return elems
def create_ports(self, ports):
ports += self.get_io_ports()
return ports
D = RouteExample()
D.gdsii_view()
D.gdsii_output(file_name='Route')
| true | true |
f7f9ae409701d86181a410df9b2590811761cf91 | 5,773 | py | Python | tests/test_pdfpapersaver.py | beyond-content/python-pdf-paper-saver | 48f9e65ab809288cfa63b7c37a090d60b21f7777 | [
"BSD-2-Clause"
] | null | null | null | tests/test_pdfpapersaver.py | beyond-content/python-pdf-paper-saver | 48f9e65ab809288cfa63b7c37a090d60b21f7777 | [
"BSD-2-Clause"
] | null | null | null | tests/test_pdfpapersaver.py | beyond-content/python-pdf-paper-saver | 48f9e65ab809288cfa63b7c37a090d60b21f7777 | [
"BSD-2-Clause"
] | null | null | null | from itertools import chain
from random import randint
from unittest import TestCase
from cStringIO import StringIO
from hamcrest import *
from rect import Rect
from reportlab.lib import pagesizes
from reportlab.lib.colors import black, getAllNamedColors
from reportlab.lib.units import mm
from reportlab.pdfgen.canvas import Canvas
from PyPDF2 import PdfFileWriter, PdfFileReader
from rect.packer import pack
from pdfpapersaver import PDFPagePacker
class ColoredPDFPage(object):
    """Describes a single PDF page with a solid background and a text label."""

    def __init__(self, width, height, background_color=None, text=None, size_unit=mm):
        super(ColoredPDFPage, self).__init__()
        self.size_unit = size_unit
        self.width = width * self.size_unit
        self.height = height * self.size_unit
        self.background_color = background_color
        # Default label is the page's nominal size, e.g. "40 x 60".
        self.text = "%s x %s" % (width, height) if text is None else text
        self._page = None

    @property
    def pagesize(self):
        return self.width, self.height

    @property
    def page(self):
        # Render lazily and memoise the resulting PyPDF2 page object.
        if self._page is None:
            self._page = self.to_page()
        return self._page

    @property
    def pdf_page_width(self):
        return self.page.mediaBox.getWidth()

    @property
    def pdf_page_height(self):
        return self.page.mediaBox.getHeight()

    @classmethod
    def create_randomly_sized_and_colored_page(cls, min_width, max_width, min_height, max_height, extra_text):
        """Create a page with random size within bounds and a random named color."""
        colors_and_names = getAllNamedColors().items()
        width = randint(min_width, max_width)
        height = randint(min_height, max_height)
        _color_name, color = colors_and_names[randint(0, len(colors_and_names) - 1)]
        return cls(width, height, background_color=color, text=extra_text)

    def to_page(self):
        """Render this description into a single-page PyPDF2 page object."""
        pdf_stream = StringIO()
        canvas = Canvas(pdf_stream, pagesize=self.pagesize)
        if self.background_color:
            canvas.setFillColor(self.background_color)
            canvas.rect(0, 0, self.width, self.height, stroke=0, fill=1)
        if self.text:
            canvas.setFillColor(black)
            canvas.drawString(10, 10, self.text)
        canvas.save()
        pdf_stream.seek(0)
        return PdfFileReader(pdf_stream).pages[0]

    def extract_stripped_text(self):
        return self.page.extractText().strip()

    def to_rect(self):
        # Rectangle in PDF units, for use with the rect packer.
        return Rect([self.pdf_page_width, self.pdf_page_height])
class BaseTestCase(TestCase):
    """End-to-end tests for PDFPagePacker using randomly generated pages."""

    def setUp(self):
        super(BaseTestCase, self).setUp()
        self.source_pdf = StringIO()
        self.colored_pages = []
        self.create_randomly_sized_pdf_pages()

    def create_randomly_sized_pdf_pages(self):
        """Write 100 randomly sized/colored pages into ``self.source_pdf``."""
        writer = PdfFileWriter()
        for id in range(0, 100):
            # Cap each page at half of A4, expressed in whole millimetres.
            max_width, max_height = [int(round(x / mm / 2)) for x in pagesizes.A4]
            colored_page = ColoredPDFPage.create_randomly_sized_and_colored_page(
                40, max_width, 40, max_height, extra_text="#%d" % id)
            writer.addPage(colored_page.page)
            self.colored_pages.append(colored_page)
        writer.write(self.source_pdf)

    def test_expected_page_count(self):
        reader = PdfFileReader(self.source_pdf)
        assert_that(reader.numPages, equal_to(100), "Expected page count")

    def test_colored_page_creation_results_in_the_correct_page_sizes_and_size(self):
        min_width, min_height = 50, 50
        max_width, max_height = 100, 200
        colored_page = ColoredPDFPage.create_randomly_sized_and_colored_page(
            min_width, max_width, min_height, max_height, "sometext!!")
        pdf_page_width = colored_page.pdf_page_width
        pdf_page_height = colored_page.pdf_page_height
        # Stored size must match the rendered media box ...
        assert_that(colored_page.width, close_to(float(pdf_page_width), delta=0.001))
        assert_that(colored_page.height, close_to(float(pdf_page_height), delta=0.001))
        # ... and stay within the requested bounds (in points).
        assert_that(pdf_page_height, less_than_or_equal_to(max_height * mm))
        assert_that(pdf_page_width, less_than_or_equal_to(max_width * mm))
        assert_that(pdf_page_height, greater_than_or_equal_to(min_height * mm))
        assert_that(pdf_page_width, greater_than_or_equal_to(min_width * mm))
        found_text = colored_page.extract_stripped_text()
        assert_that(found_text, contains_string("sometext!!"))

    def test_pack_pages(self):
        # Smoke test: packing plain rects onto a small canvas must not raise.
        canvas = (306, 303)
        rects = [Rect([100, 200]), Rect([200, 300])]
        pack(canvas, rects, 3)

    def test_pack_pdf_pages(self):
        packer = PDFPagePacker(self.source_pdf)
        assert_that(packer.page_count, equal_to(100))
        assert_that(len(packer.rects), equal_to(100))
        pages = packer.pack()
        placed_rects = list(chain.from_iterable(pages))
        assert_that(len(placed_rects), equal_to(100))
        # Every placed rect must retain its source page's dimensions.
        for r in placed_rects:
            assert_that(r.width, close_to(float(r.page.mediaBox.getWidth()), delta=0.001))
            assert_that(r.height, close_to(float(r.page.mediaBox.getHeight()), delta=0.001))
        packed_file = StringIO()
        packer.get_packed_file(packed_file)
        reader = PdfFileReader(packed_file)
        # BUG FIX: the original called assert_that(has_length(pages), r.numPages),
        # swapping matcher and actual value; compare the page counts directly.
        assert_that(reader.numPages, equal_to(len(pages)))
        # BUG FIX: the original wrote to a hard-coded path on one developer's
        # desktop; use a temporary file so the test is portable.
        import os
        import tempfile
        fd, path = tempfile.mkstemp(suffix=".pdf")
        try:
            with os.fdopen(fd, "wb") as f:
                packer.get_packed_file(f)
        finally:
            os.remove(path)
        # BUG FIX: removed unreachable statements that followed an
        # unconditional `return` in the original.
| 37.980263 | 110 | 0.642127 | from itertools import chain
from random import randint
from unittest import TestCase
from cStringIO import StringIO
from hamcrest import *
from rect import Rect
from reportlab.lib import pagesizes
from reportlab.lib.colors import black, getAllNamedColors
from reportlab.lib.units import mm
from reportlab.pdfgen.canvas import Canvas
from PyPDF2 import PdfFileWriter, PdfFileReader
from rect.packer import pack
from pdfpapersaver import PDFPagePacker
class ColoredPDFPage(object):
    """A single solid-colored, labelled PDF page used as a test fixture."""

    def __init__(self, width, height, background_color=None, text=None, size_unit=mm):
        super(ColoredPDFPage, self).__init__()
        self.size_unit = size_unit
        self.width = width * self.size_unit
        self.height = height * self.size_unit
        self.background_color = background_color
        # Default label is the nominal size, e.g. "40 x 120".
        self.text = "%s x %s" % (width, height) if text is None else text
        self._page = None  # lazily rendered PyPDF2 page, see `page`

    @property
    def pagesize(self):
        # (width, height) in points, as expected by reportlab's Canvas.
        return self.width, self.height

    @property
    def page(self):
        # Rendered PyPDF2 page; built on first access and cached.
        self._page = self._page or self.to_page()
        return self._page

    @property
    def pdf_page_width(self):
        return self.page.mediaBox.getWidth()

    @property
    def pdf_page_height(self):
        return self.page.mediaBox.getHeight()

    @classmethod
    def create_randomly_sized_and_colored_page(cls, min_width, max_width, min_height, max_height, extra_text):
        """Build a page with a random size within bounds and a random named color."""
        colors_and_names = getAllNamedColors().items()
        width = randint(min_width, max_width)
        height = randint(min_height, max_height)
        color_name, color = colors_and_names[randint(0, len(colors_and_names) - 1)]
        text = extra_text
        return cls(width, height, background_color=color, text=text)

    def to_page(self):
        """Render this page with reportlab and return it as a PyPDF2 page."""
        stream = StringIO()
        c = Canvas(stream, pagesize=self.pagesize)
        if self.background_color:
            # Fill the whole page with the background color (no outline).
            c.setFillColor(self.background_color)
            c.rect(0, 0, self.width, self.height, stroke=0, fill=1)
        if self.text:
            c.setFillColor(black)
            c.drawString(10, 10, self.text)
        c.save()
        stream.seek(0)
        return PdfFileReader(stream).pages[0]

    def extract_stripped_text(self):
        """Text content of the rendered page, surrounding whitespace removed."""
        return self.page.extractText().strip()

    def to_rect(self):
        """This page's media-box dimensions as a packer Rect."""
        return Rect([self.pdf_page_width, self.pdf_page_height])
class BaseTestCase(TestCase):
    """End-to-end tests for PDFPagePacker using randomly generated pages."""

    def setUp(self):
        super(BaseTestCase, self).setUp()
        self.source_pdf = StringIO()
        self.colored_pages = []
        self.create_randomly_sized_pdf_pages()

    def create_randomly_sized_pdf_pages(self):
        """Write 100 randomly sized/colored pages into ``self.source_pdf``."""
        writer = PdfFileWriter()
        for id in range(0, 100):
            # Cap each page at half of A4, expressed in whole millimetres.
            max_width, max_height = [int(round(x / mm / 2)) for x in pagesizes.A4]
            colored_page = ColoredPDFPage.create_randomly_sized_and_colored_page(40, max_width,
                                                                                 40, max_height,
                                                                                 extra_text="#%d" % id,
                                                                                 )
            writer.addPage(colored_page.page)
            self.colored_pages.append(colored_page)
        writer.write(self.source_pdf)

    def test_expected_page_count(self):
        reader = PdfFileReader(self.source_pdf)
        assert_that(reader.numPages, equal_to(100), "Expected page count")

    def test_colored_page_creation_results_in_the_correct_page_sizes_and_size(self):
        min_width, min_height = 50, 50
        max_width, max_height = 100, 200
        colored_page = ColoredPDFPage.create_randomly_sized_and_colored_page(min_width, max_width,
                                                                             min_height, max_height,
                                                                             "sometext!!")
        pdf_page_width = colored_page.pdf_page_width
        pdf_page_height = colored_page.pdf_page_height
        # Stored size must match the rendered media box and stay within bounds.
        assert_that(colored_page.width, close_to(float(pdf_page_width), delta=0.001))
        assert_that(colored_page.height, close_to(float(pdf_page_height), delta=0.001))
        assert_that(pdf_page_height, less_than_or_equal_to(max_height * mm))
        assert_that(pdf_page_width, less_than_or_equal_to(max_width * mm))
        assert_that(pdf_page_height, greater_than_or_equal_to(min_height * mm))
        assert_that(pdf_page_width, greater_than_or_equal_to(min_width * mm))
        found_text = colored_page.extract_stripped_text()
        assert_that(found_text, contains_string("sometext!!"))

    def test_pack_pages(self):
        # Smoke test: packing plain rects onto a small canvas must not raise.
        canvas = (306, 303)
        rects = [Rect([100, 200]), Rect([200, 300])]
        pack(canvas, rects, 3)

    def test_pack_pdf_pages(self):
        packer = PDFPagePacker(self.source_pdf)
        assert_that(packer.page_count, equal_to(100))
        assert_that(len(packer.rects), equal_to(100))
        pages = packer.pack()
        placed_rects = list(chain.from_iterable(pages))
        rect_count = len(placed_rects)
        assert_that(rect_count, equal_to(100))
        for r in placed_rects:
            assert_that(r.width, close_to(float(r.page.mediaBox.getWidth()), delta=0.001))
            assert_that(r.height, close_to(float(r.page.mediaBox.getHeight()), delta=0.001))
        packed_file = StringIO()
        packer.get_packed_file(packed_file)
        r = PdfFileReader(packed_file)
        # NOTE(review): matcher and actual value look swapped here -- should
        # probably be assert_that(r.numPages, equal_to(len(pages))).
        assert_that(has_length(pages), r.numPages)
        # NOTE(review): hard-coded developer-machine path; this will fail on
        # any other machine -- should use tempfile.
        f = file("/Users/jp/Desktop/mypdf_processed.pdf", "wb")
        packer.get_packed_file(f)
        f.close()
        return
        # NOTE(review): everything below the `return` above is unreachable.
        rects = [page.to_rect() for page in self.colored_pages]
        canvas = pagesizes.A4
        pack(canvas, rects, 1)
| true | true |
f7f9af65dfa00d71a2d381178aa540a3b00ffc98 | 4,109 | py | Python | src/flask_easy/repository/mongo_repository.py | Josephmaclean/flask-easy | 64cb647b0dbcd031cb8d27cc60889e50c959e1ca | [
"MIT"
] | 1 | 2021-12-30T12:25:05.000Z | 2021-12-30T12:25:05.000Z | src/flask_easy/repository/mongo_repository.py | Josephmaclean/flask-easy | 64cb647b0dbcd031cb8d27cc60889e50c959e1ca | [
"MIT"
] | null | null | null | src/flask_easy/repository/mongo_repository.py | Josephmaclean/flask-easy | 64cb647b0dbcd031cb8d27cc60889e50c959e1ca | [
"MIT"
] | null | null | null | """
mongo_repository.py
Author: Joseph Maclean Arhin
"""
import typing as t
import mongoengine as me
from ..exc import OperationError, NotFoundException
from .repository_interface import RepositoryInterface
class MongoRepository(RepositoryInterface):
    """
    MongoRepository to be inherited.

    Subclasses set ``model`` to a mongoengine Document class; every mongoengine
    OperationError is translated into the package-level OperationError, and
    missing documents raise NotFoundException.
    """

    # mongoengine Document class the repository operates on; set by subclasses.
    model: t.Type[me.Document]

    @classmethod
    def index(cls) -> t.List[me.Document]:
        """
        gets all documents in a mongodb collection
        :return: list of mongodb documents
        """
        try:
            return cls.model.objects()
        except me.OperationError as error:
            raise OperationError([error.args[0]]) from error

    @classmethod
    def create(cls, data: dict) -> t.Type[me.Document]:
        """
        creates a mongodb document with the data passed to it
        :param data: data to persist in the database
        :return: mongodb document
        """
        try:
            db_obj = cls.model(**data)
            db_obj.save()
            return db_obj
        except me.OperationError as error:
            raise OperationError([error.args[0]]) from error

    @classmethod
    def create_all(cls, data: t.List[dict]) -> t.List[t.Type[me.Document]]:
        """
        bulk-inserts one document per dict in ``data``
        :param data: list of dicts, one per document to create
        :return: list of the inserted documents
        """
        try:
            obj_data = [cls.model(**item) for item in data]
            return cls.model.objects.insert(obj_data)
        except me.OperationError as error:
            raise OperationError([error.args[0]]) from error

    @classmethod
    def update_by_id(cls, obj_id: t.Union[int, str], data: dict) -> t.Type[me.Document]:
        """
        updates the document matching ``obj_id`` with ``data``
        :param obj_id: primary key of the document to update
        :param data: fields to modify
        :return: the updated document
        :raises NotFoundException: if no document matches ``obj_id``
        """
        try:
            db_obj = cls.find_by_id(obj_id)
            db_obj.modify(**data)
            return db_obj
        except me.OperationError as error:
            raise OperationError([error.args[0]]) from error

    @classmethod
    def find(cls, query_params: dict) -> t.Type[me.Document]:
        """
        returns an item that satisfies the data passed to it if it exists in
        the database
        :param query_params: {dict}
        :return: model_object - Returns an instance object of the model passed
        :raises NotFoundException: if no document matches ``query_params``
        """
        try:
            db_obj = cls.model.objects.get(**query_params)
            return db_obj
        except me.DoesNotExist as error:
            raise NotFoundException({"error": "Resource does not exist"}) from error
        except me.OperationError as error:
            raise OperationError([error.args[0]]) from error

    @classmethod
    def find_all(cls, query_params: dict) -> t.List[t.Type[me.Document]]:
        """
        returns all items that satisfy the filter query_params passed to it
        :param query_params: query parameters to filter by
        :return: model_object - Returns an instance object of the model passed
        """
        try:
            db_obj = cls.model.objects(**query_params)
            return db_obj
        except me.OperationError as error:
            raise OperationError([error.args[0]]) from error

    @classmethod
    def find_by_id(cls, obj_id: t.Union[int, str]) -> t.Type[me.Document]:
        """
        returns the document whose primary key equals ``obj_id``
        :param obj_id: primary key to look up
        :return: the matching document
        :raises NotFoundException: if no document matches ``obj_id``
        """
        try:
            db_obj = cls.model.objects.get(pk=obj_id)
            return db_obj
        except me.DoesNotExist as error:
            raise NotFoundException(
                {"error": f"Resource of id {obj_id} does not exist"}
            ) from error
        except me.OperationError as error:
            raise OperationError([error.args[0]]) from error

    @classmethod
    def delete(cls, obj_id: t.Union[int, str]) -> bool:
        """
        delete an object matching the id
        :param obj_id: id of object to be deleted
        :return: True when the document was deleted
        :raises NotFoundException: if no document matches ``obj_id``
        """
        try:
            db_obj = cls.model.objects.get(pk=obj_id)
            db_obj.delete()
            return True
        except me.DoesNotExist as error:
            raise NotFoundException(
                {"error": f"Resource of id {obj_id} does not exist"}
            ) from error
        except me.OperationError as error:
            raise OperationError([error.args[0]]) from error
| 31.366412 | 88 | 0.595765 |
import typing as t
import mongoengine as me
from ..exc import OperationError, NotFoundException
from .repository_interface import RepositoryInterface
class MongoRepository(RepositoryInterface):
    """Repository over a mongoengine Document class set via ``model``.

    mongoengine OperationError is re-raised as the package OperationError;
    lookups that find nothing raise NotFoundException.
    """

    # mongoengine Document class the repository operates on; set by subclasses.
    model: t.Type[me.Document]

    @classmethod
    def index(cls) -> t.List[me.Document]:
        """Return every document in the collection."""
        try:
            return cls.model.objects()
        except me.OperationError as error:
            raise OperationError([error.args[0]]) from error

    @classmethod
    def create(cls, data: dict) -> t.Type[me.Document]:
        """Create and save a single document from ``data``."""
        try:
            db_obj = cls.model(**data)
            db_obj.save()
            return db_obj
        except me.OperationError as error:
            raise OperationError([error.args[0]]) from error

    @classmethod
    def create_all(cls, data: t.List[dict]) -> t.List[t.Type[me.Document]]:
        """Bulk-insert one document per dict in ``data``."""
        try:
            obj_data = [cls.model(**item) for item in data]
            return cls.model.objects.insert(obj_data)
        except me.OperationError as error:
            raise OperationError([error.args[0]]) from error

    @classmethod
    def update_by_id(cls, obj_id: t.Union[int, str], data: dict) -> t.Type[me.Document]:
        """Modify the document matching ``obj_id`` with ``data``."""
        try:
            db_obj = cls.find_by_id(obj_id)
            db_obj.modify(**data)
            return db_obj
        except me.OperationError as error:
            raise OperationError([error.args[0]]) from error

    @classmethod
    def find(cls, query_params: dict) -> t.Type[me.Document]:
        """Return the single document matching ``query_params``."""
        try:
            db_obj = cls.model.objects.get(**query_params)
            return db_obj
        except me.DoesNotExist as error:
            raise NotFoundException({"error": "Resource does not exist"}) from error
        except me.OperationError as error:
            raise OperationError([error.args[0]]) from error

    @classmethod
    def find_all(cls, query_params: dict) -> t.List[t.Type[me.Document]]:
        """Return all documents matching ``query_params``."""
        try:
            db_obj = cls.model.objects(**query_params)
            return db_obj
        except me.OperationError as error:
            raise OperationError([error.args[0]]) from error

    @classmethod
    def find_by_id(cls, obj_id: t.Union[int, str]) -> t.Type[me.Document]:
        """Return the document whose primary key equals ``obj_id``."""
        try:
            db_obj = cls.model.objects.get(pk=obj_id)
            return db_obj
        except me.DoesNotExist as error:
            raise NotFoundException(
                {"error": f"Resource of id {obj_id} does not exist"}
            ) from error
        except me.OperationError as error:
            raise OperationError([error.args[0]]) from error

    @classmethod
    def delete(cls, obj_id: t.Union[int, str]) -> bool:
        """Delete the document matching ``obj_id``; True on success."""
        try:
            db_obj = cls.model.objects.get(pk=obj_id)
            db_obj.delete()
            return True
        except me.DoesNotExist as error:
            raise NotFoundException(
                {"error": f"Resource of id {obj_id} does not exist"}
            ) from error
        except me.OperationError as error:
            raise OperationError([error.args[0]]) from error
| true | true |
f7f9afbf6ffa62516cd10da080e06b707285fb60 | 6,659 | py | Python | simulated_annealing.py | Epsilon-Infinity/Self-driving-rides | 1ecbc05b6a51509d3f193260a131deca3383957f | [
"MIT"
] | null | null | null | simulated_annealing.py | Epsilon-Infinity/Self-driving-rides | 1ecbc05b6a51509d3f193260a131deca3383957f | [
"MIT"
] | null | null | null | simulated_annealing.py | Epsilon-Infinity/Self-driving-rides | 1ecbc05b6a51509d3f193260a131deca3383957f | [
"MIT"
] | null | null | null | from random import randint
import random
import math
from copy import deepcopy
from ride import Ride
from simulator import Simulator
# from main import solver
from collections import defaultdict
from vehicle import Vehicle
from timer import Timer
def solver(inputs):
    """Greedy assignment of rides to vehicles.

    Rides are sorted (via Ride's ordering) and each ride is given to the
    vehicle whose ``score_if_assigned`` result is best: highest first tuple
    element, with the lower second element breaking ties.

    :param inputs: problem dict with 'rides_list', 'vehicles' and 'bonus'.
    :return: dict mapping vehicle id to its list of assigned ride indices.
    """
    rides_list = inputs['rides_list']
    rides_list = sorted([Ride(ride_info, i) for i, ride_info in enumerate(rides_list)])
    # BUG FIX: removed a dead `sol = defaultdict(list)` assignment -- it was
    # unconditionally overwritten just before the return.
    timers = [Timer() for _ in range(inputs["vehicles"])]
    vehicles = [Vehicle(i, timers[i], inputs['bonus']) for i in range(inputs['vehicles'])]
    for ride in rides_list:
        best, vehicle = None, None
        for v in vehicles:
            cur = v.score_if_assigned(ride)
            # Higher score wins; equal scores are broken by the lower cur[1].
            if (not vehicle) or (cur[0] > best[0] or cur[0] == best[0] and cur[1] < best[1]):
                best, vehicle = cur, v
        vehicle.assign(ride)
    sol = {v.id: v.get_idx() for v in vehicles}
    return sol
class SimulatedAnnealing:
    """Simulated annealing over ride-to-vehicle assignments.

    Starts from the greedy solver's solution and repeatedly applies a random
    mutation (delete / swap / add a ride). Better candidates are always kept;
    worse ones are accepted with probability exp(-delta / T), where T cools
    by ``temp_update`` each time a worse candidate is accepted.
    """

    def __init__(self, data, T=100, n_iter=1000, temp_update=.9):
        """
        :param data: problem description ('rides', 'vehicles', 'rides_list', ...).
        :param T: initial temperature.
        :param n_iter: number of annealing iterations in fit().
        :param temp_update: multiplicative cooling factor.
        """
        self.data = data
        self.rides = dict()
        self.T = T
        self.n_iter = n_iter
        self.cur_score = 0
        self.temp_update = temp_update
        for ride in range(data["rides"]):
            self.rides[ride] = Ride(data["rides_list"][ride], ride)
        self.solution = self.get_greedy_solution()
        simulator = Simulator(self.solution, self.data)
        self.cur_score = simulator.start()

    def fit(self):
        """Run the annealing loop and return the final solution."""
        for iteration in range(self.n_iter):
            candidate_solution, changes = self.get_random_solution(self.solution)
            simulator = Simulator(candidate_solution, self.data)
            score = simulator.start()
            if score > self.cur_score:
                self.solution = candidate_solution
                self.cur_score = score
                print("update score :", score)
            elif score < self.cur_score:
                # Metropolis criterion: accept a worse candidate with
                # probability exp(-(old - new) / T).
                prop_acceptance = math.exp(-(self.cur_score - score) / self.T)
                print("Acceptance probability ", prop_acceptance)
                accept = random.choices([True, False], [prop_acceptance, 1 - prop_acceptance])[0]
                if accept:
                    self.solution = candidate_solution
                    self.cur_score = score
                    print("accept score :", score)
                    self.T = self.temp_update * self.T
                else:
                    # Rejected: undo the in-place mutation.
                    self.revert_changes(changes)
        return self.solution

    def get_greedy_solution(self):
        """Initial solution produced by the greedy solver."""
        return solver(self.data)

    def get_random_solution(self, cur_solution=None):
        """Return (candidate, changes).

        With ``cur_solution`` None, build a fresh round-robin split of all
        rides and return changes=None. Otherwise mutate ``cur_solution``
        IN PLACE with one random action and return a changes tuple that
        revert_changes() can undo.
        """
        if cur_solution is None:
            solution = dict()
            rides = self.rides.keys()
            for vehicle in range(self.data["vehicles"]):
                solution[vehicle] = []
                count = len(rides) // self.data["vehicles"]
                for ride in rides:
                    solution[vehicle].append(ride)
                    count -= 1
                    if count == 0:
                        # Remove this vehicle's rides from the remaining pool.
                        rides = rides.__sub__(set(solution[vehicle]))
                        break
            return solution, None
        else:
            actions = ["delete", "swap", "add"]
            # Bias toward "add" when few rides are selected and toward
            # "delete" when most are; "swap" takes the remaining mass.
            count_selected = sum([len(rides) for v, rides in cur_solution.items()])
            p_delete = 0.1 * (count_selected - 1) / len(self.rides)
            p_add = 0.1 * (1 - count_selected / len(self.rides))
            probs = [p_delete, 1 - p_delete - p_add, p_add]
            action = random.choices(actions, probs)[0]
            if action == "delete":
                # Drop a random ride from a random non-empty vehicle.
                v = random.randint(0, self.data["vehicles"] - 1)
                while len(cur_solution[v]) == 0:
                    v = random.randint(0, self.data["vehicles"] - 1)
                to_remove = random.randint(0, len(cur_solution[v]) - 1)
                removed_ride = cur_solution[v].pop(to_remove)
                changes = ("delete", v, to_remove, removed_ride)
                return cur_solution, changes
            elif action == "add":
                # Insert a not-yet-selected ride at a random position.
                rides = self.rides.keys()
                selected_rides = set()
                for v, r in cur_solution.items():
                    selected_rides.update(set(r))
                remaining = list(rides.__sub__(selected_rides))
                to_add = random.choices(remaining)[0]
                vehicle = random.randint(0, self.data["vehicles"] - 1)
                if len(cur_solution[vehicle]) >= 1:
                    index = random.randint(0, len(cur_solution[vehicle]) - 1)
                else:
                    index = 0
                changes = ("add", vehicle, index)
                cur_solution[vehicle].insert(index, to_add)
                return cur_solution, changes
            else:  # action is swap
                # Exchange two rides between random non-empty vehicles.
                from_v = random.randint(0, self.data["vehicles"] - 1)
                while len(cur_solution[from_v]) == 0:
                    from_v = random.randint(0, self.data["vehicles"] - 1)
                to_v = random.randint(0, self.data["vehicles"] - 1)
                while len(cur_solution[to_v]) == 0:
                    to_v = random.randint(0, self.data["vehicles"] - 1)
                from_index = random.randint(0, len(cur_solution[from_v]) - 1)
                to_index = random.randint(0, len(cur_solution[to_v]) - 1)
                cur_solution[from_v][from_index], cur_solution[to_v][to_index] = \
                    cur_solution[to_v][to_index], cur_solution[from_v][from_index]
                changes = ("swap", from_v, from_index, to_v, to_index)
                return cur_solution, changes

    def revert_changes(self, changes):
        """Undo a mutation described by the tuple get_random_solution returned."""
        if changes[0] == "add":
            self.solution[changes[1]].pop(changes[2])
        elif changes[0] == "delete":
            self.solution[changes[1]].insert(changes[2], changes[3])
        else:
            self.solution[changes[1]][changes[2]], self.solution[changes[3]][changes[4]] = \
                self.solution[changes[3]][changes[4]], self.solution[changes[1]][changes[2]]

    def write(self, output_file="solution.out"):
        """Write the solution, one line per vehicle, to ``output_file``.

        BUG FIX: the original called open() without mode "w" (read-only),
        so every write raised; it also never closed the handle.
        """
        with open(output_file, "w") as f:
            for v, r in self.solution.items():
                f.write(str(v + 1) + " ")
                f.write(" ".join([str(ride) for ride in r]) + "\n")
| 41.880503 | 113 | 0.54768 | from random import randint
import random
import math
from copy import deepcopy
from ride import Ride
from simulator import Simulator
from collections import defaultdict
from vehicle import Vehicle
from timer import Timer
def solver(inputs):
    """Greedily assign each ride (sorted) to the vehicle with the best
    score_if_assigned() result; returns {vehicle id: assigned ride indices}."""
    rides_list = inputs['rides_list']
    rides_list = sorted([Ride(ride_info, i) for i, ride_info in enumerate(rides_list)])
    # NOTE(review): this assignment is dead -- `sol` is rebuilt from scratch
    # just before the return below.
    sol = defaultdict(list)
    timers = [Timer() for t in range(inputs["vehicles"])]
    vehicles = [Vehicle(i, timers[i], inputs['bonus']) for i in range(inputs['vehicles'])]
    for ride in rides_list:
        best, vehicle = None, None
        for v in vehicles:
            cur = v.score_if_assigned(ride)
            # Higher score wins; equal scores are broken by the lower cur[1].
            if (not vehicle) or (cur[0] > best[0] or cur[0] == best[0] and cur[1] < best[1]):
                best, vehicle = cur, v
        vehicle.assign(ride)
    sol = {v.id: v.get_idx() for v in vehicles}
    return sol
class SimulatedAnnealing:
    """Simulated annealing over ride-to-vehicle assignments: start from the
    greedy solution, mutate randomly (delete/swap/add), keep improvements,
    accept worse candidates with probability exp(-delta / T)."""

    def __init__(self, data, T=100, n_iter=1000, temp_update=.9):
        self.data = data
        self.rides = dict()
        self.T = T                        # current temperature
        self.n_iter = n_iter              # annealing iterations in fit()
        self.cur_score = 0
        self.temp_update = temp_update    # multiplicative cooling factor
        for ride in range(data["rides"]):
            self.rides[ride] = Ride(data["rides_list"][ride], ride)
        self.solution = self.get_greedy_solution()
        simulator = Simulator(self.solution, self.data)
        self.cur_score = simulator.start()

    def fit(self):
        """Run the annealing loop and return the final solution."""
        for iteration in range(self.n_iter):
            candidate_solution, changes = self.get_random_solution(self.solution)
            simulator = Simulator(candidate_solution, self.data)
            score = simulator.start()
            if score > self.cur_score:
                self.solution = candidate_solution
                self.cur_score = score
                print("update score :", score)
            elif score < self.cur_score:
                # Metropolis criterion: accept a worse candidate with
                # probability exp(-(old - new) / T).
                prop_acceptance = math.exp(-(self.cur_score - score) / self.T)
                print("Acceptance probability ", prop_acceptance)
                accept = random.choices([True, False], [prop_acceptance, 1 - prop_acceptance])[0]
                if accept:
                    self.solution = candidate_solution
                    self.cur_score = score
                    print("accept score :", score)
                    # NOTE(review): T cools only when a worse candidate is
                    # accepted -- confirm this is the intended schedule.
                    self.T = self.temp_update * self.T
                else:
                    self.revert_changes(changes)
        return self.solution

    def get_greedy_solution(self):
        """Initial solution produced by the greedy solver."""
        return solver(self.data)

    def get_random_solution(self, cur_solution=None):
        """Return (candidate, changes); mutates ``cur_solution`` IN PLACE when
        it is given, returning a changes tuple revert_changes() can undo."""
        if cur_solution is None:
            # Fresh round-robin split of all rides across vehicles.
            solution = dict()
            rides = self.rides.keys()
            for vehicle in range(self.data["vehicles"]):
                solution[vehicle] = []
                count = len(rides) // self.data["vehicles"]
                for ride in rides:
                    solution[vehicle].append(ride)
                    count -= 1
                    if count == 0:
                        rides = rides.__sub__(set(solution[vehicle]))
                        break
            return solution, None
        else:
            actions = ["delete", "swap", "add"]
            # Bias toward "add" when few rides are selected, toward "delete"
            # when most are; "swap" takes the remaining probability mass.
            count_selected = sum([len(rides) for v, rides in cur_solution.items()])
            p_delete = 0.1 * (count_selected - 1) / len(self.rides)
            p_add = 0.1 * (1 - count_selected / len(self.rides))
            probs = [p_delete, 1 - p_delete - p_add, p_add]
            action = random.choices(actions, probs)[0]
            if action == "delete":
                # Drop a random ride from a random non-empty vehicle.
                v = random.randint(0, self.data["vehicles"] - 1)
                while len(cur_solution[v]) == 0:
                    v = random.randint(0, self.data["vehicles"] - 1)
                to_remove = random.randint(0, len(cur_solution[v]) - 1)
                removed_ride = cur_solution[v].pop(to_remove)
                changes = ("delete", v, to_remove, removed_ride)
                return cur_solution, changes
            elif action == "add":
                # Insert a not-yet-selected ride at a random position.
                rides = self.rides.keys()
                selected_rides = set()
                for v, r in cur_solution.items():
                    selected_rides.update(set(r))
                remaining = list(rides.__sub__(selected_rides))
                to_add = random.choices(remaining)[0]
                vehicle = random.randint(0, self.data["vehicles"] - 1)
                if len(cur_solution[vehicle]) >= 1:
                    index = random.randint(0, len(cur_solution[vehicle]) - 1)
                else:
                    index = 0
                changes = ("add", vehicle, index)
                cur_solution[vehicle].insert(index, to_add)
                return cur_solution, changes
            else:
                # Swap two rides between random non-empty vehicles.
                from_v = random.randint(0, self.data["vehicles"] - 1)
                while len(cur_solution[from_v]) == 0:
                    from_v = random.randint(0, self.data["vehicles"] - 1)
                to_v = random.randint(0, self.data["vehicles"] - 1)
                while len(cur_solution[to_v]) == 0:
                    to_v = random.randint(0, self.data["vehicles"] - 1)
                from_index = random.randint(0, len(cur_solution[from_v]) - 1)
                to_index = random.randint(0, len(cur_solution[to_v]) - 1)
                cur_solution[from_v][from_index], cur_solution[to_v][to_index] = cur_solution[to_v][to_index], \
                    cur_solution[from_v][from_index]
                changes = ("swap", from_v, from_index, to_v, to_index)
                return cur_solution, changes

    def revert_changes(self, changes):
        """Undo a mutation described by the tuple get_random_solution returned."""
        if changes[0] == "add":
            self.solution[changes[1]].pop(changes[2])
        elif changes[0] == "delete":
            self.solution[changes[1]].insert(changes[2], changes[3])
        else:
            self.solution[changes[1]][changes[2]], self.solution[changes[3]][changes[4]] = \
                self.solution[changes[3]][changes[4]], self.solution[changes[1]][changes[2]]

    def write(self, output_file="solution.out"):
        """Write the solution, one line per vehicle, to ``output_file``."""
        # NOTE(review): open() is called without mode "w", so the file is
        # opened read-only and every write below will raise; the handle is
        # also never closed. Should be `with open(output_file, "w") as f:`.
        f = open(output_file)
        for v, r in self.solution.items():
            f.write(str(v + 1) + " ")
            f.write(" ".join([str(ride) for ride in r]) + "\n")
f7f9b06a44dc85ff43bc120d85ebe44a12eb08f2 | 12,953 | py | Python | var/spack/repos/builtin/packages/dray/package.py | HigherOrderMethods/spack | 87ed3fcc59fc25ce250042338d082925e3a3610b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2020-11-26T12:31:07.000Z | 2020-11-26T12:31:07.000Z | var/spack/repos/builtin/packages/dray/package.py | HigherOrderMethods/spack | 87ed3fcc59fc25ce250042338d082925e3a3610b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 15 | 2021-04-14T12:34:46.000Z | 2022-03-02T19:08:00.000Z | var/spack/repos/builtin/packages/dray/package.py | mahermanns/spack | 849943c63d1948c65f75cb8164e96ed361ca7a7f | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
import socket
import llnl.util.tty as tty
def cmake_cache_entry(name, value, vtype=None):
    """
    Helper that creates CMake cache entry strings used in
    'host-config' files.
    """
    if vtype is None:
        # ON/OFF values are booleans; anything else is treated as a path.
        vtype = "BOOL" if value in ("ON", "OFF") else "PATH"
    return 'set({0} "{1}" CACHE {2} "")\n\n'.format(name, value, vtype)
class Dray(Package, CudaPackage):
"""High-Order Mesh Ray Tracer."""
homepage = "https://github.com/LLNL/devil_ray"
git = "https://github.com/LLNL/devil_ray.git"
url = "https://github.com/LLNL/devil_ray/releases/download/v0.1.2/dray-v0.1.2.tar.gz"
maintainers = ['mclarsen', 'cyrush']
version('develop', branch='develop', submodules='True')
version('0.1.6', sha256='43f39039599e3493cbbaeaf5621b611bef301ff504bed6e32c98f30bb2179e92')
version('0.1.5', sha256='aaf0975561a8e7910b9353e2dc30bd78abf9f01c306ec042422b7da223d3a8b8')
version('0.1.4', sha256='e763a3aa537b23486a4788f9d68db0a3eb545f6a2e617cd7c8a876682ca2d0a0')
version('0.1.3', sha256='b2f624a072463189997343b1ed911cc34c9bb1b6c7f0c3e48efeb40c05dd0d92')
version('0.1.2', sha256='46937f20124b28dc78a634e8e063a3e7a3bbfd9f424ce2680b08417010c376da')
version('0.1.1', sha256='e5daa49ee3367c087f5028dc5a08655298beb318014c6f3f65ef4a08fcbe346c')
version('0.1.0', sha256='8b341138e1069361351e0a94478608c5af479cca76e2f97d556229aed45c0169')
variant('cuda', default=False, description='Build with CUDA backend')
variant('openmp', default=True, description='Build OpenMP backend')
variant("shared", default=True, description="Build as shared libs")
variant("test", default=True, description='Build unit tests')
variant("utils", default=True, description='Build utilities')
variant("logging", default=False, description='Enable logging')
variant("stats", default=False, description='Enable stats')
variant("mpi", default=True, description='Enable MPI compiler')
depends_on('cuda', when='+cuda')
depends_on('mpi', when='+mpi')
depends_on('cmake@3.9:', type='build')
depends_on('cmake@3.14:', when='+cuda', type='build')
depends_on("conduit~shared", when="~shared")
depends_on("conduit+shared", when="+shared")
depends_on("apcomp~shared+openmp+mpi", when="~shared+openmp+mpi")
depends_on("apcomp+shared+openmp+mpi", when="+shared+openmp+mpi")
depends_on("apcomp~shared~openmp+mpi", when="~shared~openmp+mpi")
depends_on("apcomp+shared~openmp+mpi", when="+shared~openmp+mpi")
depends_on("apcomp~shared+openmp~mpi", when="~shared+openmp~mpi")
depends_on("apcomp+shared+openmp~mpi", when="+shared+openmp~mpi")
depends_on("apcomp~shared~openmp~mpi", when="~shared~openmp~mpi")
depends_on("apcomp+shared~openmp~mpi", when="+shared~openmp~mpi")
depends_on("raja+cuda~openmp+shared", when="+cuda~openmp+shared")
depends_on("raja+cuda+openmp+shared", when="+cuda+openmp+shared")
depends_on("raja+cuda~openmp~shared", when="+cuda~openmp~shared")
depends_on("raja+cuda+openmp~shared", when="+cuda+openmp~shared")
depends_on("raja~cuda~openmp+shared", when="~cuda~openmp+shared")
depends_on("raja~cuda+openmp+shared", when="~cuda+openmp+shared")
depends_on("raja~cuda~openmp~shared", when="~cuda~openmp~shared")
depends_on("raja~cuda+openmp~shared", when="~cuda+openmp~shared")
depends_on("umpire+cuda+shared", when="+cuda+shared")
depends_on("umpire+cuda~shared", when="+cuda~shared")
depends_on("umpire~cuda+shared", when="~cuda+shared")
depends_on("umpire~cuda~shared", when="~cuda~shared")
depends_on("mfem+shared+conduit~threadsafe", when="+shared")
depends_on("mfem~shared+conduit~threadsafe", when="~shared")
    def setup_build_environment(self, env):
        # Make `make test` print a failing test's full output instead of
        # just the CTest summary line.
        env.set('CTEST_OUTPUT_ON_FAILURE', '1')
    def install(self, spec, prefix):
        """
        Build and install Devil Ray.

        Generates a CMake 'host-config' cache file, configures and builds in
        ./spack-build, optionally runs the unit tests, installs, and keeps a
        copy of the host-config alongside the install for provenance.

        :param spec: the concretized spec being installed
        :param prefix: installation prefix
        """
        with working_dir('spack-build', create=True):
            host_cfg_fname = self.create_host_config(spec,
                                                     prefix)
            cmake_args = []
            # if we have a static build, we need to avoid any of
            # spack's default cmake settings related to rpaths
            # (see: https://github.com/LLNL/spack/issues/2658)
            if "+shared" in spec:
                cmake_args.extend(std_cmake_args)
            else:
                for arg in std_cmake_args:
                    if arg.count("RPATH") == 0:
                        cmake_args.append(arg)
            # Seed the CMake cache from the generated host-config (-C).
            cmake_args.extend(["-C", host_cfg_fname, "../src"])
            print("Configuring Devil Ray...")
            cmake(*cmake_args)
            print("Building Devil Ray...")
            make()
            # run unit tests if requested
            if "+test" in spec and self.run_tests:
                print("Running Devil Ray Unit Tests...")
                make("test")
            print("Installing Devil Ray...")
            make("install")
            # install copy of host config for provenance
            install(host_cfg_fname, prefix)
    def create_host_config(self, spec, prefix):
        """
        This method creates a 'host-config' file that specifies
        all of the options used to configure and build ascent.

        The generated cmake cache file records the compilers, the
        MPI/CUDA/OpenMP settings, the shared/static choice and the
        install prefixes of all dependencies, so the same configuration
        can be reproduced outside of the spack build environment.
        Returns the absolute path of the generated file.

        For more details about 'host-config' files see:
        http://ascent.readthedocs.io/en/latest/BuildingAscent.html
        """
        #######################
        # Compiler Info
        #######################
        c_compiler = env["SPACK_CC"]
        cpp_compiler = env["SPACK_CXX"]
        #######################################################################
        # By directly fetching the names of the actual compilers we appear
        # to doing something evil here, but this is necessary to create a
        # 'host config' file that works outside of the spack install env.
        #######################################################################
        sys_type = spec.architecture
        # if on llnl systems, we can use the SYS_TYPE
        if "SYS_TYPE" in env:
            sys_type = env["SYS_TYPE"]
        ##############################################
        # Find and record what CMake is used
        ##############################################
        if "+cmake" in spec:
            cmake_exe = spec['cmake'].command.path
        else:
            cmake_exe = which("cmake")
            if cmake_exe is None:
                msg = 'failed to find CMake (and cmake variant is off)'
                raise RuntimeError(msg)
            cmake_exe = cmake_exe.path
        # host-config name encodes host, system type and compiler
        host_cfg_fname = "%s-%s-%s-devil_ray.cmake" % (socket.gethostname(),
                                                       sys_type,
                                                       spec.compiler)
        cfg = open(host_cfg_fname, "w")
        cfg.write("##################################\n")
        cfg.write("# spack generated host-config\n")
        cfg.write("##################################\n")
        cfg.write("# {0}-{1}\n".format(sys_type, spec.compiler))
        cfg.write("##################################\n\n")
        # Include path to cmake for reference
        cfg.write("# cmake from spack \n")
        cfg.write("# cmake executable path: %s\n\n" % cmake_exe)
        #######################
        # Compiler Settings
        #######################
        cfg.write("#######\n")
        cfg.write("# using %s compiler spec\n" % spec.compiler)
        cfg.write("#######\n\n")
        if "+mpi" in spec:
            cfg.write(cmake_cache_entry("ENABLE_MPI", "ON"))
            mpicc_path = spec['mpi'].mpicc
            mpicxx_path = spec['mpi'].mpicxx
            # if we are using compiler wrappers on cray systems
            # use those for mpi wrappers, b/c spec['mpi'].mpicxx
            # etc make return the spack compiler wrappers
            # which can trip up mpi detection in CMake 3.14
            if cpp_compiler == "CC":
                mpicc_path = "cc"
                mpicxx_path = "CC"
            cfg.write(cmake_cache_entry("CMAKE_C_COMPILER", mpicc_path))
            cfg.write(cmake_cache_entry("CMAKE_CXX_COMPILER", mpicxx_path))
        else:
            cfg.write(cmake_cache_entry("ENABLE_MPI", "OFF"))
            cfg.write("# c compiler used by spack\n")
            cfg.write(cmake_cache_entry("CMAKE_C_COMPILER", c_compiler))
            cfg.write("# cpp compiler used by spack\n")
            cfg.write(cmake_cache_entry("CMAKE_CXX_COMPILER", cpp_compiler))
        # use global spack compiler flags
        cppflags = ' '.join(spec.compiler_flags['cppflags'])
        if cppflags:
            # avoid always ending up with ' ' with no flags defined
            cppflags += ' '
        cflags = cppflags + ' '.join(spec.compiler_flags['cflags'])
        if cflags:
            cfg.write(cmake_cache_entry("CMAKE_C_FLAGS", cflags))
        cxxflags = cppflags + ' '.join(spec.compiler_flags['cxxflags'])
        if cxxflags:
            cfg.write(cmake_cache_entry("CMAKE_CXX_FLAGS", cxxflags))
        fflags = ' '.join(spec.compiler_flags['fflags'])
        if self.spec.satisfies('%cce'):
            # cray fortran needs -ef for lower-case module names
            fflags += " -ef"
        if fflags:
            cfg.write(cmake_cache_entry("CMAKE_Fortran_FLAGS", fflags))
        #######################
        # Backends
        #######################
        cfg.write("# CUDA Support\n")
        if "+cuda" in spec:
            cfg.write(cmake_cache_entry("ENABLE_CUDA", "ON"))
            if 'cuda_arch' in spec.variants:
                cuda_value = spec.variants['cuda_arch'].value
                cuda_arch = cuda_value[0]
                cfg.write(cmake_cache_entry('CUDA_ARCH',
                                            'sm_{0}'.format(cuda_arch)))
        else:
            cfg.write(cmake_cache_entry("ENABLE_CUDA", "OFF"))
        if "+openmp" in spec:
            cfg.write(cmake_cache_entry("ENABLE_OPENMP", "ON"))
        else:
            cfg.write(cmake_cache_entry("ENABLE_OPENMP", "OFF"))
        # shared vs static libs
        if "+shared" in spec:
            cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "ON"))
        else:
            cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "OFF"))
        #######################
        # Unit Tests
        #######################
        if "+test" in spec:
            cfg.write(cmake_cache_entry("DRAY_ENABLE_TESTS", "ON"))
            # we need this to control BLT tests
            cfg.write(cmake_cache_entry("ENABLE_TESTS", "ON"))
        else:
            cfg.write(cmake_cache_entry("DRAY_ENABLE_TESTS", "OFF"))
            # we need this to control BLT tests
            cfg.write(cmake_cache_entry("ENABLE_TESTS", "OFF"))
        #######################
        # Utilities
        #######################
        if "+utils" in spec:
            cfg.write(cmake_cache_entry("DRAY_ENABLE_UTILS", "ON"))
        else:
            cfg.write(cmake_cache_entry("DRAY_ENABLE_UTILS", "OFF"))
        #######################
        # Logging
        #######################
        if "+logging" in spec:
            cfg.write(cmake_cache_entry("ENABLE_LOGGING", "ON"))
        else:
            cfg.write(cmake_cache_entry("ENABLE_LOGGING", "OFF"))
        #######################
        # Status
        #######################
        if "+stats" in spec:
            cfg.write(cmake_cache_entry("ENABLE_STATS", "ON"))
        else:
            cfg.write(cmake_cache_entry("ENABLE_STATS", "OFF"))
        #######################################################################
        # Core Dependencies
        #######################################################################
        cfg.write("# conduit from spack \n")
        cfg.write(cmake_cache_entry("CONDUIT_DIR", spec['conduit'].prefix))
        cfg.write("# mfem from spack \n")
        cfg.write(cmake_cache_entry("MFEM_DIR", spec['mfem'].prefix))
        cfg.write("# raja from spack \n")
        cfg.write(cmake_cache_entry("RAJA_DIR", spec['raja'].prefix))
        cfg.write("# umpire from spack \n")
        cfg.write(cmake_cache_entry("UMPIRE_DIR", spec['umpire'].prefix))
        cfg.write("# apcompositor from spack \n")
        cfg.write(cmake_cache_entry("APCOMP_DIR", spec['apcomp'].prefix))
        cfg.write("##################################\n")
        cfg.write("# end spack generated host-config\n")
        cfg.write("##################################\n")
        cfg.close()
        host_cfg_fname = os.path.abspath(host_cfg_fname)
        tty.info("spack generated conduit host-config file: " + host_cfg_fname)
        return host_cfg_fname
| 41.383387 | 96 | 0.560179 |
from spack import *
import os
import socket
import llnl.util.tty as tty
def cmake_cache_entry(name, value, vtype=None):
    """Return a cmake cache `set()` command line for a host-config file.

    When vtype is not given, "ON"/"OFF" values are typed BOOL and
    everything else is typed PATH.
    """
    if vtype is None:
        vtype = "BOOL" if value in ("ON", "OFF") else "PATH"
    return 'set({0} "{1}" CACHE {2} "")\n\n'.format(name, value, vtype)
class Dray(Package, CudaPackage):
homepage = "https://github.com/LLNL/devil_ray"
git = "https://github.com/LLNL/devil_ray.git"
url = "https://github.com/LLNL/devil_ray/releases/download/v0.1.2/dray-v0.1.2.tar.gz"
maintainers = ['mclarsen', 'cyrush']
version('develop', branch='develop', submodules='True')
version('0.1.6', sha256='43f39039599e3493cbbaeaf5621b611bef301ff504bed6e32c98f30bb2179e92')
version('0.1.5', sha256='aaf0975561a8e7910b9353e2dc30bd78abf9f01c306ec042422b7da223d3a8b8')
version('0.1.4', sha256='e763a3aa537b23486a4788f9d68db0a3eb545f6a2e617cd7c8a876682ca2d0a0')
version('0.1.3', sha256='b2f624a072463189997343b1ed911cc34c9bb1b6c7f0c3e48efeb40c05dd0d92')
version('0.1.2', sha256='46937f20124b28dc78a634e8e063a3e7a3bbfd9f424ce2680b08417010c376da')
version('0.1.1', sha256='e5daa49ee3367c087f5028dc5a08655298beb318014c6f3f65ef4a08fcbe346c')
version('0.1.0', sha256='8b341138e1069361351e0a94478608c5af479cca76e2f97d556229aed45c0169')
variant('cuda', default=False, description='Build with CUDA backend')
variant('openmp', default=True, description='Build OpenMP backend')
variant("shared", default=True, description="Build as shared libs")
variant("test", default=True, description='Build unit tests')
variant("utils", default=True, description='Build utilities')
variant("logging", default=False, description='Enable logging')
variant("stats", default=False, description='Enable stats')
variant("mpi", default=True, description='Enable MPI compiler')
depends_on('cuda', when='+cuda')
depends_on('mpi', when='+mpi')
depends_on('cmake@3.9:', type='build')
depends_on('cmake@3.14:', when='+cuda', type='build')
depends_on("conduit~shared", when="~shared")
depends_on("conduit+shared", when="+shared")
depends_on("apcomp~shared+openmp+mpi", when="~shared+openmp+mpi")
depends_on("apcomp+shared+openmp+mpi", when="+shared+openmp+mpi")
depends_on("apcomp~shared~openmp+mpi", when="~shared~openmp+mpi")
depends_on("apcomp+shared~openmp+mpi", when="+shared~openmp+mpi")
depends_on("apcomp~shared+openmp~mpi", when="~shared+openmp~mpi")
depends_on("apcomp+shared+openmp~mpi", when="+shared+openmp~mpi")
depends_on("apcomp~shared~openmp~mpi", when="~shared~openmp~mpi")
depends_on("apcomp+shared~openmp~mpi", when="+shared~openmp~mpi")
depends_on("raja+cuda~openmp+shared", when="+cuda~openmp+shared")
depends_on("raja+cuda+openmp+shared", when="+cuda+openmp+shared")
depends_on("raja+cuda~openmp~shared", when="+cuda~openmp~shared")
depends_on("raja+cuda+openmp~shared", when="+cuda+openmp~shared")
depends_on("raja~cuda~openmp+shared", when="~cuda~openmp+shared")
depends_on("raja~cuda+openmp+shared", when="~cuda+openmp+shared")
depends_on("raja~cuda~openmp~shared", when="~cuda~openmp~shared")
depends_on("raja~cuda+openmp~shared", when="~cuda+openmp~shared")
depends_on("umpire+cuda+shared", when="+cuda+shared")
depends_on("umpire+cuda~shared", when="+cuda~shared")
depends_on("umpire~cuda+shared", when="~cuda+shared")
depends_on("umpire~cuda~shared", when="~cuda~shared")
depends_on("mfem+shared+conduit~threadsafe", when="+shared")
depends_on("mfem~shared+conduit~threadsafe", when="~shared")
def setup_build_environment(self, env):
env.set('CTEST_OUTPUT_ON_FAILURE', '1')
def install(self, spec, prefix):
with working_dir('spack-build', create=True):
host_cfg_fname = self.create_host_config(spec,
prefix)
cmake_args = []
# (see: https://github.com/LLNL/spack/issues/2658)
if "+shared" in spec:
cmake_args.extend(std_cmake_args)
else:
for arg in std_cmake_args:
if arg.count("RPATH") == 0:
cmake_args.append(arg)
cmake_args.extend(["-C", host_cfg_fname, "../src"])
print("Configuring Devil Ray...")
cmake(*cmake_args)
print("Building Devil Ray...")
make()
# run unit tests if requested
if "+test" in spec and self.run_tests:
print("Running Devil Ray Unit Tests...")
make("test")
print("Installing Devil Ray...")
make("install")
# install copy of host config for provenance
install(host_cfg_fname, prefix)
def create_host_config(self, spec, prefix):
#######################
# Compiler Info
#######################
c_compiler = env["SPACK_CC"]
cpp_compiler = env["SPACK_CXX"]
#######################################################################
# By directly fetching the names of the actual compilers we appear
# to doing something evil here, but this is necessary to create a
# 'host config' file that works outside of the spack install env.
#######################################################################
sys_type = spec.architecture
# if on llnl systems, we can use the SYS_TYPE
if "SYS_TYPE" in env:
sys_type = env["SYS_TYPE"]
##############################################
# Find and record what CMake is used
##############################################
if "+cmake" in spec:
cmake_exe = spec['cmake'].command.path
else:
cmake_exe = which("cmake")
if cmake_exe is None:
msg = 'failed to find CMake (and cmake variant is off)'
raise RuntimeError(msg)
cmake_exe = cmake_exe.path
host_cfg_fname = "%s-%s-%s-devil_ray.cmake" % (socket.gethostname(),
sys_type,
spec.compiler)
cfg = open(host_cfg_fname, "w")
cfg.write("##################################\n")
cfg.write("# spack generated host-config\n")
cfg.write("##################################\n")
cfg.write("# {0}-{1}\n".format(sys_type, spec.compiler))
cfg.write("##################################\n\n")
# Include path to cmake for reference
cfg.write("# cmake from spack \n")
cfg.write("# cmake executable path: %s\n\n" % cmake_exe)
#######################
# Compiler Settings
#######################
cfg.write("#######\n")
cfg.write("# using %s compiler spec\n" % spec.compiler)
cfg.write("#######\n\n")
if "+mpi" in spec:
cfg.write(cmake_cache_entry("ENABLE_MPI", "ON"))
mpicc_path = spec['mpi'].mpicc
mpicxx_path = spec['mpi'].mpicxx
# if we are using compiler wrappers on cray systems
# use those for mpi wrappers, b/c spec['mpi'].mpicxx
# etc make return the spack compiler wrappers
# which can trip up mpi detection in CMake 3.14
if cpp_compiler == "CC":
mpicc_path = "cc"
mpicxx_path = "CC"
cfg.write(cmake_cache_entry("CMAKE_C_COMPILER", mpicc_path))
cfg.write(cmake_cache_entry("CMAKE_CXX_COMPILER", mpicxx_path))
else:
cfg.write(cmake_cache_entry("ENABLE_MPI", "OFF"))
cfg.write("# c compiler used by spack\n")
cfg.write(cmake_cache_entry("CMAKE_C_COMPILER", c_compiler))
cfg.write("# cpp compiler used by spack\n")
cfg.write(cmake_cache_entry("CMAKE_CXX_COMPILER", cpp_compiler))
# use global spack compiler flags
cppflags = ' '.join(spec.compiler_flags['cppflags'])
if cppflags:
# avoid always ending up with ' ' with no flags defined
cppflags += ' '
cflags = cppflags + ' '.join(spec.compiler_flags['cflags'])
if cflags:
cfg.write(cmake_cache_entry("CMAKE_C_FLAGS", cflags))
cxxflags = cppflags + ' '.join(spec.compiler_flags['cxxflags'])
if cxxflags:
cfg.write(cmake_cache_entry("CMAKE_CXX_FLAGS", cxxflags))
fflags = ' '.join(spec.compiler_flags['fflags'])
if self.spec.satisfies('%cce'):
fflags += " -ef"
if fflags:
cfg.write(cmake_cache_entry("CMAKE_Fortran_FLAGS", fflags))
#######################
# Backends
#######################
cfg.write("# CUDA Support\n")
if "+cuda" in spec:
cfg.write(cmake_cache_entry("ENABLE_CUDA", "ON"))
if 'cuda_arch' in spec.variants:
cuda_value = spec.variants['cuda_arch'].value
cuda_arch = cuda_value[0]
cfg.write(cmake_cache_entry('CUDA_ARCH',
'sm_{0}'.format(cuda_arch)))
else:
cfg.write(cmake_cache_entry("ENABLE_CUDA", "OFF"))
if "+openmp" in spec:
cfg.write(cmake_cache_entry("ENABLE_OPENMP", "ON"))
else:
cfg.write(cmake_cache_entry("ENABLE_OPENMP", "OFF"))
# shared vs static libs
if "+shared" in spec:
cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "ON"))
else:
cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "OFF"))
#######################
# Unit Tests
#######################
if "+test" in spec:
cfg.write(cmake_cache_entry("DRAY_ENABLE_TESTS", "ON"))
# we need this to control BLT tests
cfg.write(cmake_cache_entry("ENABLE_TESTS", "ON"))
else:
cfg.write(cmake_cache_entry("DRAY_ENABLE_TESTS", "OFF"))
# we need this to control BLT tests
cfg.write(cmake_cache_entry("ENABLE_TESTS", "OFF"))
#######################
# Utilities
#######################
if "+utils" in spec:
cfg.write(cmake_cache_entry("DRAY_ENABLE_UTILS", "ON"))
else:
cfg.write(cmake_cache_entry("DRAY_ENABLE_UTILS", "OFF"))
#######################
# Logging
#######################
if "+logging" in spec:
cfg.write(cmake_cache_entry("ENABLE_LOGGING", "ON"))
else:
cfg.write(cmake_cache_entry("ENABLE_LOGGING", "OFF"))
#######################
# Status
#######################
if "+stats" in spec:
cfg.write(cmake_cache_entry("ENABLE_STATS", "ON"))
else:
cfg.write(cmake_cache_entry("ENABLE_STATS", "OFF"))
#######################################################################
# Core Dependencies
#######################################################################
cfg.write("# conduit from spack \n")
cfg.write(cmake_cache_entry("CONDUIT_DIR", spec['conduit'].prefix))
cfg.write("# mfem from spack \n")
cfg.write(cmake_cache_entry("MFEM_DIR", spec['mfem'].prefix))
cfg.write("# raja from spack \n")
cfg.write(cmake_cache_entry("RAJA_DIR", spec['raja'].prefix))
cfg.write("# umpire from spack \n")
cfg.write(cmake_cache_entry("UMPIRE_DIR", spec['umpire'].prefix))
cfg.write("# apcompositor from spack \n")
cfg.write(cmake_cache_entry("APCOMP_DIR", spec['apcomp'].prefix))
cfg.write("##################################\n")
cfg.write("# end spack generated host-config\n")
cfg.write("##################################\n")
cfg.close()
host_cfg_fname = os.path.abspath(host_cfg_fname)
tty.info("spack generated conduit host-config file: " + host_cfg_fname)
return host_cfg_fname
| true | true |
f7f9b0bca3dbbd24fdc40a133125f52a8fa74272 | 7,441 | py | Python | nfv/nfv-common/nfv_common/thread/_thread.py | SidneyAn/nfv | 5f0262a5b6ea4be59f977b9c587c483cbe0e373d | [
"Apache-2.0"
] | null | null | null | nfv/nfv-common/nfv_common/thread/_thread.py | SidneyAn/nfv | 5f0262a5b6ea4be59f977b9c587c483cbe0e373d | [
"Apache-2.0"
] | null | null | null | nfv/nfv-common/nfv_common/thread/_thread.py | SidneyAn/nfv | 5f0262a5b6ea4be59f977b9c587c483cbe0e373d | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2015-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import ctypes
from multiprocessing import Process
import signal
import sys
from nfv_common import debug
from nfv_common import selectable
from nfv_common import selobj
from nfv_common import timers
from nfv_common.helpers import coroutine
from nfv_common.thread._thread_progress_marker import ThreadProgressMarker
DLOG = debug.debug_get_logger('nfv_common.thread')
class ThreadState(object):
    """
    Thread State

    Mutable flags shared between the child process main loop and the
    work dispatch coroutine.
    """
    def __init__(self):
        # Cleared by the stop action to make the main loop exit.
        self.stay_on = True
        # Set by the debug-config-reload action; consumed by the main loop.
        self.debug_reload = False
class Thread(object):
    """
    Thread

    Wraps a multiprocessing.Process running a thread worker.  Work is
    sent to the child through a multiprocess queue, and a periodic
    timer watches a shared progress marker to detect a stalled child.
    """
    ACTION_DEBUG_CONFIG_RELOAD = "thread-debug-config-reload"
    ACTION_STOP = "thread-stop"
    def __init__(self, name, thread_worker, check_interval_in_secs=30):
        """
        Create thread

        The child process is created as a daemon; check_interval_in_secs
        controls how often do_check samples the progress marker.
        """
        self._name = name
        self._work_queue = selectable.MultiprocessQueue()
        self._thread_worker = thread_worker
        # Shared counter incremented by the child main loop; do_check
        # treats a non-moving value as a stall.
        self._progress_marker = ThreadProgressMarker()
        self._process = Process(target=_thread_main,
                                args=(self._name, self._progress_marker,
                                      debug.debug_get_config(),
                                      thread_worker, self._work_queue),
                                name=self._name)
        self._process.daemon = True
        self._check_timer_id = None
        self._check_interval_in_secs = check_interval_in_secs
        self._last_marker_value = None
        self._stall_timestamp_ms = None
        debug.debug_register_config_change_callback(self.debug_config_change)
    @property
    def name(self):
        """
        Return the name of the thread
        """
        return self._name
    @property
    def selobj(self):
        """
        Returns the selection object that signals when thread work
        is complete
        """
        return self._thread_worker.selobj
    @property
    def stall_elapsed_secs(self):
        """
        Returns the elapsed time in seconds that the thread has been stalled
        """
        if self._stall_timestamp_ms is not None:
            now = timers.get_monotonic_timestamp_in_ms()
            return int((now - self._stall_timestamp_ms) / 1000)
        return 0
    @coroutine
    def do_check(self):
        """
        Check the Thread for progress

        Timer callback: if the progress marker has not moved since the
        previous check, record the stall start time and log an error
        with the accumulated stall duration.
        """
        while True:
            (yield)
            if self._last_marker_value is not None:
                if self._last_marker_value == self._progress_marker.value:
                    if self._stall_timestamp_ms is None:
                        self._stall_timestamp_ms = \
                            timers.get_monotonic_timestamp_in_ms()
                    DLOG.error("Thread %s stalled, progress_marker=%s, "
                               "elapsed_secs=%s." % (self._name,
                                                     self._progress_marker.value,
                                                     self.stall_elapsed_secs))
                else:
                    self._stall_timestamp_ms = None
            self._last_marker_value = self._progress_marker.value
    def start(self):
        """
        Start the Thread
        """
        self._process.start()
        if self._check_timer_id is None:
            self._check_timer_id = timers.timers_create_timer(
                self._name, self._check_interval_in_secs,
                self._check_interval_in_secs, self.do_check)
    def stop(self, max_wait_in_seconds):
        """
        Stop the Thread

        Sends the stop action, waits up to max_wait_in_seconds for the
        child to exit, and terminates it if it is still alive.
        """
        self._work_queue.put([Thread.ACTION_STOP, None])
        self._process.join(max_wait_in_seconds)
        if self._process.is_alive():
            self._process.terminate()
        if self._check_timer_id is not None:
            timers.timers_delete_timer(self._check_timer_id)
        self._work_queue.close()
    def debug_config_change(self):
        # Tell the child process to reload the debug configuration.
        self._work_queue.put([Thread.ACTION_DEBUG_CONFIG_RELOAD, None])
    def send_work(self, action, work):
        """
        Send work to Thread
        """
        self._work_queue.put([action, work])
    def get_result(self):
        """
        Get work result
        """
        return self._thread_worker.get_result()
@coroutine
def _thread_dispatch_work(thread_state, thread_worker, work_queue):
    """
    Dispatch thread work

    Coroutine registered on the work queue's selection object inside
    the child process.  Consumes [action, data] entries: the stop and
    debug-reload actions update thread_state; all other actions are
    handed to the thread worker.
    """
    while True:
        select_obj = (yield)
        if select_obj == work_queue.selobj:
            work_entry = work_queue.get()
            if work_entry is not None:
                action, work = work_entry
                DLOG.verbose("Received work, action=%s." % action)
                if Thread.ACTION_DEBUG_CONFIG_RELOAD == action:
                    thread_state.debug_reload = True
                elif Thread.ACTION_STOP == action:
                    thread_state.stay_on = False
                else:
                    thread_worker.do_work(action, work)
def _thread_main(thread_name, progress_marker, debug_config, thread_worker,
                 work_queue):
    """
    Main loop for the thread

    Entry point of the child process: names the process, arranges for
    the kernel to kill it if the parent dies, ignores terminal signals,
    initializes the debug/selobj/timer subsystems and the thread worker,
    then dispatches work until a stop action clears thread_state.stay_on.
    """
    from ctypes import util
    PR_SET_PDEATHSIG = 1
    PR_SET_NAME = 15
    PR_SIGKILL = 9
    libc = ctypes.cdll.LoadLibrary(util.find_library("c"))
    # prctl(2) takes a C string argument; encode to bytes because ctypes
    # on python 3 does not convert str to char* (it raises ArgumentError).
    # Note the kernel silently truncates the name to 15 characters.
    result = libc.prctl(PR_SET_NAME, thread_name.encode('utf-8'))
    if 0 != result:
        DLOG.error("PRCTL set-name failed with error=%s." % result)
        sys.exit(200)
    # Have the kernel SIGKILL this process when the parent dies so that
    # worker processes can never outlive the main process.
    result = libc.prctl(PR_SET_PDEATHSIG, PR_SIGKILL)
    if 0 != result:
        DLOG.error("PRCTL set-parent-death-signal failed with error=%s." % result)
        sys.exit(201)
    # Terminal / user signals are handled by the parent, not the workers.
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGHUP, signal.SIG_IGN)
    signal.signal(signal.SIGUSR1, signal.SIG_IGN)
    signal.signal(signal.SIGUSR2, signal.SIG_IGN)
    try:
        thread_state = ThreadState()
        debug.debug_initialize(debug_config, thread_name=thread_name)
        selobj.selobj_initialize()
        timers.timers_initialize(thread_worker.tick_interval_in_ms,
                                 thread_worker.tick_max_delay_in_ms,
                                 thread_worker.tick_delay_debounce_in_ms)
        DLOG.debug("Thread %s: initializing." % thread_name)
        thread_worker.initialize()
        selobj.selobj_add_read_obj(work_queue.selobj, _thread_dispatch_work,
                                   thread_state, thread_worker, work_queue)
        DLOG.debug("Thread %s: started." % thread_name)
        while thread_state.stay_on:
            progress_marker.increment()
            selobj.selobj_dispatch(thread_worker.tick_interval_in_ms)
            timers.timers_schedule()
            if not timers.timers_scheduling_on_time():
                DLOG.info("Thread %s: not scheduling on time" % thread_name)
            if thread_state.debug_reload:
                debug.debug_reload_config()
                thread_state.debug_reload = False
    except KeyboardInterrupt:
        print("Keyboard Interrupt received.")
    except Exception as e:
        DLOG.exception("%s" % e)
        sys.exit(202)
    finally:
        # Always tear down the worker and subsystems, even on failure.
        DLOG.info("Thread %s: shutting down." % thread_name)
        thread_worker.finalize()
        timers.timers_finalize()
        selobj.selobj_finalize()
        DLOG.info("Thread %s: shutdown." % thread_name)
        debug.debug_finalize()
| 31.264706 | 82 | 0.605833 |
import ctypes
from multiprocessing import Process
import signal
import sys
from nfv_common import debug
from nfv_common import selectable
from nfv_common import selobj
from nfv_common import timers
from nfv_common.helpers import coroutine
from nfv_common.thread._thread_progress_marker import ThreadProgressMarker
DLOG = debug.debug_get_logger('nfv_common.thread')
class ThreadState(object):
def __init__(self):
self.stay_on = True
self.debug_reload = False
class Thread(object):
ACTION_DEBUG_CONFIG_RELOAD = "thread-debug-config-reload"
ACTION_STOP = "thread-stop"
def __init__(self, name, thread_worker, check_interval_in_secs=30):
self._name = name
self._work_queue = selectable.MultiprocessQueue()
self._thread_worker = thread_worker
self._progress_marker = ThreadProgressMarker()
self._process = Process(target=_thread_main,
args=(self._name, self._progress_marker,
debug.debug_get_config(),
thread_worker, self._work_queue),
name=self._name)
self._process.daemon = True
self._check_timer_id = None
self._check_interval_in_secs = check_interval_in_secs
self._last_marker_value = None
self._stall_timestamp_ms = None
debug.debug_register_config_change_callback(self.debug_config_change)
@property
def name(self):
return self._name
@property
def selobj(self):
return self._thread_worker.selobj
@property
def stall_elapsed_secs(self):
if self._stall_timestamp_ms is not None:
now = timers.get_monotonic_timestamp_in_ms()
return int((now - self._stall_timestamp_ms) / 1000)
return 0
@coroutine
def do_check(self):
while True:
(yield)
if self._last_marker_value is not None:
if self._last_marker_value == self._progress_marker.value:
if self._stall_timestamp_ms is None:
self._stall_timestamp_ms = \
timers.get_monotonic_timestamp_in_ms()
DLOG.error("Thread %s stalled, progress_marker=%s, "
"elapsed_secs=%s." % (self._name,
self._progress_marker.value,
self.stall_elapsed_secs))
else:
self._stall_timestamp_ms = None
self._last_marker_value = self._progress_marker.value
def start(self):
self._process.start()
if self._check_timer_id is None:
self._check_timer_id = timers.timers_create_timer(
self._name, self._check_interval_in_secs,
self._check_interval_in_secs, self.do_check)
def stop(self, max_wait_in_seconds):
self._work_queue.put([Thread.ACTION_STOP, None])
self._process.join(max_wait_in_seconds)
if self._process.is_alive():
self._process.terminate()
if self._check_timer_id is not None:
timers.timers_delete_timer(self._check_timer_id)
self._work_queue.close()
def debug_config_change(self):
self._work_queue.put([Thread.ACTION_DEBUG_CONFIG_RELOAD, None])
def send_work(self, action, work):
self._work_queue.put([action, work])
def get_result(self):
return self._thread_worker.get_result()
@coroutine
def _thread_dispatch_work(thread_state, thread_worker, work_queue):
while True:
select_obj = (yield)
if select_obj == work_queue.selobj:
work_entry = work_queue.get()
if work_entry is not None:
action, work = work_entry
DLOG.verbose("Received work, action=%s." % action)
if Thread.ACTION_DEBUG_CONFIG_RELOAD == action:
thread_state.debug_reload = True
elif Thread.ACTION_STOP == action:
thread_state.stay_on = False
else:
thread_worker.do_work(action, work)
def _thread_main(thread_name, progress_marker, debug_config, thread_worker,
work_queue):
from ctypes import util
PR_SET_PDEATHSIG = 1
PR_SET_NAME = 15
PR_SIGKILL = 9
libc = ctypes.cdll.LoadLibrary(util.find_library("c"))
result = libc.prctl(PR_SET_NAME, thread_name)
if 0 != result:
DLOG.error("PRCTL set-name failed with error=%s." % result)
sys.exit(200)
result = libc.prctl(PR_SET_PDEATHSIG, PR_SIGKILL)
if 0 != result:
DLOG.error("PRCTL set-parent-death-signal failed with error=%s." % result)
sys.exit(201)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGHUP, signal.SIG_IGN)
signal.signal(signal.SIGUSR1, signal.SIG_IGN)
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
try:
thread_state = ThreadState()
debug.debug_initialize(debug_config, thread_name=thread_name)
selobj.selobj_initialize()
timers.timers_initialize(thread_worker.tick_interval_in_ms,
thread_worker.tick_max_delay_in_ms,
thread_worker.tick_delay_debounce_in_ms)
DLOG.debug("Thread %s: initializing." % thread_name)
thread_worker.initialize()
selobj.selobj_add_read_obj(work_queue.selobj, _thread_dispatch_work,
thread_state, thread_worker, work_queue)
DLOG.debug("Thread %s: started." % thread_name)
while thread_state.stay_on:
progress_marker.increment()
selobj.selobj_dispatch(thread_worker.tick_interval_in_ms)
timers.timers_schedule()
if not timers.timers_scheduling_on_time():
DLOG.info("Thread %s: not scheduling on time" % thread_name)
if thread_state.debug_reload:
debug.debug_reload_config()
thread_state.debug_reload = False
except KeyboardInterrupt:
print("Keyboard Interrupt received.")
except Exception as e:
DLOG.exception("%s" % e)
sys.exit(202)
finally:
DLOG.info("Thread %s: shutting down." % thread_name)
thread_worker.finalize()
timers.timers_finalize()
selobj.selobj_finalize()
DLOG.info("Thread %s: shutdown." % thread_name)
debug.debug_finalize()
| true | true |
f7f9b220725c89f0afb7ef7cd28df2b538a9eb11 | 1,629 | py | Python | resources/AccountResource.py | kckotcherlakota/workindia_passwordkeeper | 4168dc8310073588029359d2ede380d435e592ba | [
"Apache-2.0"
] | null | null | null | resources/AccountResource.py | kckotcherlakota/workindia_passwordkeeper | 4168dc8310073588029359d2ede380d435e592ba | [
"Apache-2.0"
] | null | null | null | resources/AccountResource.py | kckotcherlakota/workindia_passwordkeeper | 4168dc8310073588029359d2ede380d435e592ba | [
"Apache-2.0"
] | null | null | null | from flask_restful import Resource,reqparse
import pymysql
from models.Account import Account
from storage_security import StorageSecurity
class AccountListResource(Resource):
    def get(self):
        """Return the decrypted account list for the 'user' request argument."""
        try:
            SS = StorageSecurity()
            parser = reqparse.RequestParser()
            parser.add_argument("user")
            args = parser.parse_args()
            accounts = Account.getByUserId(args["user"])
            if accounts and len(accounts)>0:
                """ decrypt the passwords """
                for account in accounts:
                    account["password"]=SS.decrypt(account["password"])
                return accounts
            elif accounts and len(accounts)==0:
                # NOTE(review): unreachable - an empty list is falsy, so
                # this branch never runs and the method falls through,
                # implicitly returning None; confirm intended behaviour.
                return {"msg":"No accounts found"}
        except Exception as e:
            print(e)
            return {"status":"failed","msg":"server error"},500
class CreateAccountResource(Resource):
    def post(self):
        """Store a new website credential, encrypting the password at rest."""
        try:
            SS = StorageSecurity()
            parser = reqparse.RequestParser()
            parser.add_argument("user")
            parser.add_argument("website")
            parser.add_argument("username")
            parser.add_argument("password")
            args = parser.parse_args()
            print(args)
            #encrypt the password before it is persisted
            args["password"] = SS.encrypt(args["password"])
            Account.save(args)
            return {"status":"success"},200
        except Exception as e:
            print(e)
            return {"status":"failure","msg":"server error"},500
| 29.089286 | 70 | 0.54205 | from flask_restful import Resource,reqparse
import pymysql
from models.Account import Account
from storage_security import StorageSecurity
class AccountListResource(Resource):
def get(self):
try:
SS = StorageSecurity()
parser = reqparse.RequestParser()
parser.add_argument("user")
args = parser.parse_args()
accounts = Account.getByUserId(args["user"])
if accounts and len(accounts)>0:
for account in accounts:
account["password"]=SS.decrypt(account["password"])
return accounts
elif accounts and len(accounts)==0:
return {"msg":"No accounts found"}
except Exception as e:
print(e)
return {"status":"failed","msg":"server error"},500
class CreateAccountResource(Resource):
def post(self):
try:
SS = StorageSecurity()
parser = reqparse.RequestParser()
parser.add_argument("user")
parser.add_argument("website")
parser.add_argument("username")
parser.add_argument("password")
args = parser.parse_args()
print(args)
args["password"] = SS.encrypt(args["password"])
Account.save(args)
return {"status":"success"},200
except Exception as e:
print(e)
return {"status":"failure","msg":"server error"},500
| true | true |
f7f9b242e63a1464608be1618e12f03ed606d82f | 1,560 | py | Python | Assignment2_spark_ML/CC_A2_code/cc_a2/matrix.py | franciszhangkk/Cloud_computing | e53e91199119ea72a434d7b7b424f9029a11451c | [
"MIT"
] | null | null | null | Assignment2_spark_ML/CC_A2_code/cc_a2/matrix.py | franciszhangkk/Cloud_computing | e53e91199119ea72a434d7b7b424f9029a11451c | [
"MIT"
] | null | null | null | Assignment2_spark_ML/CC_A2_code/cc_a2/matrix.py | franciszhangkk/Cloud_computing | e53e91199119ea72a434d7b7b424f9029a11451c | [
"MIT"
] | null | null | null | import seaborn as sn
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import csv
import math
# def show_confusion_matrix(confusion, xlabels, ylabels):
# plt.figure(figsize=(14, 11))
# df_cm = pd.DataFrame(confusion, range(10), range(10))
# df_cm.astype(int)
#
# sn.heatmap(df_cm, annot=True, xticklabels=xlabels, yticklabels=ylabels, vmin=0, vmax=8000, cmap=sn.cm.rocket_r,
# fmt='.5g')
# plt.show()
# print('Fig. 3 Confusion Matrix of the test-data(X_axis is the predicted labels & Y_axis is the actual labels)')
# return
# Load the confusion matrix dumped by the Spark job into a list of int rows.
matrix = []
with open('/Users/zekunzhang/Desktop/ML_A2/matrix/Confusion_MLP.csv') as training_label0:
    # NOTE(review): quotechar=',' makes the quote character equal to the
    # delimiter, which is almost certainly unintended - confirm.
    spamreader_label = csv.reader(training_label0, quotechar=',')
    for row in spamreader_label:
        arr=[]
        for k in row:
            # values are written as floats (e.g. "12.0"); truncate to int
            arr.append(int(math.floor(float(k))))
        matrix.append(arr)
def statistic(confusion_test):
    """Compute per-class precision, recall, F1 and support.

    Args:
        confusion_test: 10x10 confusion matrix (rows = actual labels,
            columns = predicted labels); accepts nested lists or an
            ndarray.

    Returns:
        List of [label, precision, recall, f1, support] rows, with the
        float metrics rounded to 3 decimals and support to 0 decimals.
    """
    # Accept plain nested lists as well as ndarrays: the tuple indexing
    # below (m[i, i] / m[:, i]) requires a numpy array, and this module
    # builds `matrix` as a list of lists.
    confusion_test = np.asarray(confusion_test)
    re_list = []
    label = np.arange(0, 10)
    for i in label:
        TP = confusion_test[i, i]
        FN = np.sum(confusion_test[i]) - TP
        FP = np.sum(confusion_test[:, i]) - TP
        precision = TP / (TP + FP)
        recall = TP / (TP + FN)
        # F1 = 2*TP / (2*TP + FP + FN); the previous version omitted the
        # factor of two in the numerator, reporting half the true value.
        F_measure = 2 * TP / (2 * TP + FP + FN)
        Support = (TP + FN)
        row = [int(label[i]), round(float(precision), 3), round(float(recall), 3), round(float(F_measure), 3),
               round(float(Support), 0)]
        re_list.append(row)
    return re_list
statistic_list = statistic(matrix)
| 30.588235 | 117 | 0.619872 | import seaborn as sn
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import csv
import math
# Load the confusion matrix from CSV into a list of int rows.
matrix = []
with open('/Users/zekunzhang/Desktop/ML_A2/matrix/Confusion_MLP.csv') as training_label0:
    spamreader_label = csv.reader(training_label0, quotechar=',')
    for row in spamreader_label:
        arr=[]
        for k in row:
            # floor() tolerates counts serialized as floats (e.g. "12.0").
            arr.append(int(math.floor(float(k))))
        matrix.append(arr)
def statistic(confusion_test):
    """Build per-class metric rows: [label, precision, recall, F, support].

    ``confusion_test`` must support numpy tuple indexing (a 10x10 ndarray).
    """
    rows = []
    classes = np.arange(0, 10)
    for cls in classes:
        tp = confusion_test[cls, cls]
        fn = np.sum(confusion_test[cls]) - tp
        fp = np.sum(confusion_test[:, cls]) - tp
        tn = np.sum(confusion_test) - tp - fn - fp
        prec = tp / (tp + fp)
        rec = tp / (tp + fn)
        f_meas = tp / (2 * tp + fp + fn)
        support = tp + fn
        rows.append([
            int(classes[cls]),
            round(float(prec), 3),
            round(float(rec), 3),
            round(float(f_meas), 3),
            round(float(support), 0),
        ])
    return rows
statistic_list = statistic(matrix)
| true | true |
f7f9b3ad8099082d90427adab7943f28d601265d | 2,370 | py | Python | trading_bot/pull_data.py | PabloJabat/trading-bot-python | f97930a34b614ae3538a27398ee82d9fde72cf9a | [
"MIT"
] | 4 | 2020-08-11T16:16:47.000Z | 2021-03-13T14:44:43.000Z | trading_bot/pull_data.py | fcarazo/trading-bot-python | db1144f18cd12814fa4806cffa1020c6bfcad2a4 | [
"MIT"
] | 6 | 2021-03-13T16:06:21.000Z | 2022-03-29T22:28:48.000Z | trading_bot/pull_data.py | fcarazo/trading-bot-python | db1144f18cd12814fa4806cffa1020c6bfcad2a4 | [
"MIT"
] | 2 | 2021-03-13T15:45:23.000Z | 2021-10-16T11:10:00.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""pull data"""
import time
import concurrent.futures
from .config import *
from typing import List
import alpaca_trade_api
BASE_URL = "https://paper-api.alpaca.markets"
BARS_URL = f"https://data.alpaca.markets/v1/bars"
HEADERS = {
"APCA-API-KEY-ID": API_KEY,
"APCA-API-SECRET-KEY": SECRET_KEY
}
market_api = alpaca_trade_api.REST(API_KEY, SECRET_KEY, BASE_URL)
bars_api = alpaca_trade_api.REST(API_KEY, SECRET_KEY)
ALL_ASSETS = market_api.list_assets()
ALL_ACTIVE_ASSETS = list(filter(lambda x: x.status == "active", ALL_ASSETS))
ALL_INACTIVE_ASSETS = list(
filter(lambda x: x.status == "inactive", ALL_ASSETS))
assert len(ALL_INACTIVE_ASSETS) + len(ALL_ACTIVE_ASSETS) == len(ALL_ASSETS)
def get_available_exchanges():
    """Return the set of exchange codes present in ``ALL_ASSETS``."""
    return {asset.exchange for asset in ALL_ASSETS}
def get_assets_from(exchange, inactive=True) -> List[str]:
    """Return the symbols listed on *exchange*.

    With ``inactive=True`` the full asset universe is searched, otherwise
    only active assets.  If *exchange* is unknown, the known exchanges are
    printed and ``Exception`` is raised.
    """
    known = get_available_exchanges()
    if exchange not in known:
        for name in known:
            print(name)
        raise Exception("Invalid exchange")
    pool = ALL_ASSETS if inactive else ALL_ACTIVE_ASSETS
    return [asset.symbol for asset in pool if asset.exchange == exchange]
def get_nasdaq_symbols(inactive=True) -> List[str]:
    """Convenience wrapper: all (optionally only active) NASDAQ symbols."""
    return get_assets_from("NASDAQ", inactive=inactive)
def pull_symbol_data(symbol: str, limit: int) -> None:
    """Fetch *limit* daily bars for *symbol* and write them to data/<symbol>.csv."""
    data = bars_api.get_barset(symbol, "1D", limit=limit).df
    data.to_csv(f"data/{symbol}.csv")
def pull_symbols_data(symbols: List[str], limit: int) -> None:
    """Download bar data for every symbol concurrently using a thread pool."""
    with concurrent.futures.ThreadPoolExecutor() as pool:
        for symbol in symbols:
            pool.submit(pull_symbol_data, symbol, limit)
if __name__ == "__main__":
    # Demo entry point: pull 50 daily bars for the first 200 NASDAQ symbols
    # and report wall-clock time.  The slice exists because pulling a whole
    # exchange (e.g. all of NASDAQ) at once is too much data.
    start = time.perf_counter()
    nasdaq_symbols = get_nasdaq_symbols()
    pull_symbols_data(nasdaq_symbols[:200], limit=50)
    end = time.perf_counter()
    print(f"Pulled data in {round(end - start, 2)} secs")
| 30.779221 | 80 | 0.685654 |
import time
import concurrent.futures
from .config import *
from typing import List
import alpaca_trade_api
BASE_URL = "https://paper-api.alpaca.markets"
BARS_URL = f"https://data.alpaca.markets/v1/bars"
HEADERS = {
"APCA-API-KEY-ID": API_KEY,
"APCA-API-SECRET-KEY": SECRET_KEY
}
market_api = alpaca_trade_api.REST(API_KEY, SECRET_KEY, BASE_URL)
bars_api = alpaca_trade_api.REST(API_KEY, SECRET_KEY)
ALL_ASSETS = market_api.list_assets()
ALL_ACTIVE_ASSETS = list(filter(lambda x: x.status == "active", ALL_ASSETS))
ALL_INACTIVE_ASSETS = list(
filter(lambda x: x.status == "inactive", ALL_ASSETS))
assert len(ALL_INACTIVE_ASSETS) + len(ALL_ACTIVE_ASSETS) == len(ALL_ASSETS)
def get_available_exchanges():
    # Collect the distinct exchange codes across the full asset universe.
    exchanges = set()
    for asset in ALL_ASSETS:
        exchanges.add(asset.exchange)
    return exchanges
def get_assets_from(exchange, inactive=True) -> List[str]:
    # Symbols on *exchange*; inactive=True searches all assets, else only
    # active ones.  Unknown exchange: print known codes and raise.
    exchanges = get_available_exchanges()
    if exchange not in exchanges:
        for index in exchanges:
            print(index)
        raise Exception("Invalid exchange")
    else:
        if inactive:
            assets = ALL_ASSETS
        else:
            assets = ALL_ACTIVE_ASSETS
        return list(map(lambda asset: asset.symbol,
                        filter(lambda asset: asset.exchange == exchange,
                               assets)))
def get_nasdaq_symbols(inactive=True) -> List[str]:
    # Convenience wrapper for the NASDAQ exchange.
    return get_assets_from("NASDAQ", inactive=inactive)
def pull_symbol_data(symbol: str, limit: int) -> None:
    # Fetch *limit* daily bars for one symbol and dump them to data/<symbol>.csv.
    data = bars_api.get_barset(symbol, "1D", limit=limit).df
    data.to_csv(f"data/{symbol}.csv")
def pull_symbols_data(symbols: List[str], limit: int) -> None:
    # Fan pull_symbol_data out over a thread pool (I/O bound downloads).
    args = [(symbol, limit) for symbol in symbols]
    with concurrent.futures.ThreadPoolExecutor() as executor:
        executor.map(lambda p: pull_symbol_data(*p), args)
if __name__ == "__main__":
    # Demo entry point: pull 50 daily bars for the first 200 NASDAQ symbols
    # and report the elapsed wall-clock time.
    start = time.perf_counter()
    nasdaq_symbols = get_nasdaq_symbols()
    pull_symbols_data(nasdaq_symbols[:200], limit=50)
    end = time.perf_counter()
    print(f"Pulled data in {round(end - start, 2)} secs")
| true | true |
f7f9b5ad2ceee052e0ba12955d679dab4e84e53e | 702 | py | Python | ex19.py | nopythoner/python | 7d39eb361f3d3dd78d61c92740897ab04c80b195 | [
"MIT"
] | null | null | null | ex19.py | nopythoner/python | 7d39eb361f3d3dd78d61c92740897ab04c80b195 | [
"MIT"
] | null | null | null | ex19.py | nopythoner/python | 7d39eb361f3d3dd78d61c92740897ab04c80b195 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# coding:utf-8
def cheese_and_crackers(cheese_count,boxes_of_crackers):
    # Print a short party-supply report for the given counts.
    # (Python 2 syntax: ``print`` is a statement here, not a function.)
    print "You have %d cheeses!" %cheese_count
    print "You have %d boxes of rackers!" %boxes_of_crackers
    print "Man that's enough for a party!"
    print "Get a blanket.\n"
# Demonstrate the ways arguments can be supplied: literals, variables,
# expressions, and a mix of the two (Learn Python the Hard Way, ex19).
print "We can just give the function numbers directly:"
cheese_and_crackers(20,30)
print "OR,we can use variables from our script:"
amount_of_cheese = 10
amount_of_crackers = 50
cheese_and_crackers(amount_of_cheese,amount_of_crackers)
print "We can even do math inside too:"
cheese_and_crackers(10+20,5+6)
print "And we can combine the two,variables and math:"
cheese_and_crackers(amount_of_cheese +100,amount_of_crackers +1000)
| 26 | 67 | 0.780627 |
def cheese_and_crackers(cheese_count,boxes_of_crackers):
    # Print a party-supply report (Python 2 print statements).
    print "You have %d cheeses!" %cheese_count
    print "You have %d boxes of rackers!" %boxes_of_crackers
    print "Man that's enough for a party!"
    print "Get a blanket.\n"
print "We can just give the function numbers directly:"
cheese_and_crackers(20,30)
print "OR,we can use variables from our script:"
amount_of_cheese = 10
amount_of_crackers = 50
cheese_and_crackers(amount_of_cheese,amount_of_crackers)
print "We can even do math inside too:"
cheese_and_crackers(10+20,5+6)
print "And we can combine the two,variables and math:"
cheese_and_crackers(amount_of_cheese +100,amount_of_crackers +1000)
| false | true |
f7f9b6ab91d1c90241ce69b3942c4f394a37f0a9 | 1,775 | py | Python | src/daily-coding-problem/easy/word-board/word_board.py | nwthomas/code-challenges | 49c2532ff597495474e67b13f2ed9b9ad93d40b5 | [
"MIT"
] | 1 | 2020-12-11T05:54:59.000Z | 2020-12-11T05:54:59.000Z | src/daily-coding-problem/easy/word-board/word_board.py | nwthomas/code-challenges | 49c2532ff597495474e67b13f2ed9b9ad93d40b5 | [
"MIT"
] | 1 | 2021-04-10T06:53:30.000Z | 2021-04-10T06:53:30.000Z | src/daily-coding-problem/easy/word-board/word_board.py | nwthomas/code-challenges | 49c2532ff597495474e67b13f2ed9b9ad93d40b5 | [
"MIT"
] | 7 | 2019-11-24T12:10:35.000Z | 2020-12-14T22:36:31.000Z | """
Good morning! Here's your coding interview problem for today.
This problem was asked by Coursera.
Given a 2D board of characters and a word, find if the word exists in the grid.
The word can be constructed from letters of sequentially adjacent cell, where "adjacent" cells are those horizontally or vertically neighboring. The same letter cell may not be used more than once.
For example, given the following board:
[
['A','B','C','E'],
['S','F','C','S'],
['A','D','E','E']
]
exists(board, "ABCCED") returns true, exists(board, "SEE") returns true, exists(board, "ABCB") returns false.
"""
def is_word_present(board, word):
    """Return True if *word* can be traced through horizontally/vertically
    adjacent cells of *board*, using each cell at most once.

    Returns None for invalid argument types (mirrors the original guard),
    and False for an empty word.
    """
    if type(board) != list or type(board[0]) != list or type(word) != str:
        return None
    if not word:
        return False

    rows, cols = len(board), len(board[0])

    def _dfs(x, y, idx, visited):
        # Match word[idx] at (x, y), then extend to unvisited neighbours.
        if board[x][y] != word[idx]:
            return False
        if idx == len(word) - 1:
            return True
        visited.add((x, y))
        for nx, ny in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            if 0 <= nx < rows and 0 <= ny < cols and (nx, ny) not in visited:
                if _dfs(nx, ny, idx + 1, visited):
                    return True
        # Backtrack so this cell can appear on other candidate paths.
        visited.remove((x, y))
        return False

    # Bug fix: the original never tracked visited cells, so a path could
    # reuse a cell (e.g. "ABCB" on the docstring example was wrongly found),
    # violating the "same letter cell may not be used more than once" rule.
    return any(_dfs(x, y, 0, set()) for x in range(rows) for y in range(cols))
| 30.603448 | 197 | 0.544789 |
def is_word_present(board, word):
    """Return True if *word* exists in *board* via horizontally/vertically
    adjacent cells, each cell used at most once.

    Returns None for invalid argument types; False for an empty word.
    """
    if type(board) != list or type(board[0]) != list or type(word) != str:
        return None
    if not word:
        return False

    rows, cols = len(board), len(board[0])

    def _dfs(x, y, idx, visited):
        # Match word[idx] at (x, y), then extend to unvisited neighbours.
        if board[x][y] != word[idx]:
            return False
        if idx == len(word) - 1:
            return True
        visited.add((x, y))
        for nx, ny in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            if 0 <= nx < rows and 0 <= ny < cols and (nx, ny) not in visited:
                if _dfs(nx, ny, idx + 1, visited):
                    return True
        # Backtrack so this cell can appear on other candidate paths.
        visited.remove((x, y))
        return False

    # Bug fix: the original tracked no visited cells, so a path could
    # revisit a cell and wrongly find words like "ABCB".
    return any(_dfs(x, y, 0, set()) for x in range(rows) for y in range(cols))
| true | true |
f7f9b776c46f0dde37bc1e6abdf12c6f13e16868 | 4,260 | py | Python | salt/modules/aix_group.py | johnskopis/salt | 86adb6b0fe40230b8be4c74229e897a7a08f81a6 | [
"Apache-2.0"
] | 5 | 2018-05-01T20:51:14.000Z | 2021-11-09T05:43:00.000Z | salt/modules/aix_group.py | johnskopis/salt | 86adb6b0fe40230b8be4c74229e897a7a08f81a6 | [
"Apache-2.0"
] | 4 | 2019-02-08T17:53:38.000Z | 2019-06-06T16:17:27.000Z | salt/modules/aix_group.py | johnskopis/salt | 86adb6b0fe40230b8be4c74229e897a7a08f81a6 | [
"Apache-2.0"
] | 7 | 2017-09-29T18:49:53.000Z | 2021-11-09T05:42:49.000Z | # -*- coding: utf-8 -*-
'''
Manage groups on AIX
.. important::
If you feel that Salt should be using this module to manage groups on a
minion, and it is using a different module (or gives an error similar to
*'group.info' is not available*), see :ref:`here
<module-provider-override>`.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
log = logging.getLogger(__name__)
try:
import grp
except ImportError:
pass
# Define the module's virtual name
__virtualname__ = 'group'
def __virtual__():
    '''
    Set the group module if the kernel is AIX
    '''
    # Salt loader hook: expose this module as ``group`` only on AIX minions.
    if __grains__['kernel'] == 'AIX':
        return __virtualname__
    return (False, 'The aix_group execution module failed to load: '
            'only available on AIX systems.')
def add(name, gid=None, system=False, root=None):
    '''
    Add the specified group

    CLI Example:

    .. code-block:: bash

        salt '*' group.add foo 3456
    '''
    # NOTE(review): '-a' (administrative/system group) is only added when
    # *root* is also supplied - confirm this coupling is intended.
    parts = ['mkgroup ']
    if system and root is not None:
        parts.append('-a ')
    if gid:
        parts.append('id={0} '.format(gid))
    parts.append(name)
    ret = __salt__['cmd.run_all'](''.join(parts), python_shell=False)
    return not ret['retcode']
def delete(name):
    '''
    Remove the named group

    CLI Example:

    .. code-block:: bash

        salt '*' group.delete foo
    '''
    cmd = 'rmgroup {0}'.format(name)
    result = __salt__['cmd.run_all'](cmd, python_shell=False)
    # Success is a zero exit status from rmgroup.
    return result['retcode'] == 0
def info(name):
    '''
    Return information about a group

    CLI Example:

    .. code-block:: bash

        salt '*' group.info foo
    '''
    # Look the group up in the local group database; {} when it is missing.
    try:
        entry = grp.getgrnam(name)
    except KeyError:
        return {}
    return {
        'name': entry.gr_name,
        'passwd': entry.gr_passwd,
        'gid': entry.gr_gid,
        'members': entry.gr_mem,
    }
def getent(refresh=False):
    '''
    Return info on all groups

    CLI Example:

    .. code-block:: bash

        salt '*' group.getent
    '''
    # Serve the cached result unless the caller asks for a refresh.
    if not refresh and 'group.getent' in __context__:
        return __context__['group.getent']
    entries = [info(entry.gr_name) for entry in grp.getgrall()]
    __context__['group.getent'] = entries
    return entries
def chgid(name, gid):
    '''
    Change the gid for a named group

    CLI Example:

    .. code-block:: bash

        salt '*' group.chgid foo 4376
    '''
    pre_gid = __salt__['file.group_to_gid'](name)
    if gid == pre_gid:
        # Already at the requested gid - nothing to do.
        return True
    cmd = 'chgroup id={0} {1}'.format(gid, name)
    __salt__['cmd.run'](cmd, python_shell=False)
    # Re-read the gid to confirm the change actually took effect.
    post_gid = __salt__['file.group_to_gid'](name)
    if post_gid != pre_gid:
        return post_gid == gid
    return False
def adduser(name, username, root=None):
    '''
    Add a user in the group.

    CLI Example:

    .. code-block:: bash

        salt '*' group.adduser foo bar

    Verifies if a valid username 'bar' as a member of an existing group 'foo',
    if not then adds it.
    '''
    # ``chgrpmem -m +`` appends the member; zero exit status means success.
    retcode = __salt__['cmd.retcode'](
        'chgrpmem -m + {0} {1}'.format(username, name),
        python_shell=False)
    return retcode == 0
def deluser(name, username, root=None):
    '''
    Remove a user from the group.

    CLI Example:

    .. code-block:: bash

        salt '*' group.deluser foo bar

    Removes a member user 'bar' from a group 'foo'. If group is not present
    then returns True.
    '''
    grp_info = __salt__['group.info'](name)
    # Group missing (info() returns {}): nothing to remove, report success.
    if not grp_info:
        return True
    if username not in grp_info.get('members', ()):
        return True
    # Bug fix: the original called __salt__['cmd.run'] (which returns the
    # command *output string*) and then indexed it with ['retcode']; the
    # resulting TypeError was swallowed by a bare ``except Exception``, so
    # deluser always returned True.  Use cmd.retcode, matching adduser.
    cmd = 'chgrpmem -m - {0} {1}'.format(username, name)
    retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
    return not retcode
def members(name, members_list, root=None):
    '''
    Replaces members of the group with a provided list.

    CLI Example:

        salt '*' group.members foo 'user1,user2,user3,...'

    Replaces a membership list for a local group 'foo'.
        foo:x:1234:user1,user2,user3,...
    '''
    # ``chgrpmem -m =`` replaces the whole membership list in one shot.
    retcode = __salt__['cmd.retcode'](
        'chgrpmem -m = {0} {1}'.format(members_list, name),
        python_shell=False)
    return retcode == 0
| 21.19403 | 81 | 0.598592 |
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
try:
import grp
except ImportError:
pass
__virtualname__ = 'group'
def __virtual__():
    # Salt loader hook: only load this module on minions reporting AIX.
    if __grains__['kernel'] == 'AIX':
        return __virtualname__
    return (False, 'The aix_group execution module failed to load: '
            'only available on AIX systems.')
def add(name, gid=None, system=False, root=None):
    # Build the AIX ``mkgroup`` command; True when it exits with status 0.
    cmd = 'mkgroup '
    if system and root is not None:
        # NOTE(review): '-a' only applies when *root* is also given -
        # confirm this coupling is intended.
        cmd += '-a '
    if gid:
        cmd += 'id={0} '.format(gid)
    cmd += name
    ret = __salt__['cmd.run_all'](cmd, python_shell=False)
    return not ret['retcode']
def delete(name):
    # Remove the named group via ``rmgroup``; True when the command succeeds.
    ret = __salt__['cmd.run_all']('rmgroup {0}'.format(name), python_shell=False)
    return not ret['retcode']
def info(name):
    # Look the group up in the local group database; {} when it is missing.
    try:
        grinfo = grp.getgrnam(name)
    except KeyError:
        return {}
    else:
        return {'name': grinfo.gr_name,
                'passwd': grinfo.gr_passwd,
                'gid': grinfo.gr_gid,
                'members': grinfo.gr_mem}
def getent(refresh=False):
    # Return info() for every group; cached in __context__ until *refresh*.
    if 'group.getent' in __context__ and not refresh:
        return __context__['group.getent']
    ret = []
    for grinfo in grp.getgrall():
        ret.append(info(grinfo.gr_name))
    __context__['group.getent'] = ret
    return ret
def chgid(name, gid):
    # Change the group's gid with ``chgroup`` and verify the new value stuck.
    pre_gid = __salt__['file.group_to_gid'](name)
    if gid == pre_gid:
        return True
    cmd = 'chgroup id={0} {1}'.format(gid, name)
    __salt__['cmd.run'](cmd, python_shell=False)
    post_gid = __salt__['file.group_to_gid'](name)
    if post_gid != pre_gid:
        return post_gid == gid
    return False
def adduser(name, username, root=None):
    # ``chgrpmem -m +`` adds *username* to the group; True on exit status 0.
    cmd = 'chgrpmem -m + {0} {1}'.format(username, name)
    retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
    return not retcode
def deluser(name, username, root=None):
    # Remove *username* from the group; a missing group counts as success.
    grp_info = __salt__['group.info'](name)
    try:
        if username in grp_info['members']:
            cmd = 'chgrpmem -m - {0} {1}'.format(username, name)
            # NOTE(review): cmd.run returns the command *output string*, so
            # ret['retcode'] raises TypeError, which the bare except below
            # swallows - this function therefore always returns True.
            # Should use cmd.retcode like adduser().
            ret = __salt__['cmd.run'](cmd, python_shell=False)
            return not ret['retcode']
        else:
            return True
    except Exception:
        return True
def members(name, members_list, root=None):
    # ``chgrpmem -m =`` replaces the whole membership list; True on status 0.
    cmd = 'chgrpmem -m = {0} {1}'.format(members_list, name)
    retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
    return not retcode
| true | true |
f7f9b8613102fb6706f8828e2bfabe654e588fa7 | 6,207 | py | Python | bin/augustus_parallel.py | gamcil/funannotate | e37418b754d018fcd18dc87d3931ab8bac892538 | [
"BSD-2-Clause"
] | null | null | null | bin/augustus_parallel.py | gamcil/funannotate | e37418b754d018fcd18dc87d3931ab8bac892538 | [
"BSD-2-Clause"
] | null | null | null | bin/augustus_parallel.py | gamcil/funannotate | e37418b754d018fcd18dc87d3931ab8bac892538 | [
"BSD-2-Clause"
] | 1 | 2021-01-19T02:11:17.000Z | 2021-01-19T02:11:17.000Z | #!/usr/bin/env python
import sys, multiprocessing, subprocess, os, shutil, argparse, time, inspect
from Bio import SeqIO
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import lib.library as lib
#setup menu with argparse
class MyFormatter(argparse.ArgumentDefaultsHelpFormatter):
    # argparse formatter with a wider help column so option help wraps less.
    def __init__(self,prog):
        super(MyFormatter,self).__init__(prog,max_help_position=48)
parser=argparse.ArgumentParser(prog='augustus_parallel.py', usage="%(prog)s [options] -i genome.fasta -s botrytis_cinera -o new_genome",
description='''Script runs augustus in parallel to use multiple processors''',
epilog="""Written by Jon Palmer (2016) nextgenusfs@gmail.com""",
formatter_class = MyFormatter)
parser.add_argument('-i','--input', required=True, help='Genome in FASTA format')
parser.add_argument('-o','--out', required=True, help='Basename of output files')
parser.add_argument('-s','--species', required=True, help='Augustus species name')
parser.add_argument('--hints', help='Hints file (PE)')
parser.add_argument('--cpus', default=2, type=int, help='Number of CPUs to run')
parser.add_argument('--debug', action='store_true', help='Keep intermediate files')
parser.add_argument('--logfile', default ='augustus-parallel.log', help='logfile')
args=parser.parse_args()
#check for augustus installation
try:
AUGUSTUS = os.environ["AUGUSTUS_CONFIG_PATH"]
except KeyError:
if not args.AUGUSTUS_CONFIG_PATH:
print("$AUGUSTUS_CONFIG_PATH environmental variable not found, Augustus is not properly configured")
os._exit(1)
if AUGUSTUS.endswith('config'):
AUGUSTUS_BASE = AUGUSTUS.replace('config', '')
elif AUGUSTUS.endswith('config'+os.sep):
AUGUSTUS_BASE = AUGUSTUS.replace('config'+os.sep, '')
#setup hints and extrinic input, hard coded for protein and transcript alignments from funannotate
extrinsic = '--extrinsicCfgFile='+os.path.join(AUGUSTUS_BASE, 'config', 'extrinsic', 'extrinsic.E.XNT.cfg')
def countGFFgenes(input):
    """Count the gene features in a GFF/GFF3 file.

    Parameters
    ----------
    input : str
        Path to a tab-delimited GFF file.

    Returns
    -------
    int
        Number of lines whose feature column is ``gene``.
    """
    count = 0
    # Bug fix: 'r' instead of 'rU' - universal newlines are the default in
    # Python 3 and the 'U' open() flag was removed in Python 3.11.
    with open(input, 'r') as f:
        for line in f:
            if "\tgene\t" in line:
                count += 1
    return count
def runAugustus(Input):
    """Run augustus on one contig (or contig chunk) named *Input*.

    Relies on module globals set up by the main script: ``args``,
    ``tmpdir``, ``ranges``, ``extrinsic`` and ``lib``.
    """
    # Chunked contigs are named '<contig>_part<N>' but share the contig FASTA.
    if '_part' in Input:
        chr = Input.split('_part')[0]
    else:
        chr = Input
    species='--species='+args.species
    hints_input = '--hintsfile='+args.hints
    aug_out = os.path.join(tmpdir, Input+'.augustus.gff3')
    core_cmd = ['augustus', species, '--gff3=on', '--UTR=off', '--stopCodonExcludedFromCDS=False', os.path.join(tmpdir, chr+'.fa')]
    if args.hints:
        core_cmd.insert(2, extrinsic)
        core_cmd.insert(3, hints_input)
    if Input in ranges:
        # Restrict prediction to this chunk's coordinate window.
        start = ranges.get(Input)[0]
        end = ranges.get(Input)[1]
        core_cmd.insert(2, '--predictionStart='+str(start))
        core_cmd.insert(3, '--predictionEnd='+str(end))
    # Run via the shared library helper; stdout is captured to aug_out.
    lib.runSubprocess2(core_cmd, '.', lib.log, aug_out)
log_name = args.logfile
if os.path.isfile(log_name):
os.remove(log_name)
#initialize script, log system info and cmd issue at runtime
lib.setupLogging(log_name)
cmd_args = " ".join(sys.argv)+'\n'
lib.log.debug(cmd_args)
#first step is to split input fasta file into individual files in tmp folder
lib.log.debug("Splitting contigs and hints files")
tmpdir = 'augustus_tmp_'+str(os.getpid())
os.makedirs(tmpdir)
scaffolds = []
global ranges
ranges = {}
with open(args.input, 'rU') as InputFasta:
for record in SeqIO.parse(InputFasta, 'fasta'):
contiglength = len(record.seq)
if contiglength > 500000: #split large contigs
num_parts = contiglength / 500000 + 1
chunks = contiglength / num_parts
for i in range(0,num_parts):
name = str(record.id)+'_part'+str(i+1)
scaffolds.append(name)
outputfile = os.path.join(tmpdir, str(record.id)+'.fa')
if i == 0: #this is first record
start = 1
end = chunks + 10000
else:
start = end - 10000
end = start + chunks + 10000
if end > contiglength:
end = contiglength
if not name in ranges:
ranges[name] = (start, end)
with open(outputfile, 'w') as output:
SeqIO.write(record, output, 'fasta')
else:
name = str(record.id)
scaffolds.append(name)
outputfile = os.path.join(tmpdir, name+'.fa')
with open(outputfile, 'w') as output:
SeqIO.write(record, output, 'fasta')
'''
#if hints file passed, split it up by scaffold
if args.hints:
for i in scaffolds:
if '_part' in i:
i = i.split('_part')[0]
if not os.path.isfile(os.path.join(tmpdir, i+'.hints.gff')):
with open(os.path.join(tmpdir, i+'.hints.gff'), 'w') as output:
with open(args.hints, 'rU') as hintsfile:
for line in hintsfile:
cols = line.split('\t')
if cols[0] == i:
output.write(line)
'''
#now loop through each scaffold running augustus
if args.cpus > len(scaffolds):
num = len(scaffolds)
else:
num = args.cpus
lib.log.debug("Running Augustus on %i chunks, using %i CPUs" % (len(scaffolds), num))
lib.runMultiProgress(runAugustus, scaffolds, num)
lib.log.debug("Augustus prediction is finished, now concatenating results")
with open(os.path.join(tmpdir, 'augustus_all.gff3'), 'w') as output:
for file in scaffolds:
file = os.path.join(tmpdir, file+'.augustus.gff3')
with open(file) as input:
output.write(input.read())
join_script = os.path.join(AUGUSTUS_BASE, 'scripts', 'join_aug_pred.pl')
with open(args.out, 'w') as finalout:
with open(os.path.join(tmpdir, 'augustus_all.gff3'), 'rU') as input:
subprocess.call([join_script],stdin = input, stdout = finalout)
if not args.debug:
shutil.rmtree(tmpdir)
lib.log.info('Found {0:,}'.format(countGFFgenes(args.out))+' gene models')
| 40.305195 | 136 | 0.641695 |
import sys, multiprocessing, subprocess, os, shutil, argparse, time, inspect
from Bio import SeqIO
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import lib.library as lib
class MyFormatter(argparse.ArgumentDefaultsHelpFormatter):
    # argparse formatter with a wider help column so option help wraps less.
    def __init__(self,prog):
        super(MyFormatter,self).__init__(prog,max_help_position=48)
parser=argparse.ArgumentParser(prog='augustus_parallel.py', usage="%(prog)s [options] -i genome.fasta -s botrytis_cinera -o new_genome",
description='''Script runs augustus in parallel to use multiple processors''',
epilog="""Written by Jon Palmer (2016) nextgenusfs@gmail.com""",
formatter_class = MyFormatter)
parser.add_argument('-i','--input', required=True, help='Genome in FASTA format')
parser.add_argument('-o','--out', required=True, help='Basename of output files')
parser.add_argument('-s','--species', required=True, help='Augustus species name')
parser.add_argument('--hints', help='Hints file (PE)')
parser.add_argument('--cpus', default=2, type=int, help='Number of CPUs to run')
parser.add_argument('--debug', action='store_true', help='Keep intermediate files')
parser.add_argument('--logfile', default ='augustus-parallel.log', help='logfile')
args=parser.parse_args()
try:
AUGUSTUS = os.environ["AUGUSTUS_CONFIG_PATH"]
except KeyError:
if not args.AUGUSTUS_CONFIG_PATH:
print("$AUGUSTUS_CONFIG_PATH environmental variable not found, Augustus is not properly configured")
os._exit(1)
if AUGUSTUS.endswith('config'):
AUGUSTUS_BASE = AUGUSTUS.replace('config', '')
elif AUGUSTUS.endswith('config'+os.sep):
AUGUSTUS_BASE = AUGUSTUS.replace('config'+os.sep, '')
extrinsic = '--extrinsicCfgFile='+os.path.join(AUGUSTUS_BASE, 'config', 'extrinsic', 'extrinsic.E.XNT.cfg')
def countGFFgenes(input):
    """Count lines whose GFF feature column is ``gene`` in the file *input*."""
    count = 0
    # Bug fix: 'r' instead of 'rU' - the 'U' open() flag was removed in
    # Python 3.11 (universal newlines are already the default).
    with open(input, 'r') as f:
        for line in f:
            if "\tgene\t" in line:
                count += 1
    return count
def runAugustus(Input):
    """Run augustus on one contig (or contig chunk) named *Input*.

    Relies on module globals from the main script: ``args``, ``tmpdir``,
    ``ranges``, ``extrinsic`` and ``lib``.
    """
    # Chunked contigs are named '<contig>_part<N>' but share the contig FASTA.
    if '_part' in Input:
        chr = Input.split('_part')[0]
    else:
        chr = Input
    species='--species='+args.species
    hints_input = '--hintsfile='+args.hints
    aug_out = os.path.join(tmpdir, Input+'.augustus.gff3')
    core_cmd = ['augustus', species, '--gff3=on', '--UTR=off', '--stopCodonExcludedFromCDS=False', os.path.join(tmpdir, chr+'.fa')]
    if args.hints:
        core_cmd.insert(2, extrinsic)
        core_cmd.insert(3, hints_input)
    if Input in ranges:
        # Restrict prediction to this chunk's coordinate window.
        start = ranges.get(Input)[0]
        end = ranges.get(Input)[1]
        core_cmd.insert(2, '--predictionStart='+str(start))
        core_cmd.insert(3, '--predictionEnd='+str(end))
    # Run via the shared library helper; stdout is captured to aug_out.
    lib.runSubprocess2(core_cmd, '.', lib.log, aug_out)
log_name = args.logfile
if os.path.isfile(log_name):
os.remove(log_name)
lib.setupLogging(log_name)
cmd_args = " ".join(sys.argv)+'\n'
lib.log.debug(cmd_args)
lib.log.debug("Splitting contigs and hints files")
tmpdir = 'augustus_tmp_'+str(os.getpid())
os.makedirs(tmpdir)
scaffolds = []
global ranges
ranges = {}
with open(args.input, 'rU') as InputFasta:
for record in SeqIO.parse(InputFasta, 'fasta'):
contiglength = len(record.seq)
if contiglength > 500000:
num_parts = contiglength / 500000 + 1
chunks = contiglength / num_parts
for i in range(0,num_parts):
name = str(record.id)+'_part'+str(i+1)
scaffolds.append(name)
outputfile = os.path.join(tmpdir, str(record.id)+'.fa')
if i == 0:
start = 1
end = chunks + 10000
else:
start = end - 10000
end = start + chunks + 10000
if end > contiglength:
end = contiglength
if not name in ranges:
ranges[name] = (start, end)
with open(outputfile, 'w') as output:
SeqIO.write(record, output, 'fasta')
else:
name = str(record.id)
scaffolds.append(name)
outputfile = os.path.join(tmpdir, name+'.fa')
with open(outputfile, 'w') as output:
SeqIO.write(record, output, 'fasta')
if args.cpus > len(scaffolds):
num = len(scaffolds)
else:
num = args.cpus
lib.log.debug("Running Augustus on %i chunks, using %i CPUs" % (len(scaffolds), num))
lib.runMultiProgress(runAugustus, scaffolds, num)
lib.log.debug("Augustus prediction is finished, now concatenating results")
with open(os.path.join(tmpdir, 'augustus_all.gff3'), 'w') as output:
for file in scaffolds:
file = os.path.join(tmpdir, file+'.augustus.gff3')
with open(file) as input:
output.write(input.read())
join_script = os.path.join(AUGUSTUS_BASE, 'scripts', 'join_aug_pred.pl')
with open(args.out, 'w') as finalout:
with open(os.path.join(tmpdir, 'augustus_all.gff3'), 'rU') as input:
subprocess.call([join_script],stdin = input, stdout = finalout)
if not args.debug:
shutil.rmtree(tmpdir)
lib.log.info('Found {0:,}'.format(countGFFgenes(args.out))+' gene models')
| true | true |
f7f9b87c455908d11aaf14d364cbc3e51a3159bf | 215 | py | Python | bigcommerce/resources/v3/redirects.py | aglensmith/bigcommerce-api-python | 2f83ae30dbaa3cd9b7d465e40df2862a7f13795c | [
"MIT"
] | null | null | null | bigcommerce/resources/v3/redirects.py | aglensmith/bigcommerce-api-python | 2f83ae30dbaa3cd9b7d465e40df2862a7f13795c | [
"MIT"
] | null | null | null | bigcommerce/resources/v3/redirects.py | aglensmith/bigcommerce-api-python | 2f83ae30dbaa3cd9b7d465e40df2862a7f13795c | [
"MIT"
] | null | null | null | from ..base import *
class Redirects(ListableApiResource, CreateableApiResource,
UpdateableApiResource, DeleteableApiResource):
resource_version = 'v3'
resource_name = 'storefront/redirects' | 30.714286 | 61 | 0.744186 | from ..base import *
class Redirects(ListableApiResource, CreateableApiResource,
UpdateableApiResource, DeleteableApiResource):
resource_version = 'v3'
resource_name = 'storefront/redirects' | true | true |
f7f9b940106e34f91dc65350ce17d8d76ecf78f5 | 48,550 | py | Python | sympy/utilities/lambdify.py | synthetic-tensors/sympy | d7da4f357f6a556f4d4933502335d49642927640 | [
"BSD-3-Clause"
] | null | null | null | sympy/utilities/lambdify.py | synthetic-tensors/sympy | d7da4f357f6a556f4d4933502335d49642927640 | [
"BSD-3-Clause"
] | null | null | null | sympy/utilities/lambdify.py | synthetic-tensors/sympy | d7da4f357f6a556f4d4933502335d49642927640 | [
"BSD-3-Clause"
] | null | null | null | """
This module provides convenient functions to transform sympy expressions to
lambda functions which can be used to calculate numerical values very fast.
"""
from typing import Any, Dict, Iterable
import builtins
import inspect
import keyword
import textwrap
import linecache
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.core.compatibility import (is_sequence, iterable,
NotIterable)
from sympy.utilities.misc import filldedent
from sympy.utilities.decorator import doctest_depends_on
__doctest_requires__ = {('lambdify',): ['numpy', 'tensorflow']}
# Default namespaces, letting us define translations that can't be defined
# by simple variable maps, like I => 1j
MATH_DEFAULT = {} # type: Dict[str, Any]
MPMATH_DEFAULT = {} # type: Dict[str, Any]
NUMPY_DEFAULT = {"I": 1j} # type: Dict[str, Any]
SCIPY_DEFAULT = {"I": 1j} # type: Dict[str, Any]
TENSORFLOW_DEFAULT = {} # type: Dict[str, Any]
TORCH_DEFAULT = {} # type: Dict[str, Any]
SYMPY_DEFAULT = {} # type: Dict[str, Any]
NUMEXPR_DEFAULT = {} # type: Dict[str, Any]
# These are the namespaces the lambda functions will use.
# These are separate from the names above because they are modified
# throughout this file, whereas the defaults should remain unmodified.
MATH = MATH_DEFAULT.copy()
MPMATH = MPMATH_DEFAULT.copy()
NUMPY = NUMPY_DEFAULT.copy()
SCIPY = SCIPY_DEFAULT.copy()
TENSORFLOW = TENSORFLOW_DEFAULT.copy()
TORCH = TORCH_DEFAULT.copy()
SYMPY = SYMPY_DEFAULT.copy()
NUMEXPR = NUMEXPR_DEFAULT.copy()
# Mappings between sympy and other modules function names.
MATH_TRANSLATIONS = {
"ceiling": "ceil",
"E": "e",
"ln": "log",
}
# NOTE: This dictionary is reused in Function._eval_evalf to allow subclasses
# of Function to automatically evalf.
MPMATH_TRANSLATIONS = {
"Abs": "fabs",
"elliptic_k": "ellipk",
"elliptic_f": "ellipf",
"elliptic_e": "ellipe",
"elliptic_pi": "ellippi",
"ceiling": "ceil",
"chebyshevt": "chebyt",
"chebyshevu": "chebyu",
"E": "e",
"I": "j",
"ln": "log",
#"lowergamma":"lower_gamma",
"oo": "inf",
#"uppergamma":"upper_gamma",
"LambertW": "lambertw",
"MutableDenseMatrix": "matrix",
"ImmutableDenseMatrix": "matrix",
"conjugate": "conj",
"dirichlet_eta": "altzeta",
"Ei": "ei",
"Shi": "shi",
"Chi": "chi",
"Si": "si",
"Ci": "ci",
"RisingFactorial": "rf",
"FallingFactorial": "ff",
"betainc_regularized": "betainc",
}
NUMPY_TRANSLATIONS = {} # type: Dict[str, str]
SCIPY_TRANSLATIONS = {} # type: Dict[str, str]
TENSORFLOW_TRANSLATIONS = {} # type: Dict[str, str]
TORCH_TRANSLATIONS = {} # type: Dict[str, str]
NUMEXPR_TRANSLATIONS = {} # type: Dict[str, str]
# Available modules:
MODULES = {
"math": (MATH, MATH_DEFAULT, MATH_TRANSLATIONS, ("from math import *",)),
"mpmath": (MPMATH, MPMATH_DEFAULT, MPMATH_TRANSLATIONS, ("from mpmath import *",)),
"numpy": (NUMPY, NUMPY_DEFAULT, NUMPY_TRANSLATIONS, ("import numpy; from numpy import *; from numpy.linalg import *",)),
"scipy": (SCIPY, SCIPY_DEFAULT, SCIPY_TRANSLATIONS, ("import numpy; import scipy; from scipy import *; from scipy.special import *",)),
"tensorflow": (TENSORFLOW, TENSORFLOW_DEFAULT, TENSORFLOW_TRANSLATIONS, ("import tensorflow",)),
"torch": (TORCH, TORCH_DEFAULT, TORCH_TRANSLATIONS, ("import torch",)),
"sympy": (SYMPY, SYMPY_DEFAULT, {}, (
"from sympy.functions import *",
"from sympy.matrices import *",
"from sympy import Integral, pi, oo, nan, zoo, E, I",)),
"numexpr" : (NUMEXPR, NUMEXPR_DEFAULT, NUMEXPR_TRANSLATIONS,
("import_module('numexpr')", )),
}
def _import(module, reload=False):
    """
    Creates a global translation dictionary for module.

    The argument module has to be one of the following strings: "math",
    "mpmath", "numpy", "scipy", "tensorflow", "torch", "sympy", "numexpr".
    These dictionaries map names of python functions to their equivalent in
    other modules.
    """
    # Required despite static analysis claiming it is not used
    from sympy.external import import_module # noqa:F401
    try:
        namespace, namespace_default, translations, import_commands = MODULES[
            module]
    except KeyError:
        raise NameError(
            "'%s' module can't be used for lambdification" % module)
    # Clear namespace or exit
    if namespace != namespace_default:
        # The namespace was already generated, don't do it again if not forced.
        if reload:
            namespace.clear()
            namespace.update(namespace_default)
        else:
            return
    # Populate the namespace: 'import_module(...)' commands are eval'd and
    # their module __dict__ merged in; plain import statements are exec'd
    # directly into the namespace.
    for import_command in import_commands:
        if import_command.startswith('import_module'):
            module = eval(import_command)
            if module is not None:
                namespace.update(module.__dict__)
                continue
        else:
            try:
                exec(import_command, {}, namespace)
                continue
            except ImportError:
                pass
        raise ImportError(
            "can't import '%s' with '%s' command" % (module, import_command))
    # Add translated names to namespace
    for sympyname, translation in translations.items():
        namespace[sympyname] = namespace[translation]
    # For computing the modulus of a sympy expression we use the builtin abs
    # function, instead of the previously used fabs function for all
    # translation modules. This is because the fabs function in the math
    # module does not accept complex valued arguments. (see issue 9474). The
    # only exception, where we don't use the builtin abs function is the
    # mpmath translation module, because mpmath.fabs returns mpf objects in
    # contrast to abs().
    if 'Abs' not in namespace:
        namespace['Abs'] = abs
# Used for dynamically generated filenames that are inserted into the
# linecache. Incremented once per lambdify() call so each generated
# function gets a unique pseudo-filename for inspect/traceback support.
_lambdify_generated_counter = 1
@doctest_depends_on(modules=('numpy', 'tensorflow', ), python_version=(3,))
def lambdify(args: Iterable, expr, modules=None, printer=None, use_imps=True,
             dummify=False):
    """Convert a SymPy expression into a function that allows for fast
    numeric evaluation.
    .. warning::
       This function uses ``exec``, and thus shouldn't be used on
       unsanitized input.
    .. versionchanged:: 1.7.0
       Passing a set for the *args* parameter is deprecated as sets are
       unordered. Use an ordered iterable such as a list or tuple.
    Explanation
    ===========
    For example, to convert the SymPy expression ``sin(x) + cos(x)`` to an
    equivalent NumPy function that numerically evaluates it:
    >>> from sympy import sin, cos, symbols, lambdify
    >>> import numpy as np
    >>> x = symbols('x')
    >>> expr = sin(x) + cos(x)
    >>> expr
    sin(x) + cos(x)
    >>> f = lambdify(x, expr, 'numpy')
    >>> a = np.array([1, 2])
    >>> f(a)
    [1.38177329 0.49315059]
    The primary purpose of this function is to provide a bridge from SymPy
    expressions to numerical libraries such as NumPy, SciPy, NumExpr, mpmath,
    and tensorflow. In general, SymPy functions do not work with objects from
    other libraries, such as NumPy arrays, and functions from numeric
    libraries like NumPy or mpmath do not work on SymPy expressions.
    ``lambdify`` bridges the two by converting a SymPy expression to an
    equivalent numeric function.
    The basic workflow with ``lambdify`` is to first create a SymPy expression
    representing whatever mathematical function you wish to evaluate. This
    should be done using only SymPy functions and expressions. Then, use
    ``lambdify`` to convert this to an equivalent function for numerical
    evaluation. For instance, above we created ``expr`` using the SymPy symbol
    ``x`` and SymPy functions ``sin`` and ``cos``, then converted it to an
    equivalent NumPy function ``f``, and called it on a NumPy array ``a``.
    Parameters
    ==========
    args : List[Symbol]
        A variable or a list of variables whose nesting represents the
        nesting of the arguments that will be passed to the function.
        Variables can be symbols, undefined functions, or matrix symbols.
        >>> from sympy import Eq
        >>> from sympy.abc import x, y, z
        The list of variables should match the structure of how the
        arguments will be passed to the function. Simply enclose the
        parameters as they will be passed in a list.
        To call a function like ``f(x)`` then ``[x]``
        should be the first argument to ``lambdify``; for this
        case a single ``x`` can also be used:
        >>> f = lambdify(x, x + 1)
        >>> f(1)
        2
        >>> f = lambdify([x], x + 1)
        >>> f(1)
        2
        To call a function like ``f(x, y)`` then ``[x, y]`` will
        be the first argument of the ``lambdify``:
        >>> f = lambdify([x, y], x + y)
        >>> f(1, 1)
        2
        To call a function with a single 3-element tuple like
        ``f((x, y, z))`` then ``[(x, y, z)]`` will be the first
        argument of the ``lambdify``:
        >>> f = lambdify([(x, y, z)], Eq(z**2, x**2 + y**2))
        >>> f((3, 4, 5))
        True
        If two args will be passed and the first is a scalar but
        the second is a tuple with two arguments then the items
        in the list should match that structure:
        >>> f = lambdify([x, (y, z)], x + y + z)
        >>> f(1, (2, 3))
        6
    expr : Expr
        An expression, list of expressions, or matrix to be evaluated.
        Lists may be nested.
        If the expression is a list, the output will also be a list.
        >>> f = lambdify(x, [x, [x + 1, x + 2]])
        >>> f(1)
        [1, [2, 3]]
        If it is a matrix, an array will be returned (for the NumPy module).
        >>> from sympy import Matrix
        >>> f = lambdify(x, Matrix([x, x + 1]))
        >>> f(1)
        [[1]
         [2]]
        Note that the argument order here (variables then expression) is used
        to emulate the Python ``lambda`` keyword. ``lambdify(x, expr)`` works
        (roughly) like ``lambda x: expr``
        (see :ref:`lambdify-how-it-works` below).
    modules : str, optional
        Specifies the numeric library to use.
        If not specified, *modules* defaults to:
        - ``["scipy", "numpy"]`` if SciPy is installed
        - ``["numpy"]`` if only NumPy is installed
        - ``["math", "mpmath", "sympy"]`` if neither is installed.
        That is, SymPy functions are replaced as far as possible by
        either ``scipy`` or ``numpy`` functions if available, and Python's
        standard library ``math``, or ``mpmath`` functions otherwise.
        *modules* can be one of the following types:
        - The strings ``"math"``, ``"mpmath"``, ``"numpy"``, ``"numexpr"``,
          ``"scipy"``, ``"sympy"``, or ``"tensorflow"``. This uses the
          corresponding printer and namespace mapping for that module.
        - A module (e.g., ``math``). This uses the global namespace of the
          module. If the module is one of the above known modules, it will
          also use the corresponding printer and namespace mapping
          (i.e., ``modules=numpy`` is equivalent to ``modules="numpy"``).
        - A dictionary that maps names of SymPy functions to arbitrary
          functions
          (e.g., ``{'sin': custom_sin}``).
        - A list that contains a mix of the arguments above, with higher
          priority given to entries appearing first
          (e.g., to use the NumPy module but override the ``sin`` function
          with a custom version, you can use
          ``[{'sin': custom_sin}, 'numpy']``).
    dummify : bool, optional
        Whether or not the variables in the provided expression that are not
        valid Python identifiers are substituted with dummy symbols.
        This allows for undefined functions like ``Function('f')(t)`` to be
        supplied as arguments. By default, the variables are only dummified
        if they are not valid Python identifiers.
        Set ``dummify=True`` to replace all arguments with dummy symbols
        (if ``args`` is not a string) - for example, to ensure that the
        arguments do not redefine any built-in names.
    Examples
    ========
    >>> from sympy.utilities.lambdify import implemented_function
    >>> from sympy import sqrt, sin, Matrix
    >>> from sympy import Function
    >>> from sympy.abc import w, x, y, z
    >>> f = lambdify(x, x**2)
    >>> f(2)
    4
    >>> f = lambdify((x, y, z), [z, y, x])
    >>> f(1,2,3)
    [3, 2, 1]
    >>> f = lambdify(x, sqrt(x))
    >>> f(4)
    2.0
    >>> f = lambdify((x, y), sin(x*y)**2)
    >>> f(0, 5)
    0.0
    >>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy')
    >>> row(1, 2)
    Matrix([[1, 3]])
    ``lambdify`` can be used to translate SymPy expressions into mpmath
    functions. This may be preferable to using ``evalf`` (which uses mpmath on
    the backend) in some cases.
    >>> f = lambdify(x, sin(x), 'mpmath')
    >>> f(1)
    0.8414709848078965
    Tuple arguments are handled and the lambdified function should
    be called with the same type of arguments as were used to create
    the function:
    >>> f = lambdify((x, (y, z)), x + y)
    >>> f(1, (2, 4))
    3
    The ``flatten`` function can be used to always work with flattened
    arguments:
    >>> from sympy.utilities.iterables import flatten
    >>> args = w, (x, (y, z))
    >>> vals = 1, (2, (3, 4))
    >>> f = lambdify(flatten(args), w + x + y + z)
    >>> f(*flatten(vals))
    10
    Functions present in ``expr`` can also carry their own numerical
    implementations, in a callable attached to the ``_imp_`` attribute. This
    can be used with undefined functions using the ``implemented_function``
    factory:
    >>> f = implemented_function(Function('f'), lambda x: x+1)
    >>> func = lambdify(x, f(x))
    >>> func(4)
    5
    ``lambdify`` always prefers ``_imp_`` implementations to implementations
    in other namespaces, unless the ``use_imps`` input parameter is False.
    Usage with Tensorflow:
    >>> import tensorflow as tf
    >>> from sympy import Max, sin, lambdify
    >>> from sympy.abc import x
    >>> f = Max(x, sin(x))
    >>> func = lambdify(x, f, 'tensorflow')
    After tensorflow v2, eager execution is enabled by default.
    If you want to get the compatible result across tensorflow v1 and v2
    as same as this tutorial, run this line.
    >>> tf.compat.v1.enable_eager_execution()
    If you have eager execution enabled, you can get the result out
    immediately as you can use numpy.
    If you pass tensorflow objects, you may get an ``EagerTensor``
    object instead of value.
    >>> result = func(tf.constant(1.0))
    >>> print(result)
    tf.Tensor(1.0, shape=(), dtype=float32)
    >>> print(result.__class__)
    <class 'tensorflow.python.framework.ops.EagerTensor'>
    You can use ``.numpy()`` to get the numpy value of the tensor.
    >>> result.numpy()
    1.0
    >>> var = tf.Variable(2.0)
    >>> result = func(var) # also works for tf.Variable and tf.Placeholder
    >>> result.numpy()
    2.0
    And it works with any shape array.
    >>> tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    >>> result = func(tensor)
    >>> result.numpy()
    [[1. 2.]
     [3. 4.]]
    Notes
    =====
    - For functions involving large array calculations, numexpr can provide a
      significant speedup over numpy. Please note that the available functions
      for numexpr are more limited than numpy but can be expanded with
      ``implemented_function`` and user defined subclasses of Function. If
      specified, numexpr may be the only option in modules. The official list
      of numexpr functions can be found at:
      https://numexpr.readthedocs.io/en/latest/user_guide.html#supported-functions
    - In previous versions of SymPy, ``lambdify`` replaced ``Matrix`` with
      ``numpy.matrix`` by default. As of SymPy 1.0 ``numpy.array`` is the
      default. To get the old default behavior you must pass in
      ``[{'ImmutableDenseMatrix':  numpy.matrix}, 'numpy']`` to the
      ``modules`` kwarg.
      >>> from sympy import lambdify, Matrix
      >>> from sympy.abc import x, y
      >>> import numpy
      >>> array2mat = [{'ImmutableDenseMatrix': numpy.matrix}, 'numpy']
      >>> f = lambdify((x, y), Matrix([x, y]), modules=array2mat)
      >>> f(1, 2)
      [[1]
       [2]]
    - In the above examples, the generated functions can accept scalar
      values or numpy arrays as arguments. However, in some cases
      the generated function relies on the input being a numpy array:
      >>> from sympy import Piecewise
      >>> from sympy.testing.pytest import ignore_warnings
      >>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "numpy")
      >>> with ignore_warnings(RuntimeWarning):
      ...     f(numpy.array([-1, 0, 1, 2]))
      [-1.   0.   1.   0.5]
      >>> f(0)
      Traceback (most recent call last):
          ...
      ZeroDivisionError: division by zero
      In such cases, the input should be wrapped in a numpy array:
      >>> with ignore_warnings(RuntimeWarning):
      ...     float(f(numpy.array([0])))
      0.0
      Or if numpy functionality is not required another module can be used:
      >>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "math")
      >>> f(0)
      0
    .. _lambdify-how-it-works:
    How it works
    ============
    When using this function, it helps a great deal to have an idea of what it
    is doing. At its core, lambdify is nothing more than a namespace
    translation, on top of a special printer that makes some corner cases work
    properly.
    To understand lambdify, first we must properly understand how Python
    namespaces work. Say we had two files. One called ``sin_cos_sympy.py``,
    with
    .. code:: python
        # sin_cos_sympy.py
        from sympy import sin, cos
        def sin_cos(x):
            return sin(x) + cos(x)
    and one called ``sin_cos_numpy.py`` with
    .. code:: python
        # sin_cos_numpy.py
        from numpy import sin, cos
        def sin_cos(x):
            return sin(x) + cos(x)
    The two files define an identical function ``sin_cos``. However, in the
    first file, ``sin`` and ``cos`` are defined as the SymPy ``sin`` and
    ``cos``. In the second, they are defined as the NumPy versions.
    If we were to import the first file and use the ``sin_cos`` function, we
    would get something like
    >>> from sin_cos_sympy import sin_cos # doctest: +SKIP
    >>> sin_cos(1) # doctest: +SKIP
    cos(1) + sin(1)
    On the other hand, if we imported ``sin_cos`` from the second file, we
    would get
    >>> from sin_cos_numpy import sin_cos # doctest: +SKIP
    >>> sin_cos(1) # doctest: +SKIP
    1.38177329068
    In the first case we got a symbolic output, because it used the symbolic
    ``sin`` and ``cos`` functions from SymPy. In the second, we got a numeric
    result, because ``sin_cos`` used the numeric ``sin`` and ``cos`` functions
    from NumPy. But notice that the versions of ``sin`` and ``cos`` that were
    used was not inherent to the ``sin_cos`` function definition. Both
    ``sin_cos`` definitions are exactly the same. Rather, it was based on the
    names defined at the module where the ``sin_cos`` function was defined.
    The key point here is that when function in Python references a name that
    is not defined in the function, that name is looked up in the "global"
    namespace of the module where that function is defined.
    Now, in Python, we can emulate this behavior without actually writing a
    file to disk using the ``exec`` function. ``exec`` takes a string
    containing a block of Python code, and a dictionary that should contain
    the global variables of the module. It then executes the code "in" that
    dictionary, as if it were the module globals. The following is equivalent
    to the ``sin_cos`` defined in ``sin_cos_sympy.py``:
    >>> import sympy
    >>> module_dictionary = {'sin': sympy.sin, 'cos': sympy.cos}
    >>> exec('''
    ... def sin_cos(x):
    ...     return sin(x) + cos(x)
    ... ''', module_dictionary)
    >>> sin_cos = module_dictionary['sin_cos']
    >>> sin_cos(1)
    cos(1) + sin(1)
    and similarly with ``sin_cos_numpy``:
    >>> import numpy
    >>> module_dictionary = {'sin': numpy.sin, 'cos': numpy.cos}
    >>> exec('''
    ... def sin_cos(x):
    ...     return sin(x) + cos(x)
    ... ''', module_dictionary)
    >>> sin_cos = module_dictionary['sin_cos']
    >>> sin_cos(1)
    1.38177329068
    So now we can get an idea of how ``lambdify`` works. The name "lambdify"
    comes from the fact that we can think of something like ``lambdify(x,
    sin(x) + cos(x), 'numpy')`` as ``lambda x: sin(x) + cos(x)``, where
    ``sin`` and ``cos`` come from the ``numpy`` namespace. This is also why
    the symbols argument is first in ``lambdify``, as opposed to most SymPy
    functions where it comes after the expression: to better mimic the
    ``lambda`` keyword.
    ``lambdify`` takes the input expression (like ``sin(x) + cos(x)``) and
    1. Converts it to a string
    2. Creates a module globals dictionary based on the modules that are
       passed in (by default, it uses the NumPy module)
    3. Creates the string ``"def func({vars}): return {expr}"``, where ``{vars}`` is the
       list of variables separated by commas, and ``{expr}`` is the string
       created in step 1., then ``exec``s that string with the module globals
       namespace and returns ``func``.
    In fact, functions returned by ``lambdify`` support inspection. So you can
    see exactly how they are defined by using ``inspect.getsource``, or ``??`` if you
    are using IPython or the Jupyter notebook.
    >>> f = lambdify(x, sin(x) + cos(x))
    >>> import inspect
    >>> print(inspect.getsource(f))
    def _lambdifygenerated(x):
        return (sin(x) + cos(x))
    This shows us the source code of the function, but not the namespace it
    was defined in. We can inspect that by looking at the ``__globals__``
    attribute of ``f``:
    >>> f.__globals__['sin']
    <ufunc 'sin'>
    >>> f.__globals__['cos']
    <ufunc 'cos'>
    >>> f.__globals__['sin'] is numpy.sin
    True
    This shows us that ``sin`` and ``cos`` in the namespace of ``f`` will be
    ``numpy.sin`` and ``numpy.cos``.
    Note that there are some convenience layers in each of these steps, but at
    the core, this is how ``lambdify`` works. Step 1 is done using the
    ``LambdaPrinter`` printers defined in the printing module (see
    :mod:`sympy.printing.lambdarepr`). This allows different SymPy expressions
    to define how they should be converted to a string for different modules.
    You can change which printer ``lambdify`` uses by passing a custom printer
    in to the ``printer`` argument.
    Step 2 is augmented by certain translations. There are default
    translations for each module, but you can provide your own by passing a
    list to the ``modules`` argument. For instance,
    >>> def mysin(x):
    ...     print('taking the sin of', x)
    ...     return numpy.sin(x)
    ...
    >>> f = lambdify(x, sin(x), [{'sin': mysin}, 'numpy'])
    >>> f(1)
    taking the sin of 1
    0.8414709848078965
    The globals dictionary is generated from the list by merging the
    dictionary ``{'sin': mysin}`` and the module dictionary for NumPy. The
    merging is done so that earlier items take precedence, which is why
    ``mysin`` is used above instead of ``numpy.sin``.
    If you want to modify the way ``lambdify`` works for a given function, it
    is usually easiest to do so by modifying the globals dictionary as such.
    In more complicated cases, it may be necessary to create and pass in a
    custom printer.
    Finally, step 3 is augmented with certain convenience operations, such as
    the addition of a docstring.
    Understanding how ``lambdify`` works can make it easier to avoid certain
    gotchas when using it. For instance, a common mistake is to create a
    lambdified function for one module (say, NumPy), and pass it objects from
    another (say, a SymPy expression).
    For instance, say we create
    >>> from sympy.abc import x
    >>> f = lambdify(x, x + 1, 'numpy')
    Now if we pass in a NumPy array, we get that array plus 1
    >>> import numpy
    >>> a = numpy.array([1, 2])
    >>> f(a)
    [2 3]
    But what happens if you make the mistake of passing in a SymPy expression
    instead of a NumPy array:
    >>> f(x + 1)
    x + 2
    This worked, but it was only by accident. Now take a different lambdified
    function:
    >>> from sympy import sin
    >>> g = lambdify(x, x + sin(x), 'numpy')
    This works as expected on NumPy arrays:
    >>> g(a)
    [1.84147098 2.90929743]
    But if we try to pass in a SymPy expression, it fails
    >>> try:
    ...     g(x + 1)
    ... # NumPy release after 1.17 raises TypeError instead of
    ... # AttributeError
    ... except (AttributeError, TypeError):
    ...     raise AttributeError() # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    AttributeError:
    Now, let's look at what happened. The reason this fails is that ``g``
    calls ``numpy.sin`` on the input expression, and ``numpy.sin`` does not
    know how to operate on a SymPy object. **As a general rule, NumPy
    functions do not know how to operate on SymPy expressions, and SymPy
    functions do not know how to operate on NumPy arrays. This is why lambdify
    exists: to provide a bridge between SymPy and NumPy.**
    However, why is it that ``f`` did work? That's because ``f`` doesn't call
    any functions, it only adds 1. So the resulting function that is created,
    ``def _lambdifygenerated(x): return x + 1`` does not depend on the globals
    namespace it is defined in. Thus it works, but only by accident. A future
    version of ``lambdify`` may remove this behavior.
    Be aware that certain implementation details described here may change in
    future versions of SymPy. The API of passing in custom modules and
    printers will not change, but the details of how a lambda function is
    created may change. However, the basic idea will remain the same, and
    understanding it will be helpful to understanding the behavior of
    lambdify.
    **In general: you should create lambdified functions for one module (say,
    NumPy), and only pass it input types that are compatible with that module
    (say, NumPy arrays).** Remember that by default, if the ``module``
    argument is not provided, ``lambdify`` creates functions using the NumPy
    and SciPy namespaces.
    """
    from sympy.core.symbol import Symbol
    # If the user hasn't specified any modules, use what is available.
    if modules is None:
        try:
            _import("scipy")
        except ImportError:
            try:
                _import("numpy")
            except ImportError:
                # Use either numpy (if available) or python.math where possible.
                # XXX: This leads to different behaviour on different systems and
                #      might be the reason for irreproducible errors.
                modules = ["math", "mpmath", "sympy"]
            else:
                modules = ["numpy"]
        else:
            modules = ["numpy", "scipy"]
    # Get the needed namespaces.
    namespaces = []
    # First find any function implementations (``_imp_`` attributes); these
    # always take priority over names from the requested modules.
    if use_imps:
        namespaces.append(_imp_namespace(expr))
    # Check for dict before iterating
    if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'):
        namespaces.append(modules)
    else:
        # consistency check
        if _module_present('numexpr', modules) and len(modules) > 1:
            raise TypeError("numexpr must be the only item in 'modules'")
        namespaces += list(modules)
    # fill namespace with first having highest priority: iterate in reverse
    # so earlier entries overwrite later (lower-priority) ones.
    namespace = {} # type: Dict[str, Any]
    for m in namespaces[::-1]:
        buf = _get_namespace(m)
        namespace.update(buf)
    if hasattr(expr, "atoms"):
        #Try if you can extract symbols from the expression.
        #Move on if expr.atoms in not implemented.
        syms = expr.atoms(Symbol)
        for term in syms:
            namespace.update({str(term): term})
    # Pick the code printer matching the highest-priority requested module,
    # unless the caller supplied one explicitly.
    if printer is None:
        if _module_present('mpmath', namespaces):
            from sympy.printing.pycode import MpmathPrinter as Printer # type: ignore
        elif _module_present('scipy', namespaces):
            from sympy.printing.pycode import SciPyPrinter as Printer # type: ignore
        elif _module_present('numpy', namespaces):
            from sympy.printing.pycode import NumPyPrinter as Printer # type: ignore
        elif _module_present('numexpr', namespaces):
            from sympy.printing.lambdarepr import NumExprPrinter as Printer # type: ignore
        elif _module_present('tensorflow', namespaces):
            from sympy.printing.tensorflow import TensorflowPrinter as Printer # type: ignore
        elif _module_present('torch', namespaces):
            from sympy.printing.torch import TorchPrinter as Printer # type: ignore
        elif _module_present('sympy', namespaces):
            from sympy.printing.pycode import SymPyPrinter as Printer # type: ignore
        else:
            from sympy.printing.pycode import PythonCodePrinter as Printer # type: ignore
        # Names supplied via user dictionaries are printed verbatim.
        user_functions = {}
        for m in namespaces[::-1]:
            if isinstance(m, dict):
                for k in m:
                    user_functions[k] = k
        printer = Printer({'fully_qualified_modules': False, 'inline': True,
                           'allow_unknown_functions': True,
                           'user_functions': user_functions})
    if isinstance(args, set):
        # Sets are unordered, so the generated signature would be
        # nondeterministic; deprecated since 1.6.3.
        SymPyDeprecationWarning(
            feature="The list of arguments is a `set`. This leads to unpredictable results",
            useinstead=": Convert set into list or tuple",
            issue=20013,
            deprecated_since_version="1.6.3"
        ).warn()
    # Get the names of the args, for creating a docstring
    if not iterable(args):
        args = (args,)
    names = []
    # Grab the callers frame, for getting the names by inspection (if needed)
    callers_local_vars = inspect.currentframe().f_back.f_locals.items() # type: ignore
    for n, var in enumerate(args):
        if hasattr(var, 'name'):
            names.append(var.name)
        else:
            # It's an iterable. Try to get name by inspection of calling frame.
            name_list = [var_name for var_name, var_val in callers_local_vars
                         if var_val is var]
            if len(name_list) == 1:
                names.append(name_list[0])
            else:
                # Cannot infer name with certainty. arg_# will have to do.
                names.append('arg_' + str(n))
    # Create the function definition code and execute it
    funcname = '_lambdifygenerated'
    if _module_present('tensorflow', namespaces):
        funcprinter = _TensorflowEvaluatorPrinter(printer, dummify) # type: _EvaluatorPrinter
    else:
        funcprinter = _EvaluatorPrinter(printer, dummify)
    funcstr = funcprinter.doprint(funcname, args, expr)
    # Collect the module imports from the code printers.
    imp_mod_lines = []
    for mod, keys in (getattr(printer, 'module_imports', None) or {}).items():
        for k in keys:
            if k not in namespace:
                ln = "from %s import %s" % (mod, k)
                try:
                    exec(ln, {}, namespace)
                except ImportError:
                    # Tensorflow 2.0 has issues with importing a specific
                    # function from its submodule.
                    # https://github.com/tensorflow/tensorflow/issues/33022
                    ln = "%s = %s.%s" % (k, mod, k)
                    exec(ln, {}, namespace)
                imp_mod_lines.append(ln)
    # Provide lambda expression with builtins, and compatible implementation of range
    namespace.update({'builtins':builtins, 'range':range})
    funclocals = {} # type: Dict[str, Any]
    global _lambdify_generated_counter
    filename = '<lambdifygenerated-%s>' % _lambdify_generated_counter
    _lambdify_generated_counter += 1
    c = compile(funcstr, filename, 'exec')
    exec(c, namespace, funclocals)
    # mtime has to be None or else linecache.checkcache will remove it
    linecache.cache[filename] = (len(funcstr), None, funcstr.splitlines(True), filename) # type: ignore
    func = funclocals[funcname]
    # Apply the docstring
    sig = "func({})".format(", ".join(str(i) for i in names))
    sig = textwrap.fill(sig, subsequent_indent=' '*8)
    expr_str = str(expr)
    if len(expr_str) > 78:
        expr_str = textwrap.wrap(expr_str, 75)[0] + '...'
    func.__doc__ = (
        "Created with lambdify. Signature:\n\n"
        "{sig}\n\n"
        "Expression:\n\n"
        "{expr}\n\n"
        "Source code:\n\n"
        "{src}\n\n"
        "Imported modules:\n\n"
        "{imp_mods}"
        ).format(sig=sig, expr=expr_str, src=funcstr, imp_mods='\n'.join(imp_mod_lines))
    return func
def _module_present(modname, modlist):
if modname in modlist:
return True
for m in modlist:
if hasattr(m, '__name__') and m.__name__ == modname:
return True
return False
def _get_namespace(m):
"""
This is used by _lambdify to parse its arguments.
"""
if isinstance(m, str):
_import(m)
return MODULES[m][0]
elif isinstance(m, dict):
return m
elif hasattr(m, "__dict__"):
return m.__dict__
else:
raise TypeError("Argument must be either a string, dict or module but it is: %s" % m)
def lambdastr(args, expr, printer=None, dummify=None):
    """
    Returns a string that can be evaluated to a lambda function.

    Examples
    ========
    >>> from sympy.abc import x, y, z
    >>> from sympy.utilities.lambdify import lambdastr
    >>> lambdastr(x, x**2)
    'lambda x: (x**2)'
    >>> lambdastr((x,y,z), [z,y,x])
    'lambda x,y,z: ([z, y, x])'
    Although tuples may not appear as arguments to lambda in Python 3,
    lambdastr will create a lambda function that will unpack the original
    arguments so that nested arguments can be handled:
    >>> lambdastr((x, (y, z)), x + y)
    'lambda _0,_1: (lambda x,y,z: (x + y))(_0,_1[0],_1[1])'
    """
    # Transforming everything to strings.
    from sympy.matrices import DeferredVector
    from sympy import Dummy, sympify, Symbol, Function, flatten, Derivative, Basic
    # Normalize `printer` to a callable expr -> str.
    if printer is not None:
        if inspect.isfunction(printer):
            lambdarepr = printer
        else:
            if inspect.isclass(printer):
                lambdarepr = lambda expr: printer().doprint(expr)
            else:
                lambdarepr = lambda expr: printer.doprint(expr)
    else:
        #XXX: This has to be done here because of circular imports
        from sympy.printing.lambdarepr import lambdarepr

    def sub_args(args, dummies_dict):
        # Render `args` as a comma-separated argument string, replacing
        # non-identifier arguments (Functions, Derivatives, Symbols) with
        # Dummy symbols and recording the replacements in `dummies_dict`.
        if isinstance(args, str):
            return args
        elif isinstance(args, DeferredVector):
            return str(args)
        elif iterable(args):
            dummies = flatten([sub_args(a, dummies_dict) for a in args])
            return ",".join(str(a) for a in dummies)
        else:
            # replace these with Dummy symbols
            if isinstance(args, (Function, Symbol, Derivative)):
                dummies = Dummy()
                dummies_dict.update({args : dummies})
                return str(dummies)
            else:
                return str(args)

    def sub_expr(expr, dummies_dict):
        # Apply the argument->Dummy substitutions recorded by sub_args
        # to the expression (recursing into plain lists).
        expr = sympify(expr)
        # dict/tuple are sympified to Basic
        if isinstance(expr, Basic):
            expr = expr.xreplace(dummies_dict)
        # list is not sympified to Basic
        elif isinstance(expr, list):
            expr = [sub_expr(a, dummies_dict) for a in expr]
        return expr

    # Transform args
    def isiter(l):
        # True for genuinely iterable argument containers (not strings,
        # DeferredVectors, or explicitly non-iterable objects).
        return iterable(l, exclude=(str, DeferredVector, NotIterable))

    def flat_indexes(iterable):
        # Yield index tuples addressing each leaf of a nested structure,
        # e.g. (x, (y, z)) -> (0,), (1, 0), (1, 1).
        n = 0
        for el in iterable:
            if isiter(el):
                for ndeep in flat_indexes(el):
                    yield (n,) + ndeep
            else:
                yield (n,)
            n += 1
    if dummify is None:
        # Dummify automatically when any argument contains undefined
        # functions or derivatives (not valid Python identifiers).
        dummify = any(isinstance(a, Basic) and
            a.atoms(Function, Derivative) for a in (
            args if isiter(args) else [args]))
    if isiter(args) and any(isiter(i) for i in args):
        # Nested arguments: build an outer lambda over flat dummy names
        # that unpacks into an inner lambda over the flattened args.
        dum_args = [str(Dummy(str(i))) for i in range(len(args))]
        indexed_args = ','.join([
            dum_args[ind[0]] + ''.join(["[%s]" % k for k in ind[1:]])
                    for ind in flat_indexes(args)])
        lstr = lambdastr(flatten(args), expr, printer=printer, dummify=dummify)
        return 'lambda %s: (%s)(%s)' % (','.join(dum_args), lstr, indexed_args)
    dummies_dict = {}
    if dummify:
        args = sub_args(args, dummies_dict)
    else:
        if isinstance(args, str):
            pass
        elif iterable(args, exclude=DeferredVector):
            args = ",".join(str(a) for a in args)
    # Transform expr
    if dummify:
        if isinstance(expr, str):
            pass
        else:
            expr = sub_expr(expr, dummies_dict)
    expr = lambdarepr(expr)
    return "lambda %s: (%s)" % (args, expr)
class _EvaluatorPrinter:
    """Renders a full ``def`` statement for a lambdified expression,
    handling argument dummification and nested-argument unpacking."""

    def __init__(self, printer=None, dummify=False):
        # printer: a LambdaPrinter(-like) instance/class/callable used to
        #          stringify the expression; defaults to LambdaPrinter.
        # dummify: force replacement of all arguments with Dummy symbols.
        self._dummify = dummify
        #XXX: This has to be done here because of circular imports
        from sympy.printing.lambdarepr import LambdaPrinter
        if printer is None:
            printer = LambdaPrinter()
        if inspect.isfunction(printer):
            self._exprrepr = printer
        else:
            if inspect.isclass(printer):
                printer = printer()
            self._exprrepr = printer.doprint
            #if hasattr(printer, '_print_Symbol'):
            #    symbolrepr = printer._print_Symbol
            #if hasattr(printer, '_print_Dummy'):
            #    dummyrepr = printer._print_Dummy
        # Used to print the generated function arguments in a standard way
        self._argrepr = LambdaPrinter().doprint

    def doprint(self, funcname, args, expr):
        """Returns the function definition code as a string."""
        from sympy import Dummy
        funcbody = []
        if not iterable(args):
            args = [args]
        # Replace non-identifier arguments with Dummy symbols and get the
        # correspondingly substituted expression.
        argstrs, expr = self._preprocess(args, expr)
        # Generate argument unpacking and final argument list
        funcargs = []
        unpackings = []
        for argstr in argstrs:
            if iterable(argstr):
                # Nested argument: bind it to a fresh dummy parameter and
                # unpack its components inside the function body.
                funcargs.append(self._argrepr(Dummy()))
                unpackings.extend(self._print_unpacking(argstr, funcargs[-1]))
            else:
                funcargs.append(argstr)
        funcsig = 'def {}({}):'.format(funcname, ', '.join(funcargs))
        # Wrap input arguments before unpacking
        funcbody.extend(self._print_funcargwrapping(funcargs))
        funcbody.extend(unpackings)
        funcbody.append('return ({})'.format(self._exprrepr(expr)))
        funclines = [funcsig]
        funclines.extend('    ' + line for line in funcbody)
        return '\n'.join(funclines) + '\n'

    @classmethod
    def _is_safe_ident(cls, ident):
        # True if `ident` is a string that can be used verbatim as a Python
        # parameter name (valid identifier and not a keyword).
        return isinstance(ident, str) and ident.isidentifier() \
                and not keyword.iskeyword(ident)

    def _preprocess(self, args, expr):
        """Preprocess args, expr to replace arguments that do not map
        to valid Python identifiers.

        Returns string form of args, and updated expr.
        """
        from sympy import Dummy, Function, flatten, Derivative, ordered, Basic
        from sympy.matrices import DeferredVector
        from sympy.core.symbol import uniquely_named_symbol
        from sympy.core.expr import Expr
        # Args of type Dummy can cause name collisions with args
        # of type Symbol.  Force dummify of everything in this
        # situation.
        dummify = self._dummify or any(
            isinstance(arg, Dummy) for arg in flatten(args))
        # Process in reverse canonical order so substitutions of larger
        # expressions happen before their sub-expressions.
        argstrs = [None]*len(args)
        for arg, i in reversed(list(ordered(zip(args, range(len(args)))))):
            if iterable(arg):
                s, expr = self._preprocess(arg, expr)
            elif isinstance(arg, DeferredVector):
                s = str(arg)
            elif isinstance(arg, Basic) and arg.is_symbol:
                s = self._argrepr(arg)
                if dummify or not self._is_safe_ident(s):
                    dummy = Dummy()
                    if isinstance(expr, Expr):
                        # Avoid clashing with any name already in `expr`.
                        dummy = uniquely_named_symbol(
                            dummy.name, expr, modify=lambda s: '_' + s)
                    s = self._argrepr(dummy)
                    expr = self._subexpr(expr, {arg: dummy})
            elif dummify or isinstance(arg, (Function, Derivative)):
                dummy = Dummy()
                s = self._argrepr(dummy)
                expr = self._subexpr(expr, {arg: dummy})
            else:
                s = str(arg)
            argstrs[i] = s
        return argstrs, expr

    def _subexpr(self, expr, dummies_dict):
        # Apply arg->Dummy substitutions to `expr`, recursing into plain
        # dicts, tuples and lists (which are not Basic and lack xreplace).
        from sympy.matrices import DeferredVector
        from sympy import sympify
        expr = sympify(expr)
        xreplace = getattr(expr, 'xreplace', None)
        if xreplace is not None:
            expr = xreplace(dummies_dict)
        else:
            if isinstance(expr, DeferredVector):
                pass
            elif isinstance(expr, dict):
                k = [self._subexpr(sympify(a), dummies_dict) for a in expr.keys()]
                v = [self._subexpr(sympify(a), dummies_dict) for a in expr.values()]
                expr = dict(zip(k, v))
            elif isinstance(expr, tuple):
                expr = tuple(self._subexpr(sympify(a), dummies_dict) for a in expr)
            elif isinstance(expr, list):
                expr = [self._subexpr(sympify(a), dummies_dict) for a in expr]
        return expr

    def _print_funcargwrapping(self, args):
        """Generate argument wrapping code.

        args is the argument list of the generated function (strings).

        Return value is a list of lines of code that will be inserted  at
        the beginning of the function definition.
        """
        # Base implementation: no wrapping needed.
        return []

    def _print_unpacking(self, unpackto, arg):
        """Generate argument unpacking code.

        arg is the function argument to be unpacked (a string), and
        unpackto is a list or nested lists of the variable names (strings) to
        unpack to.
        """
        def unpack_lhs(lvalues):
            # Build a (possibly nested) list-target like "[x, [y, z]]".
            return '[{}]'.format(', '.join(
                unpack_lhs(val) if iterable(val) else val for val in lvalues))
        return ['{} = {}'.format(unpack_lhs(unpackto), arg)]
class _TensorflowEvaluatorPrinter(_EvaluatorPrinter):
    """Evaluator printer that unpacks arguments by indexing.

    Tensorflow tensors are not iterable, so unpacking must use explicit
    subscripting instead of Python's sequence unpacking.
    """
    def _print_unpacking(self, lvalues, rvalue):
        """Generate argument unpacking code.

        This method is used when the input value is not iterable,
        but can be indexed (see issue #14655).
        """
        from sympy import flatten
        def flat_indexes(elems):
            # Yield the index path (tuple of ints) of every leaf in the
            # possibly-nested ``elems`` structure.
            n = 0
            for el in elems:
                if iterable(el):
                    for ndeep in flat_indexes(el):
                        yield (n,) + ndeep
                else:
                    yield (n,)
                n += 1
        # e.g. lvalues=[[a, b]] -> "rvalue[0][0], rvalue[0][1]".
        indexed = ', '.join('{}[{}]'.format(rvalue, ']['.join(map(str, ind)))
            for ind in flat_indexes(lvalues))
        return ['[{}] = [{}]'.format(', '.join(flatten(lvalues)), indexed)]
def _imp_namespace(expr, namespace=None):
    """ Return namespace dict with function implementations

    Recursively walk ``expr`` -- which may be a sympy expression or any
    (nested) tuple, list or dict that contains sympy expressions -- and
    collect the numerical implementation of every implemented function
    encountered.

    Parameters
    ----------
    expr : object
        Something passed to lambdify, that will generate valid code from
        ``str(expr)``.
    namespace : None or mapping
        Namespace to fill. None results in new empty dict

    Returns
    -------
    namespace : dict
        dict with keys of implemented function names within ``expr`` and
        corresponding values being the numerical implementation of
        function

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.utilities.lambdify import implemented_function, _imp_namespace
    >>> from sympy import Function
    >>> f = implemented_function(Function('f'), lambda x: x+1)
    >>> g = implemented_function(Function('g'), lambda x: x*10)
    >>> namespace = _imp_namespace(f(g(x)))
    >>> sorted(namespace.keys())
    ['f', 'g']
    """
    # Delayed import to avoid circular imports
    from sympy.core.function import FunctionClass
    namespace = {} if namespace is None else namespace
    # tuples and lists are valid expressions: recurse into every element
    if is_sequence(expr):
        for item in expr:
            _imp_namespace(item, namespace)
        return namespace
    if isinstance(expr, dict):
        for key, value in expr.items():
            # functions can be in dictionary keys as well as values
            _imp_namespace(key, namespace)
            _imp_namespace(value, namespace)
        return namespace
    # a sympy expression may itself be an implemented Function ...
    head = getattr(expr, 'func', None)
    if isinstance(head, FunctionClass):
        imp = getattr(head, '_imp_', None)
        if imp is not None:
            name = expr.func.__name__
            # Refuse silently overwriting a different implementation.
            if namespace.get(name, imp) != imp:
                raise ValueError('We found more than one '
                                 'implementation with name '
                                 '"%s"' % name)
            namespace[name] = imp
    # ... and/or take implemented Functions as arguments
    if hasattr(expr, 'args'):
        for arg in expr.args:
            _imp_namespace(arg, namespace)
    return namespace
def implemented_function(symfunc, implementation):
    """ Add numerical ``implementation`` to function ``symfunc``.

    ``symfunc`` can be an ``UndefinedFunction`` instance, or a name string.
    In the latter case we create an ``UndefinedFunction`` instance with that
    name.

    Be aware that this is a quick workaround, not a general method to create
    special symbolic functions. If you want to create a symbolic function to
    be used by all the machinery of SymPy you should subclass the
    ``Function`` class.

    Parameters
    ----------
    symfunc : ``str`` or ``UndefinedFunction`` instance
        If ``str``, then create new ``UndefinedFunction`` with this as
        name. If ``symfunc`` is an Undefined function, create a new function
        with the same name and the implemented function attached.
    implementation : callable
        numerical implementation to be called by ``evalf()`` or ``lambdify``

    Returns
    -------
    afunc : sympy.FunctionClass instance
        function with attached implementation

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.utilities.lambdify import lambdify, implemented_function
    >>> f = implemented_function('f', lambda x: x+1)
    >>> lam_f = lambdify(x, f(x))
    >>> lam_f(4)
    5
    """
    # Delayed import to avoid circular imports
    from sympy.core.function import UndefinedFunction

    keyword_args = {}
    if isinstance(symfunc, UndefinedFunction):
        # Carry the original class keyword arguments over to the new class.
        keyword_args = symfunc._kwargs
        symfunc = symfunc.__name__
    if not isinstance(symfunc, str):
        raise ValueError(filldedent('''
            symfunc should be either a string or
            an UndefinedFunction instance.'''))
    # Keyword arguments to UndefinedFunction are added as attributes to
    # the created class.
    return UndefinedFunction(
        symfunc, _imp_=staticmethod(implementation), **keyword_args)
| 36.367041 | 139 | 0.618393 |
from typing import Any, Dict, Iterable
import builtins
import inspect
import keyword
import textwrap
import linecache
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.core.compatibility import (is_sequence, iterable,
NotIterable)
from sympy.utilities.misc import filldedent
from sympy.utilities.decorator import doctest_depends_on
__doctest_requires__ = {('lambdify',): ['numpy', 'tensorflow']}
# by simple variable maps, like I => 1j
MATH_DEFAULT = {} # type: Dict[str, Any]
MPMATH_DEFAULT = {} # type: Dict[str, Any]
NUMPY_DEFAULT = {"I": 1j} # type: Dict[str, Any]
SCIPY_DEFAULT = {"I": 1j} # type: Dict[str, Any]
TENSORFLOW_DEFAULT = {} # type: Dict[str, Any]
TORCH_DEFAULT = {} # type: Dict[str, Any]
SYMPY_DEFAULT = {} # type: Dict[str, Any]
NUMEXPR_DEFAULT = {} # type: Dict[str, Any]
# These are the namespaces the lambda functions will use.
# These are separate from the names above because they are modified
# throughout this file, whereas the defaults should remain unmodified.
MATH = MATH_DEFAULT.copy()
MPMATH = MPMATH_DEFAULT.copy()
NUMPY = NUMPY_DEFAULT.copy()
SCIPY = SCIPY_DEFAULT.copy()
TENSORFLOW = TENSORFLOW_DEFAULT.copy()
TORCH = TORCH_DEFAULT.copy()
SYMPY = SYMPY_DEFAULT.copy()
NUMEXPR = NUMEXPR_DEFAULT.copy()
# Mappings between sympy and other modules function names.
MATH_TRANSLATIONS = {
"ceiling": "ceil",
"E": "e",
"ln": "log",
}
# NOTE: This dictionary is reused in Function._eval_evalf to allow subclasses
# of Function to automatically evalf.
MPMATH_TRANSLATIONS = {
"Abs": "fabs",
"elliptic_k": "ellipk",
"elliptic_f": "ellipf",
"elliptic_e": "ellipe",
"elliptic_pi": "ellippi",
"ceiling": "ceil",
"chebyshevt": "chebyt",
"chebyshevu": "chebyu",
"E": "e",
"I": "j",
"ln": "log",
#"lowergamma":"lower_gamma",
"oo": "inf",
#"uppergamma":"upper_gamma",
"LambertW": "lambertw",
"MutableDenseMatrix": "matrix",
"ImmutableDenseMatrix": "matrix",
"conjugate": "conj",
"dirichlet_eta": "altzeta",
"Ei": "ei",
"Shi": "shi",
"Chi": "chi",
"Si": "si",
"Ci": "ci",
"RisingFactorial": "rf",
"FallingFactorial": "ff",
"betainc_regularized": "betainc",
}
NUMPY_TRANSLATIONS = {} # type: Dict[str, str]
SCIPY_TRANSLATIONS = {} # type: Dict[str, str]
TENSORFLOW_TRANSLATIONS = {} # type: Dict[str, str]
TORCH_TRANSLATIONS = {} # type: Dict[str, str]
NUMEXPR_TRANSLATIONS = {} # type: Dict[str, str]
# Available modules:
MODULES = {
"math": (MATH, MATH_DEFAULT, MATH_TRANSLATIONS, ("from math import *",)),
"mpmath": (MPMATH, MPMATH_DEFAULT, MPMATH_TRANSLATIONS, ("from mpmath import *",)),
"numpy": (NUMPY, NUMPY_DEFAULT, NUMPY_TRANSLATIONS, ("import numpy; from numpy import *; from numpy.linalg import *",)),
"scipy": (SCIPY, SCIPY_DEFAULT, SCIPY_TRANSLATIONS, ("import numpy; import scipy; from scipy import *; from scipy.special import *",)),
"tensorflow": (TENSORFLOW, TENSORFLOW_DEFAULT, TENSORFLOW_TRANSLATIONS, ("import tensorflow",)),
"torch": (TORCH, TORCH_DEFAULT, TORCH_TRANSLATIONS, ("import torch",)),
"sympy": (SYMPY, SYMPY_DEFAULT, {}, (
"from sympy.functions import *",
"from sympy.matrices import *",
"from sympy import Integral, pi, oo, nan, zoo, E, I",)),
"numexpr" : (NUMEXPR, NUMEXPR_DEFAULT, NUMEXPR_TRANSLATIONS,
("import_module('numexpr')", )),
}
def _import(module, reload=False):
    """Populate the global namespace dict for *module*.

    Fills ``MODULES[module][0]`` by executing the module's import commands
    and applying its sympy-name -> backend-name translations.  Raises
    NameError for an unknown *module* and ImportError when the backing
    package cannot be imported.  When the namespace is already populated
    this is a no-op unless ``reload`` is true.
    """
    # Required despite static analysis claiming it is not used
    from sympy.external import import_module # noqa:F401
    try:
        namespace, namespace_default, translations, import_commands = MODULES[
            module]
    except KeyError:
        raise NameError(
            "'%s' module can't be used for lambdification" % module)
    # A namespace differing from its default means it was already filled.
    if namespace != namespace_default:
        if reload:
            namespace.clear()
            namespace.update(namespace_default)
        else:
            return
    for import_command in import_commands:
        if import_command.startswith('import_module'):
            # Lazy import via sympy.external.import_module (e.g. numexpr).
            module = eval(import_command)
            if module is not None:
                namespace.update(module.__dict__)
                continue
        else:
            try:
                exec(import_command, {}, namespace)
                continue
            except ImportError:
                pass
        raise ImportError(
            "can't import '%s' with '%s' command" % (module, import_command))
    # Add translated names, e.g. 'ceiling' -> the backend's 'ceil'.
    for sympyname, translation in translations.items():
        namespace[sympyname] = namespace[translation]
    # mpmath translation module, because mpmath.fabs returns mpf objects in
    # contrast to abs().
    if 'Abs' not in namespace:
        namespace['Abs'] = abs
# Used for dynamically generated filenames that are inserted into the
# linecache.
_lambdify_generated_counter = 1
@doctest_depends_on(modules=('numpy', 'tensorflow', ), python_version=(3,))
def lambdify(args: Iterable, expr, modules=None, printer=None, use_imps=True,
             dummify=False):
    """Translate a SymPy expression into an equivalent numeric function.

    ``args`` are the variables the generated function accepts (in order),
    ``expr`` is the expression (or container of expressions) to evaluate,
    and ``modules`` selects the numeric backend(s) used to print it; when
    omitted, scipy/numpy are preferred if importable, falling back to
    math/mpmath/sympy.  ``printer`` overrides the code printer chosen from
    the namespaces, ``use_imps`` pulls in ``implemented_function``
    implementations, and ``dummify`` forces replacing the supplied
    arguments with Dummy symbols.  Returns the compiled function object.
    """
    from sympy.core.symbol import Symbol
    # If the user hasn't specified any modules, use what is available.
    if modules is None:
        try:
            _import("scipy")
        except ImportError:
            try:
                _import("numpy")
            except ImportError:
                # Use either numpy (if available) or python.math where possible.
                modules = ["math", "mpmath", "sympy"]
            else:
                modules = ["numpy"]
        else:
            modules = ["numpy", "scipy"]
    # Get the needed namespaces.
    namespaces = []
    # First find any function implementations
    if use_imps:
        namespaces.append(_imp_namespace(expr))
    # Check for dict before iterating
    if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'):
        namespaces.append(modules)
    else:
        # consistency check
        if _module_present('numexpr', modules) and len(modules) > 1:
            raise TypeError("numexpr must be the only item in 'modules'")
        namespaces += list(modules)
    # fill namespace with first having highest priority
    namespace = {}
    for m in namespaces[::-1]:
        buf = _get_namespace(m)
        namespace.update(buf)
    if hasattr(expr, "atoms"):
        # Try if you can extract symbols from the expression.
        # Move on if expr.atoms in not implemented.
        syms = expr.atoms(Symbol)
        for term in syms:
            namespace.update({str(term): term})
    # Pick a printer matching the highest-priority backend present.
    if printer is None:
        if _module_present('mpmath', namespaces):
            from sympy.printing.pycode import MpmathPrinter as Printer
        elif _module_present('scipy', namespaces):
            from sympy.printing.pycode import SciPyPrinter as Printer
        elif _module_present('numpy', namespaces):
            from sympy.printing.pycode import NumPyPrinter as Printer
        elif _module_present('numexpr', namespaces):
            from sympy.printing.lambdarepr import NumExprPrinter as Printer
        elif _module_present('tensorflow', namespaces):
            from sympy.printing.tensorflow import TensorflowPrinter as Printer
        elif _module_present('torch', namespaces):
            from sympy.printing.torch import TorchPrinter as Printer
        elif _module_present('sympy', namespaces):
            from sympy.printing.pycode import SymPyPrinter as Printer
        else:
            from sympy.printing.pycode import PythonCodePrinter as Printer
        user_functions = {}
        for m in namespaces[::-1]:
            if isinstance(m, dict):
                for k in m:
                    user_functions[k] = k
        printer = Printer({'fully_qualified_modules': False, 'inline': True,
                           'allow_unknown_functions': True,
                           'user_functions': user_functions})
    if isinstance(args, set):
        SymPyDeprecationWarning(
            feature="The list of arguments is a `set`. This leads to unpredictable results",
            useinstead=": Convert set into list or tuple",
            issue=20013,
            deprecated_since_version="1.6.3"
        ).warn()
    # Get the names of the args, for creating a docstring
    if not iterable(args):
        args = (args,)
    names = []
    # Grab the callers frame, for getting the names by inspection (if needed)
    callers_local_vars = inspect.currentframe().f_back.f_locals.items()
    for n, var in enumerate(args):
        if hasattr(var, 'name'):
            names.append(var.name)
        else:
            # It's an iterable. Try to get name by inspection of calling frame.
            name_list = [var_name for var_name, var_val in callers_local_vars
                         if var_val is var]
            if len(name_list) == 1:
                names.append(name_list[0])
            else:
                # Cannot infer name with certainty. arg_# will have to do.
                names.append('arg_' + str(n))
    # Create the function definition code and execute it
    funcname = '_lambdifygenerated'
    if _module_present('tensorflow', namespaces):
        funcprinter = _TensorflowEvaluatorPrinter(printer, dummify) # type: _EvaluatorPrinter
    else:
        funcprinter = _EvaluatorPrinter(printer, dummify)
    funcstr = funcprinter.doprint(funcname, args, expr)
    # Collect the module imports from the code printers.
    imp_mod_lines = []
    for mod, keys in (getattr(printer, 'module_imports', None) or {}).items():
        for k in keys:
            if k not in namespace:
                ln = "from %s import %s" % (mod, k)
                try:
                    exec(ln, {}, namespace)
                except ImportError:
                    # Tensorflow 2.0 has issues with importing a specific
                    # function from its submodule.
                    # https://github.com/tensorflow/tensorflow/issues/33022
                    ln = "%s = %s.%s" % (k, mod, k)
                    exec(ln, {}, namespace)
                imp_mod_lines.append(ln)
    # Provide lambda expression with builtins, and compatible implementation of range
    namespace.update({'builtins':builtins, 'range':range})
    funclocals = {} # type: Dict[str, Any]
    global _lambdify_generated_counter
    filename = '<lambdifygenerated-%s>' % _lambdify_generated_counter
    _lambdify_generated_counter += 1
    c = compile(funcstr, filename, 'exec')
    exec(c, namespace, funclocals)
    # mtime has to be None or else linecache.checkcache will remove it
    linecache.cache[filename] = (len(funcstr), None, funcstr.splitlines(True), filename) # type: ignore
    func = funclocals[funcname]
    # Apply the docstring
    sig = "func({})".format(", ".join(str(i) for i in names))
    sig = textwrap.fill(sig, subsequent_indent=' '*8)
    expr_str = str(expr)
    if len(expr_str) > 78:
        expr_str = textwrap.wrap(expr_str, 75)[0] + '...'
    func.__doc__ = (
        "Created with lambdify. Signature:\n\n"
        "{sig}\n\n"
        "Expression:\n\n"
        "{expr}\n\n"
        "Source code:\n\n"
        "{src}\n\n"
        "Imported modules:\n\n"
        "{imp_mods}"
    ).format(sig=sig, expr=expr_str, src=funcstr, imp_mods='\n'.join(imp_mod_lines))
    return func
def _module_present(modname, modlist):
    """Return True if *modname* matches an entry of *modlist*.

    An entry matches by being the string ``modname`` itself, or by being
    an object (typically a module) whose ``__name__`` equals it.
    """
    if modname in modlist:
        return True
    return any(hasattr(entry, '__name__') and entry.__name__ == modname
               for entry in modlist)
def _get_namespace(m):
    """Return the namespace dict for *m*.

    *m* may be a known backend name (str), an explicit namespace dict, or
    a module object; anything else raises TypeError.
    """
    if isinstance(m, str):
        # Known backend name: ensure it is imported, then hand back its
        # (possibly translated) global namespace.
        _import(m)
        return MODULES[m][0]
    if isinstance(m, dict):
        return m
    if hasattr(m, "__dict__"):
        return m.__dict__
    raise TypeError("Argument must be either a string, dict or module but it is: %s" % m)
def lambdastr(args, expr, printer=None, dummify=None):
    """Return a string that can be evaluated to a lambda function.

    Unlike ``lambdify`` this only builds the ``"lambda x: (...)"`` source
    string.  Nested argument structures produce an outer lambda that
    indexes into flat dummy arguments; ``dummify`` replaces arguments
    that are not plain symbols with Dummy symbols first.
    """
    # Transforming everything to strings.
    from sympy.matrices import DeferredVector
    from sympy import Dummy, sympify, Symbol, Function, flatten, Derivative, Basic
    if printer is not None:
        if inspect.isfunction(printer):
            lambdarepr = printer
        else:
            if inspect.isclass(printer):
                lambdarepr = lambda expr: printer().doprint(expr)
            else:
                lambdarepr = lambda expr: printer.doprint(expr)
    else:
        #XXX: This has to be done here because of circular imports
        from sympy.printing.lambdarepr import lambdarepr
    def sub_args(args, dummies_dict):
        # Render arguments to strings, replacing non-identifier arguments
        # with Dummy symbols (recorded in dummies_dict for sub_expr).
        if isinstance(args, str):
            return args
        elif isinstance(args, DeferredVector):
            return str(args)
        elif iterable(args):
            dummies = flatten([sub_args(a, dummies_dict) for a in args])
            return ",".join(str(a) for a in dummies)
        else:
            # replace these with Dummy symbols
            if isinstance(args, (Function, Symbol, Derivative)):
                dummies = Dummy()
                dummies_dict.update({args : dummies})
                return str(dummies)
            else:
                return str(args)
    def sub_expr(expr, dummies_dict):
        # Apply the recorded Dummy substitutions inside the expression.
        expr = sympify(expr)
        # dict/tuple are sympified to Basic
        if isinstance(expr, Basic):
            expr = expr.xreplace(dummies_dict)
        # list is not sympified to Basic
        elif isinstance(expr, list):
            expr = [sub_expr(a, dummies_dict) for a in expr]
        return expr
    # Transform args
    def isiter(l):
        return iterable(l, exclude=(str, DeferredVector, NotIterable))
    def flat_indexes(iterable):
        # Yield the index path (tuple of ints) of every leaf in ``iterable``.
        n = 0
        for el in iterable:
            if isiter(el):
                for ndeep in flat_indexes(el):
                    yield (n,) + ndeep
            else:
                yield (n,)
            n += 1
    if dummify is None:
        dummify = any(isinstance(a, Basic) and
            a.atoms(Function, Derivative) for a in (
            args if isiter(args) else [args]))
    if isiter(args) and any(isiter(i) for i in args):
        # Nested arguments: build an outer lambda over flat dummies that
        # indexes into them and applies the flat-argument lambda.
        dum_args = [str(Dummy(str(i))) for i in range(len(args))]
        indexed_args = ','.join([
            dum_args[ind[0]] + ''.join(["[%s]" % k for k in ind[1:]])
                    for ind in flat_indexes(args)])
        lstr = lambdastr(flatten(args), expr, printer=printer, dummify=dummify)
        return 'lambda %s: (%s)(%s)' % (','.join(dum_args), lstr, indexed_args)
    dummies_dict = {}
    if dummify:
        args = sub_args(args, dummies_dict)
    else:
        if isinstance(args, str):
            pass
        elif iterable(args, exclude=DeferredVector):
            args = ",".join(str(a) for a in args)
    # Transform expr
    if dummify:
        if isinstance(expr, str):
            pass
        else:
            expr = sub_expr(expr, dummies_dict)
    expr = lambdarepr(expr)
    return "lambda %s: (%s)" % (args, expr)
class _EvaluatorPrinter:
    """Render the source of a ``def`` statement for a lambdified expression.

    Handles argument preprocessing (dummification of names that are not
    safe Python identifiers), unpacking of iterable arguments, and the
    final assembly of the generated function's source code.
    """
    def __init__(self, printer=None, dummify=False):
        self._dummify = dummify
        #XXX: This has to be done here because of circular imports
        from sympy.printing.lambdarepr import LambdaPrinter
        if printer is None:
            printer = LambdaPrinter()
        # ``printer`` may be a plain function, a printer class, or an
        # already-instantiated printer object.
        if inspect.isfunction(printer):
            self._exprrepr = printer
        else:
            if inspect.isclass(printer):
                printer = printer()
            self._exprrepr = printer.doprint
            #if hasattr(printer, '_print_Symbol'):
            #    symbolrepr = printer._print_Symbol
            #if hasattr(printer, '_print_Dummy'):
            #    dummyrepr = printer._print_Dummy
        # Used to print the generated function arguments in a standard way
        self._argrepr = LambdaPrinter().doprint
    def doprint(self, funcname, args, expr):
        """Return the full source of a function named *funcname* that
        evaluates *expr* given *args*."""
        from sympy import Dummy
        funcbody = []
        if not iterable(args):
            args = [args]
        argstrs, expr = self._preprocess(args, expr)
        # Generate argument unpacking and final argument list
        funcargs = []
        unpackings = []
        for argstr in argstrs:
            if iterable(argstr):
                # Iterable argument groups become one Dummy parameter that
                # gets unpacked at the top of the function body.
                funcargs.append(self._argrepr(Dummy()))
                unpackings.extend(self._print_unpacking(argstr, funcargs[-1]))
            else:
                funcargs.append(argstr)
        funcsig = 'def {}({}):'.format(funcname, ', '.join(funcargs))
        # Wrap input arguments before unpacking
        funcbody.extend(self._print_funcargwrapping(funcargs))
        funcbody.extend(unpackings)
        funcbody.append('return ({})'.format(self._exprrepr(expr)))
        funclines = [funcsig]
        funclines.extend(' ' + line for line in funcbody)
        return '\n'.join(funclines) + '\n'
    @classmethod
    def _is_safe_ident(cls, ident):
        # Safe means: a valid, non-keyword Python identifier string.
        return isinstance(ident, str) and ident.isidentifier() \
                and not keyword.iskeyword(ident)
    def _preprocess(self, args, expr):
        """Preprocess args, expr to replace arguments that do not map
        to valid Python identifiers.  Returns string form of args, and
        the updated expr."""
        from sympy import Dummy, Function, flatten, Derivative, ordered, Basic
        from sympy.matrices import DeferredVector
        from sympy.core.symbol import uniquely_named_symbol
        from sympy.core.expr import Expr
        # Args of type Dummy can cause name collisions with args
        # of type Symbol. Force dummify of everything in this
        # situation.
        dummify = self._dummify or any(
            isinstance(arg, Dummy) for arg in flatten(args))
        argstrs = [None]*len(args)
        # Canonical (sorted, reversed) order keeps dummy naming
        # deterministic; ``i`` restores original positions.
        for arg, i in reversed(list(ordered(zip(args, range(len(args)))))):
            if iterable(arg):
                s, expr = self._preprocess(arg, expr)
            elif isinstance(arg, DeferredVector):
                s = str(arg)
            elif isinstance(arg, Basic) and arg.is_symbol:
                s = self._argrepr(arg)
                if dummify or not self._is_safe_ident(s):
                    dummy = Dummy()
                    if isinstance(expr, Expr):
                        dummy = uniquely_named_symbol(
                            dummy.name, expr, modify=lambda s: '_' + s)
                    s = self._argrepr(dummy)
                    expr = self._subexpr(expr, {arg: dummy})
            elif dummify or isinstance(arg, (Function, Derivative)):
                dummy = Dummy()
                s = self._argrepr(dummy)
                expr = self._subexpr(expr, {arg: dummy})
            else:
                s = str(arg)
            argstrs[i] = s
        return argstrs, expr
    def _subexpr(self, expr, dummies_dict):
        """Apply the Dummy substitutions in *dummies_dict* to *expr*,
        recursing into dict/tuple/list containers."""
        from sympy.matrices import DeferredVector
        from sympy import sympify
        expr = sympify(expr)
        xreplace = getattr(expr, 'xreplace', None)
        if xreplace is not None:
            expr = xreplace(dummies_dict)
        else:
            if isinstance(expr, DeferredVector):
                pass
            elif isinstance(expr, dict):
                k = [self._subexpr(sympify(a), dummies_dict) for a in expr.keys()]
                v = [self._subexpr(sympify(a), dummies_dict) for a in expr.values()]
                expr = dict(zip(k, v))
            elif isinstance(expr, tuple):
                expr = tuple(self._subexpr(sympify(a), dummies_dict) for a in expr)
            elif isinstance(expr, list):
                expr = [self._subexpr(sympify(a), dummies_dict) for a in expr]
        return expr
    def _print_funcargwrapping(self, args):
        """Generate argument wrapping code (lines inserted at the top of
        the generated function); the base printer needs none."""
        return []
    def _print_unpacking(self, unpackto, arg):
        """Generate code unpacking *arg* into the (possibly nested) list
        of variable names *unpackto*."""
        def unpack_lhs(lvalues):
            return '[{}]'.format(', '.join(
                unpack_lhs(val) if iterable(val) else val for val in lvalues))
        return ['{} = {}'.format(unpack_lhs(unpackto), arg)]
class _TensorflowEvaluatorPrinter(_EvaluatorPrinter):
    """Evaluator printer whose unpacking uses indexing instead of
    iteration, for inputs that are indexable but not iterable."""
    def _print_unpacking(self, lvalues, rvalue):
        """Generate argument unpacking code via explicit subscripts
        (see issue #14655)."""
        from sympy import flatten
        def flat_indexes(elems):
            # Yield the index path (tuple of ints) of every leaf.
            n = 0
            for el in elems:
                if iterable(el):
                    for ndeep in flat_indexes(el):
                        yield (n,) + ndeep
                else:
                    yield (n,)
                n += 1
        indexed = ', '.join('{}[{}]'.format(rvalue, ']['.join(map(str, ind)))
            for ind in flat_indexes(lvalues))
        return ['[{}] = [{}]'.format(', '.join(flatten(lvalues)), indexed)]
def _imp_namespace(expr, namespace=None):
    """Return a namespace dict mapping implemented-function names found
    anywhere in *expr* (sympy expressions, or nested tuples/lists/dicts
    of them) to their numerical implementations (``_imp_``).  Fills and
    returns *namespace* (a new dict when None)."""
    # Delayed import to avoid circular imports
    from sympy.core.function import FunctionClass
    if namespace is None:
        namespace = {}
    # tuples, lists, dicts are valid expressions
    if is_sequence(expr):
        for arg in expr:
            _imp_namespace(arg, namespace)
        return namespace
    elif isinstance(expr, dict):
        for key, val in expr.items():
            # functions can be in dictionary keys
            _imp_namespace(key, namespace)
            _imp_namespace(val, namespace)
        return namespace
    # sympy expressions may be Functions themselves
    func = getattr(expr, 'func', None)
    if isinstance(func, FunctionClass):
        imp = getattr(func, '_imp_', None)
        if imp is not None:
            name = expr.func.__name__
            # Refuse to silently overwrite a different implementation.
            if name in namespace and namespace[name] != imp:
                raise ValueError('We found more than one '
                                 'implementation with name '
                                 '"%s"' % name)
            namespace[name] = imp
    # and / or they may take Functions as arguments
    if hasattr(expr, 'args'):
        for arg in expr.args:
            _imp_namespace(arg, namespace)
    return namespace
def implemented_function(symfunc, implementation):
    """Attach the numerical *implementation* to *symfunc* and return the
    resulting ``UndefinedFunction`` class.

    *symfunc* may be a name string or an existing ``UndefinedFunction``
    (whose name and keyword arguments are reused); anything else raises
    ValueError.  The implementation is stored as the ``_imp_`` attribute.
    """
    # Delayed import to avoid circular imports
    from sympy.core.function import UndefinedFunction
    # if name, create function to hold implementation
    kwargs = {}
    if isinstance(symfunc, UndefinedFunction):
        kwargs = symfunc._kwargs
        symfunc = symfunc.__name__
    if isinstance(symfunc, str):
        # Keyword arguments to UndefinedFunction are added as attributes to
        # the created class.
        symfunc = UndefinedFunction(
            symfunc, _imp_=staticmethod(implementation), **kwargs)
    elif not isinstance(symfunc, UndefinedFunction):
        raise ValueError(filldedent('''
            symfunc should be either a string or
            an UndefinedFunction instance.'''))
    return symfunc
| true | true |
f7f9b949e5f6e382015eabefcef7631166191ea0 | 1,952 | py | Python | snoop/urls.py | liquidinvestigations/hoover-snoop2 | 28e328401609f53fb56abaa4817619085aa3fbee | [
"MIT"
] | null | null | null | snoop/urls.py | liquidinvestigations/hoover-snoop2 | 28e328401609f53fb56abaa4817619085aa3fbee | [
"MIT"
] | 168 | 2019-11-07T12:38:07.000Z | 2021-04-19T09:53:51.000Z | snoop/urls.py | liquidinvestigations/hoover-snoop2 | 28e328401609f53fb56abaa4817619085aa3fbee | [
"MIT"
] | null | null | null | """Root URL routes file.
Points to global health check, admin sites, API documentation generators and the [snoop.data.urls][] URLs.
Also sets global URL prefixes.
"""
from django.urls import path, include, re_path
from django.http import HttpResponseRedirect
from django.conf import settings
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from snoop import views
from snoop.data import admin
def redirect_to_admin(request):
    """Redirect the bare site root to the default collection's admin site."""
    destination = '/{}admin/_default/'.format(settings.URL_PREFIX)
    return HttpResponseRedirect(destination)
# Base routes: health check, per-collection data API, and DRF session login.
base_urlpatterns = [
    re_path(r'^_health$', views.health),
    re_path(r'^collections/', include('snoop.data.urls', namespace='data')),
    path(r'drf-api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
# Mount one admin site per collection (see snoop.data.admin.sites).
base_urlpatterns += [path(f'admin/{k}/', v.urls) for k, v in admin.sites.items()]
# The bare root redirects to the default collection's admin.
base_urlpatterns += [re_path(r'^$', redirect_to_admin)]
# DRF-YASG
# ========
# API schema/documentation endpoints, exposed only when DEBUG is on.
if settings.DEBUG:
    schema_view = get_schema_view(
        openapi.Info(
            title="Snoop API",
            default_version='v0',
            # description="Liquid API for Tags",
            # contact=openapi.Contact(email="contact@liquiddemo.org"),
            # license=openapi.License(name="MIT License"),
        ),
        public=True,
        permission_classes=[permissions.AllowAny],
        validators=['ssv'],
    )
    schema_urlpatterns = [
        re_path(r'^swagger(?P<format>\.json|\.yaml)$',
                schema_view.without_ui(cache_timeout=0), name='schema-json'),
        re_path(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
        re_path(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
    ]
    base_urlpatterns += schema_urlpatterns
# Optionally nest everything under a global URL prefix.
if settings.URL_PREFIX:
    urlpatterns = [path(settings.URL_PREFIX, include(base_urlpatterns))]
else:
    urlpatterns = base_urlpatterns
| 32.533333 | 106 | 0.688525 |
from django.urls import path, include, re_path
from django.http import HttpResponseRedirect
from django.conf import settings
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from snoop import views
from snoop.data import admin
def redirect_to_admin(request):
    """Send the site root to the admin UI of the ``_default`` collection."""
    prefix = settings.URL_PREFIX
    return HttpResponseRedirect('/%sadmin/_default/' % prefix)
# Base routes: health check, per-collection data API, and DRF session login.
base_urlpatterns = [
    re_path(r'^_health$', views.health),
    re_path(r'^collections/', include('snoop.data.urls', namespace='data')),
    path(r'drf-api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
# Mount one admin site per collection (see snoop.data.admin.sites).
base_urlpatterns += [path(f'admin/{k}/', v.urls) for k, v in admin.sites.items()]
# The bare root redirects to the default collection's admin.
base_urlpatterns += [re_path(r'^$', redirect_to_admin)]
# Swagger/ReDoc schema endpoints, exposed only when DEBUG is on.
if settings.DEBUG:
    schema_view = get_schema_view(
        openapi.Info(
            title="Snoop API",
            default_version='v0',
        ),
        public=True,
        permission_classes=[permissions.AllowAny],
        validators=['ssv'],
    )
    schema_urlpatterns = [
        re_path(r'^swagger(?P<format>\.json|\.yaml)$',
                schema_view.without_ui(cache_timeout=0), name='schema-json'),
        re_path(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
        re_path(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
    ]
    base_urlpatterns += schema_urlpatterns
# Optionally nest everything under a global URL prefix.
if settings.URL_PREFIX:
    urlpatterns = [path(settings.URL_PREFIX, include(base_urlpatterns))]
else:
    urlpatterns = base_urlpatterns
| true | true |
f7f9b9e95f206c9a16415c219040fa1e279fd4ea | 2,148 | py | Python | src/django_dart_reverse/management/commands/collectstatic_dart_reverse.py | marekprochazka/django-dart-reverse | b1bf6f076cf96782437dcba0a2f264d6e179e9c0 | [
"MIT"
] | null | null | null | src/django_dart_reverse/management/commands/collectstatic_dart_reverse.py | marekprochazka/django-dart-reverse | b1bf6f076cf96782437dcba0a2f264d6e179e9c0 | [
"MIT"
] | null | null | null | src/django_dart_reverse/management/commands/collectstatic_dart_reverse.py | marekprochazka/django-dart-reverse | b1bf6f076cf96782437dcba0a2f264d6e179e9c0 | [
"MIT"
] | null | null | null | import os
import sys
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
from django.core.management.base import BaseCommand
from django_dart_reverse.utils.collect_urls import collect_urls
from django.urls import get_resolver
from django.template import loader
from django.conf import settings
from django_dart_reverse.utils.reverse_class import Reverse
from typing import List
from django.template.loaders.app_directories import get_app_template_dirs
class Command(BaseCommand):
    """Write a ``reverse.dart`` file containing the URL reverse dictionary.

    The file is rendered from the ``dart/dart_file.tpl`` template using all
    URLs collected from the root URL resolver, and saved either to
    ``settings.DART_REVERSE_PATH`` or under ``settings.STATIC_ROOT``.
    """
    help = 'Creates .dart file with reverse dictionary'
    requires_system_checks = False

    def __get_location(self) -> str:
        """Return the output directory for the generated file.

        Raises ImproperlyConfigured when neither ``DART_REVERSE_PATH`` nor
        ``STATIC_ROOT`` is configured.
        """
        # Supply a default so a project without the optional
        # DART_REVERSE_PATH setting falls through to STATIC_ROOT instead
        # of raising AttributeError here.
        output_path = getattr(settings, 'DART_REVERSE_PATH', None)
        if output_path:
            return output_path
        if not hasattr(settings, 'STATIC_ROOT') or not settings.STATIC_ROOT:
            raise ImproperlyConfigured(
                'The collectstatic_dart_reverse command needs settings.DART_REVERSE_PATH or settings.STATIC_ROOT to be set.')
        # NOTE(review): the fallback path still says "django_js_reverse/js"
        # even though this command emits Dart -- confirm it is intentional.
        return os.path.join(settings.STATIC_ROOT, 'django_js_reverse', 'js')

    def __get_urls(self) -> List[Reverse]:
        """Collect every reversible URL from the root URL resolver."""
        return list(collect_urls(get_resolver().url_patterns))

    def handle(self, *args, **kwargs) -> None:
        """Render the template and (re)write ``reverse.dart``."""
        location = self.__get_location()
        urls = self.__get_urls()
        throw_exception = getattr(settings, 'DART_REVERSE_THROW_EXCEPTION', False)
        throw_warning = getattr(settings, 'DART_REVERSE_THROW_WARNING', True)
        content = loader.render_to_string(
            'dart/dart_file.tpl',
            dict(urls=urls, throw_exception=throw_exception, throw_warning=throw_warning))
        filename = 'reverse.dart'
        storage = FileSystemStorage(location=location)
        # Delete any previous artifact so repeated runs stay idempotent.
        if storage.exists(filename):
            storage.delete(filename)
        storage.save(filename, ContentFile(content))
        # Only echo when invoked directly as this management command.
        if len(sys.argv) > 1 and sys.argv[1] in ['collectstatic_dart_reverse']:
            self.stdout.write('dart-reverse file written to %s' % location)
| 37.034483 | 125 | 0.708101 | import os
import sys
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
from django.core.management.base import BaseCommand
from django_dart_reverse.utils.collect_urls import collect_urls
from django.urls import get_resolver
from django.template import loader
from django.conf import settings
from django_dart_reverse.utils.reverse_class import Reverse
from typing import List
from django.template.loaders.app_directories import get_app_template_dirs
class Command(BaseCommand):
    """Management command that writes a ``reverse.dart`` URL-reversing file,
    rendered from ``dart/dart_file.tpl`` with all resolvable URLs."""
    help = 'Creates .dart file with reverse dictionary'
    requires_system_checks = False
    def __get_location(self) -> str:
        """Return the directory the generated file is written to."""
        # NOTE(review): getattr without a default raises AttributeError when
        # DART_REVERSE_PATH is absent, contradicting the STATIC_ROOT
        # fallback below -- should probably be getattr(..., None).
        output_path = getattr(settings, 'DART_REVERSE_PATH')
        if output_path:
            return output_path
        if not hasattr(settings, 'STATIC_ROOT') or not settings.STATIC_ROOT:
            raise ImproperlyConfigured(
                'The collectstatic_dart_reverse command needs settings.DART_REVERSE_PATH or settings.STATIC_ROOT to be set.')
        return os.path.join(settings.STATIC_ROOT, 'django_js_reverse', 'js')
    def __get_urls(self) -> List[Reverse]:
        """Collect every reversible URL from the root URL resolver."""
        urls = list()
        for value in collect_urls(get_resolver().url_patterns):
            urls.append(value)
        return urls
    def handle(self, *args, **kwargs) -> None:
        """Render the template and (re)write ``reverse.dart``."""
        location = self.__get_location()
        urls = self.__get_urls()
        throw_exception = getattr(settings, 'DART_REVERSE_THROW_EXCEPTION', False)
        throw_warning = getattr(settings, 'DART_REVERSE_THROW_WARNING', True)
        content = loader.render_to_string('dart/dart_file.tpl',
                                          dict(urls=urls, throw_exception=throw_exception, throw_warning=throw_warning))
        file = 'reverse.dart'
        fs = FileSystemStorage(location=location)
        # Remove any previous artifact so repeated runs stay idempotent.
        if fs.exists(file):
            fs.delete(file)
        fs.save(file, ContentFile(content))
        # Only echo when invoked directly as this management command.
        if len(sys.argv) > 1 and sys.argv[1] in ['collectstatic_dart_reverse']:
            self.stdout.write('dart-reverse file written to %s' % location)
f7f9b9ffaa84be12beaa72e62b7c1b1d877cca69 | 1,962 | py | Python | tests/test_classification_pipeline.py | giorgiosavastano/casa | 8ecfdb121ec5b6814a5c15bc75d6879848c99ec9 | [
"MIT"
] | 1 | 2022-03-22T11:35:06.000Z | 2022-03-22T11:35:06.000Z | tests/test_classification_pipeline.py | giorgiosavastano/CASA | 8ecfdb121ec5b6814a5c15bc75d6879848c99ec9 | [
"MIT"
] | 1 | 2022-03-27T16:08:56.000Z | 2022-03-27T16:08:56.000Z | tests/test_classification_pipeline.py | giorgiosavastano/CASA | 8ecfdb121ec5b6814a5c15bc75d6879848c99ec9 | [
"MIT"
] | null | null | null | from pathlib import Path
from unittest import TestCase
import numpy as np
import pytest
from cassa.classification_pipeline import (
eigen_decomposition,
get_affinity_matrix,
get_clusters_spectral,
)
from cassa.distance_matrix import DistanceMatrix
path = Path(__file__)
class TestDistMatrix(TestCase):
    """Unit tests for DistanceMatrix and the spectral-classification helpers."""
    def setUp(self):
        pass
    @pytest.mark.unit
    def test_distance_matrix(self):
        """End-to-end: distance matrix -> affinity -> spectral clusters."""
        matrix_arrays = np.random.random((100, 10, 50))
        dmatrix = DistanceMatrix(matrix_arrays)
        dist_matr = dmatrix.compute_distance_matrix(parallel=True)
        # One row per input sample.
        self.assertEqual(matrix_arrays.shape[0], dist_matr.shape[0])
        aff_matrix = get_affinity_matrix(dist_matr)
        self.assertEqual(aff_matrix.shape, dist_matr.shape)
        # Estimated number of clusters must be positive.
        n_cl = eigen_decomposition(aff_matrix)[0]
        self.assertGreater(n_cl, 0)
        l_labels, cl_colors, clusterer = get_clusters_spectral(
            dist_matr, ncl=n_cl, self_tuned=True
        )
        # One color per label.
        self.assertEqual(len(l_labels), len(cl_colors))
    @pytest.mark.unit
    def test_distance_matrix_parallel(self):
        """Parallel and serial computation must produce identical matrices."""
        matrix_arrays = np.random.random((100, 10, 50))
        dmatrix = DistanceMatrix(matrix_arrays)
        dist_matr_1 = dmatrix.compute_distance_matrix(parallel=True)
        dist_matr_2 = dmatrix.compute_distance_matrix(parallel=False)
        self.assertTrue((dist_matr_1 == dist_matr_2).all())
    @pytest.mark.unit
    def test_classify_new_data(self):
        """A sample taken from the training set must match itself exactly."""
        matrix_arrays = np.random.random((1000, 10, 50))
        # Use a known row so its nearest neighbour (distance 0) is itself.
        new_data = matrix_arrays[269]
        dmatrix = DistanceMatrix(matrix_arrays)
        dmatrix.k_step_new_data = int(matrix_arrays.shape[0] / 5)
        indexes_arr, dists_arr = dmatrix.classify_new_data(new_data=new_data)
        self.assertEqual(indexes_arr[0], 269)
        self.assertEqual(np.argmin(dists_arr), indexes_arr[0])
        # Exactly one zero distance: the sample itself.
        self.assertEqual(len(dists_arr[dists_arr == 0]), 1)
    def tearDown(self):
        pass
| 30.65625 | 77 | 0.699286 | from pathlib import Path
from unittest import TestCase
import numpy as np
import pytest
from cassa.classification_pipeline import (
eigen_decomposition,
get_affinity_matrix,
get_clusters_spectral,
)
from cassa.distance_matrix import DistanceMatrix
path = Path(__file__)
class TestDistMatrix(TestCase):
def setUp(self):
pass
@pytest.mark.unit
def test_distance_matrix(self):
matrix_arrays = np.random.random((100, 10, 50))
dmatrix = DistanceMatrix(matrix_arrays)
dist_matr = dmatrix.compute_distance_matrix(parallel=True)
self.assertEqual(matrix_arrays.shape[0], dist_matr.shape[0])
aff_matrix = get_affinity_matrix(dist_matr)
self.assertEqual(aff_matrix.shape, dist_matr.shape)
n_cl = eigen_decomposition(aff_matrix)[0]
self.assertGreater(n_cl, 0)
l_labels, cl_colors, clusterer = get_clusters_spectral(
dist_matr, ncl=n_cl, self_tuned=True
)
self.assertEqual(len(l_labels), len(cl_colors))
@pytest.mark.unit
def test_distance_matrix_parallel(self):
matrix_arrays = np.random.random((100, 10, 50))
dmatrix = DistanceMatrix(matrix_arrays)
dist_matr_1 = dmatrix.compute_distance_matrix(parallel=True)
dist_matr_2 = dmatrix.compute_distance_matrix(parallel=False)
self.assertTrue((dist_matr_1 == dist_matr_2).all())
@pytest.mark.unit
def test_classify_new_data(self):
matrix_arrays = np.random.random((1000, 10, 50))
new_data = matrix_arrays[269]
dmatrix = DistanceMatrix(matrix_arrays)
dmatrix.k_step_new_data = int(matrix_arrays.shape[0] / 5)
indexes_arr, dists_arr = dmatrix.classify_new_data(new_data=new_data)
self.assertEqual(indexes_arr[0], 269)
self.assertEqual(np.argmin(dists_arr), indexes_arr[0])
self.assertEqual(len(dists_arr[dists_arr == 0]), 1)
def tearDown(self):
pass
| true | true |
f7f9ba475ec001028cdc14895a97b63e91f5b204 | 5,507 | py | Python | results/aggregate_results.py | mostafamahdieh/ClusteringFaultPronenessTCP | 567184740d24f464cde7d623f84ec3a6d989d401 | [
"MIT"
] | null | null | null | results/aggregate_results.py | mostafamahdieh/ClusteringFaultPronenessTCP | 567184740d24f464cde7d623f84ec3a6d989d401 | [
"MIT"
] | null | null | null | results/aggregate_results.py | mostafamahdieh/ClusteringFaultPronenessTCP | 567184740d24f464cde7d623f84ec3a6d989d401 | [
"MIT"
] | 1 | 2021-09-21T13:29:01.000Z | 2021-09-21T13:29:01.000Z | import pandas as pd
from pandas import Categorical
import numpy as np
from numpy import std, mean, sqrt
import os
import matplotlib
import matplotlib.pyplot as plt
import scipy.stats as stats
import itertools as it
def effect_size(lst1, lst2):
    """Effect size between two paired samples.

    Currently defined as the mean paired difference; see :func:`improvement`.

    Args:
        lst1: First sequence of values.
        lst2: Second sequence of values, same length as ``lst1``.

    Returns:
        float: Mean of ``lst1[i] - lst2[i]`` over all pairs.
    """
    return improvement(lst1, lst2)


def improvement(x, y):
    """Mean paired difference between two equal-length sequences.

    A positive result means ``x`` exceeds ``y`` on average, i.e. the second
    technique improves on the first when lower values are better.

    Args:
        x: Baseline values (list, numpy array or pandas Series).
        y: Comparison values, same length as ``x``.

    Returns:
        float: ``mean(x - y)``.

    Raises:
        ZeroDivisionError: If the sequences are empty.
    """
    # Iterate pairs directly instead of positional ``x[i]``/``y[i]``; this is
    # also safe for pandas Series whose index is not 0..n-1.
    diffs = [a - b for a, b in zip(x, y)]
    return sum(diffs) / len(diffs)
def read_results(file_names, project, from_version, to_version):
    """Read per-version TCP result csv files for one project.

    For each version in ``range(from_version, to_version)`` every file in
    ``file_names`` is read from ``../../WTP-data/<project>/<version>/``.
    A version contributes a row only if *all* requested files exist for it.

    Args:
        file_names: Iterable of csv file names (columns: alg, first_fail, apfd).
        project: Project name, e.g. "Chart".
        from_version: First version number (inclusive).
        to_version: Last version number (exclusive).

    Returns:
        tuple: ``(first_fail, apfd)`` DataFrames, one row per complete version
        with a leading 'version' column and one column per algorithm.
        ``first_fail`` values are scaled to percent.
    """
    first_fail_rows = []
    apfd_rows = []
    for version_number in range(from_version, to_version):
        data_path = "../../WTP-data/%s/%d" % (project, version_number)
        row_first_fail = {'version': version_number}
        row_apfd = {'version': version_number}
        skipped = False
        for file_name in file_names:
            file_path = '%s/%s' % (data_path, file_name)
            if os.path.isfile(file_path):
                print("Reading %s" % file_path)
                results = pd.read_csv(file_path, delimiter=',')
                for i, row in results.iterrows():
                    row_first_fail[row['alg']] = row['first_fail'] * 100
                    row_apfd[row['alg']] = row['apfd']
            else:
                print("Skipping %s" % file_path)
                skipped = True
        if not skipped:
            first_fail_rows.append(row_first_fail)
            apfd_rows.append(row_apfd)
    # Build the frames once at the end: per-row DataFrame.append was removed
    # in pandas 2.0 and was quadratic in the number of versions anyway.
    if first_fail_rows:
        first_fail = pd.DataFrame(first_fail_rows)
        apfd = pd.DataFrame(apfd_rows)
    else:
        # Preserve the original shape of an empty result.
        first_fail = pd.DataFrame(columns=['version'])
        apfd = pd.DataFrame(columns=['version'])
    return first_fail, apfd
def main():
    """Aggregate per-project TCP results, plot boxplots and write stats.

    Reads per-version csv results for each Defects4J project, renames the raw
    algorithm ids to presentation names, computes improvement statistics,
    saves one first-fail boxplot per project, prints Wilcoxon signed-rank
    tests over all versions, and writes per-project means to stats.csv.
    """
    # projects = ['Chart', 'Closure', 'Lang', 'Math', 'Time']
    # from_version = [1, 1, 1, 1, 1]
    # to_version = [26, 119, 65, 106, 26]
    projects = ['Chart', 'Closure', 'Lang', 'Math', 'Time']
    from_version = [1, 1, 1, 1, 1]
    to_version = [13, 50, 33, 50, 14]
    results_path = '../../WTP-data/aggregate/first_fail/a11'
    # Create the output directory (and parents) if missing; replaces the old
    # os.stat / bare-except / os.mkdir sequence.
    os.makedirs(results_path, exist_ok=True)
    matplotlib.rcParams.update({'font.size': 14})
    pd.set_option('display.max_columns', 1000)
    mean_rows = []          # per-project Series of per-algorithm means
    improvement_rows = []   # per-project [project, impr_clustering, impr_clustering_fp]
    first_fail_frames = []  # per-project first-fail frames, concatenated below
    for index, project in enumerate(projects):
        first_fail, apfd = read_results(['std2.csv', 'agg11_4.csv', 'std2_c95.csv', 'agg11_c95.csv'],
                                        project, from_version[index], to_version[index] + 1)
        plt.close('all')
        # Map raw algorithm ids to presentation names.
        first_fail = first_fail.rename(columns={"a11_4_c0_at": "Clustering", "a11_c95_at": "Clustering+FP",
                                                "add_c0": "Additional", "tot_c0": "Total",
                                                "add_c95": 'Additional+FP', "tot_c95": "Total+FP"})
        improvement_clustering = improvement(first_fail[['Additional', 'Total']].min(axis=1), first_fail["Clustering"])
        improvement_clustering_fp = improvement(first_fail[['Additional+FP', 'Total+FP']].min(axis=1), first_fail["Clustering+FP"])
        print("improvement_clustering", improvement_clustering)
        print("improvement_clustering_fp", improvement_clustering_fp)
        # Bug fix: the old code called improvement_stats.add([...]), which is
        # element-wise arithmetic and silently discarded its result, so the
        # rows were never recorded.  Collect them and build the frame below.
        improvement_rows.append([project, improvement_clustering, improvement_clustering_fp])
        first_fail_frames.append(first_fail)
        first_fail_mean = first_fail.mean()
        first_fail_mean = first_fail_mean.drop('version')
        mean_rows.append(first_fail_mean)
        columns = ['Total', 'Additional', 'Clustering', 'Total+FP', 'Additional+FP', 'Clustering+FP']
        plot1 = first_fail.boxplot(column=columns)
        plot1.set_ylabel('First Fail (%)')
        plot1.set_ylim(0, 100)
        fig1 = plot1.get_figure()
        fig1.autofmt_xdate(rotation=32)
        fig1.savefig('%s/%s.first_fail.boxplot.png' % (results_path, project))
        plt.close('all')
    # DataFrame.append was removed in pandas 2.0; build the frames once.
    improvement_stats = pd.DataFrame(
        improvement_rows,
        columns=["project", "improvement_clustering", "improvement_clustering_fp"])
    first_fail_all = pd.concat(first_fail_frames).reset_index()
    print(first_fail_all)
    print("first_fail_total", stats.wilcoxon(first_fail_all["Total"], first_fail_all["Clustering"]))
    print("first_fail_additional", stats.wilcoxon(first_fail_all["Additional"], first_fail_all["Clustering"]))
    print("first_fail_tolal+fp", stats.wilcoxon(first_fail_all["Total+FP"], first_fail_all["Clustering+FP"]))
    print("first_fail_additional+fp", stats.wilcoxon(first_fail_all["Additional+FP"], first_fail_all["Clustering+FP"]))
    data_vals_stats = pd.DataFrame(mean_rows).reset_index(drop=True)
    data_vals_stats.insert(0, 'project', projects)
    data_vals_stats.to_csv(results_path + '/stats.csv')
main()
| 40.19708 | 132 | 0.610677 | import pandas as pd
from pandas import Categorical
import numpy as np
from numpy import std, mean, sqrt
import os
import matplotlib
import matplotlib.pyplot as plt
import scipy.stats as stats
import itertools as it
def effect_size(lst1, lst2):
return improvement(lst1, lst2)
def improvement(x, y):
n = len(x)
a = 0
for i in range(0, n):
a = a + (x[i] - y[i])
improvement = (a/n)
return improvement
def read_results(file_names, project, from_version, to_version):
first_fail = pd.DataFrame(columns=['version'])
apfd = pd.DataFrame(columns=['version'])
for version_number in range(from_version, to_version):
data_path = "../../WTP-data/%s/%d" % (project, version_number)
results_dict_first_fail = {'version': version_number}
results_dict_apfd = {'version': version_number}
skipped = False
for file_name in file_names:
file_path = '%s/%s' % (data_path, file_name)
if os.path.isfile(file_path):
print("Reading %s" % file_path)
results = pd.read_csv(file_path, delimiter=',')
for i, row in results.iterrows():
results_dict_first_fail[row['alg']] = row['first_fail'] * 100
results_dict_apfd[row['alg']] = row['apfd']
else:
print("Skipping %s" % file_path)
skipped = True
if not skipped:
first_fail = first_fail.append(results_dict_first_fail, ignore_index=True)
apfd = apfd.append(results_dict_apfd, ignore_index=True)
return first_fail, apfd
def main():
projects = ['Chart', 'Closure', 'Lang', 'Math', 'Time']
from_version = [1, 1, 1, 1, 1]
to_version = [13, 50, 33, 50, 14]
results_path = '../../WTP-data/aggregate/first_fail/a11'
try:
os.stat(results_path)
except:
os.mkdir(results_path)
matplotlib.rcParams.update({'font.size': 14})
pd.set_option('display.max_columns', 1000)
data_vals_stats = pd.DataFrame()
improvement_stats = pd.DataFrame(columns=["project", "improvement_clustering", "improvement_clustering_fp"])
first_fail_all = pd.DataFrame()
for index, project in enumerate(projects):
first_fail, apfd = read_results(['std2.csv', 'agg11_4.csv', 'std2_c95.csv', 'agg11_c95.csv'],
project, from_version[index], to_version[index] + 1)
plt.close('all')
first_fail = first_fail.rename(columns={"a11_4_c0_at": "Clustering", "a11_c95_at": "Clustering+FP",
"add_c0":"Additional","tot_c0":"Total",
"add_c95":'Additional+FP', "tot_c95":"Total+FP"})
improvement_clustering = improvement(first_fail[['Additional', 'Total']].min(axis=1), first_fail["Clustering"])
improvement_clustering_fp = improvement(first_fail[['Additional+FP', 'Total+FP']].min(axis=1), first_fail["Clustering+FP"])
print("improvement_clustering", improvement_clustering)
print("improvement_clustering_fp", improvement_clustering_fp)
improvement_stats.add([project, improvement_clustering, improvement_clustering_fp])
if index == 0:
first_fail_all = first_fail
else:
first_fail_all = first_fail_all.append(first_fail)
first_fail_mean = first_fail.mean()
first_fail_mean = first_fail_mean.drop('version')
data_vals_stats = data_vals_stats.append(first_fail_mean, ignore_index=True)
columns = ['Total', 'Additional', 'Clustering', 'Total+FP', 'Additional+FP', 'Clustering+FP']
plot1 = first_fail.boxplot(column=columns)
plot1.set_ylabel('First Fail (%)')
plot1.set_ylim(0, 100)
fig1 = plot1.get_figure()
fig1.autofmt_xdate(rotation=32)
fig1.savefig('%s/%s.first_fail.boxplot.png' % (results_path, project))
plt.close('all')
first_fail_all = first_fail_all.reset_index()
print(first_fail_all)
print("first_fail_total", stats.wilcoxon(first_fail_all["Total"],first_fail_all["Clustering"]))
print("first_fail_additional", stats.wilcoxon(first_fail_all["Additional"],first_fail_all["Clustering"]))
print("first_fail_tolal+fp", stats.wilcoxon(first_fail_all["Total+FP"],first_fail_all["Clustering+FP"]))
print("first_fail_additional+fp", stats.wilcoxon(first_fail_all["Additional+FP"],first_fail_all["Clustering+FP"]))
data_vals_stats.insert(0, 'project', projects)
data_vals_stats.to_csv(results_path+'/stats.csv')
main()
| true | true |
f7f9bb06540b930a8b6704051424343cb499ec30 | 803 | py | Python | python/program.py | dejanfajfar/enigma | 51d737a843571d173c93d0abba7017518e1b217b | [
"MIT"
] | null | null | null | python/program.py | dejanfajfar/enigma | 51d737a843571d173c93d0abba7017518e1b217b | [
"MIT"
] | null | null | null | python/program.py | dejanfajfar/enigma | 51d737a843571d173c93d0abba7017518e1b217b | [
"MIT"
] | null | null | null | import Rotor
import Reflector
from Enigma import Enigma
rotor1 = Rotor.Rotor("C", Rotor.Type1, 'r1')
rotor2 = Rotor.Rotor("A", Rotor.Type1, 'r2')
rotor3 = Rotor.Rotor("D", Rotor.Type1, 'r3')
rotor12 = Rotor.Rotor("C", Rotor.Type1, 'r1')
rotor22 = Rotor.Rotor("A", Rotor.Type1, 'r2')
rotor32 = Rotor.Rotor("D", Rotor.Type1, 'r3')
reflector = Reflector.Reflector(Reflector.default_configuration)
enigma1 = Enigma([rotor1, rotor2, rotor3], reflector)
enigma2 = Enigma([rotor12, rotor22, rotor32], reflector)
original_message = 'THEQUICKBROWNFOXJUMPSOVERTHELAZYDOG'
encoded_message = enigma1.encode(original_message)
decoded_message = enigma2.decode(encoded_message)
print('----')
print(f'original : {original_message}')
print(f'encoded : {encoded_message}')
print(f'decoded : {decoded_message}') | 28.678571 | 64 | 0.739726 | import Rotor
import Reflector
from Enigma import Enigma
rotor1 = Rotor.Rotor("C", Rotor.Type1, 'r1')
rotor2 = Rotor.Rotor("A", Rotor.Type1, 'r2')
rotor3 = Rotor.Rotor("D", Rotor.Type1, 'r3')
rotor12 = Rotor.Rotor("C", Rotor.Type1, 'r1')
rotor22 = Rotor.Rotor("A", Rotor.Type1, 'r2')
rotor32 = Rotor.Rotor("D", Rotor.Type1, 'r3')
reflector = Reflector.Reflector(Reflector.default_configuration)
enigma1 = Enigma([rotor1, rotor2, rotor3], reflector)
enigma2 = Enigma([rotor12, rotor22, rotor32], reflector)
original_message = 'THEQUICKBROWNFOXJUMPSOVERTHELAZYDOG'
encoded_message = enigma1.encode(original_message)
decoded_message = enigma2.decode(encoded_message)
print('----')
print(f'original : {original_message}')
print(f'encoded : {encoded_message}')
print(f'decoded : {decoded_message}') | true | true |
f7f9bb2ba1a602a40bcb09a7b2118dd904b7e424 | 2,381 | py | Python | rlpyt/utils/tensor.py | cambel/rlpyt | 96e231d6c77ba5ff06dd09f6e9c8837f0abb1a89 | [
"MIT"
] | 17 | 2020-12-07T11:10:03.000Z | 2022-03-21T04:18:13.000Z | rlpyt/utils/tensor.py | cambel/rlpyt | 96e231d6c77ba5ff06dd09f6e9c8837f0abb1a89 | [
"MIT"
] | 3 | 2021-02-20T01:59:20.000Z | 2021-12-08T09:19:43.000Z | rlpyt/utils/tensor.py | cambel/rlpyt | 96e231d6c77ba5ff06dd09f6e9c8837f0abb1a89 | [
"MIT"
] | 3 | 2021-04-19T14:40:32.000Z | 2022-03-29T15:56:38.000Z |
import torch
def select_at_indexes(indexes, tensor):
    """Pick one entry along dimension ``indexes.dim()`` of ``tensor``.

    The leading dimensions of ``tensor`` must equal ``indexes.shape``; the
    indexed dimension is removed from the result.
    """
    lead = indexes.dim()
    assert tensor.shape[:lead] == indexes.shape
    n = indexes.numel()
    # Collapse the leading dims so a single fancy-index gather suffices.
    flat = tensor.view((n,) + tensor.shape[lead:])
    picked = flat[torch.arange(n), indexes.view(-1)]
    return picked.view(tensor.shape[:lead] + tensor.shape[lead + 1:])
def to_onehot(indexes, num, dtype=None):
    """One-hot encode ``indexes``; a size-``num`` axis is appended at the end.

    The output dtype defaults to the dtype of ``indexes``.
    """
    out_dtype = indexes.dtype if dtype is None else dtype
    out = torch.zeros(indexes.shape + (num,),
                      dtype=out_dtype, device=indexes.device)
    # scatter_ requires long indices; mark the selected position with 1.
    positions = indexes.unsqueeze(-1).type(torch.long)
    return out.scatter_(-1, positions, 1)
def from_onehot(onehot, dim=-1, dtype=None):
    """Invert a one-hot encoding by taking the argmax along ``dim``."""
    result = torch.argmax(onehot, dim=dim)
    return result if dtype is None else result.type(dtype)
def valid_mean(tensor, valid=None, dim=None):
    """Mean of ``tensor``, optionally weighted by a 0/1 ``valid`` mask.

    With ``valid`` given, only masked-in entries contribute and the result is
    normalized by the mask sum.  ``dim=None`` reduces over all dimensions.
    """
    reduce_dim = () if dim is None else dim
    if valid is None:
        return tensor.mean(dim=reduce_dim)
    mask = valid.type(tensor.dtype)  # convert bool/int mask as needed
    return (tensor * mask).sum(dim=reduce_dim) / mask.sum(dim=reduce_dim)
def infer_leading_dims(tensor, dim):
    """Determine how many leading (time/batch) dims precede the data dims.

    Args:
        tensor: Input with 0, 1 or 2 leading dims before the data dims.
        dim: Number of trailing data dimensions.

    Returns:
        tuple: ``(lead_dim, T, B, shape)`` where missing leading sizes
        default to 1 and ``shape`` is the trailing data shape.
    """
    lead_dim = tensor.dim() - dim
    assert 0 <= lead_dim <= 2
    if lead_dim == 0:
        T = B = 1
    elif lead_dim == 1:
        T, B = 1, tensor.shape[0]
    else:  # two leading dims: (T, B, ...)
        T, B = tensor.shape[0], tensor.shape[1]
    return lead_dim, T, B, tensor.shape[-dim:]
def restore_leading_dims(tensors, lead_dim, T=1, B=1):
    """Reshape the leading batch dim of ``tensors`` back to ``lead_dim`` dims.

    Accepts a single tensor or a tuple/list of tensors with a leading batch
    dimension; returns the same arrangement (single vs sequence) as given.
    """
    was_sequence = isinstance(tensors, (tuple, list))
    items = tensors if was_sequence else (tensors,)
    if lead_dim == 2:
        # Split the combined batch back into (T, B).
        items = tuple(t.view((T, B) + t.shape[1:]) for t in items)
    elif lead_dim == 0:
        # No leading dims in the caller's view: drop the B=1 axis.
        assert B == 1
        items = tuple(t.squeeze(0) for t in items)
    return items if was_sequence else items[0]
| 34.014286 | 80 | 0.642587 |
import torch
def select_at_indexes(indexes, tensor):
dim = len(indexes.shape)
assert indexes.shape == tensor.shape[:dim]
num = indexes.numel()
t_flat = tensor.view((num,) + tensor.shape[dim:])
s_flat = t_flat[torch.arange(num), indexes.view(-1)]
return s_flat.view(tensor.shape[:dim] + tensor.shape[dim + 1:])
def to_onehot(indexes, num, dtype=None):
if dtype is None:
dtype = indexes.dtype
onehot = torch.zeros(indexes.shape + (num,),
dtype=dtype, device=indexes.device)
onehot.scatter_(-1, indexes.unsqueeze(-1).type(torch.long), 1)
return onehot
def from_onehot(onehot, dim=-1, dtype=None):
indexes = torch.argmax(onehot, dim=dim)
if dtype is not None:
indexes = indexes.type(dtype)
return indexes
def valid_mean(tensor, valid=None, dim=None):
dim = () if dim is None else dim
if valid is None:
return tensor.mean(dim=dim)
valid = valid.type(tensor.dtype)
return (tensor * valid).sum(dim=dim) / valid.sum(dim=dim)
def infer_leading_dims(tensor, dim):
lead_dim = tensor.dim() - dim
assert lead_dim in (0, 1, 2)
if lead_dim == 2:
T, B = tensor.shape[:2]
else:
T = 1
B = 1 if lead_dim == 0 else tensor.shape[0]
shape = tensor.shape[-dim:]
return lead_dim, T, B, shape
def restore_leading_dims(tensors, lead_dim, T=1, B=1):
is_seq = isinstance(tensors, (tuple, list))
tensors = tensors if is_seq else (tensors,)
if lead_dim == 2:
tensors = tuple(t.view((T, B) + t.shape[1:]) for t in tensors)
if lead_dim == 0:
assert B == 1
tensors = tuple(t.squeeze(0) for t in tensors)
return tensors if is_seq else tensors[0]
| true | true |
f7f9bbb2632c6f35444faaf33af8f70a8b3075fb | 1,876 | py | Python | pygments_base16/base16-mellow-purple.py | philj56/base16-pygments | 04cf1b28ad4a5603cd3336a3c4dba976cf5f1e5b | [
"MIT"
] | null | null | null | pygments_base16/base16-mellow-purple.py | philj56/base16-pygments | 04cf1b28ad4a5603cd3336a3c4dba976cf5f1e5b | [
"MIT"
] | null | null | null | pygments_base16/base16-mellow-purple.py | philj56/base16-pygments | 04cf1b28ad4a5603cd3336a3c4dba976cf5f1e5b | [
"MIT"
] | null | null | null | from pygments.style import Style
from pygments.token import (
Comment, Error, Keyword, Literal, Name, Number, Operator, String, Text
)
class BaseSixteenStyle(Style):
    """Pygments style for the base16 'mellow-purple' color scheme.

    base00-base0f follow the base16 convention: base00-07 are background /
    foreground shades, base08-0f are accent colors.  The ``styles`` mapping
    assigns them to Pygments token types (the trailing comments give the
    corresponding Pygments CSS class).
    """
    # Background-to-foreground ramp (darkest to lightest).
    base00 = '#1e0528'
    base01 = '#1A092D'
    base02 = '#331354'
    base03 = '#320f55'
    base04 = '#873582'
    base05 = '#ffeeff'
    base06 = '#ffeeff'
    base07 = '#f8c0ff'
    # Accent colors.
    base08 = '#00d9e9'
    base09 = '#aa00a3'
    base0a = '#955ae7'
    base0b = '#05cb0d'
    base0c = '#b900b1'
    base0d = '#550068'
    base0e = '#8991bb'
    base0f = '#4d6fff'
    default_style = ''
    background_color = base00
    highlight_color = base02
    styles = {
        Text:                      base05,
        Error:                     base08,  # .err
        Comment:                   f'italic {base03}',  # .c
        Comment.Preproc:           base0f,  # .cp
        Comment.PreprocFile:       base0b,  # .cpf
        Keyword:                   base0e,  # .k
        Keyword.Type:              base08,  # .kt
        Name.Attribute:            base0d,  # .na
        Name.Builtin:              base0d,  # .nb
        Name.Builtin.Pseudo:       base08,  # .bp
        Name.Class:                base0d,  # .nc
        Name.Constant:             base09,  # .no
        Name.Decorator:            base09,  # .nd
        Name.Function:             base0d,  # .nf
        Name.Namespace:            base0d,  # .nn
        Name.Tag:                  base0e,  # .nt
        Name.Variable:             base0d,  # .nv
        Name.Variable.Instance:    base08,  # .vi
        Number:                    base09,  # .m
        Operator:                  base0c,  # .o
        Operator.Word:             base0e,  # .ow
        Literal:                   base0b,  # .l
        String:                    base0b,  # .s
        String.Interpol:           base0f,  # .si
        String.Regex:              base0c,  # .sr
        String.Symbol:             base09,  # .ss
    }
# Rebind the style class under a theme-specific name
# (BaseSixteenMellowPurpleStyle) so Pygments style discovery finds it,
# then remove the generic name and the helper import from the module.
from string import capwords  # noqa: E402
BaseSixteenStyle.__name__ = 'BaseSixteen{}Style'.format(
    capwords('mellow-purple', '-').replace('-', '')
)
globals()[BaseSixteenStyle.__name__] = globals()['BaseSixteenStyle']
del globals()['BaseSixteenStyle']
del capwords
| 25.351351 | 74 | 0.570896 | from pygments.style import Style
from pygments.token import (
Comment, Error, Keyword, Literal, Name, Number, Operator, String, Text
)
class BaseSixteenStyle(Style):
base00 = '#1e0528'
base01 = '#1A092D'
base02 = '#331354'
base03 = '#320f55'
base04 = '#873582'
base05 = '#ffeeff'
base06 = '#ffeeff'
base07 = '#f8c0ff'
base08 = '#00d9e9'
base09 = '#aa00a3'
base0a = '#955ae7'
base0b = '#05cb0d'
base0c = '#b900b1'
base0d = '#550068'
base0e = '#8991bb'
base0f = '#4d6fff'
default_style = ''
background_color = base00
highlight_color = base02
styles = {
Text: base05,
Error: base08,
Comment: f'italic {base03}',
Comment.Preproc: base0f,
Comment.PreprocFile: base0b,
Keyword: base0e,
Keyword.Type: base08,
Name.Attribute: base0d,
Name.Builtin: base0d,
Name.Builtin.Pseudo: base08,
Name.Class: base0d,
Name.Constant: base09,
Name.Decorator: base09,
Name.Function: base0d,
Name.Namespace: base0d,
Name.Tag: base0e,
Name.Variable: base0d,
Name.Variable.Instance: base08,
Number: base09,
Operator: base0c,
Operator.Word: base0e,
Literal: base0b,
String: base0b,
String.Interpol: base0f,
String.Regex: base0c,
String.Symbol: base09,
}
from string import capwords
BaseSixteenStyle.__name__ = 'BaseSixteen{}Style'.format(
capwords('mellow-purple', '-').replace('-', '')
)
globals()[BaseSixteenStyle.__name__] = globals()['BaseSixteenStyle']
del globals()['BaseSixteenStyle']
del capwords
| true | true |
f7f9bcabdc23c3b530e5db4bbcb30206e5e9becd | 595 | py | Python | accounts/admin.py | mcastellin/anem-per-feina | 5c7072c560e8e34355f7bbf7db12e36403766e68 | [
"MIT"
] | null | null | null | accounts/admin.py | mcastellin/anem-per-feina | 5c7072c560e8e34355f7bbf7db12e36403766e68 | [
"MIT"
] | null | null | null | accounts/admin.py | mcastellin/anem-per-feina | 5c7072c560e8e34355f7bbf7db12e36403766e68 | [
"MIT"
] | null | null | null | from django.contrib import admin # type: ignore
# Register your models here.
from accounts.models import User
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
    """Django admin configuration for the custom ``User`` model."""
    # Columns shown in the user change-list.
    list_display = [
        "email",
        "first_name",
        "last_name",
        "is_active",
        "date_joined",
    ]
    # Sidebar filters on the change-list page.
    list_filter = ["role", "is_active", "is_staff", "is_superuser"]
    # Fields (and their order) on the user detail form.
    fields = [
        "email",
        "first_name",
        "last_name",
        "gender",
        "role",
        "is_active",
        "is_staff",
        "is_superuser",
        "date_joined",
        "last_login",
    ]
| 20.517241 | 67 | 0.544538 | from django.contrib import admin
from accounts.models import User
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
list_display = [
"email",
"first_name",
"last_name",
"is_active",
"date_joined",
]
list_filter = ["role", "is_active", "is_staff", "is_superuser"]
fields = [
"email",
"first_name",
"last_name",
"gender",
"role",
"is_active",
"is_staff",
"is_superuser",
"date_joined",
"last_login",
]
| true | true |
f7f9bd42690d032cbf16620fa48e6d3d3afc9813 | 30,393 | py | Python | azure-mgmt-eventgrid/azure/mgmt/eventgrid/operations/domains_operations.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | null | null | null | azure-mgmt-eventgrid/azure/mgmt/eventgrid/operations/domains_operations.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | 1 | 2018-11-29T14:46:42.000Z | 2018-11-29T14:46:42.000Z | azure-mgmt-eventgrid/azure/mgmt/eventgrid/operations/domains_operations.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class DomainsOperations(object):
"""DomainsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Version of the API to be used with the client request. Constant value: "2018-09-15-preview".
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        """Wire the shared service client, config and (de)serializers.

        The api_version is fixed by the generated client contract.
        """
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2018-09-15-preview"

        self.config = config
    def get(
            self, resource_group_name, domain_name, custom_headers=None, raw=False, **operation_config):
        """Get a domain.

        Get properties of a domain.

        :param resource_group_name: The name of the resource group within the
         user's subscription.
        :type resource_group_name: str
        :param domain_name: Name of the domain
        :type domain_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Domain or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.eventgrid.models.Domain or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'domainName': self._serialize.url("domain_name", domain_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            # Correlation id for request tracing on the service side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        # Only 200 is a success status for this GET; anything else is
        # surfaced as a CloudError carrying the ARM request id.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Domain', response)

        if raw:
            # Caller asked for the raw transport response alongside the model.
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}'}
    def _create_or_update_initial(
            self, resource_group_name, domain_name, domain_info, custom_headers=None, raw=False, **operation_config):
        """Issue the initial PUT of the create/update long-running operation.

        Returns the deserialized ``Domain`` from the 201 response; polling to
        completion is handled by :meth:`create_or_update`.
        """
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'domainName': self._serialize.url("domain_name", domain_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id for request tracing on the service side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(domain_info, 'Domain')

        # Construct and send request
        request = self._client.put(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        # 201 (Created) is the only accepted status for the initial PUT.
        if response.status_code not in [201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 201:
            deserialized = self._deserialize('Domain', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
def create_or_update(
self, resource_group_name, domain_name, domain_info, custom_headers=None, raw=False, polling=True, **operation_config):
"""Create a domain.
Asynchronously creates a new domain with the specified parameters.
:param resource_group_name: The name of the resource group within the
user's subscription.
:type resource_group_name: str
:param domain_name: Name of the domain
:type domain_name: str
:param domain_info: Domain information
:type domain_info: ~azure.mgmt.eventgrid.models.Domain
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns Domain or
ClientRawResponse<Domain> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.eventgrid.models.Domain]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.eventgrid.models.Domain]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
domain_name=domain_name,
domain_info=domain_info,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('Domain', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}'}
    def _delete_initial(
            self, resource_group_name, domain_name, custom_headers=None, raw=False, **operation_config):
        """Issue the initial DELETE of the delete long-running operation.

        Returns nothing (or the raw response if ``raw``); polling to
        completion is handled by :meth:`delete`.
        """
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'domainName': self._serialize.url("domain_name", domain_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        if self.config.generate_client_request_id:
            # Correlation id for request tracing on the service side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        # 202 (Accepted, deletion in progress) and 204 (No Content) are the
        # accepted statuses; anything else is surfaced as a CloudError.
        if response.status_code not in [202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
def delete(
self, resource_group_name, domain_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Delete a domain.
Delete existing domain.
:param resource_group_name: The name of the resource group within the
user's subscription.
:type resource_group_name: str
:param domain_name: Name of the domain
:type domain_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
domain_name=domain_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}'}
def _update_initial(
self, resource_group_name, domain_name, tags=None, custom_headers=None, raw=False, **operation_config):
domain_update_parameters = models.DomainUpdateParameters(tags=tags)
# Construct URL
url = self.update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'domainName': self._serialize.url("domain_name", domain_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(domain_update_parameters, 'DomainUpdateParameters')
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('Domain', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, domain_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Update a domain.
Asynchronously updates a domain with the specified parameters.
:param resource_group_name: The name of the resource group within the
user's subscription.
:type resource_group_name: str
:param domain_name: Name of the domain
:type domain_name: str
:param tags: Tags of the domains resource
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns Domain or
ClientRawResponse<Domain> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.eventgrid.models.Domain]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.eventgrid.models.Domain]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_initial(
resource_group_name=resource_group_name,
domain_name=domain_name,
tags=tags,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('Domain', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}'}
def list_by_subscription(
self, custom_headers=None, raw=False, **operation_config):
"""List domains under an Azure subscription.
List all the domains under an Azure subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Domain
:rtype:
~azure.mgmt.eventgrid.models.DomainPaged[~azure.mgmt.eventgrid.models.Domain]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.DomainPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DomainPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.EventGrid/domains'}
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""List domains under a resource group.
List all the domains under a resource group.
:param resource_group_name: The name of the resource group within the
user's subscription.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Domain
:rtype:
~azure.mgmt.eventgrid.models.DomainPaged[~azure.mgmt.eventgrid.models.Domain]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.DomainPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DomainPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains'}
def list_shared_access_keys(
self, resource_group_name, domain_name, custom_headers=None, raw=False, **operation_config):
"""List keys for a domain.
List the two keys used to publish to a domain.
:param resource_group_name: The name of the resource group within the
user's subscription.
:type resource_group_name: str
:param domain_name: Name of the domain
:type domain_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DomainSharedAccessKeys or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.eventgrid.models.DomainSharedAccessKeys or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.list_shared_access_keys.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'domainName': self._serialize.url("domain_name", domain_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DomainSharedAccessKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
list_shared_access_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/listKeys'}
def regenerate_key(
self, resource_group_name, domain_name, key_name, custom_headers=None, raw=False, **operation_config):
"""Regenerate key for a domain.
Regenerate a shared access key for a domain.
:param resource_group_name: The name of the resource group within the
user's subscription.
:type resource_group_name: str
:param domain_name: Name of the domain
:type domain_name: str
:param key_name: Key name to regenerate key1 or key2
:type key_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DomainSharedAccessKeys or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.eventgrid.models.DomainSharedAccessKeys or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
regenerate_key_request = models.DomainRegenerateKeyRequest(key_name=key_name)
# Construct URL
url = self.regenerate_key.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'domainName': self._serialize.url("domain_name", domain_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(regenerate_key_request, 'DomainRegenerateKeyRequest')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DomainSharedAccessKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
regenerate_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/regenerateKey'}
| 45.362687 | 176 | 0.667884 |
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class DomainsOperations(object):
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-09-15-preview"
self.config = config
def get(
self, resource_group_name, domain_name, custom_headers=None, raw=False, **operation_config):
url = self.get.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'domainName': self._serialize.url("domain_name", domain_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Domain', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}'}
def _create_or_update_initial(
self, resource_group_name, domain_name, domain_info, custom_headers=None, raw=False, **operation_config):
url = self.create_or_update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'domainName': self._serialize.url("domain_name", domain_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(domain_info, 'Domain')
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('Domain', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, domain_name, domain_info, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
domain_name=domain_name,
domain_info=domain_info,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('Domain', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}'}
def _delete_initial(
self, resource_group_name, domain_name, custom_headers=None, raw=False, **operation_config):
url = self.delete.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'domainName': self._serialize.url("domain_name", domain_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, domain_name, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
domain_name=domain_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}'}
def _update_initial(
self, resource_group_name, domain_name, tags=None, custom_headers=None, raw=False, **operation_config):
domain_update_parameters = models.DomainUpdateParameters(tags=tags)
url = self.update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'domainName': self._serialize.url("domain_name", domain_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
body_content = self._serialize.body(domain_update_parameters, 'DomainUpdateParameters')
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('Domain', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, domain_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._update_initial(
resource_group_name=resource_group_name,
domain_name=domain_name,
tags=tags,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('Domain', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}'}
def list_by_subscription(
self, custom_headers=None, raw=False, **operation_config):
def internal_paging(next_link=None, raw=False):
if not next_link:
url = self.list_by_subscription.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
deserialized = models.DomainPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DomainPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.EventGrid/domains'}
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
def internal_paging(next_link=None, raw=False):
if not next_link:
url = self.list_by_resource_group.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
deserialized = models.DomainPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DomainPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains'}
def list_shared_access_keys(
self, resource_group_name, domain_name, custom_headers=None, raw=False, **operation_config):
url = self.list_shared_access_keys.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'domainName': self._serialize.url("domain_name", domain_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DomainSharedAccessKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
list_shared_access_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/listKeys'}
def regenerate_key(
            self, resource_group_name, domain_name, key_name, custom_headers=None, raw=False, **operation_config):
        """Regenerate one shared access key of an Event Grid domain.

        Issues ``POST .../domains/{domainName}/regenerateKey`` with a
        ``DomainRegenerateKeyRequest`` body and returns the refreshed
        ``DomainSharedAccessKeys``.

        :param resource_group_name: Name of the resource group.
        :param domain_name: Name of the domain.
        :param key_name: Which key to regenerate (passed through to the
            request model).
        :param custom_headers: Extra headers merged into the request.
        :param raw: If True, return the ``ClientRawResponse`` wrapper.
        :raises CloudError: On any non-200 response.
        """
        regenerate_key_request = models.DomainRegenerateKeyRequest(key_name=key_name)
        url = self.regenerate_key.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'domainName': self._serialize.url("domain_name", domain_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        # Unlike listKeys this operation carries a JSON body.
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        body_content = self._serialize.body(regenerate_key_request, 'DomainRegenerateKeyRequest')
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('DomainSharedAccessKeys', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
regenerate_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/regenerateKey'}
| true | true |
f7f9bd4802acfc26e5fcfcf2ce4b87743031bf20 | 115 | py | Python | Examples/execute_command.py | GrayHatsUWB/Slacked | 46819015c449ac5c0527e6d5fd8ed7c30ed53e02 | [
"BSD-3-Clause"
] | 3 | 2019-11-01T07:19:44.000Z | 2021-07-05T06:45:54.000Z | Examples/execute_command.py | GrayHatsUWB/Slacked | 46819015c449ac5c0527e6d5fd8ed7c30ed53e02 | [
"BSD-3-Clause"
] | null | null | null | Examples/execute_command.py | GrayHatsUWB/Slacked | 46819015c449ac5c0527e6d5fd8ed7c30ed53e02 | [
"BSD-3-Clause"
] | 2 | 2018-05-26T01:51:44.000Z | 2020-04-11T18:46:52.000Z | import subprocess
output = subprocess.run(['echo', 'You got hacked'], stdout=subprocess.PIPE)
print(output.stdout)
| 28.75 | 75 | 0.765217 | import subprocess
output = subprocess.run(['echo', 'You got hacked'], stdout=subprocess.PIPE)
print(output.stdout)
| true | true |
f7f9bda241d84da72808b519280b2468239d4042 | 6,099 | py | Python | scapy/lib/python2.7/site-packages/setuptools/build_meta.py | akellermann97/college-dump | 5c82d93767038709ad71b8f212fdb6243eeb0aec | [
"MIT"
] | 1 | 2018-10-22T10:42:08.000Z | 2018-10-22T10:42:08.000Z | scapy/lib/python2.7/site-packages/setuptools/build_meta.py | akellermann97/college-dump | 5c82d93767038709ad71b8f212fdb6243eeb0aec | [
"MIT"
] | 6 | 2018-09-20T15:27:32.000Z | 2022-03-11T23:29:42.000Z | virtual/lib/python3.6/site-packages/setuptools/build_meta.py | kd-kinuthiadavid/TWMRS | 4a5b8240226a0063bfc9e7651013947c36a7be39 | [
"MIT"
] | 3 | 2018-09-17T12:46:34.000Z | 2018-09-18T06:03:51.000Z | """A PEP 517 interface to setuptools
Previously, when a user or a command line tool (let's call it a "frontend")
needed to make a request of setuptools to take a certain action, for
example, generating a list of installation requirements, the frontend would
would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line.
PEP 517 defines a different method of interfacing with setuptools. Rather
than calling "setup.py" directly, the frontend should:
1. Set the current directory to the directory with a setup.py file
2. Import this module into a safe python interpreter (one in which
setuptools can potentially set global variables or crash hard).
3. Call one of the functions defined in PEP 517.
What each function does is defined in PEP 517. However, here is a "casual"
definition of the functions (this definition should not be relied on for
bug reports or API stability):
- `build_wheel`: build a wheel in the folder and return the basename
- `get_requires_for_build_wheel`: get the `setup_requires` to build
- `prepare_metadata_for_build_wheel`: get the `install_requires`
- `build_sdist`: build an sdist in the folder and return the basename
- `get_requires_for_build_sdist`: get the `setup_requires` to build
Again, this is not a formal definition! Just a "taste" of the module.
"""
import os
import sys
import tokenize
import shutil
import contextlib
import setuptools
import distutils
class SetupRequirementsError(BaseException):
    """Carries ``setup_requires`` specifiers out of a running setup script.

    Subclasses BaseException rather than Exception, so a setup script's
    broad ``except Exception`` handlers cannot swallow it before it reaches
    the catcher in ``_get_build_requires``.
    """
    def __init__(self, specifiers):
        # Requirement specifier strings requested by the setup script.
        self.specifiers = specifiers
class Distribution(setuptools.dist.Distribution):
    """A trap Distribution that reports build requirements instead of
    fetching them: ``fetch_build_eggs`` raises SetupRequirementsError so
    the caller can collect the specifiers."""
    def fetch_build_eggs(self, specifiers):
        # Abort the setup run, smuggling the specifiers to the caller.
        raise SetupRequirementsError(specifiers)
    @classmethod
    @contextlib.contextmanager
    def patch(cls):
        """
        Replace distutils.core.Distribution with this class
        for the duration of this context.
        """
        orig = distutils.core.Distribution
        distutils.core.Distribution = cls
        try:
            yield
        finally:
            # Always restore the original class, even if setup.py raised.
            distutils.core.Distribution = orig
def _to_str(s):
"""
Convert a filename to a string (on Python 2, explicitly
a byte string, not Unicode) as distutils checks for the
exact type str.
"""
if sys.version_info[0] == 2 and not isinstance(s, str):
# Assume it's Unicode, as that's what the PEP says
# should be provided.
return s.encode(sys.getfilesystemencoding())
return s
def _run_setup(setup_script='setup.py'):
    """Execute *setup_script* in-process as if it were run as a program.

    ``__file__`` and ``__name__`` are shadowed locally so the script sees
    the values it would get under ``python setup.py``.  ``tokenize.open``
    (present on Python 3) honours the PEP 263 coding cookie; plain
    ``open`` is the Python 2 fallback.
    """
    # Note that we can reuse our build directory between calls
    # Correctness comes first, then optimization later
    __file__ = setup_script
    __name__ = '__main__'
    f = getattr(tokenize, 'open', open)(__file__)
    # NOTE(review): the arguments are escaped backslashes, so this replaces
    # the literal two-character text ``\r\n`` in the source, not CRLF
    # bytes — confirm this is the intended normalisation.
    code = f.read().replace('\\r\\n', '\\n')
    f.close()
    exec(compile(code, __file__, 'exec'), locals())
def _fix_config(config_settings):
config_settings = config_settings or {}
config_settings.setdefault('--global-option', [])
return config_settings
def _get_build_requires(config_settings, requirements):
    """Run ``setup.py egg_info`` and collect its ``setup_requires``.

    ``Distribution.patch()`` swaps in the trap Distribution class, so any
    ``fetch_build_eggs`` call inside the setup script surfaces as
    SetupRequirementsError and its specifiers are appended to
    *requirements*.  Mutates ``sys.argv`` for the duration of the run.
    """
    config_settings = _fix_config(config_settings)
    sys.argv = sys.argv[:1] + ['egg_info'] + \
        config_settings["--global-option"]
    try:
        with Distribution.patch():
            _run_setup()
    except SetupRequirementsError as e:
        # The setup script asked for build-time requirements; record them.
        requirements += e.specifiers
    return requirements
def _get_immediate_subdirectories(a_dir):
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
def get_requires_for_build_wheel(config_settings=None):
    """PEP 517 hook: requirements needed to build a wheel."""
    return _get_build_requires(
        _fix_config(config_settings),
        requirements=['setuptools', 'wheel'],
    )
def get_requires_for_build_sdist(config_settings=None):
    """PEP 517 hook: requirements needed to build an sdist."""
    return _get_build_requires(
        _fix_config(config_settings),
        requirements=['setuptools'],
    )
def prepare_metadata_for_build_wheel(metadata_directory, config_settings=None):
    """PEP 517 hook: generate ``*.dist-info`` metadata in *metadata_directory*.

    Runs ``setup.py dist_info`` with the egg base pointed at the target
    directory, then walks down to find the generated ``.dist-info``
    directory and hoists it to the root of *metadata_directory*.
    Returns the basename of the ``.dist-info`` directory.
    """
    sys.argv = sys.argv[:1] + ['dist_info', '--egg-base', _to_str(metadata_directory)]
    _run_setup()
    dist_info_directory = metadata_directory
    while True:
        dist_infos = [f for f in os.listdir(dist_info_directory)
                      if f.endswith('.dist-info')]
        # Descend while there is exactly one subdirectory and no
        # .dist-info at the current level; stop once found.
        if len(dist_infos) == 0 and \
                len(_get_immediate_subdirectories(dist_info_directory)) == 1:
            dist_info_directory = os.path.join(
                dist_info_directory, os.listdir(dist_info_directory)[0])
            continue
        assert len(dist_infos) == 1
        break
    # PEP 517 requires that the .dist-info directory be placed in the
    # metadata_directory. To comply, we MUST copy the directory to the root
    if dist_info_directory != metadata_directory:
        shutil.move(
            os.path.join(dist_info_directory, dist_infos[0]),
            metadata_directory)
        shutil.rmtree(dist_info_directory, ignore_errors=True)
    return dist_infos[0]
def build_wheel(wheel_directory, config_settings=None,
                metadata_directory=None):
    """PEP 517 hook: build a wheel into *wheel_directory*.

    Runs ``setup.py bdist_wheel`` (which writes into ``dist``) and then
    copies the result to the requested directory.  Returns the wheel's
    basename.  Exactly one ``.whl`` is expected to be produced.
    """
    config_settings = _fix_config(config_settings)
    wheel_directory = os.path.abspath(wheel_directory)
    sys.argv = sys.argv[:1] + ['bdist_wheel'] + \
        config_settings["--global-option"]
    _run_setup()
    # NOTE(review): wheel_directory is absolute here, so it can never
    # equal the relative string 'dist'; also rmtree raises if the target
    # does not exist yet — confirm against the frontend's guarantees.
    if wheel_directory != 'dist':
        shutil.rmtree(wheel_directory)
        shutil.copytree('dist', wheel_directory)
    wheels = [f for f in os.listdir(wheel_directory)
              if f.endswith('.whl')]
    assert len(wheels) == 1
    return wheels[0]
def build_sdist(sdist_directory, config_settings=None):
    """PEP 517 hook: build an sdist into *sdist_directory*.

    Runs ``setup.py sdist`` (which writes into ``dist``) and then copies
    the result to the requested directory.  Returns the sdist's basename.
    Exactly one ``.tar.gz`` is expected to be produced.
    """
    config_settings = _fix_config(config_settings)
    sdist_directory = os.path.abspath(sdist_directory)
    sys.argv = sys.argv[:1] + ['sdist'] + \
        config_settings["--global-option"]
    _run_setup()
    # NOTE(review): sdist_directory is absolute, so the comparison with
    # the relative string 'dist' is effectively always true — confirm.
    if sdist_directory != 'dist':
        shutil.rmtree(sdist_directory)
        shutil.copytree('dist', sdist_directory)
    sdists = [f for f in os.listdir(sdist_directory)
              if f.endswith('.tar.gz')]
    assert len(sdists) == 1
    return sdists[0]
| 32.967568 | 86 | 0.696836 |
import os
import sys
import tokenize
import shutil
import contextlib
import setuptools
import distutils
class SetupRequirementsError(BaseException):
def __init__(self, specifiers):
self.specifiers = specifiers
class Distribution(setuptools.dist.Distribution):
def fetch_build_eggs(self, specifiers):
raise SetupRequirementsError(specifiers)
@classmethod
@contextlib.contextmanager
def patch(cls):
orig = distutils.core.Distribution
distutils.core.Distribution = cls
try:
yield
finally:
distutils.core.Distribution = orig
def _to_str(s):
if sys.version_info[0] == 2 and not isinstance(s, str):
return s.encode(sys.getfilesystemencoding())
return s
def _run_setup(setup_script='setup.py'):
__file__ = setup_script
__name__ = '__main__'
f = getattr(tokenize, 'open', open)(__file__)
code = f.read().replace('\\r\\n', '\\n')
f.close()
exec(compile(code, __file__, 'exec'), locals())
def _fix_config(config_settings):
config_settings = config_settings or {}
config_settings.setdefault('--global-option', [])
return config_settings
def _get_build_requires(config_settings, requirements):
config_settings = _fix_config(config_settings)
sys.argv = sys.argv[:1] + ['egg_info'] + \
config_settings["--global-option"]
try:
with Distribution.patch():
_run_setup()
except SetupRequirementsError as e:
requirements += e.specifiers
return requirements
def _get_immediate_subdirectories(a_dir):
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
def get_requires_for_build_wheel(config_settings=None):
config_settings = _fix_config(config_settings)
return _get_build_requires(config_settings, requirements=['setuptools', 'wheel'])
def get_requires_for_build_sdist(config_settings=None):
config_settings = _fix_config(config_settings)
return _get_build_requires(config_settings, requirements=['setuptools'])
def prepare_metadata_for_build_wheel(metadata_directory, config_settings=None):
sys.argv = sys.argv[:1] + ['dist_info', '--egg-base', _to_str(metadata_directory)]
_run_setup()
dist_info_directory = metadata_directory
while True:
dist_infos = [f for f in os.listdir(dist_info_directory)
if f.endswith('.dist-info')]
if len(dist_infos) == 0 and \
len(_get_immediate_subdirectories(dist_info_directory)) == 1:
dist_info_directory = os.path.join(
dist_info_directory, os.listdir(dist_info_directory)[0])
continue
assert len(dist_infos) == 1
break
if dist_info_directory != metadata_directory:
shutil.move(
os.path.join(dist_info_directory, dist_infos[0]),
metadata_directory)
shutil.rmtree(dist_info_directory, ignore_errors=True)
return dist_infos[0]
def build_wheel(wheel_directory, config_settings=None,
metadata_directory=None):
config_settings = _fix_config(config_settings)
wheel_directory = os.path.abspath(wheel_directory)
sys.argv = sys.argv[:1] + ['bdist_wheel'] + \
config_settings["--global-option"]
_run_setup()
if wheel_directory != 'dist':
shutil.rmtree(wheel_directory)
shutil.copytree('dist', wheel_directory)
wheels = [f for f in os.listdir(wheel_directory)
if f.endswith('.whl')]
assert len(wheels) == 1
return wheels[0]
def build_sdist(sdist_directory, config_settings=None):
config_settings = _fix_config(config_settings)
sdist_directory = os.path.abspath(sdist_directory)
sys.argv = sys.argv[:1] + ['sdist'] + \
config_settings["--global-option"]
_run_setup()
if sdist_directory != 'dist':
shutil.rmtree(sdist_directory)
shutil.copytree('dist', sdist_directory)
sdists = [f for f in os.listdir(sdist_directory)
if f.endswith('.tar.gz')]
assert len(sdists) == 1
return sdists[0]
| true | true |
f7f9be8cd1fdd85121a701f48f4f2fe02cc3ffda | 3,758 | py | Python | Bilibili/Article/article_img.py | THFX/python-webspider | 18feaff330f069a90cc43ee5768a504f8d90e476 | [
"MIT"
] | 7 | 2019-09-02T05:33:36.000Z | 2020-08-17T07:24:51.000Z | Bilibili/Article/article_img.py | THFX/python-webspider | 18feaff330f069a90cc43ee5768a504f8d90e476 | [
"MIT"
] | null | null | null | Bilibili/Article/article_img.py | THFX/python-webspider | 18feaff330f069a90cc43ee5768a504f8d90e476 | [
"MIT"
] | 7 | 2020-04-08T07:19:17.000Z | 2021-11-01T10:42:26.000Z | import requests
import re
import os
import time
def Download():
    """Download every URL listed in ./url.txt into a timestamped directory.

    Each successfully fetched resource is written once as ``<n>.png``
    (``n`` counts up from 1).  Failing links are appended to ./errors.txt
    and skipped.  Uses the module-level ``hea`` request headers.
    """
    print("开始下载......")
    i = 1
    # Name the output directory after the current date and time,
    # e.g. 20240101_120043.
    ymd = time.strftime("%Y%m%d", time.localtime(time.time()))
    hms = time.strftime("%H%M%S", time.localtime(time.time()))
    path = str(ymd)+"_"+str(hms)
    os.mkdir("./"+path)
    print("目录已建立:"+path)
    # Read the link list, one URL per line.
    with open("./url.txt", "r") as urlfile:
        lines = urlfile.readlines()
    for line in lines:
        # strip() removes the trailing newline/whitespace; without it
        # requests.get() receives a malformed URL and 404s.
        line = line.strip()
        print("\n\n"+line)
        try:
            # timeout=(connect, read) keeps a dead server from hanging us.
            response = requests.get(line, headers=hea, timeout=(12, 60))
            img = response.content
            print(response)
            # Write the image exactly once.  (The previous version wrote
            # the same bytes 99 times in a loop and never closed the file:
            # `f.close` without parentheses is a no-op.)
            with open("./"+path+"/"+str(i)+".png", "wb") as f:
                f.write(img)
        except Exception:
            # Record the failing link and move on to the next one.
            with open("./errors.txt", "a", encoding='utf-8') as data2:
                data2.write(line+"\n")
            print("!!!出现错误!!!\n出错链接已保存至errors.txt")
            continue
        i += 1
def Getsource():
    """Prompt for a URL, save its HTML, and extract image links into ./url.txt.

    Two regexes are applied to the page source:
    * ``meta itemprop="image" content="..."`` — full URLs, and
    * ``img data-src="..." width="`` — protocol-relative URLs, which are
      prefixed with ``http:``.
    Every match is appended to ./url.txt for Download() to consume.
    """
    inputurl = input('请输入网址(含http):')
    html = requests.get(inputurl, headers=hea, timeout=(72, 120))
    # Decode the response as UTF-8 regardless of what the server claims.
    html.encoding = 'utf-8'
    print("即将显示网页源码\n")
    time.sleep(2)
    print(html.text)
    # Keep a copy of the raw page source for debugging.
    with open("./source.html", 'w+', encoding='utf-8') as data0:
        print(html.text, file=data0)
    time.sleep(2)
    # os.system('clear')  # for Unix
    os.system('cls')  # for Windows
    # Append every extracted link to url.txt.  The with-block guarantees
    # the file is flushed and closed; the previous version opened the file
    # twice and never closed either handle.
    with open("./url.txt", "a", encoding='utf-8') as data1:
        # Pattern 1: absolute image URLs from <meta itemprop="image">.
        for each in re.findall('meta itemprop="image" content="(.*?)"', html.text):
            print(each)
            data1.write(each+"\n")
        # Pattern 2: protocol-relative URLs from <img data-src=...>.
        for each in re.findall('img data-src="(.*?)" width="', html.text):
            print("http:"+each)
            data1.write("http:"+each+"\n")
def Delfiles():
    """Interactively offer to delete the work files url.txt / errors.txt.

    Loops until the user enters one of the options 1-4.
    """
    while True:
        print("\n####文件删除选项####\n")
        print("1.删除 url.txt\n2.删除 errors.txt\n3.删除两者\n4.保留两者")
        str_in2 = input("\n\n选择:")
        # Compare with == rather than `in ('1')`: ('1') is just the string
        # '1', so the old substring test also matched empty input (pressing
        # Enter silently deleted url.txt).
        if str_in2 == '1':
            os.remove("./url.txt")
            break
        if str_in2 == '2':
            os.remove("./errors.txt")
            break
        if str_in2 == '3':
            os.remove("./url.txt")
            os.remove("./errors.txt")
            break
        if str_in2 == '4':
            break
#
#
# ==================================
# ===============main===============
# ==================================
#
#
# Browser-like User-Agent header so the target site treats this script as
# a regular browser rather than a crawler.  (Copied from a browser's
# request headers via the developer tools.)
hea = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36'}
# Fetch the page source and extract image links into url.txt.
Getsource()
#
# Ask whether to download the extracted links.
str_s1 = "\n是否下载文件? (y/n)\n选择:"
while True:
    str_in1 = input(str_s1)
    if str_in1 in ('N', 'n'):
        break
    if str_in1 in ('Y', 'y'):
        Download()
        break
#
print("\n*****运行完毕~*****")
#
# Offer to clean up the work files.
Delfiles()
import re
import os
import time
def Download():
print("开始下载......")
i = 1
ymd = time.strftime("%Y%m%d", time.localtime(time.time()))
hms = time.strftime("%H%M%S", time.localtime(time.time()))
path = str(ymd)+"_"+str(hms)
os.mkdir("./"+path)
print("目录已建立:"+path)
urlfile = open("./url.txt", "r")
lines = urlfile.readlines()
urlfile.close()
for line in lines:
line = line.strip()
print("\n\n"+line)
try:
response = requests.get(line, headers=hea, timeout=(12, 60))
img = response.content
print(response)
for j in range(1, 100):
f = open("./"+path+"/"+str(i)+".png", "wb")
f.write(img)
f.close
except Exception:
data2 = open("./errors.txt", "a", encoding='utf-8')
data2.write(line+"\n")
data2.close
print("!!!出现错误!!!\n出错链接已保存至errors.txt")
continue
i += 1
def Getsource():
inputurl = input('请输入网址(含http):')
html = requests.get(inputurl, headers=hea, timeout=(72, 120))
html.encoding = 'utf-8'
print("即将显示网页源码\n")
time.sleep(2)
print(html.text)
data0 = open("./source.html", 'w+', encoding='utf-8')
print(html.text, file=data0)
data0.close()
time.sleep(2)
ystem('cls')
text = re.findall('meta itemprop="image" content="(.*?)"', html.text)
data1 = open("./url.txt", "a", encoding='utf-8')
for each in text:
print(each)
data1.write(each+"\n")
text = re.findall('img data-src="(.*?)" width="', html.text)
#
# 输出正则提取结果至文件
data1 = open("./url.txt", "a", encoding='utf-8')
for each in text:
print("http:"+each)
# 逐行写入保存到文本文件
data1.write("http:"+each+"\n")
def Delfiles():
while True:
# 删除文件
print("\nr_s2 = "\n\n选择:"
str_in2 = input(str_s2)
if str_in2 in ('1'):
os.remove("./url.txt")
break
if str_in2 in ('2'):
os.remove("./errors.txt")
break
if str_in2 in ('3'):
os.remove("./url.txt")
os.remove("./errors.txt")
break
if str_in2 in ('4'):
break
#
#
# ==================================
# ===============main===============
# ==================================
#
#
# 设置hea,即useragent,让目标网站误以为本程序是浏览器,并非爬虫。
# 从网站的Requests Header中获取。审查元素
hea = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36'}
# 获取网页源码
Getsource()
#
# 下载文件
str_s1 = "\n是否下载文件? (y/n)\n选择:"
while True:
str_in1 = input(str_s1)
if str_in1 in ('N', 'n'):
break
if str_in1 in ('Y', 'y'):
Download()
break
#
print("\n*****运行完毕~*****")
#
# 删除文件?
Delfiles()
| true | true |
f7f9c07d2aeab74ea3d23de97a392c72a05b315f | 11,074 | py | Python | cogs/moderation.py | xxopcode90xx/DiscordChatBotProject | d9577fabe9d5b57ad31c36a6abcc8c907a15c69d | [
"Apache-2.0"
] | null | null | null | cogs/moderation.py | xxopcode90xx/DiscordChatBotProject | d9577fabe9d5b57ad31c36a6abcc8c907a15c69d | [
"Apache-2.0"
] | null | null | null | cogs/moderation.py | xxopcode90xx/DiscordChatBotProject | d9577fabe9d5b57ad31c36a6abcc8c907a15c69d | [
"Apache-2.0"
] | null | null | null | """"
Copyright © Krypton 2021 - https://github.com/kkrypt0nn
Description:
This is a template to create your own discord bot in python.
Version: 3.1.1
"""
import json
import os
import sys
import discord
from discord.ext import commands
from discord_slash import cog_ext, SlashContext
from discord_slash.utils.manage_commands import create_option
from helpers import checks
# Load the bot configuration at import time; abort with a clear message
# if config.json is missing so the cog never starts half-configured.
if not os.path.isfile("config.json"):
    sys.exit("'config.json' not found! Please add it and try again.")
else:
    with open("config.json") as file:
        config = json.load(file)
class Moderation(commands.Cog, name="moderation"):
    """Server moderation slash commands: kick, nick, ban, warn and purge.

    Every command re-checks the invoker's guild permissions at call time
    and reports outcomes through coloured embeds (red = error,
    green = success).
    """

    def __init__(self, bot):
        self.bot = bot

    @cog_ext.cog_slash(
        name='kick',
        description="Kick a user out of the server.",
        options=[
            create_option(
                name="user",
                description="The user you want to kick.",
                option_type=6,
                required=True
            ),
            create_option(
                name="reason",
                description="The reason you kicked the user.",
                option_type=3,
                required=False
            )
        ],
    )
    @checks.not_blacklisted()
    async def kick(self, context: SlashContext, user: discord.User, reason: str = "Not specified"):
        """
        Kick a user out of the server.
        """
        # get_member reads the cache; fall back to an API fetch.
        author = context.guild.get_member(context.author_id) or await context.guild.fetch_member(context.author_id)
        if not author.guild_permissions.kick_members:
            embed = discord.Embed(
                title="Error!",
                description="You don't have enough permissions to kick this user.",
                color=0xE02B2B
            )
            return await context.send(embed=embed)
        member = context.guild.get_member(user.id) or await context.guild.fetch_member(user.id)
        if member.guild_permissions.administrator:
            embed = discord.Embed(
                title="Error!",
                description="User has Admin permissions.",
                color=0xE02B2B
            )
            await context.send(embed=embed)
        else:
            try:
                await member.kick(reason=reason)
                embed = discord.Embed(
                    title="User Kicked!",
                    description=f"**{member}** was kicked by **{context.author}**!",
                    color=0x42F56C
                )
                embed.add_field(
                    name="Reason:",
                    value=reason
                )
                await context.send(embed=embed)
                try:
                    # Best effort: the DM fails if the user blocks DMs.
                    await member.send(
                        f"You were kicked by **{context.author}**!\nReason: {reason}"
                    )
                except:
                    pass
            except:
                embed = discord.Embed(
                    title="Error!",
                    description="An error occurred while trying to kick the user. Make sure my role is above the role of the user you want to kick.",
                    color=0xE02B2B
                )
                # NOTE(review): other commands respond via context.send;
                # confirm SlashContext actually exposes `.message` here.
                await context.message.channel.send(embed=embed)

    @cog_ext.cog_slash(
        name='nick',
        description="Change the nickname of a user on a server.",
        options=[
            create_option(
                name="user",
                description="The user you want to change the nickname.",
                option_type=6,
                required=True
            ),
            create_option(
                name="nickname",
                description="The new nickname of the user.",
                option_type=3,
                required=False
            )
        ],
    )
    @checks.not_blacklisted()
    async def nick(self, context: SlashContext, user: discord.User, nickname: str = None):
        """
        Change the nickname of a user on a server.
        """
        author = context.guild.get_member(context.author_id) or await context.guild.fetch_member(context.author_id)
        if not author.guild_permissions.manage_nicknames:
            embed = discord.Embed(
                title="Error!",
                description="You don't have enough permissions to change the nickname of this user.",
                color=0xE02B2B
            )
            return await context.send(embed=embed)
        member = context.guild.get_member(user.id) or await context.guild.fetch_member(user.id)
        try:
            # nickname=None resets the member to their username.
            await member.edit(nick=nickname)
            embed = discord.Embed(
                title="Changed Nickname!",
                description=f"**{member}'s** new nickname is **{nickname}**!",
                color=0x42F56C
            )
            await context.send(embed=embed)
        except:
            embed = discord.Embed(
                title="Error!",
                description="An error occurred while trying to change the nickname of the user. Make sure my role is above the role of the user you want to change the nickname.",
                color=0xE02B2B
            )
            # NOTE(review): see kick() — confirm `.message` exists on
            # SlashContext before relying on this error path.
            await context.message.channel.send(embed=embed)

    @cog_ext.cog_slash(
        name='ban',
        description="Bans a user from the server.",
        options=[
            create_option(
                name="user",
                description="The user you want to ban.",
                option_type=6,
                required=True
            ),
            create_option(
                name="reason",
                description="The reason you banned the user.",
                option_type=3,
                required=False
            )
        ],
    )
    @checks.not_blacklisted()
    async def ban(self, context, user: discord.User, reason: str = "Not specified"):
        """
        Bans a user from the server.
        """
        author = context.guild.get_member(context.author_id) or await context.guild.fetch_member(context.author_id)
        if not author.guild_permissions.ban_members:
            embed = discord.Embed(
                title="Error!",
                description="You don't have enough permissions to ban this user.",
                color=0xE02B2B
            )
            return await context.send(embed=embed)
        member = context.guild.get_member(user.id) or await context.guild.fetch_member(user.id)
        try:
            if member.guild_permissions.administrator:
                embed = discord.Embed(
                    title="Error!",
                    description="User has Admin permissions.",
                    color=0xE02B2B
                )
                await context.send(embed=embed)
            else:
                await member.ban(reason=reason)
                embed = discord.Embed(
                    title="User Banned!",
                    description=f"**{member}** was banned by **{context.author}**!",
                    color=0x42F56C
                )
                embed.add_field(
                    name="Reason:",
                    value=reason
                )
                await context.send(embed=embed)
                # Guard the DM like kick/warn do: previously a user with
                # closed DMs made a *successful* ban report as an error.
                try:
                    await member.send(f"You were banned by **{context.author}**!\nReason: {reason}")
                except:
                    pass
        except:
            embed = discord.Embed(
                title="Error!",
                description="An error occurred while trying to ban the user. Make sure my role is above the role of the user you want to ban.",
                color=0xE02B2B
            )
            await context.send(embed=embed)

    @cog_ext.cog_slash(
        name='warn',
        description="Warns a user from the server.",
        options=[
            create_option(
                name="user",
                description="The user you want to warn.",
                option_type=6,
                required=True
            ),
            create_option(
                name="reason",
                description="The reason you warned the user.",
                option_type=3,
                required=False
            )
        ],
    )
    @checks.not_blacklisted()
    async def warn(self, context, user: discord.User, reason: str = "Not specified"):
        """
        Warns a user in his private messages.
        """
        author = context.guild.get_member(context.author_id) or await context.guild.fetch_member(context.author_id)
        if not author.guild_permissions.manage_messages:
            embed = discord.Embed(
                title="Error!",
                description="You don't have enough permissions to warn this user.",
                color=0xE02B2B
            )
            return await context.send(embed=embed)
        member = context.guild.get_member(user.id) or await context.guild.fetch_member(user.id)
        embed = discord.Embed(
            title="User Warned!",
            description=f"**{member}** was warned by **{context.author}**!",
            color=0x42F56C
        )
        embed.add_field(
            name="Reason:",
            value=reason
        )
        await context.send(embed=embed)
        try:
            # Best effort: the DM fails if the user blocks DMs.
            await member.send(f"You were warned by **{context.author}**!\nReason: {reason}")
        except:
            pass

    @cog_ext.cog_slash(
        name='purge',
        description="Delete a number of messages.",
        options=[
            create_option(
                name="amount",
                description="The amount of messages you want to delete.",
                option_type=4,
                required=True
            )
        ],
    )
    @checks.not_blacklisted()
    async def purge(self, context, amount: int):
        """
        Delete a number of messages.
        """
        author = context.guild.get_member(context.author_id) or await context.guild.fetch_member(context.author_id)
        # Requires BOTH manage_messages and manage_channels.
        if not author.guild_permissions.manage_messages or not author.guild_permissions.manage_channels:
            embed = discord.Embed(
                title="Error!",
                description="You don't have enough permissions purge the chat.",
                color=0xE02B2B
            )
            return await context.send(embed=embed)
        # Defensive re-validation; the slash option is already an integer.
        try:
            amount = int(amount)
        except:
            embed = discord.Embed(
                title="Error!",
                description=f"`{amount}` is not a valid number.",
                color=0xE02B2B
            )
            await context.send(embed=embed)
            return
        if amount < 1:
            embed = discord.Embed(
                title="Error!",
                description=f"`{amount}` is not a valid number.",
                color=0xE02B2B
            )
            await context.send(embed=embed)
            return
        purged_messages = await context.channel.purge(limit=amount)
        embed = discord.Embed(
            title="Chat Cleared!",
            description=f"**{context.author}** cleared **{len(purged_messages)}** messages!",
            color=0x42F56C
        )
        await context.send(embed=embed)
def setup(bot):
    # Standard discord.py extension entry point: register this cog.
    bot.add_cog(Moderation(bot))
| 36.071661 | 178 | 0.530793 |
import json
import os
import sys
import discord
from discord.ext import commands
from discord_slash import cog_ext, SlashContext
from discord_slash.utils.manage_commands import create_option
from helpers import checks
if not os.path.isfile("config.json"):
sys.exit("'config.json' not found! Please add it and try again.")
else:
with open("config.json") as file:
config = json.load(file)
class Moderation(commands.Cog, name="moderation"):
def __init__(self, bot):
self.bot = bot
@cog_ext.cog_slash(
name='kick',
description="Kick a user out of the server.",
options=[
create_option(
name="user",
description="The user you want to kick.",
option_type=6,
required=True
),
create_option(
name="reason",
description="The reason you kicked the user.",
option_type=3,
required=False
)
],
)
@checks.not_blacklisted()
async def kick(self, context: SlashContext, user: discord.User, reason: str = "Not specified"):
author = context.guild.get_member(context.author_id) or await context.guild.fetch_member(context.author_id)
if not author.guild_permissions.kick_members:
embed = discord.Embed(
title="Error!",
description="You don't have enough permissions to kick this user.",
color=0xE02B2B
)
return await context.send(embed=embed)
member = context.guild.get_member(user.id) or await context.guild.fetch_member(user.id)
if member.guild_permissions.administrator:
embed = discord.Embed(
title="Error!",
description="User has Admin permissions.",
color=0xE02B2B
)
await context.send(embed=embed)
else:
try:
await member.kick(reason=reason)
embed = discord.Embed(
title="User Kicked!",
description=f"**{member}** was kicked by **{context.author}**!",
color=0x42F56C
)
embed.add_field(
name="Reason:",
value=reason
)
await context.send(embed=embed)
try:
await member.send(
f"You were kicked by **{context.author}**!\nReason: {reason}"
)
except:
pass
except:
embed = discord.Embed(
title="Error!",
description="An error occurred while trying to kick the user. Make sure my role is above the role of the user you want to kick.",
color=0xE02B2B
)
await context.message.channel.send(embed=embed)
@cog_ext.cog_slash(
name='nick',
description="Change the nickname of a user on a server.",
options=[
create_option(
name="user",
description="The user you want to change the nickname.",
option_type=6,
required=True
),
create_option(
name="nickname",
description="The new nickname of the user.",
option_type=3,
required=False
)
],
)
@checks.not_blacklisted()
async def nick(self, context: SlashContext, user: discord.User, nickname: str = None):
author = context.guild.get_member(context.author_id) or await context.guild.fetch_member(context.author_id)
if not author.guild_permissions.manage_nicknames:
embed = discord.Embed(
title="Error!",
description="You don't have enough permissions to change the nickname of this user.",
color=0xE02B2B
)
return await context.send(embed=embed)
member = context.guild.get_member(user.id) or await context.guild.fetch_member(user.id)
try:
await member.edit(nick=nickname)
embed = discord.Embed(
title="Changed Nickname!",
description=f"**{member}'s** new nickname is **{nickname}**!",
color=0x42F56C
)
await context.send(embed=embed)
except:
embed = discord.Embed(
title="Error!",
description="An error occurred while trying to change the nickname of the user. Make sure my role is above the role of the user you want to change the nickname.",
color=0xE02B2B
)
await context.message.channel.send(embed=embed)
@cog_ext.cog_slash(
name='ban',
description="Bans a user from the server.",
options=[
create_option(
name="user",
description="The user you want to ban.",
option_type=6,
required=True
),
create_option(
name="reason",
description="The reason you banned the user.",
option_type=3,
required=False
)
],
)
@checks.not_blacklisted()
async def ban(self, context, user: discord.User, reason: str = "Not specified"):
author = context.guild.get_member(context.author_id) or await context.guild.fetch_member(context.author_id)
if not author.guild_permissions.ban_members:
embed = discord.Embed(
title="Error!",
description="You don't have enough permissions to ban this user.",
color=0xE02B2B
)
return await context.send(embed=embed)
member = context.guild.get_member(user.id) or await context.guild.fetch_member(user.id)
try:
if member.guild_permissions.administrator:
embed = discord.Embed(
title="Error!",
description="User has Admin permissions.",
color=0xE02B2B
)
await context.send(embed=embed)
else:
await member.ban(reason=reason)
embed = discord.Embed(
title="User Banned!",
description=f"**{member}** was banned by **{context.author}**!",
color=0x42F56C
)
embed.add_field(
name="Reason:",
value=reason
)
await context.send(embed=embed)
await member.send(f"You were banned by **{context.author}**!\nReason: {reason}")
except:
embed = discord.Embed(
title="Error!",
description="An error occurred while trying to ban the user. Make sure my role is above the role of the user you want to ban.",
color=0xE02B2B
)
await context.send(embed=embed)
@cog_ext.cog_slash(
name='warn',
description="Warns a user from the server.",
options=[
create_option(
name="user",
description="The user you want to warn.",
option_type=6,
required=True
),
create_option(
name="reason",
description="The reason you warned the user.",
option_type=3,
required=False
)
],
)
@checks.not_blacklisted()
async def warn(self, context, user: discord.User, reason: str = "Not specified"):
author = context.guild.get_member(context.author_id) or await context.guild.fetch_member(context.author_id)
if not author.guild_permissions.manage_messages:
embed = discord.Embed(
title="Error!",
description="You don't have enough permissions to warn this user.",
color=0xE02B2B
)
return await context.send(embed=embed)
member = context.guild.get_member(user.id) or await context.guild.fetch_member(user.id)
embed = discord.Embed(
title="User Warned!",
description=f"**{member}** was warned by **{context.author}**!",
color=0x42F56C
)
embed.add_field(
name="Reason:",
value=reason
)
await context.send(embed=embed)
try:
await member.send(f"You were warned by **{context.author}**!\nReason: {reason}")
except:
pass
@cog_ext.cog_slash(
name='purge',
description="Delete a number of messages.",
options=[
create_option(
name="amount",
description="The amount of messages you want to delete.",
option_type=4,
required=True
)
],
)
@checks.not_blacklisted()
async def purge(self, context, amount: int):
author = context.guild.get_member(context.author_id) or await context.guild.fetch_member(context.author_id)
if not author.guild_permissions.manage_messages or not author.guild_permissions.manage_channels:
embed = discord.Embed(
title="Error!",
description="You don't have enough permissions purge the chat.",
color=0xE02B2B
)
return await context.send(embed=embed)
try:
amount = int(amount)
except:
embed = discord.Embed(
title="Error!",
description=f"`{amount}` is not a valid number.",
color=0xE02B2B
)
await context.send(embed=embed)
return
if amount < 1:
embed = discord.Embed(
title="Error!",
description=f"`{amount}` is not a valid number.",
color=0xE02B2B
)
await context.send(embed=embed)
return
purged_messages = await context.channel.purge(limit=amount)
embed = discord.Embed(
title="Chat Cleared!",
description=f"**{context.author}** cleared **{len(purged_messages)}** messages!",
color=0x42F56C
)
await context.send(embed=embed)
def setup(bot):
    """Extension entry point: attach the Moderation cog to ``bot``."""
    bot.add_cog(Moderation(bot))
| true | true |
f7f9c0cb72bce30399792056fa4ca80a491e9fca | 2,618 | py | Python | census/migrations/0001_initial.py | escobar022/cens_django | 984fb224d2cb7f16e1d54564a1f1fc5e8ff76e3b | [
"MIT"
] | null | null | null | census/migrations/0001_initial.py | escobar022/cens_django | 984fb224d2cb7f16e1d54564a1f1fc5e8ff76e3b | [
"MIT"
] | null | null | null | census/migrations/0001_initial.py | escobar022/cens_django | 984fb224d2cb7f16e1d54564a1f1fc5e8ff76e3b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for the census app.

    Creates three tables: APISetting (an API key/description, a 'used'
    flag stored as a string, and a selected US state), CensusInfo, and
    HousingVariable (each row points at one APISetting).
    """
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='APISetting',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('api_description', models.CharField(max_length=200)),
                ('api_key', models.CharField(max_length=200)),
                # NOTE(review): boolean-like value kept as a CharField
                # (default b'false') -- left untouched, historical migration.
                ('api_used', models.CharField(default=b'false', max_length=200)),
                ('state_selected', models.CharField(default=b'', max_length=2, choices=[(b'', b'Select a State'), (b'1', b'Alabama'), (b'2', b'Alaska'), (b'3', b'Arizona'), (b'4', b'Arkansas'), (b'5', b'California'), (b'6', b'Colorado'), (b'7', b'Connecticut'), (b'8', b'Delaware'), (b'9', b'Florida'), (b'10', b'Georgia'), (b'11', b'Hawaii'), (b'12', b'Idaho'), (b'13', b'Illinois'), (b'14', b'Indiana'), (b'15', b'Iowa'), (b'16', b'Kansas'), (b'17', b'Kentucky'), (b'18', b'Louisiana'), (b'19', b'Maine'), (b'20', b'Maryland'), (b'21', b'Massachusetts'), (b'22', b'Michigan'), (b'23', b'Minnesota'), (b'24', b'Mississippi'), (b'25', b'Missouri'), (b'26', b'Montana'), (b'27', b'Nebraska'), (b'28', b'Nevada'), (b'29', b'New Hampshire'), (b'30', b'New Jersey'), (b'31', b'New Mexico'), (b'32', b'New York'), (b'33', b'North Carolina'), (b'34', b'North Dakota'), (b'35', b'Ohio'), (b'36', b'Oklahoma'), (b'37', b'Oregon'), (b'38', b'Pennsylvania'), (b'39', b'Rhode Island'), (b'40', b'South Carolina'), (b'41', b'South Dakota'), (b'42', b'Tennessee'), (b'43', b'Texas'), (b'44', b'Utah'), (b'45', b'Vermont'), (b'46', b'Virginia'), (b'47', b'Washington'), (b'48', b'West Virginia'), (b'49', b'Wisconsin'), (b'50', b'Wyoming')])),
            ],
        ),
        migrations.CreateModel(
            name='CensusInfo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('census_info', models.CharField(max_length=200)),
                ('api_view_id', models.CharField(max_length=2)),
            ],
        ),
        migrations.CreateModel(
            name='HousingVariable',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('housing_variable', models.CharField(max_length=200)),
                ('view_housing_variables', models.ForeignKey(to='census.APISetting')),
            ],
        ),
    ]
| 65.45 | 1,229 | 0.559969 |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial census-app schema: APISetting, CensusInfo, HousingVariable."""
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='APISetting',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('api_description', models.CharField(max_length=200)),
                ('api_key', models.CharField(max_length=200)),
                ('api_used', models.CharField(default=b'false', max_length=200)),
                ('state_selected', models.CharField(default=b'', max_length=2, choices=[(b'', b'Select a State'), (b'1', b'Alabama'), (b'2', b'Alaska'), (b'3', b'Arizona'), (b'4', b'Arkansas'), (b'5', b'California'), (b'6', b'Colorado'), (b'7', b'Connecticut'), (b'8', b'Delaware'), (b'9', b'Florida'), (b'10', b'Georgia'), (b'11', b'Hawaii'), (b'12', b'Idaho'), (b'13', b'Illinois'), (b'14', b'Indiana'), (b'15', b'Iowa'), (b'16', b'Kansas'), (b'17', b'Kentucky'), (b'18', b'Louisiana'), (b'19', b'Maine'), (b'20', b'Maryland'), (b'21', b'Massachusetts'), (b'22', b'Michigan'), (b'23', b'Minnesota'), (b'24', b'Mississippi'), (b'25', b'Missouri'), (b'26', b'Montana'), (b'27', b'Nebraska'), (b'28', b'Nevada'), (b'29', b'New Hampshire'), (b'30', b'New Jersey'), (b'31', b'New Mexico'), (b'32', b'New York'), (b'33', b'North Carolina'), (b'34', b'North Dakota'), (b'35', b'Ohio'), (b'36', b'Oklahoma'), (b'37', b'Oregon'), (b'38', b'Pennsylvania'), (b'39', b'Rhode Island'), (b'40', b'South Carolina'), (b'41', b'South Dakota'), (b'42', b'Tennessee'), (b'43', b'Texas'), (b'44', b'Utah'), (b'45', b'Vermont'), (b'46', b'Virginia'), (b'47', b'Washington'), (b'48', b'West Virginia'), (b'49', b'Wisconsin'), (b'50', b'Wyoming')])),
            ],
        ),
        migrations.CreateModel(
            name='CensusInfo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('census_info', models.CharField(max_length=200)),
                ('api_view_id', models.CharField(max_length=2)),
            ],
        ),
        migrations.CreateModel(
            name='HousingVariable',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('housing_variable', models.CharField(max_length=200)),
                # Many housing variables per APISetting.
                ('view_housing_variables', models.ForeignKey(to='census.APISetting')),
            ],
        ),
    ]
| true | true |
f7f9c11e3b690ded7d61e1f4f5a7684443accee3 | 4,367 | py | Python | schedule/urls.py | drodger/django-scheduler | 564b47da32d909188e2563a0b4312fbaebe3f309 | [
"BSD-3-Clause"
] | null | null | null | schedule/urls.py | drodger/django-scheduler | 564b47da32d909188e2563a0b4312fbaebe3f309 | [
"BSD-3-Clause"
] | null | null | null | schedule/urls.py | drodger/django-scheduler | 564b47da32d909188e2563a0b4312fbaebe3f309 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import url
from django.views.generic.list import ListView
from schedule.feeds import CalendarICalendar, UpcomingEventsFeed
from schedule.models import Calendar
from schedule.periods import Day, Month, Week, Year
from schedule.views import (
CalendarByPeriodsView, CalendarView, CancelOccurrenceView, CreateEventView,
CreateOccurrenceView, DeleteEventView, EditEventView, EditOccurrenceView,
EventView, FullCalendarView, OccurrencePreview, OccurrenceView,
api_move_or_resize_by_code, api_occurrences, api_select_create,
)
urlpatterns = [
    url(r'^$', ListView.as_view(model=Calendar), name='calendar_list'),
    # Period-based calendar views (year / tri-month / compact / month /
    # week / day), all served by CalendarByPeriodsView with a per-period
    # template and the period class passed through kwargs.
    url(r'^calendar/year/(?P<calendar_slug>[-\w]+)/$',
        CalendarByPeriodsView.as_view(template_name='schedule/calendar_year.html'),
        name='year_calendar',
        kwargs={'period': Year}),
    url(r'^calendar/tri_month/(?P<calendar_slug>[-\w]+)/$',
        CalendarByPeriodsView.as_view(template_name='schedule/calendar_tri_month.html'),
        name='tri_month_calendar',
        kwargs={'period': Month}),
    url(r'^calendar/compact_month/(?P<calendar_slug>[-\w]+)/$',
        CalendarByPeriodsView.as_view(template_name='schedule/calendar_compact_month.html'),
        name='compact_calendar',
        kwargs={'period': Month}),
    url(r'^calendar/month/(?P<calendar_slug>[-\w]+)/$',
        CalendarByPeriodsView.as_view(template_name='schedule/calendar_month.html'),
        name='month_calendar',
        kwargs={'period': Month}),
    url(r'^calendar/week/(?P<calendar_slug>[-\w]+)/$',
        CalendarByPeriodsView.as_view(template_name='schedule/calendar_week.html'),
        name='week_calendar',
        kwargs={'period': Week}),
    url(r'^calendar/daily/(?P<calendar_slug>[-\w]+)/$',
        CalendarByPeriodsView.as_view(template_name='schedule/calendar_day.html'),
        name='day_calendar',
        kwargs={'period': Day}),
    url(r'^calendar/(?P<calendar_slug>[-\w]+)/$',
        CalendarView.as_view(),
        name='calendar_home',
        ),
    url(r'^fullcalendar/(?P<calendar_slug>[-\w]+)/$',
        FullCalendarView.as_view(),
        name='fullcalendar'),
    # Event Urls
    url(r'^event/create/(?P<calendar_slug>[-\w]+)/$',
        CreateEventView.as_view(),
        name='calendar_create_event'),
    url(r'^event/edit/(?P<calendar_slug>[-\w]+)/(?P<event_id>\d+)/$',
        EditEventView.as_view(),
        name='edit_event'),
    url(r'^event/(?P<event_id>\d+)/$',
        EventView.as_view(),
        name='event'),
    url(r'^event/delete/(?P<event_id>\d+)/$',
        DeleteEventView.as_view(),
        name='delete_event'),
    # urls for already persisted occurrences
    url(r'^occurrence/(?P<event_id>\d+)/(?P<occurrence_id>\d+)/$',
        OccurrenceView.as_view(),
        name='occurrence'),
    url(r'^occurrence/cancel/(?P<event_id>\d+)/(?P<occurrence_id>\d+)/$',
        CancelOccurrenceView.as_view(),
        name='cancel_occurrence'),
    url(r'^occurrence/edit/(?P<event_id>\d+)/(?P<occurrence_id>\d+)/$',
        EditOccurrenceView.as_view(),
        name='edit_occurrence'),
    # urls for unpersisted occurrences (addressed by their date/time parts)
    url(r'^occurrence/(?P<event_id>\d+)/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/(?P<hour>\d+)/(?P<minute>\d+)/(?P<second>\d+)/$',
        OccurrencePreview.as_view(),
        name='occurrence_by_date'),
    url(r'^occurrence/cancel/(?P<event_id>\d+)/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/(?P<hour>\d+)/(?P<minute>\d+)/(?P<second>\d+)/$',
        CancelOccurrenceView.as_view(),
        name='cancel_occurrence_by_date'),
    url(r'^occurrence/edit/(?P<event_id>\d+)/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/(?P<hour>\d+)/(?P<minute>\d+)/(?P<second>\d+)/$',
        CreateOccurrenceView.as_view(),
        name='edit_occurrence_by_date'),
    # feed urls
    url(r'^feed/calendar/upcoming/(?P<calendar_id>\d+)/$', UpcomingEventsFeed(), name='upcoming_events_feed'),
    url(r'^ical/calendar/(.*)/$', CalendarICalendar(), name='calendar_ical'),
    # api urls
    # NOTE(review): the URL name 'api_occurences' is misspelled; renaming it
    # would break reverse()/{% url %} lookups elsewhere -- confirm before fixing.
    url(r'^api/occurrences', api_occurrences, name='api_occurences'),
    url(r'^api/move_or_resize/$',
        api_move_or_resize_by_code,
        name='api_move_or_resize'),
    url(r'^api/select_create/$',
        api_select_create,
        name='api_select_create'),
    # NOTE(review): '^$' duplicates the first pattern above; requests match the
    # earlier entry first, so this one mainly provides the 'schedule' URL name.
    url(r'^$', ListView.as_view(queryset=Calendar.objects.all()), name='schedule'),
]
| 41.590476 | 138 | 0.641401 | from django.conf.urls import url
from django.views.generic.list import ListView
from schedule.feeds import CalendarICalendar, UpcomingEventsFeed
from schedule.models import Calendar
from schedule.periods import Day, Month, Week, Year
from schedule.views import (
CalendarByPeriodsView, CalendarView, CancelOccurrenceView, CreateEventView,
CreateOccurrenceView, DeleteEventView, EditEventView, EditOccurrenceView,
EventView, FullCalendarView, OccurrencePreview, OccurrenceView,
api_move_or_resize_by_code, api_occurrences, api_select_create,
)
urlpatterns = [
    url(r'^$', ListView.as_view(model=Calendar), name='calendar_list'),
    # Period-based calendar views, each with its own template and period class.
    url(r'^calendar/year/(?P<calendar_slug>[-\w]+)/$',
        CalendarByPeriodsView.as_view(template_name='schedule/calendar_year.html'),
        name='year_calendar',
        kwargs={'period': Year}),
    url(r'^calendar/tri_month/(?P<calendar_slug>[-\w]+)/$',
        CalendarByPeriodsView.as_view(template_name='schedule/calendar_tri_month.html'),
        name='tri_month_calendar',
        kwargs={'period': Month}),
    url(r'^calendar/compact_month/(?P<calendar_slug>[-\w]+)/$',
        CalendarByPeriodsView.as_view(template_name='schedule/calendar_compact_month.html'),
        name='compact_calendar',
        kwargs={'period': Month}),
    url(r'^calendar/month/(?P<calendar_slug>[-\w]+)/$',
        CalendarByPeriodsView.as_view(template_name='schedule/calendar_month.html'),
        name='month_calendar',
        kwargs={'period': Month}),
    url(r'^calendar/week/(?P<calendar_slug>[-\w]+)/$',
        CalendarByPeriodsView.as_view(template_name='schedule/calendar_week.html'),
        name='week_calendar',
        kwargs={'period': Week}),
    url(r'^calendar/daily/(?P<calendar_slug>[-\w]+)/$',
        CalendarByPeriodsView.as_view(template_name='schedule/calendar_day.html'),
        name='day_calendar',
        kwargs={'period': Day}),
    url(r'^calendar/(?P<calendar_slug>[-\w]+)/$',
        CalendarView.as_view(),
        name='calendar_home',
        ),
    url(r'^fullcalendar/(?P<calendar_slug>[-\w]+)/$',
        FullCalendarView.as_view(),
        name='fullcalendar'),
    # Event CRUD views.
    url(r'^event/create/(?P<calendar_slug>[-\w]+)/$',
        CreateEventView.as_view(),
        name='calendar_create_event'),
    url(r'^event/edit/(?P<calendar_slug>[-\w]+)/(?P<event_id>\d+)/$',
        EditEventView.as_view(),
        name='edit_event'),
    url(r'^event/(?P<event_id>\d+)/$',
        EventView.as_view(),
        name='event'),
    url(r'^event/delete/(?P<event_id>\d+)/$',
        DeleteEventView.as_view(),
        name='delete_event'),
    # Already-persisted occurrences (addressed by id).
    url(r'^occurrence/(?P<event_id>\d+)/(?P<occurrence_id>\d+)/$',
        OccurrenceView.as_view(),
        name='occurrence'),
    url(r'^occurrence/cancel/(?P<event_id>\d+)/(?P<occurrence_id>\d+)/$',
        CancelOccurrenceView.as_view(),
        name='cancel_occurrence'),
    url(r'^occurrence/edit/(?P<event_id>\d+)/(?P<occurrence_id>\d+)/$',
        EditOccurrenceView.as_view(),
        name='edit_occurrence'),
    # Unpersisted occurrences (addressed by their date/time components).
    url(r'^occurrence/(?P<event_id>\d+)/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/(?P<hour>\d+)/(?P<minute>\d+)/(?P<second>\d+)/$',
        OccurrencePreview.as_view(),
        name='occurrence_by_date'),
    url(r'^occurrence/cancel/(?P<event_id>\d+)/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/(?P<hour>\d+)/(?P<minute>\d+)/(?P<second>\d+)/$',
        CancelOccurrenceView.as_view(),
        name='cancel_occurrence_by_date'),
    url(r'^occurrence/edit/(?P<event_id>\d+)/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/(?P<hour>\d+)/(?P<minute>\d+)/(?P<second>\d+)/$',
        CreateOccurrenceView.as_view(),
        name='edit_occurrence_by_date'),
    # Feeds.
    url(r'^feed/calendar/upcoming/(?P<calendar_id>\d+)/$', UpcomingEventsFeed(), name='upcoming_events_feed'),
    url(r'^ical/calendar/(.*)/$', CalendarICalendar(), name='calendar_ical'),
    # API endpoints.  NOTE(review): 'api_occurences' is misspelled but kept,
    # since reverse() lookups may depend on it.
    url(r'^api/occurrences', api_occurrences, name='api_occurences'),
    url(r'^api/move_or_resize/$',
        api_move_or_resize_by_code,
        name='api_move_or_resize'),
    url(r'^api/select_create/$',
        api_select_create,
        name='api_select_create'),
    # NOTE(review): '^$' duplicates the first pattern; requests hit the earlier
    # one, this entry mainly serves the 'schedule' URL name.
    url(r'^$', ListView.as_view(queryset=Calendar.objects.all()), name='schedule'),
]
| true | true |
f7f9c16fda69f33e7ba7f55acdf3328788822fde | 641 | py | Python | setup.py | DESHRAJ/fjord | 8899b6286b23347c9b024334e61c33fe133e836d | [
"BSD-3-Clause"
] | 16 | 2015-02-06T14:35:57.000Z | 2021-07-10T11:14:00.000Z | setup.py | DESHRAJ/fjord | 8899b6286b23347c9b024334e61c33fe133e836d | [
"BSD-3-Clause"
] | 310 | 2015-01-07T14:39:35.000Z | 2016-05-02T17:41:30.000Z | setup.py | DESHRAJ/fjord | 8899b6286b23347c9b024334e61c33fe133e836d | [
"BSD-3-Clause"
] | 22 | 2015-01-15T13:46:03.000Z | 2020-07-24T10:08:51.000Z | import os
from setuptools import setup, find_packages
# Packaging metadata for the fjord Django application.
setup(
    name='fjord',
    version='1.0',
    description='Django application.',
    long_description='',
    author='Mozilla Foundation',
    author_email='',
    license='BSD',
    url='https://github.com/mozilla/fjord',
    # Bundle non-Python data files found inside the packages.
    include_package_data=True,
    classifiers=[
        'Framework :: Django',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
    # Everything except the test suite ships in the distribution.
    packages=find_packages(exclude=['tests']),
    # No install-time requirements are declared here.
    install_requires=[],
)
| 24.653846 | 49 | 0.613105 | import os
from setuptools import setup, find_packages
# Packaging metadata for the fjord Django application.
setup(
    name='fjord',
    version='1.0',
    description='Django application.',
    long_description='',
    author='Mozilla Foundation',
    author_email='',
    license='BSD',
    url='https://github.com/mozilla/fjord',
    include_package_data=True,
    classifiers=[
        'Framework :: Django',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
    packages=find_packages(exclude=['tests']),
    install_requires=[],
)
| true | true |
f7f9c3220e75629f8f0dcc88021598b779e44f1c | 9,132 | py | Python | deep-learning/GANs and Variational Autoencoders/BigGAN-PyTorch/train.py | DanielMabadeje/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 7adab3877fc1d3f1d5f57e6c1743dae8f76f72c5 | [
"Apache-2.0"
] | 3,266 | 2017-08-06T16:51:46.000Z | 2022-03-30T07:34:24.000Z | train.py | darthsuogles/BigGAN-PyTorch | 6988f1f3ccfa4f6794ce269f056422da4ce9baf6 | [
"MIT"
] | 150 | 2017-08-28T14:59:36.000Z | 2022-03-11T23:21:35.000Z | train.py | darthsuogles/BigGAN-PyTorch | 6988f1f3ccfa4f6794ce269f056422da4ce9baf6 | [
"MIT"
] | 1,449 | 2017-08-06T17:40:59.000Z | 2022-03-31T12:03:24.000Z | """ BigGAN: The Authorized Unofficial PyTorch release
Code by A. Brock and A. Andonian
This code is an unofficial reimplementation of
"Large-Scale GAN Training for High Fidelity Natural Image Synthesis,"
by A. Brock, J. Donahue, and K. Simonyan (arXiv 1809.11096).
Let's go.
"""
import os
import functools
import math
import numpy as np
from tqdm import tqdm, trange
import torch
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter as P
import torchvision
# Import my stuff
import inception_utils
import utils
import losses
import train_fns
from sync_batchnorm import patch_replication_callback
# The main training file. Config is a dictionary specifying the configuration
# of this training run.
def run(config):
  """Run BigGAN training as described by ``config``.

  ``config`` is the argparse-derived dict from ``utils.prepare_parser``;
  it is mutated in place with derived settings (resolution, class count,
  activations) before models, loaders, loggers and inception metrics are
  built and the epoch/iteration training loop starts.
  """
  # Update the config dict as necessary
  # This is for convenience, to add settings derived from the user-specified
  # configuration into the config-dict (e.g. inferring the number of classes
  # and size of the images from the dataset, passing in a pytorch object
  # for the activation specified as a string)
  config['resolution'] = utils.imsize_dict[config['dataset']]
  config['n_classes'] = utils.nclass_dict[config['dataset']]
  config['G_activation'] = utils.activation_dict[config['G_nl']]
  config['D_activation'] = utils.activation_dict[config['D_nl']]
  # By default, skip init if resuming training.
  if config['resume']:
    print('Skipping initialization for training resumption...')
    config['skip_init'] = True
  config = utils.update_config_roots(config)
  device = 'cuda'
  # Seed RNG
  utils.seed_rng(config['seed'])
  # Prepare root folders if necessary
  utils.prepare_root(config)
  # Setup cudnn.benchmark for free speed
  torch.backends.cudnn.benchmark = True
  # Import the model--this line allows us to dynamically select different files.
  model = __import__(config['model'])
  experiment_name = (config['experiment_name'] if config['experiment_name']
                     else utils.name_from_config(config))
  print('Experiment name is %s' % experiment_name)
  # Next, build the model
  G = model.Generator(**config).to(device)
  D = model.Discriminator(**config).to(device)
  # If using EMA, prepare it
  if config['ema']:
    print('Preparing EMA for G with decay of {}'.format(config['ema_decay']))
    G_ema = model.Generator(**{**config, 'skip_init':True,
                               'no_optim': True}).to(device)
    ema = utils.ema(G, G_ema, config['ema_decay'], config['ema_start'])
  else:
    ema = None
  # FP16?
  if config['G_fp16']:
    print('Casting G to float16...')
    G = G.half()
    if config['ema']:
      G_ema = G_ema.half()
  if config['D_fp16']:
    print('Casting D to fp16...')
    D = D.half()
    # Consider automatically reducing SN_eps?
  GD = model.G_D(G, D)
  print(G)
  print(D)
  print('Number of params in G: {} D: {}'.format(
    *[sum([p.data.nelement() for p in net.parameters()]) for net in [G,D]]))
  # Prepare state dict, which holds things like epoch # and itr #
  state_dict = {'itr': 0, 'epoch': 0, 'save_num': 0, 'save_best_num': 0,
                'best_IS': 0, 'best_FID': 999999, 'config': config}
  # If loading from a pre-trained model, load weights
  if config['resume']:
    print('Loading weights...')
    utils.load_weights(G, D, state_dict,
                       config['weights_root'], experiment_name,
                       config['load_weights'] if config['load_weights'] else None,
                       G_ema if config['ema'] else None)
  # If parallel, parallelize the GD module
  if config['parallel']:
    GD = nn.DataParallel(GD)
    if config['cross_replica']:
      patch_replication_callback(GD)
  # Prepare loggers for stats; metrics holds test metrics,
  # lmetrics holds any desired training metrics.
  test_metrics_fname = '%s/%s_log.jsonl' % (config['logs_root'],
                                            experiment_name)
  train_metrics_fname = '%s/%s' % (config['logs_root'], experiment_name)
  print('Inception Metrics will be saved to {}'.format(test_metrics_fname))
  test_log = utils.MetricsLogger(test_metrics_fname,
                                 reinitialize=(not config['resume']))
  print('Training Metrics will be saved to {}'.format(train_metrics_fname))
  train_log = utils.MyLogger(train_metrics_fname,
                             reinitialize=(not config['resume']),
                             logstyle=config['logstyle'])
  # Write metadata
  utils.write_metadata(config['logs_root'], experiment_name, config, state_dict)
  # Prepare data; the Discriminator's batch size is all that needs to be passed
  # to the dataloader, as G doesn't require dataloading.
  # Note that at every loader iteration we pass in enough data to complete
  # a full D iteration (regardless of number of D steps and accumulations)
  D_batch_size = (config['batch_size'] * config['num_D_steps']
                  * config['num_D_accumulations'])
  loaders = utils.get_data_loaders(**{**config, 'batch_size': D_batch_size,
                                      'start_itr': state_dict['itr']})
  # Prepare inception metrics: FID and IS
  get_inception_metrics = inception_utils.prepare_inception_metrics(config['dataset'], config['parallel'], config['no_fid'])
  # Prepare noise and randomly sampled label arrays
  # Allow for different batch sizes in G
  G_batch_size = max(config['G_batch_size'], config['batch_size'])
  z_, y_ = utils.prepare_z_y(G_batch_size, G.dim_z, config['n_classes'],
                             device=device, fp16=config['G_fp16'])
  # Prepare a fixed z & y to see individual sample evolution throghout training
  fixed_z, fixed_y = utils.prepare_z_y(G_batch_size, G.dim_z,
                                       config['n_classes'], device=device,
                                       fp16=config['G_fp16'])
  fixed_z.sample_()
  fixed_y.sample_()
  # Loaders are loaded, prepare the training function
  if config['which_train_fn'] == 'GAN':
    train = train_fns.GAN_training_function(G, D, GD, z_, y_,
                                            ema, state_dict, config)
  # Else, assume debugging and use the dummy train fn
  else:
    train = train_fns.dummy_training_function()
  # Prepare Sample function for use with inception metrics
  sample = functools.partial(utils.sample,
                              G=(G_ema if config['ema'] and config['use_ema']
                                 else G),
                              z_=z_, y_=y_, config=config)
  print('Beginning training at epoch %d...' % state_dict['epoch'])
  # Train for specified number of epochs, although we mostly track G iterations.
  for epoch in range(state_dict['epoch'], config['num_epochs']):
    # Which progressbar to use? TQDM or my own?
    if config['pbar'] == 'mine':
      pbar = utils.progress(loaders[0],displaytype='s1k' if config['use_multiepoch_sampler'] else 'eta')
    else:
      pbar = tqdm(loaders[0])
    for i, (x, y) in enumerate(pbar):
      # Increment the iteration counter
      state_dict['itr'] += 1
      # Make sure G and D are in training mode, just in case they got set to eval
      # For D, which typically doesn't have BN, this shouldn't matter much.
      G.train()
      D.train()
      if config['ema']:
        G_ema.train()
      if config['D_fp16']:
        x, y = x.to(device).half(), y.to(device)
      else:
        x, y = x.to(device), y.to(device)
      metrics = train(x, y)
      train_log.log(itr=int(state_dict['itr']), **metrics)
      # Every sv_log_interval, log singular values
      if (config['sv_log_interval'] > 0) and (not (state_dict['itr'] % config['sv_log_interval'])):
        train_log.log(itr=int(state_dict['itr']),
                      **{**utils.get_SVs(G, 'G'), **utils.get_SVs(D, 'D')})
      # If using my progbar, print metrics.
      if config['pbar'] == 'mine':
        print(', '.join(['itr: %d' % state_dict['itr']]
                         + ['%s : %+4.3f' % (key, metrics[key])
                            for key in metrics]), end=' ')
      # Save weights and copies as configured at specified interval
      if not (state_dict['itr'] % config['save_every']):
        if config['G_eval_mode']:
          print('Switchin G to eval mode...')
          G.eval()
          if config['ema']:
            G_ema.eval()
        train_fns.save_and_sample(G, D, G_ema, z_, y_, fixed_z, fixed_y,
                                  state_dict, config, experiment_name)
      # Test every specified interval
      if not (state_dict['itr'] % config['test_every']):
        if config['G_eval_mode']:
          print('Switchin G to eval mode...')
          G.eval()
        train_fns.test(G, D, G_ema, state_dict, config, sample,
                       get_inception_metrics, experiment_name, test_log)
    # Increment epoch counter at end of epoch
    state_dict['epoch'] += 1
def main():
  """Entry point: build the config dict from the CLI and launch training."""
  config = vars(utils.prepare_parser().parse_args())
  print(config)
  run(config)
if __name__ == '__main__':
  main()
import os
import functools
import math
import numpy as np
from tqdm import tqdm, trange
import torch
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter as P
import torchvision
import inception_utils
import utils
import losses
import train_fns
from sync_batchnorm import patch_replication_callback
def run(config):
  """Run BigGAN training as described by ``config``.

  Derives dataset-dependent settings, builds G/D (plus optional EMA and
  fp16 copies), data loaders, loggers and inception metrics, then runs
  the epoch/iteration loop with periodic checkpointing and IS/FID tests.
  """
  # Derive settings implied by the user-facing options.
  config['resolution'] = utils.imsize_dict[config['dataset']]
  config['n_classes'] = utils.nclass_dict[config['dataset']]
  config['G_activation'] = utils.activation_dict[config['G_nl']]
  config['D_activation'] = utils.activation_dict[config['D_nl']]
  # When resuming, weights are loaded below, so skip fresh initialization.
  if config['resume']:
    print('Skipping initialization for training resumption...')
    config['skip_init'] = True
  config = utils.update_config_roots(config)
  device = 'cuda'
  utils.seed_rng(config['seed'])
  utils.prepare_root(config)
  torch.backends.cudnn.benchmark = True
  # Dynamically select the model file named by the config.
  model = __import__(config['model'])
  experiment_name = (config['experiment_name'] if config['experiment_name']
                     else utils.name_from_config(config))
  print('Experiment name is %s' % experiment_name)
  G = model.Generator(**config).to(device)
  D = model.Discriminator(**config).to(device)
  # Optional exponential-moving-average copy of G.
  if config['ema']:
    print('Preparing EMA for G with decay of {}'.format(config['ema_decay']))
    G_ema = model.Generator(**{**config, 'skip_init':True,
                               'no_optim': True}).to(device)
    ema = utils.ema(G, G_ema, config['ema_decay'], config['ema_start'])
  else:
    ema = None
  if config['G_fp16']:
    print('Casting G to float16...')
    G = G.half()
    if config['ema']:
      G_ema = G_ema.half()
  if config['D_fp16']:
    print('Casting D to fp16...')
    D = D.half()
  GD = model.G_D(G, D)
  print(G)
  print(D)
  print('Number of params in G: {} D: {}'.format(
    *[sum([p.data.nelement() for p in net.parameters()]) for net in [G,D]]))
  # Training state (counters and best metric values).  BUGFIX: this dict
  # was bound to a name ``t`` while every later use reads ``state_dict``,
  # which raised NameError on first use.
  state_dict = {'itr': 0, 'epoch': 0, 'save_num': 0, 'save_best_num': 0,
                'best_IS': 0, 'best_FID': 999999, 'config': config}
  if config['resume']:
    print('Loading weights...')
    utils.load_weights(G, D, state_dict,
                       config['weights_root'], experiment_name,
                       config['load_weights'] if config['load_weights'] else None,
                       G_ema if config['ema'] else None)
  if config['parallel']:
    GD = nn.DataParallel(GD)
    if config['cross_replica']:
      patch_replication_callback(GD)
  # Loggers: test_log holds evaluation metrics, train_log training metrics.
  test_metrics_fname = '%s/%s_log.jsonl' % (config['logs_root'],
                                            experiment_name)
  train_metrics_fname = '%s/%s' % (config['logs_root'], experiment_name)
  print('Inception Metrics will be saved to {}'.format(test_metrics_fname))
  test_log = utils.MetricsLogger(test_metrics_fname,
                                 reinitialize=(not config['resume']))
  print('Training Metrics will be saved to {}'.format(train_metrics_fname))
  train_log = utils.MyLogger(train_metrics_fname,
                             reinitialize=(not config['resume']),
                             logstyle=config['logstyle'])
  utils.write_metadata(config['logs_root'], experiment_name, config, state_dict)
  # Only D needs a dataloader; each loader step supplies enough data for a
  # full D iteration (all D steps times all accumulations).
  D_batch_size = (config['batch_size'] * config['num_D_steps']
                  * config['num_D_accumulations'])
  loaders = utils.get_data_loaders(**{**config, 'batch_size': D_batch_size,
                                      'start_itr': state_dict['itr']})
  get_inception_metrics = inception_utils.prepare_inception_metrics(config['dataset'], config['parallel'], config['no_fid'])
  # Noise/label inputs for G (G may use a different batch size than D).
  G_batch_size = max(config['G_batch_size'], config['batch_size'])
  z_, y_ = utils.prepare_z_y(G_batch_size, G.dim_z, config['n_classes'],
                             device=device, fp16=config['G_fp16'])
  # A fixed (z, y) pair to visualize sample evolution during training.
  fixed_z, fixed_y = utils.prepare_z_y(G_batch_size, G.dim_z,
                                       config['n_classes'], device=device,
                                       fp16=config['G_fp16'])
  fixed_z.sample_()
  fixed_y.sample_()
  if config['which_train_fn'] == 'GAN':
    train = train_fns.GAN_training_function(G, D, GD, z_, y_,
                                            ema, state_dict, config)
  else:
    # Debugging fallback: a no-op training function.
    train = train_fns.dummy_training_function()
  sample = functools.partial(utils.sample,
                              G=(G_ema if config['ema'] and config['use_ema']
                                 else G),
                              z_=z_, y_=y_, config=config)
  print('Beginning training at epoch %d...' % state_dict['epoch'])
  for epoch in range(state_dict['epoch'], config['num_epochs']):
    if config['pbar'] == 'mine':
      pbar = utils.progress(loaders[0],displaytype='s1k' if config['use_multiepoch_sampler'] else 'eta')
    else:
      pbar = tqdm(loaders[0])
    for i, (x, y) in enumerate(pbar):
      state_dict['itr'] += 1
      # Ensure training mode in case an evaluation left the models in eval.
      G.train()
      D.train()
      if config['ema']:
        G_ema.train()
      if config['D_fp16']:
        x, y = x.to(device).half(), y.to(device)
      else:
        x, y = x.to(device), y.to(device)
      metrics = train(x, y)
      train_log.log(itr=int(state_dict['itr']), **metrics)
      # Periodically log singular values of G and D.
      if (config['sv_log_interval'] > 0) and (not (state_dict['itr'] % config['sv_log_interval'])):
        train_log.log(itr=int(state_dict['itr']),
                      **{**utils.get_SVs(G, 'G'), **utils.get_SVs(D, 'D')})
      if config['pbar'] == 'mine':
        print(', '.join(['itr: %d' % state_dict['itr']]
                         + ['%s : %+4.3f' % (key, metrics[key])
                            for key in metrics]), end=' ')
      # Checkpoint and sample at the configured interval.
      if not (state_dict['itr'] % config['save_every']):
        if config['G_eval_mode']:
          print('Switchin G to eval mode...')
          G.eval()
          if config['ema']:
            G_ema.eval()
        train_fns.save_and_sample(G, D, G_ema, z_, y_, fixed_z, fixed_y,
                                  state_dict, config, experiment_name)
      # Evaluate IS/FID at the configured interval.
      if not (state_dict['itr'] % config['test_every']):
        if config['G_eval_mode']:
          print('Switchin G to eval mode...')
          G.eval()
        train_fns.test(G, D, G_ema, state_dict, config, sample,
                       get_inception_metrics, experiment_name, test_log)
    state_dict['epoch'] += 1
def main():
  """Command-line entry point: parse args into a config dict and train."""
  cli_args = utils.prepare_parser().parse_args()
  config = vars(cli_args)
  print(config)
  run(config)
if __name__ == '__main__':
  main()
f7f9c54b17dacd252e701e8adb842c091f9e4901 | 681 | py | Python | accounts/migrations/0002_auto_20200908_1336.py | SarangWadode/medstore | 07cb70661a8cba6f8dd090dfbd589bfacb7bf12a | [
"MIT"
] | 2 | 2021-03-24T13:36:39.000Z | 2022-02-10T13:51:59.000Z | accounts/migrations/0002_auto_20200908_1336.py | SarangWadode/medstore | 07cb70661a8cba6f8dd090dfbd589bfacb7bf12a | [
"MIT"
] | 44 | 2021-01-05T01:51:38.000Z | 2022-02-10T13:44:26.000Z | accounts/migrations/0002_auto_20200908_1336.py | mukeshgurpude/medstore | 498b76acbeb9727e7a61560e4016b3577c2706d2 | [
"MIT"
] | 1 | 2020-10-28T09:26:01.000Z | 2020-10-28T09:26:01.000Z | # Generated by Django 3.1 on 2020-09-08 08:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adjust UserProfile fields: set the gender choices to
    Male / Female / 'Prefer not to say', and store the phone number
    as a nullable PositiveBigIntegerField."""
    dependencies = [
        ('accounts', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='gender',
            field=models.CharField(choices=[('Male', 'Male'), ('Female', 'Female'), ('Prefer not to say', 'Prefer not to say')], max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='phone',
            field=models.PositiveBigIntegerField(null=True, verbose_name='Mobile Number'),
        ),
    ]
| 28.375 | 155 | 0.593245 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='gender',
field=models.CharField(choices=[('Male', 'Male'), ('Female', 'Female'), ('Prefer not to say', 'Prefer not to say')], max_length=50, null=True),
),
migrations.AlterField(
model_name='userprofile',
name='phone',
field=models.PositiveBigIntegerField(null=True, verbose_name='Mobile Number'),
),
]
| true | true |
f7f9c57d95687981d735c5b9f71b31788ac82444 | 767 | py | Python | desktop/core/ext-py/xlwt-1.3.0/examples/merged.py | zhoudahong/hue | 9ec1b48e6abf08e81b74fa5fc4a03770e37aff92 | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/core/ext-py/xlwt-1.3.0/examples/merged.py | zhoudahong/hue | 9ec1b48e6abf08e81b74fa5fc4a03770e37aff92 | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | desktop/core/ext-py/xlwt-1.3.0/examples/merged.py | zhoudahong/hue | 9ec1b48e6abf08e81b74fa5fc4a03770e37aff92 | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | #!/usr/bin/env python
# -*- coding: windows-1251 -*-
# Copyright (C) 2005 Kiseliov Roman
from xlwt import *
# xlwt demo: write merged cell ranges on three sheets, all sharing one
# style (bold coloured Arial, thick borders, centred text), then save.
cell_font = Font()
cell_font.name = 'Arial'
cell_font.colour_index = 4
cell_font.bold = True

cell_borders = Borders()
cell_borders.left = 6
cell_borders.right = 6
cell_borders.top = 6
cell_borders.bottom = 6

cell_alignment = Alignment()
cell_alignment.horz = Alignment.HORZ_CENTER
cell_alignment.vert = Alignment.VERT_CENTER

merged_style = XFStyle()
merged_style.font = cell_font
merged_style.borders = cell_borders
merged_style.alignment = cell_alignment

book = Workbook()
sheet0 = book.add_sheet('sheet0')
sheet1 = book.add_sheet('sheet1')
sheet2 = book.add_sheet('sheet2')

# Every second row gets a merged range; sheet2's width varies with the row.
for row in range(0, 0x200, 2):
    label = 'test %d' % row
    sheet0.write_merge(row, row + 1, 1, 5, label, merged_style)
    sheet1.write_merge(row, row, 1, 7, label, merged_style)
    sheet2.write_merge(row, row + 1, 1, 7 + (row % 10), label, merged_style)

book.save('merged.xls')
| 19.175 | 64 | 0.65189 |
from xlwt import *
fnt = Font()
fnt.name = 'Arial'
fnt.colour_index = 4
fnt.bold = True
borders = Borders()
borders.left = 6
borders.right = 6
borders.top = 6
borders.bottom = 6
al = Alignment()
al.horz = Alignment.HORZ_CENTER
al.vert = Alignment.VERT_CENTER
style = XFStyle()
style.font = fnt
style.borders = borders
style.alignment = al
wb = Workbook()
ws0 = wb.add_sheet('sheet0')
ws1 = wb.add_sheet('sheet1')
ws2 = wb.add_sheet('sheet2')
for i in range(0, 0x200, 2):
ws0.write_merge(i, i+1, 1, 5, 'test %d' % i, style)
ws1.write_merge(i, i, 1, 7, 'test %d' % i, style)
ws2.write_merge(i, i+1, 1, 7 + (i%10), 'test %d' % i, style)
wb.save('merged.xls')
| true | true |
f7f9c631dd0b2e9aa7271690abee3aed5371fe4f | 2,380 | py | Python | apps/students/models.py | 96RadhikaJadhav/JagratiWebApp | f516c1a683dc0b550f8e0815a012342d316c3de1 | [
"MIT"
] | 1 | 2020-12-31T13:56:55.000Z | 2020-12-31T13:56:55.000Z | apps/students/models.py | 96RadhikaJadhav/JagratiWebApp | f516c1a683dc0b550f8e0815a012342d316c3de1 | [
"MIT"
] | null | null | null | apps/students/models.py | 96RadhikaJadhav/JagratiWebApp | f516c1a683dc0b550f8e0815a012342d316c3de1 | [
"MIT"
] | null | null | null | from django.db import models
from home.models import Calendar, Schedule
# Create your models here.
class Student(models.Model):
    """A student record: name, school class, home village and guardian info."""

    # (stored code, human-readable label) pairs for the `village` field.
    VILLAGE = (
        ('G', 'Gadheri'),
        ('M', 'Mehgawan'),
        ('C', 'Chanditola'),
        ('A', 'Amanala'),
        ('S', 'Suarkol'),
    )
    first_name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50)
    school_class = models.IntegerField()
    # Single-letter village code constrained to VILLAGE above.
    village = models.CharField(max_length=3, choices=VILLAGE)
    contact_no = models.CharField(max_length=13, blank=True)
    guardian_name = models.CharField(max_length=30, blank=True)
    # NOTE(review): flag presumably excludes the student from normal
    # operations; exact semantics not visible here -- confirm with callers.
    restricted = models.BooleanField(default=False)
    def __str__(self):
        return f'{self.get_full_name} ({self.school_class})'
    @property
    def get_full_name(self):
        """Return "<first> <last>" as one display string."""
        return f'{self.first_name} {self.last_name}'
class StudentSchedule(models.Model):
    """Assigns a student to a Schedule slot, at most one slot per weekday."""

    student = models.ForeignKey(Student, on_delete=models.CASCADE, related_name='student_schedules')
    # Denormalised copy of schedule.day; kept in sync by save() below so the
    # (student, day) uniqueness constraint can be enforced at the DB level.
    day = models.IntegerField(choices=Schedule.DAY, blank=True)
    schedule = models.ForeignKey(Schedule, on_delete=models.CASCADE, related_name='student_schedules')
    class Meta:
        # One schedule entry per student per day.
        unique_together = (('student', 'day'),)
        verbose_name = 'Student Schedule'
        verbose_name_plural = 'Students Schedule'
    def __str__(self):
        return f'{self.student} - {self.schedule}'
    def save(self, *args, **kwargs):
        # Mirror the related Schedule's day before every save.
        self.day = Schedule.objects.get(id=self.schedule.id).day
        super(StudentSchedule, self).save(*args, **kwargs)
class StudentAttendence(models.Model):
    """Per-calendar-date attendance and homework record for one student."""

    student = models.ForeignKey(Student, on_delete=models.CASCADE, related_name='student_attendence')
    cal_date = models.ForeignKey(Calendar, on_delete=models.CASCADE, related_name='student_attendence')
    present = models.BooleanField(default=False)
    hw_done = models.BooleanField(default=False, verbose_name="HomeWork Done")
    class Meta:
        # One attendance row per student per calendar date.
        unique_together = (('student', 'cal_date'),)
        verbose_name = 'Student Attendence'
        verbose_name_plural = 'Students Attendence'
    def __str__(self):
        return f'{self.student} - {self.cal_date}'
    def save(self, *args, **kwargs):
        """For cpanel."""
        # Coerce any truthy/None form values to strict booleans before saving.
        self.present = (self.present is True)
        self.hw_done = (self.hw_done is True)
        super(StudentAttendence, self).save(*args, **kwargs)
| 35 | 103 | 0.677731 | from django.db import models
from home.models import Calendar, Schedule
class Student(models.Model):
VILLAGE = (
('G', 'Gadheri'),
('M', 'Mehgawan'),
('C', 'Chanditola'),
('A', 'Amanala'),
('S', 'Suarkol'),
)
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
school_class = models.IntegerField()
village = models.CharField(max_length=3, choices=VILLAGE)
contact_no = models.CharField(max_length=13, blank=True)
guardian_name = models.CharField(max_length=30, blank=True)
restricted = models.BooleanField(default=False)
def __str__(self):
return f'{self.get_full_name} ({self.school_class})'
@property
def get_full_name(self):
return f'{self.first_name} {self.last_name}'
class StudentSchedule(models.Model):
student = models.ForeignKey(Student, on_delete=models.CASCADE, related_name='student_schedules')
day = models.IntegerField(choices=Schedule.DAY, blank=True)
schedule = models.ForeignKey(Schedule, on_delete=models.CASCADE, related_name='student_schedules')
class Meta:
unique_together = (('student', 'day'),)
verbose_name = 'Student Schedule'
verbose_name_plural = 'Students Schedule'
def __str__(self):
return f'{self.student} - {self.schedule}'
def save(self, *args, **kwargs):
self.day = Schedule.objects.get(id=self.schedule.id).day
super(StudentSchedule, self).save(*args, **kwargs)
class StudentAttendence(models.Model):
student = models.ForeignKey(Student, on_delete=models.CASCADE, related_name='student_attendence')
cal_date = models.ForeignKey(Calendar, on_delete=models.CASCADE, related_name='student_attendence')
present = models.BooleanField(default=False)
hw_done = models.BooleanField(default=False, verbose_name="HomeWork Done")
class Meta:
unique_together = (('student', 'cal_date'),)
verbose_name = 'Student Attendence'
verbose_name_plural = 'Students Attendence'
def __str__(self):
return f'{self.student} - {self.cal_date}'
def save(self, *args, **kwargs):
self.present = (self.present is True)
self.hw_done = (self.hw_done is True)
super(StudentAttendence, self).save(*args, **kwargs)
| true | true |
f7f9c7099985c76c55e77a21d827117a20fecab1 | 2,348 | py | Python | tests/conftest.py | TinLe/elastalert | fc648c37eaed7617196a1ca020ebd99beb0fade8 | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | TinLe/elastalert | fc648c37eaed7617196a1ca020ebd99beb0fade8 | [
"Apache-2.0"
] | 1 | 2021-06-02T04:32:03.000Z | 2021-06-02T04:32:03.000Z | tests/conftest.py | talyian/elastalert | 8ff39d485c0babd098ad659b53ce0f8ad456c6c3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
import elasticsearch
import mock
import pytest
from elastalert.elastalert import ElastAlerter
class mock_es_client(object):
    """Fake Elasticsearch client: stores connection details and exposes the
    search/create/delete operations as Mocks for call inspection."""

    def __init__(self, host='es', port=14900):
        self.host = host
        self.port = port
        self.return_hits = []
        # Every ES operation is a Mock so tests can assert on call arguments.
        for operation in ('search', 'create', 'delete'):
            setattr(self, operation, mock.Mock())
class mock_ruletype(object):
    """Fake rule type: data-ingestion hooks are Mocks, match helpers are
    canned callables (identity data, fixed match string)."""

    def __init__(self):
        for hook in ('add_data', 'add_count_data', 'add_terms_data', 'garbage_collect'):
            setattr(self, hook, mock.Mock())
        self.matches = []
        self.get_match_data = lambda match: match
        self.get_match_str = lambda match: "some stuff happened"
class mock_alert(object):
    """Fake alerter: alert() is a Mock, get_info() returns a constant dict."""

    def __init__(self):
        self.alert = mock.Mock()

    def get_info(self):
        return dict(type='mock')
@pytest.fixture
def ea():
    """Build an ElastAlerter wired entirely to mocks for unit tests."""
    # Minimal single-rule configuration; the rule's type/alert objects are
    # swapped for mocks after the ElastAlerter is constructed.
    rules = [{'es_host': '',
              'es_port': '',
              'name': 'anytest',
              'index': 'idx',
              'filter': [],
              'include': ['@timestamp'],
              'aggregation': datetime.timedelta(0),
              'realert': datetime.timedelta(0),
              'processed_hits': {},
              'timestamp_field': '@timestamp',
              'match_enhancements': []}]
    conf = {'rules_folder': 'rules',
            'run_every': datetime.timedelta(minutes=10),
            'buffer_time': datetime.timedelta(minutes=5),
            'alert_time_limit': datetime.timedelta(hours=24),
            'es_host': 'es',
            'es_port': 14900,
            'writeback_index': 'wb',
            'rules': rules,
            'max_query_size': 100000,
            'old_query_limit': datetime.timedelta(weeks=1)}
    # Replace the real ES client class globally, and patch config/rule
    # loading so constructing ElastAlerter touches no files or network.
    elasticsearch.client.Elasticsearch = mock_es_client
    with mock.patch('elastalert.elastalert.get_rule_hashes'):
        with mock.patch('elastalert.elastalert.load_rules') as load_conf:
            load_conf.return_value = conf
            ea = ElastAlerter(['--pin_rules'])
    # Swap in mock rule-type/alerter and canned writeback responses.
    ea.rules[0]['type'] = mock_ruletype()
    ea.rules[0]['alert'] = [mock_alert()]
    ea.writeback_es = mock_es_client()
    ea.writeback_es.search.return_value = {'hits': {'hits': []}}
    ea.writeback_es.create.return_value = {'_id': 'ABCD'}
    ea.current_es = mock_es_client('', '')
    return ea
| 31.306667 | 73 | 0.58092 |
import datetime
import elasticsearch
import mock
import pytest
from elastalert.elastalert import ElastAlerter
class mock_es_client(object):
def __init__(self, host='es', port=14900):
self.host = host
self.port = port
self.return_hits = []
self.search = mock.Mock()
self.create = mock.Mock()
self.delete = mock.Mock()
class mock_ruletype(object):
def __init__(self):
self.add_data = mock.Mock()
self.add_count_data = mock.Mock()
self.add_terms_data = mock.Mock()
self.matches = []
self.get_match_data = lambda x: x
self.get_match_str = lambda x: "some stuff happened"
self.garbage_collect = mock.Mock()
class mock_alert(object):
def __init__(self):
self.alert = mock.Mock()
def get_info(self):
return {'type': 'mock'}
@pytest.fixture
def ea():
rules = [{'es_host': '',
'es_port': '',
'name': 'anytest',
'index': 'idx',
'filter': [],
'include': ['@timestamp'],
'aggregation': datetime.timedelta(0),
'realert': datetime.timedelta(0),
'processed_hits': {},
'timestamp_field': '@timestamp',
'match_enhancements': []}]
conf = {'rules_folder': 'rules',
'run_every': datetime.timedelta(minutes=10),
'buffer_time': datetime.timedelta(minutes=5),
'alert_time_limit': datetime.timedelta(hours=24),
'es_host': 'es',
'es_port': 14900,
'writeback_index': 'wb',
'rules': rules,
'max_query_size': 100000,
'old_query_limit': datetime.timedelta(weeks=1)}
elasticsearch.client.Elasticsearch = mock_es_client
with mock.patch('elastalert.elastalert.get_rule_hashes'):
with mock.patch('elastalert.elastalert.load_rules') as load_conf:
load_conf.return_value = conf
ea = ElastAlerter(['--pin_rules'])
ea.rules[0]['type'] = mock_ruletype()
ea.rules[0]['alert'] = [mock_alert()]
ea.writeback_es = mock_es_client()
ea.writeback_es.search.return_value = {'hits': {'hits': []}}
ea.writeback_es.create.return_value = {'_id': 'ABCD'}
ea.current_es = mock_es_client('', '')
return ea
| true | true |
f7f9c7a8e30f567acb44dc065de504016bedccd2 | 663 | py | Python | app/user/views.py | EmersonsfDev/Flask_login | d50e62b0352fa6d2b69e21e14f6d4bf8e9cbc92f | [
"Apache-2.0"
] | null | null | null | app/user/views.py | EmersonsfDev/Flask_login | d50e62b0352fa6d2b69e21e14f6d4bf8e9cbc92f | [
"Apache-2.0"
] | null | null | null | app/user/views.py | EmersonsfDev/Flask_login | d50e62b0352fa6d2b69e21e14f6d4bf8e9cbc92f | [
"Apache-2.0"
] | null | null | null | from flask import redirect, render_template, url_for
from flask_login import login_required
from app import db
from app.models import User
from . import user
@user.route("/")
def index():
    """Render the page listing every registered user."""
    all_users = User.query.all()  # SELECT * FROM users
    return render_template("users.html", users=all_users)
@user.route("/user/<int:id>")
@login_required
def unique(id):
    """Render the detail page for a single user (login required)."""
    found = User.query.get(id)  # None when the id is unknown
    return render_template("user.html", user=found)
@user.route("/user/delete/<int:id>")
@login_required  # destructive action: require login, consistent with `unique`
def delete(id):
    """Delete the user with the given id, then return to the user list.

    Unknown ids are ignored; previously `filter_by(...).first()` could
    return None and `db.session.delete(None)` raised an error.
    """
    user = User.query.get(id)  # primary-key lookup
    if user is not None:
        db.session.delete(user)
        db.session.commit()
    return redirect(url_for(".index"))
| 23.678571 | 54 | 0.671192 | from flask import redirect, render_template, url_for
from flask_login import login_required
from app import db
from app.models import User
from . import user
@user.route("/")
def index():
users = User.query.all()
return render_template("users.html", users=users)
@user.route("/user/<int:id>")
@login_required
def unique(id):
user = User.query.get(id)
return render_template("user.html", user=user)
@user.route("/user/delete/<int:id>")
def delete(id):
user = User.query.filter_by(id=id).first()
db.session.delete(user)
db.session.commit()
return redirect(url_for(".index"))
| true | true |
f7f9c8525c8765435d123e12348f5dbca6ca3c15 | 3,732 | py | Python | sdk/python/pulumi_azure/authorization/get_user_assigned_identity.py | AdminTurnedDevOps/pulumi-azure | affd9eaaee3016f350f0d0469694dbd52850300b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/authorization/get_user_assigned_identity.py | AdminTurnedDevOps/pulumi-azure | affd9eaaee3016f350f0d0469694dbd52850300b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/authorization/get_user_assigned_identity.py | AdminTurnedDevOps/pulumi-azure | affd9eaaee3016f350f0d0469694dbd52850300b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetUserAssignedIdentityResult:
    """
    A collection of values returned by getUserAssignedIdentity.
    """
    # Each argument is type-checked only when truthy: None/'' skip the check.
    def __init__(__self__, client_id=None, id=None, location=None, name=None, principal_id=None, resource_group_name=None, tags=None):
        if client_id and not isinstance(client_id, str):
            raise TypeError("Expected argument 'client_id' to be a str")
        __self__.client_id = client_id
        """
        The Client ID of the User Assigned Identity.
        """
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        __self__.id = id
        """
        id is the provider-assigned unique ID for this managed resource.
        """
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        __self__.location = location
        """
        The Azure location where the User Assigned Identity exists.
        """
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        # The name of the User Assigned Identity (echo of the lookup argument).
        __self__.name = name
        if principal_id and not isinstance(principal_id, str):
            raise TypeError("Expected argument 'principal_id' to be a str")
        __self__.principal_id = principal_id
        """
        The Service Principal ID of the User Assigned Identity.
        """
        if resource_group_name and not isinstance(resource_group_name, str):
            raise TypeError("Expected argument 'resource_group_name' to be a str")
        # The Resource Group the identity was looked up in.
        __self__.resource_group_name = resource_group_name
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        __self__.tags = tags
        """
        A mapping of tags assigned to the User Assigned Identity.
        """
class AwaitableGetUserAssignedIdentityResult(GetUserAssignedIdentityResult):
    # Awaitable wrapper so the result works with both sync and async callers.
    # pylint: disable=using-constant-test
    def __await__(self):
        # `if False: yield` marks this method as a generator (making the
        # object awaitable) without ever actually suspending.
        if False:
            yield self
        return GetUserAssignedIdentityResult(
            client_id=self.client_id,
            id=self.id,
            location=self.location,
            name=self.name,
            principal_id=self.principal_id,
            resource_group_name=self.resource_group_name,
            tags=self.tags)
def get_user_assigned_identity(name=None,resource_group_name=None,opts=None):
    """
    Use this data source to access information about an existing User Assigned Identity.

    :param str name: The name of the User Assigned Identity.
    :param str resource_group_name: The name of the Resource Group in which the User Assigned Identity exists.
    :param opts: Optional pulumi.InvokeOptions controlling the invoke.
    :return: AwaitableGetUserAssignedIdentityResult with the identity's details.
    """
    __args__ = dict()
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's version when the caller did not pin one.
        opts.version = utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure:authorization/getUserAssignedIdentity:getUserAssignedIdentity', __args__, opts=opts).value
    return AwaitableGetUserAssignedIdentityResult(
        client_id=__ret__.get('clientId'),
        id=__ret__.get('id'),
        location=__ret__.get('location'),
        name=__ret__.get('name'),
        principal_id=__ret__.get('principalId'),
        resource_group_name=__ret__.get('resourceGroupName'),
        tags=__ret__.get('tags'))
| 39.702128 | 134 | 0.667203 |
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetUserAssignedIdentityResult:
def __init__(__self__, client_id=None, id=None, location=None, name=None, principal_id=None, resource_group_name=None, tags=None):
if client_id and not isinstance(client_id, str):
raise TypeError("Expected argument 'client_id' to be a str")
__self__.client_id = client_id
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
__self__.location = location
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
if principal_id and not isinstance(principal_id, str):
raise TypeError("Expected argument 'principal_id' to be a str")
__self__.principal_id = principal_id
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
__self__.resource_group_name = resource_group_name
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
__self__.tags = tags
class AwaitableGetUserAssignedIdentityResult(GetUserAssignedIdentityResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetUserAssignedIdentityResult(
client_id=self.client_id,
id=self.id,
location=self.location,
name=self.name,
principal_id=self.principal_id,
resource_group_name=self.resource_group_name,
tags=self.tags)
def get_user_assigned_identity(name=None,resource_group_name=None,opts=None):
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:authorization/getUserAssignedIdentity:getUserAssignedIdentity', __args__, opts=opts).value
return AwaitableGetUserAssignedIdentityResult(
client_id=__ret__.get('clientId'),
id=__ret__.get('id'),
location=__ret__.get('location'),
name=__ret__.get('name'),
principal_id=__ret__.get('principalId'),
resource_group_name=__ret__.get('resourceGroupName'),
tags=__ret__.get('tags'))
| true | true |
f7f9c927990c92870b237492b72e754c44b514cc | 91 | py | Python | saltshaker/version.py | diegotoral/SaltShaker | 86c7619f3347c1b56ed3e680b8bb558d93b0e385 | [
"MIT"
] | 2 | 2017-07-02T20:29:26.000Z | 2017-07-02T21:18:51.000Z | saltshaker/version.py | diegotoral/SaltShaker | 86c7619f3347c1b56ed3e680b8bb558d93b0e385 | [
"MIT"
] | null | null | null | saltshaker/version.py | diegotoral/SaltShaker | 86c7619f3347c1b56ed3e680b8bb558d93b0e385 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__version__ = '0.0.1'
def get_version():
return __version__
| 11.375 | 23 | 0.604396 |
__version__ = '0.0.1'
def get_version():
return __version__
| true | true |
f7f9c96dca50dc206ff3e874c0800267a552dca3 | 497 | py | Python | week10/CinemaReservation/hack_cinema/users/users_gateway.py | HackBulgaria/Programming-101-Python-2020-Spring | 443446028df7fe78fcdd6c37dada0b5cd8ed3c93 | [
"MIT"
] | 30 | 2020-01-22T17:22:43.000Z | 2022-01-26T08:28:57.000Z | week10/CinemaReservation/hack_cinema/users/users_gateway.py | HackBulgaria/Programming-101-Python-2020-Spring | 443446028df7fe78fcdd6c37dada0b5cd8ed3c93 | [
"MIT"
] | 1 | 2020-01-21T19:50:47.000Z | 2020-03-18T16:18:31.000Z | week10/CinemaReservation/hack_cinema/users/users_gateway.py | HackBulgaria/Programming-101-Python-2020-Spring | 443446028df7fe78fcdd6c37dada0b5cd8ed3c93 | [
"MIT"
] | 7 | 2019-11-28T15:59:16.000Z | 2020-12-05T08:39:02.000Z | from ..db import Database
from .models import UserModel
class UserGateway:
    """Data-access layer that maps database rows to UserModel instances.

    NOTE(review): both queries are unfinished TODO stubs -- cursor.execute()
    is currently called with no SQL, so these methods raise until the
    queries are supplied.
    """
    def __init__(self):
        # Model class used for validation and row hydration.
        self.model = UserModel
        self.db = Database()
    def create(self, *, email, password):
        """Validate the credentials, then insert a new user row."""
        self.model.validate(email, password)
        self.db.cursor.execute()  # TODO: create user query
        # TODO: What should I return?
    def all(self):
        """Fetch every user row and hydrate each into a UserModel."""
        raw_users = self.db.cursor.execute()  # TODO: Select all users
        return [self.model(**row) for row in raw_users]
| 23.666667 | 70 | 0.631791 | from ..db import Database
from .models import UserModel
class UserGateway:
def __init__(self):
self.model = UserModel
self.db = Database()
def create(self, *, email, password):
self.model.validate(email, password)
self.db.cursor.execute()
def all(self):
raw_users = self.db.cursor.execute()
return [self.model(**row) for row in raw_users]
| true | true |
f7f9c975e23e44ade6351567ee3a56fd11afc6be | 3,466 | py | Python | lib_search.py | ayoubBouziane/model_server | 03d6d325304e01fc197e6e033c84eb9af150301d | [
"Apache-2.0"
] | null | null | null | lib_search.py | ayoubBouziane/model_server | 03d6d325304e01fc197e6e033c84eb9af150301d | [
"Apache-2.0"
] | null | null | null | lib_search.py | ayoubBouziane/model_server | 03d6d325304e01fc197e6e033c84eb9af150301d | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import re
# Matches any copyright notice at all -- used to distinguish "has some
# header" from "has no header".
COPYRIGHT = re.compile(r'Copyright')
# Accepted Intel notice: optional "(c)", optional "2018-"/"2019-" range
# prefix, final year 2018/2019/2020.
INTEL_COPYRIGHT = re.compile(r'Copyright (\(c\) )?(201(8|9)-)?20(20|19|18) Intel Corporation')
def check_header(fd):
    """Scan an iterable of lines for a copyright header.

    Returns (detected, result): `detected` is True when any copyright
    notice was seen; `result` is True when a valid Intel notice was found
    (scanning stops at the first valid notice).
    """
    result = False
    detected = False
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
    # not swallowed; decode errors etc. still just log and fall through.
    try:
        for line in fd:
            if COPYRIGHT.findall(line):
                detected = True
            if INTEL_COPYRIGHT.findall(line):
                result = True
                break
    except Exception:
        print("ERROR: Cannot parse file:" + str(fd))
    return detected, result
def check_dir(start_dir):
    """Walk `start_dir` and classify files by copyright header.

    Returns (not_ok, no_header): files whose header is not the accepted
    Intel notice, and files with no copyright notice at all.  Paths
    matching `exclude_files` fragments or `exclude_directories` are
    skipped entirely.
    """
    not_ok = []
    no_header = []
    # Path fragments marking binary/vendored/generated files to ignore.
    exclude_files = ['__pycache__', '.venv', '.pytest_cache', '.vscode', 'ovms-c/dist', '.git', '.tar.gz', 'docx',
                     '.npy', '.png', '.svg', '.bin', '.jpeg', '.jpg', 'license.txt', 'md', '.groovy', '.json' ,'bazel-',
                     'Doxyfile', 'clang-format','net_http.patch', 'tftext.patch', 'tf.patch', 'client_requirements.txt',
                     'openvino.LICENSE.txt', 'c-ares.LICENSE.txt', 'zlib.LICENSE.txt', 'boost.LICENSE.txt',
                     'libuuid.LICENSE.txt', 'input_images.txt', 'REST_age_gender.ipynb', 'dummy.xml', 'listen.patch', 'add.xml',
                     'requirements.txt', 'missing_headers.txt', 'libevent/BUILD', 'azure_sdk.patch', 'rest_sdk_v2.10.16.patch', ]
    exclude_directories = ['/dist/']
    for (d_path, dir_set, file_set) in os.walk(start_dir):
        for f_name in file_set:
            # Skip anything inside an excluded directory.
            if any(excluded in d_path for excluded in exclude_directories):
                print('Warning - Skipping directory - ' + d_path + ' for file - ' + f_name)
                continue
            fpath = os.path.join(d_path, f_name)
            # `any(...)` replaces building a throwaway list just for truthiness.
            if any(token in fpath for token in exclude_files):
                continue
            with open(fpath, 'r') as fd:
                header_detected, result = check_header(fd)
            if not header_detected:
                no_header.append(fpath)
            elif not result:
                # Has a header, but not the accepted Intel notice.
                not_ok.append(fpath)
            # Files with a valid header need no tracking (the old `ok` list
            # was never returned).
    return not_ok, no_header
def main():
    """CLI entry point: report files under argv[1] lacking a valid header."""
    # Was `len(sys.argv) < 1`, which is never true (argv[0] is the script
    # name), so a missing argument crashed with IndexError instead of
    # printing the usage hint.
    if len(sys.argv) < 2:
        print('Provide start dir!')
    else:
        start_dir = sys.argv[1]
        external_component_set, no_header_set = check_dir(start_dir)
        if len(no_header_set) == 0:
            print('Success: All files have headers')
        else:
            print('#########################')
            print('## No header files detected:')
            for no_header in no_header_set:
                print(f'{no_header}')
if __name__ == '__main__':
    main()
| 33.980392 | 129 | 0.548471 |
import os
import sys
import re
COPYRIGHT = re.compile(r'Copyright')
INTEL_COPYRIGHT = re.compile(r'Copyright (\(c\) )?(201(8|9)-)?20(20|19|18) Intel Corporation')
def check_header(fd):
result = False
detected = False
try:
for line in fd:
if COPYRIGHT.findall(line):
detected = True
if INTEL_COPYRIGHT.findall(line):
result = True
break
except:
print("ERROR: Cannot parse file:" + str(fd))
return detected, result
def check_dir(start_dir):
ok = []
not_ok = []
no_header = []
exclude_files = ['__pycache__', '.venv', '.pytest_cache', '.vscode', 'ovms-c/dist', '.git', '.tar.gz', 'docx',
'.npy', '.png', '.svg', '.bin', '.jpeg', '.jpg', 'license.txt', 'md', '.groovy', '.json' ,'bazel-',
'Doxyfile', 'clang-format','net_http.patch', 'tftext.patch', 'tf.patch', 'client_requirements.txt',
'openvino.LICENSE.txt', 'c-ares.LICENSE.txt', 'zlib.LICENSE.txt', 'boost.LICENSE.txt',
'libuuid.LICENSE.txt', 'input_images.txt', 'REST_age_gender.ipynb', 'dummy.xml', 'listen.patch', 'add.xml',
'requirements.txt', 'missing_headers.txt', 'libevent/BUILD', 'azure_sdk.patch', 'rest_sdk_v2.10.16.patch', ]
exclude_directories = ['/dist/']
for (d_path, dir_set, file_set) in os.walk(start_dir):
for f_name in file_set:
skip = False
for excluded in exclude_directories:
if excluded in d_path:
skip = True
print('Warning - Skipping directory - ' + d_path + ' for file - ' + f_name)
break
if skip:
continue
fpath = os.path.join(d_path, f_name)
if not [test for test in exclude_files if test in fpath]:
with open(fpath, 'r') as fd:
header_detected, result = check_header(fd)
if header_detected:
if result:
ok.append(fpath)
else:
not_ok.append(fpath)
else:
no_header.append(fpath)
return not_ok, no_header
def main():
if len(sys.argv) < 1:
print('Provide start dir!')
else:
start_dir = sys.argv[1]
external_component_set, no_header_set = check_dir(start_dir)
if len(no_header_set) == 0:
print('Success: All files have headers')
else:
print('#########################')
print('## No header files detected:')
for no_header in no_header_set:
print(f'{no_header}')
if __name__ == '__main__':
main()
| true | true |
f7f9c9edab3b2b66e59f0f2d6e520e55c558c159 | 193 | py | Python | exercises/concept/pretty-leaflet/string_formatting.py | highb/python | 5cf63f0a070e0efef4d2334582d331296100fbb0 | [
"MIT"
] | 1,177 | 2017-06-21T20:24:06.000Z | 2022-03-29T02:30:55.000Z | exercises/concept/pretty-leaflet/string_formatting.py | highb/python | 5cf63f0a070e0efef4d2334582d331296100fbb0 | [
"MIT"
] | 1,890 | 2017-06-18T20:06:10.000Z | 2022-03-31T18:35:51.000Z | exercises/concept/pretty-leaflet/string_formatting.py | highb/python | 5cf63f0a070e0efef4d2334582d331296100fbb0 | [
"MIT"
] | 1,095 | 2017-06-26T23:06:19.000Z | 2022-03-29T03:25:38.000Z | def capitalize_header(event_name):
pass
def format_date(event_date):
pass
def display_icons(icons):
pass
def print_leaflet(event_name, icons, authors, event_date=None):
pass
| 16.083333 | 63 | 0.740933 | def capitalize_header(event_name):
pass
def format_date(event_date):
pass
def display_icons(icons):
pass
def print_leaflet(event_name, icons, authors, event_date=None):
pass
| true | true |
f7f9cb6cae0403510c98c6943007fd306aac6a84 | 515 | py | Python | helpdesk/migrations/0010_remove_queuemembership.py | altimore/django-helpdesk | e710cb028be6725350b05a802d14508a96375d14 | [
"BSD-3-Clause",
"CC-BY-4.0",
"MIT"
] | null | null | null | helpdesk/migrations/0010_remove_queuemembership.py | altimore/django-helpdesk | e710cb028be6725350b05a802d14508a96375d14 | [
"BSD-3-Clause",
"CC-BY-4.0",
"MIT"
] | null | null | null | helpdesk/migrations/0010_remove_queuemembership.py | altimore/django-helpdesk | e710cb028be6725350b05a802d14508a96375d14 | [
"BSD-3-Clause",
"CC-BY-4.0",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
    """Remove the QueueMembership model: drop its relational fields first,
    then delete the model itself (its data was handled by migration 0009)."""

    dependencies = [
        ("helpdesk", "0009_migrate_queuemembership"),
    ]
    operations = [
        migrations.RemoveField(
            model_name="queuemembership",
            name="queues",
        ),
        migrations.RemoveField(
            model_name="queuemembership",
            name="user",
        ),
        migrations.DeleteModel(
            name="QueueMembership",
        ),
    ]
| 21.458333 | 53 | 0.553398 |
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
("helpdesk", "0009_migrate_queuemembership"),
]
operations = [
migrations.RemoveField(
model_name="queuemembership",
name="queues",
),
migrations.RemoveField(
model_name="queuemembership",
name="user",
),
migrations.DeleteModel(
name="QueueMembership",
),
]
| true | true |
f7f9ccff2f79aa03b7ad28757bcdd31989c41f47 | 1,300 | py | Python | WildlifeObservations/observations/management/commands/import_id_guides.py | jen-thomas/wildlife-observations | 594f141822c75f3d8fdbc8dcba3d09bbf5bc42ca | [
"MIT"
] | null | null | null | WildlifeObservations/observations/management/commands/import_id_guides.py | jen-thomas/wildlife-observations | 594f141822c75f3d8fdbc8dcba3d09bbf5bc42ca | [
"MIT"
] | 24 | 2022-01-21T16:37:55.000Z | 2022-03-10T15:34:34.000Z | WildlifeObservations/observations/management/commands/import_id_guides.py | jen-thomas/wildlife-observations | 594f141822c75f3d8fdbc8dcba3d09bbf5bc42ca | [
"MIT"
] | null | null | null | from django.core.management.base import BaseCommand
from django.db import transaction
from ...models import IdentificationGuide
class Command(BaseCommand):
    """Management command that seeds the IdentificationGuide table."""
    help = 'Adds ID guides'

    @transaction.atomic
    def handle(self, *args, **options):
        # All guides are inserted inside a single transaction.
        self.import_id_guides()

    def import_id_guides(self):
        """Create one IdentificationGuide row per known field guide.

        NOTE(review): rerunning the command inserts duplicates (no
        get_or_create); confirm whether that matters for this table.
        """
        # (title, author) pairs; the previous version bound each create()
        # result to an unused `id_guide` variable.
        guides = [
            ('Grasshoppers of Britain and Western Europe',
             'Sardet, Roesti and Braud'),
            ('The Orthoptera fauna of the Pyrenean region - a field guide',
             'Poniatowski, Defaut, Llucià-Pomares and Fartmann'),
            ('Atles dels Ortòpters de Catalunya',
             'Olmo Vidal'),
            ('Revisión de los Ortópteros (Insecta: Orthoptera) de Cataluña (España)',
             'Llucià Pomares'),
            ('Saltamontes, Grillos y Langostas',
             'Bellmann, Rutschmann, Roesti and Hochkirch'),
        ]
        for title, author in guides:
            IdentificationGuide.objects.create(title=title, author=author)
from django.db import transaction
from ...models import IdentificationGuide
class Command(BaseCommand):
help = 'Adds ID guides'
@transaction.atomic
def handle(self, *args, **options):
self.import_id_guides()
def import_id_guides(self):
id_guide = IdentificationGuide.objects.create(title='Grasshoppers of Britain and Western Europe',
author='Sardet, Roesti and Braud')
id_guide = IdentificationGuide.objects.create(title='The Orthoptera fauna of the Pyrenean region - a field guide',
author='Poniatowski, Defaut, Llucià-Pomares and Fartmann')
id_guide = IdentificationGuide.objects.create(title='Atles dels Ortòpters de Catalunya',
author='Olmo Vidal')
id_guide = IdentificationGuide.objects.create(title='Revisión de los Ortópteros (Insecta: Orthoptera) de Cataluña (España)',
author='Llucià Pomares')
id_guide = IdentificationGuide.objects.create(
title='Saltamontes, Grillos y Langostas',
author='Bellmann, Rutschmann, Roesti and Hochkirch') | true | true |
f7f9cd6b8dab8a678606d68bee0e5a4b21885af1 | 75 | py | Python | index.py | netvork109/soapie-client | 87ee215be565101bf79583a75ca2ce08d627186c | [
"MIT"
] | null | null | null | index.py | netvork109/soapie-client | 87ee215be565101bf79583a75ca2ce08d627186c | [
"MIT"
] | null | null | null | index.py | netvork109/soapie-client | 87ee215be565101bf79583a75ca2ce08d627186c | [
"MIT"
] | null | null | null | # local modules
from data import *
from editor import *
from api import API | 18.75 | 20 | 0.773333 |
from data import *
from editor import *
from api import API | true | true |
f7f9cd9dcd11a0d0ae3196de0e978244a8ba850e | 2,526 | py | Python | presets/Modes/Python/S - A Zach Reactive/main.py | jqrsound/EYESY_OS_for_RasPiSound | ac117b91cd84ad4c0566bd1a7d4c7b1ccc01cf62 | [
"BSD-3-Clause"
] | 4 | 2021-07-04T16:49:34.000Z | 2022-02-08T13:38:34.000Z | presets/Modes/Python/S - A Zach Reactive/main.py | jqrsound/EYESY_OS_for_RasPiSound | ac117b91cd84ad4c0566bd1a7d4c7b1ccc01cf62 | [
"BSD-3-Clause"
] | null | null | null | presets/Modes/Python/S - A Zach Reactive/main.py | jqrsound/EYESY_OS_for_RasPiSound | ac117b91cd84ad4c0566bd1a7d4c7b1ccc01cf62 | [
"BSD-3-Clause"
] | null | null | null | import pygame
import pygame.gfxdraw
import random
import time
import math
from pygame.locals import *
# original code adapted from zach lieberman's talk
# https://www.youtube.com/watch?v=bmztlO9_Wvo
white=(255,255,255)
w1 = 0
h1 = 0
def setup(screen, etc) :
global w1,h1
w1 = screen.get_width()
h1 = screen.get_height()
pass
def draw(screen, etc):
global w1,h1
etc.color_picker_bg(etc.knob5)
#for i in range(320):
for i in range((h1 / 2) - 10):
i=i*2
color = (int(127 + 120 * math.sin(i * .01 + time.time())),
int(127 + 120 * math.sin(i * (.01 + etc.knob4*.01) + time.time())),
int(127 + 120 * math.sin(i * (.01 + etc.knob4*.02)+ time.time())))
r1= (abs(etc.audio_in[i/50]/900))
radius_1 = int(100 + r1+40 * math.sin(i * (etc.knob1 * .05)+.0001 + time.time()))
radius1 = int(etc.knob3 * radius_1)
radius_2 = int( 70 + r1 - 20 * math.sin(i * (etc.knob2 * .2)+.0001 + time.time()))
radius2 = int(etc.knob3 * radius_2)
xoffset1 = i
xpos1 = int(((w1 / 2)-i) * math.sin(i * .01 + (time.time()*0.3)) + (w1 / 2-i) + xoffset1)+ int(r1*1.5)
xpos2 = int(((w1 / 2)-i) * math.sin(i * .01 + (time.time()*0.3)) + (w1 / 2-i) + xoffset1+(h1 / 2))+ int(r1*1.5)#int(w1 // 2 + 100 * math.sin(i * .02 + time.time())*1.3)+(h1 / 2)+ int(r1*1.5)#-int(etc.knob1*(720-i))
xpos3 = int(((w1 / 2)-i) * math.sin(i * .01 + (time.time()*0.3)) + (w1 / 2-i) + xoffset1-+(h1 / 2))+ int(r1*1.5)#int(w1 // 2 + 100 * math.sin(i * .02 + time.time())*1.2)-(h1 / 2)+ int(r1*1.5)#-int(etc.knob1*(720-i))
rect2 = Rect(xpos2, i, radius2*1.5, radius2*1.5)
radius3=int(radius2+10+10 *(math.sin(i * (etc.knob2 * .2) + time.time())))
radius4=int(radius2+10+10 *(math.cos(i * (etc.knob1 * .2) + time.time())))
pygame.gfxdraw.circle(screen, xpos1, i, radius1, color)
pygame.gfxdraw.rectangle(screen, rect2, color)
pygame.gfxdraw.ellipse(screen, xpos3, i, radius3, radius4, color)
#pygame.gfxdraw.circle(screen, xpos3, i, radius2, color)
#pygame.gfxdraw.filled_circle(screen, xpos1, i, radius1, color)
#pygame.gfxdraw.filled_circle(screen, xpos2, i, radius2, color)
#pygame.gfxdraw.filled_circle(screen, xpos3, i, radius2, color)
#pygame.gfxdraw.circle(screen, xpos1, i, radius1, white )
#pygame.gfxdraw.circle(screen, xpos2, i, radius2, white )
#pygame.gfxdraw.circle(screen, xpos3, i, radius2, white )
| 47.660377 | 223 | 0.578385 | import pygame
import pygame.gfxdraw
import random
import time
import math
from pygame.locals import *
# https://www.youtube.com/watch?v=bmztlO9_Wvo
white=(255,255,255)
w1 = 0
h1 = 0
def setup(screen, etc) :
global w1,h1
w1 = screen.get_width()
h1 = screen.get_height()
pass
def draw(screen, etc):
global w1,h1
etc.color_picker_bg(etc.knob5)
#for i in range(320):
for i in range((h1 / 2) - 10):
i=i*2
color = (int(127 + 120 * math.sin(i * .01 + time.time())),
int(127 + 120 * math.sin(i * (.01 + etc.knob4*.01) + time.time())),
int(127 + 120 * math.sin(i * (.01 + etc.knob4*.02)+ time.time())))
r1= (abs(etc.audio_in[i/50]/900))
radius_1 = int(100 + r1+40 * math.sin(i * (etc.knob1 * .05)+.0001 + time.time()))
radius1 = int(etc.knob3 * radius_1)
radius_2 = int( 70 + r1 - 20 * math.sin(i * (etc.knob2 * .2)+.0001 + time.time()))
radius2 = int(etc.knob3 * radius_2)
xoffset1 = i
xpos1 = int(((w1 / 2)-i) * math.sin(i * .01 + (time.time()*0.3)) + (w1 / 2-i) + xoffset1)+ int(r1*1.5)
xpos2 = int(((w1 / 2)-i) * math.sin(i * .01 + (time.time()*0.3)) + (w1 / 2-i) + xoffset1+(h1 / 2))+ int(r1*1.5)#int(w1 // 2 + 100 * math.sin(i * .02 + time.time())*1.3)+(h1 / 2)+ int(r1*1.5)#-int(etc.knob1*(720-i))
xpos3 = int(((w1 / 2)-i) * math.sin(i * .01 + (time.time()*0.3)) + (w1 / 2-i) + xoffset1-+(h1 / 2))+ int(r1*1.5)#int(w1 // 2 + 100 * math.sin(i * .02 + time.time())*1.2)-(h1 / 2)+ int(r1*1.5)#-int(etc.knob1*(720-i))
rect2 = Rect(xpos2, i, radius2*1.5, radius2*1.5)
radius3=int(radius2+10+10 *(math.sin(i * (etc.knob2 * .2) + time.time())))
radius4=int(radius2+10+10 *(math.cos(i * (etc.knob1 * .2) + time.time())))
pygame.gfxdraw.circle(screen, xpos1, i, radius1, color)
pygame.gfxdraw.rectangle(screen, rect2, color)
pygame.gfxdraw.ellipse(screen, xpos3, i, radius3, radius4, color)
#pygame.gfxdraw.circle(screen, xpos3, i, radius2, color)
#pygame.gfxdraw.filled_circle(screen, xpos1, i, radius1, color)
#pygame.gfxdraw.filled_circle(screen, xpos2, i, radius2, color)
#pygame.gfxdraw.filled_circle(screen, xpos3, i, radius2, color)
#pygame.gfxdraw.circle(screen, xpos1, i, radius1, white )
#pygame.gfxdraw.circle(screen, xpos2, i, radius2, white )
#pygame.gfxdraw.circle(screen, xpos3, i, radius2, white )
| true | true |
f7f9ce88964d1bfaab8aa22f167b9de31a05863e | 7,399 | py | Python | tf_agents/bandits/environments/random_bandit_environment_test.py | Francis777/agents | 24e878a697be418307cfbff69724d86be767719d | [
"Apache-2.0"
] | 3,175 | 2017-09-08T18:28:32.000Z | 2022-03-31T01:32:22.000Z | tf_agents/bandits/environments/random_bandit_environment_test.py | Francis777/agents | 24e878a697be418307cfbff69724d86be767719d | [
"Apache-2.0"
] | 703 | 2017-09-18T05:51:57.000Z | 2022-03-31T17:37:50.000Z | tf_agents/bandits/environments/random_bandit_environment_test.py | Francis777/agents | 24e878a697be418307cfbff69724d86be767719d | [
"Apache-2.0"
] | 844 | 2017-09-08T23:28:57.000Z | 2022-03-30T09:29:32.000Z | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.bandits.environments.bandit_tf_environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
from tf_agents.bandits.environments import random_bandit_environment
from tf_agents.specs import tensor_spec
tfd = tfp.distributions
def get_gaussian_random_environment(
observation_shape, action_shape, batch_size):
"""Returns a RandomBanditEnvironment with Gaussian observation and reward."""
overall_shape = [batch_size] + observation_shape
observation_distribution = tfd.Independent(
tfd.Normal(loc=tf.zeros(overall_shape), scale=tf.ones(overall_shape)))
reward_distribution = tfd.Normal(
loc=tf.zeros(batch_size), scale=tf.ones(batch_size))
action_spec = tensor_spec.TensorSpec(shape=action_shape, dtype=tf.float32)
return random_bandit_environment.RandomBanditEnvironment(
observation_distribution,
reward_distribution,
action_spec)
class RandomBanditEnvironmentTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
dict(overall_observation_shape=[3, 4, 5, 6],
batch_dims=2),
dict(overall_observation_shape=[3, 3, 3, 3],
batch_dims=0),
)
def testInvalidObservationBatchShape(
self, overall_observation_shape, batch_dims):
observation_distribution = tfd.Independent(
tfd.Normal(tf.zeros(overall_observation_shape),
tf.ones(overall_observation_shape)),
reinterpreted_batch_ndims=batch_dims)
reward_distribution = tfd.Normal(tf.zeros(overall_observation_shape[0]),
tf.ones(overall_observation_shape[0]))
with self.assertRaisesRegexp(
ValueError,
'`observation_distribution` must have batch shape with length 1'):
random_bandit_environment.RandomBanditEnvironment(
observation_distribution, reward_distribution)
@parameterized.parameters(
dict(overall_reward_shape=[3, 4, 5, 6],
batch_dims=2),
dict(overall_reward_shape=[4, 5, 6],
batch_dims=0),
)
def testInvalidRewardBatchShape(
self, overall_reward_shape, batch_dims):
observation_distribution = tfd.Normal(
tf.zeros(overall_reward_shape[0]),
tf.ones(overall_reward_shape[0]))
reward_distribution = tfd.Independent(
tfd.Normal(tf.zeros(overall_reward_shape),
tf.ones(overall_reward_shape)),
reinterpreted_batch_ndims=batch_dims)
with self.assertRaisesRegexp(
ValueError,
'`reward_distribution` must have batch shape with length 1'):
random_bandit_environment.RandomBanditEnvironment(
observation_distribution, reward_distribution)
@parameterized.parameters(
dict(overall_reward_shape=[3, 4, 5, 6]),
dict(overall_reward_shape=[4, 5, 6]),
)
def testInvalidRewardEventShape(self, overall_reward_shape):
observation_distribution = tfd.Normal(
tf.zeros(overall_reward_shape[0]),
tf.ones(overall_reward_shape[0]))
reward_distribution = tfd.Independent(
tfd.Normal(tf.zeros(overall_reward_shape),
tf.ones(overall_reward_shape)))
with self.assertRaisesRegexp(
ValueError, '`reward_distribution` must have event_shape ()'):
random_bandit_environment.RandomBanditEnvironment(
observation_distribution, reward_distribution)
@parameterized.parameters(
dict(overall_observation_shape=[4, 5, 6],
overall_reward_shape=[3]),
dict(overall_observation_shape=[3],
overall_reward_shape=[1]),
)
def testMismatchedBatchShape(
self, overall_observation_shape, overall_reward_shape):
observation_distribution = tfd.Independent(
tfd.Normal(tf.zeros(overall_observation_shape),
tf.ones(overall_observation_shape)))
reward_distribution = tfd.Independent(
tfd.Normal(tf.zeros(overall_reward_shape),
tf.ones(overall_reward_shape)))
with self.assertRaisesRegexp(
ValueError,
'`reward_distribution` and `observation_distribution` must have the '
'same batch shape'):
random_bandit_environment.RandomBanditEnvironment(
observation_distribution, reward_distribution)
@parameterized.named_parameters(
dict(testcase_name='_observation_[]_action_[]_batch_1',
observation_shape=[],
action_shape=[],
batch_size=1),
dict(testcase_name='_observation_[3, 4, 5, 6]_action_[2, 3, 4]_batch_32',
observation_shape=[3, 4, 5, 6],
action_shape=[2, 3, 4],
batch_size=32),
)
def testObservationAndRewardShapes(
self, observation_shape, action_shape, batch_size):
"""Exercise `reset` and `step`. Ensure correct shapes are returned."""
env = get_gaussian_random_environment(
observation_shape, action_shape, batch_size)
observation = env.reset().observation
reward = env.step(tf.zeros(batch_size)).reward
expected_observation_shape = np.array([batch_size] + observation_shape)
expected_reward_shape = np.array([batch_size])
self.assertAllEqual(
expected_observation_shape, self.evaluate(tf.shape(observation)))
self.assertAllEqual(
expected_reward_shape, self.evaluate(tf.shape(reward)))
@parameterized.named_parameters(
dict(testcase_name='_observation_[]_action_[]_batch_1',
observation_shape=[],
action_shape=[],
batch_size=1,
seed=12345),
dict(testcase_name='_observation_[3, 4, 5, 6]_action_[2, 3, 4]_batch_32',
observation_shape=[3, 4, 5, 6],
action_shape=[2, 3, 4],
batch_size=32,
seed=98765),
)
def testObservationAndRewardsVary(
self, observation_shape, action_shape, batch_size, seed):
"""Ensure that observations and rewards change in consecutive calls."""
tf.compat.v1.set_random_seed(seed)
env = get_gaussian_random_environment(
observation_shape, action_shape, batch_size)
observation0 = env.reset().observation
reward0 = env.step(tf.zeros([batch_size] + action_shape)).reward
observation0 = self.evaluate(observation0)
reward0 = self.evaluate(reward0)
observation1 = env.reset().observation
reward1 = env.step(tf.zeros([batch_size] + action_shape)).reward
self.evaluate(observation1)
self.evaluate(reward1)
self.assertNotAllClose(observation0, observation1)
self.assertNotAllClose(reward0, reward1)
if __name__ == '__main__':
tf.test.main()
| 39.148148 | 79 | 0.711177 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.bandits.environments import random_bandit_environment
from tf_agents.specs import tensor_spec
tfd = tfp.distributions
def get_gaussian_random_environment(
observation_shape, action_shape, batch_size):
overall_shape = [batch_size] + observation_shape
observation_distribution = tfd.Independent(
tfd.Normal(loc=tf.zeros(overall_shape), scale=tf.ones(overall_shape)))
reward_distribution = tfd.Normal(
loc=tf.zeros(batch_size), scale=tf.ones(batch_size))
action_spec = tensor_spec.TensorSpec(shape=action_shape, dtype=tf.float32)
return random_bandit_environment.RandomBanditEnvironment(
observation_distribution,
reward_distribution,
action_spec)
class RandomBanditEnvironmentTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
dict(overall_observation_shape=[3, 4, 5, 6],
batch_dims=2),
dict(overall_observation_shape=[3, 3, 3, 3],
batch_dims=0),
)
def testInvalidObservationBatchShape(
self, overall_observation_shape, batch_dims):
observation_distribution = tfd.Independent(
tfd.Normal(tf.zeros(overall_observation_shape),
tf.ones(overall_observation_shape)),
reinterpreted_batch_ndims=batch_dims)
reward_distribution = tfd.Normal(tf.zeros(overall_observation_shape[0]),
tf.ones(overall_observation_shape[0]))
with self.assertRaisesRegexp(
ValueError,
'`observation_distribution` must have batch shape with length 1'):
random_bandit_environment.RandomBanditEnvironment(
observation_distribution, reward_distribution)
@parameterized.parameters(
dict(overall_reward_shape=[3, 4, 5, 6],
batch_dims=2),
dict(overall_reward_shape=[4, 5, 6],
batch_dims=0),
)
def testInvalidRewardBatchShape(
self, overall_reward_shape, batch_dims):
observation_distribution = tfd.Normal(
tf.zeros(overall_reward_shape[0]),
tf.ones(overall_reward_shape[0]))
reward_distribution = tfd.Independent(
tfd.Normal(tf.zeros(overall_reward_shape),
tf.ones(overall_reward_shape)),
reinterpreted_batch_ndims=batch_dims)
with self.assertRaisesRegexp(
ValueError,
'`reward_distribution` must have batch shape with length 1'):
random_bandit_environment.RandomBanditEnvironment(
observation_distribution, reward_distribution)
@parameterized.parameters(
dict(overall_reward_shape=[3, 4, 5, 6]),
dict(overall_reward_shape=[4, 5, 6]),
)
def testInvalidRewardEventShape(self, overall_reward_shape):
observation_distribution = tfd.Normal(
tf.zeros(overall_reward_shape[0]),
tf.ones(overall_reward_shape[0]))
reward_distribution = tfd.Independent(
tfd.Normal(tf.zeros(overall_reward_shape),
tf.ones(overall_reward_shape)))
with self.assertRaisesRegexp(
ValueError, '`reward_distribution` must have event_shape ()'):
random_bandit_environment.RandomBanditEnvironment(
observation_distribution, reward_distribution)
@parameterized.parameters(
dict(overall_observation_shape=[4, 5, 6],
overall_reward_shape=[3]),
dict(overall_observation_shape=[3],
overall_reward_shape=[1]),
)
def testMismatchedBatchShape(
self, overall_observation_shape, overall_reward_shape):
observation_distribution = tfd.Independent(
tfd.Normal(tf.zeros(overall_observation_shape),
tf.ones(overall_observation_shape)))
reward_distribution = tfd.Independent(
tfd.Normal(tf.zeros(overall_reward_shape),
tf.ones(overall_reward_shape)))
with self.assertRaisesRegexp(
ValueError,
'`reward_distribution` and `observation_distribution` must have the '
'same batch shape'):
random_bandit_environment.RandomBanditEnvironment(
observation_distribution, reward_distribution)
@parameterized.named_parameters(
dict(testcase_name='_observation_[]_action_[]_batch_1',
observation_shape=[],
action_shape=[],
batch_size=1),
dict(testcase_name='_observation_[3, 4, 5, 6]_action_[2, 3, 4]_batch_32',
observation_shape=[3, 4, 5, 6],
action_shape=[2, 3, 4],
batch_size=32),
)
def testObservationAndRewardShapes(
self, observation_shape, action_shape, batch_size):
env = get_gaussian_random_environment(
observation_shape, action_shape, batch_size)
observation = env.reset().observation
reward = env.step(tf.zeros(batch_size)).reward
expected_observation_shape = np.array([batch_size] + observation_shape)
expected_reward_shape = np.array([batch_size])
self.assertAllEqual(
expected_observation_shape, self.evaluate(tf.shape(observation)))
self.assertAllEqual(
expected_reward_shape, self.evaluate(tf.shape(reward)))
@parameterized.named_parameters(
dict(testcase_name='_observation_[]_action_[]_batch_1',
observation_shape=[],
action_shape=[],
batch_size=1,
seed=12345),
dict(testcase_name='_observation_[3, 4, 5, 6]_action_[2, 3, 4]_batch_32',
observation_shape=[3, 4, 5, 6],
action_shape=[2, 3, 4],
batch_size=32,
seed=98765),
)
def testObservationAndRewardsVary(
self, observation_shape, action_shape, batch_size, seed):
tf.compat.v1.set_random_seed(seed)
env = get_gaussian_random_environment(
observation_shape, action_shape, batch_size)
observation0 = env.reset().observation
reward0 = env.step(tf.zeros([batch_size] + action_shape)).reward
observation0 = self.evaluate(observation0)
reward0 = self.evaluate(reward0)
observation1 = env.reset().observation
reward1 = env.step(tf.zeros([batch_size] + action_shape)).reward
self.evaluate(observation1)
self.evaluate(reward1)
self.assertNotAllClose(observation0, observation1)
self.assertNotAllClose(reward0, reward1)
if __name__ == '__main__':
tf.test.main()
| true | true |
f7f9cf42a7755e62180d3792cd9799d2205644b7 | 1,188 | py | Python | tests/test_base_manager.py | kibernick/pycontacts | 9ec7653cdea582b242a6d5f314b4d0c4bb92dd39 | [
"MIT"
] | null | null | null | tests/test_base_manager.py | kibernick/pycontacts | 9ec7653cdea582b242a6d5f314b4d0c4bb92dd39 | [
"MIT"
] | null | null | null | tests/test_base_manager.py | kibernick/pycontacts | 9ec7653cdea582b242a6d5f314b4d0c4bb92dd39 | [
"MIT"
] | null | null | null | from pycontacts.managers import BaseManager
from conftest import ExtenedBaseModel
class ExampleManager(BaseManager):
cls = ExtenedBaseModel
def test_new_manager(address_book):
examples = ExampleManager(address_book)
assert examples.book == address_book
def test_manager_create(address_book):
examples = ExampleManager(address_book)
empty_example = examples.create()
assert isinstance(empty_example, ExtenedBaseModel)
assert not empty_example['test_set']
assert not empty_example['test_not_set']
def test_manager_filter(address_book):
examples = ExampleManager(address_book)
example = examples.create(test_set="Jack")
example.save()
results = examples.filter(test_set="Jack")
assert results.values()[0]['test_set'] == "Jack"
def test_manager_convert_results(address_book):
examples = ExampleManager(address_book)
example = examples.create(test_set="Jack")
example.save()
results = examples.filter(test_set="Jack")
example_objs = examples.convert_results(results)
assert len(example_objs) == 1
assert isinstance(example_objs[0], ExtenedBaseModel)
assert example_objs[0]['test_set'] == "Jack"
| 27.627907 | 56 | 0.750842 | from pycontacts.managers import BaseManager
from conftest import ExtenedBaseModel
class ExampleManager(BaseManager):
cls = ExtenedBaseModel
def test_new_manager(address_book):
examples = ExampleManager(address_book)
assert examples.book == address_book
def test_manager_create(address_book):
examples = ExampleManager(address_book)
empty_example = examples.create()
assert isinstance(empty_example, ExtenedBaseModel)
assert not empty_example['test_set']
assert not empty_example['test_not_set']
def test_manager_filter(address_book):
examples = ExampleManager(address_book)
example = examples.create(test_set="Jack")
example.save()
results = examples.filter(test_set="Jack")
assert results.values()[0]['test_set'] == "Jack"
def test_manager_convert_results(address_book):
examples = ExampleManager(address_book)
example = examples.create(test_set="Jack")
example.save()
results = examples.filter(test_set="Jack")
example_objs = examples.convert_results(results)
assert len(example_objs) == 1
assert isinstance(example_objs[0], ExtenedBaseModel)
assert example_objs[0]['test_set'] == "Jack"
| true | true |
f7f9cfc9da0438adb7aab54ef70197fd1fc1f1c8 | 911 | py | Python | Python/Basics-Sentdex/1. Basics with Sentdex/Tutorial 13 - Bringing things together_Iterator_Iterable/Iteratable/iterator_iterable.py | yorks-dev/Learning-Software-Developement | 4733f782705dda04cc790b0e16297241c23b2504 | [
"MIT"
] | null | null | null | Python/Basics-Sentdex/1. Basics with Sentdex/Tutorial 13 - Bringing things together_Iterator_Iterable/Iteratable/iterator_iterable.py | yorks-dev/Learning-Software-Developement | 4733f782705dda04cc790b0e16297241c23b2504 | [
"MIT"
] | null | null | null | Python/Basics-Sentdex/1. Basics with Sentdex/Tutorial 13 - Bringing things together_Iterator_Iterable/Iteratable/iterator_iterable.py | yorks-dev/Learning-Software-Developement | 4733f782705dda04cc790b0e16297241c23b2504 | [
"MIT"
] | null | null | null | # iterable : Something that we can iterate over.
# iterator : a special object with a next() method
import itertools
x = [1, 2, 3, 4] # ... Iterable not an iterator. So we cant use the next()
# for i in x:
# print(i)
n = itertools.cycle(x) # ... Iterator .. also an iterable ..
# basically a infinite cycle of (1-->4,1 --> 4 ...)
# for i in n:
# print(i) #Prints an infinite loop
'''
print(next(n))
print(next(n))
print(next(n))
print(next(n))
print(next(n))
print(next(n))
print(next(n)) # Prints the value in cycles ...
print(next(n))
'''
y = iter(x)
next(y) # One step iterated
for i in y:
print(i) #Should print 2 3 4
# Now the iterator y is exhausted
for i in y:
print(i)
for i in y:
print(i) # These will not producre any output as the iterator is exhausted
player_num = itertools.cycle([[1,'X'], [2,'O']])
print(next(player_num)[0])
| 20.704545 | 80 | 0.608123 |
import itertools
x = [1, 2, 3, 4]
n = itertools.cycle(x)
for i in y:
print(i)
for i in y:
print(i)
for i in y:
print(i)
player_num = itertools.cycle([[1,'X'], [2,'O']])
print(next(player_num)[0])
| true | true |
f7f9cfd718181ae979f7da1ff0932b5301163d3d | 1,800 | py | Python | mla/base/base.py | Sanyambansal76/MLAlgorithms | c8d0083cde15b56d171d273c7870b87b0392f6c3 | [
"MIT"
] | 4 | 2019-03-07T22:19:57.000Z | 2021-05-12T22:09:18.000Z | mla/base/base.py | zhenghuangcheng/ML_Algorithms | 829c74cf7d79307fc6ca1d849e65b959fb10e5de | [
"MIT"
] | null | null | null | mla/base/base.py | zhenghuangcheng/ML_Algorithms | 829c74cf7d79307fc6ca1d849e65b959fb10e5de | [
"MIT"
] | 3 | 2018-03-22T15:53:21.000Z | 2020-09-22T11:24:59.000Z | import numpy as np
class BaseEstimator(object):
X = None
y = None
y_required = True
def _setup_input(self, X, y=None):
"""Ensure inputs to an estimator are in the expected format.
Ensures X and y are stored as numpy ndarrays by converting from an
array-like object if necessary. Enables estimators to define whether
they require a set of y target values or not with y_required, e.g.
kmeans clustering requires no target labels and is fit against only X.
Parameters
----------
X : array-like
Feature dataset.
y : array-like
Target values. By default is required, but if y_required = false
then may be omitted.
"""
if not isinstance(X, np.ndarray):
X = np.array(X)
if X.size == 0:
raise ValueError('Number of features must be > 0')
if X.ndim == 1:
self.n_samples, self.n_features = 1, X.shape
else:
self.n_samples, self.n_features = X.shape[0], np.prod(X.shape[1:])
self.X = X
if self.y_required:
if y is None:
raise ValueError('Missed required argument y')
if not isinstance(y, np.ndarray):
y = np.array(y)
if y.size == 0:
raise ValueError('Number of targets must be > 0')
self.y = y
def fit(self, X, y=None):
self._setup_input(X, y)
def predict(self, X=None):
if not isinstance(X, np.ndarray):
X = np.array(X)
if self.X is not None:
return self._predict(X)
else:
raise ValueError('You must call `fit` before `predict`')
def _predict(self, X=None):
raise NotImplementedError()
| 28.125 | 78 | 0.558889 | import numpy as np
class BaseEstimator(object):
X = None
y = None
y_required = True
def _setup_input(self, X, y=None):
if not isinstance(X, np.ndarray):
X = np.array(X)
if X.size == 0:
raise ValueError('Number of features must be > 0')
if X.ndim == 1:
self.n_samples, self.n_features = 1, X.shape
else:
self.n_samples, self.n_features = X.shape[0], np.prod(X.shape[1:])
self.X = X
if self.y_required:
if y is None:
raise ValueError('Missed required argument y')
if not isinstance(y, np.ndarray):
y = np.array(y)
if y.size == 0:
raise ValueError('Number of targets must be > 0')
self.y = y
def fit(self, X, y=None):
self._setup_input(X, y)
def predict(self, X=None):
if not isinstance(X, np.ndarray):
X = np.array(X)
if self.X is not None:
return self._predict(X)
else:
raise ValueError('You must call `fit` before `predict`')
def _predict(self, X=None):
raise NotImplementedError()
| true | true |
f7f9d0416e0fd9cfb6b30b27275071d81ff69cf4 | 262 | py | Python | cpdb/search_mobile/serializers/trr_serializer.py | invinst/CPDBv2_backend | b4e96d620ff7a437500f525f7e911651e4a18ef9 | [
"Apache-2.0"
] | 25 | 2018-07-20T22:31:40.000Z | 2021-07-15T16:58:41.000Z | cpdb/search_mobile/serializers/trr_serializer.py | invinst/CPDBv2_backend | b4e96d620ff7a437500f525f7e911651e4a18ef9 | [
"Apache-2.0"
] | 13 | 2018-06-18T23:08:47.000Z | 2022-02-10T07:38:25.000Z | cpdb/search_mobile/serializers/trr_serializer.py | invinst/CPDBv2_backend | b4e96d620ff7a437500f525f7e911651e4a18ef9 | [
"Apache-2.0"
] | 6 | 2018-05-17T21:59:43.000Z | 2020-11-17T00:30:26.000Z | from rest_framework import serializers
from shared.serializer import NoNullSerializer
class TRRSerializer(NoNullSerializer):
id = serializers.IntegerField()
type = serializers.SerializerMethodField()
def get_type(self, obj):
return 'TRR'
| 21.833333 | 46 | 0.759542 | from rest_framework import serializers
from shared.serializer import NoNullSerializer
class TRRSerializer(NoNullSerializer):
id = serializers.IntegerField()
type = serializers.SerializerMethodField()
def get_type(self, obj):
return 'TRR'
| true | true |
f7f9d0cf5780e1bbe437f2475539d7875a6c09b9 | 2,768 | py | Python | var/spack/repos/builtin/packages/ninja/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/ninja/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/ninja/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
from spack.package import *
class Ninja(Package):
"""Ninja is a small build system with a focus on speed. It differs from
other build systems in two major respects: it is designed to have its input
files generated by a higher-level build system, and it is designed to run
builds as fast as possible."""
homepage = "https://ninja-build.org/"
url = "https://github.com/ninja-build/ninja/archive/v1.7.2.tar.gz"
git = "https://github.com/ninja-build/ninja.git"
tags = ['build-tools', 'e4s']
executables = ['^ninja$']
version('kitware', branch='features-for-fortran', git='https://github.com/Kitware/ninja.git')
version('master', branch='master')
version('1.10.2', sha256='ce35865411f0490368a8fc383f29071de6690cbadc27704734978221f25e2bed')
version('1.10.1', sha256='a6b6f7ac360d4aabd54e299cc1d8fa7b234cd81b9401693da21221c62569a23e')
version('1.10.0', sha256='3810318b08489435f8efc19c05525e80a993af5a55baa0dfeae0465a9d45f99f')
version('1.9.0', sha256='5d7ec75828f8d3fd1a0c2f31b5b0cea780cdfe1031359228c428c1a48bfcd5b9')
version('1.8.2', sha256='86b8700c3d0880c2b44c2ff67ce42774aaf8c28cbf57725cb881569288c1c6f4')
version('1.7.2', sha256='2edda0a5421ace3cf428309211270772dd35a91af60c96f93f90df6bc41b16d9')
version('1.6.0', sha256='b43e88fb068fe4d92a3dfd9eb4d19755dae5c33415db2e9b7b61b4659009cde7')
depends_on('python', type='build')
phases = ['configure', 'install']
@classmethod
def determine_version(cls, exe):
output = Executable(exe)('--version', output=str, error=str)
return output.strip()
def configure(self, spec, prefix):
python('configure.py', '--bootstrap')
@run_after('configure')
@on_package_attributes(run_tests=True)
def configure_test(self):
ninja = Executable('./ninja')
ninja('-j{0}'.format(make_jobs), 'ninja_test')
ninja_test = Executable('./ninja_test')
ninja_test()
def setup_run_environment(self, env):
env.prepend_path('PYTHONPATH', self.prefix.misc)
def install(self, spec, prefix):
mkdir(prefix.bin)
name = 'ninja'
if sys.platform == 'win32':
name = name + '.exe'
install(name, prefix.bin)
install_tree('misc', prefix.misc)
if sys.platform == "win32":
return
# Some distros like Fedora install a 'ninja-build' executable
# instead of 'ninja'. Install both for uniformity.
with working_dir(prefix.bin):
symlink('ninja', 'ninja-build')
| 38.985915 | 97 | 0.689306 |
import sys
from spack.package import *
class Ninja(Package):
homepage = "https://ninja-build.org/"
url = "https://github.com/ninja-build/ninja/archive/v1.7.2.tar.gz"
git = "https://github.com/ninja-build/ninja.git"
tags = ['build-tools', 'e4s']
executables = ['^ninja$']
version('kitware', branch='features-for-fortran', git='https://github.com/Kitware/ninja.git')
version('master', branch='master')
version('1.10.2', sha256='ce35865411f0490368a8fc383f29071de6690cbadc27704734978221f25e2bed')
version('1.10.1', sha256='a6b6f7ac360d4aabd54e299cc1d8fa7b234cd81b9401693da21221c62569a23e')
version('1.10.0', sha256='3810318b08489435f8efc19c05525e80a993af5a55baa0dfeae0465a9d45f99f')
version('1.9.0', sha256='5d7ec75828f8d3fd1a0c2f31b5b0cea780cdfe1031359228c428c1a48bfcd5b9')
version('1.8.2', sha256='86b8700c3d0880c2b44c2ff67ce42774aaf8c28cbf57725cb881569288c1c6f4')
version('1.7.2', sha256='2edda0a5421ace3cf428309211270772dd35a91af60c96f93f90df6bc41b16d9')
version('1.6.0', sha256='b43e88fb068fe4d92a3dfd9eb4d19755dae5c33415db2e9b7b61b4659009cde7')
depends_on('python', type='build')
phases = ['configure', 'install']
@classmethod
def determine_version(cls, exe):
output = Executable(exe)('--version', output=str, error=str)
return output.strip()
def configure(self, spec, prefix):
python('configure.py', '--bootstrap')
@run_after('configure')
@on_package_attributes(run_tests=True)
def configure_test(self):
ninja = Executable('./ninja')
ninja('-j{0}'.format(make_jobs), 'ninja_test')
ninja_test = Executable('./ninja_test')
ninja_test()
def setup_run_environment(self, env):
env.prepend_path('PYTHONPATH', self.prefix.misc)
def install(self, spec, prefix):
mkdir(prefix.bin)
name = 'ninja'
if sys.platform == 'win32':
name = name + '.exe'
install(name, prefix.bin)
install_tree('misc', prefix.misc)
if sys.platform == "win32":
return
with working_dir(prefix.bin):
symlink('ninja', 'ninja-build')
| true | true |
f7f9d122199ead50578defaedb9ee52f4cbd91fd | 419 | py | Python | example/env.example.py | andrzejressel/tuya-iot-python-sdk | cbe628695b8adb966a333ff28f6f41653baa1e5f | [
"MIT"
] | null | null | null | example/env.example.py | andrzejressel/tuya-iot-python-sdk | cbe628695b8adb966a333ff28f6f41653baa1e5f | [
"MIT"
] | null | null | null | example/env.example.py | andrzejressel/tuya-iot-python-sdk | cbe628695b8adb966a333ff28f6f41653baa1e5f | [
"MIT"
] | null | null | null | # from tuya_iot.tuya_enums import *
# online
# ACCESS_ID = # your_access_id
# ACCESS_KEY = # your_access_key
# USERNAME = # your_username
# PASSWORD = # your_password
# ASSET_ID = # your_asset_id
# DEVICE_ID = # your_device_id
# COUNTRY_CODE = # your_country_code
# SCHEMA = "tuyaSmart"
# ENDPOINT = TuyaCloudOpenAPIEndpoint.EUROPE
# WS_ENDPOINT = TuyaCloudPulsarWSEndpoint.EUROPE
# WS_ENV = TuyaCloudPulsarTopic.PROD
| 27.933333 | 48 | 0.768496 | true | true | |
f7f9d157d6be28ec0a55bec3f9919e77734d4177 | 1,382 | py | Python | py_everything/search.py | Morgan-Phoenix/py_everything | a7bbaf19ee6007fcfbcfe9d03944ef621b9f9ac9 | [
"MIT"
] | null | null | null | py_everything/search.py | Morgan-Phoenix/py_everything | a7bbaf19ee6007fcfbcfe9d03944ef621b9f9ac9 | [
"MIT"
] | null | null | null | py_everything/search.py | Morgan-Phoenix/py_everything | a7bbaf19ee6007fcfbcfe9d03944ef621b9f9ac9 | [
"MIT"
] | null | null | null | from typing import List, Any
import os
def searchFiles(keyword: str , path: str):
"""Search for files"""
files: List[Any] = []
for root, dirs, files in os.walk(path):
for file in files:
if keyword in file:
files.append(root + '\\' + str(file))
return files
def searchDirs(keyword: str, path: str):
"""Search for folders"""
folders = []
for root, dirs, files in os.walk(path):
for dir in dirs:
if keyword in dir:
folders.append(root + '\\' + str(dir))
return folders
def searchExts(ext: str, path: str):
"""Search for extensions"""
files: List[Any] = []
for root, dirs, files in os.walk(path):
for file in files:
if file.endswith(ext):
files.append(root + '\\' + str(file))
return files
def searchList(listOfTerms, query: str, filter='in'):
"""Search within a list"""
matches = []
for item in listOfTerms:
if filter == 'in' and query in item:
matches.append(item)
elif filter == 'start' and item.startswith(query):
matches.append(item)
elif filter == 'end' and item.endswith(query):
matches.append(item)
elif filter == 'exact' and item == query:
matches.append(item)
return matches
| 30.711111 | 59 | 0.547033 | from typing import List, Any
import os
def searchFiles(keyword: str , path: str):
files: List[Any] = []
for root, dirs, files in os.walk(path):
for file in files:
if keyword in file:
files.append(root + '\\' + str(file))
return files
def searchDirs(keyword: str, path: str):
folders = []
for root, dirs, files in os.walk(path):
for dir in dirs:
if keyword in dir:
folders.append(root + '\\' + str(dir))
return folders
def searchExts(ext: str, path: str):
files: List[Any] = []
for root, dirs, files in os.walk(path):
for file in files:
if file.endswith(ext):
files.append(root + '\\' + str(file))
return files
def searchList(listOfTerms, query: str, filter='in'):
matches = []
for item in listOfTerms:
if filter == 'in' and query in item:
matches.append(item)
elif filter == 'start' and item.startswith(query):
matches.append(item)
elif filter == 'end' and item.endswith(query):
matches.append(item)
elif filter == 'exact' and item == query:
matches.append(item)
return matches
| true | true |
f7f9d249528f221b9eef7b917caa3cd915feb5ff | 5,589 | py | Python | wxBot-v4.py | weenjoylife/wxbot_jyb | b445f319d924aac93836b648a1855acc432e80b5 | [
"Apache-2.0"
] | null | null | null | wxBot-v4.py | weenjoylife/wxbot_jyb | b445f319d924aac93836b648a1855acc432e80b5 | [
"Apache-2.0"
] | null | null | null | wxBot-v4.py | weenjoylife/wxbot_jyb | b445f319d924aac93836b648a1855acc432e80b5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
from wxbot import *
import ConfigParser
import json
from collections import Counter
import time
import re
"""
20180720编写
版本说明:
1.该版本为第一个生产环境版本
2.优化了微信端的回复显示格式:只显示月-日,不再显示年份
以下无内容
"""
# 报单微信Bot
class BDWXBot(WXBot):
def __init__(self):
WXBot.__init__(self)
self.tuling_key = ""
self.robot_switch = True
try:
cf = ConfigParser.ConfigParser()
cf.read('conf.ini')
self.tuling_key = cf.get('main', 'key')
except Exception:
pass
print 'tuling_key:', self.tuling_key
def tuling_auto_reply(self, uid, msg):
if self.tuling_key:
url = "http://www.tuling123.com/openapi/api"
user_id = uid.replace('@', '')[:30]
body = {'key': self.tuling_key, 'info': msg.encode('utf8'), 'userid': user_id}
r = requests.post(url, data=body)
respond = json.loads(r.text)
result = ''
if respond['code'] == 100000:
result = respond['text'].replace('<br>', ' ')
result = result.replace(u'\xa0', u' ')
elif respond['code'] == 200000:
result = respond['url']
elif respond['code'] == 302000:
for k in respond['list']:
result = result + u"【" + k['source'] + u"】 " +\
k['article'] + "\t" + k['detailurl'] + "\n"
else:
result = respond['text'].replace('<br>', ' ')
result = result.replace(u'\xa0', u' ')
print ' ROBOT:', result
return result
else:
return u"知道啦"
def auto_switch(self, msg):
msg_data = msg['content']['data']
stop_cmd = [u'退下', u'走开', u'关闭', u'关掉', u'休息', u'滚开']
start_cmd = [u'出来', u'启动', u'工作']
if self.robot_switch:
for i in stop_cmd:
if i == msg_data:
self.robot_switch = False
self.send_msg_by_uid(u'[Robot]' + u'机器人已关闭!', msg['to_user_id'])
else:
for i in start_cmd:
if i == msg_data:
self.robot_switch = True
self.send_msg_by_uid(u'[Robot]' + u'机器人已开启!', msg['to_user_id'])
def handle_msg_all(self, msg):
msg_time = time.strftime('%m-%d %H:%M', time.localtime())
data_list = msg['content']['data'].split()
# 处理群发的文本消息
if msg['msg_type_id'] == 3 and msg['content']['type'] == 0: # group text message
msg_content = msg['content']['data']
msg_content_list = msg_content.split()
pattern = re.compile(u"[0-9]+")
res = re.findall(pattern, msg_content_list[0])
print(res)
print msg_content_list[0]
print type(msg_content_list[0])
# 汇总
if msg['content']['data'] == u'汇总':
mydict = {}
baodan_records = ''
with open('baodan.txt', 'r') as file:
#baodan_records = file.read() 这一行read了,会导致for line in file.readlines() 没有数据
#print(baodan_records)
for line in file.readlines():
user_name = line.split()[0]
#print(user_name)
if(user_name not in mydict.keys()):
mydict[user_name] = float(line.split()[3])
else:
mydict[user_name] = mydict[user_name] + float(line.split()[3])
with open('qingshu.txt', 'a+') as file:
all_records = ''
with open('baodan.txt', 'r') as a_file:
all_records = a_file.read()
file.write(all_records)
file.write('----')
file.write('\n')
for k,v in mydict.items():
record_str = k + ' ' + str(v)
file.write('汇总: ' + record_str)
file.write('\n')
file.write('----'+'\n')
with open('baodan.txt', 'w+') as file:
file.write('')
with open('qingshu.txt', 'r+') as file:
zhangdan = file.read()
new_zhangdan = zhangdan.split('----')[-2]
new_zhangdan = new_zhangdan[1:]
self.send_msg_by_uid(new_zhangdan, msg['user']['id'])
# 清数
elif msg['content']['data'] == u'清数':
reply = u'已完成清数,小伙伴请核对记录噢'
self.send_msg_by_uid(reply, msg['user']['id'])
elif msg_content_list[0] == u'云闪付':
user = msg['content']['user']['name']
msg_date = msg_time
reply = ''
data_list = data_list[1:]
for index,bd_data in enumerate(data_list):
money = bd_data
if(index == len(data_list) - 1):
record = user + '-' + u'云闪付' + ' ' + msg_date + ' ' + money
else:
record = user + '-' + u'云闪付' + ' ' + msg_date + ' ' + money+ '\n'
reply = reply + record
self.send_msg_by_uid(u'成功录入: \n' + reply, msg['user']['id'])
with open('baodan.txt', 'a+') as file:
file.write(reply.encode(encoding='UTF-8'))
file.write('\n')
elif len(res) == 0:
reply = u'请检查输入'
self.send_msg_by_uid(reply, msg['user']['id'])
# 录入报单
else:
user = msg['content']['user']['name']
msg_date = msg_time
reply = ''
for index,bd_data in enumerate(data_list):
money = bd_data
if(index == len(data_list) - 1):
record = user + ' ' + msg_date + ' ' + money
else:
record = user + ' ' + msg_date + ' ' + money+ '\n'
reply = reply + record
self.send_msg_by_uid(u'成功录入: \n' + reply, msg['user']['id'])
with open('baodan.txt', 'a+') as file:
file.write(reply.encode(encoding='UTF-8'))
file.write('\n')
def main():
bot = BDWXBot()
bot.DEBUG = True
bot.conf['qr'] = 'png'
bot.run()
if __name__ == '__main__':
main()
| 28.227273 | 90 | 0.528538 |
from wxbot import *
import ConfigParser
import json
from collections import Counter
import time
import re
"""
20180720编写
版本说明:
1.该版本为第一个生产环境版本
2.优化了微信端的回复显示格式:只显示月-日,不再显示年份
以下无内容
"""
class BDWXBot(WXBot):
def __init__(self):
WXBot.__init__(self)
self.tuling_key = ""
self.robot_switch = True
try:
cf = ConfigParser.ConfigParser()
cf.read('conf.ini')
self.tuling_key = cf.get('main', 'key')
except Exception:
pass
print 'tuling_key:', self.tuling_key
def tuling_auto_reply(self, uid, msg):
if self.tuling_key:
url = "http://www.tuling123.com/openapi/api"
user_id = uid.replace('@', '')[:30]
body = {'key': self.tuling_key, 'info': msg.encode('utf8'), 'userid': user_id}
r = requests.post(url, data=body)
respond = json.loads(r.text)
result = ''
if respond['code'] == 100000:
result = respond['text'].replace('<br>', ' ')
result = result.replace(u'\xa0', u' ')
elif respond['code'] == 200000:
result = respond['url']
elif respond['code'] == 302000:
for k in respond['list']:
result = result + u"【" + k['source'] + u"】 " +\
k['article'] + "\t" + k['detailurl'] + "\n"
else:
result = respond['text'].replace('<br>', ' ')
result = result.replace(u'\xa0', u' ')
print ' ROBOT:', result
return result
else:
return u"知道啦"
def auto_switch(self, msg):
msg_data = msg['content']['data']
stop_cmd = [u'退下', u'走开', u'关闭', u'关掉', u'休息', u'滚开']
start_cmd = [u'出来', u'启动', u'工作']
if self.robot_switch:
for i in stop_cmd:
if i == msg_data:
self.robot_switch = False
self.send_msg_by_uid(u'[Robot]' + u'机器人已关闭!', msg['to_user_id'])
else:
for i in start_cmd:
if i == msg_data:
self.robot_switch = True
self.send_msg_by_uid(u'[Robot]' + u'机器人已开启!', msg['to_user_id'])
def handle_msg_all(self, msg):
msg_time = time.strftime('%m-%d %H:%M', time.localtime())
data_list = msg['content']['data'].split()
if msg['msg_type_id'] == 3 and msg['content']['type'] == 0:
msg_content = msg['content']['data']
msg_content_list = msg_content.split()
pattern = re.compile(u"[0-9]+")
res = re.findall(pattern, msg_content_list[0])
print(res)
print msg_content_list[0]
print type(msg_content_list[0])
if msg['content']['data'] == u'汇总':
mydict = {}
baodan_records = ''
with open('baodan.txt', 'r') as file:
for line in file.readlines():
user_name = line.split()[0]
if(user_name not in mydict.keys()):
mydict[user_name] = float(line.split()[3])
else:
mydict[user_name] = mydict[user_name] + float(line.split()[3])
with open('qingshu.txt', 'a+') as file:
all_records = ''
with open('baodan.txt', 'r') as a_file:
all_records = a_file.read()
file.write(all_records)
file.write('----')
file.write('\n')
for k,v in mydict.items():
record_str = k + ' ' + str(v)
file.write('汇总: ' + record_str)
file.write('\n')
file.write('----'+'\n')
with open('baodan.txt', 'w+') as file:
file.write('')
with open('qingshu.txt', 'r+') as file:
zhangdan = file.read()
new_zhangdan = zhangdan.split('----')[-2]
new_zhangdan = new_zhangdan[1:]
self.send_msg_by_uid(new_zhangdan, msg['user']['id'])
elif msg['content']['data'] == u'清数':
reply = u'已完成清数,小伙伴请核对记录噢'
self.send_msg_by_uid(reply, msg['user']['id'])
elif msg_content_list[0] == u'云闪付':
user = msg['content']['user']['name']
msg_date = msg_time
reply = ''
data_list = data_list[1:]
for index,bd_data in enumerate(data_list):
money = bd_data
if(index == len(data_list) - 1):
record = user + '-' + u'云闪付' + ' ' + msg_date + ' ' + money
else:
record = user + '-' + u'云闪付' + ' ' + msg_date + ' ' + money+ '\n'
reply = reply + record
self.send_msg_by_uid(u'成功录入: \n' + reply, msg['user']['id'])
with open('baodan.txt', 'a+') as file:
file.write(reply.encode(encoding='UTF-8'))
file.write('\n')
elif len(res) == 0:
reply = u'请检查输入'
self.send_msg_by_uid(reply, msg['user']['id'])
else:
user = msg['content']['user']['name']
msg_date = msg_time
reply = ''
for index,bd_data in enumerate(data_list):
money = bd_data
if(index == len(data_list) - 1):
record = user + ' ' + msg_date + ' ' + money
else:
record = user + ' ' + msg_date + ' ' + money+ '\n'
reply = reply + record
self.send_msg_by_uid(u'成功录入: \n' + reply, msg['user']['id'])
with open('baodan.txt', 'a+') as file:
file.write(reply.encode(encoding='UTF-8'))
file.write('\n')
def main():
bot = BDWXBot()
bot.DEBUG = True
bot.conf['qr'] = 'png'
bot.run()
if __name__ == '__main__':
main()
| false | true |
f7f9d2e5bccf9baf5c353c527b321faebbe25bd2 | 14,152 | py | Python | src/garage/experiment/local_tf_runner.py | lywong92/garage | 96cb8887fcae90531a645d540653010e7fe10fcc | [
"MIT"
] | 1 | 2020-01-05T14:57:43.000Z | 2020-01-05T14:57:43.000Z | src/garage/experiment/local_tf_runner.py | lywong92/garage | 96cb8887fcae90531a645d540653010e7fe10fcc | [
"MIT"
] | null | null | null | src/garage/experiment/local_tf_runner.py | lywong92/garage | 96cb8887fcae90531a645d540653010e7fe10fcc | [
"MIT"
] | null | null | null | """
The local runner for tensorflow algorithms.
A runner setup context for algorithms during initialization and
pipelines data between sampler and algorithm during training.
"""
import copy
import time
from types import SimpleNamespace
from dowel import logger, tabular
import tensorflow as tf
from garage.experiment import snapshotter
# Note: Optional module should be imported ad hoc to break circular dependency.
class LocalRunner:
"""This class implements a local runner for tensorflow algorithms.
A local runner provides a default tensorflow session using python context.
This is useful for those experiment components (e.g. policy) that require a
tensorflow session during construction.
Use Runner.setup(algo, env) to setup algorithm and environement for runner
and Runner.train() to start training.
Examples:
with LocalRunner() as runner:
env = gym.make('CartPole-v1')
policy = CategoricalMLPPolicy(
env_spec=env.spec,
hidden_sizes=(32, 32))
algo = TRPO(
env=env,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
max_kl_step=0.01)
runner.setup(algo, env)
runner.train(n_epochs=100, batch_size=4000)
"""
def __init__(self, sess=None, max_cpus=1):
"""Create a new local runner.
Args:
max_cpus(int): The maximum number of parallel sampler workers.
sess(tf.Session): An optional tensorflow session.
A new session will be created immediately if not provided.
Note:
The local runner will set up a joblib task pool of size max_cpus
possibly later used by BatchSampler. If BatchSampler is not used,
the processes in the pool will remain dormant.
This setup is required to use tensorflow in a multiprocess
environment before a tensorflow session is created
because tensorflow is not fork-safe.
See https://github.com/tensorflow/tensorflow/issues/2448.
"""
if max_cpus > 1:
from garage.sampler import singleton_pool
singleton_pool.initialize(max_cpus)
self.sess = sess or tf.Session()
self.sess_entered = False
self.has_setup = False
self.plot = False
self.setup_args = None
self.train_args = None
def __enter__(self):
"""Set self.sess as the default session.
Returns:
This local runner.
"""
if tf.get_default_session() is not self.sess:
self.sess.__enter__()
self.sess_entered = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Leave session."""
if tf.get_default_session() is self.sess and self.sess_entered:
self.sess.__exit__(exc_type, exc_val, exc_tb)
self.sess_entered = False
def setup(self, algo, env, sampler_cls=None, sampler_args=None):
"""Set up runner for algorithm and environment.
This method saves algo and env within runner and creates a sampler.
Note:
After setup() is called all variables in session should have been
initialized. setup() respects existing values in session so
policy weights can be loaded before setup().
Args:
algo (garage.np.algos.RLAlgorithm): An algorithm instance.
env (garage.envs.GarageEnv): An environement instance.
sampler_cls (garage.sampler.Sampler): A sampler class.
sampler_args (dict): Arguments to be passed to sampler constructor.
"""
self.algo = algo
self.env = env
self.policy = self.algo.policy
if sampler_args is None:
sampler_args = {}
if sampler_cls is None:
from garage.tf.algos.batch_polopt import BatchPolopt
if isinstance(algo, BatchPolopt):
if self.policy.vectorized:
from garage.tf.samplers import OnPolicyVectorizedSampler
sampler_cls = OnPolicyVectorizedSampler
else:
from garage.tf.samplers import BatchSampler
sampler_cls = BatchSampler
else:
from garage.tf.samplers import OffPolicyVectorizedSampler
sampler_cls = OffPolicyVectorizedSampler
self.sampler = sampler_cls(algo, env, **sampler_args)
self.initialize_tf_vars()
logger.log(self.sess.graph)
self.has_setup = True
self.setup_args = SimpleNamespace(
sampler_cls=sampler_cls, sampler_args=sampler_args)
def initialize_tf_vars(self):
"""Initialize all uninitialized variables in session."""
with tf.name_scope('initialize_tf_vars'):
uninited_set = [
e.decode()
for e in self.sess.run(tf.report_uninitialized_variables())
]
self.sess.run(
tf.variables_initializer([
v for v in tf.global_variables()
if v.name.split(':')[0] in uninited_set
]))
def _start_worker(self):
"""Start Plotter and Sampler workers."""
self.sampler.start_worker()
if self.plot:
from garage.tf.plotter import Plotter
self.plotter = Plotter(self.env, self.policy)
self.plotter.start()
def _shutdown_worker(self):
"""Shutdown Plotter and Sampler workers."""
self.sampler.shutdown_worker()
if self.plot:
self.plotter.close()
def obtain_samples(self, itr, batch_size):
"""Obtain one batch of samples.
Args:
itr(int): Index of iteration (epoch).
batch_size(int): Number of steps in batch.
This is a hint that the sampler may or may not respect.
Returns:
One batch of samples.
"""
if self.train_args.n_epoch_cycles == 1:
logger.log('Obtaining samples...')
return self.sampler.obtain_samples(itr, batch_size)
def save(self, epoch, paths=None):
"""Save snapshot of current batch.
Args:
itr(int): Index of iteration (epoch).
paths(dict): Batch of samples after preprocessed. If None,
no paths will be logged to the snapshot.
"""
assert self.has_setup
logger.log('Saving snapshot...')
params = dict()
# Save arguments
params['setup_args'] = self.setup_args
params['train_args'] = self.train_args
# Save states
params['env'] = self.env
params['algo'] = self.algo
if paths:
params['paths'] = paths
params['last_epoch'] = epoch
snapshotter.save_itr_params(epoch, params)
logger.log('Saved')
def restore(self, snapshot_dir, from_epoch='last'):
"""Restore experiment from snapshot.
Args:
snapshot_dir(str): Directory of snapshot.
from_epoch(str or int): The epoch to restore from.
Can be 'first', 'last' or a number.
Not applicable when snapshot_mode='last'.
Returns:
A SimpleNamespace for train()'s arguments.
Examples:
1. Resume experiment immediately.
with LocalRunner() as runner:
runner.restore(snapshot_dir)
runner.resume()
2. Resume experiment with modified training arguments.
with LocalRunner() as runner:
runner.restore(snapshot_dir, resume_now=False)
runner.resume(n_epochs=20)
Note:
When resume via command line, new snapshots will be
saved into the SAME directory if not specified.
When resume programmatically, snapshot directory should be
specify manually or through run_experiment() interface.
"""
snapshotter.snapshot_dir = snapshot_dir
saved = snapshotter.load(from_epoch)
self.setup_args = saved['setup_args']
self.train_args = saved['train_args']
self.setup(
env=saved['env'],
algo=saved['algo'],
sampler_cls=self.setup_args.sampler_cls,
sampler_args=self.setup_args.sampler_args)
n_epochs = self.train_args.n_epochs
last_epoch = saved['last_epoch']
n_epoch_cycles = self.train_args.n_epoch_cycles
batch_size = self.train_args.batch_size
store_paths = self.train_args.store_paths
pause_for_plot = self.train_args.pause_for_plot
fmt = '{:<20} {:<15}'
logger.log('Restore from snapshot saved in %s' % snapshot_dir)
logger.log(fmt.format('Train Args', 'Value'))
logger.log(fmt.format('n_epochs', n_epochs))
logger.log(fmt.format('last_epoch', last_epoch))
logger.log(fmt.format('n_epoch_cycles', n_epoch_cycles))
logger.log(fmt.format('batch_size', batch_size))
logger.log(fmt.format('store_paths', store_paths))
logger.log(fmt.format('pause_for_plot', pause_for_plot))
self.train_args.start_epoch = last_epoch + 1
return copy.copy(self.train_args)
def log_diagnostics(self, pause_for_plot=False):
"""Log diagnostics.
Args:
pause_for_plot(bool): Pause for plot.
"""
logger.log('Time %.2f s' % (time.time() - self._start_time))
logger.log('EpochTime %.2f s' % (time.time() - self._itr_start_time))
logger.log(tabular)
if self.plot:
self.plotter.update_plot(self.policy, self.algo.max_path_length)
if pause_for_plot:
input('Plotting evaluation run: Press Enter to " "continue...')
def train(self,
n_epochs,
batch_size,
n_epoch_cycles=1,
plot=False,
store_paths=False,
pause_for_plot=False):
"""Start training.
Args:
n_epochs(int): Number of epochs.
batch_size(int): Number of environment steps in one batch.
n_epoch_cycles(int): Number of batches of samples in each epoch.
This is only useful for off-policy algorithm.
For on-policy algorithm this value should always be 1.
plot(bool): Visualize policy by doing rollout after each epoch.
store_paths(bool): Save paths in snapshot.
pause_for_plot(bool): Pause for plot.
Returns:
The average return in last epoch cycle.
"""
assert self.has_setup, ('Use Runner.setup() to setup runner before '
'training.')
# Save arguments for restore
self.train_args = SimpleNamespace(
n_epochs=n_epochs,
n_epoch_cycles=n_epoch_cycles,
batch_size=batch_size,
plot=plot,
store_paths=store_paths,
pause_for_plot=pause_for_plot,
start_epoch=0)
self.plot = plot
return self.algo.train(self, batch_size)
def step_epochs(self):
"""Generator for training.
This function serves as a generator. It is used to separate
services such as snapshotting, sampler control from the actual
training loop. It is used inside train() in each algorithm.
The generator initializes two variables: `self.step_itr` and
`self.step_path`. To use the generator, these two have to be
updated manually in each epoch, as the example shows below.
Yields:
int: The next training epoch.
Examples:
for epoch in runner.step_epochs():
runner.step_path = runner.obtain_samples(...)
self.train_once(...)
runner.step_itr += 1
"""
try:
self._start_worker()
self._start_time = time.time()
self.step_itr = (
self.train_args.start_epoch * self.train_args.n_epoch_cycles)
self.step_path = None
for epoch in range(self.train_args.start_epoch,
self.train_args.n_epochs):
self._itr_start_time = time.time()
with logger.prefix('epoch #%d | ' % epoch):
yield epoch
save_path = (self.step_path
if self.train_args.store_paths else None)
self.save(epoch, save_path)
self.log_diagnostics(self.train_args.pause_for_plot)
logger.dump_all(self.step_itr)
tabular.clear()
finally:
self._shutdown_worker()
def resume(self,
n_epochs=None,
batch_size=None,
n_epoch_cycles=None,
plot=None,
store_paths=None,
pause_for_plot=None):
"""Resume from restored experiment.
This method provides the same interface as train().
If not specified, an argument will default to the
saved arguments from the last call to train().
Returns:
The average return in last epoch cycle.
"""
assert self.train_args is not None, (
'You must call restore() before resume().')
self.train_args.n_epochs = n_epochs or self.train_args.n_epochs
self.train_args.batch_size = batch_size or self.train_args.batch_size
self.train_args.n_epoch_cycles = (n_epoch_cycles
or self.train_args.n_epoch_cycles)
if plot is not None:
self.train_args.plot = plot
if store_paths is not None:
self.train_args.store_paths = store_paths
if pause_for_plot is not None:
self.train_args.pause_for_plot = pause_for_plot
return self.algo.train(self, batch_size)
| 34.771499 | 79 | 0.59617 | import copy
import time
from types import SimpleNamespace
from dowel import logger, tabular
import tensorflow as tf
from garage.experiment import snapshotter
class LocalRunner:
def __init__(self, sess=None, max_cpus=1):
if max_cpus > 1:
from garage.sampler import singleton_pool
singleton_pool.initialize(max_cpus)
self.sess = sess or tf.Session()
self.sess_entered = False
self.has_setup = False
self.plot = False
self.setup_args = None
self.train_args = None
def __enter__(self):
if tf.get_default_session() is not self.sess:
self.sess.__enter__()
self.sess_entered = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if tf.get_default_session() is self.sess and self.sess_entered:
self.sess.__exit__(exc_type, exc_val, exc_tb)
self.sess_entered = False
def setup(self, algo, env, sampler_cls=None, sampler_args=None):
self.algo = algo
self.env = env
self.policy = self.algo.policy
if sampler_args is None:
sampler_args = {}
if sampler_cls is None:
from garage.tf.algos.batch_polopt import BatchPolopt
if isinstance(algo, BatchPolopt):
if self.policy.vectorized:
from garage.tf.samplers import OnPolicyVectorizedSampler
sampler_cls = OnPolicyVectorizedSampler
else:
from garage.tf.samplers import BatchSampler
sampler_cls = BatchSampler
else:
from garage.tf.samplers import OffPolicyVectorizedSampler
sampler_cls = OffPolicyVectorizedSampler
self.sampler = sampler_cls(algo, env, **sampler_args)
self.initialize_tf_vars()
logger.log(self.sess.graph)
self.has_setup = True
self.setup_args = SimpleNamespace(
sampler_cls=sampler_cls, sampler_args=sampler_args)
def initialize_tf_vars(self):
with tf.name_scope('initialize_tf_vars'):
uninited_set = [
e.decode()
for e in self.sess.run(tf.report_uninitialized_variables())
]
self.sess.run(
tf.variables_initializer([
v for v in tf.global_variables()
if v.name.split(':')[0] in uninited_set
]))
def _start_worker(self):
self.sampler.start_worker()
if self.plot:
from garage.tf.plotter import Plotter
self.plotter = Plotter(self.env, self.policy)
self.plotter.start()
def _shutdown_worker(self):
self.sampler.shutdown_worker()
if self.plot:
self.plotter.close()
def obtain_samples(self, itr, batch_size):
if self.train_args.n_epoch_cycles == 1:
logger.log('Obtaining samples...')
return self.sampler.obtain_samples(itr, batch_size)
def save(self, epoch, paths=None):
assert self.has_setup
logger.log('Saving snapshot...')
params = dict()
params['setup_args'] = self.setup_args
params['train_args'] = self.train_args
params['env'] = self.env
params['algo'] = self.algo
if paths:
params['paths'] = paths
params['last_epoch'] = epoch
snapshotter.save_itr_params(epoch, params)
logger.log('Saved')
def restore(self, snapshot_dir, from_epoch='last'):
snapshotter.snapshot_dir = snapshot_dir
saved = snapshotter.load(from_epoch)
self.setup_args = saved['setup_args']
self.train_args = saved['train_args']
self.setup(
env=saved['env'],
algo=saved['algo'],
sampler_cls=self.setup_args.sampler_cls,
sampler_args=self.setup_args.sampler_args)
n_epochs = self.train_args.n_epochs
last_epoch = saved['last_epoch']
n_epoch_cycles = self.train_args.n_epoch_cycles
batch_size = self.train_args.batch_size
store_paths = self.train_args.store_paths
pause_for_plot = self.train_args.pause_for_plot
fmt = '{:<20} {:<15}'
logger.log('Restore from snapshot saved in %s' % snapshot_dir)
logger.log(fmt.format('Train Args', 'Value'))
logger.log(fmt.format('n_epochs', n_epochs))
logger.log(fmt.format('last_epoch', last_epoch))
logger.log(fmt.format('n_epoch_cycles', n_epoch_cycles))
logger.log(fmt.format('batch_size', batch_size))
logger.log(fmt.format('store_paths', store_paths))
logger.log(fmt.format('pause_for_plot', pause_for_plot))
self.train_args.start_epoch = last_epoch + 1
return copy.copy(self.train_args)
def log_diagnostics(self, pause_for_plot=False):
logger.log('Time %.2f s' % (time.time() - self._start_time))
logger.log('EpochTime %.2f s' % (time.time() - self._itr_start_time))
logger.log(tabular)
if self.plot:
self.plotter.update_plot(self.policy, self.algo.max_path_length)
if pause_for_plot:
input('Plotting evaluation run: Press Enter to " "continue...')
def train(self,
n_epochs,
batch_size,
n_epoch_cycles=1,
plot=False,
store_paths=False,
pause_for_plot=False):
assert self.has_setup, ('Use Runner.setup() to setup runner before '
'training.')
self.train_args = SimpleNamespace(
n_epochs=n_epochs,
n_epoch_cycles=n_epoch_cycles,
batch_size=batch_size,
plot=plot,
store_paths=store_paths,
pause_for_plot=pause_for_plot,
start_epoch=0)
self.plot = plot
return self.algo.train(self, batch_size)
def step_epochs(self):
try:
self._start_worker()
self._start_time = time.time()
self.step_itr = (
self.train_args.start_epoch * self.train_args.n_epoch_cycles)
self.step_path = None
for epoch in range(self.train_args.start_epoch,
self.train_args.n_epochs):
self._itr_start_time = time.time()
with logger.prefix('epoch #%d | ' % epoch):
yield epoch
save_path = (self.step_path
if self.train_args.store_paths else None)
self.save(epoch, save_path)
self.log_diagnostics(self.train_args.pause_for_plot)
logger.dump_all(self.step_itr)
tabular.clear()
finally:
self._shutdown_worker()
def resume(self,
n_epochs=None,
batch_size=None,
n_epoch_cycles=None,
plot=None,
store_paths=None,
pause_for_plot=None):
assert self.train_args is not None, (
'You must call restore() before resume().')
self.train_args.n_epochs = n_epochs or self.train_args.n_epochs
self.train_args.batch_size = batch_size or self.train_args.batch_size
self.train_args.n_epoch_cycles = (n_epoch_cycles
or self.train_args.n_epoch_cycles)
if plot is not None:
self.train_args.plot = plot
if store_paths is not None:
self.train_args.store_paths = store_paths
if pause_for_plot is not None:
self.train_args.pause_for_plot = pause_for_plot
return self.algo.train(self, batch_size)
| true | true |
f7f9d3a8135700a216c8335586b8e05519c876c5 | 5,756 | py | Python | py_aoc_2018_tests/test_days.py | davidlukac/advent-of-code | ed330b3005af248a5812906de97bd108485e624c | [
"MIT"
] | null | null | null | py_aoc_2018_tests/test_days.py | davidlukac/advent-of-code | ed330b3005af248a5812906de97bd108485e624c | [
"MIT"
] | null | null | null | py_aoc_2018_tests/test_days.py | davidlukac/advent-of-code | ed330b3005af248a5812906de97bd108485e624c | [
"MIT"
] | null | null | null | import io
import unittest
from collections import OrderedDict
import pytest
from py_aoc_2018.day_1 import day_1
from py_aoc_2018.day_2 import day_2, find_matching
from py_aoc_2018.day_3 import Claim, load_claims, optimize_claims, SquareBySquareOverclaimedCounter
from py_aoc_2018.day_3 import IterateClaimsOverclaimedCounter, ClaimsOverlap
class TestDay1(unittest.TestCase):
def test(self):
final_frequency, matching_frequency, _ = day_1()
assert 578 == final_frequency
assert 82516 == matching_frequency
class TestDay2(unittest.TestCase):
def test(self):
assert 8820 == day_2()[0]
def test_matching(self):
data = [
'abcde',
'fghij',
'klmno',
'pqrst',
'fguij',
'axcye',
'wvxyz'
]
cache = {}
res = []
for d in data:
find_matching(d, cache, res)
assert ['fgij'] == res
class TestDay3(unittest.TestCase):
    """Regression tests for the day-3 fabric-claim puzzle helpers."""

    def test_claim_factory(self):
        # Parsing the '#id @ x,y: WxH' format must match a hand-built Claim.
        assert Claim(1, 2, 3, 4, 5) == Claim.from_string('#1 @ 2,3: 4x5')
        assert Claim(2, 2, 3, 4, 5) != Claim.from_string('#1 @ 2,3: 4x5')

    def test_canvas_size(self):
        """load_claims must report a canvas just large enough for all claims."""
        data = [
            '#1 @ 1,3: 4x4',
            '#2 @ 3,1: 4x4',
            '#3 @ 5,5: 2x2'
        ]
        stream = io.StringIO('\n'.join(data))
        # load_claims returns (size_x, size_y, claims); compare the sizes only.
        assert (7, 7) == load_claims(stream)[:-1]
        data = [
            '#1 @ 1,1: 3x2',
            '#2 @ 2,1: 2x4',
            '#3 @ 3,2: 5x2',
            '#4 @ 5,3: 3x5'
        ]
        stream = io.StringIO('\n'.join(data))
        assert (8, 8) == load_claims(stream)[:-1]

    def test_claim_is_on(self):
        """is_on reports whether a square-inch coordinate lies inside a claim."""
        c = Claim(1, 1, 1, 3, 2)
        # Points just outside the 3x2 rectangle anchored at (1, 1).
        assert not c.is_on(0, 0)
        assert not c.is_on(2, 0)
        assert not c.is_on(4, 2)
        assert not c.is_on(0, 2)
        assert not c.is_on(2, 3)
        assert not c.is_on(4, 4)
        # Opposite corners of the claim itself are inside.
        assert c.is_on(1, 1)
        assert c.is_on(3, 2)
        c = Claim.from_string('#3 @ 3,2: 5x2')
        # Corners and an interior point of the parsed claim.
        assert c.is_on(3, 2)
        assert c.is_on(7, 2)
        assert c.is_on(3, 3)
        assert c.is_on(7, 3)
        assert c.is_on(5, 3)

    def test_count_too_occupied(self):
        """Both counter implementations agree on the over-claimed area."""
        data = [
            '#1 @ 1,3: 4x4',
            '#2 @ 3,1: 4x4',
            '#3 @ 5,5: 2x2'
        ]
        stream = io.StringIO('\n'.join(data))
        size_x, size_y, claims = load_claims(stream)
        counter = SquareBySquareOverclaimedCounter(size_x, size_y, optimize_claims(claims))
        assert 4 == counter.count_too_occupied()
        data = [
            '#1 @ 1,3: 4x4',
            '#2 @ 3,1: 4x4',
            '#3 @ 5,5: 2x2'
        ]
        stream = io.StringIO('\n'.join(data))
        size_x, size_y, claims = load_claims(stream)
        # The claim-iterating counter must agree with the square-by-square one.
        assert 4 == IterateClaimsOverclaimedCounter(size_x, size_y, optimize_claims(claims)).count_too_occupied()
        data = [
            '#1 @ 1,1: 3x2',
            '#2 @ 2,1: 2x4',
            '#3 @ 3,2: 5x2',
            '#4 @ 5,3: 3x5'
        ]
        stream = io.StringIO('\n'.join(data))
        size_x, size_y, claims = load_claims(stream)
        counter = SquareBySquareOverclaimedCounter(size_x, size_y, optimize_claims(claims))
        assert counter.count_too_occupied() == 8
        data = [
            '#1 @ 1,1: 3x2',
            '#2 @ 2,1: 2x4',
            '#3 @ 3,2: 5x2',
            '#4 @ 5,3: 3x5'
        ]
        stream = io.StringIO('\n'.join(data))
        size_x, size_y, claims = load_claims(stream)
        assert IterateClaimsOverclaimedCounter(size_x, size_y, optimize_claims(claims)).count_too_occupied() == 8

    def test_sort(self):
        """optimize_claims must order claims by their numeric id."""
        data = [
            '#4 @ 3,2: 5x5',
            '#2 @ 2,1: 2x4',
            '#5 @ 5,3: 3x5',
            '#3 @ 3,2: 5x2',
            '#1 @ 1,1: 3x2',
        ]
        stream = io.StringIO('\n'.join(data))
        _, _, claims = load_claims(stream)
        claim_ordered = optimize_claims(claims)
        claims_ordered_expected = OrderedDict({
            1: Claim.from_string('#1 @ 1,1: 3x2'),
            2: Claim.from_string('#2 @ 2,1: 2x4'),
            3: Claim.from_string('#3 @ 3,2: 5x2'),
            4: Claim.from_string('#4 @ 3,2: 5x5'),
            5: Claim.from_string('#5 @ 5,3: 3x5'),
        })
        for c_actual, c_expected in zip(claim_ordered.values(), claims_ordered_expected.values()):
            assert c_actual == c_expected

    def test_claims_overlap(self):
        """ClaimsOverlap reports per-axis overlap ranges (inclusive)."""
        c1 = Claim(1, 1, 1, 5, 5)
        c2 = Claim(2, 3, 3, 10, 10)
        assert ClaimsOverlap(c1, c2).overlap_on_x == (3, 5)
        assert ClaimsOverlap(c1, c2).is_overlap_on_x
        # A single shared square still counts as overlap.
        c1 = Claim(1, 0, 0, 1, 1)
        c2 = Claim(2, 0, 0, 1, 1)
        assert ClaimsOverlap(c1, c2).overlap_on_x == (0, 0)
        assert ClaimsOverlap(c1, c2).is_overlap_on_x
        # Disjoint claims have no overlap on either axis.
        c1 = Claim(1, 1, 1, 5, 5)
        c2 = Claim(2, 6, 6, 10, 10)
        assert not ClaimsOverlap(c1, c2).overlap_on_x
        assert not ClaimsOverlap(c1, c2).is_overlap_on_x
        c1 = Claim(1, 1, 1, 5, 5)
        c2 = Claim(2, 3, 3, 10, 10)
        assert ClaimsOverlap(c1, c2).overlap_on_y == (3, 5)
        assert ClaimsOverlap(c1, c2).is_overlap_on_y
        # Overlap must be symmetric in the argument order.
        c1 = Claim(1, 1, 1, 5, 5)
        c2 = Claim(2, 3, 3, 10, 10)
        assert ClaimsOverlap(c2, c1).overlap_on_y == (3, 5)
        assert ClaimsOverlap(c2, c1).is_overlap_on_y
        c1 = Claim(1, 0, 0, 1, 1)
        c2 = Claim(2, 0, 0, 1, 1)
        assert ClaimsOverlap(c1, c2).overlap_on_y == (0, 0)
        assert ClaimsOverlap(c1, c2).is_overlap_on_y
        c1 = Claim(1, 1, 1, 5, 5)
        c2 = Claim(2, 6, 6, 10, 10)
        assert not ClaimsOverlap(c1, c2).overlap_on_y
# Allow running this test module directly; delegate discovery to pytest.
if __name__ == '__main__':
    pytest.main()
| 27.806763 | 113 | 0.525191 | import io
import unittest
from collections import OrderedDict
import pytest
from py_aoc_2018.day_1 import day_1
from py_aoc_2018.day_2 import day_2, find_matching
from py_aoc_2018.day_3 import Claim, load_claims, optimize_claims, SquareBySquareOverclaimedCounter
from py_aoc_2018.day_3 import IterateClaimsOverclaimedCounter, ClaimsOverlap
class TestDay1(unittest.TestCase):
def test(self):
final_frequency, matching_frequency, _ = day_1()
assert 578 == final_frequency
assert 82516 == matching_frequency
class TestDay2(unittest.TestCase):
def test(self):
assert 8820 == day_2()[0]
def test_matching(self):
data = [
'abcde',
'fghij',
'klmno',
'pqrst',
'fguij',
'axcye',
'wvxyz'
]
cache = {}
res = []
for d in data:
find_matching(d, cache, res)
assert ['fgij'] == res
class TestDay3(unittest.TestCase):
def test_claim_factory(self):
assert Claim(1, 2, 3, 4, 5) == Claim.from_string('#1 @ 2,3: 4x5')
assert Claim(2, 2, 3, 4, 5) != Claim.from_string('#1 @ 2,3: 4x5')
def test_canvas_size(self):
data = [
'#1 @ 1,3: 4x4',
'#2 @ 3,1: 4x4',
'#3 @ 5,5: 2x2'
]
stream = io.StringIO('\n'.join(data))
assert (7, 7) == load_claims(stream)[:-1]
data = [
'#1 @ 1,1: 3x2',
'#2 @ 2,1: 2x4',
'#3 @ 3,2: 5x2',
'#4 @ 5,3: 3x5'
]
stream = io.StringIO('\n'.join(data))
assert (8, 8) == load_claims(stream)[:-1]
def test_claim_is_on(self):
c = Claim(1, 1, 1, 3, 2)
assert not c.is_on(0, 0)
assert not c.is_on(2, 0)
assert not c.is_on(4, 2)
assert not c.is_on(0, 2)
assert not c.is_on(2, 3)
assert not c.is_on(4, 4)
assert c.is_on(1, 1)
assert c.is_on(3, 2)
c = Claim.from_string('#3 @ 3,2: 5x2')
assert c.is_on(3, 2)
assert c.is_on(7, 2)
assert c.is_on(3, 3)
assert c.is_on(7, 3)
assert c.is_on(5, 3)
def test_count_too_occupied(self):
data = [
'#1 @ 1,3: 4x4',
'#2 @ 3,1: 4x4',
'#3 @ 5,5: 2x2'
]
stream = io.StringIO('\n'.join(data))
size_x, size_y, claims = load_claims(stream)
counter = SquareBySquareOverclaimedCounter(size_x, size_y, optimize_claims(claims))
assert 4 == counter.count_too_occupied()
data = [
'#1 @ 1,3: 4x4',
'#2 @ 3,1: 4x4',
'#3 @ 5,5: 2x2'
]
stream = io.StringIO('\n'.join(data))
size_x, size_y, claims = load_claims(stream)
assert 4 == IterateClaimsOverclaimedCounter(size_x, size_y, optimize_claims(claims)).count_too_occupied()
data = [
'#1 @ 1,1: 3x2',
'#2 @ 2,1: 2x4',
'#3 @ 3,2: 5x2',
'#4 @ 5,3: 3x5'
]
stream = io.StringIO('\n'.join(data))
size_x, size_y, claims = load_claims(stream)
counter = SquareBySquareOverclaimedCounter(size_x, size_y, optimize_claims(claims))
assert counter.count_too_occupied() == 8
data = [
'#1 @ 1,1: 3x2',
'#2 @ 2,1: 2x4',
'#3 @ 3,2: 5x2',
'#4 @ 5,3: 3x5'
]
stream = io.StringIO('\n'.join(data))
size_x, size_y, claims = load_claims(stream)
assert IterateClaimsOverclaimedCounter(size_x, size_y, optimize_claims(claims)).count_too_occupied() == 8
def test_sort(self):
data = [
'#4 @ 3,2: 5x5',
'#2 @ 2,1: 2x4',
'#5 @ 5,3: 3x5',
'#3 @ 3,2: 5x2',
'#1 @ 1,1: 3x2',
]
stream = io.StringIO('\n'.join(data))
_, _, claims = load_claims(stream)
claim_ordered = optimize_claims(claims)
claims_ordered_expected = OrderedDict({
1: Claim.from_string('#1 @ 1,1: 3x2'),
2: Claim.from_string('#2 @ 2,1: 2x4'),
3: Claim.from_string('#3 @ 3,2: 5x2'),
4: Claim.from_string('#4 @ 3,2: 5x5'),
5: Claim.from_string('#5 @ 5,3: 3x5'),
})
for c_actual, c_expected in zip(claim_ordered.values(), claims_ordered_expected.values()):
assert c_actual == c_expected
def test_claims_overlap(self):
c1 = Claim(1, 1, 1, 5, 5)
c2 = Claim(2, 3, 3, 10, 10)
assert ClaimsOverlap(c1, c2).overlap_on_x == (3, 5)
assert ClaimsOverlap(c1, c2).is_overlap_on_x
c1 = Claim(1, 0, 0, 1, 1)
c2 = Claim(2, 0, 0, 1, 1)
assert ClaimsOverlap(c1, c2).overlap_on_x == (0, 0)
assert ClaimsOverlap(c1, c2).is_overlap_on_x
c1 = Claim(1, 1, 1, 5, 5)
c2 = Claim(2, 6, 6, 10, 10)
assert not ClaimsOverlap(c1, c2).overlap_on_x
assert not ClaimsOverlap(c1, c2).is_overlap_on_x
c1 = Claim(1, 1, 1, 5, 5)
c2 = Claim(2, 3, 3, 10, 10)
assert ClaimsOverlap(c1, c2).overlap_on_y == (3, 5)
assert ClaimsOverlap(c1, c2).is_overlap_on_y
c1 = Claim(1, 1, 1, 5, 5)
c2 = Claim(2, 3, 3, 10, 10)
assert ClaimsOverlap(c2, c1).overlap_on_y == (3, 5)
assert ClaimsOverlap(c2, c1).is_overlap_on_y
c1 = Claim(1, 0, 0, 1, 1)
c2 = Claim(2, 0, 0, 1, 1)
assert ClaimsOverlap(c1, c2).overlap_on_y == (0, 0)
assert ClaimsOverlap(c1, c2).is_overlap_on_y
c1 = Claim(1, 1, 1, 5, 5)
c2 = Claim(2, 6, 6, 10, 10)
assert not ClaimsOverlap(c1, c2).overlap_on_y
if __name__ == '__main__':
pytest.main()
| true | true |
f7f9d3cd3ab6517110579eb5b363906479418ee4 | 1,344 | py | Python | Section 3/balancer.py | PacktPublishing/Artificial-Intelligence-with-Python-Deep-Neural-Networks | 0f729bea5a8ab6f932ce3b01f80b4dbd3b8d62a2 | [
"MIT"
] | 3 | 2019-06-03T12:47:43.000Z | 2021-10-24T02:55:03.000Z | Section 3/balancer.py | PacktPublishing/Artificial-Intelligence-with-Python-Deep-Neural-Networks | 0f729bea5a8ab6f932ce3b01f80b4dbd3b8d62a2 | [
"MIT"
] | null | null | null | Section 3/balancer.py | PacktPublishing/Artificial-Intelligence-with-Python-Deep-Neural-Networks | 0f729bea5a8ab6f932ce3b01f80b4dbd3b8d62a2 | [
"MIT"
] | 2 | 2020-06-08T23:01:47.000Z | 2021-08-10T17:42:06.000Z | import argparse
import gym
def build_arg_parser():
    """Create the command-line parser that selects which gym environment to run."""
    arg_parser = argparse.ArgumentParser(description='Run an environment')
    arg_parser.add_argument(
        '--input-env',
        dest='input_env',
        required=True,
        choices=['cartpole', 'mountaincar', 'pendulum'],
        help='Specify the name of the environment',
    )
    return arg_parser
# Script entry point: run random-action episodes in the selected environment.
if __name__=='__main__':
    args = build_arg_parser().parse_args()
    input_env = args.input_env
    # Map the short CLI names to the registered gym environment ids.
    name_map = {'cartpole': 'CartPole-v0',
                'mountaincar': 'MountainCar-v0',
                'pendulum': 'Pendulum-v0'}
    # Create the environment
    env = gym.make(name_map[input_env])
    # Run 20 independent episodes.
    for _ in range(20):
        # Reset the environment at the start of each episode.
        observation = env.reset()
        # Each episode lasts at most 100 timesteps.
        for i in range(100):
            # Render the environment window (side effect only).
            env.render()
            # Print the current observation
            print(observation)
            # Sample a uniformly random action from the action space.
            action = env.action_space.sample()
            # Extract the observation, reward, status and
            # other info based on the action taken
            observation, reward, done, info = env.step(action)
            # Stop the episode early once the environment reports termination.
            if done:
                print('Episode finished after {} timesteps'.format(i+1))
                break
| 28 | 72 | 0.584821 | import argparse
import gym
def build_arg_parser():
parser = argparse.ArgumentParser(description='Run an environment')
parser.add_argument('--input-env', dest='input_env', required=True,
choices=['cartpole', 'mountaincar', 'pendulum'],
help='Specify the name of the environment')
return parser
if __name__=='__main__':
args = build_arg_parser().parse_args()
input_env = args.input_env
name_map = {'cartpole': 'CartPole-v0',
'mountaincar': 'MountainCar-v0',
'pendulum': 'Pendulum-v0'}
env = gym.make(name_map[input_env])
for _ in range(20):
observation = env.reset()
for i in range(100):
env.render()
print(observation)
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if done:
print('Episode finished after {} timesteps'.format(i+1))
break
| true | true |
f7f9d607814c4ab71dab6e9df44d8f3ec518ac9f | 1,052 | py | Python | tests/terraform/checks/resource/gcp/test_GoogleCloudSqlDatabasePublicallyAccessible.py | mgmt1pyro/Test-Theme | d3e20b62111636ecbe4267c5fff7c2820a9a892d | [
"Apache-2.0"
] | null | null | null | tests/terraform/checks/resource/gcp/test_GoogleCloudSqlDatabasePublicallyAccessible.py | mgmt1pyro/Test-Theme | d3e20b62111636ecbe4267c5fff7c2820a9a892d | [
"Apache-2.0"
] | null | null | null | tests/terraform/checks/resource/gcp/test_GoogleCloudSqlDatabasePublicallyAccessible.py | mgmt1pyro/Test-Theme | d3e20b62111636ecbe4267c5fff7c2820a9a892d | [
"Apache-2.0"
] | null | null | null | import unittest
from checkov.terraform.checks.resource.gcp.GoogleCloudSqlDatabasePublicallyAccessible import check
from checkov.terraform.models.enums import CheckResult
class GoogleCloudSqlDatabasePublicallyAccessible(unittest.TestCase):
    """Tests for the Cloud SQL public-accessibility check."""

    @staticmethod
    def _conf(networks):
        # Build a terraform-style resource configuration carrying the given
        # list of authorized networks.
        return {'settings': [{'tier': ['db-f1-micro'],
                              'ip_configuration': [{'ipv4_enabled': True,
                                                    'authorized_networks': [networks]}]}]}

    def test_failure(self):
        """An authorized network of 0.0.0.0/0 must fail the check."""
        conf = self._conf([{'name': 'net1', 'value': '10.0.0.0/16'},
                           {'name': 'net1', 'value': '0.0.0.0/0'}])
        self.assertEqual(CheckResult.FAILED, check.scan_resource_conf(conf=conf))

    def test_success(self):
        """Private-range authorized networks must pass the check."""
        conf = self._conf([{'name': 'net1', 'value': '10.0.0.0/16'},
                           {'name': 'net1', 'value': '10.10.0.0/16'}])
        self.assertEqual(CheckResult.PASSED, check.scan_resource_conf(conf=conf))
if __name__ == '__main__':
unittest.main()
| 47.818182 | 227 | 0.684411 | import unittest
from checkov.terraform.checks.resource.gcp.GoogleCloudSqlDatabasePublicallyAccessible import check
from checkov.terraform.models.enums import CheckResult
class GoogleCloudSqlDatabasePublicallyAccessible(unittest.TestCase):
def test_failure(self):
resource_conf = {'settings': [{'tier': ['db-f1-micro'], 'ip_configuration': [{'ipv4_enabled': True, 'authorized_networks': [ [ {'name': 'net1', 'value': '10.0.0.0/16'}, {'name': 'net1', 'value': '0.0.0.0/0'} ] ]}]}]}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
resource_conf = {'settings': [{'tier': ['db-f1-micro'], 'ip_configuration': [{'ipv4_enabled': True, 'authorized_networks': [ [ {'name': 'net1', 'value': '10.0.0.0/16'}, {'name': 'net1', 'value': '10.10.0.0/16'} ] ]}]}]}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
| true | true |
f7f9d667528222b2a8ef392ab2a9d527423b3b73 | 8,371 | py | Python | backbone/hyrnn_nets.py | jacv050/hyperfuture | 1c328c18773bf6c0bb0d1573ef34431c59a054d2 | [
"MIT"
] | 130 | 2021-01-05T17:56:14.000Z | 2022-03-30T03:51:17.000Z | backbone/hyrnn_nets.py | shravankumar147/hyperfuture | 54288230656c7a8cc0b825f9e397d690408d9e42 | [
"MIT"
] | null | null | null | backbone/hyrnn_nets.py | shravankumar147/hyperfuture | 54288230656c7a8cc0b825f9e397d690408d9e42 | [
"MIT"
] | 24 | 2021-01-06T07:21:55.000Z | 2021-10-29T19:27:06.000Z | """
Network definitions from https://github.com/ferrine/hyrnn
"""
import geoopt
import geoopt.manifolds.stereographic.math as gmath
import numpy as np
import torch.nn
import torch.nn.functional
from torch.cuda.amp import autocast
def mobius_linear(
    input,
    weight,
    bias=None,
    hyperbolic_input=True,
    hyperbolic_bias=True,
    nonlin=None,
    k=-1.0,
):
    """Hyperbolic (Möbius) analogue of a linear layer forward pass.

    input: treated as a point on the manifold when ``hyperbolic_input`` is
        True; otherwise as a Euclidean tensor that is lifted with expmap0
        after the ordinary matrix product.
    weight: Euclidean weight matrix, applied via the Möbius matrix product.
    bias: optional bias; assumed to live on the manifold unless
        ``hyperbolic_bias`` is False, in which case it is lifted first.
    nonlin: optional elementwise nonlinearity, applied through the Möbius
        function lift.
    k: manifold curvature (negative for hyperbolic space).
    """
    k = torch.tensor(k)
    if hyperbolic_input:
        output = mobius_matvec(weight, input, k=k)
    else:
        output = torch.nn.functional.linear(input, weight)
        output = gmath.expmap0(output, k=k)
    if bias is not None:
        if not hyperbolic_bias:
            bias = gmath.expmap0(bias, k=k)
        # Möbius translation by the bias point, broadcast over the batch.
        output = gmath.mobius_add(output, bias.unsqueeze(0).expand_as(output), k=k)
    if nonlin is not None:
        output = gmath.mobius_fn_apply(nonlin, output, k=k)
    # Re-project onto the ball to guard against numerical drift.
    output = gmath.project(output, k=k)
    return output
def mobius_matvec(m: torch.Tensor, x: torch.Tensor, *, k: torch.Tensor, dim=-1):
    """Möbius matrix-vector product of ``m`` and ``x`` with curvature ``k``.

    Thin public wrapper around :func:`_mobius_matvec`.
    """
    return _mobius_matvec(m, x, k, dim=dim)
def _mobius_matvec(m: torch.Tensor, x: torch.Tensor, k: torch.Tensor, dim: int = -1):
    """Compute the Möbius matrix-vector product along ``dim``.

    The Euclidean product ``m @ x`` is rescaled so that the hyperbolic norm
    transforms consistently with the curvature ``k``; a zero Euclidean
    product maps to the origin of the manifold.
    """
    if m.dim() > 2 and dim != -1:
        raise RuntimeError(
            "broadcasted Möbius matvec is supported for the last dim only"
        )
    # Clamp norms away from zero so the divisions below are safe.
    x_norm = x.norm(dim=dim, keepdim=True, p=2).clamp_min(1e-15)
    if dim != -1 or m.dim() == 2:
        mx = torch.matmul(m, x.transpose(1, 0)).transpose(1, 0)
    else:
        mx = torch.matmul(m, x.unsqueeze(-1)).squeeze(-1)
    mx_norm = mx.norm(dim=dim, keepdim=True, p=2).clamp_min(1e-15)
    res_c = gmath.tan_k(mx_norm / x_norm * gmath.artan_k(x_norm, k), k) * (mx / mx_norm)
    # Where the Euclidean product is exactly zero, the result is the origin.
    # Use a boolean mask: torch.where requires a bool condition in modern
    # PyTorch (the previous uint8 mask via prod(dtype=torch.uint8) relied on
    # deprecated behaviour); all() over dim is equivalent to the product of
    # the elementwise zero tests.
    cond = (mx == 0).all(dim=dim, keepdim=True)
    res_0 = torch.zeros(1, dtype=res_c.dtype, device=res_c.device)
    res = torch.where(cond, res_0, res_c)
    return res
def one_rnn_transform(W, h, U, x, b, k):
    """Return the gyro-affine map (W ⊗ h) ⊕ (U ⊗ x) ⊕ b used by the GRU gates."""
    hidden_part = gmath.mobius_matvec(W, h, k=k)
    input_part = gmath.mobius_matvec(U, x, k=k)
    combined = gmath.mobius_add(hidden_part, input_part, k=k)
    return gmath.mobius_add(combined, b, k=k)
def mobius_gru_cell(
    input: torch.Tensor,
    hx: torch.Tensor,
    weight_ih: torch.Tensor,
    weight_hh: torch.Tensor,
    bias: torch.Tensor,
    k: torch.Tensor,
    nonlin=None,
):
    """One hyperbolic GRU step: produce the next hidden state from ``input``.

    ``weight_ih`` / ``weight_hh`` are the stacked (reset, candidate, update)
    matrices, split with chunk(3); ``bias`` holds the three gate bias points.
    Returns the updated hidden state on the curvature-``k`` manifold.
    """
    W_ir, W_ih, W_iz = weight_ih.chunk(3)
    b_r, b_h, b_z = bias
    W_hr, W_hh, W_hz = weight_hh.chunk(3)
    # Gate activations are computed in tangent space (logmap0) and squashed
    # with an ordinary sigmoid.
    z_t = gmath.logmap0(one_rnn_transform(W_hz, hx, W_iz, input, b_z, k), k=k).sigmoid()
    r_t = gmath.logmap0(one_rnn_transform(W_hr, hx, W_ir, input, b_r, k), k=k).sigmoid()
    # Reset-gated hidden state feeds the candidate computation.
    rh_t = gmath.mobius_pointwise_mul(r_t, hx, k=k)
    h_tilde = one_rnn_transform(W_hh, rh_t, W_ih, input, b_h, k)
    if nonlin is not None:
        h_tilde = gmath.mobius_fn_apply(nonlin, h_tilde, k=k)
    # Möbius interpolation between the previous state and the candidate,
    # weighted by the update gate.
    delta_h = gmath.mobius_add(-hx, h_tilde, k=k)
    h_out = gmath.mobius_add(hx, gmath.mobius_pointwise_mul(z_t, delta_h, k=k), k=k)
    return h_out
def mobius_gru_loop(
    input: torch.Tensor,
    h0: torch.Tensor,
    weight_ih: torch.Tensor,
    weight_hh: torch.Tensor,
    bias: torch.Tensor,
    k: torch.Tensor,
    batch_sizes=None,
    hyperbolic_input: bool = False,
    hyperbolic_hidden_state0: bool = False,
    nonlin=None,
):
    """Unroll a hyperbolic GRU over a sequence.

    Inputs and the initial hidden state are lifted onto the manifold with
    expmap0 unless the corresponding ``hyperbolic_*`` flag says they already
    live there. When ``batch_sizes`` is given, ``input`` is treated as the
    flattened data of a packed sequence (presumably as produced by
    torch.nn.utils.rnn.pack_padded_sequence — TODO confirm against callers).
    Returns (per-step outputs, last hidden state per sequence).
    """
    if not hyperbolic_hidden_state0:
        hx = gmath.expmap0(h0, k=k)
    else:
        hx = h0
    if not hyperbolic_input:
        input = gmath.expmap0(input, k=k)
    outs = []
    if batch_sizes is None:
        # Dense case: step over the time dimension directly.
        input_unbinded = input.unbind(0)
        for t in range(input.size(0)):
            hx = mobius_gru_cell(
                input=input_unbinded[t],
                hx=hx,
                weight_ih=weight_ih,
                weight_hh=weight_hh,
                bias=bias,
                nonlin=nonlin,
                k=k,
            )
            outs.append(hx)
        outs = torch.stack(outs)
        h_last = hx
    else:
        # Packed case: consume batch_sizes[t] rows per timestep; sequences
        # that end early have their final hidden state peeled off hx.
        h_last = []
        T = len(batch_sizes) - 1
        for i, t in enumerate(range(batch_sizes.size(0))):
            ix, input = input[: batch_sizes[t]], input[batch_sizes[t]:]
            hx = mobius_gru_cell(
                input=ix,
                hx=hx,
                weight_ih=weight_ih,
                weight_hh=weight_hh,
                bias=bias,
                nonlin=nonlin,
                k=k,
            )
            outs.append(hx)
            if t < T:
                # Sequences shorter than t+1 steps are finished: keep their
                # states as final, continue with the still-active prefix.
                hx, ht = hx[: batch_sizes[t + 1]], hx[batch_sizes[t + 1]:]
                h_last.append(ht)
            else:
                h_last.append(hx)
        # States were collected shortest-first; reverse to restore the
        # packed-sequence ordering before concatenating.
        h_last.reverse()
        h_last = torch.cat(h_last)
        outs = torch.cat(outs)
    return outs, h_last
class MobiusLinear(torch.nn.Linear):
    """Linear layer operating on the Poincaré ball via Möbius operations.

    Weights stay Euclidean; the bias optionally lives on the manifold
    (``hyperbolic_bias``). ``fp64_hyper`` forces float64 for the hyperbolic
    arithmetic, and autocast is disabled so fp16 is never used.
    """

    def __init__(
        self,
        *args,
        hyperbolic_input=True,
        hyperbolic_bias=True,
        nonlin=None,
        k=-1.0,
        fp64_hyper=True,
        **kwargs
    ):
        k = torch.tensor(k)
        super().__init__(*args, **kwargs)
        if self.bias is not None:
            if hyperbolic_bias:
                # Re-register the bias as a manifold parameter on the ball.
                self.ball = manifold = geoopt.PoincareBall(c=k.abs())
                self.bias = geoopt.ManifoldParameter(self.bias, manifold=manifold)
                with torch.no_grad():
                    # Initialize the bias very close to the origin.
                    self.bias.set_(gmath.expmap0(self.bias.normal_() / 400, k=k))
        with torch.no_grad():
            # 1e-2 was the original value in the code. The updated one is from HNN++
            std = 1 / np.sqrt(2 * self.weight.shape[0] * self.weight.shape[1])
            # Actually, we divide that by 100 so that it starts really small
            # and far from the border of the ball.
            std = std / 100
            self.weight.normal_(std=std)
        self.hyperbolic_bias = hyperbolic_bias
        self.hyperbolic_input = hyperbolic_input
        self.nonlin = nonlin
        self.k = k
        self.fp64_hyper = fp64_hyper

    def forward(self, input):
        if self.fp64_hyper:
            input = input.double()
        else:
            input = input.float()
        with autocast(enabled=False):  # Do not use fp16
            return mobius_linear(
                input,
                weight=self.weight,
                bias=self.bias,
                hyperbolic_input=self.hyperbolic_input,
                nonlin=self.nonlin,
                hyperbolic_bias=self.hyperbolic_bias,
                k=self.k,
            )

    def extra_repr(self):
        # Fixed: the previous version overwrote the accumulated string with
        # the hyperbolic_bias fragment (``info = ...`` instead of ``+=``),
        # concatenated "c=..." without a separator, and accessed self.ball
        # unconditionally, which raised when no hyperbolic bias was set up.
        info = super().extra_repr()
        if hasattr(self, "ball"):
            info += ", c={}".format(self.ball.c)
        info += ", hyperbolic_input={}".format(self.hyperbolic_input)
        if self.bias is not None:
            info += ", hyperbolic_bias={}".format(self.hyperbolic_bias)
        return info
class MobiusDist2Hyperplane(torch.nn.Module):
    """Signed, scaled distances from input points to learned hyperplanes.

    Each of the ``out_features`` hyperplanes is parameterized by a base
    point on the Poincaré ball and a unit tangent direction on the sphere;
    the forward pass returns ``dist2plane * exp(scale)`` per hyperplane.
    """

    def __init__(self, in_features, out_features, k=-1.0, fp64_hyper=True):
        k = torch.tensor(k)
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.ball = ball = geoopt.PoincareBall(c=k.abs())
        self.sphere = sphere = geoopt.manifolds.Sphere()
        # Per-hyperplane log-scale, applied as exp(scale) in forward.
        self.scale = torch.nn.Parameter(torch.zeros(out_features))
        point = torch.randn(out_features, in_features) / 4
        point = gmath.expmap0(point, k=k)
        tangent = torch.randn(out_features, in_features)
        self.point = geoopt.ManifoldParameter(point, manifold=ball)
        self.fp64_hyper = fp64_hyper
        with torch.no_grad():
            # Project the tangent directions onto the unit sphere.
            self.tangent = geoopt.ManifoldParameter(tangent, manifold=sphere).proj_()

    def forward(self, input):
        if self.fp64_hyper:
            input = input.double()
        else:
            input = input.float()
        with autocast(enabled=False):  # Do not use fp16
            input = input.unsqueeze(-2)
            distance = gmath.dist2plane(
                x=input, p=self.point, a=self.tangent, k=self.ball.c, signed=True
            )
            return distance * self.scale.exp()

    def extra_repr(self):
        # Fixed: the previous version returned the unformatted template
        # string literally (the .format call had been commented out).
        return "in_features={}, out_features={}".format(
            self.in_features, self.out_features
        )
| 34.028455 | 100 | 0.571736 |
import geoopt
import geoopt.manifolds.stereographic.math as gmath
import numpy as np
import torch.nn
import torch.nn.functional
from torch.cuda.amp import autocast
def mobius_linear(
input,
weight,
bias=None,
hyperbolic_input=True,
hyperbolic_bias=True,
nonlin=None,
k=-1.0,
):
k = torch.tensor(k)
if hyperbolic_input:
output = mobius_matvec(weight, input, k=k)
else:
output = torch.nn.functional.linear(input, weight)
output = gmath.expmap0(output, k=k)
if bias is not None:
if not hyperbolic_bias:
bias = gmath.expmap0(bias, k=k)
output = gmath.mobius_add(output, bias.unsqueeze(0).expand_as(output), k=k)
if nonlin is not None:
output = gmath.mobius_fn_apply(nonlin, output, k=k)
output = gmath.project(output, k=k)
return output
def mobius_matvec(m: torch.Tensor, x: torch.Tensor, *, k: torch.Tensor, dim=-1):
return _mobius_matvec(m, x, k, dim=dim)
def _mobius_matvec(m: torch.Tensor, x: torch.Tensor, k: torch.Tensor, dim: int = -1):
if m.dim() > 2 and dim != -1:
raise RuntimeError(
"broadcasted Möbius matvec is supported for the last dim only"
)
x_norm = x.norm(dim=dim, keepdim=True, p=2).clamp_min(1e-15)
if dim != -1 or m.dim() == 2:
mx = torch.matmul(m, x.transpose(1, 0)).transpose(1, 0)
else:
mx = torch.matmul(m, x.unsqueeze(-1)).squeeze(-1)
mx_norm = mx.norm(dim=dim, keepdim=True, p=2).clamp_min(1e-15)
res_c = gmath.tan_k(mx_norm / x_norm * gmath.artan_k(x_norm, k), k) * (mx / mx_norm)
cond = (mx == 0).prod(dim=dim, keepdim=True, dtype=torch.uint8)
res_0 = torch.zeros(1, dtype=res_c.dtype, device=res_c.device)
res = torch.where(cond, res_0, res_c)
return res
def one_rnn_transform(W, h, U, x, b, k):
W_otimes_h = gmath.mobius_matvec(W, h, k=k)
U_otimes_x = gmath.mobius_matvec(U, x, k=k)
Wh_plus_Ux = gmath.mobius_add(W_otimes_h, U_otimes_x, k=k)
return gmath.mobius_add(Wh_plus_Ux, b, k=k)
def mobius_gru_cell(
input: torch.Tensor,
hx: torch.Tensor,
weight_ih: torch.Tensor,
weight_hh: torch.Tensor,
bias: torch.Tensor,
k: torch.Tensor,
nonlin=None,
):
W_ir, W_ih, W_iz = weight_ih.chunk(3)
b_r, b_h, b_z = bias
W_hr, W_hh, W_hz = weight_hh.chunk(3)
z_t = gmath.logmap0(one_rnn_transform(W_hz, hx, W_iz, input, b_z, k), k=k).sigmoid()
r_t = gmath.logmap0(one_rnn_transform(W_hr, hx, W_ir, input, b_r, k), k=k).sigmoid()
rh_t = gmath.mobius_pointwise_mul(r_t, hx, k=k)
h_tilde = one_rnn_transform(W_hh, rh_t, W_ih, input, b_h, k)
if nonlin is not None:
h_tilde = gmath.mobius_fn_apply(nonlin, h_tilde, k=k)
delta_h = gmath.mobius_add(-hx, h_tilde, k=k)
h_out = gmath.mobius_add(hx, gmath.mobius_pointwise_mul(z_t, delta_h, k=k), k=k)
return h_out
def mobius_gru_loop(
input: torch.Tensor,
h0: torch.Tensor,
weight_ih: torch.Tensor,
weight_hh: torch.Tensor,
bias: torch.Tensor,
k: torch.Tensor,
batch_sizes=None,
hyperbolic_input: bool = False,
hyperbolic_hidden_state0: bool = False,
nonlin=None,
):
if not hyperbolic_hidden_state0:
hx = gmath.expmap0(h0, k=k)
else:
hx = h0
if not hyperbolic_input:
input = gmath.expmap0(input, k=k)
outs = []
if batch_sizes is None:
input_unbinded = input.unbind(0)
for t in range(input.size(0)):
hx = mobius_gru_cell(
input=input_unbinded[t],
hx=hx,
weight_ih=weight_ih,
weight_hh=weight_hh,
bias=bias,
nonlin=nonlin,
k=k,
)
outs.append(hx)
outs = torch.stack(outs)
h_last = hx
else:
h_last = []
T = len(batch_sizes) - 1
for i, t in enumerate(range(batch_sizes.size(0))):
ix, input = input[: batch_sizes[t]], input[batch_sizes[t]:]
hx = mobius_gru_cell(
input=ix,
hx=hx,
weight_ih=weight_ih,
weight_hh=weight_hh,
bias=bias,
nonlin=nonlin,
k=k,
)
outs.append(hx)
if t < T:
hx, ht = hx[: batch_sizes[t + 1]], hx[batch_sizes[t + 1]:]
h_last.append(ht)
else:
h_last.append(hx)
h_last.reverse()
h_last = torch.cat(h_last)
outs = torch.cat(outs)
return outs, h_last
class MobiusLinear(torch.nn.Linear):
def __init__(
self,
*args,
hyperbolic_input=True,
hyperbolic_bias=True,
nonlin=None,
k=-1.0,
fp64_hyper=True,
**kwargs
):
k = torch.tensor(k)
super().__init__(*args, **kwargs)
if self.bias is not None:
if hyperbolic_bias:
self.ball = manifold = geoopt.PoincareBall(c=k.abs())
self.bias = geoopt.ManifoldParameter(self.bias, manifold=manifold)
with torch.no_grad():
self.bias.set_(gmath.expmap0(self.bias.normal_() / 400, k=k))
with torch.no_grad():
std = 1 / np.sqrt(2 * self.weight.shape[0] * self.weight.shape[1])
std = std / 100
self.weight.normal_(std=std)
self.hyperbolic_bias = hyperbolic_bias
self.hyperbolic_input = hyperbolic_input
self.nonlin = nonlin
self.k = k
self.fp64_hyper = fp64_hyper
def forward(self, input):
if self.fp64_hyper:
input = input.double()
else:
input = input.float()
with autocast(enabled=False):
return mobius_linear(
input,
weight=self.weight,
bias=self.bias,
hyperbolic_input=self.hyperbolic_input,
nonlin=self.nonlin,
hyperbolic_bias=self.hyperbolic_bias,
k=self.k,
)
def extra_repr(self):
info = super().extra_repr()
info += "c={}, hyperbolic_input={}".format(self.ball.c, self.hyperbolic_input)
if self.bias is not None:
info = ", hyperbolic_bias={}".format(self.hyperbolic_bias)
return info
class MobiusDist2Hyperplane(torch.nn.Module):
def __init__(self, in_features, out_features, k=-1.0, fp64_hyper=True):
k = torch.tensor(k)
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.ball = ball = geoopt.PoincareBall(c=k.abs())
self.sphere = sphere = geoopt.manifolds.Sphere()
self.scale = torch.nn.Parameter(torch.zeros(out_features))
point = torch.randn(out_features, in_features) / 4
point = gmath.expmap0(point, k=k)
tangent = torch.randn(out_features, in_features)
self.point = geoopt.ManifoldParameter(point, manifold=ball)
self.fp64_hyper = fp64_hyper
with torch.no_grad():
self.tangent = geoopt.ManifoldParameter(tangent, manifold=sphere).proj_()
def forward(self, input):
if self.fp64_hyper:
input = input.double()
else:
input = input.float()
with autocast(enabled=False):
input = input.unsqueeze(-2)
distance = gmath.dist2plane(
x=input, p=self.point, a=self.tangent, k=self.ball.c, signed=True
)
return distance * self.scale.exp()
def extra_repr(self):
return (
"in_features={in_features}, out_features={out_features}"
)
| true | true |
f7f9d6af25d1ecfefe19469b4269c1c2f995d79f | 8,266 | py | Python | gensrc/gensrc/addins/valueobjects.py | txu2014/quantlib | 95c7d94906c30d0c3c4e0758a2ebfe2a62b075ec | [
"BSD-3-Clause"
] | null | null | null | gensrc/gensrc/addins/valueobjects.py | txu2014/quantlib | 95c7d94906c30d0c3c4e0758a2ebfe2a62b075ec | [
"BSD-3-Clause"
] | null | null | null | gensrc/gensrc/addins/valueobjects.py | txu2014/quantlib | 95c7d94906c30d0c3c4e0758a2ebfe2a62b075ec | [
"BSD-3-Clause"
] | 1 | 2022-02-24T04:54:18.000Z | 2022-02-24T04:54:18.000Z |
"""
Copyright (C) 2005, 2006 Plamen Neykov
Copyright (C) 2007, 2008 Eric Ehlers
This file is part of QuantLib, a free-software/open-source library
for financial quantitative analysts and developers - http://quantlib.org/
QuantLib is free software: you can redistribute it and/or modify it
under the terms of the QuantLib license. You should have received a
copy of the license along with this program; if not, please email
<quantlib-dev@lists.sf.net>. The license is also available online at
<http://quantlib.org/license.shtml>.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the license for more details.
"""
"""Generate source code for ValueObjects."""
from gensrc.addins import addin
from gensrc.functions import function
from gensrc.utilities import outputfile
from gensrc.utilities import common
from gensrc.utilities import log
from gensrc.categories import category
from gensrc.configuration import environment
class ValueObjects(addin.Addin):
    """Generate source code for ValueObjects."""

    # Template for the #include line of one category's ValueObject header.
    VO_INCLUDE = '''\
#include <%(libRootDirectory)s/valueobjects/vo_%(categoryName)s.hpp>\n'''

    # Template for the virtual processorName() override emitted for functions
    # that declare a custom processor.
    PROCESSOR_NAME = '''\
        virtual std::string processorName() { return "%(processorName)s"; }'''

    #############################################
    # public interface
    #############################################

    def generate(self, categoryList, enumerationList):
        """Generate source code for ValueObjects.

        For every category with ValueObjects enabled, write either a single
        self-contained inline header or a header/.cpp pair, then write
        vo_all.hpp including every generated header.
        """
        self.categoryList_ = categoryList
        self.enumerationList_ = enumerationList
        allIncludes = ''
        log.Log.instance().logMessage(' begin generating ValueObjects ...')
        for cat in self.categoryList_.categories('*', self.coreCategories_, self.addinCategories_):
            if cat.generateVOs():
                allIncludes += ValueObjects.VO_INCLUDE % {
                    'categoryName' : cat.name(),
                    'libRootDirectory' : environment.config().libRootDirectory() }
                # headersInline_ selects between the inline and split layouts.
                if self.headersInline_:
                    self.generateHeadersInline(cat)
                else:
                    self.generateHeaders(cat)
                    self.generateFunctions(cat)
        self.bufferAll_.set({
            'allIncludes' : allIncludes,
            'libRootDirectory' : environment.config().libRootDirectory() })
        allFilename = self.rootPath_ + 'vo_all.hpp'
        outputfile.OutputFile(self, allFilename, self.copyright_, self.bufferAll_)
        log.Log.instance().logMessage(' done generating ValueObjects.')

    def generateHeaderInline(self, func):
        """Generate class definition source for prototype of given constructor function."""
        if func.processorName():
            processorName = ValueObjects.PROCESSOR_NAME % {
                common.PROCESSOR_NAME : func.processorName() }
        else:
            processorName = ""
        return self.bufferClassDeclInline_.set({
            'constructorDeclaration' : func.parameterList().generate(
                self.constructorDeclaration_),
            'functionName' : func.name(),
            'processorName' : processorName,
            'serializeMembers' : func.parameterList().generate(self.serializeMembers_),
            'memberDeclaration' : func.parameterList().generate(self.memberDeclaration_) })

    def generateFunctionInline(self, func):
        """Generate source code for function (inline-header variant)."""
        return self.bufferClassBodyInline_.set({
            'constructorInit' : func.parameterList().generate(self.constructorInit_),
            'constructorParList' : func.parameterList().generate(self.constructorDeclaration_),
            'functionName' : func.name(),
            'propertyDeclaration' : func.parameterList().generate(self.propertyDeclaration_),
            'propertyGet' : func.parameterList().generate(self.propertyGet_),
            'propertySet' : func.parameterList().generate(self.propertySet_),
            'propertyInsert' : func.parameterList().generate(self.propertyInsert_),
            'propertyPush' : func.parameterList().generate(self.propertyPush_),
            'populateObjectIDs' : func.parameterList().generate(self.populateObjectIDs_) })

    def generateHeadersInline(self, cat):
        """Generate class source for constructor function prototypes.

        Writes a single vo_<category>.hpp containing both declarations and
        function bodies for every ValueObject-enabled function.
        """
        bufHeader = ''
        bufFunc = ''
        for func in cat.functions('*'):
            if func.generateVOs():
                bufHeader += self.generateHeaderInline(func)
                bufFunc += self.generateFunctionInline(func)
        self.bufferIncludesInline_.set({
            'categoryName' : cat.name(),
            'functions' : bufFunc,
            'headers' : bufHeader,
            'libRoot' : environment.config().libRootDirectory(),
            'namespaceObjects' : environment.config().namespaceObjects() })
        fileName = self.rootPath_ + 'vo_' + cat.name() + '.hpp'
        outputfile.OutputFile(self, fileName, self.copyright_, self.bufferIncludesInline_)

    def generateHeader(self, func):
        """Generate class definition source for prototype of given constructor function."""
        if func.processorName():
            processorName = ValueObjects.PROCESSOR_NAME % {
                common.PROCESSOR_NAME : func.processorName() }
        else:
            processorName = ""
        return self.bufferClassDecl_.set({
            'constructorDeclaration' : func.parameterList().generate(
                self.constructorDeclaration_),
            'functionName' : func.name(),
            'processorName' : processorName,
            'serializeMembers' : func.parameterList().generate(self.serializeMembers_),
            'memberDeclaration' : func.parameterList().generate(self.memberDeclaration_) })

    def generateHeaders(self, cat):
        """Generate class source for constructor function prototypes (header only)."""
        bufHeader = ''
        for func in cat.functions('*'):
            if func.generateVOs():
                bufHeader += self.generateHeader(func)
        self.bufferIncludesDecl_.set({
            'categoryName' : cat.name(),
            'headers' : bufHeader,
            'libRoot' : environment.config().libRootDirectory(),
            'namespaceObjects' : environment.config().namespaceObjects() })
        fileName = self.rootPath_ + 'vo_' + cat.name() + '.hpp'
        outputfile.OutputFile(self, fileName, self.copyright_, self.bufferIncludesDecl_)

    def generateFunction(self, func):
        """Generate source code for function (separate .cpp variant)."""
        return self.bufferClassBody_.set({
            'constructorInit' : func.parameterList().generate(self.constructorInit_),
            'constructorParList' : func.parameterList().generate(self.constructorDeclaration_),
            'functionName' : func.name(),
            'propertyDeclaration' : func.parameterList().generate(self.propertyDeclaration_),
            'propertyGet' : func.parameterList().generate(self.propertyGet_),
            'propertySet' : func.parameterList().generate(self.propertySet_),
            'populateObjectIDs' : func.parameterList().generate(self.populateObjectIDs_) })

    def generateFunctions(self, cat):
        """Generate source for function implementations (vo_<category>.cpp)."""
        bufFunc = ''
        for func in cat.functions('*'):
            if func.generateVOs():
                bufFunc += self.generateFunction(func)
        self.bufferIncludes_.set({
            'categoryName' : cat.name(),
            'functions' : bufFunc,
            'libRoot' : environment.config().libRootDirectory(),
            'namespaceObjects' : environment.config().namespaceObjects() })
        fileName = self.rootPath_ + 'vo_' + cat.name() + '.cpp'
        outputfile.OutputFile(self, fileName, self.copyright_, self.bufferIncludes_)

    #############################################
    # serializer interface
    #############################################

    def serialize(self, serializer):
        """Load/unload class state to/from serializer object."""
        super(ValueObjects, self).serialize(serializer)
        serializer.serializeBoolean(self, common.HEADERS_INLINE, False)
| 45.169399 | 99 | 0.638156 |
from gensrc.addins import addin
from gensrc.functions import function
from gensrc.utilities import outputfile
from gensrc.utilities import common
from gensrc.utilities import log
from gensrc.categories import category
from gensrc.configuration import environment
class ValueObjects(addin.Addin):
VO_INCLUDE = '''\
#include <%(libRootDirectory)s/valueobjects/vo_%(categoryName)s.hpp>\n'''
PROCESSOR_NAME = '''\
virtual std::string processorName() { return "%(processorName)s"; }'''
ate(self.constructorInit_),
'constructorParList' : func.parameterList().generate(self.constructorDeclaration_),
'functionName' : func.name(),
'propertyDeclaration' : func.parameterList().generate(self.propertyDeclaration_),
'propertyGet' : func.parameterList().generate(self.propertyGet_),
'propertySet' : func.parameterList().generate(self.propertySet_),
'propertyInsert' : func.parameterList().generate(self.propertyInsert_),
'propertyPush' : func.parameterList().generate(self.propertyPush_),
'populateObjectIDs' : func.parameterList().generate(self.populateObjectIDs_) })
def generateHeadersInline(self, cat):
bufHeader = ''
bufFunc = ''
for func in cat.functions('*'):
if func.generateVOs():
bufHeader += self.generateHeaderInline(func)
bufFunc += self.generateFunctionInline(func)
self.bufferIncludesInline_.set({
'categoryName' : cat.name(),
'functions' : bufFunc,
'headers' : bufHeader,
'libRoot' : environment.config().libRootDirectory(),
'namespaceObjects' : environment.config().namespaceObjects() })
fileName = self.rootPath_ + 'vo_' + cat.name() + '.hpp'
outputfile.OutputFile(self, fileName, self.copyright_, self.bufferIncludesInline_)
def generateHeader(self, func):
if func.processorName():
processorName = ValueObjects.PROCESSOR_NAME % {
common.PROCESSOR_NAME : func.processorName() }
else:
processorName = ""
return self.bufferClassDecl_.set({
'constructorDeclaration' : func.parameterList().generate(
self.constructorDeclaration_),
'functionName' : func.name(),
'processorName' : processorName,
'serializeMembers' : func.parameterList().generate(self.serializeMembers_),
'memberDeclaration' : func.parameterList().generate(self.memberDeclaration_) })
def generateHeaders(self, cat):
bufHeader = ''
for func in cat.functions('*'):
if func.generateVOs():
bufHeader += self.generateHeader(func)
self.bufferIncludesDecl_.set({
'categoryName' : cat.name(),
'headers' : bufHeader,
'libRoot' : environment.config().libRootDirectory(),
'namespaceObjects' : environment.config().namespaceObjects() })
fileName = self.rootPath_ + 'vo_' + cat.name() + '.hpp'
outputfile.OutputFile(self, fileName, self.copyright_, self.bufferIncludesDecl_)
def generateFunction(self, func):
return self.bufferClassBody_.set({
'constructorInit' : func.parameterList().generate(self.constructorInit_),
'constructorParList' : func.parameterList().generate(self.constructorDeclaration_),
'functionName' : func.name(),
'propertyDeclaration' : func.parameterList().generate(self.propertyDeclaration_),
'propertyGet' : func.parameterList().generate(self.propertyGet_),
'propertySet' : func.parameterList().generate(self.propertySet_),
'populateObjectIDs' : func.parameterList().generate(self.populateObjectIDs_) })
def generateFunctions(self, cat):
bufFunc = ''
for func in cat.functions('*'):
if func.generateVOs():
bufFunc += self.generateFunction(func)
self.bufferIncludes_.set({
'categoryName' : cat.name(),
'functions' : bufFunc,
'libRoot' : environment.config().libRootDirectory(),
'namespaceObjects' : environment.config().namespaceObjects() })
fileName = self.rootPath_ + 'vo_' + cat.name() + '.cpp'
outputfile.OutputFile(self, fileName, self.copyright_, self.bufferIncludes_)
| true | true |
f7f9d6c3d3dec5a14d7740b96e3ef44d190b7fac | 3,427 | py | Python | blog/models.py | NieShengyuan/blog | 24e64b8daa50b77a48a91ed23399ea3ca85cb02a | [
"MIT"
] | null | null | null | blog/models.py | NieShengyuan/blog | 24e64b8daa50b77a48a91ed23399ea3ca85cb02a | [
"MIT"
] | null | null | null | blog/models.py | NieShengyuan/blog | 24e64b8daa50b77a48a91ed23399ea3ca85cb02a | [
"MIT"
] | null | null | null | import markdown
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
import django.utils.six
from django.utils.html import strip_tags
class Category(models.Model):
"""
Django 要求模型必须继承 models.Model 类。
Category 只需要一个简单的分类名 name 就可以了。
CharField 指定了分类名 name 的数据类型,CharField 是字符型,
CharField 的 max_length 参数指定其最大长度,超过这个长度的分类名就不能被存入数据库。
当然 Django 还为我们提供了多种其它的数据类型,如日期时间类型 DateTimeField、整数类型 IntegerField 等等。
Django 内置的全部类型可查看文档:
https://docs.djangoproject.com/en/1.10/ref/models/fields/#field-types
"""
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Tag(models.Model):
"""
标签 Tag 也比较简单,和 Category 一样。
再次强调一定要继承 models.Model 类!
"""
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Post(models.Model):
"""
文章的数据库表稍微复杂一点,主要是涉及的字段更多。
"""
# ... 其它已有字段
# 新增 views 字段记录阅读量
views = models.PositiveIntegerField(default=0)
# 文章标题
title = models.CharField(max_length=70)
# 文章正文,我们使用了 TextField。
# 存储比较短的字符串可以使用 CharField,但对于文章的正文来说可能会是一大段文本,因此使用 TextField 来存储大段文本。
body = models.TextField()
# 这两个列分别表示文章的创建时间和最后一次修改时间,存储时间的字段用 DateTimeField 类型。
created_time = models.DateTimeField()
modified_time = models.DateTimeField()
# 文章摘要,可以没有文章摘要,但默认情况下 CharField 要求我们必须存入数据,否则就会报错。
# 指定 CharField 的 blank=True 参数值后就可以允许空值了。
excerpt = models.CharField(max_length=200, blank=True)
# 这是分类与标签,分类与标签的模型我们已经定义在上面。
# 我们在这里把文章对应的数据库表和分类、标签对应的数据库表关联了起来,但是关联形式稍微有点不同。
# 我们规定一篇文章只能对应一个分类,但是一个分类下可以有多篇文章,所以我们使用的是 ForeignKey,即一对多的关联关系。
# 而对于标签来说,一篇文章可以有多个标签,同一个标签下也可能有多篇文章,所以我们使用 ManyToManyField,表明这是多对多的关联关系。
# 同时我们规定文章可以没有标签,因此为标签 tags 指定了 blank=True。
# 如果你对 ForeignKey、ManyToManyField 不了解,请看教程中的解释,亦可参考官方文档:
# https://docs.djangoproject.com/en/1.10/topics/db/models/#relationships
category = models.ForeignKey(Category, on_delete=models.CASCADE)
tags = models.ManyToManyField(Tag, blank=True)
# 摘要
body = models.TextField()
excerpt = models.CharField(max_length=200, blank=True)
# 文章作者,这里 User 是从 django.contrib.auth.models 导入的。
# django.contrib.auth 是 Django 内置的应用,专门用于处理网站用户的注册、登录等流程,User 是 Django 为我们已经写好的用户模型。
# 这里我们通过 ForeignKey 把文章和 User 关联了起来。
# 因为我们规定一篇文章只能有一个作者,而一个作者可能会写多篇文章,因此这是一对多的关联关系,和 Category 类似。
author = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.title
# 自定义 get_absolute_url 方法
# 记得从 django.urls 中导入 reverse 函数
def get_absolute_url(self):
return reverse('blog:detail', kwargs={'pk': self.pk})
class Meta:
ordering = ['-created_time']
def increase_views(self):
self.views += 1
self.save(update_fields=['views'])
def save(self, *args, **kwargs):
# 如果没有填写摘要
if not self.excerpt:
# 首先实例化一个 Markdown 类,用于渲染 body 的文本
md = markdown.Markdown(extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
])
# 先将 Markdown 文本渲染成 HTML 文本
# strip_tags 去掉 HTML 文本的全部 HTML 标签
# 从文本摘取前 54 个字符赋给 excerpt
self.excerpt = strip_tags(md.convert(self.body))[:54]
# 调用父类的 save 方法将数据保存到数据库中
super(Post, self).save(*args, **kwargs)
| 31.731481 | 88 | 0.686606 | import markdown
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
import django.utils.six
from django.utils.html import strip_tags
class Category(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Tag(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Post(models.Model):
views = models.PositiveIntegerField(default=0)
title = models.CharField(max_length=70)
body = models.TextField()
created_time = models.DateTimeField()
modified_time = models.DateTimeField()
excerpt = models.CharField(max_length=200, blank=True)
= models.ForeignKey(Category, on_delete=models.CASCADE)
tags = models.ManyToManyField(Tag, blank=True)
body = models.TextField()
excerpt = models.CharField(max_length=200, blank=True)
author = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('blog:detail', kwargs={'pk': self.pk})
class Meta:
ordering = ['-created_time']
def increase_views(self):
self.views += 1
self.save(update_fields=['views'])
def save(self, *args, **kwargs):
if not self.excerpt:
md = markdown.Markdown(extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
])
self.excerpt = strip_tags(md.convert(self.body))[:54]
super(Post, self).save(*args, **kwargs)
| true | true |
f7f9d6ce5d08e6e9691522a79bdd359f09bb7203 | 7,257 | py | Python | push_notifications/models.py | DataGreed/django-push-notifications | dc241fbe0346719eac8b823a5707fd05f9b23800 | [
"MIT"
] | 1 | 2019-05-07T10:48:26.000Z | 2019-05-07T10:48:26.000Z | push_notifications/models.py | ubergrape/django-push-notifications | c610dd9c7871f4e81bdffce783df8b040ca22878 | [
"MIT"
] | null | null | null | push_notifications/models.py | ubergrape/django-push-notifications | c610dd9c7871f4e81bdffce783df8b040ca22878 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils.translation import ugettext_lazy as _
from .fields import HexIntegerField
from .settings import PUSH_NOTIFICATIONS_SETTINGS as SETTINGS
CLOUD_MESSAGE_TYPES = (
("FCM", "Firebase Cloud Message"),
("GCM", "Google Cloud Message"),
)
BROWSER_TYPES = (
("CHROME", "Chrome"),
("FIREFOX", "Firefox"),
("OPERA", "Opera"),
)
class Device(models.Model):
name = models.CharField(max_length=255, verbose_name=_("Name"), blank=True, null=True)
active = models.BooleanField(
verbose_name=_("Is active"), default=True,
help_text=_("Inactive devices will not be sent notifications")
)
user = models.ForeignKey(
SETTINGS["USER_MODEL"], blank=True, null=True, on_delete=models.CASCADE
)
date_created = models.DateTimeField(
verbose_name=_("Creation date"), auto_now_add=True, null=True
)
application_id = models.CharField(
max_length=64, verbose_name=_("Application ID"),
help_text=_(
"Opaque application identity, should be filled in for multiple"
" key/certificate access"
),
blank=True, null=True
)
class Meta:
abstract = True
def __str__(self):
return (
self.name or
str(self.device_id or "") or
"{} for {}".format(self.__class__.__name__, self.user or "unknown user")
)
class GCMDeviceManager(models.Manager):
def get_queryset(self):
return GCMDeviceQuerySet(self.model)
class GCMDeviceQuerySet(models.query.QuerySet):
def send_message(self, message, **kwargs):
if self:
from .gcm import send_message as gcm_send_message
data = kwargs.pop("extra", {})
if message is not None:
data["message"] = message
app_ids = self.filter(active=True).order_by(
"application_id"
).values_list("application_id", flat=True).distinct()
response = []
for cloud_type in ("FCM", "GCM"):
for app_id in app_ids:
reg_ids = list(
self.filter(
active=True, cloud_message_type=cloud_type, application_id=app_id).values_list(
"registration_id", flat=True
)
)
if reg_ids:
r = gcm_send_message(reg_ids, data, cloud_type, application_id=app_id, **kwargs)
response.append(r)
return response
class GCMDevice(Device):
# device_id cannot be a reliable primary key as fragmentation between different devices
# can make it turn out to be null and such:
# http://android-developers.blogspot.co.uk/2011/03/identifying-app-installations.html
device_id = HexIntegerField(
verbose_name=_("Device ID"), blank=True, null=True, db_index=True,
help_text=_("ANDROID_ID / TelephonyManager.getDeviceId() (always as hex)")
)
registration_id = models.TextField(verbose_name=_("Registration ID"), unique=SETTINGS["UNIQUE_REG_ID"])
cloud_message_type = models.CharField(
verbose_name=_("Cloud Message Type"), max_length=3,
choices=CLOUD_MESSAGE_TYPES, default="GCM",
help_text=_("You should choose FCM or GCM")
)
objects = GCMDeviceManager()
class Meta:
verbose_name = _("GCM device")
def send_message(self, message, **kwargs):
from .gcm import send_message as gcm_send_message
data = kwargs.pop("extra", {})
if message is not None:
data["message"] = message
return gcm_send_message(
self.registration_id, data, self.cloud_message_type,
application_id=self.application_id, **kwargs
)
class APNSDeviceManager(models.Manager):
def get_queryset(self):
return APNSDeviceQuerySet(self.model)
class APNSDeviceQuerySet(models.query.QuerySet):
def send_message(self, message, creds=None, **kwargs):
if self:
from .apns import apns_send_bulk_message
app_ids = self.filter(active=True).order_by("application_id")\
.values_list("application_id", flat=True).distinct()
res = []
for app_id in app_ids:
reg_ids = list(self.filter(active=True, application_id=app_id).values_list(
"registration_id", flat=True)
)
r = apns_send_bulk_message(
registration_ids=reg_ids, alert=message, application_id=app_id,
creds=creds, **kwargs
)
if hasattr(r, "keys"):
res += [r]
elif hasattr(r, "__getitem__"):
res += r
return res
class APNSDevice(Device):
device_id = models.UUIDField(
verbose_name=_("Device ID"), blank=True, null=True, db_index=True,
help_text="UDID / UIDevice.identifierForVendor()"
)
registration_id = models.CharField(
verbose_name=_("Registration ID"), max_length=200, unique=SETTINGS["UNIQUE_REG_ID"]
)
objects = APNSDeviceManager()
class Meta:
verbose_name = _("APNS device")
def send_message(self, message, creds=None, **kwargs):
from .apns import apns_send_message
return apns_send_message(
registration_id=self.registration_id,
alert=message,
application_id=self.application_id, creds=creds,
**kwargs
)
class WNSDeviceManager(models.Manager):
def get_queryset(self):
return WNSDeviceQuerySet(self.model)
class WNSDeviceQuerySet(models.query.QuerySet):
def send_message(self, message, **kwargs):
from .wns import wns_send_bulk_message
app_ids = self.filter(active=True).order_by("application_id").values_list(
"application_id", flat=True
).distinct()
res = []
for app_id in app_ids:
reg_ids = self.filter(active=True, application_id=app_id).values_list(
"registration_id", flat=True
)
r = wns_send_bulk_message(uri_list=list(reg_ids), message=message, **kwargs)
if hasattr(r, "keys"):
res += [r]
elif hasattr(r, "__getitem__"):
res += r
return res
class WNSDevice(Device):
device_id = models.UUIDField(
verbose_name=_("Device ID"), blank=True, null=True, db_index=True,
help_text=_("GUID()")
)
registration_id = models.TextField(verbose_name=_("Notification URI"), unique=SETTINGS["UNIQUE_REG_ID"])
objects = WNSDeviceManager()
class Meta:
verbose_name = _("WNS device")
def send_message(self, message, **kwargs):
from .wns import wns_send_message
return wns_send_message(
uri=self.registration_id, message=message, application_id=self.application_id,
**kwargs
)
class WebPushDeviceManager(models.Manager):
def get_queryset(self):
return WebPushDeviceQuerySet(self.model)
class WebPushDeviceQuerySet(models.query.QuerySet):
def send_message(self, message, **kwargs):
devices = self.filter(active=True).order_by("application_id").distinct()
res = []
for device in devices:
res.append(device.send_message(message))
return res
class WebPushDevice(Device):
registration_id = models.TextField(verbose_name=_("Registration ID"), unique=SETTINGS["UNIQUE_REG_ID"])
p256dh = models.CharField(
verbose_name=_("User public encryption key"),
max_length=88)
auth = models.CharField(
verbose_name=_("User auth secret"),
max_length=24)
browser = models.CharField(
verbose_name=_("Browser"), max_length=10,
choices=BROWSER_TYPES, default=BROWSER_TYPES[0][0],
help_text=_("Currently only support to Chrome, Firefox and Opera browsers")
)
objects = WebPushDeviceManager()
class Meta:
verbose_name = _("WebPush device")
@property
def device_id(self):
return None
def send_message(self, message, **kwargs):
from .webpush import webpush_send_message
return webpush_send_message(
uri=self.registration_id, message=message, browser=self.browser,
auth=self.auth, p256dh=self.p256dh, application_id=self.application_id, **kwargs)
| 27.911538 | 105 | 0.729503 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from .fields import HexIntegerField
from .settings import PUSH_NOTIFICATIONS_SETTINGS as SETTINGS
CLOUD_MESSAGE_TYPES = (
("FCM", "Firebase Cloud Message"),
("GCM", "Google Cloud Message"),
)
BROWSER_TYPES = (
("CHROME", "Chrome"),
("FIREFOX", "Firefox"),
("OPERA", "Opera"),
)
class Device(models.Model):
name = models.CharField(max_length=255, verbose_name=_("Name"), blank=True, null=True)
active = models.BooleanField(
verbose_name=_("Is active"), default=True,
help_text=_("Inactive devices will not be sent notifications")
)
user = models.ForeignKey(
SETTINGS["USER_MODEL"], blank=True, null=True, on_delete=models.CASCADE
)
date_created = models.DateTimeField(
verbose_name=_("Creation date"), auto_now_add=True, null=True
)
application_id = models.CharField(
max_length=64, verbose_name=_("Application ID"),
help_text=_(
"Opaque application identity, should be filled in for multiple"
" key/certificate access"
),
blank=True, null=True
)
class Meta:
abstract = True
def __str__(self):
return (
self.name or
str(self.device_id or "") or
"{} for {}".format(self.__class__.__name__, self.user or "unknown user")
)
class GCMDeviceManager(models.Manager):
def get_queryset(self):
return GCMDeviceQuerySet(self.model)
class GCMDeviceQuerySet(models.query.QuerySet):
def send_message(self, message, **kwargs):
if self:
from .gcm import send_message as gcm_send_message
data = kwargs.pop("extra", {})
if message is not None:
data["message"] = message
app_ids = self.filter(active=True).order_by(
"application_id"
).values_list("application_id", flat=True).distinct()
response = []
for cloud_type in ("FCM", "GCM"):
for app_id in app_ids:
reg_ids = list(
self.filter(
active=True, cloud_message_type=cloud_type, application_id=app_id).values_list(
"registration_id", flat=True
)
)
if reg_ids:
r = gcm_send_message(reg_ids, data, cloud_type, application_id=app_id, **kwargs)
response.append(r)
return response
class GCMDevice(Device):
device_id = HexIntegerField(
verbose_name=_("Device ID"), blank=True, null=True, db_index=True,
help_text=_("ANDROID_ID / TelephonyManager.getDeviceId() (always as hex)")
)
registration_id = models.TextField(verbose_name=_("Registration ID"), unique=SETTINGS["UNIQUE_REG_ID"])
cloud_message_type = models.CharField(
verbose_name=_("Cloud Message Type"), max_length=3,
choices=CLOUD_MESSAGE_TYPES, default="GCM",
help_text=_("You should choose FCM or GCM")
)
objects = GCMDeviceManager()
class Meta:
verbose_name = _("GCM device")
def send_message(self, message, **kwargs):
from .gcm import send_message as gcm_send_message
data = kwargs.pop("extra", {})
if message is not None:
data["message"] = message
return gcm_send_message(
self.registration_id, data, self.cloud_message_type,
application_id=self.application_id, **kwargs
)
class APNSDeviceManager(models.Manager):
def get_queryset(self):
return APNSDeviceQuerySet(self.model)
class APNSDeviceQuerySet(models.query.QuerySet):
def send_message(self, message, creds=None, **kwargs):
if self:
from .apns import apns_send_bulk_message
app_ids = self.filter(active=True).order_by("application_id")\
.values_list("application_id", flat=True).distinct()
res = []
for app_id in app_ids:
reg_ids = list(self.filter(active=True, application_id=app_id).values_list(
"registration_id", flat=True)
)
r = apns_send_bulk_message(
registration_ids=reg_ids, alert=message, application_id=app_id,
creds=creds, **kwargs
)
if hasattr(r, "keys"):
res += [r]
elif hasattr(r, "__getitem__"):
res += r
return res
class APNSDevice(Device):
device_id = models.UUIDField(
verbose_name=_("Device ID"), blank=True, null=True, db_index=True,
help_text="UDID / UIDevice.identifierForVendor()"
)
registration_id = models.CharField(
verbose_name=_("Registration ID"), max_length=200, unique=SETTINGS["UNIQUE_REG_ID"]
)
objects = APNSDeviceManager()
class Meta:
verbose_name = _("APNS device")
def send_message(self, message, creds=None, **kwargs):
from .apns import apns_send_message
return apns_send_message(
registration_id=self.registration_id,
alert=message,
application_id=self.application_id, creds=creds,
**kwargs
)
class WNSDeviceManager(models.Manager):
def get_queryset(self):
return WNSDeviceQuerySet(self.model)
class WNSDeviceQuerySet(models.query.QuerySet):
def send_message(self, message, **kwargs):
from .wns import wns_send_bulk_message
app_ids = self.filter(active=True).order_by("application_id").values_list(
"application_id", flat=True
).distinct()
res = []
for app_id in app_ids:
reg_ids = self.filter(active=True, application_id=app_id).values_list(
"registration_id", flat=True
)
r = wns_send_bulk_message(uri_list=list(reg_ids), message=message, **kwargs)
if hasattr(r, "keys"):
res += [r]
elif hasattr(r, "__getitem__"):
res += r
return res
class WNSDevice(Device):
device_id = models.UUIDField(
verbose_name=_("Device ID"), blank=True, null=True, db_index=True,
help_text=_("GUID()")
)
registration_id = models.TextField(verbose_name=_("Notification URI"), unique=SETTINGS["UNIQUE_REG_ID"])
objects = WNSDeviceManager()
class Meta:
verbose_name = _("WNS device")
def send_message(self, message, **kwargs):
from .wns import wns_send_message
return wns_send_message(
uri=self.registration_id, message=message, application_id=self.application_id,
**kwargs
)
class WebPushDeviceManager(models.Manager):
def get_queryset(self):
return WebPushDeviceQuerySet(self.model)
class WebPushDeviceQuerySet(models.query.QuerySet):
def send_message(self, message, **kwargs):
devices = self.filter(active=True).order_by("application_id").distinct()
res = []
for device in devices:
res.append(device.send_message(message))
return res
class WebPushDevice(Device):
registration_id = models.TextField(verbose_name=_("Registration ID"), unique=SETTINGS["UNIQUE_REG_ID"])
p256dh = models.CharField(
verbose_name=_("User public encryption key"),
max_length=88)
auth = models.CharField(
verbose_name=_("User auth secret"),
max_length=24)
browser = models.CharField(
verbose_name=_("Browser"), max_length=10,
choices=BROWSER_TYPES, default=BROWSER_TYPES[0][0],
help_text=_("Currently only support to Chrome, Firefox and Opera browsers")
)
objects = WebPushDeviceManager()
class Meta:
verbose_name = _("WebPush device")
@property
def device_id(self):
return None
def send_message(self, message, **kwargs):
from .webpush import webpush_send_message
return webpush_send_message(
uri=self.registration_id, message=message, browser=self.browser,
auth=self.auth, p256dh=self.p256dh, application_id=self.application_id, **kwargs)
| true | true |
f7f9d7911fe578e951a9ca8ec9f03a27b1b6ea67 | 5,447 | py | Python | tests/api/v2/test_max_project_volume.py | CiscoSystems/ceilometer | a9267fd94e7854afa0720d761fbe75d946e7167d | [
"Apache-2.0"
] | 1 | 2021-11-22T11:00:53.000Z | 2021-11-22T11:00:53.000Z | tests/api/v2/test_max_project_volume.py | CiscoSystems/ceilometer | a9267fd94e7854afa0720d761fbe75d946e7167d | [
"Apache-2.0"
] | null | null | null | tests/api/v2/test_max_project_volume.py | CiscoSystems/ceilometer | a9267fd94e7854afa0720d761fbe75d946e7167d | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Author: Steven Berler <steven.berler@dreamhost.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test getting the max resource volume.
"""
import datetime
from oslo.config import cfg
from ceilometer.collector import meter
from ceilometer import counter
from ceilometer.storage.impl_mongodb import require_map_reduce
from .base import FunctionalTest
class TestMaxProjectVolume(FunctionalTest):
PATH = '/meters/volume.size/statistics'
def setUp(self):
super(TestMaxProjectVolume, self).setUp()
require_map_reduce(self.conn)
self.counters = []
for i in range(3):
c = counter.Counter(
'volume.size',
'gauge',
'GiB',
5 + i,
'user-id',
'project1',
'resource-id-%s' % i,
timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i),
resource_metadata={'display_name': 'test-volume',
'tag': 'self.counter',
}
)
self.counters.append(c)
msg = meter.meter_message_from_counter(c,
cfg.CONF.metering_secret,
'source1',
)
self.conn.record_metering_data(msg)
def test_no_time_bounds(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
}])
self.assertEqual(data[0]['max'], 7)
self.assertEqual(data[0]['count'], 3)
def test_start_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T11:30:00',
},
])
self.assertEqual(data[0]['max'], 7)
self.assertEqual(data[0]['count'], 2)
def test_start_timestamp_after(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T12:34:00',
},
])
self.assertEqual(data, [])
def test_end_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T11:30:00',
},
])
self.assertEqual(data[0]['max'], 5)
self.assertEqual(data[0]['count'], 1)
def test_end_timestamp_before(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T09:54:00',
},
])
self.assertEqual(data, [])
def test_start_end_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T11:30:00',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T11:32:00',
},
])
self.assertEqual(data[0]['max'], 6)
self.assertEqual(data[0]['count'], 1)
| 41.580153 | 76 | 0.38792 |
import datetime
from oslo.config import cfg
from ceilometer.collector import meter
from ceilometer import counter
from ceilometer.storage.impl_mongodb import require_map_reduce
from .base import FunctionalTest
class TestMaxProjectVolume(FunctionalTest):
PATH = '/meters/volume.size/statistics'
def setUp(self):
super(TestMaxProjectVolume, self).setUp()
require_map_reduce(self.conn)
self.counters = []
for i in range(3):
c = counter.Counter(
'volume.size',
'gauge',
'GiB',
5 + i,
'user-id',
'project1',
'resource-id-%s' % i,
timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i),
resource_metadata={'display_name': 'test-volume',
'tag': 'self.counter',
}
)
self.counters.append(c)
msg = meter.meter_message_from_counter(c,
cfg.CONF.metering_secret,
'source1',
)
self.conn.record_metering_data(msg)
def test_no_time_bounds(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
}])
self.assertEqual(data[0]['max'], 7)
self.assertEqual(data[0]['count'], 3)
def test_start_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T11:30:00',
},
])
self.assertEqual(data[0]['max'], 7)
self.assertEqual(data[0]['count'], 2)
def test_start_timestamp_after(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T12:34:00',
},
])
self.assertEqual(data, [])
def test_end_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T11:30:00',
},
])
self.assertEqual(data[0]['max'], 5)
self.assertEqual(data[0]['count'], 1)
def test_end_timestamp_before(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T09:54:00',
},
])
self.assertEqual(data, [])
def test_start_end_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T11:30:00',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T11:32:00',
},
])
self.assertEqual(data[0]['max'], 6)
self.assertEqual(data[0]['count'], 1)
| true | true |
f7f9d815fd74248ee87d991bd107aab15b47f8cc | 618 | py | Python | easy/867-transpose-matrix.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | 2 | 2021-03-14T11:38:26.000Z | 2021-03-14T11:38:30.000Z | easy/867-transpose-matrix.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | null | null | null | easy/867-transpose-matrix.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | 1 | 2022-01-17T19:33:23.000Z | 2022-01-17T19:33:23.000Z | '''
转置矩阵
给你一个二维整数数组 matrix, 返回 matrix 的 转置矩阵 。
矩阵的 转置 是指将矩阵的主对角线翻转,交换矩阵的行索引与列索引。
'''
from typing import List
'''
思路:简单问题,原矩阵大小为m*n,创建一个n*m大小的新矩阵,按照行列转化的方式将旧矩阵数据复制过去
'''
class Solution:
def transpose(self, matrix: List[List[int]]) -> List[List[int]]:
m = len(matrix)
n = len(matrix[0])
newMatrix = [[]] * n
for i in range(n):
newMatrix[i] = [0] * m
for j in range(m):
newMatrix[i][j] = matrix[j][i]
return newMatrix
s = Solution()
print(s.transpose([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
print(s.transpose([[1, 2, 3], [4, 5, 6]]))
| 20.6 | 68 | 0.548544 |
from typing import List
class Solution:
def transpose(self, matrix: List[List[int]]) -> List[List[int]]:
m = len(matrix)
n = len(matrix[0])
newMatrix = [[]] * n
for i in range(n):
newMatrix[i] = [0] * m
for j in range(m):
newMatrix[i][j] = matrix[j][i]
return newMatrix
s = Solution()
print(s.transpose([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
print(s.transpose([[1, 2, 3], [4, 5, 6]]))
| true | true |
f7f9dae3e42da4080067f70c12a852c4112f050d | 1,019 | py | Python | opencv/tutorials/imageProcessing/hough_line/probabilistic.py | SSG-DRD-IOT/commercial-iot-security-system | 0c3d89b35d0468d4d3cc5ce2653b3f0ac82652a9 | [
"MIT"
] | null | null | null | opencv/tutorials/imageProcessing/hough_line/probabilistic.py | SSG-DRD-IOT/commercial-iot-security-system | 0c3d89b35d0468d4d3cc5ce2653b3f0ac82652a9 | [
"MIT"
] | null | null | null | opencv/tutorials/imageProcessing/hough_line/probabilistic.py | SSG-DRD-IOT/commercial-iot-security-system | 0c3d89b35d0468d4d3cc5ce2653b3f0ac82652a9 | [
"MIT"
] | 3 | 2022-01-22T05:02:41.000Z | 2022-03-31T08:13:06.000Z | """
Probabilistic Hough Transform
# this is Hough you do it.
in HT, even for line w/ 2 arguments, lots of computation
Probabilistic Hough Transform is optimization of HT we saw
only considers random subset of points
sufficient for line detection
have to decrease threshold
function: cv2.HoughLinesP()
2 arguments:
minLineLength - min length of line; segments shorter than this rejected
maxLineGap = max allowed gap between line segments to treat them as single line
# directly returns endpts of lines
previously, only got parameters of lines and had to find all points
now, all directly given
"""
import cv2
import numpy as np
img = cv2.imread('dave.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize = 3)
lines = cv2.HoughLinesP(edges, 1, np.pi/180,100,minLineLength=100,maxLineGap=10)
for line in lines:
x1, y1, x2, y2 = line[0]
cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.imwrite('houghlines5.jpg', img)
| 32.870968 | 83 | 0.718351 |
import cv2
import numpy as np
img = cv2.imread('dave.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize = 3)
lines = cv2.HoughLinesP(edges, 1, np.pi/180,100,minLineLength=100,maxLineGap=10)
for line in lines:
x1, y1, x2, y2 = line[0]
cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.imwrite('houghlines5.jpg', img)
| true | true |
f7f9db121dcf5ff2c64326f029b1adcc1fbd467d | 2,758 | py | Python | source/tests/test.py | xu4wang/todo | 5e53e162233b8a7da32f6c93a02849134a1d67b5 | [
"MIT"
] | 419 | 2016-02-13T19:26:13.000Z | 2022-03-22T23:41:43.000Z | source/tests/test.py | xu4wang/todo | 5e53e162233b8a7da32f6c93a02849134a1d67b5 | [
"MIT"
] | 19 | 2016-02-15T05:22:54.000Z | 2022-03-27T20:43:14.000Z | source/tests/test.py | xu4wang/todo | 5e53e162233b8a7da32f6c93a02849134a1d67b5 | [
"MIT"
] | 58 | 2016-02-14T02:24:18.000Z | 2022-03-26T19:23:23.000Z | #! /usr/bin/env python3
import unittest, sys, os, functools, argparse
import os.path as op
from . import utils
from . import test_todo, test_utils, test_rainbow, test_text_wrap # pylint: disable=W0611
sys.path.insert(0, op.abspath('.'))
import todo.todo as todo
import todo.utils as tutils
from todo.data_access import DB_PATH as DATA_LOCATION
from todo.todo import CONFIG_FILE
NOW = todo.NOW
TEST_CONFIG = 'tests/.toduhrc'
TEST_DATA_FILE = 'tests/empty_data.sqlite'
UNIT_TESTS = [
'tests.test_todo',
'tests.test_utils',
'tests.test_rainbow',
'tests.test_text_wrap'
]
TRACES_DIR = 'tests/traces'
TEST_REPLACEMENTS = [
(DATA_LOCATION, TEST_DATA_FILE),
(CONFIG_FILE, TEST_CONFIG)
]
class TestSetup:
def __init__(self, replacements=TEST_REPLACEMENTS):
self.replacements = {repl: None for repl in replacements}
def __enter__(self):
for source, repl in self.replacements:
backup = utils.backup_and_replace(source, repl)
self.replacements[(source, repl)] = backup
return self
def __exit__(self, *args):
for (source, repl), backup in self.replacements.items():
os.rename(backup, source)
def test_trace(trace_file, print_commands=False):
with TestSetup() as setup:
get_dt = functools.partial(tutils.get_datetime, now=NOW)
errors = utils.test_trace(trace_file, get_dt, print_commands)
if errors['clash'] == 0 and errors['crash'] == 0:
print('OK')
else:
print('FAIL')
def main():
parser = argparse.ArgumentParser(description='todo test suite')
parser.add_argument('-a', '--all', action='store_true',
help="Run functional test in addition to unit tests")
parser.add_argument('-f', '--func', action='store_true',
help="Run only functional test")
parser.add_argument('-v', '--verbose', action='store_true',
help="Prints the commands being ran during functional test")
parser.add_argument('-b', '--build', action='store',
dest='build',
help="Build a trace file")
parser.add_argument('-o', '--out', action='store',
dest='out',
help="Destination of a trace build")
args = parser.parse_args()
if args.build is not None:
out = args.build
if args.out is not None:
out = args.out
with TestSetup() as setup:
utils.run_trace(args.build, out)
sys.exit(0)
if not args.func:
suite = unittest.TestSuite()
test_loader = unittest.TestLoader()
for module in UNIT_TESTS:
mod_suite = test_loader.loadTestsFromModule(sys.modules[module])
suite.addTests(mod_suite)
print('* Unit tests')
unittest.TextTestRunner().run(suite)
if args.func or args.all:
print('* Fonctional tests')
for filename in sorted(os.listdir(TRACES_DIR)):
path = op.join(TRACES_DIR, filename)
print('[{}]'.format(filename))
test_trace(path, args.verbose)
if __name__ == '__main__':
main()
| 26.266667 | 89 | 0.719362 |
import unittest, sys, os, functools, argparse
import os.path as op
from . import utils
from . import test_todo, test_utils, test_rainbow, test_text_wrap
sys.path.insert(0, op.abspath('.'))
import todo.todo as todo
import todo.utils as tutils
from todo.data_access import DB_PATH as DATA_LOCATION
from todo.todo import CONFIG_FILE
NOW = todo.NOW
TEST_CONFIG = 'tests/.toduhrc'
TEST_DATA_FILE = 'tests/empty_data.sqlite'
UNIT_TESTS = [
'tests.test_todo',
'tests.test_utils',
'tests.test_rainbow',
'tests.test_text_wrap'
]
TRACES_DIR = 'tests/traces'
TEST_REPLACEMENTS = [
(DATA_LOCATION, TEST_DATA_FILE),
(CONFIG_FILE, TEST_CONFIG)
]
class TestSetup:
def __init__(self, replacements=TEST_REPLACEMENTS):
self.replacements = {repl: None for repl in replacements}
def __enter__(self):
for source, repl in self.replacements:
backup = utils.backup_and_replace(source, repl)
self.replacements[(source, repl)] = backup
return self
def __exit__(self, *args):
for (source, repl), backup in self.replacements.items():
os.rename(backup, source)
def test_trace(trace_file, print_commands=False):
with TestSetup() as setup:
get_dt = functools.partial(tutils.get_datetime, now=NOW)
errors = utils.test_trace(trace_file, get_dt, print_commands)
if errors['clash'] == 0 and errors['crash'] == 0:
print('OK')
else:
print('FAIL')
def main():
parser = argparse.ArgumentParser(description='todo test suite')
parser.add_argument('-a', '--all', action='store_true',
help="Run functional test in addition to unit tests")
parser.add_argument('-f', '--func', action='store_true',
help="Run only functional test")
parser.add_argument('-v', '--verbose', action='store_true',
help="Prints the commands being ran during functional test")
parser.add_argument('-b', '--build', action='store',
dest='build',
help="Build a trace file")
parser.add_argument('-o', '--out', action='store',
dest='out',
help="Destination of a trace build")
args = parser.parse_args()
if args.build is not None:
out = args.build
if args.out is not None:
out = args.out
with TestSetup() as setup:
utils.run_trace(args.build, out)
sys.exit(0)
if not args.func:
suite = unittest.TestSuite()
test_loader = unittest.TestLoader()
for module in UNIT_TESTS:
mod_suite = test_loader.loadTestsFromModule(sys.modules[module])
suite.addTests(mod_suite)
print('* Unit tests')
unittest.TextTestRunner().run(suite)
if args.func or args.all:
print('* Fonctional tests')
for filename in sorted(os.listdir(TRACES_DIR)):
path = op.join(TRACES_DIR, filename)
print('[{}]'.format(filename))
test_trace(path, args.verbose)
if __name__ == '__main__':
main()
| true | true |
f7f9db6f26a5d9f211f23e307181a8967645b204 | 885 | py | Python | setup.py | sfroth/python-ontrac | f593a80a226a4f3d1191e09e1141e6bbe7370148 | [
"BSD-3-Clause"
] | null | null | null | setup.py | sfroth/python-ontrac | f593a80a226a4f3d1191e09e1141e6bbe7370148 | [
"BSD-3-Clause"
] | null | null | null | setup.py | sfroth/python-ontrac | f593a80a226a4f3d1191e09e1141e6bbe7370148 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from distutils.core import setup
import ontrac
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
]
KEYWORDS = 'ontrac api wrapper'
setup(name='ontrac',
version=ontrac.VERSION,
description='OnTrac Web Services API wrapper.',
url='https://github.com/SideStudios/python-ontrac',
packages=['ontrac'],
package_dir={'ontrac': 'ontrac'},
platforms=['Platform Independent'],
license='BSD',
classifiers=CLASSIFIERS,
keywords=KEYWORDS,
requires=['lxml'],
install_requires=['lxml>=3.4.1'],
)
| 29.5 | 67 | 0.632768 |
from distutils.core import setup
import ontrac
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
]
KEYWORDS = 'ontrac api wrapper'
setup(name='ontrac',
version=ontrac.VERSION,
description='OnTrac Web Services API wrapper.',
url='https://github.com/SideStudios/python-ontrac',
packages=['ontrac'],
package_dir={'ontrac': 'ontrac'},
platforms=['Platform Independent'],
license='BSD',
classifiers=CLASSIFIERS,
keywords=KEYWORDS,
requires=['lxml'],
install_requires=['lxml>=3.4.1'],
)
| true | true |
f7f9dc303e099414ffbbcd093f3e36fdd4a3c761 | 985 | py | Python | day2/day2.py | BLannoo/Advent-of-Code-2020 | a4efa637e5885a3f849b8bad13c7115c82bdec97 | [
"MIT"
] | null | null | null | day2/day2.py | BLannoo/Advent-of-Code-2020 | a4efa637e5885a3f849b8bad13c7115c82bdec97 | [
"MIT"
] | null | null | null | day2/day2.py | BLannoo/Advent-of-Code-2020 | a4efa637e5885a3f849b8bad13c7115c82bdec97 | [
"MIT"
] | null | null | null | import re
def test_silver():
assert 660 == valid_count(read_input(), silver)
def test_gold():
assert 530 == valid_count(read_input(), gold)
def silver(line: str) -> bool:
match = re.match(
r"(?P<min>\d+)-(?P<max>\d+) (?P<char>\w): (?P<password>\w+)",
line
)
return int(match.group("min")) <= match.group("password").count(match.group("char")) <= int(match.group("max"))
def gold(line: str) -> bool:
match = re.match(r"(?P<first>\d+)-(?P<second>\d+) (?P<char>\w): (?P<password>\w+)", line)
return (
(match.group("password")[int(match.group("first")) - 1] == match.group("char"))
^
(match.group("password")[int(match.group("second")) - 1] == match.group("char"))
)
def valid_count(lines, rule):
valid = [
line
for line in lines
if rule(line)
]
return len(valid)
def read_input():
with open("input.txt") as file:
return file.read().split("\n")
| 24.02439 | 115 | 0.550254 | import re
def test_silver():
assert 660 == valid_count(read_input(), silver)
def test_gold():
assert 530 == valid_count(read_input(), gold)
def silver(line: str) -> bool:
match = re.match(
r"(?P<min>\d+)-(?P<max>\d+) (?P<char>\w): (?P<password>\w+)",
line
)
return int(match.group("min")) <= match.group("password").count(match.group("char")) <= int(match.group("max"))
def gold(line: str) -> bool:
match = re.match(r"(?P<first>\d+)-(?P<second>\d+) (?P<char>\w): (?P<password>\w+)", line)
return (
(match.group("password")[int(match.group("first")) - 1] == match.group("char"))
^
(match.group("password")[int(match.group("second")) - 1] == match.group("char"))
)
def valid_count(lines, rule):
valid = [
line
for line in lines
if rule(line)
]
return len(valid)
def read_input():
with open("input.txt") as file:
return file.read().split("\n")
| true | true |
f7f9dc538099b1ac2c1efc11043efddc264d346b | 10,517 | py | Python | venv/Lib/site-packages/alpacalib/syntax.py | rexliu3/StockTradingBotCloud | 46b732b9c05f73bc0e856a3c4a16854b6d12e18e | [
"MIT"
] | null | null | null | venv/Lib/site-packages/alpacalib/syntax.py | rexliu3/StockTradingBotCloud | 46b732b9c05f73bc0e856a3c4a16854b6d12e18e | [
"MIT"
] | null | null | null | venv/Lib/site-packages/alpacalib/syntax.py | rexliu3/StockTradingBotCloud | 46b732b9c05f73bc0e856a3c4a16854b6d12e18e | [
"MIT"
] | 1 | 2020-06-28T11:47:47.000Z | 2020-06-28T11:47:47.000Z | """
syntax.py
Syntax Pasrse for Regex
"""
from alpacalib.rast import RAST
from alpacalib.lex import LexParser
from alpacalib.charset import CharacterSet
class SyntaxParserError(Exception):
pass
class SyntaxParser:
def __init__(self, regex):
self.lex = LexParser(regex)
def build(self):
self.root = self.__parse_regex()
# check redundancy character.
if not self.token[0] or self.token[1] != 'EOR':
raise SyntaxParserError(
'Regex Syntax Error: we need EOR, but we encount "%s"!'
% self.token[1])
return self.root
def __parse_regex(self):
self.token = self.lex.get_token()
# select(REGEX ::= SIMPLE UNION_ELR) = {'(', '.', operand, '['}
if not self.token[0] or self.token[1] in ['(', '.', '[']:
elem = self.__parse_simple()
root = RAST()
self.__parse_union_elr(root)
if not root.is_empty():
root.children.insert(0, elem)
return root
else:
return elem
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "(", ".", "[" or operand, but we encount "%s"!'
% self.token[1])
def __parse_union_elr(self, root):
# select(UNION_ELR ::= '|' SIMPLE UNION_ELR) = {'|'}
# select(UNION_ELR ::= $) = {#, ')'}
if self.token[0] and self.token[1] == '|':
root.is_operator, root.token = self.token
self.token = self.lex.get_token()
root.children.append(self.__parse_simple())
self.__parse_union_elr(root)
elif self.token[0] and self.token[1] in [')', 'EOR']:
return
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "|", ")" or EOR, but we encount "%s"!'
% self.token[1])
def __parse_simple(self):
# select(SIMPLE ::= BASIC CONCATENATION_ELR) = {'(', '.', operand, '['}
if not self.token[0] or self.token[1] in ['(', '.', '[']:
elem = self.__parse_basic()
root = RAST()
self.__parse_concatenation_elr(root)
if not root.is_empty():
root.children.insert(0, elem)
return root
else:
return elem
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "(", ".", "[" or operand, but we encount "%s"!'
% self.token[1])
def __parse_concatenation_elr(self, root):
# select(CONCATENATION_ELR ::= BASIC CONCATENATION_ELR) = {'(', '.', operand, '['}
# select(CONCATENATION_ELR ::= $) = {'|', #, ')'}
if not self.token[0] or self.token[1] in ['(', '.', '[']:
root.is_operator = True
root.children.append(self.__parse_basic())
self.__parse_concatenation_elr(root)
elif self.token[0] and self.token[1] in ['|', ')', 'EOR']:
return
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "(", ".", "[", operand or "|", ")", EOR, but we encount "%s"!'
% self.token[1])
def __parse_basic(self):
# select(BASIC ::= ELEMENTARY BASIC_ECF) = {'(', '.', operand, '['}
if not self.token[0] or self.token[1] in ['(', '.', '[']:
elem = self.__parse_elementary()
root = self.__parse_basic_ecf()
if not root.is_empty():
root.children.append(elem)
return root
else:
return elem
def __parse_basic_ecf(self):
# select(BASIC_ECF ::= '*') = {'*'}
# select(BASIC_ECF ::= '+') = {'+'}
# select(BASIC_ECF ::= empty) = {'(', '.', operand, '[', '|', eor, ')'}
root = RAST()
if self.token[0] and self.token[1] in ['*', '+']:
root.is_operator, root.token = self.token
self.token = self.lex.get_token()
return root
elif not self.token[0] or self.token[1] in ['(', '.', '[', '|', ')', 'EOR']:
return root
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "(", ".", "[", operand or "|", ")", EOR, but we encount "%s"!'
% self.token[1])
def __parse_elementary(self):
# select(ELEMENTARY :: = GROUP) = {'('}
# select(ELEMENTARY ::= SET) = {'['}
# select(ELEMENTARY ::= '.') = {'.'}
# select(ELEMENTARY ::= operand) = {operand}
if self.token[0] and self.token[1] == '(':
return self.__parse_group()
elif self.token[0] and self.token[1] == '[':
return self.__parse_set()
elif not self.token[0] or self.token[1] == '.':
root = RAST()
root.is_operator, root.token = self.token
self.token = self.lex.get_token()
return root
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "(", ".", "[" or operand, but we encount "%s"!'
% self.token[1])
def __parse_group(self):
# select(GROUP ::= '(' REGEX ')') = {'('}
if self.token[0] and self.token[1] == '(':
root = self.__parse_regex()
if self.token[0] and self.token[1] == ')':
self.token = self.lex.get_token()
else:
raise SyntaxParserError(
'Regex Syntax Error: we need ")", but we encount "%s"!'
% self.token[1])
return root
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "(", but we encount "%s"!'
% self.token[1])
def __parse_set(self):
# select(SET ::= '[' SET_ECF) = {'['}
if self.token[0] and self.token[1] == '[':
root = RAST()
root.is_operator, root.token = self.token
self.token = self.lex.get_token(inset=True, firstchar=True)
self.__parse_set_ecf(root)
return root
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "[", but we encount "%s"!'
% self.token[1])
def __parse_set_ecf(self, root):
# select(SET_ECF ::= ITEMS ']') = {operand}
# select(SET_ECF ::= '^' ITEMS ']') = {'^'}
if not self.token[0]:
self.__parse_items(root)
elif self.token[0] and self.token[1] == '^':
root.is_operator = self.token[0]
root.token += self.token[1]
self.token = self.lex.get_token(inset=True)
self.__parse_items(root)
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "^" or operand, but we encount "%s"!'
% self.token[1])
if self.token[0] and self.token[1] == ']':
root.is_operator = self.token[0]
root.token += self.token[1]
self.token = self.lex.get_token()
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "]", but we encount "%s"!'
% self.token[1])
def __parse_items(self, root):
# select(ITEMS ::= ITEM ITEMS_ECF) = {operand}
if not self.token[0]:
root.children.append(self.__parse_item())
self.__parse_items_ecf(root)
else:
raise SyntaxParserError(
'Regex Syntax Error: we need operand, but we encount "%s"!'
% self.token[1])
def __parse_items_ecf(self, root):
# select(ITEMS_ECF ::= ITEMS) = {operand}
# select(ITEMS_ECF ::= empty) = {']'}
if not self.token[0]:
self.__parse_items(root)
elif self.token[0] and self.token[1] == ']':
return
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "]" or operand, but we encount "%s"!'
% self.token[1])
def __parse_item(self):
# select(ITEM ::= operand ITEM_ECF) = {operand}
if not self.token[0]:
elem = RAST()
elem.is_operator, elem.token = self.token
self.token = self.lex.get_token(inset=True)
root = self.__parse_item_ecf()
if not root.is_empty():
root.children.insert(0, elem)
# We got a range, check validity of the range now.
if root.children[0].token in CharacterSet.mnemnoic:
raise SyntaxParserError(
'Regex Semantics Error: we encount "%s" in range!'
% root.children[0].token)
elif root.children[1].token in CharacterSet.mnemnoic:
raise SyntaxParserError(
'Regex Semantics Error: we encount "%s" in range!'
% root.children[1].token)
elif CharacterSet.is_valid_range(root.children[0].token, root.children[1].token):
return root
else:
raise SyntaxParserError(
'Regex Semantics Error: we encount a invalid range "%s%s%s"!'
% (root.children[0].token, root.token, root.children[1].token))
else:
return elem
else:
raise SyntaxParserError(
'Regex Syntax Error: we need operand, but we encount "%s"!'
% self.token[1])
def __parse_item_ecf(self):
# select(ITEM_ECF ::= '-' operand) = {'-'}
# select(ITEM_ECF ::= empty) = {operand, ']'}
root = RAST()
if self.token[0] and self.token[1] == '-':
root.is_operator, root.token = self.token
self.token = self.lex.get_token(inset=True)
if not self.token[0]:
elem = RAST()
elem.is_operator, elem.token = self.token
root.children.append(elem)
self.token = self.lex.get_token(inset=True)
return root
else:
raise SyntaxParserError(
'Regex Syntax Error: we need operand, but we encount "%s"!'
% self.token[1])
elif not self.token[0] or self.token[1] == ']':
return root
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "-", "]" or operand, but we encount "%s"!'
% self.token[1])
| 39.389513 | 107 | 0.501474 |
from alpacalib.rast import RAST
from alpacalib.lex import LexParser
from alpacalib.charset import CharacterSet
class SyntaxParserError(Exception):
pass
class SyntaxParser:
def __init__(self, regex):
self.lex = LexParser(regex)
def build(self):
self.root = self.__parse_regex()
if not self.token[0] or self.token[1] != 'EOR':
raise SyntaxParserError(
'Regex Syntax Error: we need EOR, but we encount "%s"!'
% self.token[1])
return self.root
def __parse_regex(self):
self.token = self.lex.get_token()
if not self.token[0] or self.token[1] in ['(', '.', '[']:
elem = self.__parse_simple()
root = RAST()
self.__parse_union_elr(root)
if not root.is_empty():
root.children.insert(0, elem)
return root
else:
return elem
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "(", ".", "[" or operand, but we encount "%s"!'
% self.token[1])
def __parse_union_elr(self, root):
if self.token[0] and self.token[1] == '|':
root.is_operator, root.token = self.token
self.token = self.lex.get_token()
root.children.append(self.__parse_simple())
self.__parse_union_elr(root)
elif self.token[0] and self.token[1] in [')', 'EOR']:
return
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "|", ")" or EOR, but we encount "%s"!'
% self.token[1])
def __parse_simple(self):
if not self.token[0] or self.token[1] in ['(', '.', '[']:
elem = self.__parse_basic()
root = RAST()
self.__parse_concatenation_elr(root)
if not root.is_empty():
root.children.insert(0, elem)
return root
else:
return elem
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "(", ".", "[" or operand, but we encount "%s"!'
% self.token[1])
def __parse_concatenation_elr(self, root):
if not self.token[0] or self.token[1] in ['(', '.', '[']:
root.is_operator = True
root.children.append(self.__parse_basic())
self.__parse_concatenation_elr(root)
elif self.token[0] and self.token[1] in ['|', ')', 'EOR']:
return
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "(", ".", "[", operand or "|", ")", EOR, but we encount "%s"!'
% self.token[1])
def __parse_basic(self):
if not self.token[0] or self.token[1] in ['(', '.', '[']:
elem = self.__parse_elementary()
root = self.__parse_basic_ecf()
if not root.is_empty():
root.children.append(elem)
return root
else:
return elem
def __parse_basic_ecf(self):
root = RAST()
if self.token[0] and self.token[1] in ['*', '+']:
root.is_operator, root.token = self.token
self.token = self.lex.get_token()
return root
elif not self.token[0] or self.token[1] in ['(', '.', '[', '|', ')', 'EOR']:
return root
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "(", ".", "[", operand or "|", ")", EOR, but we encount "%s"!'
% self.token[1])
def __parse_elementary(self):
if self.token[0] and self.token[1] == '(':
return self.__parse_group()
elif self.token[0] and self.token[1] == '[':
return self.__parse_set()
elif not self.token[0] or self.token[1] == '.':
root = RAST()
root.is_operator, root.token = self.token
self.token = self.lex.get_token()
return root
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "(", ".", "[" or operand, but we encount "%s"!'
% self.token[1])
def __parse_group(self):
if self.token[0] and self.token[1] == '(':
root = self.__parse_regex()
if self.token[0] and self.token[1] == ')':
self.token = self.lex.get_token()
else:
raise SyntaxParserError(
'Regex Syntax Error: we need ")", but we encount "%s"!'
% self.token[1])
return root
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "(", but we encount "%s"!'
% self.token[1])
def __parse_set(self):
if self.token[0] and self.token[1] == '[':
root = RAST()
root.is_operator, root.token = self.token
self.token = self.lex.get_token(inset=True, firstchar=True)
self.__parse_set_ecf(root)
return root
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "[", but we encount "%s"!'
% self.token[1])
def __parse_set_ecf(self, root):
if not self.token[0]:
self.__parse_items(root)
elif self.token[0] and self.token[1] == '^':
root.is_operator = self.token[0]
root.token += self.token[1]
self.token = self.lex.get_token(inset=True)
self.__parse_items(root)
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "^" or operand, but we encount "%s"!'
% self.token[1])
if self.token[0] and self.token[1] == ']':
root.is_operator = self.token[0]
root.token += self.token[1]
self.token = self.lex.get_token()
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "]", but we encount "%s"!'
% self.token[1])
def __parse_items(self, root):
if not self.token[0]:
root.children.append(self.__parse_item())
self.__parse_items_ecf(root)
else:
raise SyntaxParserError(
'Regex Syntax Error: we need operand, but we encount "%s"!'
% self.token[1])
def __parse_items_ecf(self, root):
if not self.token[0]:
self.__parse_items(root)
elif self.token[0] and self.token[1] == ']':
return
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "]" or operand, but we encount "%s"!'
% self.token[1])
def __parse_item(self):
if not self.token[0]:
elem = RAST()
elem.is_operator, elem.token = self.token
self.token = self.lex.get_token(inset=True)
root = self.__parse_item_ecf()
if not root.is_empty():
root.children.insert(0, elem)
if root.children[0].token in CharacterSet.mnemnoic:
raise SyntaxParserError(
'Regex Semantics Error: we encount "%s" in range!'
% root.children[0].token)
elif root.children[1].token in CharacterSet.mnemnoic:
raise SyntaxParserError(
'Regex Semantics Error: we encount "%s" in range!'
% root.children[1].token)
elif CharacterSet.is_valid_range(root.children[0].token, root.children[1].token):
return root
else:
raise SyntaxParserError(
'Regex Semantics Error: we encount a invalid range "%s%s%s"!'
% (root.children[0].token, root.token, root.children[1].token))
else:
return elem
else:
raise SyntaxParserError(
'Regex Syntax Error: we need operand, but we encount "%s"!'
% self.token[1])
def __parse_item_ecf(self):
root = RAST()
if self.token[0] and self.token[1] == '-':
root.is_operator, root.token = self.token
self.token = self.lex.get_token(inset=True)
if not self.token[0]:
elem = RAST()
elem.is_operator, elem.token = self.token
root.children.append(elem)
self.token = self.lex.get_token(inset=True)
return root
else:
raise SyntaxParserError(
'Regex Syntax Error: we need operand, but we encount "%s"!'
% self.token[1])
elif not self.token[0] or self.token[1] == ']':
return root
else:
raise SyntaxParserError(
'Regex Syntax Error: we need "-", "]" or operand, but we encount "%s"!'
% self.token[1])
| true | true |
f7f9ddd7b08717338bd32f4ce298dd6f6fd20a68 | 3,118 | py | Python | tensorflow/contrib/eager/python/datasets.py | tianhm/tensorflow | e55574f28257bdacd744dcdba86c839e661b1b2a | [
"Apache-2.0"
] | 47 | 2017-03-08T20:58:54.000Z | 2021-06-24T07:07:49.000Z | tensorflow/contrib/eager/python/datasets.py | genSud/tensorflow | ec8216568d8cd9810004067558041c11a8356685 | [
"Apache-2.0"
] | 1 | 2019-07-11T16:29:54.000Z | 2019-07-11T16:29:54.000Z | tensorflow/contrib/eager/python/datasets.py | genSud/tensorflow | ec8216568d8cd9810004067558041c11a8356685 | [
"Apache-2.0"
] | 19 | 2017-04-17T01:28:40.000Z | 2020-08-15T13:01:33.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for tf.contrib.data when eager execution is enabled."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.contrib.data.python.util import nest
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import resource_variable_ops
_uid_counter = 0
_uid_lock = threading.Lock()
def _iterator_shared_name():
with _uid_lock:
global _uid_counter
uid = _uid_counter
_uid_counter += 1
return "eager_iterator_{}".format(uid)
class Iterator(object):
"""An iterator producing tf.Tensor objects from a tf.contrib.data.Dataset."""
def __init__(self, dataset):
"""Creates a new iterator over the given dataset.
For example:
```python
dataset = tf.contrib.data.Dataset.range(4)
for x in Iterator(dataset):
print(x)
```
Args:
dataset: A `tf.contrib.data.Dataset` object.
Raises:
RuntimeError: When invoked without eager execution enabled.
"""
if not context.in_eager_mode():
raise RuntimeError(
"{} objects only make sense when eager execution is enabled".format(
type(self)))
ds_variant = dataset.make_dataset_resource()
self._output_types = dataset.output_types
self._flat_output_types = nest.flatten(dataset.output_types)
self._flat_output_shapes = nest.flatten(dataset.output_shapes)
self._resource = gen_dataset_ops.iterator(
container="",
shared_name=_iterator_shared_name(),
output_types=self._flat_output_types,
output_shapes=self._flat_output_shapes)
gen_dataset_ops.make_iterator(ds_variant, self._resource)
def __del__(self):
if self._resource is not None:
resource_variable_ops.destroy_resource_op(self._resource)
self._resource = None
def __iter__(self):
return self
def __next__(self): # For Python 3 compatibility
return self.next()
def next(self):
"""Return the next tf.Tensor from the dataset."""
try:
ret = gen_dataset_ops.iterator_get_next(
self._resource,
output_types=self._flat_output_types,
output_shapes=self._flat_output_shapes)
return nest.pack_sequence_as(self._output_types, ret)
except errors.OutOfRangeError:
raise StopIteration
| 32.14433 | 80 | 0.71424 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.contrib.data.python.util import nest
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import resource_variable_ops
_uid_counter = 0
_uid_lock = threading.Lock()
def _iterator_shared_name():
with _uid_lock:
global _uid_counter
uid = _uid_counter
_uid_counter += 1
return "eager_iterator_{}".format(uid)
class Iterator(object):
def __init__(self, dataset):
if not context.in_eager_mode():
raise RuntimeError(
"{} objects only make sense when eager execution is enabled".format(
type(self)))
ds_variant = dataset.make_dataset_resource()
self._output_types = dataset.output_types
self._flat_output_types = nest.flatten(dataset.output_types)
self._flat_output_shapes = nest.flatten(dataset.output_shapes)
self._resource = gen_dataset_ops.iterator(
container="",
shared_name=_iterator_shared_name(),
output_types=self._flat_output_types,
output_shapes=self._flat_output_shapes)
gen_dataset_ops.make_iterator(ds_variant, self._resource)
def __del__(self):
if self._resource is not None:
resource_variable_ops.destroy_resource_op(self._resource)
self._resource = None
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
try:
ret = gen_dataset_ops.iterator_get_next(
self._resource,
output_types=self._flat_output_types,
output_shapes=self._flat_output_shapes)
return nest.pack_sequence_as(self._output_types, ret)
except errors.OutOfRangeError:
raise StopIteration
| true | true |
f7f9de82300605b4cafcc1c246400ab08f4a30c5 | 1,318 | py | Python | Code_10_2_Data_pipeline_nosql.py | bpbpublications/Data-Science-for-Business-Professionals | e0795321f4f393b4ae2c5959046e5508a510f2a6 | [
"MIT"
] | null | null | null | Code_10_2_Data_pipeline_nosql.py | bpbpublications/Data-Science-for-Business-Professionals | e0795321f4f393b4ae2c5959046e5508a510f2a6 | [
"MIT"
] | null | null | null | Code_10_2_Data_pipeline_nosql.py | bpbpublications/Data-Science-for-Business-Professionals | e0795321f4f393b4ae2c5959046e5508a510f2a6 | [
"MIT"
] | null | null | null | ##Python Script to Implement the Pipeline
#Define the API KEY ( this can be found in the data.gov.in account section for registered users)
API_KEY = <YOUR API KEY>
#Import the requests library
import requests
#Construct the GET REQUEST
response = requests.get(
'https://api.data.gov.in/resource/9ef84268-d588-465a-a308-a864a43d0070',
params=[('api-key',API_KEY ),
('format','json'),
('offset',0),
('limit',20)],
)
#Check if the request was successful - a scuccess request returns a status code 200
if response.status_code == 200:
print('Success!')
else:
print('Some Error Occured')
#If the you see a success message then you can extract the values from the JSON response
json_response = response.json()
#Import the MongoClient from PyMongo Library
from pymongo import MongoClient
#Define the connection string to your MongoDB instance
# You remember we did not set-up any password while installing MongoDB
client = MongoClient('mongodb://localhost:27017')
#Access the Database
db = client['data-gov-in']
#Access the Collection
posts = db.commodity_prices
#Insert the fist commodity price data into database
result = posts.insert_one(json_response)
| 28.652174 | 97 | 0.674507 | requests
response = requests.get(
'https://api.data.gov.in/resource/9ef84268-d588-465a-a308-a864a43d0070',
params=[('api-key',API_KEY ),
('format','json'),
('offset',0),
('limit',20)],
)
if response.status_code == 200:
print('Success!')
else:
print('Some Error Occured')
json_response = response.json()
from pymongo import MongoClient
client = MongoClient('mongodb://localhost:27017')
db = client['data-gov-in']
posts = db.commodity_prices
result = posts.insert_one(json_response)
| false | true |
f7f9deb4b6b436f25447bc1ffe5c6369d88eedce | 32 | py | Python | shadowrun_prototype/defs/obje.py | holy-crust/reclaimer | 0aa693da3866ce7999c68d5f71f31a9c932cdb2c | [
"MIT"
] | null | null | null | shadowrun_prototype/defs/obje.py | holy-crust/reclaimer | 0aa693da3866ce7999c68d5f71f31a9c932cdb2c | [
"MIT"
] | null | null | null | shadowrun_prototype/defs/obje.py | holy-crust/reclaimer | 0aa693da3866ce7999c68d5f71f31a9c932cdb2c | [
"MIT"
] | null | null | null | from ...hek.defs.obje import *
| 16 | 31 | 0.65625 | from ...hek.defs.obje import *
| true | true |
f7f9df744a7c40740fac49d87ac5a7b56073d214 | 312 | py | Python | pacote-download/pythonProject/exercicios_python_guanabara/ex53_professor.py | oliveirajonathas/python_estudos | 28921672d7e5d0866030c45b077a28998905f752 | [
"MIT"
] | null | null | null | pacote-download/pythonProject/exercicios_python_guanabara/ex53_professor.py | oliveirajonathas/python_estudos | 28921672d7e5d0866030c45b077a28998905f752 | [
"MIT"
] | null | null | null | pacote-download/pythonProject/exercicios_python_guanabara/ex53_professor.py | oliveirajonathas/python_estudos | 28921672d7e5d0866030c45b077a28998905f752 | [
"MIT"
] | null | null | null | frase = str(input('Digite uma frase: ')).strip().upper()
palavras = frase.split()
junto = ''.join(palavras)
inverso = ''
for letra in range(len(junto)-1, -1, -1):
inverso = inverso + junto[letra]
if junto == inverso:
print('Temos um palíndromo!')
else:
print('A frase digitada não é um palíndromo')
| 26 | 56 | 0.653846 | frase = str(input('Digite uma frase: ')).strip().upper()
palavras = frase.split()
junto = ''.join(palavras)
inverso = ''
for letra in range(len(junto)-1, -1, -1):
inverso = inverso + junto[letra]
if junto == inverso:
print('Temos um palíndromo!')
else:
print('A frase digitada não é um palíndromo')
| true | true |
f7f9dfc5a177c79d2b80ef9e34a715e0f0ec5468 | 3,408 | py | Python | pymimir/ARC.py | trauzti/mimir | 76362f82a06230606fc994cc7ea73f6ca036255b | [
"ISC"
] | 4 | 2016-09-07T00:16:47.000Z | 2018-05-29T12:27:47.000Z | pymimir/ARC.py | trauzti/mimir | 76362f82a06230606fc994cc7ea73f6ca036255b | [
"ISC"
] | null | null | null | pymimir/ARC.py | trauzti/mimir | 76362f82a06230606fc994cc7ea73f6ca036255b | [
"ISC"
] | 2 | 2015-02-04T09:38:26.000Z | 2020-06-23T18:22:22.000Z | # Modified from http://code.activestate.com/recipes/576532/
from collections import OrderedDict
from common import Entry, statbase
class Deque:
'Fast searchable queue'
def __init__(self):
self.od = OrderedDict()
def appendleft(self, k):
od = self.od
if k in od:
del od[k]
od[k] = None
def pop(self):
return self.od.popitem(0)[0]
def remove(self, k):
del self.od[k]
def __len__(self):
return len(self.od)
def __contains__(self, k):
return k in self.od
def __iter__(self):
return reversed(self.od)
def __repr__(self):
return 'Deque(%r)' % (list(self),)
class alg:
def __repr__(self):
return "ARC"
def __init__(self, c, **kwargs):
self.c = c # Cache size
self.cn = 0 # Items in cache now
self.cached = {} # Cached keys
self.hitcount = 0
self.count = 0
self.p = 0
self.t1 = Deque()
self.t2 = Deque()
self.b1 = Deque()
self.b2 = Deque()
self.stats = statbase()
def setup(self, reqlist):
# I'm an online algorithm :-)
pass
def replace(self, args):
if self.t1 and ((args in self.b2 and len(self.t1) == self.p) or (len(self.t1) > self.p)):
old = self.t1.pop()
self.b1.appendleft(old)
else:
old = self.t2.pop()
self.b2.appendleft(old)
de = self.cached[old]
self.stats.Evict(de)
del self.cached[old]
def get(self, key):
self.count += 1
e = None
if key in self.t1:
self.t1.remove(key)
self.t2.appendleft(key)
self.hitcount += 1
e = self.cached[key]
self.stats.Hit(e)
return e.value
elif key in self.t2:
self.t2.remove(key)
self.t2.appendleft(key)
self.hitcount += 1
e = self.cached[key]
self.stats.Hit(e)
return e.value
self.stats.Miss(key)
return None
def put(self, key, value=None):
e = self.cached.get(key)
if e:
self.stats.Set(e)
e.value = value
return 1
e = Entry(key, value)
self.cached[key] = e
self.stats.Set(e)
if key in self.b1:
self.p = min(self.c, self.p + max(len(self.b2) / len(self.b1) , 1))
self.replace(key)
self.b1.remove(key)
self.t2.appendleft(key)
return 1
if key in self.b2:
self.p = max(0, self.p - max(len(self.b1)/len(self.b2) , 1))
self.replace(key)
self.b2.remove(key)
self.t2.appendleft(key)
return 1
if len(self.t1) + len(self.b1) == self.c:
if len(self.t1) < self.c:
self.b1.pop()
self.replace(key)
else:
popkey = self.t1.pop()
de = self.cached[popkey]
self.stats.Evict(de)
del self.cached[popkey]
else:
total = len(self.t1) + len(self.b1) + len(self.t2) + len(self.b2)
if total >= self.c:
if total == (2 * self.c):
self.b2.pop()
self.replace(key)
self.t1.appendleft(key)
return 1
| 28.4 | 97 | 0.48973 |
from collections import OrderedDict
from common import Entry, statbase
class Deque:
def __init__(self):
self.od = OrderedDict()
def appendleft(self, k):
od = self.od
if k in od:
del od[k]
od[k] = None
def pop(self):
return self.od.popitem(0)[0]
def remove(self, k):
del self.od[k]
def __len__(self):
return len(self.od)
def __contains__(self, k):
return k in self.od
def __iter__(self):
return reversed(self.od)
def __repr__(self):
return 'Deque(%r)' % (list(self),)
class alg:
def __repr__(self):
return "ARC"
def __init__(self, c, **kwargs):
self.c = c
self.cn = 0
self.cached = {}
self.hitcount = 0
self.count = 0
self.p = 0
self.t1 = Deque()
self.t2 = Deque()
self.b1 = Deque()
self.b2 = Deque()
self.stats = statbase()
def setup(self, reqlist):
pass
def replace(self, args):
if self.t1 and ((args in self.b2 and len(self.t1) == self.p) or (len(self.t1) > self.p)):
old = self.t1.pop()
self.b1.appendleft(old)
else:
old = self.t2.pop()
self.b2.appendleft(old)
de = self.cached[old]
self.stats.Evict(de)
del self.cached[old]
def get(self, key):
self.count += 1
e = None
if key in self.t1:
self.t1.remove(key)
self.t2.appendleft(key)
self.hitcount += 1
e = self.cached[key]
self.stats.Hit(e)
return e.value
elif key in self.t2:
self.t2.remove(key)
self.t2.appendleft(key)
self.hitcount += 1
e = self.cached[key]
self.stats.Hit(e)
return e.value
self.stats.Miss(key)
return None
def put(self, key, value=None):
e = self.cached.get(key)
if e:
self.stats.Set(e)
e.value = value
return 1
e = Entry(key, value)
self.cached[key] = e
self.stats.Set(e)
if key in self.b1:
self.p = min(self.c, self.p + max(len(self.b2) / len(self.b1) , 1))
self.replace(key)
self.b1.remove(key)
self.t2.appendleft(key)
return 1
if key in self.b2:
self.p = max(0, self.p - max(len(self.b1)/len(self.b2) , 1))
self.replace(key)
self.b2.remove(key)
self.t2.appendleft(key)
return 1
if len(self.t1) + len(self.b1) == self.c:
if len(self.t1) < self.c:
self.b1.pop()
self.replace(key)
else:
popkey = self.t1.pop()
de = self.cached[popkey]
self.stats.Evict(de)
del self.cached[popkey]
else:
total = len(self.t1) + len(self.b1) + len(self.t2) + len(self.b2)
if total >= self.c:
if total == (2 * self.c):
self.b2.pop()
self.replace(key)
self.t1.appendleft(key)
return 1
| true | true |
f7f9e066f00f2469e952b600f16817805a10de19 | 2,341 | py | Python | thglibs/auxiliares/hash_encode/hash_encode.py | darkcode357/thg_lib | c1052bcd85f705ff8be404b7a28964eabef2ed45 | [
"MIT"
] | null | null | null | thglibs/auxiliares/hash_encode/hash_encode.py | darkcode357/thg_lib | c1052bcd85f705ff8be404b7a28964eabef2ed45 | [
"MIT"
] | 52 | 2018-10-25T20:29:17.000Z | 2018-10-25T20:45:02.000Z | thglibs/auxiliares/hash_encode/hash_encode.py | darkcode357/thg_lib | c1052bcd85f705ff8be404b7a28964eabef2ed45 | [
"MIT"
] | null | null | null | import hashlib
class Hash_encode:
"""
suport para hash_encode
=>sha3_256
=>ha256
=>blake2b
=>sha384
=>md5
=>sha3_512
=>sha512
=>sha512
=>sha1
=>sha3_224
=>blake2s
=>sha3_384
=>sha224
"""
def __init__(self, sha3_256, sha256, blake2b, sha384, md5, sha3_512, sha512, sha1, sha3_224, blake2s, sha3_384,
sha224):
self.sha3_256 = sha3_256
self.sha256 = sha256
self.blake2b = blake2b
self.sha384 = sha384
self.md5 = md5
self.sha3_512 = sha3_512
self.sha512 = sha512
self.sha512 = sha512
self.sha1 = sha1
self.sha3_224 = sha3_224
self.blake2s = blake2s
self.sha3_384 = sha3_384
self.sha224 = sha224
def sha3_256(self):
hash = hashlib.sha3_256()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def sha256(self):
hash = hashlib.sha256()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def blake2b(self):
hash = hashlib.blake2b()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def sha384(self):
hash = hashlib.sha384()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def md5(self):
hash = hashlib.md5()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def sha3_512(self):
hash = hashlib.sha3_512()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def sha512(self):
hash = hashlib.sha512()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def sha1(self):
hash = hashlib.sha1()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def sha3_224(self):
hash = hashlib.sha3_224()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def blake2s(self):
hash = hashlib.blake2s()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def sha3_384(self):
hash = hashlib.sha3_384()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def sha224(self):
hash = hashlib.sha224()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
| 24.134021 | 115 | 0.562153 | import hashlib
class Hash_encode:
def __init__(self, sha3_256, sha256, blake2b, sha384, md5, sha3_512, sha512, sha1, sha3_224, blake2s, sha3_384,
sha224):
self.sha3_256 = sha3_256
self.sha256 = sha256
self.blake2b = blake2b
self.sha384 = sha384
self.md5 = md5
self.sha3_512 = sha3_512
self.sha512 = sha512
self.sha512 = sha512
self.sha1 = sha1
self.sha3_224 = sha3_224
self.blake2s = blake2s
self.sha3_384 = sha3_384
self.sha224 = sha224
def sha3_256(self):
hash = hashlib.sha3_256()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def sha256(self):
hash = hashlib.sha256()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def blake2b(self):
hash = hashlib.blake2b()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def sha384(self):
hash = hashlib.sha384()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def md5(self):
hash = hashlib.md5()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def sha3_512(self):
hash = hashlib.sha3_512()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def sha512(self):
hash = hashlib.sha512()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def sha1(self):
hash = hashlib.sha1()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def sha3_224(self):
hash = hashlib.sha3_224()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def blake2s(self):
hash = hashlib.blake2s()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def sha3_384(self):
hash = hashlib.sha3_384()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
def sha224(self):
hash = hashlib.sha224()
hash.update(self.encode('utf-8'))
print(hash.hexdigest())
| true | true |
f7f9e088c3967cca2dcace2828f5a0a009034dcd | 591 | py | Python | src/objects/hospital.py | jp172/covid19-hospital-scheduler | 0931ac7b91f3e7fdbad741c5fc92577278dfc823 | [
"MIT"
] | 5 | 2020-03-22T22:46:15.000Z | 2020-03-25T14:16:49.000Z | src/objects/hospital.py | jp172/covid19-hospital-scheduler | 0931ac7b91f3e7fdbad741c5fc92577278dfc823 | [
"MIT"
] | null | null | null | src/objects/hospital.py | jp172/covid19-hospital-scheduler | 0931ac7b91f3e7fdbad741c5fc92577278dfc823 | [
"MIT"
] | 1 | 2020-03-22T20:44:01.000Z | 2020-03-22T20:44:01.000Z | from dataclasses import dataclass
from dataclasses_json import dataclass_json
from ..globals import CAPACITY_SCALAR
from .position import Position
@dataclass_json
@dataclass
class Hospital:
ident: str
position: Position
nbr_free_beds: int
nbr_free_corona_beds: int
nbr_corona_beds: int
nbr_corona_pat_in_normal_bed: int
capacity_coefficient: float
def calculate_capacity_coefficient(self):
self.capacity_coefficient = (
(1 + CAPACITY_SCALAR * self.nbr_corona_pat_in_normal_bed)
) / (1 + max(-0.75, self.nbr_free_corona_beds))
| 24.625 | 69 | 0.746193 | from dataclasses import dataclass
from dataclasses_json import dataclass_json
from ..globals import CAPACITY_SCALAR
from .position import Position
@dataclass_json
@dataclass
class Hospital:
ident: str
position: Position
nbr_free_beds: int
nbr_free_corona_beds: int
nbr_corona_beds: int
nbr_corona_pat_in_normal_bed: int
capacity_coefficient: float
def calculate_capacity_coefficient(self):
self.capacity_coefficient = (
(1 + CAPACITY_SCALAR * self.nbr_corona_pat_in_normal_bed)
) / (1 + max(-0.75, self.nbr_free_corona_beds))
| true | true |
f7f9e1913bfc9725ab0fcf5277e5457cc7a0cf2f | 4,249 | py | Python | tests/test_pacmon.py | fhightower/pacmon | ad9f9869f61cbd43ffe97fd7f10a3522161f74b8 | [
"MIT"
] | null | null | null | tests/test_pacmon.py | fhightower/pacmon | ad9f9869f61cbd43ffe97fd7f10a3522161f74b8 | [
"MIT"
] | null | null | null | tests/test_pacmon.py | fhightower/pacmon | ad9f9869f61cbd43ffe97fd7f10a3522161f74b8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `pacmon` module."""
import json
import os
from pacmon import pacmon
import pytest
def test_initialization():
p = pacmon.Pacmon(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")))
def test_pypi_monitoring_1():
p = pacmon.Pacmon(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")))
changes = p.check_package('pypi', 'ioc_fanger')
assert len(changes) == 0
def test_pypi_monitoring_2():
p = pacmon.Pacmon(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")))
changes = p.check_package('pypi', 'requests')
assert len(changes) == 0
def test_pypi_monitoring_3():
p = pacmon.Pacmon(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")))
changes = p.check_package('pypi', 'onemillion')
assert len(changes) == 0
def test_pypi_file_replacement():
"""Make sure file removal and addition is recorded correctly."""
with open(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")), 'w+') as f:
json.dump({
"onemillion": {
"abc": "123"
}
}, f)
p = pacmon.Pacmon(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")))
changes = p.check_package('pypi', 'onemillion')
assert len(changes) == 3
assert len(changes['added_files']) == 8
assert len(changes['removed_files']) == 1
assert len(changes['changed_files']) == 0
def test_pypi_file_change():
"""Make sure a file hash change is recorded correctly."""
with open(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")), 'w+') as f:
json.dump({
"onemillion": {
"onemillion/cli.py": "123"
}
}, f)
p = pacmon.Pacmon(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")))
changes = p.check_package('pypi', 'onemillion')
assert len(changes) == 3
assert len(changes['added_files']) == 7
assert len(changes['removed_files']) == 0
assert len(changes['changed_files']) == 1
@pytest.mark.npm
def test_npm_monitoring_1():
p = pacmon.Pacmon(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")))
changes = p.check_package('npm', 'spaces-ng')
assert len(changes) == 0
@pytest.mark.npm
def test_npm_file_replacement():
"""Make sure file removal and addition is recorded correctly."""
with open(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")), 'w+') as f:
json.dump({
"spaces-ng": {
"abc": "123"
}
}, f)
p = pacmon.Pacmon(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")))
changes = p.check_package('npm', 'spaces-ng')
print("changes {}".format(changes))
assert len(changes) == 3
assert len(changes['added_files']) == 22
assert len(changes['removed_files']) == 1
assert len(changes['changed_files']) == 0
@pytest.mark.npm
def test_npm_file_change():
"""Make sure a file hash change is recorded correctly."""
with open(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")), 'w+') as f:
json.dump({
"spaces-ng": {
"node_modules/spaces-ng/src/SpacesModule.d.ts": "123",
"node_modules/spaces-ng/LICENSE": "abc"
}
}, f)
p = pacmon.Pacmon(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")))
changes = p.check_package('npm', 'spaces-ng')
print("changes {}".format(changes))
assert len(changes) == 3
assert len(changes['added_files']) == 20
assert len(changes['removed_files']) == 0
assert len(changes['changed_files']) == 2
@pytest.mark.npm
def test_dual_package_check():
"""Make sure packages are downloaded and stored properly."""
p = pacmon.Pacmon()
p.check_package('npm', 'spaces-ng')
p.check_package('pypi', 'onemillion')
with open(p.output_path, 'r') as f:
hashes = json.load(f)
assert 'spaces-ng' in hashes
assert 'onemillion' in hashes
| 35.115702 | 105 | 0.633796 |
import json
import os
from pacmon import pacmon
import pytest
def test_initialization():
p = pacmon.Pacmon(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")))
def test_pypi_monitoring_1():
p = pacmon.Pacmon(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")))
changes = p.check_package('pypi', 'ioc_fanger')
assert len(changes) == 0
def test_pypi_monitoring_2():
p = pacmon.Pacmon(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")))
changes = p.check_package('pypi', 'requests')
assert len(changes) == 0
def test_pypi_monitoring_3():
p = pacmon.Pacmon(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")))
changes = p.check_package('pypi', 'onemillion')
assert len(changes) == 0
def test_pypi_file_replacement():
with open(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")), 'w+') as f:
json.dump({
"onemillion": {
"abc": "123"
}
}, f)
p = pacmon.Pacmon(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")))
changes = p.check_package('pypi', 'onemillion')
assert len(changes) == 3
assert len(changes['added_files']) == 8
assert len(changes['removed_files']) == 1
assert len(changes['changed_files']) == 0
def test_pypi_file_change():
with open(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")), 'w+') as f:
json.dump({
"onemillion": {
"onemillion/cli.py": "123"
}
}, f)
p = pacmon.Pacmon(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")))
changes = p.check_package('pypi', 'onemillion')
assert len(changes) == 3
assert len(changes['added_files']) == 7
assert len(changes['removed_files']) == 0
assert len(changes['changed_files']) == 1
@pytest.mark.npm
def test_npm_monitoring_1():
p = pacmon.Pacmon(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")))
changes = p.check_package('npm', 'spaces-ng')
assert len(changes) == 0
@pytest.mark.npm
def test_npm_file_replacement():
with open(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")), 'w+') as f:
json.dump({
"spaces-ng": {
"abc": "123"
}
}, f)
p = pacmon.Pacmon(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")))
changes = p.check_package('npm', 'spaces-ng')
print("changes {}".format(changes))
assert len(changes) == 3
assert len(changes['added_files']) == 22
assert len(changes['removed_files']) == 1
assert len(changes['changed_files']) == 0
@pytest.mark.npm
def test_npm_file_change():
with open(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")), 'w+') as f:
json.dump({
"spaces-ng": {
"node_modules/spaces-ng/src/SpacesModule.d.ts": "123",
"node_modules/spaces-ng/LICENSE": "abc"
}
}, f)
p = pacmon.Pacmon(os.path.abspath(os.path.join(os.path.dirname(__file__), "./test_output.json")))
changes = p.check_package('npm', 'spaces-ng')
print("changes {}".format(changes))
assert len(changes) == 3
assert len(changes['added_files']) == 20
assert len(changes['removed_files']) == 0
assert len(changes['changed_files']) == 2
@pytest.mark.npm
def test_dual_package_check():
p = pacmon.Pacmon()
p.check_package('npm', 'spaces-ng')
p.check_package('pypi', 'onemillion')
with open(p.output_path, 'r') as f:
hashes = json.load(f)
assert 'spaces-ng' in hashes
assert 'onemillion' in hashes
| true | true |
f7f9e24332c8bdf2631924c3f1b6673eb2080dfb | 44,358 | py | Python | fixture/Python3/expected_xml/Block1.py | exKAZUu/ParserTests | 609cfc62b70c8d04d5a2ced25a213a8d601e7011 | [
"Apache-2.0"
] | null | null | null | fixture/Python3/expected_xml/Block1.py | exKAZUu/ParserTests | 609cfc62b70c8d04d5a2ced25a213a8d601e7011 | [
"Apache-2.0"
] | null | null | null | fixture/Python3/expected_xml/Block1.py | exKAZUu/ParserTests | 609cfc62b70c8d04d5a2ced25a213a8d601e7011 | [
"Apache-2.0"
] | null | null | null | <file_input>
<stmt>
<simple_stmt>
<small_stmt>
<expr_stmt>
<testlist_star_expr>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="1">i</NAME>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_star_expr>
<EQUAL startline="1">=</EQUAL>
<testlist_star_expr>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NUMBER startline="1">0</NUMBER>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_star_expr>
</expr_stmt>
</small_stmt>
<NEWLINE startline="1"></NEWLINE>
</simple_stmt>
</stmt>
<stmt>
<compound_stmt>
<if_stmt>
<NAME startline="2">if</NAME>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="2">i</NAME>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
<comp_op>
<EQEQUAL startline="2">==</EQEQUAL>
</comp_op>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NUMBER startline="2">0</NUMBER>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
<COLON startline="2">:</COLON>
<suite>
<simple_stmt>
<small_stmt>
<expr_stmt>
<testlist_star_expr>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="2">print</NAME>
</atom>
<trailer>
<LPAR startline="2">(</LPAR>
<arglist>
<argument>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<STRING startline="2">"test"</STRING>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</argument>
</arglist>
<RPAR startline="2">)</RPAR>
</trailer>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_star_expr>
</expr_stmt>
</small_stmt>
<NEWLINE startline="2"></NEWLINE>
</simple_stmt>
</suite>
</if_stmt>
</compound_stmt>
</stmt>
<stmt>
<compound_stmt>
<while_stmt>
<NAME startline="3">while</NAME>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="3">i</NAME>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
<comp_op>
<NOTEQUAL startline="3">!=</NOTEQUAL>
</comp_op>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NUMBER startline="3">0</NUMBER>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
<COLON startline="3">:</COLON>
<suite>
<simple_stmt>
<small_stmt>
<expr_stmt>
<testlist_star_expr>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="3">print</NAME>
</atom>
<trailer>
<LPAR startline="3">(</LPAR>
<arglist>
<argument>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<STRING startline="3">"test"</STRING>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</argument>
</arglist>
<RPAR startline="3">)</RPAR>
</trailer>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_star_expr>
</expr_stmt>
</small_stmt>
<NEWLINE startline="3"></NEWLINE>
</simple_stmt>
</suite>
</while_stmt>
</compound_stmt>
</stmt>
<stmt>
<compound_stmt>
<for_stmt>
<NAME startline="4">for</NAME>
<exprlist>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="4">x</NAME>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</exprlist>
<NAME startline="4">in</NAME>
<testlist>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<LSQB startline="4">[</LSQB>
<testlist_comp>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NUMBER startline="4">0</NUMBER>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_comp>
<RSQB startline="4">]</RSQB>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist>
<COLON startline="4">:</COLON>
<suite>
<simple_stmt>
<small_stmt>
<expr_stmt>
<testlist_star_expr>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="4">print</NAME>
</atom>
<trailer>
<LPAR startline="4">(</LPAR>
<arglist>
<argument>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<STRING startline="4">"test"</STRING>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</argument>
</arglist>
<RPAR startline="4">)</RPAR>
</trailer>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_star_expr>
</expr_stmt>
</small_stmt>
<NEWLINE startline="4"></NEWLINE>
</simple_stmt>
</suite>
</for_stmt>
</compound_stmt>
</stmt>
<stmt>
<compound_stmt>
<if_stmt>
<NAME startline="6">if</NAME>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="6">i</NAME>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
<comp_op>
<EQEQUAL startline="6">==</EQEQUAL>
</comp_op>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NUMBER startline="6">0</NUMBER>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
<COLON startline="6">:</COLON>
<suite>
<NEWLINE startline="6"></NEWLINE>
<INDENT startline="7"></INDENT>
<stmt>
<simple_stmt>
<small_stmt>
<expr_stmt>
<testlist_star_expr>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="7">print</NAME>
</atom>
<trailer>
<LPAR startline="7">(</LPAR>
<arglist>
<argument>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<STRING startline="7">"test"</STRING>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</argument>
</arglist>
<RPAR startline="7">)</RPAR>
</trailer>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_star_expr>
</expr_stmt>
</small_stmt>
<NEWLINE startline="7"></NEWLINE>
</simple_stmt>
</stmt>
<DEDENT startline="8"></DEDENT>
</suite>
</if_stmt>
</compound_stmt>
</stmt>
<stmt>
<compound_stmt>
<while_stmt>
<NAME startline="8">while</NAME>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="8">i</NAME>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
<comp_op>
<NOTEQUAL startline="8">!=</NOTEQUAL>
</comp_op>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NUMBER startline="8">0</NUMBER>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
<COLON startline="8">:</COLON>
<suite>
<NEWLINE startline="8"></NEWLINE>
<INDENT startline="9"></INDENT>
<stmt>
<simple_stmt>
<small_stmt>
<expr_stmt>
<testlist_star_expr>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="9">print</NAME>
</atom>
<trailer>
<LPAR startline="9">(</LPAR>
<arglist>
<argument>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<STRING startline="9">"test"</STRING>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</argument>
</arglist>
<RPAR startline="9">)</RPAR>
</trailer>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_star_expr>
</expr_stmt>
</small_stmt>
<NEWLINE startline="9"></NEWLINE>
</simple_stmt>
</stmt>
<DEDENT startline="10"></DEDENT>
</suite>
</while_stmt>
</compound_stmt>
</stmt>
<stmt>
<compound_stmt>
<for_stmt>
<NAME startline="10">for</NAME>
<exprlist>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="10">x</NAME>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</exprlist>
<NAME startline="10">in</NAME>
<testlist>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<LSQB startline="10">[</LSQB>
<testlist_comp>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NUMBER startline="10">0</NUMBER>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_comp>
<RSQB startline="10">]</RSQB>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist>
<COLON startline="10">:</COLON>
<suite>
<NEWLINE startline="10"></NEWLINE>
<INDENT startline="11"></INDENT>
<stmt>
<simple_stmt>
<small_stmt>
<expr_stmt>
<testlist_star_expr>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="11">print</NAME>
</atom>
<trailer>
<LPAR startline="11">(</LPAR>
<arglist>
<argument>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<STRING startline="11">"test"</STRING>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</argument>
</arglist>
<RPAR startline="11">)</RPAR>
</trailer>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_star_expr>
</expr_stmt>
</small_stmt>
<NEWLINE startline="11"></NEWLINE>
</simple_stmt>
</stmt>
<DEDENT startline="11"></DEDENT>
</suite>
</for_stmt>
</compound_stmt>
</stmt>
<NEWLINE startline="11"></NEWLINE>
<ENDMARKER startline="11"></ENDMARKER>
</file_input> | 45.495385 | 118 | 0.185265 | <file_input>
<stmt>
<simple_stmt>
<small_stmt>
<expr_stmt>
<testlist_star_expr>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="1">i</NAME>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_star_expr>
<EQUAL startline="1">=</EQUAL>
<testlist_star_expr>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NUMBER startline="1">0</NUMBER>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_star_expr>
</expr_stmt>
</small_stmt>
<NEWLINE startline="1"></NEWLINE>
</simple_stmt>
</stmt>
<stmt>
<compound_stmt>
<if_stmt>
<NAME startline="2">if</NAME>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="2">i</NAME>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
<comp_op>
<EQEQUAL startline="2">==</EQEQUAL>
</comp_op>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NUMBER startline="2">0</NUMBER>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
<COLON startline="2">:</COLON>
<suite>
<simple_stmt>
<small_stmt>
<expr_stmt>
<testlist_star_expr>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="2">print</NAME>
</atom>
<trailer>
<LPAR startline="2">(</LPAR>
<arglist>
<argument>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<STRING startline="2">"test"</STRING>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</argument>
</arglist>
<RPAR startline="2">)</RPAR>
</trailer>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_star_expr>
</expr_stmt>
</small_stmt>
<NEWLINE startline="2"></NEWLINE>
</simple_stmt>
</suite>
</if_stmt>
</compound_stmt>
</stmt>
<stmt>
<compound_stmt>
<while_stmt>
<NAME startline="3">while</NAME>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="3">i</NAME>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
<comp_op>
<NOTEQUAL startline="3">!=</NOTEQUAL>
</comp_op>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NUMBER startline="3">0</NUMBER>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
<COLON startline="3">:</COLON>
<suite>
<simple_stmt>
<small_stmt>
<expr_stmt>
<testlist_star_expr>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="3">print</NAME>
</atom>
<trailer>
<LPAR startline="3">(</LPAR>
<arglist>
<argument>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<STRING startline="3">"test"</STRING>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</argument>
</arglist>
<RPAR startline="3">)</RPAR>
</trailer>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_star_expr>
</expr_stmt>
</small_stmt>
<NEWLINE startline="3"></NEWLINE>
</simple_stmt>
</suite>
</while_stmt>
</compound_stmt>
</stmt>
<stmt>
<compound_stmt>
<for_stmt>
<NAME startline="4">for</NAME>
<exprlist>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="4">x</NAME>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</exprlist>
<NAME startline="4">in</NAME>
<testlist>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<LSQB startline="4">[</LSQB>
<testlist_comp>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NUMBER startline="4">0</NUMBER>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_comp>
<RSQB startline="4">]</RSQB>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist>
<COLON startline="4">:</COLON>
<suite>
<simple_stmt>
<small_stmt>
<expr_stmt>
<testlist_star_expr>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="4">print</NAME>
</atom>
<trailer>
<LPAR startline="4">(</LPAR>
<arglist>
<argument>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<STRING startline="4">"test"</STRING>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</argument>
</arglist>
<RPAR startline="4">)</RPAR>
</trailer>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_star_expr>
</expr_stmt>
</small_stmt>
<NEWLINE startline="4"></NEWLINE>
</simple_stmt>
</suite>
</for_stmt>
</compound_stmt>
</stmt>
<stmt>
<compound_stmt>
<if_stmt>
<NAME startline="6">if</NAME>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="6">i</NAME>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
<comp_op>
<EQEQUAL startline="6">==</EQEQUAL>
</comp_op>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NUMBER startline="6">0</NUMBER>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
<COLON startline="6">:</COLON>
<suite>
<NEWLINE startline="6"></NEWLINE>
<INDENT startline="7"></INDENT>
<stmt>
<simple_stmt>
<small_stmt>
<expr_stmt>
<testlist_star_expr>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="7">print</NAME>
</atom>
<trailer>
<LPAR startline="7">(</LPAR>
<arglist>
<argument>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<STRING startline="7">"test"</STRING>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</argument>
</arglist>
<RPAR startline="7">)</RPAR>
</trailer>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_star_expr>
</expr_stmt>
</small_stmt>
<NEWLINE startline="7"></NEWLINE>
</simple_stmt>
</stmt>
<DEDENT startline="8"></DEDENT>
</suite>
</if_stmt>
</compound_stmt>
</stmt>
<stmt>
<compound_stmt>
<while_stmt>
<NAME startline="8">while</NAME>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="8">i</NAME>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
<comp_op>
<NOTEQUAL startline="8">!=</NOTEQUAL>
</comp_op>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NUMBER startline="8">0</NUMBER>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
<COLON startline="8">:</COLON>
<suite>
<NEWLINE startline="8"></NEWLINE>
<INDENT startline="9"></INDENT>
<stmt>
<simple_stmt>
<small_stmt>
<expr_stmt>
<testlist_star_expr>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="9">print</NAME>
</atom>
<trailer>
<LPAR startline="9">(</LPAR>
<arglist>
<argument>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<STRING startline="9">"test"</STRING>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</argument>
</arglist>
<RPAR startline="9">)</RPAR>
</trailer>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_star_expr>
</expr_stmt>
</small_stmt>
<NEWLINE startline="9"></NEWLINE>
</simple_stmt>
</stmt>
<DEDENT startline="10"></DEDENT>
</suite>
</while_stmt>
</compound_stmt>
</stmt>
<stmt>
<compound_stmt>
<for_stmt>
<NAME startline="10">for</NAME>
<exprlist>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="10">x</NAME>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</exprlist>
<NAME startline="10">in</NAME>
<testlist>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<LSQB startline="10">[</LSQB>
<testlist_comp>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NUMBER startline="10">0</NUMBER>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_comp>
<RSQB startline="10">]</RSQB>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist>
<COLON startline="10">:</COLON>
<suite>
<NEWLINE startline="10"></NEWLINE>
<INDENT startline="11"></INDENT>
<stmt>
<simple_stmt>
<small_stmt>
<expr_stmt>
<testlist_star_expr>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<NAME startline="11">print</NAME>
</atom>
<trailer>
<LPAR startline="11">(</LPAR>
<arglist>
<argument>
<test>
<or_test>
<and_test>
<not_test>
<comparison>
<expr>
<xor_expr>
<and_expr>
<shift_expr>
<arith_expr>
<term>
<factor>
<power>
<atom>
<STRING startline="11">"test"</STRING>
</atom>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</argument>
</arglist>
<RPAR startline="11">)</RPAR>
</trailer>
</power>
</factor>
</term>
</arith_expr>
</shift_expr>
</and_expr>
</xor_expr>
</expr>
</comparison>
</not_test>
</and_test>
</or_test>
</test>
</testlist_star_expr>
</expr_stmt>
</small_stmt>
<NEWLINE startline="11"></NEWLINE>
</simple_stmt>
</stmt>
<DEDENT startline="11"></DEDENT>
</suite>
</for_stmt>
</compound_stmt>
</stmt>
<NEWLINE startline="11"></NEWLINE>
<ENDMARKER startline="11"></ENDMARKER>
</file_input> | false | true |
f7f9e252060aa604e95ba76f501f5ed1f1b78487 | 8,362 | py | Python | cirq/contrib/paulistring/clifford_optimize.py | rickyHong/Cirq-repl | 5b31440d5b5bf5a66ee85ef5e44373ac89aa0eaf | [
"Apache-2.0"
] | 2 | 2019-04-02T09:16:28.000Z | 2019-05-25T18:35:19.000Z | cirq/contrib/paulistring/clifford_optimize.py | babbush/Cirq | 447b2c762cc2820dd28abb3bd2bc785d36bae39a | [
"Apache-2.0"
] | 36 | 2019-04-03T23:03:51.000Z | 2019-05-15T23:49:01.000Z | cirq/contrib/paulistring/clifford_optimize.py | babbush/Cirq | 447b2c762cc2820dd28abb3bd2bc785d36bae39a | [
"Apache-2.0"
] | 2 | 2019-04-03T22:55:05.000Z | 2019-04-24T23:24:53.000Z | # Copyright 2018 The ops Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, cast
from cirq import ops, circuits
from cirq.contrib.paulistring.convert_gate_set import (
converted_gate_set)
def clifford_optimized_circuit(circuit: circuits.Circuit,
                               atol: float = 1e-8
                               ) -> circuits.Circuit:
    """Shrink a circuit by commuting and merging Clifford operations.

    The circuit is first re-expressed over SingleQubitCliffordGate, CZ and
    PauliStringPhasor operations.  Each single-qubit Clifford gate is then
    decomposed into quarter-turn Pauli rotations which are pushed forward
    through the operation list to merge with later gates, and pairs of
    identical CZ gates separated only by unrelated operations are cancelled.

    Args:
        circuit: The circuit to optimize.
        atol: Absolute tolerance forwarded to the gate-set conversion.

    Returns:
        A new, equivalent circuit assembled with the EARLIEST insertion
        strategy.
    """
    # Convert to a circuit with SingleQubitCliffordGates,
    # CZs and other ignored gates
    c_cliff = converted_gate_set(circuit, no_clifford_gates=False,
                                 atol=atol)
    # Flat, mutable list of operations; the nested helpers below edit it
    # in place (pop/insert/replace), so indices shift as the pass runs.
    all_ops = list(c_cliff.all_operations())
    def find_merge_point(
            start_i: int,
            string_op: ops.PauliStringPhasor,
            stop_at_cz: bool,
    ) -> Tuple[int, ops.PauliStringPhasor, int]:
        """Push ``string_op`` forward through ``all_ops`` from ``start_i``.

        Returns ``(furthest_i, furthest_op, num_passed_over)``: the index
        the phasor can be moved to, the phasor after being commuted past
        the intervening operations, and how many operations it traversed.
        ``furthest_i``/``furthest_op`` only advance while the phasor stays
        a single-qubit Pauli string (the only mergeable form).
        """
        # Verdicts produced by continue_condition for each encountered op.
        STOP = 0
        CONTINUE = 1
        SKIP = 2
        def continue_condition(op: ops.Operation,
                               current_string: ops.PauliStringPhasor,
                               is_first: bool) -> int:
            """Decide whether the phasor may move past (or ignore) ``op``."""
            if ops.op_gate_of_type(op, ops.SingleQubitCliffordGate):
                # Keep pushing through single-qubit Cliffords only while the
                # phasor has not yet been reduced to a single-qubit string.
                return (CONTINUE if len(current_string.pauli_string) != 1
                        else STOP)
            if ops.op_gate_of_type(op, ops.CZPowGate):
                return STOP if stop_at_cz else CONTINUE
            if (isinstance(op, ops.PauliStringPhasor) and
                    len(op.qubits) == 1 and
                    (op.pauli_string[op.qubits[0]] == current_string.pauli_string[
                        op.qubits[0]])):
                # Single-qubit phasor on the same Pauli axis: it commutes,
                # so step over it without transforming the moving phasor.
                return SKIP
            return STOP
        modified_op = string_op
        furthest_op = string_op
        furthest_i = start_i + 1
        num_passed_over = 0
        for i in range(start_i+1, len(all_ops)):
            op = all_ops[i]
            if not set(op.qubits) & set(modified_op.qubits):
                # No qubits in common
                continue
            cont_cond = continue_condition(op, modified_op, i == start_i+1)
            if cont_cond == STOP:
                if len(modified_op.pauli_string) == 1:
                    furthest_op = modified_op
                    furthest_i = i
                break
            if cont_cond == CONTINUE:
                # Commute the phasor past op, transforming it accordingly.
                modified_op = modified_op.pass_operations_over(
                    [op], after_to_before=True)
            # NOTE(review): also incremented on SKIP — presumably counts
            # every traversed op, not just transformations; confirm.
            num_passed_over += 1
            if len(modified_op.pauli_string) == 1:
                furthest_op = modified_op
                furthest_i = i + 1
        return furthest_i, furthest_op, num_passed_over
    def try_merge_clifford(cliff_op: ops.GateOperation, start_i: int) -> bool:
        """Try to push ``cliff_op`` forward and merge it into later ops.

        Returns True if the operation at ``start_i`` was removed entirely,
        False if a (possibly reduced) Clifford gate was left at ``start_i``.
        """
        orig_qubit, = cliff_op.qubits
        # Accumulates the portion of the original Clifford that could not
        # be moved forward; starts as the identity.
        remaining_cliff_gate = ops.SingleQubitCliffordGate.I
        for pauli, quarter_turns in reversed(
                cast(ops.SingleQubitCliffordGate,
                     cliff_op.gate).decompose_rotation()):
            # Conjugate the rotation axis through the part left behind so
            # each quarter-turn rotation can be handled independently.
            trans = remaining_cliff_gate.transform(pauli)
            pauli = trans.to
            quarter_turns *= -1 if trans.flip else 1
            # Express the rotation as a Pauli-string phasor so it can be
            # commuted through the circuit by find_merge_point.
            string_op = ops.PauliStringPhasor(ops.PauliString.from_single(
                cliff_op.qubits[0], pauli),
                                              exponent_neg=quarter_turns / 2)
            merge_i, merge_op, num_passed = find_merge_point(start_i, string_op,
                                                             quarter_turns == 2)
            assert merge_i > start_i
            assert len(merge_op.pauli_string) == 1, 'PauliString length != 1'
            qubit, pauli = next(iter(merge_op.pauli_string.items()))
            quarter_turns = round(merge_op.exponent_relative * 2)
            if merge_op.pauli_string.coefficient not in [1, -1]:
                raise NotImplementedError("TODO: handle all coefficients.")
            quarter_turns *= int(merge_op.pauli_string.coefficient.real)
            quarter_turns %= 4
            part_cliff_gate = ops.SingleQubitCliffordGate.from_quarter_turns(
                pauli, quarter_turns)
            # The operation at the merge point, if any, and only if it acts
            # on the same qubit as the moved rotation.
            other_op = all_ops[merge_i] if merge_i < len(all_ops) else None
            if other_op is not None and qubit not in set(other_op.qubits):
                other_op = None
            if (isinstance(other_op, ops.GateOperation)
                    and isinstance(other_op.gate, ops.SingleQubitCliffordGate)):
                # Merge with another SingleQubitCliffordGate
                new_op = part_cliff_gate.merged_with(other_op.gate
                                                     )(qubit)
                all_ops[merge_i] = new_op
            elif (isinstance(other_op, ops.GateOperation)
                  and isinstance(other_op.gate, ops.CZPowGate)
                  and other_op.gate.exponent == 1
                  and quarter_turns == 2):
                # Pass whole Pauli gate over CZ, possibly adding a Z gate
                if pauli != ops.pauli_gates.Z:
                    other_qubit = other_op.qubits[
                        other_op.qubits.index(qubit)-1]
                    all_ops.insert(merge_i+1,
                                   ops.SingleQubitCliffordGate.Z(other_qubit))
                all_ops.insert(merge_i+1, part_cliff_gate(qubit))
            elif isinstance(other_op, ops.PauliStringPhasor):
                # Pass over a non-Clifford gate
                mod_op = other_op.pass_operations_over(
                    [part_cliff_gate(qubit)])
                all_ops[merge_i] = mod_op
                all_ops.insert(merge_i+1, part_cliff_gate(qubit))
            elif merge_i > start_i + 1 and num_passed > 0:
                # Moved Clifford through the circuit but nothing to merge
                all_ops.insert(merge_i, part_cliff_gate(qubit))
            else:
                # Couldn't move Clifford
                remaining_cliff_gate = remaining_cliff_gate.merged_with(
                    part_cliff_gate)
        if remaining_cliff_gate == ops.SingleQubitCliffordGate.I:
            # Every rotation merged forward; drop the original operation.
            all_ops.pop(start_i)
            return True
        else:
            # Leave the unmergeable remainder in place of the original.
            all_ops[start_i] = remaining_cliff_gate(orig_qubit)
            return False
    def try_merge_cz(cz_op: ops.GateOperation, start_i: int) -> int:
        """Returns the number of operations removed at or before start_i."""
        # Scan backwards for an identical CZ that can cancel with cz_op.
        for i in reversed(range(start_i)):
            op = all_ops[i]
            if not set(cz_op.qubits) & set(op.qubits):
                # Don't share qubits
                # Keep looking
                continue
            elif not (isinstance(op, ops.GateOperation)
                      and isinstance(op.gate, ops.CZPowGate)
                      and op.gate.exponent == 1):
                # Not a CZ gate
                return 0
            elif cz_op == op:
                # Cancel two CZ gates
                all_ops.pop(start_i)
                all_ops.pop(i)
                return 2
            else:
                # Two CZ gates that share one qubit
                # Pass through and keep looking
                continue  # coverage: ignore
            # The above line is covered by test_remove_staggered_czs but the
            # coverage checker disagrees.
        return 0
    # Main pass: walk the operation list, merging single-qubit Cliffords
    # forward and cancelling CZ pairs.  The index is stepped back whenever
    # operations at or before the current position are removed.
    i = 0
    while i < len(all_ops):
        op = all_ops[i]
        if (isinstance(op, ops.GateOperation)
                and isinstance(op.gate, ops.SingleQubitCliffordGate)):
            if try_merge_clifford(op, i):
                i -= 1
        elif (isinstance(op, ops.GateOperation)
              and isinstance(op.gate, ops.CZPowGate)
              and op.gate.exponent == 1):
            num_rm = try_merge_cz(op, i)
            i -= num_rm
        i += 1
    # Re-pack the surviving operations into a fresh circuit.
    return circuits.Circuit.from_ops(
        all_ops,
        strategy=circuits.InsertStrategy.EARLIEST)
| 43.552083 | 80 | 0.559196 |
from typing import Tuple, cast
from cirq import ops, circuits
from cirq.contrib.paulistring.convert_gate_set import (
converted_gate_set)
def clifford_optimized_circuit(circuit: circuits.Circuit,
atol: float = 1e-8
) -> circuits.Circuit:
c_cliff = converted_gate_set(circuit, no_clifford_gates=False,
atol=atol)
all_ops = list(c_cliff.all_operations())
def find_merge_point(
start_i: int,
string_op: ops.PauliStringPhasor,
stop_at_cz: bool,
) -> Tuple[int, ops.PauliStringPhasor, int]:
STOP = 0
CONTINUE = 1
SKIP = 2
def continue_condition(op: ops.Operation,
current_string: ops.PauliStringPhasor,
is_first: bool) -> int:
if ops.op_gate_of_type(op, ops.SingleQubitCliffordGate):
return (CONTINUE if len(current_string.pauli_string) != 1
else STOP)
if ops.op_gate_of_type(op, ops.CZPowGate):
return STOP if stop_at_cz else CONTINUE
if (isinstance(op, ops.PauliStringPhasor) and
len(op.qubits) == 1 and
(op.pauli_string[op.qubits[0]] == current_string.pauli_string[
op.qubits[0]])):
return SKIP
return STOP
modified_op = string_op
furthest_op = string_op
furthest_i = start_i + 1
num_passed_over = 0
for i in range(start_i+1, len(all_ops)):
op = all_ops[i]
if not set(op.qubits) & set(modified_op.qubits):
continue
cont_cond = continue_condition(op, modified_op, i == start_i+1)
if cont_cond == STOP:
if len(modified_op.pauli_string) == 1:
furthest_op = modified_op
furthest_i = i
break
if cont_cond == CONTINUE:
modified_op = modified_op.pass_operations_over(
[op], after_to_before=True)
num_passed_over += 1
if len(modified_op.pauli_string) == 1:
furthest_op = modified_op
furthest_i = i + 1
return furthest_i, furthest_op, num_passed_over
def try_merge_clifford(cliff_op: ops.GateOperation, start_i: int) -> bool:
orig_qubit, = cliff_op.qubits
remaining_cliff_gate = ops.SingleQubitCliffordGate.I
for pauli, quarter_turns in reversed(
cast(ops.SingleQubitCliffordGate,
cliff_op.gate).decompose_rotation()):
trans = remaining_cliff_gate.transform(pauli)
pauli = trans.to
quarter_turns *= -1 if trans.flip else 1
string_op = ops.PauliStringPhasor(ops.PauliString.from_single(
cliff_op.qubits[0], pauli),
exponent_neg=quarter_turns / 2)
merge_i, merge_op, num_passed = find_merge_point(start_i, string_op,
quarter_turns == 2)
assert merge_i > start_i
assert len(merge_op.pauli_string) == 1, 'PauliString length != 1'
qubit, pauli = next(iter(merge_op.pauli_string.items()))
quarter_turns = round(merge_op.exponent_relative * 2)
if merge_op.pauli_string.coefficient not in [1, -1]:
raise NotImplementedError("TODO: handle all coefficients.")
quarter_turns *= int(merge_op.pauli_string.coefficient.real)
quarter_turns %= 4
part_cliff_gate = ops.SingleQubitCliffordGate.from_quarter_turns(
pauli, quarter_turns)
other_op = all_ops[merge_i] if merge_i < len(all_ops) else None
if other_op is not None and qubit not in set(other_op.qubits):
other_op = None
if (isinstance(other_op, ops.GateOperation)
and isinstance(other_op.gate, ops.SingleQubitCliffordGate)):
new_op = part_cliff_gate.merged_with(other_op.gate
)(qubit)
all_ops[merge_i] = new_op
elif (isinstance(other_op, ops.GateOperation)
and isinstance(other_op.gate, ops.CZPowGate)
and other_op.gate.exponent == 1
and quarter_turns == 2):
if pauli != ops.pauli_gates.Z:
other_qubit = other_op.qubits[
other_op.qubits.index(qubit)-1]
all_ops.insert(merge_i+1,
ops.SingleQubitCliffordGate.Z(other_qubit))
all_ops.insert(merge_i+1, part_cliff_gate(qubit))
elif isinstance(other_op, ops.PauliStringPhasor):
mod_op = other_op.pass_operations_over(
[part_cliff_gate(qubit)])
all_ops[merge_i] = mod_op
all_ops.insert(merge_i+1, part_cliff_gate(qubit))
elif merge_i > start_i + 1 and num_passed > 0:
all_ops.insert(merge_i, part_cliff_gate(qubit))
else:
remaining_cliff_gate = remaining_cliff_gate.merged_with(
part_cliff_gate)
if remaining_cliff_gate == ops.SingleQubitCliffordGate.I:
all_ops.pop(start_i)
return True
else:
all_ops[start_i] = remaining_cliff_gate(orig_qubit)
return False
def try_merge_cz(cz_op: ops.GateOperation, start_i: int) -> int:
for i in reversed(range(start_i)):
op = all_ops[i]
if not set(cz_op.qubits) & set(op.qubits):
# Don't share qubits
continue
elif not (isinstance(op, ops.GateOperation)
and isinstance(op.gate, ops.CZPowGate)
and op.gate.exponent == 1):
return 0
elif cz_op == op:
all_ops.pop(start_i)
all_ops.pop(i)
return 2
else:
continue
return 0
i = 0
while i < len(all_ops):
op = all_ops[i]
if (isinstance(op, ops.GateOperation)
and isinstance(op.gate, ops.SingleQubitCliffordGate)):
if try_merge_clifford(op, i):
i -= 1
elif (isinstance(op, ops.GateOperation)
and isinstance(op.gate, ops.CZPowGate)
and op.gate.exponent == 1):
num_rm = try_merge_cz(op, i)
i -= num_rm
i += 1
return circuits.Circuit.from_ops(
all_ops,
strategy=circuits.InsertStrategy.EARLIEST)
| true | true |
f7f9e3108829f88a5ca916aeef820a02b3bb4922 | 2,794 | py | Python | LAB03/01-DDB/backend/cloudalbum/__init__.py | liks79/moving-to-serverless-renew | 2f173071ab387654d4cc851a0b39130613906378 | [
"MIT"
] | 6 | 2019-08-21T04:13:34.000Z | 2019-10-29T07:15:39.000Z | LAB03/01-DDB/backend/cloudalbum/__init__.py | liks79/moving-to-serverless-renew | 2f173071ab387654d4cc851a0b39130613906378 | [
"MIT"
] | 89 | 2019-07-31T02:29:54.000Z | 2022-03-12T01:03:22.000Z | LAB03/01-DDB/backend/cloudalbum/__init__.py | michaelrishiforrester/moving-to-serverless-renew | 27cbcbde9db3d2bc66212fe4f768563d25f64c19 | [
"MIT"
] | 4 | 2019-08-02T03:00:35.000Z | 2020-02-26T18:44:03.000Z | """
cloudalbum/__init__.py
~~~~~~~~~~~~~~~~~~~~~~~
Environment configuration how to run application.
:description: CloudAlbum is a fully featured sample application for 'Moving to AWS serverless' training course
:copyright: © 2019 written by Dayoungle Jun, Sungshik Jou.
:license: MIT, see LICENSE for more details.
"""
import os
import logging
import sys
import json
import datetime
from bson.objectid import ObjectId
from flask import Flask
from flask_cors import CORS
from flask_jwt_extended import JWTManager
from flask_bcrypt import Bcrypt
from werkzeug.exceptions import Conflict
from cloudalbum.database import create_table
class JSONEncoder(json.JSONEncoder):
    """JSON encoder that also serializes ObjectId, set and datetime values.

    Installed as ``app.json_encoder`` so API responses can contain MongoDB
    ObjectIds (as strings), sets (as lists) and datetimes (as their default
    string form) without raising a TypeError.
    """
    def default(self, o):
        """Convert otherwise-unserializable objects to JSON-friendly forms."""
        if isinstance(o, ObjectId):
            return str(o)
        elif isinstance(o, set):
            return list(o)
        elif isinstance(o, datetime.datetime):
            return str(o)
        # Anything else falls back to the base class, which raises TypeError.
        return super().default(o)
def create_app(script_info=None):
    """Flask application factory for the CloudAlbum API.

    Configures JWT authentication, bcrypt, the custom JSON encoder, CORS,
    environment-driven settings, stdout logging, database table creation,
    blueprint registration and the JWT blacklist check.

    Args:
        script_info: Unused; accepted for compatibility with the Flask CLI
            application-factory interface.

    Returns:
        The fully configured Flask application instance.
    """
    # instantiate the application
    app = Flask(__name__)
    # JWT settings: secret from the environment (insecure default 'my_jwt'),
    # 1-day access-token lifetime, blacklist checks on access tokens only.
    app.config['JWT_SECRET_KEY'] = os.getenv('JWT_SECRET_KEY', 'my_jwt')
    app.config['JWT_ACCESS_TOKEN_EXPIRES'] = datetime.timedelta(days=1)
    app.config['JWT_BLACKLIST_ENABLED'] = True
    app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access']
    # Locals are unused/used-only-for-decorators; both constructors bind the
    # extension to `app` as a side effect.
    flask_bcrypt = Bcrypt(app)
    jwt = JWTManager(app)
    # Serialize ObjectId/set/datetime values in JSON responses.
    app.json_encoder = JSONEncoder
    # enable CORS for every route and any origin
    CORS(app, resources={r'/*': {'origins': '*'}})
    # Load the settings object named by the APP_SETTINGS environment variable.
    app_settings = os.getenv('APP_SETTINGS')
    app.config.from_object(app_settings)
    # set logger to STDOUT
    app.logger.addHandler(logging.StreamHandler(sys.stdout))
    app.logger.setLevel(logging.DEBUG)
    # Create database table, if it is not exists
    with app.app_context():
        create_table()
    # Register blueprints.  Imported locally — presumably to avoid circular
    # imports with this module; confirm before hoisting to the top.
    from cloudalbum.api.users import users_blueprint
    app.register_blueprint(users_blueprint, url_prefix='/users')
    from cloudalbum.api.photos import photos_blueprint
    app.register_blueprint(photos_blueprint, url_prefix='/photos')
    from cloudalbum.api.admin import admin_blueprint
    app.register_blueprint(admin_blueprint, url_prefix='/admin')
    @jwt.token_in_blacklist_loader
    def check_if_token_in_blacklist_DB(decrypted_token):
        """Return True if the decoded token has been blacklisted (logged out).

        Any lookup failure is logged and surfaced as an HTTP 409 Conflict.
        """
        from cloudalbum.util.jwt_helper import is_blacklisted_token_set
        try:
            return is_blacklisted_token_set(decrypted_token)
        except Exception as e:
            app.logger.error(e)
            raise Conflict('Session already expired: {0}'.format(e))
    # shell context for flask cli
    @app.shell_context_processor
    def ctx():
        return {'application': app}
    return app
| 30.043011 | 114 | 0.704366 |
import os
import logging
import sys
import json
import datetime
from bson.objectid import ObjectId
from flask import Flask
from flask_cors import CORS
from flask_jwt_extended import JWTManager
from flask_bcrypt import Bcrypt
from werkzeug.exceptions import Conflict
from cloudalbum.database import create_table
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, ObjectId):
return str(o)
if isinstance(o, set):
return list(o)
if isinstance(o, datetime.datetime):
return str(o)
return json.JSONEncoder.default(self, o)
def create_app(script_info=None):
app = Flask(__name__)
app.config['JWT_SECRET_KEY'] = os.getenv('JWT_SECRET_KEY', 'my_jwt')
app.config['JWT_ACCESS_TOKEN_EXPIRES'] = datetime.timedelta(days=1)
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access']
flask_bcrypt = Bcrypt(app)
jwt = JWTManager(app)
app.json_encoder = JSONEncoder
CORS(app, resources={r'/*': {'origins': '*'}})
app_settings = os.getenv('APP_SETTINGS')
app.config.from_object(app_settings)
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.DEBUG)
with app.app_context():
create_table()
from cloudalbum.api.users import users_blueprint
app.register_blueprint(users_blueprint, url_prefix='/users')
from cloudalbum.api.photos import photos_blueprint
app.register_blueprint(photos_blueprint, url_prefix='/photos')
from cloudalbum.api.admin import admin_blueprint
app.register_blueprint(admin_blueprint, url_prefix='/admin')
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist_DB(decrypted_token):
from cloudalbum.util.jwt_helper import is_blacklisted_token_set
try:
return is_blacklisted_token_set(decrypted_token)
except Exception as e:
app.logger.error(e)
raise Conflict('Session already expired: {0}'.format(e))
@app.shell_context_processor
def ctx():
return {'application': app}
return app
| true | true |
f7f9e35397087f18ceebaed258481b681536e538 | 2,567 | py | Python | src/ner_func.py | Danboruya/ner-practice | a23d103a9997d70b83425a899893848f52b9a4dc | [
"Apache-2.0"
] | null | null | null | src/ner_func.py | Danboruya/ner-practice | a23d103a9997d70b83425a899893848f52b9a4dc | [
"Apache-2.0"
] | null | null | null | src/ner_func.py | Danboruya/ner-practice | a23d103a9997d70b83425a899893848f52b9a4dc | [
"Apache-2.0"
] | null | null | null | import re
import pyknp
def extract_ne(src_str, knp, fstring_flag=False, detail_flag=False):
    """Extract named entities from ``src_str`` using a KNP parser.

    Each NE feature of the form ``<NE:CLASS:phrase>`` found in the parse
    result is collected, and every occurrence of the phrase in the source
    text is replaced by its tagged form.  ARTIFACT/ORGANIZATION features
    get special handling to trim trailing feature markup.

    Args:
        src_str: Input sentence to analyze.
        knp: A pyknp KNP parser instance.
        fstring_flag: If True, print each tag's feature string.
        detail_flag: If True, also print each tag's feature string.

    Returns:
        A list ``[tagged_str, src_str, ne_phrase_list, ne_dict]`` where
        ``ne_dict`` maps each phrase to its tagged form.
    """
    parsed = knp.parse(src_str)
    annotated = src_str
    found_phrases = []
    phrase_to_tag = {}
    # Optional debug dumps of the raw feature strings.
    if fstring_flag:
        for t in parsed.tag_list():
            print(t.fstring)
    if detail_flag:
        for t in parsed.tag_list():
            print(t.fstring)
    ne_pattern = re.compile("<NE:(.*):(.*)>")
    for tag in parsed.tag_list():
        feature = tag.fstring
        if "NE:" not in feature:
            continue
        match = ne_pattern.search(feature)
        if "NE:ARTIFACT" in feature or "NE:ORGANIZATION" in feature:
            # Trailing features may be glued on; keep only the first tag.
            marked = match.group(0).split("><")[0] + ">"
            phrase = marked.split(":")[2][:-1]
        else:
            marked = match.group(0)
            phrase = match.group(2)
        # Replaces every occurrence of the phrase in the sentence.
        annotated = annotated.replace(phrase, marked)
        found_phrases.append(phrase)
        phrase_to_tag[phrase] = marked
    return [annotated, src_str, found_phrases, phrase_to_tag]
def swap_ne_tag_with_only_tag(src_str, target_tag, tag):
    """Rewrite the first ``<NE:target_tag:phrase>`` span to use ``tag``.

    Args:
        src_str: Text containing an NE span like ``<NE:ARTIFACT:phrase>``.
        target_tag: NE class to look for (e.g. ``"ARTIFACT"``).
        tag: Replacement NE class.

    Returns:
        ``src_str`` with the matched span's class swapped to ``tag``.
        Raises AttributeError if no matching span exists (``re.search``
        returns None), matching the original behavior.
    """
    match = re.search("<NE:{}:(.*)>".format(target_tag), src_str)
    old_span = match.group(0)
    # Split on ':' assumes the phrase itself contains no colon.
    phrase = old_span.split(":")[2][:-1]
    new_span = "<NE:{}:{}>".format(str(tag), phrase)
    return src_str.replace(old_span, new_span)
def swap_ne_tag_with_ne_and_tag(src_str, target_ne, _tag, ne_phrase_list):
    """Tag every occurrence of *target_ne* in *src_str* as ``<NE:_tag:target_ne>``.

    Phrases already present in *ne_phrase_list* (i.e. recognised by KNP) are
    left untouched so existing markup is not double-wrapped.

    :param src_str: sentence (possibly already annotated)
    :param target_ne: phrase to mark up
    :param _tag: NE class to assign to the phrase
    :param ne_phrase_list: phrases already tagged by ``extract_ne``
    :return: the (possibly) re-tagged sentence
    """
    if target_ne in ne_phrase_list:
        # Already handled by the NE extractor — nothing to do.
        return src_str
    markup = "<NE:{}:{}>".format(_tag, target_ne)
    return src_str.replace(target_ne, markup)
def tester_1():
    """Manual check: NE-tag a sentence, then rewrite the ARTIFACT tag to PRIZE."""
    parser = pyknp.KNP(option="-tab -dpnd",
                       rcfile='/usr/local/etc/knprc',
                       jumanrcfile='/usr/local/etc/jumanrc')
    sentence = "昨日ノーベル物理学賞について学んだ"
    tagged = extract_ne(sentence, parser, detail_flag=False)
    print(swap_ne_tag_with_only_tag(tagged[0], "ARTIFACT", "PRIZE"))
def tester_2():
    """Manual check: explicitly tag phrases by name, skipping ones KNP found."""
    parser = pyknp.KNP(option="-tab -dpnd",
                       rcfile='/usr/local/etc/knprc',
                       jumanrcfile='/usr/local/etc/jumanrc')
    first_sentence = "昨日ノーベル物理学賞について学んだ"
    second_sentence = "昨日英語の教科書を買った"
    first_result = extract_ne(first_sentence, parser, detail_flag=False)
    second_result = extract_ne(second_sentence, parser, detail_flag=False)
    print(swap_ne_tag_with_ne_and_tag(first_result[0], "ノーベル物理学賞", "PRIZE", first_result[2]))
    print(swap_ne_tag_with_ne_and_tag(second_result[0], "教科書", "EDUCATION", second_result[2]))
    print(first_result[3])
| 36.15493 | 105 | 0.62836 | import re
import pyknp
def extract_ne(src_str, knp, fstring_flag=False, detail_flag=False):
_knp = knp
tagged_str = src_str
result = _knp.parse(src_str)
ne_phrase_list = []
ne_dict = {}
if fstring_flag:
for t in result.tag_list():
print(t.fstring)
if detail_flag:
for x in result.tag_list():
print(x.fstring)
for tag in result.tag_list():
if "NE:" in tag.fstring:
if "NE:ARTIFACT" in tag.fstring or "NE:ORGANIZATION" in tag.fstring:
tagged_ne_phrase = re.search("<NE:(.*):(.*)>", tag.fstring).group(0).split("><")[0] + ">"
ne_phrase = tagged_ne_phrase.split(":")[2][:-1]
else:
tagged_ne_phrase = re.search("<NE:(.*):(.*)>", tag.fstring).group(0)
ne_phrase = re.search("<NE:(.*):(.*)>", tag.fstring).group(2)
tagged_str = tagged_str.replace(ne_phrase, tagged_ne_phrase)
ne_phrase_list.append(ne_phrase)
ne_dict[ne_phrase] = tagged_ne_phrase
return [tagged_str, src_str, ne_phrase_list, ne_dict]
def swap_ne_tag_with_only_tag(src_str, target_tag, tag):
tagged_ne_phrase = re.search("<NE:{}:(.*)>".format(target_tag), src_str).group(0)
ne_phrase = tagged_ne_phrase.split(":")[2][:-1]
return src_str.replace(tagged_ne_phrase, "<NE:{}:{}>".format(str(tag), ne_phrase))
def swap_ne_tag_with_ne_and_tag(src_str, target_ne, _tag, ne_phrase_list):
tagged_str = src_str
if target_ne not in ne_phrase_list:
tagged_str = tagged_str.replace(target_ne, "<NE:{}:{}>".format(_tag, target_ne))
return tagged_str
def tester_1():
knp = pyknp.KNP(option="-tab -dpnd",
rcfile='/usr/local/etc/knprc',
jumanrcfile='/usr/local/etc/jumanrc')
test = "昨日ノーベル物理学賞について学んだ"
tagged_test = extract_ne(test, knp, detail_flag=False)
print(swap_ne_tag_with_only_tag(tagged_test[0], "ARTIFACT", "PRIZE"))
def tester_2():
knp = pyknp.KNP(option="-tab -dpnd",
rcfile='/usr/local/etc/knprc',
jumanrcfile='/usr/local/etc/jumanrc')
test = "昨日ノーベル物理学賞について学んだ"
test1 = "昨日英語の教科書を買った"
tagged_test = extract_ne(test, knp, detail_flag=False)
tagged_test1 = extract_ne(test1, knp, detail_flag=False)
print(swap_ne_tag_with_ne_and_tag(tagged_test[0], "ノーベル物理学賞", "PRIZE", tagged_test[2]))
print(swap_ne_tag_with_ne_and_tag(tagged_test1[0], "教科書", "EDUCATION", tagged_test1[2]))
print(tagged_test[3])
| true | true |
f7f9e4154d6f8e32a33526e6381306274a9a2ac7 | 178 | py | Python | heksher/exceptions.py | biocatchltd/heksher-py | bdee59eabd13ce4b32b8100e33675859bdc0cfb7 | [
"MIT"
] | 2 | 2021-01-21T11:41:16.000Z | 2021-02-10T07:27:48.000Z | heksher/exceptions.py | biocatchltd/heksher-py | bdee59eabd13ce4b32b8100e33675859bdc0cfb7 | [
"MIT"
] | 20 | 2021-02-04T11:51:28.000Z | 2022-01-09T14:39:54.000Z | heksher/exceptions.py | biocatchltd/heksher-py | bdee59eabd13ce4b32b8100e33675859bdc0cfb7 | [
"MIT"
] | null | null | null | class NoMatchError(Exception):
"""
Raised either internally when resolving a ruleset fails, or externally when a setting has no default and no
matching rules
"""
| 29.666667 | 111 | 0.719101 | class NoMatchError(Exception):
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.