hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71167e5a3698413e13abeed3d48c71b338681ad | 191 | py | Python | web/app/djrq/model/prokyon/album_view.py | bmillham/djrq2 | c84283b75a7c15da1902ebfc32b7d75159c09e20 | [
"MIT"
] | 1 | 2016-11-23T20:50:00.000Z | 2016-11-23T20:50:00.000Z | web/app/djrq/model/prokyon/album_view.py | bmillham/djrq2 | c84283b75a7c15da1902ebfc32b7d75159c09e20 | [
"MIT"
] | 15 | 2017-01-15T04:18:40.000Z | 2017-02-25T04:13:06.000Z | web/app/djrq/model/prokyon/album_view.py | bmillham/djrq2 | c84283b75a7c15da1902ebfc32b7d75159c09e20 | [
"MIT"
] | null | null | null | from . import *
class Album(Base):
    """ORM mapping for the read-only ``album_view`` database view."""

    __tablename__ = "album_view"

    # Surrogate primary key of the view row.
    id = Column(Integer, primary_key=True)
    # Full display name — presumably ``prefix`` + ``name`` combined; TODO confirm
    # against the view definition.
    fullname = Column(Text)
    name = Column(Text)
    prefix = Column(Text)
| 19.1 | 42 | 0.65445 | from . import *
class Album(Base):
__tablename__ = "album_view"
id = Column(Integer, primary_key=True)
fullname = Column(Text)
name = Column(Text)
prefix = Column(Text)
| true | true |
f7116863995bc72b2db542ec2e1f8ca284bc4257 | 758 | py | Python | leetcode/python/easy/p1169_invalidTransactions.py | kefirzhang/algorithms | 549e68731d4c05002e35f0499d4f7744f5c63979 | [
"Apache-2.0"
] | null | null | null | leetcode/python/easy/p1169_invalidTransactions.py | kefirzhang/algorithms | 549e68731d4c05002e35f0499d4f7744f5c63979 | [
"Apache-2.0"
] | null | null | null | leetcode/python/easy/p1169_invalidTransactions.py | kefirzhang/algorithms | 549e68731d4c05002e35f0499d4f7744f5c63979 | [
"Apache-2.0"
] | null | null | null | class Solution:
def invalidTransactions(self, transactions):
    """Return every possibly-invalid transaction, preserving input order.

    A transaction string ``"name,time,amount,city"`` is invalid when its
    amount exceeds 1000, or when the same name also transacted in a
    different city within 60 minutes of it.
    """
    records = [entry.split(",") for entry in transactions]
    invalid = []
    for rec in records:
        # Amounts over 1000 are invalid regardless of any other activity.
        if int(rec[2]) > 1000:
            invalid.append(",".join(rec))
            continue
        # Same name, different city, within a 60-minute window of any
        # other transaction (the record never matches itself: same city).
        if any(rec[0] == other[0] and rec[3] != other[3]
               and abs(int(rec[1]) - int(other[1])) <= 60
               for other in records):
            invalid.append(",".join(rec))
    return invalid
# Quick manual check: only the 1200-amount transaction should be reported.
print(Solution().invalidTransactions(["alice,20,800,mtv","alice,50,1200,mtv"]))
| 34.454545 | 121 | 0.546174 | class Solution:
def invalidTransactions(self, transactions):
helper = []
helper_back = []
for transaction in transactions:
tmp_transaction = transaction.split(",")
helper.append(tmp_transaction)
for record in helper:
if int(record[2]) > 1000:
helper_back.append(",".join(record))
else:
for record2 in helper:
if record[0] == record2[0] and record[3] != record2[3] and abs(int(record[1])-int(record2[1])) <= 60:
helper_back.append(",".join(record))
break
return helper_back
slu = Solution()
print(slu.invalidTransactions(["alice,20,800,mtv","alice,50,1200,mtv"]))
| true | true |
f711693db6db8e214c62c061ee3ce205f8f0b101 | 5,413 | py | Python | tests/pipelines/test_distillation.py | gr33n-made/catalyst | bd413abc908ef7cbdeab42b0e805277a791e3ddb | [
"Apache-2.0"
] | 1 | 2021-09-29T20:30:50.000Z | 2021-09-29T20:30:50.000Z | tests/pipelines/test_distillation.py | gr33n-made/catalyst | bd413abc908ef7cbdeab42b0e805277a791e3ddb | [
"Apache-2.0"
] | null | null | null | tests/pipelines/test_distillation.py | gr33n-made/catalyst | bd413abc908ef7cbdeab42b0e805277a791e3ddb | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
import os
from tempfile import TemporaryDirectory
from pytest import mark
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from catalyst import dl
from catalyst.contrib.datasets import MNIST
from catalyst.data import ToTensor
from catalyst.settings import IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES, SETTINGS
class DistilRunner(dl.Runner):
    """Runner producing teacher/student logits for a distillation step."""

    def handle_batch(self, batch):
        features, targets = batch
        teacher = self.model["teacher"]
        student = self.model["student"]
        # The teacher is frozen: eval mode and no gradient tracking.
        teacher.eval()
        with torch.no_grad():
            teacher_logits = teacher(features)
        student_logits = student(features)
        self.batch = {
            "t_logits": teacher_logits,
            "s_logits": student_logits,
            "targets": targets,
            "s_logprobs": F.log_softmax(student_logits, dim=-1),
            "t_probs": F.softmax(teacher_logits, dim=-1),
        }
def train_experiment(device, engine=None):
    """Run a one-epoch MNIST distillation experiment.

    Trains a tiny linear "student" against a same-shaped "teacher" using a
    classification loss plus a KL-divergence term, on the given `device`
    (or with an explicit catalyst `engine` when provided).
    """
    with TemporaryDirectory() as logdir:
        # Teacher and student share the same minimal architecture.
        teacher = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
        student = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
        model = {"teacher": teacher, "student": student}
        # "cls" trains the student on labels; "kl" pulls it toward the teacher.
        criterion = {"cls": nn.CrossEntropyLoss(), "kl": nn.KLDivLoss(reduction="batchmean")}
        # Only the student's parameters are optimized.
        optimizer = optim.Adam(student.parameters(), lr=0.02)
        loaders = {
            "train": DataLoader(
                MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
            ),
            "valid": DataLoader(
                MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
            ),
        }

        runner = DistilRunner()
        # model training
        runner.train(
            engine=engine or dl.DeviceEngine(device),
            model=model,
            criterion=criterion,
            optimizer=optimizer,
            loaders=loaders,
            num_epochs=1,
            logdir=logdir,
            verbose=False,
            callbacks=[
                # Track accuracy of both networks from the keys set in
                # DistilRunner.handle_batch.
                dl.AccuracyCallback(
                    input_key="t_logits", target_key="targets", num_classes=2, prefix="teacher_"
                ),
                dl.AccuracyCallback(
                    input_key="s_logits", target_key="targets", num_classes=2, prefix="student_"
                ),
                dl.CriterionCallback(
                    input_key="s_logits",
                    target_key="targets",
                    metric_key="cls_loss",
                    criterion_key="cls",
                ),
                dl.CriterionCallback(
                    input_key="s_logprobs",
                    target_key="t_probs",
                    metric_key="kl_div_loss",
                    criterion_key="kl",
                ),
                # Final loss = mean of the KL and classification terms.
                dl.MetricAggregationCallback(
                    metric_key="loss", metrics=["kl_div_loss", "cls_loss"], mode="mean"
                ),
                # Step the optimizer for the student only.
                dl.OptimizerCallback(metric_key="loss", model_key="student"),
                dl.CheckpointCallback(
                    logdir=logdir,
                    loader_key="valid",
                    metric_key="loss",
                    minimize=True,
                    save_n_best=3,
                ),
            ],
        )
# Torch
# Each test below runs the same one-epoch experiment on a different
# device/engine combination, skipping when the hardware or the optional
# AMP/APEX dependency is unavailable.
def test_distillation_on_cpu():
    """Smoke-test distillation on the CPU."""
    train_experiment("cpu")


@mark.skipif(not IS_CUDA_AVAILABLE, reason="CUDA device is not available")
def test_distillation_on_torch_cuda0():
    """Smoke-test distillation on the first CUDA device."""
    train_experiment("cuda:0")


@mark.skipif(
    not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason="No CUDA>=2 found",
)
def test_distillation_on_torch_cuda1():
    """Smoke-test distillation on the second CUDA device."""
    train_experiment("cuda:1")


@mark.skipif(
    not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason="No CUDA>=2 found",
)
def test_distillation_on_torch_dp():
    """Smoke-test distillation with DataParallel across GPUs."""
    train_experiment(None, dl.DataParallelEngine())


@mark.skipif(
    not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason="No CUDA>=2 found",
)
def test_distillation_on_torch_ddp():
    """Smoke-test distillation with DistributedDataParallel."""
    train_experiment(None, dl.DistributedDataParallelEngine())


# AMP
@mark.skipif(
    not (IS_CUDA_AVAILABLE and SETTINGS.amp_required), reason="No CUDA or AMP found",
)
def test_distillation_on_amp():
    """Smoke-test distillation with automatic mixed precision."""
    train_experiment(None, dl.AMPEngine())


@mark.skipif(
    not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),
    reason="No CUDA>=2 or AMP found",
)
def test_distillation_on_amp_dp():
    """Smoke-test AMP + DataParallel."""
    train_experiment(None, dl.DataParallelAMPEngine())


@mark.skipif(
    not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),
    reason="No CUDA>=2 or AMP found",
)
def test_distillation_on_amp_ddp():
    """Smoke-test AMP + DistributedDataParallel."""
    train_experiment(None, dl.DistributedDataParallelAMPEngine())


# APEX
@mark.skipif(
    not (IS_CUDA_AVAILABLE and SETTINGS.apex_required), reason="No CUDA or Apex found",
)
def test_distillation_on_apex():
    """Smoke-test distillation with NVIDIA Apex."""
    train_experiment(None, dl.APEXEngine())


@mark.skipif(
    not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),
    reason="No CUDA>=2 or Apex found",
)
def test_distillation_on_apex_dp():
    """Smoke-test Apex + DataParallel."""
    train_experiment(None, dl.DataParallelAPEXEngine())


# NOTE: the Apex DDP variant is left disabled in the upstream suite.
# @mark.skipif(
#     not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),
#     reason="No CUDA>=2 or Apex found",
# )
# def test_distillation_on_apex_ddp():
#     train_experiment(None, dl.DistributedDataParallelApexEngine())
| 30.931429 | 99 | 0.619435 |
import os
from tempfile import TemporaryDirectory
from pytest import mark
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from catalyst import dl
from catalyst.contrib.datasets import MNIST
from catalyst.data import ToTensor
from catalyst.settings import IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES, SETTINGS
class DistilRunner(dl.Runner):
def handle_batch(self, batch):
x, y = batch
self.model["teacher"].eval()
with torch.no_grad():
t_logits = self.model["teacher"](x)
s_logits = self.model["student"](x)
self.batch = {
"t_logits": t_logits,
"s_logits": s_logits,
"targets": y,
"s_logprobs": F.log_softmax(s_logits, dim=-1),
"t_probs": F.softmax(t_logits, dim=-1),
}
def train_experiment(device, engine=None):
with TemporaryDirectory() as logdir:
teacher = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
student = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
model = {"teacher": teacher, "student": student}
criterion = {"cls": nn.CrossEntropyLoss(), "kl": nn.KLDivLoss(reduction="batchmean")}
optimizer = optim.Adam(student.parameters(), lr=0.02)
loaders = {
"train": DataLoader(
MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
),
"valid": DataLoader(
MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
),
}
runner = DistilRunner()
# model training
runner.train(
engine=engine or dl.DeviceEngine(device),
model=model,
criterion=criterion,
optimizer=optimizer,
loaders=loaders,
num_epochs=1,
logdir=logdir,
verbose=False,
callbacks=[
dl.AccuracyCallback(
input_key="t_logits", target_key="targets", num_classes=2, prefix="teacher_"
),
dl.AccuracyCallback(
input_key="s_logits", target_key="targets", num_classes=2, prefix="student_"
),
dl.CriterionCallback(
input_key="s_logits",
target_key="targets",
metric_key="cls_loss",
criterion_key="cls",
),
dl.CriterionCallback(
input_key="s_logprobs",
target_key="t_probs",
metric_key="kl_div_loss",
criterion_key="kl",
),
dl.MetricAggregationCallback(
metric_key="loss", metrics=["kl_div_loss", "cls_loss"], mode="mean"
),
dl.OptimizerCallback(metric_key="loss", model_key="student"),
dl.CheckpointCallback(
logdir=logdir,
loader_key="valid",
metric_key="loss",
minimize=True,
save_n_best=3,
),
],
)
# Torch
def test_distillation_on_cpu():
train_experiment("cpu")
@mark.skipif(not IS_CUDA_AVAILABLE, reason="CUDA device is not available")
def test_distillation_on_torch_cuda0():
train_experiment("cuda:0")
@mark.skipif(
not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason="No CUDA>=2 found",
)
def test_distillation_on_torch_cuda1():
train_experiment("cuda:1")
@mark.skipif(
not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason="No CUDA>=2 found",
)
def test_distillation_on_torch_dp():
train_experiment(None, dl.DataParallelEngine())
@mark.skipif(
not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason="No CUDA>=2 found",
)
def test_distillation_on_torch_ddp():
train_experiment(None, dl.DistributedDataParallelEngine())
# AMP
@mark.skipif(
not (IS_CUDA_AVAILABLE and SETTINGS.amp_required), reason="No CUDA or AMP found",
)
def test_distillation_on_amp():
train_experiment(None, dl.AMPEngine())
@mark.skipif(
not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),
reason="No CUDA>=2 or AMP found",
)
def test_distillation_on_amp_dp():
train_experiment(None, dl.DataParallelAMPEngine())
@mark.skipif(
not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),
reason="No CUDA>=2 or AMP found",
)
def test_distillation_on_amp_ddp():
train_experiment(None, dl.DistributedDataParallelAMPEngine())
# APEX
@mark.skipif(
not (IS_CUDA_AVAILABLE and SETTINGS.apex_required), reason="No CUDA or Apex found",
)
def test_distillation_on_apex():
train_experiment(None, dl.APEXEngine())
@mark.skipif(
not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),
reason="No CUDA>=2 or Apex found",
)
def test_distillation_on_apex_dp():
train_experiment(None, dl.DataParallelAPEXEngine())
# @mark.skipif(
# not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),
# reason="No CUDA>=2 or Apex found",
# )
# def test_distillation_on_apex_ddp():
# train_experiment(None, dl.DistributedDataParallelApexEngine())
| true | true |
f71169a59355addf79f185e4ad592858c30dd011 | 576 | py | Python | Data_Science/lesson_2/api_quiz/api_quiz_solution.py | napjon/moocs_solution | 5c96f43f6cb2ae643f482580446869953a99beb6 | [
"MIT"
] | 13 | 2016-04-29T07:21:44.000Z | 2021-09-29T03:20:51.000Z | Data_Science/lesson_2/api_quiz/api_quiz_solution.py | napjon/moocs_solution | 5c96f43f6cb2ae643f482580446869953a99beb6 | [
"MIT"
] | 1 | 2017-02-07T07:37:20.000Z | 2017-02-19T08:37:17.000Z | Data_Science/lesson_2/api_quiz/api_quiz_solution.py | napjon/moocs_solution | 5c96f43f6cb2ae643f482580446869953a99beb6 | [
"MIT"
] | 13 | 2016-01-25T03:23:57.000Z | 2019-10-13T15:29:23.000Z | import json
import requests
import pprint
def api_get_request(url):
    """Call the last.fm API at *url* and return the #1 top artist's name.

    The endpoint is expected to return JSON shaped like
    ``{"topartists": {"artist": [{"name": ...}, ...]}}`` (the last.fm
    ``geo.gettopartists`` response).

    Raises ValueError (via json decoding) on a non-JSON response and
    KeyError/IndexError if the payload lacks the expected structure.
    """
    # Response.json() decodes the body directly, replacing the previous
    # manual json.loads(requests.get(url).text) round-trip; the unused
    # PrettyPrinter debug scaffolding has been removed.
    data = requests.get(url).json()
    top_artists = data['topartists']['artist']
    return top_artists[0]['name']  # the top artist in Spain
| 30.315789 | 81 | 0.671875 | import json
import requests
import pprint
def api_get_request(url):
data = requests.get(url).text
data = json.loads(data)
#country_data = data['country']
pp = pprint.PrettyPrinter(depth = 4)
#pp.pprint(data)
top_artists = data['topartists']['artist']
#[e['name'] for e in top_artists]
return top_artists[0]['name'] # return the top artist in Spain
| true | true |
f71169dddfc98800a555c6b84c121cbb3c2501e0 | 580 | py | Python | tests/integration/models/test_group.py | beanpuppy/infynipy | 585e8734a6c3ed69c2a78d52a394a71303bcc88b | [
"MIT"
] | null | null | null | tests/integration/models/test_group.py | beanpuppy/infynipy | 585e8734a6c3ed69c2a78d52a394a71303bcc88b | [
"MIT"
] | null | null | null | tests/integration/models/test_group.py | beanpuppy/infynipy | 585e8734a6c3ed69c2a78d52a394a71303bcc88b | [
"MIT"
] | null | null | null | from infynipy.models.group import ReferrerGroup
from .. import IntegrationTest, vcrr
class TestReferrerGroup(IntegrationTest):
    """Integration tests for referrer groups (HTTP replayed from VCR cassettes)."""

    @vcrr.use_cassette
    def test_referrer_group_get_multiple(self):
        # Every element yielded by the referrer_groups listing should be a
        # ReferrerGroup model instance.
        for group in self.infynity.referrer_groups:
            assert isinstance(group, ReferrerGroup)

    @vcrr.use_cassette
    def test_referrer_group_create(self):
        data = {
            "group_name": "Test3 Group",
            "broker_id": 20041
        }
        # create() returns the new group's identifier as a string.
        group_id = self.infynity.referrer_group(data=data).create()
        assert isinstance(group_id, str)
| 27.619048 | 67 | 0.682759 | from infynipy.models.group import ReferrerGroup
from .. import IntegrationTest, vcrr
class TestReferrerGroup(IntegrationTest):
@vcrr.use_cassette
def test_referrer_group_get_multiple(self):
for group in self.infynity.referrer_groups:
assert isinstance(group, ReferrerGroup)
@vcrr.use_cassette
def test_referrer_group_create(self):
data = {
"group_name": "Test3 Group",
"broker_id": 20041
}
group_id = self.infynity.referrer_group(data=data).create()
assert isinstance(group_id, str)
| true | true |
f7116afc9150f85440d20e85f7548abaa8191c95 | 7,494 | py | Python | official/nlp/modeling/networks/albert_encoder_test.py | mcasanova1445/models | 37be0fdb4abccca633bb3199a4e6f3f71cd174d9 | [
"Apache-2.0"
] | 1 | 2020-09-14T10:46:07.000Z | 2020-09-14T10:46:07.000Z | official/nlp/modeling/networks/albert_encoder_test.py | mdsaifhaider/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | [
"Apache-2.0"
] | 8 | 2020-05-19T00:52:30.000Z | 2020-06-04T23:57:20.000Z | official/nlp/modeling/networks/albert_encoder_test.py | mdsaifhaider/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | [
"Apache-2.0"
] | 2 | 2021-10-07T04:47:04.000Z | 2021-12-18T04:18:19.000Z | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ALBERT transformer-based text encoder network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import
from official.nlp.modeling.networks import albert_encoder
# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It
# guarantees forward compatibility of this code for the V2 switchover.
@keras_parameterized.run_all_keras_modes
class AlbertEncoderTest(keras_parameterized.TestCase):
def tearDown(self):
super(AlbertEncoderTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
@parameterized.named_parameters(
dict(testcase_name="default", expected_dtype=tf.float32),
dict(testcase_name="with_float16_dtype", expected_dtype=tf.float16),
)
def test_network_creation(self, expected_dtype):
hidden_size = 32
sequence_length = 21
kwargs = dict(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3)
if expected_dtype == tf.float16:
tf.keras.mixed_precision.set_global_policy("mixed_float16")
# Create a small TransformerEncoder for testing.
test_network = albert_encoder.AlbertEncoder(**kwargs)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
data, pooled = test_network([word_ids, mask, type_ids])
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# If float_dtype is set to float16, the data output is float32 (from a layer
# norm) and pool output should be float16.
self.assertEqual(tf.float32, data.dtype)
self.assertEqual(expected_dtype, pooled.dtype)
# ALBERT has additonal 'embedding_hidden_mapping_in' weights and
# it shares transformer weights.
self.assertNotEmpty(
[x for x in test_network.weights if "embedding_projection/" in x.name])
self.assertNotEmpty(
[x for x in test_network.weights if "transformer/" in x.name])
self.assertEmpty(
[x for x in test_network.weights if "transformer/layer" in x.name])
def test_network_invocation(self):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
num_layers = 3
# Create a small TransformerEncoder for testing.
test_network = albert_encoder.AlbertEncoder(
vocab_size=vocab_size,
embedding_width=8,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=num_layers,
type_vocab_size=num_types)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
data, pooled = test_network([word_ids, mask, type_ids])
# Create a model based off of this network:
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
list_outputs = model.predict([word_id_data, mask_data, type_id_data])
# Creates a TransformerEncoder with max_sequence_length != sequence_length
max_sequence_length = 128
test_network = albert_encoder.AlbertEncoder(
vocab_size=vocab_size,
embedding_width=8,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=num_layers,
type_vocab_size=num_types)
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
_ = model.predict([word_id_data, mask_data, type_id_data])
# Tests dictionary outputs.
test_network_dict = albert_encoder.AlbertEncoder(
vocab_size=vocab_size,
embedding_width=8,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=num_layers,
type_vocab_size=num_types,
dict_outputs=True)
_ = test_network_dict([word_ids, mask, type_ids])
test_network_dict.set_weights(test_network.get_weights())
list_outputs = test_network([word_id_data, mask_data, type_id_data])
dict_outputs = test_network_dict(
dict(
input_word_ids=word_id_data,
input_mask=mask_data,
input_type_ids=type_id_data))
self.assertAllEqual(list_outputs[0], dict_outputs["sequence_output"])
self.assertAllEqual(list_outputs[1], dict_outputs["pooled_output"])
self.assertLen(dict_outputs["pooled_output"], num_layers)
def test_serialize_deserialize(self):
tf.keras.mixed_precision.set_global_policy("mixed_float16")
# Create a network object that sets all of its config options.
kwargs = dict(
vocab_size=100,
embedding_width=8,
hidden_size=32,
num_layers=3,
num_attention_heads=2,
max_sequence_length=21,
type_vocab_size=12,
intermediate_size=1223,
activation="relu",
dropout_rate=0.05,
attention_dropout_rate=0.22,
initializer="glorot_uniform")
network = albert_encoder.AlbertEncoder(**kwargs)
expected_config = dict(kwargs)
expected_config["activation"] = tf.keras.activations.serialize(
tf.keras.activations.get(expected_config["activation"]))
expected_config["initializer"] = tf.keras.initializers.serialize(
tf.keras.initializers.get(expected_config["initializer"]))
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = (
albert_encoder.AlbertEncoder.from_config(
network.get_config()))
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == "__main__":
tf.test.main()
| 39.650794 | 101 | 0.72538 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized
from official.nlp.modeling.networks import albert_encoder
@keras_parameterized.run_all_keras_modes
class AlbertEncoderTest(keras_parameterized.TestCase):
def tearDown(self):
super(AlbertEncoderTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
@parameterized.named_parameters(
dict(testcase_name="default", expected_dtype=tf.float32),
dict(testcase_name="with_float16_dtype", expected_dtype=tf.float16),
)
def test_network_creation(self, expected_dtype):
hidden_size = 32
sequence_length = 21
kwargs = dict(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3)
if expected_dtype == tf.float16:
tf.keras.mixed_precision.set_global_policy("mixed_float16")
test_network = albert_encoder.AlbertEncoder(**kwargs)
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
data, pooled = test_network([word_ids, mask, type_ids])
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
self.assertEqual(tf.float32, data.dtype)
self.assertEqual(expected_dtype, pooled.dtype)
self.assertNotEmpty(
[x for x in test_network.weights if "embedding_projection/" in x.name])
self.assertNotEmpty(
[x for x in test_network.weights if "transformer/" in x.name])
self.assertEmpty(
[x for x in test_network.weights if "transformer/layer" in x.name])
def test_network_invocation(self):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
num_layers = 3
test_network = albert_encoder.AlbertEncoder(
vocab_size=vocab_size,
embedding_width=8,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=num_layers,
type_vocab_size=num_types)
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
data, pooled = test_network([word_ids, mask, type_ids])
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
list_outputs = model.predict([word_id_data, mask_data, type_id_data])
# Creates a TransformerEncoder with max_sequence_length != sequence_length
max_sequence_length = 128
test_network = albert_encoder.AlbertEncoder(
vocab_size=vocab_size,
embedding_width=8,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=num_layers,
type_vocab_size=num_types)
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
_ = model.predict([word_id_data, mask_data, type_id_data])
# Tests dictionary outputs.
test_network_dict = albert_encoder.AlbertEncoder(
vocab_size=vocab_size,
embedding_width=8,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=num_layers,
type_vocab_size=num_types,
dict_outputs=True)
_ = test_network_dict([word_ids, mask, type_ids])
test_network_dict.set_weights(test_network.get_weights())
list_outputs = test_network([word_id_data, mask_data, type_id_data])
dict_outputs = test_network_dict(
dict(
input_word_ids=word_id_data,
input_mask=mask_data,
input_type_ids=type_id_data))
self.assertAllEqual(list_outputs[0], dict_outputs["sequence_output"])
self.assertAllEqual(list_outputs[1], dict_outputs["pooled_output"])
self.assertLen(dict_outputs["pooled_output"], num_layers)
def test_serialize_deserialize(self):
tf.keras.mixed_precision.set_global_policy("mixed_float16")
# Create a network object that sets all of its config options.
kwargs = dict(
vocab_size=100,
embedding_width=8,
hidden_size=32,
num_layers=3,
num_attention_heads=2,
max_sequence_length=21,
type_vocab_size=12,
intermediate_size=1223,
activation="relu",
dropout_rate=0.05,
attention_dropout_rate=0.22,
initializer="glorot_uniform")
network = albert_encoder.AlbertEncoder(**kwargs)
expected_config = dict(kwargs)
expected_config["activation"] = tf.keras.activations.serialize(
tf.keras.activations.get(expected_config["activation"]))
expected_config["initializer"] = tf.keras.initializers.serialize(
tf.keras.initializers.get(expected_config["initializer"]))
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = (
albert_encoder.AlbertEncoder.from_config(
network.get_config()))
_ = new_network.to_json()
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == "__main__":
tf.test.main()
| true | true |
f7116afe35be171ca7613c0d3c649949f5a75deb | 5,513 | py | Python | RaspberryPi/ble_lights.py | ippie52/BLE-Tutor | df4fae208d289e988955182c356040243365f66d | [
"MIT"
] | null | null | null | RaspberryPi/ble_lights.py | ippie52/BLE-Tutor | df4fae208d289e988955182c356040243365f66d | [
"MIT"
] | null | null | null | RaspberryPi/ble_lights.py | ippie52/BLE-Tutor | df4fae208d289e988955182c356040243365f66d | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# ------------------------------------------------------------------------------
"""@package ble_lights.py
Sets the attached light values and notifies when they change.
"""
# ------------------------------------------------------------------------------
# Kris Dunning ippie52@gmail.com 2020.
# ------------------------------------------------------------------------------
import pybleno as ble
import wiringpi
from typing import Dict
from sys import exit
from time import sleep
from colour_printer import ColourPrinter
class KrisCharacteristic(ColourPrinter, ble.Characteristic):
    """
    Provides the base for debugging a characteristic: a pybleno
    Characteristic mixed with colour-tagged console printing.
    """

    def __init__(self, settings: Dict[str, any], name: str, colour: str) -> None:
        """
        Initialises the object.

        @param settings pybleno characteristic settings (uuid/properties/value)
        @param name     Label shown in debug output
        @param colour   ColourPrinter colour code for this characteristic
        """
        ble.Characteristic.__init__(self, settings)
        # Bug fix: 'self' was previously omitted from this explicit base-class
        # call (the colour was silently bound as 'self').  Argument order is
        # (colour, name), matching ColourPrinter(ColourPrinter.SILVER, 'Script')
        # usage elsewhere in this module.
        ColourPrinter.__init__(self, colour, name)
class UnlockChar(KrisCharacteristic):
    """
    Provides the writable characteristic used to unlock/drive the lights.
    Observers registered with addObserver are notified with the first byte
    of any newly written value.
    """

    def __init__(self, uuid: str) -> None:
        """
        Constructs the UnlockChar.

        @param uuid The characteristic UUID to advertise
        """
        self._changeObservers = {}
        KrisCharacteristic.__init__(self, {
                'uuid': uuid,
                'properties': ['write'],
                'value': ''
            },
            'UnlockChar',
            ColourPrinter.GREEN
        )
        # Start as empty bytes so comparisons against incoming write
        # payloads (bytes) are type-consistent.
        self._value = b''

    def addObserver(self, name: str, observer) -> None:
        """
        Registers an observer callable, invoked with the first byte of any
        newly written value.  (Restored: this was commented out but is
        called from onAdvertisingStart.)
        """
        self.print(f'Adding observer for {name}.')
        self._changeObservers[name] = observer

    def removeObserver(self, name: str) -> None:
        """Removes a previously registered observer, if present."""
        if name in self._changeObservers:
            self.print(f'Removing observer {name}.')
            del self._changeObservers[name]
        else:
            self.print(f'Could not find observer {name} to remove.')

    def onWriteRequest(self, data, offset, withoutResponse, callback):
        """
        Handles the write request: stores the new value, notifies
        observers on change, and acknowledges the request.
        """
        self.print(f'Write request received, data: {data}, offset: {offset}')
        # Bug fixes: compare against the stored value (indexing the initial
        # empty value raised IndexError), assign 'data' (previously the
        # undefined name 'value'), and always acknowledge via 'callback'
        # (previously never called, stalling the BLE client).
        if data != self._value:
            self.print('The value has changed - Signal any listeners')
            for key, observer in self._changeObservers.items():
                self.print(f'Signalling observer {key}')
                observer(data[0])
            self._value = data
        callback(ble.Characteristic.RESULT_SUCCESS)
class StatusChar(KrisCharacteristic):
    """
    Provides the notify-only characteristic mirroring the state of an LED.
    """

    def __init__(self, uuid: str, led: int) -> None:
        """
        Constructs the StatusChar.

        @param uuid The characteristic UUID to advertise
        @param led  wiringPi pin number of the LED this characteristic mirrors
        """
        self._led = led
        # Initial value reflects the LED's current hardware state.
        self._value = wiringpi.digitalRead(self._led)
        KrisCharacteristic.__init__(self, {
                'uuid': uuid,
                'properties': ['notify'],
                'value': self._value
            },
            'StatusChar',
            ColourPrinter.GOLD
        )
        # Bug fix: was initialised as '_updateValueCallbacks' (plural) while
        # every other method reads/writes '_updateValueCallback'.
        self._updateValueCallback = None

    def onSubscribe(self, maxValueSize: int, updateValueCallback) -> None:
        """
        Sets the update value callback for notifications.
        """
        self.print('New subscriber added.')
        self._updateValueCallback = updateValueCallback

    def onUnsubscribe(self) -> None:
        """
        Removes the update value callback.
        """
        self.print('Subscriber removed')
        self._updateValueCallback = None

    def set(self, new_value: int) -> None:
        """
        Sets the value of the LED and notifies any subscriber.  (Restored:
        this was commented out but is registered as an observer in
        onAdvertisingStart.)
        """
        new_value = 0 if new_value == 0 else 1
        wiringpi.digitalWrite(self._led, new_value)
        self._value = new_value
        if self._updateValueCallback is not None:
            self._updateValueCallback(self._value)
def onStateChange(state: str) -> None:
"""
The state change handler function
"""
global server
print(f'on -> State Change: {state}')
if state == 'poweredOn':
server.startAdvertising('Kris Service?', ['FF10'])
else:
server.stopAdvertising()
def onAdvertisingStart(error: bool) -> None:
"""
The advertising handler function
"""
print(f'on -> Advertising Start: {error}')
if not error:
global server
status = StatusChar('FF12')
switch = UnlockChar('FF11')
switch.addObserver('FF12', status.set)
server.setServices([
ble.BlenoPrimaryService({
'uuid': 'FF10',
'characteristics': [status, switch]
})
]
)
RED_GPIO = 0
GRN_GPIO = 2
BLU_GPIO = 3
LED_SEQUENCE = [RED_GPIO, GRN_GPIO, BLU_GPIO]
BTN_GPIO = 1
wiringpi.wiringPiSetup() # For GPIO pin numbering
for led in LED_SEQUENCE:
wiringpi.pinMode(led, 1)
wiringpi.digitalWrite(led, 0)
wiringpi.pinMode(BTN_GPIO, 0)
cp = ColourPrinter(ColourPrinter.SILVER, 'Script')
cp.print('Creating the server...')
server = ble.Bleno()
cp.print('Binding the onStateChange handler')
server.on('stateChange', onStateChange)
cp.print('Binding the onAdvertisingStart handler')
server.on('advertisingStart', onAdvertisingStart)
cp.print('Starting the server...')
server.start()
running = True
while running:
try:
sleep(0.1)
except KeyboardInterrupt:
cp.print('Polite exit.')
running = False
server.stopAdvertising()
server.disconnect()
| 29.015789 | 81 | 0.573372 |
import pybleno as ble
import wiringpi
from typing import Dict
from sys import exit
from time import sleep
from colour_printer import ColourPrinter
class KrisCharacteristic(ColourPrinter, ble.Characteristic):
def __init__(self, settings: Dict[str, any], name: str, colour: str) -> None:
ble.Characteristic.__init__(self, settings)
ColourPrinter.__init__(colour, name)
class UnlockChar(KrisCharacteristic):
def __init__(self, uuid: str) -> None:
self._changeObservers = {}
KrisCharacteristic.__init__(self, {
'uuid': uuid,
'properties': ['write'],
'value': ''
},
'UnlockChar',
ColourPrinter.GREEN
)
self._value = ''
# Custom observer to turn on an LED
# """
# Handles the read request for this characteristic
# """
def onWriteRequest(self, data, offset, withoutResponse, callback):
self.print(f'Write request received, data: {data}, offset: {offset}')
if data != self._value[0]:
self.print('The value has changed - Signal any listeners')
for key, observer in self._changeObservers.items():
self.print(f'Signalling observer {key}')
observer(self._value[0])
self._value = value
class StatusChar(KrisCharacteristic):
def __init__(self, uuid: str, led: int) -> None:
self._led = led
self._value = wiringpi.digitalRead(self._led)
KrisCharacteristic.__init__(self, {
'uuid': uuid,
'properties': ['notify'],
'value': self._value
},
'StatusChar',
ColourPrinter.GOLD
)
self._updateValueCallbacks = None
def onSubscribe(self, maxValueSize: int, updateValueCallback) -> None:
self.print('New subscriber added.')
self._updateValueCallback = updateValueCallback
def onUnsubscribe(self) -> None:
self.print('Subscriber removed')
self._updateValueCallback = None
# Sets the value of the LED
# """
def onStateChange(state: str) -> None:
global server
print(f'on -> State Change: {state}')
if state == 'poweredOn':
server.startAdvertising('Kris Service?', ['FF10'])
else:
server.stopAdvertising()
def onAdvertisingStart(error: bool) -> None:
print(f'on -> Advertising Start: {error}')
if not error:
global server
status = StatusChar('FF12')
switch = UnlockChar('FF11')
switch.addObserver('FF12', status.set)
server.setServices([
ble.BlenoPrimaryService({
'uuid': 'FF10',
'characteristics': [status, switch]
})
]
)
RED_GPIO = 0
GRN_GPIO = 2
BLU_GPIO = 3
LED_SEQUENCE = [RED_GPIO, GRN_GPIO, BLU_GPIO]
BTN_GPIO = 1
wiringpi.wiringPiSetup()
for led in LED_SEQUENCE:
wiringpi.pinMode(led, 1)
wiringpi.digitalWrite(led, 0)
wiringpi.pinMode(BTN_GPIO, 0)
cp = ColourPrinter(ColourPrinter.SILVER, 'Script')
cp.print('Creating the server...')
server = ble.Bleno()
cp.print('Binding the onStateChange handler')
server.on('stateChange', onStateChange)
cp.print('Binding the onAdvertisingStart handler')
server.on('advertisingStart', onAdvertisingStart)
cp.print('Starting the server...')
server.start()
running = True
while running:
try:
sleep(0.1)
except KeyboardInterrupt:
cp.print('Polite exit.')
running = False
server.stopAdvertising()
server.disconnect()
| true | true |
f7116b42fc8b5931f1f34af5460f82cc8a0e789a | 7,049 | py | Python | graphql_compiler/compiler/ir_lowering_match/__init__.py | jb-kensho/graphql-compiler | 4511793281698bd55e63fd7a3f25f9cb094084d4 | [
"Apache-2.0"
] | null | null | null | graphql_compiler/compiler/ir_lowering_match/__init__.py | jb-kensho/graphql-compiler | 4511793281698bd55e63fd7a3f25f9cb094084d4 | [
"Apache-2.0"
] | 1 | 2019-04-18T18:23:16.000Z | 2019-04-18T18:23:16.000Z | graphql_compiler/compiler/ir_lowering_match/__init__.py | jb-kensho/graphql-compiler | 4511793281698bd55e63fd7a3f25f9cb094084d4 | [
"Apache-2.0"
] | 1 | 2019-11-21T02:38:27.000Z | 2019-11-21T02:38:27.000Z | # Copyright 2018-present Kensho Technologies, LLC.
import six
from ..blocks import Filter, GlobalOperationsStart
from ..ir_lowering_common import (extract_optional_location_root_info,
extract_simple_optional_location_info,
lower_context_field_existence, merge_consecutive_filter_clauses,
optimize_boolean_expression_comparisons, remove_end_optionals)
from .ir_lowering import (lower_backtrack_blocks,
lower_folded_coerce_types_into_filter_blocks,
lower_has_substring_binary_compositions,
remove_backtrack_blocks_from_fold,
rewrite_binary_composition_inside_ternary_conditional,
truncate_repeated_single_step_traversals,
truncate_repeated_single_step_traversals_in_sub_queries)
from ..ir_sanity_checks import sanity_check_ir_blocks_from_frontend
from .between_lowering import lower_comparisons_to_between
from .optional_traversal import (collect_filters_to_first_location_occurrence,
convert_optional_traversals_to_compound_match_query,
lower_context_field_expressions, prune_non_existent_outputs)
from ..match_query import convert_to_match_query
from ..workarounds import (orientdb_class_with_while, orientdb_eval_scheduling,
orientdb_query_execution)
from .utils import construct_where_filter_predicate
##############
# Public API #
##############
def lower_ir(ir_blocks, query_metadata_table, type_equivalence_hints=None):
"""Lower the IR into an IR form that can be represented in MATCH queries.
Args:
ir_blocks: list of IR blocks to lower into MATCH-compatible form
query_metadata_table: QueryMetadataTable object containing all metadata collected during
query processing, including location metadata (e.g. which locations
are folded or optional).
type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.
Used as a workaround for GraphQL's lack of support for
inheritance across "types" (i.e. non-interfaces), as well as a
workaround for Gremlin's total lack of inheritance-awareness.
The key-value pairs in the dict specify that the "key" type
is equivalent to the "value" type, i.e. that the GraphQL type or
interface in the key is the most-derived common supertype
of every GraphQL type in the "value" GraphQL union.
Recursive expansion of type equivalence hints is not performed,
and only type-level correctness of this argument is enforced.
See README.md for more details on everything this parameter does.
*****
Be very careful with this option, as bad input here will
lead to incorrect output queries being generated.
*****
Returns:
MatchQuery object containing the IR blocks organized in a MATCH-like structure
"""
sanity_check_ir_blocks_from_frontend(ir_blocks, query_metadata_table)
# Construct the mapping of each location to its corresponding GraphQL type.
location_types = {
location: location_info.type
for location, location_info in query_metadata_table.registered_locations
}
# Compute the set of all locations that have associated type coercions.
coerced_locations = {
location
for location, location_info in query_metadata_table.registered_locations
if location_info.coerced_from_type is not None
}
# Extract information for both simple and complex @optional traverses
location_to_optional_results = extract_optional_location_root_info(ir_blocks)
complex_optional_roots, location_to_optional_roots = location_to_optional_results
simple_optional_root_info = extract_simple_optional_location_info(
ir_blocks, complex_optional_roots, location_to_optional_roots)
ir_blocks = remove_end_optionals(ir_blocks)
# Append global operation block(s) to filter out incorrect results
# from simple optional match traverses (using a WHERE statement)
if len(simple_optional_root_info) > 0:
where_filter_predicate = construct_where_filter_predicate(
query_metadata_table, simple_optional_root_info)
ir_blocks.insert(-1, GlobalOperationsStart())
ir_blocks.insert(-1, Filter(where_filter_predicate))
# These lowering / optimization passes work on IR blocks.
ir_blocks = lower_context_field_existence(ir_blocks, query_metadata_table)
ir_blocks = optimize_boolean_expression_comparisons(ir_blocks)
ir_blocks = rewrite_binary_composition_inside_ternary_conditional(ir_blocks)
ir_blocks = merge_consecutive_filter_clauses(ir_blocks)
ir_blocks = lower_has_substring_binary_compositions(ir_blocks)
ir_blocks = orientdb_eval_scheduling.workaround_lowering_pass(ir_blocks, query_metadata_table)
# Here, we lower from raw IR blocks into a MatchQuery object.
# From this point on, the lowering / optimization passes work on the MatchQuery representation.
match_query = convert_to_match_query(ir_blocks)
match_query = lower_comparisons_to_between(match_query)
match_query = lower_backtrack_blocks(match_query, location_types)
match_query = truncate_repeated_single_step_traversals(match_query)
match_query = orientdb_class_with_while.workaround_type_coercions_in_recursions(match_query)
# Optimize and lower the IR blocks inside @fold scopes.
new_folds = {
key: merge_consecutive_filter_clauses(
remove_backtrack_blocks_from_fold(
lower_folded_coerce_types_into_filter_blocks(folded_ir_blocks)
)
)
for key, folded_ir_blocks in six.iteritems(match_query.folds)
}
match_query = match_query._replace(folds=new_folds)
compound_match_query = convert_optional_traversals_to_compound_match_query(
match_query, complex_optional_roots, location_to_optional_roots)
compound_match_query = prune_non_existent_outputs(compound_match_query)
compound_match_query = collect_filters_to_first_location_occurrence(compound_match_query)
compound_match_query = lower_context_field_expressions(compound_match_query)
compound_match_query = truncate_repeated_single_step_traversals_in_sub_queries(
compound_match_query)
compound_match_query = orientdb_query_execution.expose_ideal_query_execution_start_points(
compound_match_query, location_types, coerced_locations)
return compound_match_query
| 54.643411 | 99 | 0.709037 |
import six
from ..blocks import Filter, GlobalOperationsStart
from ..ir_lowering_common import (extract_optional_location_root_info,
extract_simple_optional_location_info,
lower_context_field_existence, merge_consecutive_filter_clauses,
optimize_boolean_expression_comparisons, remove_end_optionals)
from .ir_lowering import (lower_backtrack_blocks,
lower_folded_coerce_types_into_filter_blocks,
lower_has_substring_binary_compositions,
remove_backtrack_blocks_from_fold,
rewrite_binary_composition_inside_ternary_conditional,
truncate_repeated_single_step_traversals,
truncate_repeated_single_step_traversals_in_sub_queries)
from ..ir_sanity_checks import sanity_check_ir_blocks_from_frontend
from .between_lowering import lower_comparisons_to_between
from .optional_traversal import (collect_filters_to_first_location_occurrence,
convert_optional_traversals_to_compound_match_query,
lower_context_field_expressions, prune_non_existent_outputs)
from ..match_query import convert_to_match_query
from ..workarounds import (orientdb_class_with_while, orientdb_eval_scheduling,
orientdb_query_execution)
from .utils import construct_where_filter_predicate
location: location_info.type
for location, location_info in query_metadata_table.registered_locations
}
coerced_locations = {
location
for location, location_info in query_metadata_table.registered_locations
if location_info.coerced_from_type is not None
}
location_to_optional_results = extract_optional_location_root_info(ir_blocks)
complex_optional_roots, location_to_optional_roots = location_to_optional_results
simple_optional_root_info = extract_simple_optional_location_info(
ir_blocks, complex_optional_roots, location_to_optional_roots)
ir_blocks = remove_end_optionals(ir_blocks)
if len(simple_optional_root_info) > 0:
where_filter_predicate = construct_where_filter_predicate(
query_metadata_table, simple_optional_root_info)
ir_blocks.insert(-1, GlobalOperationsStart())
ir_blocks.insert(-1, Filter(where_filter_predicate))
ir_blocks = lower_context_field_existence(ir_blocks, query_metadata_table)
ir_blocks = optimize_boolean_expression_comparisons(ir_blocks)
ir_blocks = rewrite_binary_composition_inside_ternary_conditional(ir_blocks)
ir_blocks = merge_consecutive_filter_clauses(ir_blocks)
ir_blocks = lower_has_substring_binary_compositions(ir_blocks)
ir_blocks = orientdb_eval_scheduling.workaround_lowering_pass(ir_blocks, query_metadata_table)
match_query = convert_to_match_query(ir_blocks)
match_query = lower_comparisons_to_between(match_query)
match_query = lower_backtrack_blocks(match_query, location_types)
match_query = truncate_repeated_single_step_traversals(match_query)
match_query = orientdb_class_with_while.workaround_type_coercions_in_recursions(match_query)
new_folds = {
key: merge_consecutive_filter_clauses(
remove_backtrack_blocks_from_fold(
lower_folded_coerce_types_into_filter_blocks(folded_ir_blocks)
)
)
for key, folded_ir_blocks in six.iteritems(match_query.folds)
}
match_query = match_query._replace(folds=new_folds)
compound_match_query = convert_optional_traversals_to_compound_match_query(
match_query, complex_optional_roots, location_to_optional_roots)
compound_match_query = prune_non_existent_outputs(compound_match_query)
compound_match_query = collect_filters_to_first_location_occurrence(compound_match_query)
compound_match_query = lower_context_field_expressions(compound_match_query)
compound_match_query = truncate_repeated_single_step_traversals_in_sub_queries(
compound_match_query)
compound_match_query = orientdb_query_execution.expose_ideal_query_execution_start_points(
compound_match_query, location_types, coerced_locations)
return compound_match_query
| true | true |
f7116c404797f3847bdb182bff01976965d6e485 | 63,967 | py | Python | src/sage/schemes/elliptic_curves/padic_lseries.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | [
"BSL-1.0"
] | 3 | 2019-07-15T13:48:24.000Z | 2019-11-08T12:31:43.000Z | src/sage/schemes/elliptic_curves/padic_lseries.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | [
"BSL-1.0"
] | 2 | 2018-10-30T13:40:20.000Z | 2020-07-23T12:13:30.000Z | src/sage/schemes/elliptic_curves/padic_lseries.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | [
"BSL-1.0"
] | 1 | 2020-07-23T10:29:58.000Z | 2020-07-23T10:29:58.000Z | # -*- coding: utf-8 -*-
r"""
`p`-adic `L`-functions of elliptic curves
To an elliptic curve `E` over the rational numbers and a prime `p`, one
can associate a `p`-adic L-function; at least if `E` does not have additive
reduction at `p`. This function is defined by interpolation of L-values of `E`
at twists. Through the main conjecture of Iwasawa theory it should also be
equal to a characteristic series of a certain Selmer group.
If `E` is ordinary, then it is an element of the Iwasawa algebra
`\Lambda(\ZZ_p^\times) = \ZZ_p[\Delta][\![T]\!]`, where `\Delta` is the group
of `(p-1)`-st roots of unity in `\ZZ_p^\times`, and `T = [\gamma] - 1` where
`\gamma = 1 + p` is a generator of `1 + p\ZZ_p`. (There is a slightly different
description for `p = 2`.)
One can decompose this algebra as the direct product of the subalgebras
corresponding to the characters of `\Delta`, which are simply the powers
`\tau^\eta` (`0 \le \eta \le p-2`) of the Teichmueller character `\tau: \Delta
\to \ZZ_p^\times`. Projecting the L-function into these components gives `p-1`
power series in `T`, each with coefficients in `\ZZ_p`.
If `E` is supersingular, the series will have coefficients in a quadratic
extension of `\QQ_p`, and the coefficients will be unbounded. In this case we
have only implemented the series for `\eta = 0`. We have also implemented the
`p`-adic L-series as formulated by Perrin-Riou [BP]_, which has coefficients in
the Dieudonné module `D_pE = H^1_{dR}(E/\QQ_p)` of `E`. There is a different
description by Pollack [Po]_ which is not available here.
According to the `p`-adic version of the Birch and Swinnerton-Dyer conjecture
[MTT]_, the order of vanishing of the `L`-function at the trivial character
(i.e. of the series for `\eta = 0` at `T = 0`) is just the rank of `E(\QQ)`, or
this rank plus one if the reduction at `p` is split multiplicative.
See [SW]_ for more details.
REFERENCES:
- [MTT]_
- [BP]_
.. [Po] Robert Pollack, *On the `p`-adic `L`-function of a modular form
at a supersingular prime*, Duke Math. J. 118 (2003), no. 3, 523-558.
- [SW]_
AUTHORS:
- William Stein (2007-01-01): first version
- Chris Wuthrich (22/05/2007): changed minor issues and added supersingular things
- Chris Wuthrich (11/2008): added quadratic_twists
- David Loeffler (01/2011): added nontrivial Teichmueller components
"""
######################################################################
# Copyright (C) 2007 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# https://www.gnu.org/licenses/
######################################################################
from __future__ import print_function
from sage.rings.integer_ring import ZZ
from sage.rings.rational_field import QQ
from sage.rings.padics.factory import Qp
from sage.rings.infinity import infinity
from sage.rings.all import LaurentSeriesRing, PowerSeriesRing, PolynomialRing, Integers
from sage.rings.integer import Integer
from sage.arith.all import valuation, binomial, kronecker_symbol, gcd, prime_divisors
from sage.structure.sage_object import SageObject
from sage.structure.richcmp import richcmp_method, richcmp
from sage.misc.all import verbose, denominator, get_verbose
import sage.arith.all as arith
from sage.modules.free_module_element import vector
import sage.matrix.all as matrix
import sage.schemes.hyperelliptic_curves.monsky_washnitzer
from sage.functions.log import log
from sage.functions.other import floor
from sage.misc.decorators import rename_keyword
@richcmp_method
class pAdicLseries(SageObject):
r"""
The `p`-adic L-series of an elliptic curve.
EXAMPLES:
An ordinary example::
sage: e = EllipticCurve('389a')
sage: L = e.padic_lseries(5)
sage: L.series(0)
Traceback (most recent call last):
...
ValueError: n (=0) must be a positive integer
sage: L.series(1)
O(T^1)
sage: L.series(2)
O(5^4) + O(5)*T + (4 + O(5))*T^2 + (2 + O(5))*T^3 + (3 + O(5))*T^4 + O(T^5)
sage: L.series(3, prec=10)
O(5^5) + O(5^2)*T + (4 + 4*5 + O(5^2))*T^2 + (2 + 4*5 + O(5^2))*T^3 + (3 + O(5^2))*T^4 + (1 + O(5))*T^5 + O(5)*T^6 + (4 + O(5))*T^7 + (2 + O(5))*T^8 + O(5)*T^9 + O(T^10)
sage: L.series(2,quadratic_twist=-3)
2 + 4*5 + 4*5^2 + O(5^4) + O(5)*T + (1 + O(5))*T^2 + (4 + O(5))*T^3 + O(5)*T^4 + O(T^5)
A prime p such that E[p] is reducible::
sage: L = EllipticCurve('11a').padic_lseries(5)
sage: L.series(1)
5 + O(5^2) + O(T)
sage: L.series(2)
5 + 4*5^2 + O(5^3) + O(5^0)*T + O(5^0)*T^2 + O(5^0)*T^3 + O(5^0)*T^4 + O(T^5)
sage: L.series(3)
5 + 4*5^2 + 4*5^3 + O(5^4) + O(5)*T + O(5)*T^2 + O(5)*T^3 + O(5)*T^4 + O(T^5)
An example showing the calculation of nontrivial Teichmueller twists::
sage: E = EllipticCurve('11a1')
sage: lp = E.padic_lseries(7)
sage: lp.series(4,eta=1)
3 + 7^3 + 6*7^4 + 3*7^5 + O(7^6) + (2*7 + 7^2 + O(7^3))*T + (1 + 5*7^2 + O(7^3))*T^2 + (4 + 4*7 + 4*7^2 + O(7^3))*T^3 + (4 + 3*7 + 7^2 + O(7^3))*T^4 + O(T^5)
sage: lp.series(4,eta=2)
5 + 6*7 + 4*7^2 + 2*7^3 + 3*7^4 + 2*7^5 + O(7^6) + (6 + 4*7 + 7^2 + O(7^3))*T + (3 + 2*7^2 + O(7^3))*T^2 + (1 + 4*7 + 7^2 + O(7^3))*T^3 + (6 + 6*7 + 6*7^2 + O(7^3))*T^4 + O(T^5)
sage: lp.series(4,eta=3)
O(7^6) + (5 + 4*7 + 2*7^2 + O(7^3))*T + (6 + 5*7 + 2*7^2 + O(7^3))*T^2 + (5*7 + O(7^3))*T^3 + (7 + 4*7^2 + O(7^3))*T^4 + O(T^5)
(Note that the last series vanishes at `T = 0`, which is consistent with ::
sage: E.quadratic_twist(-7).rank()
1
This proves that `E` has rank 1 over `\QQ(\zeta_7)`.)
TESTS:
The load-dumps test::
sage: lp = EllipticCurve('11a').padic_lseries(5)
sage: lp == loads(dumps(lp))
True
"""
def __init__(self, E, p, implementation = 'eclib', normalize='L_ratio'):
r"""
INPUT:
- ``E`` -- an elliptic curve
- ``p`` -- a prime of good reduction
- ``implementation`` -- string (default:'eclib'); either 'eclib' to use
John Cremona's ``eclib`` for the computation of modular
symbols or 'sage' to use Sage's own implementation
- ``normalize`` -- ``'L_ratio'`` (default), ``'period'`` or ``'none'``;
this is describes the way the modular symbols
are normalized. See ``modular_symbol`` of
an elliptic curve over Q for more details.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: Lp = E.padic_lseries(3)
sage: Lp.series(2,prec=3)
2 + 3 + 3^2 + 2*3^3 + O(3^4) + (1 + O(3))*T + (1 + O(3))*T^2 + O(T^3)
"""
self._E = E
self._p = ZZ(p)
self._normalize = normalize
if implementation not in ['eclib', 'sage']:
raise ValueError("Implementation should be one of 'eclib' or 'sage'")
self._implementation = implementation
if not self._p.is_prime():
raise ValueError("p (=%s) must be a prime" % p)
if E.conductor() % (self._p)**2 == 0:
raise NotImplementedError("p (=%s) must be a prime of semi-stable reduction" % p)
try :
E.label()
except RuntimeError :
print("Warning : Curve outside Cremona's table. Computations of modular symbol space might take very long !")
self._modular_symbol = E.modular_symbol(sign=+1,
implementation=implementation,
normalize=normalize)
def __add_negative_space(self):
r"""
A helper function not designed for direct use.
This function add the attribute ``_negative_modular_symbol`` to the class. This may take time
and will only be needed when twisting with negative fundamental discriminants.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: lp = E.padic_lseries(5)
sage: lp.modular_symbol(1/7,sign=-1) #indirect doctest
-1/2
"""
self._negative_modular_symbol = self._E.modular_symbol(sign=-1, implementation="sage", normalize=self._normalize)
def __richcmp__(self, other, op):
r"""
Compare ``self`` and ``other``.
TESTS::
sage: lp1 = EllipticCurve('11a1').padic_lseries(5)
sage: lp2 = EllipticCurve('11a1').padic_lseries(7)
sage: lp3 = EllipticCurve('11a2').padic_lseries(5)
sage: lp1 == lp1
True
sage: lp1 == lp2
False
sage: lp1 == lp3
False
"""
if type(self) != type(other):
return NotImplemented
return richcmp((self._E, self._p), (other._E, other._p), op)
    def elliptic_curve(self):
        r"""
        Return the elliptic curve to which this `p`-adic L-series is associated.

        OUTPUT: the elliptic curve passed at construction time.

        EXAMPLES::

            sage: L = EllipticCurve('11a').padic_lseries(5)
            sage: L.elliptic_curve()
            Elliptic Curve defined by y^2 + y = x^3 - x^2 - 10*x - 20 over Rational Field
        """
        return self._E
    def prime(self):
        r"""
        Return the prime `p` as in 'p-adic L-function'.

        OUTPUT: the prime passed at construction time, as a Sage integer.

        EXAMPLES::

            sage: L = EllipticCurve('11a').padic_lseries(5)
            sage: L.prime()
            5
        """
        return self._p
def _repr_(self):
r"""
Return print representation.
EXAMPLES::
sage: e = EllipticCurve('37a')
sage: e.padic_lseries(3)._repr_()
'3-adic L-series of Elliptic Curve defined by y^2 + y = x^3 - x over Rational Field'
sage: e.padic_lseries(3,normalize='none')
3-adic L-series of Elliptic Curve defined by y^2 + y = x^3 - x over Rational Field (not normalized)
sage: L = e.padic_lseries(3,normalize='none')
sage: L.rename('(factor)*L_3(T)')
sage: L
(factor)*L_3(T)
"""
s = "%s-adic L-series of %s" % (self._p, self._E)
if not self._normalize == 'L_ratio':
s += ' (not normalized)'
return s
def modular_symbol(self, r, sign=+1, quadratic_twist=+1):
r"""
Return the modular symbol evaluated at `r`.
This is used to compute this `p`-adic L-series.
Note that the normalization is not correct at this
stage: use ``_quotient_of periods_to_twist`` to correct.
Note also that this function does not check if the condition
on the quadratic_twist=D is satisfied. So the result will only
be correct if for each prime `\ell` dividing `D`, we have
`ord_{\ell}(N)<= ord_{\ell}(D)`, where `N` is the conductor of the curve.
INPUT:
- ``r`` -- a cusp given as either a rational number or oo
- ``sign`` -- +1 (default) or -1 (only implemented without twists)
- ``quadratic_twist`` -- a fundamental discriminant of a quadratic field or +1 (default)
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: lp = E.padic_lseries(5)
sage: [lp.modular_symbol(r) for r in [0,1/5,oo,1/11]]
[1/5, 6/5, 0, 0]
sage: [lp.modular_symbol(r,sign=-1) for r in [0,1/3,oo,1/7]]
[0, 1/2, 0, -1/2]
sage: [lp.modular_symbol(r,quadratic_twist=-20) for r in [0,1/5,oo,1/11]]
[1, 1, 0, 1/2]
sage: E = EllipticCurve('20a1')
sage: Et = E.quadratic_twist(-4)
sage: lpt = Et.padic_lseries(5)
sage: eta = lpt._quotient_of_periods_to_twist(-4)
sage: lpt.modular_symbol(0) == lp.modular_symbol(0,quadratic_twist=-4) / eta
True
"""
if quadratic_twist == +1 :
if sign == +1 :
return self._modular_symbol(r)
elif sign == -1:
try:
m = self._negative_modular_symbol
except (KeyError, AttributeError):
if not hasattr(self, '_modular_symbol_negative'):
self.__add_negative_space()
m = self._negative_modular_symbol
return m(r)
else :
D = quadratic_twist
if sign == -1:
raise NotImplementedError("Quadratic twists for negative modular symbols are not yet implemented.")
if D > 0:
m = self._modular_symbol
return sum([kronecker_symbol(D, u) * m(r + ZZ(u) / D)
for u in range(1, D)])
else:
try:
m = self._negative_modular_symbol
except (KeyError, AttributeError):
if not hasattr(self, '_modular_symbol_negative'):
self.__add_negative_space()
m = self._negative_modular_symbol
return -sum([kronecker_symbol(D, u) * m(r + ZZ(u) / D)
for u in range(1, -D)])
def measure(self, a, n, prec, quadratic_twist=+1, sign = +1):
r"""
Return the measure on `\ZZ_p^{\times}` defined by
`\mu_{E,\alpha}^+ ( a + p^n \ZZ_p ) =
\frac{1}{\alpha^n} \left [\frac{a}{p^n}\right]^{+} -
\frac{1}{\alpha^{n+1}} \left[\frac{a}{p^{n-1}}\right]^{+}`
where `[\cdot]^{+}` is the modular symbol. This is used to define
this `p`-adic L-function (at least when the reduction is good).
The optional argument ``sign`` allows the minus symbol `[\cdot]^{-}` to
be substituted for the plus symbol.
The optional argument ``quadratic_twist`` replaces `E` by the twist in
the above formula, but the twisted modular symbol is computed using a
sum over modular symbols of `E` rather than finding the modular symbols
for the twist. Quadratic twists are only implemented if the sign is
`+1`.
Note that the normalization is not correct at this
stage: use ``_quotient_of periods`` and ``_quotient_of periods_to_twist``
to correct.
Note also that this function does not check if the condition
on the ``quadratic_twist=D`` is satisfied. So the result will only
be correct if for each prime `\ell` dividing `D`, we have
`ord_{\ell}(N)<= ord_{\ell}(D)`, where `N` is the conductor of the curve.
INPUT:
- ``a`` -- an integer
- ``n`` -- a non-negative integer
- ``prec`` -- an integer
- ``quadratic_twist`` (default = 1) -- a fundamental discriminant of a quadratic field,
should be coprime to the conductor of `E`
- ``sign`` (default = 1) -- an integer, which should be `\pm 1`.
EXAMPLES::
sage: E = EllipticCurve('37a')
sage: L = E.padic_lseries(5)
sage: L.measure(1,2, prec=9)
2 + 3*5 + 4*5^3 + 2*5^4 + 3*5^5 + 3*5^6 + 4*5^7 + 4*5^8 + O(5^9)
sage: L.measure(1,2, quadratic_twist=8,prec=15)
O(5^15)
sage: L.measure(1,2, quadratic_twist=-4,prec=15)
4 + 4*5 + 4*5^2 + 3*5^3 + 2*5^4 + 5^5 + 3*5^6 + 5^8 + 2*5^9 + 3*5^12 + 2*5^13 + 4*5^14 + O(5^15)
sage: E = EllipticCurve('11a1')
sage: a = E.quadratic_twist(-3).padic_lseries(5).measure(1,2,prec=15)
sage: b = E.padic_lseries(5).measure(1,2, quadratic_twist=-3,prec=15)
sage: a == b * E.padic_lseries(5)._quotient_of_periods_to_twist(-3)
True
"""
s = ZZ(sign)
if s not in [1, -1]:
raise ValueError("Sign must be +- 1")
if quadratic_twist != 1 and s != 1:
raise NotImplementedError("Quadratic twists not implemented for sign -1")
if quadratic_twist < 0:
s = ZZ(-1)
try:
p, alpha, z, w, f = self.__measure_data[(n, prec, s)]
except (KeyError, AttributeError):
if not hasattr(self, '__measure_data'):
self.__measure_data = {}
p = self._p
alpha = self.alpha(prec=prec)
z = 1/(alpha**n)
w = p**(n-1)
if s == +1 :
f = self._modular_symbol
else :
try :
f = self._negative_modular_symbol
except (KeyError, AttributeError):
if not hasattr(self, '_modular_symbol_negative'):
self.__add_negative_space()
f = self._negative_modular_symbol
self.__measure_data[(n, prec, s)] = (p, alpha, z, w, f)
if quadratic_twist == 1:
if self._E.conductor() % p == 0:
return z * f(a/(p*w))
return z * ( f(a/(p*w)) - f(a/w) / alpha)
else:
D = quadratic_twist
if self.is_ordinary():
chip = kronecker_symbol(D,p)
else:
chip = 1 # alpha is +- sqrt(-p) anyway
if self._E.conductor() % p == 0:
mu = chip**n * z * sum([kronecker_symbol(D,u) * f(a/(p*w)+ZZ(u)/D) for u in range(1,D.abs())])
else:
mu = chip**n * z * sum([kronecker_symbol(D,u) *( f(a/(p*w)+ZZ(u)/D) - chip /alpha * f(a/w+ZZ(u)/D) ) for u in range(1,D.abs())])
return s*mu
def alpha(self, prec=20):
r"""
Return a `p`-adic root `\alpha` of the polynomial `x^2 - a_p x
+ p` with `ord_p(\alpha) < 1`. In the ordinary case this is
just the unit root.
INPUT:
- ``prec`` -- positive integer, the `p`-adic precision of the root.
EXAMPLES:
Consider the elliptic curve 37a::
sage: E = EllipticCurve('37a')
An ordinary prime::
sage: L = E.padic_lseries(5)
sage: alpha = L.alpha(10); alpha
3 + 2*5 + 4*5^2 + 2*5^3 + 5^4 + 4*5^5 + 2*5^7 + 5^8 + 5^9 + O(5^10)
sage: alpha^2 - E.ap(5)*alpha + 5
O(5^10)
A supersingular prime::
sage: L = E.padic_lseries(3)
sage: alpha = L.alpha(10); alpha
alpha + O(alpha^21)
sage: alpha^2 - E.ap(3)*alpha + 3
O(alpha^22)
A reducible prime::
sage: L = EllipticCurve('11a').padic_lseries(5)
sage: L.alpha(5)
1 + 4*5 + 3*5^2 + 2*5^3 + 4*5^4 + O(5^5)
"""
try:
return self._alpha[prec]
except AttributeError:
self._alpha = {}
except KeyError:
pass
E = self._E
p = self._p
a_p = E.ap(p)
K = Qp(p, prec, print_mode='series')
if E.conductor() % p == 0:
self._alpha[prec] = K(a_p)
return K(a_p)
R = ZZ['x']
f = R([p, -a_p, 1])
if E.is_ordinary(p):
G = f.factor_padic(p, prec + 5)
for pr, e in G:
a = -pr[0]
if a.valuation() < 1:
self._alpha[prec] = K(a)
return K(a)
raise RuntimeError("bug in p-adic L-function alpha")
else: # supersingular case
f = f.change_ring(K)
A = K.extension(f, names="alpha")
a = A.gen()
self._alpha[prec] = a
return a
def order_of_vanishing(self):
r"""
Return the order of vanishing of this `p`-adic L-series.
The output of this function is provably correct, due to a
theorem of Kato [Ka]_.
.. NOTE:: currently `p` must be a prime of good ordinary reduction.
REFERENCES:
- [MTT]_
- [Ka]_
EXAMPLES::
sage: L = EllipticCurve('11a').padic_lseries(3)
sage: L.order_of_vanishing()
0
sage: L = EllipticCurve('11a').padic_lseries(5)
sage: L.order_of_vanishing()
0
sage: L = EllipticCurve('37a').padic_lseries(5)
sage: L.order_of_vanishing()
1
sage: L = EllipticCurve('43a').padic_lseries(3)
sage: L.order_of_vanishing()
1
sage: L = EllipticCurve('37b').padic_lseries(3)
sage: L.order_of_vanishing()
0
sage: L = EllipticCurve('389a').padic_lseries(3)
sage: L.order_of_vanishing()
2
sage: L = EllipticCurve('389a').padic_lseries(5)
sage: L.order_of_vanishing()
2
sage: L = EllipticCurve('5077a').padic_lseries(5, implementation = 'eclib')
sage: L.order_of_vanishing()
3
"""
try:
return self.__ord
except AttributeError:
pass
if not self.is_ordinary():
raise NotImplementedError
E = self.elliptic_curve()
if not E.is_good(self.prime()):
raise ValueError("prime must be of good reduction")
r = E.rank()
n = 1
while True:
f = self.series(n)
v = f.valuation()
if v < n and v < r:
raise RuntimeError("while computing p-adic order of vanishing, got a contradiction: the curve is %s, the curve has rank %s, but the p-adic L-series vanishes to order <= %s" % (E, r, v))
if v == r:
self.__ord = v
return v
n += 1
def teichmuller(self, prec):
r"""
Return Teichmuller lifts to the given precision.
INPUT:
- ``prec`` - a positive integer.
OUTPUT:
- a list of `p`-adic numbers, the cached Teichmuller lifts
EXAMPLES::
sage: L = EllipticCurve('11a').padic_lseries(7)
sage: L.teichmuller(1)
[0, 1, 2, 3, 4, 5, 6]
sage: L.teichmuller(2)
[0, 1, 30, 31, 18, 19, 48]
"""
p = self._p
K = Qp(p, prec, print_mode='series')
return [Integer(0)] + \
[a.residue(prec).lift() for a in K.teichmuller_system()]
def _e_bounds(self, n, prec):
r"""
A helper function not designed for direct use.
It computes the valuations of the coefficients of `\omega_n = (1+T)^{p^n}-1`.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: Lp = E.padic_lseries(2)
sage: Lp._e_bounds(1,10)
[+Infinity, 1, 0, 0, 0, 0, 0, 0, 0, 0]
sage: Lp._e_bounds(2,10)
[+Infinity, 2, 1, 1, 0, 0, 0, 0, 0, 0]
sage: Lp._e_bounds(3,10)
[+Infinity, 3, 2, 2, 1, 1, 1, 1, 0, 0]
sage: Lp._e_bounds(4,10)
[+Infinity, 4, 3, 3, 2, 2, 2, 2, 1, 1]
"""
# trac 10280: replace with new corrected code, note that the sequence has to be decreasing.
pn = self._p**n
enj = infinity
res = [enj]
for j in range(1,prec):
bino = valuation(binomial(pn,j),self._p)
if bino < enj:
enj = bino
res.append(enj)
return res
    def _get_series_from_cache(self, n, prec, D, eta):
        r"""
        A helper function not designed for direct use.
        It picks up the series in the cache if it has been previously computed.
        EXAMPLES::
            sage: E = EllipticCurve('11a1')
            sage: Lp = E.padic_lseries(5)
            sage: Lp._pAdicLseries__series = {} # clear cached series
            sage: Lp._get_series_from_cache(3,5,1,0)
            sage: Lp.series(3,prec=5)
            5 + 4*5^2 + 4*5^3 + O(5^4) + O(5)*T + O(5)*T^2 + O(5)*T^3 + O(5)*T^4 + O(T^5)
            sage: Lp._get_series_from_cache(3,5,1,0)
            5 + 4*5^2 + 4*5^3 + O(5^4) + O(5)*T + O(5)*T^2 + O(5)*T^3 + O(5)*T^4 + O(T^5)
        """
        # Cache layout: self.__series (name-mangled to _pAdicLseries__series)
        # maps (n, prec, D, eta) -> power series.
        try:
            return self.__series[(n,prec,D,eta)]
        except AttributeError:
            # first call ever: create an empty cache and report a miss
            self.__series = {}
        except KeyError:
            # no exact hit, but a series stored for the same (n, D, eta) at a
            # higher precision can be truncated down to the requested one
            for _n, _prec, _D, _eta in self.__series:
                if _n == n and _D == D and _eta == eta and _prec >= prec:
                    return self.__series[(_n,_prec,_D,_eta)].add_bigoh(prec)
        return None
def _set_series_in_cache(self, n, prec, D, eta, f):
r"""
A helper function not designed for direct use.
It picks up the series in the cache if it has been previously computed.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: Lp = E.padic_lseries(5)
sage: Lp.series(3,prec=5)
5 + 4*5^2 + 4*5^3 + O(5^4) + O(5)*T + O(5)*T^2 + O(5)*T^3 + O(5)*T^4 + O(T^5)
sage: Lp._set_series_in_cache(3,5,1,0,0)
sage: Lp.series(3,prec=5)
0
"""
self.__series[(n, prec, D, eta)] = f
    def _quotient_of_periods_to_twist(self, D):
        r"""
        For a fundamental discriminant `D` of a quadratic number field this
        computes the constant `\eta` such that
        `\sqrt{\vert D\vert }\cdot\Omega_{E_D}^{+} =\eta\cdot \Omega_E^{sign(D)}`.
        As in [MTT]_ page 40. This is either 1 or 2 unless the condition
        on the twist is not satisfied, e.g. if we are 'twisting back' to a
        semi-stable curve.
        .. NOTE::
            No check on precision is made, so this may fail for huge `D`.
        EXAMPLES::
            sage: E = EllipticCurve('37b1')
            sage: lp = E.padic_lseries(3)
            sage: lp._quotient_of_periods_to_twist(-20)
            1
            sage: lp._quotient_of_periods_to_twist(-4)
            1
            sage: lp._quotient_of_periods_to_twist(-3)
            1
            sage: lp._quotient_of_periods_to_twist(-8)
            2
            sage: lp._quotient_of_periods_to_twist(8)
            2
            sage: lp._quotient_of_periods_to_twist(5)
            1
            sage: lp._quotient_of_periods_to_twist(12)
            1
            sage: E = EllipticCurve('11a1')
            sage: Et = E.quadratic_twist(-3)
            sage: lpt = Et.padic_lseries(5)
            sage: lpt._quotient_of_periods_to_twist(-3)
            3
        """
        from sage.functions.all import sqrt
        # This function does not depend on p and could be moved out of this file but it is needed only here
        # Note that the number of real components does not change by twisting.
        if D == 1:
            return 1
        Et = self._E.quadratic_twist(D)
        if D > 1:
            # real twist: compare the real periods directly
            qt = Et.period_lattice().basis()[0]/self._E.period_lattice().basis()[0]
            qt *= sqrt(qt.parent()(D))
        else:
            # imaginary twist: use the imaginary part of the second basis period
            qt = Et.period_lattice().basis()[1].imag()/self._E.period_lattice().basis()[0]
            if Et.real_components() == 1:
                qt *= 2
            qt *= sqrt(qt.parent()(-D))
        verbose('the real approximation is %s'%qt)
        # we know from MTT that the result has a denominator 1
        # (so rounding 8*qt to the nearest integer recovers it exactly,
        # allowing denominators up to 8 for safety)
        return QQ(int(round(8*qt)))/8
class pAdicLseriesOrdinary(pAdicLseries):
    r"""
    The `p`-adic L-series of an elliptic curve at a prime `p` of ordinary
    reduction (the code also handles primes dividing the conductor, where
    the interpolation factor is linear — see :meth:`series`).
    """
    def series(self, n=2, quadratic_twist=+1, prec=5, eta=0):
        r"""
        Return the `n`-th approximation to the `p`-adic L-series, in the
        component corresponding to the `\eta`-th power of the Teichmueller
        character, as a power series in `T` (corresponding to `\gamma-1` with
        `\gamma=1+p` as a generator of `1+p\ZZ_p`). Each coefficient is a
        `p`-adic number whose precision is provably correct.
        Here the normalization of the `p`-adic L-series is chosen
        such that `L_p(E,1) = (1-1/\alpha)^2 L(E,1)/\Omega_E`
        where `\alpha` is the unit root of the characteristic
        polynomial of Frobenius on `T_pE` and `\Omega_E` is the
        Néron period of `E`.
        INPUT:
        - ``n`` - (default: 2) a positive integer
        - ``quadratic_twist`` - (default: +1) a fundamental discriminant of a
          quadratic field, coprime to the conductor of the curve
        - ``prec`` - (default: 5) maximal number of terms of the series to
          compute; to compute as many as possible just give a very large
          number for ``prec``; the result will still be correct.
        - ``eta`` (default: 0) an integer (specifying the power of the
          Teichmueller character on the group of roots of unity in
          `\ZZ_p^\times`)
        :meth:`power_series` is identical to ``series``.
        EXAMPLES:
        We compute some `p`-adic L-functions associated to the elliptic
        curve 11a::
            sage: E = EllipticCurve('11a')
            sage: p = 3
            sage: E.is_ordinary(p)
            True
            sage: L = E.padic_lseries(p)
            sage: L.series(3)
            2 + 3 + 3^2 + 2*3^3 + O(3^5) + (1 + 3 + O(3^2))*T + (1 + 2*3 + O(3^2))*T^2 + O(3)*T^3 + O(3)*T^4 + O(T^5)
        Another example at a prime of bad reduction, where the
        `p`-adic L-function has an extra 0 (compared to the non
        `p`-adic L-function)::
            sage: E = EllipticCurve('11a')
            sage: p = 11
            sage: E.is_ordinary(p)
            True
            sage: L = E.padic_lseries(p)
            sage: L.series(2)
            O(11^4) + (10 + O(11))*T + (6 + O(11))*T^2 + (2 + O(11))*T^3 + (5 + O(11))*T^4 + O(T^5)
        We compute a `p`-adic L-function that vanishes to order 2::
            sage: E = EllipticCurve('389a')
            sage: p = 3
            sage: E.is_ordinary(p)
            True
            sage: L = E.padic_lseries(p)
            sage: L.series(1)
            O(T^1)
            sage: L.series(2)
            O(3^4) + O(3)*T + (2 + O(3))*T^2 + O(T^3)
            sage: L.series(3)
            O(3^5) + O(3^2)*T + (2 + 2*3 + O(3^2))*T^2 + (2 + O(3))*T^3 + (1 + O(3))*T^4 + O(T^5)
        Checks if the precision can be changed (:trac:`5846`)::
            sage: L.series(3,prec=4)
            O(3^5) + O(3^2)*T + (2 + 2*3 + O(3^2))*T^2 + (2 + O(3))*T^3 + O(T^4)
            sage: L.series(3,prec=6)
            O(3^5) + O(3^2)*T + (2 + 2*3 + O(3^2))*T^2 + (2 + O(3))*T^3 + (1 + O(3))*T^4 + (1 + O(3))*T^5 + O(T^6)
        Rather than computing the `p`-adic L-function for the curve '15523a1', one can
        compute it as a quadratic_twist::
            sage: E = EllipticCurve('43a1')
            sage: lp = E.padic_lseries(3)
            sage: lp.series(2,quadratic_twist=-19)
            2 + 2*3 + 2*3^2 + O(3^4) + (1 + O(3))*T + (1 + O(3))*T^2 + O(T^3)
            sage: E.quadratic_twist(-19).label()  # optional -- database_cremona_ellcurve
            '15523a1'
        This proves that the rank of '15523a1' is zero, even if ``mwrank`` can not determine this.
        We calculate the `L`-series in the nontrivial Teichmueller components::
            sage: L = EllipticCurve('110a1').padic_lseries(5)
            sage: for j in [0..3]: print(L.series(4, eta=j))
            O(5^6) + (2 + 2*5 + 2*5^2 + O(5^3))*T + (5 + 5^2 + O(5^3))*T^2 + (4 + 4*5 + 2*5^2 + O(5^3))*T^3 + (1 + 5 + 3*5^2 + O(5^3))*T^4 + O(T^5)
            4 + 3*5 + 2*5^2 + 3*5^3 + 5^4 + O(5^6) + (1 + 3*5 + 4*5^2 + O(5^3))*T + (3 + 4*5 + 3*5^2 + O(5^3))*T^2 + (3 + 3*5^2 + O(5^3))*T^3 + (1 + 2*5 + 2*5^2 + O(5^3))*T^4 + O(T^5)
            2 + O(5^6) + (1 + 5 + O(5^3))*T + (2 + 4*5 + 3*5^2 + O(5^3))*T^2 + (4 + 5 + 2*5^2 + O(5^3))*T^3 + (4 + O(5^3))*T^4 + O(T^5)
            3 + 5 + 2*5^2 + 5^3 + 3*5^4 + 4*5^5 + O(5^6) + (1 + 2*5 + 4*5^2 + O(5^3))*T + (1 + 4*5 + O(5^3))*T^2 + (3 + 2*5 + 2*5^2 + O(5^3))*T^3 + (5 + 5^2 + O(5^3))*T^4 + O(T^5)
        It should now also work with `p=2` (:trac:`20798`)::
            sage: E = EllipticCurve("53a1")
            sage: lp = E.padic_lseries(2)
            sage: lp.series(7)
            O(2^8) + (1 + 2^2 + 2^3 + O(2^5))*T + (1 + 2^3 + O(2^4))*T^2 + (2^2 + 2^3 + O(2^4))*T^3 + (2 + 2^2 + O(2^3))*T^4 + O(T^5)
            sage: E = EllipticCurve("109a1")
            sage: lp = E.padic_lseries(2)
            sage: lp.series(6)
            2^2 + 2^6 + O(2^7) + (2 + O(2^4))*T + O(2^3)*T^2 + (2^2 + O(2^3))*T^3 + (2 + O(2^2))*T^4 + O(T^5)
        """
        n = ZZ(n)
        if n < 1:
            raise ValueError("n (=%s) must be a positive integer" % n)
        if self._p == 2 and n == 1:
            raise ValueError("n (=%s) must be a at least 2 if p is 2" % n)
        if prec < 1:
            raise ValueError("Insufficient precision (%s)" % prec)
        # check if the conditions on quadratic_twist are satisfied
        # eta only matters modulo p-1 (the order of the Teichmueller character)
        eta = ZZ(eta) % (self._p - 1)
        D = ZZ(quadratic_twist)
        if D != 1:
            if eta != 0: raise NotImplementedError("quadratic twists only implemented for the 0th Teichmueller component")
            if D % 4 == 0:
                d = D//4
                if not d.is_squarefree() or d % 4 == 1:
                    raise ValueError("quadratic_twist (=%s) must be a fundamental discriminant of a quadratic field"%D)
            else:
                if not D.is_squarefree() or D % 4 != 1:
                    raise ValueError("quadratic_twist (=%s) must be a fundamental discriminant of a quadratic field"%D)
            if gcd(D,self._p) != 1:
                raise ValueError("quadratic twist (=%s) must be coprime to p (=%s) "%(D,self._p))
            if gcd(D,self._E.conductor())!= 1:
                for ell in prime_divisors(D):
                    if valuation(self._E.conductor(),ell) > valuation(D,ell) :
                        raise ValueError("can not twist a curve of conductor (=%s) by the quadratic twist (=%s)."%(self._E.conductor(),D))
        p = self._p
        #verbose("computing L-series for p=%s, n=%s, and prec=%s"%(p,n,prec))
        if prec == 1:
            if eta == 0:
                # trac 15737: if we only ask for the leading term we don't
                # need to do any sum as L_p(E,0) = (1-1/alpha)^2 * m(0) (good case)
                # set prec arbitrary to 20.
                K = Qp(p, 20, print_mode='series')
                R = PowerSeriesRing(K,'T',1)
                L = self.modular_symbol(0, sign=+1, quadratic_twist=D)
                chip = kronecker_symbol(D,p)
                if self._E.conductor() % p == 0:
                    # bad reduction: only a linear interpolation factor
                    L *= 1 - chip/self.alpha()
                else:
                    L *= (1-chip/self.alpha())**2
                L /= self._quotient_of_periods_to_twist(D)*self._E.real_components()
                L = R(L, 1)
                return L
            else:
                # here we need some sums anyway
                bounds = self._prec_bounds(n,prec)
                padic_prec = 20
        else:
            bounds = self._prec_bounds(n,prec)
            padic_prec = max(bounds[1:]) + 5
        verbose("using p-adic precision of %s"%padic_prec)
        # number of T-coefficients the level-n Riemann sums determine:
        # p^(n-1) for odd p, 2^(n-2) for p = 2
        if p == 2:
            res_series_prec = min(p**(n-2), prec)
        else:
            res_series_prec = min(p**(n-1), prec)
        verbose("using series precision of %s"%res_series_prec)
        ans = self._get_series_from_cache(n, res_series_prec,D,eta)
        if not ans is None:
            verbose("found series in cache")
            return ans
        # accumulate the Riemann sums over QQ; coefficients are truncated
        # to their proven p-adic precision only at the very end
        K = QQ
        R = PowerSeriesRing(K,'T',res_series_prec)
        T = R(R.gen(),res_series_prec )
        L = R(0)
        one_plus_T_factor = R(1)
        gamma_power = K(1)
        # NOTE(review): this assignment is immediately overwritten in both
        # branches of the if/else below and appears to be redundant
        teich = self.teichmuller(padic_prec)
        if p == 2:
            # for p = 2 the roots of unity are +-1 and gamma = 5
            # (a generator of 1+4*Z_2 — confirm against trac 20798)
            teich = [0, 1,-1]
            gamma = K(5)
            p_power = 2**(n-2)
            a_range = 3
        else:
            teich = self.teichmuller(padic_prec)
            gamma = K(1+ p)
            p_power = p**(n-1)
            a_range = p
        # sign of the modular symbol: + for even eta, - for odd eta
        si = 1-2*(eta % 2)
        verbose("Now iterating over %s summands"%((p-1)*p_power))
        verbose_level = get_verbose()
        count_verb = 0
        for j in range(p_power):
            s = K(0)
            if verbose_level >= 2 and j/p_power*100 > count_verb + 3:
                verbose("%.2f percent done"%(float(j)/p_power*100))
                count_verb += 3
            for a in range(1,a_range):
                b = teich[a] * gamma_power
                s += teich[a]**eta * self.measure(b, n, padic_prec, quadratic_twist=D, sign=si).lift()
            L += s * one_plus_T_factor
            one_plus_T_factor *= 1+T
            gamma_power *= gamma
        verbose("the series before adjusting the precision is %s"%L)
        # Now create series but with each coefficient truncated
        # so it is proven correct:
        K = Qp(p, padic_prec, print_mode='series')
        R = PowerSeriesRing(K,'T',res_series_prec)
        L = R(L,res_series_prec)
        aj = L.list()
        if len(aj) > 0:
            aj = [aj[0].add_bigoh(padic_prec-2)] + \
                 [aj[j].add_bigoh(bounds[j]) for j in range(1,len(aj))]
            L = R(aj,res_series_prec )
        L /= self._quotient_of_periods_to_twist(D)*self._E.real_components()
        self._set_series_in_cache(n, res_series_prec, D, eta, L)
        return L
    power_series = series
    def is_ordinary(self):
        r"""
        Return ``True`` if the elliptic curve that this L-function is attached
        to is ordinary.
        EXAMPLES::
            sage: L = EllipticCurve('11a').padic_lseries(5)
            sage: L.is_ordinary()
            True
        """
        return True
    def is_supersingular(self):
        r"""
        Return ``True`` if the elliptic curve that this L function is attached
        to is supersingular.
        EXAMPLES::
            sage: L = EllipticCurve('11a').padic_lseries(5)
            sage: L.is_supersingular()
            False
        """
        return False
    def _c_bound(self):
        r"""
        A helper function not designed for direct use.
        It returns the maximal `p`-adic valuation of the possible denominators
        of the modular symbols.
        EXAMPLES::
            sage: E = EllipticCurve('11a1')
            sage: Lp = E.padic_lseries(5)
            sage: Lp._c_bound()
            1
            sage: Lp = E.padic_lseries(17)
            sage: Lp._c_bound()
            0
        """
        try:
            return self.__c_bound
        except AttributeError:
            pass
        E = self._E
        p = self._p
        if E.galois_representation().is_irreducible(p):
            # irreducible mod-p representation: modular symbols are p-integral
            ans = 0
        else:
            m = E.modular_symbol_space(sign=1)
            b = m.boundary_map().codomain()
            C = b._known_cusps()  # all known, since computed the boundary map
            ans = max([valuation(self.modular_symbol(a).denominator(), p)
                       for a in C])
        self.__c_bound = ans
        return ans
    def _prec_bounds(self, n, prec):
        r"""
        A helper function not designed for direct use.
        It returns the `p`-adic precisions of the approximation
        to the `p`-adic L-function.
        EXAMPLES::
            sage: E = EllipticCurve('11a1')
            sage: Lp = E.padic_lseries(5)
            sage: Lp._prec_bounds(3,10)
            [+Infinity, 1, 1, 1, 1, 0, 0, 0, 0, 0]
            sage: Lp._prec_bounds(3,12)
            [+Infinity, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]
            sage: Lp._prec_bounds(4,5)
            [+Infinity, 2, 2, 2, 2]
            sage: Lp._prec_bounds(15,10)
            [+Infinity, 13, 13, 13, 13, 12, 12, 12, 12, 12]
            sage: Lp = E.padic_lseries(3)
            sage: Lp._prec_bounds(15,10)
            [+Infinity, 14, 14, 13, 13, 13, 13, 13, 13, 12]
        """
        # combine the omega_n coefficient bounds with the global denominator
        # bound on the modular symbols
        if self._p == 2:
            e = self._e_bounds(n - 2, prec)
        else:
            e = self._e_bounds(n - 1, prec)
        c = self._c_bound()
        return [e[j] - c for j in range(len(e))]
class pAdicLseriesSupersingular(pAdicLseries):
    def series(self, n=3, quadratic_twist=+1, prec=5, eta=0):
        r"""
        Return the `n`-th approximation to the `p`-adic L-series as a
        power series in `T` (corresponding to `\gamma-1` with
        `\gamma=1+p` as a generator of `1+p\ZZ_p`). Each
        coefficient is an element of a quadratic extension of the `p`-adic
        number whose precision is probably (?) correct.
        Here the normalization of the `p`-adic L-series is chosen
        such that `L_p(E,1) = (1-1/\alpha)^2 L(E,1)/\Omega_E`
        where `\alpha` is a root of the characteristic
        polynomial of Frobenius on `T_pE` and `\Omega_E` is the
        Néron period of `E`.
        INPUT:
        - ``n`` - (default: 2) a positive integer
        - ``quadratic_twist`` - (default: +1) a fundamental discriminant of a
          quadratic field, coprime to the conductor of the curve
        - ``prec`` - (default: 5) maximal number of terms of the series to
          compute; to compute as many as possible just give a very large
          number for ``prec``; the result will still be correct.
        - ``eta`` (default: 0) an integer (specifying the power of the
          Teichmueller character on the group of roots of unity in
          `\ZZ_p^\times`)
        OUTPUT:
        a power series with coefficients in a quadratic ramified extension of
        the `p`-adic numbers generated by a root `alpha` of the characteristic
        polynomial of Frobenius on `T_pE`.
        ALIAS: power_series is identical to series.
        EXAMPLES:
        A supersingular example, where we must compute to higher precision to see anything::
            sage: e = EllipticCurve('37a')
            sage: L = e.padic_lseries(3); L
            3-adic L-series of Elliptic Curve defined by y^2 + y = x^3 - x over Rational Field
            sage: L.series(2)
            O(T^3)
            sage: L.series(4)         # takes a long time (several seconds)
            O(alpha) + (alpha^-2 + O(alpha^0))*T + (alpha^-2 + O(alpha^0))*T^2 + O(T^5)
            sage: L.alpha(2).parent()
            3-adic Eisenstein Extension Field in alpha defined by x^2 + 3*x + 3
        An example where we only compute the leading term (:trac:`15737`)::
            sage: E = EllipticCurve("17a1")
            sage: L = E.padic_lseries(3)
            sage: L.series(4,prec=1)
            alpha^-2 + alpha^-1 + 2 + 2*alpha + ... + O(alpha^38) + O(T)
        It works also for `p=2`::
            sage: E = EllipticCurve("11a1")
            sage: lp = E.padic_lseries(2)
            sage: lp.series(10)
            O(alpha^-3) + (alpha^-4 + O(alpha^-3))*T + (alpha^-4 + O(alpha^-3))*T^2 + (alpha^-5 + alpha^-4 + O(alpha^-3))*T^3 + (alpha^-4 + O(alpha^-3))*T^4 + O(T^5)
        """
        n = ZZ(n)
        if n < 1:
            raise ValueError("n (=%s) must be a positive integer" % n)
        if self._p == 2 and n == 1:
            raise ValueError("n (=%s) must be at least 2 when p=2" % n)
        if prec < 1:
            raise ValueError("Insufficient precision (%s)" % prec)
        # check if the conditions on quadratic_twist are satisfied
        D = ZZ(quadratic_twist)
        if D != 1:
            if eta != 0: raise NotImplementedError("quadratic twists only implemented for the 0th Teichmueller component")
            if D % 4 == 0:
                d = D//4
                if not d.is_squarefree() or d % 4 == 1:
                    raise ValueError("quadratic_twist (=%s) must be a fundamental discriminant of a quadratic field"%D)
            else:
                if not D.is_squarefree() or D % 4 != 1:
                    raise ValueError("quadratic_twist (=%s) must be a fundamental discriminant of a quadratic field" % D)
            if gcd(D,self._E.conductor()) != 1:
                for ell in prime_divisors(D):
                    if valuation(self._E.conductor(), ell) > valuation(D,ell) :
                        raise ValueError("can not twist a curve of conductor (=%s) by the quadratic twist (=%s)." % (self._E.conductor(), D))
        p = self._p
        # eta only matters modulo p-1 (the order of the Teichmueller character)
        eta = ZZ(eta) % (p - 1)
        #if p == 2 and self._normalize :
            #print('Warning : for p = 2 the normalization might not be correct !')
        if prec == 1:
            if eta == 0:
                # trac 15737: if we only ask for the leading term we don't
                # need to do any sum as L_p(E,0) = (1-1/alpha)^2 * m(0) (good case)
                # set prec arbitrary to 20.
                alpha = self.alpha(prec=20)
                K = alpha.parent()
                R = PowerSeriesRing(K,'T',1)
                L = self.modular_symbol(0, sign=+1, quadratic_twist=D)
                L *= (1-1/self.alpha())**2
                L /= self._quotient_of_periods_to_twist(D)*self._E.real_components()
                L = R(L, 1)
                return L
            else:
                # here we need some sums anyway
                bounds = self._prec_bounds(n,prec)
                alphaadic_prec = 20
        else:
            prec = min(p**(n-1), prec)
            bounds = self._prec_bounds(n,prec)
            alphaadic_prec = max(bounds[1:]) + 5
        # the extension is ramified of degree 2, so p-adic precision is
        # roughly half the alpha-adic precision
        padic_prec = alphaadic_prec//2+1
        verbose("using alpha-adic precision of %s"%padic_prec)
        ans = self._get_series_from_cache(n, prec, quadratic_twist,eta)
        if not ans is None:
            verbose("found series in cache")
            return ans
        alpha = self.alpha(prec=padic_prec)
        K = alpha.parent()
        R = PowerSeriesRing(K,'T',prec)
        T = R(R.gen(), prec)
        L = R(0)
        one_plus_T_factor = R(1)
        gamma_power = 1
        # NOTE(review): this assignment is immediately overwritten in both
        # branches of the if/else below and appears to be redundant
        teich = self.teichmuller(padic_prec)
        if p == 2:
            # for p = 2 the roots of unity are +-1 and gamma = 5
            teich = [0, 1,-1]
            gamma = 5
            p_power = 2**(n-2)
            a_range = 3
        else:
            teich = self.teichmuller(padic_prec)
            gamma = 1+ p
            p_power = p**(n-1)
            a_range = p
        # sign of the modular symbol: + for even eta, - for odd eta
        si = 1-2*(eta % 2)
        verbose("Now iterating over %s summands"%((p-1)*p_power))
        verbose_level = get_verbose()
        count_verb = 0
        for j in range(p_power):
            s = K(0)
            if verbose_level >= 2 and j/p_power*100 > count_verb + 3:
                verbose("%.2f percent done"%(float(j)/p_power*100))
                count_verb += 3
            for a in range(1,a_range):
                b = teich[a] * gamma_power
                s += teich[a]**eta * self.measure(b, n, padic_prec, quadratic_twist=D, sign=si)
            L += s * one_plus_T_factor
            one_plus_T_factor *= 1+T
            gamma_power *= gamma
        # Now create series but with each coefficient truncated
        # so it is proven correct:
        # the coefficients are now treated as alpha-adic numbers (trac 20254)
        L = R(L,prec)
        aj = L.list()
        if len(aj) > 0:
            bj = [aj[0].add_bigoh(2*(padic_prec-2))]
            j = 1
            while j < len(aj):
                bj.append( aj[j].add_bigoh(bounds[j]) )
                j += 1
            L = R(bj, prec)
        L /= self._quotient_of_periods_to_twist(D)*self._E.real_components()
        self._set_series_in_cache(n, prec, quadratic_twist, eta, L)
        return L
    power_series = series
def is_ordinary(self):
r"""
Return ``True`` if the elliptic curve that this L-function is attached
to is ordinary.
EXAMPLES::
sage: L = EllipticCurve('11a').padic_lseries(19)
sage: L.is_ordinary()
False
"""
return False
def is_supersingular(self):
r"""
Return ``True`` if the elliptic curve that this L function is attached
to is supersingular.
EXAMPLES::
sage: L = EllipticCurve('11a').padic_lseries(19)
sage: L.is_supersingular()
True
"""
return True
def _prec_bounds(self, n, prec):
r"""
A helper function not designed for direct use.
It returns the `\alpha`-adic precisions of the approximation
to the `p`-adic L-function.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: Lp = E.padic_lseries(19)
sage: Lp._prec_bounds(3,5)
[+Infinity, -1, -1, -1, -1]
sage: Lp._prec_bounds(2,5)
[+Infinity, -2, -2, -2, -2]
sage: Lp._prec_bounds(10,5)
[+Infinity, 6, 6, 6, 6]
"""
if self._p == 2:
e = self._e_bounds(n - 2, prec)
else:
e = self._e_bounds(n - 1, prec)
c0 = ZZ(n + 2)
return [infinity] + [2 * e[j] - c0 for j in range(1, len(e))]
    def _poly(self, a):
        """
        Given an element a in Qp[alpha] this returns the list
        containing the two coordinates in Qp.
        EXAMPLES::
            sage: E = EllipticCurve("14a1")
            sage: lp = E.padic_lseries(5)
            sage: K = lp.alpha().parent()
            sage: a = K(5)
            sage: a
            4*alpha^2 + alpha^4 + O(alpha^42)
            sage: lp._poly(a)
            [5 + O(5^21), O(5^21)]
        """
        # this should be implemented in elements of Eisenstein rings at some point trac 20248
        if a.is_zero():
            return [0,0]
        # NTL stores a as pi^k * (v[0] + v[1]*x) with integral v[i]
        v, k = a._ntl_rep_abs()
        K = a.base_ring()
        pi = K.uniformiser()
        v0 = K(v[0]._sage_()) * pi**k
        v1 = K(v[1]._sage_()) * pi**k
        alpha = a.parent().gen()
        # sanity check: the two coordinates must reassemble to a
        assert v0 + v1*alpha == a
        return [ v0, v1 ]
def Dp_valued_series(self, n=3, quadratic_twist=+1, prec=5):
r"""
Return a vector of two components which are p-adic power series.
The answer v is such that
`(1-\varphi)^{-2}\cdot L_p(E,T) =` ``v[1]`` `\cdot \omega +` ``v[2]`` `\cdot \varphi(\omega)`
as an element of the Dieudonné module `D_p(E) = H^1_{dR}(E/\QQ_p)` where
`\omega` is the invariant differential and `\varphi` is the Frobenius on `D_p(E)`.
According to the `p`-adic Birch and Swinnerton-Dyer
conjecture [BP]_ this function has a zero of order
rank of `E(\QQ)` and it's leading term is contains the order of
the Tate-Shafarevich group, the Tamagawa numbers, the order of the
torsion subgroup and the `D_p`-valued `p`-adic regulator.
INPUT:
- ``n`` -- (default: 3) a positive integer
- ``prec`` -- (default: 5) a positive integer
EXAMPLES::
sage: E = EllipticCurve('14a')
sage: L = E.padic_lseries(5)
sage: L.Dp_valued_series(4) # long time (9s on sage.math, 2011)
(1 + 4*5 + O(5^2) + (4 + O(5))*T + (1 + O(5))*T^2 + (4 + O(5))*T^3 + (2 + O(5))*T^4 + O(T^5), 5^2 + O(5^3) + O(5^2)*T + (4*5 + O(5^2))*T^2 + (2*5 + O(5^2))*T^3 + (2 + 2*5 + O(5^2))*T^4 + O(T^5))
"""
E = self._E
p = self._p
lps = self.series(n, quadratic_twist=quadratic_twist, prec=prec)
# now split up the series in two lps = G + H * alpha
R = lps.base_ring().base_ring() # Qp
QpT , T = PowerSeriesRing(R, 'T', prec).objgen()
Gli = []
Hli = []
for n in range(lps.prec()):
v = self._poly(lps[n])
Gli.append(v[0])
Hli.append(v[1])
G = QpT(Gli, prec)
H = QpT(Hli, prec)
# now compute phi
phi = matrix.matrix([[0,-1/p],[1,E.ap(p)/p]])
lpv = vector([G + (E.ap(p))*H , - R(p) * H ]) # this is L_p
eps = (1-phi)**(-2)
resu = lpv*eps.transpose()
return resu
    @rename_keyword(deprecation=6094, method="algorithm")
    def frobenius(self, prec=20, algorithm = "mw"):
        r"""
        Return a geometric Frobenius `\varphi` on the Dieudonné module `D_p(E)`
        with respect to the basis `\omega`, the invariant differential, and `\eta=x\omega`.
        It satisfies `\varphi^2 - a_p/p\, \varphi + 1/p = 0`.
        INPUT:
        - ``prec`` - (default: 20) a positive integer
        - ``algorithm`` - either 'mw' (default) for Monsky-Washnitzer
          or 'approx' for the algorithm described by Bernardi and Perrin-Riou
          (much slower and not fully tested)
        EXAMPLES::
            sage: E = EllipticCurve('14a')
            sage: L = E.padic_lseries(5)
            sage: phi = L.frobenius(5)
            sage: phi
            [                  2 + 5^2 + 5^4 + O(5^5) 3*5^-1 + 3 + 5 + 4*5^2 + 5^3 + O(5^4)]
            [      3 + 3*5^2 + 4*5^3 + 3*5^4 + O(5^5) 3 + 4*5 + 3*5^2 + 4*5^3 + 3*5^4 + O(5^5)]
            sage: -phi^2
            [5^-1 + O(5^4)        O(5^4)]
            [       O(5^5) 5^-1 + O(5^4)]
        """
        E = self._E
        p = self._p
        if algorithm != "mw" and algorithm !="approx":
            raise ValueError("Unknown algorithm %s."%algorithm)
        if algorithm == "approx":
            return self.__phi_bpr(prec=prec)
        if p < 4 and algorithm == "mw":
            print("Warning: If this fails try again using algorithm=\"approx\"")
        Ew = E.integral_short_weierstrass_model()
        adjusted_prec = sage.schemes.hyperelliptic_curves.monsky_washnitzer.adjusted_prec(p, prec)
        modprecring = Integers(p**adjusted_prec)
        output_ring = Qp(p, prec)
        R, x = PolynomialRing(modprecring, 'x').objgen()
        # short Weierstrass cubic y^2 = Q(x) used by the Monsky-Washnitzer code
        Q = x**3 + modprecring(Ew.a4()) * x + modprecring(Ew.a6())
        trace = Ew.ap(p)
        fr = sage.schemes.hyperelliptic_curves.monsky_washnitzer.matrix_of_frobenius(Q, p, adjusted_prec, trace)
        fr = matrix.matrix(output_ring,2,2,fr)
        # return a vector for PARI's ellchangecurve to pass from e1 to e2
        def isom(e1,e2):
            if not e1.is_isomorphic(e2):
                raise ValueError("Curves must be isomorphic.")
            usq = (e1.discriminant()/e2.discriminant()).nth_root(6)
            u = usq.sqrt()
            s = (u *  e2.a1() - e1.a1() )/ZZ(2)
            r = (usq * e2.a2() - e1.a2() + s**2 + e1.a1()*s)/ZZ(3)
            t = (u**3 * e2.a3() - e1.a3() - e1.a1()*r)/ZZ(2)
            return [u,r,s,t]
        v = isom(E,Ew)
        u = v[0]
        r = v[1]
        # change basis back from Ew to E (only u and r affect omega, eta)
        A = matrix.matrix([[u,-r/u],[0,1/u]])
        frn = A * fr * A**(-1)
        # divide by p to pass from the crystalline to the geometric Frobenius
        return 1/p*frn
    def __phi_bpr(self, prec=0):
        r"""
        This returns a geometric Frobenius `\varphi` on the Dieudonné module `D_p(E)`
        with respect to the basis `\omega`, the invariant differential, and `\eta=x\omega`.
        It satisfies  `\varphi^2 - a_p/p\, \varphi + 1/p = 0`.
        The algorithm used here is described in bernardi-perrin-riou on page 232.
        .. WARNING::
            This function has not been sufficiently tested. It is very slow.
        EXAMPLES::
            sage: E = EllipticCurve('11a1')
            sage: lp = E.padic_lseries(19)
            sage: lp.frobenius(prec=1,algorithm="approx")   #indirect doctest
            [          O(19^0) 4*19^-1 + O(19^0)]
            [       14 + O(19)           O(19^0)]
            sage: E = EllipticCurve('17a1')
            sage: lp = E.padic_lseries(3)
            sage: lp.frobenius(prec=3,algorithm="approx")
            [             O(3) 2*3^-1 + 2 + O(3)]
            [       1 + O(3^2)              O(3)]
            sage: lp.frobenius(prec=5,algorithm="approx")
            [             3 + O(3^2) 2*3^-1 + 2 + 3 + O(3^2)]
            [     1 + 2*3^2 + O(3^3)            2*3 + O(3^2)]
        """
        E = self._E
        p = self._p
        if prec > 10:
            print("Warning: Very large value for the precision.")
        if prec == 0:
            prec = floor((log(10000)/log(p)))
            verbose("prec set to %s"%prec)
        eh = E.formal()
        om = eh.differential(prec = p**prec+3)
        verbose("differential computed")
        xt = eh.x(prec=p**prec + 3)
        et = xt*om
        # c_(p^k) = cs[k] d...
        # coefficients of t^(p^k - 1) in the expansions of omega and eta
        cs = [om[p**k-1] for k in range(prec + 1)]
        ds = [et[p**k-1] for k in range(prec + 1)]
        # delta and gamma approximate the first column of phi; dpr and dga
        # track the number of p-adic digits to which they are known
        delta = 0
        dpr = 0
        gamma = 0
        dga = 0
        for k in range(1,prec+1):
            # this is the equation eq[0]*x+eq[1]*y+eq[2] == 0
            # such that delta_ = delta + d^dpr*x ...
            eq = [(p**dpr*cs[k]) % p**k,(-p**dga*ds[k]) % p**k , (delta*cs[k]-gamma*ds[k]-cs[k-1]) % p**k ]
            verbose("valuations : %s"%([x.valuation(p) for x in eq]))
            v = min([x.valuation(p) for x in eq])
            if v == infinity:
                verbose("no new information at step k=%s"%k)
            else:
                # divide out the common power of p before solving
                eq = [ZZ(x/p**v) for x in eq]
                verbose("renormalised eq mod p^%s is now %s"%(k-v,eq))
                if eq[0].valuation(p) == 0:
                    l = min(eq[1].valuation(p),k-v)
                    if l == 0:
                        verbose("not uniquely determined at step k=%s"%k)
                    else:
                        # solve for the next l digits of delta
                        ainv = eq[0].inverse_mod(p**l)
                        delta = delta - eq[2]*ainv*p**dpr
                        dpr = dpr + l
                        delta = delta % p**dpr
                        verbose("delta_prec increased to %s\n delta is now %s"%(dpr,delta))
                elif eq[1].valuation(p) == 0:
                    # solve for the next l digits of gamma instead
                    l = min(eq[0].valuation(p),k-v)
                    ainv = eq[1].inverse_mod(p**l)
                    gamma = gamma - eq[2]*ainv*p**dga
                    dga = dga + l
                    gamma = gamma % p**dga
                    verbose("gamma_prec increased to %s\n gamma is now %s"%(dga,gamma))
                else:
                    raise RuntimeError("Bug: no delta or gamma can exist")
        # end of approximation of delta and gamma
        R = Qp(p,max(dpr,dga)+1)
        delta = R(delta,absprec=dpr)
        gamma = R(gamma,absprec=dga)
        verbose("result delta = %s\n gamma = %s\n check : %s"%(delta,gamma, [Qp(p,k)(delta * cs[k] - gamma * ds[k] - cs[k-1]) for k in range(1,prec+1)] ))
        # assemble phi from delta, gamma and the relations
        # trace(phi) = a_p/p * p = a_p - ... and det considerations:
        a = delta
        c = -gamma
        d = E.ap(p) - a
        b = (-1/p+a*d)/c
        phi = matrix.matrix([[a,b],[c,d]])
        return phi
    def bernardi_sigma_function(self, prec=20):
        r"""
        Return the  `p`-adic sigma function of Bernardi in terms of `z = log(t)`.
        This is the same as ``padic_sigma`` with ``E2 = 0``.
        EXAMPLES::
            sage: E = EllipticCurve('14a')
            sage: L = E.padic_lseries(5)
            sage: L.bernardi_sigma_function(prec=5) # Todo: some sort of consistency check!?
            z + 1/24*z^3 + 29/384*z^5 - 8399/322560*z^7 - 291743/92897280*z^9 + O(z^10)
        """
        E = self._E
        Eh = E.formal()
        lo = Eh.log(prec + 5)
        # F = inverse of the formal logarithm, i.e. t as a series in z
        F = lo.reverse()
        S = LaurentSeriesRing(QQ,'z')
        z = S.gen()
        F = F(z)
        xofF = Eh.x(prec + 2)(F)
        #r =  ( E.a1()**2 + 4*E.a2() ) / ZZ(12)
        # sigma solves sigma'/sigma = second integral of (1/z^2 - x(z))
        g = (1/z**2 - xofF ).power_series()
        h = g.integral().integral()
        sigma_of_z = z.power_series() * h.exp()
        return sigma_of_z
    def Dp_valued_height(self,prec=20):
        r"""
        Return the canonical `p`-adic height with values in the Dieudonné module `D_p(E)`.
        It is defined to be
        `h_{\eta} \cdot \omega - h_{\omega} \cdot \eta`
        where `h_{\eta}` is made out of the sigma function of Bernardi and
        `h_{\omega}` is `log_E^2`.
        The answer ``v`` is given as ``v[1]*omega + v[2]*eta``.
        The coordinates of ``v`` are dependent of the
        Weierstrass equation.
        EXAMPLES::
            sage: E = EllipticCurve('53a')
            sage: L = E.padic_lseries(5)
            sage: h = L.Dp_valued_height(7)
            sage: h(E.gens()[0])
            (3*5 + 5^2 + 2*5^3 + 3*5^4 + 4*5^5 + 5^6 + 5^7 + O(5^8), 5^2 + 4*5^4 + 2*5^7 + 3*5^8 + O(5^9))
        """
        E = self._E
        p = self._p
        Ehat = E.formal()
        elog = Ehat.log(prec + Integer(3))
        # we will have to do it properly with David Harvey's _multiply_point()
        # n*P lies in the formal group for every point P, by construction:
        n = arith.LCM(E.tamagawa_numbers())
        n = arith.LCM(n, E.Np(p)) # allowed here because E has good reduction at p
        def height(P,check=True):
            # torsion points have height zero
            if P.is_finite_order():
                return Qp(p,prec)(0)
            if check:
                assert P.curve() == E, 'the point P must lie on the curve from which the height function was created'
            Q = n * P
            # t = -x/y is the formal-group parameter of Q
            tt = - Q[0]/Q[1]
            R = Qp(p,prec+5)
            tt = R(tt)
            zz = elog(tt)
            # h_omega component: -log^2, rescaled back from Q = n*P
            homega = -zz**2/n**2
            eQ = denominator(Q[1])/denominator(Q[0])
            si = self.bernardi_sigma_function(prec=prec+4)
            # h_eta component built from Bernardi's sigma function
            heta =  2 * log(si(zz)/eQ) / n**2
            R = Qp(p,prec)
            return vector([-R(heta),R(homega)])
        return height
    def Dp_valued_regulator(self, prec=20, v1=0, v2=0):
        r"""
        Return the canonical `p`-adic regulator with values in the Dieudonné module `D_p(E)`
        as defined by Perrin-Riou using the `p`-adic height with values in `D_p(E)`.
        The result is written in the basis `\omega`, `\varphi(\omega)`, and hence the
        coordinates of the result are independent of the chosen Weierstrass equation.
        .. NOTE::
            The definition here is corrected with respect to
            Perrin-Riou's article [PR]_. See [SW]_.
        REFERENCES:
        .. [PR] Perrin-Riou, *Arithmétique des courbes elliptiques à
           réduction supersingulière en `p`*,
           Experiment. Math. 12 (2003), no. 2, 155-186.
        EXAMPLES::
            sage: E = EllipticCurve('43a')
            sage: L = E.padic_lseries(7)
            sage: L.Dp_valued_regulator(7)
            (5*7 + 6*7^2 + 4*7^3 + 4*7^4 + 7^5 + 4*7^7 + O(7^8), 4*7^2 + 2*7^3 + 3*7^4 + 7^5 + 6*7^6 + 4*7^7 + O(7^8))
        """
        p = self._p
        E = self._E
        h = self.Dp_valued_height(prec=prec)
        # this is the height_{v} (P) for a v in D_p
        def hv(vec,P):
            hP = h(P)
            return - vec[0]*hP[1] +vec[1]*hP[0]
        #    def hvpairing(vec,P,Q):
        #        return (hv(vec, P+Q) - hv(vec,P)-hv(vec,Q))/2
        K = Qp(p, prec)
        if v1 == 0 and v2 == 0:
            v1 = vector([K(0), K(1)])  # that is eta
            v2 = vector([K(-1), K(1)])  # and this is eta-omega.
            # the rest should not depend on this choice
            # as long as it is outside Q_p * omega
        rk = E.rank()
        if rk == 0:
            # empty regulator determinant is 1 (in the omega-coordinate)
            return vector([K(1), K(0)])
        basis = E.gens()
        # regulator determinant of the height pairing attached to vec
        def regv(vec):
            M = matrix.matrix(K, rk, rk, 0)
            point_height = [hv(vec, P) for P in basis]
            for i in range(rk):
                for j in range(i+1, rk):
                    # polarisation identity: pairing from the quadratic form
                    M[i, j] = M[j, i] = (hv(vec,basis[i] + basis[j])- point_height[i] - point_height[j] )/2
            for i in range(rk):
                M[i, i] = point_height[i]
            return M.determinant()
        def Dp_pairing(vec1,vec2):
            return (vec1[0]*vec2[1]-vec1[1]*vec2[0])
        omega_vec = vector([K(1),K(0)])
        # note the correction here with respect to Perrin-Riou's definition.
        # only this way the result will be independent of the choice of v1 and v2.
        reg1 = regv(v1) / Dp_pairing(omega_vec, v1)**(rk - 1)
        reg2 = regv(v2) / Dp_pairing(omega_vec, v2)**(rk - 1)
        # the regulator in the basis omega,eta
        reg_oe = (reg1 * v2 - reg2 * v1 ) / Dp_pairing(v2, v1)
        if p < 5:
            # Monsky-Washnitzer is unreliable for tiny p; fall back to 'approx'
            phi = self.frobenius(min(6, prec), algorithm="approx")
        else:
            phi = self.frobenius(prec + 2, algorithm="mw")
        c = phi[1, 0]  # this is the 'period' [omega,phi(omega)]
        a = phi[0, 0]
        # change of basis from (omega, eta) to (omega, phi(omega))
        return vector([reg_oe[0] - a/c*reg_oe[1],reg_oe[1]/c])
| 37.429491 | 206 | 0.51783 |
self, '_modular_symbol_negative'):
self.__add_negative_space()
f = self._negative_modular_symbol
self.__measure_data[(n, prec, s)] = (p, alpha, z, w, f)
if quadratic_twist == 1:
if self._E.conductor() % p == 0:
return z * f(a/(p*w))
return z * ( f(a/(p*w)) - f(a/w) / alpha)
else:
D = quadratic_twist
if self.is_ordinary():
chip = kronecker_symbol(D,p)
else:
chip = 1 # alpha is +- sqrt(-p) anyway
if self._E.conductor() % p == 0:
mu = chip**n * z * sum([kronecker_symbol(D,u) * f(a/(p*w)+ZZ(u)/D) for u in range(1,D.abs())])
else:
mu = chip**n * z * sum([kronecker_symbol(D,u) *( f(a/(p*w)+ZZ(u)/D) - chip /alpha * f(a/w+ZZ(u)/D) ) for u in range(1,D.abs())])
return s*mu
def alpha(self, prec=20):
    r"""
    Return a `p`-adic root `\alpha` of the polynomial `x^2 - a_p x + p`
    with `\mathrm{ord}_p(\alpha) < 1`, to precision ``prec``.

    In the ordinary case this is the root that is a `p`-adic unit; in the
    supersingular case it is the generator of the (ramified) quadratic
    extension `\QQ_p(\alpha)`.  Results are cached per precision in
    ``self._alpha``.
    """
    try:
        return self._alpha[prec]
    except AttributeError:
        # first call: create the per-precision cache
        self._alpha = {}
    except KeyError:
        # not yet computed at this precision
        pass
    E = self._E
    p = self._p
    a_p = E.ap(p)
    K = Qp(p, prec, print_mode='series')

    if E.conductor() % p == 0:
        # bad (multiplicative) reduction at p: alpha is just a_p
        self._alpha[prec] = K(a_p)
        return K(a_p)

    R = ZZ['x']
    f = R([p, -a_p, 1])  # x^2 - a_p*x + p
    if E.is_ordinary(p):
        # pick the root of valuation < 1 among the p-adic factors
        G = f.factor_padic(p, prec + 5)
        for pr, e in G:
            a = -pr[0]
            if a.valuation() < 1:
                self._alpha[prec] = K(a)
                return K(a)
        raise RuntimeError("bug in p-adic L-function alpha")
    else: # supersingular case
        f = f.change_ring(K)
        A = K.extension(f, names="alpha")
        a = A.gen()
        self._alpha[prec] = a
        return a
def order_of_vanishing(self):
    r"""
    Return the order of vanishing at `T=0` of this `p`-adic L-series.

    The computation increases the approximation level `n` until the
    valuation of the series is proven; along the way it checks that the
    order equals the rank of `E(\QQ)` and raises ``RuntimeError`` on a
    contradiction.  Only implemented for ordinary primes of good
    reduction.  The result is cached in ``self.__ord``.
    """
    try:
        return self.__ord
    except AttributeError:
        pass

    if not self.is_ordinary():
        raise NotImplementedError
    E = self.elliptic_curve()
    if not E.is_good(self.prime()):
        raise ValueError("prime must be of good reduction")
    r = E.rank()
    n = 1
    while True:
        # refine the approximation until the valuation stabilises at r
        f = self.series(n)
        v = f.valuation()
        if v < n and v < r:
            raise RuntimeError("while computing p-adic order of vanishing, got a contradiction: the curve is %s, the curve has rank %s, but the p-adic L-series vanishes to order <= %s" % (E, r, v))
        if v == r:
            self.__ord = v
            return v
        n += 1
def teichmuller(self, prec):
    """Return integer lifts (mod p^prec) of the Teichmuller system of
    Q_p, prepended with 0 for the residue class of zero."""
    field = Qp(self._p, prec, print_mode='series')
    lifts = [Integer(0)]
    for unit in field.teichmuller_system():
        lifts.append(unit.residue(prec).lift())
    return lifts
def _e_bounds(self, n, prec):
    """Return the (non-increasing) list of valuation bounds
    [e_0, ..., e_{prec-1}] with e_0 = infinity and
    e_j = min_{1<=i<=j} v_p(binomial(p^n, i)).

    See trac 10280: the sequence must be decreasing.
    """
    p = self._p
    pn = p ** n
    running_min = infinity
    bounds = [running_min]
    for j in range(1, prec):
        running_min = min(running_min, valuation(binomial(pn, j), p))
        bounds.append(running_min)
    return bounds
def _get_series_from_cache(self, n, prec, D, eta):
try:
return self.__series[(n,prec,D,eta)]
except AttributeError:
self.__series = {}
except KeyError:
for _n, _prec, _D, _eta in self.__series:
if _n == n and _D == D and _eta == eta and _prec >= prec:
return self.__series[(_n,_prec,_D,_eta)].add_bigoh(prec)
return None
def _set_series_in_cache(self, n, prec, D, eta, f):
self.__series[(n, prec, D, eta)] = f
def _quotient_of_periods_to_twist(self, D):
    r"""
    Return the quotient of the real period of the quadratic twist of
    `E` by `D` and the real period of `E` itself (for `D < 0` the
    imaginary part of the complex period of the twist is used).

    The quotient is computed numerically and then rounded to the
    nearest multiple of `1/8`; per the comment below this is exact.
    """
    from sage.functions.all import sqrt
    # This function does not depend on p and could be moved out of this file but it is needed only here
    # Note that the number of real components does not change by twisting.
    if D == 1:
        return 1
    Et = self._E.quadratic_twist(D)
    if D > 1:
        qt = Et.period_lattice().basis()[0]/self._E.period_lattice().basis()[0]
        qt *= sqrt(qt.parent()(D))
    else:
        qt = Et.period_lattice().basis()[1].imag()/self._E.period_lattice().basis()[0]
        if Et.real_components() == 1:
            qt *= 2
        qt *= sqrt(qt.parent()(-D))
    verbose('the real approximation is %s'%qt)
    # we know from MTT that the result has a denominator 1
    return QQ(int(round(8*qt)))/8
class pAdicLseriesOrdinary(pAdicLseries):
    """The `p`-adic L-series of an elliptic curve at a prime `p` of
    good ordinary (or bad multiplicative) reduction.
    """

    def series(self, n=2, quadratic_twist=+1, prec=5, eta=0):
        r"""
        Return the `n`-th approximation of the `p`-adic L-series as a
        power series in `T` (corresponding to `\gamma - 1`, with
        `\gamma` a topological generator of `1 + p\ZZ_p`), truncated
        after ``prec`` coefficients.

        - ``quadratic_twist`` -- a fundamental discriminant of a
          quadratic field, coprime to `p`, by whose character the
          series is twisted
        - ``eta`` -- the power of the Teichmueller character to twist by

        Each coefficient is returned with its proven precision (via
        ``add_bigoh`` using the bounds from ``_prec_bounds``), and
        results are cached with ``_set_series_in_cache``.
        """
        n = ZZ(n)
        if n < 1:
            raise ValueError("n (=%s) must be a positive integer" % n)
        if self._p == 2 and n == 1:
            raise ValueError("n (=%s) must be a at least 2 if p is 2" % n)
        if prec < 1:
            raise ValueError("Insufficient precision (%s)" % prec)

        # check if the conditions on quadratic_twist are satisfied
        eta = ZZ(eta) % (self._p - 1)
        D = ZZ(quadratic_twist)
        if D != 1:
            if eta != 0: raise NotImplementedError("quadratic twists only implemented for the 0th Teichmueller component")
            if D % 4 == 0:
                d = D//4
                if not d.is_squarefree() or d % 4 == 1:
                    raise ValueError("quadratic_twist (=%s) must be a fundamental discriminant of a quadratic field"%D)
            else:
                if not D.is_squarefree() or D % 4 != 1:
                    raise ValueError("quadratic_twist (=%s) must be a fundamental discriminant of a quadratic field"%D)
            if gcd(D,self._p) != 1:
                raise ValueError("quadratic twist (=%s) must be coprime to p (=%s) "%(D,self._p))
            if gcd(D,self._E.conductor())!= 1:
                for ell in prime_divisors(D):
                    if valuation(self._E.conductor(),ell) > valuation(D,ell) :
                        raise ValueError("can not twist a curve of conductor (=%s) by the quadratic twist (=%s)."%(self._E.conductor(),D))

        p = self._p
        #verbose("computing L-series for p=%s, n=%s, and prec=%s"%(p,n,prec))

        if prec == 1:
            if eta == 0:
                # trac 15737: if we only ask for the leading term we don't
                # need to sum at all; the leading term has a closed form.
                K = Qp(p, 20, print_mode='series')
                R = PowerSeriesRing(K,'T',1)
                L = self.modular_symbol(0, sign=+1, quadratic_twist=D)
                chip = kronecker_symbol(D,p)
                if self._E.conductor() % p == 0:
                    L *= 1 - chip/self.alpha()
                else:
                    L *= (1-chip/self.alpha())**2
                L /= self._quotient_of_periods_to_twist(D)*self._E.real_components()
                L = R(L, 1)
                return L
            else:
                bounds = self._prec_bounds(n,prec)
                padic_prec = 20
        else:
            bounds = self._prec_bounds(n,prec)
            padic_prec = max(bounds[1:]) + 5

        verbose("using p-adic precision of %s"%padic_prec)
        if p == 2:
            res_series_prec = min(p**(n-2), prec)
        else:
            res_series_prec = min(p**(n-1), prec)
        verbose("using series precision of %s"%res_series_prec)

        ans = self._get_series_from_cache(n, res_series_prec,D,eta)
        if not ans is None:
            verbose("found series in cache")
            return ans

        # sum the Riemann sums over QQ first; coerce into Qp at the end
        K = QQ
        R = PowerSeriesRing(K,'T',res_series_prec)
        T = R(R.gen(),res_series_prec )
        L = R(0)
        one_plus_T_factor = R(1)
        gamma_power = K(1)
        teich = self.teichmuller(padic_prec)
        if p == 2:
            teich = [0, 1,-1]
            gamma = K(5)
            p_power = 2**(n-2)
            a_range = 3
        else:
            teich = self.teichmuller(padic_prec)
            gamma = K(1+ p)
            p_power = p**(n-1)
            a_range = p
        si = 1-2*(eta % 2)

        verbose("Now iterating over %s summands"%((p-1)*p_power))
        verbose_level = get_verbose()
        count_verb = 0
        for j in range(p_power):
            s = K(0)
            if verbose_level >= 2 and j/p_power*100 > count_verb + 3:
                verbose("%.2f percent done"%(float(j)/p_power*100))
                count_verb += 3
            for a in range(1,a_range):
                b = teich[a] * gamma_power
                s += teich[a]**eta * self.measure(b, n, padic_prec, quadratic_twist=D, sign=si).lift()
            L += s * one_plus_T_factor
            one_plus_T_factor *= 1+T
            gamma_power *= gamma
        verbose("the series before adjusting the precision is %s"%L)

        # now re-create the series with each coefficient truncated to its
        # proven precision
        K = Qp(p, padic_prec, print_mode='series')
        R = PowerSeriesRing(K,'T',res_series_prec)
        L = R(L,res_series_prec)
        aj = L.list()
        if len(aj) > 0:
            aj = [aj[0].add_bigoh(padic_prec-2)] + \
                 [aj[j].add_bigoh(bounds[j]) for j in range(1,len(aj))]
            L = R(aj,res_series_prec )
        L /= self._quotient_of_periods_to_twist(D)*self._E.real_components()

        self._set_series_in_cache(n, res_series_prec, D, eta, L)
        return L

    power_series = series  # alias

    def is_ordinary(self):
        """Return True: this L-series is attached to an ordinary prime."""
        return True

    def is_supersingular(self):
        """Return False: this L-series is attached to an ordinary prime."""
        return False

    def _c_bound(self):
        # Bound on the p-valuation of the denominators of the modular
        # symbols: 0 if the mod-p Galois representation is irreducible,
        # otherwise the maximum over the known cusps.  Cached.
        try:
            return self.__c_bound
        except AttributeError:
            pass
        E = self._E
        p = self._p
        if E.galois_representation().is_irreducible(p):
            ans = 0
        else:
            m = E.modular_symbol_space(sign=1)
            b = m.boundary_map().codomain()
            C = b._known_cusps()
            ans = max([valuation(self.modular_symbol(a).denominator(), p)
                       for a in C])
        self.__c_bound = ans
        return ans

    def _prec_bounds(self, n, prec):
        # Proven p-adic precision of each coefficient: the valuation
        # bound from _e_bounds minus the denominator bound _c_bound.
        if self._p == 2:
            e = self._e_bounds(n - 2, prec)
        else:
            e = self._e_bounds(n - 1, prec)
        c = self._c_bound()
        return [e[j] - c for j in range(len(e))]
class pAdicLseriesSupersingular(pAdicLseries):
    r"""The `p`-adic L-series of an elliptic curve at a prime `p` of
    supersingular reduction.  Coefficients live in the quadratic
    extension `\QQ_p(\alpha)`, where `\alpha^2 - a_p\alpha + p = 0`.
    """

    def series(self, n=3, quadratic_twist=+1, prec=5, eta=0):
        r"""
        Return the `n`-th approximation of the `p`-adic L-series as a
        power series in `T` with coefficients in `\QQ_p(\alpha)`,
        truncated after ``prec`` coefficients.  See the ordinary case
        for the meaning of ``quadratic_twist`` and ``eta``.
        """
        n = ZZ(n)
        if n < 1:
            raise ValueError("n (=%s) must be a positive integer" % n)
        if self._p == 2 and n == 1:
            raise ValueError("n (=%s) must be at least 2 when p=2" % n)
        if prec < 1:
            raise ValueError("Insufficient precision (%s)" % prec)

        # check if the conditions on quadratic_twist are satisfied
        D = ZZ(quadratic_twist)
        if D != 1:
            if eta != 0: raise NotImplementedError("quadratic twists only implemented for the 0th Teichmueller component")
            if D % 4 == 0:
                d = D//4
                if not d.is_squarefree() or d % 4 == 1:
                    raise ValueError("quadratic_twist (=%s) must be a fundamental discriminant of a quadratic field"%D)
            else:
                if not D.is_squarefree() or D % 4 != 1:
                    raise ValueError("quadratic_twist (=%s) must be a fundamental discriminant of a quadratic field" % D)
            if gcd(D,self._E.conductor()) != 1:
                for ell in prime_divisors(D):
                    if valuation(self._E.conductor(), ell) > valuation(D,ell) :
                        raise ValueError("can not twist a curve of conductor (=%s) by the quadratic twist (=%s)." % (self._E.conductor(), D))

        p = self._p
        eta = ZZ(eta) % (p - 1)

        if prec == 1:
            if eta == 0:
                # need to do any sum as L_p(E,0) = (1-1/alpha)^2 * m(0) (good case)
                # set prec arbitrary to 20.
                alpha = self.alpha(prec=20)
                K = alpha.parent()
                R = PowerSeriesRing(K,'T',1)
                L = self.modular_symbol(0, sign=+1, quadratic_twist=D)
                L *= (1-1/self.alpha())**2
                L /= self._quotient_of_periods_to_twist(D)*self._E.real_components()
                L = R(L, 1)
                return L
            else:
                # here we need some sums anyway
                bounds = self._prec_bounds(n,prec)
                alphaadic_prec = 20
        else:
            prec = min(p**(n-1), prec)
            bounds = self._prec_bounds(n,prec)
            alphaadic_prec = max(bounds[1:]) + 5

        padic_prec = alphaadic_prec//2+1
        verbose("using alpha-adic precision of %s"%padic_prec)

        ans = self._get_series_from_cache(n, prec, quadratic_twist,eta)
        if not ans is None:
            verbose("found series in cache")
            return ans

        alpha = self.alpha(prec=padic_prec)
        K = alpha.parent()
        R = PowerSeriesRing(K,'T',prec)
        T = R(R.gen(), prec)
        L = R(0)
        one_plus_T_factor = R(1)
        gamma_power = 1
        teich = self.teichmuller(padic_prec)
        if p == 2:
            teich = [0, 1,-1]
            gamma = 5
            p_power = 2**(n-2)
            a_range = 3
        else:
            teich = self.teichmuller(padic_prec)
            gamma = 1+ p
            p_power = p**(n-1)
            a_range = p
        si = 1-2*(eta % 2)

        verbose("Now iterating over %s summands"%((p-1)*p_power))
        verbose_level = get_verbose()
        count_verb = 0
        for j in range(p_power):
            s = K(0)
            if verbose_level >= 2 and j/p_power*100 > count_verb + 3:
                verbose("%.2f percent done"%(float(j)/p_power*100))
                count_verb += 3
            for a in range(1,a_range):
                b = teich[a] * gamma_power
                s += teich[a]**eta * self.measure(b, n, padic_prec, quadratic_twist=D, sign=si)
            L += s * one_plus_T_factor
            one_plus_T_factor *= 1+T
            gamma_power *= gamma

        # Now create series but with each coefficient truncated
        # so it is proven correct:
        # the coefficients are now treated as alpha-adic numbers (trac 20254)
        L = R(L,prec)
        aj = L.list()
        if len(aj) > 0:
            bj = [aj[0].add_bigoh(2*(padic_prec-2))]
            j = 1
            while j < len(aj):
                bj.append( aj[j].add_bigoh(bounds[j]) )
                j += 1
            L = R(bj, prec)
        L /= self._quotient_of_periods_to_twist(D)*self._E.real_components()

        self._set_series_in_cache(n, prec, quadratic_twist, eta, L)
        return L

    power_series = series  # alias

    def is_ordinary(self):
        """Return False: this L-series is attached to a supersingular prime."""
        return False

    def is_supersingular(self):
        """Return True: this L-series is attached to a supersingular prime."""
        return True

    def _prec_bounds(self, n, prec):
        # Proven alpha-adic precision of each coefficient.  The constant
        # term is exact (infinity); v(alpha) = 1/2 doubles the e-bounds.
        if self._p == 2:
            e = self._e_bounds(n - 2, prec)
        else:
            e = self._e_bounds(n - 1, prec)
        c0 = ZZ(n + 2)
        return [infinity] + [2 * e[j] - c0 for j in range(1, len(e))]

    def _poly(self, a):
        # Return [v0, v1] such that a == v0 + v1*alpha, with v0, v1 in
        # the base ring of the parent of a.
        # this should be implemented in elements of Eisenstein rings at some point trac 20248
        if a.is_zero():
            return [0,0]
        v, k = a._ntl_rep_abs()
        K = a.base_ring()
        pi = K.uniformiser()
        v0 = K(v[0]._sage_()) * pi**k
        v1 = K(v[1]._sage_()) * pi**k
        alpha = a.parent().gen()
        assert v0 + v1*alpha == a
        return [ v0, v1 ]

    def Dp_valued_series(self, n=3, quadratic_twist=+1, prec=5):
        r"""
        Return the `D_p(E)`-valued L-series, obtained by splitting the
        alpha-adic series as `G + H\cdot\alpha` and applying
        `(1-\varphi)^{-2}` for the Frobenius `\varphi` on `D_p(E)`.
        """
        E = self._E
        p = self._p
        lps = self.series(n, quadratic_twist=quadratic_twist, prec=prec)

        # now split up the series in two lps = G + H * alpha
        R = lps.base_ring().base_ring() # Qp
        QpT , T = PowerSeriesRing(R, 'T', prec).objgen()
        Gli = []
        Hli = []
        for n in range(lps.prec()):
            v = self._poly(lps[n])
            Gli.append(v[0])
            Hli.append(v[1])
        G = QpT(Gli, prec)
        H = QpT(Hli, prec)

        # now compute phi
        phi = matrix.matrix([[0,-1/p],[1,E.ap(p)/p]])
        lpv = vector([G + (E.ap(p))*H , - R(p) * H ]) # this is L_p
        eps = (1-phi)**(-2)
        resu = lpv*eps.transpose()
        return resu

    @rename_keyword(deprecation=6094, method="algorithm")
    def frobenius(self, prec=20, algorithm = "mw"):
        r"""
        Return a geometric Frobenius `\varphi` on the Dieudonné module
        `D_p(E)` as a `2\times 2` matrix.  ``algorithm`` is either
        ``"mw"`` (Monsky-Washnitzer, via the hyperelliptic curves code)
        or ``"approx"`` (the approximation algorithm of
        Bernardi-Perrin-Riou, see ``__phi_bpr``).
        """
        E = self._E
        p = self._p
        if algorithm != "mw" and algorithm !="approx":
            raise ValueError("Unknown algorithm %s."%algorithm)
        if algorithm == "approx":
            return self.__phi_bpr(prec=prec)
        if p < 4 and algorithm == "mw":
            print("Warning: If this fails try again using algorithm=\"approx\"")
        Ew = E.integral_short_weierstrass_model()
        adjusted_prec = sage.schemes.hyperelliptic_curves.monsky_washnitzer.adjusted_prec(p, prec)
        modprecring = Integers(p**adjusted_prec)
        output_ring = Qp(p, prec)
        R, x = PolynomialRing(modprecring, 'x').objgen()
        Q = x**3 + modprecring(Ew.a4()) * x + modprecring(Ew.a6())
        trace = Ew.ap(p)
        fr = sage.schemes.hyperelliptic_curves.monsky_washnitzer.matrix_of_frobenius(Q, p, adjusted_prec, trace)
        fr = matrix.matrix(output_ring,2,2,fr)

        # return a vector for PARI's ellchangecurve to pass from e1 to e2
        def isom(e1,e2):
            if not e1.is_isomorphic(e2):
                raise ValueError("Curves must be isomorphic.")
            usq = (e1.discriminant()/e2.discriminant()).nth_root(6)
            u = usq.sqrt()
            s = (u * e2.a1() - e1.a1() )/ZZ(2)
            r = (usq * e2.a2() - e1.a2() + s**2 + e1.a1()*s)/ZZ(3)
            t = (u**3 * e2.a3() - e1.a3() - e1.a1()*r)/ZZ(2)
            return [u,r,s,t]

        v = isom(E,Ew)
        u = v[0]
        r = v[1]

        # change basis back from the short model to the original equation
        A = matrix.matrix([[u,-r/u],[0,1/u]])
        frn = A * fr * A**(-1)
        return 1/p*frn

    def __phi_bpr(self, prec=0):
        # Approximation to the Frobenius matrix following Bernardi and
        # Perrin-Riou: solve congruences mod p^k between coefficients of
        # the formal differentials omega and eta = x*omega to pin down
        # delta and gamma, then assemble the matrix from the trace a_p
        # and determinant 1/p relations.
        E = self._E
        p = self._p
        if prec > 10:
            print("Warning: Very large value for the precision.")
        if prec == 0:
            prec = floor((log(10000)/log(p)))
            verbose("prec set to %s"%prec)
        eh = E.formal()
        om = eh.differential(prec = p**prec+3)
        verbose("differential computed")
        xt = eh.x(prec=p**prec + 3)
        et = xt*om
        # c_k = coefficient of t^(p^k-1) in omega, d_k likewise for eta
        cs = [om[p**k-1] for k in range(prec + 1)]
        ds = [et[p**k-1] for k in range(prec + 1)]
        delta = 0
        dpr = 0  # p-adic precision of delta obtained so far
        gamma = 0
        dga = 0  # p-adic precision of gamma obtained so far
        for k in range(1,prec+1):
            # this is the equation eq[0]*x+eq[1]*y+eq[2] == 0 modulo p^(k-v)
            eq = [(p**dpr*cs[k]) % p**k,(-p**dga*ds[k]) % p**k , (delta*cs[k]-gamma*ds[k]-cs[k-1]) % p**k ]
            verbose("valuations : %s"%([x.valuation(p) for x in eq]))
            v = min([x.valuation(p) for x in eq])
            if v == infinity:
                verbose("no new information at step k=%s"%k)
            else:
                eq = [ZZ(x/p**v) for x in eq]
                verbose("renormalised eq mod p^%s is now %s"%(k-v,eq))
                if eq[0].valuation(p) == 0:
                    l = min(eq[1].valuation(p),k-v)
                    if l == 0:
                        verbose("not uniquely determined at step k=%s"%k)
                    else:
                        ainv = eq[0].inverse_mod(p**l)
                        delta = delta - eq[2]*ainv*p**dpr
                        dpr = dpr + l
                        delta = delta % p**dpr
                        verbose("delta_prec increased to %s\n delta is now %s"%(dpr,delta))
                elif eq[1].valuation(p) == 0:
                    l = min(eq[0].valuation(p),k-v)
                    ainv = eq[1].inverse_mod(p**l)
                    gamma = gamma - eq[2]*ainv*p**dga
                    dga = dga + l
                    gamma = gamma % p**dga
                    verbose("gamma_prec increased to %s\n gamma is now %s"%(dga,gamma))
                else:
                    raise RuntimeError("Bug: no delta or gamma can exist")
        R = Qp(p,max(dpr,dga)+1)
        delta = R(delta,absprec=dpr)
        gamma = R(gamma,absprec=dga)
        verbose("result delta = %s\n gamma = %s\n check : %s"%(delta,gamma, [Qp(p,k)(delta * cs[k] - gamma * ds[k] - cs[k-1]) for k in range(1,prec+1)] ))
        # assemble the matrix: trace = a_p, determinant = 1/p
        a = delta
        c = -gamma
        d = E.ap(p) - a
        b = (-1/p+a*d)/c
        phi = matrix.matrix([[a,b],[c,d]])
        return phi

    def bernardi_sigma_function(self, prec=20):
        r"""
        Return Bernardi's `p`-adic sigma function as a power series in
        `z`, built from the formal group logarithm of `E`.
        """
        E = self._E
        Eh = E.formal()
        lo = Eh.log(prec + 5)
        F = lo.reverse()

        S = LaurentSeriesRing(QQ,'z')
        z = S.gen()
        F = F(z)
        xofF = Eh.x(prec + 2)(F)
        g = (1/z**2 - xofF ).power_series()
        h = g.integral().integral()
        sigma_of_z = z.power_series() * h.exp()

        return sigma_of_z

    def Dp_valued_height(self,prec=20):
        r"""
        Return the canonical `p`-adic height with values in the
        Dieudonné module `D_p(E)` as a function on points.  The answer
        ``v`` is given as ``v[1]*omega + v[2]*eta``; the coordinates
        depend on the chosen Weierstrass equation.
        """
        E = self._E
        p = self._p
        Ehat = E.formal()
        elog = Ehat.log(prec + Integer(3))
        # multiply points into the formal group first
        n = arith.LCM(E.tamagawa_numbers())
        n = arith.LCM(n, E.Np(p)) # allowed here because E has good reduction at p

        def height(P,check=True):
            if P.is_finite_order():
                return Qp(p,prec)(0)
            if check:
                assert P.curve() == E, 'the point P must lie on the curve from which the height function was created'
            Q = n * P
            tt = - Q[0]/Q[1]
            R = Qp(p,prec+5)
            tt = R(tt)
            zz = elog(tt)

            homega = -zz**2/n**2

            eQ = denominator(Q[1])/denominator(Q[0])
            si = self.bernardi_sigma_function(prec=prec+4)
            heta = 2 * log(si(zz)/eQ) / n**2

            R = Qp(p,prec)
            return vector([-R(heta),R(homega)])

        return height

    def Dp_valued_regulator(self, prec=20, v1=0, v2=0):
        r"""
        Return the canonical `p`-adic regulator with values in
        `D_p(E)`, written in the basis `\omega`, `\varphi(\omega)`
        (hence independent of the chosen Weierstrass equation).
        """
        p = self._p
        E = self._E
        h = self.Dp_valued_height(prec=prec)

        # this is the height_{v} (P) for a v in D_p
        def hv(vec,P):
            hP = h(P)
            return - vec[0]*hP[1] +vec[1]*hP[0]

        # def hvpairing(vec,P,Q):
        #     return (hv(vec, P+Q) - hv(vec,P)-hv(vec,Q))/2
        K = Qp(p, prec)

        if v1 == 0 and v2 == 0:
            v1 = vector([K(0), K(1)])  # that is eta
            v2 = vector([K(-1), K(1)])  # and this is eta-omega.
            # the rest should not depend on this choice
            # as long as it is outside Q_p * omega

        rk = E.rank()
        if rk == 0:
            return vector([K(1), K(0)])

        basis = E.gens()

        def regv(vec):
            # Gram determinant of the height pairing <., vec> on the basis
            M = matrix.matrix(K, rk, rk, 0)
            point_height = [hv(vec, P) for P in basis]
            for i in range(rk):
                for j in range(i+1, rk):
                    M[i, j] = M[j, i] = (hv(vec,basis[i] + basis[j])- point_height[i] - point_height[j] )/2
            for i in range(rk):
                M[i, i] = point_height[i]
            return M.determinant()

        def Dp_pairing(vec1,vec2):
            # the alternating pairing on D_p(E) in the basis omega, eta
            return (vec1[0]*vec2[1]-vec1[1]*vec2[0])

        omega_vec = vector([K(1),K(0)])

        # note the correction here with respect to Perrin-Riou's definition.
        reg1 = regv(v1) / Dp_pairing(omega_vec, v1)**(rk - 1)
        reg2 = regv(v2) / Dp_pairing(omega_vec, v2)**(rk - 1)

        # the regulator in the basis omega, eta
        reg_oe = (reg1 * v2 - reg2 * v1 ) / Dp_pairing(v2, v1)

        if p < 5:
            phi = self.frobenius(min(6, prec), algorithm="approx")
        else:
            phi = self.frobenius(prec + 2, algorithm="mw")

        c = phi[1, 0]  # the 'period' [omega, phi(omega)]
        a = phi[0, 0]
        # change of basis from (omega, eta) to (omega, phi(omega))
        return vector([reg_oe[0] - a/c*reg_oe[1],reg_oe[1]/c])
| true | true |
f7116ca368bdcdd42239a17f140dbe141d3d445c | 2,085 | py | Python | codeMarkDown/MD_Link.py | atria-tools/monk | 4961457f4db5dfa98fc6001a289c24e460e5b025 | [
"Apache-2.0"
] | null | null | null | codeMarkDown/MD_Link.py | atria-tools/monk | 4961457f4db5dfa98fc6001a289c24e460e5b025 | [
"Apache-2.0"
] | 1 | 2015-03-22T12:37:18.000Z | 2015-03-22T12:37:18.000Z | codeMarkDown/MD_Link.py | HeeroYui/monk | 4961457f4db5dfa98fc6001a289c24e460e5b025 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import monkDebug as debug
import sys
import monkTools
import re
##
##
## @brief Transcode wiki-style links into HTML anchors:
##    [[http://your_site.com]]              => <a href="...">http://your_site.com</a>
##    [[http://your_site.com | displayed]]  => <a href="...">displayed</a>
##    [lib[name | displayed]]               => <a href="../name">displayed</a>
##    [doc[page | displayed]]               => <a href="page.html">displayed</a>
##    [tutorial[page | displayed]]          => <a href="tutorial_page.html">displayed</a>
##    [lib[name]] / [class[name]] / [methode[name]] => see replace_link_class()
##
## @param[in] value String to transform.
## @return Transformed string.
##
def transcode(value):
    # named link : [[http://plop.html | link name]]
    value = re.sub(r'\[\[http://(.*?) \| (.*?)\]\]',
                   r'<a href="http://\1">\2</a>',
                   value)
    # direct link : [[http://plop.html]]
    value = re.sub(r'\[\[http://(.*?)\]\]',
                   r'<a href="http://\1">http://\1</a>',
                   value)
    # named library link : [lib[libname | text]]
    value = re.sub(r'\[lib\[(.*?) \| (.*?)\]\]',
                   r'<a href="../\1">\2</a>',
                   value)
    # named documentation link : [doc[page | text]]
    value = re.sub(r'\[doc\[(.*?) \| (.*?)\]\]',
                   r'<a href="\1.html">\2</a>',
                   value)
    # named tutorial link : [tutorial[page | text]]
    value = re.sub(r'\[tutorial\[(.*?) \| (.*?)\]\]',
                   r'<a href="tutorial_\1.html">\2</a>',
                   value)
    # unnamed lib/class/methode link: delegate to the callback below
    value = re.sub(r'\[(lib|class|methode)\[(.*?)\]\]',
                   replace_link_class,
                   value)
    return value

##
## @brief re.sub() callback for [lib[..]], [class[..]] and [methode[..]] links.
##    [class[ns:Name]] => <a href="class_ns_Name.html">ns:Name</a>
##    [lib[name]]      => name
##    [methode[name]]  => name
##
## @param[in] match Match object: group(1) is the link kind, group(2) the target.
## @return Replacement HTML (class links) or plain text (everything else).
##
def replace_link_class(match):
    if match.group() == "":
        return ""
    kind, target = match.groups()
    if kind == 'class':
        # class pages are named class_<name>.html with ':' mapped to '_'
        page = re.sub(':', '_', target)
        return '<a href="class_' + page + '.html">' + target + '</a>'
    # 'lib', 'methode' (and any future kind) render as plain text
    return target
| 24.529412 | 67 | 0.490647 |
import monkDebug as debug
import sys
import monkTools
import re
value)
value = re.sub(r'\[lib\[(.*?) \| (.*?)\]\]',
r'<a href="../\1">\2</a>',
value)
value = re.sub(r'\[doc\[(.*?) \| (.*?)\]\]',
r'<a href="\1.html">\2</a>',
value)
value = re.sub(r'\[tutorial\[(.*?) \| (.*?)\]\]',
r'<a href="tutorial_\1.html">\2</a>',
value)
value = re.sub(r'\[(lib|class|methode)\[(.*?)\]\]',
replace_link_class,
value)
return value
def replace_link_class(match):
if match.group() == "":
return ""
if match.groups()[0] == 'class':
className = match.groups()[1]
value = re.sub(':', '_', className)
return '<a href="class_' + value + '.html">' + className + '</a>'
elif match.groups()[0] == 'lib':
return match.groups()[1]
elif match.groups()[0] == 'methode':
return match.groups()[1]
else:
return match.groups()[1]
| true | true |
f7116e8e419ef38b1a22603a973c53b8ef2a6045 | 9,085 | py | Python | karesansui/lib/parser/collectdplugin.py | Kairiw/karesansui | d5a3acfe40b3953fb4f8d6f51e30d0307309a6ee | [
"MIT"
] | null | null | null | karesansui/lib/parser/collectdplugin.py | Kairiw/karesansui | d5a3acfe40b3953fb4f8d6f51e30d0307309a6ee | [
"MIT"
] | null | null | null | karesansui/lib/parser/collectdplugin.py | Kairiw/karesansui | d5a3acfe40b3953fb4f8d6f51e30d0307309a6ee | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui Core.
#
# Copyright (C) 2009-2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
import re
import sys
import glob
from karesansui.lib.dict_op import DictOp
from karesansui.lib.parser.base.xml_like_conf_parser import xmlLikeConfParser as Parser
from karesansui.lib.utils import preprint_r, r_chgrp, r_chmod
from karesansui.lib.const import VENDOR_SYSCONF_DIR, \
COLLECTD_DATA_DIR, KARESANSUI_GROUP
"""
Define Variables for This Parser
"""
PARSER_COLLECTD_PLUGIN_DIR = "%s/collectd.d" % VENDOR_SYSCONF_DIR
class collectdpluginParser:
    """Reader/writer for the per-plugin collectd configuration files,
    one ``<plugin_name>.conf`` per plugin under
    ``PARSER_COLLECTD_PLUGIN_DIR``.

    Parsed data is held in a DictOp dictionary keyed by plugin name.
    """

    _module = "collectdplugin"

    def __init__(self):
        self.dop = DictOp()
        self.dop.addconf(self._module, {})
        self.parser = Parser()
        self.parser.set_delim("[ \t]+")
        self.parser.set_new_delim("\t")
        self.parser.set_comment("#")
        self.base_parser_name = self.parser.__class__.__name__
        # Reuse the option grammar (uni/multi/section options) of the
        # main collectd.conf parser so plugin files parse consistently.
        from karesansui.lib.parser.collectd import collectdParser
        collectdp = collectdParser()
        self.parser.set_opt_uni(collectdp.opt_uni)
        self.parser.set_opt_multi(collectdp.opt_multi)
        self.parser.set_opt_sect(collectdp.opt_sect)

    def set_footer(self, footer=""):
        """Set the footer text appended to generated configuration files."""
        self.parser.set_footer(footer)

    def source_file(self):
        """Return the list of ``*.conf`` files found in the plugin directory."""
        glob_str = "%s/*.conf" % (PARSER_COLLECTD_PLUGIN_DIR,)
        return [_afile for _afile in glob.glob(glob_str)]

    def read_conf(self, extra_args=None):
        """Parse every plugin configuration file and return the aggregated
        dictionary (keyed by plugin name, plus '@BASE_PARSER').

        ``extra_args`` may carry an 'include' regexp; only plugin names
        matching it are read.
        """
        for _afile in self.source_file():
            # bug fix: raw string so "\." is a literal dot and does not
            # trigger invalid-escape warnings on modern Python
            plugin_name = re.sub(r"\.conf$", "", os.path.basename(_afile))
            try:
                extra_args['include']
                if not re.search(extra_args['include'], plugin_name):
                    continue
            except Exception:
                # no (or unusable) include filter given -> read everything
                pass
            self.parser.set_source_file([_afile])
            conf_arr = self.parser.read_conf()
            try:
                self.dop.set(self._module, [plugin_name], conf_arr[_afile]['value'])
            except Exception:
                # best effort: skip files that failed to parse
                pass
        self.dop.set(self._module, ['@BASE_PARSER'], self.base_parser_name)
        #self.dop.preprint_r(self._module)
        return self.dop.getconf(self._module)

    def _pre_write_conf(self, conf_arr=None):
        """Normalize ``conf_arr`` before writing.

        Fixes the group ownership/permissions of COLLECTD_DATA_DIR (root
        only) and forces a deterministic option order inside the python
        plugin block (Encoding first ... Module last).
        """
        # bug fix: avoid a mutable default argument
        if conf_arr is None:
            conf_arr = {}
        # Change permission to be able to read/write data kss group.
        if os.path.exists(COLLECTD_DATA_DIR):
            if os.getuid() == 0:
                r_chgrp(COLLECTD_DATA_DIR, KARESANSUI_GROUP)
                r_chmod(COLLECTD_DATA_DIR, "g+rwx")
                r_chmod(COLLECTD_DATA_DIR, "o-rwx")

        dop = DictOp()
        dop.addconf("__", conf_arr)
        if dop.isset("__", ["python"]) is True:
            dop.cdp_unset("__", ["python", "Plugin", "python", "@ORDERS"], multiple_file=True)
            orders = [['Encoding'],
                      ['LogTraces'],
                      ['Interactive'],
                      ['ModulePath'],
                      ['Import'],
                      ['Module']]
            dop.cdp_set("__", ["python", "Plugin", "python", "@ORDERS"], orders,
                        is_opt_multi=True, multiple_file=True)

        return dop.getconf("__")

    def write_conf(self, conf_arr=None, extra_args=None, dryrun=False):
        """Write the per-plugin files back to disk and return True.

        An entry with ``{'action': 'delete'}`` removes its file; an entry
        carrying a ``'value'`` key is serialized to ``<plugin>.conf``.
        """
        # bug fix: avoid a mutable default argument
        if conf_arr is None:
            conf_arr = {}
        retval = True
        conf_arr = self._pre_write_conf(conf_arr)
        for plugin_name, _v in conf_arr.items():
            _afile = "%s/%s.conf" % (PARSER_COLLECTD_PLUGIN_DIR, plugin_name,)
            try:
                _v['action']
                if _v['action'] == "delete":
                    if os.path.exists(_afile):
                        os.unlink(_afile)
                    continue
            except Exception:
                # no explicit action -> fall through and try to write
                pass
            try:
                _v['value']
                self.dop.addconf("parser", {})
                self.dop.set("parser", [_afile], _v['value'])
                #self.dop.preprint_r("parser")
                arr = self.dop.getconf("parser")
                self.parser.write_conf(arr, dryrun=dryrun)
            except Exception:
                # entry without a value -> nothing to write
                pass
        return retval
"""
"""
if __name__ == '__main__':
    """Testing
    """
    parser = collectdpluginParser()

    # Load the current configuration
    dop = DictOp()
    dop.addconf("dum",parser.read_conf())

    new_plugin_name = "takuma"

    ##########################################################
    # Add "uni" options (options that can be set in only one place)
    ##########################################################
    # Add 'Foo foo' (add() method, list-style value mode)
    dop.add("dum",[new_plugin_name,"Foo"],["foo",[["comment foo1","comment foo2"],"comment foo3"]])

    # Add 'Bar bar' (cdp_set() method, string-style value mode)
    dop.cdp_set("dum",[new_plugin_name,"Bar"],"bar",multiple_file=True)
    dop.cdp_set_pre_comment("dum",[new_plugin_name,"Bar"],["","comment bar1","comment bar2"],multiple_file=True)
    dop.cdp_set_post_comment("dum",[new_plugin_name,"Bar"],"comment bar3",multiple_file=True)

    ##########################################################
    # Add "multi" options (options that may be set several times)
    ##########################################################
    # Add 'LoadPlugin target_hoge'
    dop.cdp_set("dum",[new_plugin_name,"LoadPlugin","target_hoge"],"target_hoge",multiple_file=True,is_opt_multi=True)
    dop.cdp_set_pre_comment("dum",[new_plugin_name,"LoadPlugin","target_hoge"],["","Dis is target_hoge"],multiple_file=True)

    ##########################################################
    # Add "sect" options (<bracket> directive options)
    ##########################################################
    # Add the following:
    # <Plugin "foobar">
    #   <View "hoge">
    #     SubOpt1 gege # post
    #   </View>
    #   Option2 false
    #   Option1 true
    # </Plugin>
    dop.cdp_set("dum",[new_plugin_name,"Plugin","foobar","Option1"],"true",multiple_file=True)
    dop.cdp_set("dum",[new_plugin_name,"Plugin","foobar","Option2"],"false",multiple_file=True)
    dop.cdp_set_pre_comment("dum",[new_plugin_name,"Plugin","foobar","Option2"],"pre comment",multiple_file=True)
    dop.cdp_set_post_comment("dum",[new_plugin_name,"Plugin","foobar","Option2"],"post comment",multiple_file=True)
    dop.cdp_set("dum",[new_plugin_name,"Plugin","foobar","View","hoge","SubOpt1"],"gege",multiple_file=True)
    dop.cdp_set_post_comment("dum",[new_plugin_name,"Plugin","foobar","View","hoge","SubOpt1"],"post",multiple_file=True)

    print(dop.get("dum",["filter","@ORDERS"],multiple_file=True))

    # For parsers that read multiple files, pass is_parent_parser=True.
    # Move '<Plugin foobar>' to the front
    key = [new_plugin_name,"Plugin","foobar"]
    dop.insert_order("dum",key,0,is_parent_parser=True)

    # Move 'LoadPlugin target_hoge' to the front => '<Plugin foobar>' becomes second
    key = [new_plugin_name,"LoadPlugin","target_hoge"]
    dop.insert_order("dum",key,0,is_parent_parser=True)

    # Move 'Foo foo' to the front => 'LoadPlugin target_hoge' becomes second
    key = [new_plugin_name,"Foo"]
    dop.insert_order("dum",key,0,is_parent_parser=True)

    # work completely
    #dop.cdp_comment("dum",["python","Plugin","python","Import"],multiple_file=True)
    #dop.cdp_comment("dum",["python","Plugin","python","Module","notification"],multiple_file=True)
    #dop.cdp_comment("dum",["python","Plugin","python","Module","notification","CountupDBPath"],multiple_file=True)
    #dop.cdp_set("dum",["python","Plugin","python","Module","notification","@ORDERS"],[['Environ'],['CountupDBPath']],multiple_file=True,is_opt_multi=True)

    # work completely, too.
    #dop.cdp_comment("dum",["python","Plugin","python","ModulePath"],multiple_file=True)

    # work completely, too. (but this is overwritten by _pre_write_conf() method)
    #dop.cdp_set("dum",["python","Plugin","python","@ORDERS"],[['ModulePath'],['Encoding']],multiple_file=True,is_opt_multi=True)

    #sys.exit()

    # Dump the resulting dictionary for inspection, then write (dry run)
    conf = dop.getconf("dum")
    preprint_r(conf)

    parser.write_conf(conf,dryrun=True)
| 37.697095 | 155 | 0.612438 |
import os
import re
import sys
import glob
from karesansui.lib.dict_op import DictOp
from karesansui.lib.parser.base.xml_like_conf_parser import xmlLikeConfParser as Parser
from karesansui.lib.utils import preprint_r, r_chgrp, r_chmod
from karesansui.lib.const import VENDOR_SYSCONF_DIR, \
COLLECTD_DATA_DIR, KARESANSUI_GROUP
PARSER_COLLECTD_PLUGIN_DIR = "%s/collectd.d" % VENDOR_SYSCONF_DIR
class collectdpluginParser:
    """Read/write parser for per-plugin collectd configuration snippets.

    Operates on the ``*.conf`` files below ``PARSER_COLLECTD_PLUGIN_DIR`` and
    keeps the parsed contents in a DictOp structure keyed by plugin name.
    """

    # DictOp namespace under which all plugin configs are collected
    _module = "collectdplugin"

    def __init__(self):
        # DictOp instance holding {plugin_name: parsed config}
        self.dop = DictOp()
        self.dop.addconf(self._module,{})
        # underlying line parser configured for collectd's syntax
        self.parser = Parser()
        self.parser.set_delim("[ \t]+")
        self.parser.set_new_delim("\t")
        self.parser.set_comment("#")
        self.base_parser_name = self.parser.__class__.__name__
        # reuse option classification (unique / multi / section) from the
        # main collectd parser so plugin files are parsed consistently
        from karesansui.lib.parser.collectd import collectdParser
        collectdp = collectdParser()
        self.parser.set_opt_uni(collectdp.opt_uni)
        self.parser.set_opt_multi(collectdp.opt_multi)
        self.parser.set_opt_sect(collectdp.opt_sect)
        pass

    def set_footer(self, footer=""):
        """Set the footer text the underlying parser appends on write."""
        self.parser.set_footer(footer)

    def source_file(self):
        """Return the list of plugin config files (``*.conf``) found in
        ``PARSER_COLLECTD_PLUGIN_DIR``."""
        retval = []
        glob_str = "%s/*.conf" % (PARSER_COLLECTD_PLUGIN_DIR,)
        for _afile in glob.glob(glob_str):
            retval.append(_afile)
        return retval

    def read_conf(self,extra_args=None):
        """Parse every plugin config file into one DictOp structure.

        If ``extra_args['include']`` is given it is treated as a regexp and
        only plugin names matching it are parsed.  Returns the DictOp config
        for this module (plugin name -> parsed values, plus ``@BASE_PARSER``).
        """
        retval = {}
        for _afile in self.source_file():
            plugin_name = re.sub("\.conf$","",os.path.basename(_afile))
            try:
                # key probe: doubles as "was an include filter supplied?"
                extra_args['include']
                if not re.search(extra_args['include'],plugin_name):
                    continue
            except:
                # no (or malformed) extra_args -> no filtering
                pass
            self.parser.set_source_file([_afile])
            conf_arr = self.parser.read_conf()
            try:
                self.dop.set(self._module,[plugin_name],conf_arr[_afile]['value'])
            except:
                # file could not be parsed -> skipped silently
                pass
        self.dop.set(self._module,['@BASE_PARSER'],self.base_parser_name)
        return self.dop.getconf(self._module)

    def _pre_write_conf(self,conf_arr={}):
        """Normalize the config tree before writing.

        Fixes group/permissions on ``COLLECTD_DATA_DIR`` (only when running
        as root) and rewrites the ``@ORDERS`` entry of the python plugin so
        its options are always emitted in a fixed, known-good sequence.
        NOTE(review): mutable default ``conf_arr={}`` kept for interface
        compatibility — confirm DictOp does not mutate the passed dict,
        otherwise state could leak across calls.
        """
        if os.path.exists(COLLECTD_DATA_DIR):
            if os.getuid() == 0:
                r_chgrp(COLLECTD_DATA_DIR,KARESANSUI_GROUP)
                r_chmod(COLLECTD_DATA_DIR,"g+rwx")
                r_chmod(COLLECTD_DATA_DIR,"o-rwx")
        dop = DictOp()
        dop.addconf("__",conf_arr)
        if dop.isset("__",["python"]) is True:
            # force deterministic option order for the python plugin section
            dop.cdp_unset("__",["python","Plugin","python","@ORDERS"],multiple_file=True)
            orders = []
            orders.append(['Encoding'])
            orders.append(['LogTraces'])
            orders.append(['Interactive'])
            orders.append(['ModulePath'])
            orders.append(['Import'])
            orders.append(['Module'])
            dop.cdp_set("__",["python","Plugin","python","@ORDERS"],orders,is_opt_multi=True,multiple_file=True)
        return dop.getconf("__")

    def write_conf(self,conf_arr={},extra_args=None,dryrun=False):
        """Write (or delete) one ``<plugin>.conf`` per top-level key.

        Entries whose ``action`` is ``"delete"`` remove the file; entries
        carrying a ``value`` are serialized through the underlying parser.
        ``dryrun=True`` is forwarded to the parser (no files are written).
        """
        retval = True
        conf_arr = self._pre_write_conf(conf_arr)
        for plugin_name,_v in conf_arr.items():
            _afile = "%s/%s.conf" % (PARSER_COLLECTD_PLUGIN_DIR,plugin_name,)
            try:
                # key probe: only act when an explicit action is present
                _v['action']
                if _v['action'] == "delete":
                    if os.path.exists(_afile):
                        os.unlink(_afile)
                    continue
            except:
                pass
            try:
                # key probe: entries without a value are ignored
                _v['value']
                self.dop.addconf("parser",{})
                self.dop.set("parser",[_afile],_v['value'])
                arr = self.dop.getconf("parser")
                self.parser.write_conf(arr,dryrun=dryrun)
            except:
                pass
        return retval
if __name__ == '__main__':
parser = collectdpluginParser()
dop = DictOp()
dop.addconf("dum",parser.read_conf())
new_plugin_name = "takuma"
| true | true |
f7116ee3a17fcb6b15670369732a0331101ec14b | 13,492 | py | Python | inv/models/interfaceclassificationrule.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | inv/models/interfaceclassificationrule.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | inv/models/interfaceclassificationrule.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ---------------------------------------------------------------------
# Interface Classification Rules models
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# Third-party modules
from mongoengine.document import Document, EmbeddedDocument
from mongoengine.fields import StringField, IntField, ListField, EmbeddedDocumentField, BooleanField
# NOC modules
from noc.core.mongo.fields import ForeignKeyField, PlainReferenceField
from noc.core.ip import IP
from noc.main.models.prefixtable import PrefixTable, PrefixTablePrefix
from noc.sa.models.managedobjectselector import ManagedObjectSelector
from noc.vc.models.vcfilter import VCFilter
from noc.core.comp import smart_text
from .interfaceprofile import InterfaceProfile
class InterfaceClassificationMatch(EmbeddedDocument):
    """
    Single match condition of an interface classification rule.

    A condition compares one interface attribute (*field*) against *value*,
    a prefix table or a VC filter, using operation *op*.  It can be rendered
    either as a ConfDB query string (``get_confdb_query``) or compiled into
    Python source of a predicate function (``compile`` / ``compile_*``)
    which is later exec'ed by InterfaceClassificationRule.
    """

    # Interface attribute this condition applies to
    field = StringField(
        choices=[
            ("name", "name"),
            ("description", "description"),
            ("ip", "ip"),
            ("tagged", "tagged vlan"),
            ("untagged", "untagged vlan"),
            ("hints", "hints"),
        ]
    )
    # Comparison operation
    op = StringField(choices=[("eq", "Equals"), ("regexp", "RegExp"), ("in", "in")])
    # Literal value or regexp pattern, depending on op
    value = StringField()
    # Prefix table used by "ip in"
    prefix_table = ForeignKeyField(PrefixTable, required=False)
    # VC filter used by "tagged in" / "untagged in"
    vc_filter = ForeignKeyField(VCFilter, required=False)
    description = StringField(required=False)

    def __str__(self):
        # Human-readable "<field> <op> <value>" form
        if self.prefix_table:
            v = self.prefix_table.name
        elif self.vc_filter:
            v = self.vc_filter.name
        else:
            v = self.value
        return "%s %s %s" % (self.field, self.op, v)

    @property
    def get_confdb_query(self):
        """Render this condition as a ConfDB query expression string.

        All branches start from a 'Match("interfaces", ifname)' generator
        and AND additional predicates onto it; unsupported field/op
        combinations simply add nothing.
        """
        query = ['Match("interfaces", ifname)']
        if self.field == "name" and self.op == "eq":
            query += ['Filter(ifname == "%s")' % self.value]
        elif self.field == "name" and self.op == "regexp":
            query += ['Re("%s", ifname, ignore_case=True)' % self.value]
        if self.field == "description":
            query += ['Match("interfaces", ifname, "description", ifdescr)']
            if self.op == "eq":
                query += ['Filter(ifdescr == "%s")' % self.value]
            elif self.op == "regexp":
                query += ['Re("%s", ifdescr, ignore_case=True)' % self.value]
        if self.field == "hints" and self.op == "eq":
            query += ['Match("interfaces", ifname, "hints", "%s")' % self.value]
        if self.field == "ip" and self.op == "eq":
            query += [
                'Match("virtual-router", vr, "forwarding-instance", fi, "interfaces",'
                ' ifname, "unit", ifname, "inet", "address", "%s")' % self.value
            ]
        elif self.field == "ip" and self.op == "in" and self.prefix_table:
            # OR together a MatchPrefix for every prefix in the table
            prefix_match = "( %s )" % " or ".join(
                " MatchPrefix('%s', address)" % ptp.prefix
                for ptp in PrefixTablePrefix.objects.filter(table=self.prefix_table)
            )
            query += [
                'Match("virtual-router", vr, "forwarding-instance", fi, "interfaces",'
                ' ifname, "unit", ifname, "inet", "address", address)'
                " and %s and Del(vr, fi, address)" % prefix_match
            ]
        if self.field == "untagged" and self.op == "eq" and self.value:
            query += [
                'Match("virtual-router", vr, "forwarding-instance", fi, "interfaces",'
                ' ifname, "unit", ifname, "bridge", "switchport", "untagged", %s)' % self.value
            ]
        elif self.field == "untagged" and self.op == "in" and self.vc_filter:
            query += [
                'Match("virtual-router", vr, "forwarding-instance", fi, "interfaces",'
                ' ifname, "unit", ifname, "bridge", "switchport", "untagged", untagged)'
                ' and HasVLAN("%s", untagged) and Del(vr, fi, untagged)' % self.vc_filter.expression
            ]
        if self.field == "tagged" and self.op == "eq" and (self.value or self.vc_filter):
            query += [
                'Match("virtual-router", vr, "forwarding-instance", fi, "interfaces",'
                ' ifname, "unit", ifname, "bridge", "switchport", "tagged", tagged)'
                ' and MatchExactVLAN("%s", tagged) and Del(vr, fi, tagged)'
                % (self.value or self.vc_filter.expression)
            ]
        elif self.field == "tagged" and self.op == "in" and self.vc_filter:
            query += [
                'Match("virtual-router", vr, "forwarding-instance", fi, "interfaces",'
                ' ifname, "unit", ifname, "bridge", "switchport", "tagged", tagged)'
                ' and MatchAnyVLAN("%s", tagged) and Del(vr, fi, tagged)'
                % self.vc_filter.expression
            ]
        return " and ".join(query)

    def compile(self, f_name):
        """Return Python source of a predicate ``f_name(iface) -> bool``
        for this condition, dispatching to ``compile_<field>_<op>``.

        :raises SyntaxError: when the field/op combination is unsupported
        """
        a = getattr(self, "compile_%s_%s" % (self.field, self.op), None)
        if a:
            return a(f_name)
        else:
            raise SyntaxError("%s %s is not implemented" % (self.field, self.op))

    # name
    def compile_name_eq(self, f_name):
        """Case-insensitive equality on interface name."""
        return "\n".join(
            [
                "def %s(iface):" % f_name,
                "    return iface.name.lower() == %s" % repr(self.value.lower()),
            ]
        )

    def compile_name_regexp(self, f_name):
        """Case-insensitive regexp search on interface name."""
        return "\n".join(
            [
                "rx_%s = re.compile(%s, re.IGNORECASE)" % (f_name, repr(self.value)),
                "def %s(iface):" % f_name,
                "    return bool(rx_%s.search(iface.name))" % f_name,
            ]
        )

    # description
    def compile_description_eq(self, f_name):
        """Case-insensitive equality on interface description."""
        return "\n".join(
            [
                "def %s(iface):" % f_name,
                "    return iface.description.lower() == %s" % repr(self.value.lower()),
            ]
        )

    def compile_description_regexp(self, f_name):
        """Case-insensitive regexp search on interface description."""
        return "\n".join(
            [
                "rx_%s = re.compile(%s, re.IGNORECASE)" % (f_name, repr(self.value)),
                "def %s(iface):" % f_name,
                "    return iface.description and bool(rx_%s.search(iface.description))" % f_name,
            ]
        )

    # IP
    def compile_ip_eq(self, f_name):
        """Match an exact address (no mask given) or an exact prefix on any
        IPv4/IPv6 subinterface address."""
        v = IP.prefix(self.value)
        r = [
            "def %s(iface):" % f_name,
            "    a = [si.ipv%(afi)s_addresses for si in iface.subinterface_set.filter(enabled_afi='IPv%(afi)s')]"
            % {"afi": v.afi},
            "    a = sum(a, [])",
        ]
        if "/" in self.value:
            # Compare prefixes
            r += ["    return any(x for x in a if x == %r)" % v.prefix]
        else:
            # Compare addresses only (mask stripped)
            v = v.prefix.split("/")[0]
            r += ["    return any(x for x in a if x.split('/')[0] == %r)" % v]
        return "\n".join(r)

    def compile_ip_in(self, f_name):
        """Match when any subinterface address falls into the prefix table."""
        r = [
            "pt_%s = PrefixTable.objects.get(id=%s)" % (f_name, self.prefix_table.id),
            "def %s(iface):" % f_name,
            "    for si in iface.subinterface_set.filter(enabled_afi='IPv4'):",
            "        for a in si.ipv4_addresses:",
            "            if a in pt_%s:" % f_name,
            "                return True",
            "    for si in iface.subinterface_set.filter(enabled_afi='IPv6'):",
            "        for a in si.ipv6_addresses:",
            "            if a in pt_%s:" % f_name,
            "                return True",
            "    return False",
        ]
        return "\n".join(r)

    # Untagged
    def compile_untagged_eq(self, f_name):
        """Match an exact untagged VLAN on any BRIDGE subinterface of the
        parent interface."""
        vlan = int(self.value)
        if vlan < 1 or vlan > 4096:
            raise SyntaxError("Invalid VLAN")
        r = [
            "def %s(iface):" % f_name,
            "    return bool(iface.parent.subinterface_set.filter(enabled_afi='BRIDGE', untagged_vlan=%d).count())"
            % vlan,
        ]
        return "\n".join(r)

    def compile_untagged_in(self, f_name):
        """Match when the untagged VLAN passes the configured VC filter."""
        r = [
            "vcf_%s = VCFilter.get_by_id(id=%s)" % (f_name, self.vc_filter.id),
            "if not vcf_%s:" % f_name,
            "    raise ValueError('Invalid VC Filter: %s')" % self.vc_filter.name,
            "def %s(iface):" % f_name,
            "    for si in iface.parent.subinterface_set.filter(enabled_afi='BRIDGE'):",
            "        if si.untagged_vlan and vcf_%s.check(si.untagged_vlan):" % f_name,
            "            return True",
            "    return False",
        ]
        return "\n".join(r)

    # Tagged
    def compile_tagged_eq(self, f_name):
        """Match an exact tagged VLAN on any BRIDGE subinterface of the
        parent interface."""
        vlan = int(self.value)
        if vlan < 1 or vlan > 4096:
            raise SyntaxError("Invalid VLAN")
        r = [
            "def %s(iface):" % f_name,
            "    return bool(iface.parent.subinterface_set.filter(enabled_afi='BRIDGE', tagged_vlans=%d).count())"
            % vlan,
        ]
        return "\n".join(r)

    def compile_tagged_in(self, f_name):
        """Match when any tagged VLAN passes the configured VC filter."""
        r = [
            "vcf_%s = VCFilter.get_by_id(id=%s)" % (f_name, self.vc_filter.id),
            "if not vcf_%s:" % f_name,
            "    raise ValueError('Invalid VC Filter: %s')" % self.vc_filter.name,
            "def %s(iface):" % f_name,
            "    for si in iface.parent.subinterface_set.filter(enabled_afi='BRIDGE'):",
            "        if si.tagged_vlans:",
            "            if any(vlan for vlan in si.tagged_vlans if vcf_%s.check(vlan)):" % f_name,
            "                return True",
            "    return False",
        ]
        return "\n".join(r)

    def compile_hints_eq(self, f_name):
        """Match when the given hint string is present in iface.hints."""
        r = ["def %s(iface):" % f_name, "    return iface.hints and %r in iface.hints" % self.value]
        return "\n".join(r)
class InterfaceClassificationRule(Document):
    """
    Ordered rule assigning an InterfaceProfile to interfaces of managed
    objects matched by *selector*, when all *match* conditions hold.

    All active rules are compiled together into one generated ``classify``
    function (see ``get_classificator``).
    """

    meta = {
        "collection": "noc.inv.interfaceclassificationrules",
        "strict": False,
        "auto_create_index": False,
    }
    name = StringField(required=False)
    is_active = BooleanField(default=True)
    description = StringField(required=False)
    # Rules are evaluated in ascending order; first match wins
    order = IntField()
    selector = ForeignKeyField(ManagedObjectSelector, required=False)
    # All conditions must match (logical AND); empty list matches anything
    match = ListField(EmbeddedDocumentField(InterfaceClassificationMatch), required=False)
    profile = PlainReferenceField(InterfaceProfile, default=InterfaceProfile.get_default_profile)

    def __str__(self):
        r = [smart_text(x) for x in self.match]
        return "%s -> %s" % (", ".join(r), self.profile.name)

    @property
    def match_expr(self):
        """
        Stringified match expression (human-readable AND of all conditions)
        """
        if not len(self.match):
            return "any"
        elif len(self.match) == 1:
            return smart_text(self.match[0])
        else:
            return " AND ".join("(%s)" % smart_text(m) for m in self.match)

    @property
    def get_confdb_query(self):
        """ConfDB query matching this rule (AND of all condition queries);
        with no conditions, all physical interfaces match."""
        if not len(self.match):
            return 'Match("interfaces", ifname, "type", "physical")'
        elif len(self.match) == 1:
            return self.match[0].get_confdb_query
        else:
            return " and ".join("(%s)" % m.get_confdb_query for m in self.match)

    @classmethod
    def get_classificator_code(cls):
        """Generate Python source of ``classify(interface)`` evaluating all
        active rules in order and returning the matched profile id (or None).

        NOTE(review): rules with no selector set would make the generated
        ``rule.selector.id`` access fail (selector is required=False) —
        confirm active rules always carry a selector.
        """
        # r collects module-level code (imports + per-condition predicates),
        # mf collects the body of the classify() function itself
        r = ["import re", "import bson", "from noc.sa.models.selectorcache import SelectorCache"]
        mf = [
            "gsc = {}",
            "def classify(interface):",
            "    def in_selector(o, s):",
            "        if s in s_cache:",
            "            return s_cache[s]",
            "        if s in gsc:",
            "            selector = gsc[s]",
            "        else:",
            "            selector = ManagedObjectSelector.get_by_id(s)",
            "            gsc[s] = selector",
            "        r = SelectorCache.is_in_selector(o, selector)",
            "        # r = o in selector",
            "        s_cache[s] = r",
            "        return r",
            "    s_cache = {}",
            "    mo = interface.managed_object",
        ]
        for rule in cls.objects.filter(is_active=True).order_by("order"):
            rid = str(rule.id)
            lmn = []
            for i, m in enumerate(rule.match):
                mn = "match_%s_%d" % (rid, i)
                r += [m.compile(mn)]
                lmn += ["%s(interface)" % mn]
            if lmn:
                mf += [
                    "    if in_selector(mo, %d) and %s:" % (rule.selector.id, " and ".join(lmn)),
                    "        return bson.ObjectId('%s')" % rule.profile.id,
                ]
            else:
                # rule without conditions: selector membership alone decides
                mf += [
                    "    if in_selector(mo, %d):" % rule.selector.id,
                    "        return bson.ObjectId('%s')" % rule.profile.id,
                ]
        r += mf
        return "\n".join(r)

    @classmethod
    def get_classificator(cls):
        """Compile and return the generated ``classify`` function.

        The exec'ed source is self-generated from trusted database rules,
        not external input.
        """
        code = cls.get_classificator_code() + "\nhandlers[0] = classify\n"
        # Hack to retrieve reference to the generated function from exec scope
        handlers = {}
        # Compile code
        exec(
            code,
            {
                "re": re,
                "PrefixTable": PrefixTable,
                "VCFilter": VCFilter,
                "ManagedObjectSelector": ManagedObjectSelector,
                "handlers": handlers,
            },
        )
        return handlers[0]
| 39.335277 | 115 | 0.505633 |
import re
from mongoengine.document import Document, EmbeddedDocument
from mongoengine.fields import StringField, IntField, ListField, EmbeddedDocumentField, BooleanField
from noc.core.mongo.fields import ForeignKeyField, PlainReferenceField
from noc.core.ip import IP
from noc.main.models.prefixtable import PrefixTable, PrefixTablePrefix
from noc.sa.models.managedobjectselector import ManagedObjectSelector
from noc.vc.models.vcfilter import VCFilter
from noc.core.comp import smart_text
from .interfaceprofile import InterfaceProfile
class InterfaceClassificationMatch(EmbeddedDocument):
field = StringField(
choices=[
("name", "name"),
("description", "description"),
("ip", "ip"),
("tagged", "tagged vlan"),
("untagged", "untagged vlan"),
("hints", "hints"),
]
)
op = StringField(choices=[("eq", "Equals"), ("regexp", "RegExp"), ("in", "in")])
value = StringField()
prefix_table = ForeignKeyField(PrefixTable, required=False)
vc_filter = ForeignKeyField(VCFilter, required=False)
description = StringField(required=False)
def __str__(self):
if self.prefix_table:
v = self.prefix_table.name
elif self.vc_filter:
v = self.vc_filter.name
else:
v = self.value
return "%s %s %s" % (self.field, self.op, v)
@property
def get_confdb_query(self):
query = ['Match("interfaces", ifname)']
if self.field == "name" and self.op == "eq":
query += ['Filter(ifname == "%s")' % self.value]
elif self.field == "name" and self.op == "regexp":
query += ['Re("%s", ifname, ignore_case=True)' % self.value]
if self.field == "description":
query += ['Match("interfaces", ifname, "description", ifdescr)']
if self.op == "eq":
query += ['Filter(ifdescr == "%s")' % self.value]
elif self.op == "regexp":
query += ['Re("%s", ifdescr, ignore_case=True)' % self.value]
if self.field == "hints" and self.op == "eq":
query += ['Match("interfaces", ifname, "hints", "%s")' % self.value]
if self.field == "ip" and self.op == "eq":
query += [
'Match("virtual-router", vr, "forwarding-instance", fi, "interfaces",'
' ifname, "unit", ifname, "inet", "address", "%s")' % self.value
]
elif self.field == "ip" and self.op == "in" and self.prefix_table:
prefix_match = "( %s )" % " or ".join(
" MatchPrefix('%s', address)" % ptp.prefix
for ptp in PrefixTablePrefix.objects.filter(table=self.prefix_table)
)
query += [
'Match("virtual-router", vr, "forwarding-instance", fi, "interfaces",'
' ifname, "unit", ifname, "inet", "address", address)'
" and %s and Del(vr, fi, address)" % prefix_match
]
if self.field == "untagged" and self.op == "eq" and self.value:
query += [
'Match("virtual-router", vr, "forwarding-instance", fi, "interfaces",'
' ifname, "unit", ifname, "bridge", "switchport", "untagged", %s)' % self.value
]
elif self.field == "untagged" and self.op == "in" and self.vc_filter:
query += [
'Match("virtual-router", vr, "forwarding-instance", fi, "interfaces",'
' ifname, "unit", ifname, "bridge", "switchport", "untagged", untagged)'
' and HasVLAN("%s", untagged) and Del(vr, fi, untagged)' % self.vc_filter.expression
]
if self.field == "tagged" and self.op == "eq" and (self.value or self.vc_filter):
query += [
'Match("virtual-router", vr, "forwarding-instance", fi, "interfaces",'
' ifname, "unit", ifname, "bridge", "switchport", "tagged", tagged)'
' and MatchExactVLAN("%s", tagged) and Del(vr, fi, tagged)'
% (self.value or self.vc_filter.expression)
]
elif self.field == "tagged" and self.op == "in" and self.vc_filter:
query += [
'Match("virtual-router", vr, "forwarding-instance", fi, "interfaces",'
' ifname, "unit", ifname, "bridge", "switchport", "tagged", tagged)'
' and MatchAnyVLAN("%s", tagged) and Del(vr, fi, tagged)'
% self.vc_filter.expression
]
return " and ".join(query)
def compile(self, f_name):
a = getattr(self, "compile_%s_%s" % (self.field, self.op), None)
if a:
return a(f_name)
else:
raise SyntaxError("%s %s is not implemented" % (self.field, self.op))
def compile_name_eq(self, f_name):
return "\n".join(
[
"def %s(iface):" % f_name,
" return iface.name.lower() == %s" % repr(self.value.lower()),
]
)
def compile_name_regexp(self, f_name):
return "\n".join(
[
"rx_%s = re.compile(%s, re.IGNORECASE)" % (f_name, repr(self.value)),
"def %s(iface):" % f_name,
" return bool(rx_%s.search(iface.name))" % f_name,
]
)
def compile_description_eq(self, f_name):
return "\n".join(
[
"def %s(iface):" % f_name,
" return iface.description.lower() == %s" % repr(self.value.lower()),
]
)
def compile_description_regexp(self, f_name):
return "\n".join(
[
"rx_%s = re.compile(%s, re.IGNORECASE)" % (f_name, repr(self.value)),
"def %s(iface):" % f_name,
" return iface.description and bool(rx_%s.search(iface.description))" % f_name,
]
)
def compile_ip_eq(self, f_name):
v = IP.prefix(self.value)
r = [
"def %s(iface):" % f_name,
" a = [si.ipv%(afi)s_addresses for si in iface.subinterface_set.filter(enabled_afi='IPv%(afi)s')]"
% {"afi": v.afi},
" a = sum(a, [])",
]
if "/" in self.value:
r += [" return any(x for x in a if x == %r)" % v.prefix]
else:
v = v.prefix.split("/")[0]
r += [" return any(x for x in a if x.split('/')[0] == %r)" % v]
return "\n".join(r)
def compile_ip_in(self, f_name):
r = [
"pt_%s = PrefixTable.objects.get(id=%s)" % (f_name, self.prefix_table.id),
"def %s(iface):" % f_name,
" for si in iface.subinterface_set.filter(enabled_afi='IPv4'):",
" for a in si.ipv4_addresses:",
" if a in pt_%s:" % f_name,
" return True",
" for si in iface.subinterface_set.filter(enabled_afi='IPv6'):",
" for a in si.ipv6_addresses:",
" if a in pt_%s:" % f_name,
" return True",
" return False",
]
return "\n".join(r)
def compile_untagged_eq(self, f_name):
vlan = int(self.value)
if vlan < 1 or vlan > 4096:
raise SyntaxError("Invalid VLAN")
r = [
"def %s(iface):" % f_name,
" return bool(iface.parent.subinterface_set.filter(enabled_afi='BRIDGE', untagged_vlan=%d).count())"
% vlan,
]
return "\n".join(r)
def compile_untagged_in(self, f_name):
r = [
"vcf_%s = VCFilter.get_by_id(id=%s)" % (f_name, self.vc_filter.id),
"if not vcf_%s:" % f_name,
" raise ValueError('Invalid VC Filter: %s')" % self.vc_filter.name,
"def %s(iface):" % f_name,
" for si in iface.parent.subinterface_set.filter(enabled_afi='BRIDGE'):",
" if si.untagged_vlan and vcf_%s.check(si.untagged_vlan):" % f_name,
" return True",
" return False",
]
return "\n".join(r)
def compile_tagged_eq(self, f_name):
vlan = int(self.value)
if vlan < 1 or vlan > 4096:
raise SyntaxError("Invalid VLAN")
r = [
"def %s(iface):" % f_name,
" return bool(iface.parent.subinterface_set.filter(enabled_afi='BRIDGE', tagged_vlans=%d).count())"
% vlan,
]
return "\n".join(r)
def compile_tagged_in(self, f_name):
r = [
"vcf_%s = VCFilter.get_by_id(id=%s)" % (f_name, self.vc_filter.id),
"if not vcf_%s:" % f_name,
" raise ValueError('Invalid VC Filter: %s')" % self.vc_filter.name,
"def %s(iface):" % f_name,
" for si in iface.parent.subinterface_set.filter(enabled_afi='BRIDGE'):",
" if si.tagged_vlans:",
" if any(vlan for vlan in si.tagged_vlans if vcf_%s.check(vlan)):" % f_name,
" return True",
" return False",
]
return "\n".join(r)
def compile_hints_eq(self, f_name):
r = ["def %s(iface):" % f_name, " return iface.hints and %r in iface.hints" % self.value]
return "\n".join(r)
class InterfaceClassificationRule(Document):
meta = {
"collection": "noc.inv.interfaceclassificationrules",
"strict": False,
"auto_create_index": False,
}
name = StringField(required=False)
is_active = BooleanField(default=True)
description = StringField(required=False)
order = IntField()
selector = ForeignKeyField(ManagedObjectSelector, required=False)
match = ListField(EmbeddedDocumentField(InterfaceClassificationMatch), required=False)
profile = PlainReferenceField(InterfaceProfile, default=InterfaceProfile.get_default_profile)
def __str__(self):
r = [smart_text(x) for x in self.match]
return "%s -> %s" % (", ".join(r), self.profile.name)
@property
def match_expr(self):
if not len(self.match):
return "any"
elif len(self.match) == 1:
return smart_text(self.match[0])
else:
return " AND ".join("(%s)" % smart_text(m) for m in self.match)
@property
def get_confdb_query(self):
if not len(self.match):
return 'Match("interfaces", ifname, "type", "physical")'
elif len(self.match) == 1:
return self.match[0].get_confdb_query
else:
return " and ".join("(%s)" % m.get_confdb_query for m in self.match)
@classmethod
def get_classificator_code(cls):
r = ["import re", "import bson", "from noc.sa.models.selectorcache import SelectorCache"]
mf = [
"gsc = {}",
"def classify(interface):",
" def in_selector(o, s):",
" if s in s_cache:",
" return s_cache[s]",
" if s in gsc:",
" selector = gsc[s]",
" else:",
" selector = ManagedObjectSelector.get_by_id(s)",
" gsc[s] = selector",
" r = SelectorCache.is_in_selector(o, selector)",
" # r = o in selector",
" s_cache[s] = r",
" return r",
" s_cache = {}",
" mo = interface.managed_object",
]
for rule in cls.objects.filter(is_active=True).order_by("order"):
rid = str(rule.id)
lmn = []
for i, m in enumerate(rule.match):
mn = "match_%s_%d" % (rid, i)
r += [m.compile(mn)]
lmn += ["%s(interface)" % mn]
if lmn:
mf += [
" if in_selector(mo, %d) and %s:" % (rule.selector.id, " and ".join(lmn)),
" return bson.ObjectId('%s')" % rule.profile.id,
]
else:
mf += [
" if in_selector(mo, %d):" % rule.selector.id,
" return bson.ObjectId('%s')" % rule.profile.id,
]
r += mf
return "\n".join(r)
@classmethod
def get_classificator(cls):
code = cls.get_classificator_code() + "\nhandlers[0] = classify\n"
handlers = {}
exec(
code,
{
"re": re,
"PrefixTable": PrefixTable,
"VCFilter": VCFilter,
"ManagedObjectSelector": ManagedObjectSelector,
"handlers": handlers,
},
)
return handlers[0]
| true | true |
f7116f15bfecc8ac518a725732046cefe35eea97 | 2,520 | py | Python | configs/opamp/biased_pmos_gain/15-layer-ft-all-pool-0.5/config.py | kouroshHakha/circuit-fewshot-code | 32007e119da30632736868a3f643027624bf08d2 | [
"BSD-3-Clause"
] | null | null | null | configs/opamp/biased_pmos_gain/15-layer-ft-all-pool-0.5/config.py | kouroshHakha/circuit-fewshot-code | 32007e119da30632736868a3f643027624bf08d2 | [
"BSD-3-Clause"
] | null | null | null | configs/opamp/biased_pmos_gain/15-layer-ft-all-pool-0.5/config.py | kouroshHakha/circuit-fewshot-code | 32007e119da30632736868a3f643027624bf08d2 | [
"BSD-3-Clause"
] | null | null | null | import time
import hashlib
import torch
from torch_geometric.data import DataLoader
from cgl.utils.params import ParamDict
from cgl.data.graph_data import CircuitInMemDataset, CircuitGraphDataset
# from cgl.models.gnn import DeepGENNet
s = time.time()
print('Loading the dataset ...')
root = '/store/nosnap/results/ngspice_biased_pmos_gain/two_stage_biased_pmos'
cir_dset = CircuitGraphDataset(root=root, mode='train', circuit_type='opamp_biased_pmos')
node_output_idx = next(iter(cir_dset.graph_nodes.values()))['V_net6']
vout_idx = torch.where((torch.where(cir_dset[0].output_node_mask)[0] == node_output_idx))[0].item()
# gain mean and variance
gmean, gstd = -1.1057, 0.6559
def transform_fn(data):
data.gain = (data.vac_mag[vout_idx, 0].float() - gmean) / gstd
return data
dset = CircuitInMemDataset(root=root, mode='train', transform=transform_fn)
print(f'Dataset was loaded in {time.time() - s:.6f} seconds.')
sample_data = dset[0]
fract = 0.05
splits = dset.splits
train_idx = int(fract * len(splits['train']))
train_dset = dset[splits['train'][:train_idx]]
valid_dset = dset[splits['valid']]
test_dset = dset[splits['test']]
backbone_config = 'configs/opamp/dc/deep_gen_net/15-layer/config.py'
bb_id = hashlib.sha256(backbone_config.encode('utf-8')).hexdigest()[:6]
lr = 1e-3
activation = 'relu'
hidden_channels = 128
num_layers = 15
train_batch_size = min(256, len(train_dset))
valid_batch_size = min(256, len(valid_dset))
test_batch_size = min(256, len(test_dset))
exp_name = f'GAIN_PMOS_FT_Pool_{fract*10:.1f}_DeepGEN_h{hidden_channels}_nl{num_layers}_bs{train_batch_size}_lr{lr:.0e}_{activation}'
mdl_config = ParamDict(
exp_name=exp_name,
num_nodes=sample_data.vdc.shape[0],
in_channels=sample_data.x.shape[-1] + sample_data.type_tens.shape[-1],
hidden_channels=hidden_channels,
num_layers=num_layers,
dropout=0,
activation=activation,
bins=50,
lr=lr,
freeze_backbone=False,
use_pooling=True,
output_label='gain',
output_sigmoid=False,
lr_warmup={'peak_lr': lr, 'weight_decay': 0,
'warmup_updates': 50, 'tot_updates': 20000, 'end_lr': 5e-5},
)
train_dloader = DataLoader(train_dset, batch_size=train_batch_size, shuffle=True, num_workers=0)
valid_dloader = DataLoader(valid_dset, batch_size=valid_batch_size, num_workers=0)
test_dloader = DataLoader(test_dset, batch_size=test_batch_size, num_workers=0)
# .to converts the weight dtype to match input
# model = DeepGENNet(mdl_config).to(sample_data.x.dtype)
| 32.727273 | 133 | 0.749603 | import time
import hashlib
import torch
from torch_geometric.data import DataLoader
from cgl.utils.params import ParamDict
from cgl.data.graph_data import CircuitInMemDataset, CircuitGraphDataset
s = time.time()
print('Loading the dataset ...')
root = '/store/nosnap/results/ngspice_biased_pmos_gain/two_stage_biased_pmos'
cir_dset = CircuitGraphDataset(root=root, mode='train', circuit_type='opamp_biased_pmos')
node_output_idx = next(iter(cir_dset.graph_nodes.values()))['V_net6']
vout_idx = torch.where((torch.where(cir_dset[0].output_node_mask)[0] == node_output_idx))[0].item()
gmean, gstd = -1.1057, 0.6559
def transform_fn(data):
data.gain = (data.vac_mag[vout_idx, 0].float() - gmean) / gstd
return data
dset = CircuitInMemDataset(root=root, mode='train', transform=transform_fn)
print(f'Dataset was loaded in {time.time() - s:.6f} seconds.')
sample_data = dset[0]
fract = 0.05
splits = dset.splits
train_idx = int(fract * len(splits['train']))
train_dset = dset[splits['train'][:train_idx]]
valid_dset = dset[splits['valid']]
test_dset = dset[splits['test']]
backbone_config = 'configs/opamp/dc/deep_gen_net/15-layer/config.py'
bb_id = hashlib.sha256(backbone_config.encode('utf-8')).hexdigest()[:6]
lr = 1e-3
activation = 'relu'
hidden_channels = 128
num_layers = 15
train_batch_size = min(256, len(train_dset))
valid_batch_size = min(256, len(valid_dset))
test_batch_size = min(256, len(test_dset))
exp_name = f'GAIN_PMOS_FT_Pool_{fract*10:.1f}_DeepGEN_h{hidden_channels}_nl{num_layers}_bs{train_batch_size}_lr{lr:.0e}_{activation}'
mdl_config = ParamDict(
exp_name=exp_name,
num_nodes=sample_data.vdc.shape[0],
in_channels=sample_data.x.shape[-1] + sample_data.type_tens.shape[-1],
hidden_channels=hidden_channels,
num_layers=num_layers,
dropout=0,
activation=activation,
bins=50,
lr=lr,
freeze_backbone=False,
use_pooling=True,
output_label='gain',
output_sigmoid=False,
lr_warmup={'peak_lr': lr, 'weight_decay': 0,
'warmup_updates': 50, 'tot_updates': 20000, 'end_lr': 5e-5},
)
train_dloader = DataLoader(train_dset, batch_size=train_batch_size, shuffle=True, num_workers=0)
valid_dloader = DataLoader(valid_dset, batch_size=valid_batch_size, num_workers=0)
test_dloader = DataLoader(test_dset, batch_size=test_batch_size, num_workers=0)
| true | true |
f7116fb8eeb110ebf0028104837ae9b3e293cbef | 27,964 | py | Python | pandapower/plotting/plotly/traces.py | Zamwell/pandapower | ce51946342109e969b87b60c8883d7eec02d3060 | [
"BSD-3-Clause"
] | null | null | null | pandapower/plotting/plotly/traces.py | Zamwell/pandapower | ce51946342109e969b87b60c8883d7eec02d3060 | [
"BSD-3-Clause"
] | null | null | null | pandapower/plotting/plotly/traces.py | Zamwell/pandapower | ce51946342109e969b87b60c8883d7eec02d3060 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import math
import numpy as np
import pandas as pd
from packaging import version
from collections.abc import Iterable
from pandapower.plotting.plotly.get_colors import get_plotly_color, get_plotly_cmap
from pandapower.plotting.plotly.mapbox_plot import _on_map_test, _get_mapbox_token, MapboxTokenMissing
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
try:
from plotly import __version__ as plotly_version
from plotly.graph_objs.scatter.marker import ColorBar
from plotly.graph_objs import Figure, Layout
from plotly.graph_objs.layout import XAxis, YAxis
from plotly.graph_objs.scatter import Line, Marker
from plotly.graph_objs.scattermapbox import Line as scmLine
from plotly.graph_objs.scattermapbox import Marker as scmMarker
except ImportError:
logger.info("Failed to import plotly - interactive plotting will not be available")
def version_check():
    """Raise UserWarning if the installed plotly is older than 3.1.1.

    Bug fix: the message previously formatted ``__version__``, a name that
    does not exist in this module (it is imported as ``plotly_version``),
    so a too-old plotly raised NameError instead of the intended warning.
    """
    if version.parse(plotly_version) < version.parse("3.1.1"):
        raise UserWarning("Your plotly version {} is no longer supported.\r\n"
                          "Please upgrade your python-plotly installation, "
                          "e.g., via pip install --upgrade plotly".format(plotly_version))
def _in_ipynb():
"""
an auxiliary function which checks if plot is called from a jupyter-notebook or not
"""
import __main__ as main
return not hasattr(main, '__file__')
def sum_line_length(pts):
    """Return the total Euclidean length of the polyline through *pts*.

    :param pts: sequence of (x, y) coordinate pairs
    :return: sum of segment lengths; 0 for fewer than two points

    Cleanup: the lambda-assigned-to-a-variable plus map/zip indirection
    (PEP 8 E731) is replaced by a direct generator over consecutive pairs.
    """
    return sum(math.hypot(x1 - x2, y1 - y2)
               for (x1, y1), (x2, y2) in zip(pts[:-1], pts[1:]))
def get_line_neutral(coord):
    """Return the center of a polyline: the midpoint of the segment in
    which half of the total line length is reached.

    :param coord: list of (x, y) points; a single-point list returns that
        point unchanged, otherwise a ``[x, y]`` list is returned
    Performance fix: the original recomputed the full prefix length via
    ``sum_line_length(coord[:ind])`` on every loop step (O(n^2)); this
    version accumulates segment lengths once (O(n)) with identical
    segment-selection semantics.
    """
    if len(coord) == 1:
        return coord[0]
    # lengths of consecutive segments (same per-segment hypot as before)
    seg_lengths = [math.hypot(x1 - x2, y1 - y2)
                   for (x1, y1), (x2, y2) in zip(coord[:-1], coord[1:])]
    half_length = sum(seg_lengths) / 2.0
    if half_length == 0:
        # degenerate zero-length line: original behavior picked the
        # last segment (loop never entered, ind stayed 0 -> coord[-2:])
        start_coord, end_coord = coord[-2], coord[-1]
    else:
        length = 0.0
        ind = 1
        # first segment whose cumulative length reaches the half point
        for ind, seg in enumerate(seg_lengths, start=1):
            length += seg
            if length >= half_length:
                break
        start_coord = coord[ind - 1]
        end_coord = coord[ind]
    return [(a1 + a2) / 2.0 for a1, a2 in zip(start_coord, end_coord)]
def create_edge_center_trace(line_trace, size=1, patch_type="circle", color="white", infofunc=None,
                             trace_name='edge_center', use_line_geodata=False):
    """
    Create a plotly marker trace positioned at the center of each edge.

    INPUT:
        **line_trace** - already generated line traces (with center geodata)

    OPTIONAL:
        **size** (int, 1) - patch size

        **patch_type** (str, "circle") - marker symbol, see
        https://plot.ly/python/reference/#scatter-marker

        **color** (String, "white") - marker color

        **infofunc** (pd.Series, None) - hoverinfo per element; indices should
        correspond to the pandapower element indices

        **trace_name** (String, "edge_center") - legend name of the trace

        **use_line_geodata** (bool, False) - if True, compute the geometric
        middle of every (possibly multi-segment) line; otherwise reuse the
        precomputed center points embedded in the first line trace
    """
    center_trace = dict(type='scatter', text=infofunc, mode='markers', hoverinfo='text',
                        name=trace_name,
                        marker=dict(color=color, size=size, symbol=patch_type))
    if use_line_geodata:
        midpoints = [get_line_neutral(list(zip(tr["x"], tr["y"]))) for tr in line_trace]
        center_trace['x'] = [m[0] for m in midpoints]
        center_trace['y'] = [m[1] for m in midpoints]
    else:
        # NOTE(review): assumes straight-line traces are laid out as
        # (start, center, end, None) quadruples, so every 4th entry
        # starting at index 1 is a center point — confirm against the
        # line-trace generator
        center_trace['x'] = line_trace[0]["x"][1::4]
        center_trace['y'] = line_trace[0]["y"][1::4]
    return center_trace
def create_bus_trace(net, buses=None, size=5, patch_type="circle", color="blue", infofunc=None,
                     trace_name='buses', legendgroup=None, cmap=None, cmap_vals=None,
                     cbar_title=None, cmin=None, cmax=None, cpos=1.0, colormap_column="vm_pu"):
    """
    Creates a plotly trace of pandapower buses.

    INPUT:
        **net** (pandapowerNet) - The pandapower network

    OPTIONAL:
        **buses** (list, None) - The buses for which the collections are created.
        If None, all buses in the network are considered.

        **size** (int, 5) - patch size

        **patch_type** (str, "circle") - patch type, can be
                - "circle" for a circle
                - "square" for a rectangle
                - "diamond" for a diamond
                - many more patch types at https://plot.ly/python/reference/#scatter-marker

        **infofunc** (pd.Series, None) - hoverinfo for bus elements. Indices should correspond to the pandapower element indices

        **trace_name** (String, "buses") - name of the trace which will appear in the legend

        **color** (String, "blue") - color of buses in the trace

        **cmap** (String, None) - name of a colormap which exists within plotly (Greys, YlGnBu,
        Greens, YlOrRd, Bluered, RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot,
        Blackbody, Earth, Electric, Viridis); True selects 'Jet'

        **cmap_vals** (list, None) - values used for coloring using colormap

        **cbar_title** (String, None) - title for the colorbar

        **cmin** (float, None) - colorbar range minimum

        **cmax** (float, None) - colorbar range maximum

        **cpos** (float, 1.1) - position of the colorbar

        **colormap_column** (str, "vm_pu") - set color of bus according to this variable
    """
    color = get_plotly_color(color)
    bus_trace = dict(type='scatter', text=[], mode='markers', hoverinfo='text', name=trace_name,
                     marker=dict(color=color, size=size, symbol=patch_type))
    # only buses that actually have geodata can be plotted
    buses = net.bus.index.tolist() if buses is None else list(buses)
    bus_plot_index = [b for b in buses if b in list(set(buses) & set(net.bus_geodata.index))]
    bus_trace['x'], bus_trace['y'] = (net.bus_geodata.loc[bus_plot_index, 'x'].tolist(),
                                      net.bus_geodata.loc[bus_plot_index, 'y'].tolist())
    # accept a plain iterable of hover strings and align it with the bus index
    if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(buses):
        infofunc = pd.Series(index=buses, data=infofunc)
    bus_trace['text'] = net.bus.loc[bus_plot_index, 'name'] if infofunc is None else infofunc.loc[buses]
    if legendgroup:
        bus_trace['legendgroup'] = legendgroup
    # if a colormap is requested, color buses by cmap_vals (default: power flow results)
    if cmap is not None:
        # TODO introduce discrete colormaps (see contour plots in plotly)
        cmap = 'Jet' if cmap is True else cmap
        if cmap_vals is None:
            # BUGFIX: the guard previously checked net.res_line although the default
            # coloring values come from net.res_bus
            if net.res_bus.shape[0] == 0:
                logger.error("There are no power flow results for buses voltage magnitudes which are default for bus "
                             "colormap coloring..."
                             "set cmap_vals input argument if you want colormap according to some specific values...")
            cmap_vals = net.res_bus.loc[bus_plot_index, colormap_column].values
    # use "is not None" so that 0 is an acceptable colorbar bound
        cmin = cmin if cmin is not None else cmap_vals.min()
        cmax = cmax if cmax is not None else cmap_vals.max()
        bus_trace['marker'] = Marker(size=size,
                                     color=cmap_vals, cmin=cmin, cmax=cmax,
                                     colorscale=cmap,
                                     colorbar=ColorBar(thickness=10,
                                                       x=cpos),
                                     symbol=patch_type
                                     )
        if cbar_title:
            bus_trace['marker']['colorbar']['title'] = cbar_title
            bus_trace['marker']['colorbar']['title']['side'] = 'right'
    return [bus_trace]
def _get_line_geodata_plotly(net, lines, use_line_geodata):
xs = []
ys = []
if use_line_geodata:
for line_ind, _ in lines.iterrows():
line_coords = net.line_geodata.loc[line_ind, 'coords']
linex, liney = list(zip(*line_coords))
xs += linex
xs += [None]
ys += liney
ys += [None]
else:
# getting x and y values from bus_geodata for from and to side of each line
from_bus = net.bus_geodata.loc[lines.from_bus, 'x'].tolist()
to_bus = net.bus_geodata.loc[lines.to_bus, 'x'].tolist()
# center point added because of the hovertool
center = (np.array(from_bus) + np.array(to_bus)) / 2
none_list = [None] * len(from_bus)
xs = np.array([from_bus, center, to_bus, none_list]).T.flatten().tolist()
from_bus = net.bus_geodata.loc[lines.from_bus, 'y'].tolist()
to_bus = net.bus_geodata.loc[lines.to_bus, 'y'].tolist()
# center point added because of the hovertool
center = (np.array(from_bus) + np.array(to_bus)) / 2
none_list = [None] * len(from_bus)
ys = np.array([from_bus, center, to_bus, none_list]).T.flatten().tolist()
# [:-1] is because the trace will not appear on maps if None is at the end
return xs[:-1], ys[:-1]
def create_line_trace(net, lines=None, use_line_geodata=True, respect_switches=False, width=1.0,
                      color='grey', infofunc=None, trace_name='lines', legendgroup=None,
                      cmap=None, cbar_title=None, show_colorbar=True, cmap_vals=None, cmin=None,
                      cmax=None, cpos=1.1):
    """
    Creates a plotly trace of pandapower lines.

    INPUT:
        **net** (pandapowerNet) - The pandapower network

    OPTIONAL:
        **lines** (list, None) - The lines for which the collections are created.
        If None, all lines in the network are considered.

        **use_line_geodata** (bool, True) - use net.line_geodata polylines when available

        **respect_switches** (bool, False) - flag for consideration of disconnected lines

        **width** (int, 1) - line width

        **infofunc** (pd.Series, None) - hoverinfo for line elements. Indices should correspond to the pandapower element indices

        **trace_name** (String, "lines") - name of the trace which will appear in the legend

        **color** (String, "grey") - color of lines in the trace

        **legendgroup** (String, None) - defines groups of layers that will be displayed in a legend
        e.g. groups according to voltage level (as used in `vlevel_plotly`)

        **cmap** (String, None) - name of a colormap which exists within plotly; if set to True
        the default 'jet' colormap is used (alternatives: Greys, YlGnBu, Greens, YlOrRd,
        Bluered, RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot, Blackbody, Earth,
        Electric, Viridis)

        **cmap_vals** (list, None) - values used for coloring using colormap

        **show_colorbar** (bool, True) - flag for showing or not corresponding colorbar

        **cbar_title** (String, None) - title for the colorbar

        **cmin** (float, None) - colorbar range minimum

        **cmax** (float, None) - colorbar range maximum

        **cpos** (float, 1.1) - position of the colorbar
    """
    color = get_plotly_color(color)
    # defining lines to be plotted
    lines = net.line.index.tolist() if lines is None else list(lines)
    if len(lines) == 0:
        return []
    if infofunc is not None:
        # accept a plain iterable of hover strings and align it with the line index
        if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(lines):
            infofunc = pd.Series(index=lines, data=infofunc)
        if len(infofunc) != len(lines) and len(infofunc) != len(net.line):
            raise UserWarning("Different amount of hover info than lines to plot")
        assert isinstance(infofunc, pd.Series), \
            "infofunc should be a pandas series with the net.line.index to the infofunc contents"
    no_go_lines = set()
    if respect_switches:
        # lines disconnected by an open line switch are drawn separately (dashed)
        no_go_lines = set(lines) & set(net.switch.element[(net.switch.et == "l") & (net.switch.closed == 0)])
    lines_to_plot = net.line.loc[set(net.line.index) & (set(lines) - no_go_lines)]
    no_go_lines_to_plot = None
    use_line_geodata = use_line_geodata if net.line_geodata.shape[0] > 0 else False
    if use_line_geodata:
        lines_to_plot = lines_to_plot.loc[set(lines_to_plot.index) & set(net.line_geodata.index)]
    else:
        # only lines whose end buses both have geodata can be plotted
        lines_with_geodata = lines_to_plot.from_bus.isin(net.bus_geodata.index) & \
                             lines_to_plot.to_bus.isin(net.bus_geodata.index)
        lines_to_plot = lines_to_plot.loc[lines_with_geodata]
    cmap_lines = None
    if cmap is not None:
        # workaround: if a colormap plot is used, each line needs to be a separate scatter
        # object because plotly still doesn't support a colormap for line objects
        # TODO correct this when plotly solves existing github issue about Line colorbar
        cmap = 'jet' if cmap is True else cmap
        if cmap_vals is not None:
            if not isinstance(cmap_vals, np.ndarray):
                cmap_vals = np.asarray(cmap_vals)
        else:
            if net.res_line.shape[0] == 0:
                logger.error("There are no power flow results for lines which are default for line colormap coloring..."
                             "set cmap_vals input argument if you want colormap according to some specific values...")
            cmap_vals = net.res_line.loc[lines_to_plot.index, 'loading_percent'].values
        cmap_lines = get_plotly_cmap(cmap_vals, cmap_name=cmap, cmin=cmin, cmax=cmax)
        if len(cmap_lines) == len(net.line):
            # some lines are not plotted although cmap_value were provided for all lines
            line_idx_map = dict(zip(net.line.loc[lines].index.tolist(), range(len(lines))))
            cmap_lines = [cmap_lines[line_idx_map[idx]] for idx in lines_to_plot.index]
        else:
            assert len(cmap_lines) == len(lines_to_plot), \
                "Different amounts of cmap values and lines to plot were supplied"
    line_traces = []
    for col_i, (idx, line) in enumerate(lines_to_plot.iterrows()):
        line_color = color
        line_info = line['name']
        if cmap is not None:
            try:
                line_color = cmap_lines[col_i]
                line_info = line['name'] if infofunc is None else infofunc.loc[idx]
            except IndexError:
                logger.warning("No color and info for line {:d} (name: {}) available".format(idx, line['name']))
        line_trace = dict(type='scatter', text=[], hoverinfo='text', mode='lines', name=trace_name,
                          line=Line(width=width, color=color))
        line_trace['x'], line_trace['y'] = _get_line_geodata_plotly(net, lines_to_plot.loc[idx:idx], use_line_geodata)
        line_trace['line']['color'] = line_color
        line_trace['text'] = line_info
        # BUGFIX: legendgroup was previously only applied to disconnected lines, so
        # connected lines never joined their legend group (breaks vlevel grouping)
        if legendgroup:
            line_trace['legendgroup'] = legendgroup
        line_traces.append(line_trace)
    if show_colorbar and cmap is not None:
        # use "is not None" so that 0 is an acceptable colorbar bound
        cmin = cmin if cmin is not None else cmap_vals.min()
        cmax = cmax if cmax is not None else cmap_vals.max()
        try:
            # TODO for custom colormaps
            # BUGFIX: compare strings with '==' instead of identity ('is')
            cbar_cmap_name = 'Jet' if cmap == 'jet' else cmap
            # workaround to get a colorbar for lines (an invisible node is added)
            lines_cbar = dict(type='scatter', x=[net.bus_geodata.x[0]], y=[net.bus_geodata.y[0]], mode='markers',
                              marker=Marker(size=0, cmin=cmin, cmax=cmax,
                                            color='rgb(255,255,255)',
                                            colorscale=cbar_cmap_name,
                                            colorbar=ColorBar(thickness=10,
                                                              x=cpos),
                                            ))
            if cbar_title:
                lines_cbar['marker']['colorbar']['title'] = cbar_title
                lines_cbar['marker']['colorbar']['title']['side'] = 'right'
            line_traces.append(lines_cbar)
        except Exception:
            # colorbar is decorative only: never fail the whole plot for it
            pass
    if len(no_go_lines) > 0:
        no_go_lines_to_plot = net.line.loc[no_go_lines]
        for idx, line in no_go_lines_to_plot.iterrows():
            line_color = color
            line_trace = dict(type='scatter',
                              text=[], hoverinfo='text', mode='lines', name='disconnected lines',
                              line=Line(width=width / 2, color='grey', dash='dot'))
            line_trace['x'], line_trace['y'] = _get_line_geodata_plotly(net, no_go_lines_to_plot.loc[idx:idx], use_line_geodata)
            line_trace['line']['color'] = line_color
            try:
                line_trace['text'] = infofunc.loc[idx]
            # BUGFIX: infofunc may be None here, which raised an uncaught AttributeError
            except (KeyError, IndexError, AttributeError):
                line_trace["text"] = line['name']
            line_traces.append(line_trace)
            if legendgroup:
                line_trace['legendgroup'] = legendgroup
    # sort infofunc so that it is in the order lines_to_plot + no_go_lines_to_plot
    if infofunc is not None:
        if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(net.line):
            infofunc = pd.Series(index=net.line.index, data=infofunc)
        assert isinstance(infofunc, pd.Series), \
            "infofunc should be a pandas series with the net.line.index to the infofunc contents"
        sorted_idx = lines_to_plot.index.tolist()
        if no_go_lines_to_plot is not None:
            sorted_idx += no_go_lines_to_plot.index.tolist()
        infofunc = infofunc.loc[sorted_idx]
    center_trace = create_edge_center_trace(line_traces, color=color, infofunc=infofunc,
                                            use_line_geodata=use_line_geodata)
    line_traces.append(center_trace)
    return line_traces
def create_trafo_trace(net, trafos=None, color='green', width=5, infofunc=None, cmap=None,
                       trace_name='trafos', cmin=None, cmax=None, cmap_vals=None, use_line_geodata=None):
    """
    Creates a plotly trace of pandapower trafos.

    INPUT:
        **net** (pandapowerNet) - The pandapower network

    OPTIONAL:
        **trafos** (list, None) - The trafos for which the collections are created.
        If None, all trafos in the network are considered.

        **width** (int, 5) - line width

        **infofunc** (pd.Series, None) - hoverinfo for trafo elements. Indices should correspond to the pandapower element indices

        **trace_name** (String, "trafos") - name of the trace which will appear in the legend

        **color** (String, "green") - color of lines in the trace

        **cmap** (String, None) - name of a colormap which exists within plotly; True selects
        the default 'jet' (alternatives: Greys, YlGnBu, Greens, YlOrRd, Bluered, RdBu, Reds,
        Blues, Picnic, Rainbow, Portland, Jet, Hot, Blackbody, Earth, Electric, Viridis)

        **cmap_vals** (list, None) - values used for coloring using colormap

        **cmin** (float, None) - colorbar range minimum (defaults to 0)

        **cmax** (float, None) - colorbar range maximum (defaults to 100)
    """
    color = get_plotly_color(color)
    # defining trafos to be plotted
    trafos = net.trafo.index.tolist() if trafos is None else list(trafos)
    if len(trafos) == 0:
        return []
    # only trafos whose hv and lv buses both have geodata can be plotted
    trafo_buses_with_geodata = net.trafo.hv_bus.isin(net.bus_geodata.index) & \
                               net.trafo.lv_bus.isin(net.bus_geodata.index)
    trafos_mask = net.trafo.index.isin(trafos)
    trafos_to_plot = net.trafo[trafo_buses_with_geodata & trafos_mask]
    if infofunc is not None:
        if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(trafos):
            infofunc = pd.Series(index=trafos, data=infofunc)
        assert isinstance(infofunc, pd.Series), \
            "infofunc should be a pandas series with the net.trafo.index to the infofunc contents"
        infofunc = infofunc.loc[trafos_to_plot.index]
    cmap_colors = []
    if cmap is not None:
        # BUGFIX: "'jet' if cmap is None" could never trigger inside this branch;
        # follow the create_line_trace convention of mapping cmap=True to 'jet'
        cmap = 'jet' if cmap is True else cmap
        cmin = 0 if cmin is None else cmin
        # BUGFIX: the default for cmax was guarded by "cmin is None" (typo)
        cmax = 100 if cmax is None else cmax
        if cmap_vals is None:
            if net.res_trafo.shape[0] == 0:
                logger.error("There are no power flow results for trafos which are default for trafo colormap coloring..."
                             "set cmap_vals input argument if you want colormap according to some specific values...")
            cmap_vals = net.res_trafo.loc[trafos_to_plot.index, 'loading_percent'].values
        cmap_colors = get_plotly_cmap(cmap_vals, cmap_name=cmap, cmin=cmin, cmax=cmax)
    trafo_traces = []
    for col_i, (idx, trafo) in enumerate(trafos_to_plot.iterrows()):
        if cmap is not None:
            color = cmap_colors[col_i]
        trafo_trace = dict(type='scatter', text=[], line=Line(width=width, color=color),
                           hoverinfo='text', mode='lines', name=trace_name)
        trafo_trace['text'] = trafo['name'] if infofunc is None else infofunc.loc[idx]
        # each trafo is drawn as [hv_bus, center, lv_bus]; the center feeds the hover trace
        from_bus = net.bus_geodata.loc[trafo.hv_bus, 'x']
        to_bus = net.bus_geodata.loc[trafo.lv_bus, 'x']
        trafo_trace['x'] = [from_bus, (from_bus + to_bus) / 2, to_bus]
        from_bus = net.bus_geodata.loc[trafo.hv_bus, 'y']
        to_bus = net.bus_geodata.loc[trafo.lv_bus, 'y']
        trafo_trace['y'] = [from_bus, (from_bus + to_bus) / 2, to_bus]
        trafo_traces.append(trafo_trace)
    center_trace = create_edge_center_trace(trafo_traces, color=color, infofunc=infofunc,
                                            use_line_geodata=use_line_geodata)
    trafo_traces.append(center_trace)
    return trafo_traces
def draw_traces(traces, on_map=False, map_style='basic', showlegend=True, figsize=1,
                aspectratio='auto', filename="temp-plot.html"):
    """
    Plots all the traces (which can be created using :func:`create_bus_trace`,
    :func:`create_line_trace`, :func:`create_trafo_trace`) to plotly
    (see https://plot.ly/python/).

    INPUT:
        **traces** - list of dicts which correspond to plotly traces
        generated using: `create_bus_trace`, `create_line_trace`, `create_trafo_trace`

    OPTIONAL:
        **on_map** (bool, False) - enables using mapbox plot in plotly

        **map_style** (str, 'basic') - enables using mapbox plot in plotly
            - 'streets'
            - 'bright'
            - 'light'
            - 'dark'
            - 'satellite'

        **showlegend** (bool, 'True') - enables legend display

        **figsize** (float, 1) - aspectratio is multiplied by it in order to get final image size

        **aspectratio** (tuple, 'auto') - when 'auto' it preserves original aspect ratio of the
        network geodata; any custom aspectratio can be given as a tuple, e.g. (1.2, 1)

        **filename** (str, "temp-plot.html") - plots to a html file called filename
    """
    if on_map:
        # verify that geodata are real lat/lon coordinates before attempting a map plot
        try:
            on_map = _on_map_test(traces[0]['x'][0], traces[0]['y'][0])
        except Exception:
            logger.warning("Test if geo-data are in lat/long cannot be performed using geopy -> "
                           "eventual plot errors are possible.")
        if on_map is False:
            logger.warning("Existing geodata are not real lat/lon geographical coordinates. -> "
                           "plot on maps is not possible.\n"
                           "Use geo_data_to_latlong(net, projection) to transform geodata from specific projection.")
    if on_map:
        # change traces for mapbox:
        # change trace_type to scattermapbox and rename x to lat and y to lon
        for trace in traces:
            trace['lat'] = trace.pop('x')
            trace['lon'] = trace.pop('y')
            trace['type'] = 'scattermapbox'
            if "line" in trace and isinstance(trace["line"], Line):
                # scattermapbox lines do not support dash for some reason,
                # make it a red line instead
                if "dash" in trace["line"]._props:
                    _prps = dict(trace["line"]._props)
                    _prps.pop("dash", None)
                    _prps["color"] = "red"
                    trace["line"] = scmLine(_prps)
                else:
                    trace["line"] = scmLine(dict(trace["line"]._props))
            elif "marker" in trace and isinstance(trace["marker"], Marker):
                trace["marker"] = scmMarker(trace["marker"]._props)
    # setting Figure object
    # BUGFIX: string comparison must use '==', not identity ('is'); identity of
    # string literals is an implementation detail and raises SyntaxWarning on 3.8+
    fig = Figure(data=traces,  # edge_trace
                 layout=Layout(
                     titlefont=dict(size=16),
                     showlegend=showlegend,
                     autosize=True if aspectratio == 'auto' else False,
                     hovermode='closest',
                     margin=dict(b=5, l=5, r=5, t=5),
                     xaxis=XAxis(showgrid=False, zeroline=False, showticklabels=False),
                     yaxis=YAxis(showgrid=False, zeroline=False, showticklabels=False),
                 ), )
    if on_map:
        # a mapbox access token is required for any map plot
        try:
            mapbox_access_token = _get_mapbox_token()
        except Exception:
            logger.exception('mapbox token required for map plots. '
                             'Get Mapbox token by signing in to https://www.mapbox.com/.\n'
                             'After getting a token, set it to pandapower using:\n'
                             'pandapower.plotting.plotly.mapbox_plot.set_mapbox_token(\'<token>\')')
            raise MapboxTokenMissing
        fig['layout']['mapbox'] = dict(accesstoken=mapbox_access_token,
                                       bearing=0,
                                       center=dict(lat=pd.Series(traces[0]['lat']).dropna().mean(),
                                                   lon=pd.Series(traces[0]['lon']).dropna().mean()),
                                       style=map_style,
                                       pitch=0,
                                       zoom=11)
    # default aspectratio: if on_map use auto, else use 'original'
    aspectratio = 'original' if not on_map and aspectratio == 'auto' else aspectratio
    if aspectratio != 'auto':
        if aspectratio == 'original':
            # TODO improve this workaround for getting original aspectratio
            xs = []
            ys = []
            for trace in traces:
                xs += trace['x']
                ys += trace['y']
            x_dropna = pd.Series(xs).dropna()
            y_dropna = pd.Series(ys).dropna()
            xrange = x_dropna.max() - x_dropna.min()
            yrange = y_dropna.max() - y_dropna.min()
            ratio = xrange / yrange
            if ratio < 1:
                aspectratio = (ratio, 1.)
            else:
                aspectratio = (1., 1 / ratio)
        aspectratio = np.array(aspectratio) / max(aspectratio)
        fig['layout']['width'], fig['layout']['height'] = ([ar * figsize * 700 for ar in aspectratio])
    # check if called from ipynb or not in order to consider appropriate plot function
    if _in_ipynb():
        from plotly.offline import init_notebook_mode, iplot as plot
        init_notebook_mode()
    else:
        from plotly.offline import plot as plot
    plot(fig, filename=filename)
| 41.737313 | 134 | 0.601738 |
import math
import numpy as np
import pandas as pd
from packaging import version
from collections.abc import Iterable
from pandapower.plotting.plotly.get_colors import get_plotly_color, get_plotly_cmap
from pandapower.plotting.plotly.mapbox_plot import _on_map_test, _get_mapbox_token, MapboxTokenMissing
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
try:
from plotly import __version__ as plotly_version
from plotly.graph_objs.scatter.marker import ColorBar
from plotly.graph_objs import Figure, Layout
from plotly.graph_objs.layout import XAxis, YAxis
from plotly.graph_objs.scatter import Line, Marker
from plotly.graph_objs.scattermapbox import Line as scmLine
from plotly.graph_objs.scattermapbox import Marker as scmMarker
except ImportError:
logger.info("Failed to import plotly - interactive plotting will not be available")
def version_check():
    """Raise UserWarning if the installed plotly version is older than 3.1.1."""
    if version.parse(plotly_version) < version.parse("3.1.1"):
        # BUGFIX: the message previously formatted the undefined name __version__
        # (it is imported as plotly_version), causing a NameError when triggered
        raise UserWarning("Your plotly version {} is no longer supported.\r\n"
                          "Please upgrade your python-plotly installation, "
                          "e.g., via pip install --upgrade plotly".format(plotly_version))
def _in_ipynb():
import __main__ as main
return not hasattr(main, '__file__')
def sum_line_length(pts):
    """Total Euclidean length of the polyline through the (x, y) points *pts*."""
    segments = zip(pts[:-1], pts[1:])
    return sum(math.hypot(a[0] - b[0], a[1] - b[1]) for a, b in segments)
def get_line_neutral(coord):
    """Point halfway (by arc length) along the polyline *coord*.

    A single point is returned as-is; otherwise a list [x, y] with the
    midpoint of the segment that contains the half-length position.
    """
    if len(coord) == 1:
        return coord[0]
    halfway = sum_line_length(coord) / 2.0
    covered, i = 0.0, 0
    # grow the prefix until its length reaches the halfway mark
    while covered < halfway:
        i += 1
        covered = sum_line_length(coord[:i])
    first, second = coord[i - 2], coord[i - 1]
    return [(p + q) / 2.0 for p, q in zip(first, second)]
def create_edge_center_trace(line_trace, size=1, patch_type="circle", color="white", infofunc=None,
                             trace_name='edge_center', use_line_geodata=False):
    """Build a scatter trace of markers at line/trafo centers.

    The markers act as hover targets for edge traces; *infofunc* supplies the
    hover text. With line geodata, centers are computed along the polylines;
    otherwise the pre-computed center points (offset 1 of each group of 4 in
    the first trace) are reused.
    """
    trace = dict(type='scatter', text=[], mode='markers', hoverinfo='text', name=trace_name,
                 marker=dict(color=color, size=size, symbol=patch_type))
    if use_line_geodata:
        xs, ys = [], []
        for lt in line_trace:
            mid = get_line_neutral(list(zip(lt["x"], lt["y"])))
            xs.append(mid[0])
            ys.append(mid[1])
        trace['x'], trace['y'] = (xs, ys)
    else:
        trace['x'], trace['y'] = (line_trace[0]["x"][1::4], line_trace[0]["y"][1::4])
    trace['text'] = infofunc
    return trace
def create_bus_trace(net, buses=None, size=5, patch_type="circle", color="blue", infofunc=None,
                     trace_name='buses', legendgroup=None, cmap=None, cmap_vals=None,
                     cbar_title=None, cmin=None, cmax=None, cpos=1.0, colormap_column="vm_pu"):
    """Create a plotly scatter trace of pandapower buses.

    Buses without geodata are silently skipped. If *cmap* is given (True maps
    to 'Jet'), buses are colored by *cmap_vals*, defaulting to
    net.res_bus[colormap_column]. Returns a one-element list with the trace
    dict.
    """
    color = get_plotly_color(color)
    bus_trace = dict(type='scatter', text=[], mode='markers', hoverinfo='text', name=trace_name,
                     marker=dict(color=color, size=size, symbol=patch_type))
    buses = net.bus.index.tolist() if buses is None else list(buses)
    # only buses that actually have geodata can be plotted
    bus_plot_index = [b for b in buses if b in list(set(buses) & set(net.bus_geodata.index))]
    bus_trace['x'], bus_trace['y'] = (net.bus_geodata.loc[bus_plot_index, 'x'].tolist(),
                                      net.bus_geodata.loc[bus_plot_index, 'y'].tolist())
    if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(buses):
        infofunc = pd.Series(index=buses, data=infofunc)
    bus_trace['text'] = net.bus.loc[bus_plot_index, 'name'] if infofunc is None else infofunc.loc[buses]
    if legendgroup:
        bus_trace['legendgroup'] = legendgroup
    if cmap is not None:
        cmap = 'Jet' if cmap is True else cmap
        if cmap_vals is None:
            # BUGFIX: the guard previously checked net.res_line although the default
            # coloring values come from net.res_bus
            if net.res_bus.shape[0] == 0:
                logger.error("There are no power flow results for buses voltage magnitudes which are default for bus "
                             "colormap coloring..."
                             "set cmap_vals input argument if you want colormap according to some specific values...")
            cmap_vals = net.res_bus.loc[bus_plot_index, colormap_column].values
        # use "is not None" so that 0 is an acceptable colorbar bound
        cmin = cmin if cmin is not None else cmap_vals.min()
        cmax = cmax if cmax is not None else cmap_vals.max()
        bus_trace['marker'] = Marker(size=size,
                                     color=cmap_vals, cmin=cmin, cmax=cmax,
                                     colorscale=cmap,
                                     colorbar=ColorBar(thickness=10,
                                                       x=cpos),
                                     symbol=patch_type
                                     )
        if cbar_title:
            bus_trace['marker']['colorbar']['title'] = cbar_title
            bus_trace['marker']['colorbar']['title']['side'] = 'right'
    return [bus_trace]
def _get_line_geodata_plotly(net, lines, use_line_geodata):
xs = []
ys = []
if use_line_geodata:
for line_ind, _ in lines.iterrows():
line_coords = net.line_geodata.loc[line_ind, 'coords']
linex, liney = list(zip(*line_coords))
xs += linex
xs += [None]
ys += liney
ys += [None]
else:
from_bus = net.bus_geodata.loc[lines.from_bus, 'x'].tolist()
to_bus = net.bus_geodata.loc[lines.to_bus, 'x'].tolist()
center = (np.array(from_bus) + np.array(to_bus)) / 2
none_list = [None] * len(from_bus)
xs = np.array([from_bus, center, to_bus, none_list]).T.flatten().tolist()
from_bus = net.bus_geodata.loc[lines.from_bus, 'y'].tolist()
to_bus = net.bus_geodata.loc[lines.to_bus, 'y'].tolist()
center = (np.array(from_bus) + np.array(to_bus)) / 2
none_list = [None] * len(from_bus)
ys = np.array([from_bus, center, to_bus, none_list]).T.flatten().tolist()
return xs[:-1], ys[:-1]
def create_line_trace(net, lines=None, use_line_geodata=True, respect_switches=False, width=1.0,
                      color='grey', infofunc=None, trace_name='lines', legendgroup=None,
                      cmap=None, cbar_title=None, show_colorbar=True, cmap_vals=None, cmin=None,
                      cmax=None, cpos=1.1):
    """Create plotly traces of pandapower lines.

    Lines disconnected by open switches (respect_switches=True) are drawn
    dashed. If *cmap* is given (True maps to 'jet'), lines are colored by
    *cmap_vals*, defaulting to net.res_line.loading_percent, and an invisible
    marker supplies the colorbar. Returns a list of trace dicts, the last one
    being the edge-center hover trace.
    """
    color = get_plotly_color(color)
    lines = net.line.index.tolist() if lines is None else list(lines)
    if len(lines) == 0:
        return []
    if infofunc is not None:
        # accept a plain iterable of hover strings and align it with the line index
        if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(lines):
            infofunc = pd.Series(index=lines, data=infofunc)
        if len(infofunc) != len(lines) and len(infofunc) != len(net.line):
            raise UserWarning("Different amount of hover info than lines to plot")
        assert isinstance(infofunc, pd.Series), \
            "infofunc should be a pandas series with the net.line.index to the infofunc contents"
    no_go_lines = set()
    if respect_switches:
        no_go_lines = set(lines) & set(net.switch.element[(net.switch.et == "l") & (net.switch.closed == 0)])
    lines_to_plot = net.line.loc[set(net.line.index) & (set(lines) - no_go_lines)]
    no_go_lines_to_plot = None
    use_line_geodata = use_line_geodata if net.line_geodata.shape[0] > 0 else False
    if use_line_geodata:
        lines_to_plot = lines_to_plot.loc[set(lines_to_plot.index) & set(net.line_geodata.index)]
    else:
        # only lines whose end buses both have geodata can be plotted
        lines_with_geodata = lines_to_plot.from_bus.isin(net.bus_geodata.index) & \
                             lines_to_plot.to_bus.isin(net.bus_geodata.index)
        lines_to_plot = lines_to_plot.loc[lines_with_geodata]
    cmap_lines = None
    if cmap is not None:
        # each line needs its own scatter object: plotly does not support a
        # colormap for line objects yet
        cmap = 'jet' if cmap is True else cmap
        if cmap_vals is not None:
            if not isinstance(cmap_vals, np.ndarray):
                cmap_vals = np.asarray(cmap_vals)
        else:
            if net.res_line.shape[0] == 0:
                logger.error("There are no power flow results for lines which are default for line colormap coloring..."
                             "set cmap_vals input argument if you want colormap according to some specific values...")
            cmap_vals = net.res_line.loc[lines_to_plot.index, 'loading_percent'].values
        cmap_lines = get_plotly_cmap(cmap_vals, cmap_name=cmap, cmin=cmin, cmax=cmax)
        if len(cmap_lines) == len(net.line):
            # some lines are not plotted although cmap_value were provided for all lines
            line_idx_map = dict(zip(net.line.loc[lines].index.tolist(), range(len(lines))))
            cmap_lines = [cmap_lines[line_idx_map[idx]] for idx in lines_to_plot.index]
        else:
            assert len(cmap_lines) == len(lines_to_plot), \
                "Different amounts of cmap values and lines to plot were supplied"
    line_traces = []
    for col_i, (idx, line) in enumerate(lines_to_plot.iterrows()):
        line_color = color
        line_info = line['name']
        if cmap is not None:
            try:
                line_color = cmap_lines[col_i]
                line_info = line['name'] if infofunc is None else infofunc.loc[idx]
            except IndexError:
                logger.warning("No color and info for line {:d} (name: {}) available".format(idx, line['name']))
        line_trace = dict(type='scatter', text=[], hoverinfo='text', mode='lines', name=trace_name,
                          line=Line(width=width, color=color))
        line_trace['x'], line_trace['y'] = _get_line_geodata_plotly(net, lines_to_plot.loc[idx:idx], use_line_geodata)
        line_trace['line']['color'] = line_color
        line_trace['text'] = line_info
        # BUGFIX: legendgroup was previously only applied to disconnected lines
        if legendgroup:
            line_trace['legendgroup'] = legendgroup
        line_traces.append(line_trace)
    if show_colorbar and cmap is not None:
        # use "is not None" so that 0 is an acceptable colorbar bound
        cmin = cmin if cmin is not None else cmap_vals.min()
        cmax = cmax if cmax is not None else cmap_vals.max()
        try:
            # BUGFIX: compare strings with '==' instead of identity ('is')
            cbar_cmap_name = 'Jet' if cmap == 'jet' else cmap
            # workaround to get a colorbar for lines (an invisible node is added)
            lines_cbar = dict(type='scatter', x=[net.bus_geodata.x[0]], y=[net.bus_geodata.y[0]], mode='markers',
                              marker=Marker(size=0, cmin=cmin, cmax=cmax,
                                            color='rgb(255,255,255)',
                                            colorscale=cbar_cmap_name,
                                            colorbar=ColorBar(thickness=10,
                                                              x=cpos),
                                            ))
            if cbar_title:
                lines_cbar['marker']['colorbar']['title'] = cbar_title
                lines_cbar['marker']['colorbar']['title']['side'] = 'right'
            line_traces.append(lines_cbar)
        except Exception:
            # colorbar is decorative only: never fail the whole plot for it
            pass
    if len(no_go_lines) > 0:
        no_go_lines_to_plot = net.line.loc[no_go_lines]
        for idx, line in no_go_lines_to_plot.iterrows():
            line_color = color
            line_trace = dict(type='scatter',
                              text=[], hoverinfo='text', mode='lines', name='disconnected lines',
                              line=Line(width=width / 2, color='grey', dash='dot'))
            line_trace['x'], line_trace['y'] = _get_line_geodata_plotly(net, no_go_lines_to_plot.loc[idx:idx], use_line_geodata)
            line_trace['line']['color'] = line_color
            try:
                line_trace['text'] = infofunc.loc[idx]
            # BUGFIX: infofunc may be None here, which raised an uncaught AttributeError
            except (KeyError, IndexError, AttributeError):
                line_trace["text"] = line['name']
            line_traces.append(line_trace)
            if legendgroup:
                line_trace['legendgroup'] = legendgroup
    # sort infofunc so that it is in the order lines_to_plot + no_go_lines_to_plot
    if infofunc is not None:
        if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(net.line):
            infofunc = pd.Series(index=net.line.index, data=infofunc)
        assert isinstance(infofunc, pd.Series), \
            "infofunc should be a pandas series with the net.line.index to the infofunc contents"
        sorted_idx = lines_to_plot.index.tolist()
        if no_go_lines_to_plot is not None:
            sorted_idx += no_go_lines_to_plot.index.tolist()
        infofunc = infofunc.loc[sorted_idx]
    center_trace = create_edge_center_trace(line_traces, color=color, infofunc=infofunc,
                                            use_line_geodata=use_line_geodata)
    line_traces.append(center_trace)
    return line_traces
def create_trafo_trace(net, trafos=None, color='green', width=5, infofunc=None, cmap=None,
                       trace_name='trafos', cmin=None, cmax=None, cmap_vals=None, use_line_geodata=None):
    """Create plotly traces of pandapower trafos.

    Trafos whose hv or lv bus lacks geodata are skipped. If *cmap* is given
    (True maps to 'jet'), trafos are colored by *cmap_vals*, defaulting to
    net.res_trafo.loading_percent with bounds cmin=0, cmax=100. Returns a list
    of trace dicts, the last one being the edge-center hover trace.
    """
    color = get_plotly_color(color)
    trafos = net.trafo.index.tolist() if trafos is None else list(trafos)
    if len(trafos) == 0:
        return []
    trafo_buses_with_geodata = net.trafo.hv_bus.isin(net.bus_geodata.index) & \
                               net.trafo.lv_bus.isin(net.bus_geodata.index)
    trafos_mask = net.trafo.index.isin(trafos)
    trafos_to_plot = net.trafo[trafo_buses_with_geodata & trafos_mask]
    if infofunc is not None:
        if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(trafos):
            infofunc = pd.Series(index=trafos, data=infofunc)
        assert isinstance(infofunc, pd.Series), \
            "infofunc should be a pandas series with the net.trafo.index to the infofunc contents"
        infofunc = infofunc.loc[trafos_to_plot.index]
    cmap_colors = []
    if cmap is not None:
        # BUGFIX: "'jet' if cmap is None" could never trigger inside this branch;
        # follow the create_line_trace convention of mapping cmap=True to 'jet'
        cmap = 'jet' if cmap is True else cmap
        cmin = 0 if cmin is None else cmin
        # BUGFIX: the default for cmax was guarded by "cmin is None" (typo)
        cmax = 100 if cmax is None else cmax
        if cmap_vals is None:
            if net.res_trafo.shape[0] == 0:
                logger.error("There are no power flow results for trafos which are default for trafo colormap coloring..."
                             "set cmap_vals input argument if you want colormap according to some specific values...")
            cmap_vals = net.res_trafo.loc[trafos_to_plot.index, 'loading_percent'].values
        cmap_colors = get_plotly_cmap(cmap_vals, cmap_name=cmap, cmin=cmin, cmax=cmax)
    trafo_traces = []
    for col_i, (idx, trafo) in enumerate(trafos_to_plot.iterrows()):
        if cmap is not None:
            color = cmap_colors[col_i]
        trafo_trace = dict(type='scatter', text=[], line=Line(width=width, color=color),
                           hoverinfo='text', mode='lines', name=trace_name)
        trafo_trace['text'] = trafo['name'] if infofunc is None else infofunc.loc[idx]
        # each trafo is drawn as [hv_bus, center, lv_bus]; the center feeds the hover trace
        from_bus = net.bus_geodata.loc[trafo.hv_bus, 'x']
        to_bus = net.bus_geodata.loc[trafo.lv_bus, 'x']
        trafo_trace['x'] = [from_bus, (from_bus + to_bus) / 2, to_bus]
        from_bus = net.bus_geodata.loc[trafo.hv_bus, 'y']
        to_bus = net.bus_geodata.loc[trafo.lv_bus, 'y']
        trafo_trace['y'] = [from_bus, (from_bus + to_bus) / 2, to_bus]
        trafo_traces.append(trafo_trace)
    center_trace = create_edge_center_trace(trafo_traces, color=color, infofunc=infofunc,
                                            use_line_geodata=use_line_geodata)
    trafo_traces.append(center_trace)
    return trafo_traces
def draw_traces(traces, on_map=False, map_style='basic', showlegend=True, figsize=1,
                aspectratio='auto', filename="temp-plot.html"):
    """Render plotly traces, either as a plain figure or on a mapbox map.

    Parameters
    ----------
    traces : list of dict
        plotly trace dicts (as produced by the create_*_trace helpers).
    on_map : bool
        if True, attempt to plot on a mapbox map; requires lat/lon geodata
        and a mapbox access token.
    map_style : str
        mapbox style name ('basic', 'streets', 'dark', ...).
    showlegend : bool
        whether to show the plotly legend.
    figsize : float
        scaling factor applied to the figure width/height.
    aspectratio : 'auto' | 'original' | tuple
        'auto' lets plotly autosize; 'original' derives the ratio from the
        geodata extents; a tuple is used as-is.
    filename : str
        output html file used when not running inside a notebook.
    """
    if on_map:
        # verify the geodata look like real lat/lon coordinates
        try:
            on_map = _on_map_test(traces[0]['x'][0], traces[0]['y'][0])
        # fix: bare `except:` also swallowed KeyboardInterrupt/SystemExit
        except Exception:
            logger.warning("Test if geo-data are in lat/long cannot be performed using geopy -> "
                           "eventual plot errors are possible.")
        if on_map is False:
            logger.warning("Existing geodata are not real lat/lon geographical coordinates. -> "
                           "plot on maps is not possible.\n"
                           "Use geo_data_to_latlong(net, projection) to transform geodata from specific projection.")

    if on_map:
        # change traces for mapbox:
        # rename x/y to lat/lon and switch the trace type to scattermapbox
        for trace in traces:
            trace['lat'] = trace.pop('x')
            trace['lon'] = trace.pop('y')
            trace['type'] = 'scattermapbox'
            if "line" in trace and isinstance(trace["line"], Line):
                # scattermapbox lines do not support dash for some reason,
                # make it a red line instead
                if "dash" in trace["line"]._props:
                    _prps = dict(trace["line"]._props)
                    _prps.pop("dash", None)
                    _prps["color"] = "red"
                    trace["line"] = scmLine(_prps)
                else:
                    trace["line"] = scmLine(dict(trace["line"]._props))
            elif "marker" in trace and isinstance(trace["marker"], Marker):
                trace["marker"] = scmMarker(trace["marker"]._props)

    # setting Figure object
    # fix: the original compared strings with `is`/`is not`, which relies on
    # CPython string interning and raises a SyntaxWarning on modern Python;
    # `==`/`!=` is the correct comparison throughout this function.
    fig = Figure(data=traces,  # edge_trace
                 layout=Layout(
                     titlefont=dict(size=16),
                     showlegend=showlegend,
                     autosize=True if aspectratio == 'auto' else False,
                     hovermode='closest',
                     margin=dict(b=5, l=5, r=5, t=5),
                     xaxis=XAxis(showgrid=False, zeroline=False, showticklabels=False),
                     yaxis=YAxis(showgrid=False, zeroline=False, showticklabels=False),
                 ), )

    # check if geodata are real geographical lat/lon coordinates using geopy
    if on_map:
        # a mapbox access token is mandatory for map plots
        try:
            mapbox_access_token = _get_mapbox_token()
        except Exception:
            logger.exception('mapbox token required for map plots. '
                             'Get Mapbox token by signing in to https://www.mapbox.com/.\n'
                             'After getting a token, set it to pandapower using:\n'
                             'pandapower.plotting.plotly.mapbox_plot.set_mapbox_token(\'<token>\')')
            raise MapboxTokenMissing
        fig['layout']['mapbox'] = dict(accesstoken=mapbox_access_token,
                                       bearing=0,
                                       center=dict(lat=pd.Series(traces[0]['lat']).dropna().mean(),
                                                   lon=pd.Series(traces[0]['lon']).dropna().mean()),
                                       style=map_style,
                                       pitch=0,
                                       zoom=11)

    # default aspectratio: if on_map use auto, else use 'original'
    aspectratio = 'original' if not on_map and aspectratio == 'auto' else aspectratio

    if aspectratio != 'auto':
        if aspectratio == 'original':
            # TODO improve this workaround for getting original aspectratio
            xs = []
            ys = []
            for trace in traces:
                xs += trace['x']
                ys += trace['y']
            x_dropna = pd.Series(xs).dropna()
            y_dropna = pd.Series(ys).dropna()
            xrange = x_dropna.max() - x_dropna.min()
            yrange = y_dropna.max() - y_dropna.min()
            ratio = xrange / yrange
            if ratio < 1:
                aspectratio = (ratio, 1.)
            else:
                aspectratio = (1., 1 / ratio)

        aspectratio = np.array(aspectratio) / max(aspectratio)
        fig['layout']['width'], fig['layout']['height'] = ([ar * figsize * 700 for ar in aspectratio])

    # check if called from ipynb or not in order to consider appropriate plot function
    if _in_ipynb():
        from plotly.offline import init_notebook_mode, iplot as plot
        init_notebook_mode()
    else:
        from plotly.offline import plot as plot

    plot(fig, filename=filename)
| true | true |
f7116ff78296699e86d5db80a36bfce9887bbf28 | 41,747 | py | Python | lifelines/tests/utils/test_utils.py | stefan-de/lifelines | 519bd3abe6051bd9fb5da0dfffce24ab86171f3f | [
"MIT"
] | 1 | 2020-11-18T19:54:09.000Z | 2020-11-18T19:54:09.000Z | lifelines/tests/utils/test_utils.py | stefan-de/lifelines | 519bd3abe6051bd9fb5da0dfffce24ab86171f3f | [
"MIT"
] | null | null | null | lifelines/tests/utils/test_utils.py | stefan-de/lifelines | 519bd3abe6051bd9fb5da0dfffce24ab86171f3f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
import os
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import numpy.testing as npt
from numpy.linalg import norm, lstsq
from numpy.random import randn
from flaky import flaky
from lifelines import CoxPHFitter, WeibullAFTFitter, KaplanMeierFitter, ExponentialFitter
from lifelines.datasets import load_regression_dataset, load_larynx, load_waltons, load_rossi
from lifelines import utils
from lifelines import exceptions
from lifelines.utils.sklearn_adapter import sklearn_adapter
from lifelines.utils.safe_exp import safe_exp
def test_format_p_values():
    """format_p_value should round to the requested precision and clip tiny values."""
    fmt2 = utils.format_p_value(2)
    fmt3 = utils.format_p_value(3)
    assert fmt2(0.004) == "<0.005"
    assert fmt3(0.004) == "0.004"
    assert fmt3(0.000) == "<0.0005"
    assert fmt3(0.005) == "0.005"
    assert fmt3(0.2111) == "0.211"
    assert fmt3(0.2119) == "0.212"
def test_ridge_regression_with_penalty_is_less_than_without_penalty():
    """Adding any ridge penalty should shrink the coefficient norm."""
    X, Y = randn(2, 2), randn(2)
    unpenalized = norm(utils.ridge_regression(X, Y)[0])
    assert norm(utils.ridge_regression(X, Y, c1=2.0)[0]) <= unpenalized
    assert norm(utils.ridge_regression(X, Y, c1=1.0, c2=1.0)[0]) <= unpenalized
def test_ridge_regression_with_extreme_c1_penalty_equals_close_to_zero_vector():
    """A huge c1 penalty (with c2=0) should drive the solution towards zero."""
    X, Y = randn(2, 2), randn(2)
    beta, _ = utils.ridge_regression(X, Y, 10e8, 0.0, np.ones(2))
    assert norm(beta) < 10e-4
def test_ridge_regression_with_extreme_c2_penalty_equals_close_to_offset():
    """A huge c2 penalty should pull the solution onto the offset vector."""
    offset = np.ones(2)
    X, Y = randn(2, 2), randn(2)
    beta = utils.ridge_regression(X, Y, 0.0, 10e8, offset)[0]
    assert norm(beta - offset) < 10e-4
def test_lstsq_returns_similar_values_to_ridge_regression():
    """With no penalty, ridge regression should agree with ordinary least squares."""
    X, Y = randn(2, 2), randn(2)
    ols_solution = lstsq(X, Y, rcond=None)[0]
    assert norm(utils.ridge_regression(X, Y)[0] - ols_solution) < 10e-4
def test_lstsq_returns_correct_values():
    """Check the unpenalized solution and hat matrix against known values."""
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
    y = [1, 1, 1, -1, -1]
    expected_beta = [-0.98684211, -0.07894737]
    expected_v = [
        [-0.03289474, -0.49342105, 0.06578947, 0.03289474, 0.49342105],
        [-0.30263158, 0.46052632, -0.39473684, 0.30263158, -0.46052632],
    ]
    beta, V = utils.ridge_regression(X, y)
    assert norm(beta - expected_beta) < 10e-4
    for row, expected_row in zip(V, expected_v):
        assert norm(row - expected_row) < 1e-4
def test_unnormalize():
    """unnormalize should invert normalize given the original mean and std."""
    df = load_larynx()
    mean, std = df.mean(0), df.std(0)
    roundtripped = utils.unnormalize(utils.normalize(df), mean, std)
    npt.assert_almost_equal(df.values, roundtripped.values)
def test_normalize():
    """Normalized columns should have zero mean and unit standard deviation."""
    df = load_larynx()
    _, n_cols = df.shape
    normalized = utils.normalize(df)
    npt.assert_almost_equal(normalized.mean(0).values, np.zeros(n_cols))
    npt.assert_almost_equal(normalized.std(0).values, np.ones(n_cols))
def test_median():
    """The median survival time of a linear survival curve is its midpoint."""
    survival_curve = pd.DataFrame(1 - np.linspace(0, 1, 1000))
    assert utils.median_survival_times(survival_curve) == 500
def test_median_accepts_series():
    """median_survival_times should accept a Series as well as a DataFrame."""
    survival_curve = pd.Series(1 - np.linspace(0, 1, 1000))
    assert utils.median_survival_times(survival_curve) == 500
def test_qth_survival_times_with_varying_datatype_inputs():
    """qth_survival_times should accept lists, arrays, Series and DataFrames alike."""
    values = [1.0, 0.75, 0.5, 0.25, 0.0]
    index = [10, 20, 30, 40, 50]
    q = 0.5
    # containers without an explicit index fall back to positional labels
    assert utils.qth_survival_times(q, values) == 2
    assert utils.qth_survival_times(q, np.array(values)) == 2
    assert utils.qth_survival_times(q, pd.DataFrame(values)) == 2
    assert utils.qth_survival_times(q, pd.Series(values)) == 2
    # containers carrying an index report the indexed time
    assert utils.qth_survival_times(q, pd.DataFrame(values, index=index)) == 30
    assert utils.qth_survival_times(q, pd.Series(values, index=index)) == 30
def test_qth_survival_times_multi_dim_input():
    """Quantile lookup should work column-wise on a multi-column DataFrame."""
    sf = np.linspace(1, 0, 50)
    curves = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
    medians = utils.qth_survival_times(0.5, curves)
    assert medians["sf"].loc[0.5] == 25
    assert medians["sf**2"].loc[0.5] == 15
def test_qth_survival_time_returns_inf():
    """If the curve never drops below q, the quantile time is infinite."""
    curve = pd.Series([1.0, 0.7, 0.6])
    assert utils.qth_survival_time(0.5, curve) == np.inf
def test_qth_survival_time_accepts_a_model():
    """A fitted lifelines model may be passed directly instead of a curve."""
    model = KaplanMeierFitter().fit([1.0, 0.7, 0.6])
    assert utils.qth_survival_time(0.8, model) > 0
def test_qth_survival_time_with_dataframe():
    """Single-column DataFrames are accepted; multi-column ones must raise."""
    no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
    with_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
    assert utils.qth_survival_time(0.5, no_index) == 2
    assert utils.qth_survival_time(0.5, with_index) == 30
    with pytest.raises(ValueError):
        utils.qth_survival_time(0.5, pd.DataFrame([[1, 2], [3, 4]]))
def test_qth_survival_times_with_multivariate_q():
    """A vector of quantiles should yield one row per quantile."""
    sf = np.linspace(1, 0, 50)
    curves = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
    both = utils.qth_survival_times([0.2, 0.5], curves)
    assert_frame_equal(both, pd.DataFrame([[40, 28], [25, 15]], index=[0.2, 0.5], columns=["sf", "sf**2"]))
    single_col = utils.qth_survival_times([0.2, 0.5], curves["sf"])
    assert_frame_equal(single_col, pd.DataFrame([40, 25], index=[0.2, 0.5], columns=["sf"]))
    assert_frame_equal(utils.qth_survival_times(0.5, curves), pd.DataFrame([[25, 15]], index=[0.5], columns=["sf", "sf**2"]))
    assert utils.qth_survival_times(0.5, curves["sf"]) == 25
def test_qth_survival_times_with_duplicate_q_returns_valid_index_and_shape():
    """Duplicate quantiles must be preserved in both the shape and the index."""
    curve = pd.DataFrame(np.linspace(1, 0, 50))
    q = pd.Series([0.5, 0.5, 0.2, 0.0, 0.0])
    result = utils.qth_survival_times(q, curve)
    assert result.shape[0] == len(q)
    assert result.index[0] == result.index[1]
    assert_series_equal(result.iloc[0], result.iloc[1])
    npt.assert_almost_equal(result.index.values, q.values)
def test_datetimes_to_durations_with_different_frequencies():
    """Durations should respect the requested frequency (days, years, hours)."""
    # days (the default)
    T, C = utils.datetimes_to_durations(
        ["2013-10-10 0:00:00", "2013-10-09", "2012-10-10"],
        ["2013-10-13", "2013-10-10 0:00:00", "2013-10-15"],
    )
    npt.assert_almost_equal(T, np.array([3, 1, 5 + 365]))
    npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
    # years
    T, C = utils.datetimes_to_durations(
        ["2013-10-10", "2013-10-09", "2012-10-10"], ["2013-10-13", "2013-10-10", "2013-10-15"], freq="Y"
    )
    npt.assert_almost_equal(T, np.array([0, 0, 1]))
    npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
    # hours
    T, C = utils.datetimes_to_durations(
        ["2013-10-10 17:00:00", "2013-10-09 0:00:00", "2013-10-10 23:00:00"],
        ["2013-10-10 18:00:00", "2013-10-10 0:00:00", "2013-10-11 2:00:00"],
        freq="h",
    )
    npt.assert_almost_equal(T, np.array([1, 24, 3]))
    npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
def test_datetimes_to_durations_will_handle_dates_above_fill_date():
    """End dates later than fill_date should be marked censored."""
    starts = ["2013-10-08", "2013-10-09", "2013-10-10"]
    ends = ["2013-10-10", "2013-10-12", "2013-10-15"]
    _, C = utils.datetimes_to_durations(starts, ends, freq="Y", fill_date="2013-10-12")
    npt.assert_almost_equal(C, np.array([1, 1, 0], dtype=bool))
def test_datetimes_to_durations_will_handle_dates_above_multi_fill_date():
    """fill_date may be a per-subject sequence of last-observation dates."""
    starts = ["2013-10-08", "2013-10-09", "2013-10-10"]
    ends = ["2013-10-10", None, None]
    last_seen = ["2013-10-10", "2013-10-12", "2013-10-14"]
    T, E = utils.datetimes_to_durations(starts, ends, freq="D", fill_date=last_seen)
    npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))
    npt.assert_almost_equal(T, np.array([2, 3, 4]))
# NOTE(review): this function is a byte-for-byte duplicate of the definition
# immediately above; the second `def` silently shadows the first, so pytest
# collects and runs only one of them. One copy should be removed or renamed.
def test_datetimes_to_durations_will_handle_dates_above_multi_fill_date():
    """A per-subject fill_date list censors subjects whose end date is missing."""
    start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
    end_date = ["2013-10-10", None, None]
    last_observation = ["2013-10-10", "2013-10-12", "2013-10-14"]
    T, E = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date=last_observation)
    npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))
    npt.assert_almost_equal(T, np.array([2, 3, 4]))
def test_datetimes_to_durations_censor():
    """None and empty-string end dates are treated as censored by default."""
    starts = ["2013-10-10", "2013-10-09", "2012-10-10"]
    _, C = utils.datetimes_to_durations(starts, ["2013-10-13", None, ""], freq="Y")
    npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_datetimes_to_durations_custom_censor():
    """na_values lets callers declare their own censoring sentinels."""
    starts = ["2013-10-10", "2013-10-09", "2012-10-10"]
    ends = ["2013-10-13", "NaT", ""]
    _, C = utils.datetimes_to_durations(starts, ends, freq="Y", na_values=["NaT", ""])
    npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_survival_events_from_table_no_ties():
    """A survival table round-trips to events when all event times are unique."""
    T = np.array([1, 2, 3, 4, 4, 5])
    C = np.array([1, 0, 1, 1, 0, 1])
    table = utils.survival_table_from_events(T, C)
    T_back, C_back, weights = utils.survival_events_from_table(table[["censored", "observed"]])
    npt.assert_array_equal(T, T_back)
    npt.assert_array_equal(C, C_back)
    npt.assert_array_equal(weights, np.ones_like(T))
def test_survival_events_from_table_with_ties():
    """Tied event times collapse to one row with a weight of 2."""
    T = np.array([1, 2, 3, 4, 4, 5])
    C = np.array([1, 0, 1, 1, 1, 1])
    table = utils.survival_table_from_events(T, C)
    T_back, C_back, weights = utils.survival_events_from_table(table[["censored", "observed"]])
    npt.assert_array_equal([1, 2, 3, 4, 5], T_back)
    npt.assert_array_equal([1, 0, 1, 1, 1], C_back)
    npt.assert_array_equal([1, 1, 1, 2, 1], weights)
def test_survival_table_from_events_with_non_trivial_censorship_column():
    """An integer-valued censorship array should behave like its boolean form.

    Bug fix: the original call `np.random.binomial(2, p=0.8)` omitted `size`,
    producing a single scalar instead of one flag per subject, so the test
    exercised scalar broadcasting rather than a per-subject censorship vector.
    """
    T = np.random.exponential(5, size=50)
    malformed_C = np.random.binomial(2, p=0.8, size=T.shape[0])  # values can be 2 on purpose!
    proper_C = malformed_C > 0  # (proper "boolean" array)
    table1 = utils.survival_table_from_events(T, malformed_C, np.zeros_like(T))
    table2 = utils.survival_table_from_events(T, proper_C, np.zeros_like(T))
    assert_frame_equal(table1, table2)
def test_group_survival_table_from_events_on_waltons_data():
    """Grouped survival tables must share one index across removed/observed/censored."""
    df = load_waltons()
    births = np.zeros(df.shape[0])
    groups, removed, observed, censored = utils.group_survival_table_from_events(df["group"], df["T"], df["E"], births)
    assert len(groups) == 2
    assert all(removed.columns == ["removed:miR-137", "removed:control"])
    assert all(removed.index == observed.index)
    assert all(removed.index == censored.index)
def test_survival_table_from_events_binned_with_empty_bin():
    """Binned tables must not contain NaNs even when an interval has no events."""
    df = load_waltons()
    mir137 = df.loc[df["group"] == "miR-137"]
    table = utils.survival_table_from_events(mir137["T"], mir137["E"], intervals=[0, 10, 20, 30, 40, 50])
    assert not pd.isnull(table).any().any()
def test_survival_table_from_events_at_risk_column():
    """The at_risk counts should match the output of R's survival package."""
    # reference values computed in R
    expected = [
        163.0, 162.0, 160.0, 157.0, 154.0, 152.0, 151.0, 148.0, 144.0, 139.0,
        134.0, 133.0, 130.0, 128.0, 126.0, 119.0, 118.0, 108.0, 107.0, 99.0,
        96.0, 89.0, 87.0, 69.0, 65.0, 49.0, 38.0, 36.0, 27.0, 24.0, 14.0, 1.0,
    ]
    waltons = load_waltons()
    table = utils.survival_table_from_events(waltons["T"], waltons["E"])
    # skip the first row: it is the birth time, 0.
    assert list(table["at_risk"][1:]) == expected
def test_survival_table_to_events_casts_to_float():
    """Count columns of the survival table should be floats, not ints/bools."""
    T = np.array([1, 2, 3, 4, 4, 5])
    C = np.array([True, False, True, True, True, True])
    table = utils.survival_table_from_events(T, C, np.zeros_like(T))
    npt.assert_array_equal(table["censored"].values, np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0]))
    npt.assert_array_equal(table["removed"].values, np.array([0.0, 1.0, 1.0, 1.0, 2.0, 1.0]))
def test_group_survival_table_from_events_works_with_series():
    """pandas Series inputs (not just arrays) should be accepted."""
    df = pd.DataFrame([[1, True, 3], [1, True, 3], [4, False, 2]], columns=["duration", "E", "G"])
    unique_groups, _, _, _ = utils.group_survival_table_from_events(df.G, df.duration, df.E, np.array([[0, 0, 0]]))
    npt.assert_array_equal(unique_groups, np.array([3, 2]))
def test_survival_table_from_events_will_collapse_if_asked():
    """collapse=True should bucket event times into automatically chosen intervals."""
    T = np.array([1, 3, 4, 5])
    C = np.array([True, True, True, True])
    table = utils.survival_table_from_events(T, C, collapse=True)
    assert table.index.tolist() == [
        pd.Interval(-0.001, 3.5089999999999999, closed="right"),
        pd.Interval(3.5089999999999999, 7.0179999999999998, closed="right"),
    ]
def test_survival_table_from_events_will_collapse_to_desired_bins():
    """collapse=True with explicit intervals should use exactly those bins."""
    T = np.array([1, 3, 4, 5])
    C = np.array([True, True, True, True])
    table = utils.survival_table_from_events(T, C, collapse=True, intervals=[0, 4, 8])
    assert table.index.tolist() == [pd.Interval(-0.001, 4, closed="right"), pd.Interval(4, 8, closed="right")]
def test_cross_validator_returns_k_results():
    """k_fold_cross_validation should return one score per fold."""
    fitter = CoxPHFitter()
    for k in (3, 5):
        scores = utils.k_fold_cross_validation(fitter, load_regression_dataset(), duration_col="T", event_col="E", k=k)
        assert len(scores) == k
def test_cross_validator_returns_fitters_k_results():
    """A list of fitters should yield one list of k scores per fitter."""
    fitter = CoxPHFitter()
    fitters = [fitter, fitter]
    for k in (3, 5):
        scores = utils.k_fold_cross_validation(fitters, load_regression_dataset(), duration_col="T", event_col="E", k=k)
        assert len(scores) == 2
        assert len(scores[0]) == len(scores[1]) == k
def test_cross_validator_with_predictor():
    """Smoke test: cross validation runs with the default predictor."""
    scores = utils.k_fold_cross_validation(CoxPHFitter(), load_regression_dataset(), duration_col="T", event_col="E", k=3)
    assert len(scores) == 3
def test_cross_validator_with_stratified_cox_model():
    """Smoke test: a stratified Cox model survives cross validation."""
    stratified_cph = CoxPHFitter(strata=["race"])
    utils.k_fold_cross_validation(stratified_cph, load_rossi(), duration_col="week", event_col="arrest")
def test_cross_validator_with_specific_loss_function():
    """Cross validation with an explicit scoring_method should produce one score per fold.

    Bug fix: the original assigned the result to `results_sq` and asserted
    nothing, so a broken scoring path would have gone unnoticed.
    """
    cf = CoxPHFitter()
    results_sq = utils.k_fold_cross_validation(
        cf, load_regression_dataset(), scoring_method="concordance_index", duration_col="T", event_col="E"
    )
    # k defaults to 5 folds — TODO confirm against lifelines' signature
    assert len(results_sq) == 5
def test_concordance_index():
    """Sanity properties of the concordance index: constant=0.5, perfect=1."""
    size = 1000
    T = np.random.normal(size=size)
    P = np.random.normal(size=size)
    C = np.random.choice([0, 1], size=size)
    Z = np.zeros_like(T)
    # an all-constant prediction is indistinguishable from random: exactly 0.5
    assert utils.concordance_index(T, Z) == 0.5
    assert utils.concordance_index(T, Z, C) == 0.5
    # predicting the truth itself is perfectly concordant
    assert utils.concordance_index(T, T) == 1.0
    assert utils.concordance_index(T, T, C) == 1.0
    # independent predictions should hover near 0.5
    assert abs(utils.concordance_index(T, P) - 0.5) < 0.05
    assert abs(utils.concordance_index(T, P, C) - 0.5) < 0.05
def test_survival_table_from_events_with_non_negative_T_and_no_lagged_births():
    """With no late entries, every subject enters at the first event time."""
    n = 10
    T = np.arange(n)
    observed = [True] * n
    births = [0] * n
    table = utils.survival_table_from_events(T, observed, births)
    assert table.iloc[0]["entrance"] == n
    assert table.index[0] == T.min()
    assert table.index[-1] == T.max()
def test_survival_table_from_events_with_negative_T_and_no_lagged_births():
    """Negative event times work; with births=None everyone enters at T.min()."""
    n = 10
    T = np.arange(-n / 2, n / 2)
    observed = [True] * n
    table = utils.survival_table_from_events(T, observed, None)
    assert table.iloc[0]["entrance"] == n
    assert table.index[0] == T.min()
    assert table.index[-1] == T.max()
def test_survival_table_from_events_with_non_negative_T_and_lagged_births():
    """With staggered entry times only the earliest subject enters at the start."""
    n = 10
    T = np.arange(n)
    observed = [True] * n
    births = np.linspace(0, 2, n)
    table = utils.survival_table_from_events(T, observed, births)
    assert table.iloc[0]["entrance"] == 1
    assert table.index[0] == T.min()
    assert table.index[-1] == T.max()
def test_survival_table_from_events_with_negative_T_and_lagged_births():
    """Staggered entries also work when event times are negative."""
    n = 10
    T = np.arange(-n / 2, n / 2)
    observed = [True] * n
    births = np.linspace(-n / 2, 2, n)
    table = utils.survival_table_from_events(T, observed, births)
    assert table.iloc[0]["entrance"] == 1
    assert table.index[0] == T.min()
    assert table.index[-1] == T.max()
def test_survival_table_from_events_raises_value_error_if_too_early_births():
    """A birth time later than the subject's event time is invalid and must raise."""
    n = 10
    T = np.arange(0, n)
    observed = [True] * n
    births = T.copy()
    births[1] += 10  # birth now occurs after the event time -> invalid
    with pytest.raises(ValueError):
        utils.survival_table_from_events(T, observed, births)
class TestLongDataFrameUtils(object):
@pytest.fixture
def seed_df(self):
df = pd.DataFrame.from_records([{"id": 1, "var1": 0.1, "T": 10, "E": 1}, {"id": 2, "var1": 0.5, "T": 12, "E": 0}])
return utils.to_long_format(df, "T")
@pytest.fixture
def cv1(self):
return pd.DataFrame.from_records(
[
{"id": 1, "t": 0, "var2": 1.4},
{"id": 1, "t": 4, "var2": 1.2},
{"id": 1, "t": 8, "var2": 1.5},
{"id": 2, "t": 0, "var2": 1.6},
]
)
@pytest.fixture
def cv2(self):
return pd.DataFrame.from_records(
[{"id": 1, "t": 0, "var3": 0}, {"id": 1, "t": 6, "var3": 1}, {"id": 2, "t": 0, "var3": 0}]
)
def test_order_of_adding_covariates_doesnt_matter(self, seed_df, cv1, cv2):
df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E"
)
df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv1, "id", "t", "E"
)
assert_frame_equal(df21, df12, check_like=True)
def test_order_of_adding_covariates_doesnt_matter_in_cumulative_sum(self, seed_df, cv1, cv2):
df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E", cumulative_sum=True).pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E", cumulative_sum=True
)
df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E", cumulative_sum=True).pipe(
utils.add_covariate_to_timeline, cv1, "id", "t", "E", cumulative_sum=True
)
assert_frame_equal(df21, df12, check_like=True)
def test_adding_cvs_with_the_same_column_name_will_insert_appropriately(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv = pd.DataFrame.from_records([{"id": 1, "t": 1, "var1": 1.0}, {"id": 1, "t": 2, "var1": 2.0}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
expected = pd.DataFrame.from_records(
[
{"E": False, "id": 1, "stop": 1.0, "start": 0, "var1": 0.1},
{"E": False, "id": 1, "stop": 2.0, "start": 1, "var1": 1.0},
{"E": True, "id": 1, "stop": 10.0, "start": 2, "var1": 2.0},
]
)
assert_frame_equal(df, expected, check_like=True)
def test_adding_cvs_with_the_same_column_name_will_sum_update_appropriately(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
new_value_at_time_0 = 1.0
old_value_at_time_0 = seed_df["var1"].iloc[0]
cv = pd.DataFrame.from_records([{"id": 1, "t": 0, "var1": new_value_at_time_0, "var2": 2.0}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E", overwrite=False)
expected = pd.DataFrame.from_records(
[{"E": True, "id": 1, "stop": 10.0, "start": 0, "var1": new_value_at_time_0 + old_value_at_time_0, "var2": 2.0}]
)
assert_frame_equal(df, expected, check_like=True)
def test_adding_cvs_with_the_same_column_name_will_overwrite_update_appropriately(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
new_value_at_time_0 = 1.0
cv = pd.DataFrame.from_records([{"id": 1, "t": 0, "var1": new_value_at_time_0}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E", overwrite=True)
expected = pd.DataFrame.from_records([{"E": True, "id": 1, "stop": 10.0, "start": 0, "var1": new_value_at_time_0}])
assert_frame_equal(df, expected, check_like=True)
    def test_enum_flag(self, seed_df, cv1, cv2):
        """add_enum=True should number each subject's rows 1..n in order."""
        df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E", add_enum=True).pipe(
            utils.add_covariate_to_timeline, cv2, "id", "t", "E", add_enum=True
        )
        idx = df["id"] == 1
        n = idx.sum()
        try:
            assert_series_equal(df["enum"].loc[idx], pd.Series(np.arange(1, n + 1)), check_names=False)
        except AssertionError as e:
            # Windows Numpy and Pandas sometimes have int32 or int64 as default dtype
            if os.name == "nt" and "int32" in str(e) and "int64" in str(e):
                # retry with the dtype actually produced so the comparison is value-only
                assert_series_equal(
                    df["enum"].loc[idx], pd.Series(np.arange(1, n + 1), dtype=df["enum"].loc[idx].dtypes), check_names=False
                )
            else:
                raise e
def test_event_col_is_properly_inserted(self, seed_df, cv2):
df = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E")
assert df.groupby("id").last()["E"].tolist() == [1, 0]
def test_redundant_cv_columns_are_dropped(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv = pd.DataFrame.from_records(
[
{"id": 1, "t": 0, "var3": 0, "var4": 1},
{"id": 1, "t": 1, "var3": 0, "var4": 1}, # redundant, as nothing changed during the interval
{"id": 1, "t": 3, "var3": 0, "var4": 1}, # redundant, as nothing changed during the interval
{"id": 1, "t": 6, "var3": 1, "var4": 1},
{"id": 1, "t": 9, "var3": 1, "var4": 1}, # redundant, as nothing changed during the interval
]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 2
def test_will_convert_event_column_to_bools(self, seed_df, cv1):
seed_df["E"] = seed_df["E"].astype(int)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E")
assert df.dtypes["E"] == bool
def test_if_cvs_include_a_start_time_after_the_final_time_it_is_excluded(self, seed_df):
max_T = seed_df["stop"].max()
cv = pd.DataFrame.from_records(
[
{"id": 1, "t": 0, "var3": 0},
{"id": 1, "t": max_T + 10, "var3": 1}, # will be excluded
{"id": 2, "t": 0, "var3": 0},
]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 2
def test_if_cvs_include_a_start_time_before_it_is_included(self, seed_df):
min_T = seed_df["start"].min()
cv = pd.DataFrame.from_records(
[{"id": 1, "t": 0, "var3": 0}, {"id": 1, "t": min_T - 1, "var3": 1}, {"id": 2, "t": 0, "var3": 0}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 3
def test_cvs_with_null_values_are_dropped(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv = pd.DataFrame.from_records(
[{"id": None, "t": 0, "var3": 0}, {"id": 1, "t": None, "var3": 1}, {"id": 2, "t": 0, "var3": None}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 1
def test_a_new_row_is_not_created_if_start_times_are_the_same(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv1 = pd.DataFrame.from_records([{"id": 1, "t": 0, "var3": 0}, {"id": 1, "t": 5, "var3": 1}])
cv2 = pd.DataFrame.from_records(
[{"id": 1, "t": 0, "var4": 0}, {"id": 1, "t": 5, "var4": 1.5}, {"id": 1, "t": 6, "var4": 1.7}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E"
)
assert df.shape[0] == 3
def test_error_is_raised_if_columns_are_missing_in_seed_df(self, seed_df, cv1):
del seed_df["start"]
with pytest.raises(IndexError):
utils.add_covariate_to_timeline(seed_df, cv1, "id", "t", "E")
def test_cumulative_sum(self):
seed_df = pd.DataFrame.from_records([{"id": 1, "start": 0, "stop": 5, "E": 1}])
cv = pd.DataFrame.from_records([{"id": 1, "t": 0, "var4": 1}, {"id": 1, "t": 1, "var4": 1}, {"id": 1, "t": 3, "var4": 1}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E", cumulative_sum=True)
expected = pd.DataFrame.from_records(
[
{"id": 1, "start": 0, "stop": 1.0, "cumsum_var4": 1, "E": False},
{"id": 1, "start": 1, "stop": 3.0, "cumsum_var4": 2, "E": False},
{"id": 1, "start": 3, "stop": 5.0, "cumsum_var4": 3, "E": True},
]
)
assert_frame_equal(expected, df, check_like=True)
def test_delay(self, cv2):
seed_df = pd.DataFrame.from_records([{"id": 1, "start": 0, "stop": 50, "E": 1}])
cv3 = pd.DataFrame.from_records(
[{"id": 1, "t": 0, "varA": 2}, {"id": 1, "t": 10, "varA": 4}, {"id": 1, "t": 20, "varA": 6}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv3, "id", "t", "E", delay=2).fillna(0)
expected = pd.DataFrame.from_records(
[
{"start": 0, "stop": 2.0, "varA": 0.0, "id": 1, "E": False},
{"start": 2, "stop": 12.0, "varA": 2.0, "id": 1, "E": False},
{"start": 12, "stop": 22.0, "varA": 4.0, "id": 1, "E": False},
{"start": 22, "stop": 50.0, "varA": 6.0, "id": 1, "E": True},
]
)
assert_frame_equal(expected, df, check_like=True)
def test_covariates_from_event_matrix_with_simple_addition(self):
base_df = pd.DataFrame([[1, 0, 5, 1], [2, 0, 4, 1], [3, 0, 8, 1], [4, 0, 4, 1]], columns=["id", "start", "stop", "e"])
event_df = pd.DataFrame([[1, 1], [2, 2], [3, 3], [4, None]], columns=["id", "poison"])
cv = utils.covariates_from_event_matrix(event_df, "id")
ldf = utils.add_covariate_to_timeline(base_df, cv, "id", "duration", "e", cumulative_sum=True)
assert pd.notnull(ldf).all().all()
expected = pd.DataFrame(
[
(0.0, 0.0, 1.0, 1, False),
(1.0, 1.0, 5.0, 1, True),
(0.0, 0.0, 2.0, 2, False),
(2.0, 1.0, 4.0, 2, True),
(0.0, 0.0, 3.0, 3, False),
(3.0, 1.0, 8.0, 3, True),
(0.0, 0.0, 4.0, 4, True),
],
columns=["start", "cumsum_poison", "stop", "id", "e"],
)
assert_frame_equal(expected, ldf, check_dtype=False, check_like=True)
def test_covariates_from_event_matrix(self):
base_df = pd.DataFrame([[1, 0, 5, 1], [2, 0, 4, 1], [3, 0, 8, 1], [4, 0, 4, 1]], columns=["id", "start", "stop", "e"])
event_df = pd.DataFrame(
[[1, 1, None, 2], [2, None, 5, None], [3, 3, 3, 7]], columns=["id", "promotion", "movement", "raise"]
)
cv = utils.covariates_from_event_matrix(event_df, "id")
ldf = utils.add_covariate_to_timeline(base_df, cv, "id", "duration", "e", cumulative_sum=True)
expected = pd.DataFrame.from_records(
[
{
"cumsum_movement": 0.0,
"cumsum_promotion": 0.0,
"cumsum_raise": 0.0,
"e": 0.0,
"id": 1.0,
"start": 0.0,
"stop": 1.0,
},
{
"cumsum_movement": 0.0,
"cumsum_promotion": 1.0,
"cumsum_raise": 0.0,
"e": 0.0,
"id": 1.0,
"start": 1.0,
"stop": 2.0,
},
{
"cumsum_movement": 0.0,
"cumsum_promotion": 1.0,
"cumsum_raise": 1.0,
"e": 1.0,
"id": 1.0,
"start": 2.0,
"stop": 5.0,
},
{
"cumsum_movement": 0.0,
"cumsum_promotion": 0.0,
"cumsum_raise": 0.0,
"e": 1.0,
"id": 2.0,
"start": 0.0,
"stop": 4.0,
},
{
"cumsum_movement": 0.0,
"cumsum_promotion": 0.0,
"cumsum_raise": 0.0,
"e": 0.0,
"id": 3.0,
"start": 0.0,
"stop": 3.0,
},
{
"cumsum_movement": 1.0,
"cumsum_promotion": 1.0,
"cumsum_raise": 0.0,
"e": 0.0,
"id": 3.0,
"start": 3.0,
"stop": 7.0,
},
{
"cumsum_movement": 1.0,
"cumsum_promotion": 1.0,
"cumsum_raise": 1.0,
"e": 1.0,
"id": 3.0,
"start": 7.0,
"stop": 8.0,
},
{
"cumsum_movement": None,
"cumsum_promotion": None,
"cumsum_raise": None,
"e": 1.0,
"id": 4.0,
"start": 0.0,
"stop": 4.0,
},
]
)
assert_frame_equal(expected, ldf, check_dtype=False, check_like=True)
def test_to_episodic_format_with_long_time_gap_is_identical(self):
rossi = load_rossi()
rossi["id"] = np.arange(rossi.shape[0])
long_rossi = utils.to_episodic_format(rossi, duration_col="week", event_col="arrest", id_col="id", time_gaps=1000.0)
# using astype(int) would fail on Windows because int32 and int64 are used as dtype
long_rossi["week"] = long_rossi["stop"].astype(rossi["week"].dtype)
del long_rossi["start"]
del long_rossi["stop"]
assert_frame_equal(long_rossi, rossi, check_like=True)
def test_to_episodic_format_preserves_outcome(self):
E = [1, 1, 0, 0]
df = pd.DataFrame({"T": [1, 3, 1, 3], "E": E, "id": [1, 2, 3, 4]})
long_df = utils.to_episodic_format(df, "T", "E", id_col="id").sort_values(["id", "stop"])
assert long_df.shape[0] == 1 + 3 + 1 + 3
assert long_df.groupby("id").last()["E"].tolist() == E
def test_to_episodic_format_handles_floating_durations(self):
df = pd.DataFrame({"T": [0.1, 3.5], "E": [1, 1], "id": [1, 2]})
long_df = utils.to_episodic_format(df, "T", "E", id_col="id").sort_values(["id", "stop"])
assert long_df.shape[0] == 1 + 4
assert long_df["stop"].tolist() == [0.1, 1, 2, 3, 3.5]
def test_to_episodic_format_handles_floating_durations_with_time_gaps(self):
df = pd.DataFrame({"T": [0.1, 3.5], "E": [1, 1], "id": [1, 2]})
long_df = utils.to_episodic_format(df, "T", "E", id_col="id", time_gaps=2.0).sort_values(["id", "stop"])
assert long_df["stop"].tolist() == [0.1, 2, 3.5]
def test_to_episodic_format_handles_floating_durations_and_preserves_events(self):
df = pd.DataFrame({"T": [0.1, 3.5], "E": [1, 0], "id": [1, 2]})
long_df = utils.to_episodic_format(df, "T", "E", id_col="id", time_gaps=2.0).sort_values(["id", "stop"])
assert long_df.groupby("id").last()["E"].tolist() == [1, 0]
    # NOTE(review): exact duplicate of the method defined immediately above;
    # the second definition shadows the first, so pytest collects only one
    # copy. One of the two should be removed or renamed.
    def test_to_episodic_format_handles_floating_durations_and_preserves_events(self):
        """The last episodic row per id keeps the subject's event indicator."""
        df = pd.DataFrame({"T": [0.1, 3.5], "E": [1, 0], "id": [1, 2]})
        long_df = utils.to_episodic_format(df, "T", "E", id_col="id", time_gaps=2.0).sort_values(["id", "stop"])
        assert long_df.groupby("id").last()["E"].tolist() == [1, 0]
def test_to_episodic_format_adds_id_col(self):
df = pd.DataFrame({"T": [1, 3], "E": [1, 0]})
long_df = utils.to_episodic_format(df, "T", "E")
assert "id" in long_df.columns
def test_to_episodic_format_uses_custom_index_as_id(self):
df = pd.DataFrame({"T": [1, 3], "E": [1, 0]}, index=["A", "B"])
long_df = utils.to_episodic_format(df, "T", "E")
assert long_df["id"].tolist() == ["A", "B", "B", "B"]
class TestStepSizer:
    """Behavioral checks for utils.StepSizer's adaptive step-size logic."""

    def test_StepSizer_step_will_decrease_if_unstable(self):
        """Oscillating losses should shrink the step size."""
        initial = 0.95
        sizer = utils.StepSizer(initial)
        assert sizer.next() == initial
        for loss in (1.0, 2.0, 1.0, 2.0):
            sizer.update(loss)
        assert sizer.next() < initial

    def test_StepSizer_step_will_increase_if_stable(self):
        """Monotonically improving losses should grow the step size."""
        initial = 0.5
        sizer = utils.StepSizer(initial)
        assert sizer.next() == initial
        for loss in (1.0, 0.5, 0.4, 0.1):
            sizer.update(loss)
        assert sizer.next() > initial

    def test_StepSizer_step_will_decrease_if_explodes(self):
        """A single exploding loss should immediately shrink the step size."""
        initial = 0.5
        sizer = utils.StepSizer(initial)
        assert sizer.next() == initial
        sizer.update(20.0)
        assert sizer.next() < initial
class TestSklearnAdapter:
    """Tests for sklearn_adapter, which wraps a lifelines fitter class
    in a scikit-learn estimator interface (fit / predict / score)."""

    @pytest.fixture
    def X(self):
        # Covariate matrix: the regression dataset minus the duration column.
        return load_regression_dataset().drop("T", axis=1)

    @pytest.fixture
    def Y(self):
        # Durations only (the sklearn "target").
        return load_regression_dataset().pop("T")

    def test_model_has_correct_api(self, X, Y):
        # The wrapped model must expose sklearn's fit / predict / score triple.
        base_model = sklearn_adapter(CoxPHFitter, event_col="E")
        cph = base_model()
        assert hasattr(cph, "fit")
        cph.fit(X, Y)
        assert hasattr(cph, "predict")
        cph.predict(X)
        assert hasattr(cph, "score")
        cph.score(X, Y)

    def test_sklearn_cross_val_score_accept_model(self, X, Y):
        # The adapter is accepted by sklearn's cross-validation machinery.
        from sklearn.model_selection import cross_val_score
        from sklearn.model_selection import GridSearchCV

        base_model = sklearn_adapter(WeibullAFTFitter, event_col="E")
        wf = base_model(penalizer=1.0)
        assert len(cross_val_score(wf, X, Y, cv=3)) == 3

    def test_sklearn_GridSearchCV_accept_model(self, X, Y):
        # Hyperparameter search over the wrapped fitter's constructor kwargs.
        from sklearn.model_selection import cross_val_score
        from sklearn.model_selection import GridSearchCV

        base_model = sklearn_adapter(WeibullAFTFitter, event_col="E")
        grid_params = {"penalizer": 10.0 ** np.arange(-2, 3), "model_ancillary": [True, False]}
        clf = GridSearchCV(base_model(), grid_params, cv=4)
        clf.fit(X, Y)
        assert clf.best_params_ == {"model_ancillary": True, "penalizer": 100.0}
        assert clf.predict(X).shape[0] == X.shape[0]

    def test_model_can_accept_things_like_strata(self, X, Y):
        # Fitter-specific kwargs (e.g. strata) pass through the adapter.
        X["strata"] = np.random.randint(0, 2, size=X.shape[0])
        base_model = sklearn_adapter(CoxPHFitter, event_col="E")
        cph = base_model(strata="strata")
        cph.fit(X, Y)

    def test_we_can_user_other_prediction_methods(self, X, Y):
        # predict_method redirects .predict() to another lifelines prediction API.
        base_model = sklearn_adapter(WeibullAFTFitter, event_col="E", predict_method="predict_median")
        wf = base_model(strata="strata")
        wf.fit(X, Y)
        assert wf.predict(X).shape[0] == X.shape[0]

    @pytest.mark.xfail
    def test_dill(self, X, Y):
        # marked xfail: serializing the dynamically-created wrapper class is
        # presumably not supported — confirm against the adapter implementation
        import dill

        base_model = sklearn_adapter(CoxPHFitter, event_col="E")
        cph = base_model()
        cph.fit(X, Y)
        s = dill.dumps(cph)
        s = dill.loads(s)
        assert cph.predict(X).shape[0] == X.shape[0]

    @pytest.mark.xfail
    def test_pickle(self, X, Y):
        import pickle

        base_model = sklearn_adapter(CoxPHFitter, event_col="E")
        cph = base_model()
        cph.fit(X, Y)
        s = pickle.dumps(cph, protocol=-1)
        s = pickle.loads(s)
        assert cph.predict(X).shape[0] == X.shape[0]

    def test_isinstance(self):
        # The wrapper participates in sklearn's class hierarchy.
        from sklearn.base import BaseEstimator, RegressorMixin, MetaEstimatorMixin, MultiOutputMixin

        base_model = sklearn_adapter(CoxPHFitter, event_col="E")
        assert isinstance(base_model(), BaseEstimator)
        assert isinstance(base_model(), RegressorMixin)
        assert isinstance(base_model(), MetaEstimatorMixin)

    @pytest.mark.xfail
    def test_sklearn_GridSearchCV_accept_model_with_parallelization(self, X, Y):
        from sklearn.model_selection import cross_val_score
        from sklearn.model_selection import GridSearchCV

        base_model = sklearn_adapter(WeibullAFTFitter, event_col="E")
        grid_params = {"penalizer": 10.0 ** np.arange(-2, 3), "l1_ratio": [0.05, 0.5, 0.95], "model_ancillary": [True, False]}
        # note the n_jobs
        clf = GridSearchCV(base_model(), grid_params, cv=4, n_jobs=-1)
        clf.fit(X, Y)
        assert clf.best_params_ == {"l1_ratio": 0.5, "model_ancillary": False, "penalizer": 0.01}
        assert clf.predict(X).shape[0] == X.shape[0]

    @pytest.mark.xfail
    def test_joblib(self, X, Y):
        from joblib import dump, load

        base_model = sklearn_adapter(WeibullAFTFitter, event_col="E")
        clf = base_model()
        clf.fit(X, Y)
        dump(clf, "filename.joblib")
        clf = load("filename.joblib")
@pytest.mark.xfail
def test_sklearn_check():
    """The adapter does not (yet) satisfy sklearn's full estimator contract."""
    from sklearn.utils.estimator_checks import check_estimator

    wrapped = sklearn_adapter(WeibullAFTFitter, event_col="E")
    check_estimator(wrapped())
def test_rmst_works_at_kaplan_meier_edge_case():
    """RMST from a Kaplan-Meier curve behaves sensibly at and beyond the last event."""
    durations = [1, 2, 3, 4, 10]
    kmf = KaplanMeierFitter().fit(durations)

    # once S(t) has reached 0, extending the horizon adds no further area
    assert utils.restricted_mean_survival_time(kmf, t=10) == utils.restricted_mean_survival_time(kmf, t=10.001)
    # RMST is non-decreasing in the horizon t
    assert utils.restricted_mean_survival_time(kmf, t=9.9) <= utils.restricted_mean_survival_time(kmf, t=10.0)

    # step-function areas computed by hand from the KM survival probabilities
    area_to_4 = 1.0 + 0.8 + 0.6 + 0.4
    assert abs(utils.restricted_mean_survival_time(kmf, t=4) - area_to_4) < 0.0001
    assert abs(utils.restricted_mean_survival_time(kmf, t=4 + 0.1) - (area_to_4 + 0.2 * 0.1)) < 0.0001
def test_rmst_exactly_with_known_solution():
    """RMST of an exponential model matches its closed form.

    For S(t) = exp(-t / lambda): the unrestricted mean is lambda, and
    RMST at horizon t = lambda equals lambda * (1 - 1/e).

    (Renamed from ``test_rmst_exactely_with_known_solution`` — typo in
    the original name; pytest discovers the test either way.)
    """
    T = np.random.exponential(2, 100)
    expf = ExponentialFitter().fit(T)
    lambda_ = expf.lambda_
    assert abs(utils.restricted_mean_survival_time(expf) - lambda_) < 0.001
    assert abs(utils.restricted_mean_survival_time(expf, t=lambda_) - lambda_ * (np.e - 1) / np.e) < 0.001
@flaky
def test_rmst_approximate_solution():
    """Passing a survival-function DataFrame (instead of a fitted model) uses a
    numerical approximation, which should agree closely with the exact value
    and emit an ApproximationWarning."""
    T = np.random.exponential(2, 4000)
    # a fine timeline makes the DataFrame-based approximation accurate
    exp = ExponentialFitter().fit(T, timeline=np.linspace(0, T.max(), 10000))
    lambda_ = exp.lambda_
    # `w` (the captured warnings) is intentionally unused; the context manager
    # only asserts that the warning is raised
    with pytest.warns(exceptions.ApproximationWarning) as w:
        assert (
            abs(
                utils.restricted_mean_survival_time(exp, t=lambda_)
                - utils.restricted_mean_survival_time(exp.survival_function_, t=lambda_)
            )
            < 0.001
        )
def test_rmst_variance():
    """RMST mean and variance for an exponential match the closed forms on [0, t].

    For hazard h: E[min(T, t)] = (1 - e^{-ht}) / h and
    E[min(T, t)^2] = 2/h^2 * (1 - e^{-ht}(1 + ht)).
    """
    T = np.random.exponential(2, 1000)
    expf = ExponentialFitter().fit(T)
    hazard = 1 / expf.lambda_
    t = 1

    second_moment = 2 / hazard ** 2 * (1 - np.exp(-hazard * t) * (1 + hazard * t))
    expected_mean = 1 / hazard * (1 - np.exp(-hazard * t))
    expected_var = second_moment - expected_mean ** 2

    # compute once: the original called restricted_mean_survival_time twice
    # for the same (mean, variance) pair
    result = utils.restricted_mean_survival_time(expf, t=t, return_variance=True)
    assert abs(result[0] - expected_mean) < 0.001
    assert abs(result[1] - expected_var) < 0.001
def test_find_best_parametric_model():
    """Model selection over the built-in parametric fitters returns a usable result."""
    T = np.random.exponential(2, 1000)
    E = np.ones_like(T)
    model, score = utils.find_best_parametric_model(T, E)
    # the original `assert True` was vacuous — check the returned pair instead
    assert model is not None
    assert np.isfinite(score)
def test_find_best_parametric_model_can_accept_other_models():
    """User-supplied candidate models participate in the selection."""
    T = np.random.exponential(2, 1000)
    model, score = utils.find_best_parametric_model(T, additional_models=[ExponentialFitter(), ExponentialFitter()])
    # the original `assert True` was vacuous — check the returned pair instead
    assert model is not None
    assert np.isfinite(score)
def test_find_best_parametric_model_with_BIC():
    """Selection also works with BIC as the scoring criterion."""
    T = np.random.exponential(2, 1000)
    model, score = utils.find_best_parametric_model(T, scoring_method="BIC")
    # the original `assert True` was vacuous — check the returned pair instead
    assert model is not None
    assert np.isfinite(score)
def test_find_best_parametric_model_works_for_left_censoring():
    """Selection runs under left censoring without error."""
    T = np.random.exponential(2, 100)
    model, score = utils.find_best_parametric_model(T, censoring_type="left", show_progress=True)
    # the original `assert True` was vacuous — check the returned pair instead
    assert model is not None
    assert np.isfinite(score)
def test_find_best_parametric_model_works_for_interval_censoring():
    """Selection runs under interval censoring, given (lower, upper) bound arrays."""
    T_1 = np.random.exponential(2, 100)
    T_2 = T_1 + 1
    model, score = utils.find_best_parametric_model((T_1, T_2), censoring_type="interval", show_progress=True)
    # the original `assert True` was vacuous — check the returned pair instead
    assert model is not None
    assert np.isfinite(score)
def test_find_best_parametric_model_works_with_weights_and_entry():
    """Selection accepts case weights and late-entry (left-truncation) times."""
    T = np.random.exponential(5, 100)
    W = np.random.randint(1, 5, size=100)
    entry = np.random.exponential(0.01, 100)
    model, score = utils.find_best_parametric_model(T, weights=W, entry=entry, show_progress=True)
    # the original `assert True` was vacuous — check the returned pair instead
    assert model is not None
    assert np.isfinite(score)
def test_safe_exp():
    """safe_exp agrees with np.exp up to the clip point MAX and saturates above it."""
    from lifelines.utils.safe_exp import MAX

    # values: exact agreement at and below MAX, clamped to exp(MAX) above it
    assert safe_exp(4.0) == np.exp(4.0)
    assert safe_exp(MAX) == np.exp(MAX)
    assert safe_exp(MAX + 1) == np.exp(MAX)
    from autograd import grad

    # gradients: d/dx exp(x) = exp(x), so the derivative equals the value
    assert grad(safe_exp)(4.0) == np.exp(4.0)
    assert grad(safe_exp)(MAX) == np.exp(MAX)
    # NOTE(review): this presumes the clipped branch propagates the saturated
    # value's gradient (exp(MAX)) rather than zero — confirm against safe_exp's
    # implementation.
    assert grad(safe_exp)(MAX + 1) == np.exp(MAX)
| 37.917348 | 130 | 0.599612 |
import pytest
import os
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import numpy.testing as npt
from numpy.linalg import norm, lstsq
from numpy.random import randn
from flaky import flaky
from lifelines import CoxPHFitter, WeibullAFTFitter, KaplanMeierFitter, ExponentialFitter
from lifelines.datasets import load_regression_dataset, load_larynx, load_waltons, load_rossi
from lifelines import utils
from lifelines import exceptions
from lifelines.utils.sklearn_adapter import sklearn_adapter
from lifelines.utils.safe_exp import safe_exp
def test_format_p_values():
assert utils.format_p_value(2)(0.004) == "<0.005"
assert utils.format_p_value(3)(0.004) == "0.004"
assert utils.format_p_value(3)(0.000) == "<0.0005"
assert utils.format_p_value(3)(0.005) == "0.005"
assert utils.format_p_value(3)(0.2111) == "0.211"
assert utils.format_p_value(3)(0.2119) == "0.212"
def test_ridge_regression_with_penalty_is_less_than_without_penalty():
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1=2.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
assert norm(utils.ridge_regression(X, Y, c1=1.0, c2=1.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
def test_ridge_regression_with_extreme_c1_penalty_equals_close_to_zero_vector():
c1 = 10e8
c2 = 0.0
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0]) < 10e-4
def test_ridge_regression_with_extreme_c2_penalty_equals_close_to_offset():
c1 = 0.0
c2 = 10e8
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0] - offset) < 10e-4
def test_lstsq_returns_similar_values_to_ridge_regression():
X = randn(2, 2)
Y = randn(2)
expected = lstsq(X, Y, rcond=None)[0]
assert norm(utils.ridge_regression(X, Y)[0] - expected) < 10e-4
def test_lstsq_returns_correct_values():
X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
beta, V = utils.ridge_regression(X, y)
expected_beta = [-0.98684211, -0.07894737]
expected_v = [
[-0.03289474, -0.49342105, 0.06578947, 0.03289474, 0.49342105],
[-0.30263158, 0.46052632, -0.39473684, 0.30263158, -0.46052632],
]
assert norm(beta - expected_beta) < 10e-4
for V_row, e_v_row in zip(V, expected_v):
assert norm(V_row - e_v_row) < 1e-4
def test_unnormalize():
df = load_larynx()
m = df.mean(0)
s = df.std(0)
ndf = utils.normalize(df)
npt.assert_almost_equal(df.values, utils.unnormalize(ndf, m, s).values)
def test_normalize():
df = load_larynx()
n, d = df.shape
npt.assert_almost_equal(utils.normalize(df).mean(0).values, np.zeros(d))
npt.assert_almost_equal(utils.normalize(df).std(0).values, np.ones(d))
def test_median():
sv = pd.DataFrame(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_median_accepts_series():
sv = pd.Series(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_qth_survival_times_with_varying_datatype_inputs():
sf_list = [1.0, 0.75, 0.5, 0.25, 0.0]
sf_array = np.array([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_no_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0])
q = 0.5
assert utils.qth_survival_times(q, sf_list) == 2
assert utils.qth_survival_times(q, sf_array) == 2
assert utils.qth_survival_times(q, sf_df_no_index) == 2
assert utils.qth_survival_times(q, sf_df_index) == 30
assert utils.qth_survival_times(q, sf_series_index) == 30
assert utils.qth_survival_times(q, sf_series_no_index) == 2
def test_qth_survival_times_multi_dim_input():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
medians = utils.qth_survival_times(0.5, sf_multi_df)
assert medians["sf"].loc[0.5] == 25
assert medians["sf**2"].loc[0.5] == 15
def test_qth_survival_time_returns_inf():
sf = pd.Series([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.5, sf) == np.inf
def test_qth_survival_time_accepts_a_model():
kmf = KaplanMeierFitter().fit([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.8, kmf) > 0
def test_qth_survival_time_with_dataframe():
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_df_too_many_columns = pd.DataFrame([[1, 2], [3, 4]])
assert utils.qth_survival_time(0.5, sf_df_no_index) == 2
assert utils.qth_survival_time(0.5, sf_df_index) == 30
with pytest.raises(ValueError):
utils.qth_survival_time(0.5, sf_df_too_many_columns)
def test_qth_survival_times_with_multivariate_q():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
assert_frame_equal(
utils.qth_survival_times([0.2, 0.5], sf_multi_df),
pd.DataFrame([[40, 28], [25, 15]], index=[0.2, 0.5], columns=["sf", "sf**2"]),
)
assert_frame_equal(
utils.qth_survival_times([0.2, 0.5], sf_multi_df["sf"]), pd.DataFrame([40, 25], index=[0.2, 0.5], columns=["sf"])
)
assert_frame_equal(utils.qth_survival_times(0.5, sf_multi_df), pd.DataFrame([[25, 15]], index=[0.5], columns=["sf", "sf**2"]))
assert utils.qth_survival_times(0.5, sf_multi_df["sf"]) == 25
def test_qth_survival_times_with_duplicate_q_returns_valid_index_and_shape():
sf = pd.DataFrame(np.linspace(1, 0, 50))
q = pd.Series([0.5, 0.5, 0.2, 0.0, 0.0])
actual = utils.qth_survival_times(q, sf)
assert actual.shape[0] == len(q)
assert actual.index[0] == actual.index[1]
assert_series_equal(actual.iloc[0], actual.iloc[1])
npt.assert_almost_equal(actual.index.values, q.values)
def test_datetimes_to_durations_with_different_frequencies():
start_date = ["2013-10-10 0:00:00", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "2013-10-10 0:00:00", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date)
npt.assert_almost_equal(T, np.array([3, 1, 5 + 365]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "2013-10-10", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y")
npt.assert_almost_equal(T, np.array([0, 0, 1]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
start_date = ["2013-10-10 17:00:00", "2013-10-09 0:00:00", "2013-10-10 23:00:00"]
end_date = ["2013-10-10 18:00:00", "2013-10-10 0:00:00", "2013-10-11 2:00:00"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="h")
npt.assert_almost_equal(T, np.array([1, 24, 3]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
def test_datetimes_to_durations_will_handle_dates_above_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", "2013-10-12", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y", fill_date="2013-10-12")
npt.assert_almost_equal(C, np.array([1, 1, 0], dtype=bool))
def test_datetimes_to_durations_will_handle_dates_above_multi_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", None, None]
last_observation = ["2013-10-10", "2013-10-12", "2013-10-14"]
T, E = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date=last_observation)
npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 4]))
# NOTE(review): an exact duplicate of
# test_datetimes_to_durations_will_handle_dates_above_multi_fill_date was
# defined here. The redefinition silently shadowed the first copy at module
# scope, so pytest only ever collected one of them — removing the duplicate
# leaves the collected test set unchanged.
def test_datetimes_to_durations_censor():
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", None, ""]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y")
npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_datetimes_to_durations_custom_censor():
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "NaT", ""]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y", na_values=["NaT", ""])
npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_survival_events_from_table_no_ties():
T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 0, 1])
d = utils.survival_table_from_events(T, C)
T_, C_, W_ = utils.survival_events_from_table(d[["censored", "observed"]])
npt.assert_array_equal(T, T_)
npt.assert_array_equal(C, C_)
npt.assert_array_equal(W_, np.ones_like(T))
def test_survival_events_from_table_with_ties():
T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 1, 1])
d = utils.survival_table_from_events(T, C)
T_, C_, W_ = utils.survival_events_from_table(d[["censored", "observed"]])
npt.assert_array_equal([1, 2, 3, 4, 5], T_)
npt.assert_array_equal([1, 0, 1, 1, 1], C_)
npt.assert_array_equal([1, 1, 1, 2, 1], W_)
def test_survival_table_from_events_with_non_trivial_censorship_column():
T = np.random.exponential(5, size=50)
malformed_C = np.random.binomial(2, p=0.8)
proper_C = malformed_C > 0
table1 = utils.survival_table_from_events(T, malformed_C, np.zeros_like(T))
table2 = utils.survival_table_from_events(T, proper_C, np.zeros_like(T))
assert_frame_equal(table1, table2)
def test_group_survival_table_from_events_on_waltons_data():
df = load_waltons()
first_obs = np.zeros(df.shape[0])
g, removed, observed, censored = utils.group_survival_table_from_events(df["group"], df["T"], df["E"], first_obs)
assert len(g) == 2
assert all(removed.columns == ["removed:miR-137", "removed:control"])
assert all(removed.index == observed.index)
assert all(removed.index == censored.index)
def test_survival_table_from_events_binned_with_empty_bin():
df = load_waltons()
ix = df["group"] == "miR-137"
event_table = utils.survival_table_from_events(df.loc[ix]["T"], df.loc[ix]["E"], intervals=[0, 10, 20, 30, 40, 50])
assert not pd.isnull(event_table).any().any()
def test_survival_table_from_events_at_risk_column():
df = load_waltons()
expected = [
163.0,
162.0,
160.0,
157.0,
154.0,
152.0,
151.0,
148.0,
144.0,
139.0,
134.0,
133.0,
130.0,
128.0,
126.0,
119.0,
118.0,
108.0,
107.0,
99.0,
96.0,
89.0,
87.0,
69.0,
65.0,
49.0,
38.0,
36.0,
27.0,
24.0,
14.0,
1.0,
]
df = utils.survival_table_from_events(df["T"], df["E"])
assert list(df["at_risk"][1:]) == expected
def test_survival_table_to_events_casts_to_float():
T, C = (np.array([1, 2, 3, 4, 4, 5]), np.array([True, False, True, True, True, True]))
d = utils.survival_table_from_events(T, C, np.zeros_like(T))
npt.assert_array_equal(d["censored"].values, np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0]))
npt.assert_array_equal(d["removed"].values, np.array([0.0, 1.0, 1.0, 1.0, 2.0, 1.0]))
def test_group_survival_table_from_events_works_with_series():
df = pd.DataFrame([[1, True, 3], [1, True, 3], [4, False, 2]], columns=["duration", "E", "G"])
ug, _, _, _ = utils.group_survival_table_from_events(df.G, df.duration, df.E, np.array([[0, 0, 0]]))
npt.assert_array_equal(ug, np.array([3, 2]))
def test_survival_table_from_events_will_collapse_if_asked():
T, C = np.array([1, 3, 4, 5]), np.array([True, True, True, True])
table = utils.survival_table_from_events(T, C, collapse=True)
assert table.index.tolist() == [
pd.Interval(-0.001, 3.5089999999999999, closed="right"),
pd.Interval(3.5089999999999999, 7.0179999999999998, closed="right"),
]
def test_survival_table_from_events_will_collapse_to_desired_bins():
T, C = np.array([1, 3, 4, 5]), np.array([True, True, True, True])
table = utils.survival_table_from_events(T, C, collapse=True, intervals=[0, 4, 8])
assert table.index.tolist() == [pd.Interval(-0.001, 4, closed="right"), pd.Interval(4, 8, closed="right")]
def test_cross_validator_returns_k_results():
cf = CoxPHFitter()
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 3
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=5)
assert len(results) == 5
def test_cross_validator_returns_fitters_k_results():
cf = CoxPHFitter()
fitters = [cf, cf]
results = utils.k_fold_cross_validation(fitters, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 2
assert len(results[0]) == len(results[1]) == 3
results = utils.k_fold_cross_validation(fitters, load_regression_dataset(), duration_col="T", event_col="E", k=5)
assert len(results) == 2
assert len(results[0]) == len(results[1]) == 5
def test_cross_validator_with_predictor():
cf = CoxPHFitter()
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 3
def test_cross_validator_with_stratified_cox_model():
cf = CoxPHFitter(strata=["race"])
utils.k_fold_cross_validation(cf, load_rossi(), duration_col="week", event_col="arrest")
def test_cross_validator_with_specific_loss_function():
cf = CoxPHFitter()
results_sq = utils.k_fold_cross_validation(
cf, load_regression_dataset(), scoring_method="concordance_index", duration_col="T", event_col="E"
)
def test_concordance_index():
size = 1000
T = np.random.normal(size=size)
P = np.random.normal(size=size)
C = np.random.choice([0, 1], size=size)
Z = np.zeros_like(T)
assert utils.concordance_index(T, Z) == 0.5
assert utils.concordance_index(T, Z, C) == 0.5
assert utils.concordance_index(T, T) == 1.0
assert utils.concordance_index(T, T, C) == 1.0
assert abs(utils.concordance_index(T, P) - 0.5) < 0.05
assert abs(utils.concordance_index(T, P, C) - 0.5) < 0.05
def test_survival_table_from_events_with_non_negative_T_and_no_lagged_births():
n = 10
T = np.arange(n)
C = [True] * n
min_obs = [0] * n
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == n
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_negative_T_and_no_lagged_births():
n = 10
T = np.arange(-n / 2, n / 2)
C = [True] * n
min_obs = None
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == n
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_non_negative_T_and_lagged_births():
n = 10
T = np.arange(n)
C = [True] * n
min_obs = np.linspace(0, 2, n)
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == 1
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_negative_T_and_lagged_births():
n = 10
T = np.arange(-n / 2, n / 2)
C = [True] * n
min_obs = np.linspace(-n / 2, 2, n)
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == 1
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_raises_value_error_if_too_early_births():
n = 10
T = np.arange(0, n)
C = [True] * n
min_obs = T.copy()
min_obs[1] = min_obs[1] + 10
with pytest.raises(ValueError):
utils.survival_table_from_events(T, C, min_obs)
class TestLongDataFrameUtils(object):
@pytest.fixture
def seed_df(self):
df = pd.DataFrame.from_records([{"id": 1, "var1": 0.1, "T": 10, "E": 1}, {"id": 2, "var1": 0.5, "T": 12, "E": 0}])
return utils.to_long_format(df, "T")
@pytest.fixture
def cv1(self):
return pd.DataFrame.from_records(
[
{"id": 1, "t": 0, "var2": 1.4},
{"id": 1, "t": 4, "var2": 1.2},
{"id": 1, "t": 8, "var2": 1.5},
{"id": 2, "t": 0, "var2": 1.6},
]
)
@pytest.fixture
def cv2(self):
return pd.DataFrame.from_records(
[{"id": 1, "t": 0, "var3": 0}, {"id": 1, "t": 6, "var3": 1}, {"id": 2, "t": 0, "var3": 0}]
)
def test_order_of_adding_covariates_doesnt_matter(self, seed_df, cv1, cv2):
df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E"
)
df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv1, "id", "t", "E"
)
assert_frame_equal(df21, df12, check_like=True)
def test_order_of_adding_covariates_doesnt_matter_in_cumulative_sum(self, seed_df, cv1, cv2):
df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E", cumulative_sum=True).pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E", cumulative_sum=True
)
df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E", cumulative_sum=True).pipe(
utils.add_covariate_to_timeline, cv1, "id", "t", "E", cumulative_sum=True
)
assert_frame_equal(df21, df12, check_like=True)
def test_adding_cvs_with_the_same_column_name_will_insert_appropriately(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv = pd.DataFrame.from_records([{"id": 1, "t": 1, "var1": 1.0}, {"id": 1, "t": 2, "var1": 2.0}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
expected = pd.DataFrame.from_records(
[
{"E": False, "id": 1, "stop": 1.0, "start": 0, "var1": 0.1},
{"E": False, "id": 1, "stop": 2.0, "start": 1, "var1": 1.0},
{"E": True, "id": 1, "stop": 10.0, "start": 2, "var1": 2.0},
]
)
assert_frame_equal(df, expected, check_like=True)
def test_adding_cvs_with_the_same_column_name_will_sum_update_appropriately(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
new_value_at_time_0 = 1.0
old_value_at_time_0 = seed_df["var1"].iloc[0]
cv = pd.DataFrame.from_records([{"id": 1, "t": 0, "var1": new_value_at_time_0, "var2": 2.0}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E", overwrite=False)
expected = pd.DataFrame.from_records(
[{"E": True, "id": 1, "stop": 10.0, "start": 0, "var1": new_value_at_time_0 + old_value_at_time_0, "var2": 2.0}]
)
assert_frame_equal(df, expected, check_like=True)
def test_adding_cvs_with_the_same_column_name_will_overwrite_update_appropriately(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
new_value_at_time_0 = 1.0
cv = pd.DataFrame.from_records([{"id": 1, "t": 0, "var1": new_value_at_time_0}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E", overwrite=True)
expected = pd.DataFrame.from_records([{"E": True, "id": 1, "stop": 10.0, "start": 0, "var1": new_value_at_time_0}])
assert_frame_equal(df, expected, check_like=True)
def test_enum_flag(self, seed_df, cv1, cv2):
df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E", add_enum=True).pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E", add_enum=True
)
idx = df["id"] == 1
n = idx.sum()
try:
assert_series_equal(df["enum"].loc[idx], pd.Series(np.arange(1, n + 1)), check_names=False)
except AssertionError as e:
if os.name == "nt" and "int32" in str(e) and "int64" in str(e):
assert_series_equal(
df["enum"].loc[idx], pd.Series(np.arange(1, n + 1), dtype=df["enum"].loc[idx].dtypes), check_names=False
)
else:
raise e
def test_event_col_is_properly_inserted(self, seed_df, cv2):
df = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E")
assert df.groupby("id").last()["E"].tolist() == [1, 0]
def test_redundant_cv_columns_are_dropped(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv = pd.DataFrame.from_records(
[
{"id": 1, "t": 0, "var3": 0, "var4": 1},
{"id": 1, "t": 1, "var3": 0, "var4": 1},
{"id": 1, "t": 3, "var3": 0, "var4": 1},
{"id": 1, "t": 6, "var3": 1, "var4": 1},
{"id": 1, "t": 9, "var3": 1, "var4": 1},
]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 2
def test_will_convert_event_column_to_bools(self, seed_df, cv1):
seed_df["E"] = seed_df["E"].astype(int)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E")
assert df.dtypes["E"] == bool
def test_if_cvs_include_a_start_time_after_the_final_time_it_is_excluded(self, seed_df):
max_T = seed_df["stop"].max()
cv = pd.DataFrame.from_records(
[
{"id": 1, "t": 0, "var3": 0},
{"id": 1, "t": max_T + 10, "var3": 1},
{"id": 2, "t": 0, "var3": 0},
]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 2
def test_if_cvs_include_a_start_time_before_it_is_included(self, seed_df):
min_T = seed_df["start"].min()
cv = pd.DataFrame.from_records(
[{"id": 1, "t": 0, "var3": 0}, {"id": 1, "t": min_T - 1, "var3": 1}, {"id": 2, "t": 0, "var3": 0}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 3
def test_cvs_with_null_values_are_dropped(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv = pd.DataFrame.from_records(
[{"id": None, "t": 0, "var3": 0}, {"id": 1, "t": None, "var3": 1}, {"id": 2, "t": 0, "var3": None}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 1
def test_a_new_row_is_not_created_if_start_times_are_the_same(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv1 = pd.DataFrame.from_records([{"id": 1, "t": 0, "var3": 0}, {"id": 1, "t": 5, "var3": 1}])
cv2 = pd.DataFrame.from_records(
[{"id": 1, "t": 0, "var4": 0}, {"id": 1, "t": 5, "var4": 1.5}, {"id": 1, "t": 6, "var4": 1.7}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E"
)
assert df.shape[0] == 3
def test_error_is_raised_if_columns_are_missing_in_seed_df(self, seed_df, cv1):
del seed_df["start"]
with pytest.raises(IndexError):
utils.add_covariate_to_timeline(seed_df, cv1, "id", "t", "E")
def test_cumulative_sum(self):
seed_df = pd.DataFrame.from_records([{"id": 1, "start": 0, "stop": 5, "E": 1}])
cv = pd.DataFrame.from_records([{"id": 1, "t": 0, "var4": 1}, {"id": 1, "t": 1, "var4": 1}, {"id": 1, "t": 3, "var4": 1}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E", cumulative_sum=True)
expected = pd.DataFrame.from_records(
[
{"id": 1, "start": 0, "stop": 1.0, "cumsum_var4": 1, "E": False},
{"id": 1, "start": 1, "stop": 3.0, "cumsum_var4": 2, "E": False},
{"id": 1, "start": 3, "stop": 5.0, "cumsum_var4": 3, "E": True},
]
)
assert_frame_equal(expected, df, check_like=True)
def test_delay(self, cv2):
seed_df = pd.DataFrame.from_records([{"id": 1, "start": 0, "stop": 50, "E": 1}])
cv3 = pd.DataFrame.from_records(
[{"id": 1, "t": 0, "varA": 2}, {"id": 1, "t": 10, "varA": 4}, {"id": 1, "t": 20, "varA": 6}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv3, "id", "t", "E", delay=2).fillna(0)
expected = pd.DataFrame.from_records(
[
{"start": 0, "stop": 2.0, "varA": 0.0, "id": 1, "E": False},
{"start": 2, "stop": 12.0, "varA": 2.0, "id": 1, "E": False},
{"start": 12, "stop": 22.0, "varA": 4.0, "id": 1, "E": False},
{"start": 22, "stop": 50.0, "varA": 6.0, "id": 1, "E": True},
]
)
assert_frame_equal(expected, df, check_like=True)
def test_covariates_from_event_matrix_with_simple_addition(self):
base_df = pd.DataFrame([[1, 0, 5, 1], [2, 0, 4, 1], [3, 0, 8, 1], [4, 0, 4, 1]], columns=["id", "start", "stop", "e"])
event_df = pd.DataFrame([[1, 1], [2, 2], [3, 3], [4, None]], columns=["id", "poison"])
cv = utils.covariates_from_event_matrix(event_df, "id")
ldf = utils.add_covariate_to_timeline(base_df, cv, "id", "duration", "e", cumulative_sum=True)
assert pd.notnull(ldf).all().all()
expected = pd.DataFrame(
[
(0.0, 0.0, 1.0, 1, False),
(1.0, 1.0, 5.0, 1, True),
(0.0, 0.0, 2.0, 2, False),
(2.0, 1.0, 4.0, 2, True),
(0.0, 0.0, 3.0, 3, False),
(3.0, 1.0, 8.0, 3, True),
(0.0, 0.0, 4.0, 4, True),
],
columns=["start", "cumsum_poison", "stop", "id", "e"],
)
assert_frame_equal(expected, ldf, check_dtype=False, check_like=True)
def test_covariates_from_event_matrix(self):
base_df = pd.DataFrame([[1, 0, 5, 1], [2, 0, 4, 1], [3, 0, 8, 1], [4, 0, 4, 1]], columns=["id", "start", "stop", "e"])
event_df = pd.DataFrame(
[[1, 1, None, 2], [2, None, 5, None], [3, 3, 3, 7]], columns=["id", "promotion", "movement", "raise"]
)
cv = utils.covariates_from_event_matrix(event_df, "id")
ldf = utils.add_covariate_to_timeline(base_df, cv, "id", "duration", "e", cumulative_sum=True)
expected = pd.DataFrame.from_records(
[
{
"cumsum_movement": 0.0,
"cumsum_promotion": 0.0,
"cumsum_raise": 0.0,
"e": 0.0,
"id": 1.0,
"start": 0.0,
"stop": 1.0,
},
{
"cumsum_movement": 0.0,
"cumsum_promotion": 1.0,
"cumsum_raise": 0.0,
"e": 0.0,
"id": 1.0,
"start": 1.0,
"stop": 2.0,
},
{
"cumsum_movement": 0.0,
"cumsum_promotion": 1.0,
"cumsum_raise": 1.0,
"e": 1.0,
"id": 1.0,
"start": 2.0,
"stop": 5.0,
},
{
"cumsum_movement": 0.0,
"cumsum_promotion": 0.0,
"cumsum_raise": 0.0,
"e": 1.0,
"id": 2.0,
"start": 0.0,
"stop": 4.0,
},
{
"cumsum_movement": 0.0,
"cumsum_promotion": 0.0,
"cumsum_raise": 0.0,
"e": 0.0,
"id": 3.0,
"start": 0.0,
"stop": 3.0,
},
{
"cumsum_movement": 1.0,
"cumsum_promotion": 1.0,
"cumsum_raise": 0.0,
"e": 0.0,
"id": 3.0,
"start": 3.0,
"stop": 7.0,
},
{
"cumsum_movement": 1.0,
"cumsum_promotion": 1.0,
"cumsum_raise": 1.0,
"e": 1.0,
"id": 3.0,
"start": 7.0,
"stop": 8.0,
},
{
"cumsum_movement": None,
"cumsum_promotion": None,
"cumsum_raise": None,
"e": 1.0,
"id": 4.0,
"start": 0.0,
"stop": 4.0,
},
]
)
assert_frame_equal(expected, ldf, check_dtype=False, check_like=True)
    def test_to_episodic_format_with_long_time_gap_is_identical(self):
        """A time_gaps value larger than any duration must reproduce the input frame."""
        rossi = load_rossi()
        rossi["id"] = np.arange(rossi.shape[0])
        long_rossi = utils.to_episodic_format(rossi, duration_col="week", event_col="arrest", id_col="id", time_gaps=1000.0)
        # with one episode per subject, stop == original duration
        long_rossi["week"] = long_rossi["stop"].astype(rossi["week"].dtype)
        del long_rossi["start"]
        del long_rossi["stop"]
        assert_frame_equal(long_rossi, rossi, check_like=True)
    def test_to_episodic_format_preserves_outcome(self):
        """The event flag must appear only on each subject's final episode."""
        E = [1, 1, 0, 0]
        df = pd.DataFrame({"T": [1, 3, 1, 3], "E": E, "id": [1, 2, 3, 4]})
        long_df = utils.to_episodic_format(df, "T", "E", id_col="id").sort_values(["id", "stop"])
        assert long_df.shape[0] == 1 + 3 + 1 + 3
        assert long_df.groupby("id").last()["E"].tolist() == E
    def test_to_episodic_format_handles_floating_durations(self):
        """Non-integer durations produce a final fractional episode boundary."""
        df = pd.DataFrame({"T": [0.1, 3.5], "E": [1, 1], "id": [1, 2]})
        long_df = utils.to_episodic_format(df, "T", "E", id_col="id").sort_values(["id", "stop"])
        assert long_df.shape[0] == 1 + 4
        assert long_df["stop"].tolist() == [0.1, 1, 2, 3, 3.5]
    def test_to_episodic_format_handles_floating_durations_with_time_gaps(self):
        """Fractional durations combine correctly with a custom time_gaps size."""
        df = pd.DataFrame({"T": [0.1, 3.5], "E": [1, 1], "id": [1, 2]})
        long_df = utils.to_episodic_format(df, "T", "E", id_col="id", time_gaps=2.0).sort_values(["id", "stop"])
        assert long_df["stop"].tolist() == [0.1, 2, 3.5]
    def test_to_episodic_format_handles_floating_durations_and_preserves_events(self):
        """Event flags survive episodic splitting with fractional durations."""
        df = pd.DataFrame({"T": [0.1, 3.5], "E": [1, 0], "id": [1, 2]})
        long_df = utils.to_episodic_format(df, "T", "E", id_col="id", time_gaps=2.0).sort_values(["id", "stop"])
        assert long_df.groupby("id").last()["E"].tolist() == [1, 0]
def test_to_episodic_format_handles_floating_durations_and_preserves_events(self):
df = pd.DataFrame({"T": [0.1, 3.5], "E": [1, 0], "id": [1, 2]})
long_df = utils.to_episodic_format(df, "T", "E", id_col="id", time_gaps=2.0).sort_values(["id", "stop"])
assert long_df.groupby("id").last()["E"].tolist() == [1, 0]
    def test_to_episodic_format_adds_id_col(self):
        """When no id_col is given, an 'id' column is synthesized."""
        df = pd.DataFrame({"T": [1, 3], "E": [1, 0]})
        long_df = utils.to_episodic_format(df, "T", "E")
        assert "id" in long_df.columns
    def test_to_episodic_format_uses_custom_index_as_id(self):
        """A non-default DataFrame index is reused as the synthesized id."""
        df = pd.DataFrame({"T": [1, 3], "E": [1, 0]}, index=["A", "B"])
        long_df = utils.to_episodic_format(df, "T", "E")
        assert long_df["id"].tolist() == ["A", "B", "B", "B"]
class TestStepSizer:
    """Behavioral tests for utils.StepSizer's adaptive step-size heuristic."""
    def test_StepSizer_step_will_decrease_if_unstable(self):
        """Oscillating norm deltas (1, 2, 1, 2) should shrink the step."""
        start = 0.95
        ss = utils.StepSizer(start)
        assert ss.next() == start
        ss.update(1.0)
        ss.update(2.0)
        ss.update(1.0)
        ss.update(2.0)
        assert ss.next() < start
    def test_StepSizer_step_will_increase_if_stable(self):
        """Monotonically shrinking norm deltas should grow the step."""
        start = 0.5
        ss = utils.StepSizer(start)
        assert ss.next() == start
        ss.update(1.0)
        ss.update(0.5)
        ss.update(0.4)
        ss.update(0.1)
        assert ss.next() > start
    def test_StepSizer_step_will_decrease_if_explodes(self):
        """A single very large norm delta should immediately shrink the step."""
        start = 0.5
        ss = utils.StepSizer(start)
        assert ss.next() == start
        ss.update(20.0)
        assert ss.next() < start
class TestSklearnAdapter:
    """Tests that sklearn_adapter wraps lifelines fitters into sklearn-compatible
    estimators (fit/predict/score, cross-validation, grid search, serialization).

    Several serialization/parallelization tests are marked xfail: the adapter
    builds classes dynamically, which pickle/dill/joblib cannot locate by name.
    """
    @pytest.fixture
    def X(self):
        # covariate matrix (duration column removed)
        return load_regression_dataset().drop("T", axis=1)
    @pytest.fixture
    def Y(self):
        # duration vector
        return load_regression_dataset().pop("T")
    def test_model_has_correct_api(self, X, Y):
        """Adapted estimator exposes the sklearn fit/predict/score triple."""
        base_model = sklearn_adapter(CoxPHFitter, event_col="E")
        cph = base_model()
        assert hasattr(cph, "fit")
        cph.fit(X, Y)
        assert hasattr(cph, "predict")
        cph.predict(X)
        assert hasattr(cph, "score")
        cph.score(X, Y)
    def test_sklearn_cross_val_score_accept_model(self, X, Y):
        from sklearn.model_selection import cross_val_score
        from sklearn.model_selection import GridSearchCV
        base_model = sklearn_adapter(WeibullAFTFitter, event_col="E")
        wf = base_model(penalizer=1.0)
        assert len(cross_val_score(wf, X, Y, cv=3)) == 3
    def test_sklearn_GridSearchCV_accept_model(self, X, Y):
        """Grid search over penalizer/ancillary settles on a known optimum."""
        from sklearn.model_selection import cross_val_score
        from sklearn.model_selection import GridSearchCV
        base_model = sklearn_adapter(WeibullAFTFitter, event_col="E")
        grid_params = {"penalizer": 10.0 ** np.arange(-2, 3), "model_ancillary": [True, False]}
        clf = GridSearchCV(base_model(), grid_params, cv=4)
        clf.fit(X, Y)
        assert clf.best_params_ == {"model_ancillary": True, "penalizer": 100.0}
        assert clf.predict(X).shape[0] == X.shape[0]
    def test_model_can_accept_things_like_strata(self, X, Y):
        """Fitter-specific kwargs (e.g. strata) pass through the adapter."""
        X["strata"] = np.random.randint(0, 2, size=X.shape[0])
        base_model = sklearn_adapter(CoxPHFitter, event_col="E")
        cph = base_model(strata="strata")
        cph.fit(X, Y)
    def test_we_can_user_other_prediction_methods(self, X, Y):
        """predict_method can redirect predict() to e.g. predict_median."""
        base_model = sklearn_adapter(WeibullAFTFitter, event_col="E", predict_method="predict_median")
        wf = base_model(strata="strata")
        wf.fit(X, Y)
        assert wf.predict(X).shape[0] == X.shape[0]
    @pytest.mark.xfail
    def test_dill(self, X, Y):
        import dill
        base_model = sklearn_adapter(CoxPHFitter, event_col="E")
        cph = base_model()
        cph.fit(X, Y)
        s = dill.dumps(cph)
        s = dill.loads(s)
        assert cph.predict(X).shape[0] == X.shape[0]
    @pytest.mark.xfail
    def test_pickle(self, X, Y):
        import pickle
        base_model = sklearn_adapter(CoxPHFitter, event_col="E")
        cph = base_model()
        cph.fit(X, Y)
        s = pickle.dumps(cph, protocol=-1)
        s = pickle.loads(s)
        assert cph.predict(X).shape[0] == X.shape[0]
    def test_isinstance(self):
        from sklearn.base import BaseEstimator, RegressorMixin, MetaEstimatorMixin, MultiOutputMixin
        base_model = sklearn_adapter(CoxPHFitter, event_col="E")
        assert isinstance(base_model(), BaseEstimator)
        assert isinstance(base_model(), RegressorMixin)
        assert isinstance(base_model(), MetaEstimatorMixin)
    @pytest.mark.xfail
    def test_sklearn_GridSearchCV_accept_model_with_parallelization(self, X, Y):
        # n_jobs=-1 requires pickling the dynamic class, hence xfail
        from sklearn.model_selection import cross_val_score
        from sklearn.model_selection import GridSearchCV
        base_model = sklearn_adapter(WeibullAFTFitter, event_col="E")
        grid_params = {"penalizer": 10.0 ** np.arange(-2, 3), "l1_ratio": [0.05, 0.5, 0.95], "model_ancillary": [True, False]}
        clf = GridSearchCV(base_model(), grid_params, cv=4, n_jobs=-1)
        clf.fit(X, Y)
        assert clf.best_params_ == {"l1_ratio": 0.5, "model_ancillary": False, "penalizer": 0.01}
        assert clf.predict(X).shape[0] == X.shape[0]
    @pytest.mark.xfail
    def test_joblib(self, X, Y):
        from joblib import dump, load
        base_model = sklearn_adapter(WeibullAFTFitter, event_col="E")
        clf = base_model()
        clf.fit(X, Y)
        dump(clf, "filename.joblib")
        clf = load("filename.joblib")
@pytest.mark.xfail
def test_sklearn_check():
    """sklearn's full estimator contract check; xfail — the adapter is not fully compliant."""
    from sklearn.utils.estimator_checks import check_estimator
    base_model = sklearn_adapter(WeibullAFTFitter, event_col="E")
    check_estimator(base_model())
def test_rmst_works_at_kaplan_meier_edge_case():
    """RMST at/around the last observed time behaves continuously for a KM curve."""
    T = [1, 2, 3, 4, 10]
    kmf = KaplanMeierFitter().fit(T)
    # beyond the last event, the integral stops growing
    assert utils.restricted_mean_survival_time(kmf, t=10) == utils.restricted_mean_survival_time(kmf, t=10.001)
    assert utils.restricted_mean_survival_time(kmf, t=9.9) <= utils.restricted_mean_survival_time(kmf, t=10.0)
    # step-function integral: sum of survival probabilities over unit intervals
    assert abs((utils.restricted_mean_survival_time(kmf, t=4) - (1.0 + 0.8 + 0.6 + 0.4))) < 0.0001
    assert abs((utils.restricted_mean_survival_time(kmf, t=4 + 0.1) - (1.0 + 0.8 + 0.6 + 0.4 + 0.2 * 0.1))) < 0.0001
def test_rmst_exactely_with_known_solution():
    """RMST of an exponential matches its closed form.  (Typo 'exactely' kept: renaming would change the test id.)"""
    T = np.random.exponential(2, 100)
    exp = ExponentialFitter().fit(T)
    lambda_ = exp.lambda_
    # E[T] = lambda; E[min(T, lambda)] = lambda * (e - 1) / e
    assert abs(utils.restricted_mean_survival_time(exp) - lambda_) < 0.001
    assert abs(utils.restricted_mean_survival_time(exp, t=lambda_) - lambda_ * (np.e - 1) / np.e) < 0.001
@flaky
def test_rmst_approximate_solution():
    """The survival-function approximation agrees with the fitter-based RMST."""
    T = np.random.exponential(2, 4000)
    exp = ExponentialFitter().fit(T, timeline=np.linspace(0, T.max(), 10000))
    lambda_ = exp.lambda_
    # the DataFrame path emits an ApproximationWarning; `w` is intentionally unused
    with pytest.warns(exceptions.ApproximationWarning) as w:
        assert (
            abs(
                utils.restricted_mean_survival_time(exp, t=lambda_)
                - utils.restricted_mean_survival_time(exp.survival_function_, t=lambda_)
            )
            < 0.001
        )
def test_rmst_variance():
    """RMST variance of an exponential matches the analytic mean/variance."""
    T = np.random.exponential(2, 1000)
    expf = ExponentialFitter().fit(T)
    hazard = 1 / expf.lambda_
    t = 1
    sq = 2 / hazard ** 2 * (1 - np.exp(-hazard * t) * (1 + hazard * t))
    actual_mean = 1 / hazard * (1 - np.exp(-hazard * t))
    actual_var = sq - actual_mean ** 2
    assert abs(utils.restricted_mean_survival_time(expf, t=t, return_variance=True)[0] - actual_mean) < 0.001
    assert abs(utils.restricted_mean_survival_time(expf, t=t, return_variance=True)[1] - actual_var) < 0.001
def test_find_best_parametric_model():
    """Smoke test: model selection runs on right-censored data."""
    T = np.random.exponential(2, 1000)
    E = np.ones_like(T)
    model, score = utils.find_best_parametric_model(T, E)
    assert True
def test_find_best_parametric_model_can_accept_other_models():
    """Smoke test: user-supplied candidate models are accepted."""
    T = np.random.exponential(2, 1000)
    model, score = utils.find_best_parametric_model(T, additional_models=[ExponentialFitter(), ExponentialFitter()])
    assert True
def test_find_best_parametric_model_with_BIC():
    """Smoke test: BIC scoring is accepted."""
    T = np.random.exponential(2, 1000)
    model, score = utils.find_best_parametric_model(T, scoring_method="BIC")
    assert True
def test_find_best_parametric_model_works_for_left_censoring():
    """Smoke test: left-censored input path."""
    T = np.random.exponential(2, 100)
    model, score = utils.find_best_parametric_model(T, censoring_type="left", show_progress=True)
    assert True
def test_find_best_parametric_model_works_for_interval_censoring():
    """Smoke test: interval-censored (lower, upper) input path."""
    T_1 = np.random.exponential(2, 100)
    T_2 = T_1 + 1
    model, score = utils.find_best_parametric_model((T_1, T_2), censoring_type="interval", show_progress=True)
    assert True
def test_find_best_parametric_model_works_with_weights_and_entry():
    """Smoke test: sample weights and late-entry times are accepted."""
    T = np.random.exponential(5, 100)
    W = np.random.randint(1, 5, size=100)
    entry = np.random.exponential(0.01, 100)
    model, score = utils.find_best_parametric_model(T, weights=W, entry=entry, show_progress=True)
    assert True
def test_safe_exp():
    """safe_exp clamps its argument at MAX, and so does its autograd gradient."""
    from lifelines.utils.safe_exp import MAX
    assert safe_exp(4.0) == np.exp(4.0)
    assert safe_exp(MAX) == np.exp(MAX)
    # values beyond MAX are clamped instead of overflowing
    assert safe_exp(MAX + 1) == np.exp(MAX)
    from autograd import grad
    assert grad(safe_exp)(4.0) == np.exp(4.0)
    assert grad(safe_exp)(MAX) == np.exp(MAX)
    assert grad(safe_exp)(MAX + 1) == np.exp(MAX)
| true | true |
f7117002249c5cd4e3d455a21d49ac1b75cdb790 | 21,672 | py | Python | install/tools/script/common/project_utils.py | atframework/atsf4g-co | 7323ccbbc238f7d653f92c8d2aa9079beb4ef09b | [
"MIT"
] | 68 | 2016-09-08T14:36:22.000Z | 2022-03-15T09:45:53.000Z | install/tools/script/common/project_utils.py | atframework/atsf4g-co | 7323ccbbc238f7d653f92c8d2aa9079beb4ef09b | [
"MIT"
] | 3 | 2019-06-09T10:27:23.000Z | 2021-09-09T07:55:37.000Z | install/tools/script/common/project_utils.py | atframework/atsf4g-co | 7323ccbbc238f7d653f92c8d2aa9079beb4ef09b | [
"MIT"
] | 24 | 2016-11-17T12:53:24.000Z | 2021-09-26T07:37:42.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import ctypes
import platform
import cgi
import re
import hashlib
import stat
import codecs
import common.print_color
environment_check_shm = None
global_opts = None
global_cahce = dict(id_offset=0, services_type=[])
server_opts = None
server_name = ''
server_index = 1
server_alloc_listen_port = 0
server_proxy_addr = ''
server_cache_id = None
server_cache_full_name = None
server_cache_ip = dict()
project_templete_engine_lookup = None
def set_global_opts(opts, id_offset):
    """Install the parsed project configuration and the service index offset.

    Caches, in type-id order, the names of every service in [atservice] that
    has a matching [server.<name>] section (atgateway excluded) into
    global_cahce['services_type'].  (The 'cahce' spelling is a historical typo
    kept because the name is referenced throughout this module.)
    """
    global global_opts
    global global_cahce
    global_opts = opts
    global_cahce['id_offset'] = id_offset
    # sort services by their numeric type id in [atservice]
    for server_type in sorted(opts.items('atservice'), key=lambda x: int(x[1])):
        if 'atgateway' == server_type[0]:
            continue
        svr_name = 'server.{0}'.format(server_type[0])
        if global_opts.has_section(svr_name):
            global_cahce['services_type'].append(server_type[0])
def set_templete_engine(engine):
    """Install the mako TemplateLookup used by render()/render_to().

    (The 'templete' spelling is part of the public API and kept as-is.)
    """
    global project_templete_engine_lookup
    project_templete_engine_lookup = engine
def render_string(content, **render_options):
    """Render an inline mako template string and return the result."""
    from mako.template import Template
    return Template(content).render(**render_options)
def render(template_name, **render_options):
    """Render a named template via the installed lookup engine.

    Returns the rendered text, or an empty string (with a message on stderr)
    when no engine has been installed or the template cannot be found.
    """
    if project_templete_engine_lookup is None:
        common.print_color.cprintf_stderr([common.print_color.print_style.FC_RED, common.print_color.print_style.FW_BOLD],
                                          'template not available now\r\n')
        return ""
    tmpl = project_templete_engine_lookup.get_template(template_name)
    if tmpl is None:
        common.print_color.cprintf_stderr([common.print_color.print_style.FC_RED, common.print_color.print_style.FW_BOLD],
                                          'template {0} not found\r\n', template_name)
        return ""
    return tmpl.render(**render_options)
def render_to(template_name, output_path, **render_options):
    """Render a named template and write the result to output_path (UTF-8).

    Creates missing parent directories, then marks the output file
    rwx for user/group and r-x for others.
    """
    dir_path = os.path.dirname(output_path)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path, stat.S_IRWXU + stat.S_IRWXG + stat.S_IRWXO)
    try:
        # use a context manager so the handle is flushed and closed before
        # chmod; the original leaked the file object (and its falsy check was
        # dead code — codecs.open raises on failure instead of returning None)
        with codecs.open(output_path, mode='w', encoding='utf-8') as output_file:
            output_file.write(render(template_name, **render_options))
    except IOError:
        common.print_color.cprintf_stderr([common.print_color.print_style.FC_RED, common.print_color.print_style.FW_BOLD],
                                          'try to render {0} but open {1} for writing failed\r\n', template_name, output_path)
        return
    os.chmod(output_path, stat.S_IRWXU + stat.S_IRWXG + stat.S_IROTH + stat.S_IXOTH)
def get_service_index_range(number=1):
    """Return the 1-based index range for `number` service instances, shifted by the configured id offset."""
    return range(1 + global_cahce['id_offset'], 1 + global_cahce['id_offset'] + number)
def get_global_all_services():
    """Return the cached, type-id-ordered list of service names (see set_global_opts)."""
    return global_cahce['services_type']
def set_server_inst(opts, key, index):
    """Select the current server instance (name + index) for subsequent getters.

    Also invalidates the cached server id and full name, which are derived
    from the selected instance.
    """
    global server_opts
    global server_name
    global server_index
    global server_cache_id
    global server_cache_full_name
    server_opts = opts
    server_name = key
    server_index = index
    # invalidate caches derived from (name, index)
    server_cache_id = None
    server_cache_full_name = None
def get_ipv4_level(ip_addr):
    """Classify a dotted-quad IPv4 string into a numeric scope level.

    Lower is "preferred/private-first" when used as a sort key (see
    get_ip_list_v4): 0 = public, 1-3 = RFC1918 private, 11 = link-local,
    21 = loopback, 31 = multicast, 32 = reserved/for-test, 51 = limited
    broadcast, 99 = not a valid IPv4 address.
    """
    try:
        ip_addrs = [int(x) for x in ip_addr.split('.')]
    except ValueError:
        # non-numeric component => invalid ipv4 level 99
        # (the original raised ValueError here instead of returning 99)
        return 99
    # => invalid ipv4 level 99
    if len(ip_addrs) != 4:
        return 99
    # 10.0.0.0/8 => private level 1
    if ip_addrs[0] == 10:
        return 1
    # 172.16.0.0/12 => private level 2: second octet in 16..31, i.e. the top
    # nibble equals 0x10 (the original tested a single bit with `& 0x10`,
    # which also matched 172.48.x.x, 172.80.x.x, ...)
    if ip_addrs[0] == 172 and (ip_addrs[1] & 0xF0) == 0x10:
        return 2
    # 192.168.0.0/16 => private level 3
    if ip_addrs[0] == 192 and ip_addrs[1] == 168:
        return 3
    # 169.254.0.0/16 => link-local level 11
    if ip_addrs[0] == 169 and ip_addrs[1] == 254:
        return 11
    # 127.0.0.0/8 => loopback level 21
    if ip_addrs[0] == 127:
        return 21
    # 255.255.255.255 => limited broadcast level 51 (must precede the /4 tests,
    # which would otherwise swallow it)
    if ip_addrs == [255, 255, 255, 255]:
        return 51
    # 224.0.0.0/4 => multicast level 31 (mask 0xF0; the original's `& 0xE0`
    # matched 224-255 and made the two branches below unreachable)
    if (ip_addrs[0] & 0xF0) == 0xE0:
        return 31
    # 240.0.0.0/4 => for-test/reserved level 32
    if (ip_addrs[0] & 0xF0) == 0xF0:
        return 32
    # public address => level 0
    return 0
def is_ipv4_link_local(ip_addr):
    """True when the address is non-routable: link-local, loopback, multicast, reserved, broadcast, or invalid (level >= 11)."""
    return get_ipv4_level(ip_addr) >= 11
def is_ipv6_link_local(ip_addr):
    """Return True for loopback, fe80::/10 link-local, and IPv4-mapped loopback IPv6 addresses."""
    addr = ip_addr.lower()
    # loopback, compressed and fully expanded
    if addr in ("::1", "0:0:0:0:0:0:0:1"):
        return True
    # fe80::/10 => Link local address prefix
    # (FEC0::/10 site-local is deliberately not filtered here)
    if addr.startswith("fe80") and len(addr) > 4:
        return True
    # IPv4-mapped IPv6 forms of the IPv4 loopback
    return addr in ("::127.0.0.1", "::ffff:127.0.0.1")
def get_ip_list_v4():
    """Return this host's usable IPv4 addresses, private-first, cached in server_cache_ip.

    Falls back to a UDP "connect" trick against a public DNS server when
    getaddrinfo yields nothing usable.  Errors are deliberately swallowed:
    an empty list simply means detection failed.
    """
    global server_cache_ip
    if 'ipv4' not in server_cache_ip:
        import socket
        server_cache_ip['ipv4'] = []
        try:
            for ip_pair in socket.getaddrinfo(socket.gethostname(), 0, socket.AF_INET, socket.SOCK_STREAM):
                ip_addr = ip_pair[4][0]
                if not is_ipv4_link_local(ip_addr):
                    server_cache_ip['ipv4'].append(ip_addr)
            # use a UDP socket to detect the ipv4 address if getaddrinfo found none
            # (no packet is sent; connect() only selects the outbound interface)
            if 0 == len(server_cache_ip['ipv4']):
                csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                csock.connect(('8.8.8.8', 53))  # use google's DNS
                res = csock.getsockname()
                if res:
                    server_cache_ip['ipv4'].append(res[0])
                csock.close()
            # private (RFC1918) addresses sort first — see get_ipv4_level
            server_cache_ip['ipv4'] = sorted(server_cache_ip['ipv4'], key=get_ipv4_level)
        except:
            pass
    return server_cache_ip['ipv4']
def get_ip_list_v6():
    """Return this host's non-link-local IPv6 addresses, cached in server_cache_ip.

    Zone/interface suffixes ("%eth0") are stripped.  Falls back to a UDP
    "connect" trick against a public DNS server when getaddrinfo yields
    nothing usable.  Errors are deliberately swallowed.
    """
    global server_cache_ip
    if 'ipv6' not in server_cache_ip:
        import socket
        server_cache_ip['ipv6'] = []
        try:
            for ip_pair in socket.getaddrinfo(socket.gethostname(), 0, socket.AF_INET6, socket.SOCK_STREAM):
                ip_addr = ip_pair[4][0]
                interface_index = ip_addr.find('%')
                # remove interface name
                if interface_index > 0:
                    ip_addr = ip_addr[0:interface_index]
                if not is_ipv6_link_local(ip_addr):
                    server_cache_ip['ipv6'].append(ip_addr)
            # use socket to detect ipv6 address if can not find any address
            # (no packet is sent; connect() only selects the outbound interface)
            if 0 == len(server_cache_ip['ipv6']):
                csock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
                csock.connect(('2001:4860:4860::8888', 53))  # use google's DNS
                res = csock.getsockname()
                if res:
                    ip_addr = res[0]
                    interface_index = ip_addr.find('%')
                    # remove interface name
                    if interface_index > 0:
                        ip_addr = ip_addr[0:interface_index]
                    if not is_ipv6_link_local(ip_addr):
                        server_cache_ip['ipv6'].append(ip_addr)
                csock.close()
        except:
            pass
    return server_cache_ip['ipv6']
def is_ip_v6_enabled():
    """True when at least one usable (non-link-local) IPv6 address was detected."""
    ipv6s = get_ip_list_v6()
    return len(ipv6s) > 0
def get_inner_ipv4():
    """Inner (bind/connect) IPv4: SYSTEM_MACRO_INNER_IPV4 override, else the first detected address, else loopback."""
    if 'SYSTEM_MACRO_INNER_IPV4' in os.environ:
        return os.environ['SYSTEM_MACRO_INNER_IPV4']
    # detect inner ip address
    res = get_ip_list_v4()
    if 0 == len(res):
        return '127.0.0.1'
    return res[0]
def get_outer_ipv4():
    """Outer (listen) IPv4: SYSTEM_MACRO_OUTER_IPV4 override, else the first detected address; loopback degrades to wildcard 0.0.0.0."""
    if 'SYSTEM_MACRO_OUTER_IPV4' in os.environ:
        return os.environ['SYSTEM_MACRO_OUTER_IPV4']
    # detect outer ip address
    res = get_ip_list_v4()
    if 0 == len(res):
        return '0.0.0.0'
    ret = res[0]
    if '127.0.0.1' == ret:
        ret = '0.0.0.0'
    return ret
def get_inner_ipv6():
    """Inner (bind/connect) IPv6: SYSTEM_MACRO_INNER_IPV6 override, else the first detected address, else loopback ::1."""
    if 'SYSTEM_MACRO_INNER_IPV6' in os.environ:
        return os.environ['SYSTEM_MACRO_INNER_IPV6']
    # detect inner ip address
    res = get_ip_list_v6()
    if 0 == len(res):
        return '::1'
    return res[0]
def get_outer_ipv6():
    """Outer (listen) IPv6: SYSTEM_MACRO_OUTER_IPV6 override, else the first detected address; loopback degrades to wildcard ::."""
    if 'SYSTEM_MACRO_OUTER_IPV6' in os.environ:
        return os.environ['SYSTEM_MACRO_OUTER_IPV6']
    # detect outer ip address
    res = get_ip_list_v6()
    if 0 == len(res):
        return '::'
    ret = res[0]
    if '::1' == ret:
        ret = '::'
    return ret
def get_global_option(section, key, default_val, env_name=None):
    """Read a config option with precedence: environment variable (if env_name given) > ini option > default_val."""
    global global_opts
    if not env_name is None and env_name in os.environ:
        return os.environ[env_name]
    if global_opts.has_option(section, key):
        return global_opts.get(section, key)
    return default_val
def get_hostname():
    """Return (hostname, is_uuid): the configured hostname, or a MAC-derived hex id when none is configured.

    The result is cached; is_uuid is True only for the MAC-derived fallback.
    """
    global server_cache_ip
    if 'hostname' not in server_cache_ip:
        server_cache_ip['hostname'] = get_global_option(
            'atsystem', 'hostname', '', 'SYSTEM_MACRO_HOSTNAME')
        server_cache_ip['hostname_is_uuid'] = False
        if server_cache_ip['hostname'] is None or len(server_cache_ip['hostname']) == 0:
            # using uuid module to find physic address (last 12 hex digits = MAC)
            import uuid
            server_cache_ip['hostname'] = uuid.UUID(
                int=uuid.getnode()).hex[-12:]
            server_cache_ip['hostname_is_uuid'] = True
    return server_cache_ip['hostname'], server_cache_ip['hostname_is_uuid']
def str_to_list(val):
    """Split a comma-separated value into a list of stripped, non-empty strings.

    None yields an empty list; any other value is stringified first.
    """
    if val is None:
        return []
    stripped = (piece.strip() for piece in str(val).split(','))
    return [piece for piece in stripped if len(piece) > 0]
def list_to_hosts(val):
    """Expand items of the form ``host:A-B[suffix]`` into one entry per port.

    ``['h:1-3']`` becomes ``['h:1', 'h:2', 'h:3']`` (range inclusive, any
    trailing suffix preserved); items without a port range pass through
    unchanged.
    """
    ret = []
    # raw string: the original non-raw '\d' is an invalid escape sequence
    # (DeprecationWarning, a future SyntaxError)
    mat = re.compile(r'(.*):(\d+)-(\d+)(.*)$')
    for item in val:
        mat_res = mat.match(item)
        if not mat_res is None:
            for i in range(int(mat_res.group(2)), int(mat_res.group(3)) + 1):
                ret.append('{0}:{1}{2}'.format(
                    mat_res.group(1), i, mat_res.group(4)))
        else:
            ret.append(item)
    return ret
def str_to_hosts(val):
    """Split a comma-separated string and expand host:port-range entries."""
    return list_to_hosts(str_to_list(val))
def get_global_list(section, key, default_val, env_name=None):
    """Read a config option and split it into a comma-separated list ([] when unset)."""
    res = get_global_option(section, key, default_val, env_name)
    if res is None:
        return []
    return str_to_list(res)
def get_global_list_to_hosts(section, key, default_val, env_name=None):
    """Read a config option as a list and expand host:port-range entries."""
    res = get_global_list(section, key, default_val, env_name)
    return list_to_hosts(res)
def get_global_option_bool(section, key, default_val, env_name=None):
    """Read a config option as a boolean: empty/'0'/'false'/'no'/'disable' (case-insensitive) are False."""
    val = get_global_option(section, key, default_val, env_name)
    if not val:
        return False
    val = str(val).lower().strip()
    return len(val) > 0 and '0' != val and 'false' != val and 'no' != val and 'disable' != val
def get_server_name():
    """Name of the currently selected server instance (see set_server_inst)."""
    global server_name
    return server_name
def get_server_type_id(server_name=None):
    """Numeric type id of a service from [atservice]; 0 when unknown."""
    if server_name is None:
        server_name = get_server_name()
    if not global_opts.has_option('atservice', server_name):
        return 0
    return int(get_global_option('atservice', server_name, 0))
def get_server_option(key, default_val, env_name=None):
    """Read an option from the current server's [server.<name>] section."""
    return get_global_option('server.{0}'.format(get_server_name()), key, default_val, env_name)
def get_server_list(key, default_val, env_name=None):
    """Read a comma-separated list option from the current server's section."""
    return get_global_list('server.{0}'.format(get_server_name()), key, default_val, env_name)
def get_server_list_to_hosts(key, default_val, env_name=None):
    """Read a list option from the current server's section and expand port ranges."""
    return get_global_list_to_hosts('server.{0}'.format(get_server_name()), key, default_val, env_name)
def get_server_option_bool(key, default_val, env_name=None):
    """Read a boolean option from the current server's section."""
    return get_global_option_bool('server.{0}'.format(get_server_name()), key, default_val, env_name)
def get_server_or_global_option(section, key, default_val, env_name=None):
    """Read '<section>.<key>' from the current server's section, falling back to [section] key."""
    ret = get_server_option('{0}.{1}'.format(section, key), None, None)
    if ret is None:
        return get_global_option(section, key, default_val, env_name)
    return ret
def get_server_or_global_list(section, key, default_val, env_name=None):
    """List variant of get_server_or_global_option; empty server value falls back to global."""
    ret = get_server_list('{0}.{1}'.format(section, key), None, None)
    if ret is None or len(ret) == 0:
        return get_global_list(section, key, default_val, env_name)
    return ret
def get_server_or_global_list_to_hosts(section, key, default_val, env_name=None):
    """Host-list variant of get_server_or_global_option with port-range expansion."""
    ret = get_server_list_to_hosts('{0}.{1}'.format(section, key), None, None)
    if ret is None or len(ret) == 0:
        return get_global_list_to_hosts(section, key, default_val, env_name)
    return ret
def get_server_or_global_bool(section, key, default_val, env_name=None):
    """Boolean variant of get_server_or_global_option."""
    try_section_name = '{0}.{1}'.format(section, key)
    if get_server_option(try_section_name, None) is None:
        return get_global_option_bool(section, key, default_val, env_name)
    else:
        return get_server_option_bool(try_section_name, default_val, env_name)
def get_server_index():
    """Index of the currently selected server instance (see set_server_inst)."""
    global server_index
    return server_index
def get_server_group_inner_id(server_name=None, server_index=None):
    """Group-local id: type_step * type_id + index; 0 for unknown services."""
    global global_opts
    if server_name is None:
        server_name = get_server_name()
    if server_index is None:
        server_index = get_server_index()
    if not global_opts.has_option('atservice', server_name):
        return 0
    type_step = int(get_global_option('global', 'type_step', 0x100))
    type_id = int(get_global_option('atservice', server_name, 0))
    return type_step * type_id + server_index
def get_server_proc_id(server_name=None, server_index=None):
    """Globally unique process id: group_id * group_step + group-local id."""
    group_id = int(get_global_option(
        'global', 'group_id', 1, 'SYSTEM_MACRO_GROUP_ID'))
    group_step = int(get_global_option('global', 'group_step',
                                       0x10000, 'SYSTEM_MACRO_GROUP_STEP'))
    return group_id * group_step + get_server_group_inner_id(server_name, server_index)
def get_server_id():
    """Cached process id of the current server instance (0 for unknown services)."""
    global server_cache_id
    global global_opts
    if not server_cache_id is None:
        return server_cache_id
    if not global_opts.has_option('atservice', get_server_name()):
        return 0
    server_cache_id = get_server_proc_id()
    return server_cache_id
def get_server_full_name():
    """Cached '<name>-<index>' display name of the current server instance."""
    global server_cache_full_name
    if not server_cache_full_name is None:
        return server_cache_full_name
    server_cache_full_name = '{0}-{1}'.format(
        get_server_name(), get_server_index())
    return server_cache_full_name
def get_log_level():
    """Configured log level ([global] log_level, default 'debug')."""
    return get_global_option('global', 'log_level', 'debug', 'SYSTEM_MACRO_CUSTOM_LOG_LEVEL')
def get_log_dir():
    """Configured log directory ([global] log_dir, default '../log')."""
    return get_global_option('global', 'log_dir', '../log', 'SYSTEM_MACRO_CUSTOM_LOG_DIR')
def get_server_atbus_shm():
    """Return the shm:// atbus listen address, or None when SysV shared memory is unavailable.

    Availability is probed once via /proc/sys/kernel/shmmax and cached in the
    module-level environment_check_shm flag.  The shm key is derived from the
    configured key pool/offset plus the instance's group-local id and the
    global port offset.
    """
    global environment_check_shm
    if environment_check_shm is None:
        # check if it support shm
        if not os.path.exists('/proc/sys/kernel/shmmax'):
            environment_check_shm = False
        else:
            # context manager closes the handle (the original leaked it)
            with open('/proc/sys/kernel/shmmax', 'r') as shmmax_file:
                shm_max_sz = int(shmmax_file.read())
            environment_check_shm = shm_max_sz > 0
    if not environment_check_shm:
        return None
    port_offset = int(get_global_option('global', 'port_offset', 0, 'SYSTEM_MACRO_GLOBAL_PORT_OFFSET'))
    base_key = int(get_global_option('atsystem', 'shm_key_pool', 0x16000000, 'SYSTEM_MACRO_CUSTOM_SHM_KEY'))
    shm_key_offset = int(get_global_option('atsystem', 'shm_key_offset', 0, 'SYSTEM_MACRO_CUSTOM_SHM_KEY_OFFSET'))
    shm_key = base_key + shm_key_offset + get_server_group_inner_id(get_server_name(), get_server_index()) + port_offset
    return 'shm://{0}'.format(hex(shm_key))
def disable_server_atbus_shm():
    """Force-disable shared-memory atbus channels (overrides the probe in get_server_atbus_shm)."""
    global environment_check_shm
    environment_check_shm = False
def get_calc_listen_port(server_name=None, server_index=None, base_port='port'):
    """Compute the listen port for a server instance.

    `base_port` is the OPTION KEY to read from [server.<name>] (note: the
    parameter is rebound as the numeric base in the fallback branch).  If the
    section sets that key, the port is key + index + global port offset;
    otherwise it is derived from [atsystem] listen_port, type_step and type id.
    """
    if server_name is None:
        server_name = get_server_name()
    if server_index is None:
        server_index = get_server_index()
    ret = int(get_global_option(
        'server.{0}'.format(server_name), base_port, 0))
    port_offset = int(get_global_option('global', 'port_offset', 0, 'SYSTEM_MACRO_GLOBAL_PORT_OFFSET'))
    if ret == 0:
        # no explicit port: derive from the shared base port + type layout
        base_port = int(get_global_option(
            'atsystem', 'listen_port', 12000, 'SYSTEM_MACRO_CUSTOM_BASE_PORT'))
        type_step = int(get_global_option('global', 'type_step', 0x100))
        type_id = int(get_global_option('atservice', server_name, 0))
        return base_port + type_step * server_index + type_id + port_offset
    else:
        return ret + server_index + port_offset
def get_server_atbus_port():
    """Listen port of the current server's atbus channel."""
    return get_calc_listen_port()
def get_server_atbus_tcp():
    """TCP atbus listen URL; atproxy binds the outer address, everything else the inner one.

    Prefers ipv6:// when a usable IPv6 address was detected.
    """
    if is_ip_v6_enabled():
        if 'atproxy' == get_server_name():
            return 'ipv6://{0}:{1}'.format(get_outer_ipv6(), get_server_atbus_port())
        else:
            return 'ipv6://{0}:{1}'.format(get_inner_ipv6(), get_server_atbus_port())
    else:
        if 'atproxy' == get_server_name():
            return 'ipv4://{0}:{1}'.format(get_outer_ipv4(), get_server_atbus_port())
        else:
            return 'ipv4://{0}:{1}'.format(get_inner_ipv4(), get_server_atbus_port())
def get_server_atbus_unix():
    """Unix-socket atbus listen URL, namespaced by a hash of this script's path.

    NOTE(review): the final '/' fallback still builds the path under /tmp even
    though /tmp was just found missing — presumably intentional best-effort;
    confirm before changing.
    """
    # hash of this file's path keeps sockets of different deployments apart
    h = hashlib.sha1(__file__.encode('utf-8')).hexdigest()
    if os.path.exists('/tmp'):
        default_base = '/tmp/atapp/{0}/'.format(h)
    elif os.path.exists('/run/tmp'):
        default_base = '/run/tmp/atapp/{0}/'.format(h)
    elif os.path.exists('/'):
        default_base = '/tmp/atapp/{0}/'.format(h)
    else:
        default_base = './'
    dir_path = get_global_option(
        'atsystem', 'unix_sock_dir', default_base, 'SYSTEM_MACRO_CUSTOM_UNIX_SOCK_DIR')
    return 'unix://{0}{1}-{2:x}.sock'.format(dir_path, get_server_full_name(), get_server_id())
def get_server_atbus_listen():
    """Return the list of atbus listen URLs for the current server instance.

    Shared memory is used when available; the second channel is a unix socket
    when supported (probed once by binding a scratch socket in the working
    directory), otherwise TCP.  atproxy always listens on TCP.
    """
    global server_cache_ip
    ret = []
    res = get_server_atbus_shm()
    if not res is None:
        ret.append(res)
    if 'support_unix_sock' not in server_cache_ip:
        import socket
        if 'AF_UNIX' in socket.__dict__:
            # test unix sock, maybe defined but not available
            test_file_path = 'project-utils-test-unixx-sock.sock'
            try:
                test_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                test_sock.bind(test_file_path)
                test_sock.close()
                server_cache_ip['support_unix_sock'] = True
            except:
                server_cache_ip['support_unix_sock'] = False
            if os.path.exists(test_file_path):
                os.remove(test_file_path)
        else:
            server_cache_ip['support_unix_sock'] = False
    # fall back to TCP when shm is unavailable, unix sockets unsupported,
    # or this is the atproxy (which must be reachable from other hosts)
    if 0 == len(ret) or False == server_cache_ip['support_unix_sock'] or 'atproxy' == get_server_name():
        ret.append(get_server_atbus_tcp())
    else:
        ret.append(get_server_atbus_unix())
    return ret
def disable_server_atbus_unix_sock():
    """Force-disable unix-socket atbus channels (overrides the probe in get_server_atbus_listen)."""
    global server_cache_ip
    server_cache_ip['support_unix_sock'] = False
def get_server_proxy():
    """Return the atproxy address for non-proxy servers.

    When the current instance IS atproxy, records its own TCP address into
    the module-level server_proxy_addr and returns '' (a proxy has no parent).
    """
    global server_proxy_addr
    if 'atproxy' == get_server_name():
        server_proxy_addr = get_server_atbus_tcp()
        return ''
    return server_proxy_addr
def get_server_subnets():
    """Return the server's subnet list; bare mask lengths like '24' become '0/24'. Default ['0/0']."""
    ret = []
    for subnet in get_server_list('subnets', ['0/0']):
        if subnet.isdigit():
            ret.append('0/{0}'.format(subnet))
        else:
            ret.append(subnet)
    return ret
def get_server_recv_buffer_size():
    """Size of the shm receive channel in bytes ([atsystem] shm_channel_size, default 8 MiB)."""
    return get_global_option('atsystem', 'shm_channel_size', 8 * 1024 * 1024)
def get_server_send_buffer_size():
    """Size of the iostream send channel in bytes ([atsystem] iostream_channel_size, default 2 MiB)."""
    return get_global_option('atsystem', 'iostream_channel_size', 2 * 1024 * 1024)
def get_server_gateway_index(server_name=None, server_index=None, gateway_name=None):
    """Gateway slot index for a server instance: index_type_number * index + index_map_<name>.

    Raises when [server.<gateway>] has no index_map_<name> entry for the service.
    """
    if server_name is None:
        server_name = get_server_name()
    if server_index is None:
        server_index = get_server_index()
    if gateway_name is None:
        gateway_name = 'atgateway'
    gateway_section_name = 'server.{0}'.format(gateway_name)
    step = int(get_global_option(gateway_section_name, 'index_type_number', 1))
    offset = get_global_option(gateway_section_name, 'index_map_{0}'.format(server_name), None)
    if offset is None:
        raise Exception(
            'index_map_{0} is not found in {1}'.format(server_name, gateway_section_name))
    return step * server_index + int(offset)
def get_server_gateway_port(server_name, server_index, gateway_name=None, base_port='default_port'):
    """Listen port of the gateway slot for a server instance.

    `base_port` is the OPTION KEY read from the gateway section; a
    non-positive value falls back to the gateway's default_port (8000).
    """
    if gateway_name is None:
        gateway_name = 'atgateway'
    gateway_section_name = 'server.{0}'.format(gateway_name)
    ret = int(get_global_option(gateway_section_name, base_port, 0))
    port_offset = int(get_global_option('global', 'port_offset', 0, 'SYSTEM_MACRO_GLOBAL_PORT_OFFSET'))
    if ret <= 0:
        ret = int(get_global_option('server.{0}'.format(gateway_name), 'default_port', 8000))
    return ret + get_server_gateway_index(server_name, server_index, gateway_name) + port_offset
def get_gateway_server_names(gateway_name=None):
    """List the service names that have an index_map_<name> entry in the gateway's section."""
    global global_opts
    ret = []
    if gateway_name is None:
        gateway_name = 'atgateway'
    for maybe_svr_name in global_opts.options('server.{0}'.format(gateway_name)):
        # only the 'index_map_' prefixed options declare mapped services
        if maybe_svr_name[0:10] != "index_map_":
            continue
        ret.append(maybe_svr_name[10:])
    return ret
def get_etcd_client_urls():
    """Comma-separated etcd client URLs.

    When [server.etcd] number <= 0, returns the configured external etcd
    hosts; otherwise builds http URLs from the outer IPv4 and each local
    etcd instance's computed client_port.
    """
    etcd_number = int(get_global_option('server.etcd', 'number', '0'))
    if etcd_number <= 0:
        return get_server_or_global_option('etcd', 'hosts', 'http://127.0.0.1:2379', 'SYSTEM_MACRO_CUSTOM_ETCD_HOST')
    client_urls = []
    for svr_index in get_service_index_range(etcd_number):
        client_urls.append('http://{0}:{1}'.format(get_outer_ipv4(), get_calc_listen_port('etcd', svr_index, 'client_port')))
    return ','.join(client_urls)
| 34.842444 | 125 | 0.656285 |
import sys
import os
import ctypes
import platform
import cgi
import re
import hashlib
import stat
import codecs
import common.print_color
environment_check_shm = None
global_opts = None
global_cahce = dict(id_offset=0, services_type=[])
server_opts = None
server_name = ''
server_index = 1
server_alloc_listen_port = 0
server_proxy_addr = ''
server_cache_id = None
server_cache_full_name = None
server_cache_ip = dict()
project_templete_engine_lookup = None
def set_global_opts(opts, id_offset):
global global_opts
global global_cahce
global_opts = opts
global_cahce['id_offset'] = id_offset
for server_type in sorted(opts.items('atservice'), key=lambda x: int(x[1])):
if 'atgateway' == server_type[0]:
continue
svr_name = 'server.{0}'.format(server_type[0])
if global_opts.has_section(svr_name):
global_cahce['services_type'].append(server_type[0])
def set_templete_engine(engine):
global project_templete_engine_lookup
project_templete_engine_lookup = engine
def render_string(content, **render_options):
from mako.template import Template
tmpl = Template(content)
return tmpl.render(**render_options)
def render(template_name, **render_options):
if project_templete_engine_lookup is None:
common.print_color.cprintf_stderr([common.print_color.print_style.FC_RED, common.print_color.print_style.FW_BOLD],
'template not available now\r\n')
return ""
tmpl = project_templete_engine_lookup.get_template(template_name)
if tmpl is None:
common.print_color.cprintf_stderr([common.print_color.print_style.FC_RED, common.print_color.print_style.FW_BOLD],
'template {0} not found\r\n', template_name)
return ""
return tmpl.render(**render_options)
def render_to(template_name, output_path, **render_options):
dir_path = os.path.dirname(output_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path, stat.S_IRWXU + stat.S_IRWXG + stat.S_IRWXO)
output_file = codecs.open(output_path, mode='w', encoding='utf-8')
if not output_file:
common.print_color.cprintf_stderr([common.print_color.print_style.FC_RED, common.print_color.print_style.FW_BOLD],
'try to render {0} but open {1} for writing failed\r\n', template_name, output_path)
return
output_file.write(render(template_name, **render_options))
os.chmod(output_path, stat.S_IRWXU + stat.S_IRWXG + stat.S_IROTH + stat.S_IXOTH)
def get_service_index_range(number=1):
return range(1 + global_cahce['id_offset'], 1 + global_cahce['id_offset'] + number)
def get_global_all_services():
return global_cahce['services_type']
def set_server_inst(opts, key, index):
global server_opts
global server_name
global server_index
global server_cache_id
global server_cache_full_name
server_opts = opts
server_name = key
server_index = index
server_cache_id = None
server_cache_full_name = None
def get_ipv4_level(ip_addr):
ip_addrs = [int(x) for x in ip_addr.split('.')]
if len(ip_addrs) != 4:
return 99
if ip_addrs[0] == 10:
return 1
if ip_addrs[0] == 172 and (ip_addrs[1] & 0x10) == 0x10:
return 2
if ip_addrs[0] == 192 and ip_addrs[1] == 168:
return 3
if ip_addrs[0] == 169 and ip_addrs[1] == 254:
return 11
if ip_addrs[0] == 127:
return 21
if (ip_addrs[0] & 0xE0) == 0xE0:
return 31
if (ip_addrs[0] & 0xF0) == 0xF0:
return 32
if ip_addrs[0] == 255 and ip_addrs[1] == 255 and ip_addrs[2] == 255 and ip_addrs[3] == 255:
return 51
return 0
def is_ipv4_link_local(ip_addr):
    """True for IPv4 addresses that should not be advertised to peers."""
    return get_ipv4_level(ip_addr) >= 11
def is_ipv6_link_local(ip_addr):
    """True for IPv6 loopback, fe80::/10 link-local and v4-mapped loopback."""
    lowered = ip_addr.lower()
    if lowered in ("::1", "0:0:0:0:0:0:0:1"):
        return True
    if len(lowered) > 4 and lowered.startswith("fe80"):
        return True
    if lowered in ('::127.0.0.1', '::ffff:127.0.0.1'):
        return True
    return False
def get_ip_list_v4():
    """Detect and cache the host's non-link-local IPv4 addresses.

    Resolution order: addresses resolved from the hostname, then (if none
    were usable) the source address of a routed UDP "connection" to a public
    DNS server. Results are sorted by get_ipv4_level so private addresses
    come first. Returns [] when detection fails entirely.
    """
    global server_cache_ip
    if 'ipv4' not in server_cache_ip:
        import socket
        server_cache_ip['ipv4'] = []
        # NOTE(review): the bare except deliberately degrades to an empty list
        # on any failure; on the exception path the probe socket is not closed
        # explicitly (left to GC) -- confirm this is acceptable.
        try:
            for ip_pair in socket.getaddrinfo(socket.gethostname(), 0, socket.AF_INET, socket.SOCK_STREAM):
                ip_addr = ip_pair[4][0]
                if not is_ipv4_link_local(ip_addr):
                    server_cache_ip['ipv4'].append(ip_addr)
            if 0 == len(server_cache_ip['ipv4']):
                # UDP connect() sends nothing; it only selects a route, so
                # getsockname() yields the outgoing interface address.
                csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                csock.connect(('8.8.8.8', 53))
                res = csock.getsockname()
                if res:
                    server_cache_ip['ipv4'].append(res[0])
                csock.close()
            server_cache_ip['ipv4'] = sorted(server_cache_ip['ipv4'], key=get_ipv4_level)
        except:
            pass
    return server_cache_ip['ipv4']
def get_ip_list_v6():
    """Detect and cache the host's non-link-local IPv6 addresses.

    Same strategy as get_ip_list_v4(): hostname resolution first, then a
    routed UDP probe to a public DNS server. Returns [] on failure.
    """
    global server_cache_ip
    if 'ipv6' not in server_cache_ip:
        import socket
        server_cache_ip['ipv6'] = []
        # NOTE(review): bare except deliberately degrades to an empty list on
        # any failure (including environments without IPv6 connectivity).
        try:
            for ip_pair in socket.getaddrinfo(socket.gethostname(), 0, socket.AF_INET6, socket.SOCK_STREAM):
                ip_addr = ip_pair[4][0]
                interface_index = ip_addr.find('%')
                # remove interface name (the "%eth0" scope suffix)
                if interface_index > 0:
                    ip_addr = ip_addr[0:interface_index]
                if not is_ipv6_link_local(ip_addr):
                    server_cache_ip['ipv6'].append(ip_addr)
            # use socket to detect ipv6 address if can not find any address
            if 0 == len(server_cache_ip['ipv6']):
                csock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
                csock.connect(('2001:4860:4860::8888', 53))  # use google's DNS
                res = csock.getsockname()
                if res:
                    ip_addr = res[0]
                    interface_index = ip_addr.find('%')
                    if interface_index > 0:
                        ip_addr = ip_addr[0:interface_index]
                    if not is_ipv6_link_local(ip_addr):
                        server_cache_ip['ipv6'].append(ip_addr)
                csock.close()
        except:
            pass
    return server_cache_ip['ipv6']
def is_ip_v6_enabled():
    """Whether any usable (non link-local) IPv6 address was detected."""
    return len(get_ip_list_v6()) > 0
def get_inner_ipv4():
    """Intranet IPv4 address; SYSTEM_MACRO_INNER_IPV4 overrides detection."""
    override = os.environ.get('SYSTEM_MACRO_INNER_IPV4')
    if override is not None:
        return override
    addrs = get_ip_list_v4()
    return addrs[0] if addrs else '127.0.0.1'
def get_outer_ipv4():
    """Public-facing IPv4 bind address; falls back to the wildcard 0.0.0.0."""
    override = os.environ.get('SYSTEM_MACRO_OUTER_IPV4')
    if override is not None:
        return override
    addrs = get_ip_list_v4()
    if not addrs or addrs[0] == '127.0.0.1':
        return '0.0.0.0'
    return addrs[0]
def get_inner_ipv6():
    """Intranet IPv6 address; SYSTEM_MACRO_INNER_IPV6 overrides detection."""
    override = os.environ.get('SYSTEM_MACRO_INNER_IPV6')
    if override is not None:
        return override
    addrs = get_ip_list_v6()
    return addrs[0] if addrs else '::1'
def get_outer_ipv6():
    """Public-facing IPv6 bind address; falls back to the wildcard ::."""
    override = os.environ.get('SYSTEM_MACRO_OUTER_IPV6')
    if override is not None:
        return override
    addrs = get_ip_list_v6()
    if not addrs or addrs[0] == '::1':
        return '::'
    return addrs[0]
def get_global_option(section, key, default_val, env_name=None):
    """Read a config value: environment override first, then the global ini,
    then *default_val*."""
    global global_opts
    if env_name is not None and env_name in os.environ:
        return os.environ[env_name]
    if global_opts.has_option(section, key):
        return global_opts.get(section, key)
    return default_val
def get_hostname():
    """Return (hostname, is_uuid_fallback), cached in server_cache_ip.

    The name comes from [atsystem] hostname / SYSTEM_MACRO_HOSTNAME; when
    unset, the MAC-derived uuid.getnode() value is used instead and the
    second tuple element is True.
    """
    global server_cache_ip
    if 'hostname' not in server_cache_ip:
        server_cache_ip['hostname'] = get_global_option(
            'atsystem', 'hostname', '', 'SYSTEM_MACRO_HOSTNAME')
        server_cache_ip['hostname_is_uuid'] = False
        if server_cache_ip['hostname'] is None or len(server_cache_ip['hostname']) == 0:
            import uuid
            # the last 12 hex digits of the UUID are the node/MAC portion
            server_cache_ip['hostname'] = uuid.UUID(
                int=uuid.getnode()).hex[-12:]
            server_cache_ip['hostname_is_uuid'] = True
    return server_cache_ip['hostname'], server_cache_ip['hostname_is_uuid']
def str_to_list(val):
    """Split a comma separated value into a list of trimmed, non-empty items.

    Returns [] for None. Non-string inputs are stringified first.
    """
    if val is None:
        return []
    stripped = (part.strip() for part in str(val).split(','))
    return [part for part in stripped if part]
def list_to_hosts(val):
    """Expand host entries of the form ``host:START-END[suffix]`` into one
    entry per port; entries without a port range pass through unchanged.
    """
    ret = []
    # Raw string literal: '\d' inside a plain string is an invalid escape
    # sequence (DeprecationWarning/SyntaxWarning, an error in future Pythons).
    mat = re.compile(r'(.*):(\d+)-(\d+)(.*)$')
    for item in val:
        mat_res = mat.match(item)
        if mat_res is not None:
            # inclusive port range START..END
            for i in range(int(mat_res.group(2)), int(mat_res.group(3)) + 1):
                ret.append('{0}:{1}{2}'.format(
                    mat_res.group(1), i, mat_res.group(4)))
        else:
            ret.append(item)
    return ret
def str_to_hosts(val):
    """Parse a comma separated host list, expanding port ranges."""
    return list_to_hosts(str_to_list(val))
def get_global_list(section, key, default_val, env_name=None):
    """Like get_global_option() but splits the value into a list."""
    res = get_global_option(section, key, default_val, env_name)
    return [] if res is None else str_to_list(res)
def get_global_list_to_hosts(section, key, default_val, env_name=None):
    """Like get_global_list() but additionally expands host port ranges."""
    return list_to_hosts(get_global_list(section, key, default_val, env_name))
def get_global_option_bool(section, key, default_val, env_name=None):
    """Interpret a config value as a boolean.

    Empty values and '0'/'false'/'no'/'disable' (case-insensitive) are False;
    anything else is True.
    """
    val = get_global_option(section, key, default_val, env_name)
    if not val:
        return False
    normalized = str(val).lower().strip()
    return bool(normalized) and normalized not in ('0', 'false', 'no', 'disable')
def get_server_name():
    """Name of the service this process belongs to (set via set_server_inst)."""
    global server_name
    return server_name
def get_server_type_id(server_name=None):
    """Numeric type id of a service from the [atservice] section (0 if unknown)."""
    if server_name is None:
        server_name = get_server_name()
    if not global_opts.has_option('atservice', server_name):
        return 0
    return int(get_global_option('atservice', server_name, 0))
def get_server_option(key, default_val, env_name=None):
    """get_global_option() scoped to the current server's [server.NAME] section."""
    section = 'server.{0}'.format(get_server_name())
    return get_global_option(section, key, default_val, env_name)
def get_server_list(key, default_val, env_name=None):
    """get_global_list() scoped to the current server's section."""
    section = 'server.{0}'.format(get_server_name())
    return get_global_list(section, key, default_val, env_name)
def get_server_list_to_hosts(key, default_val, env_name=None):
    """get_global_list_to_hosts() scoped to the current server's section."""
    section = 'server.{0}'.format(get_server_name())
    return get_global_list_to_hosts(section, key, default_val, env_name)
def get_server_option_bool(key, default_val, env_name=None):
    """get_global_option_bool() scoped to the current server's section."""
    section = 'server.{0}'.format(get_server_name())
    return get_global_option_bool(section, key, default_val, env_name)
def get_server_or_global_option(section, key, default_val, env_name=None):
    """Prefer ``[server.NAME] section.key`` over the global ``[section] key``."""
    override = get_server_option('{0}.{1}'.format(section, key), None, None)
    if override is None:
        return get_global_option(section, key, default_val, env_name)
    return override
def get_server_or_global_list(section, key, default_val, env_name=None):
    """List variant of get_server_or_global_option()."""
    override = get_server_list('{0}.{1}'.format(section, key), None, None)
    if override:
        return override
    return get_global_list(section, key, default_val, env_name)
def get_server_or_global_list_to_hosts(section, key, default_val, env_name=None):
    """Host-list variant of get_server_or_global_option()."""
    override = get_server_list_to_hosts('{0}.{1}'.format(section, key), None, None)
    if override:
        return override
    return get_global_list_to_hosts(section, key, default_val, env_name)
def get_server_or_global_bool(section, key, default_val, env_name=None):
    """Boolean variant of get_server_or_global_option()."""
    override_key = '{0}.{1}'.format(section, key)
    if get_server_option(override_key, None) is None:
        return get_global_option_bool(section, key, default_val, env_name)
    return get_server_option_bool(override_key, default_val, env_name)
def get_server_index():
    """Index of this process within its service group (set via set_server_inst)."""
    global server_index
    return server_index
def get_server_group_inner_id(server_name=None, server_index=None):
    """Group-local id: service type id * type_step + process index.

    Returns 0 for services not registered in the [atservice] section.
    """
    global global_opts
    if server_name is None:
        server_name = get_server_name()
    if server_index is None:
        server_index = get_server_index()
    if not global_opts.has_option('atservice', server_name):
        return 0
    type_step = int(get_global_option('global', 'type_step', 0x100))
    type_id = int(get_global_option('atservice', server_name, 0))
    return type_id * type_step + server_index
def get_server_proc_id(server_name=None, server_index=None):
    """Cluster-wide unique process id: group_id * group_step + inner id."""
    group_id = int(get_global_option(
        'global', 'group_id', 1, 'SYSTEM_MACRO_GROUP_ID'))
    group_step = int(get_global_option(
        'global', 'group_step', 0x10000, 'SYSTEM_MACRO_GROUP_STEP'))
    return group_id * group_step + get_server_group_inner_id(server_name, server_index)
def get_server_id():
    """Cached cluster-wide id of this process (0 for unregistered services)."""
    global server_cache_id
    global global_opts
    if server_cache_id is not None:
        return server_cache_id
    if not global_opts.has_option('atservice', get_server_name()):
        return 0
    server_cache_id = get_server_proc_id()
    return server_cache_id
def get_server_full_name():
    """Cached '<name>-<index>' label for this process."""
    global server_cache_full_name
    if server_cache_full_name is None:
        server_cache_full_name = '{0}-{1}'.format(
            get_server_name(), get_server_index())
    return server_cache_full_name
def get_log_level():
    """Configured log level (default: debug)."""
    return get_global_option('global', 'log_level', 'debug', 'SYSTEM_MACRO_CUSTOM_LOG_LEVEL')
def get_log_dir():
    """Configured log directory (default: ../log)."""
    return get_global_option('global', 'log_dir', '../log', 'SYSTEM_MACRO_CUSTOM_LOG_DIR')
def get_server_atbus_shm():
    """Return the shared-memory atbus channel address, or None when the
    kernel exposes no usable SysV shared memory (shmmax missing or 0).

    The availability probe is cached in the module-level
    environment_check_shm flag.
    """
    global environment_check_shm
    if environment_check_shm is None:
        if not os.path.exists('/proc/sys/kernel/shmmax'):
            environment_check_shm = False
        else:
            # Close the proc handle deterministically; the previous
            # open(...).read() left the file object to garbage collection.
            with open('/proc/sys/kernel/shmmax', 'r') as shmmax_file:
                shm_max_sz = int(shmmax_file.read())
            environment_check_shm = shm_max_sz > 0
    if not environment_check_shm:
        return None
    port_offset = int(get_global_option('global', 'port_offset', 0, 'SYSTEM_MACRO_GLOBAL_PORT_OFFSET'))
    base_key = int(get_global_option('atsystem', 'shm_key_pool', 0x16000000, 'SYSTEM_MACRO_CUSTOM_SHM_KEY'))
    shm_key_offset = int(get_global_option('atsystem', 'shm_key_offset', 0, 'SYSTEM_MACRO_CUSTOM_SHM_KEY_OFFSET'))
    shm_key = base_key + shm_key_offset + get_server_group_inner_id(get_server_name(), get_server_index()) + port_offset
    return 'shm://{0}'.format(hex(shm_key))
def disable_server_atbus_shm():
    """Force-disable the shared-memory channel for this process."""
    global environment_check_shm
    environment_check_shm = False
def get_calc_listen_port(server_name=None, server_index=None, base_port='port'):
    """Compute the listen port for a service instance.

    An explicit per-server port option takes precedence; otherwise the port
    is derived from the system base port, service type id and process index.
    """
    if server_name is None:
        server_name = get_server_name()
    if server_index is None:
        server_index = get_server_index()
    explicit_port = int(get_global_option(
        'server.{0}'.format(server_name), base_port, 0))
    port_offset = int(get_global_option('global', 'port_offset', 0, 'SYSTEM_MACRO_GLOBAL_PORT_OFFSET'))
    if explicit_port != 0:
        return explicit_port + server_index + port_offset
    system_base = int(get_global_option(
        'atsystem', 'listen_port', 12000, 'SYSTEM_MACRO_CUSTOM_BASE_PORT'))
    type_step = int(get_global_option('global', 'type_step', 0x100))
    type_id = int(get_global_option('atservice', server_name, 0))
    return system_base + type_step * server_index + type_id + port_offset
def get_server_atbus_port():
    """Atbus listen port of the current process."""
    return get_calc_listen_port()
def get_server_atbus_tcp():
    """TCP atbus address: atproxy binds the outer address, others the inner one."""
    use_outer = 'atproxy' == get_server_name()
    if is_ip_v6_enabled():
        host = get_outer_ipv6() if use_outer else get_inner_ipv6()
        return 'ipv6://{0}:{1}'.format(host, get_server_atbus_port())
    host = get_outer_ipv4() if use_outer else get_inner_ipv4()
    return 'ipv4://{0}:{1}'.format(host, get_server_atbus_port())
def get_server_atbus_unix():
    """Unix-socket atbus address, namespaced by a hash of this script's path.

    NOTE(review): the final os.path.exists('/') branch still points at /tmp
    even though the first branch just found /tmp missing -- presumably an
    intentional "create it under /tmp anyway" fallback; confirm.
    """
    h = hashlib.sha1(__file__.encode('utf-8')).hexdigest()
    if os.path.exists('/tmp'):
        default_base = '/tmp/atapp/{0}/'.format(h)
    elif os.path.exists('/run/tmp'):
        default_base = '/run/tmp/atapp/{0}/'.format(h)
    elif os.path.exists('/'):
        default_base = '/tmp/atapp/{0}/'.format(h)
    else:
        default_base = './'
    dir_path = get_global_option(
        'atsystem', 'unix_sock_dir', default_base, 'SYSTEM_MACRO_CUSTOM_UNIX_SOCK_DIR')
    # socket name embeds the process id (hex) so instances do not collide
    return 'unix://{0}{1}-{2:x}.sock'.format(dir_path, get_server_full_name(), get_server_id())
def get_server_atbus_listen():
    """Build the list of atbus listen addresses for this process.

    Prefers shared memory plus a unix socket; falls back to TCP when shm is
    unavailable, unix sockets are unsupported, or for atproxy (which must be
    reachable from other hosts). Unix-socket support is probed once and
    cached in server_cache_ip['support_unix_sock'].
    """
    global server_cache_ip
    ret = []
    res = get_server_atbus_shm()
    if res is not None:
        ret.append(res)
    if 'support_unix_sock' not in server_cache_ip:
        import socket
        if 'AF_UNIX' in socket.__dict__:
            test_file_path = 'project-utils-test-unixx-sock.sock'
            try:
                test_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                test_sock.bind(test_file_path)
                test_sock.close()
                server_cache_ip['support_unix_sock'] = True
            except:
                server_cache_ip['support_unix_sock'] = False
            finally:
                # bind() creates a filesystem entry that outlives close();
                # the old code only removed it on failure, leaving the probe
                # socket file behind whenever the probe succeeded.
                if os.path.exists(test_file_path):
                    os.remove(test_file_path)
        else:
            server_cache_ip['support_unix_sock'] = False
    if 0 == len(ret) or not server_cache_ip['support_unix_sock'] or 'atproxy' == get_server_name():
        ret.append(get_server_atbus_tcp())
    else:
        ret.append(get_server_atbus_unix())
    return ret
def disable_server_atbus_unix_sock():
    """Force TCP instead of unix sockets for the atbus channel."""
    global server_cache_ip
    server_cache_ip['support_unix_sock'] = False
def get_server_proxy():
    """Atproxy address for this process; atproxy itself publishes its own
    TCP address into the module cache and returns ''."""
    global server_proxy_addr
    if 'atproxy' == get_server_name():
        server_proxy_addr = get_server_atbus_tcp()
        return ''
    return server_proxy_addr
def get_server_subnets():
    """Subnet list for the current server; bare prefix lengths become '0/N'."""
    subnets = []
    for entry in get_server_list('subnets', ['0/0']):
        subnets.append('0/{0}'.format(entry) if entry.isdigit() else entry)
    return subnets
def get_server_recv_buffer_size():
    """Shared-memory channel size used for receiving (bytes)."""
    return get_global_option('atsystem', 'shm_channel_size', 8 * 1024 * 1024)
def get_server_send_buffer_size():
    """IO-stream channel size used for sending (bytes)."""
    return get_global_option('atsystem', 'iostream_channel_size', 2 * 1024 * 1024)
def get_server_gateway_index(server_name=None, server_index=None, gateway_name=None):
    """Gateway slot index for a service instance.

    Raises:
        Exception: when the gateway section has no index_map_<server_name>.
    """
    if server_name is None:
        server_name = get_server_name()
    if server_index is None:
        server_index = get_server_index()
    if gateway_name is None:
        gateway_name = 'atgateway'
    gateway_section_name = 'server.{0}'.format(gateway_name)
    step = int(get_global_option(gateway_section_name, 'index_type_number', 1))
    offset = get_global_option(gateway_section_name, 'index_map_{0}'.format(server_name), None)
    if offset is None:
        raise Exception(
            'index_map_{0} is not found in {1}'.format(server_name, gateway_section_name))
    return step * server_index + int(offset)
def get_server_gateway_port(server_name, server_index, gateway_name=None, base_port='default_port'):
    """Client-facing port for the gateway attached to a service instance."""
    if gateway_name is None:
        gateway_name = 'atgateway'
    gateway_section_name = 'server.{0}'.format(gateway_name)
    configured = int(get_global_option(gateway_section_name, base_port, 0))
    port_offset = int(get_global_option('global', 'port_offset', 0, 'SYSTEM_MACRO_GLOBAL_PORT_OFFSET'))
    if configured <= 0:
        configured = int(get_global_option(gateway_section_name, 'default_port', 8000))
    return configured + get_server_gateway_index(server_name, server_index, gateway_name) + port_offset
def get_gateway_server_names(gateway_name=None):
    """Names of all services with an index_map_* entry in the gateway section."""
    global global_opts
    if gateway_name is None:
        gateway_name = 'atgateway'
    prefix = 'index_map_'
    section = 'server.{0}'.format(gateway_name)
    return [opt[len(prefix):] for opt in global_opts.options(section)
            if opt.startswith(prefix)]
def get_etcd_client_urls():
    """Comma separated etcd client URLs.

    Built from the configured etcd instance count when positive, otherwise
    taken from the static etcd.hosts option.
    """
    etcd_number = int(get_global_option('server.etcd', 'number', '0'))
    if etcd_number <= 0:
        return get_server_or_global_option('etcd', 'hosts', 'http://127.0.0.1:2379', 'SYSTEM_MACRO_CUSTOM_ETCD_HOST')
    client_urls = []
    for svr_index in get_service_index_range(etcd_number):
        client_urls.append('http://{0}:{1}'.format(get_outer_ipv4(), get_calc_listen_port('etcd', svr_index, 'client_port')))
    return ','.join(client_urls)
| true | true |
f7117058f8de1894f09859f44d2ac1935baabe07 | 6,301 | py | Python | src/char/trapsin.py | roozhou/botty | a67a87845687cdf6900af10a13dc7170684faa9a | [
"MIT"
] | null | null | null | src/char/trapsin.py | roozhou/botty | a67a87845687cdf6900af10a13dc7170684faa9a | [
"MIT"
] | null | null | null | src/char/trapsin.py | roozhou/botty | a67a87845687cdf6900af10a13dc7170684faa9a | [
"MIT"
] | null | null | null | import keyboard
from utils.custom_mouse import mouse
from char import IChar
from pather import Pather
from logger import Logger
from screen import convert_abs_to_monitor, convert_screen_to_abs, grab
from config import Config
from utils.misc import wait, rotate_vec, unit_vector
import random
from pather import Location, Pather
import numpy as np
class Trapsin(IChar):
    """Trap assassin combat handler for botty boss runs.

    Combines right-skill traps (lightning/death sentry) with a held
    left-click attack. The wait() intervals are tuned to in-game cast
    animations, so the exact statement ordering is significant.
    """
    def __init__(self, skill_hotkeys: dict, pather: Pather):
        Logger.info("Setting up Trapsin")
        super().__init__(skill_hotkeys)
        self._pather = pather
    def pre_buff(self):
        """Cast pre-fight buffs (CTA if available, fade, shadow warrior,
        burst of speed) -- each only when its hotkey is configured."""
        if Config().char["cta_available"]:
            self._pre_buff_cta()
        if self._skill_hotkeys["fade"]:
            keyboard.send(self._skill_hotkeys["fade"])
            wait(0.1, 0.13)
            mouse.click(button="right")
            wait(self._cast_duration)
        if self._skill_hotkeys["shadow_warrior"]:
            keyboard.send(self._skill_hotkeys["shadow_warrior"])
            wait(0.1, 0.13)
            mouse.click(button="right")
            wait(self._cast_duration)
        if self._skill_hotkeys["burst_of_speed"]:
            keyboard.send(self._skill_hotkeys["burst_of_speed"])
            wait(0.1, 0.13)
            mouse.click(button="right")
            wait(self._cast_duration)
    def _left_attack(self, cast_pos_abs: tuple[float, float], spray: int = 10):
        """Fire the configured left skill 4 times around cast_pos_abs,
        jittered by +/- spray pixels, while holding stand-still."""
        keyboard.send(Config().char["stand_still"], do_release=False)
        if self._skill_hotkeys["skill_left"]:
            keyboard.send(self._skill_hotkeys["skill_left"])
        for _ in range(4):
            x = cast_pos_abs[0] + (random.random() * 2*spray - spray)
            y = cast_pos_abs[1] + (random.random() * 2*spray - spray)
            cast_pos_monitor = convert_abs_to_monitor((x, y))
            mouse.move(*cast_pos_monitor)
            mouse.press(button="left")
            wait(0.2, 0.3)
            mouse.release(button="left")
        keyboard.send(Config().char["stand_still"], do_press=False)
    def _right_attack(self, cast_pos_abs: tuple[float, float], spray: float = 10):
        """Place 4 lightning sentries and 1 death sentry near cast_pos_abs,
        jittered by +/- spray pixels."""
        keyboard.send(self._skill_hotkeys["lightning_sentry"])
        x = cast_pos_abs[0] + (random.random() * 2 * spray - spray)
        y = cast_pos_abs[1] + (random.random() * 2 * spray - spray)
        cast_pos_monitor = convert_abs_to_monitor((x, y))
        mouse.move(*cast_pos_monitor)
        def atk(num: int):
            # click the currently selected right skill *num* times
            for _ in range(num):
                mouse.press(button="right")
                wait(0.20)
                mouse.release(button="right")
                wait(0.15)
        atk(4)
        keyboard.send(self._skill_hotkeys["death_sentry"])
        atk(1)
    def kill_pindle(self) -> bool:
        """Attack sequence for Pindleskin, then move onto the loot."""
        atk_len = max(1, int(Config().char["atk_len_pindle"] / 2))
        pindle_pos_abs = convert_screen_to_abs(Config().path["pindle_end"][0])
        # aim slightly short of the boss position
        cast_pos_abs = [pindle_pos_abs[0] * 0.9, pindle_pos_abs[1] * 0.9]
        for _ in range(atk_len):
            self._right_attack(cast_pos_abs, 11)
            self._left_attack(cast_pos_abs, 11)
        # Move to items
        wait(self._cast_duration, self._cast_duration + 0.2)
        if self.capabilities.can_teleport_natively:
            self._pather.traverse_nodes_fixed("pindle_end", self)
        else:
            self._pather.traverse_nodes((Location.A5_PINDLE_SAFE_DIST, Location.A5_PINDLE_END), self, force_tp=True)
        return True
    def kill_eldritch(self) -> bool:
        """Attack sequence for Eldritch, then move onto the loot."""
        atk_len = max(1, int(Config().char["atk_len_eldritch"] / 2))
        eld_pos_abs = convert_screen_to_abs(Config().path["eldritch_end"][0])
        cast_pos_abs = [eld_pos_abs[0] * 0.9, eld_pos_abs[1] * 0.9]
        for _ in range(atk_len):
            self._right_attack(cast_pos_abs, 90)
            self._left_attack(cast_pos_abs, 90)
        # Move to items
        wait(self._cast_duration, self._cast_duration + 0.2)
        if self.capabilities.can_teleport_natively:
            self._pather.traverse_nodes_fixed("eldritch_end", self)
        else:
            self._pather.traverse_nodes((Location.A5_ELDRITCH_SAFE_DIST, Location.A5_ELDRITCH_END), self, timeout=0.6, force_tp=True)
        return True
    def kill_shenk(self) -> bool:
        """Attack sequence for Shenk, then move onto the loot."""
        atk_len = max(1, int(Config().char["atk_len_shenk"] / 2))
        # prefer the live on-screen position; fall back to the static path node
        shenk_pos_abs = self._pather.find_abs_node_pos(149, grab())
        if shenk_pos_abs is None:
            shenk_pos_abs = convert_screen_to_abs(Config().path["shenk_end"][0])
        cast_pos_abs = [shenk_pos_abs[0] * 0.9, shenk_pos_abs[1] * 0.9]
        for _ in range(atk_len):
            self._right_attack(cast_pos_abs, 90)
            self._left_attack(cast_pos_abs, 90)
        # Move to items
        wait(self._cast_duration, self._cast_duration + 0.2)
        self._pather.traverse_nodes((Location.A5_SHENK_SAFE_DIST, Location.A5_SHENK_END), self, timeout=1.4, force_tp=True)
        return True
    def kill_nihlathak(self, end_nodes: list[int]) -> bool:
        """Attack sequence for Nihlathak; re-aims each round and tele-dances
        between rounds to avoid corpse explosion. Returns False when the
        boss position cannot be found on screen."""
        # Find nilhlatak position
        atk_len = max(1, int(Config().char["atk_len_nihlathak"] / 2))
        for i in range(atk_len):
            nihlathak_pos_abs = self._pather.find_abs_node_pos(end_nodes[-1], grab())
            if nihlathak_pos_abs is None:
                return False
            cast_pos_abs = np.array([nihlathak_pos_abs[0] * 0.9, nihlathak_pos_abs[1] * 0.9])
            self._left_attack(cast_pos_abs, 90)
            self._right_attack(cast_pos_abs, 90)
            # Do some tele "dancing" after each sequence
            if i < atk_len - 1:
                rot_deg = random.randint(-10, 10) if i % 2 == 0 else random.randint(170, 190)
                tele_pos_abs = unit_vector(rotate_vec(cast_pos_abs, rot_deg)) * 100
                pos_m = convert_abs_to_monitor(tele_pos_abs)
                self.pre_move()
                self.move(pos_m)
        # Move to items
        wait(self._cast_duration, self._cast_duration + 0.2)
        self._pather.traverse_nodes(end_nodes, self, timeout=0.8)
        return True
if __name__ == "__main__":
    # Manual test harness: f11 starts the character, f12 force-exits.
    import os
    import keyboard
    keyboard.add_hotkey('f12', lambda: Logger.info('Force Exit (f12)') or os._exit(1))
    keyboard.wait("f11")
    from config import Config
    from char import Trapsin
    pather = Pather()
    # Trapsin.__init__(skill_hotkeys, pather) takes two arguments; the old
    # call passed Config().char as a third argument and raised TypeError.
    char = Trapsin(Config().trapsin, pather)
from utils.custom_mouse import mouse
from char import IChar
from pather import Pather
from logger import Logger
from screen import convert_abs_to_monitor, convert_screen_to_abs, grab
from config import Config
from utils.misc import wait, rotate_vec, unit_vector
import random
from pather import Location, Pather
import numpy as np
class Trapsin(IChar):
def __init__(self, skill_hotkeys: dict, pather: Pather):
Logger.info("Setting up Trapsin")
super().__init__(skill_hotkeys)
self._pather = pather
def pre_buff(self):
if Config().char["cta_available"]:
self._pre_buff_cta()
if self._skill_hotkeys["fade"]:
keyboard.send(self._skill_hotkeys["fade"])
wait(0.1, 0.13)
mouse.click(button="right")
wait(self._cast_duration)
if self._skill_hotkeys["shadow_warrior"]:
keyboard.send(self._skill_hotkeys["shadow_warrior"])
wait(0.1, 0.13)
mouse.click(button="right")
wait(self._cast_duration)
if self._skill_hotkeys["burst_of_speed"]:
keyboard.send(self._skill_hotkeys["burst_of_speed"])
wait(0.1, 0.13)
mouse.click(button="right")
wait(self._cast_duration)
def _left_attack(self, cast_pos_abs: tuple[float, float], spray: int = 10):
keyboard.send(Config().char["stand_still"], do_release=False)
if self._skill_hotkeys["skill_left"]:
keyboard.send(self._skill_hotkeys["skill_left"])
for _ in range(4):
x = cast_pos_abs[0] + (random.random() * 2*spray - spray)
y = cast_pos_abs[1] + (random.random() * 2*spray - spray)
cast_pos_monitor = convert_abs_to_monitor((x, y))
mouse.move(*cast_pos_monitor)
mouse.press(button="left")
wait(0.2, 0.3)
mouse.release(button="left")
keyboard.send(Config().char["stand_still"], do_press=False)
def _right_attack(self, cast_pos_abs: tuple[float, float], spray: float = 10):
keyboard.send(self._skill_hotkeys["lightning_sentry"])
x = cast_pos_abs[0] + (random.random() * 2 * spray - spray)
y = cast_pos_abs[1] + (random.random() * 2 * spray - spray)
cast_pos_monitor = convert_abs_to_monitor((x, y))
mouse.move(*cast_pos_monitor)
def atk(num: int):
for _ in range(num):
mouse.press(button="right")
wait(0.20)
mouse.release(button="right")
wait(0.15)
atk(4)
keyboard.send(self._skill_hotkeys["death_sentry"])
atk(1)
def kill_pindle(self) -> bool:
atk_len = max(1, int(Config().char["atk_len_pindle"] / 2))
pindle_pos_abs = convert_screen_to_abs(Config().path["pindle_end"][0])
cast_pos_abs = [pindle_pos_abs[0] * 0.9, pindle_pos_abs[1] * 0.9]
for _ in range(atk_len):
self._right_attack(cast_pos_abs, 11)
self._left_attack(cast_pos_abs, 11)
wait(self._cast_duration, self._cast_duration + 0.2)
if self.capabilities.can_teleport_natively:
self._pather.traverse_nodes_fixed("pindle_end", self)
else:
self._pather.traverse_nodes((Location.A5_PINDLE_SAFE_DIST, Location.A5_PINDLE_END), self, force_tp=True)
return True
def kill_eldritch(self) -> bool:
atk_len = max(1, int(Config().char["atk_len_eldritch"] / 2))
eld_pos_abs = convert_screen_to_abs(Config().path["eldritch_end"][0])
cast_pos_abs = [eld_pos_abs[0] * 0.9, eld_pos_abs[1] * 0.9]
for _ in range(atk_len):
self._right_attack(cast_pos_abs, 90)
self._left_attack(cast_pos_abs, 90)
wait(self._cast_duration, self._cast_duration + 0.2)
if self.capabilities.can_teleport_natively:
self._pather.traverse_nodes_fixed("eldritch_end", self)
else:
self._pather.traverse_nodes((Location.A5_ELDRITCH_SAFE_DIST, Location.A5_ELDRITCH_END), self, timeout=0.6, force_tp=True)
return True
def kill_shenk(self) -> bool:
atk_len = max(1, int(Config().char["atk_len_shenk"] / 2))
shenk_pos_abs = self._pather.find_abs_node_pos(149, grab())
if shenk_pos_abs is None:
shenk_pos_abs = convert_screen_to_abs(Config().path["shenk_end"][0])
cast_pos_abs = [shenk_pos_abs[0] * 0.9, shenk_pos_abs[1] * 0.9]
for _ in range(atk_len):
self._right_attack(cast_pos_abs, 90)
self._left_attack(cast_pos_abs, 90)
wait(self._cast_duration, self._cast_duration + 0.2)
self._pather.traverse_nodes((Location.A5_SHENK_SAFE_DIST, Location.A5_SHENK_END), self, timeout=1.4, force_tp=True)
return True
def kill_nihlathak(self, end_nodes: list[int]) -> bool:
atk_len = max(1, int(Config().char["atk_len_nihlathak"] / 2))
for i in range(atk_len):
nihlathak_pos_abs = self._pather.find_abs_node_pos(end_nodes[-1], grab())
if nihlathak_pos_abs is None:
return False
cast_pos_abs = np.array([nihlathak_pos_abs[0] * 0.9, nihlathak_pos_abs[1] * 0.9])
self._left_attack(cast_pos_abs, 90)
self._right_attack(cast_pos_abs, 90)
if i < atk_len - 1:
rot_deg = random.randint(-10, 10) if i % 2 == 0 else random.randint(170, 190)
tele_pos_abs = unit_vector(rotate_vec(cast_pos_abs, rot_deg)) * 100
pos_m = convert_abs_to_monitor(tele_pos_abs)
self.pre_move()
self.move(pos_m)
wait(self._cast_duration, self._cast_duration + 0.2)
self._pather.traverse_nodes(end_nodes, self, timeout=0.8)
return True
if __name__ == "__main__":
import os
import keyboard
keyboard.add_hotkey('f12', lambda: Logger.info('Force Exit (f12)') or os._exit(1))
keyboard.wait("f11")
from config import Config
from char import Trapsin
pather = Pather()
char = Trapsin(Config().trapsin, Config().char, pather) | true | true |
f7117441264ed44f4707d96e06ad13baddd008e2 | 6,635 | py | Python | models/IPCC/model.py | TD21forever/QoS-Predcition-Algorithm-library | f4503462887d719a39c9ccddd6cc55546e783fd5 | [
"MIT"
] | 2 | 2022-02-08T08:19:59.000Z | 2022-02-17T01:42:54.000Z | models/IPCC/model.py | TD21forever/QoS-Predcition-Algorithm-library | f4503462887d719a39c9ccddd6cc55546e783fd5 | [
"MIT"
] | null | null | null | models/IPCC/model.py | TD21forever/QoS-Predcition-Algorithm-library | f4503462887d719a39c9ccddd6cc55546e783fd5 | [
"MIT"
] | null | null | null | import copy
import math
import numpy as np
from tqdm import tqdm
from utils.model_util import triad_to_matrix, nonzero_user_mean, nonzero_item_mean
# 相似度计算库
from scipy.stats import pearsonr
from sklearn.metrics.pairwise import cosine_similarity
class IPCCModel(object):
    """Item-based collaborative filtering (IPCC) QoS prediction model.

    fit() turns a (uid, iid, rating) triad set into a QoS matrix and an
    item-item similarity matrix; predict() estimates missing ratings from
    the target user's ratings of the most similar items.
    """
    def __init__(self) -> None:
        super().__init__()
        self.matrix = None  # user x item QoS matrix
        self.u_mean = None  # per-user rating mean (used by adjusted cosine)
        self.i_mean = None  # per-item rating mean
        self.similarity_matrix = None  # item-item similarity matrix
        self._nan_symbol = -1  # dataset marker for missing entries
    def _get_similarity_matrix(self, matrix, metric):
        """Build the symmetric item-item similarity matrix.

        Args:
            matrix: QoS matrix; missing entries marked with self._nan_symbol.
            metric: 'PCC' (Pearson), 'COS' (cosine) or 'ACOS' (adjusted cosine).
        """
        _m = copy.deepcopy(matrix)
        _m[_m == self._nan_symbol] = 0  # replace missing entries with 0
        n_items = matrix.shape[1]
        similarity_matrix = np.zeros((n_items, n_items))
        # only the upper triangle is computed; values are mirrored below
        for i in tqdm(range(n_items), desc="生成相似度矩阵"):
            for j in range(i + 1, n_items):
                col_i = _m[:, i]
                col_j = _m[:, j]
                nonzero_i = np.nonzero(col_i)[0]
                nonzero_j = np.nonzero(col_j)[0]
                # users that rated both item i and item j
                intersect = np.intersect1d(nonzero_i, nonzero_j)
                if len(intersect) == 0:
                    sim = 0
                else:
                    try:
                        if metric == 'PCC':
                            # Pearson is undefined for constant vectors
                            if len(set(col_i[intersect])) == 1 or len(
                                    set(col_j[intersect])) == 1:
                                sim = 0
                            else:
                                sim = pearsonr(col_i[intersect],
                                               col_j[intersect])[0]
                        elif metric == 'COS':
                            # sklearn's cosine_similarity requires 2-D input;
                            # the previous 1-D call always raised, so 'COS'
                            # silently produced an all-zero similarity matrix.
                            sim = cosine_similarity(
                                col_i[intersect].reshape(1, -1),
                                col_j[intersect].reshape(1, -1))[0][0]
                        elif metric == 'ACOS':
                            sim = adjusted_cosine_similarity(
                                col_i, col_j, intersect, self.u_mean)
                    except Exception:
                        sim = 0
                similarity_matrix[i][j] = similarity_matrix[j][i] = sim
        return similarity_matrix
    def _get_similarity_items(self, iid, topk=-1):
        """Return item ids ordered by similarity to *iid* (descending).

        Args:
            iid: target item id.
            topk: number of neighbours to return; -1 means all.
        """
        assert isinstance(topk, int)
        # argsort of the negated row => indices by descending similarity
        ordered_sim_iid = (-self.similarity_matrix[iid]).argsort()
        if topk == -1:
            return ordered_sim_iid
        assert topk > 0
        return ordered_sim_iid[:topk]
    def get_similarity(self, iid_a, iid_b):
        """Similarity of two items (1.0 for identity, 0 for unknown items)."""
        if iid_a == iid_b:
            return float(1)
        if iid_a + 1 > self.matrix.shape[1] or iid_b + 1 > self.matrix.shape[1]:
            return 0
        if self.similarity_matrix is None:
            assert self.matrix is not None, "Please fit first e.g. model.fit()"
            # The old lazy path called _get_similarity_matrix without the
            # required metric argument and discarded the result, so the
            # lookup below crashed anyway; compute and store it properly.
            self.similarity_matrix = self._get_similarity_matrix(
                self.matrix, 'PCC')
        return self.similarity_matrix[iid_a][iid_b]
    def fit(self, triad, metric='PCC'):
        """Train the model.

        Args:
            triad: iterable of (uid, iid, rating) rows.
            metric: similarity metric, see _get_similarity_matrix().
        """
        self.matrix = triad_to_matrix(triad, self._nan_symbol)
        self.u_mean = nonzero_user_mean(self.matrix, self._nan_symbol)
        # FIXME: consider the case where an item mean is 0
        self.i_mean = nonzero_item_mean(self.matrix, self._nan_symbol)
        self.similarity_matrix = self._get_similarity_matrix(self.matrix, metric)
    def predict(self, triad, topK=-1):
        """Predict ratings for the given triads.

        Returns:
            (y_list, y_pred_list): ground-truth ratings and predictions.
            Cold-start items (unseen during fit) are skipped and counted.
        """
        y_list = []
        y_pred_list = []
        cold_boot_cnt = 0
        for row in tqdm(triad, desc="Predict... "):
            uid, iid, rate = int(row[0]), int(row[1]), float(row[2])
            # cold start: items never seen during fit have no similarity row
            if iid + 1 > self.matrix.shape[1]:
                cold_boot_cnt += 1
                continue
            i_mean = self.i_mean[iid]
            similarity_items = self._get_similarity_items(iid, topK)
            up = 0    # weighted sum of mean-centred neighbour ratings
            down = 0  # sum of absolute similarities
            for sim_iid in similarity_items:
                sim_item_rate = self.matrix[uid][sim_iid]
                # skip neighbours the current user has not rated
                if sim_item_rate == self._nan_symbol:
                    continue
                similarity = self.get_similarity(iid, sim_iid)
                up += similarity * (sim_item_rate - self.i_mean[sim_iid])
                # Use |sim| in the denominator, matching the standard
                # item-based CF formula (and this line's original comment);
                # summing signed similarities could cancel out or go negative
                # with Pearson similarities.
                down += abs(similarity)
            if down != 0:
                y_pred = i_mean + up / down
            else:
                y_pred = 0
            y_pred_list.append(y_pred)
            y_list.append(rate)
        print(f"cold boot :{cold_boot_cnt / len(triad) * 100:4f}%")
        return y_list, y_pred_list
def adjusted_cosine_similarity(x, y, intersect, u_mean):
    """Adjusted cosine similarity between two item rating vectors.

    Each rating is centred on the corresponding user's mean rating before
    computing the cosine, compensating for per-user rating bias.

    Args:
        x, y: rating vectors of the two items (one entry per user).
        intersect: indices of users that rated both items (at least two).
        u_mean: per-user mean ratings.

    Raises:
        ValueError: on length mismatch, vectors shorter than two entries,
            or fewer than two common ratings.
    """
    n = len(x)
    if n != len(y):
        raise ValueError('x and y must have the same length.')
    if n < 2:
        raise ValueError('x and y must have length at least 2.')
    if len(intersect) < 2:
        raise ValueError('there must be at least two non-zero entries')
    x = np.asarray(x)
    y = np.asarray(y)
    numerator = sum((x[i] - u_mean[i]) * (y[i] - u_mean[i]) for i in intersect)
    denom_x = sum(math.pow(x[i] - u_mean[i], 2) for i in intersect)
    denom_y = sum(math.pow(y[i] - u_mean[i], 2) for i in intersect)
    return numerator / math.sqrt(denom_x * denom_y)
if __name__ == "__main__":
    # Smoke test: tiny 3-user x 3-item dataset; user 0 has no rating for
    # item 2, which is exactly the entry predicted below.
    triad = np.array([
        [0, 0, 0],
        [0, 1, 0],
        [1, 0, 1],
        [1, 1, 3],
        [1, 2, 4],
        [2, 0, 2],
        [2, 1, 3],
        [2, 2, 5],
    ])
    test = np.array([[0, 2, 3]])
    ipcc = IPCCModel()
    ipcc.fit(triad)
    ipcc.predict(test, 20)
| 33.341709 | 82 | 0.508063 | import copy
import math
import numpy as np
from tqdm import tqdm
from utils.model_util import triad_to_matrix, nonzero_user_mean, nonzero_item_mean
from scipy.stats import pearsonr
from sklearn.metrics.pairwise import cosine_similarity
class IPCCModel(object):
    """Item-based collaborative filtering (IPCC) QoS prediction model.

    fit() builds a user x item QoS matrix and an item-item similarity
    matrix from (uid, iid, rating) triads; predict() estimates ratings
    from the target user's ratings of the most similar items.
    """
    def __init__(self) -> None:
        super().__init__()
        self.matrix = None  # user x item QoS matrix
        self.u_mean = None  # per-user rating mean (used by adjusted cosine)
        self.i_mean = None  # per-item rating mean
        self.similarity_matrix = None  # item-item similarity matrix
        self._nan_symbol = -1  # dataset marker for missing entries
    def _get_similarity_matrix(self, matrix, metric):
        """Build the symmetric item-item similarity matrix.

        metric is 'PCC' (Pearson), 'COS' (cosine) or 'ACOS' (adjusted cosine).
        """
        _m = copy.deepcopy(matrix)
        _m[_m == self._nan_symbol] = 0
        n_items = matrix.shape[1]
        similarity_matrix = np.zeros((n_items, n_items))
        # only the upper triangle is computed; values are mirrored below
        for i in tqdm(range(n_items), desc="生成相似度矩阵"):
            for j in range(i + 1, n_items):
                col_i = _m[:, i]
                col_j = _m[:, j]
                nonzero_i = np.nonzero(col_i)[0]
                nonzero_j = np.nonzero(col_j)[0]
                # users that rated both item i and item j
                intersect = np.intersect1d(nonzero_i,
                                           nonzero_j)
                if len(intersect) == 0:
                    sim = 0
                else:
                    try:
                        if metric == 'PCC':
                            # Pearson is undefined for constant vectors
                            if len(set(col_i[intersect])) == 1 or len(
                                    set(col_j[intersect])) == 1:
                                sim = 0
                            else:
                                sim = pearsonr(col_i[intersect],
                                               col_j[intersect])[0]
                        elif metric == 'COS':
                            # NOTE(review): sklearn's cosine_similarity expects
                            # 2-D arrays; this 1-D call raises and is swallowed
                            # below, so 'COS' yields sim = 0 -- confirm intent.
                            sim = cosine_similarity(col_i[intersect],
                                                    col_j[intersect])
                        elif metric == 'ACOS':
                            sim = adjusted_cosine_similarity(
                                col_i, col_j, intersect, self.u_mean)
                    except Exception as e:
                        sim = 0
                similarity_matrix[i][j] = similarity_matrix[j][i] = sim
        return similarity_matrix
    def _get_similarity_items(self, iid, topk=-1):
        """Return item ids ordered by similarity to *iid* (descending);
        topk == -1 returns all items."""
        assert isinstance(topk, int)
        # argsort of the negated row => indices by descending similarity
        ordered_sim_iid = (
            -self.similarity_matrix[iid]).argsort()
        if topk == -1:
            return ordered_sim_iid
        else:
            assert topk > 0
            return ordered_sim_iid[:topk]
    def get_similarity(self, iid_a, iid_b):
        """Similarity of two items (1.0 for identity, 0 for unknown items)."""
        if iid_a == iid_b:
            return float(1)
        if iid_a + 1 > self.matrix.shape[1] or iid_b + 1 > self.matrix.shape[1]:
            return 0
        if self.similarity_matrix is None:
            assert self.matrix is not None, "Please fit first e.g. model.fit()"
            # NOTE(review): this lazy call omits the required `metric`
            # argument and discards the result, so this path raises --
            # in practice fit() must be called before get_similarity().
            self._get_similarity_matrix(self.matrix)
        return self.similarity_matrix[iid_a][iid_b]
    def fit(self, triad, metric='PCC'):
        """Train the model on (uid, iid, rating) triads using *metric*."""
        self.matrix = triad_to_matrix(triad, self._nan_symbol)
        self.u_mean = nonzero_user_mean(self.matrix,
                                        self._nan_symbol)
        self.i_mean = nonzero_item_mean(self.matrix,
                                        self._nan_symbol)
        self.similarity_matrix = self._get_similarity_matrix(
            self.matrix, metric)
    def predict(self, triad, topK=-1):
        """Predict ratings for the given triads.

        Returns (y_list, y_pred_list); cold-start items are skipped.
        """
        y_list = []
        y_pred_list = []
        cold_boot_cnt = 0
        for row in tqdm(triad, desc="Predict... "):
            uid, iid, rate = int(row[0]), int(row[1]), float(row[2])
            # cold start: items never seen during fit have no similarity row
            if iid + 1 > self.matrix.shape[1]:
                cold_boot_cnt += 1
                continue
            i_mean = self.i_mean[iid]
            similarity_items = self._get_similarity_items(iid, topK)
            up = 0    # weighted sum of mean-centred neighbour ratings
            down = 0  # sum of similarities (NOTE(review): signed, not |sim|)
            for sim_iid in similarity_items:
                sim_item_rate = self.matrix[uid][sim_iid]
                similarity = self.get_similarity(iid, sim_iid)
                # skip neighbours the current user has not rated
                if sim_item_rate == self._nan_symbol:
                    continue
                up += similarity * (sim_item_rate - self.i_mean[sim_iid]
                                    )
                down += similarity
            if down != 0:
                y_pred = i_mean + up / down
            else:
                y_pred = 0
            y_pred_list.append(y_pred)
            y_list.append(rate)
        print(f"cold boot :{cold_boot_cnt / len(triad) * 100:4f}%")
        return y_list, y_pred_list
def adjusted_cosine_similarity(x, y, intersect, u_mean):
    """Adjusted cosine similarity between two rating columns.

    Each rating is centred on the corresponding user's mean (``u_mean``)
    before the cosine is taken; only the indices in ``intersect`` (users
    who rated both items) contribute.  Raises ValueError when the
    vectors differ in length or fewer than two co-rated entries exist.
    """
    n = len(x)
    if n != len(y):
        raise ValueError('x and y must have the same length.')
    if n < 2:
        raise ValueError('x and y must have length at least 2.')
    if len(intersect) < 2:
        raise ValueError('there must be at least two non-zero entries')
    x = np.asarray(x)
    y = np.asarray(y)
    dot = 0.0
    norm_x = 0.0
    norm_y = 0.0
    for idx in intersect:
        dev_x = x[idx] - u_mean[idx]
        dev_y = y[idx] - u_mean[idx]
        dot += dev_x * dev_y
        norm_x += math.pow(dev_x, 2)
        norm_y += math.pow(dev_y, 2)
    return dot / math.sqrt(norm_x * norm_y)
if __name__ == "__main__":
    # Smoke test: (user, item, rating) triads for 3 users x 3 items.
    # User 0 has no rating for item 2; predict() estimates it.  topK=20
    # exceeds the number of items, so all similar items are used.
    triad = np.array([
        [0, 0, 0],
        [0, 1, 0],
        [1, 0, 1],
        [1, 1, 3],
        [1, 2, 4],
        [2, 0, 2],
        [2, 1, 3],
        [2, 2, 5],
    ])
    test = np.array([[0, 2, 3]])
    ipcc = IPCCModel()
    ipcc.fit(triad)
    ipcc.predict(test, 20)
| true | true |
f71174c8a63e501662fba38d43a6adaa7178d911 | 1,172 | py | Python | tests/date_class_test.py | MartinVardanyan/My-first-project-on-GitHub | 0bfef25b691362f797b990758cc0fd342b72e891 | [
"Apache-2.0"
] | null | null | null | tests/date_class_test.py | MartinVardanyan/My-first-project-on-GitHub | 0bfef25b691362f797b990758cc0fd342b72e891 | [
"Apache-2.0"
] | null | null | null | tests/date_class_test.py | MartinVardanyan/My-first-project-on-GitHub | 0bfef25b691362f797b990758cc0fd342b72e891 | [
"Apache-2.0"
] | null | null | null | from date_class import Date
def test_init_repr(day, month, year):
    """Build a Date and print its repr."""
    date = Date(day, month, year)
    print(date)

def test_get_day(d, m, y):
    """Print the day component of a fresh Date."""
    date = Date(d, m, y)
    print(date.get_day())

def test_set_day(d, m, y, day):
    """Overwrite the day of a Date, then print it."""
    date = Date(d, m, y)
    date.set_day(day)
    print(date)

def test_get_month(d, m, y):
    """Print the month component of a fresh Date."""
    date = Date(d, m, y)
    print(date.get_month())

def test_set_month(d, m, y, month):
    """Overwrite the month of a Date, then print it."""
    date = Date(d, m, y)
    date.set_month(month)
    print(date)

def test_get_year(d, m, y):
    """Print the year component of a fresh Date."""
    date = Date(d, m, y)
    print(date.get_year())

def test_set_year(d, m, y, year):
    """Overwrite the year of a Date, then print it."""
    date = Date(d, m, y)
    date.set_year(year)
    print(date)

def test_add_day(d, m, y, day):
    """Add a number of days to a Date, then print it."""
    date = Date(d, m, y)
    date.add_day(day)
    print(date)

def test_add_month(d, m, y, month):
    """Add a number of months to a Date, then print it."""
    date = Date(d, m, y)
    date.add_month(month)
    print(date)

def test_add_year(d, m, y, year):
    """Add a number of years to a Date, then print it."""
    date = Date(d, m, y)
    date.add_year(year)
    print(date)

def test_add_d_m_y(d, m, y, day, month, year):
    """Add days, months and years in sequence, then print the result."""
    date = Date(d, m, y)
    date.add_day(day)
    date.add_month(month)
    date.add_year(year)
    print(date)

def test_leap_year(year):
    """Print Date(29, 2, year) -- only meaningful for leap years."""
    date = Date(29, 2, year)
    print(date)
| 16.507042 | 46 | 0.587884 | from date_class import Date
def test_init_repr(day, month, year):
obj = Date(day, month, year)
print(obj)
def test_get_day(d, m, y):
obj = Date(d, m, y)
print(obj.get_day())
def test_set_day(d, m, y, day):
obj = Date(d, m, y)
obj.set_day(day)
print(obj)
def test_get_month(d, m, y):
obj = Date(d, m, y)
print(obj.get_month())
def test_set_month(d, m, y, month):
obj = Date(d, m, y)
obj.set_month(month)
print(obj)
def test_get_year(d, m, y):
obj = Date(d, m, y)
print(obj.get_year())
def test_set_year(d, m, y, year):
obj = Date(d, m, y)
obj.set_year(year)
print(obj)
def test_add_day(d, m, y, day):
obj = Date(d, m, y)
obj.add_day(day)
print(obj)
def test_add_month(d, m, y, month):
obj = Date(d, m, y)
obj.add_month(month)
print(obj)
def test_add_year(d, m, y, year):
obj = Date(d, m, y)
obj.add_year(year)
print(obj)
def test_add_d_m_y(d, m, y, day, month, year):
obj = Date(d, m, y)
obj.add_day(day)
obj.add_month(month)
obj.add_year(year)
print(obj)
def test_leap_year(year):
obj = Date(29, 2, year)
print(obj)
| true | true |
f711752191b86c47ba0c964c73a8a06f6a4265dd | 286 | py | Python | steelscript/netshark/core/_exceptions.py | riverbed/steelscript-netshark | a944f18ad79c775ab6c072924b0bbb613d7462d2 | [
"MIT"
] | null | null | null | steelscript/netshark/core/_exceptions.py | riverbed/steelscript-netshark | a944f18ad79c775ab6c072924b0bbb613d7462d2 | [
"MIT"
] | null | null | null | steelscript/netshark/core/_exceptions.py | riverbed/steelscript-netshark | a944f18ad79c775ab6c072924b0bbb613d7462d2 | [
"MIT"
] | 1 | 2019-03-20T01:47:50.000Z | 2019-03-20T01:47:50.000Z | # Copyright (c) 2015 Riverbed Technology, Inc.
#
# This software is licensed under the terms and conditions of the MIT License
# accompanying the software ("License"). This software is distributed "AS IS"
# as set forth in the License.
class NetSharkException(Exception):
    """Base class for exceptions raised by the NetShark core package."""
    pass
| 23.833333 | 78 | 0.748252 |
class NetSharkException(Exception):
pass
| true | true |
f711759e0907a6b1ed71c17303125fe06cbf6c66 | 26,496 | py | Python | EM_and_MD_answers_wc3.py | haddocking/MolMod-scripts | 604bdb05504c5b95a42e684b45b26f6306c856b9 | [
"Apache-2.0"
] | null | null | null | EM_and_MD_answers_wc3.py | haddocking/MolMod-scripts | 604bdb05504c5b95a42e684b45b26f6306c856b9 | [
"Apache-2.0"
] | null | null | null | EM_and_MD_answers_wc3.py | haddocking/MolMod-scripts | 604bdb05504c5b95a42e684b45b26f6306c856b9 | [
"Apache-2.0"
] | null | null | null | #!/usr/local/bin/python
#############################################################
# Simple MD of Lennard Jones charged or uncharged particles #
# Alexandre Bonvin, Aalt Jan van Dijk, Utrecht University #
# Updated to a singular script with a modern look by Douwe #
# Schulte, Utrecht University (2022) #
# #
# Adapted from a script from Patrick Fuchs, Uni. Paris VI #
#############################################################
##################
# Import modules #
##################
from math import sqrt,log,sin,cos
from random import random,seed
from enum import Enum
from tkinter import Tk, Canvas, DoubleVar, StringVar
from tkinter.ttk import Label, Button, Style, Frame, Notebook, Entry
import sys
#####################
# Define parameters #
#####################
nAtoms = 20 # Number of atoms
Radius = 25.0 # Particle radius; must suit nAtoms so all atoms fit in the box
Mass = 10.0 # Atom mass
Rmin = 2.24 * Radius # Distance at which the LJ energy is minimal (NB: set_size later resets this to 2*Radius)
BoxDim = [500,500] # Box dimension (canvas pixels)
Atom_Coord = [] # Per atom: [x, y, charge]
Epsilon = 2 * Radius # Well depth
Dielec = 1.0 # Dielectric constant
qat = 2 * Radius # Atom absolute charge
frac_neg = 0.5 # Fraction negative charges
OverlapFr = 0.0 # Fraction of overlap allowed
CutOff = 250 # Non-bonded cutoff
CutOffSquare = CutOff**2 # Precalculated square
speed = 5 # Canvas update interval (ms)
cstboltz = 0.00198722 # Boltzmann's constant in kcal/mol/K
cstboltz = 1000*cstboltz/4.18 # Converted to kJ/mol/K
Seed = 42 # Random number seed (fixed for reproducible runs)
# Steepest Descent parameters
drinit = 1.00 # Initial EM displacement
drmin = 0.00001 # Minimum dr value to step EM
drmax = 5.00 # Maximum dr
alpha = 1.05 # Scaling factor for dr if Enew < Eold
beta = 0.90 # Scaling factor for dr if Enew > Eold
deltaE = 0.001 # Energy difference threshold to stop EM
normFmin = 0.001 # Minimum force norm to step EM
# Verlet parameters
Temperature = 300.0 # Temperature in K
timestep = 5.0E-3 # MD time step
# Set specific behaviour for practical session 4
CalculateEnergyPeriodic = True # Practical #4 part 1: minimum-image in calculate_energy
ShowOtherEnergyCutoffResults = False # Practical #4 part 2: also report 2*rmin/4*rmin LJ sums
# Additional program specific parameters
Minimizers = Enum("Minimisers", "SteepestDescent Verlet")
Minimizer = Minimizers.Verlet
drstep = drinit # Current EM displacement (adapted during minimisation)
Iterations = 0 # Step counter, reset on canvas rebuild
canvas_event = None # Pending Tk after() handle; None when not running
Color = [] # Per-atom fill colour (derived from charge)
ATOM = [] # Canvas oval ids, parallel to Atom_Coord
##############################
# Steepest descent minimizer #
##############################
def steepest_descent(atom_coord,drstep,forces):
    """Move every particle one steepest-descent step along its force.

    atom_coord: [x, y, charge] per particle; forces: [fx, fy] per
    particle; drstep: displacement along the normalised force direction.
    Returns (new_positions, normF) where normF is the norm of the total
    force vector; when normF is zero the positions are returned
    unchanged.

    Bug fix: the original guard was ``if normf < 0``, which can never be
    true (normf is a square root), so a zero force norm caused a
    ZeroDivisionError below.  The guard now tests ``normf == 0``.
    """
    new_positions=[]
    # 1) Norm of the concatenated force vector.
    normf = 0.0
    for force in forces:
        normf=normf+force[0]**2.0+force[1]**2.0
    normf=sqrt(normf)
    if normf == 0: return atom_coord, normf  # no net force: nothing to move along
    # 2) Displace each particle by drstep along its normalised force.
    for (coord, force) in zip(atom_coord, forces):
        r0x=coord[0]
        r0y=coord[1]
        sx=force[0]/normf
        sy=force[1]/normf
        r0xnew=r0x+drstep*sx
        r0ynew=r0y+drstep*sy
        new_positions.append([r0xnew,r0ynew,coord[2]])
    return new_positions,normf
#####################
# Verlet integrator #
#####################
def verlet(atom_coord,forces,dtstep,old_atom_coord,mass):
    """Advance all particles one step with the position-Verlet scheme.

    atom_coord / old_atom_coord hold [x, y, charge] per particle for the
    current and previous step, forces holds [fx, fy] per particle,
    dtstep is the integration time step and mass the common particle
    mass.  Returns the list of new [x, y, charge] entries:
        r(t+dt) = 2 r(t) - r(t-dt) + F/m * dt^2
    """
    updated = []
    for current, previous, force in zip(atom_coord, old_atom_coord, forces):
        next_x = 2 * current[0] - previous[0] + force[0] / mass * dtstep ** 2
        next_y = 2 * current[1] - previous[1] + force[1] / mass * dtstep ** 2
        updated.append([next_x, next_y, current[2]])
    return updated
def verlet_step1(atom_coord,velocity,forces,dtstep,mass):
    """Bootstrap step for Verlet: no previous positions exist yet, so use
    the Taylor expansion r(dt) = r(0) + v*dt + F/(2m)*dt^2.

    Also refreshes the module-level energy terms (Ene, EneLJ, ...).
    NOTE(review): the energy is computed from the global Atom_Coord
    (pre-step positions), not from the freshly advanced positions --
    confirm this is intended.
    """
    global Ene,EneLJ,EneCoul,ELJ2,ELJ4
    new_positions=[]
    for coord, vel, force in zip(atom_coord, velocity, forces):
        r0x=coord[0]+dtstep*vel[0]+0.5*dtstep**2*force[0]/mass
        r0y=coord[1]+dtstep*vel[1]+0.5*dtstep**2*force[1]/mass
        new_positions.append([r0x,r0y,coord[2]])
    Ene,EneLJ,EneCoul,ELJ2,ELJ4 = calculate_energy(Atom_Coord,Epsilon,Rmin,Dielec,CutOffSquare,BoxDim)
    return new_positions
def calculate_velocities(old_atom_coord,atom_coord,dtstep):
    """Central-difference velocities from positions one step apart:
    v = (r_new - r_old) / (2*dt), returned as [vx, vy] per particle."""
    return [
        [(new[0] - old[0]) / (2 * dtstep), (new[1] - old[1]) / (2 * dtstep)]
        for new, old in zip(atom_coord, old_atom_coord)
    ]
##########################
# Move particles with MD #
##########################
def simulate():
    """Run one EM/MD step, update the GUI, and re-schedule itself.

    Advances the system with the active integrator (steepest descent or
    Verlet), recomputes energies/temperature, applies periodic boundary
    conditions, redraws the particles and, unless a stop condition is
    met, schedules the next call via Tk's after().
    """
    global Atom_Coord,Radius,Mass,BoxDim,Epsilon,Rmin,CutOffSquare,Iterations,Ene,Old_Atom_Coord
    global Velocity,timestep,report_var_total,report_var_subenergies, drstep, Ene_prev
    global Color,report_var_time,Dielec,root,atom_canvas,speed,canvas_event
    Force = calculate_force(Atom_Coord,Epsilon,Rmin,Dielec,CutOffSquare,BoxDim)
    tmp=Atom_Coord
    if Minimizer == Minimizers.SteepestDescent:
        if Iterations == 0: Ene_prev=Ene
        Atom_Coord, normF=steepest_descent(Atom_Coord,drstep,Force)
    if Minimizer == Minimizers.Verlet:
        if Iterations == 0:
            # No previous positions yet: bootstrap with a Taylor step.
            Old_Atom_Coord=Atom_Coord
            Atom_Coord=verlet_step1(Atom_Coord,Velocity,Force,timestep,Mass)
        Atom_Coord=verlet(Atom_Coord,Force,timestep,Old_Atom_Coord,Mass)
        Velocity=calculate_velocities(Old_Atom_Coord,Atom_Coord,timestep)
    Old_Atom_Coord=tmp
    Ene,EneLJ,EneCoul,ELJ2,ELJ4 = calculate_energy(Atom_Coord,Epsilon,Rmin,Dielec,CutOffSquare,BoxDim)
    Kin,temperature=calculate_temperature(Velocity,nAtoms,cstboltz,Mass)
    # Update drstep (adaptive EM step: grow on success, shrink on failure)
    if Minimizer == Minimizers.SteepestDescent:
        if Ene < Ene_prev:
            drstep = min(drmax, drstep * alpha)
        else:
            drstep = drstep * beta
    # Update top labels
    report_var_time.set("Step: %d Time: %8.3f" % (Iterations,float(Iterations)*timestep))
    report_var_total.set("Etot: %6.1f Ekin: %6.1f Epot: %6.1f" % (Ene+Kin,Kin,Ene))
    if ShowOtherEnergyCutoffResults:
        report_var_subenergies.set("Elj: %6.2f Elj2: %6.2f Elj4: %6.2f Ecoul: %6.1f Temp: %6.1f" % (EneLJ,ELJ2,ELJ4,EneCoul,temperature))
    else:
        report_var_subenergies.set("Elj: %6.1f Ecoul: %6.1f Temp: %6.1f" % (EneLJ,EneCoul,temperature))
    # Apply boundary conditions (wrap both current and old positions)
    for coord, old_coord in zip(Atom_Coord, Old_Atom_Coord):
        for i in range(2): # i=0 -> case x coordinate ; i=1 -> case y coordinate
            if coord[i] < 0:
                coord[i] += BoxDim[i]
                old_coord[i] += BoxDim[i]
            if coord[i] > BoxDim[i]:
                coord[i] -= BoxDim[i]
                old_coord[i] -= BoxDim[i]
    # Draw new canvas coordinates
    for atom, coord in zip(ATOM, Atom_Coord):
        x, y = coord[0], coord[1]
        atom_canvas.coords(atom, x + Radius, y + Radius, x - Radius, y - Radius)
    # Print to terminal window
    if Iterations % 20 == 0:
        if ShowOtherEnergyCutoffResults:
            print("Step: %4d Time: %8.3f Etot: %6.1f Ekin: %6.1f Epot: %6.1f Elj: %6.1f Elj2: %6.1f Elj4: %6.1f Ecoul: %6.1f Temp: %6.1f" % (Iterations,float(Iterations)*timestep,Ene+Kin,Kin,Ene,EneLJ,ELJ2,ELJ4,EneCoul,temperature))
        else:
            print("Step: %4d Time: %8.3f Etot: %6.1f Ekin: %6.1f Epot: %6.1f Elj: %6.1f Ecoul: %6.1f Temp: %6.1f" % (Iterations,float(Iterations)*timestep,Ene+Kin,Kin,Ene,EneLJ,EneCoul,temperature))
    # Stopping conditions (EM convergence, or runaway temperature)
    if Minimizer == Minimizers.SteepestDescent and (abs(Ene - Ene_prev) < deltaE or drstep < drmin or normF < normFmin):
        print("STOPPING... deltaE <",deltaE,", or drstep <",drmin,", or normF <",normFmin)
        outtext="Step: %4d Epot: %6.1f Elj: %6.1f Ecoul: %6.1f deltaE: %10.6f <normF>: %8.6f dr: %8.6f" % (Iterations,Ene,EneLJ,EneCoul,Ene - Ene_prev,normF,drstep)
        print(outtext)
    elif temperature > 1000000:
        print("The system is exploding !!!")
        print("Step: %4d Time: %8.3f" % (Iterations,float(Iterations)*timestep))
        print("Etot: %6.1f Ekin: %6.1f Epot: %6.1f" % (Ene+Kin,Kin,Ene))
        print("Elj: %6.1f Ecoul: %6.1f Temp: %6.1f" % (EneLJ,EneCoul,temperature))
        print("Emergency stop")
    else:
        # Not stopped: remember the energy and schedule the next step.
        Ene_prev=Ene
        Iterations=Iterations+1
        canvas_event=atom_canvas.after(speed,simulate)
####################
# Energy functions #
####################
# Lennard-Jones potential evaluated from a squared distance.
def LJ2(r2, epsilon, sigma6):
    """Return eps*((sigma/r)^12 - (sigma/r)^6) given r^2 and sigma^6.

    Factored as eps*Z*(Z-1) with Z = (sigma/r)^6, which avoids the
    explicit square/6th roots of the textbook form.
    """
    z = (1 / r2) ** 3 * sigma6
    return epsilon * z * (z - 1)

# Classical Coulomb potential evaluated from a squared distance.
def Coulomb2(r, dielec, qa, qb):
    """Return qa*qb / (dielec * sqrt(r)) -- ``r`` is the squared distance."""
    separation = sqrt(r)
    return qa * qb / (dielec * separation)
# Calculate energy Evdw + Ecoulomb (used squared distance)
def calculate_energy(coord,epsilon,rmin,dielec,cutoffsquare,boxdim,elec=1):
    """Total non-bonded energy over all particle pairs.

    Returns (Ene, ELJ, ECoul, ELJ2, ELJ4):
      Ene   - LJ + Coulomb within ``cutoffsquare``
      ELJ   - LJ part of Ene
      ECoul - Coulomb part of Ene (skipped when ``elec`` is falsy)
      ELJ2  - LJ energy restricted to a 2*rmin cutoff
      ELJ4  - LJ energy restricted to a 4*rmin cutoff
    The minimum-image convention is applied only when the module flag
    CalculateEnergyPeriodic is set (practical session #4).
    """
    global CalculateEnergyPeriodic
    cutoff2=2.0*rmin; cutoff2sq=cutoff2**2
    cutoff4=4.0*rmin; cutoff4sq=cutoff4**2
    Ene = 0.0; distsquare = 0
    ELJ = 0.0; ECoul=0.0
    ELJ2 = 0.0; ELJ4 = 0.0
    rmin_exp6 = rmin**6
    # Doubly nested loop over all particle pairs
    for i in range(len(coord)-1):
        for j in range(i+1,len(coord)):
            # Calculate the squared atomic distance
            distsquare = 0
            for k in range(2):
                tmp = coord[j][k] - coord[i][k]
                # Chooses the nearest image
                if CalculateEnergyPeriodic:
                    halfbox = boxdim[k]/2
                    tmp = tmp - SignR(halfbox,tmp-halfbox) - SignR(halfbox,tmp+halfbox)
                distsquare += tmp**2
            # Compute vdw and Coulomb energy
            if distsquare < cutoffsquare:
                qa = coord[i][2]
                qb = coord[j][2]
                vdw = LJ2(distsquare, epsilon, rmin_exp6)
                Ene += vdw
                ELJ += vdw
                if elec:
                    CC = Coulomb2(distsquare,dielec,qa,qb)
                    Ene+=CC
                    ECoul+=CC
                if distsquare < cutoff4sq:
                    ELJ4 += vdw
                if distsquare < cutoff2sq:
                    ELJ2 += vdw
    return Ene,ELJ,ECoul,ELJ2,ELJ4
# Calculate kinetic energy and temperature
def calculate_temperature(vel,nat,k,mass):
    """Return (kinetic energy, temperature) for a list of [vx, vy].

    Ekin = 1/2 m v^2 summed over atoms; T follows from N k T = Ekin.
    """
    speed2_sum = 0.0
    for vx, vy in vel:
        speed2_sum += vx ** 2 + vy ** 2
    kinetic = 0.5 * mass * speed2_sum
    return kinetic, kinetic / (nat * k)
###################
# Force functions #
###################
# x-/y-component of dE/dx for the Lennard-Jones term (squared distance in).
def calculate_lennard_jones(distsquare, epsilon, rmin_exp6,xi):
    """Chain rule dE/dZ * dZ/dr * dr/dx for E = eps*Z*(Z-1), Z = rmin^6/r^6.

    ``xi`` is the coordinate difference along the requested axis.
    """
    r = sqrt(distsquare)
    z = (1 / distsquare) ** 3 * rmin_exp6
    de_dz = epsilon * (2 * z - 1)
    dz_dr = rmin_exp6 * (-6.0 / r ** (7.0))
    dr_dx = xi / r
    return de_dz * dz_dr * dr_dx

# x-/y-component of dE/dx for the Coulomb term (squared distance in).
def calculate_coulomb(distsquare,dielec,qa,qb,xi):
    """Chain rule dE/dr * dr/dx for E = qa*qb/(dielec*r)."""
    r = sqrt(distsquare)
    de_dr = -1.0 * (qa * qb / dielec) * (1 / distsquare)
    return de_dr * (xi / r)
# Calculate force from Evdw + Ecoulomb (uses squared distance)
def calculate_force(coord,epsilon,rmin,dielec,cutoffsquare,boxdim):
    """Return the total [fx, fy] on every particle (LJ + Coulomb).

    Loops over all ordered pairs (i, j), i != j, always applies the
    minimum-image convention per dimension, and accumulates the pair
    contributions for neighbours within ``cutoffsquare``.
    """
    Force=[] ; distsquare = 0
    rmin_exp6 = rmin**6
    # Doubly nested loop over all particle pairs
    for i in range(len(coord)):
        tmpforce=[0.0,0.0]
        for j in range(len(coord)):
            if not i==j:
                # Calculate the squared atomic distance
                distsquare = 0
                for k in range(2):
                    tmp = coord[j][k] - coord[i][k]
                    # Chooses the nearest image
                    halfbox = boxdim[k]/2
                    tmp = tmp - SignR(halfbox,tmp-halfbox) - SignR(halfbox,tmp+halfbox)
                    distsquare += tmp**2
                # Compute vdw force
                if distsquare < cutoffsquare:
                    qa = coord[i][2]
                    qb = coord[j][2]
                    for k in range(2):
                        tmp = coord[j][k] - coord[i][k]
                        ff = calculate_lennard_jones(distsquare, epsilon, rmin_exp6,tmp)
                        ff += calculate_coulomb(distsquare,dielec,qa,qb,tmp)
                        tmpforce[k]+=ff
        Force.append(tmpforce)
    return Force
###################
# Other functions #
###################
# Euclidean distance between two 2-D points.
def dist(A,B):
    """Return the Euclidean distance between points A and B."""
    dx = A[0] - B[0]
    dy = A[1] - B[1]
    return sqrt(dx ** 2 + dy ** 2)

# Transfer the sign of b onto a (b == 0 counts as negative).
def SignR(a,b):
    """Return +a when b > 0, otherwise -a."""
    return a if b > 0 else -a

# Fill colour for a particle, derived from its charge.
def charge_color(charge,qat):
    """White for a positive (+qat) particle, dark grey otherwise."""
    return "white" if charge == qat else "#333333"
##################
# Initialization #
##################
# Generate random coordinates
def InitConf(n,dim,radius,qat,frac_neg):
    """Place n non-overlapping particles at random in the box.

    The first int(n*frac_neg) particles get charge -qat, the rest +qat.
    Candidate positions are retried until they do not overlap an
    existing particle; after 100000 failed trials the program exits.
    Uses the fixed module Seed for reproducibility.
    Returns a list of [x, y, charge] entries.
    """
    global Seed
    seed(Seed)
    print("Initializing box, please wait...", end='')
    tmp_coord = []
    ntrial = 0
    i = 1
    # Fix first atom
    x = random()*(dim[0]-2*radius)+radius
    y = random()*(dim[1]-2*radius)+radius
    nneg = int(float(n) * frac_neg)
    charge = -qat
    if nneg == 0: charge = qat
    tmp_coord.append([x,y,charge])
    # First pass (negative == -1) fills negatives up to nneg,
    # second pass (negative == 1) fills the positive remainder.
    for negative in [-1, 1]:
        while negative == -1 and i < nneg or negative == 1 and i < n:
            x = random()*(dim[0]-2*radius)+radius
            y = random()*(dim[1]-2*radius)+radius
            # Check wether the new particle overlaps with an existing one
            OVERLAP = False
            for j in range(i):
                if dist(tmp_coord[j],[x,y]) < (1-OverlapFr)*2*radius:
                    OVERLAP = True
            if not OVERLAP:
                tmp_coord.append([x,y,negative * qat])
                i += 1
            ntrial = ntrial + 1
            if ntrial > 100000:
                print('error')
                print("Initialisation failed")
                print("==> Reduce radius or number of atoms")
                sys.exit()
    print("done")
    return tmp_coord
# Generate random charges
def InitCharge(n,qat,frac_neg):
    """Reassign charges in the existing global Atom_Coord in place.

    The first int(n*frac_neg) particles get -qat, the rest +qat
    (positions are untouched; used when only charge settings changed).
    """
    global Atom_Coord
    print("Initializing charges, please wait...", end='')
    i = 0
    nneg = int(float(n) * frac_neg)
    charge = -qat
    if nneg == 0: charge = qat
    Atom_Coord[i][2]=charge
    i += 1
    while i < nneg:
        Atom_Coord[i][2]=-qat
        i += 1
    while i < n:
        Atom_Coord[i][2]=qat
        i += 1
    print("done")
# Generates initial velocities according to Maxwell distribution
def InitVel(n,temperature,cstboltz,mass):
    """Draw initial velocities from a Maxwell distribution.

    Gaussian components are generated with a Box-Muller-style transform,
    the net centre-of-mass motion is removed, and the velocities are
    rescaled so the kinetic temperature matches ``temperature`` exactly.
    Uses the fixed module Seed.  Returns a list of [vx, vy] per particle.
    """
    global Seed
    seed(Seed)
    stdev=sqrt(cstboltz*temperature/mass)
    print("Initializing velocities, please wait...", end='')
    tmp_vel=[]
    for i in range(n):
        # Generate random numbers according to Gaussian:
        r1=random()
        r2=random()
        x1=sqrt(-2.0*log(r1))*cos(r2)*stdev
        x2=sqrt(-2.0*log(r1))*sin(0.5*r2)*stdev
        tmp_vel.append([x1,x2])
    # Remove overall motion
    vxt=0.0
    vyt=0.0
    for item in tmp_vel:
        vxt+=item[0]
        vyt+=item[1]
    for item in tmp_vel:
        item[0] -= vxt/float(n)
        item[1] -= vyt/float(n)
    # Scaling factor is used to get temperature exactly equal to desired temperature
    kin,tt=calculate_temperature(tmp_vel,n,cstboltz,mass)
    scaling=sqrt(temperature/tt)
    vel=[]
    for item in tmp_vel:
        vx=item[0]*scaling
        vy=item[1]*scaling
        vel.append([vx,vy])
    print("done")
    return vel
########################################
# Various functions for input + layout #
########################################
# Setup system
def set_up_atoms(repack=1):
    """(Re)create the particles and draw them on the canvas.

    repack=1: new random positions, charges and velocities;
    repack=2: keep positions, reassign charges only;
    repack=0: redraw using the current state.
    """
    global Iterations,Velocity,Temperature,Mass,cstboltz,atom_canvas,ATOM,Atom_Coord,Color
    ATOM = []
    if repack==1:
        Atom_Coord = InitConf(nAtoms,BoxDim,Radius,qat,frac_neg)
        Color = []
        for i in range(nAtoms):
            Color.append(charge_color(Atom_Coord[i][2],qat))
        Velocity=InitVel(nAtoms,Temperature,cstboltz,Mass)
    if repack==2:
        InitCharge(nAtoms,qat,frac_neg)
        Color = []
        for i in range(nAtoms):
            Color.append(charge_color(Atom_Coord[i][2],qat))
    for (color, atom) in zip(Color, Atom_Coord):
        x, y = atom[0], atom[1]
        ATOM.append(atom_canvas.create_oval(x + Radius,y + Radius,x - Radius,y - Radius,fill=color))
    update_energy()
# GUI entry callbacks: each reads its Tk variable into the matching
# module global and rebuilds/redraws the system.  update_canvas(1)
# repacks positions, (2) recolours charges, (0) only redraws.
# Set number of particles
def set_r(event):
    global nAtoms
    nAtoms=int(r.get())
    update_canvas()
# Set atom Radius
def set_size(event):
    global Radius,Rmin
    Radius=int(size.get())
    Rmin = 2 * Radius
    update_canvas()
# Set epsilon for Lennard-Jones
def set_vdw1(event):
    global Epsilon
    Epsilon=int(vdw1.get())
    update_canvas(0)
# Set sigma for Lennard-Jones
def set_vdw2(event):
    global Rmin
    Rmin=int(vdw2.get())
    update_canvas(0)
# Set charge fraction
def set_frac(event):
    global frac_neg
    frac_neg=float(frac.get())
    update_canvas(2)
# Set particle charge
def set_q(event):
    global qat
    qat=float(q.get())
    update_canvas(2)
# Set dielectric constant
def set_diel(event):
    global Dielec
    Dielec=float(diel.get())
    update_canvas(0)
# Set Temperature
def set_temp(event):
    global Temperature
    Temperature=float(temp.get())
    update_canvas(0)
# Set MD time step (also regenerates velocities for the new step size)
def set_tstep(event):
    global timestep,Velocity,nAtoms,Temperature,cstboltz,Mass
    timestep=float(tstep.get())
    update_canvas(0)
    Velocity=InitVel(nAtoms,Temperature,cstboltz,Mass)
# Set minimum Force norm difference for stop condition
def set_dFmin(event):
    global normFmin
    normFmin=float(Fmin.get())
    update_canvas(0)
# Set minimum Energy difference for stop condition
def set_deltaE(event):
    global deltaE
    deltaE=float(Emin.get())
    update_canvas(0)
# Set initial displacement for minimizer
# NOTE(review): updates drinit only; the running drstep is unchanged.
def set_dxstep(event):
    global drinit
    drinit=float(dxstep.get())
    update_canvas(0)
# Set alpha factor for increasing dr
def set_alpha(event):
    global alpha
    alpha=float(alphafactor.get())
    update_canvas(0)
# Set beta factor for decreasing dr
def set_beta(event):
    global beta
    beta=float(betafactor.get())
    update_canvas(0)
# Update energy
def update_energy():
    """Recompute energies and temperature, then refresh the report labels."""
    global Atom_Coord,BoxDim,Epsilon,Rmin,CutOffSquare,Iterations,Ene
    global Dielec,Velocity,Mass,cstboltz
    Ene,EneLJ,EneCoul,ELJ2,ELJ4 = calculate_energy(Atom_Coord,Epsilon,Rmin,Dielec,CutOffSquare,BoxDim)
    Kin,temperature=calculate_temperature(Velocity,nAtoms,cstboltz,Mass)
    report_var_time.set("Step: %d Time: %8.3f" % (Iterations,float(Iterations)*timestep))
    report_var_total.set("Etot: %6.1f Ekin: %6.1f Epot: %6.1f" % (Ene+Kin,Kin,Ene))
    if ShowOtherEnergyCutoffResults:
        report_var_subenergies.set("Elj: %6.2f Elj2: %6.2f Elj4: %6.2f Ecoul: %6.1f Temp: %6.1f" % (EneLJ,ELJ2,ELJ4,EneCoul,temperature))
    else:
        report_var_subenergies.set("Elj: %6.1f Ecoul: %6.1f Temp: %6.1f" % (EneLJ,EneCoul,temperature))
def update_canvas(repack=1):
    """Clear the canvas, rebuild the particles (see set_up_atoms for the
    ``repack`` values) and reset the step counter."""
    global Iterations, atom_canvas, Atom_Coord, ATOM, Color
    atom_canvas.delete("all")
    set_up_atoms(repack)
    update_energy()
    Iterations = 0
################
# MAIN PROGRAM #
################
def die(*_event):
    """Terminate the program.

    Accepts (and ignores) an optional event argument so it can be used
    directly as a Tk key-binding callback: ``root.bind`` invokes its
    callback with an Event object, so the original zero-argument
    signature raised TypeError when <Escape>/<Control-c> was pressed.
    """
    sys.exit()
def select_minimizer(*args):
    """Notebook-tab callback: switch integrator and reset the system."""
    global Minimizer, minimizer_selector
    reset()
    if minimizer_selector.index('current') == 0: # First tab is Steepest Descent
        Minimizer = Minimizers.SteepestDescent
        selected_method_text.set("Active method: Steepest Descent")
    else:
        Minimizer = Minimizers.Verlet
        selected_method_text.set("Active method: Verlet")
def start():
    """Start the periodic simulate() loop (no-op when already running)."""
    global canvas_event
    if canvas_event==None:
        simulate()
def stop():
    """Cancel the pending simulate() callback and redraw in place."""
    global canvas_event
    if canvas_event != None: atom_canvas.after_cancel(canvas_event)
    canvas_event = None
    update_canvas(0)
def reset():
    """Cancel the pending callback and rebuild a fresh random system."""
    global canvas_event
    if canvas_event != None: atom_canvas.after_cancel(canvas_event)
    canvas_event = None
    update_canvas()
# ---- Build the Tk GUI and launch the application ----
root = Tk()
root.winfo_toplevel().title("MolMod Practical")
root.bind("<Escape>", die)
root.bind('<Control-c>', die)
top=Frame(root)
top.pack(side='top')
title=Frame(top)
title.pack(side='top')
labels=Frame(top)
labels.pack(side='top')
buttons=Frame(top)
buttons.pack(side='bottom')
atom_canvas=Canvas(root, width=BoxDim[0], height=BoxDim[1],bg="#ccddff")
atom_canvas.pack()
# Tabbed panel: one tab of parameters per integration method.
minimizer_selector=Notebook(root)
minimizer_selector.pack(side='bottom')
steepest_descent_pack=Frame(minimizer_selector)
steepest_descent_pack.pack(side='top')
verlet_pack=Frame(minimizer_selector)
verlet_pack.pack(side='top')
Style().configure("Notebook", foreground="black")
minimizer_selector.add(steepest_descent_pack, text="Steepest Descent")
minimizer_selector.add(verlet_pack, text="Verlet")
minimizer_selector.bind("<<NotebookTabChanged>>", select_minimizer)
minimizer_selector.select(1)
selected_method=Frame(root)
selected_method.pack(side='bottom')
low2=Frame(root)
low2.pack(side='bottom')
low1=Frame(root)
low1.pack(side='bottom')
# Tk variables backing the parameter entry fields.
# NOTE(review): q and temp are created twice below; the duplicates are
# harmless but redundant.
r=DoubleVar()
size=DoubleVar()
vdw1=DoubleVar()
vdw2=DoubleVar()
frac=DoubleVar()
diel=DoubleVar()
q=DoubleVar()
temp=DoubleVar()
tstep=DoubleVar()
Emin=DoubleVar()
Fmin=DoubleVar()
alphafactor=DoubleVar()
betafactor=DoubleVar()
q=DoubleVar()
temp=DoubleVar()
dxstep=DoubleVar()
# Create an entry with a label
def create_entry(pack, text, var, bound_var, callback):
    """Add a labelled entry to ``pack``, seed ``var`` with ``bound_var``
    and invoke ``callback`` on <Return> or focus loss."""
    Label(pack,text=text + " =").pack(side='left')
    var.set(bound_var)
    tstep_entry=Entry(pack,width=6,textvariable=var)
    tstep_entry.pack(side='left')
    tstep_entry.bind('<Return>', callback)
    tstep_entry.bind('<FocusOut>', callback)
# Set up the general parameters
create_entry(low1, "Atoms", r, nAtoms, set_r)
create_entry(low1, "VDW radius", size, Radius, set_size)
create_entry(low1, "VDW ε", vdw1, Epsilon, set_vdw1)
create_entry(low1, "VDW σ", vdw2, Rmin, set_vdw2)
create_entry(low2, "Coulomb param: fraction negative", frac, frac_neg, set_frac)
create_entry(low2, "Charge", q, qat, set_q)
create_entry(low2, "Dielec", diel, Dielec, set_diel)
# Steepest Descent Paramaters
create_entry(steepest_descent_pack, "DeltaE threshold", Emin, deltaE, set_deltaE)
create_entry(steepest_descent_pack, "dFmin", Fmin, normFmin, set_dFmin)
create_entry(steepest_descent_pack, "dr init", dxstep, drinit, set_dxstep)
create_entry(steepest_descent_pack, "α", alphafactor, alpha, set_alpha)
create_entry(steepest_descent_pack, "β", betafactor, beta, set_beta)
# Verlet Parameters
create_entry(verlet_pack, "T (K)", temp, Temperature, set_temp)
create_entry(verlet_pack, "Timestep", tstep, timestep, set_tstep)
# Set up title
Label(title,text="EM & MD",foreground='blue',font='times 18 bold').pack(side='left')
# Set up reporting labels
report_var_time = StringVar()
Label(labels,textvariable=report_var_time).pack(side='top')
report_var_total = StringVar()
Label(labels,textvariable=report_var_total).pack(side='top')
report_var_subenergies = StringVar()
Label(labels,textvariable=report_var_subenergies).pack(side='top')
selected_method_text = StringVar()
Label(selected_method,textvariable=selected_method_text).pack(side='top')
# Set up buttons
Style().configure("TButton", padding=1, relief="flat")
Style().configure("Start.TButton", foreground='blue')
Style().configure("Stop.TButton", foreground='red')
Style().configure("Reset.TButton", foreground='green')
Button(buttons,text='Start',command=start,style="Start.TButton").pack(side='left',fill='x')
Button(buttons,text='Stop',command=stop,style="Stop.TButton").pack(side='left')
Button(buttons,text='Reset',command=reset,style="Reset.TButton").pack(side='left')
# Set up the positions of the atoms and start the simulation
set_up_atoms()
print("Click on 'Start' to go ahead")
print("Use <ESC> or 'X' to quit")
root.mainloop()
| 34.012837 | 232 | 0.629265 |
ord):
v0x=(coord[0]-old_coord[0])/(2*dtstep)
v0y=(coord[1]-old_coord[1])/(2*dtstep)
velocities.append([v0x,v0y])
return velocities
##########################
# Move particles with MD #
##########################
def simulate():
global Atom_Coord,Radius,Mass,BoxDim,Epsilon,Rmin,CutOffSquare,Iterations,Ene,Old_Atom_Coord
global Velocity,timestep,report_var_total,report_var_subenergies, drstep, Ene_prev
global Color,report_var_time,Dielec,root,atom_canvas,speed,canvas_event
Force = calculate_force(Atom_Coord,Epsilon,Rmin,Dielec,CutOffSquare,BoxDim)
tmp=Atom_Coord
if Minimizer == Minimizers.SteepestDescent:
if Iterations == 0: Ene_prev=Ene
Atom_Coord, normF=steepest_descent(Atom_Coord,drstep,Force)
if Minimizer == Minimizers.Verlet:
if Iterations == 0:
Old_Atom_Coord=Atom_Coord
Atom_Coord=verlet_step1(Atom_Coord,Velocity,Force,timestep,Mass)
Atom_Coord=verlet(Atom_Coord,Force,timestep,Old_Atom_Coord,Mass)
Velocity=calculate_velocities(Old_Atom_Coord,Atom_Coord,timestep)
Old_Atom_Coord=tmp
Ene,EneLJ,EneCoul,ELJ2,ELJ4 = calculate_energy(Atom_Coord,Epsilon,Rmin,Dielec,CutOffSquare,BoxDim)
Kin,temperature=calculate_temperature(Velocity,nAtoms,cstboltz,Mass)
# Update drstep
if Minimizer == Minimizers.SteepestDescent:
if Ene < Ene_prev:
drstep = min(drmax, drstep * alpha)
else:
drstep = drstep * beta
# Update top labels
report_var_time.set("Step: %d Time: %8.3f" % (Iterations,float(Iterations)*timestep))
report_var_total.set("Etot: %6.1f Ekin: %6.1f Epot: %6.1f" % (Ene+Kin,Kin,Ene))
if ShowOtherEnergyCutoffResults:
report_var_subenergies.set("Elj: %6.2f Elj2: %6.2f Elj4: %6.2f Ecoul: %6.1f Temp: %6.1f" % (EneLJ,ELJ2,ELJ4,EneCoul,temperature))
else:
report_var_subenergies.set("Elj: %6.1f Ecoul: %6.1f Temp: %6.1f" % (EneLJ,EneCoul,temperature))
# Apply boundary conditions
for coord, old_coord in zip(Atom_Coord, Old_Atom_Coord):
for i in range(2): # i=0 -> case x coordinate ; i=1 -> case y coordinate
if coord[i] < 0:
coord[i] += BoxDim[i]
old_coord[i] += BoxDim[i]
if coord[i] > BoxDim[i]:
coord[i] -= BoxDim[i]
old_coord[i] -= BoxDim[i]
# Draw new canvas coordinates
for atom, coord in zip(ATOM, Atom_Coord):
x, y = coord[0], coord[1]
atom_canvas.coords(atom, x + Radius, y + Radius, x - Radius, y - Radius)
# Print to terminal window
if Iterations % 20 == 0:
if ShowOtherEnergyCutoffResults:
print("Step: %4d Time: %8.3f Etot: %6.1f Ekin: %6.1f Epot: %6.1f Elj: %6.1f Elj2: %6.1f Elj4: %6.1f Ecoul: %6.1f Temp: %6.1f" % (Iterations,float(Iterations)*timestep,Ene+Kin,Kin,Ene,EneLJ,ELJ2,ELJ4,EneCoul,temperature))
else:
print("Step: %4d Time: %8.3f Etot: %6.1f Ekin: %6.1f Epot: %6.1f Elj: %6.1f Ecoul: %6.1f Temp: %6.1f" % (Iterations,float(Iterations)*timestep,Ene+Kin,Kin,Ene,EneLJ,EneCoul,temperature))
# Stopping conditions
if Minimizer == Minimizers.SteepestDescent and (abs(Ene - Ene_prev) < deltaE or drstep < drmin or normF < normFmin):
print("STOPPING... deltaE <",deltaE,", or drstep <",drmin,", or normF <",normFmin)
outtext="Step: %4d Epot: %6.1f Elj: %6.1f Ecoul: %6.1f deltaE: %10.6f <normF>: %8.6f dr: %8.6f" % (Iterations,Ene,EneLJ,EneCoul,Ene - Ene_prev,normF,drstep)
print(outtext)
elif temperature > 1000000:
print("The system is exploding !!!")
print("Step: %4d Time: %8.3f" % (Iterations,float(Iterations)*timestep))
print("Etot: %6.1f Ekin: %6.1f Epot: %6.1f" % (Ene+Kin,Kin,Ene))
print("Elj: %6.1f Ecoul: %6.1f Temp: %6.1f" % (EneLJ,EneCoul,temperature))
print("Emergency stop")
else:
Ene_prev=Ene
Iterations=Iterations+1
canvas_event=atom_canvas.after(speed,simulate)
####################
# Energy functions #
####################
# Lennard-Jones pair energy evaluated directly from the squared distance
def LJ2(r2, epsilon, sigma6):
    """Return the LJ energy for a pair at squared distance r2.

    sigma6 is sigma**6, pre-computed by the caller.  Algebraically this is
    epsilon*((sigma/r)**12 - (sigma/r)**6), but working with r2 avoids the
    square root and the fractional power, which is measurably faster in the
    inner pair loop.
    """
    z = (1 / r2) ** 3 * sigma6          # z == (sigma/r)**6
    return epsilon * z * (z - 1)
# Classical Coulomb pair energy; note the argument is the SQUARED distance
def Coulomb2(r, dielec, qa, qb):
    """Return qa*qb / (dielec * sqrt(r)) — r is a squared distance."""
    separation = sqrt(r)
    return qa * qb / (dielec * separation)
# Calculate energy Evdw + Ecoulomb (uses squared distances throughout)
def calculate_energy(coord,epsilon,rmin,dielec,cutoffsquare,boxdim,elec=1):
    """Total potential energy over all particle pairs.

    coord rows are [x, y, charge].  Returns (Ene, ELJ, ECoul, ELJ2, ELJ4),
    where ELJ2/ELJ4 are the LJ sums restricted to the shorter cutoffs
    2*rmin and 4*rmin (used to compare cutoff choices in the GUI readout).
    Pass elec=0 to skip the Coulomb term.  The module-global
    CalculateEnergyPeriodic flag selects whether the minimum-image
    convention is applied.
    """
    global CalculateEnergyPeriodic
    cutoff2=2.0*rmin; cutoff2sq=cutoff2**2
    cutoff4=4.0*rmin; cutoff4sq=cutoff4**2
    Ene = 0.0; distsquare = 0
    ELJ = 0.0; ECoul=0.0
    ELJ2 = 0.0; ELJ4 = 0.0
    rmin_exp6 = rmin**6
    # Doubly nested loop over all particle pairs (each pair counted once)
    for i in range(len(coord)-1):
        for j in range(i+1,len(coord)):
            # Calculate the squared atomic distance
            distsquare = 0
            for k in range(2):
                tmp = coord[j][k] - coord[i][k]
                # Chooses the nearest periodic image of particle j
                if CalculateEnergyPeriodic:
                    halfbox = boxdim[k]/2
                    tmp = tmp - SignR(halfbox,tmp-halfbox) - SignR(halfbox,tmp+halfbox)
                distsquare += tmp**2
            # Compute vdw and Coulomb energy inside the main cutoff
            if distsquare < cutoffsquare:
                qa = coord[i][2]
                qb = coord[j][2]
                vdw = LJ2(distsquare, epsilon, rmin_exp6)
                Ene += vdw
                ELJ += vdw
                if elec:
                    CC = Coulomb2(distsquare,dielec,qa,qb)
                    Ene+=CC
                    ECoul+=CC
                # Accumulate the same LJ contribution under tighter cutoffs
                if distsquare < cutoff4sq:
                    ELJ4 += vdw
                if distsquare < cutoff2sq:
                    ELJ2 += vdw
    return Ene,ELJ,ECoul,ELJ2,ELJ4
# Kinetic energy and instantaneous temperature from the velocity list
def calculate_temperature(vel, nat, k, mass):
    """Return (kinetic energy, temperature) for 2-D velocities *vel*.

    Ekin = 0.5 * m * sum(v^2); the temperature follows from N*k*T = Ekin.
    """
    speed_sq = sum(v[0] ** 2 + v[1] ** 2 for v in vel)
    kinetic = 0.5 * mass * speed_sq
    return kinetic, kinetic / (nat * k)
###################
# Force functions #
###################
# One Cartesian component of the LJ force (takes the squared distance)
def calculate_lennard_jones(distsquare, epsilon, rmin_exp6, xi):
    """LJ force component along the axis of displacement xi.

    Chain rule: dE/dx = (dE/dz) * (dz/dr) * (dr/dx), with
    z = rmin**6 / r**6 and E = epsilon * z * (z - 1).
    """
    rij = sqrt(distsquare)
    z = (1 / distsquare) ** 3 * rmin_exp6
    de_dz = epsilon * (2 * z - 1)
    dz_dr = rmin_exp6 * (-6.0 / rij ** 7.0)
    return de_dz * dz_dr * (xi / rij)
# One Cartesian component of the Coulomb force (takes the squared distance)
def calculate_coulomb(distsquare, dielec, qa, qb, xi):
    """Coulomb force component along the axis of displacement xi.

    dE/dr of qa*qb/(dielec*r) is -qa*qb/(dielec*r**2); dr/dx = xi/r.
    """
    rij = sqrt(distsquare)
    de_dr = -1.0 * (qa * qb / dielec) * (1 / distsquare)
    return de_dr * (xi / rij)
# Calculate force from Evdw + Ecoulomb (uses squared distance)
def calculate_force(coord,epsilon,rmin,dielec,cutoffsquare,boxdim):
    """Per-particle force vectors [fx, fy] from LJ + Coulomb pairs.

    Unlike calculate_energy, the minimum-image convention is always
    applied here.  NOTE(review): the full i/j double loop evaluates each
    pair twice; Newton's third law could halve the work.
    """
    Force=[] ; distsquare = 0
    rmin_exp6 = rmin**6
    # Doubly nested loop over all particle pairs
    for i in range(len(coord)):
        tmpforce=[0.0,0.0]
        for j in range(len(coord)):
            if not i==j:
                # Calculate the squared atomic distance
                distsquare = 0
                for k in range(2):
                    tmp = coord[j][k] - coord[i][k]
                    # Chooses the nearest periodic image
                    halfbox = boxdim[k]/2
                    tmp = tmp - SignR(halfbox,tmp-halfbox) - SignR(halfbox,tmp+halfbox)
                    distsquare += tmp**2
                # Compute vdw + Coulomb force inside the cutoff
                if distsquare < cutoffsquare:
                    qa = coord[i][2]
                    qb = coord[j][2]
                    for k in range(2):
                        # NOTE(review): this displacement component is NOT
                        # minimum-imaged, while distsquare above is — the
                        # force direction may be wrong across the boundary;
                        # confirm intended.
                        tmp = coord[j][k] - coord[i][k]
                        ff = calculate_lennard_jones(distsquare, epsilon, rmin_exp6,tmp)
                        ff += calculate_coulomb(distsquare,dielec,qa,qb,tmp)
                        tmpforce[k]+=ff
        Force.append(tmpforce)
    return Force
###################
# Other functions #
###################
# Plain (non-periodic) Euclidean distance between two 2-D points
def dist(A, B):
    """Return the Euclidean distance between points A and B."""
    dx = A[0] - B[0]
    dy = A[1] - B[1]
    return sqrt(dx ** 2 + dy ** 2)
# Transfer-of-sign helper (Fortran SIGN-style, but b == 0 maps to -a)
def SignR(a, b):
    """Return a when b is positive, otherwise -a."""
    return a if b > 0 else -a
# Map a particle's charge to its canvas fill colour
def charge_color(charge, qat):
    """Return "white" for charge == +qat, dark grey otherwise."""
    return "white" if charge == qat else "#333333"
##################
# Initialization #
##################
# Generate random coordinates
def InitConf(n,dim,radius,qat,frac_neg):
    """Place n non-overlapping particles at random inside the box.

    Returns a list of [x, y, charge] rows.  The first int(n * frac_neg)
    particles (including the fixed first one) carry charge -qat, the rest
    +qat.  Uses module globals Seed (RNG seed) and OverlapFr (allowed
    fractional overlap between discs).  Exits the program after 100000
    failed placement attempts.
    """
    global Seed
    seed(Seed)
    print("Initializing box, please wait...", end='')
    tmp_coord = []
    ntrial = 0
    i = 1
    # Fix first atom (kept inside the box by the radius margin)
    x = random()*(dim[0]-2*radius)+radius
    y = random()*(dim[1]-2*radius)+radius
    nneg = int(float(n) * frac_neg)
    charge = -qat
    if nneg == 0: charge = qat
    tmp_coord.append([x,y,charge])
    # First place the remaining negative particles, then the positive ones
    for negative in [-1, 1]:
        while negative == -1 and i < nneg or negative == 1 and i < n:
            x = random()*(dim[0]-2*radius)+radius
            y = random()*(dim[1]-2*radius)+radius
            # Check whether the new particle overlaps with an existing one
            OVERLAP = False
            for j in range(i):
                if dist(tmp_coord[j],[x,y]) < (1-OverlapFr)*2*radius:
                    OVERLAP = True
            if not OVERLAP:
                tmp_coord.append([x,y,negative * qat])
                i += 1
            ntrial = ntrial + 1
            # Give up if the box is too crowded to place everything
            if ntrial > 100000:
                print('error')
                print("Initialisation failed")
                print("==> Reduce radius or number of atoms")
                sys.exit()
    print("done")
    return tmp_coord
# Reassign particle charges in place
def InitCharge(n, qat, frac_neg):
    """Rewrite the charge column of the global Atom_Coord list.

    The first int(n * frac_neg) particles become -qat and the remainder
    +qat (all positive when no negatives are requested).
    """
    global Atom_Coord
    print("Initializing charges, please wait...", end='')
    nneg = int(float(n) * frac_neg)
    # First particle is positive only when no negative charges are wanted.
    Atom_Coord[0][2] = qat if nneg == 0 else -qat
    for i in range(1, nneg):
        Atom_Coord[i][2] = -qat
    for i in range(max(1, nneg), n):
        Atom_Coord[i][2] = qat
    print("done")
# Generates initial velocities according to the Maxwell distribution
def InitVel(n,temperature,cstboltz,mass):
    """Draw n 2-D velocities from a Maxwell-Boltzmann distribution.

    The centre-of-mass motion is removed and the result is rescaled so the
    kinetic temperature matches *temperature* exactly.  Uses the module
    global Seed for reproducibility.

    Fixes vs. the original: the Box-Muller transform now uses cos(2*pi*r2)
    and sin(2*pi*r2) (the original used cos(r2)/sin(0.5*r2), which does not
    produce Gaussian deviates), and the log() argument is 1-random() so it
    can never be exactly zero.
    """
    from math import pi  # local import: only 2*pi is needed here
    global Seed
    seed(Seed)
    stdev=sqrt(cstboltz*temperature/mass)
    print("Initializing velocities, please wait...", end='')
    tmp_vel=[]
    for i in range(n):
        # Box-Muller: two independent uniforms -> two independent Gaussians
        r1 = 1.0 - random()   # in (0, 1]; log(r1) is always finite
        r2 = random()
        x1 = sqrt(-2.0*log(r1))*cos(2.0*pi*r2)*stdev
        x2 = sqrt(-2.0*log(r1))*sin(2.0*pi*r2)*stdev
        tmp_vel.append([x1,x2])
    # Remove overall centre-of-mass motion so total momentum is zero
    vxt=0.0
    vyt=0.0
    for item in tmp_vel:
        vxt+=item[0]
        vyt+=item[1]
    for item in tmp_vel:
        item[0] -= vxt/float(n)
        item[1] -= vyt/float(n)
    # Scaling factor makes the kinetic temperature exactly the desired one
    kin,tt=calculate_temperature(tmp_vel,n,cstboltz,mass)
    scaling=sqrt(temperature/tt)
    vel=[]
    for item in tmp_vel:
        vel.append([item[0]*scaling, item[1]*scaling])
    print("done")
    return vel
########################################
# Various functions for input + layout #
########################################
# Setup system
def set_up_atoms(repack=1):
    """(Re)build the particle system and draw it on the canvas.

    repack=1: new random coordinates, colours and velocities
    repack=2: keep coordinates but reassign charges and colours
    any other value: redraw the existing state only
    """
    global Iterations,Velocity,Temperature,Mass,cstboltz,atom_canvas,ATOM,Atom_Coord,Color
    ATOM = []
    if repack==1:
        Atom_Coord = InitConf(nAtoms,BoxDim,Radius,qat,frac_neg)
        Color = []
        for i in range(nAtoms):
            Color.append(charge_color(Atom_Coord[i][2],qat))
        Velocity=InitVel(nAtoms,Temperature,cstboltz,Mass)
    if repack==2:
        InitCharge(nAtoms,qat,frac_neg)
        Color = []
        for i in range(nAtoms):
            Color.append(charge_color(Atom_Coord[i][2],qat))
    # Draw one oval per particle and remember its canvas id in ATOM
    for (color, atom) in zip(Color, Atom_Coord):
        x, y = atom[0], atom[1]
        ATOM.append(atom_canvas.create_oval(x + Radius,y + Radius,x - Radius,y - Radius,fill=color))
    update_energy()
# GUI callbacks: each reads its Tk variable into the matching module global
# and refreshes the canvas.  update_canvas(1) repacks coordinates,
# update_canvas(2) reassigns charges, update_canvas(0) only redraws.
# Set number of particles
def set_r(event):
    global nAtoms
    nAtoms=int(r.get())
    update_canvas()
# Set atom Radius
def set_size(event):
    global Radius,Rmin
    Radius=int(size.get())
    Rmin = 2 * Radius        # LJ minimum distance tracks the drawn radius
    update_canvas()
# Set epsilon for Lennard-Jones
def set_vdw1(event):
    global Epsilon
    Epsilon=int(vdw1.get())
    update_canvas(0)
# Set sigma for Lennard-Jones
def set_vdw2(event):
    global Rmin
    Rmin=int(vdw2.get())
    update_canvas(0)
# Set charge fraction
def set_frac(event):
    global frac_neg
    frac_neg=float(frac.get())
    update_canvas(2)
# Set particle charge
def set_q(event):
    global qat
    qat=float(q.get())
    update_canvas(2)
# Set dielectric constant
def set_diel(event):
    global Dielec
    Dielec=float(diel.get())
    update_canvas(0)
# Set Temperature
def set_temp(event):
    global Temperature
    Temperature=float(temp.get())
    update_canvas(0)
# Set integration timestep (also redraws the initial velocities)
def set_tstep(event):
    global timestep,Velocity,nAtoms,Temperature,cstboltz,Mass
    timestep=float(tstep.get())
    update_canvas(0)
    Velocity=InitVel(nAtoms,Temperature,cstboltz,Mass)
# Set minimum Force norm difference for stop condition
def set_dFmin(event):
    global normFmin
    normFmin=float(Fmin.get())
    update_canvas(0)
# Set minimum Energy difference for stop condition
def set_deltaE(event):
    global deltaE
    deltaE=float(Emin.get())
    update_canvas(0)
# Set initial displacement for minimizer
def set_dxstep(event):
    global drinit
    drinit=float(dxstep.get())
    update_canvas(0)
# Set alpha factor for increasing dr
def set_alpha(event):
    global alpha
    alpha=float(alphafactor.get())
    update_canvas(0)
# Set beta factor for decreasing dr
def set_beta(event):
    global beta
    beta=float(betafactor.get())
    update_canvas(0)
# Update energy
def update_energy():
    """Recompute energies and temperature; refresh the report labels."""
    global Atom_Coord,BoxDim,Epsilon,Rmin,CutOffSquare,Iterations,Ene
    global Dielec,Velocity,Mass,cstboltz
    Ene,EneLJ,EneCoul,ELJ2,ELJ4 = calculate_energy(Atom_Coord,Epsilon,Rmin,Dielec,CutOffSquare,BoxDim)
    Kin,temperature=calculate_temperature(Velocity,nAtoms,cstboltz,Mass)
    report_var_time.set("Step: %d Time: %8.3f" % (Iterations,float(Iterations)*timestep))
    report_var_total.set("Etot: %6.1f Ekin: %6.1f Epot: %6.1f" % (Ene+Kin,Kin,Ene))
    # Optionally also show the LJ sums under the 2*rmin / 4*rmin cutoffs
    if ShowOtherEnergyCutoffResults:
        report_var_subenergies.set("Elj: %6.2f Elj2: %6.2f Elj4: %6.2f Ecoul: %6.1f Temp: %6.1f" % (EneLJ,ELJ2,ELJ4,EneCoul,temperature))
    else:
        report_var_subenergies.set("Elj: %6.1f Ecoul: %6.1f Temp: %6.1f" % (EneLJ,EneCoul,temperature))
def update_canvas(repack=1):
    """Clear the canvas, rebuild the system and reset the step counter.

    repack is forwarded to set_up_atoms (1 = new coordinates, 2 = new
    charges only, any other value = plain redraw).
    """
    global Iterations, atom_canvas, Atom_Coord, ATOM, Color
    atom_canvas.delete("all")
    set_up_atoms(repack)
    update_energy()
    Iterations = 0
################
# MAIN PROGRAM #
################
def die(*args):
    """Quit the application.

    Accepts (and ignores) the optional Tk event argument so the same
    function works both as a button command and as a key binding — Tk
    passes an Event object to bound callbacks, and the original
    zero-argument signature raised TypeError when <Escape>/<Control-c>
    fired.
    """
    sys.exit()
def select_minimizer(*args):
    """Notebook tab-change handler: pick Steepest Descent or Verlet."""
    global Minimizer, minimizer_selector
    reset()
    if minimizer_selector.index('current') == 0: # First tab is Steepest Descent
        Minimizer = Minimizers.SteepestDescent
        selected_method_text.set("Active method: Steepest Descent")
    else:
        Minimizer = Minimizers.Verlet
        selected_method_text.set("Active method: Verlet")
def start():
    # Kick off the simulation loop unless it is already scheduled.
    global canvas_event
    if canvas_event==None:
        simulate()
def stop():
    # Cancel the pending after() callback and redraw the current state.
    global canvas_event
    if canvas_event != None: atom_canvas.after_cancel(canvas_event)
    canvas_event = None
    update_canvas(0)
def reset():
    # Stop the loop and rebuild the whole system from scratch.
    global canvas_event
    if canvas_event != None: atom_canvas.after_cancel(canvas_event)
    canvas_event = None
    update_canvas()
root = Tk()
root.winfo_toplevel().title("MolMod Practical")
root.bind("<Escape>", die)
root.bind('<Control-c>', die)
# Frame layout: title + report labels on top, buttons below them
top=Frame(root)
top.pack(side='top')
title=Frame(top)
title.pack(side='top')
labels=Frame(top)
labels.pack(side='top')
buttons=Frame(top)
buttons.pack(side='bottom')
# Drawing surface sized to the simulation box
atom_canvas=Canvas(root, width=BoxDim[0], height=BoxDim[1],bg="#ccddff")
atom_canvas.pack()
# Tabbed selector: one tab per method, each holding its parameter entries
minimizer_selector=Notebook(root)
minimizer_selector.pack(side='bottom')
steepest_descent_pack=Frame(minimizer_selector)
steepest_descent_pack.pack(side='top')
verlet_pack=Frame(minimizer_selector)
verlet_pack.pack(side='top')
Style().configure("Notebook", foreground="black")
minimizer_selector.add(steepest_descent_pack, text="Steepest Descent")
minimizer_selector.add(verlet_pack, text="Verlet")
minimizer_selector.bind("<<NotebookTabChanged>>", select_minimizer)
minimizer_selector.select(1)
selected_method=Frame(root)
selected_method.pack(side='bottom')
low2=Frame(root)
low2.pack(side='bottom')
low1=Frame(root)
low1.pack(side='bottom')
# Tk variables backing the parameter entry fields
r=DoubleVar()
size=DoubleVar()
vdw1=DoubleVar()
vdw2=DoubleVar()
frac=DoubleVar()
diel=DoubleVar()
q=DoubleVar()
temp=DoubleVar()
tstep=DoubleVar()
Emin=DoubleVar()
Fmin=DoubleVar()
alphafactor=DoubleVar()
betafactor=DoubleVar()
# NOTE(review): q and temp are re-created here, discarding the instances
# created above — harmless but redundant.
q=DoubleVar()
temp=DoubleVar()
dxstep=DoubleVar()
def create_entry(pack, text, var, bound_var, callback):
    """Add a labelled Entry widget to *pack*.

    The entry edits the Tk variable *var* (initialised from *bound_var*)
    and invokes *callback* on Return and on focus-out.
    """
    Label(pack, text=text + " =").pack(side='left')
    var.set(bound_var)
    entry = Entry(pack, width=6, textvariable=var)
    entry.pack(side='left')
    for trigger in ('<Return>', '<FocusOut>'):
        entry.bind(trigger, callback)
# Set up the general parameters (label text, Tk variable, initial value,
# callback that commits the edit)
create_entry(low1, "Atoms", r, nAtoms, set_r)
create_entry(low1, "VDW radius", size, Radius, set_size)
create_entry(low1, "VDW ε", vdw1, Epsilon, set_vdw1)
create_entry(low1, "VDW σ", vdw2, Rmin, set_vdw2)
create_entry(low2, "Coulomb param: fraction negative", frac, frac_neg, set_frac)
create_entry(low2, "Charge", q, qat, set_q)
create_entry(low2, "Dielec", diel, Dielec, set_diel)
# Steepest Descent parameters
create_entry(steepest_descent_pack, "DeltaE threshold", Emin, deltaE, set_deltaE)
create_entry(steepest_descent_pack, "dFmin", Fmin, normFmin, set_dFmin)
create_entry(steepest_descent_pack, "dr init", dxstep, drinit, set_dxstep)
create_entry(steepest_descent_pack, "α", alphafactor, alpha, set_alpha)
create_entry(steepest_descent_pack, "β", betafactor, beta, set_beta)
# Verlet parameters
create_entry(verlet_pack, "T (K)", temp, Temperature, set_temp)
create_entry(verlet_pack, "Timestep", tstep, timestep, set_tstep)
# Set up title
Label(title,text="EM & MD",foreground='blue',font='times 18 bold').pack(side='left')
# Set up reporting labels (updated from update_energy / the simulation loop)
report_var_time = StringVar()
Label(labels,textvariable=report_var_time).pack(side='top')
report_var_total = StringVar()
Label(labels,textvariable=report_var_total).pack(side='top')
report_var_subenergies = StringVar()
Label(labels,textvariable=report_var_subenergies).pack(side='top')
selected_method_text = StringVar()
Label(selected_method,textvariable=selected_method_text).pack(side='top')
# Set up buttons
Style().configure("TButton", padding=1, relief="flat")
Style().configure("Start.TButton", foreground='blue')
Style().configure("Stop.TButton", foreground='red')
Style().configure("Reset.TButton", foreground='green')
Button(buttons,text='Start',command=start,style="Start.TButton").pack(side='left',fill='x')
Button(buttons,text='Stop',command=stop,style="Stop.TButton").pack(side='left')
Button(buttons,text='Reset',command=reset,style="Reset.TButton").pack(side='left')
# Set up the positions of the atoms and start the simulation
set_up_atoms()
print("Click on 'Start' to go ahead")
print("Use <ESC> or 'X' to quit")
root.mainloop()
| true | true |
f71176f4acbc961020e19a3b62348580e1cc273d | 1,242 | py | Python | setup.py | parlarjb/github-cli | c5b4166976bbf94fc3f929cc369ce094bc02b88e | [
"BSD-3-Clause"
] | 2 | 2016-05-09T15:32:35.000Z | 2016-07-19T11:39:21.000Z | setup.py | parlarjb/github-cli | c5b4166976bbf94fc3f929cc369ce094bc02b88e | [
"BSD-3-Clause"
] | null | null | null | setup.py | parlarjb/github-cli | c5b4166976bbf94fc3f929cc369ce094bc02b88e | [
"BSD-3-Clause"
] | null | null | null | import os
from setuptools import setup, find_packages

version = '0.2.5.2'
description = "A command-line interface to the GitHub Issues API v2."
cur_dir = os.path.dirname(__file__)
try:
    # Read the long description from README.rst.  Use a context manager so
    # the handle is closed (the original leaked it), and catch only I/O
    # errors instead of the original bare `except:` which swallowed
    # everything, including KeyboardInterrupt.
    with open(os.path.join(cur_dir, 'README.rst')) as readme:
        long_description = readme.read()
except (IOError, OSError):
    long_description = description

setup(
    name = "github-cli",
    version = version,
    url = 'http://jsmits.github.com/github-cli',
    license = 'BSD',
    description = description,
    long_description = long_description,
    author = 'Sander Smits',
    author_email = 'jhmsmits@gmail.com',
    packages = find_packages('src'),
    package_dir = {'': 'src'},
    install_requires = ['setuptools', 'simplejson'],
    entry_points="""
    [console_scripts]
    ghi = github.issues:main
    """,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Topic :: Software Development :: Bug Tracking',
    ],
    test_suite = 'nose.collector',
)
| 28.883721 | 71 | 0.633655 | import os
from setuptools import setup, find_packages
version = '0.2.5.2'
description = "A command-line interface to the GitHub Issues API v2."
cur_dir = os.path.dirname(__file__)
try:
long_description = open(os.path.join(cur_dir, 'README.rst')).read()
except:
long_description = description
setup(
name = "github-cli",
version = version,
url = 'http://jsmits.github.com/github-cli',
license = 'BSD',
description = description,
long_description = long_description,
author = 'Sander Smits',
author_email = 'jhmsmits@gmail.com',
packages = find_packages('src'),
package_dir = {'': 'src'},
install_requires = ['setuptools', 'simplejson'],
entry_points="""
[console_scripts]
ghi = github.issues:main
""",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Software Development :: Bug Tracking',
],
test_suite = 'nose.collector',
)
| true | true |
f711771424da5145f12842b6f940f779e986d6f7 | 385 | py | Python | customer/migrations/0002_ordermodel_is_shipped.py | rishav142k/deliver-app | 89905c7341fdf17d124aec5042e2ff90ddd455e8 | [
"Unlicense",
"MIT"
] | null | null | null | customer/migrations/0002_ordermodel_is_shipped.py | rishav142k/deliver-app | 89905c7341fdf17d124aec5042e2ff90ddd455e8 | [
"Unlicense",
"MIT"
] | null | null | null | customer/migrations/0002_ordermodel_is_shipped.py | rishav142k/deliver-app | 89905c7341fdf17d124aec5042e2ff90ddd455e8 | [
"Unlicense",
"MIT"
] | null | null | null | # Generated by Django 3.1.4 on 2021-01-04 04:42
# Auto-generated Django migration — edit with care (Django 3.1.4).
from django.db import migrations, models


class Migration(migrations.Migration):
    """Add the boolean `is_shipped` flag (default False) to OrderModel."""

    dependencies = [
        ('customer', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='ordermodel',
            name='is_shipped',
            field=models.BooleanField(default=False),
        ),
    ]
| 20.263158 | 53 | 0.597403 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customer', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='ordermodel',
name='is_shipped',
field=models.BooleanField(default=False),
),
]
| true | true |
f711775130951f9f56a2eb5a79b1c5e85a13ea77 | 2,614 | py | Python | var/spack/repos/builtin/packages/dftfe/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/dftfe/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/dftfe/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Dftfe(CMakePackage):
    """Real-space DFT calculations using Finite Elements"""

    homepage = "https://sites.google.com/umich.edu/dftfe/"
    url = "https://github.com/dftfeDevelopers/dftfe/archive/0.5.1.tar.gz"
    maintainers = ['rmsds']

    version('0.6.0', sha256='66b633a3aae2f557f241ee45b2faa41aa179e4a0bdf39c4ae2e679a2970845a1')
    version('0.5.2', sha256='9dc4fa9f16b00be6fb1890d8af4a1cd3e4a2f06a2539df999671a09f3d26ec64')
    version('0.5.1', sha256='e47272d3783cf675dcd8bc31da07765695164110bfebbbab29f5815531f148c1')
    version('0.5.0', sha256='9aadb9a9b059f98f88c7756b417423dc67d02f1cdd2ed7472ba395fcfafc6dcb')

    variant('scalapack', default=True, description='Use ScaLAPACK, strongly recommended for problem sizes >5000 electrons')
    variant('build_type', default='Release',
            description='The build type to build',
            values=('Debug', 'Release'))

    depends_on('mpi')
    depends_on('dealii+p4est+petsc+slepc+int64+scalapack+mpi')
    # 0.5.1 and later require deal.II >= 9.0.0
    depends_on('dealii+p4est+petsc+slepc+int64+scalapack+mpi@9.0.0:', when='@0.5.1:')
    depends_on('scalapack', when='+scalapack')
    depends_on('alglib')
    depends_on('libxc')
    depends_on('spglib')
    depends_on('libxml2')

    def cmake_args(self):
        """Point CMake at the MPI wrappers and the prefixes of the
        dependencies that do not export CMake config files."""
        spec = self.spec
        args = [
            '-DCMAKE_C_COMPILER={0}'.format(spec['mpi'].mpicc),
            '-DCMAKE_CXX_COMPILER={0}'.format(spec['mpi'].mpicxx),
            '-DALGLIB_DIR={0}'.format(spec['alglib'].prefix),
            '-DLIBXC_DIR={0}'.format(spec['libxc'].prefix),
            '-DXML_LIB_DIR={0}/lib'.format(spec['libxml2'].prefix),
            '-DXML_INCLUDE_DIR={0}/include'.format(spec['libxml2'].prefix),
            '-DSPGLIB_DIR={0}'.format(spec['spglib'].prefix),
        ]
        if spec.satisfies('^intel-mkl'):
            args.append('-DWITH_INTEL_MKL=ON')
        else:
            args.append('-DWITH_INTEL_MKL=OFF')
        if spec.satisfies('%gcc'):
            # Needed to compile these releases with GCC
            args.append('-DCMAKE_C_FLAGS=-fpermissive')
            args.append('-DCMAKE_CXX_FLAGS=-fpermissive')
        return args

    @when('@:0.5.2')
    def install(self, spec, prefix):
        # Releases up to 0.5.2 provide no install target; copy the binary
        # and the shared library into place manually.
        mkdirp(prefix.bin)
        mkdirp(prefix.lib64)
        install(join_path(self.build_directory, 'main'),
                join_path(prefix.bin, 'dftfe'))
        install(join_path(self.build_directory, 'libdftfe.so'),
                prefix.lib64)
| 39.606061 | 123 | 0.65417 |
from spack import *
class Dftfe(CMakePackage):
homepage = "https://sites.google.com/umich.edu/dftfe/"
url = "https://github.com/dftfeDevelopers/dftfe/archive/0.5.1.tar.gz"
maintainers = ['rmsds']
version('0.6.0', sha256='66b633a3aae2f557f241ee45b2faa41aa179e4a0bdf39c4ae2e679a2970845a1')
version('0.5.2', sha256='9dc4fa9f16b00be6fb1890d8af4a1cd3e4a2f06a2539df999671a09f3d26ec64')
version('0.5.1', sha256='e47272d3783cf675dcd8bc31da07765695164110bfebbbab29f5815531f148c1')
version('0.5.0', sha256='9aadb9a9b059f98f88c7756b417423dc67d02f1cdd2ed7472ba395fcfafc6dcb')
variant('scalapack', default=True, description='Use ScaLAPACK, strongly recommended for problem sizes >5000 electrons')
variant('build_type', default='Release',
description='The build type to build',
values=('Debug', 'Release'))
depends_on('mpi')
depends_on('dealii+p4est+petsc+slepc+int64+scalapack+mpi')
depends_on('dealii+p4est+petsc+slepc+int64+scalapack+mpi@9.0.0:', when='@0.5.1:')
depends_on('scalapack', when='+scalapack')
depends_on('alglib')
depends_on('libxc')
depends_on('spglib')
depends_on('libxml2')
def cmake_args(self):
spec = self.spec
args = [
'-DCMAKE_C_COMPILER={0}'.format(spec['mpi'].mpicc),
'-DCMAKE_CXX_COMPILER={0}'.format(spec['mpi'].mpicxx),
'-DALGLIB_DIR={0}'.format(spec['alglib'].prefix),
'-DLIBXC_DIR={0}'.format(spec['libxc'].prefix),
'-DXML_LIB_DIR={0}/lib'.format(spec['libxml2'].prefix),
'-DXML_INCLUDE_DIR={0}/include'.format(spec['libxml2'].prefix),
'-DSPGLIB_DIR={0}'.format(spec['spglib'].prefix),
]
if spec.satisfies('^intel-mkl'):
args.append('-DWITH_INTEL_MKL=ON')
else:
args.append('-DWITH_INTEL_MKL=OFF')
if spec.satisfies('%gcc'):
args.append('-DCMAKE_C_FLAGS=-fpermissive')
args.append('-DCMAKE_CXX_FLAGS=-fpermissive')
return args
@when('@:0.5.2')
def install(self, spec, prefix):
mkdirp(prefix.bin)
mkdirp(prefix.lib64)
install(join_path(self.build_directory, 'main'),
join_path(prefix.bin, 'dftfe'))
install(join_path(self.build_directory, 'libdftfe.so'),
prefix.lib64)
| true | true |
f71177de257d0e69f4e091f7b908830363392ce1 | 1,170 | py | Python | label_loader.py | fd873630/las_for_korean_bigginer | fa2a6fbbcaecb01bc649596699617afe4fd1f48d | [
"Apache-2.0"
] | null | null | null | label_loader.py | fd873630/las_for_korean_bigginer | fa2a6fbbcaecb01bc649596699617afe4fd1f48d | [
"Apache-2.0"
] | null | null | null | label_loader.py | fd873630/las_for_korean_bigginer | fa2a6fbbcaecb01bc649596699617afe4fd1f48d | [
"Apache-2.0"
] | null | null | null | """
Copyright 2019-present NAVER Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#-*- coding: utf-8 -*-
def load_label(label_path):
    """Load a character-label table for the ASR model.

    The file is tab-separated: index<TAB>char<TAB>frequency, with '#'
    comment lines.  An empty char field is mapped to a single space (the
    blank/word-separator label).  Returns (char2index, index2char).

    Fixes vs. the original: the file is opened with an explicit utf-8
    encoding so Korean label files load regardless of the platform's
    default locale, the handle is documented as closed by the `with`
    block, and blank lines are skipped instead of raising.
    """
    char2index = dict()  # [ch] = id
    index2char = dict()  # [id] = ch

    # `with` guarantees the file is closed even on error.
    with open(label_path, 'r', encoding='utf-8') as f:
        for no, line in enumerate(f):
            if not line.strip() or line[0] == '#':
                continue
            index, char, freq = line.strip().split('\t')
            char = char.strip()
            if len(char) == 0:
                char = ' '
            char2index[char] = int(index)
            index2char[int(index)] = char

    return char2index, index2char
| 32.5 | 78 | 0.619658 |
def load_label(label_path):
char2index = dict()
index2char = dict()
with open(label_path, 'r') as f:
for no, line in enumerate(f):
if line[0] == '#':
continue
index, char, freq = line.strip().split('\t')
char = char.strip()
if len(char) == 0:
char = ' '
char2index[char] = int(index)
index2char[int(index)] = char
return char2index, index2char
| true | true |
f71178e955fe419d4b14918267313415cc8e3184 | 1,111 | py | Python | tests/template_tests/test_origin.py | KaushikSathvara/django | 3b9fe906bf28d2e748ce4d9a1af5fbcd5df48946 | [
"BSD-3-Clause",
"0BSD"
] | 61,676 | 2015-01-01T00:05:13.000Z | 2022-03-31T20:37:54.000Z | tests/template_tests/test_origin.py | KaushikSathvara/django | 3b9fe906bf28d2e748ce4d9a1af5fbcd5df48946 | [
"BSD-3-Clause",
"0BSD"
] | 8,884 | 2015-01-01T00:12:05.000Z | 2022-03-31T19:53:11.000Z | tests/template_tests/test_origin.py | mustafa0x/django | d7394cfa13a4d1a02356e3a83e10ec100fbb9948 | [
"BSD-3-Clause",
"0BSD"
] | 33,143 | 2015-01-01T02:04:52.000Z | 2022-03-31T19:42:46.000Z | import os
from unittest import TestCase
from django.template import Engine
from .utils import TEMPLATE_DIR
class OriginTestCase(TestCase):
    """Tests for template Origin equality semantics and repr."""

    def setUp(self):
        self.engine = Engine(dirs=[TEMPLATE_DIR])

    def test_origin_compares_equal(self):
        # Two loads of the same template path yield equal origins.
        a = self.engine.get_template('index.html')
        b = self.engine.get_template('index.html')
        self.assertEqual(a.origin, b.origin)
        # Use assertIs() to test __eq__/__ne__.
        self.assertIs(a.origin == b.origin, True)
        self.assertIs(a.origin != b.origin, False)

    def test_origin_compares_not_equal(self):
        # Different template paths yield unequal origins.
        a = self.engine.get_template('first/test.html')
        b = self.engine.get_template('second/test.html')
        self.assertNotEqual(a.origin, b.origin)
        # Use assertIs() to test __eq__/__ne__.
        self.assertIs(a.origin == b.origin, False)
        self.assertIs(a.origin != b.origin, True)

    def test_repr(self):
        a = self.engine.get_template('index.html')
        name = os.path.join(TEMPLATE_DIR, 'index.html')
        self.assertEqual(repr(a.origin), '<Origin name=%r>' % name)
| 33.666667 | 67 | 0.660666 | import os
from unittest import TestCase
from django.template import Engine
from .utils import TEMPLATE_DIR
class OriginTestCase(TestCase):
def setUp(self):
self.engine = Engine(dirs=[TEMPLATE_DIR])
def test_origin_compares_equal(self):
a = self.engine.get_template('index.html')
b = self.engine.get_template('index.html')
self.assertEqual(a.origin, b.origin)
self.assertIs(a.origin == b.origin, True)
self.assertIs(a.origin != b.origin, False)
def test_origin_compares_not_equal(self):
a = self.engine.get_template('first/test.html')
b = self.engine.get_template('second/test.html')
self.assertNotEqual(a.origin, b.origin)
self.assertIs(a.origin == b.origin, False)
self.assertIs(a.origin != b.origin, True)
def test_repr(self):
a = self.engine.get_template('index.html')
name = os.path.join(TEMPLATE_DIR, 'index.html')
self.assertEqual(repr(a.origin), '<Origin name=%r>' % name)
| true | true |
f711796f876d51ba27773e6025a60538502c0b87 | 325 | py | Python | stage_0_Maurya.py | kristahbel/team-greider | 7d44039d2b2c9abf63d0781cf0eb0b07b5b0ae15 | [
"MIT"
] | 9 | 2021-08-01T20:26:55.000Z | 2021-08-07T11:32:25.000Z | stage_0_Maurya.py | kristahbel/team-greider | 7d44039d2b2c9abf63d0781cf0eb0b07b5b0ae15 | [
"MIT"
] | 2 | 2021-08-02T09:08:09.000Z | 2021-08-03T21:10:24.000Z | stage_0_Maurya.py | kristahbel/team-greider | 7d44039d2b2c9abf63d0781cf0eb0b07b5b0ae15 | [
"MIT"
] | 16 | 2021-08-01T19:41:45.000Z | 2021-08-06T09:26:15.000Z | print("NAME: Maurya Sharma \nE-MAIL: mauryasharma2001@gmail.com \nSLACK USERNAME: @Maurya \nBIOSTACK: Drug Development \nTwitter Handle: @Maurya")
def hamming_distance(a, b):
    """Return the number of positions at which *a* and *b* differ.

    The Hamming distance is only defined for equal-length sequences, so a
    ValueError is raised for unequal lengths (the original silently
    truncated when b was longer and raised IndexError when a was longer).
    """
    if len(a) != len(b):
        raise ValueError("hamming_distance requires sequences of equal length")
    return sum(1 for x, y in zip(a, b) if x != y)
print(hamming_distance('@Maurya','@Maurya'))
| 29.545455 | 146 | 0.655385 | print("NAME: Maurya Sharma \nE-MAIL: mauryasharma2001@gmail.com \nSLACK USERNAME: @Maurya \nBIOSTACK: Drug Development \nTwitter Handle: @Maurya")
def hamming_distance(a,b):
count=0
for i in range(len(a)):
if a[i] != b[i]:
count +=1
return count
print(hamming_distance('@Maurya','@Maurya'))
| true | true |
f71179d38880b8bc4de2699f0c56637b7ad4d1ca | 7,757 | py | Python | frappe/core/doctype/report/report.py | linkmultiselect/frappe | 0871db97f6aa9738e6aff169ad2b9853980b0653 | [
"MIT"
] | null | null | null | frappe/core/doctype/report/report.py | linkmultiselect/frappe | 0871db97f6aa9738e6aff169ad2b9853980b0653 | [
"MIT"
] | 5 | 2020-03-24T18:15:00.000Z | 2021-03-25T23:28:34.000Z | frappe/core/doctype/report/report.py | linkmultiselect/frappe | 0871db97f6aa9738e6aff169ad2b9853980b0653 | [
"MIT"
] | null | null | null | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import json, datetime
from frappe import _, scrub
import frappe.desk.query_report
from frappe.utils import cint
from frappe.model.document import Document
from frappe.modules.export_file import export_to_files
from frappe.modules import make_boilerplate
from frappe.core.doctype.page.page import delete_custom_role
from frappe.core.doctype.custom_role.custom_role import get_custom_allowed_roles
from frappe.desk.reportview import append_totals_row
from six import iteritems
from frappe.utils.safe_exec import safe_exec
class Report(Document):
def validate(self):
    """only administrator can save standard report"""
    # Default the module from the reference DocType when not set.
    if not self.module:
        self.module = frappe.db.get_value("DocType", self.ref_doctype, "module")

    if not self.is_standard:
        self.is_standard = "No"
        # In developer mode, reports saved by Administrator become standard.
        if frappe.session.user=="Administrator" and getattr(frappe.local.conf, 'developer_mode',0)==1:
            self.is_standard = "Yes"

    if self.is_standard == "No":
        # allow only script manager to edit scripts
        if frappe.session.user!="Administrator":
            frappe.only_for('Script Manager', True)

        if frappe.db.get_value("Report", self.name, "is_standard") == "Yes":
            frappe.throw(_("Cannot edit a standard report. Please duplicate and create a new report"))

    if self.is_standard == "Yes" and frappe.session.user!="Administrator":
        frappe.throw(_("Only Administrator can save a standard report. Please rename and save."))

    if self.report_type == "Report Builder":
        self.update_report_json()
def before_insert(self):
    # Seed the allowed roles before the document is first saved.
    self.set_doctype_roles()

def on_update(self):
    # Keep the exported .json/.py files in sync in developer mode.
    self.export_doc()

def on_trash(self):
    # Remove any custom role record created for this report.
    delete_custom_role('report', self.name)

def set_doctype_roles(self):
    # Default the report's roles from the reference DocType's permlevel-0
    # permissions (only for non-standard reports without explicit roles).
    if not self.get('roles') and self.is_standard == 'No':
        meta = frappe.get_meta(self.ref_doctype)
        roles = [{'role': d.role} for d in meta.permissions if d.permlevel==0]
        self.set('roles', roles)
def is_permitted(self):
    """Returns true if Has Role is not set or the user is allowed."""
    from frappe.utils import has_common

    allowed = [d.role for d in frappe.get_all("Has Role", fields=["role"],
        filters={"parent": self.name})]

    custom_roles = get_custom_allowed_roles('report', self.name)
    allowed.extend(custom_roles)

    if not allowed:
        return True

    if has_common(frappe.get_roles(), allowed):
        return True
    # NOTE(review): falls through to an implicit None (falsy) when the
    # user has none of the allowed roles.

def update_report_json(self):
    # Report Builder reports keep their configuration in `json`;
    # ensure it is at least an empty JSON object.
    if not self.json:
        self.json = '{}'
def export_doc(self):
    """Export a standard report to its module's files (developer mode only)."""
    if frappe.flags.in_import:
        return

    if self.is_standard == 'Yes' and (frappe.local.conf.get('developer_mode') or 0) == 1:
        export_to_files(record_list=[['Report', self.name]],
            record_module=self.module, create_init=True)

        self.create_report_py()

def create_report_py(self):
    # Scaffold the controller.py / controller.js boilerplate for a new
    # Script Report.
    if self.report_type == "Script Report":
        make_boilerplate("controller.py", self, {"name": self.name})
        make_boilerplate("controller.js", self, {"name": self.name})
def execute_script_report(self, filters):
    """Run the report; auto-mark it as a Prepared Report when slow."""
    # save the timestamp to automatically set to prepared
    threshold = 30  # seconds before a report is flagged as "prepared"
    res = []

    start_time = datetime.datetime.now()

    # The JOB: standard reports run their module, others their server script
    if self.is_standard == 'Yes':
        res = self.execute_module(filters)
    else:
        res = self.execute_script(filters)

    # automatically set as prepared when execution exceeded the threshold
    execution_time = (datetime.datetime.now() - start_time).total_seconds()
    if execution_time > threshold and not self.prepared_report:
        self.db_set('prepared_report', 1)

    frappe.cache().hset('report_execution_time', self.name, execution_time)

    return res
def execute_module(self, filters):
    # report in python module: resolve <module>.report.<name>.<name>.execute
    module = self.module or frappe.db.get_value("DocType", self.ref_doctype, "module")
    method_name = get_report_module_dotted_path(module, self.name) + ".execute"
    return frappe.get_attr(method_name)(frappe._dict(filters))

def execute_script(self, filters):
    # server script: run the stored report_script inside frappe's
    # restricted safe_exec sandbox; the script fills loc['data'].
    loc = {"filters": frappe._dict(filters), 'data':[]}
    safe_exec(self.report_script, None, loc)
    return loc['data']
def get_data(self, filters=None, limit=None, user=None, as_dict=False):
columns = []
out = []
if self.report_type in ('Query Report', 'Script Report', 'Custom Report'):
# query and script reports
data = frappe.desk.query_report.run(self.name, filters=filters, user=user)
for d in data.get('columns'):
if isinstance(d, dict):
col = frappe._dict(d)
if not col.fieldname:
col.fieldname = col.label
columns.append(col)
else:
fieldtype, options = "Data", None
parts = d.split(':')
if len(parts) > 1:
if parts[1]:
fieldtype, options = parts[1], None
if fieldtype and '/' in fieldtype:
fieldtype, options = fieldtype.split('/')
columns.append(frappe._dict(label=parts[0], fieldtype=fieldtype, fieldname=parts[0], options=options))
out += data.get('result')
else:
# standard report
params = json.loads(self.json)
if params.get('fields'):
columns = params.get('fields')
elif params.get('columns'):
columns = params.get('columns')
elif params.get('fields'):
columns = params.get('fields')
else:
columns = [['name', self.ref_doctype]]
for df in frappe.get_meta(self.ref_doctype).fields:
if df.in_list_view:
columns.append([df.fieldname, self.ref_doctype])
_filters = params.get('filters') or []
if filters:
for key, value in iteritems(filters):
condition, _value = '=', value
if isinstance(value, (list, tuple)):
condition, _value = value
_filters.append([key, condition, _value])
def _format(parts):
# sort by is saved as DocType.fieldname, covert it to sql
return '`tab{0}`.`{1}`'.format(*parts)
if params.get('sort_by'):
order_by = _format(params.get('sort_by').split('.')) + ' ' + params.get('sort_order')
elif params.get('order_by'):
order_by = params.get('order_by')
else:
order_by = _format([self.ref_doctype, 'modified']) + ' desc'
if params.get('sort_by_next'):
order_by += ', ' + _format(params.get('sort_by_next').split('.')) + ' ' + params.get('sort_order_next')
result = frappe.get_list(self.ref_doctype,
fields = [_format([c[1], c[0]]) for c in columns],
filters=_filters,
order_by = order_by,
as_list=True,
limit=limit,
user=user)
_columns = []
for (fieldname, doctype) in columns:
meta = frappe.get_meta(doctype)
if meta.get_field(fieldname):
field = meta.get_field(fieldname)
else:
field = frappe._dict(fieldname=fieldname, label=meta.get_label(fieldname))
# since name is the primary key for a document, it will always be a Link datatype
if fieldname == "name":
field.fieldtype = "Link"
field.options = doctype
_columns.append(field)
columns = _columns
out = out + [list(d) for d in result]
if params.get('add_totals_row'):
out = append_totals_row(out)
if as_dict:
data = []
for row in out:
if isinstance(row, (list, tuple)):
_row = frappe._dict()
for i, val in enumerate(row):
_row[columns[i].get('fieldname')] = val
elif isinstance(row, dict):
# no need to convert from dict to dict
_row = frappe._dict(row)
data.append(_row)
else:
data = out
return columns, data
    @Document.whitelist
    def toggle_disable(self, disable):
        """Set or clear this report's `disabled` flag (whitelisted for RPC)."""
        self.db_set("disabled", cint(disable))
@frappe.whitelist()
def is_prepared_report_disabled(report):
    """Return 1 if prepared-report mode is disabled for `report`, else 0."""
    return frappe.db.get_value('Report',
        report, 'disable_prepared_report') or 0
def get_report_module_dotted_path(module, report_name):
    """Dotted import path of a standard report's python module:
    `<app>.<module>.report.<report>.<report>`."""
    app = frappe.local.module_app[scrub(module)]
    mod, rep = scrub(module), scrub(report_name)
    return "{0}.{1}.report.{2}.{3}".format(app, mod, rep, rep)
| 30.904382 | 107 | 0.695114 |
from __future__ import unicode_literals
import frappe
import json, datetime
from frappe import _, scrub
import frappe.desk.query_report
from frappe.utils import cint
from frappe.model.document import Document
from frappe.modules.export_file import export_to_files
from frappe.modules import make_boilerplate
from frappe.core.doctype.page.page import delete_custom_role
from frappe.core.doctype.custom_role.custom_role import get_custom_allowed_roles
from frappe.desk.reportview import append_totals_row
from six import iteritems
from frappe.utils.safe_exec import safe_exec
class Report(Document):
    """Controller for the `Report` DocType.

    Covers standard (shipped on disk) and non-standard reports of the
    Report Builder, Query Report, Script Report and Custom Report types.
    """

    def validate(self):
        """Fill defaults and enforce standard-report editing rules."""
        if not self.module:
            self.module = frappe.db.get_value("DocType", self.ref_doctype, "module")
        if not self.is_standard:
            self.is_standard = "No"
            # Administrator in developer mode creates standard reports.
            if frappe.session.user=="Administrator" and getattr(frappe.local.conf, 'developer_mode',0)==1:
                self.is_standard = "Yes"
        if self.is_standard == "No":
            # Only Script Managers (or Administrator) may save scripts.
            if frappe.session.user!="Administrator":
                frappe.only_for('Script Manager', True)
            # A standard report cannot be downgraded to non-standard.
            if frappe.db.get_value("Report", self.name, "is_standard") == "Yes":
                frappe.throw(_("Cannot edit a standard report. Please duplicate and create a new report"))
        if self.is_standard == "Yes" and frappe.session.user!="Administrator":
            frappe.throw(_("Only Administrator can save a standard report. Please rename and save."))
        if self.report_type == "Report Builder":
            self.update_report_json()

    def before_insert(self):
        """Seed default roles before the document is first saved."""
        self.set_doctype_roles()

    def on_update(self):
        """Export standard reports to files after save."""
        self.export_doc()

    def on_trash(self):
        """Clean up any custom role mapping created for this report."""
        delete_custom_role('report', self.name)

    def set_doctype_roles(self):
        """Default `roles` (non-standard reports only) to the permlevel-0
        roles of the reference DocType's permissions."""
        if not self.get('roles') and self.is_standard == 'No':
            meta = frappe.get_meta(self.ref_doctype)
            roles = [{'role': d.role} for d in meta.permissions if d.permlevel==0]
            self.set('roles', roles)

    def is_permitted(self):
        """Return True if no role restriction applies or the current user
        holds an allowed role; falls through (None) otherwise."""
        from frappe.utils import has_common
        allowed = [d.role for d in frappe.get_all("Has Role", fields=["role"],
            filters={"parent": self.name})]
        custom_roles = get_custom_allowed_roles('report', self.name)
        allowed.extend(custom_roles)
        if not allowed:
            return True
        if has_common(frappe.get_roles(), allowed):
            return True

    def update_report_json(self):
        """Ensure `json` holds at least an empty JSON object."""
        if not self.json:
            self.json = '{}'

    def export_doc(self):
        """Write standard reports to disk (developer mode only; skipped
        during imports)."""
        if frappe.flags.in_import:
            return
        if self.is_standard == 'Yes' and (frappe.local.conf.get('developer_mode') or 0) == 1:
            export_to_files(record_list=[['Report', self.name]],
                record_module=self.module, create_init=True)
            self.create_report_py()

    def create_report_py(self):
        """Scaffold controller .py/.js boilerplate for Script Reports."""
        if self.report_type == "Script Report":
            make_boilerplate("controller.py", self, {"name": self.name})
            make_boilerplate("controller.js", self, {"name": self.name})

    def execute_script_report(self, filters):
        """Run the script report; mark it prepared when execution exceeds
        `threshold` seconds and cache the measured execution time."""
        threshold = 30
        res = []
        start_time = datetime.datetime.now()
        if self.is_standard == 'Yes':
            res = self.execute_module(filters)
        else:
            res = self.execute_script(filters)
        execution_time = (datetime.datetime.now() - start_time).total_seconds()
        if execution_time > threshold and not self.prepared_report:
            self.db_set('prepared_report', 1)
        frappe.cache().hset('report_execution_time', self.name, execution_time)
        return res

    def execute_module(self, filters):
        """Resolve and run a standard report's python `execute` function."""
        module = self.module or frappe.db.get_value("DocType", self.ref_doctype, "module")
        method_name = get_report_module_dotted_path(module, self.name) + ".execute"
        return frappe.get_attr(method_name)(frappe._dict(filters))

    def execute_script(self, filters):
        """Run a non-standard report's server script in a sandbox."""
        loc = {"filters": frappe._dict(filters), 'data':[]}
        safe_exec(self.report_script, None, loc)
        return loc['data']

    def get_data(self, filters=None, limit=None, user=None, as_dict=False):
        """Run the report and return `(columns, data)`; Report Builder
        reports are translated into a `frappe.get_list` call."""
        columns = []
        out = []
        if self.report_type in ('Query Report', 'Script Report', 'Custom Report'):
            # Delegate to the query-report runner and normalize columns.
            data = frappe.desk.query_report.run(self.name, filters=filters, user=user)
            for d in data.get('columns'):
                if isinstance(d, dict):
                    col = frappe._dict(d)
                    if not col.fieldname:
                        col.fieldname = col.label
                    columns.append(col)
                else:
                    # "Label:Fieldtype/Options" string form.
                    fieldtype, options = "Data", None
                    parts = d.split(':')
                    if len(parts) > 1:
                        if parts[1]:
                            fieldtype, options = parts[1], None
                            if fieldtype and '/' in fieldtype:
                                fieldtype, options = fieldtype.split('/')
                    columns.append(frappe._dict(label=parts[0], fieldtype=fieldtype, fieldname=parts[0], options=options))
            out += data.get('result')
        else:
            # Report Builder: settings live in the saved `json`.
            params = json.loads(self.json)
            if params.get('fields'):
                columns = params.get('fields')
            elif params.get('columns'):
                columns = params.get('columns')
            elif params.get('fields'):
                # NOTE(review): unreachable — duplicates the first branch.
                columns = params.get('fields')
            else:
                columns = [['name', self.ref_doctype]]
                for df in frappe.get_meta(self.ref_doctype).fields:
                    if df.in_list_view:
                        columns.append([df.fieldname, self.ref_doctype])
            _filters = params.get('filters') or []
            if filters:
                for key, value in iteritems(filters):
                    condition, _value = '=', value
                    if isinstance(value, (list, tuple)):
                        condition, _value = value
                    _filters.append([key, condition, _value])
            def _format(parts):
                # sort_by is stored as "DocType.fieldname"; convert to SQL.
                return '`tab{0}`.`{1}`'.format(*parts)
            if params.get('sort_by'):
                order_by = _format(params.get('sort_by').split('.')) + ' ' + params.get('sort_order')
            elif params.get('order_by'):
                order_by = params.get('order_by')
            else:
                order_by = _format([self.ref_doctype, 'modified']) + ' desc'
            if params.get('sort_by_next'):
                order_by += ', ' + _format(params.get('sort_by_next').split('.')) + ' ' + params.get('sort_order_next')
            result = frappe.get_list(self.ref_doctype,
                fields = [_format([c[1], c[0]]) for c in columns],
                filters=_filters,
                order_by = order_by,
                as_list=True,
                limit=limit,
                user=user)
            _columns = []
            for (fieldname, doctype) in columns:
                meta = frappe.get_meta(doctype)
                if meta.get_field(fieldname):
                    field = meta.get_field(fieldname)
                else:
                    field = frappe._dict(fieldname=fieldname, label=meta.get_label(fieldname))
                    # `name` is the primary key, so it is always a Link.
                    if fieldname == "name":
                        field.fieldtype = "Link"
                        field.options = doctype
                _columns.append(field)
            columns = _columns
            out = out + [list(d) for d in result]
            if params.get('add_totals_row'):
                out = append_totals_row(out)
        if as_dict:
            data = []
            for row in out:
                if isinstance(row, (list, tuple)):
                    _row = frappe._dict()
                    for i, val in enumerate(row):
                        _row[columns[i].get('fieldname')] = val
                elif isinstance(row, dict):
                    _row = frappe._dict(row)
                data.append(_row)
        else:
            data = out
        return columns, data

    @Document.whitelist
    def toggle_disable(self, disable):
        """Set or clear the report's `disabled` flag (whitelisted)."""
        self.db_set("disabled", cint(disable))
@frappe.whitelist()
def is_prepared_report_disabled(report):
    """Return 1 if prepared-report mode is disabled for `report`, else 0."""
    disabled = frappe.db.get_value('Report', report, 'disable_prepared_report')
    return disabled or 0
def get_report_module_dotted_path(module, report_name):
    """Dotted import path for a standard report's python module."""
    scrubbed_module = scrub(module)
    scrubbed_report = scrub(report_name)
    return ".".join([
        frappe.local.module_app[scrubbed_module],
        scrubbed_module,
        "report",
        scrubbed_report,
        scrubbed_report,
    ])
| true | true |
f7117ab7e8d3065cd1ef2a5ccec476a5acced2e5 | 2,940 | py | Python | third_party/maya/lib/usdMaya/testenv/testUsdMayaBlockSceneModificationContext.py | octarrow/USD | 1845291a9701ab0a3a7d591bc243a1a80fdcba8a | [
"Unlicense"
] | 3 | 2019-02-20T07:34:17.000Z | 2019-08-13T08:17:04.000Z | third_party/maya/lib/usdMaya/testenv/testUsdMayaBlockSceneModificationContext.py | octarrow/USD | 1845291a9701ab0a3a7d591bc243a1a80fdcba8a | [
"Unlicense"
] | null | null | null | third_party/maya/lib/usdMaya/testenv/testUsdMayaBlockSceneModificationContext.py | octarrow/USD | 1845291a9701ab0a3a7d591bc243a1a80fdcba8a | [
"Unlicense"
] | null | null | null | #!/pxrpythonsubst
#
# Copyright 2018 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
import unittest
try:
from pxr import UsdMaya
except ImportError:
from pixar import UsdMaya
from maya import cmds
from maya import standalone
class testUsdMayaBlockSceneModificationContext(unittest.TestCase):
    """Tests that UsdMaya.BlockSceneModificationContext preserves the Maya
    scene's modification status across changes made inside the context."""

    @classmethod
    def setUpClass(cls):
        # Bring up maya.standalone with the usd plugin configuration.
        standalone.initialize('usd')

    @classmethod
    def tearDownClass(cls):
        standalone.uninitialize()

    def _AssertSceneIsModified(self, modified):
        """Assert that the scene's modified flag equals `modified`."""
        isSceneModified = cmds.file(query=True, modified=True)
        self.assertEqual(isSceneModified, modified)

    def setUp(self):
        # Each test starts from a fresh, unmodified scene.
        cmds.file(new=True, force=True)
        self._AssertSceneIsModified(False)

    def testPreserveSceneModified(self):
        """
        Tests that making scene modifications using a
        UsdMayaBlockSceneModificationContext on a scene that has already been
        modified correctly maintains the modification status after the context
        exits.
        """
        # Create a cube to dirty the scene.
        cmds.polyCube()
        self._AssertSceneIsModified(True)

        with UsdMaya.BlockSceneModificationContext():
            # Create a cube inside the context manager.
            cmds.polyCube()

        # The scene should still be modified.
        self._AssertSceneIsModified(True)

    def testPreserveSceneNotModified(self):
        """
        Tests that making scene modifications using a
        UsdMayaBlockSceneModificationContext on a scene that has not been
        modified correctly maintains the modification status after the context
        exits.
        """
        with UsdMaya.BlockSceneModificationContext():
            # Create a cube inside the context manager.
            cmds.polyCube()

        # The scene should NOT be modified.
        self._AssertSceneIsModified(False)
# Run the tests when executed directly (e.g. via mayapy).
if __name__ == '__main__':
    unittest.main(verbosity=2)
| 31.956522 | 78 | 0.709864 |
import unittest
try:
from pxr import UsdMaya
except ImportError:
from pixar import UsdMaya
from maya import cmds
from maya import standalone
class testUsdMayaBlockSceneModificationContext(unittest.TestCase):
    """Tests that UsdMaya.BlockSceneModificationContext preserves the Maya
    scene's modification status across changes made inside the context."""

    @classmethod
    def setUpClass(cls):
        # Bring up maya.standalone with the usd plugin configuration.
        standalone.initialize('usd')

    @classmethod
    def tearDownClass(cls):
        standalone.uninitialize()

    def _AssertSceneIsModified(self, modified):
        """Assert that the scene's modified flag equals `modified`."""
        isSceneModified = cmds.file(query=True, modified=True)
        self.assertEqual(isSceneModified, modified)

    def setUp(self):
        # Each test starts from a fresh, unmodified scene.
        cmds.file(new=True, force=True)
        self._AssertSceneIsModified(False)

    def testPreserveSceneModified(self):
        """An already-modified scene stays modified after the context exits."""
        cmds.polyCube()
        self._AssertSceneIsModified(True)
        with UsdMaya.BlockSceneModificationContext():
            cmds.polyCube()
        self._AssertSceneIsModified(True)

    def testPreserveSceneNotModified(self):
        """An unmodified scene stays unmodified after the context exits."""
        with UsdMaya.BlockSceneModificationContext():
            cmds.polyCube()
        self._AssertSceneIsModified(False)
# Run the tests when executed directly (e.g. via mayapy).
if __name__ == '__main__':
    unittest.main(verbosity=2)
| true | true |
f7117b559401200e7a843fee1491e12e035b59b3 | 5,643 | py | Python | tests/python/relay/test_op_qnn_subtract.py | XiaoSong9905/tvm | 48940f697e15d5b50fa1f032003e6c700ae1e423 | [
"Apache-2.0"
] | 4,640 | 2017-08-17T19:22:15.000Z | 2019-11-04T15:29:46.000Z | tests/python/relay/test_op_qnn_subtract.py | XiaoSong9905/tvm | 48940f697e15d5b50fa1f032003e6c700ae1e423 | [
"Apache-2.0"
] | 3,022 | 2020-11-24T14:02:31.000Z | 2022-03-31T23:55:31.000Z | tests/python/relay/test_op_qnn_subtract.py | XiaoSong9905/tvm | 48940f697e15d5b50fa1f032003e6c700ae1e423 | [
"Apache-2.0"
] | 1,352 | 2017-08-17T19:30:38.000Z | 2019-11-04T16:09:29.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import numpy as np
from tvm import relay
def qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp, data_dtype="uint8"):
    """Build a `qnn.subtract` with the given quantization params, canonicalize
    it, and check each (x, y) input pair against its golden output.

    Args:
        x_datas / y_datas: index-aligned lists of (1, 4) input arrays.
        golden_outputs: expected (1, 4) results, one per input pair.
        scale_and_zp: dict of lhs/rhs/output scales and zero points.
        data_dtype: element type of both input tensors.
    """
    # all x, y and golden outputs should be of the same length
    assert len(x_datas) == len(y_datas)
    assert len(y_datas) == len(golden_outputs)

    x = relay.var("x", shape=(1, 4), dtype=data_dtype)
    y = relay.var("y", shape=(1, 4), dtype=data_dtype)
    lhs_scale = relay.const(scale_and_zp["lhs_scale"], "float32")
    lhs_zp = relay.const(scale_and_zp["lhs_zp"], "int32")
    rhs_scale = relay.const(scale_and_zp["rhs_scale"], "float32")
    rhs_zp = relay.const(scale_and_zp["rhs_zp"], "int32")
    output_scale = relay.const(scale_and_zp["output_scale"], "float32")
    output_zp = relay.const(scale_and_zp["output_zp"], "int32")
    z = relay.qnn.op.subtract(
        lhs=x,
        rhs=y,
        lhs_scale=lhs_scale,
        lhs_zero_point=lhs_zp,
        rhs_scale=rhs_scale,
        rhs_zero_point=rhs_zp,
        output_scale=output_scale,
        output_zero_point=output_zp,
    )
    # Lower the qnn op to core relay ops before execution.
    func = relay.Function([x, y], z)
    mod = tvm.IRModule.from_expr(func)
    mod = relay.transform.InferType()(mod)
    mod = relay.qnn.transform.CanonicalizeOps()(mod)
    func = mod["main"]
    for i in range(0, len(x_datas)):
        x_data = x_datas[i]
        y_data = y_datas[i]
        golden_output = golden_outputs[i]
        op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
            x_data, y_data
        )
        np.testing.assert_equal(op_res.numpy(), golden_output)
def test_tflite_same_io_qnn_params():
    """qnn.subtract where both inputs and the output share the same
    scale/zero-point (values mirror TFLite's quantized subtract)."""
    scale_and_zp = {
        "lhs_scale": 0.00784314,
        "lhs_zp": 127,
        "rhs_scale": 0.00784314,
        "rhs_zp": 127,
        "output_scale": 0.00784314,
        "output_zp": 127,
    }
    x_datas = [
        np.array((140, 153, 165, 178)).reshape((1, 4)),
        np.array((25, 153, 178, 216)).reshape((1, 4)),
        np.array((25, 153, 216, 165)).reshape((1, 4)),
    ]
    y_datas = [
        np.array((204, 178, 165, 140)).reshape((1, 4)),
        np.array((204, 178, 191, 25)).reshape((1, 4)),
        np.array((204, 178, 25, 191)).reshape((1, 4)),
    ]
    golden_outputs = [
        np.array((63, 102, 127, 165)).reshape((1, 4)),
        np.array((0, 102, 114, 255)).reshape((1, 4)),
        np.array((0, 102, 255, 101)).reshape((1, 4)),
    ]
    qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp)
def test_tflite_different_io_qnn_params():
    """qnn.subtract where inputs and output each have distinct
    scale/zero-point parameters (values mirror TFLite)."""
    scale_and_zp = {
        "lhs_scale": 0.0156863,
        "lhs_zp": 127,
        "rhs_scale": 0.0117647,
        "rhs_zp": 85,
        "output_scale": 0.0235294,
        "output_zp": 128,
    }
    x_datas = [
        np.array((76, 140, 153, 172)).reshape((1, 4)),
        np.array((133, 140, 146, 153)).reshape((1, 4)),
        np.array((76, 140, 172, 146)).reshape((1, 4)),
    ]
    y_datas = [
        np.array((136, 119, 128, 17)).reshape((1, 4)),
        np.array((136, 119, 111, 94)).reshape((1, 4)),
        np.array((136, 119, 17, 128)).reshape((1, 4)),
    ]
    golden_outputs = [
        np.array((68, 120, 123, 192)).reshape((1, 4)),
        np.array((106, 120, 128, 140)).reshape((1, 4)),
        np.array((68, 120, 192, 119)).reshape((1, 4)),
    ]
    qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp)
def test_saturation():
    """qnn.subtract cases whose results saturate at the uint8 boundaries,
    covering equal, partially equal, and fully distinct quantization
    parameters for the two inputs and the output."""
    cases = [
        # (quantization params, x values, y values, expected values)
        (
            {"lhs_scale": 0.125, "lhs_zp": 0, "rhs_scale": 0.125, "rhs_zp": 0,
             "output_scale": 0.125, "output_zp": 0},
            (255, 1, 1, 0), (255, 255, 128, 0), (0, 0, 0, 0),
        ),
        (
            {"lhs_scale": 0.125, "lhs_zp": 0, "rhs_scale": 0.125, "rhs_zp": 0,
             "output_scale": 0.25, "output_zp": 0},
            (255, 1, 200, 0), (255, 255, 127, 0), (0, 0, 36, 0),
        ),
        (
            {"lhs_scale": 0.5, "lhs_zp": 0, "rhs_scale": 0.25, "rhs_zp": 0,
             "output_scale": 0.125, "output_zp": 0},
            (255, 0, 1, 0), (0, 128, 64, 0), (255, 0, 0, 0),
        ),
    ]
    for scale_and_zp, x_vals, y_vals, expected in cases:
        x_data = [np.array(x_vals).reshape((1, 4))]
        y_data = [np.array(y_vals).reshape((1, 4))]
        golden_output = [np.array(expected).reshape((1, 4))]
        qnn_subtract_driver(x_data, y_data, golden_output, scale_and_zp)
# Allow running this test file directly.
if __name__ == "__main__":
    test_tflite_same_io_qnn_params()
    test_tflite_different_io_qnn_params()
    test_saturation()
| 34.619632 | 97 | 0.605352 |
import tvm
import numpy as np
from tvm import relay
def qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp, data_dtype="uint8"):
    """Build a `qnn.subtract` with the given quantization params, canonicalize
    it, and check each (x, y) input pair against its golden output."""
    # Inputs and golden outputs must be index-aligned.
    assert len(x_datas) == len(y_datas)
    assert len(y_datas) == len(golden_outputs)
    x = relay.var("x", shape=(1, 4), dtype=data_dtype)
    y = relay.var("y", shape=(1, 4), dtype=data_dtype)
    lhs_scale = relay.const(scale_and_zp["lhs_scale"], "float32")
    lhs_zp = relay.const(scale_and_zp["lhs_zp"], "int32")
    rhs_scale = relay.const(scale_and_zp["rhs_scale"], "float32")
    rhs_zp = relay.const(scale_and_zp["rhs_zp"], "int32")
    output_scale = relay.const(scale_and_zp["output_scale"], "float32")
    output_zp = relay.const(scale_and_zp["output_zp"], "int32")
    z = relay.qnn.op.subtract(
        lhs=x,
        rhs=y,
        lhs_scale=lhs_scale,
        lhs_zero_point=lhs_zp,
        rhs_scale=rhs_scale,
        rhs_zero_point=rhs_zp,
        output_scale=output_scale,
        output_zero_point=output_zp,
    )
    # Lower the qnn op to core relay ops before execution.
    func = relay.Function([x, y], z)
    mod = tvm.IRModule.from_expr(func)
    mod = relay.transform.InferType()(mod)
    mod = relay.qnn.transform.CanonicalizeOps()(mod)
    func = mod["main"]
    for i in range(0, len(x_datas)):
        x_data = x_datas[i]
        y_data = y_datas[i]
        golden_output = golden_outputs[i]
        op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
            x_data, y_data
        )
        np.testing.assert_equal(op_res.numpy(), golden_output)
def test_tflite_same_io_qnn_params():
    """qnn.subtract where inputs and output share scale/zero-point."""
    scale_and_zp = {
        "lhs_scale": 0.00784314,
        "lhs_zp": 127,
        "rhs_scale": 0.00784314,
        "rhs_zp": 127,
        "output_scale": 0.00784314,
        "output_zp": 127,
    }
    x_datas = [
        np.array((140, 153, 165, 178)).reshape((1, 4)),
        np.array((25, 153, 178, 216)).reshape((1, 4)),
        np.array((25, 153, 216, 165)).reshape((1, 4)),
    ]
    y_datas = [
        np.array((204, 178, 165, 140)).reshape((1, 4)),
        np.array((204, 178, 191, 25)).reshape((1, 4)),
        np.array((204, 178, 25, 191)).reshape((1, 4)),
    ]
    golden_outputs = [
        np.array((63, 102, 127, 165)).reshape((1, 4)),
        np.array((0, 102, 114, 255)).reshape((1, 4)),
        np.array((0, 102, 255, 101)).reshape((1, 4)),
    ]
    qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp)
def test_tflite_different_io_qnn_params():
    """qnn.subtract where inputs and output have distinct scale/zero-point."""
    scale_and_zp = {
        "lhs_scale": 0.0156863,
        "lhs_zp": 127,
        "rhs_scale": 0.0117647,
        "rhs_zp": 85,
        "output_scale": 0.0235294,
        "output_zp": 128,
    }
    x_datas = [
        np.array((76, 140, 153, 172)).reshape((1, 4)),
        np.array((133, 140, 146, 153)).reshape((1, 4)),
        np.array((76, 140, 172, 146)).reshape((1, 4)),
    ]
    y_datas = [
        np.array((136, 119, 128, 17)).reshape((1, 4)),
        np.array((136, 119, 111, 94)).reshape((1, 4)),
        np.array((136, 119, 17, 128)).reshape((1, 4)),
    ]
    golden_outputs = [
        np.array((68, 120, 123, 192)).reshape((1, 4)),
        np.array((106, 120, 128, 140)).reshape((1, 4)),
        np.array((68, 120, 192, 119)).reshape((1, 4)),
    ]
    qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp)
def test_saturation():
    """qnn.subtract cases whose results saturate at the uint8 boundaries."""
    # Case 1: identical quantization params on both inputs and the output.
    scale_and_zp = {
        "lhs_scale": 0.125,
        "lhs_zp": 0,
        "rhs_scale": 0.125,
        "rhs_zp": 0,
        "output_scale": 0.125,
        "output_zp": 0,
    }
    x_data = [np.array((255, 1, 1, 0)).reshape((1, 4))]
    y_data = [np.array((255, 255, 128, 0)).reshape((1, 4))]
    golden_output = [np.array((0, 0, 0, 0)).reshape((1, 4))]
    qnn_subtract_driver(x_data, y_data, golden_output, scale_and_zp)
    # Case 2: same input params, different output scale.
    scale_and_zp = {
        "lhs_scale": 0.125,
        "lhs_zp": 0,
        "rhs_scale": 0.125,
        "rhs_zp": 0,
        "output_scale": 0.25,
        "output_zp": 0,
    }
    x_data = [np.array((255, 1, 200, 0)).reshape((1, 4))]
    y_data = [np.array((255, 255, 127, 0)).reshape((1, 4))]
    golden_output = [np.array((0, 0, 36, 0)).reshape((1, 4))]
    qnn_subtract_driver(x_data, y_data, golden_output, scale_and_zp)
    # Case 3: all quantization params distinct.
    scale_and_zp = {
        "lhs_scale": 0.5,
        "lhs_zp": 0,
        "rhs_scale": 0.25,
        "rhs_zp": 0,
        "output_scale": 0.125,
        "output_zp": 0,
    }
    x_data = [np.array((255, 0, 1, 0)).reshape((1, 4))]
    y_data = [np.array((0, 128, 64, 0)).reshape((1, 4))]
    golden_output = [np.array((255, 0, 0, 0)).reshape((1, 4))]
    qnn_subtract_driver(x_data, y_data, golden_output, scale_and_zp)
# Allow running this test file directly.
if __name__ == "__main__":
    test_tflite_same_io_qnn_params()
    test_tflite_different_io_qnn_params()
    test_saturation()
| true | true |
f7117b7fbebe9c690c58224ea3729a3b15dcc160 | 2,501 | py | Python | generate_thumbnails.py | AdamHawtin/bucket-thumbnail-generator | db42e0dca709156513b47f17cd540fb68c1dfe0e | [
"MIT"
] | null | null | null | generate_thumbnails.py | AdamHawtin/bucket-thumbnail-generator | db42e0dca709156513b47f17cd540fb68c1dfe0e | [
"MIT"
] | 6 | 2021-04-30T20:51:43.000Z | 2022-03-11T23:54:26.000Z | generate_thumbnails.py | AdamHawtin/bucket-thumbnail-generator | db42e0dca709156513b47f17cd540fb68c1dfe0e | [
"MIT"
] | null | null | null | import os
import tempfile
from pathlib import Path
from PIL import Image
from google.cloud import storage
from retrying import retry
# Thumbnail edge length in pixels; configurable via the THUMBNAIL_SIZE env var.
THUMBNAIL_SIZE = int(os.getenv('THUMBNAIL_SIZE', '128'))
# Bounding box passed to PIL's thumbnail(); aspect ratio is preserved.
THUMBNAIL_MAX_DIM = THUMBNAIL_SIZE, THUMBNAIL_SIZE
# Suffix appended to thumbnail names; also used to skip re-processing thumbnails.
THUMBNAIL_SUFFIX = f'_thumb{THUMBNAIL_SIZE}'
# Lowercase image file extensions this function will process.
SUPPORTED_FILE_EXTENSIONS = {'jpg', 'jpeg', 'png'}
def receive_event(event, context):
    """Triggered by a change to a Cloud Storage bucket.

    Args:
        event (dict): Event payload.
        context (google.cloud.functions.Context): Metadata for the event.
    """
    file_extension = Path(event['name']).suffix.lstrip('.')
    if not is_event_supported_image(event, file_extension):
        return
    print(event, context)

    bucket = storage.Client().get_bucket(event['bucket'])
    thumbnail_name = get_thumbnail_name(event['name'], file_extension)
    # Stage the source image and the generated thumbnail in temp files.
    with tempfile.NamedTemporaryFile() as image_tmp, tempfile.NamedTemporaryFile() as thumb_tmp:
        get_image_file(event, image_tmp, bucket)
        image_format = generate_and_save_thumbnail(image_tmp.name, thumb_tmp.name)
        upload_thumbnail_to_bucket(bucket, thumb_tmp, thumbnail_name, image_format)
def is_file_extension_supported(file_extension):
    """Return True when `file_extension` (any case) is a supported image type."""
    normalized = file_extension.lower()
    return normalized in SUPPORTED_FILE_EXTENSIONS
def is_event_supported_image(event, file_extension):
    """Return True for uploads we should thumbnail: the content type is
    image/*, the object is not itself a thumbnail, and the file extension
    is supported."""
    if not event['contentType'].startswith('image'):
        return False
    if THUMBNAIL_SUFFIX in event['name']:
        return False
    return is_file_extension_supported(file_extension)
def get_thumbnail_name(image_name, file_extension):
    """Derive the thumbnail object name: `<stem><suffix>.<ext>`."""
    stem = Path(image_name).stem
    return '{}{}.{}'.format(stem, THUMBNAIL_SUFFIX, file_extension)
def generate_and_save_thumbnail(image_file_name, thumbnail_file_name):
    """Write a thumbnail of `image_file_name` to `thumbnail_file_name`.

    Returns the PIL image format (e.g. 'JPEG') so the caller can set the
    matching content type on upload.
    """
    image = Image.open(image_file_name)
    image_format = image.format
    # Image.ANTIALIAS was deprecated and removed in Pillow 10;
    # Image.LANCZOS is the same resampling filter under its current name.
    image.thumbnail(THUMBNAIL_MAX_DIM, Image.LANCZOS)
    image.save(thumbnail_file_name, format=image_format)
    return image_format
@retry(wait_exponential_multiplier=1000, wait_exponential_max=10000)
def upload_thumbnail_to_bucket(bucket, temp_thumb_file, thumbnail_filename, image_format):
    """Upload the thumbnail file to `bucket`, retrying with exponential
    backoff (1s to 10s) on failure."""
    bucket.blob(thumbnail_filename).upload_from_filename(temp_thumb_file.name,
                                                         content_type=f'image/{image_format.lower()}')
@retry(wait_exponential_multiplier=1000, wait_exponential_max=10000)
def get_image_file(event, destination_file, bucket):
    """Download the event's object from `bucket` into `destination_file`,
    retrying with exponential backoff (1s to 10s) on failure."""
    source_blob = bucket.get_blob(event['name'])
    source_blob.download_to_file(destination_file)
| 38.476923 | 110 | 0.745702 | import os
import tempfile
from pathlib import Path
from PIL import Image
from google.cloud import storage
from retrying import retry
# Thumbnail edge length in pixels; configurable via the THUMBNAIL_SIZE env var.
THUMBNAIL_SIZE = int(os.getenv('THUMBNAIL_SIZE', '128'))
# Bounding box passed to PIL's thumbnail(); aspect ratio is preserved.
THUMBNAIL_MAX_DIM = THUMBNAIL_SIZE, THUMBNAIL_SIZE
# Suffix appended to thumbnail names; also used to skip re-processing thumbnails.
THUMBNAIL_SUFFIX = f'_thumb{THUMBNAIL_SIZE}'
# Lowercase image file extensions this function will process.
SUPPORTED_FILE_EXTENSIONS = {'jpg', 'jpeg', 'png'}
def receive_event(event, context):
    """Cloud Function entry point, triggered by a Cloud Storage change:
    generate a thumbnail of a supported uploaded image and store it back
    in the same bucket.

    Args:
        event (dict): Event payload.
        context: Metadata for the event.
    """
    file_extension = Path(event['name']).suffix.lstrip('.')
    if not is_event_supported_image(event, file_extension):
        return
    print(event, context)
    bucket = storage.Client().get_bucket(event['bucket'])
    # Stage the source image and the generated thumbnail in temp files.
    with tempfile.NamedTemporaryFile() as temp_image_file, tempfile.NamedTemporaryFile() as temp_thumb_file:
        get_image_file(event, temp_image_file, bucket)
        image_format = generate_and_save_thumbnail(temp_image_file.name, temp_thumb_file.name)
        upload_thumbnail_to_bucket(bucket, temp_thumb_file, get_thumbnail_name(event['name'], file_extension),
                                   image_format)
def is_file_extension_supported(file_extension):
    """Return True when `file_extension` (any case) is a supported image type."""
    return file_extension.lower() in SUPPORTED_FILE_EXTENSIONS
def is_event_supported_image(event, file_extension):
    """Return True for uploads we should thumbnail: image/* content type,
    not already a thumbnail, and a supported file extension."""
    return (event['contentType'].startswith('image') and
            THUMBNAIL_SUFFIX not in event['name'] and
            is_file_extension_supported(file_extension))
def get_thumbnail_name(image_name, file_extension):
    """Derive the thumbnail object name: `<stem><suffix>.<ext>`."""
    return f'{Path(image_name).stem}{THUMBNAIL_SUFFIX}.{file_extension}'
def generate_and_save_thumbnail(image_file_name, thumbnail_file_name):
    """Write a thumbnail of `image_file_name` to `thumbnail_file_name` and
    return the PIL image format (e.g. 'JPEG') for the upload content type."""
    image = Image.open(image_file_name)
    image_format = image.format
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10;
    # Image.LANCZOS is the same filter's current name.
    image.thumbnail(THUMBNAIL_MAX_DIM, Image.ANTIALIAS)
    image.save(thumbnail_file_name, format=image_format)
    return image_format
@retry(wait_exponential_multiplier=1000, wait_exponential_max=10000)
def upload_thumbnail_to_bucket(bucket, temp_thumb_file, thumbnail_filename, image_format):
    """Upload the thumbnail to `bucket`, retrying with exponential backoff
    (1s to 10s) on failure."""
    bucket.blob(thumbnail_filename).upload_from_filename(temp_thumb_file.name,
                                                         content_type=f'image/{image_format.lower()}')
@retry(wait_exponential_multiplier=1000, wait_exponential_max=10000)
def get_image_file(event, destination_file, bucket):
    """Download the event's object from `bucket` into `destination_file`,
    retrying with exponential backoff (1s to 10s) on failure."""
    blob = bucket.get_blob(event['name'])
    blob.download_to_file(destination_file)
| true | true |
f7117ca57b1ebd5861c109a4273cbe7837519188 | 22,933 | py | Python | libcxx/utils/libcxx/test/config.py | val-verde/llvm-project | a5d4e884dad341ff80fbbdec6e7516b9c58c9eb0 | [
"Apache-2.0"
] | null | null | null | libcxx/utils/libcxx/test/config.py | val-verde/llvm-project | a5d4e884dad341ff80fbbdec6e7516b9c58c9eb0 | [
"Apache-2.0"
] | null | null | null | libcxx/utils/libcxx/test/config.py | val-verde/llvm-project | a5d4e884dad341ff80fbbdec6e7516b9c58c9eb0 | [
"Apache-2.0"
] | null | null | null | #===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import copy
import os
import pkgutil
import pipes
import platform
import re
import shlex
import shutil
import sys
from libcxx.compiler import CXXCompiler
from libcxx.test.target_info import make_target_info
import libcxx.util
import libcxx.test.features
import libcxx.test.newconfig
import libcxx.test.params
import lit
def loadSiteConfig(lit_config, config, param_name, env_name):
    """Load the site-specific lit configuration, located via the lit
    parameter `param_name` or, failing that, the environment variable
    `env_name`.

    Warns and keeps the default configuration when neither is set; aborts
    if the named file does not exist. While the site config is being
    loaded, `lit_config.load_config` is temporarily stubbed out so the
    site config cannot recursively trigger another load.
    """
    # We haven't loaded the site specific configuration (the user is
    # probably trying to run on a test file directly, and either the site
    # configuration hasn't been created by the build system, or we are in an
    # out-of-tree build situation).
    site_cfg = lit_config.params.get(param_name,
                                     os.environ.get(env_name))
    if not site_cfg:
        lit_config.warning('No site specific configuration file found!'
                           ' Running the tests in the default configuration.')
    elif not os.path.isfile(site_cfg):
        lit_config.fatal(
            "Specified site configuration file does not exist: '%s'" %
            site_cfg)
    else:
        lit_config.note('using site specific configuration at %s' % site_cfg)
        ld_fn = lit_config.load_config

        # Null out the load_config function so that lit.site.cfg doesn't
        # recursively load a config even if it tries.
        # TODO: This is one hell of a hack. Fix it.
        def prevent_reload_fn(*args, **kwargs):
            pass
        lit_config.load_config = prevent_reload_fn
        ld_fn(config, site_cfg)
        lit_config.load_config = ld_fn
def intMacroValue(token):
    """Parse a numeric macro token (e.g. '__cplusplus' expanding to
    '201703L', or a feature-test macro value) into an int, ignoring any
    integer-literal suffix characters (l/L/u/U)."""
    suffix_chars = 'LlUu'
    return int(token.rstrip(suffix_chars))
class Configuration(object):
# pylint: disable=redefined-outer-name
    def __init__(self, lit_config, config):
        """Store the lit configuration handles and default all derived
        settings; `configure()` fills most of these in later."""
        self.lit_config = lit_config
        self.config = config
        # Compiler under test and related paths, set during configuration.
        self.cxx = None
        self.cxx_is_clang_cl = None
        self.cxx_stdlib_under_test = None
        self.project_obj_root = None
        self.libcxx_src_root = None
        self.libcxx_obj_root = None
        self.cxx_library_root = None
        self.cxx_runtime_root = None
        self.abi_library_root = None
        # Build-variant knobs, overridable via lit params.
        self.link_shared = self.get_lit_bool('enable_shared', default=True)
        self.debug_build = self.get_lit_bool('debug_build', default=False)
        self.exec_env = dict()
        self.use_clang_verify = False
def get_lit_conf(self, name, default=None):
val = self.lit_config.params.get(name, None)
if val is None:
val = getattr(self.config, name, None)
if val is None:
val = default
return val
    def get_lit_bool(self, name, default=None, env_var=None):
        """Fetch boolean parameter `name`.

        Accepts real bools or the strings '1'/'true' (True) and
        ''/'0'/'false' (False); any other string is a fatal error. When
        `env_var` is set in the environment it overrides an explicit
        --param value (with a warning).
        """
        def check_value(value, var_name):
            if value is None:
                return default
            if isinstance(value, bool):
                return value
            if not isinstance(value, str):
                raise TypeError('expected bool or string')
            if value.lower() in ('1', 'true'):
                return True
            if value.lower() in ('', '0', 'false'):
                return False
            self.lit_config.fatal(
                "parameter '{}' should be true or false".format(var_name))

        conf_val = self.get_lit_conf(name)
        if env_var is not None and env_var in os.environ and \
                os.environ[env_var] is not None:
            val = os.environ[env_var]
            if conf_val is not None:
                self.lit_config.warning(
                    'Environment variable %s=%s is overriding explicit '
                    '--param=%s=%s' % (env_var, val, name, conf_val))
            return check_value(val, env_var)
        return check_value(conf_val, name)
def make_static_lib_name(self, name):
"""Return the full filename for the specified library name"""
if self.target_info.is_windows() and not self.target_info.is_mingw():
assert name == 'c++' # Only allow libc++ to use this function for now.
return 'lib' + name + '.lib'
else:
return 'lib' + name + '.a'
    def configure(self):
        """Top-level driver: derive the complete test configuration.

        Order matters: the compiler is configured before flags, and flags
        before the substitutions/features that embed them.
        """
        self.target_info = make_target_info(self)
        self.executor = self.get_lit_conf('executor')
        self.configure_cxx()
        self.configure_src_root()
        self.configure_obj_root()
        self.cxx_stdlib_under_test = self.get_lit_conf('cxx_stdlib_under_test', 'libc++')
        self.cxx_library_root = self.get_lit_conf('cxx_library_root', self.libcxx_obj_root)
        # The ABI library defaults to living next to the C++ library.
        self.abi_library_root = self.get_lit_conf('abi_library_root') or self.cxx_library_root
        self.cxx_runtime_root = self.get_lit_conf('cxx_runtime_root', self.cxx_library_root)
        self.abi_runtime_root = self.get_lit_conf('abi_runtime_root', self.abi_library_root)
        self.configure_compile_flags()
        self.configure_link_flags()
        self.configure_env()
        self.configure_coverage()
        self.configure_substitutions()
        self.configure_features()
        # Also run the newer declarative parameter/feature configuration.
        libcxx.test.newconfig.configure(
            libcxx.test.params.DEFAULT_PARAMETERS,
            libcxx.test.features.DEFAULT_FEATURES,
            self.config,
            self.lit_config
        )
        self.lit_config.note("All available features: {}".format(self.config.available_features))
def print_config_info(self):
if self.cxx.use_modules:
self.lit_config.note('Using modules flags: %s' %
self.cxx.modules_flags)
if len(self.cxx.warning_flags):
self.lit_config.note('Using warnings: %s' % self.cxx.warning_flags)
show_env_vars = {}
for k,v in self.exec_env.items():
if k not in os.environ or os.environ[k] != v:
show_env_vars[k] = v
self.lit_config.note('Adding environment variables: %r' % show_env_vars)
self.lit_config.note("Linking against the C++ Library at {}".format(self.cxx_library_root))
self.lit_config.note("Running against the C++ Library at {}".format(self.cxx_runtime_root))
self.lit_config.note("Linking against the ABI Library at {}".format(self.abi_library_root))
self.lit_config.note("Running against the ABI Library at {}".format(self.abi_runtime_root))
sys.stderr.flush() # Force flushing to avoid broken output on Windows
    def get_test_format(self):
        """Return the lit test format object that compiles and runs tests."""
        from libcxx.test.format import LibcxxTestFormat
        return LibcxxTestFormat(
            self.cxx,
            self.use_clang_verify,
            self.executor,
            exec_env=self.exec_env)
    def configure_cxx(self):
        """Determine the compiler under test and wrap it in a CXXCompiler.

        Fatal if no compiler can be determined.  When the configured
        compiler is clang-cl, a plain clang++ is located to drive it.
        """
        # Gather various compiler parameters.
        cxx = self.get_lit_conf('cxx_under_test')
        self.cxx_is_clang_cl = cxx is not None and \
            os.path.basename(cxx).startswith('clang-cl')
        # If no specific cxx_under_test was given, attempt to infer it as
        # clang++.
        if cxx is None or self.cxx_is_clang_cl:
            search_paths = self.config.environment['PATH']
            if cxx is not None and os.path.isabs(cxx):
                # Prefer a clang++ sitting next to the given clang-cl.
                search_paths = os.path.dirname(cxx)
            clangxx = libcxx.util.which('clang++', search_paths)
            if clangxx:
                cxx = clangxx
                self.lit_config.note(
                    "inferred cxx_under_test as: %r" % cxx)
            elif self.cxx_is_clang_cl:
                self.lit_config.fatal('Failed to find clang++ substitution for'
                                      ' clang-cl')
        if not cxx:
            self.lit_config.fatal('must specify user parameter cxx_under_test '
                                  '(e.g., --param=cxx_under_test=clang++)')
        self.cxx = CXXCompiler(self, cxx) if not self.cxx_is_clang_cl else \
            self._configure_clang_cl(cxx)
        # Tests compile with a copy of the current process environment.
        self.cxx.compile_env = dict(os.environ)
def _configure_clang_cl(self, clang_path):
def _split_env_var(var):
return [p.strip() for p in os.environ.get(var, '').split(';') if p.strip()]
def _prefixed_env_list(var, prefix):
from itertools import chain
return list(chain.from_iterable((prefix, path) for path in _split_env_var(var)))
assert self.cxx_is_clang_cl
flags = []
compile_flags = []
link_flags = _prefixed_env_list('LIB', '-L')
return CXXCompiler(self, clang_path, flags=flags,
compile_flags=compile_flags,
link_flags=link_flags)
def configure_src_root(self):
self.libcxx_src_root = self.get_lit_conf(
'libcxx_src_root', os.path.dirname(self.config.test_source_root))
def configure_obj_root(self):
self.project_obj_root = self.get_lit_conf('project_obj_root')
self.libcxx_obj_root = self.get_lit_conf('libcxx_obj_root')
if not self.libcxx_obj_root and self.project_obj_root is not None:
possible_roots = [
os.path.join(self.project_obj_root, 'libcxx'),
os.path.join(self.project_obj_root, 'projects', 'libcxx'),
os.path.join(self.project_obj_root, 'runtimes', 'libcxx'),
]
for possible_root in possible_roots:
if os.path.isdir(possible_root):
self.libcxx_obj_root = possible_root
break
else:
self.libcxx_obj_root = self.project_obj_root
def configure_features(self):
if self.target_info.is_windows():
if self.cxx_stdlib_under_test == 'libc++':
# LIBCXX-WINDOWS-FIXME is the feature name used to XFAIL the
# initial Windows failures until they can be properly diagnosed
# and fixed. This allows easier detection of new test failures
# and regressions. Note: New failures should not be suppressed
# using this feature. (Also see llvm.org/PR32730)
self.config.available_features.add('LIBCXX-WINDOWS-FIXME')
def configure_compile_flags(self):
self.configure_default_compile_flags()
# Configure extra flags
compile_flags_str = self.get_lit_conf('compile_flags', '')
self.cxx.compile_flags += shlex.split(compile_flags_str)
if self.target_info.is_windows():
self.cxx.compile_flags += ['-D_CRT_SECURE_NO_WARNINGS']
# Don't warn about using common but nonstandard unprefixed functions
# like chdir, fileno.
self.cxx.compile_flags += ['-D_CRT_NONSTDC_NO_WARNINGS']
# Build the tests in the same configuration as libcxx itself,
# to avoid mismatches if linked statically.
self.cxx.compile_flags += ['-D_CRT_STDIO_ISO_WIDE_SPECIFIERS']
# Required so that tests using min/max don't fail on Windows,
# and so that those tests don't have to be changed to tolerate
# this insanity.
self.cxx.compile_flags += ['-DNOMINMAX']
additional_flags = self.get_lit_conf('test_compiler_flags')
if additional_flags:
self.cxx.compile_flags += shlex.split(additional_flags)
    def configure_default_compile_flags(self):
        """Populate the baseline compile flags: header include paths,
        target/platform flags, sysroot/toolchain selection, and the support
        headers for the tests."""
        # Configure include paths
        self.configure_compile_flags_header_includes()
        self.target_info.add_cxx_compile_flags(self.cxx.compile_flags)
        self.target_info.add_cxx_flags(self.cxx.flags)
        # Use verbose output for better errors
        self.cxx.flags += ['-v']
        sysroot = self.get_lit_conf('sysroot')
        if sysroot:
            self.cxx.flags += ['--sysroot=' + sysroot]
        gcc_toolchain = self.get_lit_conf('gcc_toolchain')
        if gcc_toolchain:
            self.cxx.flags += ['--gcc-toolchain=' + gcc_toolchain]
        # NOTE: the _DEBUG definition must precede the triple check because for
        # the Windows build of libc++, the forced inclusion of a header requires
        # that _DEBUG is defined. Incorrect ordering will result in -target
        # being elided.
        if self.target_info.is_windows() and self.debug_build:
            self.cxx.compile_flags += ['-D_DEBUG']
        # Add includes for support headers used in the tests.
        support_path = os.path.join(self.libcxx_src_root, 'test/support')
        self.cxx.compile_flags += ['-I' + support_path]
        # On GCC, the libc++ headers cause errors due to throw() decorators
        # on operator new clashing with those from the test suite, so we
        # don't enable warnings in system headers on GCC.
        if self.cxx.type != 'gcc':
            self.cxx.compile_flags += ['-D_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER']
        # Add includes for the PSTL headers
        pstl_src_root = self.get_lit_conf('pstl_src_root')
        pstl_obj_root = self.get_lit_conf('pstl_obj_root')
        if pstl_src_root is not None and pstl_obj_root is not None:
            self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'include')]
            self.cxx.compile_flags += ['-I' + os.path.join(pstl_obj_root, 'generated_headers')]
            self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'test')]
            self.config.available_features.add('parallel-algorithms')
def configure_compile_flags_header_includes(self):
support_path = os.path.join(self.libcxx_src_root, 'test', 'support')
if self.cxx_stdlib_under_test != 'libstdc++' and \
not self.target_info.is_windows() and \
not self.target_info.is_zos():
self.cxx.compile_flags += [
'-include', os.path.join(support_path, 'nasty_macros.h')]
if self.cxx_stdlib_under_test == 'msvc':
self.cxx.compile_flags += [
'-include', os.path.join(support_path,
'msvc_stdlib_force_include.h')]
pass
if self.target_info.is_windows() and self.debug_build and \
self.cxx_stdlib_under_test != 'msvc':
self.cxx.compile_flags += [
'-include', os.path.join(support_path,
'set_windows_crt_report_mode.h')
]
cxx_headers = self.get_lit_conf('cxx_headers')
if cxx_headers is None and self.cxx_stdlib_under_test != 'libc++':
self.lit_config.note('using the system cxx headers')
return
self.cxx.compile_flags += ['-nostdinc++']
if not os.path.isdir(cxx_headers):
self.lit_config.fatal("cxx_headers='{}' is not a directory.".format(cxx_headers))
(path, version) = os.path.split(cxx_headers)
(path, cxx) = os.path.split(path)
triple = self.get_lit_conf('target_triple', None)
if triple is not None:
cxx_target_headers = os.path.join(path, triple, cxx, version)
if os.path.isdir(cxx_target_headers):
self.cxx.compile_flags += ['-I' + cxx_target_headers]
self.cxx.compile_flags += ['-I' + cxx_headers]
if self.libcxx_obj_root is not None:
cxxabi_headers = os.path.join(self.libcxx_obj_root, 'include',
'c++build')
if os.path.isdir(cxxabi_headers):
self.cxx.compile_flags += ['-I' + cxxabi_headers]
    def configure_link_flags(self):
        """Derive the linker flags appropriate to the stdlib under test."""
        # Configure library path
        self.configure_link_flags_cxx_library_path()
        self.configure_link_flags_abi_library_path()
        # Configure libraries
        if self.cxx_stdlib_under_test == 'libc++':
            # Suppress the default C++ runtime so the library under test is
            # the one actually linked.
            if self.target_info.is_mingw():
                self.cxx.link_flags += ['-nostdlib++']
            else:
                self.cxx.link_flags += ['-nodefaultlibs']
            # FIXME: Handle MSVCRT as part of the ABI library handling.
            if self.target_info.is_windows() and not self.target_info.is_mingw():
                self.cxx.link_flags += ['-nostdlib']
            self.configure_link_flags_cxx_library()
            self.configure_link_flags_abi_library()
            self.configure_extra_library_flags()
        elif self.cxx_stdlib_under_test == 'libstdc++':
            self.cxx.link_flags += ['-lstdc++fs', '-lm', '-pthread']
        elif self.cxx_stdlib_under_test == 'msvc':
            # FIXME: Correctly setup debug/release flags here.
            pass
        elif self.cxx_stdlib_under_test == 'cxx_default':
            self.cxx.link_flags += ['-pthread']
        else:
            self.lit_config.fatal('invalid stdlib under test')
        # User-specified extra link flags always go last.
        link_flags_str = self.get_lit_conf('link_flags', '')
        self.cxx.link_flags += shlex.split(link_flags_str)
def configure_link_flags_cxx_library_path(self):
if self.cxx_library_root:
self.cxx.link_flags += ['-L' + self.cxx_library_root]
if self.target_info.is_windows() and self.link_shared:
self.add_path(self.cxx.compile_env, self.cxx_library_root)
if self.cxx_runtime_root:
if not self.target_info.is_windows():
self.cxx.link_flags += ['-Wl,-rpath,' +
self.cxx_runtime_root]
elif self.target_info.is_windows() and self.link_shared:
self.add_path(self.exec_env, self.cxx_runtime_root)
additional_flags = self.get_lit_conf('test_linker_flags')
if additional_flags:
self.cxx.link_flags += shlex.split(additional_flags)
def configure_link_flags_abi_library_path(self):
# Configure ABI library paths.
if self.abi_library_root:
self.cxx.link_flags += ['-L' + self.abi_library_root]
if self.abi_runtime_root:
if not self.target_info.is_windows():
self.cxx.link_flags += ['-Wl,-rpath,' + self.abi_runtime_root]
else:
self.add_path(self.exec_env, self.abi_runtime_root)
def configure_link_flags_cxx_library(self):
if self.link_shared:
self.cxx.link_flags += ['-lc++']
else:
if self.cxx_library_root:
libname = self.make_static_lib_name('c++')
abs_path = os.path.join(self.cxx_library_root, libname)
assert os.path.exists(abs_path) and \
"static libc++ library does not exist"
self.cxx.link_flags += [abs_path]
else:
self.cxx.link_flags += ['-lc++']
    def configure_link_flags_abi_library(self):
        """Add link inputs for the configured C++ ABI library."""
        cxx_abi = self.get_lit_conf('cxx_abi', 'libcxxabi')
        if cxx_abi == 'libstdc++':
            self.cxx.link_flags += ['-lstdc++']
        elif cxx_abi == 'libsupc++':
            self.cxx.link_flags += ['-lsupc++']
        elif cxx_abi == 'libcxxabi':
            # If the C++ library requires explicitly linking to libc++abi, or
            # if we're testing libc++abi itself (the test configs are shared),
            # then link it.
            testing_libcxxabi = self.get_lit_conf('name', '') == 'libc++abi'
            if self.target_info.allow_cxxabi_link() or testing_libcxxabi:
                libcxxabi_shared = self.get_lit_bool('libcxxabi_shared', default=True)
                if libcxxabi_shared:
                    self.cxx.link_flags += ['-lc++abi']
                else:
                    # Link the static archive by absolute path when its
                    # location is known; otherwise fall back to -lc++abi.
                    if self.abi_library_root:
                        libname = self.make_static_lib_name('c++abi')
                        abs_path = os.path.join(self.abi_library_root, libname)
                        self.cxx.link_flags += [abs_path]
                    else:
                        self.cxx.link_flags += ['-lc++abi']
        elif cxx_abi == 'libcxxrt':
            self.cxx.link_flags += ['-lcxxrt']
        elif cxx_abi == 'vcruntime':
            debug_suffix = 'd' if self.debug_build else ''
            # This matches the set of libraries linked in the toplevel
            # libcxx CMakeLists.txt if building targeting msvc.
            self.cxx.link_flags += ['-l%s%s' % (lib, debug_suffix) for lib in
                                    ['vcruntime', 'ucrt', 'msvcrt', 'msvcprt']]
            # The compiler normally links in oldnames.lib too, but we've
            # specified -nostdlib above, so we need to specify it manually.
            self.cxx.link_flags += ['-loldnames']
        elif cxx_abi == 'none' or cxx_abi == 'default':
            if self.target_info.is_windows():
                debug_suffix = 'd' if self.debug_build else ''
                self.cxx.link_flags += ['-lmsvcrt%s' % debug_suffix]
        else:
            self.lit_config.fatal(
                'C++ ABI setting %s unsupported for tests' % cxx_abi)
def configure_extra_library_flags(self):
if self.get_lit_bool('cxx_ext_threads', default=False):
self.cxx.link_flags += ['-lc++external_threads']
self.target_info.add_cxx_link_flags(self.cxx.link_flags)
def configure_coverage(self):
self.generate_coverage = self.get_lit_bool('generate_coverage', False)
if self.generate_coverage:
self.cxx.flags += ['-g', '--coverage']
self.cxx.compile_flags += ['-O0']
def quote(self, s):
if platform.system() == 'Windows':
return lit.TestRunner.quote_windows_command([s])
return pipes.quote(s)
def configure_substitutions(self):
sub = self.config.substitutions
sub.append(('%{cxx}', self.quote(self.cxx.path)))
flags = self.cxx.flags + (self.cxx.modules_flags if self.cxx.use_modules else [])
compile_flags = self.cxx.compile_flags + (self.cxx.warning_flags if self.cxx.use_warnings else [])
sub.append(('%{flags}', ' '.join(map(self.quote, flags))))
sub.append(('%{compile_flags}', ' '.join(map(self.quote, compile_flags))))
sub.append(('%{link_flags}', ' '.join(map(self.quote, self.cxx.link_flags))))
codesign_ident = self.get_lit_conf('llvm_codesign_identity', '')
env_vars = ' '.join('%s=%s' % (k, self.quote(v)) for (k, v) in self.exec_env.items())
exec_args = [
'--execdir %T',
'--codesign_identity "{}"'.format(codesign_ident),
'--env {}'.format(env_vars)
]
sub.append(('%{exec}', '{} {} -- '.format(self.executor, ' '.join(exec_args))))
def configure_env(self):
self.config.environment = dict(os.environ)
    def add_path(self, dest_env, new_path):
        # Delegate to the target info, which knows the platform's PATH-style
        # variable name and separator.
        self.target_info.add_path(dest_env, new_path)
| 46.706721 | 106 | 0.608032 |
mport copy
import os
import pkgutil
import pipes
import platform
import re
import shlex
import shutil
import sys
from libcxx.compiler import CXXCompiler
from libcxx.test.target_info import make_target_info
import libcxx.util
import libcxx.test.features
import libcxx.test.newconfig
import libcxx.test.params
import lit
def loadSiteConfig(lit_config, config, param_name, env_name):
# probably trying to run on a test file directly, and either the site
# configuration hasn't been created by the build system, or we are in an
site_cfg = lit_config.params.get(param_name,
os.environ.get(env_name))
if not site_cfg:
lit_config.warning('No site specific configuration file found!'
' Running the tests in the default configuration.')
elif not os.path.isfile(site_cfg):
lit_config.fatal(
"Specified site configuration file does not exist: '%s'" %
site_cfg)
else:
lit_config.note('using site specific configuration at %s' % site_cfg)
ld_fn = lit_config.load_config
# recursively load a config even if it tries.
# TODO: This is one hell of a hack. Fix it.
def prevent_reload_fn(*args, **kwargs):
pass
lit_config.load_config = prevent_reload_fn
ld_fn(config, site_cfg)
lit_config.load_config = ld_fn
# Extract the value of a numeric macro such as __cplusplus or a feature-test
# macro.
def intMacroValue(token):
return int(token.rstrip('LlUu'))
class Configuration(object):
# pylint: disable=redefined-outer-name
def __init__(self, lit_config, config):
self.lit_config = lit_config
self.config = config
self.cxx = None
self.cxx_is_clang_cl = None
self.cxx_stdlib_under_test = None
self.project_obj_root = None
self.libcxx_src_root = None
self.libcxx_obj_root = None
self.cxx_library_root = None
self.cxx_runtime_root = None
self.abi_library_root = None
self.link_shared = self.get_lit_bool('enable_shared', default=True)
self.debug_build = self.get_lit_bool('debug_build', default=False)
self.exec_env = dict()
self.use_clang_verify = False
def get_lit_conf(self, name, default=None):
val = self.lit_config.params.get(name, None)
if val is None:
val = getattr(self.config, name, None)
if val is None:
val = default
return val
def get_lit_bool(self, name, default=None, env_var=None):
def check_value(value, var_name):
if value is None:
return default
if isinstance(value, bool):
return value
if not isinstance(value, str):
raise TypeError('expected bool or string')
if value.lower() in ('1', 'true'):
return True
if value.lower() in ('', '0', 'false'):
return False
self.lit_config.fatal(
"parameter '{}' should be true or false".format(var_name))
conf_val = self.get_lit_conf(name)
if env_var is not None and env_var in os.environ and \
os.environ[env_var] is not None:
val = os.environ[env_var]
if conf_val is not None:
self.lit_config.warning(
'Environment variable %s=%s is overriding explicit '
'--param=%s=%s' % (env_var, val, name, conf_val))
return check_value(val, env_var)
return check_value(conf_val, name)
def make_static_lib_name(self, name):
if self.target_info.is_windows() and not self.target_info.is_mingw():
assert name == 'c++' # Only allow libc++ to use this function for now.
return 'lib' + name + '.lib'
else:
return 'lib' + name + '.a'
def configure(self):
self.target_info = make_target_info(self)
self.executor = self.get_lit_conf('executor')
self.configure_cxx()
self.configure_src_root()
self.configure_obj_root()
self.cxx_stdlib_under_test = self.get_lit_conf('cxx_stdlib_under_test', 'libc++')
self.cxx_library_root = self.get_lit_conf('cxx_library_root', self.libcxx_obj_root)
self.abi_library_root = self.get_lit_conf('abi_library_root') or self.cxx_library_root
self.cxx_runtime_root = self.get_lit_conf('cxx_runtime_root', self.cxx_library_root)
self.abi_runtime_root = self.get_lit_conf('abi_runtime_root', self.abi_library_root)
self.configure_compile_flags()
self.configure_link_flags()
self.configure_env()
self.configure_coverage()
self.configure_substitutions()
self.configure_features()
libcxx.test.newconfig.configure(
libcxx.test.params.DEFAULT_PARAMETERS,
libcxx.test.features.DEFAULT_FEATURES,
self.config,
self.lit_config
)
self.lit_config.note("All available features: {}".format(self.config.available_features))
def print_config_info(self):
if self.cxx.use_modules:
self.lit_config.note('Using modules flags: %s' %
self.cxx.modules_flags)
if len(self.cxx.warning_flags):
self.lit_config.note('Using warnings: %s' % self.cxx.warning_flags)
show_env_vars = {}
for k,v in self.exec_env.items():
if k not in os.environ or os.environ[k] != v:
show_env_vars[k] = v
self.lit_config.note('Adding environment variables: %r' % show_env_vars)
self.lit_config.note("Linking against the C++ Library at {}".format(self.cxx_library_root))
self.lit_config.note("Running against the C++ Library at {}".format(self.cxx_runtime_root))
self.lit_config.note("Linking against the ABI Library at {}".format(self.abi_library_root))
self.lit_config.note("Running against the ABI Library at {}".format(self.abi_runtime_root))
sys.stderr.flush() # Force flushing to avoid broken output on Windows
def get_test_format(self):
from libcxx.test.format import LibcxxTestFormat
return LibcxxTestFormat(
self.cxx,
self.use_clang_verify,
self.executor,
exec_env=self.exec_env)
def configure_cxx(self):
# Gather various compiler parameters.
cxx = self.get_lit_conf('cxx_under_test')
self.cxx_is_clang_cl = cxx is not None and \
os.path.basename(cxx).startswith('clang-cl')
# If no specific cxx_under_test was given, attempt to infer it as
# clang++.
if cxx is None or self.cxx_is_clang_cl:
search_paths = self.config.environment['PATH']
if cxx is not None and os.path.isabs(cxx):
search_paths = os.path.dirname(cxx)
clangxx = libcxx.util.which('clang++', search_paths)
if clangxx:
cxx = clangxx
self.lit_config.note(
"inferred cxx_under_test as: %r" % cxx)
elif self.cxx_is_clang_cl:
self.lit_config.fatal('Failed to find clang++ substitution for'
' clang-cl')
if not cxx:
self.lit_config.fatal('must specify user parameter cxx_under_test '
'(e.g., --param=cxx_under_test=clang++)')
self.cxx = CXXCompiler(self, cxx) if not self.cxx_is_clang_cl else \
self._configure_clang_cl(cxx)
self.cxx.compile_env = dict(os.environ)
def _configure_clang_cl(self, clang_path):
def _split_env_var(var):
return [p.strip() for p in os.environ.get(var, '').split(';') if p.strip()]
def _prefixed_env_list(var, prefix):
from itertools import chain
return list(chain.from_iterable((prefix, path) for path in _split_env_var(var)))
assert self.cxx_is_clang_cl
flags = []
compile_flags = []
link_flags = _prefixed_env_list('LIB', '-L')
return CXXCompiler(self, clang_path, flags=flags,
compile_flags=compile_flags,
link_flags=link_flags)
def configure_src_root(self):
self.libcxx_src_root = self.get_lit_conf(
'libcxx_src_root', os.path.dirname(self.config.test_source_root))
def configure_obj_root(self):
self.project_obj_root = self.get_lit_conf('project_obj_root')
self.libcxx_obj_root = self.get_lit_conf('libcxx_obj_root')
if not self.libcxx_obj_root and self.project_obj_root is not None:
possible_roots = [
os.path.join(self.project_obj_root, 'libcxx'),
os.path.join(self.project_obj_root, 'projects', 'libcxx'),
os.path.join(self.project_obj_root, 'runtimes', 'libcxx'),
]
for possible_root in possible_roots:
if os.path.isdir(possible_root):
self.libcxx_obj_root = possible_root
break
else:
self.libcxx_obj_root = self.project_obj_root
def configure_features(self):
if self.target_info.is_windows():
if self.cxx_stdlib_under_test == 'libc++':
# LIBCXX-WINDOWS-FIXME is the feature name used to XFAIL the
# initial Windows failures until they can be properly diagnosed
# and fixed. This allows easier detection of new test failures
# and regressions. Note: New failures should not be suppressed
# using this feature. (Also see llvm.org/PR32730)
self.config.available_features.add('LIBCXX-WINDOWS-FIXME')
def configure_compile_flags(self):
self.configure_default_compile_flags()
# Configure extra flags
compile_flags_str = self.get_lit_conf('compile_flags', '')
self.cxx.compile_flags += shlex.split(compile_flags_str)
if self.target_info.is_windows():
self.cxx.compile_flags += ['-D_CRT_SECURE_NO_WARNINGS']
# Don't warn about using common but nonstandard unprefixed functions
self.cxx.compile_flags += ['-D_CRT_NONSTDC_NO_WARNINGS']
self.cxx.compile_flags += ['-D_CRT_STDIO_ISO_WIDE_SPECIFIERS']
# and so that those tests don't have to be changed to tolerate
self.cxx.compile_flags += ['-DNOMINMAX']
additional_flags = self.get_lit_conf('test_compiler_flags')
if additional_flags:
self.cxx.compile_flags += shlex.split(additional_flags)
def configure_default_compile_flags(self):
self.configure_compile_flags_header_includes()
self.target_info.add_cxx_compile_flags(self.cxx.compile_flags)
self.target_info.add_cxx_flags(self.cxx.flags)
self.cxx.flags += ['-v']
sysroot = self.get_lit_conf('sysroot')
if sysroot:
self.cxx.flags += ['--sysroot=' + sysroot]
gcc_toolchain = self.get_lit_conf('gcc_toolchain')
if gcc_toolchain:
self.cxx.flags += ['--gcc-toolchain=' + gcc_toolchain]
if self.target_info.is_windows() and self.debug_build:
self.cxx.compile_flags += ['-D_DEBUG']
support_path = os.path.join(self.libcxx_src_root, 'test/support')
self.cxx.compile_flags += ['-I' + support_path]
if self.cxx.type != 'gcc':
self.cxx.compile_flags += ['-D_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER']
# Add includes for the PSTL headers
pstl_src_root = self.get_lit_conf('pstl_src_root')
pstl_obj_root = self.get_lit_conf('pstl_obj_root')
if pstl_src_root is not None and pstl_obj_root is not None:
self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'include')]
self.cxx.compile_flags += ['-I' + os.path.join(pstl_obj_root, 'generated_headers')]
self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'test')]
self.config.available_features.add('parallel-algorithms')
def configure_compile_flags_header_includes(self):
support_path = os.path.join(self.libcxx_src_root, 'test', 'support')
if self.cxx_stdlib_under_test != 'libstdc++' and \
not self.target_info.is_windows() and \
not self.target_info.is_zos():
self.cxx.compile_flags += [
'-include', os.path.join(support_path, 'nasty_macros.h')]
if self.cxx_stdlib_under_test == 'msvc':
self.cxx.compile_flags += [
'-include', os.path.join(support_path,
'msvc_stdlib_force_include.h')]
pass
if self.target_info.is_windows() and self.debug_build and \
self.cxx_stdlib_under_test != 'msvc':
self.cxx.compile_flags += [
'-include', os.path.join(support_path,
'set_windows_crt_report_mode.h')
]
cxx_headers = self.get_lit_conf('cxx_headers')
if cxx_headers is None and self.cxx_stdlib_under_test != 'libc++':
self.lit_config.note('using the system cxx headers')
return
self.cxx.compile_flags += ['-nostdinc++']
if not os.path.isdir(cxx_headers):
self.lit_config.fatal("cxx_headers='{}' is not a directory.".format(cxx_headers))
(path, version) = os.path.split(cxx_headers)
(path, cxx) = os.path.split(path)
triple = self.get_lit_conf('target_triple', None)
if triple is not None:
cxx_target_headers = os.path.join(path, triple, cxx, version)
if os.path.isdir(cxx_target_headers):
self.cxx.compile_flags += ['-I' + cxx_target_headers]
self.cxx.compile_flags += ['-I' + cxx_headers]
if self.libcxx_obj_root is not None:
cxxabi_headers = os.path.join(self.libcxx_obj_root, 'include',
'c++build')
if os.path.isdir(cxxabi_headers):
self.cxx.compile_flags += ['-I' + cxxabi_headers]
def configure_link_flags(self):
# Configure library path
self.configure_link_flags_cxx_library_path()
self.configure_link_flags_abi_library_path()
# Configure libraries
if self.cxx_stdlib_under_test == 'libc++':
if self.target_info.is_mingw():
self.cxx.link_flags += ['-nostdlib++']
else:
self.cxx.link_flags += ['-nodefaultlibs']
# FIXME: Handle MSVCRT as part of the ABI library handling.
if self.target_info.is_windows() and not self.target_info.is_mingw():
self.cxx.link_flags += ['-nostdlib']
self.configure_link_flags_cxx_library()
self.configure_link_flags_abi_library()
self.configure_extra_library_flags()
elif self.cxx_stdlib_under_test == 'libstdc++':
self.cxx.link_flags += ['-lstdc++fs', '-lm', '-pthread']
elif self.cxx_stdlib_under_test == 'msvc':
# FIXME: Correctly setup debug/release flags here.
pass
elif self.cxx_stdlib_under_test == 'cxx_default':
self.cxx.link_flags += ['-pthread']
else:
self.lit_config.fatal('invalid stdlib under test')
link_flags_str = self.get_lit_conf('link_flags', '')
self.cxx.link_flags += shlex.split(link_flags_str)
def configure_link_flags_cxx_library_path(self):
if self.cxx_library_root:
self.cxx.link_flags += ['-L' + self.cxx_library_root]
if self.target_info.is_windows() and self.link_shared:
self.add_path(self.cxx.compile_env, self.cxx_library_root)
if self.cxx_runtime_root:
if not self.target_info.is_windows():
self.cxx.link_flags += ['-Wl,-rpath,' +
self.cxx_runtime_root]
elif self.target_info.is_windows() and self.link_shared:
self.add_path(self.exec_env, self.cxx_runtime_root)
additional_flags = self.get_lit_conf('test_linker_flags')
if additional_flags:
self.cxx.link_flags += shlex.split(additional_flags)
def configure_link_flags_abi_library_path(self):
# Configure ABI library paths.
if self.abi_library_root:
self.cxx.link_flags += ['-L' + self.abi_library_root]
if self.abi_runtime_root:
if not self.target_info.is_windows():
self.cxx.link_flags += ['-Wl,-rpath,' + self.abi_runtime_root]
else:
self.add_path(self.exec_env, self.abi_runtime_root)
def configure_link_flags_cxx_library(self):
if self.link_shared:
self.cxx.link_flags += ['-lc++']
else:
if self.cxx_library_root:
libname = self.make_static_lib_name('c++')
abs_path = os.path.join(self.cxx_library_root, libname)
assert os.path.exists(abs_path) and \
"static libc++ library does not exist"
self.cxx.link_flags += [abs_path]
else:
self.cxx.link_flags += ['-lc++']
def configure_link_flags_abi_library(self):
cxx_abi = self.get_lit_conf('cxx_abi', 'libcxxabi')
if cxx_abi == 'libstdc++':
self.cxx.link_flags += ['-lstdc++']
elif cxx_abi == 'libsupc++':
self.cxx.link_flags += ['-lsupc++']
elif cxx_abi == 'libcxxabi':
# If the C++ library requires explicitly linking to libc++abi, or
# if we're testing libc++abi itself (the test configs are shared),
testing_libcxxabi = self.get_lit_conf('name', '') == 'libc++abi'
if self.target_info.allow_cxxabi_link() or testing_libcxxabi:
libcxxabi_shared = self.get_lit_bool('libcxxabi_shared', default=True)
if libcxxabi_shared:
self.cxx.link_flags += ['-lc++abi']
else:
if self.abi_library_root:
libname = self.make_static_lib_name('c++abi')
abs_path = os.path.join(self.abi_library_root, libname)
self.cxx.link_flags += [abs_path]
else:
self.cxx.link_flags += ['-lc++abi']
elif cxx_abi == 'libcxxrt':
self.cxx.link_flags += ['-lcxxrt']
elif cxx_abi == 'vcruntime':
debug_suffix = 'd' if self.debug_build else ''
self.cxx.link_flags += ['-l%s%s' % (lib, debug_suffix) for lib in
['vcruntime', 'ucrt', 'msvcrt', 'msvcprt']]
# specified -nostdlib above, so we need to specify it manually.
self.cxx.link_flags += ['-loldnames']
elif cxx_abi == 'none' or cxx_abi == 'default':
if self.target_info.is_windows():
debug_suffix = 'd' if self.debug_build else ''
self.cxx.link_flags += ['-lmsvcrt%s' % debug_suffix]
else:
self.lit_config.fatal(
'C++ ABI setting %s unsupported for tests' % cxx_abi)
def configure_extra_library_flags(self):
if self.get_lit_bool('cxx_ext_threads', default=False):
self.cxx.link_flags += ['-lc++external_threads']
self.target_info.add_cxx_link_flags(self.cxx.link_flags)
def configure_coverage(self):
self.generate_coverage = self.get_lit_bool('generate_coverage', False)
if self.generate_coverage:
self.cxx.flags += ['-g', '--coverage']
self.cxx.compile_flags += ['-O0']
def quote(self, s):
if platform.system() == 'Windows':
return lit.TestRunner.quote_windows_command([s])
return pipes.quote(s)
def configure_substitutions(self):
sub = self.config.substitutions
sub.append(('%{cxx}', self.quote(self.cxx.path)))
flags = self.cxx.flags + (self.cxx.modules_flags if self.cxx.use_modules else [])
compile_flags = self.cxx.compile_flags + (self.cxx.warning_flags if self.cxx.use_warnings else [])
sub.append(('%{flags}', ' '.join(map(self.quote, flags))))
sub.append(('%{compile_flags}', ' '.join(map(self.quote, compile_flags))))
sub.append(('%{link_flags}', ' '.join(map(self.quote, self.cxx.link_flags))))
codesign_ident = self.get_lit_conf('llvm_codesign_identity', '')
env_vars = ' '.join('%s=%s' % (k, self.quote(v)) for (k, v) in self.exec_env.items())
exec_args = [
'--execdir %T',
'--codesign_identity "{}"'.format(codesign_ident),
'--env {}'.format(env_vars)
]
sub.append(('%{exec}', '{} {} -- '.format(self.executor, ' '.join(exec_args))))
def configure_env(self):
self.config.environment = dict(os.environ)
    def add_path(self, dest_env, new_path):
        # Delegate to the target info, which knows the platform's path
        # variable name and separator conventions.
        self.target_info.add_path(dest_env, new_path)
| true | true |
f7117d2b4d5ff936d18079dc6cda0605b3a809a0 | 2,778 | py | Python | aodh/event.py | ISCAS-VDI/aodh-base | 4e7a8aef80054c49e8e8b32715cecd32b425d617 | [
"Apache-2.0"
] | null | null | null | aodh/event.py | ISCAS-VDI/aodh-base | 4e7a8aef80054c49e8e8b32715cecd32b425d617 | [
"Apache-2.0"
] | null | null | null | aodh/event.py | ISCAS-VDI/aodh-base | 4e7a8aef80054c49e8e8b32715cecd32b425d617 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2015 NEC Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
import oslo_messaging
from oslo_service import service
from aodh.evaluator import event
from aodh import messaging
from aodh import storage
# Module-level logger for the event alarm evaluation service.
LOG = log.getLogger(__name__)
# Configuration options read by EventAlarmEvaluationService below
# (accessed as conf.listener.*, so presumably registered under a
# "listener" group by the service entry point -- confirm at call site).
OPTS = [
    cfg.StrOpt('event_alarm_topic',
               default='alarm.all',
               deprecated_group='DEFAULT',
               help='The topic that aodh uses for event alarm evaluation.'),
    cfg.IntOpt('batch_size',
               default=1,
               help='Number of notification messages to wait before '
               'dispatching them.'),
    cfg.IntOpt('batch_timeout',
               default=None,
               help='Number of seconds to wait before dispatching samples '
               'when batch_size is not reached (None means indefinitely).'),
]
class EventAlarmEndpoint(object):
    """Notification endpoint that feeds event batches to an alarm evaluator."""

    def __init__(self, evaluator):
        self.evaluator = evaluator

    def sample(self, notifications):
        """Evaluate event-driven alarms for every payload in the batch."""
        LOG.debug('Received %s messages in batch.', len(notifications))
        for message in notifications:
            self.evaluator.evaluate_events(message['payload'])
class EventAlarmEvaluationService(service.Service):
    """Oslo service that listens for event notifications and evaluates
    event-driven alarms against the received payloads."""

    def __init__(self, conf):
        super(EventAlarmEvaluationService, self).__init__()
        self.conf = conf

    def start(self):
        """Open storage, build the evaluator, and start the listener."""
        super(EventAlarmEvaluationService, self).start()
        self.storage_conn = storage.get_connection_from_config(self.conf)
        self.evaluator = event.EventAlarmEvaluator(self.conf)
        listener_conf = self.conf.listener
        targets = [oslo_messaging.Target(
            topic=listener_conf.event_alarm_topic)]
        endpoints = [EventAlarmEndpoint(self.evaluator)]
        self.listener = messaging.get_batch_notification_listener(
            messaging.get_transport(self.conf),
            targets,
            endpoints, False,
            listener_conf.batch_size,
            listener_conf.batch_timeout)
        self.listener.start()
        # Dummy week-long timer keeps the thread group busy so wait() blocks.
        self.tg.add_timer(604800, lambda: None)

    def stop(self):
        """Shut down the listener (if it was started) and the service."""
        listener = getattr(self, 'listener', None)
        if listener:
            listener.stop()
            listener.wait()
        super(EventAlarmEvaluationService, self).stop()
| 34.725 | 76 | 0.677826 |
from oslo_config import cfg
from oslo_log import log
import oslo_messaging
from oslo_service import service
from aodh.evaluator import event
from aodh import messaging
from aodh import storage
LOG = log.getLogger(__name__)
OPTS = [
cfg.StrOpt('event_alarm_topic',
default='alarm.all',
deprecated_group='DEFAULT',
help='The topic that aodh uses for event alarm evaluation.'),
cfg.IntOpt('batch_size',
default=1,
help='Number of notification messages to wait before '
'dispatching them.'),
cfg.IntOpt('batch_timeout',
default=None,
help='Number of seconds to wait before dispatching samples '
'when batch_size is not reached (None means indefinitely).'),
]
class EventAlarmEndpoint(object):
    """Notification endpoint that forwards event batches to an evaluator."""
    def __init__(self, evaluator):
        self.evaluator = evaluator
    def sample(self, notifications):
        """Evaluate alarms for each event payload in the received batch."""
        LOG.debug('Received %s messages in batch.', len(notifications))
        for notification in notifications:
            self.evaluator.evaluate_events(notification['payload'])
class EventAlarmEvaluationService(service.Service):
    """Service that listens for event notifications and evaluates alarms."""
    def __init__(self, conf):
        super(EventAlarmEvaluationService, self).__init__()
        self.conf = conf
    def start(self):
        """Open the storage connection and start the notification listener."""
        super(EventAlarmEvaluationService, self).start()
        self.storage_conn = storage.get_connection_from_config(self.conf)
        self.evaluator = event.EventAlarmEvaluator(self.conf)
        self.listener = messaging.get_batch_notification_listener(
            messaging.get_transport(self.conf),
            [oslo_messaging.Target(
                topic=self.conf.listener.event_alarm_topic)],
            [EventAlarmEndpoint(self.evaluator)], False,
            self.conf.listener.batch_size,
            self.conf.listener.batch_timeout)
        self.listener.start()
        # Dummy timer (604800 s = one week) keeps wait() blocking.
        self.tg.add_timer(604800, lambda: None)
    def stop(self):
        """Stop the listener, if one was started, then the service itself."""
        if getattr(self, 'listener', None):
            self.listener.stop()
            self.listener.wait()
        super(EventAlarmEvaluationService, self).stop()
| true | true |
f7117d745cb7ceac2a3526e0c33d5632064d579a | 14,533 | py | Python | seekr2/tests/test_analyze.py | astokely/seekr2 | 2fd8496dc885339437678a729b1f97a4b0bf9cfd | [
"MIT"
] | null | null | null | seekr2/tests/test_analyze.py | astokely/seekr2 | 2fd8496dc885339437678a729b1f97a4b0bf9cfd | [
"MIT"
] | null | null | null | seekr2/tests/test_analyze.py | astokely/seekr2 | 2fd8496dc885339437678a729b1f97a4b0bf9cfd | [
"MIT"
] | null | null | null | """
test_analyze.py
Testing analyze.py
"""
import os
from collections import defaultdict
import numpy as np
import seekr2.modules.common_analyze as common_analyze
import seekr2.modules.mmvt_analyze as mmvt_analyze
import seekr2.analyze as analyze
import seekr2.modules.common_base as base
import seekr2.modules.mmvt_base as mmvt_base
import seekr2.tests.smoluchowski_system as smoluchowski
# Directory containing this test module; used to locate fixture files.
this_dir = os.path.dirname(os.path.realpath(__file__))
# Pre-recorded MMVT output fixture parsed by test_read_output_file().
test_output_filename = os.path.join(this_dir, "test_analyze_outputfile.txt")
# Statistics fixture path (not referenced by the tests in this section).
test_statistics_filename = os.path.join(this_dir, "test_analyze_statistics.txt")
def test_read_output_file():
    """openmm_read_output_file_list() must reproduce the counts and times
    recorded in the fixture file, and (approximately) double the counts
    when the same file is parsed twice.

    The original test duplicated the per-key comparison loops and carried
    commented-out dead code; both are removed here without changing what
    is asserted.
    """
    def _assert_dicts_close(actual, expected, factor=1.0, rtol=1e-05):
        # Every key produced by the parser must appear in the expectation
        # and carry (factor *) the expected value.
        for key in actual:
            assert key in expected
            assert np.isclose(actual[key], factor * expected[key], rtol=rtol)

    expected_N_i_j = {(1, 2): 52, (2, 1): 52}
    expected_R_i = {1: 1658.696, 2: 198.912}
    expected_N_alpha_beta = {1: 2423, 2: 98}
    expected_T_alpha = 1954.760
    (N_i_j_alpha, R_i_alpha_list, R_i_alpha_average, R_i_alpha_std_dev,
     R_i_alpha_total, N_alpha_beta, T_alpha_list, T_alpha_average,
     T_alpha_std_dev, T_alpha_total, existing_lines) \
        = mmvt_analyze.openmm_read_output_file_list(
            [test_output_filename])
    _assert_dicts_close(N_i_j_alpha, expected_N_i_j)
    _assert_dicts_close(R_i_alpha_total, expected_R_i)
    _assert_dicts_close(N_alpha_beta, expected_N_alpha_beta)
    assert np.isclose(T_alpha_total, expected_T_alpha)
    # Parsing the same file twice should double the event counts.
    (N_i_j_alpha, R_i_alpha_list, R_i_alpha_average, R_i_alpha_std_dev,
     R_i_alpha_total, N_alpha_beta, T_alpha_list, T_alpha_average,
     T_alpha_std_dev, T_alpha_total, existing_lines) \
        = mmvt_analyze.openmm_read_output_file_list(
            [test_output_filename, test_output_filename],
            skip_restart_check=True)
    _assert_dicts_close(N_i_j_alpha, expected_N_i_j, factor=2.0, rtol=0.01)
    _assert_dicts_close(N_alpha_beta, expected_N_alpha_beta, factor=2.0,
                        rtol=0.01)
    return
def test_minor2d():
    """common_analyze.minor2d(A, i, j) should drop row i and column j.

    The original assertions compared ``X.all() == Y.all()``, which reduces
    each array to a single truthiness flag -- for all-nonzero arrays both
    sides are True, so the test could never fail. Use element-wise
    equality instead.
    """
    A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    assert np.array_equal(common_analyze.minor2d(A, 1, 1),
                          np.array([[1, 3], [7, 9]]))
    assert np.array_equal(common_analyze.minor2d(A, 2, 2),
                          np.array([[1, 2], [4, 5]]))
    # NOTE(review): the old expected value [[2, 8], [3, 9]] is the
    # transpose of the minor (the broken .all() comparison masked it);
    # deleting row 1 and column 0 of A yields [[2, 3], [8, 9]].
    assert np.array_equal(common_analyze.minor2d(A, 1, 0),
                          np.array([[2, 3], [8, 9]]))
    return
def test_minor1d():
    """common_analyze.minor1d(v, i) should drop element i of v.

    The original assertions compared ``X.all() == Y.all()``, which reduces
    each array to a single truthiness flag and passes for any all-nonzero
    arrays; compare element-wise instead.
    """
    A = np.array([1, 2, 3])
    assert np.array_equal(common_analyze.minor1d(A, 1), np.array([1, 3]))
    assert np.array_equal(common_analyze.minor1d(A, 0), np.array([2, 3]))
    assert np.array_equal(common_analyze.minor1d(A, 2), np.array([1, 2]))
    return
def test_pretty_string_value_error():
    """pretty_string_value_error() renders 'value +/- error * 10^exp'
    with the requested number of error digits (ASCII mode)."""
    cases = [
        (5.6e-2, 2.0e-3, 1, "5.6 +/- 0.2 * 10^-02"),
        (5.6e-2, 2.0e-1, 1, "5.6 +/- 20.0 * 10^-02"),
        (1.23456789e8, 4.5678e5, 2, "1.2346 +/- 0.0046 * 10^+08"),
    ]
    for value, error, digits, expected in cases:
        rendered = common_analyze.pretty_string_value_error(
            value, error, error_digits=digits, use_unicode=False)
        assert rendered == expected
def make_fake_output_file_osc(anchor, tmp_path, timestep=1.0):
    """Write a synthetic MMVT output file whose trajectory alternates
    between milestone 2 (even steps) and milestone 1 (odd steps), except
    for anchor 0 which always records milestone 1.

    This is exactly the ups=1/downs=1 special case of
    make_fake_output_file2(), so delegate to it instead of duplicating
    the file-writing logic.
    """
    return make_fake_output_file2(anchor, tmp_path, ups=1, downs=1,
                                  timestep=timestep)
def make_fake_output_file2(anchor, tmp_path, ups=1, downs=9, timestep=1.0):
    """Write a synthetic MMVT output file for *anchor* under tmp_path.

    For anchor 0 every step records milestone 1. For any other anchor
    the step cycle of length ups+downs records milestone 2 for the first
    *ups* steps and milestone 1 for the remaining *downs* steps. Each
    line is 'milestone,step,time'.
    """
    total_steps = 50
    cycle = ups + downs
    mmvt_output_filename = os.path.join(
        tmp_path, anchor.name, "prod",
        "%s%d.%s" % (mmvt_base.OPENMMVT_BASENAME, 1,
                     mmvt_base.OPENMMVT_EXTENSION))
    with open(mmvt_output_filename, "w") as out:
        for step in range(total_steps + 1):
            if anchor.index == 0:
                milestone = 1
            elif (step % cycle) < ups:
                milestone = 2
            else:
                milestone = 1
            out.write("%d,%d,%f\n" % (milestone, step, step * timestep))
    return
def test_solve_rate_matrix():
    """solve_rate_matrix(Q) must agree with directly solving Q t = -1.

    The original test also constructed and row-normalized a transition
    probability matrix K from Q, but never used it in any assertion;
    that dead code is removed.
    """
    Q = np.array(
        [[-0.5, 0.5, 0.0, 0.0],
         [0.1, -0.3, 0.2, 0.0],
         [0.0, 0.15, -0.3, 0.15],
         [0.0, 0.0, 0.3, -0.4]])
    test_times_1 = common_analyze.solve_rate_matrix(Q)
    one_vector = np.ones((Q.shape[0]))
    # Reference solution: mean first passage times from Q t = -1.
    test_times_2 = np.linalg.solve(Q, -one_vector)
    error = np.linalg.norm(test_times_2 - test_times_1)
    assert error < 1e-8
    return
"""
def make_smol_calculation(tmp_path, func=None):
num_anchors = 10
D = 0.01
interval = 1.0
n = 101
intervals = []
for i in range(num_anchors):
intervals.append(interval)
if func is None:
func = smoluchowski.expW_constant
q_s = np.zeros(num_anchors)
mymodel = smoluchowski.make_smol_model(tmp_path, num_anchors, intervals)
my_analysis = analyze.Analysis(mymodel)
elberN_ij = defaultdict(float)
elberR_i = defaultdict(float)
smols = []
for i, anchor in enumerate(mymodel.anchors[:-1]):
a = interval*i
b = interval*(i+1)
smol = smoluchowski.Smoluchowski(a, b, func, n=n, D=D)
q_s[i] = smol.expWq
if i == 0:
smol.reflect_lower = True
k_backwards, k_forwards, T_alpha, N_backwards, N_forwards, \
R_i_backwards, R_i_forwards, N_ij_backwards, N_ij_forwards \
= smol.compute_MMVT_kinetics_quantities()
N_i_j_alpha_dict = defaultdict(int)
R_i_alpha_dict = defaultdict(float)
N_alpha_beta_dict = defaultdict(int)
new_time_factor = (R_i_forwards + R_i_backwards) / T_alpha
new_T_alpha = new_time_factor * T_alpha
if i == 0:
N_alpha_beta_dict[1] = new_time_factor
R_i_alpha_dict[1] = new_T_alpha
else:
N_i_j_alpha_dict[(1, 2)] = N_ij_forwards
N_i_j_alpha_dict[(2, 1)] = N_ij_backwards
R_i_alpha_dict[1] = R_i_forwards
R_i_alpha_dict[2] = R_i_backwards
N_alpha_beta_dict[1] = N_backwards * new_time_factor
N_alpha_beta_dict[2] = N_forwards * new_time_factor
anchor_stats = mmvt_analyze.MMVT_anchor_statistics(alpha=i)
anchor_stats.N_i_j_alpha = N_i_j_alpha_dict
anchor_stats.R_i_alpha_total = R_i_alpha_dict
anchor_stats.R_i_alpha_std_dev = R_i_alpha_dict
anchor_stats.R_i_alpha_list = {}
for key in anchor_stats.R_i_alpha_total:
anchor_stats.R_i_alpha_list[key] = []
anchor_stats.N_alpha_beta = N_alpha_beta_dict
anchor_stats.T_alpha_total = new_T_alpha
anchor_stats.T_alpha_std_dev = new_T_alpha
for key in N_alpha_beta_dict:
anchor_stats.k_alpha_beta[key] = N_alpha_beta_dict[key] \
/ new_T_alpha
# N_i_j_alpha_dict, R_i_alpha_dict, N_alpha_beta_dict, new_T_alpha,
# alpha=i)
# FIll out values here...
my_analysis.anchor_stats_list.append(anchor_stats)
smols.append(smol)
for i, anchor in enumerate(mymodel.anchors[:-1]):
smol1 = smols[i]
if i == 0:
smol2 = smols[i+1]
elberN_ij[(0,1)] = 1.0
# need to make sure that u and exp(-beta*W) match up
# on the edge.
smol1_edge_value = smol1.expWfunc(smol1.b, q=smol1.expWq)
elberR_i[0] = (smol2.u_q_forward + (1.0/smol1_edge_value)) / (smol2.J_forward)
elif i == mymodel.num_milestones-1:
elberN_ij[(mymodel.num_milestones-1,mymodel.num_milestones-2)] = 1.0
elberR_i[mymodel.num_milestones-1] = (smol1.u_q_backward) / (smol1.J_backward)
else:
smol2 = smols[i+1]
elberN_ij[(i,i+1)] = smol2.J_forward / (smol2.J_forward + smol1.J_backward)
elberN_ij[(i,i-1)] = smol1.J_backward / (smol2.J_forward + smol1.J_backward)
elberR_i[i] = (smol2.u_q_forward + smol1.u_q_backward) / (smol2.J_forward + smol1.J_backward)
my_analysis.mmvt_check_anchor_stats()
#my_analyze._calculate_equilibrium_probability()
#my_analyze._calculate_overall_statistics()
#my_analysis.extract_data()
my_analysis.fill_out_data_samples()
my_analysis.main_data_sample.pi_alpha = np.zeros(mymodel.num_anchors)
for i, anchor in enumerate(mymodel.anchors[:-1]):
my_analysis.main_data_sample.pi_alpha[i] = q_s[i] / np.sum(q_s)
my_analysis.fill_out_data_samples()
my_analysis.process_data_samples()
my_analysis.main_data_sample.Q = np.zeros((mymodel.num_milestones,
mymodel.num_milestones), dtype=np.longdouble)
elberQ = np.zeros((mymodel.num_milestones,
mymodel.num_milestones), dtype=np.longdouble)
for i in range(mymodel.num_milestones):
for j in range(mymodel.num_milestones):
if my_analysis.main_data_sample.R_i[i] == 0.0:
my_analysis.main_data_sample.Q[i,j] = 0.0
else:
my_analysis.main_data_sample.Q[i,j] \
= my_analysis.main_data_sample.N_ij[i,j] \
/ my_analysis.main_data_sample.R_i[i]
if elberR_i[i] > 0.0:
elberQ[i,j] = elberN_ij[i,j] / elberR_i[i]
for i in range(mymodel.num_milestones):
my_analysis.main_data_sample.Q[i][i] = \
-np.sum(my_analysis.main_data_sample.Q[i])
elberQ[i][i] = -np.sum(elberQ[i])
#my_analyze._rate_mat_to_prob_mat()
#print("my_analyze.Q:", my_analyze.Q)
#print("elberQ:", elberQ)
#print("my_analyze.K:", my_analyze.K)
#my_analyze.calculate_kinetics()
my_analysis.main_data_sample.calculate_kinetics()
mmvt_time = my_analysis.main_data_sample.MFPTs[(0,"bulk")]
#print("mmvt_time:", mmvt_time)
my_analysis.main_data_sample.Q = elberQ
my_analysis.main_data_sample.calculate_kinetics()
elber_time = my_analysis.main_data_sample.MFPTs[(0,"bulk")]
#print("elber_time:", elber_time)
a1 = 0.0
b1 = interval
a2 = interval
b2 = interval*num_anchors
smol1 = smoluchowski.Smoluchowski(a1, b1, func, n=n, D=D)
smol2 = smoluchowski.Smoluchowski(a2, b2, func, n=n, D=D)
q1 = smol1.expWq
q2 = smol2.expWq
k_backwards, k_forwards, T_alpha, N_backwards, N_forwards, R_i_backwards, \
R_i_forwards, N_ij_backwards, N_ij_forwards \
= smol2.compute_MMVT_kinetics_quantities()
J2 = q2 / (R_i_forwards + R_i_backwards)
correct_time = R_i_forwards + q1/J2
#print("correct_time:", correct_time)
print("Time predicted by Elber:", elber_time, "Time predicted by MMVT:",
mmvt_time, "Exact time:", correct_time)
""
x_s = np.arange(0.0, num_anchors, interval)
func_vals1 = np.zeros(num_anchors)
func_vals2 = np.zeros(num_anchors)
print("q_s:", q_s)
for i, x in enumerate(x_s):
print("i:", i, "my_analyze.pi_alpha[i]:", my_analyze.pi_alpha[i], "q_s[i]:", q_s[i] / np.sum(q_s))
func_vals1[i] = my_analyze.pi_alpha[i]
func_vals2[i] = q_s[i] / np.sum(q_s)
plt.plot(x_s, func_vals1, "g", x_s, func_vals2, "r")
plt.show()
""
return mmvt_time, elber_time, correct_time
def test_smoluchowski_solution_flat_1(tmp_path):
print("Constant PMF:")
mmvt_time, elber_time, true_time = make_smol_calculation(tmp_path)
assert np.isclose(mmvt_time, true_time, rtol=0.001)
assert np.isclose(elber_time, true_time, rtol=0.001)
print("linear PMF:")
func = smoluchowski.expW_linear
mmvt_time, elber_time, true_time = make_smol_calculation(tmp_path, func)
assert np.isclose(mmvt_time, true_time, rtol=0.001)
assert np.isclose(elber_time, true_time, rtol=0.001)
print("quadratic PMF:")
func = smoluchowski.expW_quadratic
mmvt_time, elber_time, true_time = make_smol_calculation(tmp_path, func)
assert np.isclose(mmvt_time, true_time, rtol=0.001)
assert np.isclose(elber_time, true_time, rtol=0.001)
""" | 38.651596 | 106 | 0.616321 |
import os
from collections import defaultdict
import numpy as np
import seekr2.modules.common_analyze as common_analyze
import seekr2.modules.mmvt_analyze as mmvt_analyze
import seekr2.analyze as analyze
import seekr2.modules.common_base as base
import seekr2.modules.mmvt_base as mmvt_base
import seekr2.tests.smoluchowski_system as smoluchowski
this_dir = os.path.dirname(os.path.realpath(__file__))
test_output_filename = os.path.join(this_dir, "test_analyze_outputfile.txt")
test_statistics_filename = os.path.join(this_dir, "test_analyze_statistics.txt")
def test_read_output_file():
N_i_j_alpha, R_i_alpha_list, R_i_alpha_average, \
R_i_alpha_std_dev, R_i_alpha_total, N_alpha_beta, \
T_alpha_list, T_alpha_average, T_alpha_std_dev, \
T_alpha_total, existing_lines \
= mmvt_analyze.openmm_read_output_file_list(
[test_output_filename])
N_i_j_alpha_dict1 = N_i_j_alpha
R_i_alpha_dict1 = R_i_alpha_total
N_alpha_beta_dict1 = N_alpha_beta
T_alpha1 = T_alpha_total
N_i_j_alpha_dict2 = {(1, 2): 52, (2, 1): 52}
R_i_alpha_dict2 = {1: 1658.696, 2: 198.912}
N_alpha_beta_dict2 = {1: 2423, 2: 98}
T_alpha2 = 1954.760
for key in N_i_j_alpha_dict1:
assert key in N_i_j_alpha_dict2
assert np.isclose(N_i_j_alpha_dict1[key], N_i_j_alpha_dict2[key])
for key in R_i_alpha_dict1:
assert key in R_i_alpha_dict2
assert np.isclose(R_i_alpha_dict1[key], R_i_alpha_dict2[key])
for key in N_alpha_beta_dict1:
assert key in N_alpha_beta_dict2
assert np.isclose(N_alpha_beta_dict1[key], N_alpha_beta_dict2[key])
assert np.isclose(T_alpha1, T_alpha2)
N_i_j_alpha, R_i_alpha_list, R_i_alpha_average, \
R_i_alpha_std_dev, R_i_alpha_total, N_alpha_beta, \
T_alpha_list, T_alpha_average, T_alpha_std_dev, \
T_alpha_total, existing_lines \
= mmvt_analyze.openmm_read_output_file_list([test_output_filename,
test_output_filename],
skip_restart_check=True)
N_i_j_alpha_dict1 = N_i_j_alpha
R_i_alpha_dict1 = R_i_alpha_total
N_alpha_beta_dict1 = N_alpha_beta
T_alpha1 = T_alpha_total
for key in N_i_j_alpha_dict1:
assert key in N_i_j_alpha_dict2
assert np.isclose(N_i_j_alpha_dict1[key], 2*N_i_j_alpha_dict2[key],
rtol=0.01)
for key in N_alpha_beta_dict1:
assert key in N_alpha_beta_dict2
assert np.isclose(N_alpha_beta_dict1[key], 2*N_alpha_beta_dict2[key],
rtol=0.01)
return
def test_minor2d():
    """common_analyze.minor2d(A, i, j) should drop row i and column j.

    The original assertions compared ``X.all() == Y.all()``, which reduces
    each array to a single truthiness flag -- for all-nonzero arrays both
    sides are True, so the test could never fail. Use element-wise
    equality instead.
    """
    A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    assert np.array_equal(common_analyze.minor2d(A, 1, 1),
                          np.array([[1, 3], [7, 9]]))
    assert np.array_equal(common_analyze.minor2d(A, 2, 2),
                          np.array([[1, 2], [4, 5]]))
    # NOTE(review): the old expected value [[2, 8], [3, 9]] is the
    # transpose of the minor (the broken .all() comparison masked it);
    # deleting row 1 and column 0 of A yields [[2, 3], [8, 9]].
    assert np.array_equal(common_analyze.minor2d(A, 1, 0),
                          np.array([[2, 3], [8, 9]]))
    return
def test_minor1d():
    """common_analyze.minor1d(v, i) should drop element i of v.

    The original assertions compared ``X.all() == Y.all()``, which reduces
    each array to a single truthiness flag and passes for any all-nonzero
    arrays; compare element-wise instead.
    """
    A = np.array([1, 2, 3])
    assert np.array_equal(common_analyze.minor1d(A, 1), np.array([1, 3]))
    assert np.array_equal(common_analyze.minor1d(A, 0), np.array([2, 3]))
    assert np.array_equal(common_analyze.minor1d(A, 2), np.array([1, 2]))
    return
def test_pretty_string_value_error():
mystr = common_analyze.pretty_string_value_error(
5.6e-2, 2.0e-3, error_digits=1, use_unicode=False)
expectedstr = "5.6 +/- 0.2 * 10^-02"
assert(mystr == expectedstr)
mystr = common_analyze.pretty_string_value_error(
5.6e-2, 2.0e-1, error_digits=1, use_unicode=False)
expectedstr = "5.6 +/- 20.0 * 10^-02"
assert(mystr == expectedstr)
mystr = common_analyze.pretty_string_value_error(
1.23456789e8, 4.5678e5, error_digits=2, use_unicode=False)
expectedstr = "1.2346 +/- 0.0046 * 10^+08"
assert(mystr == expectedstr)
def make_fake_output_file_osc(anchor, tmp_path, timestep=1.0):
num_steps = 50
mmvt_output_filename = os.path.join(
tmp_path, anchor.name, "prod",
"%s%d.%s" % (mmvt_base.OPENMMVT_BASENAME, 1,
mmvt_base.OPENMMVT_EXTENSION))
with open(mmvt_output_filename, "w") as f:
if anchor.index == 0:
for i in range(num_steps+1):
line = "%d,%d,%f\n" % (1, i, i*timestep)
f.write(line)
else:
for i in range(num_steps+1):
if (i % 2) == 0:
line = "%d,%d,%f\n" % (2, i, i*timestep)
f.write(line)
else:
line = "%d,%d,%f\n" % (1, i, i*timestep)
f.write(line)
return
def make_fake_output_file2(anchor, tmp_path, ups=1, downs=9, timestep=1.0):
num_steps = 50
total = ups + downs
mmvt_output_filename = os.path.join(
tmp_path, anchor.name, "prod",
"%s%d.%s" % (mmvt_base.OPENMMVT_BASENAME, 1,
mmvt_base.OPENMMVT_EXTENSION))
with open(mmvt_output_filename, "w") as f:
if anchor.index == 0:
for i in range(num_steps+1):
line = "%d,%d,%f\n" % (1, i, i*timestep)
f.write(line)
else:
for i in range(num_steps+1):
if (i % total) < ups:
line = "%d,%d,%f\n" % (2, i, i*timestep)
f.write(line)
else:
line = "%d,%d,%f\n" % (1, i, i*timestep)
f.write(line)
return
def test_solve_rate_matrix():
Q = np.array(
[[-0.5, 0.5, 0.0, 0.0],
[0.1, -0.3, 0.2, 0.0],
[0.0, 0.15, -0.3, 0.15],
[0.0, 0.0, 0.3, -0.4]])
K = np.zeros(Q.shape, dtype=np.longdouble)
for i in range(Q.shape[0]):
for j in range(Q.shape[0]):
if i == j:
K[i,j] = 0.0
else:
K[i,j] = -Q[i,j] / Q[i,i]
for i in range(K.shape[0]-1):
my_sum = sum(K[i,:])
for j in range(K.shape[0]):
K[i,j] = K[i,j] / my_sum
test_times_1 = common_analyze.solve_rate_matrix(Q)
one_vector = np.ones((Q.shape[0]))
test_times_2 = np.linalg.solve(Q, -one_vector)
error = np.linalg.norm(test_times_2 - test_times_1)
assert error < 1e-8
return
| true | true |
f7117e9d65d3037430dcb359a8dce4e7f2d80980 | 2,577 | py | Python | oamap/backend/numpyfile.py | Jayd-1234/oamap | b10ddab18ca2bf71171c5489c1b748a60ec962fb | [
"BSD-3-Clause"
] | null | null | null | oamap/backend/numpyfile.py | Jayd-1234/oamap | b10ddab18ca2bf71171c5489c1b748a60ec962fb | [
"BSD-3-Clause"
] | null | null | null | oamap/backend/numpyfile.py | Jayd-1234/oamap | b10ddab18ca2bf71171c5489c1b748a60ec962fb | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2017, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import numpy
import oamap.dataset
import oamap.database
class NumpyFileBackend(oamap.database.FilesystemBackend):
    """Filesystem backend that persists partition arrays as .npy files."""

    def __init__(self, directory):
        super(NumpyFileBackend, self).__init__(directory, arraysuffix=".npy")

    def instantiate(self, partitionid):
        """Return a NumpyArrays accessor bound to *partitionid*."""
        def load_path(name):
            return self.fullname(partitionid, name, create=False)

        def store_path(name):
            return self.fullname(partitionid, name, create=True)

        return NumpyArrays(load_path, store_path)
class NumpyArrays(object):
    """Dict-like store mapping array names to .npy files on disk."""

    def __init__(self, loadname, storename):
        # Callables that translate an array name into a filesystem path
        # for reading and writing, respectively.
        self._loadname = loadname
        self._storename = storename

    def __getitem__(self, name):
        """Load and return the array saved under *name*."""
        path = self._loadname(name)
        return numpy.load(path)

    def __setitem__(self, name, value):
        """Persist *value* as a .npy file under *name*."""
        path = self._storename(name)
        numpy.save(path, value)
class NumpyFileDatabase(oamap.database.FilesystemDatabase):
    """Filesystem database whose single backend stores arrays as .npy files."""

    def __init__(self, directory, namespace=""):
        backend_map = {namespace: NumpyFileBackend(directory)}
        super(NumpyFileDatabase, self).__init__(
            directory, backends=backend_map, namespace=namespace)
| 42.95 | 130 | 0.756306 |
import os
import numpy
import oamap.dataset
import oamap.database
class NumpyFileBackend(oamap.database.FilesystemBackend):
    """Filesystem backend that persists partition arrays as .npy files."""
    def __init__(self, directory):
        super(NumpyFileBackend, self).__init__(directory, arraysuffix=".npy")
    def instantiate(self, partitionid):
        """Return a NumpyArrays accessor bound to *partitionid*."""
        return NumpyArrays(lambda name: self.fullname(partitionid, name, create=False),
                           lambda name: self.fullname(partitionid, name, create=True))
class NumpyArrays(object):
    """Dict-like store mapping array names to .npy files on disk."""
    def __init__(self, loadname, storename):
        # Callables mapping an array name to a read/write filesystem path.
        self._loadname = loadname
        self._storename = storename
    def __getitem__(self, name):
        """Load and return the array saved under *name*."""
        return numpy.load(self._loadname(name))
    def __setitem__(self, name, value):
        """Persist *value* as a .npy file under *name*."""
        numpy.save(self._storename(name), value)
class NumpyFileDatabase(oamap.database.FilesystemDatabase):
    """Filesystem database whose single backend stores arrays as .npy files."""
    def __init__(self, directory, namespace=""):
        super(NumpyFileDatabase, self).__init__(directory, backends={namespace: NumpyFileBackend(directory)}, namespace=namespace)
| true | true |
f7117f6c404b1da6555aeeae0b08a7f9be81f25d | 89,437 | py | Python | intersight/model/softwarerepository_file_server.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 5 | 2021-12-16T15:13:32.000Z | 2022-03-29T16:09:54.000Z | intersight/model/softwarerepository_file_server.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 4 | 2022-01-25T19:05:51.000Z | 2022-03-29T20:18:37.000Z | intersight/model/softwarerepository_file_server.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 2 | 2020-07-07T15:01:08.000Z | 2022-01-31T04:27:35.000Z | """
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import the related model classes and publish them via globals().

    Deferring these imports until first use (rather than at module import
    time) is the generated-code pattern for keeping the interdependent
    model modules loadable -- presumably to avoid circular imports.
    """
    from intersight.model.mo_base_complex_type import MoBaseComplexType
    from intersight.model.softwarerepository_appliance_upload import SoftwarerepositoryApplianceUpload
    from intersight.model.softwarerepository_cifs_server import SoftwarerepositoryCifsServer
    from intersight.model.softwarerepository_http_server import SoftwarerepositoryHttpServer
    from intersight.model.softwarerepository_local_machine import SoftwarerepositoryLocalMachine
    from intersight.model.softwarerepository_nfs_server import SoftwarerepositoryNfsServer
    globals()['MoBaseComplexType'] = MoBaseComplexType
    globals()['SoftwarerepositoryApplianceUpload'] = SoftwarerepositoryApplianceUpload
    globals()['SoftwarerepositoryCifsServer'] = SoftwarerepositoryCifsServer
    globals()['SoftwarerepositoryHttpServer'] = SoftwarerepositoryHttpServer
    globals()['SoftwarerepositoryLocalMachine'] = SoftwarerepositoryLocalMachine
    globals()['SoftwarerepositoryNfsServer'] = SoftwarerepositoryNfsServer
class SoftwarerepositoryFileServer(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'ACCESS.ADDRESSTYPE': "access.AddressType",
'ADAPTER.ADAPTERCONFIG': "adapter.AdapterConfig",
'ADAPTER.DCEINTERFACESETTINGS': "adapter.DceInterfaceSettings",
'ADAPTER.ETHSETTINGS': "adapter.EthSettings",
'ADAPTER.FCSETTINGS': "adapter.FcSettings",
'ADAPTER.PORTCHANNELSETTINGS': "adapter.PortChannelSettings",
'APPLIANCE.APISTATUS': "appliance.ApiStatus",
'APPLIANCE.CERTRENEWALPHASE': "appliance.CertRenewalPhase",
'APPLIANCE.KEYVALUEPAIR': "appliance.KeyValuePair",
'APPLIANCE.STATUSCHECK': "appliance.StatusCheck",
'ASSET.ADDRESSINFORMATION': "asset.AddressInformation",
'ASSET.APIKEYCREDENTIAL': "asset.ApiKeyCredential",
'ASSET.CLIENTCERTIFICATECREDENTIAL': "asset.ClientCertificateCredential",
'ASSET.CLOUDCONNECTION': "asset.CloudConnection",
'ASSET.CONNECTIONCONTROLMESSAGE': "asset.ConnectionControlMessage",
'ASSET.CONTRACTINFORMATION': "asset.ContractInformation",
'ASSET.CUSTOMERINFORMATION': "asset.CustomerInformation",
'ASSET.DEPLOYMENTALARMINFO': "asset.DeploymentAlarmInfo",
'ASSET.DEPLOYMENTDEVICEALARMINFO': "asset.DeploymentDeviceAlarmInfo",
'ASSET.DEPLOYMENTDEVICEINFORMATION': "asset.DeploymentDeviceInformation",
'ASSET.DEVICEINFORMATION': "asset.DeviceInformation",
'ASSET.DEVICESTATISTICS': "asset.DeviceStatistics",
'ASSET.DEVICETRANSACTION': "asset.DeviceTransaction",
'ASSET.GLOBALULTIMATE': "asset.GlobalUltimate",
'ASSET.HTTPCONNECTION': "asset.HttpConnection",
'ASSET.INTERSIGHTDEVICECONNECTORCONNECTION': "asset.IntersightDeviceConnectorConnection",
'ASSET.METERINGTYPE': "asset.MeteringType",
'ASSET.NEWRELICCREDENTIAL': "asset.NewRelicCredential",
'ASSET.NOAUTHENTICATIONCREDENTIAL': "asset.NoAuthenticationCredential",
'ASSET.OAUTHBEARERTOKENCREDENTIAL': "asset.OauthBearerTokenCredential",
'ASSET.OAUTHCLIENTIDSECRETCREDENTIAL': "asset.OauthClientIdSecretCredential",
'ASSET.ORCHESTRATIONHITACHIVIRTUALSTORAGEPLATFORMOPTIONS': "asset.OrchestrationHitachiVirtualStoragePlatformOptions",
'ASSET.ORCHESTRATIONSERVICE': "asset.OrchestrationService",
'ASSET.PARENTCONNECTIONSIGNATURE': "asset.ParentConnectionSignature",
'ASSET.PRIVATEKEYCREDENTIAL': "asset.PrivateKeyCredential",
'ASSET.PRODUCTINFORMATION': "asset.ProductInformation",
'ASSET.SERVICENOWCREDENTIAL': "asset.ServiceNowCredential",
'ASSET.SSHCONNECTION': "asset.SshConnection",
'ASSET.SUDIINFO': "asset.SudiInfo",
'ASSET.TARGETKEY': "asset.TargetKey",
'ASSET.TARGETSIGNATURE': "asset.TargetSignature",
'ASSET.TARGETSTATUSDETAILS': "asset.TargetStatusDetails",
'ASSET.TERRAFORMINTEGRATIONSERVICE': "asset.TerraformIntegrationService",
'ASSET.TERRAFORMINTEGRATIONTERRAFORMAGENTOPTIONS': "asset.TerraformIntegrationTerraformAgentOptions",
'ASSET.TERRAFORMINTEGRATIONTERRAFORMCLOUDOPTIONS': "asset.TerraformIntegrationTerraformCloudOptions",
'ASSET.USERNAMEPASSWORDCREDENTIAL': "asset.UsernamePasswordCredential",
'ASSET.VIRTUALIZATIONAMAZONWEBSERVICEOPTIONS': "asset.VirtualizationAmazonWebServiceOptions",
'ASSET.VIRTUALIZATIONSERVICE': "asset.VirtualizationService",
'ASSET.VMHOST': "asset.VmHost",
'ASSET.WORKLOADOPTIMIZERAMAZONWEBSERVICESBILLINGOPTIONS': "asset.WorkloadOptimizerAmazonWebServicesBillingOptions",
'ASSET.WORKLOADOPTIMIZERDYNATRACEOPTIONS': "asset.WorkloadOptimizerDynatraceOptions",
'ASSET.WORKLOADOPTIMIZERHYPERVOPTIONS': "asset.WorkloadOptimizerHypervOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREAPPLICATIONINSIGHTSOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureApplicationInsightsOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREENTERPRISEAGREEMENTOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureEnterpriseAgreementOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZURESERVICEPRINCIPALOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureServicePrincipalOptions",
'ASSET.WORKLOADOPTIMIZERNEWRELICOPTIONS': "asset.WorkloadOptimizerNewRelicOptions",
'ASSET.WORKLOADOPTIMIZEROPENSTACKOPTIONS': "asset.WorkloadOptimizerOpenStackOptions",
'ASSET.WORKLOADOPTIMIZERREDHATOPENSTACKOPTIONS': "asset.WorkloadOptimizerRedHatOpenStackOptions",
'ASSET.WORKLOADOPTIMIZERSERVICE': "asset.WorkloadOptimizerService",
'ASSET.WORKLOADOPTIMIZERVMWAREVCENTEROPTIONS': "asset.WorkloadOptimizerVmwareVcenterOptions",
'BOOT.BOOTLOADER': "boot.Bootloader",
'BOOT.ISCSI': "boot.Iscsi",
'BOOT.LOCALCDD': "boot.LocalCdd",
'BOOT.LOCALDISK': "boot.LocalDisk",
'BOOT.NVME': "boot.Nvme",
'BOOT.PCHSTORAGE': "boot.PchStorage",
'BOOT.PXE': "boot.Pxe",
'BOOT.SAN': "boot.San",
'BOOT.SDCARD': "boot.SdCard",
'BOOT.UEFISHELL': "boot.UefiShell",
'BOOT.USB': "boot.Usb",
'BOOT.VIRTUALMEDIA': "boot.VirtualMedia",
'BULK.HTTPHEADER': "bulk.HttpHeader",
'BULK.RESTRESULT': "bulk.RestResult",
'BULK.RESTSUBREQUEST': "bulk.RestSubRequest",
'CAPABILITY.PORTRANGE': "capability.PortRange",
'CAPABILITY.SWITCHNETWORKLIMITS': "capability.SwitchNetworkLimits",
'CAPABILITY.SWITCHSTORAGELIMITS': "capability.SwitchStorageLimits",
'CAPABILITY.SWITCHSYSTEMLIMITS': "capability.SwitchSystemLimits",
'CAPABILITY.SWITCHINGMODECAPABILITY': "capability.SwitchingModeCapability",
'CERTIFICATEMANAGEMENT.IMC': "certificatemanagement.Imc",
'CLOUD.AVAILABILITYZONE': "cloud.AvailabilityZone",
'CLOUD.BILLINGUNIT': "cloud.BillingUnit",
'CLOUD.CLOUDREGION': "cloud.CloudRegion",
'CLOUD.CLOUDTAG': "cloud.CloudTag",
'CLOUD.CUSTOMATTRIBUTES': "cloud.CustomAttributes",
'CLOUD.IMAGEREFERENCE': "cloud.ImageReference",
'CLOUD.INSTANCETYPE': "cloud.InstanceType",
'CLOUD.NETWORKACCESSCONFIG': "cloud.NetworkAccessConfig",
'CLOUD.NETWORKADDRESS': "cloud.NetworkAddress",
'CLOUD.NETWORKINSTANCEATTACHMENT': "cloud.NetworkInstanceAttachment",
'CLOUD.NETWORKINTERFACEATTACHMENT': "cloud.NetworkInterfaceAttachment",
'CLOUD.SECURITYGROUPRULE': "cloud.SecurityGroupRule",
'CLOUD.TFCWORKSPACEVARIABLES': "cloud.TfcWorkspaceVariables",
'CLOUD.VOLUMEATTACHMENT': "cloud.VolumeAttachment",
'CLOUD.VOLUMEINSTANCEATTACHMENT': "cloud.VolumeInstanceAttachment",
'CLOUD.VOLUMEIOPSINFO': "cloud.VolumeIopsInfo",
'CLOUD.VOLUMETYPE': "cloud.VolumeType",
'CMRF.CMRF': "cmrf.CmRf",
'COMM.IPV4ADDRESSBLOCK': "comm.IpV4AddressBlock",
'COMM.IPV4INTERFACE': "comm.IpV4Interface",
'COMM.IPV6INTERFACE': "comm.IpV6Interface",
'COMPUTE.ALARMSUMMARY': "compute.AlarmSummary",
'COMPUTE.IPADDRESS': "compute.IpAddress",
'COMPUTE.PERSISTENTMEMORYMODULE': "compute.PersistentMemoryModule",
'COMPUTE.PERSISTENTMEMORYOPERATION': "compute.PersistentMemoryOperation",
'COMPUTE.SERVERCONFIG': "compute.ServerConfig",
'COMPUTE.SERVEROPSTATUS': "compute.ServerOpStatus",
'COMPUTE.STORAGECONTROLLEROPERATION': "compute.StorageControllerOperation",
'COMPUTE.STORAGEPHYSICALDRIVE': "compute.StoragePhysicalDrive",
'COMPUTE.STORAGEPHYSICALDRIVEOPERATION': "compute.StoragePhysicalDriveOperation",
'COMPUTE.STORAGEVIRTUALDRIVE': "compute.StorageVirtualDrive",
'COMPUTE.STORAGEVIRTUALDRIVEOPERATION': "compute.StorageVirtualDriveOperation",
'COND.ALARMSUMMARY': "cond.AlarmSummary",
'CONNECTOR.CLOSESTREAMMESSAGE': "connector.CloseStreamMessage",
'CONNECTOR.COMMANDCONTROLMESSAGE': "connector.CommandControlMessage",
'CONNECTOR.COMMANDTERMINALSTREAM': "connector.CommandTerminalStream",
'CONNECTOR.EXPECTPROMPT': "connector.ExpectPrompt",
'CONNECTOR.FETCHSTREAMMESSAGE': "connector.FetchStreamMessage",
'CONNECTOR.FILECHECKSUM': "connector.FileChecksum",
'CONNECTOR.FILEMESSAGE': "connector.FileMessage",
'CONNECTOR.HTTPREQUEST': "connector.HttpRequest",
'CONNECTOR.SSHCONFIG': "connector.SshConfig",
'CONNECTOR.SSHMESSAGE': "connector.SshMessage",
'CONNECTOR.STARTSTREAM': "connector.StartStream",
'CONNECTOR.STARTSTREAMFROMDEVICE': "connector.StartStreamFromDevice",
'CONNECTOR.STREAMACKNOWLEDGE': "connector.StreamAcknowledge",
'CONNECTOR.STREAMINPUT': "connector.StreamInput",
'CONNECTOR.STREAMKEEPALIVE': "connector.StreamKeepalive",
'CONNECTOR.TARGETCHANGEMESSAGE': "connector.TargetChangeMessage",
'CONNECTOR.URL': "connector.Url",
'CONNECTOR.WINRMREQUEST': "connector.WinrmRequest",
'CONNECTOR.XMLAPIMESSAGE': "connector.XmlApiMessage",
'CONNECTORPACK.CONNECTORPACKUPDATE': "connectorpack.ConnectorPackUpdate",
'CONTENT.COMPLEXTYPE': "content.ComplexType",
'CONTENT.PARAMETER': "content.Parameter",
'CONTENT.TEXTPARAMETER': "content.TextParameter",
'CONVERGEDINFRA.ALARMSUMMARY': "convergedinfra.AlarmSummary",
'CONVERGEDINFRA.COMPLIANCESUMMARY': "convergedinfra.ComplianceSummary",
'CONVERGEDINFRA.PODSUMMARY': "convergedinfra.PodSummary",
'CRD.CUSTOMRESOURCECONFIGPROPERTY': "crd.CustomResourceConfigProperty",
'EQUIPMENT.IOCARDIDENTITY': "equipment.IoCardIdentity",
'FABRIC.LLDPSETTINGS': "fabric.LldpSettings",
'FABRIC.MACAGINGSETTINGS': "fabric.MacAgingSettings",
'FABRIC.PORTIDENTIFIER': "fabric.PortIdentifier",
'FABRIC.QOSCLASS': "fabric.QosClass",
'FABRIC.UDLDGLOBALSETTINGS': "fabric.UdldGlobalSettings",
'FABRIC.UDLDSETTINGS': "fabric.UdldSettings",
'FABRIC.VLANSETTINGS': "fabric.VlanSettings",
'FCPOOL.BLOCK': "fcpool.Block",
'FEEDBACK.FEEDBACKDATA': "feedback.FeedbackData",
'FIRMWARE.CHASSISUPGRADEIMPACT': "firmware.ChassisUpgradeImpact",
'FIRMWARE.CIFSSERVER': "firmware.CifsServer",
'FIRMWARE.COMPONENTIMPACT': "firmware.ComponentImpact",
'FIRMWARE.COMPONENTMETA': "firmware.ComponentMeta",
'FIRMWARE.DIRECTDOWNLOAD': "firmware.DirectDownload",
'FIRMWARE.FABRICUPGRADEIMPACT': "firmware.FabricUpgradeImpact",
'FIRMWARE.FIRMWAREINVENTORY': "firmware.FirmwareInventory",
'FIRMWARE.HTTPSERVER': "firmware.HttpServer",
'FIRMWARE.INCLUDECOMPONENTLISTTYPE': "firmware.IncludeComponentListType",
'FIRMWARE.NETWORKSHARE': "firmware.NetworkShare",
'FIRMWARE.NFSSERVER': "firmware.NfsServer",
'FIRMWARE.SERVERUPGRADEIMPACT': "firmware.ServerUpgradeImpact",
'FORECAST.MODEL': "forecast.Model",
'HCL.CONSTRAINT': "hcl.Constraint",
'HCL.FIRMWARE': "hcl.Firmware",
'HCL.HARDWARECOMPATIBILITYPROFILE': "hcl.HardwareCompatibilityProfile",
'HCL.PRODUCT': "hcl.Product",
'HYPERFLEX.ALARMSUMMARY': "hyperflex.AlarmSummary",
'HYPERFLEX.APPSETTINGCONSTRAINT': "hyperflex.AppSettingConstraint",
'HYPERFLEX.BACKUPPOLICYSETTINGS': "hyperflex.BackupPolicySettings",
'HYPERFLEX.DATASTOREINFO': "hyperflex.DatastoreInfo",
'HYPERFLEX.ENTITYREFERENCE': "hyperflex.EntityReference",
'HYPERFLEX.ERRORSTACK': "hyperflex.ErrorStack",
'HYPERFLEX.FEATURELIMITENTRY': "hyperflex.FeatureLimitEntry",
'HYPERFLEX.FILEPATH': "hyperflex.FilePath",
'HYPERFLEX.HEALTHCHECKSCRIPTINFO': "hyperflex.HealthCheckScriptInfo",
'HYPERFLEX.HXHOSTMOUNTSTATUSDT': "hyperflex.HxHostMountStatusDt",
'HYPERFLEX.HXLICENSEAUTHORIZATIONDETAILSDT': "hyperflex.HxLicenseAuthorizationDetailsDt",
'HYPERFLEX.HXLINKDT': "hyperflex.HxLinkDt",
'HYPERFLEX.HXNETWORKADDRESSDT': "hyperflex.HxNetworkAddressDt",
'HYPERFLEX.HXPLATFORMDATASTORECONFIGDT': "hyperflex.HxPlatformDatastoreConfigDt",
'HYPERFLEX.HXREGISTRATIONDETAILSDT': "hyperflex.HxRegistrationDetailsDt",
'HYPERFLEX.HXRESILIENCYINFODT': "hyperflex.HxResiliencyInfoDt",
'HYPERFLEX.HXSITEDT': "hyperflex.HxSiteDt",
'HYPERFLEX.HXUUIDDT': "hyperflex.HxUuIdDt",
'HYPERFLEX.HXZONEINFODT': "hyperflex.HxZoneInfoDt",
'HYPERFLEX.HXZONERESILIENCYINFODT': "hyperflex.HxZoneResiliencyInfoDt",
'HYPERFLEX.IPADDRRANGE': "hyperflex.IpAddrRange",
'HYPERFLEX.LOGICALAVAILABILITYZONE': "hyperflex.LogicalAvailabilityZone",
'HYPERFLEX.MACADDRPREFIXRANGE': "hyperflex.MacAddrPrefixRange",
'HYPERFLEX.MAPCLUSTERIDTOPROTECTIONINFO': "hyperflex.MapClusterIdToProtectionInfo",
'HYPERFLEX.MAPCLUSTERIDTOSTSNAPSHOTPOINT': "hyperflex.MapClusterIdToStSnapshotPoint",
'HYPERFLEX.MAPUUIDTOTRACKEDDISK': "hyperflex.MapUuidToTrackedDisk",
'HYPERFLEX.NAMEDVLAN': "hyperflex.NamedVlan",
'HYPERFLEX.NAMEDVSAN': "hyperflex.NamedVsan",
'HYPERFLEX.PORTTYPETOPORTNUMBERMAP': "hyperflex.PortTypeToPortNumberMap",
'HYPERFLEX.PROTECTIONINFO': "hyperflex.ProtectionInfo",
'HYPERFLEX.REPLICATIONCLUSTERREFERENCETOSCHEDULE': "hyperflex.ReplicationClusterReferenceToSchedule",
'HYPERFLEX.REPLICATIONPEERINFO': "hyperflex.ReplicationPeerInfo",
'HYPERFLEX.REPLICATIONPLATDATASTORE': "hyperflex.ReplicationPlatDatastore",
'HYPERFLEX.REPLICATIONPLATDATASTOREPAIR': "hyperflex.ReplicationPlatDatastorePair",
'HYPERFLEX.REPLICATIONSCHEDULE': "hyperflex.ReplicationSchedule",
'HYPERFLEX.REPLICATIONSTATUS': "hyperflex.ReplicationStatus",
'HYPERFLEX.RPOSTATUS': "hyperflex.RpoStatus",
'HYPERFLEX.SERVERFIRMWAREVERSIONINFO': "hyperflex.ServerFirmwareVersionInfo",
'HYPERFLEX.SERVERMODELENTRY': "hyperflex.ServerModelEntry",
'HYPERFLEX.SNAPSHOTFILES': "hyperflex.SnapshotFiles",
'HYPERFLEX.SNAPSHOTINFOBRIEF': "hyperflex.SnapshotInfoBrief",
'HYPERFLEX.SNAPSHOTPOINT': "hyperflex.SnapshotPoint",
'HYPERFLEX.SNAPSHOTSTATUS': "hyperflex.SnapshotStatus",
'HYPERFLEX.STPLATFORMCLUSTERHEALINGINFO': "hyperflex.StPlatformClusterHealingInfo",
'HYPERFLEX.STPLATFORMCLUSTERRESILIENCYINFO': "hyperflex.StPlatformClusterResiliencyInfo",
'HYPERFLEX.SUMMARY': "hyperflex.Summary",
'HYPERFLEX.TRACKEDDISK': "hyperflex.TrackedDisk",
'HYPERFLEX.TRACKEDFILE': "hyperflex.TrackedFile",
'HYPERFLEX.VIRTUALMACHINE': "hyperflex.VirtualMachine",
'HYPERFLEX.VIRTUALMACHINERUNTIMEINFO': "hyperflex.VirtualMachineRuntimeInfo",
'HYPERFLEX.VMPROTECTIONSPACEUSAGE': "hyperflex.VmProtectionSpaceUsage",
'HYPERFLEX.WWXNPREFIXRANGE': "hyperflex.WwxnPrefixRange",
'I18N.MESSAGE': "i18n.Message",
'I18N.MESSAGEPARAM': "i18n.MessageParam",
'IAAS.LICENSEKEYSINFO': "iaas.LicenseKeysInfo",
'IAAS.LICENSEUTILIZATIONINFO': "iaas.LicenseUtilizationInfo",
'IAAS.WORKFLOWSTEPS': "iaas.WorkflowSteps",
'IAM.ACCOUNTPERMISSIONS': "iam.AccountPermissions",
'IAM.CLIENTMETA': "iam.ClientMeta",
'IAM.ENDPOINTPASSWORDPROPERTIES': "iam.EndPointPasswordProperties",
'IAM.FEATUREDEFINITION': "iam.FeatureDefinition",
'IAM.GROUPPERMISSIONTOROLES': "iam.GroupPermissionToRoles",
'IAM.LDAPBASEPROPERTIES': "iam.LdapBaseProperties",
'IAM.LDAPDNSPARAMETERS': "iam.LdapDnsParameters",
'IAM.PERMISSIONREFERENCE': "iam.PermissionReference",
'IAM.PERMISSIONTOROLES': "iam.PermissionToRoles",
'IAM.RULE': "iam.Rule",
'IAM.SAMLSPCONNECTION': "iam.SamlSpConnection",
'IAM.SSOSESSIONATTRIBUTES': "iam.SsoSessionAttributes",
'IMCCONNECTOR.WEBUIMESSAGE': "imcconnector.WebUiMessage",
'INFRA.HARDWAREINFO': "infra.HardwareInfo",
'INFRA.METADATA': "infra.MetaData",
'INVENTORY.INVENTORYMO': "inventory.InventoryMo",
'INVENTORY.UEMINFO': "inventory.UemInfo",
'IPPOOL.IPV4BLOCK': "ippool.IpV4Block",
'IPPOOL.IPV4CONFIG': "ippool.IpV4Config",
'IPPOOL.IPV6BLOCK': "ippool.IpV6Block",
'IPPOOL.IPV6CONFIG': "ippool.IpV6Config",
'IQNPOOL.IQNSUFFIXBLOCK': "iqnpool.IqnSuffixBlock",
'KUBERNETES.ACTIONINFO': "kubernetes.ActionInfo",
'KUBERNETES.ADDON': "kubernetes.Addon",
'KUBERNETES.ADDONCONFIGURATION': "kubernetes.AddonConfiguration",
'KUBERNETES.BAREMETALNETWORKINFO': "kubernetes.BaremetalNetworkInfo",
'KUBERNETES.CALICOCONFIG': "kubernetes.CalicoConfig",
'KUBERNETES.CLUSTERCERTIFICATECONFIGURATION': "kubernetes.ClusterCertificateConfiguration",
'KUBERNETES.CLUSTERMANAGEMENTCONFIG': "kubernetes.ClusterManagementConfig",
'KUBERNETES.CONFIGURATION': "kubernetes.Configuration",
'KUBERNETES.DAEMONSETSTATUS': "kubernetes.DaemonSetStatus",
'KUBERNETES.DEPLOYMENTSTATUS': "kubernetes.DeploymentStatus",
'KUBERNETES.ESSENTIALADDON': "kubernetes.EssentialAddon",
'KUBERNETES.ESXIVIRTUALMACHINEINFRACONFIG': "kubernetes.EsxiVirtualMachineInfraConfig",
'KUBERNETES.ETHERNET': "kubernetes.Ethernet",
'KUBERNETES.ETHERNETMATCHER': "kubernetes.EthernetMatcher",
'KUBERNETES.HYPERFLEXAPVIRTUALMACHINEINFRACONFIG': "kubernetes.HyperFlexApVirtualMachineInfraConfig",
'KUBERNETES.INGRESSSTATUS': "kubernetes.IngressStatus",
'KUBERNETES.INSTANCETYPEDETAILS': "kubernetes.InstanceTypeDetails",
'KUBERNETES.IPV4CONFIG': "kubernetes.IpV4Config",
'KUBERNETES.KEYVALUE': "kubernetes.KeyValue",
'KUBERNETES.LOADBALANCER': "kubernetes.LoadBalancer",
'KUBERNETES.NETWORKINTERFACESPEC': "kubernetes.NetworkInterfaceSpec",
'KUBERNETES.NODEADDRESS': "kubernetes.NodeAddress",
'KUBERNETES.NODEGROUPLABEL': "kubernetes.NodeGroupLabel",
'KUBERNETES.NODEGROUPTAINT': "kubernetes.NodeGroupTaint",
'KUBERNETES.NODEINFO': "kubernetes.NodeInfo",
'KUBERNETES.NODESPEC': "kubernetes.NodeSpec",
'KUBERNETES.NODESTATUS': "kubernetes.NodeStatus",
'KUBERNETES.OBJECTMETA': "kubernetes.ObjectMeta",
'KUBERNETES.OVSBOND': "kubernetes.OvsBond",
'KUBERNETES.PODSTATUS': "kubernetes.PodStatus",
'KUBERNETES.PROXYCONFIG': "kubernetes.ProxyConfig",
'KUBERNETES.ROUTE': "kubernetes.Route",
'KUBERNETES.SERVICESTATUS': "kubernetes.ServiceStatus",
'KUBERNETES.STATEFULSETSTATUS': "kubernetes.StatefulSetStatus",
'KUBERNETES.TAINT': "kubernetes.Taint",
'MACPOOL.BLOCK': "macpool.Block",
'MEMORY.PERSISTENTMEMORYGOAL': "memory.PersistentMemoryGoal",
'MEMORY.PERSISTENTMEMORYLOCALSECURITY': "memory.PersistentMemoryLocalSecurity",
'MEMORY.PERSISTENTMEMORYLOGICALNAMESPACE': "memory.PersistentMemoryLogicalNamespace",
'META.ACCESSPRIVILEGE': "meta.AccessPrivilege",
'META.DISPLAYNAMEDEFINITION': "meta.DisplayNameDefinition",
'META.IDENTITYDEFINITION': "meta.IdentityDefinition",
'META.PROPDEFINITION': "meta.PropDefinition",
'META.RELATIONSHIPDEFINITION': "meta.RelationshipDefinition",
'MO.MOREF': "mo.MoRef",
'MO.TAG': "mo.Tag",
'MO.VERSIONCONTEXT': "mo.VersionContext",
'NIAAPI.DETAIL': "niaapi.Detail",
'NIAAPI.NEWRELEASEDETAIL': "niaapi.NewReleaseDetail",
'NIAAPI.REVISIONINFO': "niaapi.RevisionInfo",
'NIAAPI.SOFTWAREREGEX': "niaapi.SoftwareRegex",
'NIAAPI.VERSIONREGEXPLATFORM': "niaapi.VersionRegexPlatform",
'NIATELEMETRY.BOOTFLASHDETAILS': "niatelemetry.BootflashDetails",
'NIATELEMETRY.DEPLOYMENTSTATUS': "niatelemetry.DeploymentStatus",
'NIATELEMETRY.DISKINFO': "niatelemetry.Diskinfo",
'NIATELEMETRY.INTERFACE': "niatelemetry.Interface",
'NIATELEMETRY.INTERFACEELEMENT': "niatelemetry.InterfaceElement",
'NIATELEMETRY.JOBDETAIL': "niatelemetry.JobDetail",
'NIATELEMETRY.LOGICALLINK': "niatelemetry.LogicalLink",
'NIATELEMETRY.NVEPACKETCOUNTERS': "niatelemetry.NvePacketCounters",
'NIATELEMETRY.NVEVNI': "niatelemetry.NveVni",
'NIATELEMETRY.NXOSBGPMVPN': "niatelemetry.NxosBgpMvpn",
'NIATELEMETRY.NXOSVTP': "niatelemetry.NxosVtp",
'NIATELEMETRY.SMARTLICENSE': "niatelemetry.SmartLicense",
'NIATELEMETRY.VNISTATUS': "niatelemetry.VniStatus",
'NOTIFICATION.ALARMMOCONDITION': "notification.AlarmMoCondition",
'NOTIFICATION.SENDEMAIL': "notification.SendEmail",
'NTP.AUTHNTPSERVER': "ntp.AuthNtpServer",
'ONPREM.IMAGEPACKAGE': "onprem.ImagePackage",
'ONPREM.SCHEDULE': "onprem.Schedule",
'ONPREM.UPGRADENOTE': "onprem.UpgradeNote",
'ONPREM.UPGRADEPHASE': "onprem.UpgradePhase",
'OPRS.KVPAIR': "oprs.Kvpair",
'OS.ANSWERS': "os.Answers",
'OS.GLOBALCONFIG': "os.GlobalConfig",
'OS.IPV4CONFIGURATION': "os.Ipv4Configuration",
'OS.IPV6CONFIGURATION': "os.Ipv6Configuration",
'OS.PHYSICALDISK': "os.PhysicalDisk",
'OS.PHYSICALDISKRESPONSE': "os.PhysicalDiskResponse",
'OS.PLACEHOLDER': "os.PlaceHolder",
'OS.SERVERCONFIG': "os.ServerConfig",
'OS.VALIDATIONINFORMATION': "os.ValidationInformation",
'OS.VIRTUALDRIVE': "os.VirtualDrive",
'OS.VIRTUALDRIVERESPONSE': "os.VirtualDriveResponse",
'OS.VMWAREPARAMETERS': "os.VmwareParameters",
'OS.WINDOWSPARAMETERS': "os.WindowsParameters",
'PKIX.DISTINGUISHEDNAME': "pkix.DistinguishedName",
'PKIX.ECDSAKEYSPEC': "pkix.EcdsaKeySpec",
'PKIX.EDDSAKEYSPEC': "pkix.EddsaKeySpec",
'PKIX.RSAALGORITHM': "pkix.RsaAlgorithm",
'PKIX.SUBJECTALTERNATENAME': "pkix.SubjectAlternateName",
'POLICY.ACTIONPARAM': "policy.ActionParam",
'POLICY.ACTIONQUALIFIER': "policy.ActionQualifier",
'POLICY.CONFIGCHANGE': "policy.ConfigChange",
'POLICY.CONFIGCHANGECONTEXT': "policy.ConfigChangeContext",
'POLICY.CONFIGCONTEXT': "policy.ConfigContext",
'POLICY.CONFIGRESULTCONTEXT': "policy.ConfigResultContext",
'POLICY.QUALIFIER': "policy.Qualifier",
'POLICYINVENTORY.JOBINFO': "policyinventory.JobInfo",
'RECOVERY.BACKUPSCHEDULE': "recovery.BackupSchedule",
'RESOURCE.PERTYPECOMBINEDSELECTOR': "resource.PerTypeCombinedSelector",
'RESOURCE.SELECTOR': "resource.Selector",
'RESOURCE.SOURCETOPERMISSIONRESOURCES': "resource.SourceToPermissionResources",
'RESOURCE.SOURCETOPERMISSIONRESOURCESHOLDER': "resource.SourceToPermissionResourcesHolder",
'RESOURCEPOOL.SERVERLEASEPARAMETERS': "resourcepool.ServerLeaseParameters",
'RESOURCEPOOL.SERVERPOOLPARAMETERS': "resourcepool.ServerPoolParameters",
'SDCARD.DIAGNOSTICS': "sdcard.Diagnostics",
'SDCARD.DRIVERS': "sdcard.Drivers",
'SDCARD.HOSTUPGRADEUTILITY': "sdcard.HostUpgradeUtility",
'SDCARD.OPERATINGSYSTEM': "sdcard.OperatingSystem",
'SDCARD.PARTITION': "sdcard.Partition",
'SDCARD.SERVERCONFIGURATIONUTILITY': "sdcard.ServerConfigurationUtility",
'SDCARD.USERPARTITION': "sdcard.UserPartition",
'SDWAN.NETWORKCONFIGURATIONTYPE': "sdwan.NetworkConfigurationType",
'SDWAN.TEMPLATEINPUTSTYPE': "sdwan.TemplateInputsType",
'SERVER.PENDINGWORKFLOWTRIGGER': "server.PendingWorkflowTrigger",
'SNMP.TRAP': "snmp.Trap",
'SNMP.USER': "snmp.User",
'SOFTWAREREPOSITORY.APPLIANCEUPLOAD': "softwarerepository.ApplianceUpload",
'SOFTWAREREPOSITORY.CIFSSERVER': "softwarerepository.CifsServer",
'SOFTWAREREPOSITORY.CONSTRAINTMODELS': "softwarerepository.ConstraintModels",
'SOFTWAREREPOSITORY.HTTPSERVER': "softwarerepository.HttpServer",
'SOFTWAREREPOSITORY.IMPORTRESULT': "softwarerepository.ImportResult",
'SOFTWAREREPOSITORY.LOCALMACHINE': "softwarerepository.LocalMachine",
'SOFTWAREREPOSITORY.NFSSERVER': "softwarerepository.NfsServer",
'STORAGE.AUTOMATICDRIVEGROUP': "storage.AutomaticDriveGroup",
'STORAGE.HITACHIARRAYUTILIZATION': "storage.HitachiArrayUtilization",
'STORAGE.HITACHICAPACITY': "storage.HitachiCapacity",
'STORAGE.HITACHIINITIATOR': "storage.HitachiInitiator",
'STORAGE.INITIATOR': "storage.Initiator",
'STORAGE.KEYSETTING': "storage.KeySetting",
'STORAGE.LOCALKEYSETTING': "storage.LocalKeySetting",
'STORAGE.M2VIRTUALDRIVECONFIG': "storage.M2VirtualDriveConfig",
'STORAGE.MANUALDRIVEGROUP': "storage.ManualDriveGroup",
'STORAGE.NETAPPETHERNETPORTLAG': "storage.NetAppEthernetPortLag",
'STORAGE.NETAPPETHERNETPORTVLAN': "storage.NetAppEthernetPortVlan",
'STORAGE.NETAPPEXPORTPOLICYRULE': "storage.NetAppExportPolicyRule",
'STORAGE.NETAPPHIGHAVAILABILITY': "storage.NetAppHighAvailability",
'STORAGE.NETAPPPERFORMANCEMETRICSAVERAGE': "storage.NetAppPerformanceMetricsAverage",
'STORAGE.NETAPPPORT': "storage.NetAppPort",
'STORAGE.NETAPPSTORAGECLUSTEREFFICIENCY': "storage.NetAppStorageClusterEfficiency",
'STORAGE.NETAPPSTORAGEUTILIZATION': "storage.NetAppStorageUtilization",
'STORAGE.PUREARRAYUTILIZATION': "storage.PureArrayUtilization",
'STORAGE.PUREDISKUTILIZATION': "storage.PureDiskUtilization",
'STORAGE.PUREHOSTUTILIZATION': "storage.PureHostUtilization",
'STORAGE.PUREREPLICATIONBLACKOUT': "storage.PureReplicationBlackout",
'STORAGE.PUREVOLUMEUTILIZATION': "storage.PureVolumeUtilization",
'STORAGE.R0DRIVE': "storage.R0Drive",
'STORAGE.REMOTEKEYSETTING': "storage.RemoteKeySetting",
'STORAGE.SPANDRIVES': "storage.SpanDrives",
'STORAGE.STORAGECONTAINERHOSTMOUNTSTATUS': "storage.StorageContainerHostMountStatus",
'STORAGE.STORAGECONTAINERUTILIZATION': "storage.StorageContainerUtilization",
'STORAGE.VIRTUALDRIVECONFIGURATION': "storage.VirtualDriveConfiguration",
'STORAGE.VIRTUALDRIVEPOLICY': "storage.VirtualDrivePolicy",
'STORAGE.VOLUMEUTILIZATION': "storage.VolumeUtilization",
'SYSLOG.LOCALFILELOGGINGCLIENT': "syslog.LocalFileLoggingClient",
'SYSLOG.REMOTELOGGINGCLIENT': "syslog.RemoteLoggingClient",
'TAM.ACTION': "tam.Action",
'TAM.APIDATASOURCE': "tam.ApiDataSource",
'TAM.EOLADVISORYDETAILS': "tam.EolAdvisoryDetails",
'TAM.EOLSEVERITY': "tam.EolSeverity",
'TAM.IDENTIFIERS': "tam.Identifiers",
'TAM.MILESTONE': "tam.Milestone",
'TAM.PSIRTSEVERITY': "tam.PsirtSeverity",
'TAM.QUERYENTRY': "tam.QueryEntry",
'TAM.S3DATASOURCE': "tam.S3DataSource",
'TAM.SECURITYADVISORYDETAILS': "tam.SecurityAdvisoryDetails",
'TAM.TEXTFSMTEMPLATEDATASOURCE': "tam.TextFsmTemplateDataSource",
'TECHSUPPORTMANAGEMENT.APPLIANCEPARAM': "techsupportmanagement.ApplianceParam",
'TECHSUPPORTMANAGEMENT.NIAPARAM': "techsupportmanagement.NiaParam",
'TECHSUPPORTMANAGEMENT.PLATFORMPARAM': "techsupportmanagement.PlatformParam",
'TEMPLATE.TRANSFORMATIONSTAGE': "template.TransformationStage",
'TERRAFORM.CLOUDRESOURCE': "terraform.CloudResource",
'TERRAFORM.RUNSTATE': "terraform.Runstate",
'UCSD.CONNECTORPACK': "ucsd.ConnectorPack",
'UCSD.UCSDRESTOREPARAMETERS': "ucsd.UcsdRestoreParameters",
'UCSDCONNECTOR.RESTCLIENTMESSAGE': "ucsdconnector.RestClientMessage",
'UUIDPOOL.UUIDBLOCK': "uuidpool.UuidBlock",
'VIRTUALIZATION.ACTIONINFO': "virtualization.ActionInfo",
'VIRTUALIZATION.AWSVMCOMPUTECONFIGURATION': "virtualization.AwsVmComputeConfiguration",
'VIRTUALIZATION.AWSVMCONFIGURATION': "virtualization.AwsVmConfiguration",
'VIRTUALIZATION.AWSVMNETWORKCONFIGURATION': "virtualization.AwsVmNetworkConfiguration",
'VIRTUALIZATION.AWSVMSTORAGECONFIGURATION': "virtualization.AwsVmStorageConfiguration",
'VIRTUALIZATION.BONDSTATE': "virtualization.BondState",
'VIRTUALIZATION.CLOUDINITCONFIG': "virtualization.CloudInitConfig",
'VIRTUALIZATION.COMPUTECAPACITY': "virtualization.ComputeCapacity",
'VIRTUALIZATION.CPUALLOCATION': "virtualization.CpuAllocation",
'VIRTUALIZATION.CPUINFO': "virtualization.CpuInfo",
'VIRTUALIZATION.DISKSTATUS': "virtualization.DiskStatus",
'VIRTUALIZATION.ESXICLONECUSTOMSPEC': "virtualization.EsxiCloneCustomSpec",
'VIRTUALIZATION.ESXIHOSTCONFIGURATION': "virtualization.EsxiHostConfiguration",
'VIRTUALIZATION.ESXIOVACUSTOMSPEC': "virtualization.EsxiOvaCustomSpec",
'VIRTUALIZATION.ESXIVMCOMPUTECONFIGURATION': "virtualization.EsxiVmComputeConfiguration",
'VIRTUALIZATION.ESXIVMCONFIGURATION': "virtualization.EsxiVmConfiguration",
'VIRTUALIZATION.ESXIVMNETWORKCONFIGURATION': "virtualization.EsxiVmNetworkConfiguration",
'VIRTUALIZATION.ESXIVMSTORAGECONFIGURATION': "virtualization.EsxiVmStorageConfiguration",
'VIRTUALIZATION.GUESTINFO': "virtualization.GuestInfo",
'VIRTUALIZATION.HXAPVMCONFIGURATION': "virtualization.HxapVmConfiguration",
'VIRTUALIZATION.IPADDRESSINFO': "virtualization.IpAddressInfo",
'VIRTUALIZATION.MEMORYALLOCATION': "virtualization.MemoryAllocation",
'VIRTUALIZATION.MEMORYCAPACITY': "virtualization.MemoryCapacity",
'VIRTUALIZATION.NETWORKINTERFACE': "virtualization.NetworkInterface",
'VIRTUALIZATION.NETWORKPORT': "virtualization.NetworkPort",
'VIRTUALIZATION.PRODUCTINFO': "virtualization.ProductInfo",
'VIRTUALIZATION.STORAGECAPACITY': "virtualization.StorageCapacity",
'VIRTUALIZATION.VDISKCONFIG': "virtualization.VdiskConfig",
'VIRTUALIZATION.VIRTUALDISKCONFIG': "virtualization.VirtualDiskConfig",
'VIRTUALIZATION.VIRTUALMACHINEDISK': "virtualization.VirtualMachineDisk",
'VIRTUALIZATION.VMDISK': "virtualization.VmDisk",
'VIRTUALIZATION.VMESXIDISK': "virtualization.VmEsxiDisk",
'VIRTUALIZATION.VMINTERFACE': "virtualization.VmInterface",
'VIRTUALIZATION.VMWAREREMOTEDISPLAYINFO': "virtualization.VmwareRemoteDisplayInfo",
'VIRTUALIZATION.VMWARERESOURCECONSUMPTION': "virtualization.VmwareResourceConsumption",
'VIRTUALIZATION.VMWARESHARESINFO': "virtualization.VmwareSharesInfo",
'VIRTUALIZATION.VMWARETEAMINGANDFAILOVER': "virtualization.VmwareTeamingAndFailover",
'VIRTUALIZATION.VMWAREVLANRANGE': "virtualization.VmwareVlanRange",
'VIRTUALIZATION.VMWAREVMCPUSHAREINFO': "virtualization.VmwareVmCpuShareInfo",
'VIRTUALIZATION.VMWAREVMCPUSOCKETINFO': "virtualization.VmwareVmCpuSocketInfo",
'VIRTUALIZATION.VMWAREVMDISKCOMMITINFO': "virtualization.VmwareVmDiskCommitInfo",
'VIRTUALIZATION.VMWAREVMMEMORYSHAREINFO': "virtualization.VmwareVmMemoryShareInfo",
'VIRTUALIZATION.VOLUMEINFO': "virtualization.VolumeInfo",
'VMEDIA.MAPPING': "vmedia.Mapping",
'VNIC.ARFSSETTINGS': "vnic.ArfsSettings",
'VNIC.CDN': "vnic.Cdn",
'VNIC.COMPLETIONQUEUESETTINGS': "vnic.CompletionQueueSettings",
'VNIC.ETHINTERRUPTSETTINGS': "vnic.EthInterruptSettings",
'VNIC.ETHRXQUEUESETTINGS': "vnic.EthRxQueueSettings",
'VNIC.ETHTXQUEUESETTINGS': "vnic.EthTxQueueSettings",
'VNIC.FCERRORRECOVERYSETTINGS': "vnic.FcErrorRecoverySettings",
'VNIC.FCINTERRUPTSETTINGS': "vnic.FcInterruptSettings",
'VNIC.FCQUEUESETTINGS': "vnic.FcQueueSettings",
'VNIC.FLOGISETTINGS': "vnic.FlogiSettings",
'VNIC.ISCSIAUTHPROFILE': "vnic.IscsiAuthProfile",
'VNIC.LUN': "vnic.Lun",
'VNIC.NVGRESETTINGS': "vnic.NvgreSettings",
'VNIC.PLACEMENTSETTINGS': "vnic.PlacementSettings",
'VNIC.PLOGISETTINGS': "vnic.PlogiSettings",
'VNIC.ROCESETTINGS': "vnic.RoceSettings",
'VNIC.RSSHASHSETTINGS': "vnic.RssHashSettings",
'VNIC.SCSIQUEUESETTINGS': "vnic.ScsiQueueSettings",
'VNIC.TCPOFFLOADSETTINGS': "vnic.TcpOffloadSettings",
'VNIC.USNICSETTINGS': "vnic.UsnicSettings",
'VNIC.VIFSTATUS': "vnic.VifStatus",
'VNIC.VLANSETTINGS': "vnic.VlanSettings",
'VNIC.VMQSETTINGS': "vnic.VmqSettings",
'VNIC.VSANSETTINGS': "vnic.VsanSettings",
'VNIC.VXLANSETTINGS': "vnic.VxlanSettings",
'WORKFLOW.ACTIONWORKFLOWDEFINITION': "workflow.ActionWorkflowDefinition",
'WORKFLOW.ARRAYDATATYPE': "workflow.ArrayDataType",
'WORKFLOW.ASSOCIATEDROLES': "workflow.AssociatedRoles",
'WORKFLOW.CLICOMMAND': "workflow.CliCommand",
'WORKFLOW.COMMENTS': "workflow.Comments",
'WORKFLOW.CONSTRAINTS': "workflow.Constraints",
'WORKFLOW.CUSTOMARRAYITEM': "workflow.CustomArrayItem",
'WORKFLOW.CUSTOMDATAPROPERTY': "workflow.CustomDataProperty",
'WORKFLOW.CUSTOMDATATYPE': "workflow.CustomDataType",
'WORKFLOW.CUSTOMDATATYPEPROPERTIES': "workflow.CustomDataTypeProperties",
'WORKFLOW.DECISIONCASE': "workflow.DecisionCase",
'WORKFLOW.DECISIONTASK': "workflow.DecisionTask",
'WORKFLOW.DEFAULTVALUE': "workflow.DefaultValue",
'WORKFLOW.DISPLAYMETA': "workflow.DisplayMeta",
'WORKFLOW.DYNAMICWORKFLOWACTIONTASKLIST': "workflow.DynamicWorkflowActionTaskList",
'WORKFLOW.ENUMENTRY': "workflow.EnumEntry",
'WORKFLOW.EXPECTPROMPT': "workflow.ExpectPrompt",
'WORKFLOW.FAILUREENDTASK': "workflow.FailureEndTask",
'WORKFLOW.FILEDOWNLOADOP': "workflow.FileDownloadOp",
'WORKFLOW.FILEOPERATIONS': "workflow.FileOperations",
'WORKFLOW.FILETEMPLATEOP': "workflow.FileTemplateOp",
'WORKFLOW.FILETRANSFER': "workflow.FileTransfer",
'WORKFLOW.FORKTASK': "workflow.ForkTask",
'WORKFLOW.INITIATORCONTEXT': "workflow.InitiatorContext",
'WORKFLOW.INTERNALPROPERTIES': "workflow.InternalProperties",
'WORKFLOW.JOINTASK': "workflow.JoinTask",
'WORKFLOW.LOOPTASK': "workflow.LoopTask",
'WORKFLOW.MESSAGE': "workflow.Message",
'WORKFLOW.MOREFERENCEARRAYITEM': "workflow.MoReferenceArrayItem",
'WORKFLOW.MOREFERENCEDATATYPE': "workflow.MoReferenceDataType",
'WORKFLOW.MOREFERENCEPROPERTY': "workflow.MoReferenceProperty",
'WORKFLOW.PARAMETERSET': "workflow.ParameterSet",
'WORKFLOW.PRIMITIVEARRAYITEM': "workflow.PrimitiveArrayItem",
'WORKFLOW.PRIMITIVEDATAPROPERTY': "workflow.PrimitiveDataProperty",
'WORKFLOW.PRIMITIVEDATATYPE': "workflow.PrimitiveDataType",
'WORKFLOW.PROPERTIES': "workflow.Properties",
'WORKFLOW.RESULTHANDLER': "workflow.ResultHandler",
'WORKFLOW.ROLLBACKTASK': "workflow.RollbackTask",
'WORKFLOW.ROLLBACKWORKFLOWTASK': "workflow.RollbackWorkflowTask",
'WORKFLOW.SELECTORPROPERTY': "workflow.SelectorProperty",
'WORKFLOW.SSHCMD': "workflow.SshCmd",
'WORKFLOW.SSHCONFIG': "workflow.SshConfig",
'WORKFLOW.SSHSESSION': "workflow.SshSession",
'WORKFLOW.STARTTASK': "workflow.StartTask",
'WORKFLOW.SUBWORKFLOWTASK': "workflow.SubWorkflowTask",
'WORKFLOW.SUCCESSENDTASK': "workflow.SuccessEndTask",
'WORKFLOW.TARGETCONTEXT': "workflow.TargetContext",
'WORKFLOW.TARGETDATATYPE': "workflow.TargetDataType",
'WORKFLOW.TARGETPROPERTY': "workflow.TargetProperty",
'WORKFLOW.TASKCONSTRAINTS': "workflow.TaskConstraints",
'WORKFLOW.TASKRETRYINFO': "workflow.TaskRetryInfo",
'WORKFLOW.UIINPUTFILTER': "workflow.UiInputFilter",
'WORKFLOW.VALIDATIONERROR': "workflow.ValidationError",
'WORKFLOW.VALIDATIONINFORMATION': "workflow.ValidationInformation",
'WORKFLOW.WAITTASK': "workflow.WaitTask",
'WORKFLOW.WAITTASKPROMPT': "workflow.WaitTaskPrompt",
'WORKFLOW.WEBAPI': "workflow.WebApi",
'WORKFLOW.WORKERTASK': "workflow.WorkerTask",
'WORKFLOW.WORKFLOWCTX': "workflow.WorkflowCtx",
'WORKFLOW.WORKFLOWENGINEPROPERTIES': "workflow.WorkflowEngineProperties",
'WORKFLOW.WORKFLOWINFOPROPERTIES': "workflow.WorkflowInfoProperties",
'WORKFLOW.WORKFLOWPROPERTIES': "workflow.WorkflowProperties",
'WORKFLOW.XMLAPI': "workflow.XmlApi",
'X509.CERTIFICATE': "x509.Certificate",
},
('object_type',): {
'ACCESS.ADDRESSTYPE': "access.AddressType",
'ADAPTER.ADAPTERCONFIG': "adapter.AdapterConfig",
'ADAPTER.DCEINTERFACESETTINGS': "adapter.DceInterfaceSettings",
'ADAPTER.ETHSETTINGS': "adapter.EthSettings",
'ADAPTER.FCSETTINGS': "adapter.FcSettings",
'ADAPTER.PORTCHANNELSETTINGS': "adapter.PortChannelSettings",
'APPLIANCE.APISTATUS': "appliance.ApiStatus",
'APPLIANCE.CERTRENEWALPHASE': "appliance.CertRenewalPhase",
'APPLIANCE.KEYVALUEPAIR': "appliance.KeyValuePair",
'APPLIANCE.STATUSCHECK': "appliance.StatusCheck",
'ASSET.ADDRESSINFORMATION': "asset.AddressInformation",
'ASSET.APIKEYCREDENTIAL': "asset.ApiKeyCredential",
'ASSET.CLIENTCERTIFICATECREDENTIAL': "asset.ClientCertificateCredential",
'ASSET.CLOUDCONNECTION': "asset.CloudConnection",
'ASSET.CONNECTIONCONTROLMESSAGE': "asset.ConnectionControlMessage",
'ASSET.CONTRACTINFORMATION': "asset.ContractInformation",
'ASSET.CUSTOMERINFORMATION': "asset.CustomerInformation",
'ASSET.DEPLOYMENTALARMINFO': "asset.DeploymentAlarmInfo",
'ASSET.DEPLOYMENTDEVICEALARMINFO': "asset.DeploymentDeviceAlarmInfo",
'ASSET.DEPLOYMENTDEVICEINFORMATION': "asset.DeploymentDeviceInformation",
'ASSET.DEVICEINFORMATION': "asset.DeviceInformation",
'ASSET.DEVICESTATISTICS': "asset.DeviceStatistics",
'ASSET.DEVICETRANSACTION': "asset.DeviceTransaction",
'ASSET.GLOBALULTIMATE': "asset.GlobalUltimate",
'ASSET.HTTPCONNECTION': "asset.HttpConnection",
'ASSET.INTERSIGHTDEVICECONNECTORCONNECTION': "asset.IntersightDeviceConnectorConnection",
'ASSET.METERINGTYPE': "asset.MeteringType",
'ASSET.NEWRELICCREDENTIAL': "asset.NewRelicCredential",
'ASSET.NOAUTHENTICATIONCREDENTIAL': "asset.NoAuthenticationCredential",
'ASSET.OAUTHBEARERTOKENCREDENTIAL': "asset.OauthBearerTokenCredential",
'ASSET.OAUTHCLIENTIDSECRETCREDENTIAL': "asset.OauthClientIdSecretCredential",
'ASSET.ORCHESTRATIONHITACHIVIRTUALSTORAGEPLATFORMOPTIONS': "asset.OrchestrationHitachiVirtualStoragePlatformOptions",
'ASSET.ORCHESTRATIONSERVICE': "asset.OrchestrationService",
'ASSET.PARENTCONNECTIONSIGNATURE': "asset.ParentConnectionSignature",
'ASSET.PRIVATEKEYCREDENTIAL': "asset.PrivateKeyCredential",
'ASSET.PRODUCTINFORMATION': "asset.ProductInformation",
'ASSET.SERVICENOWCREDENTIAL': "asset.ServiceNowCredential",
'ASSET.SSHCONNECTION': "asset.SshConnection",
'ASSET.SUDIINFO': "asset.SudiInfo",
'ASSET.TARGETKEY': "asset.TargetKey",
'ASSET.TARGETSIGNATURE': "asset.TargetSignature",
'ASSET.TARGETSTATUSDETAILS': "asset.TargetStatusDetails",
'ASSET.TERRAFORMINTEGRATIONSERVICE': "asset.TerraformIntegrationService",
'ASSET.TERRAFORMINTEGRATIONTERRAFORMAGENTOPTIONS': "asset.TerraformIntegrationTerraformAgentOptions",
'ASSET.TERRAFORMINTEGRATIONTERRAFORMCLOUDOPTIONS': "asset.TerraformIntegrationTerraformCloudOptions",
'ASSET.USERNAMEPASSWORDCREDENTIAL': "asset.UsernamePasswordCredential",
'ASSET.VIRTUALIZATIONAMAZONWEBSERVICEOPTIONS': "asset.VirtualizationAmazonWebServiceOptions",
'ASSET.VIRTUALIZATIONSERVICE': "asset.VirtualizationService",
'ASSET.VMHOST': "asset.VmHost",
'ASSET.WORKLOADOPTIMIZERAMAZONWEBSERVICESBILLINGOPTIONS': "asset.WorkloadOptimizerAmazonWebServicesBillingOptions",
'ASSET.WORKLOADOPTIMIZERDYNATRACEOPTIONS': "asset.WorkloadOptimizerDynatraceOptions",
'ASSET.WORKLOADOPTIMIZERHYPERVOPTIONS': "asset.WorkloadOptimizerHypervOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREAPPLICATIONINSIGHTSOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureApplicationInsightsOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREENTERPRISEAGREEMENTOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureEnterpriseAgreementOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZURESERVICEPRINCIPALOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureServicePrincipalOptions",
'ASSET.WORKLOADOPTIMIZERNEWRELICOPTIONS': "asset.WorkloadOptimizerNewRelicOptions",
'ASSET.WORKLOADOPTIMIZEROPENSTACKOPTIONS': "asset.WorkloadOptimizerOpenStackOptions",
'ASSET.WORKLOADOPTIMIZERREDHATOPENSTACKOPTIONS': "asset.WorkloadOptimizerRedHatOpenStackOptions",
'ASSET.WORKLOADOPTIMIZERSERVICE': "asset.WorkloadOptimizerService",
'ASSET.WORKLOADOPTIMIZERVMWAREVCENTEROPTIONS': "asset.WorkloadOptimizerVmwareVcenterOptions",
'BOOT.BOOTLOADER': "boot.Bootloader",
'BOOT.ISCSI': "boot.Iscsi",
'BOOT.LOCALCDD': "boot.LocalCdd",
'BOOT.LOCALDISK': "boot.LocalDisk",
'BOOT.NVME': "boot.Nvme",
'BOOT.PCHSTORAGE': "boot.PchStorage",
'BOOT.PXE': "boot.Pxe",
'BOOT.SAN': "boot.San",
'BOOT.SDCARD': "boot.SdCard",
'BOOT.UEFISHELL': "boot.UefiShell",
'BOOT.USB': "boot.Usb",
'BOOT.VIRTUALMEDIA': "boot.VirtualMedia",
'BULK.HTTPHEADER': "bulk.HttpHeader",
'BULK.RESTRESULT': "bulk.RestResult",
'BULK.RESTSUBREQUEST': "bulk.RestSubRequest",
'CAPABILITY.PORTRANGE': "capability.PortRange",
'CAPABILITY.SWITCHNETWORKLIMITS': "capability.SwitchNetworkLimits",
'CAPABILITY.SWITCHSTORAGELIMITS': "capability.SwitchStorageLimits",
'CAPABILITY.SWITCHSYSTEMLIMITS': "capability.SwitchSystemLimits",
'CAPABILITY.SWITCHINGMODECAPABILITY': "capability.SwitchingModeCapability",
'CERTIFICATEMANAGEMENT.IMC': "certificatemanagement.Imc",
'CLOUD.AVAILABILITYZONE': "cloud.AvailabilityZone",
'CLOUD.BILLINGUNIT': "cloud.BillingUnit",
'CLOUD.CLOUDREGION': "cloud.CloudRegion",
'CLOUD.CLOUDTAG': "cloud.CloudTag",
'CLOUD.CUSTOMATTRIBUTES': "cloud.CustomAttributes",
'CLOUD.IMAGEREFERENCE': "cloud.ImageReference",
'CLOUD.INSTANCETYPE': "cloud.InstanceType",
'CLOUD.NETWORKACCESSCONFIG': "cloud.NetworkAccessConfig",
'CLOUD.NETWORKADDRESS': "cloud.NetworkAddress",
'CLOUD.NETWORKINSTANCEATTACHMENT': "cloud.NetworkInstanceAttachment",
'CLOUD.NETWORKINTERFACEATTACHMENT': "cloud.NetworkInterfaceAttachment",
'CLOUD.SECURITYGROUPRULE': "cloud.SecurityGroupRule",
'CLOUD.TFCWORKSPACEVARIABLES': "cloud.TfcWorkspaceVariables",
'CLOUD.VOLUMEATTACHMENT': "cloud.VolumeAttachment",
'CLOUD.VOLUMEINSTANCEATTACHMENT': "cloud.VolumeInstanceAttachment",
'CLOUD.VOLUMEIOPSINFO': "cloud.VolumeIopsInfo",
'CLOUD.VOLUMETYPE': "cloud.VolumeType",
'CMRF.CMRF': "cmrf.CmRf",
'COMM.IPV4ADDRESSBLOCK': "comm.IpV4AddressBlock",
'COMM.IPV4INTERFACE': "comm.IpV4Interface",
'COMM.IPV6INTERFACE': "comm.IpV6Interface",
'COMPUTE.ALARMSUMMARY': "compute.AlarmSummary",
'COMPUTE.IPADDRESS': "compute.IpAddress",
'COMPUTE.PERSISTENTMEMORYMODULE': "compute.PersistentMemoryModule",
'COMPUTE.PERSISTENTMEMORYOPERATION': "compute.PersistentMemoryOperation",
'COMPUTE.SERVERCONFIG': "compute.ServerConfig",
'COMPUTE.SERVEROPSTATUS': "compute.ServerOpStatus",
'COMPUTE.STORAGECONTROLLEROPERATION': "compute.StorageControllerOperation",
'COMPUTE.STORAGEPHYSICALDRIVE': "compute.StoragePhysicalDrive",
'COMPUTE.STORAGEPHYSICALDRIVEOPERATION': "compute.StoragePhysicalDriveOperation",
'COMPUTE.STORAGEVIRTUALDRIVE': "compute.StorageVirtualDrive",
'COMPUTE.STORAGEVIRTUALDRIVEOPERATION': "compute.StorageVirtualDriveOperation",
'COND.ALARMSUMMARY': "cond.AlarmSummary",
'CONNECTOR.CLOSESTREAMMESSAGE': "connector.CloseStreamMessage",
'CONNECTOR.COMMANDCONTROLMESSAGE': "connector.CommandControlMessage",
'CONNECTOR.COMMANDTERMINALSTREAM': "connector.CommandTerminalStream",
'CONNECTOR.EXPECTPROMPT': "connector.ExpectPrompt",
'CONNECTOR.FETCHSTREAMMESSAGE': "connector.FetchStreamMessage",
'CONNECTOR.FILECHECKSUM': "connector.FileChecksum",
'CONNECTOR.FILEMESSAGE': "connector.FileMessage",
'CONNECTOR.HTTPREQUEST': "connector.HttpRequest",
'CONNECTOR.SSHCONFIG': "connector.SshConfig",
'CONNECTOR.SSHMESSAGE': "connector.SshMessage",
'CONNECTOR.STARTSTREAM': "connector.StartStream",
'CONNECTOR.STARTSTREAMFROMDEVICE': "connector.StartStreamFromDevice",
'CONNECTOR.STREAMACKNOWLEDGE': "connector.StreamAcknowledge",
'CONNECTOR.STREAMINPUT': "connector.StreamInput",
'CONNECTOR.STREAMKEEPALIVE': "connector.StreamKeepalive",
'CONNECTOR.TARGETCHANGEMESSAGE': "connector.TargetChangeMessage",
'CONNECTOR.URL': "connector.Url",
'CONNECTOR.WINRMREQUEST': "connector.WinrmRequest",
'CONNECTOR.XMLAPIMESSAGE': "connector.XmlApiMessage",
'CONNECTORPACK.CONNECTORPACKUPDATE': "connectorpack.ConnectorPackUpdate",
'CONTENT.COMPLEXTYPE': "content.ComplexType",
'CONTENT.PARAMETER': "content.Parameter",
'CONTENT.TEXTPARAMETER': "content.TextParameter",
'CONVERGEDINFRA.ALARMSUMMARY': "convergedinfra.AlarmSummary",
'CONVERGEDINFRA.COMPLIANCESUMMARY': "convergedinfra.ComplianceSummary",
'CONVERGEDINFRA.PODSUMMARY': "convergedinfra.PodSummary",
'CRD.CUSTOMRESOURCECONFIGPROPERTY': "crd.CustomResourceConfigProperty",
'EQUIPMENT.IOCARDIDENTITY': "equipment.IoCardIdentity",
'FABRIC.LLDPSETTINGS': "fabric.LldpSettings",
'FABRIC.MACAGINGSETTINGS': "fabric.MacAgingSettings",
'FABRIC.PORTIDENTIFIER': "fabric.PortIdentifier",
'FABRIC.QOSCLASS': "fabric.QosClass",
'FABRIC.UDLDGLOBALSETTINGS': "fabric.UdldGlobalSettings",
'FABRIC.UDLDSETTINGS': "fabric.UdldSettings",
'FABRIC.VLANSETTINGS': "fabric.VlanSettings",
'FCPOOL.BLOCK': "fcpool.Block",
'FEEDBACK.FEEDBACKDATA': "feedback.FeedbackData",
'FIRMWARE.CHASSISUPGRADEIMPACT': "firmware.ChassisUpgradeImpact",
'FIRMWARE.CIFSSERVER': "firmware.CifsServer",
'FIRMWARE.COMPONENTIMPACT': "firmware.ComponentImpact",
'FIRMWARE.COMPONENTMETA': "firmware.ComponentMeta",
'FIRMWARE.DIRECTDOWNLOAD': "firmware.DirectDownload",
'FIRMWARE.FABRICUPGRADEIMPACT': "firmware.FabricUpgradeImpact",
'FIRMWARE.FIRMWAREINVENTORY': "firmware.FirmwareInventory",
'FIRMWARE.HTTPSERVER': "firmware.HttpServer",
'FIRMWARE.INCLUDECOMPONENTLISTTYPE': "firmware.IncludeComponentListType",
'FIRMWARE.NETWORKSHARE': "firmware.NetworkShare",
'FIRMWARE.NFSSERVER': "firmware.NfsServer",
'FIRMWARE.SERVERUPGRADEIMPACT': "firmware.ServerUpgradeImpact",
'FORECAST.MODEL': "forecast.Model",
'HCL.CONSTRAINT': "hcl.Constraint",
'HCL.FIRMWARE': "hcl.Firmware",
'HCL.HARDWARECOMPATIBILITYPROFILE': "hcl.HardwareCompatibilityProfile",
'HCL.PRODUCT': "hcl.Product",
'HYPERFLEX.ALARMSUMMARY': "hyperflex.AlarmSummary",
'HYPERFLEX.APPSETTINGCONSTRAINT': "hyperflex.AppSettingConstraint",
'HYPERFLEX.BACKUPPOLICYSETTINGS': "hyperflex.BackupPolicySettings",
'HYPERFLEX.DATASTOREINFO': "hyperflex.DatastoreInfo",
'HYPERFLEX.ENTITYREFERENCE': "hyperflex.EntityReference",
'HYPERFLEX.ERRORSTACK': "hyperflex.ErrorStack",
'HYPERFLEX.FEATURELIMITENTRY': "hyperflex.FeatureLimitEntry",
'HYPERFLEX.FILEPATH': "hyperflex.FilePath",
'HYPERFLEX.HEALTHCHECKSCRIPTINFO': "hyperflex.HealthCheckScriptInfo",
'HYPERFLEX.HXHOSTMOUNTSTATUSDT': "hyperflex.HxHostMountStatusDt",
'HYPERFLEX.HXLICENSEAUTHORIZATIONDETAILSDT': "hyperflex.HxLicenseAuthorizationDetailsDt",
'HYPERFLEX.HXLINKDT': "hyperflex.HxLinkDt",
'HYPERFLEX.HXNETWORKADDRESSDT': "hyperflex.HxNetworkAddressDt",
'HYPERFLEX.HXPLATFORMDATASTORECONFIGDT': "hyperflex.HxPlatformDatastoreConfigDt",
'HYPERFLEX.HXREGISTRATIONDETAILSDT': "hyperflex.HxRegistrationDetailsDt",
'HYPERFLEX.HXRESILIENCYINFODT': "hyperflex.HxResiliencyInfoDt",
'HYPERFLEX.HXSITEDT': "hyperflex.HxSiteDt",
'HYPERFLEX.HXUUIDDT': "hyperflex.HxUuIdDt",
'HYPERFLEX.HXZONEINFODT': "hyperflex.HxZoneInfoDt",
'HYPERFLEX.HXZONERESILIENCYINFODT': "hyperflex.HxZoneResiliencyInfoDt",
'HYPERFLEX.IPADDRRANGE': "hyperflex.IpAddrRange",
'HYPERFLEX.LOGICALAVAILABILITYZONE': "hyperflex.LogicalAvailabilityZone",
'HYPERFLEX.MACADDRPREFIXRANGE': "hyperflex.MacAddrPrefixRange",
'HYPERFLEX.MAPCLUSTERIDTOPROTECTIONINFO': "hyperflex.MapClusterIdToProtectionInfo",
'HYPERFLEX.MAPCLUSTERIDTOSTSNAPSHOTPOINT': "hyperflex.MapClusterIdToStSnapshotPoint",
'HYPERFLEX.MAPUUIDTOTRACKEDDISK': "hyperflex.MapUuidToTrackedDisk",
'HYPERFLEX.NAMEDVLAN': "hyperflex.NamedVlan",
'HYPERFLEX.NAMEDVSAN': "hyperflex.NamedVsan",
'HYPERFLEX.PORTTYPETOPORTNUMBERMAP': "hyperflex.PortTypeToPortNumberMap",
'HYPERFLEX.PROTECTIONINFO': "hyperflex.ProtectionInfo",
'HYPERFLEX.REPLICATIONCLUSTERREFERENCETOSCHEDULE': "hyperflex.ReplicationClusterReferenceToSchedule",
'HYPERFLEX.REPLICATIONPEERINFO': "hyperflex.ReplicationPeerInfo",
'HYPERFLEX.REPLICATIONPLATDATASTORE': "hyperflex.ReplicationPlatDatastore",
'HYPERFLEX.REPLICATIONPLATDATASTOREPAIR': "hyperflex.ReplicationPlatDatastorePair",
'HYPERFLEX.REPLICATIONSCHEDULE': "hyperflex.ReplicationSchedule",
'HYPERFLEX.REPLICATIONSTATUS': "hyperflex.ReplicationStatus",
'HYPERFLEX.RPOSTATUS': "hyperflex.RpoStatus",
'HYPERFLEX.SERVERFIRMWAREVERSIONINFO': "hyperflex.ServerFirmwareVersionInfo",
'HYPERFLEX.SERVERMODELENTRY': "hyperflex.ServerModelEntry",
'HYPERFLEX.SNAPSHOTFILES': "hyperflex.SnapshotFiles",
'HYPERFLEX.SNAPSHOTINFOBRIEF': "hyperflex.SnapshotInfoBrief",
'HYPERFLEX.SNAPSHOTPOINT': "hyperflex.SnapshotPoint",
'HYPERFLEX.SNAPSHOTSTATUS': "hyperflex.SnapshotStatus",
'HYPERFLEX.STPLATFORMCLUSTERHEALINGINFO': "hyperflex.StPlatformClusterHealingInfo",
'HYPERFLEX.STPLATFORMCLUSTERRESILIENCYINFO': "hyperflex.StPlatformClusterResiliencyInfo",
'HYPERFLEX.SUMMARY': "hyperflex.Summary",
'HYPERFLEX.TRACKEDDISK': "hyperflex.TrackedDisk",
'HYPERFLEX.TRACKEDFILE': "hyperflex.TrackedFile",
'HYPERFLEX.VIRTUALMACHINE': "hyperflex.VirtualMachine",
'HYPERFLEX.VIRTUALMACHINERUNTIMEINFO': "hyperflex.VirtualMachineRuntimeInfo",
'HYPERFLEX.VMPROTECTIONSPACEUSAGE': "hyperflex.VmProtectionSpaceUsage",
'HYPERFLEX.WWXNPREFIXRANGE': "hyperflex.WwxnPrefixRange",
'I18N.MESSAGE': "i18n.Message",
'I18N.MESSAGEPARAM': "i18n.MessageParam",
'IAAS.LICENSEKEYSINFO': "iaas.LicenseKeysInfo",
'IAAS.LICENSEUTILIZATIONINFO': "iaas.LicenseUtilizationInfo",
'IAAS.WORKFLOWSTEPS': "iaas.WorkflowSteps",
'IAM.ACCOUNTPERMISSIONS': "iam.AccountPermissions",
'IAM.CLIENTMETA': "iam.ClientMeta",
'IAM.ENDPOINTPASSWORDPROPERTIES': "iam.EndPointPasswordProperties",
'IAM.FEATUREDEFINITION': "iam.FeatureDefinition",
'IAM.GROUPPERMISSIONTOROLES': "iam.GroupPermissionToRoles",
'IAM.LDAPBASEPROPERTIES': "iam.LdapBaseProperties",
'IAM.LDAPDNSPARAMETERS': "iam.LdapDnsParameters",
'IAM.PERMISSIONREFERENCE': "iam.PermissionReference",
'IAM.PERMISSIONTOROLES': "iam.PermissionToRoles",
'IAM.RULE': "iam.Rule",
'IAM.SAMLSPCONNECTION': "iam.SamlSpConnection",
'IAM.SSOSESSIONATTRIBUTES': "iam.SsoSessionAttributes",
'IMCCONNECTOR.WEBUIMESSAGE': "imcconnector.WebUiMessage",
'INFRA.HARDWAREINFO': "infra.HardwareInfo",
'INFRA.METADATA': "infra.MetaData",
'INVENTORY.INVENTORYMO': "inventory.InventoryMo",
'INVENTORY.UEMINFO': "inventory.UemInfo",
'IPPOOL.IPV4BLOCK': "ippool.IpV4Block",
'IPPOOL.IPV4CONFIG': "ippool.IpV4Config",
'IPPOOL.IPV6BLOCK': "ippool.IpV6Block",
'IPPOOL.IPV6CONFIG': "ippool.IpV6Config",
'IQNPOOL.IQNSUFFIXBLOCK': "iqnpool.IqnSuffixBlock",
'KUBERNETES.ACTIONINFO': "kubernetes.ActionInfo",
'KUBERNETES.ADDON': "kubernetes.Addon",
'KUBERNETES.ADDONCONFIGURATION': "kubernetes.AddonConfiguration",
'KUBERNETES.BAREMETALNETWORKINFO': "kubernetes.BaremetalNetworkInfo",
'KUBERNETES.CALICOCONFIG': "kubernetes.CalicoConfig",
'KUBERNETES.CLUSTERCERTIFICATECONFIGURATION': "kubernetes.ClusterCertificateConfiguration",
'KUBERNETES.CLUSTERMANAGEMENTCONFIG': "kubernetes.ClusterManagementConfig",
'KUBERNETES.CONFIGURATION': "kubernetes.Configuration",
'KUBERNETES.DAEMONSETSTATUS': "kubernetes.DaemonSetStatus",
'KUBERNETES.DEPLOYMENTSTATUS': "kubernetes.DeploymentStatus",
'KUBERNETES.ESSENTIALADDON': "kubernetes.EssentialAddon",
'KUBERNETES.ESXIVIRTUALMACHINEINFRACONFIG': "kubernetes.EsxiVirtualMachineInfraConfig",
'KUBERNETES.ETHERNET': "kubernetes.Ethernet",
'KUBERNETES.ETHERNETMATCHER': "kubernetes.EthernetMatcher",
'KUBERNETES.HYPERFLEXAPVIRTUALMACHINEINFRACONFIG': "kubernetes.HyperFlexApVirtualMachineInfraConfig",
'KUBERNETES.INGRESSSTATUS': "kubernetes.IngressStatus",
'KUBERNETES.INSTANCETYPEDETAILS': "kubernetes.InstanceTypeDetails",
'KUBERNETES.IPV4CONFIG': "kubernetes.IpV4Config",
'KUBERNETES.KEYVALUE': "kubernetes.KeyValue",
'KUBERNETES.LOADBALANCER': "kubernetes.LoadBalancer",
'KUBERNETES.NETWORKINTERFACESPEC': "kubernetes.NetworkInterfaceSpec",
'KUBERNETES.NODEADDRESS': "kubernetes.NodeAddress",
'KUBERNETES.NODEGROUPLABEL': "kubernetes.NodeGroupLabel",
'KUBERNETES.NODEGROUPTAINT': "kubernetes.NodeGroupTaint",
'KUBERNETES.NODEINFO': "kubernetes.NodeInfo",
'KUBERNETES.NODESPEC': "kubernetes.NodeSpec",
'KUBERNETES.NODESTATUS': "kubernetes.NodeStatus",
'KUBERNETES.OBJECTMETA': "kubernetes.ObjectMeta",
'KUBERNETES.OVSBOND': "kubernetes.OvsBond",
'KUBERNETES.PODSTATUS': "kubernetes.PodStatus",
'KUBERNETES.PROXYCONFIG': "kubernetes.ProxyConfig",
'KUBERNETES.ROUTE': "kubernetes.Route",
'KUBERNETES.SERVICESTATUS': "kubernetes.ServiceStatus",
'KUBERNETES.STATEFULSETSTATUS': "kubernetes.StatefulSetStatus",
'KUBERNETES.TAINT': "kubernetes.Taint",
'MACPOOL.BLOCK': "macpool.Block",
'MEMORY.PERSISTENTMEMORYGOAL': "memory.PersistentMemoryGoal",
'MEMORY.PERSISTENTMEMORYLOCALSECURITY': "memory.PersistentMemoryLocalSecurity",
'MEMORY.PERSISTENTMEMORYLOGICALNAMESPACE': "memory.PersistentMemoryLogicalNamespace",
'META.ACCESSPRIVILEGE': "meta.AccessPrivilege",
'META.DISPLAYNAMEDEFINITION': "meta.DisplayNameDefinition",
'META.IDENTITYDEFINITION': "meta.IdentityDefinition",
'META.PROPDEFINITION': "meta.PropDefinition",
'META.RELATIONSHIPDEFINITION': "meta.RelationshipDefinition",
'MO.MOREF': "mo.MoRef",
'MO.TAG': "mo.Tag",
'MO.VERSIONCONTEXT': "mo.VersionContext",
'NIAAPI.DETAIL': "niaapi.Detail",
'NIAAPI.NEWRELEASEDETAIL': "niaapi.NewReleaseDetail",
'NIAAPI.REVISIONINFO': "niaapi.RevisionInfo",
'NIAAPI.SOFTWAREREGEX': "niaapi.SoftwareRegex",
'NIAAPI.VERSIONREGEXPLATFORM': "niaapi.VersionRegexPlatform",
'NIATELEMETRY.BOOTFLASHDETAILS': "niatelemetry.BootflashDetails",
'NIATELEMETRY.DEPLOYMENTSTATUS': "niatelemetry.DeploymentStatus",
'NIATELEMETRY.DISKINFO': "niatelemetry.Diskinfo",
'NIATELEMETRY.INTERFACE': "niatelemetry.Interface",
'NIATELEMETRY.INTERFACEELEMENT': "niatelemetry.InterfaceElement",
'NIATELEMETRY.JOBDETAIL': "niatelemetry.JobDetail",
'NIATELEMETRY.LOGICALLINK': "niatelemetry.LogicalLink",
'NIATELEMETRY.NVEPACKETCOUNTERS': "niatelemetry.NvePacketCounters",
'NIATELEMETRY.NVEVNI': "niatelemetry.NveVni",
'NIATELEMETRY.NXOSBGPMVPN': "niatelemetry.NxosBgpMvpn",
'NIATELEMETRY.NXOSVTP': "niatelemetry.NxosVtp",
'NIATELEMETRY.SMARTLICENSE': "niatelemetry.SmartLicense",
'NIATELEMETRY.VNISTATUS': "niatelemetry.VniStatus",
'NOTIFICATION.ALARMMOCONDITION': "notification.AlarmMoCondition",
'NOTIFICATION.SENDEMAIL': "notification.SendEmail",
'NTP.AUTHNTPSERVER': "ntp.AuthNtpServer",
'ONPREM.IMAGEPACKAGE': "onprem.ImagePackage",
'ONPREM.SCHEDULE': "onprem.Schedule",
'ONPREM.UPGRADENOTE': "onprem.UpgradeNote",
'ONPREM.UPGRADEPHASE': "onprem.UpgradePhase",
'OPRS.KVPAIR': "oprs.Kvpair",
'OS.ANSWERS': "os.Answers",
'OS.GLOBALCONFIG': "os.GlobalConfig",
'OS.IPV4CONFIGURATION': "os.Ipv4Configuration",
'OS.IPV6CONFIGURATION': "os.Ipv6Configuration",
'OS.PHYSICALDISK': "os.PhysicalDisk",
'OS.PHYSICALDISKRESPONSE': "os.PhysicalDiskResponse",
'OS.PLACEHOLDER': "os.PlaceHolder",
'OS.SERVERCONFIG': "os.ServerConfig",
'OS.VALIDATIONINFORMATION': "os.ValidationInformation",
'OS.VIRTUALDRIVE': "os.VirtualDrive",
'OS.VIRTUALDRIVERESPONSE': "os.VirtualDriveResponse",
'OS.VMWAREPARAMETERS': "os.VmwareParameters",
'OS.WINDOWSPARAMETERS': "os.WindowsParameters",
'PKIX.DISTINGUISHEDNAME': "pkix.DistinguishedName",
'PKIX.ECDSAKEYSPEC': "pkix.EcdsaKeySpec",
'PKIX.EDDSAKEYSPEC': "pkix.EddsaKeySpec",
'PKIX.RSAALGORITHM': "pkix.RsaAlgorithm",
'PKIX.SUBJECTALTERNATENAME': "pkix.SubjectAlternateName",
'POLICY.ACTIONPARAM': "policy.ActionParam",
'POLICY.ACTIONQUALIFIER': "policy.ActionQualifier",
'POLICY.CONFIGCHANGE': "policy.ConfigChange",
'POLICY.CONFIGCHANGECONTEXT': "policy.ConfigChangeContext",
'POLICY.CONFIGCONTEXT': "policy.ConfigContext",
'POLICY.CONFIGRESULTCONTEXT': "policy.ConfigResultContext",
'POLICY.QUALIFIER': "policy.Qualifier",
'POLICYINVENTORY.JOBINFO': "policyinventory.JobInfo",
'RECOVERY.BACKUPSCHEDULE': "recovery.BackupSchedule",
'RESOURCE.PERTYPECOMBINEDSELECTOR': "resource.PerTypeCombinedSelector",
'RESOURCE.SELECTOR': "resource.Selector",
'RESOURCE.SOURCETOPERMISSIONRESOURCES': "resource.SourceToPermissionResources",
'RESOURCE.SOURCETOPERMISSIONRESOURCESHOLDER': "resource.SourceToPermissionResourcesHolder",
'RESOURCEPOOL.SERVERLEASEPARAMETERS': "resourcepool.ServerLeaseParameters",
'RESOURCEPOOL.SERVERPOOLPARAMETERS': "resourcepool.ServerPoolParameters",
'SDCARD.DIAGNOSTICS': "sdcard.Diagnostics",
'SDCARD.DRIVERS': "sdcard.Drivers",
'SDCARD.HOSTUPGRADEUTILITY': "sdcard.HostUpgradeUtility",
'SDCARD.OPERATINGSYSTEM': "sdcard.OperatingSystem",
'SDCARD.PARTITION': "sdcard.Partition",
'SDCARD.SERVERCONFIGURATIONUTILITY': "sdcard.ServerConfigurationUtility",
'SDCARD.USERPARTITION': "sdcard.UserPartition",
'SDWAN.NETWORKCONFIGURATIONTYPE': "sdwan.NetworkConfigurationType",
'SDWAN.TEMPLATEINPUTSTYPE': "sdwan.TemplateInputsType",
'SERVER.PENDINGWORKFLOWTRIGGER': "server.PendingWorkflowTrigger",
'SNMP.TRAP': "snmp.Trap",
'SNMP.USER': "snmp.User",
'SOFTWAREREPOSITORY.APPLIANCEUPLOAD': "softwarerepository.ApplianceUpload",
'SOFTWAREREPOSITORY.CIFSSERVER': "softwarerepository.CifsServer",
'SOFTWAREREPOSITORY.CONSTRAINTMODELS': "softwarerepository.ConstraintModels",
'SOFTWAREREPOSITORY.HTTPSERVER': "softwarerepository.HttpServer",
'SOFTWAREREPOSITORY.IMPORTRESULT': "softwarerepository.ImportResult",
'SOFTWAREREPOSITORY.LOCALMACHINE': "softwarerepository.LocalMachine",
'SOFTWAREREPOSITORY.NFSSERVER': "softwarerepository.NfsServer",
'STORAGE.AUTOMATICDRIVEGROUP': "storage.AutomaticDriveGroup",
'STORAGE.HITACHIARRAYUTILIZATION': "storage.HitachiArrayUtilization",
'STORAGE.HITACHICAPACITY': "storage.HitachiCapacity",
'STORAGE.HITACHIINITIATOR': "storage.HitachiInitiator",
'STORAGE.INITIATOR': "storage.Initiator",
'STORAGE.KEYSETTING': "storage.KeySetting",
'STORAGE.LOCALKEYSETTING': "storage.LocalKeySetting",
'STORAGE.M2VIRTUALDRIVECONFIG': "storage.M2VirtualDriveConfig",
'STORAGE.MANUALDRIVEGROUP': "storage.ManualDriveGroup",
'STORAGE.NETAPPETHERNETPORTLAG': "storage.NetAppEthernetPortLag",
'STORAGE.NETAPPETHERNETPORTVLAN': "storage.NetAppEthernetPortVlan",
'STORAGE.NETAPPEXPORTPOLICYRULE': "storage.NetAppExportPolicyRule",
'STORAGE.NETAPPHIGHAVAILABILITY': "storage.NetAppHighAvailability",
'STORAGE.NETAPPPERFORMANCEMETRICSAVERAGE': "storage.NetAppPerformanceMetricsAverage",
'STORAGE.NETAPPPORT': "storage.NetAppPort",
'STORAGE.NETAPPSTORAGECLUSTEREFFICIENCY': "storage.NetAppStorageClusterEfficiency",
'STORAGE.NETAPPSTORAGEUTILIZATION': "storage.NetAppStorageUtilization",
'STORAGE.PUREARRAYUTILIZATION': "storage.PureArrayUtilization",
'STORAGE.PUREDISKUTILIZATION': "storage.PureDiskUtilization",
'STORAGE.PUREHOSTUTILIZATION': "storage.PureHostUtilization",
'STORAGE.PUREREPLICATIONBLACKOUT': "storage.PureReplicationBlackout",
'STORAGE.PUREVOLUMEUTILIZATION': "storage.PureVolumeUtilization",
'STORAGE.R0DRIVE': "storage.R0Drive",
'STORAGE.REMOTEKEYSETTING': "storage.RemoteKeySetting",
'STORAGE.SPANDRIVES': "storage.SpanDrives",
'STORAGE.STORAGECONTAINERHOSTMOUNTSTATUS': "storage.StorageContainerHostMountStatus",
'STORAGE.STORAGECONTAINERUTILIZATION': "storage.StorageContainerUtilization",
'STORAGE.VIRTUALDRIVECONFIGURATION': "storage.VirtualDriveConfiguration",
'STORAGE.VIRTUALDRIVEPOLICY': "storage.VirtualDrivePolicy",
'STORAGE.VOLUMEUTILIZATION': "storage.VolumeUtilization",
'SYSLOG.LOCALFILELOGGINGCLIENT': "syslog.LocalFileLoggingClient",
'SYSLOG.REMOTELOGGINGCLIENT': "syslog.RemoteLoggingClient",
'TAM.ACTION': "tam.Action",
'TAM.APIDATASOURCE': "tam.ApiDataSource",
'TAM.EOLADVISORYDETAILS': "tam.EolAdvisoryDetails",
'TAM.EOLSEVERITY': "tam.EolSeverity",
'TAM.IDENTIFIERS': "tam.Identifiers",
'TAM.MILESTONE': "tam.Milestone",
'TAM.PSIRTSEVERITY': "tam.PsirtSeverity",
'TAM.QUERYENTRY': "tam.QueryEntry",
'TAM.S3DATASOURCE': "tam.S3DataSource",
'TAM.SECURITYADVISORYDETAILS': "tam.SecurityAdvisoryDetails",
'TAM.TEXTFSMTEMPLATEDATASOURCE': "tam.TextFsmTemplateDataSource",
'TECHSUPPORTMANAGEMENT.APPLIANCEPARAM': "techsupportmanagement.ApplianceParam",
'TECHSUPPORTMANAGEMENT.NIAPARAM': "techsupportmanagement.NiaParam",
'TECHSUPPORTMANAGEMENT.PLATFORMPARAM': "techsupportmanagement.PlatformParam",
'TEMPLATE.TRANSFORMATIONSTAGE': "template.TransformationStage",
'TERRAFORM.CLOUDRESOURCE': "terraform.CloudResource",
'TERRAFORM.RUNSTATE': "terraform.Runstate",
'UCSD.CONNECTORPACK': "ucsd.ConnectorPack",
'UCSD.UCSDRESTOREPARAMETERS': "ucsd.UcsdRestoreParameters",
'UCSDCONNECTOR.RESTCLIENTMESSAGE': "ucsdconnector.RestClientMessage",
'UUIDPOOL.UUIDBLOCK': "uuidpool.UuidBlock",
'VIRTUALIZATION.ACTIONINFO': "virtualization.ActionInfo",
'VIRTUALIZATION.AWSVMCOMPUTECONFIGURATION': "virtualization.AwsVmComputeConfiguration",
'VIRTUALIZATION.AWSVMCONFIGURATION': "virtualization.AwsVmConfiguration",
'VIRTUALIZATION.AWSVMNETWORKCONFIGURATION': "virtualization.AwsVmNetworkConfiguration",
'VIRTUALIZATION.AWSVMSTORAGECONFIGURATION': "virtualization.AwsVmStorageConfiguration",
'VIRTUALIZATION.BONDSTATE': "virtualization.BondState",
'VIRTUALIZATION.CLOUDINITCONFIG': "virtualization.CloudInitConfig",
'VIRTUALIZATION.COMPUTECAPACITY': "virtualization.ComputeCapacity",
'VIRTUALIZATION.CPUALLOCATION': "virtualization.CpuAllocation",
'VIRTUALIZATION.CPUINFO': "virtualization.CpuInfo",
'VIRTUALIZATION.DISKSTATUS': "virtualization.DiskStatus",
'VIRTUALIZATION.ESXICLONECUSTOMSPEC': "virtualization.EsxiCloneCustomSpec",
'VIRTUALIZATION.ESXIHOSTCONFIGURATION': "virtualization.EsxiHostConfiguration",
'VIRTUALIZATION.ESXIOVACUSTOMSPEC': "virtualization.EsxiOvaCustomSpec",
'VIRTUALIZATION.ESXIVMCOMPUTECONFIGURATION': "virtualization.EsxiVmComputeConfiguration",
'VIRTUALIZATION.ESXIVMCONFIGURATION': "virtualization.EsxiVmConfiguration",
'VIRTUALIZATION.ESXIVMNETWORKCONFIGURATION': "virtualization.EsxiVmNetworkConfiguration",
'VIRTUALIZATION.ESXIVMSTORAGECONFIGURATION': "virtualization.EsxiVmStorageConfiguration",
'VIRTUALIZATION.GUESTINFO': "virtualization.GuestInfo",
'VIRTUALIZATION.HXAPVMCONFIGURATION': "virtualization.HxapVmConfiguration",
'VIRTUALIZATION.IPADDRESSINFO': "virtualization.IpAddressInfo",
'VIRTUALIZATION.MEMORYALLOCATION': "virtualization.MemoryAllocation",
'VIRTUALIZATION.MEMORYCAPACITY': "virtualization.MemoryCapacity",
'VIRTUALIZATION.NETWORKINTERFACE': "virtualization.NetworkInterface",
'VIRTUALIZATION.NETWORKPORT': "virtualization.NetworkPort",
'VIRTUALIZATION.PRODUCTINFO': "virtualization.ProductInfo",
'VIRTUALIZATION.STORAGECAPACITY': "virtualization.StorageCapacity",
'VIRTUALIZATION.VDISKCONFIG': "virtualization.VdiskConfig",
'VIRTUALIZATION.VIRTUALDISKCONFIG': "virtualization.VirtualDiskConfig",
'VIRTUALIZATION.VIRTUALMACHINEDISK': "virtualization.VirtualMachineDisk",
'VIRTUALIZATION.VMDISK': "virtualization.VmDisk",
'VIRTUALIZATION.VMESXIDISK': "virtualization.VmEsxiDisk",
'VIRTUALIZATION.VMINTERFACE': "virtualization.VmInterface",
'VIRTUALIZATION.VMWAREREMOTEDISPLAYINFO': "virtualization.VmwareRemoteDisplayInfo",
'VIRTUALIZATION.VMWARERESOURCECONSUMPTION': "virtualization.VmwareResourceConsumption",
'VIRTUALIZATION.VMWARESHARESINFO': "virtualization.VmwareSharesInfo",
'VIRTUALIZATION.VMWARETEAMINGANDFAILOVER': "virtualization.VmwareTeamingAndFailover",
'VIRTUALIZATION.VMWAREVLANRANGE': "virtualization.VmwareVlanRange",
'VIRTUALIZATION.VMWAREVMCPUSHAREINFO': "virtualization.VmwareVmCpuShareInfo",
'VIRTUALIZATION.VMWAREVMCPUSOCKETINFO': "virtualization.VmwareVmCpuSocketInfo",
'VIRTUALIZATION.VMWAREVMDISKCOMMITINFO': "virtualization.VmwareVmDiskCommitInfo",
'VIRTUALIZATION.VMWAREVMMEMORYSHAREINFO': "virtualization.VmwareVmMemoryShareInfo",
'VIRTUALIZATION.VOLUMEINFO': "virtualization.VolumeInfo",
'VMEDIA.MAPPING': "vmedia.Mapping",
'VNIC.ARFSSETTINGS': "vnic.ArfsSettings",
'VNIC.CDN': "vnic.Cdn",
'VNIC.COMPLETIONQUEUESETTINGS': "vnic.CompletionQueueSettings",
'VNIC.ETHINTERRUPTSETTINGS': "vnic.EthInterruptSettings",
'VNIC.ETHRXQUEUESETTINGS': "vnic.EthRxQueueSettings",
'VNIC.ETHTXQUEUESETTINGS': "vnic.EthTxQueueSettings",
'VNIC.FCERRORRECOVERYSETTINGS': "vnic.FcErrorRecoverySettings",
'VNIC.FCINTERRUPTSETTINGS': "vnic.FcInterruptSettings",
'VNIC.FCQUEUESETTINGS': "vnic.FcQueueSettings",
'VNIC.FLOGISETTINGS': "vnic.FlogiSettings",
'VNIC.ISCSIAUTHPROFILE': "vnic.IscsiAuthProfile",
'VNIC.LUN': "vnic.Lun",
'VNIC.NVGRESETTINGS': "vnic.NvgreSettings",
'VNIC.PLACEMENTSETTINGS': "vnic.PlacementSettings",
'VNIC.PLOGISETTINGS': "vnic.PlogiSettings",
'VNIC.ROCESETTINGS': "vnic.RoceSettings",
'VNIC.RSSHASHSETTINGS': "vnic.RssHashSettings",
'VNIC.SCSIQUEUESETTINGS': "vnic.ScsiQueueSettings",
'VNIC.TCPOFFLOADSETTINGS': "vnic.TcpOffloadSettings",
'VNIC.USNICSETTINGS': "vnic.UsnicSettings",
'VNIC.VIFSTATUS': "vnic.VifStatus",
'VNIC.VLANSETTINGS': "vnic.VlanSettings",
'VNIC.VMQSETTINGS': "vnic.VmqSettings",
'VNIC.VSANSETTINGS': "vnic.VsanSettings",
'VNIC.VXLANSETTINGS': "vnic.VxlanSettings",
'WORKFLOW.ACTIONWORKFLOWDEFINITION': "workflow.ActionWorkflowDefinition",
'WORKFLOW.ARRAYDATATYPE': "workflow.ArrayDataType",
'WORKFLOW.ASSOCIATEDROLES': "workflow.AssociatedRoles",
'WORKFLOW.CLICOMMAND': "workflow.CliCommand",
'WORKFLOW.COMMENTS': "workflow.Comments",
'WORKFLOW.CONSTRAINTS': "workflow.Constraints",
'WORKFLOW.CUSTOMARRAYITEM': "workflow.CustomArrayItem",
'WORKFLOW.CUSTOMDATAPROPERTY': "workflow.CustomDataProperty",
'WORKFLOW.CUSTOMDATATYPE': "workflow.CustomDataType",
'WORKFLOW.CUSTOMDATATYPEPROPERTIES': "workflow.CustomDataTypeProperties",
'WORKFLOW.DECISIONCASE': "workflow.DecisionCase",
'WORKFLOW.DECISIONTASK': "workflow.DecisionTask",
'WORKFLOW.DEFAULTVALUE': "workflow.DefaultValue",
'WORKFLOW.DISPLAYMETA': "workflow.DisplayMeta",
'WORKFLOW.DYNAMICWORKFLOWACTIONTASKLIST': "workflow.DynamicWorkflowActionTaskList",
'WORKFLOW.ENUMENTRY': "workflow.EnumEntry",
'WORKFLOW.EXPECTPROMPT': "workflow.ExpectPrompt",
'WORKFLOW.FAILUREENDTASK': "workflow.FailureEndTask",
'WORKFLOW.FILEDOWNLOADOP': "workflow.FileDownloadOp",
'WORKFLOW.FILEOPERATIONS': "workflow.FileOperations",
'WORKFLOW.FILETEMPLATEOP': "workflow.FileTemplateOp",
'WORKFLOW.FILETRANSFER': "workflow.FileTransfer",
'WORKFLOW.FORKTASK': "workflow.ForkTask",
'WORKFLOW.INITIATORCONTEXT': "workflow.InitiatorContext",
'WORKFLOW.INTERNALPROPERTIES': "workflow.InternalProperties",
'WORKFLOW.JOINTASK': "workflow.JoinTask",
'WORKFLOW.LOOPTASK': "workflow.LoopTask",
'WORKFLOW.MESSAGE': "workflow.Message",
'WORKFLOW.MOREFERENCEARRAYITEM': "workflow.MoReferenceArrayItem",
'WORKFLOW.MOREFERENCEDATATYPE': "workflow.MoReferenceDataType",
'WORKFLOW.MOREFERENCEPROPERTY': "workflow.MoReferenceProperty",
'WORKFLOW.PARAMETERSET': "workflow.ParameterSet",
'WORKFLOW.PRIMITIVEARRAYITEM': "workflow.PrimitiveArrayItem",
'WORKFLOW.PRIMITIVEDATAPROPERTY': "workflow.PrimitiveDataProperty",
'WORKFLOW.PRIMITIVEDATATYPE': "workflow.PrimitiveDataType",
'WORKFLOW.PROPERTIES': "workflow.Properties",
'WORKFLOW.RESULTHANDLER': "workflow.ResultHandler",
'WORKFLOW.ROLLBACKTASK': "workflow.RollbackTask",
'WORKFLOW.ROLLBACKWORKFLOWTASK': "workflow.RollbackWorkflowTask",
'WORKFLOW.SELECTORPROPERTY': "workflow.SelectorProperty",
'WORKFLOW.SSHCMD': "workflow.SshCmd",
'WORKFLOW.SSHCONFIG': "workflow.SshConfig",
'WORKFLOW.SSHSESSION': "workflow.SshSession",
'WORKFLOW.STARTTASK': "workflow.StartTask",
'WORKFLOW.SUBWORKFLOWTASK': "workflow.SubWorkflowTask",
'WORKFLOW.SUCCESSENDTASK': "workflow.SuccessEndTask",
'WORKFLOW.TARGETCONTEXT': "workflow.TargetContext",
'WORKFLOW.TARGETDATATYPE': "workflow.TargetDataType",
'WORKFLOW.TARGETPROPERTY': "workflow.TargetProperty",
'WORKFLOW.TASKCONSTRAINTS': "workflow.TaskConstraints",
'WORKFLOW.TASKRETRYINFO': "workflow.TaskRetryInfo",
'WORKFLOW.UIINPUTFILTER': "workflow.UiInputFilter",
'WORKFLOW.VALIDATIONERROR': "workflow.ValidationError",
'WORKFLOW.VALIDATIONINFORMATION': "workflow.ValidationInformation",
'WORKFLOW.WAITTASK': "workflow.WaitTask",
'WORKFLOW.WAITTASKPROMPT': "workflow.WaitTaskPrompt",
'WORKFLOW.WEBAPI': "workflow.WebApi",
'WORKFLOW.WORKERTASK': "workflow.WorkerTask",
'WORKFLOW.WORKFLOWCTX': "workflow.WorkflowCtx",
'WORKFLOW.WORKFLOWENGINEPROPERTIES': "workflow.WorkflowEngineProperties",
'WORKFLOW.WORKFLOWINFOPROPERTIES': "workflow.WorkflowInfoProperties",
'WORKFLOW.WORKFLOWPROPERTIES': "workflow.WorkflowProperties",
'WORKFLOW.XMLAPI': "workflow.XmlApi",
'X509.CERTIFICATE': "x509.Certificate",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = True
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
}
@cached_property
def discriminator():
lazy_import()
val = {
'softwarerepository.ApplianceUpload': SoftwarerepositoryApplianceUpload,
'softwarerepository.CifsServer': SoftwarerepositoryCifsServer,
'softwarerepository.HttpServer': SoftwarerepositoryHttpServer,
'softwarerepository.LocalMachine': SoftwarerepositoryLocalMachine,
'softwarerepository.NfsServer': SoftwarerepositoryNfsServer,
}
if not val:
return None
return {'class_id': val}
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
    @convert_js_args_to_python_args
    def __init__(self, class_id, object_type, *args, **kwargs): # noqa: E501
        """SoftwarerepositoryFileServer - a model defined in OpenAPI
        Args:
            class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data. The enum values provides the list of concrete types that can be instantiated from this abstract type.
            object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property. The enum values provides the list of concrete types that can be instantiated from this abstract type.
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # Pop framework-control keyword arguments out of kwargs before the
        # remaining entries are treated as model data properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # All model properties must be supplied by keyword; any positional
        # arguments beyond the declared ones are rejected outright.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        # Initialize internal bookkeeping state (the names listed in
        # required_properties) before any model properties are set.
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        # Record this class so a discriminator loop back into it is not
        # followed a second time (see the docstring above).
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        required_args = {
            'class_id': class_id,
            'object_type': object_type,
        }
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # Delegate to the framework to distribute model_args across the
        # composed (allOf/anyOf/oneOf) schema instances; it returns a
        # 4-tuple of instances, name->instance map, additional-properties
        # instances, and the args no schema consumed.
        composed_info = validate_get_composed_info(
            constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]

        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in kwargs.items():
            # Drop kwargs no schema consumed when the configuration asks to
            # discard unknown keys and no additionalProperties schema can
            # absorb them; otherwise set them like any other property.
            if var_name in unused_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        not self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error beause the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
MoBaseComplexType,
],
'oneOf': [
],
}
| 65.377924 | 1,678 | 0.677617 |
import re
import sys
from intersight.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import referenced model classes on demand and publish them as module globals.

    The imports are deferred to call time (rather than module import time)
    so that circular imports between generated model modules are avoided.
    """
    from intersight.model.mo_base_complex_type import MoBaseComplexType
    from intersight.model.softwarerepository_appliance_upload import SoftwarerepositoryApplianceUpload
    from intersight.model.softwarerepository_cifs_server import SoftwarerepositoryCifsServer
    from intersight.model.softwarerepository_http_server import SoftwarerepositoryHttpServer
    from intersight.model.softwarerepository_local_machine import SoftwarerepositoryLocalMachine
    from intersight.model.softwarerepository_nfs_server import SoftwarerepositoryNfsServer
    globals().update(
        MoBaseComplexType=MoBaseComplexType,
        SoftwarerepositoryApplianceUpload=SoftwarerepositoryApplianceUpload,
        SoftwarerepositoryCifsServer=SoftwarerepositoryCifsServer,
        SoftwarerepositoryHttpServer=SoftwarerepositoryHttpServer,
        SoftwarerepositoryLocalMachine=SoftwarerepositoryLocalMachine,
        SoftwarerepositoryNfsServer=SoftwarerepositoryNfsServer,
    )
class SoftwarerepositoryFileServer(ModelComposed):
allowed_values = {
('class_id',): {
'ACCESS.ADDRESSTYPE': "access.AddressType",
'ADAPTER.ADAPTERCONFIG': "adapter.AdapterConfig",
'ADAPTER.DCEINTERFACESETTINGS': "adapter.DceInterfaceSettings",
'ADAPTER.ETHSETTINGS': "adapter.EthSettings",
'ADAPTER.FCSETTINGS': "adapter.FcSettings",
'ADAPTER.PORTCHANNELSETTINGS': "adapter.PortChannelSettings",
'APPLIANCE.APISTATUS': "appliance.ApiStatus",
'APPLIANCE.CERTRENEWALPHASE': "appliance.CertRenewalPhase",
'APPLIANCE.KEYVALUEPAIR': "appliance.KeyValuePair",
'APPLIANCE.STATUSCHECK': "appliance.StatusCheck",
'ASSET.ADDRESSINFORMATION': "asset.AddressInformation",
'ASSET.APIKEYCREDENTIAL': "asset.ApiKeyCredential",
'ASSET.CLIENTCERTIFICATECREDENTIAL': "asset.ClientCertificateCredential",
'ASSET.CLOUDCONNECTION': "asset.CloudConnection",
'ASSET.CONNECTIONCONTROLMESSAGE': "asset.ConnectionControlMessage",
'ASSET.CONTRACTINFORMATION': "asset.ContractInformation",
'ASSET.CUSTOMERINFORMATION': "asset.CustomerInformation",
'ASSET.DEPLOYMENTALARMINFO': "asset.DeploymentAlarmInfo",
'ASSET.DEPLOYMENTDEVICEALARMINFO': "asset.DeploymentDeviceAlarmInfo",
'ASSET.DEPLOYMENTDEVICEINFORMATION': "asset.DeploymentDeviceInformation",
'ASSET.DEVICEINFORMATION': "asset.DeviceInformation",
'ASSET.DEVICESTATISTICS': "asset.DeviceStatistics",
'ASSET.DEVICETRANSACTION': "asset.DeviceTransaction",
'ASSET.GLOBALULTIMATE': "asset.GlobalUltimate",
'ASSET.HTTPCONNECTION': "asset.HttpConnection",
'ASSET.INTERSIGHTDEVICECONNECTORCONNECTION': "asset.IntersightDeviceConnectorConnection",
'ASSET.METERINGTYPE': "asset.MeteringType",
'ASSET.NEWRELICCREDENTIAL': "asset.NewRelicCredential",
'ASSET.NOAUTHENTICATIONCREDENTIAL': "asset.NoAuthenticationCredential",
'ASSET.OAUTHBEARERTOKENCREDENTIAL': "asset.OauthBearerTokenCredential",
'ASSET.OAUTHCLIENTIDSECRETCREDENTIAL': "asset.OauthClientIdSecretCredential",
'ASSET.ORCHESTRATIONHITACHIVIRTUALSTORAGEPLATFORMOPTIONS': "asset.OrchestrationHitachiVirtualStoragePlatformOptions",
'ASSET.ORCHESTRATIONSERVICE': "asset.OrchestrationService",
'ASSET.PARENTCONNECTIONSIGNATURE': "asset.ParentConnectionSignature",
'ASSET.PRIVATEKEYCREDENTIAL': "asset.PrivateKeyCredential",
'ASSET.PRODUCTINFORMATION': "asset.ProductInformation",
'ASSET.SERVICENOWCREDENTIAL': "asset.ServiceNowCredential",
'ASSET.SSHCONNECTION': "asset.SshConnection",
'ASSET.SUDIINFO': "asset.SudiInfo",
'ASSET.TARGETKEY': "asset.TargetKey",
'ASSET.TARGETSIGNATURE': "asset.TargetSignature",
'ASSET.TARGETSTATUSDETAILS': "asset.TargetStatusDetails",
'ASSET.TERRAFORMINTEGRATIONSERVICE': "asset.TerraformIntegrationService",
'ASSET.TERRAFORMINTEGRATIONTERRAFORMAGENTOPTIONS': "asset.TerraformIntegrationTerraformAgentOptions",
'ASSET.TERRAFORMINTEGRATIONTERRAFORMCLOUDOPTIONS': "asset.TerraformIntegrationTerraformCloudOptions",
'ASSET.USERNAMEPASSWORDCREDENTIAL': "asset.UsernamePasswordCredential",
'ASSET.VIRTUALIZATIONAMAZONWEBSERVICEOPTIONS': "asset.VirtualizationAmazonWebServiceOptions",
'ASSET.VIRTUALIZATIONSERVICE': "asset.VirtualizationService",
'ASSET.VMHOST': "asset.VmHost",
'ASSET.WORKLOADOPTIMIZERAMAZONWEBSERVICESBILLINGOPTIONS': "asset.WorkloadOptimizerAmazonWebServicesBillingOptions",
'ASSET.WORKLOADOPTIMIZERDYNATRACEOPTIONS': "asset.WorkloadOptimizerDynatraceOptions",
'ASSET.WORKLOADOPTIMIZERHYPERVOPTIONS': "asset.WorkloadOptimizerHypervOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREAPPLICATIONINSIGHTSOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureApplicationInsightsOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREENTERPRISEAGREEMENTOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureEnterpriseAgreementOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZURESERVICEPRINCIPALOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureServicePrincipalOptions",
'ASSET.WORKLOADOPTIMIZERNEWRELICOPTIONS': "asset.WorkloadOptimizerNewRelicOptions",
'ASSET.WORKLOADOPTIMIZEROPENSTACKOPTIONS': "asset.WorkloadOptimizerOpenStackOptions",
'ASSET.WORKLOADOPTIMIZERREDHATOPENSTACKOPTIONS': "asset.WorkloadOptimizerRedHatOpenStackOptions",
'ASSET.WORKLOADOPTIMIZERSERVICE': "asset.WorkloadOptimizerService",
'ASSET.WORKLOADOPTIMIZERVMWAREVCENTEROPTIONS': "asset.WorkloadOptimizerVmwareVcenterOptions",
'BOOT.BOOTLOADER': "boot.Bootloader",
'BOOT.ISCSI': "boot.Iscsi",
'BOOT.LOCALCDD': "boot.LocalCdd",
'BOOT.LOCALDISK': "boot.LocalDisk",
'BOOT.NVME': "boot.Nvme",
'BOOT.PCHSTORAGE': "boot.PchStorage",
'BOOT.PXE': "boot.Pxe",
'BOOT.SAN': "boot.San",
'BOOT.SDCARD': "boot.SdCard",
'BOOT.UEFISHELL': "boot.UefiShell",
'BOOT.USB': "boot.Usb",
'BOOT.VIRTUALMEDIA': "boot.VirtualMedia",
'BULK.HTTPHEADER': "bulk.HttpHeader",
'BULK.RESTRESULT': "bulk.RestResult",
'BULK.RESTSUBREQUEST': "bulk.RestSubRequest",
'CAPABILITY.PORTRANGE': "capability.PortRange",
'CAPABILITY.SWITCHNETWORKLIMITS': "capability.SwitchNetworkLimits",
'CAPABILITY.SWITCHSTORAGELIMITS': "capability.SwitchStorageLimits",
'CAPABILITY.SWITCHSYSTEMLIMITS': "capability.SwitchSystemLimits",
'CAPABILITY.SWITCHINGMODECAPABILITY': "capability.SwitchingModeCapability",
'CERTIFICATEMANAGEMENT.IMC': "certificatemanagement.Imc",
'CLOUD.AVAILABILITYZONE': "cloud.AvailabilityZone",
'CLOUD.BILLINGUNIT': "cloud.BillingUnit",
'CLOUD.CLOUDREGION': "cloud.CloudRegion",
'CLOUD.CLOUDTAG': "cloud.CloudTag",
'CLOUD.CUSTOMATTRIBUTES': "cloud.CustomAttributes",
'CLOUD.IMAGEREFERENCE': "cloud.ImageReference",
'CLOUD.INSTANCETYPE': "cloud.InstanceType",
'CLOUD.NETWORKACCESSCONFIG': "cloud.NetworkAccessConfig",
'CLOUD.NETWORKADDRESS': "cloud.NetworkAddress",
'CLOUD.NETWORKINSTANCEATTACHMENT': "cloud.NetworkInstanceAttachment",
'CLOUD.NETWORKINTERFACEATTACHMENT': "cloud.NetworkInterfaceAttachment",
'CLOUD.SECURITYGROUPRULE': "cloud.SecurityGroupRule",
'CLOUD.TFCWORKSPACEVARIABLES': "cloud.TfcWorkspaceVariables",
'CLOUD.VOLUMEATTACHMENT': "cloud.VolumeAttachment",
'CLOUD.VOLUMEINSTANCEATTACHMENT': "cloud.VolumeInstanceAttachment",
'CLOUD.VOLUMEIOPSINFO': "cloud.VolumeIopsInfo",
'CLOUD.VOLUMETYPE': "cloud.VolumeType",
'CMRF.CMRF': "cmrf.CmRf",
'COMM.IPV4ADDRESSBLOCK': "comm.IpV4AddressBlock",
'COMM.IPV4INTERFACE': "comm.IpV4Interface",
'COMM.IPV6INTERFACE': "comm.IpV6Interface",
'COMPUTE.ALARMSUMMARY': "compute.AlarmSummary",
'COMPUTE.IPADDRESS': "compute.IpAddress",
'COMPUTE.PERSISTENTMEMORYMODULE': "compute.PersistentMemoryModule",
'COMPUTE.PERSISTENTMEMORYOPERATION': "compute.PersistentMemoryOperation",
'COMPUTE.SERVERCONFIG': "compute.ServerConfig",
'COMPUTE.SERVEROPSTATUS': "compute.ServerOpStatus",
'COMPUTE.STORAGECONTROLLEROPERATION': "compute.StorageControllerOperation",
'COMPUTE.STORAGEPHYSICALDRIVE': "compute.StoragePhysicalDrive",
'COMPUTE.STORAGEPHYSICALDRIVEOPERATION': "compute.StoragePhysicalDriveOperation",
'COMPUTE.STORAGEVIRTUALDRIVE': "compute.StorageVirtualDrive",
'COMPUTE.STORAGEVIRTUALDRIVEOPERATION': "compute.StorageVirtualDriveOperation",
'COND.ALARMSUMMARY': "cond.AlarmSummary",
'CONNECTOR.CLOSESTREAMMESSAGE': "connector.CloseStreamMessage",
'CONNECTOR.COMMANDCONTROLMESSAGE': "connector.CommandControlMessage",
'CONNECTOR.COMMANDTERMINALSTREAM': "connector.CommandTerminalStream",
'CONNECTOR.EXPECTPROMPT': "connector.ExpectPrompt",
'CONNECTOR.FETCHSTREAMMESSAGE': "connector.FetchStreamMessage",
'CONNECTOR.FILECHECKSUM': "connector.FileChecksum",
'CONNECTOR.FILEMESSAGE': "connector.FileMessage",
'CONNECTOR.HTTPREQUEST': "connector.HttpRequest",
'CONNECTOR.SSHCONFIG': "connector.SshConfig",
'CONNECTOR.SSHMESSAGE': "connector.SshMessage",
'CONNECTOR.STARTSTREAM': "connector.StartStream",
'CONNECTOR.STARTSTREAMFROMDEVICE': "connector.StartStreamFromDevice",
'CONNECTOR.STREAMACKNOWLEDGE': "connector.StreamAcknowledge",
'CONNECTOR.STREAMINPUT': "connector.StreamInput",
'CONNECTOR.STREAMKEEPALIVE': "connector.StreamKeepalive",
'CONNECTOR.TARGETCHANGEMESSAGE': "connector.TargetChangeMessage",
'CONNECTOR.URL': "connector.Url",
'CONNECTOR.WINRMREQUEST': "connector.WinrmRequest",
'CONNECTOR.XMLAPIMESSAGE': "connector.XmlApiMessage",
'CONNECTORPACK.CONNECTORPACKUPDATE': "connectorpack.ConnectorPackUpdate",
'CONTENT.COMPLEXTYPE': "content.ComplexType",
'CONTENT.PARAMETER': "content.Parameter",
'CONTENT.TEXTPARAMETER': "content.TextParameter",
'CONVERGEDINFRA.ALARMSUMMARY': "convergedinfra.AlarmSummary",
'CONVERGEDINFRA.COMPLIANCESUMMARY': "convergedinfra.ComplianceSummary",
'CONVERGEDINFRA.PODSUMMARY': "convergedinfra.PodSummary",
'CRD.CUSTOMRESOURCECONFIGPROPERTY': "crd.CustomResourceConfigProperty",
'EQUIPMENT.IOCARDIDENTITY': "equipment.IoCardIdentity",
'FABRIC.LLDPSETTINGS': "fabric.LldpSettings",
'FABRIC.MACAGINGSETTINGS': "fabric.MacAgingSettings",
'FABRIC.PORTIDENTIFIER': "fabric.PortIdentifier",
'FABRIC.QOSCLASS': "fabric.QosClass",
'FABRIC.UDLDGLOBALSETTINGS': "fabric.UdldGlobalSettings",
'FABRIC.UDLDSETTINGS': "fabric.UdldSettings",
'FABRIC.VLANSETTINGS': "fabric.VlanSettings",
'FCPOOL.BLOCK': "fcpool.Block",
'FEEDBACK.FEEDBACKDATA': "feedback.FeedbackData",
'FIRMWARE.CHASSISUPGRADEIMPACT': "firmware.ChassisUpgradeImpact",
'FIRMWARE.CIFSSERVER': "firmware.CifsServer",
'FIRMWARE.COMPONENTIMPACT': "firmware.ComponentImpact",
'FIRMWARE.COMPONENTMETA': "firmware.ComponentMeta",
'FIRMWARE.DIRECTDOWNLOAD': "firmware.DirectDownload",
'FIRMWARE.FABRICUPGRADEIMPACT': "firmware.FabricUpgradeImpact",
'FIRMWARE.FIRMWAREINVENTORY': "firmware.FirmwareInventory",
'FIRMWARE.HTTPSERVER': "firmware.HttpServer",
'FIRMWARE.INCLUDECOMPONENTLISTTYPE': "firmware.IncludeComponentListType",
'FIRMWARE.NETWORKSHARE': "firmware.NetworkShare",
'FIRMWARE.NFSSERVER': "firmware.NfsServer",
'FIRMWARE.SERVERUPGRADEIMPACT': "firmware.ServerUpgradeImpact",
'FORECAST.MODEL': "forecast.Model",
'HCL.CONSTRAINT': "hcl.Constraint",
'HCL.FIRMWARE': "hcl.Firmware",
'HCL.HARDWARECOMPATIBILITYPROFILE': "hcl.HardwareCompatibilityProfile",
'HCL.PRODUCT': "hcl.Product",
'HYPERFLEX.ALARMSUMMARY': "hyperflex.AlarmSummary",
'HYPERFLEX.APPSETTINGCONSTRAINT': "hyperflex.AppSettingConstraint",
'HYPERFLEX.BACKUPPOLICYSETTINGS': "hyperflex.BackupPolicySettings",
'HYPERFLEX.DATASTOREINFO': "hyperflex.DatastoreInfo",
'HYPERFLEX.ENTITYREFERENCE': "hyperflex.EntityReference",
'HYPERFLEX.ERRORSTACK': "hyperflex.ErrorStack",
'HYPERFLEX.FEATURELIMITENTRY': "hyperflex.FeatureLimitEntry",
'HYPERFLEX.FILEPATH': "hyperflex.FilePath",
'HYPERFLEX.HEALTHCHECKSCRIPTINFO': "hyperflex.HealthCheckScriptInfo",
'HYPERFLEX.HXHOSTMOUNTSTATUSDT': "hyperflex.HxHostMountStatusDt",
'HYPERFLEX.HXLICENSEAUTHORIZATIONDETAILSDT': "hyperflex.HxLicenseAuthorizationDetailsDt",
'HYPERFLEX.HXLINKDT': "hyperflex.HxLinkDt",
'HYPERFLEX.HXNETWORKADDRESSDT': "hyperflex.HxNetworkAddressDt",
'HYPERFLEX.HXPLATFORMDATASTORECONFIGDT': "hyperflex.HxPlatformDatastoreConfigDt",
'HYPERFLEX.HXREGISTRATIONDETAILSDT': "hyperflex.HxRegistrationDetailsDt",
'HYPERFLEX.HXRESILIENCYINFODT': "hyperflex.HxResiliencyInfoDt",
'HYPERFLEX.HXSITEDT': "hyperflex.HxSiteDt",
'HYPERFLEX.HXUUIDDT': "hyperflex.HxUuIdDt",
'HYPERFLEX.HXZONEINFODT': "hyperflex.HxZoneInfoDt",
'HYPERFLEX.HXZONERESILIENCYINFODT': "hyperflex.HxZoneResiliencyInfoDt",
'HYPERFLEX.IPADDRRANGE': "hyperflex.IpAddrRange",
'HYPERFLEX.LOGICALAVAILABILITYZONE': "hyperflex.LogicalAvailabilityZone",
'HYPERFLEX.MACADDRPREFIXRANGE': "hyperflex.MacAddrPrefixRange",
'HYPERFLEX.MAPCLUSTERIDTOPROTECTIONINFO': "hyperflex.MapClusterIdToProtectionInfo",
'HYPERFLEX.MAPCLUSTERIDTOSTSNAPSHOTPOINT': "hyperflex.MapClusterIdToStSnapshotPoint",
'HYPERFLEX.MAPUUIDTOTRACKEDDISK': "hyperflex.MapUuidToTrackedDisk",
'HYPERFLEX.NAMEDVLAN': "hyperflex.NamedVlan",
'HYPERFLEX.NAMEDVSAN': "hyperflex.NamedVsan",
'HYPERFLEX.PORTTYPETOPORTNUMBERMAP': "hyperflex.PortTypeToPortNumberMap",
'HYPERFLEX.PROTECTIONINFO': "hyperflex.ProtectionInfo",
'HYPERFLEX.REPLICATIONCLUSTERREFERENCETOSCHEDULE': "hyperflex.ReplicationClusterReferenceToSchedule",
'HYPERFLEX.REPLICATIONPEERINFO': "hyperflex.ReplicationPeerInfo",
'HYPERFLEX.REPLICATIONPLATDATASTORE': "hyperflex.ReplicationPlatDatastore",
'HYPERFLEX.REPLICATIONPLATDATASTOREPAIR': "hyperflex.ReplicationPlatDatastorePair",
'HYPERFLEX.REPLICATIONSCHEDULE': "hyperflex.ReplicationSchedule",
'HYPERFLEX.REPLICATIONSTATUS': "hyperflex.ReplicationStatus",
'HYPERFLEX.RPOSTATUS': "hyperflex.RpoStatus",
'HYPERFLEX.SERVERFIRMWAREVERSIONINFO': "hyperflex.ServerFirmwareVersionInfo",
'HYPERFLEX.SERVERMODELENTRY': "hyperflex.ServerModelEntry",
'HYPERFLEX.SNAPSHOTFILES': "hyperflex.SnapshotFiles",
'HYPERFLEX.SNAPSHOTINFOBRIEF': "hyperflex.SnapshotInfoBrief",
'HYPERFLEX.SNAPSHOTPOINT': "hyperflex.SnapshotPoint",
'HYPERFLEX.SNAPSHOTSTATUS': "hyperflex.SnapshotStatus",
'HYPERFLEX.STPLATFORMCLUSTERHEALINGINFO': "hyperflex.StPlatformClusterHealingInfo",
'HYPERFLEX.STPLATFORMCLUSTERRESILIENCYINFO': "hyperflex.StPlatformClusterResiliencyInfo",
'HYPERFLEX.SUMMARY': "hyperflex.Summary",
'HYPERFLEX.TRACKEDDISK': "hyperflex.TrackedDisk",
'HYPERFLEX.TRACKEDFILE': "hyperflex.TrackedFile",
'HYPERFLEX.VIRTUALMACHINE': "hyperflex.VirtualMachine",
'HYPERFLEX.VIRTUALMACHINERUNTIMEINFO': "hyperflex.VirtualMachineRuntimeInfo",
'HYPERFLEX.VMPROTECTIONSPACEUSAGE': "hyperflex.VmProtectionSpaceUsage",
'HYPERFLEX.WWXNPREFIXRANGE': "hyperflex.WwxnPrefixRange",
'I18N.MESSAGE': "i18n.Message",
'I18N.MESSAGEPARAM': "i18n.MessageParam",
'IAAS.LICENSEKEYSINFO': "iaas.LicenseKeysInfo",
'IAAS.LICENSEUTILIZATIONINFO': "iaas.LicenseUtilizationInfo",
'IAAS.WORKFLOWSTEPS': "iaas.WorkflowSteps",
'IAM.ACCOUNTPERMISSIONS': "iam.AccountPermissions",
'IAM.CLIENTMETA': "iam.ClientMeta",
'IAM.ENDPOINTPASSWORDPROPERTIES': "iam.EndPointPasswordProperties",
'IAM.FEATUREDEFINITION': "iam.FeatureDefinition",
'IAM.GROUPPERMISSIONTOROLES': "iam.GroupPermissionToRoles",
'IAM.LDAPBASEPROPERTIES': "iam.LdapBaseProperties",
'IAM.LDAPDNSPARAMETERS': "iam.LdapDnsParameters",
'IAM.PERMISSIONREFERENCE': "iam.PermissionReference",
'IAM.PERMISSIONTOROLES': "iam.PermissionToRoles",
'IAM.RULE': "iam.Rule",
'IAM.SAMLSPCONNECTION': "iam.SamlSpConnection",
'IAM.SSOSESSIONATTRIBUTES': "iam.SsoSessionAttributes",
'IMCCONNECTOR.WEBUIMESSAGE': "imcconnector.WebUiMessage",
'INFRA.HARDWAREINFO': "infra.HardwareInfo",
'INFRA.METADATA': "infra.MetaData",
'INVENTORY.INVENTORYMO': "inventory.InventoryMo",
'INVENTORY.UEMINFO': "inventory.UemInfo",
'IPPOOL.IPV4BLOCK': "ippool.IpV4Block",
'IPPOOL.IPV4CONFIG': "ippool.IpV4Config",
'IPPOOL.IPV6BLOCK': "ippool.IpV6Block",
'IPPOOL.IPV6CONFIG': "ippool.IpV6Config",
'IQNPOOL.IQNSUFFIXBLOCK': "iqnpool.IqnSuffixBlock",
'KUBERNETES.ACTIONINFO': "kubernetes.ActionInfo",
'KUBERNETES.ADDON': "kubernetes.Addon",
'KUBERNETES.ADDONCONFIGURATION': "kubernetes.AddonConfiguration",
'KUBERNETES.BAREMETALNETWORKINFO': "kubernetes.BaremetalNetworkInfo",
'KUBERNETES.CALICOCONFIG': "kubernetes.CalicoConfig",
'KUBERNETES.CLUSTERCERTIFICATECONFIGURATION': "kubernetes.ClusterCertificateConfiguration",
'KUBERNETES.CLUSTERMANAGEMENTCONFIG': "kubernetes.ClusterManagementConfig",
'KUBERNETES.CONFIGURATION': "kubernetes.Configuration",
'KUBERNETES.DAEMONSETSTATUS': "kubernetes.DaemonSetStatus",
'KUBERNETES.DEPLOYMENTSTATUS': "kubernetes.DeploymentStatus",
'KUBERNETES.ESSENTIALADDON': "kubernetes.EssentialAddon",
'KUBERNETES.ESXIVIRTUALMACHINEINFRACONFIG': "kubernetes.EsxiVirtualMachineInfraConfig",
'KUBERNETES.ETHERNET': "kubernetes.Ethernet",
'KUBERNETES.ETHERNETMATCHER': "kubernetes.EthernetMatcher",
'KUBERNETES.HYPERFLEXAPVIRTUALMACHINEINFRACONFIG': "kubernetes.HyperFlexApVirtualMachineInfraConfig",
'KUBERNETES.INGRESSSTATUS': "kubernetes.IngressStatus",
'KUBERNETES.INSTANCETYPEDETAILS': "kubernetes.InstanceTypeDetails",
'KUBERNETES.IPV4CONFIG': "kubernetes.IpV4Config",
'KUBERNETES.KEYVALUE': "kubernetes.KeyValue",
'KUBERNETES.LOADBALANCER': "kubernetes.LoadBalancer",
'KUBERNETES.NETWORKINTERFACESPEC': "kubernetes.NetworkInterfaceSpec",
'KUBERNETES.NODEADDRESS': "kubernetes.NodeAddress",
'KUBERNETES.NODEGROUPLABEL': "kubernetes.NodeGroupLabel",
'KUBERNETES.NODEGROUPTAINT': "kubernetes.NodeGroupTaint",
'KUBERNETES.NODEINFO': "kubernetes.NodeInfo",
'KUBERNETES.NODESPEC': "kubernetes.NodeSpec",
'KUBERNETES.NODESTATUS': "kubernetes.NodeStatus",
'KUBERNETES.OBJECTMETA': "kubernetes.ObjectMeta",
'KUBERNETES.OVSBOND': "kubernetes.OvsBond",
'KUBERNETES.PODSTATUS': "kubernetes.PodStatus",
'KUBERNETES.PROXYCONFIG': "kubernetes.ProxyConfig",
'KUBERNETES.ROUTE': "kubernetes.Route",
'KUBERNETES.SERVICESTATUS': "kubernetes.ServiceStatus",
'KUBERNETES.STATEFULSETSTATUS': "kubernetes.StatefulSetStatus",
'KUBERNETES.TAINT': "kubernetes.Taint",
'MACPOOL.BLOCK': "macpool.Block",
'MEMORY.PERSISTENTMEMORYGOAL': "memory.PersistentMemoryGoal",
'MEMORY.PERSISTENTMEMORYLOCALSECURITY': "memory.PersistentMemoryLocalSecurity",
'MEMORY.PERSISTENTMEMORYLOGICALNAMESPACE': "memory.PersistentMemoryLogicalNamespace",
'META.ACCESSPRIVILEGE': "meta.AccessPrivilege",
'META.DISPLAYNAMEDEFINITION': "meta.DisplayNameDefinition",
'META.IDENTITYDEFINITION': "meta.IdentityDefinition",
'META.PROPDEFINITION': "meta.PropDefinition",
'META.RELATIONSHIPDEFINITION': "meta.RelationshipDefinition",
'MO.MOREF': "mo.MoRef",
'MO.TAG': "mo.Tag",
'MO.VERSIONCONTEXT': "mo.VersionContext",
'NIAAPI.DETAIL': "niaapi.Detail",
'NIAAPI.NEWRELEASEDETAIL': "niaapi.NewReleaseDetail",
'NIAAPI.REVISIONINFO': "niaapi.RevisionInfo",
'NIAAPI.SOFTWAREREGEX': "niaapi.SoftwareRegex",
'NIAAPI.VERSIONREGEXPLATFORM': "niaapi.VersionRegexPlatform",
'NIATELEMETRY.BOOTFLASHDETAILS': "niatelemetry.BootflashDetails",
'NIATELEMETRY.DEPLOYMENTSTATUS': "niatelemetry.DeploymentStatus",
'NIATELEMETRY.DISKINFO': "niatelemetry.Diskinfo",
'NIATELEMETRY.INTERFACE': "niatelemetry.Interface",
'NIATELEMETRY.INTERFACEELEMENT': "niatelemetry.InterfaceElement",
'NIATELEMETRY.JOBDETAIL': "niatelemetry.JobDetail",
'NIATELEMETRY.LOGICALLINK': "niatelemetry.LogicalLink",
'NIATELEMETRY.NVEPACKETCOUNTERS': "niatelemetry.NvePacketCounters",
'NIATELEMETRY.NVEVNI': "niatelemetry.NveVni",
'NIATELEMETRY.NXOSBGPMVPN': "niatelemetry.NxosBgpMvpn",
'NIATELEMETRY.NXOSVTP': "niatelemetry.NxosVtp",
'NIATELEMETRY.SMARTLICENSE': "niatelemetry.SmartLicense",
'NIATELEMETRY.VNISTATUS': "niatelemetry.VniStatus",
'NOTIFICATION.ALARMMOCONDITION': "notification.AlarmMoCondition",
'NOTIFICATION.SENDEMAIL': "notification.SendEmail",
'NTP.AUTHNTPSERVER': "ntp.AuthNtpServer",
'ONPREM.IMAGEPACKAGE': "onprem.ImagePackage",
'ONPREM.SCHEDULE': "onprem.Schedule",
'ONPREM.UPGRADENOTE': "onprem.UpgradeNote",
'ONPREM.UPGRADEPHASE': "onprem.UpgradePhase",
'OPRS.KVPAIR': "oprs.Kvpair",
'OS.ANSWERS': "os.Answers",
'OS.GLOBALCONFIG': "os.GlobalConfig",
'OS.IPV4CONFIGURATION': "os.Ipv4Configuration",
'OS.IPV6CONFIGURATION': "os.Ipv6Configuration",
'OS.PHYSICALDISK': "os.PhysicalDisk",
'OS.PHYSICALDISKRESPONSE': "os.PhysicalDiskResponse",
'OS.PLACEHOLDER': "os.PlaceHolder",
'OS.SERVERCONFIG': "os.ServerConfig",
'OS.VALIDATIONINFORMATION': "os.ValidationInformation",
'OS.VIRTUALDRIVE': "os.VirtualDrive",
'OS.VIRTUALDRIVERESPONSE': "os.VirtualDriveResponse",
'OS.VMWAREPARAMETERS': "os.VmwareParameters",
'OS.WINDOWSPARAMETERS': "os.WindowsParameters",
'PKIX.DISTINGUISHEDNAME': "pkix.DistinguishedName",
'PKIX.ECDSAKEYSPEC': "pkix.EcdsaKeySpec",
'PKIX.EDDSAKEYSPEC': "pkix.EddsaKeySpec",
'PKIX.RSAALGORITHM': "pkix.RsaAlgorithm",
'PKIX.SUBJECTALTERNATENAME': "pkix.SubjectAlternateName",
'POLICY.ACTIONPARAM': "policy.ActionParam",
'POLICY.ACTIONQUALIFIER': "policy.ActionQualifier",
'POLICY.CONFIGCHANGE': "policy.ConfigChange",
'POLICY.CONFIGCHANGECONTEXT': "policy.ConfigChangeContext",
'POLICY.CONFIGCONTEXT': "policy.ConfigContext",
'POLICY.CONFIGRESULTCONTEXT': "policy.ConfigResultContext",
'POLICY.QUALIFIER': "policy.Qualifier",
'POLICYINVENTORY.JOBINFO': "policyinventory.JobInfo",
'RECOVERY.BACKUPSCHEDULE': "recovery.BackupSchedule",
'RESOURCE.PERTYPECOMBINEDSELECTOR': "resource.PerTypeCombinedSelector",
'RESOURCE.SELECTOR': "resource.Selector",
'RESOURCE.SOURCETOPERMISSIONRESOURCES': "resource.SourceToPermissionResources",
'RESOURCE.SOURCETOPERMISSIONRESOURCESHOLDER': "resource.SourceToPermissionResourcesHolder",
'RESOURCEPOOL.SERVERLEASEPARAMETERS': "resourcepool.ServerLeaseParameters",
'RESOURCEPOOL.SERVERPOOLPARAMETERS': "resourcepool.ServerPoolParameters",
'SDCARD.DIAGNOSTICS': "sdcard.Diagnostics",
'SDCARD.DRIVERS': "sdcard.Drivers",
'SDCARD.HOSTUPGRADEUTILITY': "sdcard.HostUpgradeUtility",
'SDCARD.OPERATINGSYSTEM': "sdcard.OperatingSystem",
'SDCARD.PARTITION': "sdcard.Partition",
'SDCARD.SERVERCONFIGURATIONUTILITY': "sdcard.ServerConfigurationUtility",
'SDCARD.USERPARTITION': "sdcard.UserPartition",
'SDWAN.NETWORKCONFIGURATIONTYPE': "sdwan.NetworkConfigurationType",
'SDWAN.TEMPLATEINPUTSTYPE': "sdwan.TemplateInputsType",
'SERVER.PENDINGWORKFLOWTRIGGER': "server.PendingWorkflowTrigger",
'SNMP.TRAP': "snmp.Trap",
'SNMP.USER': "snmp.User",
'SOFTWAREREPOSITORY.APPLIANCEUPLOAD': "softwarerepository.ApplianceUpload",
'SOFTWAREREPOSITORY.CIFSSERVER': "softwarerepository.CifsServer",
'SOFTWAREREPOSITORY.CONSTRAINTMODELS': "softwarerepository.ConstraintModels",
'SOFTWAREREPOSITORY.HTTPSERVER': "softwarerepository.HttpServer",
'SOFTWAREREPOSITORY.IMPORTRESULT': "softwarerepository.ImportResult",
'SOFTWAREREPOSITORY.LOCALMACHINE': "softwarerepository.LocalMachine",
'SOFTWAREREPOSITORY.NFSSERVER': "softwarerepository.NfsServer",
'STORAGE.AUTOMATICDRIVEGROUP': "storage.AutomaticDriveGroup",
'STORAGE.HITACHIARRAYUTILIZATION': "storage.HitachiArrayUtilization",
'STORAGE.HITACHICAPACITY': "storage.HitachiCapacity",
'STORAGE.HITACHIINITIATOR': "storage.HitachiInitiator",
'STORAGE.INITIATOR': "storage.Initiator",
'STORAGE.KEYSETTING': "storage.KeySetting",
'STORAGE.LOCALKEYSETTING': "storage.LocalKeySetting",
'STORAGE.M2VIRTUALDRIVECONFIG': "storage.M2VirtualDriveConfig",
'STORAGE.MANUALDRIVEGROUP': "storage.ManualDriveGroup",
'STORAGE.NETAPPETHERNETPORTLAG': "storage.NetAppEthernetPortLag",
'STORAGE.NETAPPETHERNETPORTVLAN': "storage.NetAppEthernetPortVlan",
'STORAGE.NETAPPEXPORTPOLICYRULE': "storage.NetAppExportPolicyRule",
'STORAGE.NETAPPHIGHAVAILABILITY': "storage.NetAppHighAvailability",
'STORAGE.NETAPPPERFORMANCEMETRICSAVERAGE': "storage.NetAppPerformanceMetricsAverage",
'STORAGE.NETAPPPORT': "storage.NetAppPort",
'STORAGE.NETAPPSTORAGECLUSTEREFFICIENCY': "storage.NetAppStorageClusterEfficiency",
'STORAGE.NETAPPSTORAGEUTILIZATION': "storage.NetAppStorageUtilization",
'STORAGE.PUREARRAYUTILIZATION': "storage.PureArrayUtilization",
'STORAGE.PUREDISKUTILIZATION': "storage.PureDiskUtilization",
'STORAGE.PUREHOSTUTILIZATION': "storage.PureHostUtilization",
'STORAGE.PUREREPLICATIONBLACKOUT': "storage.PureReplicationBlackout",
'STORAGE.PUREVOLUMEUTILIZATION': "storage.PureVolumeUtilization",
'STORAGE.R0DRIVE': "storage.R0Drive",
'STORAGE.REMOTEKEYSETTING': "storage.RemoteKeySetting",
'STORAGE.SPANDRIVES': "storage.SpanDrives",
'STORAGE.STORAGECONTAINERHOSTMOUNTSTATUS': "storage.StorageContainerHostMountStatus",
'STORAGE.STORAGECONTAINERUTILIZATION': "storage.StorageContainerUtilization",
'STORAGE.VIRTUALDRIVECONFIGURATION': "storage.VirtualDriveConfiguration",
'STORAGE.VIRTUALDRIVEPOLICY': "storage.VirtualDrivePolicy",
'STORAGE.VOLUMEUTILIZATION': "storage.VolumeUtilization",
'SYSLOG.LOCALFILELOGGINGCLIENT': "syslog.LocalFileLoggingClient",
'SYSLOG.REMOTELOGGINGCLIENT': "syslog.RemoteLoggingClient",
'TAM.ACTION': "tam.Action",
'TAM.APIDATASOURCE': "tam.ApiDataSource",
'TAM.EOLADVISORYDETAILS': "tam.EolAdvisoryDetails",
'TAM.EOLSEVERITY': "tam.EolSeverity",
'TAM.IDENTIFIERS': "tam.Identifiers",
'TAM.MILESTONE': "tam.Milestone",
'TAM.PSIRTSEVERITY': "tam.PsirtSeverity",
'TAM.QUERYENTRY': "tam.QueryEntry",
'TAM.S3DATASOURCE': "tam.S3DataSource",
'TAM.SECURITYADVISORYDETAILS': "tam.SecurityAdvisoryDetails",
'TAM.TEXTFSMTEMPLATEDATASOURCE': "tam.TextFsmTemplateDataSource",
'TECHSUPPORTMANAGEMENT.APPLIANCEPARAM': "techsupportmanagement.ApplianceParam",
'TECHSUPPORTMANAGEMENT.NIAPARAM': "techsupportmanagement.NiaParam",
'TECHSUPPORTMANAGEMENT.PLATFORMPARAM': "techsupportmanagement.PlatformParam",
'TEMPLATE.TRANSFORMATIONSTAGE': "template.TransformationStage",
'TERRAFORM.CLOUDRESOURCE': "terraform.CloudResource",
'TERRAFORM.RUNSTATE': "terraform.Runstate",
'UCSD.CONNECTORPACK': "ucsd.ConnectorPack",
'UCSD.UCSDRESTOREPARAMETERS': "ucsd.UcsdRestoreParameters",
'UCSDCONNECTOR.RESTCLIENTMESSAGE': "ucsdconnector.RestClientMessage",
'UUIDPOOL.UUIDBLOCK': "uuidpool.UuidBlock",
'VIRTUALIZATION.ACTIONINFO': "virtualization.ActionInfo",
'VIRTUALIZATION.AWSVMCOMPUTECONFIGURATION': "virtualization.AwsVmComputeConfiguration",
'VIRTUALIZATION.AWSVMCONFIGURATION': "virtualization.AwsVmConfiguration",
'VIRTUALIZATION.AWSVMNETWORKCONFIGURATION': "virtualization.AwsVmNetworkConfiguration",
'VIRTUALIZATION.AWSVMSTORAGECONFIGURATION': "virtualization.AwsVmStorageConfiguration",
'VIRTUALIZATION.BONDSTATE': "virtualization.BondState",
'VIRTUALIZATION.CLOUDINITCONFIG': "virtualization.CloudInitConfig",
'VIRTUALIZATION.COMPUTECAPACITY': "virtualization.ComputeCapacity",
'VIRTUALIZATION.CPUALLOCATION': "virtualization.CpuAllocation",
'VIRTUALIZATION.CPUINFO': "virtualization.CpuInfo",
'VIRTUALIZATION.DISKSTATUS': "virtualization.DiskStatus",
'VIRTUALIZATION.ESXICLONECUSTOMSPEC': "virtualization.EsxiCloneCustomSpec",
'VIRTUALIZATION.ESXIHOSTCONFIGURATION': "virtualization.EsxiHostConfiguration",
'VIRTUALIZATION.ESXIOVACUSTOMSPEC': "virtualization.EsxiOvaCustomSpec",
'VIRTUALIZATION.ESXIVMCOMPUTECONFIGURATION': "virtualization.EsxiVmComputeConfiguration",
'VIRTUALIZATION.ESXIVMCONFIGURATION': "virtualization.EsxiVmConfiguration",
'VIRTUALIZATION.ESXIVMNETWORKCONFIGURATION': "virtualization.EsxiVmNetworkConfiguration",
'VIRTUALIZATION.ESXIVMSTORAGECONFIGURATION': "virtualization.EsxiVmStorageConfiguration",
'VIRTUALIZATION.GUESTINFO': "virtualization.GuestInfo",
'VIRTUALIZATION.HXAPVMCONFIGURATION': "virtualization.HxapVmConfiguration",
'VIRTUALIZATION.IPADDRESSINFO': "virtualization.IpAddressInfo",
'VIRTUALIZATION.MEMORYALLOCATION': "virtualization.MemoryAllocation",
'VIRTUALIZATION.MEMORYCAPACITY': "virtualization.MemoryCapacity",
'VIRTUALIZATION.NETWORKINTERFACE': "virtualization.NetworkInterface",
'VIRTUALIZATION.NETWORKPORT': "virtualization.NetworkPort",
'VIRTUALIZATION.PRODUCTINFO': "virtualization.ProductInfo",
'VIRTUALIZATION.STORAGECAPACITY': "virtualization.StorageCapacity",
'VIRTUALIZATION.VDISKCONFIG': "virtualization.VdiskConfig",
'VIRTUALIZATION.VIRTUALDISKCONFIG': "virtualization.VirtualDiskConfig",
'VIRTUALIZATION.VIRTUALMACHINEDISK': "virtualization.VirtualMachineDisk",
'VIRTUALIZATION.VMDISK': "virtualization.VmDisk",
'VIRTUALIZATION.VMESXIDISK': "virtualization.VmEsxiDisk",
'VIRTUALIZATION.VMINTERFACE': "virtualization.VmInterface",
'VIRTUALIZATION.VMWAREREMOTEDISPLAYINFO': "virtualization.VmwareRemoteDisplayInfo",
'VIRTUALIZATION.VMWARERESOURCECONSUMPTION': "virtualization.VmwareResourceConsumption",
'VIRTUALIZATION.VMWARESHARESINFO': "virtualization.VmwareSharesInfo",
'VIRTUALIZATION.VMWARETEAMINGANDFAILOVER': "virtualization.VmwareTeamingAndFailover",
'VIRTUALIZATION.VMWAREVLANRANGE': "virtualization.VmwareVlanRange",
'VIRTUALIZATION.VMWAREVMCPUSHAREINFO': "virtualization.VmwareVmCpuShareInfo",
'VIRTUALIZATION.VMWAREVMCPUSOCKETINFO': "virtualization.VmwareVmCpuSocketInfo",
'VIRTUALIZATION.VMWAREVMDISKCOMMITINFO': "virtualization.VmwareVmDiskCommitInfo",
'VIRTUALIZATION.VMWAREVMMEMORYSHAREINFO': "virtualization.VmwareVmMemoryShareInfo",
'VIRTUALIZATION.VOLUMEINFO': "virtualization.VolumeInfo",
'VMEDIA.MAPPING': "vmedia.Mapping",
'VNIC.ARFSSETTINGS': "vnic.ArfsSettings",
'VNIC.CDN': "vnic.Cdn",
'VNIC.COMPLETIONQUEUESETTINGS': "vnic.CompletionQueueSettings",
'VNIC.ETHINTERRUPTSETTINGS': "vnic.EthInterruptSettings",
'VNIC.ETHRXQUEUESETTINGS': "vnic.EthRxQueueSettings",
'VNIC.ETHTXQUEUESETTINGS': "vnic.EthTxQueueSettings",
'VNIC.FCERRORRECOVERYSETTINGS': "vnic.FcErrorRecoverySettings",
'VNIC.FCINTERRUPTSETTINGS': "vnic.FcInterruptSettings",
'VNIC.FCQUEUESETTINGS': "vnic.FcQueueSettings",
'VNIC.FLOGISETTINGS': "vnic.FlogiSettings",
'VNIC.ISCSIAUTHPROFILE': "vnic.IscsiAuthProfile",
'VNIC.LUN': "vnic.Lun",
'VNIC.NVGRESETTINGS': "vnic.NvgreSettings",
'VNIC.PLACEMENTSETTINGS': "vnic.PlacementSettings",
'VNIC.PLOGISETTINGS': "vnic.PlogiSettings",
'VNIC.ROCESETTINGS': "vnic.RoceSettings",
'VNIC.RSSHASHSETTINGS': "vnic.RssHashSettings",
'VNIC.SCSIQUEUESETTINGS': "vnic.ScsiQueueSettings",
'VNIC.TCPOFFLOADSETTINGS': "vnic.TcpOffloadSettings",
'VNIC.USNICSETTINGS': "vnic.UsnicSettings",
'VNIC.VIFSTATUS': "vnic.VifStatus",
'VNIC.VLANSETTINGS': "vnic.VlanSettings",
'VNIC.VMQSETTINGS': "vnic.VmqSettings",
'VNIC.VSANSETTINGS': "vnic.VsanSettings",
'VNIC.VXLANSETTINGS': "vnic.VxlanSettings",
'WORKFLOW.ACTIONWORKFLOWDEFINITION': "workflow.ActionWorkflowDefinition",
'WORKFLOW.ARRAYDATATYPE': "workflow.ArrayDataType",
'WORKFLOW.ASSOCIATEDROLES': "workflow.AssociatedRoles",
'WORKFLOW.CLICOMMAND': "workflow.CliCommand",
'WORKFLOW.COMMENTS': "workflow.Comments",
'WORKFLOW.CONSTRAINTS': "workflow.Constraints",
'WORKFLOW.CUSTOMARRAYITEM': "workflow.CustomArrayItem",
'WORKFLOW.CUSTOMDATAPROPERTY': "workflow.CustomDataProperty",
'WORKFLOW.CUSTOMDATATYPE': "workflow.CustomDataType",
'WORKFLOW.CUSTOMDATATYPEPROPERTIES': "workflow.CustomDataTypeProperties",
'WORKFLOW.DECISIONCASE': "workflow.DecisionCase",
'WORKFLOW.DECISIONTASK': "workflow.DecisionTask",
'WORKFLOW.DEFAULTVALUE': "workflow.DefaultValue",
'WORKFLOW.DISPLAYMETA': "workflow.DisplayMeta",
'WORKFLOW.DYNAMICWORKFLOWACTIONTASKLIST': "workflow.DynamicWorkflowActionTaskList",
'WORKFLOW.ENUMENTRY': "workflow.EnumEntry",
'WORKFLOW.EXPECTPROMPT': "workflow.ExpectPrompt",
'WORKFLOW.FAILUREENDTASK': "workflow.FailureEndTask",
'WORKFLOW.FILEDOWNLOADOP': "workflow.FileDownloadOp",
'WORKFLOW.FILEOPERATIONS': "workflow.FileOperations",
'WORKFLOW.FILETEMPLATEOP': "workflow.FileTemplateOp",
'WORKFLOW.FILETRANSFER': "workflow.FileTransfer",
'WORKFLOW.FORKTASK': "workflow.ForkTask",
'WORKFLOW.INITIATORCONTEXT': "workflow.InitiatorContext",
'WORKFLOW.INTERNALPROPERTIES': "workflow.InternalProperties",
'WORKFLOW.JOINTASK': "workflow.JoinTask",
'WORKFLOW.LOOPTASK': "workflow.LoopTask",
'WORKFLOW.MESSAGE': "workflow.Message",
'WORKFLOW.MOREFERENCEARRAYITEM': "workflow.MoReferenceArrayItem",
'WORKFLOW.MOREFERENCEDATATYPE': "workflow.MoReferenceDataType",
'WORKFLOW.MOREFERENCEPROPERTY': "workflow.MoReferenceProperty",
'WORKFLOW.PARAMETERSET': "workflow.ParameterSet",
'WORKFLOW.PRIMITIVEARRAYITEM': "workflow.PrimitiveArrayItem",
'WORKFLOW.PRIMITIVEDATAPROPERTY': "workflow.PrimitiveDataProperty",
'WORKFLOW.PRIMITIVEDATATYPE': "workflow.PrimitiveDataType",
'WORKFLOW.PROPERTIES': "workflow.Properties",
'WORKFLOW.RESULTHANDLER': "workflow.ResultHandler",
'WORKFLOW.ROLLBACKTASK': "workflow.RollbackTask",
'WORKFLOW.ROLLBACKWORKFLOWTASK': "workflow.RollbackWorkflowTask",
'WORKFLOW.SELECTORPROPERTY': "workflow.SelectorProperty",
'WORKFLOW.SSHCMD': "workflow.SshCmd",
'WORKFLOW.SSHCONFIG': "workflow.SshConfig",
'WORKFLOW.SSHSESSION': "workflow.SshSession",
'WORKFLOW.STARTTASK': "workflow.StartTask",
'WORKFLOW.SUBWORKFLOWTASK': "workflow.SubWorkflowTask",
'WORKFLOW.SUCCESSENDTASK': "workflow.SuccessEndTask",
'WORKFLOW.TARGETCONTEXT': "workflow.TargetContext",
'WORKFLOW.TARGETDATATYPE': "workflow.TargetDataType",
'WORKFLOW.TARGETPROPERTY': "workflow.TargetProperty",
'WORKFLOW.TASKCONSTRAINTS': "workflow.TaskConstraints",
'WORKFLOW.TASKRETRYINFO': "workflow.TaskRetryInfo",
'WORKFLOW.UIINPUTFILTER': "workflow.UiInputFilter",
'WORKFLOW.VALIDATIONERROR': "workflow.ValidationError",
'WORKFLOW.VALIDATIONINFORMATION': "workflow.ValidationInformation",
'WORKFLOW.WAITTASK': "workflow.WaitTask",
'WORKFLOW.WAITTASKPROMPT': "workflow.WaitTaskPrompt",
'WORKFLOW.WEBAPI': "workflow.WebApi",
'WORKFLOW.WORKERTASK': "workflow.WorkerTask",
'WORKFLOW.WORKFLOWCTX': "workflow.WorkflowCtx",
'WORKFLOW.WORKFLOWENGINEPROPERTIES': "workflow.WorkflowEngineProperties",
'WORKFLOW.WORKFLOWINFOPROPERTIES': "workflow.WorkflowInfoProperties",
'WORKFLOW.WORKFLOWPROPERTIES': "workflow.WorkflowProperties",
'WORKFLOW.XMLAPI': "workflow.XmlApi",
'X509.CERTIFICATE': "x509.Certificate",
},
('object_type',): {
'ACCESS.ADDRESSTYPE': "access.AddressType",
'ADAPTER.ADAPTERCONFIG': "adapter.AdapterConfig",
'ADAPTER.DCEINTERFACESETTINGS': "adapter.DceInterfaceSettings",
'ADAPTER.ETHSETTINGS': "adapter.EthSettings",
'ADAPTER.FCSETTINGS': "adapter.FcSettings",
'ADAPTER.PORTCHANNELSETTINGS': "adapter.PortChannelSettings",
'APPLIANCE.APISTATUS': "appliance.ApiStatus",
'APPLIANCE.CERTRENEWALPHASE': "appliance.CertRenewalPhase",
'APPLIANCE.KEYVALUEPAIR': "appliance.KeyValuePair",
'APPLIANCE.STATUSCHECK': "appliance.StatusCheck",
'ASSET.ADDRESSINFORMATION': "asset.AddressInformation",
'ASSET.APIKEYCREDENTIAL': "asset.ApiKeyCredential",
'ASSET.CLIENTCERTIFICATECREDENTIAL': "asset.ClientCertificateCredential",
'ASSET.CLOUDCONNECTION': "asset.CloudConnection",
'ASSET.CONNECTIONCONTROLMESSAGE': "asset.ConnectionControlMessage",
'ASSET.CONTRACTINFORMATION': "asset.ContractInformation",
'ASSET.CUSTOMERINFORMATION': "asset.CustomerInformation",
'ASSET.DEPLOYMENTALARMINFO': "asset.DeploymentAlarmInfo",
'ASSET.DEPLOYMENTDEVICEALARMINFO': "asset.DeploymentDeviceAlarmInfo",
'ASSET.DEPLOYMENTDEVICEINFORMATION': "asset.DeploymentDeviceInformation",
'ASSET.DEVICEINFORMATION': "asset.DeviceInformation",
'ASSET.DEVICESTATISTICS': "asset.DeviceStatistics",
'ASSET.DEVICETRANSACTION': "asset.DeviceTransaction",
'ASSET.GLOBALULTIMATE': "asset.GlobalUltimate",
'ASSET.HTTPCONNECTION': "asset.HttpConnection",
'ASSET.INTERSIGHTDEVICECONNECTORCONNECTION': "asset.IntersightDeviceConnectorConnection",
'ASSET.METERINGTYPE': "asset.MeteringType",
'ASSET.NEWRELICCREDENTIAL': "asset.NewRelicCredential",
'ASSET.NOAUTHENTICATIONCREDENTIAL': "asset.NoAuthenticationCredential",
'ASSET.OAUTHBEARERTOKENCREDENTIAL': "asset.OauthBearerTokenCredential",
'ASSET.OAUTHCLIENTIDSECRETCREDENTIAL': "asset.OauthClientIdSecretCredential",
'ASSET.ORCHESTRATIONHITACHIVIRTUALSTORAGEPLATFORMOPTIONS': "asset.OrchestrationHitachiVirtualStoragePlatformOptions",
'ASSET.ORCHESTRATIONSERVICE': "asset.OrchestrationService",
'ASSET.PARENTCONNECTIONSIGNATURE': "asset.ParentConnectionSignature",
'ASSET.PRIVATEKEYCREDENTIAL': "asset.PrivateKeyCredential",
'ASSET.PRODUCTINFORMATION': "asset.ProductInformation",
'ASSET.SERVICENOWCREDENTIAL': "asset.ServiceNowCredential",
'ASSET.SSHCONNECTION': "asset.SshConnection",
'ASSET.SUDIINFO': "asset.SudiInfo",
'ASSET.TARGETKEY': "asset.TargetKey",
'ASSET.TARGETSIGNATURE': "asset.TargetSignature",
'ASSET.TARGETSTATUSDETAILS': "asset.TargetStatusDetails",
'ASSET.TERRAFORMINTEGRATIONSERVICE': "asset.TerraformIntegrationService",
'ASSET.TERRAFORMINTEGRATIONTERRAFORMAGENTOPTIONS': "asset.TerraformIntegrationTerraformAgentOptions",
'ASSET.TERRAFORMINTEGRATIONTERRAFORMCLOUDOPTIONS': "asset.TerraformIntegrationTerraformCloudOptions",
'ASSET.USERNAMEPASSWORDCREDENTIAL': "asset.UsernamePasswordCredential",
'ASSET.VIRTUALIZATIONAMAZONWEBSERVICEOPTIONS': "asset.VirtualizationAmazonWebServiceOptions",
'ASSET.VIRTUALIZATIONSERVICE': "asset.VirtualizationService",
'ASSET.VMHOST': "asset.VmHost",
'ASSET.WORKLOADOPTIMIZERAMAZONWEBSERVICESBILLINGOPTIONS': "asset.WorkloadOptimizerAmazonWebServicesBillingOptions",
'ASSET.WORKLOADOPTIMIZERDYNATRACEOPTIONS': "asset.WorkloadOptimizerDynatraceOptions",
'ASSET.WORKLOADOPTIMIZERHYPERVOPTIONS': "asset.WorkloadOptimizerHypervOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREAPPLICATIONINSIGHTSOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureApplicationInsightsOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREENTERPRISEAGREEMENTOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureEnterpriseAgreementOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZURESERVICEPRINCIPALOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureServicePrincipalOptions",
'ASSET.WORKLOADOPTIMIZERNEWRELICOPTIONS': "asset.WorkloadOptimizerNewRelicOptions",
'ASSET.WORKLOADOPTIMIZEROPENSTACKOPTIONS': "asset.WorkloadOptimizerOpenStackOptions",
'ASSET.WORKLOADOPTIMIZERREDHATOPENSTACKOPTIONS': "asset.WorkloadOptimizerRedHatOpenStackOptions",
'ASSET.WORKLOADOPTIMIZERSERVICE': "asset.WorkloadOptimizerService",
'ASSET.WORKLOADOPTIMIZERVMWAREVCENTEROPTIONS': "asset.WorkloadOptimizerVmwareVcenterOptions",
'BOOT.BOOTLOADER': "boot.Bootloader",
'BOOT.ISCSI': "boot.Iscsi",
'BOOT.LOCALCDD': "boot.LocalCdd",
'BOOT.LOCALDISK': "boot.LocalDisk",
'BOOT.NVME': "boot.Nvme",
'BOOT.PCHSTORAGE': "boot.PchStorage",
'BOOT.PXE': "boot.Pxe",
'BOOT.SAN': "boot.San",
'BOOT.SDCARD': "boot.SdCard",
'BOOT.UEFISHELL': "boot.UefiShell",
'BOOT.USB': "boot.Usb",
'BOOT.VIRTUALMEDIA': "boot.VirtualMedia",
'BULK.HTTPHEADER': "bulk.HttpHeader",
'BULK.RESTRESULT': "bulk.RestResult",
'BULK.RESTSUBREQUEST': "bulk.RestSubRequest",
'CAPABILITY.PORTRANGE': "capability.PortRange",
'CAPABILITY.SWITCHNETWORKLIMITS': "capability.SwitchNetworkLimits",
'CAPABILITY.SWITCHSTORAGELIMITS': "capability.SwitchStorageLimits",
'CAPABILITY.SWITCHSYSTEMLIMITS': "capability.SwitchSystemLimits",
'CAPABILITY.SWITCHINGMODECAPABILITY': "capability.SwitchingModeCapability",
'CERTIFICATEMANAGEMENT.IMC': "certificatemanagement.Imc",
'CLOUD.AVAILABILITYZONE': "cloud.AvailabilityZone",
'CLOUD.BILLINGUNIT': "cloud.BillingUnit",
'CLOUD.CLOUDREGION': "cloud.CloudRegion",
'CLOUD.CLOUDTAG': "cloud.CloudTag",
'CLOUD.CUSTOMATTRIBUTES': "cloud.CustomAttributes",
'CLOUD.IMAGEREFERENCE': "cloud.ImageReference",
'CLOUD.INSTANCETYPE': "cloud.InstanceType",
'CLOUD.NETWORKACCESSCONFIG': "cloud.NetworkAccessConfig",
'CLOUD.NETWORKADDRESS': "cloud.NetworkAddress",
'CLOUD.NETWORKINSTANCEATTACHMENT': "cloud.NetworkInstanceAttachment",
'CLOUD.NETWORKINTERFACEATTACHMENT': "cloud.NetworkInterfaceAttachment",
'CLOUD.SECURITYGROUPRULE': "cloud.SecurityGroupRule",
'CLOUD.TFCWORKSPACEVARIABLES': "cloud.TfcWorkspaceVariables",
'CLOUD.VOLUMEATTACHMENT': "cloud.VolumeAttachment",
'CLOUD.VOLUMEINSTANCEATTACHMENT': "cloud.VolumeInstanceAttachment",
'CLOUD.VOLUMEIOPSINFO': "cloud.VolumeIopsInfo",
'CLOUD.VOLUMETYPE': "cloud.VolumeType",
'CMRF.CMRF': "cmrf.CmRf",
'COMM.IPV4ADDRESSBLOCK': "comm.IpV4AddressBlock",
'COMM.IPV4INTERFACE': "comm.IpV4Interface",
'COMM.IPV6INTERFACE': "comm.IpV6Interface",
'COMPUTE.ALARMSUMMARY': "compute.AlarmSummary",
'COMPUTE.IPADDRESS': "compute.IpAddress",
'COMPUTE.PERSISTENTMEMORYMODULE': "compute.PersistentMemoryModule",
'COMPUTE.PERSISTENTMEMORYOPERATION': "compute.PersistentMemoryOperation",
'COMPUTE.SERVERCONFIG': "compute.ServerConfig",
'COMPUTE.SERVEROPSTATUS': "compute.ServerOpStatus",
'COMPUTE.STORAGECONTROLLEROPERATION': "compute.StorageControllerOperation",
'COMPUTE.STORAGEPHYSICALDRIVE': "compute.StoragePhysicalDrive",
'COMPUTE.STORAGEPHYSICALDRIVEOPERATION': "compute.StoragePhysicalDriveOperation",
'COMPUTE.STORAGEVIRTUALDRIVE': "compute.StorageVirtualDrive",
'COMPUTE.STORAGEVIRTUALDRIVEOPERATION': "compute.StorageVirtualDriveOperation",
'COND.ALARMSUMMARY': "cond.AlarmSummary",
'CONNECTOR.CLOSESTREAMMESSAGE': "connector.CloseStreamMessage",
'CONNECTOR.COMMANDCONTROLMESSAGE': "connector.CommandControlMessage",
'CONNECTOR.COMMANDTERMINALSTREAM': "connector.CommandTerminalStream",
'CONNECTOR.EXPECTPROMPT': "connector.ExpectPrompt",
'CONNECTOR.FETCHSTREAMMESSAGE': "connector.FetchStreamMessage",
'CONNECTOR.FILECHECKSUM': "connector.FileChecksum",
'CONNECTOR.FILEMESSAGE': "connector.FileMessage",
'CONNECTOR.HTTPREQUEST': "connector.HttpRequest",
'CONNECTOR.SSHCONFIG': "connector.SshConfig",
'CONNECTOR.SSHMESSAGE': "connector.SshMessage",
'CONNECTOR.STARTSTREAM': "connector.StartStream",
'CONNECTOR.STARTSTREAMFROMDEVICE': "connector.StartStreamFromDevice",
'CONNECTOR.STREAMACKNOWLEDGE': "connector.StreamAcknowledge",
'CONNECTOR.STREAMINPUT': "connector.StreamInput",
'CONNECTOR.STREAMKEEPALIVE': "connector.StreamKeepalive",
'CONNECTOR.TARGETCHANGEMESSAGE': "connector.TargetChangeMessage",
'CONNECTOR.URL': "connector.Url",
'CONNECTOR.WINRMREQUEST': "connector.WinrmRequest",
'CONNECTOR.XMLAPIMESSAGE': "connector.XmlApiMessage",
'CONNECTORPACK.CONNECTORPACKUPDATE': "connectorpack.ConnectorPackUpdate",
'CONTENT.COMPLEXTYPE': "content.ComplexType",
'CONTENT.PARAMETER': "content.Parameter",
'CONTENT.TEXTPARAMETER': "content.TextParameter",
'CONVERGEDINFRA.ALARMSUMMARY': "convergedinfra.AlarmSummary",
'CONVERGEDINFRA.COMPLIANCESUMMARY': "convergedinfra.ComplianceSummary",
'CONVERGEDINFRA.PODSUMMARY': "convergedinfra.PodSummary",
'CRD.CUSTOMRESOURCECONFIGPROPERTY': "crd.CustomResourceConfigProperty",
'EQUIPMENT.IOCARDIDENTITY': "equipment.IoCardIdentity",
'FABRIC.LLDPSETTINGS': "fabric.LldpSettings",
'FABRIC.MACAGINGSETTINGS': "fabric.MacAgingSettings",
'FABRIC.PORTIDENTIFIER': "fabric.PortIdentifier",
'FABRIC.QOSCLASS': "fabric.QosClass",
'FABRIC.UDLDGLOBALSETTINGS': "fabric.UdldGlobalSettings",
'FABRIC.UDLDSETTINGS': "fabric.UdldSettings",
'FABRIC.VLANSETTINGS': "fabric.VlanSettings",
'FCPOOL.BLOCK': "fcpool.Block",
'FEEDBACK.FEEDBACKDATA': "feedback.FeedbackData",
'FIRMWARE.CHASSISUPGRADEIMPACT': "firmware.ChassisUpgradeImpact",
'FIRMWARE.CIFSSERVER': "firmware.CifsServer",
'FIRMWARE.COMPONENTIMPACT': "firmware.ComponentImpact",
'FIRMWARE.COMPONENTMETA': "firmware.ComponentMeta",
'FIRMWARE.DIRECTDOWNLOAD': "firmware.DirectDownload",
'FIRMWARE.FABRICUPGRADEIMPACT': "firmware.FabricUpgradeImpact",
'FIRMWARE.FIRMWAREINVENTORY': "firmware.FirmwareInventory",
'FIRMWARE.HTTPSERVER': "firmware.HttpServer",
'FIRMWARE.INCLUDECOMPONENTLISTTYPE': "firmware.IncludeComponentListType",
'FIRMWARE.NETWORKSHARE': "firmware.NetworkShare",
'FIRMWARE.NFSSERVER': "firmware.NfsServer",
'FIRMWARE.SERVERUPGRADEIMPACT': "firmware.ServerUpgradeImpact",
'FORECAST.MODEL': "forecast.Model",
'HCL.CONSTRAINT': "hcl.Constraint",
'HCL.FIRMWARE': "hcl.Firmware",
'HCL.HARDWARECOMPATIBILITYPROFILE': "hcl.HardwareCompatibilityProfile",
'HCL.PRODUCT': "hcl.Product",
'HYPERFLEX.ALARMSUMMARY': "hyperflex.AlarmSummary",
'HYPERFLEX.APPSETTINGCONSTRAINT': "hyperflex.AppSettingConstraint",
'HYPERFLEX.BACKUPPOLICYSETTINGS': "hyperflex.BackupPolicySettings",
'HYPERFLEX.DATASTOREINFO': "hyperflex.DatastoreInfo",
'HYPERFLEX.ENTITYREFERENCE': "hyperflex.EntityReference",
'HYPERFLEX.ERRORSTACK': "hyperflex.ErrorStack",
'HYPERFLEX.FEATURELIMITENTRY': "hyperflex.FeatureLimitEntry",
'HYPERFLEX.FILEPATH': "hyperflex.FilePath",
'HYPERFLEX.HEALTHCHECKSCRIPTINFO': "hyperflex.HealthCheckScriptInfo",
'HYPERFLEX.HXHOSTMOUNTSTATUSDT': "hyperflex.HxHostMountStatusDt",
'HYPERFLEX.HXLICENSEAUTHORIZATIONDETAILSDT': "hyperflex.HxLicenseAuthorizationDetailsDt",
'HYPERFLEX.HXLINKDT': "hyperflex.HxLinkDt",
'HYPERFLEX.HXNETWORKADDRESSDT': "hyperflex.HxNetworkAddressDt",
'HYPERFLEX.HXPLATFORMDATASTORECONFIGDT': "hyperflex.HxPlatformDatastoreConfigDt",
'HYPERFLEX.HXREGISTRATIONDETAILSDT': "hyperflex.HxRegistrationDetailsDt",
'HYPERFLEX.HXRESILIENCYINFODT': "hyperflex.HxResiliencyInfoDt",
'HYPERFLEX.HXSITEDT': "hyperflex.HxSiteDt",
'HYPERFLEX.HXUUIDDT': "hyperflex.HxUuIdDt",
'HYPERFLEX.HXZONEINFODT': "hyperflex.HxZoneInfoDt",
'HYPERFLEX.HXZONERESILIENCYINFODT': "hyperflex.HxZoneResiliencyInfoDt",
'HYPERFLEX.IPADDRRANGE': "hyperflex.IpAddrRange",
'HYPERFLEX.LOGICALAVAILABILITYZONE': "hyperflex.LogicalAvailabilityZone",
'HYPERFLEX.MACADDRPREFIXRANGE': "hyperflex.MacAddrPrefixRange",
'HYPERFLEX.MAPCLUSTERIDTOPROTECTIONINFO': "hyperflex.MapClusterIdToProtectionInfo",
'HYPERFLEX.MAPCLUSTERIDTOSTSNAPSHOTPOINT': "hyperflex.MapClusterIdToStSnapshotPoint",
'HYPERFLEX.MAPUUIDTOTRACKEDDISK': "hyperflex.MapUuidToTrackedDisk",
'HYPERFLEX.NAMEDVLAN': "hyperflex.NamedVlan",
'HYPERFLEX.NAMEDVSAN': "hyperflex.NamedVsan",
'HYPERFLEX.PORTTYPETOPORTNUMBERMAP': "hyperflex.PortTypeToPortNumberMap",
'HYPERFLEX.PROTECTIONINFO': "hyperflex.ProtectionInfo",
'HYPERFLEX.REPLICATIONCLUSTERREFERENCETOSCHEDULE': "hyperflex.ReplicationClusterReferenceToSchedule",
'HYPERFLEX.REPLICATIONPEERINFO': "hyperflex.ReplicationPeerInfo",
'HYPERFLEX.REPLICATIONPLATDATASTORE': "hyperflex.ReplicationPlatDatastore",
'HYPERFLEX.REPLICATIONPLATDATASTOREPAIR': "hyperflex.ReplicationPlatDatastorePair",
'HYPERFLEX.REPLICATIONSCHEDULE': "hyperflex.ReplicationSchedule",
'HYPERFLEX.REPLICATIONSTATUS': "hyperflex.ReplicationStatus",
'HYPERFLEX.RPOSTATUS': "hyperflex.RpoStatus",
'HYPERFLEX.SERVERFIRMWAREVERSIONINFO': "hyperflex.ServerFirmwareVersionInfo",
'HYPERFLEX.SERVERMODELENTRY': "hyperflex.ServerModelEntry",
'HYPERFLEX.SNAPSHOTFILES': "hyperflex.SnapshotFiles",
'HYPERFLEX.SNAPSHOTINFOBRIEF': "hyperflex.SnapshotInfoBrief",
'HYPERFLEX.SNAPSHOTPOINT': "hyperflex.SnapshotPoint",
'HYPERFLEX.SNAPSHOTSTATUS': "hyperflex.SnapshotStatus",
'HYPERFLEX.STPLATFORMCLUSTERHEALINGINFO': "hyperflex.StPlatformClusterHealingInfo",
'HYPERFLEX.STPLATFORMCLUSTERRESILIENCYINFO': "hyperflex.StPlatformClusterResiliencyInfo",
'HYPERFLEX.SUMMARY': "hyperflex.Summary",
'HYPERFLEX.TRACKEDDISK': "hyperflex.TrackedDisk",
'HYPERFLEX.TRACKEDFILE': "hyperflex.TrackedFile",
'HYPERFLEX.VIRTUALMACHINE': "hyperflex.VirtualMachine",
'HYPERFLEX.VIRTUALMACHINERUNTIMEINFO': "hyperflex.VirtualMachineRuntimeInfo",
'HYPERFLEX.VMPROTECTIONSPACEUSAGE': "hyperflex.VmProtectionSpaceUsage",
'HYPERFLEX.WWXNPREFIXRANGE': "hyperflex.WwxnPrefixRange",
'I18N.MESSAGE': "i18n.Message",
'I18N.MESSAGEPARAM': "i18n.MessageParam",
'IAAS.LICENSEKEYSINFO': "iaas.LicenseKeysInfo",
'IAAS.LICENSEUTILIZATIONINFO': "iaas.LicenseUtilizationInfo",
'IAAS.WORKFLOWSTEPS': "iaas.WorkflowSteps",
'IAM.ACCOUNTPERMISSIONS': "iam.AccountPermissions",
'IAM.CLIENTMETA': "iam.ClientMeta",
'IAM.ENDPOINTPASSWORDPROPERTIES': "iam.EndPointPasswordProperties",
'IAM.FEATUREDEFINITION': "iam.FeatureDefinition",
'IAM.GROUPPERMISSIONTOROLES': "iam.GroupPermissionToRoles",
'IAM.LDAPBASEPROPERTIES': "iam.LdapBaseProperties",
'IAM.LDAPDNSPARAMETERS': "iam.LdapDnsParameters",
'IAM.PERMISSIONREFERENCE': "iam.PermissionReference",
'IAM.PERMISSIONTOROLES': "iam.PermissionToRoles",
'IAM.RULE': "iam.Rule",
'IAM.SAMLSPCONNECTION': "iam.SamlSpConnection",
'IAM.SSOSESSIONATTRIBUTES': "iam.SsoSessionAttributes",
'IMCCONNECTOR.WEBUIMESSAGE': "imcconnector.WebUiMessage",
'INFRA.HARDWAREINFO': "infra.HardwareInfo",
'INFRA.METADATA': "infra.MetaData",
'INVENTORY.INVENTORYMO': "inventory.InventoryMo",
'INVENTORY.UEMINFO': "inventory.UemInfo",
'IPPOOL.IPV4BLOCK': "ippool.IpV4Block",
'IPPOOL.IPV4CONFIG': "ippool.IpV4Config",
'IPPOOL.IPV6BLOCK': "ippool.IpV6Block",
'IPPOOL.IPV6CONFIG': "ippool.IpV6Config",
'IQNPOOL.IQNSUFFIXBLOCK': "iqnpool.IqnSuffixBlock",
'KUBERNETES.ACTIONINFO': "kubernetes.ActionInfo",
'KUBERNETES.ADDON': "kubernetes.Addon",
'KUBERNETES.ADDONCONFIGURATION': "kubernetes.AddonConfiguration",
'KUBERNETES.BAREMETALNETWORKINFO': "kubernetes.BaremetalNetworkInfo",
'KUBERNETES.CALICOCONFIG': "kubernetes.CalicoConfig",
'KUBERNETES.CLUSTERCERTIFICATECONFIGURATION': "kubernetes.ClusterCertificateConfiguration",
'KUBERNETES.CLUSTERMANAGEMENTCONFIG': "kubernetes.ClusterManagementConfig",
'KUBERNETES.CONFIGURATION': "kubernetes.Configuration",
'KUBERNETES.DAEMONSETSTATUS': "kubernetes.DaemonSetStatus",
'KUBERNETES.DEPLOYMENTSTATUS': "kubernetes.DeploymentStatus",
'KUBERNETES.ESSENTIALADDON': "kubernetes.EssentialAddon",
'KUBERNETES.ESXIVIRTUALMACHINEINFRACONFIG': "kubernetes.EsxiVirtualMachineInfraConfig",
'KUBERNETES.ETHERNET': "kubernetes.Ethernet",
'KUBERNETES.ETHERNETMATCHER': "kubernetes.EthernetMatcher",
'KUBERNETES.HYPERFLEXAPVIRTUALMACHINEINFRACONFIG': "kubernetes.HyperFlexApVirtualMachineInfraConfig",
'KUBERNETES.INGRESSSTATUS': "kubernetes.IngressStatus",
'KUBERNETES.INSTANCETYPEDETAILS': "kubernetes.InstanceTypeDetails",
'KUBERNETES.IPV4CONFIG': "kubernetes.IpV4Config",
'KUBERNETES.KEYVALUE': "kubernetes.KeyValue",
'KUBERNETES.LOADBALANCER': "kubernetes.LoadBalancer",
'KUBERNETES.NETWORKINTERFACESPEC': "kubernetes.NetworkInterfaceSpec",
'KUBERNETES.NODEADDRESS': "kubernetes.NodeAddress",
'KUBERNETES.NODEGROUPLABEL': "kubernetes.NodeGroupLabel",
'KUBERNETES.NODEGROUPTAINT': "kubernetes.NodeGroupTaint",
'KUBERNETES.NODEINFO': "kubernetes.NodeInfo",
'KUBERNETES.NODESPEC': "kubernetes.NodeSpec",
'KUBERNETES.NODESTATUS': "kubernetes.NodeStatus",
'KUBERNETES.OBJECTMETA': "kubernetes.ObjectMeta",
'KUBERNETES.OVSBOND': "kubernetes.OvsBond",
'KUBERNETES.PODSTATUS': "kubernetes.PodStatus",
'KUBERNETES.PROXYCONFIG': "kubernetes.ProxyConfig",
'KUBERNETES.ROUTE': "kubernetes.Route",
'KUBERNETES.SERVICESTATUS': "kubernetes.ServiceStatus",
'KUBERNETES.STATEFULSETSTATUS': "kubernetes.StatefulSetStatus",
'KUBERNETES.TAINT': "kubernetes.Taint",
'MACPOOL.BLOCK': "macpool.Block",
'MEMORY.PERSISTENTMEMORYGOAL': "memory.PersistentMemoryGoal",
'MEMORY.PERSISTENTMEMORYLOCALSECURITY': "memory.PersistentMemoryLocalSecurity",
'MEMORY.PERSISTENTMEMORYLOGICALNAMESPACE': "memory.PersistentMemoryLogicalNamespace",
'META.ACCESSPRIVILEGE': "meta.AccessPrivilege",
'META.DISPLAYNAMEDEFINITION': "meta.DisplayNameDefinition",
'META.IDENTITYDEFINITION': "meta.IdentityDefinition",
'META.PROPDEFINITION': "meta.PropDefinition",
'META.RELATIONSHIPDEFINITION': "meta.RelationshipDefinition",
'MO.MOREF': "mo.MoRef",
'MO.TAG': "mo.Tag",
'MO.VERSIONCONTEXT': "mo.VersionContext",
'NIAAPI.DETAIL': "niaapi.Detail",
'NIAAPI.NEWRELEASEDETAIL': "niaapi.NewReleaseDetail",
'NIAAPI.REVISIONINFO': "niaapi.RevisionInfo",
'NIAAPI.SOFTWAREREGEX': "niaapi.SoftwareRegex",
'NIAAPI.VERSIONREGEXPLATFORM': "niaapi.VersionRegexPlatform",
'NIATELEMETRY.BOOTFLASHDETAILS': "niatelemetry.BootflashDetails",
'NIATELEMETRY.DEPLOYMENTSTATUS': "niatelemetry.DeploymentStatus",
'NIATELEMETRY.DISKINFO': "niatelemetry.Diskinfo",
'NIATELEMETRY.INTERFACE': "niatelemetry.Interface",
'NIATELEMETRY.INTERFACEELEMENT': "niatelemetry.InterfaceElement",
'NIATELEMETRY.JOBDETAIL': "niatelemetry.JobDetail",
'NIATELEMETRY.LOGICALLINK': "niatelemetry.LogicalLink",
'NIATELEMETRY.NVEPACKETCOUNTERS': "niatelemetry.NvePacketCounters",
'NIATELEMETRY.NVEVNI': "niatelemetry.NveVni",
'NIATELEMETRY.NXOSBGPMVPN': "niatelemetry.NxosBgpMvpn",
'NIATELEMETRY.NXOSVTP': "niatelemetry.NxosVtp",
'NIATELEMETRY.SMARTLICENSE': "niatelemetry.SmartLicense",
'NIATELEMETRY.VNISTATUS': "niatelemetry.VniStatus",
'NOTIFICATION.ALARMMOCONDITION': "notification.AlarmMoCondition",
'NOTIFICATION.SENDEMAIL': "notification.SendEmail",
'NTP.AUTHNTPSERVER': "ntp.AuthNtpServer",
'ONPREM.IMAGEPACKAGE': "onprem.ImagePackage",
'ONPREM.SCHEDULE': "onprem.Schedule",
'ONPREM.UPGRADENOTE': "onprem.UpgradeNote",
'ONPREM.UPGRADEPHASE': "onprem.UpgradePhase",
'OPRS.KVPAIR': "oprs.Kvpair",
'OS.ANSWERS': "os.Answers",
'OS.GLOBALCONFIG': "os.GlobalConfig",
'OS.IPV4CONFIGURATION': "os.Ipv4Configuration",
'OS.IPV6CONFIGURATION': "os.Ipv6Configuration",
'OS.PHYSICALDISK': "os.PhysicalDisk",
'OS.PHYSICALDISKRESPONSE': "os.PhysicalDiskResponse",
'OS.PLACEHOLDER': "os.PlaceHolder",
'OS.SERVERCONFIG': "os.ServerConfig",
'OS.VALIDATIONINFORMATION': "os.ValidationInformation",
'OS.VIRTUALDRIVE': "os.VirtualDrive",
'OS.VIRTUALDRIVERESPONSE': "os.VirtualDriveResponse",
'OS.VMWAREPARAMETERS': "os.VmwareParameters",
'OS.WINDOWSPARAMETERS': "os.WindowsParameters",
'PKIX.DISTINGUISHEDNAME': "pkix.DistinguishedName",
'PKIX.ECDSAKEYSPEC': "pkix.EcdsaKeySpec",
'PKIX.EDDSAKEYSPEC': "pkix.EddsaKeySpec",
'PKIX.RSAALGORITHM': "pkix.RsaAlgorithm",
'PKIX.SUBJECTALTERNATENAME': "pkix.SubjectAlternateName",
'POLICY.ACTIONPARAM': "policy.ActionParam",
'POLICY.ACTIONQUALIFIER': "policy.ActionQualifier",
'POLICY.CONFIGCHANGE': "policy.ConfigChange",
'POLICY.CONFIGCHANGECONTEXT': "policy.ConfigChangeContext",
'POLICY.CONFIGCONTEXT': "policy.ConfigContext",
'POLICY.CONFIGRESULTCONTEXT': "policy.ConfigResultContext",
'POLICY.QUALIFIER': "policy.Qualifier",
'POLICYINVENTORY.JOBINFO': "policyinventory.JobInfo",
'RECOVERY.BACKUPSCHEDULE': "recovery.BackupSchedule",
'RESOURCE.PERTYPECOMBINEDSELECTOR': "resource.PerTypeCombinedSelector",
'RESOURCE.SELECTOR': "resource.Selector",
'RESOURCE.SOURCETOPERMISSIONRESOURCES': "resource.SourceToPermissionResources",
'RESOURCE.SOURCETOPERMISSIONRESOURCESHOLDER': "resource.SourceToPermissionResourcesHolder",
'RESOURCEPOOL.SERVERLEASEPARAMETERS': "resourcepool.ServerLeaseParameters",
'RESOURCEPOOL.SERVERPOOLPARAMETERS': "resourcepool.ServerPoolParameters",
'SDCARD.DIAGNOSTICS': "sdcard.Diagnostics",
'SDCARD.DRIVERS': "sdcard.Drivers",
'SDCARD.HOSTUPGRADEUTILITY': "sdcard.HostUpgradeUtility",
'SDCARD.OPERATINGSYSTEM': "sdcard.OperatingSystem",
'SDCARD.PARTITION': "sdcard.Partition",
'SDCARD.SERVERCONFIGURATIONUTILITY': "sdcard.ServerConfigurationUtility",
'SDCARD.USERPARTITION': "sdcard.UserPartition",
'SDWAN.NETWORKCONFIGURATIONTYPE': "sdwan.NetworkConfigurationType",
'SDWAN.TEMPLATEINPUTSTYPE': "sdwan.TemplateInputsType",
'SERVER.PENDINGWORKFLOWTRIGGER': "server.PendingWorkflowTrigger",
'SNMP.TRAP': "snmp.Trap",
'SNMP.USER': "snmp.User",
'SOFTWAREREPOSITORY.APPLIANCEUPLOAD': "softwarerepository.ApplianceUpload",
'SOFTWAREREPOSITORY.CIFSSERVER': "softwarerepository.CifsServer",
'SOFTWAREREPOSITORY.CONSTRAINTMODELS': "softwarerepository.ConstraintModels",
'SOFTWAREREPOSITORY.HTTPSERVER': "softwarerepository.HttpServer",
'SOFTWAREREPOSITORY.IMPORTRESULT': "softwarerepository.ImportResult",
'SOFTWAREREPOSITORY.LOCALMACHINE': "softwarerepository.LocalMachine",
'SOFTWAREREPOSITORY.NFSSERVER': "softwarerepository.NfsServer",
'STORAGE.AUTOMATICDRIVEGROUP': "storage.AutomaticDriveGroup",
'STORAGE.HITACHIARRAYUTILIZATION': "storage.HitachiArrayUtilization",
'STORAGE.HITACHICAPACITY': "storage.HitachiCapacity",
'STORAGE.HITACHIINITIATOR': "storage.HitachiInitiator",
'STORAGE.INITIATOR': "storage.Initiator",
'STORAGE.KEYSETTING': "storage.KeySetting",
'STORAGE.LOCALKEYSETTING': "storage.LocalKeySetting",
'STORAGE.M2VIRTUALDRIVECONFIG': "storage.M2VirtualDriveConfig",
'STORAGE.MANUALDRIVEGROUP': "storage.ManualDriveGroup",
'STORAGE.NETAPPETHERNETPORTLAG': "storage.NetAppEthernetPortLag",
'STORAGE.NETAPPETHERNETPORTVLAN': "storage.NetAppEthernetPortVlan",
'STORAGE.NETAPPEXPORTPOLICYRULE': "storage.NetAppExportPolicyRule",
'STORAGE.NETAPPHIGHAVAILABILITY': "storage.NetAppHighAvailability",
'STORAGE.NETAPPPERFORMANCEMETRICSAVERAGE': "storage.NetAppPerformanceMetricsAverage",
'STORAGE.NETAPPPORT': "storage.NetAppPort",
'STORAGE.NETAPPSTORAGECLUSTEREFFICIENCY': "storage.NetAppStorageClusterEfficiency",
'STORAGE.NETAPPSTORAGEUTILIZATION': "storage.NetAppStorageUtilization",
'STORAGE.PUREARRAYUTILIZATION': "storage.PureArrayUtilization",
'STORAGE.PUREDISKUTILIZATION': "storage.PureDiskUtilization",
'STORAGE.PUREHOSTUTILIZATION': "storage.PureHostUtilization",
'STORAGE.PUREREPLICATIONBLACKOUT': "storage.PureReplicationBlackout",
'STORAGE.PUREVOLUMEUTILIZATION': "storage.PureVolumeUtilization",
'STORAGE.R0DRIVE': "storage.R0Drive",
'STORAGE.REMOTEKEYSETTING': "storage.RemoteKeySetting",
'STORAGE.SPANDRIVES': "storage.SpanDrives",
'STORAGE.STORAGECONTAINERHOSTMOUNTSTATUS': "storage.StorageContainerHostMountStatus",
'STORAGE.STORAGECONTAINERUTILIZATION': "storage.StorageContainerUtilization",
'STORAGE.VIRTUALDRIVECONFIGURATION': "storage.VirtualDriveConfiguration",
'STORAGE.VIRTUALDRIVEPOLICY': "storage.VirtualDrivePolicy",
'STORAGE.VOLUMEUTILIZATION': "storage.VolumeUtilization",
'SYSLOG.LOCALFILELOGGINGCLIENT': "syslog.LocalFileLoggingClient",
'SYSLOG.REMOTELOGGINGCLIENT': "syslog.RemoteLoggingClient",
'TAM.ACTION': "tam.Action",
'TAM.APIDATASOURCE': "tam.ApiDataSource",
'TAM.EOLADVISORYDETAILS': "tam.EolAdvisoryDetails",
'TAM.EOLSEVERITY': "tam.EolSeverity",
'TAM.IDENTIFIERS': "tam.Identifiers",
'TAM.MILESTONE': "tam.Milestone",
'TAM.PSIRTSEVERITY': "tam.PsirtSeverity",
'TAM.QUERYENTRY': "tam.QueryEntry",
'TAM.S3DATASOURCE': "tam.S3DataSource",
'TAM.SECURITYADVISORYDETAILS': "tam.SecurityAdvisoryDetails",
'TAM.TEXTFSMTEMPLATEDATASOURCE': "tam.TextFsmTemplateDataSource",
'TECHSUPPORTMANAGEMENT.APPLIANCEPARAM': "techsupportmanagement.ApplianceParam",
'TECHSUPPORTMANAGEMENT.NIAPARAM': "techsupportmanagement.NiaParam",
'TECHSUPPORTMANAGEMENT.PLATFORMPARAM': "techsupportmanagement.PlatformParam",
'TEMPLATE.TRANSFORMATIONSTAGE': "template.TransformationStage",
'TERRAFORM.CLOUDRESOURCE': "terraform.CloudResource",
'TERRAFORM.RUNSTATE': "terraform.Runstate",
'UCSD.CONNECTORPACK': "ucsd.ConnectorPack",
'UCSD.UCSDRESTOREPARAMETERS': "ucsd.UcsdRestoreParameters",
'UCSDCONNECTOR.RESTCLIENTMESSAGE': "ucsdconnector.RestClientMessage",
'UUIDPOOL.UUIDBLOCK': "uuidpool.UuidBlock",
'VIRTUALIZATION.ACTIONINFO': "virtualization.ActionInfo",
'VIRTUALIZATION.AWSVMCOMPUTECONFIGURATION': "virtualization.AwsVmComputeConfiguration",
'VIRTUALIZATION.AWSVMCONFIGURATION': "virtualization.AwsVmConfiguration",
'VIRTUALIZATION.AWSVMNETWORKCONFIGURATION': "virtualization.AwsVmNetworkConfiguration",
'VIRTUALIZATION.AWSVMSTORAGECONFIGURATION': "virtualization.AwsVmStorageConfiguration",
'VIRTUALIZATION.BONDSTATE': "virtualization.BondState",
'VIRTUALIZATION.CLOUDINITCONFIG': "virtualization.CloudInitConfig",
'VIRTUALIZATION.COMPUTECAPACITY': "virtualization.ComputeCapacity",
'VIRTUALIZATION.CPUALLOCATION': "virtualization.CpuAllocation",
'VIRTUALIZATION.CPUINFO': "virtualization.CpuInfo",
'VIRTUALIZATION.DISKSTATUS': "virtualization.DiskStatus",
'VIRTUALIZATION.ESXICLONECUSTOMSPEC': "virtualization.EsxiCloneCustomSpec",
'VIRTUALIZATION.ESXIHOSTCONFIGURATION': "virtualization.EsxiHostConfiguration",
'VIRTUALIZATION.ESXIOVACUSTOMSPEC': "virtualization.EsxiOvaCustomSpec",
'VIRTUALIZATION.ESXIVMCOMPUTECONFIGURATION': "virtualization.EsxiVmComputeConfiguration",
'VIRTUALIZATION.ESXIVMCONFIGURATION': "virtualization.EsxiVmConfiguration",
'VIRTUALIZATION.ESXIVMNETWORKCONFIGURATION': "virtualization.EsxiVmNetworkConfiguration",
'VIRTUALIZATION.ESXIVMSTORAGECONFIGURATION': "virtualization.EsxiVmStorageConfiguration",
'VIRTUALIZATION.GUESTINFO': "virtualization.GuestInfo",
'VIRTUALIZATION.HXAPVMCONFIGURATION': "virtualization.HxapVmConfiguration",
'VIRTUALIZATION.IPADDRESSINFO': "virtualization.IpAddressInfo",
'VIRTUALIZATION.MEMORYALLOCATION': "virtualization.MemoryAllocation",
'VIRTUALIZATION.MEMORYCAPACITY': "virtualization.MemoryCapacity",
'VIRTUALIZATION.NETWORKINTERFACE': "virtualization.NetworkInterface",
'VIRTUALIZATION.NETWORKPORT': "virtualization.NetworkPort",
'VIRTUALIZATION.PRODUCTINFO': "virtualization.ProductInfo",
'VIRTUALIZATION.STORAGECAPACITY': "virtualization.StorageCapacity",
'VIRTUALIZATION.VDISKCONFIG': "virtualization.VdiskConfig",
'VIRTUALIZATION.VIRTUALDISKCONFIG': "virtualization.VirtualDiskConfig",
'VIRTUALIZATION.VIRTUALMACHINEDISK': "virtualization.VirtualMachineDisk",
'VIRTUALIZATION.VMDISK': "virtualization.VmDisk",
'VIRTUALIZATION.VMESXIDISK': "virtualization.VmEsxiDisk",
'VIRTUALIZATION.VMINTERFACE': "virtualization.VmInterface",
'VIRTUALIZATION.VMWAREREMOTEDISPLAYINFO': "virtualization.VmwareRemoteDisplayInfo",
'VIRTUALIZATION.VMWARERESOURCECONSUMPTION': "virtualization.VmwareResourceConsumption",
'VIRTUALIZATION.VMWARESHARESINFO': "virtualization.VmwareSharesInfo",
'VIRTUALIZATION.VMWARETEAMINGANDFAILOVER': "virtualization.VmwareTeamingAndFailover",
'VIRTUALIZATION.VMWAREVLANRANGE': "virtualization.VmwareVlanRange",
'VIRTUALIZATION.VMWAREVMCPUSHAREINFO': "virtualization.VmwareVmCpuShareInfo",
'VIRTUALIZATION.VMWAREVMCPUSOCKETINFO': "virtualization.VmwareVmCpuSocketInfo",
'VIRTUALIZATION.VMWAREVMDISKCOMMITINFO': "virtualization.VmwareVmDiskCommitInfo",
'VIRTUALIZATION.VMWAREVMMEMORYSHAREINFO': "virtualization.VmwareVmMemoryShareInfo",
'VIRTUALIZATION.VOLUMEINFO': "virtualization.VolumeInfo",
'VMEDIA.MAPPING': "vmedia.Mapping",
'VNIC.ARFSSETTINGS': "vnic.ArfsSettings",
'VNIC.CDN': "vnic.Cdn",
'VNIC.COMPLETIONQUEUESETTINGS': "vnic.CompletionQueueSettings",
'VNIC.ETHINTERRUPTSETTINGS': "vnic.EthInterruptSettings",
'VNIC.ETHRXQUEUESETTINGS': "vnic.EthRxQueueSettings",
'VNIC.ETHTXQUEUESETTINGS': "vnic.EthTxQueueSettings",
'VNIC.FCERRORRECOVERYSETTINGS': "vnic.FcErrorRecoverySettings",
'VNIC.FCINTERRUPTSETTINGS': "vnic.FcInterruptSettings",
'VNIC.FCQUEUESETTINGS': "vnic.FcQueueSettings",
'VNIC.FLOGISETTINGS': "vnic.FlogiSettings",
'VNIC.ISCSIAUTHPROFILE': "vnic.IscsiAuthProfile",
'VNIC.LUN': "vnic.Lun",
'VNIC.NVGRESETTINGS': "vnic.NvgreSettings",
'VNIC.PLACEMENTSETTINGS': "vnic.PlacementSettings",
'VNIC.PLOGISETTINGS': "vnic.PlogiSettings",
'VNIC.ROCESETTINGS': "vnic.RoceSettings",
'VNIC.RSSHASHSETTINGS': "vnic.RssHashSettings",
'VNIC.SCSIQUEUESETTINGS': "vnic.ScsiQueueSettings",
'VNIC.TCPOFFLOADSETTINGS': "vnic.TcpOffloadSettings",
'VNIC.USNICSETTINGS': "vnic.UsnicSettings",
'VNIC.VIFSTATUS': "vnic.VifStatus",
'VNIC.VLANSETTINGS': "vnic.VlanSettings",
'VNIC.VMQSETTINGS': "vnic.VmqSettings",
'VNIC.VSANSETTINGS': "vnic.VsanSettings",
'VNIC.VXLANSETTINGS': "vnic.VxlanSettings",
'WORKFLOW.ACTIONWORKFLOWDEFINITION': "workflow.ActionWorkflowDefinition",
'WORKFLOW.ARRAYDATATYPE': "workflow.ArrayDataType",
'WORKFLOW.ASSOCIATEDROLES': "workflow.AssociatedRoles",
'WORKFLOW.CLICOMMAND': "workflow.CliCommand",
'WORKFLOW.COMMENTS': "workflow.Comments",
'WORKFLOW.CONSTRAINTS': "workflow.Constraints",
'WORKFLOW.CUSTOMARRAYITEM': "workflow.CustomArrayItem",
'WORKFLOW.CUSTOMDATAPROPERTY': "workflow.CustomDataProperty",
'WORKFLOW.CUSTOMDATATYPE': "workflow.CustomDataType",
'WORKFLOW.CUSTOMDATATYPEPROPERTIES': "workflow.CustomDataTypeProperties",
'WORKFLOW.DECISIONCASE': "workflow.DecisionCase",
'WORKFLOW.DECISIONTASK': "workflow.DecisionTask",
'WORKFLOW.DEFAULTVALUE': "workflow.DefaultValue",
'WORKFLOW.DISPLAYMETA': "workflow.DisplayMeta",
'WORKFLOW.DYNAMICWORKFLOWACTIONTASKLIST': "workflow.DynamicWorkflowActionTaskList",
'WORKFLOW.ENUMENTRY': "workflow.EnumEntry",
'WORKFLOW.EXPECTPROMPT': "workflow.ExpectPrompt",
'WORKFLOW.FAILUREENDTASK': "workflow.FailureEndTask",
'WORKFLOW.FILEDOWNLOADOP': "workflow.FileDownloadOp",
'WORKFLOW.FILEOPERATIONS': "workflow.FileOperations",
'WORKFLOW.FILETEMPLATEOP': "workflow.FileTemplateOp",
'WORKFLOW.FILETRANSFER': "workflow.FileTransfer",
'WORKFLOW.FORKTASK': "workflow.ForkTask",
'WORKFLOW.INITIATORCONTEXT': "workflow.InitiatorContext",
'WORKFLOW.INTERNALPROPERTIES': "workflow.InternalProperties",
'WORKFLOW.JOINTASK': "workflow.JoinTask",
'WORKFLOW.LOOPTASK': "workflow.LoopTask",
'WORKFLOW.MESSAGE': "workflow.Message",
'WORKFLOW.MOREFERENCEARRAYITEM': "workflow.MoReferenceArrayItem",
'WORKFLOW.MOREFERENCEDATATYPE': "workflow.MoReferenceDataType",
'WORKFLOW.MOREFERENCEPROPERTY': "workflow.MoReferenceProperty",
'WORKFLOW.PARAMETERSET': "workflow.ParameterSet",
'WORKFLOW.PRIMITIVEARRAYITEM': "workflow.PrimitiveArrayItem",
'WORKFLOW.PRIMITIVEDATAPROPERTY': "workflow.PrimitiveDataProperty",
'WORKFLOW.PRIMITIVEDATATYPE': "workflow.PrimitiveDataType",
'WORKFLOW.PROPERTIES': "workflow.Properties",
'WORKFLOW.RESULTHANDLER': "workflow.ResultHandler",
'WORKFLOW.ROLLBACKTASK': "workflow.RollbackTask",
'WORKFLOW.ROLLBACKWORKFLOWTASK': "workflow.RollbackWorkflowTask",
'WORKFLOW.SELECTORPROPERTY': "workflow.SelectorProperty",
'WORKFLOW.SSHCMD': "workflow.SshCmd",
'WORKFLOW.SSHCONFIG': "workflow.SshConfig",
'WORKFLOW.SSHSESSION': "workflow.SshSession",
'WORKFLOW.STARTTASK': "workflow.StartTask",
'WORKFLOW.SUBWORKFLOWTASK': "workflow.SubWorkflowTask",
'WORKFLOW.SUCCESSENDTASK': "workflow.SuccessEndTask",
'WORKFLOW.TARGETCONTEXT': "workflow.TargetContext",
'WORKFLOW.TARGETDATATYPE': "workflow.TargetDataType",
'WORKFLOW.TARGETPROPERTY': "workflow.TargetProperty",
'WORKFLOW.TASKCONSTRAINTS': "workflow.TaskConstraints",
'WORKFLOW.TASKRETRYINFO': "workflow.TaskRetryInfo",
'WORKFLOW.UIINPUTFILTER': "workflow.UiInputFilter",
'WORKFLOW.VALIDATIONERROR': "workflow.ValidationError",
'WORKFLOW.VALIDATIONINFORMATION': "workflow.ValidationInformation",
'WORKFLOW.WAITTASK': "workflow.WaitTask",
'WORKFLOW.WAITTASKPROMPT': "workflow.WaitTaskPrompt",
'WORKFLOW.WEBAPI': "workflow.WebApi",
'WORKFLOW.WORKERTASK': "workflow.WorkerTask",
'WORKFLOW.WORKFLOWCTX': "workflow.WorkflowCtx",
'WORKFLOW.WORKFLOWENGINEPROPERTIES': "workflow.WorkflowEngineProperties",
'WORKFLOW.WORKFLOWINFOPROPERTIES': "workflow.WorkflowInfoProperties",
'WORKFLOW.WORKFLOWPROPERTIES': "workflow.WorkflowProperties",
'WORKFLOW.XMLAPI': "workflow.XmlApi",
'X509.CERTIFICATE': "x509.Certificate",
},
}
validations = {
}
    @cached_property
    def additional_properties_type():
        """Return the tuple of types accepted for undeclared (additional)
        properties on this model.

        Implemented as a lazily cached property because referenced model
        types may not be importable until after this class has loaded.
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)
    # When True, the model value itself is allowed to be ``None``.
    _nullable = True
@cached_property
def openapi_types():
lazy_import()
return {
'class_id': (str,),
'object_type': (str,),
}
@cached_property
def discriminator():
lazy_import()
val = {
'softwarerepository.ApplianceUpload': SoftwarerepositoryApplianceUpload,
'softwarerepository.CifsServer': SoftwarerepositoryCifsServer,
'softwarerepository.HttpServer': SoftwarerepositoryHttpServer,
'softwarerepository.LocalMachine': SoftwarerepositoryLocalMachine,
'softwarerepository.NfsServer': SoftwarerepositoryNfsServer,
}
if not val:
return None
return {'class_id': val}
attribute_map = {
'class_id': 'ClassId',
'object_type': 'ObjectType',
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
    @convert_js_args_to_python_args
    def __init__(self, class_id, object_type, *args, **kwargs):
        """Build the model instance, validating and composing schemas.

        Args:
            class_id (str): Discriminator value identifying the concrete
                type of this complex type.
            object_type (str): Fully-qualified object type of the instance.

        Keyword Args:
            _check_type (bool): Validate types of assigned values.
                Defaults to True.
            _spec_property_naming (bool): Property names follow the spec
                (wire) naming instead of python naming. Defaults to False.
            _path_to_item (tuple): Location of this instance inside its
                parent structure, used in error messages.
            _configuration: Client configuration; enables behaviour such as
                discarding unknown keys.
            _visited_composed_classes (tuple): Classes already visited while
                composing, used to break circular allOf/anyOf/oneOf chains.

        Raises:
            ApiTypeError: If any undeclared positional arguments are passed.
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Only keyword arguments are accepted beyond the declared parameters.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        # Record this class as visited to stop circular composition.
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        required_args = {
            'class_id': class_id,
            'object_type': object_type,
        }
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # Instantiate/validate the composed (allOf/anyOf/oneOf) schemas.
        composed_info = validate_get_composed_info(
            constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]
        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in kwargs.items():
            # Discard unknown keys when the configuration requests it and no
            # additionalProperties model would accept them.
            if var_name in unused_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        not self._additional_properties_model_instances:
                continue
            setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
MoBaseComplexType,
],
'oneOf': [
],
}
| true | true |
f71180058b96632b2f3dd3308a51630c15ace975 | 1,638 | bzl | Python | test/haxe_executable_dependency.bzl | kigero/rules_haxe | 1cee9a6ccf4c26656c5b7d1fb2d56f2fa9c01fa1 | [
"MIT"
] | null | null | null | test/haxe_executable_dependency.bzl | kigero/rules_haxe | 1cee9a6ccf4c26656c5b7d1fb2d56f2fa9c01fa1 | [
"MIT"
] | null | null | null | test/haxe_executable_dependency.bzl | kigero/rules_haxe | 1cee9a6ccf4c26656c5b7d1fb2d56f2fa9c01fa1 | [
"MIT"
] | null | null | null | """
Test that the build parameters for external modules with dependencies are computed correctly.
"""
load("@bazel_skylib//lib:unittest.bzl", "analysistest", "asserts")
load("//:providers.bzl", "HaxeLibraryInfo")
load("//:utils.bzl", "determine_source_root")
def _haxe_executable_dependency_test_impl(ctx):
    """Analysis-test implementation: checks the HXML build parameters that an
    external module with a dependency propagates via HaxeLibraryInfo."""
    env = analysistest.begin(ctx)
    target_under_test = analysistest.target_under_test(env)
    hxml = target_under_test[HaxeLibraryInfo].hxml
    # The module build should pick up exactly two source files.
    asserts.equals(env, 2, len(hxml["source_files"]))
    asserts.equals(env, "module-bin", hxml["output_dir"])
    # NOTE(review): the expected paths hard-code 'x64_windows-fastbuild', so
    # this test presumably only passes for that configuration -- confirm.
    asserts.equals(env, "bazel-out/x64_windows-fastbuild/bin/external/test-module-dist/module-bin", target_under_test[HaxeLibraryInfo].lib.path)
    asserts.equals(env, "external/test-module-a/", determine_source_root(hxml["source_files"][0]))
    # The directory portion 'external/dist-test/' in this test comes from the fact that the test is being loaded via a
    # dependent module below, in the target_under_test parameter. When run in the test directory itself, the value is
    # correct, without the 'external/dist-test/'.
    asserts.equals(env, "bazel-out/x64_windows-fastbuild/bin/external/dist-test/module-bin/dist-test", target_under_test[HaxeLibraryInfo].hxml["build_file"])
    return analysistest.end(env)
# Rule wrapper produced by the Skylib analysis-test framework.
haxe_executable_dependency_test = analysistest.make(_haxe_executable_dependency_test_impl)
def test_haxe_executable_dependency():
    """Instantiate the analysis test against the external dist module."""
    haxe_executable_dependency_test(
        name = "haxe_executable_dependency_test",
        target_under_test = "@test-module-dist//:module-bin",
        size = "small",
    )
| 46.8 | 158 | 0.738706 |
load("@bazel_skylib//lib:unittest.bzl", "analysistest", "asserts")
load("//:providers.bzl", "HaxeLibraryInfo")
load("//:utils.bzl", "determine_source_root")
def _haxe_executable_dependency_test_impl(ctx):
env = analysistest.begin(ctx)
target_under_test = analysistest.target_under_test(env)
hxml = target_under_test[HaxeLibraryInfo].hxml
asserts.equals(env, 2, len(hxml["source_files"]))
asserts.equals(env, "module-bin", hxml["output_dir"])
asserts.equals(env, "bazel-out/x64_windows-fastbuild/bin/external/test-module-dist/module-bin", target_under_test[HaxeLibraryInfo].lib.path)
asserts.equals(env, "external/test-module-a/", determine_source_root(hxml["source_files"][0]))
asserts.equals(env, "bazel-out/x64_windows-fastbuild/bin/external/dist-test/module-bin/dist-test", target_under_test[HaxeLibraryInfo].hxml["build_file"])
return analysistest.end(env)
haxe_executable_dependency_test = analysistest.make(_haxe_executable_dependency_test_impl)
def test_haxe_executable_dependency():
haxe_executable_dependency_test(
name = "haxe_executable_dependency_test",
target_under_test = "@test-module-dist//:module-bin",
size = "small",
)
| true | true |
f711809cde8a0a5d600e12a758e6baa43a035a9b | 3,957 | py | Python | whalesong/managers/group_metadata.py | gladimaz/whalesong | dab496498d188b3f7b4d5d67ef1c0e2a46cce535 | [
"MIT"
] | 57 | 2018-06-20T12:37:58.000Z | 2021-08-14T14:32:26.000Z | whalesong/managers/group_metadata.py | gladimaz/whalesong | dab496498d188b3f7b4d5d67ef1c0e2a46cce535 | [
"MIT"
] | 91 | 2018-06-21T02:04:29.000Z | 2020-04-12T20:16:04.000Z | whalesong/managers/group_metadata.py | gladimaz/whalesong | dab496498d188b3f7b4d5d67ef1c0e2a46cce535 | [
"MIT"
] | 24 | 2018-07-02T08:31:52.000Z | 2020-05-19T01:36:18.000Z | from typing import List
from dirty_models import ArrayField, BooleanField, ModelField, StringField, StringIdField
from . import BaseCollectionManager, BaseModelManager
from ..models import BaseModel, DateTimeField
from ..results import Result
class Participant(BaseModel):
    """
    Group participant model: one member of a WhatsApp group.
    """
    is_admin = BooleanField(default=False)
    """
    Whether the participant is a group administrator or not.
    """
    is_super_admin = BooleanField()
    """
    Whether the participant is a group super administrator or not.
    NOTE(review): the exact semantics of "super admin" are unverified here.
    """
class GroupMetadata(BaseModel):
    """
    Group metadata model: creation info, description, owner, participants
    and invitation data for a WhatsApp group.
    """
    announce = StringIdField()
    """
    NOTE(review): semantics unverified -- presumably the group's
    announcement (admins-only messaging) setting; confirm.
    """
    creation = DateTimeField()
    """
    Group creation timestamp.
    """
    desc = StringField()
    """
    Group description.
    """
    desc_owner = StringIdField()
    """
    Who changed the group description last.
    """
    desc_time = DateTimeField()
    """
    When the group description was last changed.
    """
    owner = StringIdField()
    """
    Who created the group.
    """
    participants = ArrayField(field_type=ModelField(model_class=Participant))
    """
    List of participants.
    """
    restrict = StringIdField()
    """
    NOTE(review): semantics unverified -- presumably whether editing group
    info is restricted to admins; confirm.
    """
    group_invite_link = StringIdField()
    """
    Group link to invite people.
    """
    invite_code = StringIdField()
    """
    Group code to invite people.
    """
class ParticipantManager(BaseModelManager[Participant]):
    """
    Manager for a single group participant.
    """
    # Model class this manager wraps; used by the BaseModelManager machinery.
    MODEL_CLASS = Participant
class ParticipantCollectionManager(BaseCollectionManager[ParticipantManager]):
    """
    Collection manager for a group's participants: add, remove, promote and
    demote members, and query whether each operation is allowed for a given
    contact.
    """
    MODEL_MANAGER_CLASS = ParticipantManager

    def add_participants(self, contact_ids: List[str]) -> Result[None]:
        """Add the given contacts to the group."""
        payload = {'contactIds': contact_ids}
        return self._execute_command('addParticipants', payload)

    def can_add(self, contact_id: str) -> Result[bool]:
        """Return whether the given contact may be added to the group."""
        payload = {'contactId': contact_id}
        return self._execute_command('canAdd', payload)

    def remove_participants(self, contact_ids: List[str]) -> Result[None]:
        """Remove the given contacts from the group."""
        payload = {'contactIds': contact_ids}
        return self._execute_command('removeParticipants', payload)

    def can_remove(self, contact_id: str) -> Result[bool]:
        """Return whether the given contact may be removed from the group."""
        payload = {'contactId': contact_id}
        return self._execute_command('canRemove', payload)

    def promote_participants(self, contact_ids: List[str]) -> Result[None]:
        """Promote the given contacts to group administrators."""
        payload = {'contactIds': contact_ids}
        return self._execute_command('promoteParticipants', payload)

    def can_promote(self, contact_id: str) -> Result[bool]:
        """Return whether the given contact may be promoted."""
        payload = {'contactId': contact_id}
        return self._execute_command('canPromote', payload)

    def demote_participants(self, contact_ids: List[str]) -> Result[None]:
        """Demote the given contacts from group administrators."""
        payload = {'contactIds': contact_ids}
        return self._execute_command('demoteParticipants', payload)

    def can_demote(self, contact_id: str) -> Result[bool]:
        """Return whether the given contact may be demoted."""
        payload = {'contactId': contact_id}
        return self._execute_command('canDemote', payload)
class GroupMetadataManager(BaseModelManager[GroupMetadata]):
    """
    Manager for one group's metadata, beyond the plain chat object.

    .. attribute:: participants

        :class:`~whalesong.managers.group_metadata.ParticipantCollectionManager`

        Group's participants collection manager.
    """
    MODEL_CLASS = GroupMetadata

    def __init__(self, driver, manager_path=''):
        super(GroupMetadataManager, self).__init__(driver=driver, manager_path=manager_path)
        # Expose the group's participants through a dedicated submanager.
        participants_manager = ParticipantCollectionManager(
            driver=self._driver,
            manager_path=self._build_command('participants')
        )
        self.add_submanager('participants', participants_manager)

    def group_invite_code(self) -> Result[None]:
        """Fetch the group's current invite code."""
        return self._execute_command('groupInviteCode')

    def revoke_group_invite(self) -> Result[None]:
        """Revoke the group's current invitation link/code."""
        return self._execute_command('revokeGroupInvite')
class GroupMetadataCollectionManager(BaseCollectionManager[GroupMetadataManager]):
    """Collection manager over the metadata of every known group."""
    MODEL_MANAGER_CLASS = GroupMetadataManager
| 26.736486 | 92 | 0.686631 | from typing import List
from dirty_models import ArrayField, BooleanField, ModelField, StringField, StringIdField
from . import BaseCollectionManager, BaseModelManager
from ..models import BaseModel, DateTimeField
from ..results import Result
class Participant(BaseModel):
is_admin = BooleanField(default=False)
is_super_admin = BooleanField()
class GroupMetadata(BaseModel):
announce = StringIdField()
creation = DateTimeField()
desc = StringField()
desc_owner = StringIdField()
desc_time = DateTimeField()
owner = StringIdField()
participants = ArrayField(field_type=ModelField(model_class=Participant))
restrict = StringIdField()
group_invite_link = StringIdField()
invite_code = StringIdField()
class ParticipantManager(BaseModelManager[Participant]):
MODEL_CLASS = Participant
class ParticipantCollectionManager(BaseCollectionManager[ParticipantManager]):
MODEL_MANAGER_CLASS = ParticipantManager
def add_participants(self, contact_ids: List[str]) -> Result[None]:
return self._execute_command('addParticipants', {'contactIds': contact_ids})
def can_add(self, contact_id: str) -> Result[bool]:
return self._execute_command('canAdd', {'contactId': contact_id})
def remove_participants(self, contact_ids: List[str]) -> Result[None]:
return self._execute_command('removeParticipants', {'contactIds': contact_ids})
def can_remove(self, contact_id: str) -> Result[bool]:
return self._execute_command('canRemove', {'contactId': contact_id})
def promote_participants(self, contact_ids: List[str]) -> Result[None]:
return self._execute_command('promoteParticipants', {'contactIds': contact_ids})
def can_promote(self, contact_id: str) -> Result[bool]:
return self._execute_command('canPromote', {'contactId': contact_id})
def demote_participants(self, contact_ids: List[str]) -> Result[None]:
return self._execute_command('demoteParticipants', {'contactIds': contact_ids})
def can_demote(self, contact_id: str) -> Result[bool]:
return self._execute_command('canDemote', {'contactId': contact_id})
class GroupMetadataManager(BaseModelManager[GroupMetadata]):
MODEL_CLASS = GroupMetadata
def __init__(self, driver, manager_path=''):
super(GroupMetadataManager, self).__init__(driver=driver, manager_path=manager_path)
self.add_submanager('participants', ParticipantCollectionManager(
driver=self._driver,
manager_path=self._build_command('participants')
))
def group_invite_code(self) -> Result[None]:
return self._execute_command('groupInviteCode')
def revoke_group_invite(self) -> Result[None]:
return self._execute_command('revokeGroupInvite')
class GroupMetadataCollectionManager(BaseCollectionManager[GroupMetadataManager]):
MODEL_MANAGER_CLASS = GroupMetadataManager
| true | true |
f71181212ea961615fd991f19319e4e5222e52f0 | 5,905 | py | Python | skan/pipe.py | marlene09/skan | 4e84ff8ac83e912c9cf4a44b8274c3a5699027ad | [
"BSD-3-Clause"
] | 1 | 2021-02-08T04:45:38.000Z | 2021-02-08T04:45:38.000Z | skan/pipe.py | mkcor/skan | 97a217d36ec1393b380d4a797b5b7ceb68e824ec | [
"BSD-3-Clause"
] | null | null | null | skan/pipe.py | mkcor/skan | 97a217d36ec1393b380d4a797b5b7ceb68e824ec | [
"BSD-3-Clause"
] | null | null | null | import os
from . import pre, csr
import imageio
from tqdm import tqdm
import numpy as np
from skimage import morphology
import pandas as pd
from .image_stats import image_summary
from skimage.feature import shape_index
from concurrent.futures import ThreadPoolExecutor, as_completed
import multiprocessing as mp
CPU_COUNT = int(os.environ.get('CPU_COUNT', mp.cpu_count()))
def _get_scale(image, md_path_or_scale):
"""Get a valid scale from an image and a metadata path or scale.
Parameters
----------
image : np.ndarray
The input image.
md_path_or_scale : float or image filename
The path to the file containing the metadata, or the scale.
Returns
-------
scale : float
"""
scale = None
try:
scale = float(md_path_or_scale)
except ValueError:
pass
if md_path_or_scale is not None and scale is None:
md_path = md_path_or_scale.split(sep='/')
meta = image.meta
for key in md_path:
meta = meta[key]
scale = float(meta)
else:
if scale is None:
scale = 1 # measurements will be in pixel units
return scale
def process_single_image(filename, image_format, scale_metadata_path,
                         threshold_radius, smooth_radius,
                         brightness_offset, crop_radius, smooth_method):
    """Read, threshold and skeletonise a single image, returning branch stats.

    Parameters mirror those of `process_images` (which dispatches to this
    function per file).

    Returns
    -------
    image, thresholded, skeleton, framedata
        The (possibly cropped) input image, its binary threshold, the
        quality-weighted skeleton, and a per-branch summary DataFrame.
    """
    image = imageio.imread(filename, format=image_format)
    scale = _get_scale(image, scale_metadata_path)
    if crop_radius > 0:
        c = crop_radius
        image = image[c:-c, c:-c]
    # Convert the physical threshold radius to pixels; smoothing radius is
    # expressed relative to it.
    pixel_threshold_radius = int(np.ceil(threshold_radius / scale))
    pixel_smoothing_radius = smooth_radius * pixel_threshold_radius
    thresholded = pre.threshold(image, sigma=pixel_smoothing_radius,
                                radius=pixel_threshold_radius,
                                offset=brightness_offset,
                                smooth_method=smooth_method)
    quality = shape_index(image, sigma=pixel_smoothing_radius,
                          mode='reflect')
    # Weight each skeleton pixel by the local shape index "quality".
    skeleton = morphology.skeletonize(thresholded) * quality
    framedata = csr.summarise(skeleton, spacing=scale)
    # "Squiggle": log2 ratio of path length to straight-line distance.
    framedata['squiggle'] = np.log2(framedata['branch-distance'] /
                                    framedata['euclidean-distance'])
    framedata['scale'] = scale
    framedata.rename(columns={'mean pixel value': 'mean shape index'},
                     inplace=True)
    framedata['filename'] = filename
    return image, thresholded, skeleton, framedata
def process_images(filenames, image_format, threshold_radius,
                   smooth_radius, brightness_offset, scale_metadata_path,
                   crop_radius=0, smooth_method='Gaussian',
                   num_threads=CPU_COUNT):
    """Full pipeline from images to skeleton stats with local median threshold.

    Parameters
    ----------
    filenames : list of string
        The list of input filenames.
    image_format : string
        The format of the files. 'auto' is automatically determined by the
        imageio library. See imageio documentation for valid image formats.
    threshold_radius : float
        The radius for median thresholding,
    smooth_radius : float in [0, 1]
        The value of sigma with which to Gaussian-smooth the image,
        **relative to `threshold_radius`**.
    brightness_offset : float
        The standard brightness value with which to threshold is the local
        median, `m(x, y)`. Use this value to offset from there: the threshold
        used will be `m(x, y) + brightness_offset`.
    scale_metadata_path : string
        The path in the image dictionary to find the metadata on pixel scale,
        separated by forward slashes ('/').
    crop_radius : int, optional
        Crop `crop_radius` pixels from each margin of the image before
        processing.
    smooth_method : {'Gaussian', 'TV', 'NL'}, optional
        Which method to use for smoothing.
    num_threads : int, optional
        How many threads to use for computation. This should generally be
        set to the number of CPU cores available to you.

    Returns
    -------
    results : generator
        The pipeline yields individual image results in the form of a tuple
        of ``(filename, image, thresholded_image, skeleton, data_frame)``.
        Finally, after all the images have been processed, the pipeline yields
        a DataFrame containing all the collated branch-level results.
    """
    image_format = None if image_format == 'auto' else image_format
    results = []
    image_results = []
    with ThreadPoolExecutor(max_workers=num_threads) as ex:
        # Map each submitted future back to its source filename.
        future_data = {ex.submit(process_single_image, filename,
                                 image_format, scale_metadata_path,
                                 threshold_radius, smooth_radius,
                                 brightness_offset, crop_radius,
                                 smooth_method): filename
                       for filename in filenames}
        # Results are yielded in completion order, not input order.
        for completed_data in tqdm(as_completed(future_data)):
            image, thresholded, skeleton, framedata = completed_data.result()
            filename = future_data[completed_data]
            results.append(framedata)
            image_stats = image_summary(skeleton,
                                        spacing=framedata['scale'][0])
            image_stats['filename'] = filename
            image_stats['branch density'] = (framedata.shape[0] /
                                             image_stats['area'])
            # branch-type 2 == junction-to-junction (J2J) branches.
            j2j = framedata[framedata['branch-type'] == 2]
            image_stats['mean J2J branch distance'] = (
                j2j['branch-distance'].mean())
            image_results.append(image_stats)
            yield filename, image, thresholded, skeleton, framedata
    # Final yield: collated per-branch and per-image DataFrames.
    yield pd.concat(results), pd.concat(image_results)
| 41.584507 | 79 | 0.631499 | import os
from . import pre, csr
import imageio
from tqdm import tqdm
import numpy as np
from skimage import morphology
import pandas as pd
from .image_stats import image_summary
from skimage.feature import shape_index
from concurrent.futures import ThreadPoolExecutor, as_completed
import multiprocessing as mp
CPU_COUNT = int(os.environ.get('CPU_COUNT', mp.cpu_count()))
def _get_scale(image, md_path_or_scale):
scale = None
try:
scale = float(md_path_or_scale)
except ValueError:
pass
if md_path_or_scale is not None and scale is None:
md_path = md_path_or_scale.split(sep='/')
meta = image.meta
for key in md_path:
meta = meta[key]
scale = float(meta)
else:
if scale is None:
scale = 1
return scale
def process_single_image(filename, image_format, scale_metadata_path,
threshold_radius, smooth_radius,
brightness_offset, crop_radius, smooth_method):
image = imageio.imread(filename, format=image_format)
scale = _get_scale(image, scale_metadata_path)
if crop_radius > 0:
c = crop_radius
image = image[c:-c, c:-c]
pixel_threshold_radius = int(np.ceil(threshold_radius / scale))
pixel_smoothing_radius = smooth_radius * pixel_threshold_radius
thresholded = pre.threshold(image, sigma=pixel_smoothing_radius,
radius=pixel_threshold_radius,
offset=brightness_offset,
smooth_method=smooth_method)
quality = shape_index(image, sigma=pixel_smoothing_radius,
mode='reflect')
skeleton = morphology.skeletonize(thresholded) * quality
framedata = csr.summarise(skeleton, spacing=scale)
framedata['squiggle'] = np.log2(framedata['branch-distance'] /
framedata['euclidean-distance'])
framedata['scale'] = scale
framedata.rename(columns={'mean pixel value': 'mean shape index'},
inplace=True)
framedata['filename'] = filename
return image, thresholded, skeleton, framedata
def process_images(filenames, image_format, threshold_radius,
smooth_radius, brightness_offset, scale_metadata_path,
crop_radius=0, smooth_method='Gaussian',
num_threads=CPU_COUNT):
image_format = None if image_format == 'auto' else image_format
results = []
image_results = []
with ThreadPoolExecutor(max_workers=num_threads) as ex:
future_data = {ex.submit(process_single_image, filename,
image_format, scale_metadata_path,
threshold_radius, smooth_radius,
brightness_offset, crop_radius,
smooth_method): filename
for filename in filenames}
for completed_data in tqdm(as_completed(future_data)):
image, thresholded, skeleton, framedata = completed_data.result()
filename = future_data[completed_data]
results.append(framedata)
image_stats = image_summary(skeleton,
spacing=framedata['scale'][0])
image_stats['filename'] = filename
image_stats['branch density'] = (framedata.shape[0] /
image_stats['area'])
j2j = framedata[framedata['branch-type'] == 2]
image_stats['mean J2J branch distance'] = (
j2j['branch-distance'].mean())
image_results.append(image_stats)
yield filename, image, thresholded, skeleton, framedata
yield pd.concat(results), pd.concat(image_results)
| true | true |
f71183a31b57d8e8d37c461a4adb535c2b4581ed | 931 | py | Python | backend/public_info/migrations/0001_initial.py | StichtingIAPC/swipe | d1ea35a40813d2d5e9cf9edde33148c0a825efc4 | [
"BSD-3-Clause-Clear"
] | null | null | null | backend/public_info/migrations/0001_initial.py | StichtingIAPC/swipe | d1ea35a40813d2d5e9cf9edde33148c0a825efc4 | [
"BSD-3-Clause-Clear"
] | null | null | null | backend/public_info/migrations/0001_initial.py | StichtingIAPC/swipe | d1ea35a40813d2d5e9cf9edde33148c0a825efc4 | [
"BSD-3-Clause-Clear"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-05-29 18:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Initial migration: creates the ``Sharing`` model, which attaches a
    public/private flag (via a generic relation) to any content type."""
    initial = True
    dependencies = [
        # Required for the ForeignKey to contenttypes.ContentType below.
        ('contenttypes', '0002_remove_content_type_name'),
    ]
    operations = [
        migrations.CreateModel(
            name='Sharing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
                ('public', models.BooleanField(default=True)),
                # sharing_type + sharing_id together form a generic relation
                # to the shared object.
                ('sharing_id', models.PositiveIntegerField()),
                ('sharing_type', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='contenttypes.ContentType')),
            ],
        ),
    ]
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Sharing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
('public', models.BooleanField(default=True)),
('sharing_id', models.PositiveIntegerField()),
('sharing_type', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='contenttypes.ContentType')),
],
),
]
| true | true |
f71183b5bb67d22693deef525c7d46aca9642f09 | 41,533 | py | Python | src/uvm/base/uvm_object.py | mgielda/uvm-python | 7750bc163130f59741e464bb5fcf8fb5324dbf56 | [
"Apache-2.0"
] | null | null | null | src/uvm/base/uvm_object.py | mgielda/uvm-python | 7750bc163130f59741e464bb5fcf8fb5324dbf56 | [
"Apache-2.0"
] | null | null | null | src/uvm/base/uvm_object.py | mgielda/uvm-python | 7750bc163130f59741e464bb5fcf8fb5324dbf56 | [
"Apache-2.0"
] | null | null | null | #
#-----------------------------------------------------------------------------
# Copyright 2007-2011 Mentor Graphics Corporation
# Copyright 2007-2011 Cadence Design Systems, Inc.
# Copyright 2010 Synopsys, Inc.
# Copyright 2013 NVIDIA Corporation
# Copyright 2019-2020 Tuomas Poikela (tpoikela)
# All Rights Reserved Worldwide
#
# Licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See
# the License for the specific language governing
# permissions and limitations under the License.
#-----------------------------------------------------------------------------
from typing import Dict, Any
from .sv import sv, sv_obj
from .uvm_misc import UVMStatusContainer
from .uvm_object_globals import (UVM_PRINT, UVM_NONE, UVM_COPY, UVM_COMPARE,
UVM_RECORD, UVM_SETINT, UVM_SETOBJ, UVM_SETSTR, UVM_PACK, UVM_UNPACK)
from .uvm_globals import uvm_report_error, uvm_report_warning, uvm_report_info
from typing import Tuple
class UVMObject(sv_obj):
"""
The `UVMObject` class is the base class for all UVM data and hierarchical classes.
Its primary role is to define a set of methods for such common operations as `create`,
`copy`, `compare`, `print`, and `record`.
Classes deriving from `UVMObject` must implement methods such as
`create` and `get_type_name`.
:ivar str name: Name of the object
:ivar int inst_id: Unique instance ID for this object
Group: Seeding
:cvar bool use_uvm_seeding: This bit enables or disables the UVM seeding
mechanism. It globally affects the operation of the `reseed` method.
When enabled, UVM-based objects are seeded based on their type and full
hierarchical name rather than allocation order. This improves random
stability for objects whose instance names are unique across each type.
The `UVMComponent` class is an example of a type that has a unique
instance name.
"""
# Should be set by uvm_*_utils macro
type_id = None # type: Any
depth = 0
m_inst_count = 0
m_inst_count = 0
use_uvm_seeding = True
uvm_global_copy_map = {} # type: Dict['UVMObject', 'UVMObject']
_m_uvm_status_container = UVMStatusContainer()
def __init__(self, name: str):
""" Creates a new uvm_object with the given instance `name`. If `name` is not
supplied, the object is unnamed.
"""
sv_obj.__init__(self)
self.name = name
self.inst_id = UVMObject.m_inst_count
UVMObject.m_inst_count += 1
self.leaf_name = name
def reseed(self) -> None:
"""
Calls `srandom` on the object to reseed the object using the UVM seeding
mechanism, which sets the seed based on type name and instance name instead
of based on instance position in a thread.
If the `use_uvm_seeding` static variable is set to 0, then reseed() does
not perform any function.
"""
if (UVMObject.use_uvm_seeding):
pass
def set_name(self, name: str):
"""
Group: Identification
Sets the instance name of this object, overwriting any previously
given name.
Args:
name:
"""
self.leaf_name = name
def get_name(self) -> str:
"""
Returns the name of the object, as provided by the `name` argument in the
`new` constructor or `set_name` method.
Returns:
str: Name of the object.
"""
return self.leaf_name
def get_full_name(self) -> str:
"""
Objects possessing hierarchy, such as <uvm_components>, override the default
implementation. Other objects might be associated with component hierarchy
but are not themselves components. For example, <uvm_sequence #(REQ,RSP)>
classes are typically associated with a <uvm_sequencer #(REQ,RSP)>. In this
case, it is useful to override get_full_name to return the sequencer's
full name concatenated with the sequence's name. This provides the sequence
a full context, which is useful when debugging.
Returns:
str: The full hierarchical name of this object. The default
implementation is the same as <get_name>, as uvm_objects do not inherently
possess hierarchy.
"""
return self.get_name()
def get_inst_id(self) -> int:
"""
Returns:
int: The object's unique, numeric instance identifier.
"""
return self.inst_id
@classmethod
def get_inst_count(self) -> int:
"""
Returns:
int: The current value of the instance counter, which represents the
total number of uvm_object-based objects that have been allocated in
simulation. The instance counter is used to form a unique numeric instance
identifier.
"""
return UVMObject.m_inst_count
def get_type(self) -> None:
"""
Returns the type-proxy (wrapper) for this object. The `UVMFactory`'s
type-based override and creation methods take arguments of
`uvm_object_wrapper`. This method, if implemented, can be used as convenient
means of supplying those arguments.
The default implementation of this method produces an error and returns
`None`. To enable use of this method, a user's subtype must implement a
version that returns the subtype's wrapper.
For example:
.. code-block:: python
class cmd(UVMObject):
type_id = None
@classmethod
def get_type(cls):
return cls.type_id.get()
Then, to use:
.. code-block:: python
factory.set_type_override(cmd.get_type(), subcmd.get_type())
This function is implemented by the uvm_*_utils functions, if employed.
Returns:
"""
uvm_report_error("NOTYPID", "get_type not implemented in derived class: "
+ str(self), UVM_NONE)
return None
def get_object_type(self) -> Any:
"""
Function: get_object_type
Returns the type-proxy (wrapper) for this object. The `uvm_factory`'s
type-based override and creation methods take arguments of
`uvm_object_wrapper`. This method, if implemented, can be used as convenient
means of supplying those arguments. This method is the same as the static
`get_type` method, but uses an already allocated object to determine
the type-proxy to access (instead of using the static object).
The default implementation of this method does a factory lookup of the
proxy using the return value from `get_type_name`. If the type returned
by `get_type_name` is not registered with the factory, then a `None`
handle is returned.
For example:
.. code-block:: python
class cmd (UVMObject):
type_id = UVMObjectRegistry()
@classmethod
def type_id get_type(cls):
return type_id.get()
def get_object_type(self):
return cmd.type_id.get()
This function is implemented by the `uvm_*_utils macros, if employed.
Returns:
"""
from .uvm_coreservice import UVMCoreService
cs = UVMCoreService.get()
factory = cs.get_factory()
if self.get_type_name() == "<unknown>":
return None
return factory.find_wrapper_by_name(self.get_type_name())
def get_type_name(self) -> str:
"""
This function returns the type name of the object, which is typically the
type identifier enclosed in quotes. It is used for various debugging
functions in the library, and it is used by the factory for creating
objects.
This function must be defined in every derived class.
A typical implementation is as follows:
.. code-block:: python
class mytype (UVMObject):
...
type_name = "mytype"
def get_type_name(self):
return my_type.type_name
We define the `type_name` static variable to enable access to the type name
without need of an object of the class, i.e., to enable access via the
scope operator, ~mytype::type_name~.
Returns:
str: Type name of the object.
"""
return "<unknown>"
def create(self, name="") -> 'UVMObject':
"""
Group: Creation
The `create` method allocates a new object of the same type as this object
and returns it via a base uvm_object handle. Every class deriving from
uvm_object, directly or indirectly, must implement the create method.
A typical implementation is as follows:
.. code-block:: python
class mytype (UVMObject):
...
def create(self, name=""):
mytype t = mytype(name)
return t
Args:
name (str): Name of the created object.
Returns:
obj: New object.
"""
return UVMObject(name)
def clone(self) -> 'UVMObject':
"""
The `clone` method creates and returns an exact copy of this object.
The default implementation calls `create` followed by `copy`. As clone is
virtual, derived classes may override this implementation if desired.
Returns:
UVMObject: Clone of the object.
"""
tmp = self.create(self.get_name())
if tmp is None:
uvm_report_warning("CRFLD", sv.sformatf(
"The create method failed for %s, object cannot be cloned",
self.get_name()), UVM_NONE)
else:
tmp.copy(self)
return tmp
def print_obj(self, printer=None) -> None:
"""
Group: Printing
Function: print
The `print` method deep-prints this object's properties in a format and
manner governed by the given `printer` argument; if the `printer` argument
is not provided, the global `uvm_default_printer` is used. See
`uvm_printer` for more information on printer output formatting. See also
`uvm_line_printer`, `uvm_tree_printer`, and `uvm_table_printer` for details
on the pre-defined printer "policies," or formatters, provided by the UVM.
The `print` method is not virtual and must not be overloaded. To include
custom information in the `print` and `sprint` operations, derived classes
must override the `do_print` method and use the provided printer policy
class to format the output.
Args:
printer (UVMPrinter): Printer that is used in printing.
"""
if printer is None:
from .uvm_global_vars import uvm_default_printer
printer = uvm_default_printer
if printer is None:
uvm_report_error("NonePRINTER", "uvm_default_printer is None")
sv.fwrite(printer.knobs.mcd, self.sprint(printer))
def sprint(self, printer=None) -> str:
"""
The `sprint` method works just like the `print` method, except the output
is returned in a string rather than displayed.
The `sprint` method is not virtual and must not be overloaded. To include
additional fields in the `print` and `sprint` operation, derived classes
must override the `do_print` method and use the provided printer policy
class to format the output. The printer policy will manage all string
concatenations and provide the string to `sprint` to return to the caller.
Args:
printer (UVMPrinter): Printer that is used in printing.
Returns:
str: String representation of the object.
"""
if printer is None:
from .uvm_global_vars import uvm_default_printer
printer = uvm_default_printer
if not printer.istop():
UVMObject._m_uvm_status_container.printer = printer
self._m_uvm_field_automation(None, UVM_PRINT, "")
self.do_print(printer)
return ""
self._m_uvm_status_container = UVMObject._m_uvm_status_container
printer.print_object(self.get_name(), self)
if printer.m_string != "":
return printer.m_string
return printer.emit()
def do_print(self, printer) -> None:
"""
The `do_print` method is the user-definable hook called by `print` and
`sprint` that allows users to customize what gets printed or sprinted
beyond the field information provided by the `uvm_field_* macros,
<Utility and Field Macros for Components and Objects>.
The `printer` argument is the policy object that governs the format and
content of the output. To ensure correct `print` and `sprint` operation,
and to ensure a consistent output format, the `printer` must be used
by all `do_print` implementations. That is, instead of using ~$display~ or
string concatenations directly, a `do_print` implementation must call
through the ~printer's~ API to add information to be printed or sprinted.
An example implementation of `do_print` is as follows::
class mytype (UVMObject):
data_obj data
int f1
virtual function void do_print (uvm_printer printer)
super.do_print(printer)
printer.print_field_int("f1", f1, $bits(f1), UVM_DEC)
printer.print_object("data", data)
endfunction
Then, to print and sprint the object, you could write::
t = mytype()
t.print()
uvm_report_info("Received",t.sprint())
See `UVMPrinter` for information about the printer API.
Args:
printer (UVMPrinter): Printer that is used in printing.
"""
return
def convert2string(self) -> str:
"""
This virtual function is a user-definable hook, called directly by the
user, that allows users to provide object information in the form of
a string. Unlike `sprint`, there is no requirement to use a `uvm_printer`
policy object. As such, the format and content of the output is fully
customizable, which may be suitable for applications not requiring the
consistent formatting offered by the `print`/`sprint`/`do_print`
API.
Fields declared in <Utility Macros> macros (`uvm_field_*), if used, will
not automatically appear in calls to convert2string.
An example implementation of convert2string follows.
.. code-block:: python
class Base(UVMObject):
field = "foo"
def convert2string(self):
return "base_field=" + self.field
class Obj2(UVMObject):
field = "bar"
def convert2string()
convert2string = "child_field=" + self.field
class Obj(Base):
addr = 0x123
data = 0x456
write = 1
child = Obj2()
def convert2string(self):
convert2string = super().convert2string() +
sv.sformatf(" write=%0d addr=%8h data=%8h ",write,addr,data) +
child.convert2string()
Then, to display an object, you could write:
.. code-block:: python
o = Obj()
uvm_report_info("BusMaster", "Sending:\n " + o.convert2string())
The output will look similar to::
UVM_INFO @ 0: reporter [BusMaster] Sending:
base_field=foo write=1 addr=00000123 data=00000456 child_field=bar
Returns:
str: Object converted into string.
"""
return ""
def _m_uvm_field_automation(self, tmp_data__, what__, str__) -> None:
pass
def record(self, recorder=None) -> None:
"""
Group: Recording
The `record` method deep-records this object's properties according to an
optional `recorder` policy. The method is not virtual and must not be
overloaded. To include additional fields in the record operation, derived
classes should override the `do_record` method.
The optional `recorder` argument specifies the recording policy, which
governs how recording takes place. See
`uvm_recorder` for information.
A simulator's recording mechanism is vendor-specific. By providing access
via a common interface, the uvm_recorder policy provides vendor-independent
access to a simulator's recording capabilities.
Args:
recorder (UVMRecorder):
"""
if recorder is None:
return
UVMObject._m_uvm_status_container.recorder = recorder
recorder.recording_depth += 1
self._m_uvm_field_automation(None, UVM_RECORD, "")
self.do_record(recorder)
recorder.recording_depth -= 1
def do_record(self, recorder) -> None:
"""
The `do_record` method is the user-definable hook called by the `record`
method. A derived class should override this method to include its fields
in a record operation.
The `recorder` argument is policy object for recording this object. A
do_record implementation should call the appropriate recorder methods for
each of its fields. Vendor-specific recording implementations are
encapsulated in the `recorder` policy, thereby insulating user-code from
vendor-specific behavior. See `uvm_recorder` for more information.
A typical implementation is as follows:
.. code-block:: python
class mytype (UVMObject):
data_obj data
int f1
def do_record (self, recorder):
recorder.record_field("f1", f1, sv.bits(f1), UVM_DEC)
recorder.record_object("data", data)
Args:
recorder (UVMRecorder): Recorder policy object.
"""
return
def copy(self, rhs: 'UVMObject'):
"""
The copy makes this object a copy of the specified object.
The `copy` method is not virtual and should not be overloaded in derived
classes. To copy the fields of a derived class, that class should override
the `do_copy` method.
Args:
rhs (UVMObject): An object to be copied.
"""
# For cycle checking
UVMObject.depth = 0
if (rhs is not None) and rhs in UVMObject.uvm_global_copy_map:
return
if rhs is None:
uvm_report_warning("NoneCP",
"A None object was supplied to copy; copy is ignored", UVM_NONE)
return
UVMObject.uvm_global_copy_map[rhs] = self
UVMObject.depth += 1
self._m_uvm_field_automation(rhs, UVM_COPY, "")
self.do_copy(rhs)
UVMObject.depth -= 1
if UVMObject.depth == 0:
UVMObject.uvm_global_copy_map = {}
def do_copy(self, rhs) -> None:
"""
The `do_copy` method is the user-definable hook called by the `copy` method.
A derived class should override this method to include its fields in a `copy`
operation.
A typical implementation is as follows:
.. code-block:: python
class mytype (UVMObject):
...
field_1 = 0
def do_copy(self, rhs):
super.do_copy(rhs)
# Optionanl type checking
field_1 = rhs.field_1
The implementation must call `super().do_copy`, and can optionally do
type checking before copying.
Args:
rhs (UVMObject): Object to be copied.
"""
return
    def compare(self, rhs, comparer=None) -> bool:
        """
        Deep-compare this object against `rhs`, returning True on a match.

        Not meant to be overridden — derived classes participate via
        `do_compare`. The optional `comparer` policy controls aspects of
        the comparison and accumulates miscompare information; when
        omitted, the global `uvm_default_comparer` is used. Uses a shared
        status container, so this method is not reentrant/thread-safe.

        Args:
            rhs (UVMObject): Object to compare against (may be None).
            comparer (UVMComparer): Comparison policy object.
        Returns:
            bool: True when no miscompares were recorded and `do_compare`
            returned truthy; False otherwise.
        """
        # t = 0
        # do_compare result; stays 0 on any early-out path so the final
        # return evaluates False.
        dc = 0
        #static int style
        # style = 0
        # `done` short-circuits the remaining checks while still letting
        # the scope/rollup cleanup at the bottom run.
        done = 0
        cls = UVMObject
        # Select the comparer policy (argument or global default) and
        # publish it in the shared status container.
        if comparer is not None:
            cls._m_uvm_status_container.comparer = comparer
        else:
            from .uvm_global_vars import uvm_default_comparer
            cls._m_uvm_status_container.comparer = uvm_default_comparer
        comparer = cls._m_uvm_status_container.comparer
        # Depth 0 means this is the outermost compare: reset the policy's
        # accumulated state before descending.
        if(not cls._m_uvm_status_container.scope.depth()):
            comparer.compare_map.delete()
            comparer.result = 0
            comparer.miscompares = ""
            comparer.scope = cls._m_uvm_status_container.scope
        # Enter this object's scope (placeholder name if unnamed).
        if self.get_name() == "":
            cls._m_uvm_status_container.scope.down("<object>")
        else:
            cls._m_uvm_status_container.scope.down(self.get_name())
        # Case 1: comparing against None — record the miscompare; the
        # summary info message is emitted only at the top scope.
        if(not done and (rhs is None)):
            if(cls._m_uvm_status_container.scope.depth()):
                comparer.print_msg_object(self, rhs)
            else:
                comparer.print_msg_object(self, rhs)
                uvm_report_info("MISCMP",
                    sv.sformatf("%0d Miscompare(s) for object %s@%0d vs. None",
                        comparer.result,
                        cls._m_uvm_status_container.scope.get(),
                        self.get_inst_id()),
                    cls._m_uvm_status_container.comparer.verbosity)
            done = 1
        # Case 2: `rhs` already visited in this compare (cycle); only a
        # mapping to a different lhs object counts as a miscompare.
        if(not done and comparer.compare_map.exists(rhs)):
            if(comparer.compare_map[rhs] != self):
                comparer.print_msg_object(self, comparer.compare_map[rhs])
            done = 1  # don't do any more work after this case, but do cleanup
        # Case 3: optional type-name check. NOTE(review): the quoting in
        # this message is inconsistent (\" vs ') — looks like a cosmetic
        # bug in the emitted text; left as-is here.
        if(not done and comparer.check_type and (rhs is not None) and
                (self.get_type_name() != rhs.get_type_name())):
            cls._m_uvm_status_container.stringv = ("lhs type = \"" + self.get_type_name()
                + "' : rhs type = '" + rhs.get_type_name() + "'")
            comparer.print_msg(cls._m_uvm_status_container.stringv)
        # Normal path: mark rhs visited, run field automation, then the
        # user hook.
        if not done:
            comparer.compare_map[rhs] = self
            self._m_uvm_field_automation(rhs, UVM_COMPARE, "")
            dc = self.do_compare(rhs, comparer)
        # Leaving the outermost scope: pop it and print the summary rollup.
        if cls._m_uvm_status_container.scope.depth() == 1:
            cls._m_uvm_status_container.scope.up()
        if rhs is not None:
            comparer.print_rollup(self, rhs)
        return (comparer.result == 0 and dc == 1)
def do_compare(self, rhs, comparer) -> bool:
"""
The `do_compare` method is the user-definable hook called by the `compare`
method. A derived class should override this method to include its fields
in a compare operation. It should return 1 if the comparison succeeds, 0
otherwise.
A typical implementation is as follows:
.. code-block:: python
class mytype (UVMObject):
...
f1 = 0
def do_compare(self, rhs, comparer):
do_compare = super.do_compare(rhs,comparer)
# Optional type checking
do_compare &= comparer.compare_field_int("f1", f1, rhs.f1)
return do_compare
A derived class implementation must call `super().do_compare` to ensure its
base class' properties, if any, are included in the comparison. If type
matching is required instead of duck-typing, the user can also
implemented this checking.
The actual comparison should be implemented using the `UVMComparer` object
rather than direct field-by-field comparison. This enables users of your
class to customize how comparisons are performed and how much miscompare
information is collected. See `UVMComparer` for more details.
Args:
rhs (UVMObject):
comparer (UVMComparer):
Returns:
bool: True if objects match, False otherwise.
"""
return True
# // Group: Packing
# // Function: pack
#
# extern function int pack (ref bit bitstream[],
# input uvm_packer packer=None)
def pack(self, packer=None) -> Tuple[Any, Any]:
packer = self.m_pack(packer)
return packer.get_packed_size(), packer.get_bits()
# // Function: pack_bytes
#
# extern function int pack_bytes (ref byte unsigned bytestream[],
# input uvm_packer packer=None)
def pack_bytes(self, bytestream, packer=None) -> Any:
packer = self.m_pack(packer)
packed_bytes = packer.get_bytes()
for b in packed_bytes:
bytestream.append(b)
return packer.get_packed_size()
# // Function: pack_ints
# //
# // The pack methods bitwise-concatenate this object's properties into an array
# // of bits, bytes, or ints. The methods are not virtual and must not be
# // overloaded. To include additional fields in the pack operation, derived
# // classes should override the <do_pack> method.
# //
# // The optional `packer` argument specifies the packing policy, which governs
# // the packing operation. If a packer policy is not provided, the global
# // <uvm_default_packer> policy is used. See <uvm_packer> for more information.
# //
# // The return value is the total number of bits packed into the given array.
# // Use the array's built-in `size` method to get the number of bytes or ints
# // consumed during the packing process.
#
# extern function int pack_ints (ref int unsigned intstream[],
# input uvm_packer packer=None)
def pack_ints(self, intstream, packer=None) -> Any:
packer = self.m_pack(packer)
ints = packer.get_ints()
for i in ints:
intstream.append(i)
return packer.get_packed_size()
# // Function: do_pack
# //
# // The `do_pack` method is the user-definable hook called by the <pack> methods.
# // A derived class should override this method to include its fields in a pack
# // operation.
# //
# // The `packer` argument is the policy object for packing. The policy object
# // should be used to pack objects.
# //
# // A typical example of an object packing itself is as follows
# //
# //| class mysubtype extends mysupertype
# //| ...
# //| shortint myshort
# //| obj_type myobj
# //| byte myarray[]
# //| ...
# //| function void do_pack (uvm_packer packer)
# //| super.do_pack(packer); // pack mysupertype properties
# //| packer.pack_field_int(myarray.size(), 32)
# //| foreach (myarray)
# //| packer.pack_field_int(myarray[index], 8)
# //| packer.pack_field_int(myshort, $bits(myshort))
# //| packer.pack_object(myobj)
# //| endfunction
# //
# // The implementation must call ~super.do_pack~ so that base class properties
# // are packed as well.
# //
# // If your object contains dynamic data (object, string, queue, dynamic array,
# // or associative array), and you intend to unpack into an equivalent data
# // structure when unpacking, you must include meta-information about the
# // dynamic data when packing as follows.
# //
# // - For queues, dynamic arrays, or associative arrays, pack the number of
# // elements in the array in the 32 bits immediately before packing
# // individual elements, as shown above.
# //
# // - For string data types, append a zero byte after packing the string
# // contents.
# //
# // - For objects, pack 4 bits immediately before packing the object. For `None`
# // objects, pack 4'b0000. For non-`None` objects, pack 4'b0001.
# //
# // When the `uvm_field_* macros are used,
# // <Utility and Field Macros for Components and Objects>,
# // the above meta information is included provided the <uvm_packer::use_metadata>
# // variable is set for the packer.
# //
# // Packing order does not need to match declaration order. However, unpacking
# // order must match packing order.
def do_pack(self, packer) -> None:
return
# // Group: Unpacking
#
# // Function: unpack
#
# extern function int unpack (ref bit bitstream[],
# input uvm_packer packer=None)
def unpack(self, bitstream, packer=None) -> Any:
packer = self.m_unpack_pre(packer)
packer.put_bits(bitstream)
self.m_unpack_post(packer)
packer.set_packed_size()
return packer.get_packed_size()
# // Function: unpack_bytes
#
# extern function int unpack_bytes (ref byte unsigned bytestream[],
# input uvm_packer packer=None)
def unpack_bytes(self, bytestream, packer=None) -> Any:
packer = self.m_unpack_pre(packer)
packer.put_bytes(bytestream)
self.m_unpack_post(packer)
packer.set_packed_size()
return packer.get_packed_size()
# // Function: unpack_ints
# //
# // The unpack methods extract property values from an array of bits, bytes, or
# // ints. The method of unpacking `must` exactly correspond to the method of
# // packing. This is assured if (a) the same `packer` policy is used to pack
# // and unpack, and (b) the order of unpacking is the same as the order of
# // packing used to create the input array.
# //
# // The unpack methods are fixed (non-virtual) entry points that are directly
# // callable by the user. To include additional fields in the <unpack>
# // operation, derived classes should override the <do_unpack> method.
# //
# // The optional `packer` argument specifies the packing policy, which governs
# // both the pack and unpack operation. If a packer policy is not provided,
# // then the global `uvm_default_packer` policy is used. See uvm_packer for
# // more information.
# //
# // The return value is the actual number of bits unpacked from the given array.
#
# extern function int unpack_ints (ref int unsigned intstream[],
# input uvm_packer packer=None)
def unpack_ints(self, intstream, packer=None) -> Any:
packer = self.m_unpack_pre(packer)
packer.put_ints(intstream)
self.m_unpack_post(packer)
packer.set_packed_size()
return packer.get_packed_size()
# // Function: do_unpack
# //
# // The `do_unpack` method is the user-definable hook called by the <unpack>
# // method. A derived class should override this method to include its fields
# // in an unpack operation.
# //
# // The `packer` argument is the policy object for both packing and unpacking.
# // It must be the same packer used to pack the object into bits. Also,
# // do_unpack must unpack fields in the same order in which they were packed.
# // See <uvm_packer> for more information.
# //
# // The following implementation corresponds to the example given in do_pack.
# //
# //| function void do_unpack (uvm_packer packer)
# //| int sz
# //| super.do_unpack(packer); // unpack super's properties
# //| sz = packer.unpack_field_int(myarray.size(), 32)
# //| myarray.delete()
# //| for(int index=0; index<sz; index++)
# //| myarray[index] = packer.unpack_field_int(8)
# //| myshort = packer.unpack_field_int($bits(myshort))
# //| packer.unpack_object(myobj)
# //| endfunction
# //
# // If your object contains dynamic data (object, string, queue, dynamic array,
# // or associative array), and you intend to <unpack> into an equivalent data
# // structure, you must have included meta-information about the dynamic data
# // when it was packed.
# //
# // - For queues, dynamic arrays, or associative arrays, unpack the number of
# // elements in the array from the 32 bits immediately before unpacking
# // individual elements, as shown above.
# //
# // - For string data types, unpack into the new string until a `None` byte is
# // encountered.
# //
# // - For objects, unpack 4 bits into a byte or int variable. If the value
# // is 0, the target object should be set to `None` and unpacking continues to
# // the next property, if any. If the least significant bit is 1, then the
# // target object should be allocated and its properties unpacked.
def do_unpack(self, packer) -> None:
return
def set_int_local(self, field_name: str, value: int, recurse=True):
"""
Group: Configuration
Args:
field_name (str): Variable to set
value: Value for the variable
recurse (bool):
"""
UVMObject._m_uvm_status_container.cycle_check.clear()
UVMObject._m_uvm_status_container.m_uvm_cycle_scopes.clear()
UVMObject._m_uvm_status_container.status = 0
UVMObject._m_uvm_status_container.bitstream = value
self._m_uvm_field_automation(None, UVM_SETINT, field_name)
if UVMObject._m_uvm_status_container.warning and not self._m_uvm_status_container.status:
uvm_report_error("NOMTC", sv.sformatf("did not find a match for field %s", field_name),UVM_NONE)
UVMObject._m_uvm_status_container.cycle_check.clear()
def set_string_local(self, field_name: str, value: str, recurse=True):
"""
Function: set_string_local
Args:
field_name (str): Variable to set
value: Value for the variable
recurse (bool): If True, recurse into sub-objects.
"""
UVMObject._m_uvm_status_container.cycle_check.clear()
UVMObject._m_uvm_status_container.m_uvm_cycle_scopes.clear()
UVMObject._m_uvm_status_container.status = 0
UVMObject._m_uvm_status_container.stringv = value
self._m_uvm_field_automation(None, UVM_SETSTR, field_name)
if UVMObject._m_uvm_status_container.warning and not UVMObject._m_uvm_status_container.status:
uvm_report_error("NOMTC", sv.sformatf("did not find a match for field %s (@%0d)",
field_name, self.get_inst_id()), UVM_NONE)
UVMObject._m_uvm_status_container.cycle_check.clear()
def set_object_local(self, field_name: str, value: 'UVMObject', clone=1, recurse=1):
"""
These methods provide write access to integral, string, and
uvm_object-based properties indexed by a `field_name` string. The object
designer choose which, if any, properties will be accessible, and overrides
the appropriate methods depending on the properties' types. For objects,
the optional `clone` argument specifies whether to clone the `value`
argument before assignment.
The global `uvm_is_match` function is used to match the field names, so
`field_name` may contain wildcards.
An example implementation of all three methods is as follows.
.. code-block:: python
class mytype(UVMObject):
def __init__(self, name):
super().__init__(name)
self.myint = 0
self.mybyte = 0
self.myshort = 0
self.mystring = ""
self.myobj = None
# provide access to integral properties
def set_int_local(self, field_name, value):
if (uvm_is_match (field_name, "myint")):
self.myint = value
elif (uvm_is_match (field_name, "mybyte")):
selef.mybyte = value
# provide access to string properties
def set_string_local(self, field_name, value):
if (uvm_is_match (field_name, "mystring")):
self.mystring = value
# provide access to sub-objects
def set_object_local(self, field_name, value,clone=1):
if (uvm_is_match (field_name, "myobj")):
if (value is not None):
tmp = None
# if provided value is not correct type, produce error
if (!$cast(tmp, value)):
# error
else:
if(clone)
self.myobj = tmp.clone()
else
self.myobj = tmp
else:
myobj = None # value is None, so simply assign None to myobj
end
...
Although the object designer implements these methods to provide outside
access to one or more properties, they are intended for internal use (e.g.,
for command-line debugging and auto-configuration) and should not be called
directly by the user.
Args:
field_name (str): Variable to set
value: Value for the variable
clone (bool):
recurse (bool):
"""
# cc = None # uvm_object cc
UVMObject._m_uvm_status_container.cycle_check.clear()
UVMObject._m_uvm_status_container.m_uvm_cycle_scopes.clear()
if clone and (value is not None):
cc = value.clone()
if cc is not None:
cc.set_name(field_name)
value = cc
UVMObject._m_uvm_status_container.status = 0
UVMObject._m_uvm_status_container.object = value
UVMObject._m_uvm_status_container.clone = clone
self._m_uvm_field_automation(None, UVM_SETOBJ, field_name)
if UVMObject._m_uvm_status_container.warning and not UVMObject._m_uvm_status_container.status:
uvm_report_error("NOMTC", sv.sformatf("did not find a match for field %s", field_name), UVM_NONE)
UVMObject._m_uvm_status_container.cycle_check.clear()
# //---------------------------------------------------------------------------
# // **** Internal Methods and Properties ***
# // Do not use directly
# //---------------------------------------------------------------------------
#
# extern local function void m_pack (inout uvm_packer packer)
def m_pack(self, packer) -> Any:
if packer is not None:
UVMObject._m_uvm_status_container.packer = packer
else:
from .uvm_global_vars import uvm_default_packer
UVMObject._m_uvm_status_container.packer = uvm_default_packer
packer = UVMObject._m_uvm_status_container.packer
packer.reset()
packer.scope.down(self.get_name())
self._m_uvm_field_automation(None, UVM_PACK, "")
self.do_pack(packer)
packer.set_packed_size()
packer.scope.up()
return packer
# extern local function void m_unpack_pre (inout uvm_packer packer)
def m_unpack_pre(self, packer) -> Any:
if packer is not None:
UVMObject._m_uvm_status_container.packer = packer
else:
from .uvm_global_vars import uvm_default_packer
UVMObject._m_uvm_status_container.packer = uvm_default_packer
packer = UVMObject._m_uvm_status_container.packer
packer.reset()
return packer
# extern local function void m_unpack_post (uvm_packer packer)
    def m_unpack_post(self, packer) -> None:
        """Internal unpack finish: run field automation and do_unpack, then verify sizes."""
        provided_size = packer.get_packed_size()
        # Put this object into the hierarchy
        packer.scope.down(self.get_name())
        self._m_uvm_field_automation(None, UVM_UNPACK, "")
        self.do_unpack(packer)
        # Scope back up before leaving
        packer.scope.up()
        # Warn when the unpacked fields consumed a different number of bits
        # than were supplied (pack/unpack field order mismatch).
        if packer.get_packed_size() != provided_size:
            uvm_report_warning("BDUNPK", sv.sformatf(
                "Unpack operation unsuccessful: unpacked %0d bits from a total of %0d bits",
                packer.get_packed_size(), provided_size), UVM_NONE)
| 38.815888 | 109 | 0.618905 |
from typing import Dict, Any
from .sv import sv, sv_obj
from .uvm_misc import UVMStatusContainer
from .uvm_object_globals import (UVM_PRINT, UVM_NONE, UVM_COPY, UVM_COMPARE,
UVM_RECORD, UVM_SETINT, UVM_SETOBJ, UVM_SETSTR, UVM_PACK, UVM_UNPACK)
from .uvm_globals import uvm_report_error, uvm_report_warning, uvm_report_info
from typing import Tuple
class UVMObject(sv_obj):
    """Base class for UVM data objects.

    Provides naming, unique instance ids, and the copy/compare/print/record/
    pack/unpack service methods, with `do_*` hooks for derived classes.
    """

    type_id = None  # factory type wrapper; set by type registration
    depth = 0  # recursion bookkeeping used by copy()
    # Monotonic counter used to hand out unique instance ids.
    # (The original assigned this twice; the duplicate line was redundant.)
    m_inst_count = 0
    use_uvm_seeding = True  # gates the reseed() behavior
    uvm_global_copy_map = {}  # maps rhs -> lhs while a recursive copy() is in flight
    _m_uvm_status_container = UVMStatusContainer()  # shared scratch state for field automation

    def __init__(self, name: str):
        """Create an object with the given leaf name and a unique instance id."""
        sv_obj.__init__(self)
        self.name = name
        self.inst_id = UVMObject.m_inst_count
        UVMObject.m_inst_count += 1
        self.leaf_name = name
    def reseed(self) -> None:
        """Reseed this object's RNG when use_uvm_seeding is enabled (currently a no-op)."""
        if (UVMObject.use_uvm_seeding):
            pass
    def set_name(self, name: str):
        """Set the leaf (local) name of this object."""
        self.leaf_name = name
    def get_name(self) -> str:
        """Return the leaf name of this object."""
        return self.leaf_name
    def get_full_name(self) -> str:
        """Return the full hierarchical name; for plain objects this is the leaf name."""
        return self.get_name()
    def get_inst_id(self) -> int:
        """Return this object's unique instance id."""
        return self.inst_id
@classmethod
def get_inst_count(self) -> int:
return UVMObject.m_inst_count
    def get_type(self) -> None:
        """Error stub: derived classes must override to return their type wrapper."""
        uvm_report_error("NOTYPID", "get_type not implemented in derived class: "
                + str(self), UVM_NONE)
        return None
    def get_object_type(self) -> Any:
        """Look up this object's factory wrapper by type name; None when unknown."""
        from .uvm_coreservice import UVMCoreService
        cs = UVMCoreService.get()
        factory = cs.get_factory()
        if self.get_type_name() == "<unknown>":
            return None
        return factory.find_wrapper_by_name(self.get_type_name())
    def get_type_name(self) -> str:
        """Return this object's type name; derived classes should override."""
        return "<unknown>"
    def create(self, name="") -> 'UVMObject':
        """Allocate a new object of this type; derived classes should override."""
        return UVMObject(name)
def clone(self) -> 'UVMObject':
tmp = self.create(self.get_name())
if tmp is None:
uvm_report_warning("CRFLD", sv.sformatf(
"The create method failed for %s, object cannot be cloned",
self.get_name()), UVM_NONE)
else:
tmp.copy(self)
return tmp
    def print_obj(self, printer=None) -> None:
        """Print this object through `printer` (or the global default printer)."""
        if printer is None:
            from .uvm_global_vars import uvm_default_printer
            printer = uvm_default_printer
        if printer is None:
            uvm_report_error("NonePRINTER", "uvm_default_printer is None")
        sv.fwrite(printer.knobs.mcd, self.sprint(printer))
    def sprint(self, printer=None) -> str:
        """Return this object's printed form as a string.

        Returns "" when the printer is already mid-print (not at top level):
        in that case this object's fields are appended to the in-progress output.
        """
        if printer is None:
            from .uvm_global_vars import uvm_default_printer
            printer = uvm_default_printer
        if not printer.istop():
            UVMObject._m_uvm_status_container.printer = printer
            self._m_uvm_field_automation(None, UVM_PRINT, "")
            self.do_print(printer)
            return ""
        self._m_uvm_status_container = UVMObject._m_uvm_status_container
        printer.print_object(self.get_name(), self)
        # Some printers accumulate into m_string instead of emit().
        if printer.m_string != "":
            return printer.m_string
        return printer.emit()
    def do_print(self, printer) -> None:
        """User hook called by print/sprint; override to print custom fields."""
        return
    def convert2string(self) -> str:
        """User hook: return a single-string representation of this object."""
        return ""
    def _m_uvm_field_automation(self, tmp_data__, what__, str__) -> None:
        """Field-automation hook normally generated by uvm_object_utils; no-op here."""
        pass
    def record(self, recorder=None) -> None:
        """Record this object's fields through `recorder` (no-op when None)."""
        if recorder is None:
            return
        UVMObject._m_uvm_status_container.recorder = recorder
        recorder.recording_depth += 1
        self._m_uvm_field_automation(None, UVM_RECORD, "")
        self.do_record(recorder)
        recorder.recording_depth -= 1
    def do_record(self, recorder) -> None:
        """User hook called by record(); override to record custom fields."""
        return
    def copy(self, rhs: 'UVMObject'):
        """Copy `rhs` into self, using a global map to avoid re-copying objects."""
        UVMObject.depth = 0
        # rhs already handled during this copy operation: nothing to do.
        if (rhs is not None) and rhs in UVMObject.uvm_global_copy_map:
            return
        if rhs is None:
            uvm_report_warning("NoneCP",
                "A None object was supplied to copy; copy is ignored", UVM_NONE)
            return
        UVMObject.uvm_global_copy_map[rhs] = self
        UVMObject.depth += 1
        self._m_uvm_field_automation(rhs, UVM_COPY, "")
        self.do_copy(rhs)
        UVMObject.depth -= 1
        # Copy finished: drop the bookkeeping map.
        if UVMObject.depth == 0:
            UVMObject.uvm_global_copy_map = {}
    def do_copy(self, rhs) -> None:
        """User hook called by copy(); override to copy custom fields."""
        return
    def compare(self, rhs, comparer=None) -> bool:
        """Deep-compare self against `rhs`; True when no miscompares were found."""
        dc = 0
        done = 0
        cls = UVMObject
        if comparer is not None:
            cls._m_uvm_status_container.comparer = comparer
        else:
            from .uvm_global_vars import uvm_default_comparer
            cls._m_uvm_status_container.comparer = uvm_default_comparer
        comparer = cls._m_uvm_status_container.comparer
        # Top-level compare: reset comparer bookkeeping before descending.
        if(not cls._m_uvm_status_container.scope.depth()):
            comparer.compare_map.delete()
            comparer.result = 0
            comparer.miscompares = ""
            comparer.scope = cls._m_uvm_status_container.scope
        if self.get_name() == "":
            cls._m_uvm_status_container.scope.down("<object>")
        else:
            cls._m_uvm_status_container.scope.down(self.get_name())
        # rhs is None: record the miscompare (and summarize at top level).
        if(not done and (rhs is None)):
            if(cls._m_uvm_status_container.scope.depth()):
                comparer.print_msg_object(self, rhs)
            else:
                comparer.print_msg_object(self, rhs)
                uvm_report_info("MISCMP",
                    sv.sformatf("%0d Miscompare(s) for object %s@%0d vs. None",
                        comparer.result,
                        cls._m_uvm_status_container.scope.get(),
                        self.get_inst_id()),
                    cls._m_uvm_status_container.comparer.verbosity)
            done = 1
        # rhs already seen in this compare: only check it mapped to the same lhs.
        if(not done and comparer.compare_map.exists(rhs)):
            if(comparer.compare_map[rhs] != self):
                comparer.print_msg_object(self, comparer.compare_map[rhs])
            done = 1
        # Optional type-name check between lhs and rhs.
        if(not done and comparer.check_type and (rhs is not None) and
                (self.get_type_name() != rhs.get_type_name())):
            cls._m_uvm_status_container.stringv = ("lhs type = \"" + self.get_type_name()
                + "' : rhs type = '" + rhs.get_type_name() + "'")
            comparer.print_msg(cls._m_uvm_status_container.stringv)
        if not done:
            comparer.compare_map[rhs] = self
            self._m_uvm_field_automation(rhs, UVM_COMPARE, "")
            dc = self.do_compare(rhs, comparer)
        if cls._m_uvm_status_container.scope.depth() == 1:
            cls._m_uvm_status_container.scope.up()
        if rhs is not None:
            comparer.print_rollup(self, rhs)
        return (comparer.result == 0 and dc == 1)
    def do_compare(self, rhs, comparer) -> bool:
        """User hook called by compare(); override to compare custom fields."""
        return True
# // Group: Packing
# // Function: pack
#
# extern function int pack (ref bit bitstream[],
# input uvm_packer packer=None)
    def pack(self, packer=None) -> Tuple[Any, Any]:
        """Pack this object; returns (packed bit count, list of packed bits)."""
        packer = self.m_pack(packer)
        return packer.get_packed_size(), packer.get_bits()
# // Function: pack_bytes
#
# extern function int pack_bytes (ref byte unsigned bytestream[],
# input uvm_packer packer=None)
def pack_bytes(self, bytestream, packer=None) -> Any:
packer = self.m_pack(packer)
packed_bytes = packer.get_bytes()
for b in packed_bytes:
bytestream.append(b)
return packer.get_packed_size()
# // Function: pack_ints
# //
# // The pack methods bitwise-concatenate this object's properties into an array
# // of bits, bytes, or ints. The methods are not virtual and must not be
# // overloaded. To include additional fields in the pack operation, derived
# // classes should override the <do_pack> method.
# //
# // The optional `packer` argument specifies the packing policy, which governs
# // the packing operation. If a packer policy is not provided, the global
# // <uvm_default_packer> policy is used. See <uvm_packer> for more information.
# //
# // The return value is the total number of bits packed into the given array.
# // Use the array's built-in `size` method to get the number of bytes or ints
# // consumed during the packing process.
#
# extern function int pack_ints (ref int unsigned intstream[],
# input uvm_packer packer=None)
def pack_ints(self, intstream, packer=None) -> Any:
packer = self.m_pack(packer)
ints = packer.get_ints()
for i in ints:
intstream.append(i)
return packer.get_packed_size()
# // Function: do_pack
# //
# // The `do_pack` method is the user-definable hook called by the <pack> methods.
# // A derived class should override this method to include its fields in a pack
# // operation.
# //
# // The `packer` argument is the policy object for packing. The policy object
# // should be used to pack objects.
# //
# // A typical example of an object packing itself is as follows
# //
# //| class mysubtype extends mysupertype
# //| ...
# //| shortint myshort
# //| obj_type myobj
# //| byte myarray[]
# //| ...
# //| function void do_pack (uvm_packer packer)
# //| super.do_pack(packer); // pack mysupertype properties
# //| packer.pack_field_int(myarray.size(), 32)
# //| foreach (myarray)
# //| packer.pack_field_int(myarray[index], 8)
# //| packer.pack_field_int(myshort, $bits(myshort))
# //| packer.pack_object(myobj)
# //| endfunction
# //
# // The implementation must call ~super.do_pack~ so that base class properties
# // are packed as well.
# //
# // If your object contains dynamic data (object, string, queue, dynamic array,
# // or associative array), and you intend to unpack into an equivalent data
# // structure when unpacking, you must include meta-information about the
# // dynamic data when packing as follows.
# //
# // - For queues, dynamic arrays, or associative arrays, pack the number of
# // elements in the array in the 32 bits immediately before packing
# // individual elements, as shown above.
# //
# // - For string data types, append a zero byte after packing the string
# // contents.
# //
# // - For objects, pack 4 bits immediately before packing the object. For `None`
# // objects, pack 4'b0000. For non-`None` objects, pack 4'b0001.
# //
# // When the `uvm_field_* macros are used,
# // <Utility and Field Macros for Components and Objects>,
# // the above meta information is included provided the <uvm_packer::use_metadata>
# // variable is set for the packer.
# //
# // Packing order does not need to match declaration order. However, unpacking
# // order must match packing order.
    def do_pack(self, packer) -> None:
        """User hook called by the pack methods; override to pack custom fields."""
        return
# // Group: Unpacking
#
# // Function: unpack
#
# extern function int unpack (ref bit bitstream[],
# input uvm_packer packer=None)
    def unpack(self, bitstream, packer=None) -> Any:
        """Unpack this object's fields from a list of bits; returns bits consumed."""
        packer = self.m_unpack_pre(packer)
        packer.put_bits(bitstream)
        self.m_unpack_post(packer)
        packer.set_packed_size()
        return packer.get_packed_size()
# // Function: unpack_bytes
#
# extern function int unpack_bytes (ref byte unsigned bytestream[],
# input uvm_packer packer=None)
    def unpack_bytes(self, bytestream, packer=None) -> Any:
        """Unpack this object's fields from a list of bytes; returns bits consumed."""
        packer = self.m_unpack_pre(packer)
        packer.put_bytes(bytestream)
        self.m_unpack_post(packer)
        packer.set_packed_size()
        return packer.get_packed_size()
# // Function: unpack_ints
# //
# // The unpack methods extract property values from an array of bits, bytes, or
# // ints. The method of unpacking `must` exactly correspond to the method of
# // packing. This is assured if (a) the same `packer` policy is used to pack
# // and unpack, and (b) the order of unpacking is the same as the order of
# // packing used to create the input array.
# //
# // The unpack methods are fixed (non-virtual) entry points that are directly
# // callable by the user. To include additional fields in the <unpack>
# // operation, derived classes should override the <do_unpack> method.
# //
# // The optional `packer` argument specifies the packing policy, which governs
# // both the pack and unpack operation. If a packer policy is not provided,
# // then the global `uvm_default_packer` policy is used. See uvm_packer for
# // more information.
# //
# // The return value is the actual number of bits unpacked from the given array.
#
# extern function int unpack_ints (ref int unsigned intstream[],
# input uvm_packer packer=None)
    def unpack_ints(self, intstream, packer=None) -> Any:
        """Unpack this object's fields from a list of ints; returns bits consumed."""
        packer = self.m_unpack_pre(packer)
        packer.put_ints(intstream)
        self.m_unpack_post(packer)
        packer.set_packed_size()
        return packer.get_packed_size()
# // Function: do_unpack
# //
# // The `do_unpack` method is the user-definable hook called by the <unpack>
# // method. A derived class should override this method to include its fields
# // in an unpack operation.
# //
# // The `packer` argument is the policy object for both packing and unpacking.
# // It must be the same packer used to pack the object into bits. Also,
# // do_unpack must unpack fields in the same order in which they were packed.
# // See <uvm_packer> for more information.
# //
# // The following implementation corresponds to the example given in do_pack.
# //
# //| function void do_unpack (uvm_packer packer)
# //| int sz
# //| super.do_unpack(packer); // unpack super's properties
# //| sz = packer.unpack_field_int(myarray.size(), 32)
# //| myarray.delete()
# //| for(int index=0; index<sz; index++)
# //| myarray[index] = packer.unpack_field_int(8)
# //| myshort = packer.unpack_field_int($bits(myshort))
# //| packer.unpack_object(myobj)
# //| endfunction
# //
# // If your object contains dynamic data (object, string, queue, dynamic array,
# // or associative array), and you intend to <unpack> into an equivalent data
# // structure, you must have included meta-information about the dynamic data
# // when it was packed.
# //
# // - For queues, dynamic arrays, or associative arrays, unpack the number of
# // elements in the array from the 32 bits immediately before unpacking
# // individual elements, as shown above.
# //
# // - For string data types, unpack into the new string until a `None` byte is
# // encountered.
# //
# // - For objects, unpack 4 bits into a byte or int variable. If the value
# // is 0, the target object should be set to `None` and unpacking continues to
# // the next property, if any. If the least significant bit is 1, then the
# // target object should be allocated and its properties unpacked.
    def do_unpack(self, packer) -> None:
        """User hook called by the unpack methods; override to unpack custom fields."""
        return
def set_int_local(self, field_name: str, value: int, recurse=True):
UVMObject._m_uvm_status_container.cycle_check.clear()
UVMObject._m_uvm_status_container.m_uvm_cycle_scopes.clear()
UVMObject._m_uvm_status_container.status = 0
UVMObject._m_uvm_status_container.bitstream = value
self._m_uvm_field_automation(None, UVM_SETINT, field_name)
if UVMObject._m_uvm_status_container.warning and not self._m_uvm_status_container.status:
uvm_report_error("NOMTC", sv.sformatf("did not find a match for field %s", field_name),UVM_NONE)
UVMObject._m_uvm_status_container.cycle_check.clear()
    def set_string_local(self, field_name: str, value: str, recurse=True):
        """Set a string field of this object by name via field automation."""
        UVMObject._m_uvm_status_container.cycle_check.clear()
        UVMObject._m_uvm_status_container.m_uvm_cycle_scopes.clear()
        UVMObject._m_uvm_status_container.status = 0
        UVMObject._m_uvm_status_container.stringv = value
        self._m_uvm_field_automation(None, UVM_SETSTR, field_name)
        # No matching field found: report an error when warnings are enabled.
        if UVMObject._m_uvm_status_container.warning and not UVMObject._m_uvm_status_container.status:
            uvm_report_error("NOMTC", sv.sformatf("did not find a match for field %s (@%0d)",
                field_name, self.get_inst_id()), UVM_NONE)
        UVMObject._m_uvm_status_container.cycle_check.clear()
    def set_object_local(self, field_name: str, value: 'UVMObject', clone=1, recurse=1):
        """Set an object-valued field of this object by name via field automation.

        When `clone` is truthy, a clone of `value` (renamed to `field_name`) is
        stored instead of `value` itself.
        """
        # cc = None  # uvm_object cc
        UVMObject._m_uvm_status_container.cycle_check.clear()
        UVMObject._m_uvm_status_container.m_uvm_cycle_scopes.clear()
        if clone and (value is not None):
            cc = value.clone()
            if cc is not None:
                cc.set_name(field_name)
            # NOTE(review): when clone() fails, value becomes None here — presumably
            # intentional (do not store the unclonable original); confirm.
            value = cc
        UVMObject._m_uvm_status_container.status = 0
        UVMObject._m_uvm_status_container.object = value
        UVMObject._m_uvm_status_container.clone = clone
        self._m_uvm_field_automation(None, UVM_SETOBJ, field_name)
        if UVMObject._m_uvm_status_container.warning and not UVMObject._m_uvm_status_container.status:
            uvm_report_error("NOMTC", sv.sformatf("did not find a match for field %s", field_name), UVM_NONE)
        UVMObject._m_uvm_status_container.cycle_check.clear()
# //---------------------------------------------------------------------------
# // **** Internal Methods and Properties ***
# // Do not use directly
# //---------------------------------------------------------------------------
#
# extern local function void m_pack (inout uvm_packer packer)
    def m_pack(self, packer) -> Any:
        """Internal pack helper: run field automation and do_pack through a packer.

        Uses the given `packer` if provided, otherwise falls back to the global
        `uvm_default_packer`. Returns the packer holding the packed bits.
        """
        if packer is not None:
            UVMObject._m_uvm_status_container.packer = packer
        else:
            from .uvm_global_vars import uvm_default_packer
            UVMObject._m_uvm_status_container.packer = uvm_default_packer
        packer = UVMObject._m_uvm_status_container.packer
        packer.reset()
        # Scope down so packed fields are attributed to this object's name.
        packer.scope.down(self.get_name())
        self._m_uvm_field_automation(None, UVM_PACK, "")
        self.do_pack(packer)
        packer.set_packed_size()
        packer.scope.up()
        return packer
# extern local function void m_unpack_pre (inout uvm_packer packer)
    def m_unpack_pre(self, packer) -> Any:
        """Internal unpack setup: pick the packer (given or global default) and reset it."""
        if packer is not None:
            UVMObject._m_uvm_status_container.packer = packer
        else:
            from .uvm_global_vars import uvm_default_packer
            UVMObject._m_uvm_status_container.packer = uvm_default_packer
        packer = UVMObject._m_uvm_status_container.packer
        packer.reset()
        return packer
# extern local function void m_unpack_post (uvm_packer packer)
    def m_unpack_post(self, packer) -> None:
        """Internal unpack finish: run field automation and do_unpack, then verify sizes."""
        provided_size = packer.get_packed_size()
        # Put this object into the hierarchy
        packer.scope.down(self.get_name())
        self._m_uvm_field_automation(None, UVM_UNPACK, "")
        self.do_unpack(packer)
        # Scope back up before leaving
        packer.scope.up()
        # Warn when the unpacked fields consumed a different number of bits
        # than were supplied (pack/unpack field order mismatch).
        if packer.get_packed_size() != provided_size:
            uvm_report_warning("BDUNPK", sv.sformatf(
                "Unpack operation unsuccessful: unpacked %0d bits from a total of %0d bits",
                packer.get_packed_size(), provided_size), UVM_NONE)
| true | true |
f71184809c1d37c6cf6003cf5cf482e0e2c00413 | 851 | py | Python | api/source/actions/report.py | 1pkg/ReRe | 83f77d2cece0fb5f6d7b86a395fcca7d4e16459f | [
"MIT"
] | 1 | 2019-12-17T10:31:48.000Z | 2019-12-17T10:31:48.000Z | api/source/actions/report.py | c-pkg/ReRe | 83f77d2cece0fb5f6d7b86a395fcca7d4e16459f | [
"MIT"
] | null | null | null | api/source/actions/report.py | c-pkg/ReRe | 83f77d2cece0fb5f6d7b86a395fcca7d4e16459f | [
"MIT"
] | 1 | 2019-04-29T08:19:36.000Z | 2019-04-29T08:19:36.000Z | from errors import Request
from .mixins import Identify
class Report(Identify):
    """Action that forwards a user-submitted report to staff via email."""
    def _validate(self, request):
        """Validate the request: requires a non-empty 'message' field."""
        super()._validate(request)
        validator = self._application.validator
        self.__message = self._get(request, 'message', '').strip()
        if validator.isempty(self.__message):
            raise Request('message', self.__message)
    def _process(self, request):
        """Email the report to staff and acknowledge the reporting user."""
        storage = self._application.storage
        mail = self._application.mail
        token = self._session.token
        task_id = self._task.id
        subject = f'Report from {token} about task #{task_id}'
        mail.send(subject, self.__message)
        # Queue a thank-you notification for the reporting account.
        storage.push(
            self._session.account.uuid,
            '''
            Thank you for leaving report
            We're working on your issue
            ''',
        )
| 29.344828 | 66 | 0.599295 | from errors import Request
from .mixins import Identify
class Report(Identify):
    """Action that emails a user-submitted report to the staff."""
    def _validate(self, request):
        """Reject requests whose 'message' field is missing or blank."""
        super()._validate(request)
        self.__message = self._get(request, 'message', '').strip()
        if self._application.validator.isempty(self.__message):
            raise Request('message', self.__message)
    def _process(self, request):
        """Send the report by mail and push an acknowledgement to the user."""
        app = self._application
        subject = f'Report from {self._session.token} about task #{self._task.id}'
        app.mail.send(subject, self.__message)
        app.storage.push(
            self._session.account.uuid,
            '''
            Thank you for leaving report
            We're working on your issue
            ''',
        )
| true | true |
f71184decb3ccbf135e84e58d21e4b1a23058855 | 2,750 | py | Python | Canvas Announcement to Telegram bot/main.py | VoluSign/CanvasToTelegramBot | ce009616b0e59928705d216bb60ba38960a8121a | [
"MIT"
] | null | null | null | Canvas Announcement to Telegram bot/main.py | VoluSign/CanvasToTelegramBot | ce009616b0e59928705d216bb60ba38960a8121a | [
"MIT"
] | null | null | null | Canvas Announcement to Telegram bot/main.py | VoluSign/CanvasToTelegramBot | ce009616b0e59928705d216bb60ba38960a8121a | [
"MIT"
] | null | null | null | #install canvasapi, pyTelegramBotAPI
# Imports
# Imports
import sys, os
import canvasapi
import telebot
from html.parser import HTMLParser
from canvasapi import Canvas, discussion_topic

#----# CANVAS #----#


class HTMLFilter(HTMLParser):
    """HTML parser that accumulates text content, discarding all tags."""
    text = ""

    def handle_data(self, data):
        self.text += data


# Canvas API URL and user API key
API_URL = "!CANVAS BASE URL!"
API_KEY = "!CANVAS USER API KEY!"

# Initialize a new Canvas client and fetch the course
canvas = Canvas(API_URL, API_KEY)
COURSEID = "123456"
course = canvas.get_course(COURSEID)
courseName = course.name
user = "Teacher"

# Grab the latest announcement for the course
ann = canvas.get_announcements(context_codes=['course_{}'.format(COURSEID)])
postContent = str(ann[0].message)  # plain attribute access instead of __getattribute__

# Convert the announcement body from HTML to plain text
post = HTMLFilter()
post.feed(postContent)
finalPost = post.text

a = str(ann[0])

# Compare against the previously seen announcement; only flag when new.
# Context managers close the file handles even on error (the original
# leaked the read handle when the announcement was unchanged).
new = False
with open("latest.txt", "r") as check_file:
    last_seen = check_file.read()
if a != last_seen:
    new = True
    with open("latest.txt", "w+") as check_file:
        check_file.write(a)

#---# Telegram #---#
bot = telebot.TeleBot("!TELEGRAM BOT API KEY!")


# Handle commands: /link, /help, & /latest — each handler gets a distinct
# name (the original defined three functions all called handle_command).
@bot.message_handler(commands=['link'])
def handle_link(message):
    """Reply with a direct link to the Canvas course."""
    bot.reply_to(message, "Bot message: Here is a direct link to the canvas course. It will only work if you're logged in: https://gastoncs.instructure.com/courses/102829")


@bot.message_handler(commands=['help'])
def handle_help(message):
    """Reply with bot status and the list of commands."""
    bot.reply_to(message, "Bot message: The bot is Active. This bot was made in python by the one and only VoluSign. The source code for this bot can be found at https://github.com/VoluSign/CanvasToTelegramBot")
    bot.reply_to(message, "Commands: /help, /link, /latest")


@bot.message_handler(commands=['latest'])
def handle_latest(message):
    """Reply with the most recent announcement text."""
    bot.reply_to(message, "Bot message: The following message will contain the most recent post to the Class of 2022 pertaining to scholarships:")
    bot.reply_to(message, f"{courseName} - {user}: {finalPost}")
    # Additionally flag the announcement when it was new at startup
    if new:
        bot.reply_to(message, f'Latest Announcement: {finalPost}')


# Starts server while script is running
bot.polling()
import sys, os
import canvasapi
import telebot
from html.parser import HTMLParser
from canvasapi import Canvas, discussion_topic
Parser):
text = ""
def handle_data(self, postContent):
self.text += postContent
new = False
API_URL = "!CANVAS BASE URL!"
API_KEY = "!CANVAS USER API KEY!"
canvas = Canvas(API_URL, API_KEY)
COURSEID = "123456"
course = canvas.get_course(COURSEID)
courseName = course.name
#For output
user = "Teacher"
#Opens txt file for check
aCheck = open("latest.txt","r")
aCheckStr = aCheck.read()
#Gets latest announcement
ann = canvas.get_announcements(context_codes=['course_{}'.format(COURSEID)])
#gets dumb stupid message from html
postContent = str(ann[0].__getattribute__("message"))
#Converts post from html to ascii
post = HTMLFilter()
post.feed(postContent)
finalPost = post.text
#Converts to string for following if statement
a = str(ann[0])
#stores message so it doesnt send repeating messages
if a != str(aCheckStr):
new = True
aCheckOverWrite = open("latest.txt","w+")
aCheckOverWrite.write(a)
aCheck.close()
aCheckOverWrite.close()
#---------------------#
#if new = true, use to push message
#---# Telegram #---#
bot = telebot.TeleBot("!TELEGRAM BOT API KEY!")
#Handle commands: /link, /help, & /latest
@bot.message_handler(commands=['link'])
def handle_command(message):
bot.reply_to(message, "Bot message: Here is a direct link to the canvas course. It will only work if you're logged in: https://gastoncs.instructure.com/courses/102829")
@bot.message_handler(commands=['help'])
def handle_command(message):
bot.reply_to(message, "Bot message: The bot is Active. This bot was made in python by the one and only VoluSign. The source code for this bot can be found at https://github.com/VoluSign/CanvasToTelegramBot")
bot.reply_to(message, "Commands: /help, /link, /latest")
@bot.message_handler(commands=['latest'])
def handle_command(message):
bot.reply_to(message, "Bot message: The following message will contain the most recent post to the Class of 2022 pertaining to scholarships:")
bot.reply_to(message, f"{courseName} - {user}: {finalPost}")
if new == True:
bot.reply_to(message, f'Latest Announcement: {finalPost}')
bot.polling()
| true | true |
f71185711cb0e985705a4aeadd7aec248ed1e378 | 1,208 | py | Python | tests/integration/bulkexports/v1/test_export.py | ashish-s/twilio-python | 5462b05af0906a1464b1e95a56a1f15afddc3b8c | [
"MIT"
] | 1 | 2020-10-29T19:28:25.000Z | 2020-10-29T19:28:25.000Z | tests/integration/bulkexports/v1/test_export.py | CostantiniMatteo/twilio-python | 9eee1ca9e73790b12678e9a5660206ea44948d00 | [
"MIT"
] | 1 | 2020-08-25T15:27:57.000Z | 2020-08-25T15:27:57.000Z | tests/integration/bulkexports/v1/test_export.py | team-telnyx/twexit-python | 69e11c5c2b5681f9bc410795dda0cf8942219e6f | [
"MIT"
] | null | null | null | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class ExportTestCase(IntegrationTestCase):
    """Integration tests for the BulkExports v1 Export resource."""
    def test_fetch_request(self):
        """fetch() must issue a GET to the expected Exports URL."""
        # Mock an error response so the outgoing request is captured without
        # needing a parseable body.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.bulkexports.v1.exports("resource_type").fetch()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://bulkexports.twilio.com/v1/Exports/resource_type',
        ))
    def test_fetch_response(self):
        """fetch() must parse a successful Export payload into an instance."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "resource_type": "Calls",
                "url": "https://bulkexports.twilio.com/v1/Exports/Calls",
                "links": {
                    "days": "https://bulkexports.twilio.com/v1/Exports/Calls/Days"
                }
            }
            '''
        ))
        actual = self.client.bulkexports.v1.exports("resource_type").fetch()
        self.assertIsNotNone(actual)
| 26.844444 | 82 | 0.575331 |
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class ExportTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.bulkexports.v1.exports("resource_type").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://bulkexports.twilio.com/v1/Exports/resource_type',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"resource_type": "Calls",
"url": "https://bulkexports.twilio.com/v1/Exports/Calls",
"links": {
"days": "https://bulkexports.twilio.com/v1/Exports/Calls/Days"
}
}
'''
))
actual = self.client.bulkexports.v1.exports("resource_type").fetch()
self.assertIsNotNone(actual)
| true | true |
f7118586b9feaedeb8d333db83568c6708557252 | 2,356 | py | Python | oidc_server/app/models.py | didx-xyz/aries-accpy-oidc | 63a91a58861383c38a43bca0aab0e0e837596a98 | [
"Apache-2.0"
] | null | null | null | oidc_server/app/models.py | didx-xyz/aries-accpy-oidc | 63a91a58861383c38a43bca0aab0e0e837596a98 | [
"Apache-2.0"
] | null | null | null | oidc_server/app/models.py | didx-xyz/aries-accpy-oidc | 63a91a58861383c38a43bca0aab0e0e837596a98 | [
"Apache-2.0"
] | null | null | null | '''OIDC server example'''
# import datetime
from sqlalchemy import Column, Integer, String
from sqlalchemy.dialects.sqlite import JSON
# from authlib.integrations.sqla_oauth2 import (
# OAuth2ClientMixin,
# OAuth2TokenMixin,
# OAuth2AuthorizationCodeMixin
# )
from database import Base
from utils import disambiguate_referent
import uuid
class User(Base): # pylint: disable=R0903
    '''User record keyed by an internal id with an external UUID.'''
    __tablename__ = "user"
    id = Column(Integer, primary_key=True)
    uuid = Column(String(100), unique=True)
    def get_id(self):
        '''Return the user's primary-key identifier.'''
        return self.id
# OIDC Authentication Challenge
# Template for a proof request that will be sent as a challenge to authenticating users
class OIDCProofRequest(Base):
    '''Template for a proof request used as an OIDC authentication challenge.'''
    __tablename__ = 'oidc_proof_request'
    # The oidc scope allows a relying party to specify the proof request the OP should challenge the user with
    oidc_scope = Column(String(100), primary_key=True)
    # Attribute within the proof request that identifies the subject responding to the authentication challenge
    subject_identifier = Column(String(100))
    proof_request = Column(JSON)
    def get_oidc_scope(self):
        '''Fetch oidc proof request identifier'''
        return self.oidc_scope
    def __str__(self):
        # Bug fix: this model has no `id` attribute (the primary key is
        # oidc_scope), so the original f"{self.id}" raised AttributeError.
        return f"{self.oidc_scope}"
    def to_json(self):
        '''Render the stored proof request as a proof-request dict.

        List entries under requested_attributes/requested_predicates are keyed
        by their "label" (falling back to a random UUID). A duplicate label is
        disambiguated once; a second collision is not re-checked.
        '''
        proof_request = {
            "name": self.proof_request.get("name", ""),
            "version": self.proof_request.get("version", ""),
            "requested_attributes": {},
            "requested_predicates": {},
        }
        for attr in self.proof_request.get("requested_attributes", []):
            label = attr.get("label", str(uuid.uuid4()))
            if label in proof_request["requested_attributes"]:
                label = disambiguate_referent(label)
            proof_request["requested_attributes"][label] = attr
        for attr in self.proof_request.get("requested_predicates", []):
            label = attr.get("label", str(uuid.uuid4()))
            if label in proof_request["requested_predicates"]:
                label = disambiguate_referent(label)
            proof_request["requested_predicates"][label] = attr
        return {"proof_request": proof_request}
from sqlalchemy import Column, Integer, String
from sqlalchemy.dialects.sqlite import JSON
from database import Base
from utils import disambiguate_referent
import uuid
class User(Base):
__tablename__ = "user"
id = Column(Integer, primary_key=True)
uuid = Column(String(100), unique=True)
def get_id(self):
return self.id
class OIDCProofRequest(Base):
    '''Proof-request template used as an OIDC authentication challenge.'''
    __tablename__ = 'oidc_proof_request'
    oidc_scope = Column(String(100), primary_key=True)  # scope requested by the relying party
    subject_identifier = Column(String(100))  # attribute identifying the authenticating subject
    proof_request = Column(JSON)
    def get_oidc_scope(self):
        '''Return the scope that identifies this proof request.'''
        return self.oidc_scope
    def __str__(self):
        # Bug fix: no `id` attribute exists on this model (primary key is
        # oidc_scope); the original f"{self.id}" raised AttributeError.
        return f"{self.oidc_scope}"
    def to_json(self):
        '''Render the stored proof request as a proof-request dict.'''
        proof_request = {
            "name": self.proof_request.get("name", ""),
            "version": self.proof_request.get("version", ""),
            "requested_attributes": {},
            "requested_predicates": {},
        }
        for attr in self.proof_request.get("requested_attributes", []):
            label = attr.get("label", str(uuid.uuid4()))
            if label in proof_request["requested_attributes"]:
                label = disambiguate_referent(label)
            proof_request["requested_attributes"][label] = attr
        for attr in self.proof_request.get("requested_predicates", []):
            label = attr.get("label", str(uuid.uuid4()))
            if label in proof_request["requested_predicates"]:
                label = disambiguate_referent(label)
            proof_request["requested_predicates"][label] = attr
        return {"proof_request": proof_request}
f7118591d151dce6e3fd6f3d413d3c52d74ea81f | 440 | py | Python | examples/send.py | apistd/uni-python-sdk | e0607cfce86bea696896864a55f157d69ec532a2 | [
"MIT"
] | null | null | null | examples/send.py | apistd/uni-python-sdk | e0607cfce86bea696896864a55f157d69ec532a2 | [
"MIT"
] | null | null | null | examples/send.py | apistd/uni-python-sdk | e0607cfce86bea696896864a55f157d69ec532a2 | [
"MIT"
] | null | null | null | from unisdk.sms import UniSMS
from unisdk.exception import UniException
def example():
    """Send a demo SMS via UniSMS; prints the API response or the error."""
    client = UniSMS("your access key id", "your access key secret")
    try:
        res = client.send({
            "to": "your phone number",
            "signature": "UniSMS",
            "templateId": "login_tmpl",
            "templateData": {
                "code": 7777
            }
        })
        print(res)
    except UniException as e:
        # API-level failure (bad credentials, invalid number, ...)
        print(e)
if __name__ == '__main__':
    example()
| 20 | 65 | 0.604545 | from unisdk.sms import UniSMS
from unisdk.exception import UniException
def example():
client = UniSMS("your access key id", "your access key secret")
try:
res = client.send({
"to": "your phone number",
"signature": "UniSMS",
"templateId": "login_tmpl",
"templateData": {
"code": 7777
}
})
print(res)
except UniException as e:
print(e)
if __name__ == '__main__':
example()
| true | true |
f7118593cd8a341fc9d5b082a83299a32ae61b18 | 1,666 | py | Python | setup.py | rafiibrahim8/truewho | 59323414ec196964bcb69364b942b0e11a26b592 | [
"MIT"
] | 1 | 2021-05-24T15:37:36.000Z | 2021-05-24T15:37:36.000Z | setup.py | rafiibrahim8/truewho | 59323414ec196964bcb69364b942b0e11a26b592 | [
"MIT"
] | 2 | 2021-08-17T16:22:24.000Z | 2022-02-08T20:40:55.000Z | setup.py | rafiibrahim8/truewho | 59323414ec196964bcb69364b942b0e11a26b592 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from truewho import __version__
def read_file(filename, lines=False):
try:
with open(filename, "r") as f:
if lines:
return [i.strip() for i in f.readlines() if (i.strip())]
return f.read()
except:
print("Can not read file:", filename)
return None
long_description = read_file("README.md")
setup(
name="truewho",
version=__version__,
author="Ibrahim Rafi",
author_email="me@ibrahimrafi.me",
license="MIT",
url="https://github.com/rafiibrahim8/truewho",
download_url="https://github.com/rafiibrahim8/truewho/archive/v{}.tar.gz".format(
__version__
),
install_requires=["phone-iso3166", "requests", "click"],
description="Check a phone number for name with Truecaller in command line.",
long_description=long_description,
long_description_content_type="text/markdown",
keywords=["truewho", "Truecaller", "Spam", "Call"],
packages=find_packages(),
entry_points=dict(console_scripts=["truewho=truewho.truewho:main"]),
platforms=["any"],
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: End Users/Desktop",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
)
| 33.32 | 85 | 0.62485 | from setuptools import setup, find_packages
from truewho import __version__
def read_file(filename, lines=False):
try:
with open(filename, "r") as f:
if lines:
return [i.strip() for i in f.readlines() if (i.strip())]
return f.read()
except:
print("Can not read file:", filename)
return None
long_description = read_file("README.md")
setup(
name="truewho",
version=__version__,
author="Ibrahim Rafi",
author_email="me@ibrahimrafi.me",
license="MIT",
url="https://github.com/rafiibrahim8/truewho",
download_url="https://github.com/rafiibrahim8/truewho/archive/v{}.tar.gz".format(
__version__
),
install_requires=["phone-iso3166", "requests", "click"],
description="Check a phone number for name with Truecaller in command line.",
long_description=long_description,
long_description_content_type="text/markdown",
keywords=["truewho", "Truecaller", "Spam", "Call"],
packages=find_packages(),
entry_points=dict(console_scripts=["truewho=truewho.truewho:main"]),
platforms=["any"],
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: End Users/Desktop",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
)
| true | true |
f71185ef3a091033b981050e0faaa74cbc6fd8c3 | 1,244 | py | Python | airflow/contrib/operators/bigquery_to_bigquery.py | dorranh/airflow | 1a9a2cadcf8606cfcb729d1323dd33dfacc64633 | [
"Apache-2.0"
] | null | null | null | airflow/contrib/operators/bigquery_to_bigquery.py | dorranh/airflow | 1a9a2cadcf8606cfcb729d1323dd33dfacc64633 | [
"Apache-2.0"
] | 1 | 2019-05-14T14:32:40.000Z | 2019-05-14T14:32:40.000Z | airflow/contrib/operators/bigquery_to_bigquery.py | dorranh/airflow | 1a9a2cadcf8606cfcb729d1323dd33dfacc64633 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.google.cloud.operators.bigquery_to_bigquery`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.google.cloud.operators.bigquery_to_bigquery import BigQueryToBigQueryOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.google.cloud.operators.bigquery_to_bigquery`.",
DeprecationWarning, stacklevel=2
)
| 41.466667 | 109 | 0.777331 |
import warnings
from airflow.providers.google.cloud.operators.bigquery_to_bigquery import BigQueryToBigQueryOperator
warnings.warn(
"This module is deprecated. Please use `airflow.providers.google.cloud.operators.bigquery_to_bigquery`.",
DeprecationWarning, stacklevel=2
)
| true | true |
f71186d761ae034d147180c445dbdb503ea95a8a | 270 | py | Python | messaging_abstract/protocol/protocol.py | fgiorgetti/qpid-dispatch-tests | 164c609d28db87692eed53d5361aa1ee5c97375c | [
"Apache-2.0"
] | null | null | null | messaging_abstract/protocol/protocol.py | fgiorgetti/qpid-dispatch-tests | 164c609d28db87692eed53d5361aa1ee5c97375c | [
"Apache-2.0"
] | 9 | 2018-09-25T10:10:58.000Z | 2019-03-19T14:59:43.000Z | messaging_abstract/protocol/protocol.py | fgiorgetti/qpid-dispatch-tests | 164c609d28db87692eed53d5361aa1ee5c97375c | [
"Apache-2.0"
] | 1 | 2019-03-13T10:40:35.000Z | 2019-03-13T10:40:35.000Z | class Protocol:
""" Protocol abstraction"""
def __init__(self, transaction, transport, default_port=None):
self.name = type(self).__name__
self.default_port = default_port
self.transaction = transaction
self.transport = transport
| 33.75 | 66 | 0.677778 | class Protocol:
def __init__(self, transaction, transport, default_port=None):
self.name = type(self).__name__
self.default_port = default_port
self.transaction = transaction
self.transport = transport
| true | true |
f71186fae75d95345162e3986a1e82e564e2b9b9 | 55,150 | py | Python | openprocurement/tender/openuadefense/tests/bid.py | Leits/openprocurement.tender.openuadefense | e7c512ed21166ae1928950bce80a11106fa2e545 | [
"Apache-2.0"
] | null | null | null | openprocurement/tender/openuadefense/tests/bid.py | Leits/openprocurement.tender.openuadefense | e7c512ed21166ae1928950bce80a11106fa2e545 | [
"Apache-2.0"
] | 2 | 2021-03-26T00:34:56.000Z | 2022-03-21T22:20:41.000Z | openprocurement/tender/openuadefense/tests/bid.py | leits/openprocurement.tender.openuadefense | e7c512ed21166ae1928950bce80a11106fa2e545 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
from copy import deepcopy
from openprocurement.api.tests.base import test_organization
from openprocurement.tender.openua.tests.base import test_bids
from openprocurement.tender.openuadefense.tests.base import (
BaseTenderUAContentWebTest,
test_tender_data,
test_features_tender_ua_data)
class TenderBidResourceTest(BaseTenderUAContentWebTest):
initial_status = 'active.tendering'
def test_create_tender_biddder_invalid(self):
response = self.app.post_json('/tenders/some_id/bids', {
'data': {'tenderers': [test_organization], "value": {"amount": 500}}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
request_path = '/tenders/{}/bids'.format(self.tender_id)
response = self.app.post(request_path, 'data', status=415)
self.assertEqual(response.status, '415 Unsupported Media Type')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description':
u"Content-Type header should be one of ['application/json']", u'location': u'header', u'name': u'Content-Type'}
])
response = self.app.post(
request_path, 'data', content_type='application/json', status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'No JSON object could be decoded',
u'location': u'body', u'name': u'data'}
])
response = self.app.post_json(request_path, 'data', status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Data not available',
u'location': u'body', u'name': u'data'}
])
response = self.app.post_json(
request_path, {'not_data': {}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Data not available',
u'location': u'body', u'name': u'data'}
])
response = self.app.post_json(request_path, {'data': {
'invalid_field': 'invalid_value'}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Rogue field', u'location':
u'body', u'name': u'invalid_field'}
])
response = self.app.post_json(request_path, {
'data': {'tenderers': [{'identifier': 'invalid_value'}]}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': {u'identifier': [
u'Please use a mapping for this field or Identifier instance instead of unicode.']}, u'location': u'body', u'name': u'tenderers'}
])
response = self.app.post_json(request_path, {
'data': {'tenderers': [{'identifier': {}}], 'selfEligible': True, 'selfQualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [{u'contactPoint': [u'This field is required.'], u'identifier': {u'scheme': [u'This field is required.'], u'id': [u'This field is required.']}, u'name': [u'This field is required.'], u'address': [u'This field is required.']}], u'location': u'body', u'name': u'tenderers'}
])
response = self.app.post_json(request_path, {'data': {'tenderers': [{
'name': 'name', 'identifier': {'uri': 'invalid_value'}}], 'selfEligible': True, 'selfQualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [{u'contactPoint': [u'This field is required.'], u'identifier': {u'scheme': [u'This field is required.'], u'id': [u'This field is required.'], u'uri': [u'Not a well formed URL.']}, u'address': [u'This field is required.']}], u'location': u'body', u'name': u'tenderers'}
])
response = self.app.post_json(request_path, {'data': {'tenderers': [test_organization], 'selfEligible': True, 'selfQualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'This field is required.'], u'location': u'body', u'name': u'value'}
])
response = self.app.post_json(request_path, {'data': {'tenderers': [test_organization], "value": {"amount": 500, 'valueAddedTaxIncluded': False}, 'selfEligible': True, 'selfQualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'valueAddedTaxIncluded of bid should be identical to valueAddedTaxIncluded of value of tender'], u'location': u'body', u'name': u'value'}
])
response = self.app.post_json(request_path, {'data': {'tenderers': [test_organization], "value": {"amount": 500, 'currency': "USD"}, 'selfEligible': True, 'selfQualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'currency of bid should be identical to currency of value of tender'], u'location': u'body', u'name': u'value'},
])
response = self.app.post_json(request_path, {'data': {'tenderers': test_organization, "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u"invalid literal for int() with base 10: 'contactPoint'", u'location': u'body', u'name': u'data'},
])
def test_create_tender_bidder(self):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
self.assertEqual(bid['tenderers'][0]['name'], test_organization['name'])
self.assertIn('id', bid)
self.assertIn(bid['id'], response.headers['Location'])
self.set_status('complete')
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't add bid in current (complete) tender status")
def test_patch_tender_bidder(self):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'selfEligible': True, 'selfQualified': True, 'status': 'draft',
'tenderers': [test_organization], "value": {"amount": 500}}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
bid_token = response.json['access']['token']
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid['id'], bid_token), {"data": {"value": {"amount": 600}}}, status=200)
self.assertEqual(response.status, '200 OK')
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid['id'], bid_token), {"data": {'status': 'active'}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'value of bid should be less than value of tender'], u'location': u'body', u'name': u'value'}
])
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(
self.tender_id, bid['id'], bid_token), {"data": {'status': 'active', "value": {"amount": 500}}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(
self.tender_id, bid['id'], bid_token), {"data": {"value": {"amount": 400}}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["value"]["amount"], 400)
self.assertNotEqual(response.json['data']['date'], bid['date'])
response = self.app.patch_json('/tenders/{}/bids/some_id'.format(self.tender_id), {"data": {"value": {"amount": 400}}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.patch_json('/tenders/some_id/bids/some_id', {"data": {"value": {"amount": 400}}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
self.set_status('complete')
response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["value"]["amount"], 400)
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(
self.tender_id, bid['id'], bid_token), {"data": {"value": {"amount": 400}}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update bid in current (complete) tender status")
def test_get_tender_bidder(self):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
bid_token = response.json['access']['token']
response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't view bid in current (active.tendering) tender status")
response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid['id'], bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], bid)
self.set_status('active.qualification')
response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
bid_data = response.json['data']
#self.assertIn(u'participationUrl', bid_data)
#bid_data.pop(u'participationUrl')
self.assertEqual(bid_data, bid)
response = self.app.get('/tenders/{}/bids/some_id'.format(self.tender_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.get('/tenders/some_id/bids/some_id', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
response = self.app.delete('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't delete bid in current (active.qualification) tender status")
def test_delete_tender_bidder(self):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
response = self.app.delete('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['id'], bid['id'])
self.assertEqual(response.json['data']['status'], 'deleted')
# deleted bid does not contain bid information
self.assertFalse('value' in response.json['data'])
self.assertFalse('tenderers' in response.json['data'])
self.assertFalse('date' in response.json['data'])
revisions = self.db.get(self.tender_id).get('revisions')
self.assertTrue(any([i for i in revisions[-2][u'changes'] if i['op'] == u'remove' and i['path'] == u'/bids']))
self.assertTrue(any([i for i in revisions[-1][u'changes'] if i['op'] == u'replace' and i['path'] == u'/bids/0/status']))
response = self.app.delete('/tenders/{}/bids/some_id'.format(self.tender_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.delete('/tenders/some_id/bids/some_id', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
# finished tender does not show deleted bid info
self.set_status('complete')
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']['bids']), 1)
bid_data = response.json['data']['bids'][0]
self.assertEqual(bid_data['id'], bid['id'])
self.assertEqual(bid_data['status'], 'deleted')
self.assertFalse('value' in bid_data)
self.assertFalse('tenderers' in bid_data)
self.assertFalse('date' in bid_data)
def test_deleted_bid_is_not_restorable(self):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
bid_token = response.json['access']['token']
response = self.app.delete('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['id'], bid['id'])
self.assertEqual(response.json['data']['status'], 'deleted')
# try to restore deleted bid
response = self.app.patch_json('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']), {"data": {
'status': 'active',
}})
self.assertEqual(response.status, '200 OK')
response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid['id'], bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertNotEqual(response.json['data']['status'], 'deleted')
self.assertEqual(response.json['data']['status'], 'active')
def test_deleted_bid_do_not_locks_tender_in_state(self):
bids = []
for bid_amount in (400, 405):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": bid_amount}, 'selfEligible': True, 'selfQualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bids.append(response.json['data'])
# delete first bid
response = self.app.delete('/tenders/{}/bids/{}'.format(self.tender_id, bids[0]['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['id'], bids[0]['id'])
self.assertEqual(response.json['data']['status'], 'deleted')
# try to change tender state
self.set_status('active.qualification')
# check tender status
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active.qualification')
# check bids
response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, bids[0]['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'deleted')
response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, bids[1]['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active')
def test_get_tender_tenderers(self):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
response = self.app.get('/tenders/{}/bids'.format(self.tender_id), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't view bids in current (active.tendering) tender status")
self.set_status('active.qualification')
response = self.app.get('/tenders/{}/bids'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'][0], bid)
response = self.app.get('/tenders/some_id/bids', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
def test_bid_Administrator_change(self):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
self.app.authorization = ('Basic', ('administrator', ''))
response = self.app.patch_json('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']), {"data": {
'tenderers': [{"identifier": {"id": "00000000"}}],
"value": {"amount": 400}
}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertNotEqual(response.json['data']["value"]["amount"], 400)
self.assertEqual(response.json['data']["tenderers"][0]["identifier"]["id"], "00000000")
def test_bids_invalidation_on_tender_change(self):
bids_access = {}
# submit bids
for data in test_bids:
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bids_access[response.json['data']['id']] = response.json['access']['token']
# check initial status
for bid_id, token in bids_access.items():
response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid_id, token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active')
# update tender. we can set value that is less than a value in bids as
# they will be invalidated by this request
response = self.app.patch_json('/tenders/{}?acc_token={}'.format(self.tender_id, self.tender_token), {"data":
{"value": {'amount': 300.0}}
})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']["value"]["amount"], 300)
# check bids status
for bid_id, token in bids_access.items():
response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid_id, token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'invalid')
# check that tender status change does not invalidate bids
# submit one more bid. check for invalid value first
response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': test_bids[0]}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'value of bid should be less than value of tender'], u'location': u'body', u'name': u'value'}
])
# and submit valid bid
data = deepcopy(test_bids[0])
data['value']['amount'] = 299
response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': data})
self.assertEqual(response.status, '201 Created')
valid_bid_id = response.json['data']['id']
# change tender status
self.set_status('active.qualification')
# check tender status
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active.qualification')
# tender should display all bids
self.assertEqual(len(response.json['data']['bids']), 3)
# invalidated bids should show only 'id' and 'status' fields
for bid in response.json['data']['bids']:
if bid['status'] == 'invalid':
self.assertTrue('id' in bid)
self.assertFalse('value' in bid)
self.assertFalse('tenderers' in bid)
self.assertFalse('date' in bid)
# invalidated bids stay invalidated
for bid_id, token in bids_access.items():
response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, bid_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'invalid')
# invalidated bids displays only 'id' and 'status' fields
self.assertFalse('value' in response.json['data'])
self.assertFalse('tenderers' in response.json['data'])
self.assertFalse('date' in response.json['data'])
# and valid bid is not invalidated
response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, valid_bid_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active')
# and displays all his data
self.assertTrue('value' in response.json['data'])
self.assertTrue('tenderers' in response.json['data'])
self.assertTrue('date' in response.json['data'])
# check bids availability on finished tender
self.set_status('complete')
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']['bids']), 3)
for bid in response.json['data']['bids']:
if bid['id'] in bids_access: # previously invalidated bids
self.assertEqual(bid['status'], 'invalid')
self.assertFalse('value' in bid)
self.assertFalse('tenderers' in bid)
self.assertFalse('date' in bid)
else: # valid bid
self.assertEqual(bid['status'], 'active')
self.assertTrue('value' in bid)
self.assertTrue('tenderers' in bid)
self.assertTrue('date' in bid)
def test_bids_activation_on_tender_documents(self):
    """Uploading a tender document must invalidate every submitted bid.

    Submits all fixture bids, verifies they start 'active', then has the
    tender owner upload a document (a change of tender conditions) and
    verifies every bid switched to 'invalid'.
    """
    token_by_bid = {}
    # Submit one bid per fixture and remember its owner access token.
    for bid_data in test_bids:
        response = self.app.post_json(
            '/tenders/{}/bids'.format(self.tender_id), {'data': bid_data})
        self.assertEqual(response.status, '201 Created')
        self.assertEqual(response.content_type, 'application/json')
        token_by_bid[response.json['data']['id']] = response.json['access']['token']

    def assert_all_bids_have_status(expected):
        # Fetch each bid with its owner token and check its status.
        for bid_id, token in token_by_bid.items():
            response = self.app.get(
                '/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid_id, token))
            self.assertEqual(response.status, '200 OK')
            self.assertEqual(response.json['data']['status'], expected)

    assert_all_bids_have_status('active')
    # Tender owner uploads a document; this counts as editing the tender.
    response = self.app.post(
        '/tenders/{}/documents?acc_token={}'.format(
            self.tender_id, self.tender_token),
        upload_files=[('file', u'укр.doc', 'content')])
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    assert_all_bids_have_status('invalid')
class TenderBidFeaturesResourceTest(BaseTenderUAContentWebTest):
# Bid creation and validation on a tender that defines "features"
# (non-price evaluation parameters every bid must cover).
initial_data = test_features_tender_ua_data
initial_status = 'active.tendering'
def test_features_bidder(self):
# Two valid bids whose parameters cover every tender feature.
# The first omits "status" to exercise the server-side default;
# the second sets it explicitly.
test_features_bids = [
{
# "status": "active",
"parameters": [
{
"code": i["code"],
"value": 0.1,
}
for i in self.initial_data['features']
],
"tenderers": [
test_organization
],
"value": {
"amount": 469,
"currency": "UAH",
"valueAddedTaxIncluded": True
},
'selfEligible': True,
'selfQualified': True,
},
{
"status": "active",
"parameters": [
{
"code": i["code"],
"value": 0.15,
}
for i in self.initial_data['features']
],
"tenderers": [
test_organization
],
"value": {
"amount": 479,
"currency": "UAH",
"valueAddedTaxIncluded": True
},
'selfEligible': True,
'selfQualified': True,
}
]
for i in test_features_bids:
response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': i})
# Server defaults an omitted status to "active"; normalize the
# fixture so the whole-document comparison below holds.
i['status'] = "active"
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
# 'date' and 'id' are server-generated; drop them before comparing.
bid.pop(u'date')
bid.pop(u'id')
self.assertEqual(bid, i)
def test_features_bidder_invalid(self):
# A bid on a featured tender without 'parameters' must be rejected.
data = {
"tenderers": [
test_organization
],
"value": {
"amount": 469,
"currency": "UAH",
"valueAddedTaxIncluded": True
},
'selfEligible': True,
'selfQualified': True,
}
response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': data}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'This field is required.'], u'location': u'body', u'name': u'parameters'}
])
# Parameters present, but not covering all tender features.
data["parameters"] = [
{
"code": "OCDS-123454-AIR-INTAKE",
"value": 0.1,
}
]
response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': data}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'All features parameters is required.'], u'location': u'body', u'name': u'parameters'}
])
# Duplicate parameter code must be rejected.
data["parameters"].append({
"code": "OCDS-123454-AIR-INTAKE",
"value": 0.1,
})
response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': data}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'Parameter code should be uniq for all parameters'], u'location': u'body', u'name': u'parameters'}
])
# Parameter value outside the feature's enumerated values.
data["parameters"][1]["code"] = "OCDS-123454-YEARS"
data["parameters"][1]["value"] = 0.2
response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': data}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [{u'value': [u'value should be one of feature value.']}], u'location': u'body', u'name': u'parameters'}
])
class TenderBidDocumentResourceTest(BaseTenderUAContentWebTest):
# CRUD operations on bid documents during and after the tendering period.
initial_status = 'active.tendering'
def setUp(self):
# Every test starts with one submitted bid owned by this client;
# its id and owner access token are kept on the test instance.
super(TenderBidDocumentResourceTest, self).setUp()
# Create bid
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
bid = response.json['data']
self.bid_id = bid['id']
self.bid_token = response.json['access']['token']
def test_not_found(self):
# POST to an unknown tender id must 404 and name 'tender_id' in the error.
response = self.app.post('/tenders/some_id/bids/some_id/documents', status=404, upload_files=[
('file', 'name.doc', 'content')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
# POST to an unknown bid id must 404 and name 'bid_id'.
response = self.app.post('/tenders/{}/bids/some_id/documents'.format(self.tender_id), status=404, upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
# POST with the wrong multipart field name must 404 on body/'file'.
response = self.app.post('/tenders/{}/bids/{}/documents'.format(self.tender_id, self.bid_id), status=404, upload_files=[
('invalid_value', 'name.doc', 'content')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'body', u'name': u'file'}
])
# Same 404 behaviour for GET on the collection and item routes.
response = self.app.get('/tenders/some_id/bids/some_id/documents', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
response = self.app.get('/tenders/{}/bids/some_id/documents'.format(self.tender_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.get('/tenders/some_id/bids/some_id/documents/some_id', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
response = self.app.get('/tenders/{}/bids/some_id/documents/some_id'.format(self.tender_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.get('/tenders/{}/bids/{}/documents/some_id'.format(self.tender_id, self.bid_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'document_id'}
])
# Same 404 behaviour for PUT on the item route.
response = self.app.put('/tenders/some_id/bids/some_id/documents/some_id', status=404,
upload_files=[('file', 'name.doc', 'content2')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
response = self.app.put('/tenders/{}/bids/some_id/documents/some_id'.format(self.tender_id), status=404, upload_files=[
('file', 'name.doc', 'content2')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.put('/tenders/{}/bids/{}/documents/some_id'.format(
self.tender_id, self.bid_id), status=404, upload_files=[('file', 'name.doc', 'content2')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'document_id'}
])
# Even with invalid credentials the unknown-document PUT stays a 404.
self.app.authorization = ('Basic', ('invalid', ''))
response = self.app.put('/tenders/{}/bids/{}/documents/some_id'.format(
self.tender_id, self.bid_id), status=404, upload_files=[('file', 'name.doc', 'content2')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'document_id'}
])
def test_create_tender_bidder_document(self):
# Upload a document to the bid; the response carries id, title and
# a download URL whose query string is the access key.
response = self.app.post('/tenders/{}/bids/{}/documents'.format(
self.tender_id, self.bid_id), upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
self.assertEqual('name.doc', response.json["data"]["title"])
key = response.json["data"]["url"].split('?')[-1]
# During active.tendering the document list is hidden without the
# bid owner's token.
response = self.app.get('/tenders/{}/bids/{}/documents'.format(self.tender_id, self.bid_id), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't view bid documents in current (active.tendering) tender status")
# The owner can list documents (with and without ?all=true).
response = self.app.get('/tenders/{}/bids/{}/documents?acc_token={}'.format(self.tender_id, self.bid_id, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"][0]["id"])
self.assertEqual('name.doc', response.json["data"][0]["title"])
response = self.app.get('/tenders/{}/bids/{}/documents?all=true&acc_token={}'.format(self.tender_id, self.bid_id, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"][0]["id"])
self.assertEqual('name.doc', response.json["data"][0]["title"])
# A bogus ?download= key is a 404 on url/'download'.
response = self.app.get('/tenders/{}/bids/{}/documents/{}?download=some_id&acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, self.bid_token), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'download'}
])
# Download without owner token is forbidden while tendering is open.
response = self.app.get('/tenders/{}/bids/{}/documents/{}?{}'.format(
self.tender_id, self.bid_id, doc_id, key), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't view bid document in current (active.tendering) tender status")
# With the owner token the raw file content comes back.
response = self.app.get('/tenders/{}/bids/{}/documents/{}?{}&acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, key, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/msword')
self.assertEqual(response.content_length, 7)
self.assertEqual(response.body, 'content')
# Metadata view obeys the same token rule.
response = self.app.get('/tenders/{}/bids/{}/documents/{}'.format(
self.tender_id, self.bid_id, doc_id), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't view bid document in current (active.tendering) tender status")
response = self.app.get('/tenders/{}/bids/{}/documents/{}?acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertEqual('name.doc', response.json["data"]["title"])
# After awarding, uploads are rejected unless the bid's award is
# pending or active.
self.set_status('active.awarded')
response = self.app.post('/tenders/{}/bids/{}/documents'.format(
self.tender_id, self.bid_id), upload_files=[('file', 'name.doc', 'content')], status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't add document because award of bid is not in pending or active state")
def test_put_tender_bidder_document(self):
# Seed one document, then exercise replacement (PUT) paths.
response = self.app.post('/tenders/{}/bids/{}/documents'.format(
self.tender_id, self.bid_id), upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
# PUT with a wrong multipart field name is a 404 on body/'file'.
response = self.app.put('/tenders/{}/bids/{}/documents/{}'.format(self.tender_id, self.bid_id, doc_id),
status=404,
upload_files=[('invalid_name', 'name.doc', 'content')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'body', u'name': u'file'}
])
# Replace via multipart upload; the document keeps its id.
response = self.app.put('/tenders/{}/bids/{}/documents/{}'.format(
self.tender_id, self.bid_id, doc_id), upload_files=[('file', 'name.doc', 'content2')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
key = response.json["data"]["url"].split('?')[-1]
response = self.app.get('/tenders/{}/bids/{}/documents/{}?{}&acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, key, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/msword')
self.assertEqual(response.content_length, 8)
self.assertEqual(response.body, 'content2')
response = self.app.get('/tenders/{}/bids/{}/documents/{}?acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertEqual('name.doc', response.json["data"]["title"])
# Replace via a raw request body with an explicit content type.
response = self.app.put('/tenders/{}/bids/{}/documents/{}'.format(
self.tender_id, self.bid_id, doc_id), 'content3', content_type='application/msword')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
key = response.json["data"]["url"].split('?')[-1]
response = self.app.get('/tenders/{}/bids/{}/documents/{}?{}&acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, key, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/msword')
self.assertEqual(response.content_length, 8)
self.assertEqual(response.body, 'content3')
# After awarding, replacement is rejected for a bid without a
# pending/active award.
self.set_status('active.awarded')
response = self.app.put('/tenders/{}/bids/{}/documents/{}'.format(
self.tender_id, self.bid_id, doc_id), upload_files=[('file', 'name.doc', 'content3')], status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update document because award of bid is not in pending or active state")
def test_patch_tender_bidder_document(self):
# Seed one document, then exercise metadata PATCH validation.
response = self.app.post('/tenders/{}/bids/{}/documents'.format(
self.tender_id, self.bid_id), upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
# documentOf=lot requires a relatedItem.
response = self.app.patch_json('/tenders/{}/bids/{}/documents/{}'.format(self.tender_id, self.bid_id, doc_id), {"data": {
"documentOf": "lot"
}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'This field is required.'], u'location': u'body', u'name': u'relatedItem'},
])
# relatedItem must reference one of the tender's lots.
response = self.app.patch_json('/tenders/{}/bids/{}/documents/{}'.format(self.tender_id, self.bid_id, doc_id), {"data": {
"documentOf": "lot",
"relatedItem": '0' * 32
}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'relatedItem should be one of lots'], u'location': u'body', u'name': u'relatedItem'}
])
# A plain description patch succeeds and is readable by the owner.
response = self.app.patch_json('/tenders/{}/bids/{}/documents/{}'.format(self.tender_id, self.bid_id, doc_id), {"data": {"description": "document description"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
response = self.app.get('/tenders/{}/bids/{}/documents/{}?acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertEqual('document description', response.json["data"]["description"])
# After awarding, PATCH is rejected for a bid without a
# pending/active award.
self.set_status('active.awarded')
response = self.app.patch_json('/tenders/{}/bids/{}/documents/{}'.format(self.tender_id, self.bid_id, doc_id), {"data": {"description": "document description"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update document because award of bid is not in pending or active state")
def test_create_tender_bidder_document_nopending(self):
# A second bid (no award of its own in qualification) cannot touch
# its documents once the tender reaches active.qualification.
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
bid = response.json['data']
bid_id = bid['id']
response = self.app.post('/tenders/{}/bids/{}/documents'.format(
self.tender_id, bid_id), upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
self.set_status('active.qualification')
# PATCH, PUT and POST must all be rejected with the same reason.
response = self.app.patch_json('/tenders/{}/bids/{}/documents/{}'.format(
self.tender_id, bid_id, doc_id), {"data": {"description": "document description"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update document because award of bid is not in pending or active state")
response = self.app.put('/tenders/{}/bids/{}/documents/{}'.format(
self.tender_id, bid_id, doc_id), 'content3', content_type='application/msword', status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update document because award of bid is not in pending or active state")
response = self.app.post('/tenders/{}/bids/{}/documents'.format(
self.tender_id, bid_id), upload_files=[('file', 'name.doc', 'content')], status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't add document because award of bid is not in pending or active state")
def suite():
    """Assemble this module's test cases into one suite (stable order)."""
    tests = unittest.TestSuite()
    for case in (TenderBidDocumentResourceTest,
                 TenderBidFeaturesResourceTest,
                 TenderBidResourceTest):
        tests.addTest(unittest.makeSuite(case))
    return tests
# Allow running this module directly; 'suite' controls which tests run.
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 54.38856 | 300 | 0.621958 |
import unittest
from copy import deepcopy
from openprocurement.api.tests.base import test_organization
from openprocurement.tender.openua.tests.base import test_bids
from openprocurement.tender.openuadefense.tests.base import (
BaseTenderUAContentWebTest,
test_tender_data,
test_features_tender_ua_data)
class TenderBidResourceTest(BaseTenderUAContentWebTest):
initial_status = 'active.tendering'
def test_create_tender_biddder_invalid(self):
# Unknown tender id: 404 locating url/'tender_id'.
response = self.app.post_json('/tenders/some_id/bids', {
'data': {'tenderers': [test_organization], "value": {"amount": 500}}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
request_path = '/tenders/{}/bids'.format(self.tender_id)
# Non-JSON content type: 415.
response = self.app.post(request_path, 'data', status=415)
self.assertEqual(response.status, '415 Unsupported Media Type')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description':
u"Content-Type header should be one of ['application/json']", u'location': u'header', u'name': u'Content-Type'}
])
# JSON content type but undecodable body: 422.
response = self.app.post(
request_path, 'data', content_type='application/json', status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'No JSON object could be decoded',
u'location': u'body', u'name': u'data'}
])
# Valid JSON that is not an object with 'data': 422.
response = self.app.post_json(request_path, 'data', status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Data not available',
u'location': u'body', u'name': u'data'}
])
response = self.app.post_json(
request_path, {'not_data': {}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Data not available',
u'location': u'body', u'name': u'data'}
])
# Unknown field in the bid payload: rejected as a rogue field.
response = self.app.post_json(request_path, {'data': {
'invalid_field': 'invalid_value'}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Rogue field', u'location':
u'body', u'name': u'invalid_field'}
])
# tenderer identifier must be a mapping, not a bare string.
response = self.app.post_json(request_path, {
'data': {'tenderers': [{'identifier': 'invalid_value'}]}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': {u'identifier': [
u'Please use a mapping for this field or Identifier instance instead of unicode.']}, u'location': u'body', u'name': u'tenderers'}
])
# Empty identifier: every required tenderer sub-field reported.
response = self.app.post_json(request_path, {
'data': {'tenderers': [{'identifier': {}}], 'selfEligible': True, 'selfQualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [{u'contactPoint': [u'This field is required.'], u'identifier': {u'scheme': [u'This field is required.'], u'id': [u'This field is required.']}, u'name': [u'This field is required.'], u'address': [u'This field is required.']}], u'location': u'body', u'name': u'tenderers'}
])
# Malformed identifier URI is flagged alongside the missing fields.
response = self.app.post_json(request_path, {'data': {'tenderers': [{
'name': 'name', 'identifier': {'uri': 'invalid_value'}}], 'selfEligible': True, 'selfQualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [{u'contactPoint': [u'This field is required.'], u'identifier': {u'scheme': [u'This field is required.'], u'id': [u'This field is required.'], u'uri': [u'Not a well formed URL.']}, u'address': [u'This field is required.']}], u'location': u'body', u'name': u'tenderers'}
])
# Bid without a value: 422 on body/'value'.
response = self.app.post_json(request_path, {'data': {'tenderers': [test_organization], 'selfEligible': True, 'selfQualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'This field is required.'], u'location': u'body', u'name': u'value'}
])
# VAT flag must match the tender's value.
response = self.app.post_json(request_path, {'data': {'tenderers': [test_organization], "value": {"amount": 500, 'valueAddedTaxIncluded': False}, 'selfEligible': True, 'selfQualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'valueAddedTaxIncluded of bid should be identical to valueAddedTaxIncluded of value of tender'], u'location': u'body', u'name': u'value'}
])
# Currency must match the tender's value.
response = self.app.post_json(request_path, {'data': {'tenderers': [test_organization], "value": {"amount": 500, 'currency': "USD"}, 'selfEligible': True, 'selfQualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'currency of bid should be identical to currency of value of tender'], u'location': u'body', u'name': u'value'},
])
# 'tenderers' given as a dict instead of a list: server-side
# iteration error surfaces as this 422 body message.
response = self.app.post_json(request_path, {'data': {'tenderers': test_organization, "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u"invalid literal for int() with base 10: 'contactPoint'", u'location': u'body', u'name': u'data'},
])
def test_create_tender_bidder(self):
    """A bid is accepted while tendering is open, rejected once complete."""
    bids_url = '/tenders/{}/bids'.format(self.tender_id)
    payload = {'data': {'tenderers': [test_organization],
                        "value": {"amount": 500},
                        'selfEligible': True, 'selfQualified': True}}
    response = self.app.post_json(bids_url, payload)
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    bid = response.json['data']
    # The created bid echoes the tenderer, gets an id, and the
    # Location header points at it.
    self.assertEqual(bid['tenderers'][0]['name'], test_organization['name'])
    self.assertIn('id', bid)
    self.assertIn(bid['id'], response.headers['Location'])
    # Once the tender is finished, the same submission must be refused.
    self.set_status('complete')
    response = self.app.post_json(bids_url, payload, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"],
                     "Can't add bid in current (complete) tender status")
def test_patch_tender_bidder(self):
# Create a draft bid owned by this client.
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'selfEligible': True, 'selfQualified': True, 'status': 'draft',
'tenderers': [test_organization], "value": {"amount": 500}}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
bid_token = response.json['access']['token']
# Raising the amount above the tender value is allowed on a draft...
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid['id'], bid_token), {"data": {"value": {"amount": 600}}}, status=200)
self.assertEqual(response.status, '200 OK')
# ...but activating it with that value is rejected.
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid['id'], bid_token), {"data": {'status': 'active'}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'value of bid should be less than value of tender'], u'location': u'body', u'name': u'value'}
])
# Activation together with a valid value succeeds.
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(
self.tender_id, bid['id'], bid_token), {"data": {'status': 'active', "value": {"amount": 500}}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
# Patching the amount updates the bid and refreshes its 'date'.
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(
self.tender_id, bid['id'], bid_token), {"data": {"value": {"amount": 400}}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["value"]["amount"], 400)
self.assertNotEqual(response.json['data']['date'], bid['date'])
# Unknown bid / unknown tender: 404 with a precise error location.
response = self.app.patch_json('/tenders/{}/bids/some_id'.format(self.tender_id), {"data": {"value": {"amount": 400}}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.patch_json('/tenders/some_id/bids/some_id', {"data": {"value": {"amount": 400}}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
# On a complete tender the bid stays readable but is no longer
# patchable.
self.set_status('complete')
response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["value"]["amount"], 400)
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(
self.tender_id, bid['id'], bid_token), {"data": {"value": {"amount": 400}}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update bid in current (complete) tender status")
def test_get_tender_bidder(self):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
bid_token = response.json['access']['token']
response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't view bid in current (active.tendering) tender status")
response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid['id'], bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], bid)
self.set_status('active.qualification')
response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
bid_data = response.json['data']
#self.assertIn(u'participationUrl', bid_data)
#bid_data.pop(u'participationUrl')
self.assertEqual(bid_data, bid)
response = self.app.get('/tenders/{}/bids/some_id'.format(self.tender_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.get('/tenders/some_id/bids/some_id', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
response = self.app.delete('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't delete bid in current (active.qualification) tender status")
def test_delete_tender_bidder(self):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
response = self.app.delete('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['id'], bid['id'])
self.assertEqual(response.json['data']['status'], 'deleted')
self.assertFalse('value' in response.json['data'])
self.assertFalse('tenderers' in response.json['data'])
self.assertFalse('date' in response.json['data'])
revisions = self.db.get(self.tender_id).get('revisions')
self.assertTrue(any([i for i in revisions[-2][u'changes'] if i['op'] == u'remove' and i['path'] == u'/bids']))
self.assertTrue(any([i for i in revisions[-1][u'changes'] if i['op'] == u'replace' and i['path'] == u'/bids/0/status']))
response = self.app.delete('/tenders/{}/bids/some_id'.format(self.tender_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.delete('/tenders/some_id/bids/some_id', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
self.set_status('complete')
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']['bids']), 1)
bid_data = response.json['data']['bids'][0]
self.assertEqual(bid_data['id'], bid['id'])
self.assertEqual(bid_data['status'], 'deleted')
self.assertFalse('value' in bid_data)
self.assertFalse('tenderers' in bid_data)
self.assertFalse('date' in bid_data)
    def test_deleted_bid_is_not_restorable(self):
        # NOTE(review): despite the test name, the assertions below expect the
        # patch of a deleted bid to SUCCEED (200 OK) and the status to come
        # back as 'active' -- i.e. the bid IS restored. Either the name or the
        # expectations appear wrong; confirm against the intended API behavior
        # before relying on this test.
        response = self.app.post_json('/tenders/{}/bids'.format(
            self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
        self.assertEqual(response.status, '201 Created')
        self.assertEqual(response.content_type, 'application/json')
        bid = response.json['data']
        bid_token = response.json['access']['token']
        # Delete the freshly created bid.
        response = self.app.delete('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['data']['id'], bid['id'])
        self.assertEqual(response.json['data']['status'], 'deleted')
        # Attempt to flip the deleted bid back to 'active'.
        response = self.app.patch_json('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']), {"data": {
            'status': 'active',
        }})
        self.assertEqual(response.status, '200 OK')
        response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid['id'], bid_token))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertNotEqual(response.json['data']['status'], 'deleted')
        self.assertEqual(response.json['data']['status'], 'active')
def test_deleted_bid_do_not_locks_tender_in_state(self):
bids = []
for bid_amount in (400, 405):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": bid_amount}, 'selfEligible': True, 'selfQualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bids.append(response.json['data'])
response = self.app.delete('/tenders/{}/bids/{}'.format(self.tender_id, bids[0]['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['id'], bids[0]['id'])
self.assertEqual(response.json['data']['status'], 'deleted')
self.set_status('active.qualification')
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active.qualification')
response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, bids[0]['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'deleted')
response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, bids[1]['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active')
def test_get_tender_tenderers(self):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
response = self.app.get('/tenders/{}/bids'.format(self.tender_id), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't view bids in current (active.tendering) tender status")
self.set_status('active.qualification')
response = self.app.get('/tenders/{}/bids'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'][0], bid)
response = self.app.get('/tenders/some_id/bids', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
def test_bid_Administrator_change(self):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
self.app.authorization = ('Basic', ('administrator', ''))
response = self.app.patch_json('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']), {"data": {
'tenderers': [{"identifier": {"id": "00000000"}}],
"value": {"amount": 400}
}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertNotEqual(response.json['data']["value"]["amount"], 400)
self.assertEqual(response.json['data']["tenderers"][0]["identifier"]["id"], "00000000")
    def test_bids_invalidation_on_tender_change(self):
        # Patching the tender conditions must flip every existing bid to
        # 'invalid' and strip its data; a bid submitted AFTER the change
        # stays active, and a later tender status change invalidates nothing.
        bids_access = {}
        # submit bids
        for data in test_bids:
            response = self.app.post_json('/tenders/{}/bids'.format(
                self.tender_id), {'data': data})
            self.assertEqual(response.status, '201 Created')
            self.assertEqual(response.content_type, 'application/json')
            bids_access[response.json['data']['id']] = response.json['access']['token']
        # check initial status
        for bid_id, token in bids_access.items():
            response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid_id, token))
            self.assertEqual(response.status, '200 OK')
            self.assertEqual(response.json['data']['status'], 'active')
        # update tender. we can set value that is less than a value in bids as
        # they will be invalidated by this request
        response = self.app.patch_json('/tenders/{}?acc_token={}'.format(self.tender_id, self.tender_token), {"data":
                {"value": {'amount': 300.0}}
            })
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.json['data']["value"]["amount"], 300)
        # check bids status
        for bid_id, token in bids_access.items():
            response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid_id, token))
            self.assertEqual(response.status, '200 OK')
            self.assertEqual(response.json['data']['status'], 'invalid')
        # check that tender status change does not invalidate bids
        # submit one more bid. check for invalid value first
        response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': test_bids[0]}, status=422)
        self.assertEqual(response.status, '422 Unprocessable Entity')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['status'], 'error')
        self.assertEqual(response.json['errors'], [
            {u'description': [u'value of bid should be less than value of tender'], u'location': u'body', u'name': u'value'}
        ])
        # and submit valid bid
        data = deepcopy(test_bids[0])
        data['value']['amount'] = 299
        response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': data})
        self.assertEqual(response.status, '201 Created')
        valid_bid_id = response.json['data']['id']
        # change tender status
        self.set_status('active.qualification')
        # check tender status
        response = self.app.get('/tenders/{}'.format(self.tender_id))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.json['data']['status'], 'active.qualification')
        # tender should display all bids
        self.assertEqual(len(response.json['data']['bids']), 3)
        # invalidated bids should show only 'id' and 'status' fields
        for bid in response.json['data']['bids']:
            if bid['status'] == 'invalid':
                self.assertTrue('id' in bid)
                self.assertFalse('value' in bid)
                self.assertFalse('tenderers' in bid)
                self.assertFalse('date' in bid)
        # invalidated bids stay invalidated
        for bid_id, token in bids_access.items():
            response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, bid_id))
            self.assertEqual(response.status, '200 OK')
            self.assertEqual(response.json['data']['status'], 'invalid')
            # invalidated bids displays only 'id' and 'status' fields
            self.assertFalse('value' in response.json['data'])
            self.assertFalse('tenderers' in response.json['data'])
            self.assertFalse('date' in response.json['data'])
        # and valid bid is not invalidated
        response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, valid_bid_id))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.json['data']['status'], 'active')
        # and displays all his data
        self.assertTrue('value' in response.json['data'])
        self.assertTrue('tenderers' in response.json['data'])
        self.assertTrue('date' in response.json['data'])
        # check bids availability on finished tender
        self.set_status('complete')
        response = self.app.get('/tenders/{}'.format(self.tender_id))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(len(response.json['data']['bids']), 3)
        for bid in response.json['data']['bids']:
            if bid['id'] in bids_access:  # previously invalidated bids
                self.assertEqual(bid['status'], 'invalid')
                self.assertFalse('value' in bid)
                self.assertFalse('tenderers' in bid)
                self.assertFalse('date' in bid)
            else:  # valid bid
                self.assertEqual(bid['status'], 'active')
                self.assertTrue('value' in bid)
                self.assertTrue('tenderers' in bid)
                self.assertTrue('date' in bid)
def test_bids_activation_on_tender_documents(self):
bids_access = {}
# submit bids
for data in test_bids:
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bids_access[response.json['data']['id']] = response.json['access']['token']
# check initial status
for bid_id, token in bids_access.items():
response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid_id, token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active')
response = self.app.post('/tenders/{}/documents?acc_token={}'.format(
self.tender_id, self.tender_token), upload_files=[('file', u'укр.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
for bid_id, token in bids_access.items():
response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid_id, token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'invalid')
class TenderBidFeaturesResourceTest(BaseTenderUAContentWebTest):
    """Bid submission on a UA tender that declares weighted 'features'."""
    initial_data = test_features_tender_ua_data
    initial_status = 'active.tendering'
    def test_features_bidder(self):
        # Valid bids: each carries one parameter per declared feature; the
        # stored bid must round-trip exactly (minus server-generated fields).
        test_features_bids = [
            {
                # "status": "active",
                "parameters": [
                    {
                        "code": i["code"],
                        "value": 0.1,
                    }
                    for i in self.initial_data['features']
                ],
                "tenderers": [
                    test_organization
                ],
                "value": {
                    "amount": 469,
                    "currency": "UAH",
                    "valueAddedTaxIncluded": True
                },
                'selfEligible': True,
                'selfQualified': True,
            },
            {
                "status": "active",
                "parameters": [
                    {
                        "code": i["code"],
                        "value": 0.15,
                    }
                    for i in self.initial_data['features']
                ],
                "tenderers": [
                    test_organization
                ],
                "value": {
                    "amount": 479,
                    "currency": "UAH",
                    "valueAddedTaxIncluded": True
                },
                'selfEligible': True,
                'selfQualified': True,
            }
        ]
        for i in test_features_bids:
            response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': i})
            # the first fixture omits 'status'; the server defaults it to
            # "active", so patch the fixture before the deep comparison below
            i['status'] = "active"
            self.assertEqual(response.status, '201 Created')
            self.assertEqual(response.content_type, 'application/json')
            bid = response.json['data']
            bid.pop(u'date')
            bid.pop(u'id')
            self.assertEqual(bid, i)
    def test_features_bidder_invalid(self):
        # Missing 'parameters' entirely -> required-field 422.
        data = {
            "tenderers": [
                test_organization
            ],
            "value": {
                "amount": 469,
                "currency": "UAH",
                "valueAddedTaxIncluded": True
            },
            'selfEligible': True,
            'selfQualified': True,
        }
        response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': data}, status=422)
        self.assertEqual(response.status, '422 Unprocessable Entity')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['status'], 'error')
        self.assertEqual(response.json['errors'], [
            {u'description': [u'This field is required.'], u'location': u'body', u'name': u'parameters'}
        ])
        # Covering only one of the declared features is still rejected.
        data["parameters"] = [
            {
                "code": "OCDS-123454-AIR-INTAKE",
                "value": 0.1,
            }
        ]
        response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': data}, status=422)
        self.assertEqual(response.status, '422 Unprocessable Entity')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['status'], 'error')
        self.assertEqual(response.json['errors'], [
            {u'description': [u'All features parameters is required.'], u'location': u'body', u'name': u'parameters'}
        ])
        # Duplicate parameter codes are rejected.
        data["parameters"].append({
            "code": "OCDS-123454-AIR-INTAKE",
            "value": 0.1,
        })
        response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': data}, status=422)
        self.assertEqual(response.status, '422 Unprocessable Entity')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['status'], 'error')
        self.assertEqual(response.json['errors'], [
            {u'description': [u'Parameter code should be uniq for all parameters'], u'location': u'body', u'name': u'parameters'}
        ])
        # A value outside the feature's enumerated values is rejected.
        data["parameters"][1]["code"] = "OCDS-123454-YEARS"
        data["parameters"][1]["value"] = 0.2
        response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': data}, status=422)
        self.assertEqual(response.status, '422 Unprocessable Entity')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['status'], 'error')
        self.assertEqual(response.json['errors'], [
            {u'description': [{u'value': [u'value should be one of feature value.']}], u'location': u'body', u'name': u'parameters'}
        ])
class TenderBidDocumentResourceTest(BaseTenderUAContentWebTest):
initial_status = 'active.tendering'
def setUp(self):
super(TenderBidDocumentResourceTest, self).setUp()
# Create bid
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
bid = response.json['data']
self.bid_id = bid['id']
self.bid_token = response.json['access']['token']
def test_not_found(self):
response = self.app.post('/tenders/some_id/bids/some_id/documents', status=404, upload_files=[
('file', 'name.doc', 'content')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
response = self.app.post('/tenders/{}/bids/some_id/documents'.format(self.tender_id), status=404, upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.post('/tenders/{}/bids/{}/documents'.format(self.tender_id, self.bid_id), status=404, upload_files=[
('invalid_value', 'name.doc', 'content')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'body', u'name': u'file'}
])
response = self.app.get('/tenders/some_id/bids/some_id/documents', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
response = self.app.get('/tenders/{}/bids/some_id/documents'.format(self.tender_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.get('/tenders/some_id/bids/some_id/documents/some_id', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
response = self.app.get('/tenders/{}/bids/some_id/documents/some_id'.format(self.tender_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.get('/tenders/{}/bids/{}/documents/some_id'.format(self.tender_id, self.bid_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'document_id'}
])
response = self.app.put('/tenders/some_id/bids/some_id/documents/some_id', status=404,
upload_files=[('file', 'name.doc', 'content2')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
response = self.app.put('/tenders/{}/bids/some_id/documents/some_id'.format(self.tender_id), status=404, upload_files=[
('file', 'name.doc', 'content2')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.put('/tenders/{}/bids/{}/documents/some_id'.format(
self.tender_id, self.bid_id), status=404, upload_files=[('file', 'name.doc', 'content2')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'document_id'}
])
self.app.authorization = ('Basic', ('invalid', ''))
response = self.app.put('/tenders/{}/bids/{}/documents/some_id'.format(
self.tender_id, self.bid_id), status=404, upload_files=[('file', 'name.doc', 'content2')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'document_id'}
])
def test_create_tender_bidder_document(self):
response = self.app.post('/tenders/{}/bids/{}/documents'.format(
self.tender_id, self.bid_id), upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
self.assertEqual('name.doc', response.json["data"]["title"])
key = response.json["data"]["url"].split('?')[-1]
response = self.app.get('/tenders/{}/bids/{}/documents'.format(self.tender_id, self.bid_id), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't view bid documents in current (active.tendering) tender status")
response = self.app.get('/tenders/{}/bids/{}/documents?acc_token={}'.format(self.tender_id, self.bid_id, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"][0]["id"])
self.assertEqual('name.doc', response.json["data"][0]["title"])
response = self.app.get('/tenders/{}/bids/{}/documents?all=true&acc_token={}'.format(self.tender_id, self.bid_id, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"][0]["id"])
self.assertEqual('name.doc', response.json["data"][0]["title"])
response = self.app.get('/tenders/{}/bids/{}/documents/{}?download=some_id&acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, self.bid_token), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'download'}
])
response = self.app.get('/tenders/{}/bids/{}/documents/{}?{}'.format(
self.tender_id, self.bid_id, doc_id, key), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't view bid document in current (active.tendering) tender status")
response = self.app.get('/tenders/{}/bids/{}/documents/{}?{}&acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, key, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/msword')
self.assertEqual(response.content_length, 7)
self.assertEqual(response.body, 'content')
response = self.app.get('/tenders/{}/bids/{}/documents/{}'.format(
self.tender_id, self.bid_id, doc_id), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't view bid document in current (active.tendering) tender status")
response = self.app.get('/tenders/{}/bids/{}/documents/{}?acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertEqual('name.doc', response.json["data"]["title"])
self.set_status('active.awarded')
response = self.app.post('/tenders/{}/bids/{}/documents'.format(
self.tender_id, self.bid_id), upload_files=[('file', 'name.doc', 'content')], status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't add document because award of bid is not in pending or active state")
def test_put_tender_bidder_document(self):
response = self.app.post('/tenders/{}/bids/{}/documents'.format(
self.tender_id, self.bid_id), upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
response = self.app.put('/tenders/{}/bids/{}/documents/{}'.format(self.tender_id, self.bid_id, doc_id),
status=404,
upload_files=[('invalid_name', 'name.doc', 'content')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'body', u'name': u'file'}
])
response = self.app.put('/tenders/{}/bids/{}/documents/{}'.format(
self.tender_id, self.bid_id, doc_id), upload_files=[('file', 'name.doc', 'content2')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
key = response.json["data"]["url"].split('?')[-1]
response = self.app.get('/tenders/{}/bids/{}/documents/{}?{}&acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, key, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/msword')
self.assertEqual(response.content_length, 8)
self.assertEqual(response.body, 'content2')
response = self.app.get('/tenders/{}/bids/{}/documents/{}?acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertEqual('name.doc', response.json["data"]["title"])
response = self.app.put('/tenders/{}/bids/{}/documents/{}'.format(
self.tender_id, self.bid_id, doc_id), 'content3', content_type='application/msword')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
key = response.json["data"]["url"].split('?')[-1]
response = self.app.get('/tenders/{}/bids/{}/documents/{}?{}&acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, key, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/msword')
self.assertEqual(response.content_length, 8)
self.assertEqual(response.body, 'content3')
self.set_status('active.awarded')
response = self.app.put('/tenders/{}/bids/{}/documents/{}'.format(
self.tender_id, self.bid_id, doc_id), upload_files=[('file', 'name.doc', 'content3')], status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update document because award of bid is not in pending or active state")
def test_patch_tender_bidder_document(self):
response = self.app.post('/tenders/{}/bids/{}/documents'.format(
self.tender_id, self.bid_id), upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
response = self.app.patch_json('/tenders/{}/bids/{}/documents/{}'.format(self.tender_id, self.bid_id, doc_id), {"data": {
"documentOf": "lot"
}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'This field is required.'], u'location': u'body', u'name': u'relatedItem'},
])
response = self.app.patch_json('/tenders/{}/bids/{}/documents/{}'.format(self.tender_id, self.bid_id, doc_id), {"data": {
"documentOf": "lot",
"relatedItem": '0' * 32
}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'relatedItem should be one of lots'], u'location': u'body', u'name': u'relatedItem'}
])
response = self.app.patch_json('/tenders/{}/bids/{}/documents/{}'.format(self.tender_id, self.bid_id, doc_id), {"data": {"description": "document description"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
response = self.app.get('/tenders/{}/bids/{}/documents/{}?acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertEqual('document description', response.json["data"]["description"])
self.set_status('active.awarded')
response = self.app.patch_json('/tenders/{}/bids/{}/documents/{}'.format(self.tender_id, self.bid_id, doc_id), {"data": {"description": "document description"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update document because award of bid is not in pending or active state")
    def test_create_tender_bidder_document_nopending(self):
        """Bid documents are frozen once qualification starts without a pending/active award."""
        # Create a bid while the tender still accepts them.
        response = self.app.post_json('/tenders/{}/bids'.format(
            self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
        bid = response.json['data']
        bid_id = bid['id']
        # Uploading a document is still allowed at this point.
        response = self.app.post('/tenders/{}/bids/{}/documents'.format(
            self.tender_id, bid_id), upload_files=[('file', 'name.doc', 'content')])
        self.assertEqual(response.status, '201 Created')
        self.assertEqual(response.content_type, 'application/json')
        doc_id = response.json["data"]['id']
        self.assertIn(doc_id, response.headers['Location'])
        # Move the tender into qualification; this bid has no pending/active award.
        self.set_status('active.qualification')
        # PATCHing the document metadata is now rejected...
        response = self.app.patch_json('/tenders/{}/bids/{}/documents/{}'.format(
            self.tender_id, bid_id, doc_id), {"data": {"description": "document description"}}, status=403)
        self.assertEqual(response.status, '403 Forbidden')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['errors'][0]["description"], "Can't update document because award of bid is not in pending or active state")
        # ...as is replacing the file contents with PUT...
        response = self.app.put('/tenders/{}/bids/{}/documents/{}'.format(
            self.tender_id, bid_id, doc_id), 'content3', content_type='application/msword', status=403)
        self.assertEqual(response.status, '403 Forbidden')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['errors'][0]["description"], "Can't update document because award of bid is not in pending or active state")
        # ...and uploading an additional document.
        response = self.app.post('/tenders/{}/bids/{}/documents'.format(
            self.tender_id, bid_id), upload_files=[('file', 'name.doc', 'content')], status=403)
        self.assertEqual(response.status, '403 Forbidden')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['errors'][0]["description"], "Can't add document because award of bid is not in pending or active state")
def suite():
    """Build the test suite covering all tender-bid resource test cases."""
    # Local name deliberately distinct from the function name to avoid shadowing.
    test_suite = unittest.TestSuite()
    for case in (
        TenderBidDocumentResourceTest,
        TenderBidFeaturesResourceTest,
        TenderBidResourceTest,
    ):
        test_suite.addTest(unittest.makeSuite(case))
    return test_suite
# Allow running this test module directly; `suite` selects the cases above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| true | true |
f711872787c8b74e9d0ed508cf7608719b9ca9e6 | 578 | py | Python | simsoexp/migrations/0007_schedulingpolicy_contributor.py | Scriptopathe/simso-exp | d618463272f42a1ca3345ff162b7a9f1a6fab3f8 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | simsoexp/migrations/0007_schedulingpolicy_contributor.py | Scriptopathe/simso-exp | d618463272f42a1ca3345ff162b7a9f1a6fab3f8 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2015-07-31T15:00:21.000Z | 2015-07-31T15:09:37.000Z | simsoexp/migrations/0007_schedulingpolicy_contributor.py | Scriptopathe/simso-exp | d618463272f42a1ca3345ff162b7a9f1a6fab3f8 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Add a ``contributor`` foreign key (to the configured user model) on SchedulingPolicy."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('simsoexp', '0006_auto_20150721_1432'),
    ]

    operations = [
        migrations.AddField(
            model_name='schedulingpolicy',
            name='contributor',
            # default=1 backfills existing rows with the user whose pk is 1;
            # preserve_default=False drops that default once the column exists.
            field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
    ]
| 25.130435 | 76 | 0.65917 |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('simsoexp', '0006_auto_20150721_1432'),
]
operations = [
migrations.AddField(
model_name='schedulingpolicy',
name='contributor',
field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
| true | true |
f71187924dd29115494236387e90989c2fbe1e27 | 15,473 | py | Python | tests/integration/test_install_basic.py | jrottenberg/pipenv | cda15b3b30e04e038ee286bced6c47a311f1e0ec | [
"MIT"
] | 6,263 | 2017-01-20T17:41:36.000Z | 2022-02-15T20:48:57.000Z | tests/integration/test_install_basic.py | jrottenberg/pipenv | cda15b3b30e04e038ee286bced6c47a311f1e0ec | [
"MIT"
] | 1,100 | 2017-01-20T19:41:52.000Z | 2017-12-06T09:15:13.000Z | tests/integration/test_install_basic.py | jrottenberg/pipenv | cda15b3b30e04e038ee286bced6c47a311f1e0ec | [
"MIT"
] | 366 | 2017-01-21T10:06:52.000Z | 2021-11-25T17:09:19.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
import pytest
from flaky import flaky
from pipenv._compat import Path, TemporaryDirectory
from pipenv.utils import temp_environ
from pipenv.vendor import delegator
@pytest.mark.setup
@pytest.mark.basic
@pytest.mark.install
def test_basic_setup(PipenvInstance):
    """Installing into a project created without a Pipfile records the full dependency graph."""
    with PipenvInstance() as p:
        # NOTE(review): the inner context manager shadows the outer ``p``; only the
        # inner (pipfile=False) instance is exercised — confirm the outer nesting
        # is intentional setup/teardown and not a leftover.
        with PipenvInstance(pipfile=False) as p:
            c = p.pipenv("install requests")
            assert c.return_code == 0
            assert "requests" in p.pipfile["packages"]
            # requests plus its transitive dependencies must all be locked.
            assert "requests" in p.lockfile["default"]
            assert "chardet" in p.lockfile["default"]
            assert "idna" in p.lockfile["default"]
            assert "urllib3" in p.lockfile["default"]
            assert "certifi" in p.lockfile["default"]
@flaky
@pytest.mark.basic
@pytest.mark.install
@pytest.mark.skip_osx
def test_basic_install(PipenvInstance):
    """`pipenv install requests` records the package and its full dependency graph."""
    with PipenvInstance() as p:
        result = p.pipenv("install requests")
        assert result.return_code == 0
        assert "requests" in p.pipfile["packages"]
        # requests and each transitive dependency must be pinned in the lockfile.
        for dependency in ("requests", "chardet", "idna", "urllib3", "certifi"):
            assert dependency in p.lockfile["default"]
@flaky
@pytest.mark.basic
@pytest.mark.install
def test_mirror_install(PipenvInstance):
    """--pypi-mirror is used for fetching but must not leak into Pipfile/lockfile sources."""
    with temp_environ(), PipenvInstance(chdir=True) as p:
        # Pop the test index so pypi.org becomes the default source; its value
        # (or the fallback URL) is then passed explicitly as the mirror.
        mirror_url = os.environ.pop(
            "PIPENV_TEST_INDEX", "https://pypi.python.org/simple"
        )
        assert "pypi.org" not in mirror_url
        # This should sufficiently demonstrate the mirror functionality
        # since pypi.org is the default when PIPENV_TEST_INDEX is unset.
        c = p.pipenv("install requests --pypi-mirror {0}".format(mirror_url))
        assert c.return_code == 0
        # Ensure the --pypi-mirror parameter hasn't altered the Pipfile or Pipfile.lock sources
        assert len(p.pipfile["source"]) == 1
        assert len(p.lockfile["_meta"]["sources"]) == 1
        assert "https://pypi.org/simple" == p.pipfile["source"][0]["url"]
        assert "https://pypi.org/simple" == p.lockfile["_meta"]["sources"][0]["url"]
        assert "requests" in p.pipfile["packages"]
        assert "requests" in p.lockfile["default"]
        assert "chardet" in p.lockfile["default"]
        assert "idna" in p.lockfile["default"]
        assert "urllib3" in p.lockfile["default"]
        assert "certifi" in p.lockfile["default"]
@flaky
@pytest.mark.basic
@pytest.mark.install
@pytest.mark.needs_internet
def test_bad_mirror_install(PipenvInstance):
    """Install must fail when the supplied --pypi-mirror is unreachable."""
    with temp_environ(), PipenvInstance(chdir=True) as p:
        # This demonstrates that the mirror parameter is being used
        os.environ.pop("PIPENV_TEST_INDEX", None)
        c = p.pipenv("install requests --pypi-mirror https://pypi.example.org")
        assert c.return_code != 0
@pytest.mark.lock
@pytest.mark.complex
@pytest.mark.skip(reason="Does not work unless you can explicitly install into py2")
def test_complex_lock(PipenvInstance):
    """Locking apscheduler must pull in its Python-2 backport dependencies."""
    with PipenvInstance() as p:
        result = p.pipenv("install apscheduler")
        assert result.return_code == 0
        assert "apscheduler" in p.pipfile["packages"]
        for backport in ("funcsigs", "futures"):
            assert backport in p.lockfile[u"default"]
@flaky
@pytest.mark.dev
@pytest.mark.run
def test_basic_dev_install(PipenvInstance):
    """`install --dev` records requests under the dev sections and makes it importable."""
    with PipenvInstance() as p:
        result = p.pipenv("install requests --dev")
        assert result.return_code == 0
        assert "requests" in p.pipfile["dev-packages"]
        # requests and its transitive dependencies land in the develop lock section.
        for dependency in ("requests", "chardet", "idna", "urllib3", "certifi"):
            assert dependency in p.lockfile["develop"]
        # The dev install must actually be importable inside the virtualenv.
        result = p.pipenv("run python -m requests.help")
        assert result.return_code == 0
@flaky
@pytest.mark.dev
@pytest.mark.basic
@pytest.mark.install
def test_install_without_dev(PipenvInstance):
    """Ensure that running `pipenv install` doesn't install dev packages"""
    with PipenvInstance(chdir=True) as p:
        with open(p.pipfile_path, "w") as f:
            contents = """
[packages]
six = "*"
[dev-packages]
tablib = "*"
            """.strip()
            f.write(contents)
        c = p.pipenv("install")
        assert c.return_code == 0
        assert "six" in p.pipfile["packages"]
        assert "tablib" in p.pipfile["dev-packages"]
        # Both sections are locked...
        assert "six" in p.lockfile["default"]
        assert "tablib" in p.lockfile["develop"]
        # ...but only the default section is actually installed.
        c = p.pipenv('run python -c "import tablib"')
        assert c.return_code != 0
        c = p.pipenv('run python -c "import six"')
        assert c.return_code == 0
@flaky
@pytest.mark.basic
@pytest.mark.install
def test_install_without_dev_section(PipenvInstance):
    """A Pipfile with no [dev-packages] table installs cleanly and locks an empty develop section."""
    with PipenvInstance() as p:
        with open(p.pipfile_path, "w") as f:
            contents = """
[packages]
six = "*"
            """.strip()
            f.write(contents)
        c = p.pipenv("install")
        assert c.return_code == 0
        assert "six" in p.pipfile["packages"]
        # Missing [dev-packages] is treated as an empty mapping, not an error.
        assert p.pipfile.get("dev-packages", {}) == {}
        assert "six" in p.lockfile["default"]
        assert p.lockfile["develop"] == {}
        c = p.pipenv('run python -c "import six"')
        assert c.return_code == 0
@flaky
@pytest.mark.lock
@pytest.mark.extras
@pytest.mark.install
def test_extras_install(PipenvInstance):
    """Installing requests[socks] records the extra in the Pipfile and locks pysocks."""
    with PipenvInstance(chdir=True) as p:
        result = p.pipenv("install requests[socks]")
        assert result.return_code == 0
        assert "requests" in p.pipfile["packages"]
        assert "extras" in p.pipfile["packages"]["requests"]
        # The extra's dependency (pysocks) is locked alongside the regular graph.
        for dependency in ("requests", "chardet", "idna", "urllib3", "pysocks"):
            assert dependency in p.lockfile["default"]
@flaky
@pytest.mark.pin
@pytest.mark.basic
@pytest.mark.install
def test_windows_pinned_pipfile(PipenvInstance):
    """A Pipfile that pins an exact version installs and locks that package."""
    with PipenvInstance() as p:
        with open(p.pipfile_path, "w") as f:
            contents = """
[packages]
requests = "==2.19.1"
            """.strip()
            f.write(contents)
        c = p.pipenv("install")
        assert c.return_code == 0
        assert "requests" in p.pipfile["packages"]
        assert "requests" in p.lockfile["default"]
@flaky
@pytest.mark.basic
@pytest.mark.install
@pytest.mark.resolver
@pytest.mark.backup_resolver
def test_backup_resolver(PipenvInstance):
    """A non-standard version pin ('0.3.1-1') still resolves and locks (backup-resolver path)."""
    with PipenvInstance() as p:
        with open(p.pipfile_path, "w") as f:
            contents = """
[packages]
"ibm-db-sa-py3" = "==0.3.1-1"
            """.strip()
            f.write(contents)
        c = p.pipenv("install")
        assert c.return_code == 0
        assert "ibm-db-sa-py3" in p.lockfile["default"]
@flaky
@pytest.mark.run
@pytest.mark.alt
def test_alternative_version_specifier(PipenvInstance):
    """The inline-table form `requests = {version = "*"}` behaves like a plain "*" pin."""
    with PipenvInstance() as p:
        with open(p.pipfile_path, "w") as f:
            contents = """
[packages]
requests = {version = "*"}
            """.strip()
            f.write(contents)
        c = p.pipenv("install")
        assert c.return_code == 0
        # The package and its dependency graph are locked...
        assert "requests" in p.lockfile["default"]
        assert "idna" in p.lockfile["default"]
        assert "urllib3" in p.lockfile["default"]
        assert "certifi" in p.lockfile["default"]
        assert "chardet" in p.lockfile["default"]
        # ...and importable inside the virtualenv.
        c = p.pipenv('run python -c "import requests; import idna; import certifi;"')
        assert c.return_code == 0
@flaky
@pytest.mark.run
@pytest.mark.alt
def test_outline_table_specifier(PipenvInstance):
    """The TOML outline-table form `[packages.requests]` behaves like a plain "*" pin."""
    with PipenvInstance() as p:
        with open(p.pipfile_path, "w") as f:
            contents = """
[packages.requests]
version = "*"
            """.strip()
            f.write(contents)
        c = p.pipenv("install")
        assert c.return_code == 0
        # The package and its dependency graph are locked...
        assert "requests" in p.lockfile["default"]
        assert "idna" in p.lockfile["default"]
        assert "urllib3" in p.lockfile["default"]
        assert "certifi" in p.lockfile["default"]
        assert "chardet" in p.lockfile["default"]
        # ...and importable inside the virtualenv.
        c = p.pipenv('run python -c "import requests; import idna; import certifi;"')
        assert c.return_code == 0
@pytest.mark.bad
@pytest.mark.basic
@pytest.mark.install
def test_bad_packages(PipenvInstance):
    """Installing a nonexistent package exits with a nonzero status."""
    with PipenvInstance() as p:
        result = p.pipenv("install NotAPackage")
        assert result.return_code > 0
@pytest.mark.lock
@pytest.mark.extras
@pytest.mark.install
@pytest.mark.requirements
def test_requirements_to_pipfile(PipenvInstance, pypi):
    """An existing requirements.txt is imported into the Pipfile on first install."""
    with PipenvInstance(pipfile=False, chdir=True) as p:
        # Seed a requirements file pointing at the test index.
        with open("requirements.txt", "w") as f:
            f.write("-i {}\nrequests[socks]==2.19.1\n".format(pypi.url))
        result = p.pipenv("install")
        assert result.return_code == 0
        print(result.out)
        print(result.err)
        print(delegator.run("ls -l").out)
        # The requirement (including its extra) lands in the Pipfile...
        assert "requests" in p.pipfile["packages"]
        assert "extras" in p.pipfile["packages"]["requests"]
        # ...and the full dependency graph lands in the lockfile.
        for dependency in ("requests", "chardet", "idna", "urllib3", "pysocks"):
            assert dependency in p.lockfile["default"]
@pytest.mark.basic
@pytest.mark.install
@pytest.mark.skip_osx
@pytest.mark.requirements
def test_skip_requirements_when_pipfile(PipenvInstance):
    """Ensure requirements.txt is NOT imported when
    1. We do `pipenv install [package]`
    2. A Pipfile already exists when we run `pipenv install`.
    """
    with PipenvInstance(chdir=True) as p:
        # requirements.txt pins a requests version that must never be imported.
        with open("requirements.txt", "w") as f:
            f.write("requests==2.18.1\n")
        c = p.pipenv("install six")
        assert c.return_code == 0
        with open(p.pipfile_path, "w") as f:
            contents = """
[packages]
six = "*"
fake_package = "<0.12"
            """.strip()
            f.write(contents)
        c = p.pipenv("install")
        assert c.ok
        # Only the Pipfile's own entries are present...
        assert "fake_package" in p.pipfile["packages"]
        assert "fake-package" in p.lockfile["default"]
        assert "six" in p.pipfile["packages"]
        assert "six" in p.lockfile["default"]
        # ...and nothing leaked in from requirements.txt.
        assert "requests" not in p.pipfile["packages"]
        assert "requests" not in p.lockfile["default"]
@pytest.mark.cli
@pytest.mark.clean
def test_clean_on_empty_venv(PipenvInstance):
    """`pipenv clean` succeeds even when nothing has been installed yet."""
    with PipenvInstance() as p:
        result = p.pipenv("clean")
        assert result.return_code == 0
@pytest.mark.basic
@pytest.mark.install
def test_install_does_not_extrapolate_environ(PipenvInstance):
    """Ensure environment variables are not expanded in lock file.
    """
    with temp_environ(), PipenvInstance(chdir=True) as p:
        # Point ${PYPI_URL} at the fixture's index so installs still resolve.
        os.environ["PYPI_URL"] = p.pypi
        with open(p.pipfile_path, "w") as f:
            f.write(
                """
[[source]]
url = '${PYPI_URL}/simple'
verify_ssl = true
name = 'mockpi'
            """
            )
        # Ensure simple install does not extrapolate.
        c = p.pipenv("install")
        assert c.return_code == 0
        assert p.pipfile["source"][0]["url"] == "${PYPI_URL}/simple"
        assert p.lockfile["_meta"]["sources"][0]["url"] == "${PYPI_URL}/simple"
        # Ensure package install does not extrapolate.
        c = p.pipenv("install six")
        assert c.return_code == 0
        assert p.pipfile["source"][0]["url"] == "${PYPI_URL}/simple"
        assert p.lockfile["_meta"]["sources"][0]["url"] == "${PYPI_URL}/simple"
@pytest.mark.basic
@pytest.mark.editable
@pytest.mark.badparameter
@pytest.mark.install
def test_editable_no_args(PipenvInstance):
    """`install -e` without an argument fails with a usage error on stderr."""
    with PipenvInstance() as p:
        result = p.pipenv("install -e")
        assert result.return_code != 0
        assert "Error: -e option requires an argument" in result.err
@pytest.mark.basic
@pytest.mark.install
@pytest.mark.virtualenv
def test_install_venv_project_directory(PipenvInstance):
    """Test the project functionality during virtualenv creation.
    """
    with PipenvInstance(chdir=True) as p:
        with temp_environ(), TemporaryDirectory(
            prefix="pipenv-", suffix="temp_workon_home"
        ) as workon_home:
            # Force virtualenvs into a scratch WORKON_HOME, not the project dir.
            os.environ["WORKON_HOME"] = workon_home.name
            if "PIPENV_VENV_IN_PROJECT" in os.environ:
                del os.environ["PIPENV_VENV_IN_PROJECT"]
            c = p.pipenv("install six")
            assert c.return_code == 0
            # Parse the virtualenv location out of pipenv's stderr output.
            venv_loc = None
            for line in c.err.splitlines():
                if line.startswith("Virtualenv location:"):
                    venv_loc = Path(line.split(":", 1)[-1].strip())
            assert venv_loc is not None
            # The virtualenv must carry a `.project` marker file.
            assert venv_loc.joinpath(".project").exists()
@pytest.mark.cli
@pytest.mark.deploy
@pytest.mark.system
def test_system_and_deploy_work(PipenvInstance):
    """`install --system` (with and without --deploy) works from a locked project."""
    with PipenvInstance(chdir=True) as p:
        # Produce a lockfile, then drop the virtualenv so --system is exercised.
        c = p.pipenv("install tablib")
        assert c.return_code == 0
        c = p.pipenv("--rm")
        assert c.return_code == 0
        # A plain virtualenv (not pipenv-managed) stands in for the "system" env.
        c = delegator.run("virtualenv .venv")
        assert c.return_code == 0
        c = p.pipenv("install --system --deploy")
        assert c.return_code == 0
        c = p.pipenv("--rm")
        assert c.return_code == 0
        # Rewrite the Pipfile and verify --system also works without --deploy.
        Path(p.pipfile_path).write_text(
            u"""
[packages]
tablib = "*"
        """.strip()
        )
        c = p.pipenv("install --system")
        assert c.return_code == 0
@pytest.mark.basic
@pytest.mark.install
def test_install_creates_pipfile(PipenvInstance):
    """A bare `pipenv install` creates a Pipfile when none exists."""
    with PipenvInstance(chdir=True) as p:
        # Start from a directory with no Pipfile and no override env var.
        if os.path.isfile(p.pipfile_path):
            os.unlink(p.pipfile_path)
        os.environ.pop("PIPENV_PIPFILE", None)
        assert not os.path.isfile(p.pipfile_path)
        result = p.pipenv("install")
        assert result.return_code == 0
        assert os.path.isfile(p.pipfile_path)
@pytest.mark.basic
@pytest.mark.install
def test_install_non_exist_dep(PipenvInstance):
    """A failed install must not leave the package behind in the Pipfile."""
    with PipenvInstance(chdir=True) as p:
        result = p.pipenv("install dateutil")
        assert not result.ok
        assert "dateutil" not in p.pipfile["packages"]
@pytest.mark.basic
@pytest.mark.install
def test_install_package_with_dots(PipenvInstance):
    """Package names containing dots are recorded verbatim in the Pipfile."""
    with PipenvInstance(chdir=True) as p:
        result = p.pipenv("install backports.html")
        assert result.ok
        assert "backports.html" in p.pipfile["packages"]
@pytest.mark.basic
@pytest.mark.install
def test_rewrite_outline_table(PipenvInstance):
    """Installing rewrites TOML outline tables ([packages.requests]) into inline form."""
    with PipenvInstance(chdir=True) as p:
        with open(p.pipfile_path, 'w') as f:
            contents = """
[packages]
six = {version = "*"}
[packages.requests]
version = "*"
extras = ["socks"]
            """.strip()
            f.write(contents)
        c = p.pipenv("install flask")
        assert c.return_code == 0
        with open(p.pipfile_path) as f:
            contents = f.read()
        # The outline table is gone; all entries are written inline.
        assert "[packages.requests]" not in contents
        assert 'six = {version = "*"}' in contents
        assert 'requests = {version = "*"' in contents
        assert 'flask = "*"' in contents
| 31.195565 | 95 | 0.625541 |
from __future__ import absolute_import, print_function
import os
import pytest
from flaky import flaky
from pipenv._compat import Path, TemporaryDirectory
from pipenv.utils import temp_environ
from pipenv.vendor import delegator
@pytest.mark.setup
@pytest.mark.basic
@pytest.mark.install
def test_basic_setup(PipenvInstance):
with PipenvInstance() as p:
with PipenvInstance(pipfile=False) as p:
c = p.pipenv("install requests")
assert c.return_code == 0
assert "requests" in p.pipfile["packages"]
assert "requests" in p.lockfile["default"]
assert "chardet" in p.lockfile["default"]
assert "idna" in p.lockfile["default"]
assert "urllib3" in p.lockfile["default"]
assert "certifi" in p.lockfile["default"]
@flaky
@pytest.mark.basic
@pytest.mark.install
@pytest.mark.skip_osx
def test_basic_install(PipenvInstance):
with PipenvInstance() as p:
c = p.pipenv("install requests")
assert c.return_code == 0
assert "requests" in p.pipfile["packages"]
assert "requests" in p.lockfile["default"]
assert "chardet" in p.lockfile["default"]
assert "idna" in p.lockfile["default"]
assert "urllib3" in p.lockfile["default"]
assert "certifi" in p.lockfile["default"]
@flaky
@pytest.mark.basic
@pytest.mark.install
def test_mirror_install(PipenvInstance):
with temp_environ(), PipenvInstance(chdir=True) as p:
mirror_url = os.environ.pop(
"PIPENV_TEST_INDEX", "https://pypi.python.org/simple"
)
assert "pypi.org" not in mirror_url
c = p.pipenv("install requests --pypi-mirror {0}".format(mirror_url))
assert c.return_code == 0
assert len(p.pipfile["source"]) == 1
assert len(p.lockfile["_meta"]["sources"]) == 1
assert "https://pypi.org/simple" == p.pipfile["source"][0]["url"]
assert "https://pypi.org/simple" == p.lockfile["_meta"]["sources"][0]["url"]
assert "requests" in p.pipfile["packages"]
assert "requests" in p.lockfile["default"]
assert "chardet" in p.lockfile["default"]
assert "idna" in p.lockfile["default"]
assert "urllib3" in p.lockfile["default"]
assert "certifi" in p.lockfile["default"]
@flaky
@pytest.mark.basic
@pytest.mark.install
@pytest.mark.needs_internet
def test_bad_mirror_install(PipenvInstance):
with temp_environ(), PipenvInstance(chdir=True) as p:
# This demonstrates that the mirror parameter is being used
os.environ.pop("PIPENV_TEST_INDEX", None)
c = p.pipenv("install requests --pypi-mirror https://pypi.example.org")
assert c.return_code != 0
@pytest.mark.lock
@pytest.mark.complex
@pytest.mark.skip(reason="Does not work unless you can explicitly install into py2")
def test_complex_lock(PipenvInstance):
with PipenvInstance() as p:
c = p.pipenv("install apscheduler")
assert c.return_code == 0
assert "apscheduler" in p.pipfile["packages"]
assert "funcsigs" in p.lockfile[u"default"]
assert "futures" in p.lockfile[u"default"]
@flaky
@pytest.mark.dev
@pytest.mark.run
def test_basic_dev_install(PipenvInstance):
with PipenvInstance() as p:
c = p.pipenv("install requests --dev")
assert c.return_code == 0
assert "requests" in p.pipfile["dev-packages"]
assert "requests" in p.lockfile["develop"]
assert "chardet" in p.lockfile["develop"]
assert "idna" in p.lockfile["develop"]
assert "urllib3" in p.lockfile["develop"]
assert "certifi" in p.lockfile["develop"]
c = p.pipenv("run python -m requests.help")
assert c.return_code == 0
@flaky
@pytest.mark.dev
@pytest.mark.basic
@pytest.mark.install
def test_install_without_dev(PipenvInstance):
with PipenvInstance(chdir=True) as p:
with open(p.pipfile_path, "w") as f:
contents = """
[packages]
six = "*"
[dev-packages]
tablib = "*"
""".strip()
f.write(contents)
c = p.pipenv("install")
assert c.return_code == 0
assert "six" in p.pipfile["packages"]
assert "tablib" in p.pipfile["dev-packages"]
assert "six" in p.lockfile["default"]
assert "tablib" in p.lockfile["develop"]
c = p.pipenv('run python -c "import tablib"')
assert c.return_code != 0
c = p.pipenv('run python -c "import six"')
assert c.return_code == 0
@flaky
@pytest.mark.basic
@pytest.mark.install
def test_install_without_dev_section(PipenvInstance):
with PipenvInstance() as p:
with open(p.pipfile_path, "w") as f:
contents = """
[packages]
six = "*"
""".strip()
f.write(contents)
c = p.pipenv("install")
assert c.return_code == 0
assert "six" in p.pipfile["packages"]
assert p.pipfile.get("dev-packages", {}) == {}
assert "six" in p.lockfile["default"]
assert p.lockfile["develop"] == {}
c = p.pipenv('run python -c "import six"')
assert c.return_code == 0
@flaky
@pytest.mark.lock
@pytest.mark.extras
@pytest.mark.install
def test_extras_install(PipenvInstance):
with PipenvInstance(chdir=True) as p:
c = p.pipenv("install requests[socks]")
assert c.return_code == 0
assert "requests" in p.pipfile["packages"]
assert "extras" in p.pipfile["packages"]["requests"]
assert "requests" in p.lockfile["default"]
assert "chardet" in p.lockfile["default"]
assert "idna" in p.lockfile["default"]
assert "urllib3" in p.lockfile["default"]
assert "pysocks" in p.lockfile["default"]
@flaky
@pytest.mark.pin
@pytest.mark.basic
@pytest.mark.install
def test_windows_pinned_pipfile(PipenvInstance):
with PipenvInstance() as p:
with open(p.pipfile_path, "w") as f:
contents = """
[packages]
requests = "==2.19.1"
""".strip()
f.write(contents)
c = p.pipenv("install")
assert c.return_code == 0
assert "requests" in p.pipfile["packages"]
assert "requests" in p.lockfile["default"]
@flaky
@pytest.mark.basic
@pytest.mark.install
@pytest.mark.resolver
@pytest.mark.backup_resolver
def test_backup_resolver(PipenvInstance):
with PipenvInstance() as p:
with open(p.pipfile_path, "w") as f:
contents = """
[packages]
"ibm-db-sa-py3" = "==0.3.1-1"
""".strip()
f.write(contents)
c = p.pipenv("install")
assert c.return_code == 0
assert "ibm-db-sa-py3" in p.lockfile["default"]
@flaky
@pytest.mark.run
@pytest.mark.alt
def test_alternative_version_specifier(PipenvInstance):
with PipenvInstance() as p:
with open(p.pipfile_path, "w") as f:
contents = """
[packages]
requests = {version = "*"}
""".strip()
f.write(contents)
c = p.pipenv("install")
assert c.return_code == 0
assert "requests" in p.lockfile["default"]
assert "idna" in p.lockfile["default"]
assert "urllib3" in p.lockfile["default"]
assert "certifi" in p.lockfile["default"]
assert "chardet" in p.lockfile["default"]
c = p.pipenv('run python -c "import requests; import idna; import certifi;"')
assert c.return_code == 0
@flaky
@pytest.mark.run
@pytest.mark.alt
def test_outline_table_specifier(PipenvInstance):
with PipenvInstance() as p:
with open(p.pipfile_path, "w") as f:
contents = """
[packages.requests]
version = "*"
""".strip()
f.write(contents)
c = p.pipenv("install")
assert c.return_code == 0
assert "requests" in p.lockfile["default"]
assert "idna" in p.lockfile["default"]
assert "urllib3" in p.lockfile["default"]
assert "certifi" in p.lockfile["default"]
assert "chardet" in p.lockfile["default"]
c = p.pipenv('run python -c "import requests; import idna; import certifi;"')
assert c.return_code == 0
@pytest.mark.bad
@pytest.mark.basic
@pytest.mark.install
def test_bad_packages(PipenvInstance):
with PipenvInstance() as p:
c = p.pipenv("install NotAPackage")
assert c.return_code > 0
@pytest.mark.lock
@pytest.mark.extras
@pytest.mark.install
@pytest.mark.requirements
def test_requirements_to_pipfile(PipenvInstance, pypi):
with PipenvInstance(pipfile=False, chdir=True) as p:
# Write a requirements file
with open("requirements.txt", "w") as f:
f.write("-i {}\nrequests[socks]==2.19.1\n".format(pypi.url))
c = p.pipenv("install")
assert c.return_code == 0
print(c.out)
print(c.err)
print(delegator.run("ls -l").out)
# assert stuff in pipfile
assert "requests" in p.pipfile["packages"]
assert "extras" in p.pipfile["packages"]["requests"]
# assert stuff in lockfile
assert "requests" in p.lockfile["default"]
assert "chardet" in p.lockfile["default"]
assert "idna" in p.lockfile["default"]
assert "urllib3" in p.lockfile["default"]
assert "pysocks" in p.lockfile["default"]
@pytest.mark.basic
@pytest.mark.install
@pytest.mark.skip_osx
@pytest.mark.requirements
def test_skip_requirements_when_pipfile(PipenvInstance):
with PipenvInstance(chdir=True) as p:
with open("requirements.txt", "w") as f:
f.write("requests==2.18.1\n")
c = p.pipenv("install six")
assert c.return_code == 0
with open(p.pipfile_path, "w") as f:
contents = """
[packages]
six = "*"
fake_package = "<0.12"
""".strip()
f.write(contents)
c = p.pipenv("install")
assert c.ok
assert "fake_package" in p.pipfile["packages"]
assert "fake-package" in p.lockfile["default"]
assert "six" in p.pipfile["packages"]
assert "six" in p.lockfile["default"]
assert "requests" not in p.pipfile["packages"]
assert "requests" not in p.lockfile["default"]
@pytest.mark.cli
@pytest.mark.clean
def test_clean_on_empty_venv(PipenvInstance):
with PipenvInstance() as p:
c = p.pipenv("clean")
assert c.return_code == 0
@pytest.mark.basic
@pytest.mark.install
def test_install_does_not_extrapolate_environ(PipenvInstance):
with temp_environ(), PipenvInstance(chdir=True) as p:
# os.environ["PYPI_URL"] = pypi.url
os.environ["PYPI_URL"] = p.pypi
with open(p.pipfile_path, "w") as f:
f.write(
"""
[[source]]
url = '${PYPI_URL}/simple'
verify_ssl = true
name = 'mockpi'
"""
)
# Ensure simple install does not extrapolate.
c = p.pipenv("install")
assert c.return_code == 0
assert p.pipfile["source"][0]["url"] == "${PYPI_URL}/simple"
assert p.lockfile["_meta"]["sources"][0]["url"] == "${PYPI_URL}/simple"
# Ensure package install does not extrapolate.
c = p.pipenv("install six")
assert c.return_code == 0
assert p.pipfile["source"][0]["url"] == "${PYPI_URL}/simple"
assert p.lockfile["_meta"]["sources"][0]["url"] == "${PYPI_URL}/simple"
@pytest.mark.basic
@pytest.mark.editable
@pytest.mark.badparameter
@pytest.mark.install
def test_editable_no_args(PipenvInstance):
with PipenvInstance() as p:
c = p.pipenv("install -e")
assert c.return_code != 0
assert "Error: -e option requires an argument" in c.err
@pytest.mark.basic
@pytest.mark.install
@pytest.mark.virtualenv
def test_install_venv_project_directory(PipenvInstance):
with PipenvInstance(chdir=True) as p:
with temp_environ(), TemporaryDirectory(
prefix="pipenv-", suffix="temp_workon_home"
) as workon_home:
os.environ["WORKON_HOME"] = workon_home.name
if "PIPENV_VENV_IN_PROJECT" in os.environ:
del os.environ["PIPENV_VENV_IN_PROJECT"]
c = p.pipenv("install six")
assert c.return_code == 0
venv_loc = None
for line in c.err.splitlines():
if line.startswith("Virtualenv location:"):
venv_loc = Path(line.split(":", 1)[-1].strip())
assert venv_loc is not None
assert venv_loc.joinpath(".project").exists()
@pytest.mark.cli
@pytest.mark.deploy
@pytest.mark.system
def test_system_and_deploy_work(PipenvInstance):
with PipenvInstance(chdir=True) as p:
c = p.pipenv("install tablib")
assert c.return_code == 0
c = p.pipenv("--rm")
assert c.return_code == 0
c = delegator.run("virtualenv .venv")
assert c.return_code == 0
c = p.pipenv("install --system --deploy")
assert c.return_code == 0
c = p.pipenv("--rm")
assert c.return_code == 0
Path(p.pipfile_path).write_text(
u"""
[packages]
tablib = "*"
""".strip()
)
c = p.pipenv("install --system")
assert c.return_code == 0
@pytest.mark.basic
@pytest.mark.install
def test_install_creates_pipfile(PipenvInstance):
with PipenvInstance(chdir=True) as p:
if os.path.isfile(p.pipfile_path):
os.unlink(p.pipfile_path)
if "PIPENV_PIPFILE" in os.environ:
del os.environ["PIPENV_PIPFILE"]
assert not os.path.isfile(p.pipfile_path)
c = p.pipenv("install")
assert c.return_code == 0
assert os.path.isfile(p.pipfile_path)
@pytest.mark.basic
@pytest.mark.install
def test_install_non_exist_dep(PipenvInstance):
with PipenvInstance(chdir=True) as p:
c = p.pipenv("install dateutil")
assert not c.ok
assert "dateutil" not in p.pipfile["packages"]
@pytest.mark.basic
@pytest.mark.install
def test_install_package_with_dots(PipenvInstance):
with PipenvInstance(chdir=True) as p:
c = p.pipenv("install backports.html")
assert c.ok
assert "backports.html" in p.pipfile["packages"]
@pytest.mark.basic
@pytest.mark.install
def test_rewrite_outline_table(PipenvInstance):
with PipenvInstance(chdir=True) as p:
with open(p.pipfile_path, 'w') as f:
contents = """
[packages]
six = {version = "*"}
[packages.requests]
version = "*"
extras = ["socks"]
""".strip()
f.write(contents)
c = p.pipenv("install flask")
assert c.return_code == 0
with open(p.pipfile_path) as f:
contents = f.read()
assert "[packages.requests]" not in contents
assert 'six = {version = "*"}' in contents
assert 'requests = {version = "*"' in contents
assert 'flask = "*"' in contents
| true | true |
f71187b2306a598f985c5bc1f8ab04fb06221b75 | 4,764 | py | Python | docs/conf.py | aadu/cake | 4831ebea425a7c0414020702415b793e489e024e | [
"MIT"
] | null | null | null | docs/conf.py | aadu/cake | 4831ebea425a7c0414020702415b793e489e024e | [
"MIT"
] | 5 | 2020-03-24T16:25:11.000Z | 2021-06-01T22:51:29.000Z | docs/conf.py | aadu/cake | 4831ebea425a7c0414020702415b793e489e024e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# cake documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
# Make the project root importable so ``cake`` (and its version) can be read
# below without installing the package first.
sys.path.insert(0, os.path.abspath('..'))
import cake
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cake'
copyright = u"2018, Aaron Duke"
author = u"Aaron Duke"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = cake.__version__
# The full version, including alpha/beta/rc tags.
# Both are taken from the package itself so the docs never drift from code.
release = cake.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'cakedoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'cake.tex',
     u'cake Documentation',
     u'Aaron Duke', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'cake',
     u'cake Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'cake',
     u'cake Documentation',
     author,
     'cake',
     'One line description of project.',
     'Miscellaneous'),
]
| 29.04878 | 77 | 0.680101 |
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import cake
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'cake'
copyright = u"2018, Aaron Duke"
author = u"Aaron Duke"
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = cake.__version__
# The full version, including alpha/beta/rc tags.
release = cake.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'cakedoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cake.tex',
u'cake Documentation',
u'Aaron Duke', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cake',
u'cake Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cake',
u'cake Documentation',
author,
'cake',
'One line description of project.',
'Miscellaneous'),
]
| true | true |
f7118823a87b93a43872741884cd7efddc1ccb73 | 13,516 | py | Python | test/functional/rpc_scantxoutset.py | BitPalnet/BitPal | 0cce7aea31a2e45fecec0e2ef42728aa26355704 | [
"MIT"
] | 1 | 2021-03-09T22:25:20.000Z | 2021-03-09T22:25:20.000Z | test/functional/rpc_scantxoutset.py | BitPalnet/BitPal | 0cce7aea31a2e45fecec0e2ef42728aa26355704 | [
"MIT"
] | null | null | null | test/functional/rpc_scantxoutset.py | BitPalnet/BitPal | 0cce7aea31a2e45fecec0e2ef42728aa26355704 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The BitPal Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the scantxoutset rpc call."""
from test_framework.test_framework import BitPalTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from decimal import Decimal
import shutil
import os
def descriptors(out):
    """Return the output descriptors of a scan result's unspents, sorted.

    ``out`` is the dict returned by the ``scantxoutset`` RPC; only its
    ``unspents`` list is consulted.
    """
    descs = [unspent['desc'] for unspent in out['unspents']]
    descs.sort()
    return descs
class ScantxoutsetTest(BitPalTestFramework):
    """Functional test for the ``scantxoutset`` RPC.

    UTXOs paying known keys and addresses are created with the wallet, the
    wallet is then deleted, and ``scantxoutset`` must rediscover those
    outputs from output descriptors alone (raw keys, addresses, and ranged
    extended-key descriptors).
    """
    def set_test_params(self):
        # A single node on a clean chain keeps the UTXO set fully predictable.
        self.num_nodes = 1
        self.setup_clean_chain = True
    def skip_test_if_missing_module(self):
        # Wallet RPCs (getnewaddress/sendtoaddress) are needed for the setup phase.
        self.skip_if_no_wallet()
    def run_test(self):
        self.log.info("Mining blocks...")
        self.nodes[0].generate(110)
        # One address of each type; the pubkeys let the checks below build
        # pkh/wpkh/sh(wpkh)/combo descriptors for the same outputs.
        addr_P2SH_SEGWIT = self.nodes[0].getnewaddress("", "p2sh-segwit")
        pubk1 = self.nodes[0].getaddressinfo(addr_P2SH_SEGWIT)['pubkey']
        addr_LEGACY = self.nodes[0].getnewaddress("", "legacy")
        pubk2 = self.nodes[0].getaddressinfo(addr_LEGACY)['pubkey']
        addr_BECH32 = self.nodes[0].getnewaddress("", "bech32")
        pubk3 = self.nodes[0].getaddressinfo(addr_BECH32)['pubkey']
        self.nodes[0].sendtoaddress(addr_P2SH_SEGWIT, 0.001)
        self.nodes[0].sendtoaddress(addr_LEGACY, 0.002)
        self.nodes[0].sendtoaddress(addr_BECH32, 0.004)
        #send to child keys of tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK
        # Every amount is a distinct power of two times 0.001, so each scan's
        # total_amount uniquely identifies the subset of outputs it matched.
        self.nodes[0].sendtoaddress("mkHV1C6JLheLoUSSZYk7x3FH5tnx9bu7yc", 0.008) # (m/0'/0'/0')
        self.nodes[0].sendtoaddress("mipUSRmJAj2KrjSvsPQtnP8ynUon7FhpCR", 0.016) # (m/0'/0'/1')
        self.nodes[0].sendtoaddress("n37dAGe6Mq1HGM9t4b6rFEEsDGq7Fcgfqg", 0.032) # (m/0'/0'/1500')
        self.nodes[0].sendtoaddress("mqS9Rpg8nNLAzxFExsgFLCnzHBsoQ3PRM6", 0.064) # (m/0'/0'/0)
        self.nodes[0].sendtoaddress("mnTg5gVWr3rbhHaKjJv7EEEc76ZqHgSj4S", 0.128) # (m/0'/0'/1)
        self.nodes[0].sendtoaddress("mketCd6B9U9Uee1iCsppDJJBHfvi6U6ukC", 0.256) # (m/0'/0'/1500)
        self.nodes[0].sendtoaddress("mj8zFzrbBcdaWXowCQ1oPZ4qioBVzLzAp7", 0.512) # (m/1/1/0')
        self.nodes[0].sendtoaddress("mfnKpKQEftniaoE1iXuMMePQU3PUpcNisA", 1.024) # (m/1/1/1')
        self.nodes[0].sendtoaddress("mou6cB1kaP1nNJM1sryW6YRwnd4shTbXYQ", 2.048) # (m/1/1/1500')
        self.nodes[0].sendtoaddress("mtfUoUax9L4tzXARpw1oTGxWyoogp52KhJ", 4.096) # (m/1/1/0)
        self.nodes[0].sendtoaddress("mxp7w7j8S1Aq6L8StS2PqVvtt4HGxXEvdy", 8.192) # (m/1/1/1)
        self.nodes[0].sendtoaddress("mpQ8rokAhp1TAtJQR6F6TaUmjAWkAWYYBq", 16.384) # (m/1/1/1500)
        self.nodes[0].generate(1)
        self.log.info("Stop node, remove wallet, mine again some blocks...")
        # Deleting the wallet proves that scantxoutset works from the UTXO
        # set alone, with no wallet metadata to lean on.
        self.stop_node(0)
        shutil.rmtree(os.path.join(self.nodes[0].datadir, self.chain, 'wallets'))
        self.start_node(0)
        self.nodes[0].generate(110)
        # An empty scan succeeds and mirrors the gettxoutsetinfo statistics.
        scan = self.nodes[0].scantxoutset("start", [])
        info = self.nodes[0].gettxoutsetinfo()
        assert_equal(scan['success'], True)
        assert_equal(scan['height'], info['height'])
        assert_equal(scan['txouts'], info['txouts'])
        assert_equal(scan['bestblock'], info['bestblock'])
        # From here on the node runs without wallet support at all.
        self.restart_node(0, ['-nowallet'])
        self.log.info("Test if we have found the non HD unspent outputs.")
        assert_equal(self.nodes[0].scantxoutset("start", [ "pkh(" + pubk1 + ")", "pkh(" + pubk2 + ")", "pkh(" + pubk3 + ")"])['total_amount'], Decimal("0.002"))
        assert_equal(self.nodes[0].scantxoutset("start", [ "wpkh(" + pubk1 + ")", "wpkh(" + pubk2 + ")", "wpkh(" + pubk3 + ")"])['total_amount'], Decimal("0.004"))
        assert_equal(self.nodes[0].scantxoutset("start", [ "sh(wpkh(" + pubk1 + "))", "sh(wpkh(" + pubk2 + "))", "sh(wpkh(" + pubk3 + "))"])['total_amount'], Decimal("0.001"))
        assert_equal(self.nodes[0].scantxoutset("start", [ "combo(" + pubk1 + ")", "combo(" + pubk2 + ")", "combo(" + pubk3 + ")"])['total_amount'], Decimal("0.007"))
        assert_equal(self.nodes[0].scantxoutset("start", [ "addr(" + addr_P2SH_SEGWIT + ")", "addr(" + addr_LEGACY + ")", "addr(" + addr_BECH32 + ")"])['total_amount'], Decimal("0.007"))
        assert_equal(self.nodes[0].scantxoutset("start", [ "addr(" + addr_P2SH_SEGWIT + ")", "addr(" + addr_LEGACY + ")", "combo(" + pubk3 + ")"])['total_amount'], Decimal("0.007"))
        self.log.info("Test range validation.")
        assert_raises_rpc_error(-8, "End of range is too high", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": -1}])
        assert_raises_rpc_error(-8, "Range should be greater or equal than 0", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [-1, 10]}])
        assert_raises_rpc_error(-8, "End of range is too high", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]}])
        assert_raises_rpc_error(-8, "Range specified as [begin,end] must not have begin after end", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [2, 1]}])
        assert_raises_rpc_error(-8, "Range is too large", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [0, 1000001]}])
        self.log.info("Test extended key derivation.")
        # Run various scans, and verify that the sum of the amounts of the matches corresponds to the expected subset.
        # Note that all amounts in the UTXO set are powers of 2 multiplied by 0.001 BCC, so each amounts uniquely identifies a subset.
        assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/0h)"])['total_amount'], Decimal("0.008"))
        assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0'/1h)"])['total_amount'], Decimal("0.016"))
        assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/1500')"])['total_amount'], Decimal("0.032"))
        assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0h/0)"])['total_amount'], Decimal("0.064"))
        assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/1)"])['total_amount'], Decimal("0.128"))
        assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/1500)"])['total_amount'], Decimal("0.256"))
        assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/*h)", "range": 1499}])['total_amount'], Decimal("0.024"))
        assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0'/*h)", "range": 1500}])['total_amount'], Decimal("0.056"))
        assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/*)", "range": 1499}])['total_amount'], Decimal("0.192"))
        assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/*)", "range": 1500}])['total_amount'], Decimal("0.448"))
        assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0')"])['total_amount'], Decimal("0.512"))
        assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1')"])['total_amount'], Decimal("1.024"))
        assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1500h)"])['total_amount'], Decimal("2.048"))
        assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)"])['total_amount'], Decimal("4.096"))
        assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1)"])['total_amount'], Decimal("8.192"))
        assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1500)"])['total_amount'], Decimal("16.384"))
        assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/0)"])['total_amount'], Decimal("4.096"))
        assert_equal(self.nodes[0].scantxoutset("start", [ "combo([abcdef88/1/2'/3/4h]tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/1)"])['total_amount'], Decimal("8.192"))
        assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/1500)"])['total_amount'], Decimal("16.384"))
        assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*')", "range": 1499}])['total_amount'], Decimal("1.536"))
        assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*')", "range": 1500}])['total_amount'], Decimal("3.584"))
        assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)", "range": 1499}])['total_amount'], Decimal("12.288"))
        assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)", "range": 1500}])['total_amount'], Decimal("28.672"))
        assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": 1499}])['total_amount'], Decimal("12.288"))
        assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": 1500}])['total_amount'], Decimal("28.672"))
        assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": [1500,1500]}])['total_amount'], Decimal("16.384"))
        # Test the reported descriptors for a few matches
        assert_equal(descriptors(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/*)", "range": 1499}])), ["pkh([0c5f9a1e/0'/0'/0]026dbd8b2315f296d36e6b6920b1579ca75569464875c7ebe869b536a7d9503c8c)#dzxw429x", "pkh([0c5f9a1e/0'/0'/1]033e6f25d76c00bedb3a8993c7d5739ee806397f0529b1b31dda31ef890f19a60c)#43rvceed"])
        assert_equal(descriptors(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)"])), ["pkh([0c5f9a1e/1/1/0]03e1c5b6e650966971d7e71ef2674f80222752740fc1dfd63bbbd220d2da9bd0fb)#cxmct4w8"])
        assert_equal(descriptors(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": 1500}])), ['pkh([0c5f9a1e/1/1/0]03e1c5b6e650966971d7e71ef2674f80222752740fc1dfd63bbbd220d2da9bd0fb)#cxmct4w8', 'pkh([0c5f9a1e/1/1/1500]03832901c250025da2aebae2bfb38d5c703a57ab66ad477f9c578bfbcd78abca6f)#vchwd07g', 'pkh([0c5f9a1e/1/1/1]030d820fc9e8211c4169be8530efbc632775d8286167afd178caaf1089b77daba7)#z2t3ypsa'])
        # Check that status and abort don't need second arg
        assert_equal(self.nodes[0].scantxoutset("status"), None)
        assert_equal(self.nodes[0].scantxoutset("abort"), False)
        # Check that second arg is needed for start
        assert_raises_rpc_error(-1, "scanobjects argument is required for the start action", self.nodes[0].scantxoutset, "start")
if __name__ == '__main__':
    # Entry point: run the functional test directly from the command line.
    ScantxoutsetTest().main()
| 105.59375 | 531 | 0.73831 |
from test_framework.test_framework import BitPalTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from decimal import Decimal
import shutil
import os
def descriptors(out):
return sorted(u['desc'] for u in out['unspents'])
class ScantxoutsetTest(BitPalTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(110)
addr_P2SH_SEGWIT = self.nodes[0].getnewaddress("", "p2sh-segwit")
pubk1 = self.nodes[0].getaddressinfo(addr_P2SH_SEGWIT)['pubkey']
addr_LEGACY = self.nodes[0].getnewaddress("", "legacy")
pubk2 = self.nodes[0].getaddressinfo(addr_LEGACY)['pubkey']
addr_BECH32 = self.nodes[0].getnewaddress("", "bech32")
pubk3 = self.nodes[0].getaddressinfo(addr_BECH32)['pubkey']
self.nodes[0].sendtoaddress(addr_P2SH_SEGWIT, 0.001)
self.nodes[0].sendtoaddress(addr_LEGACY, 0.002)
self.nodes[0].sendtoaddress(addr_BECH32, 0.004)
self.nodes[0].sendtoaddress("mkHV1C6JLheLoUSSZYk7x3FH5tnx9bu7yc", 0.008)
self.nodes[0].sendtoaddress("mipUSRmJAj2KrjSvsPQtnP8ynUon7FhpCR", 0.016) # (m/0'/0'/1')
self.nodes[0].sendtoaddress("n37dAGe6Mq1HGM9t4b6rFEEsDGq7Fcgfqg", 0.032)
self.nodes[0].sendtoaddress("mqS9Rpg8nNLAzxFExsgFLCnzHBsoQ3PRM6", 0.064) # (m/0'/0'/0)
self.nodes[0].sendtoaddress("mnTg5gVWr3rbhHaKjJv7EEEc76ZqHgSj4S", 0.128) # (m/0'/0'/1)
self.nodes[0].sendtoaddress("mketCd6B9U9Uee1iCsppDJJBHfvi6U6ukC", 0.256) # (m/0'/0'/1500)
self.nodes[0].sendtoaddress("mj8zFzrbBcdaWXowCQ1oPZ4qioBVzLzAp7", 0.512) # (m/1/1/0')
self.nodes[0].sendtoaddress("mfnKpKQEftniaoE1iXuMMePQU3PUpcNisA", 1.024)
self.nodes[0].sendtoaddress("mou6cB1kaP1nNJM1sryW6YRwnd4shTbXYQ", 2.048) # (m/1/1/1500')
self.nodes[0].sendtoaddress("mtfUoUax9L4tzXARpw1oTGxWyoogp52KhJ", 4.096)
self.nodes[0].sendtoaddress("mxp7w7j8S1Aq6L8StS2PqVvtt4HGxXEvdy", 8.192)
self.nodes[0].sendtoaddress("mpQ8rokAhp1TAtJQR6F6TaUmjAWkAWYYBq", 16.384)
self.nodes[0].generate(1)
self.log.info("Stop node, remove wallet, mine again some blocks...")
self.stop_node(0)
shutil.rmtree(os.path.join(self.nodes[0].datadir, self.chain, 'wallets'))
self.start_node(0)
self.nodes[0].generate(110)
scan = self.nodes[0].scantxoutset("start", [])
info = self.nodes[0].gettxoutsetinfo()
assert_equal(scan['success'], True)
assert_equal(scan['height'], info['height'])
assert_equal(scan['txouts'], info['txouts'])
assert_equal(scan['bestblock'], info['bestblock'])
self.restart_node(0, ['-nowallet'])
self.log.info("Test if we have found the non HD unspent outputs.")
assert_equal(self.nodes[0].scantxoutset("start", [ "pkh(" + pubk1 + ")", "pkh(" + pubk2 + ")", "pkh(" + pubk3 + ")"])['total_amount'], Decimal("0.002"))
assert_equal(self.nodes[0].scantxoutset("start", [ "wpkh(" + pubk1 + ")", "wpkh(" + pubk2 + ")", "wpkh(" + pubk3 + ")"])['total_amount'], Decimal("0.004"))
assert_equal(self.nodes[0].scantxoutset("start", [ "sh(wpkh(" + pubk1 + "))", "sh(wpkh(" + pubk2 + "))", "sh(wpkh(" + pubk3 + "))"])['total_amount'], Decimal("0.001"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(" + pubk1 + ")", "combo(" + pubk2 + ")", "combo(" + pubk3 + ")"])['total_amount'], Decimal("0.007"))
assert_equal(self.nodes[0].scantxoutset("start", [ "addr(" + addr_P2SH_SEGWIT + ")", "addr(" + addr_LEGACY + ")", "addr(" + addr_BECH32 + ")"])['total_amount'], Decimal("0.007"))
assert_equal(self.nodes[0].scantxoutset("start", [ "addr(" + addr_P2SH_SEGWIT + ")", "addr(" + addr_LEGACY + ")", "combo(" + pubk3 + ")"])['total_amount'], Decimal("0.007"))
self.log.info("Test range validation.")
assert_raises_rpc_error(-8, "End of range is too high", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": -1}])
assert_raises_rpc_error(-8, "Range should be greater or equal than 0", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [-1, 10]}])
assert_raises_rpc_error(-8, "End of range is too high", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]}])
assert_raises_rpc_error(-8, "Range specified as [begin,end] must not have begin after end", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [2, 1]}])
assert_raises_rpc_error(-8, "Range is too large", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [0, 1000001]}])
self.log.info("Test extended key derivation.")
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/0h)"])['total_amount'], Decimal("0.008"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0'/1h)"])['total_amount'], Decimal("0.016"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/1500')"])['total_amount'], Decimal("0.032"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0h/0)"])['total_amount'], Decimal("0.064"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/1)"])['total_amount'], Decimal("0.128"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/1500)"])['total_amount'], Decimal("0.256"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/*h)", "range": 1499}])['total_amount'], Decimal("0.024"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0'/*h)", "range": 1500}])['total_amount'], Decimal("0.056"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/*)", "range": 1499}])['total_amount'], Decimal("0.192"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/*)", "range": 1500}])['total_amount'], Decimal("0.448"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0')"])['total_amount'], Decimal("0.512"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1')"])['total_amount'], Decimal("1.024"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1500h)"])['total_amount'], Decimal("2.048"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)"])['total_amount'], Decimal("4.096"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1)"])['total_amount'], Decimal("8.192"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1500)"])['total_amount'], Decimal("16.384"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/0)"])['total_amount'], Decimal("4.096"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo([abcdef88/1/2'/3/4h]tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/1)"])['total_amount'], Decimal("8.192"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/1500)"])['total_amount'], Decimal("16.384"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*')", "range": 1499}])['total_amount'], Decimal("1.536"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*')", "range": 1500}])['total_amount'], Decimal("3.584"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)", "range": 1499}])['total_amount'], Decimal("12.288"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)", "range": 1500}])['total_amount'], Decimal("28.672"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": 1499}])['total_amount'], Decimal("12.288"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": 1500}])['total_amount'], Decimal("28.672"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": [1500,1500]}])['total_amount'], Decimal("16.384"))
# Test the reported descriptors for a few matches
assert_equal(descriptors(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/*)", "range": 1499}])), ["pkh([0c5f9a1e/0'/0'/0]026dbd8b2315f296d36e6b6920b1579ca75569464875c7ebe869b536a7d9503c8c)#dzxw429x", "pkh([0c5f9a1e/0'/0'/1]033e6f25d76c00bedb3a8993c7d5739ee806397f0529b1b31dda31ef890f19a60c)#43rvceed"])
assert_equal(descriptors(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)"])), ["pkh([0c5f9a1e/1/1/0]03e1c5b6e650966971d7e71ef2674f80222752740fc1dfd63bbbd220d2da9bd0fb)#cxmct4w8"])
assert_equal(descriptors(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": 1500}])), ['pkh([0c5f9a1e/1/1/0]03e1c5b6e650966971d7e71ef2674f80222752740fc1dfd63bbbd220d2da9bd0fb)#cxmct4w8', 'pkh([0c5f9a1e/1/1/1500]03832901c250025da2aebae2bfb38d5c703a57ab66ad477f9c578bfbcd78abca6f)#vchwd07g', 'pkh([0c5f9a1e/1/1/1]030d820fc9e8211c4169be8530efbc632775d8286167afd178caaf1089b77daba7)#z2t3ypsa'])
assert_equal(self.nodes[0].scantxoutset("status"), None)
assert_equal(self.nodes[0].scantxoutset("abort"), False)
# Check that second arg is needed for start
assert_raises_rpc_error(-1, "scanobjects argument is required for the start action", self.nodes[0].scantxoutset, "start")
if __name__ == '__main__':
ScantxoutsetTest().main()
| true | true |
f711891408202ec5e37aa87f924bca39e8995574 | 5,611 | py | Python | reproject/utils.py | astropy/reproject | 43b0d8a4a5641cfbe6adbc3b1f2d7598f2fd5930 | [
"BSD-3-Clause"
] | 39 | 2019-05-11T19:23:33.000Z | 2022-02-28T12:25:37.000Z | reproject/utils.py | astropy/reproject | 43b0d8a4a5641cfbe6adbc3b1f2d7598f2fd5930 | [
"BSD-3-Clause"
] | 121 | 2018-12-06T16:36:05.000Z | 2022-03-31T23:52:40.000Z | reproject/utils.py | astropy/reproject | 43b0d8a4a5641cfbe6adbc3b1f2d7598f2fd5930 | [
"BSD-3-Clause"
] | 17 | 2018-12-05T04:14:48.000Z | 2021-12-09T22:29:54.000Z | import numpy as np
import astropy.nddata
from astropy.io import fits
from astropy.io.fits import CompImageHDU, HDUList, Header, ImageHDU, PrimaryHDU
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS
__all__ = ['parse_input_data', 'parse_input_shape', 'parse_input_weights',
'parse_output_projection']
def parse_input_data(input_data, hdu_in=None):
    """
    Parse input data to return a Numpy array and WCS object.
    """

    if isinstance(input_data, str):
        # A string is treated as a FITS filename: open it and recurse.
        return parse_input_data(fits.open(input_data), hdu_in=hdu_in)

    if isinstance(input_data, HDUList):
        if hdu_in is None:
            if len(input_data) > 1:
                raise ValueError("More than one HDU is present, please specify "
                                 "HDU to use with ``hdu_in=`` option")
            hdu_in = 0
        return parse_input_data(input_data[hdu_in])

    if isinstance(input_data, (PrimaryHDU, ImageHDU, CompImageHDU)):
        return input_data.data, WCS(input_data.header)

    if isinstance(input_data, tuple) and isinstance(input_data[0], np.ndarray):
        array, wcs_info = input_data
        # Promote a plain Header to a WCS; otherwise pass through as given.
        if isinstance(wcs_info, Header):
            wcs_info = WCS(wcs_info)
        return array, wcs_info

    if isinstance(input_data, astropy.nddata.NDDataBase):
        return input_data.data, input_data.wcs

    raise TypeError("input_data should either be an HDU object or a tuple "
                    "of (array, WCS) or (array, Header)")
def parse_input_shape(input_shape, hdu_in=None):
    """
    Parse input shape information to return an array shape tuple and WCS object.
    """

    if isinstance(input_shape, str):
        # A string is treated as a FITS filename: open it and recurse.
        return parse_input_shape(fits.open(input_shape), hdu_in=hdu_in)

    if isinstance(input_shape, HDUList):
        if hdu_in is None:
            if len(input_shape) > 1:
                raise ValueError("More than one HDU is present, please specify "
                                 "HDU to use with ``hdu_in=`` option")
            hdu_in = 0
        return parse_input_shape(input_shape[hdu_in])

    if isinstance(input_shape, (PrimaryHDU, ImageHDU, CompImageHDU)):
        return input_shape.shape, WCS(input_shape.header)

    if isinstance(input_shape, tuple) and isinstance(input_shape[0], np.ndarray):
        array, wcs_info = input_shape
        if isinstance(wcs_info, Header):
            return array.shape, WCS(wcs_info)
        return array.shape, wcs_info

    if isinstance(input_shape, tuple) and isinstance(input_shape[0], tuple):
        shape, wcs_info = input_shape
        if isinstance(wcs_info, Header):
            return shape, WCS(wcs_info)
        return input_shape

    if isinstance(input_shape, astropy.nddata.NDDataBase):
        return input_shape.data.shape, input_shape.wcs

    raise TypeError("input_shape should either be an HDU object or a tuple "
                    "of (array-or-shape, WCS) or (array-or-shape, Header)")
def parse_input_weights(input_weights, hdu_weights=None):
    """
    Parse input weights to return a Numpy array.
    """

    if isinstance(input_weights, str):
        # Filename: open and reuse parse_input_data, keeping only the array.
        array, _ = parse_input_data(fits.open(input_weights), hdu_in=hdu_weights)
        return array

    if isinstance(input_weights, HDUList):
        if hdu_weights is None:
            if len(input_weights) > 1:
                raise ValueError("More than one HDU is present, please specify "
                                 "HDU to use with ``hdu_weights=`` option")
            hdu_weights = 0
        array, _ = parse_input_data(input_weights[hdu_weights])
        return array

    if isinstance(input_weights, (PrimaryHDU, ImageHDU, CompImageHDU)):
        return input_weights.data

    if isinstance(input_weights, np.ndarray):
        return input_weights

    raise TypeError("input_weights should either be an HDU object or a Numpy array")
def parse_output_projection(output_projection, shape_out=None, output_array=None):
    """
    Parse the target projection of a reprojection.

    ``output_projection`` may be a `~astropy.io.fits.Header`, a WCS object,
    or a FITS filename. Returns a ``(wcs_out, shape_out)`` tuple describing
    the output frame. ``shape_out`` falls back to ``output_array.shape`` when
    only the array is supplied; if both are supplied they must agree.
    """
    if shape_out is None:
        if output_array is not None:
            shape_out = output_array.shape
    elif output_array is not None and shape_out != output_array.shape:
        # Both given explicitly: they must be consistent.
        raise ValueError("shape_out does not match shape of output_array")

    if isinstance(output_projection, Header):
        wcs_out = WCS(output_projection)
        try:
            # FITS NAXISn axes are reversed relative to Numpy ordering.
            shape_out = [output_projection['NAXIS{}'.format(i + 1)]
                         for i in range(output_projection['NAXIS'])][::-1]
        except KeyError as exc:
            if shape_out is None:
                # Chain the KeyError so the missing-keyword cause is visible.
                raise ValueError("Need to specify shape since output header "
                                 "does not contain complete shape information") from exc
    elif isinstance(output_projection, BaseHighLevelWCS):
        wcs_out = output_projection
        if shape_out is None:
            raise ValueError("Need to specify shape_out when specifying "
                             "output_projection as WCS object")
    elif isinstance(output_projection, str):
        # Context manager guarantees the file is closed even if reading the
        # data/header raises (the previous code leaked the handle on error).
        with fits.open(output_projection) as hdu_list:
            shape_out = hdu_list[0].data.shape
            wcs_out = WCS(hdu_list[0].header)
    else:
        raise TypeError('output_projection should either be a Header, a WCS '
                        'object, or a filename')

    if len(shape_out) == 0:
        raise ValueError("The shape of the output image should not be an "
                         "empty tuple")

    return wcs_out, shape_out
| 41.257353 | 88 | 0.640171 | import numpy as np
import astropy.nddata
from astropy.io import fits
from astropy.io.fits import CompImageHDU, HDUList, Header, ImageHDU, PrimaryHDU
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS
__all__ = ['parse_input_data', 'parse_input_shape', 'parse_input_weights',
'parse_output_projection']
def parse_input_data(input_data, hdu_in=None):
    """Parse input data to return a Numpy array and WCS object."""
    if isinstance(input_data, str):
        # Filename: open as FITS and recurse on the resulting HDUList.
        return parse_input_data(fits.open(input_data), hdu_in=hdu_in)
    elif isinstance(input_data, HDUList):
        if hdu_in is None:
            if len(input_data) > 1:
                raise ValueError("More than one HDU is present, please specify "
                                 "HDU to use with ``hdu_in=`` option")
            else:
                hdu_in = 0
        return parse_input_data(input_data[hdu_in])
    elif isinstance(input_data, (PrimaryHDU, ImageHDU, CompImageHDU)):
        return input_data.data, WCS(input_data.header)
    elif isinstance(input_data, tuple) and isinstance(input_data[0], np.ndarray):
        # (array, Header) is promoted to (array, WCS); (array, WCS) passes through.
        if isinstance(input_data[1], Header):
            return input_data[0], WCS(input_data[1])
        else:
            return input_data
    elif isinstance(input_data, astropy.nddata.NDDataBase):
        return input_data.data, input_data.wcs
    else:
        raise TypeError("input_data should either be an HDU object or a tuple "
                        "of (array, WCS) or (array, Header)")
def parse_input_shape(input_shape, hdu_in=None):
    """Parse input shape information to return an array shape tuple and WCS object."""
    if isinstance(input_shape, str):
        # Filename: open as FITS and recurse on the resulting HDUList.
        return parse_input_shape(fits.open(input_shape), hdu_in=hdu_in)
    elif isinstance(input_shape, HDUList):
        if hdu_in is None:
            if len(input_shape) > 1:
                raise ValueError("More than one HDU is present, please specify "
                                 "HDU to use with ``hdu_in=`` option")
            else:
                hdu_in = 0
        return parse_input_shape(input_shape[hdu_in])
    elif isinstance(input_shape, (PrimaryHDU, ImageHDU, CompImageHDU)):
        return input_shape.shape, WCS(input_shape.header)
    elif isinstance(input_shape, tuple) and isinstance(input_shape[0], np.ndarray):
        # (array, Header-or-WCS): reduce the array to its shape.
        if isinstance(input_shape[1], Header):
            return input_shape[0].shape, WCS(input_shape[1])
        else:
            return input_shape[0].shape, input_shape[1]
    elif isinstance(input_shape, tuple) and isinstance(input_shape[0], tuple):
        # (shape, Header-or-WCS): already a shape tuple.
        if isinstance(input_shape[1], Header):
            return input_shape[0], WCS(input_shape[1])
        else:
            return input_shape
    elif isinstance(input_shape, astropy.nddata.NDDataBase):
        return input_shape.data.shape, input_shape.wcs
    else:
        raise TypeError("input_shape should either be an HDU object or a tuple "
                        "of (array-or-shape, WCS) or (array-or-shape, Header)")
def parse_input_weights(input_weights, hdu_weights=None):
    """Parse input weights to return a Numpy array."""
    if isinstance(input_weights, str):
        # Filename: delegate to parse_input_data and keep only the array part.
        return parse_input_data(fits.open(input_weights), hdu_in=hdu_weights)[0]
    elif isinstance(input_weights, HDUList):
        if hdu_weights is None:
            if len(input_weights) > 1:
                raise ValueError("More than one HDU is present, please specify "
                                 "HDU to use with ``hdu_weights=`` option")
            else:
                hdu_weights = 0
        return parse_input_data(input_weights[hdu_weights])[0]
    elif isinstance(input_weights, (PrimaryHDU, ImageHDU, CompImageHDU)):
        return input_weights.data
    elif isinstance(input_weights, np.ndarray):
        return input_weights
    else:
        raise TypeError("input_weights should either be an HDU object or a Numpy array")
def parse_output_projection(output_projection, shape_out=None, output_array=None):
    """Parse the target projection; return a ``(wcs_out, shape_out)`` tuple."""
    if shape_out is None:
        # Fall back to the shape of the supplied output array, if any.
        if output_array is not None:
            shape_out = output_array.shape
    elif shape_out is not None and output_array is not None:
        # Both were given explicitly, so they must agree.
        if shape_out != output_array.shape:
            raise ValueError("shape_out does not match shape of output_array")
    if isinstance(output_projection, Header):
        wcs_out = WCS(output_projection)
        try:
            # FITS NAXISn axes are reversed relative to Numpy ordering.
            shape_out = [output_projection['NAXIS{}'.format(i + 1)]
                         for i in range(output_projection['NAXIS'])][::-1]
        except KeyError:
            if shape_out is None:
                raise ValueError("Need to specify shape since output header "
                                 "does not contain complete shape information")
    elif isinstance(output_projection, BaseHighLevelWCS):
        wcs_out = output_projection
        if shape_out is None:
            raise ValueError("Need to specify shape_out when specifying "
                             "output_projection as WCS object")
    elif isinstance(output_projection, str):
        # Filename: take shape and WCS from the primary HDU.
        hdu_list = fits.open(output_projection)
        shape_out = hdu_list[0].data.shape
        header = hdu_list[0].header
        wcs_out = WCS(header)
        hdu_list.close()
    else:
        raise TypeError('output_projection should either be a Header, a WCS '
                        'object, or a filename')
    if len(shape_out) == 0:
        raise ValueError("The shape of the output image should not be an "
                         "empty tuple")
    return wcs_out, shape_out
| true | true |
f71189ffbe9ac0e6918cf13b221be71313d98d51 | 574 | py | Python | tests/dumb_plugins/valid/__init__.py | dechainers/dechainy | f742cd77e93c6c5adfecf7509cf35e386ea25c6d | [
"Apache-2.0"
] | 1 | 2022-02-01T11:34:13.000Z | 2022-02-01T11:34:13.000Z | tests/dumb_plugins/valid/__init__.py | dechainers/dechainy | f742cd77e93c6c5adfecf7509cf35e386ea25c6d | [
"Apache-2.0"
] | null | null | null | tests/dumb_plugins/valid/__init__.py | dechainers/dechainy | f742cd77e93c6c5adfecf7509cf35e386ea25c6d | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from dechainy.plugins import Probe
from dechainy.ebpf import EbpfCompiler
@dataclass
class Valid(Probe):
    """Minimal DeChainy probe: compiles an ingress-only eBPF program and can
    hot-patch it at runtime via ``autopatch``."""

    def __post_init__(self):
        # Only the ingress hook is compiled; expose a compile-time flag the
        # eBPF source can test for (0 = not yet patched).
        self.ingress.required = True
        self.ingress.cflags.append("-DCUSTOM_VARIABLE=0")
        self.egress.required = False
        # Let the base Probe resolve its eBPF source relative to this file.
        super().__post_init__(path=__file__)

    def autopatch(self):
        # Flip the flag appended in __post_init__ (it is the last cflag) and
        # re-patch the loaded ingress program with the updated flags.
        self.ingress.cflags[-1] = "-DCUSTOM_VARIABLE=1"
        EbpfCompiler().patch_hook("ingress", self._programs.ingress,
                                  self.ingress.code, self.ingress.cflags)
| 28.7 | 73 | 0.672474 | from dataclasses import dataclass
from dechainy.plugins import Probe
from dechainy.ebpf import EbpfCompiler
@dataclass
class Valid(Probe):
def __post_init__(self):
self.ingress.required = True
self.ingress.cflags.append("-DCUSTOM_VARIABLE=0")
self.egress.required = False
super().__post_init__(path=__file__)
def autopatch(self):
self.ingress.cflags[-1] = "-DCUSTOM_VARIABLE=1"
EbpfCompiler().patch_hook("ingress", self._programs.ingress,
self.ingress.code, self.ingress.cflags)
| true | true |
f7118a0f9174dce416dc80c1510559d262408d78 | 1,959 | py | Python | powerline_shell/themes/default.py | aradzu10/powerline-shell | 299493eb3ad65b0331d9369a279833e61237d9a6 | [
"MIT"
] | null | null | null | powerline_shell/themes/default.py | aradzu10/powerline-shell | 299493eb3ad65b0331d9369a279833e61237d9a6 | [
"MIT"
] | null | null | null | powerline_shell/themes/default.py | aradzu10/powerline-shell | 299493eb3ad65b0331d9369a279833e61237d9a6 | [
"MIT"
] | null | null | null | class DefaultColor(object):
"""
This class should have the default colors for every segment.
Please test every new segment with this theme first.
"""
# RESET is not a real color code. It is used as in indicator
# within the code that any foreground / background color should
# be cleared
RESET = -1
USERNAME_FG = 250
USERNAME_BG = 240
USERNAME_ROOT_FG = 250
USERNAME_ROOT_BG = 124
HOSTNAME_FG = 250
HOSTNAME_BG = 238
HOME_SPECIAL_DISPLAY = True
HOME_BG = 31 # blueish
HOME_FG = 15 # white
PATH_BG = 237 # dark grey
PATH_FG = 250 # light grey
CWD_FG = 254 # nearly-white grey
SEPARATOR_FG = 244
READONLY_BG = 124
READONLY_FG = 254
SSH_BG = 166 # medium orange
SSH_FG = 254
REPO_CLEAN_BG = 148 # a light green color
REPO_CLEAN_FG = 0 # black
REPO_DIRTY_BG = 161 # pink/red
REPO_DIRTY_FG = 15 # white
JOBS_FG = 39
JOBS_BG = 238
CMD_PASSED_BG = 236
CMD_PASSED_FG = 15
CMD_FAILED_BG = 161
CMD_FAILED_FG = 15
SVN_CHANGES_BG = 148
SVN_CHANGES_FG = 22 # dark green
GIT_AHEAD_BG = 240
GIT_AHEAD_FG = 250
GIT_BEHIND_BG = 240
GIT_BEHIND_FG = 250
GIT_STAGED_BG = 22
GIT_STAGED_FG = 15
GIT_NOTSTAGED_BG = 130
GIT_NOTSTAGED_FG = 15
GIT_UNTRACKED_BG = 52
GIT_UNTRACKED_FG = 15
GIT_CONFLICTED_BG = 9
GIT_CONFLICTED_FG = 15
GIT_STASH_BG = 221
GIT_STASH_FG = 0
VIRTUAL_ENV_BG = 35 # a mid-tone green
VIRTUAL_ENV_FG = 00
BATTERY_NORMAL_BG = 22
BATTERY_NORMAL_FG = 7
BATTERY_LOW_BG = 196
BATTERY_LOW_FG = 7
AWS_PROFILE_FG = 39
AWS_PROFILE_BG = 238
TIME_FG = 250
TIME_BG = 238
CONST_FG = 15
CONST_BG = 0
class Color(DefaultColor):
    """
    Theme entry point: segments look up a class named ``Color`` in every
    theme module, so the default theme must define one even though it adds
    nothing beyond :class:`DefaultColor`.
    """
    pass
| 22.011236 | 75 | 0.650332 | class DefaultColor(object):
RESET = -1
USERNAME_FG = 250
USERNAME_BG = 240
USERNAME_ROOT_FG = 250
USERNAME_ROOT_BG = 124
HOSTNAME_FG = 250
HOSTNAME_BG = 238
HOME_SPECIAL_DISPLAY = True
HOME_BG = 31
HOME_FG = 15
PATH_BG = 237
PATH_FG = 250
CWD_FG = 254
SEPARATOR_FG = 244
READONLY_BG = 124
READONLY_FG = 254
SSH_BG = 166
SSH_FG = 254
REPO_CLEAN_BG = 148
REPO_CLEAN_FG = 0
REPO_DIRTY_BG = 161
REPO_DIRTY_FG = 15
JOBS_FG = 39
JOBS_BG = 238
CMD_PASSED_BG = 236
CMD_PASSED_FG = 15
CMD_FAILED_BG = 161
CMD_FAILED_FG = 15
SVN_CHANGES_BG = 148
SVN_CHANGES_FG = 22
GIT_AHEAD_BG = 240
GIT_AHEAD_FG = 250
GIT_BEHIND_BG = 240
GIT_BEHIND_FG = 250
GIT_STAGED_BG = 22
GIT_STAGED_FG = 15
GIT_NOTSTAGED_BG = 130
GIT_NOTSTAGED_FG = 15
GIT_UNTRACKED_BG = 52
GIT_UNTRACKED_FG = 15
GIT_CONFLICTED_BG = 9
GIT_CONFLICTED_FG = 15
GIT_STASH_BG = 221
GIT_STASH_FG = 0
VIRTUAL_ENV_BG = 35
VIRTUAL_ENV_FG = 00
BATTERY_NORMAL_BG = 22
BATTERY_NORMAL_FG = 7
BATTERY_LOW_BG = 196
BATTERY_LOW_FG = 7
AWS_PROFILE_FG = 39
AWS_PROFILE_BG = 238
TIME_FG = 250
TIME_BG = 238
CONST_FG = 15
CONST_BG = 0
class Color(DefaultColor):
pass
| true | true |
f7118a9b813f246228dc9f4c53befcf78bec63af | 327 | py | Python | quiz/permissions.py | raksa/django-quickstart | ff3ca34662b0adca4ffb29cfa8f871d861053bf3 | [
"MIT"
] | null | null | null | quiz/permissions.py | raksa/django-quickstart | ff3ca34662b0adca4ffb29cfa8f871d861053bf3 | [
"MIT"
] | 9 | 2019-12-04T23:11:06.000Z | 2022-02-10T11:54:05.000Z | quiz/permissions.py | raksa/django-quickstart | ff3ca34662b0adca4ffb29cfa8f871d861053bf3 | [
"MIT"
] | null | null | null | from rest_framework.permissions import BasePermission
class SuperAdmin(BasePermission):
    """DRF object-level permission granted only to authenticated superusers."""

    def has_object_permission(self, request, view, obj):
        # Reject anonymous/unauthenticated requests, then reduce the
        # decision to the superuser flag.
        requester = request.user
        if not requester or not requester.is_authenticated:
            return False
        return bool(requester.is_superuser)
| 21.8 | 56 | 0.663609 | from rest_framework.permissions import BasePermission
class SuperAdmin(BasePermission):
    """DRF object-level permission: only authenticated superusers pass."""

    def has_object_permission(self, request, view, obj):
        user = request.user
        # Anonymous or unauthenticated requests are always denied.
        if not (user and user.is_authenticated):
            return False
        if user.is_superuser:
            return True
        return False
| true | true |
f7118ac45d271ba608e997e84615b7db483463ed | 1,749 | py | Python | venv/lib/python3.6/site-packages/chatterbot/input/mailgun.py | HackBots1111/flask-server-bot | 683932802d689d21b0b8397acd3f072197dea208 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/chatterbot/input/mailgun.py | HackBots1111/flask-server-bot | 683932802d689d21b0b8397acd3f072197dea208 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/chatterbot/input/mailgun.py | HackBots1111/flask-server-bot | 683932802d689d21b0b8397acd3f072197dea208 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import datetime
from chatterbot.input import InputAdapter
from chatterbot.conversation import Statement
class Mailgun(InputAdapter):
    """
    Input adapter that reads the newest email stored by Mailgun.
    """

    def __init__(self, **kwargs):
        super(Mailgun, self).__init__(**kwargs)

        # Use the bot's name for the name of the sender
        self.name = kwargs.get('name')
        self.from_address = kwargs.get('mailgun_from_address')
        self.api_key = kwargs.get('mailgun_api_key')
        self.endpoint = kwargs.get('mailgun_api_endpoint')

    def get_email_stored_events(self):
        """Query the Mailgun events API for the latest stored-message event."""
        import requests

        start_time = datetime.datetime.now() - datetime.timedelta(1)
        query = {
            'begin': start_time.isoformat(),
            'ascending': 'yes',
            'limit': 1
        }

        return requests.get(
            '{}/events'.format(self.endpoint),
            auth=('api', self.api_key),
            params=query
        )

    def get_stored_email_urls(self):
        """Yield the storage URL of each stored-event item, when present."""
        events = self.get_email_stored_events().json()

        for item in events.get('items', []):
            if 'storage' in item:
                storage = item['storage']
                if 'url' in storage:
                    yield storage['url']

    def get_message(self, url):
        """Download one stored message from its Mailgun storage URL."""
        import requests

        return requests.get(url, auth=('api', self.api_key))

    def process_input(self, statement):
        """Build a Statement from the newest stored email's plain text."""
        stored_urls = list(self.get_stored_email_urls())
        response = self.get_message(stored_urls[0])

        email = response.json()
        return Statement(email.get('stripped-text'))
| 28.209677 | 68 | 0.56032 | from __future__ import unicode_literals
import datetime
from chatterbot.input import InputAdapter
from chatterbot.conversation import Statement
class Mailgun(InputAdapter):
    """Chatterbot input adapter that reads stored email from Mailgun."""

    def __init__(self, **kwargs):
        super(Mailgun, self).__init__(**kwargs)
        # Bot/sender identity and Mailgun API credentials from kwargs.
        self.name = kwargs.get('name')
        self.from_address = kwargs.get('mailgun_from_address')
        self.api_key = kwargs.get('mailgun_api_key')
        self.endpoint = kwargs.get('mailgun_api_endpoint')

    def get_email_stored_events(self):
        """Query the Mailgun events API for the most recent stored message."""
        import requests
        # Only look at events from the last 24 hours, oldest first, limit 1.
        yesterday = datetime.datetime.now() - datetime.timedelta(1)
        return requests.get(
            '{}/events'.format(self.endpoint),
            auth=('api', self.api_key),
            params={
                'begin': yesterday.isoformat(),
                'ascending': 'yes',
                'limit': 1
            }
        )

    def get_stored_email_urls(self):
        """Yield storage URLs found in the stored-event listing."""
        response = self.get_email_stored_events()
        data = response.json()
        for item in data.get('items', []):
            if 'storage' in item:
                if 'url' in item['storage']:
                    yield item['storage']['url']

    def get_message(self, url):
        """Fetch a single stored message from its storage URL."""
        import requests
        return requests.get(
            url,
            auth=('api', self.api_key)
        )

    def process_input(self, statement):
        """Return a Statement built from the first stored email's text."""
        # NOTE(review): raises IndexError if no stored emails are found.
        urls = self.get_stored_email_urls()
        url = list(urls)[0]
        response = self.get_message(url)
        message = response.json()
        text = message.get('stripped-text')
        return Statement(text)
| true | true |
f7118b05dced8094fd2666a79e6f6a3ed7eb88be | 1,363 | py | Python | linear_regression.py | kohjingyu/prob-stats | df396686b641079b5db93118b1b2373d79de7d7a | [
"MIT"
] | 3 | 2018-03-16T15:31:57.000Z | 2018-06-13T06:36:57.000Z | linear_regression.py | kohjingyu/prob-stats | df396686b641079b5db93118b1b2373d79de7d7a | [
"MIT"
] | null | null | null | linear_regression.py | kohjingyu/prob-stats | df396686b641079b5db93118b1b2373d79de7d7a | [
"MIT"
] | null | null | null | import matplotlib
import matplotlib.pyplot as plt

# Paired observations: x is the predictor, y the response.
x = [20, 23, 29, 27, 30, 34, 35, 37, 40, 43]
y = [1.32, 1.67, 2.17, 2.70, 2.75, 2.87, 3.65, 2.86, 3.61, 4.25]

n = len(x)
assert(n == len(y))

# Means
bar_x = sum(x) / n
bar_y = sum(y) / n

# Sums of squares about the means: S_xy (cross), S_xx and S_yy.
sxy = sum((xi - bar_x) * (yi - bar_y) for xi, yi in zip(x, y))
sxx = sum((xi - bar_x) ** 2 for xi in x)
syy = sum((yi - bar_y) ** 2 for yi in y)
print("S_xy = {0:5f}, S_xx = {1:5f}, S_yy = {2:5f}".format(sxy, sxx, syy))

# Least-squares point estimates for intercept \beta_0 and slope \beta_1
b1 = sxy / sxx
b0 = bar_y - b1 * bar_x

print("n = {0}".format(n))
print("\\bar{{x}} = {0:5f}".format(bar_x))
print("\\bar{{y}} = {0:5f}".format(bar_y))
print("Estimated regression line: y = {0:5f} + {1:5f} x".format(b0, b1))

# Plot the raw data and the fitted line, then save the figure.
fig = plt.figure()
ax = plt.subplot(111)
ax.plot(x, y)
# range() excludes its stop value, so use max(x) + 1 to draw the fitted
# line across the whole data range (fixes an off-by-one in the plot).
x_values = range(min(x), max(x) + 1)
ax.plot(x_values, [b0 + b1 * xi for xi in x_values])
fig.savefig("plot.png")

# error sum of squares
sse = sum((yi - (b0 + b1 * xi)) ** 2 for xi, yi in zip(x, y))
# total sum of squares
sst = sum(yi ** 2 for yi in y) - sum(y) ** 2 / n

# Unbiased estimate of the error variance (n - 2 degrees of freedom).
sigma_square = sse / (n - 2)

print("SSE: {0:5f}".format(sse))
print("SST: {0:5f}".format(sst))
print("\sigma^2 = {0:5f}".format(sigma_square))
print("\sigma = {0:5f}".format(sigma_square ** 0.5))
print("r^2 = {0:5f}".format(1 - sse / sst))
| 27.816327 | 74 | 0.581071 | import matplotlib
import matplotlib.pyplot as plt
# Paired observations: x is the predictor, y the response.
x = [20, 23, 29, 27, 30, 34, 35, 37, 40, 43]
y = [1.32, 1.67, 2.17, 2.70, 2.75, 2.87, 3.65, 2.86, 3.61, 4.25]
n = len(x)
assert(n == len(y))
# Sample means of x and y.
bar_x = sum(x) / n
bar_y = sum(y) / n
# Sums of squares about the means: S_xy (cross), S_xx and S_yy.
sxy = sum([(x[i] - bar_x) * (y[i] - bar_y) for i in range(n)])
sxx = sum([(x[i] - bar_x)**2 for i in range(n)])
syy = sum([(y[i] - bar_y)**2 for i in range(n)])
print("S_xy = {0:5f}, S_xx = {1:5f}, S_yy = {2:5f}".format(sxy ,sxx, syy))
# Least-squares point estimates: slope b1 and intercept b0.
b1 = sxy / sxx
b0 = bar_y - b1 * bar_x
print("n = {0}".format(n))
print("\\bar{{x}} = {0:5f}".format(bar_x))
print("\\bar{{y}} = {0:5f}".format(bar_y))
print("Estimated regression line: y = {0:5f} + {1:5f} x".format(b0, b1))
# Plot data and fitted line; save to plot.png.
fig = plt.figure()
ax = plt.subplot(111)
ax.plot(x, y)
# NOTE(review): range() excludes max(x), so the fitted line stops one
# unit short of the last data point.
x_values = range(min(x), max(x))
ax.plot(x_values, [b0 + b1 * xi for xi in x_values])
fig.savefig("plot.png")
# Error and total sums of squares; sigma^2 uses n - 2 degrees of freedom.
sse = sum([(y[i] - (b0 + b1 * x[i]))**2 for i in range(n)])
sst = sum([y[i]**2 for i in range(n)]) - sum(y)**2 / n
sigma_square = sse / (n - 2)
print("SSE: {0:5f}".format(sse))
print("SST: {0:5f}".format(sst))
print("\sigma^2 = {0:5f}".format(sigma_square))
print("\sigma = {0:5f}".format(sigma_square ** 0.5))
print("r^2 = {0:5f}".format(1 - sse / sst))
| true | true |
f7118bb7f85eb63551ff192af78a4fa82b04fb29 | 10,895 | py | Python | sensor.py | sasgoose/sureha | 47558ba2a000a7b65fa8b2e9fc91d5ce3e0c8291 | [
"Apache-2.0"
] | null | null | null | sensor.py | sasgoose/sureha | 47558ba2a000a7b65fa8b2e9fc91d5ce3e0c8291 | [
"Apache-2.0"
] | null | null | null | sensor.py | sasgoose/sureha | 47558ba2a000a7b65fa8b2e9fc91d5ce3e0c8291 | [
"Apache-2.0"
] | null | null | null | """Support for Sure PetCare Flaps/Pets sensors."""
from __future__ import annotations
from typing import Any, cast
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_VOLTAGE,
DEVICE_CLASS_BATTERY,
MASS_GRAMS,
PERCENTAGE,
VOLUME_MILLILITERS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from surepy.entities import SurepyEntity
from surepy.entities.devices import (
Feeder as SureFeeder,
FeederBowl as SureFeederBowl,
Felaqua as SureFelaqua,
Flap as SureFlap,
SurepyDevice,
)
from surepy.enums import EntityType, LockState
# pylint: disable=relative-beyond-top-level
from . import SurePetcareAPI
from .const import ATTR_VOLTAGE_FULL, ATTR_VOLTAGE_LOW, DOMAIN, SPC, SURE_MANUFACTURER
PARALLEL_UPDATES = 2
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigEntry,
    async_add_entities: Any,
    discovery_info: Any = None,
) -> None:
    """Set up the Sure PetCare sensor platform (YAML path) by delegating
    to the config-entry setup."""
    await async_setup_entry(hass, config, async_add_entities)
async def async_setup_entry(
    hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: Any
) -> None:
    """Set up config entry Sure PetCare Flaps sensors."""

    entities: list[Flap | Felaqua | Feeder | FeederBowl | Battery] = []

    spc: SurePetcareAPI = hass.data[DOMAIN][SPC]

    for surepy_entity in spc.coordinator.data.values():

        # cat/pet flaps get a lock-state sensor
        if surepy_entity.type in [
            EntityType.CAT_FLAP,
            EntityType.PET_FLAP,
        ]:
            entities.append(Flap(spc.coordinator, surepy_entity.id, spc))

        # Felaqua fountains get a remaining-water sensor
        elif surepy_entity.type == EntityType.FELAQUA:
            entities.append(Felaqua(spc.coordinator, surepy_entity.id, spc))

        # feeders get one sensor per bowl plus a total-weight sensor
        elif surepy_entity.type == EntityType.FEEDER:
            for bowl in surepy_entity.bowls.values():
                entities.append(
                    FeederBowl(spc.coordinator, surepy_entity.id, spc, bowl.raw_data())
                )
            entities.append(Feeder(spc.coordinator, surepy_entity.id, spc))

        # every battery-powered device type also gets a battery sensor,
        # scaled between the user-configured full/low voltages
        if surepy_entity.type in [
            EntityType.CAT_FLAP,
            EntityType.PET_FLAP,
            EntityType.FEEDER,
            EntityType.FELAQUA,
        ]:
            voltage_batteries_full = cast(
                float, config_entry.options.get(ATTR_VOLTAGE_FULL)
            )
            voltage_batteries_low = cast(
                float, config_entry.options.get(ATTR_VOLTAGE_LOW)
            )

            entities.append(
                Battery(
                    spc.coordinator,
                    surepy_entity.id,
                    spc,
                    voltage_full=voltage_batteries_full,
                    voltage_low=voltage_batteries_low,
                )
            )

    async_add_entities(entities)
class SurePetcareSensor(CoordinatorEntity, SensorEntity):
    """Base sensor implementation for Sure Petcare entities (state comes
    from the shared DataUpdateCoordinator, so the entity never polls)."""

    _attr_should_poll = False

    def __init__(self, coordinator, _id: int, spc: SurePetcareAPI):
        """Initialize a Sure Petcare sensor."""
        super().__init__(coordinator)

        self._id = _id
        self._spc: SurePetcareAPI = spc

        self._coordinator = coordinator

        self._surepy_entity: SurepyEntity = self._coordinator.data[_id]
        # Raw "status" payload; an empty/missing status marks the entity
        # unavailable below.
        self._state: dict[str, Any] = self._surepy_entity.raw_data()["status"]

        self._attr_available = bool(self._state)
        # Unique ID scheme: "<household_id>-<entity_id>".
        self._attr_unique_id = f"{self._surepy_entity.household_id}-{self._id}"

        self._attr_extra_state_attributes = (
            {**self._surepy_entity.raw_data()} if self._state else {}
        )

        # e.g. "Pet Flap Frontdoor".
        self._attr_name: str = (
            f"{self._surepy_entity.type.name.replace('_', ' ').title()} "
            f"{self._surepy_entity.name.capitalize()}"
        )

    @property
    def device_info(self):
        """Return device-registry info assembled from the raw payload."""

        device = {}

        try:

            # Model string: device type plus the first available hardware
            # identifier (serial number, MAC address, or tag id).
            model = f"{self._surepy_entity.type.name.replace('_', ' ').title()}"
            if serial := self._surepy_entity.raw_data().get("serial_number"):
                model = f"{model} ({serial})"
            elif mac_address := self._surepy_entity.raw_data().get("mac_address"):
                model = f"{model} ({mac_address})"
            elif tag_id := self._surepy_entity.raw_data().get("tag_id"):
                model = f"{model} ({tag_id})"

            device = {
                "identifiers": {(DOMAIN, self._id)},
                "name": self._surepy_entity.name.capitalize(),
                "manufacturer": SURE_MANUFACTURER,
                "model": model,
            }

            if self._state:
                versions = self._state.get("version", {})

                # Single-firmware devices report under "device"; flap-style
                # payloads report separate lcd/rf firmware versions instead.
                if dev_fw_version := versions.get("device", {}).get("firmware"):
                    device["sw_version"] = dev_fw_version

                if (lcd_version := versions.get("lcd", {})) and (
                    rf_version := versions.get("rf", {})
                ):
                    device["sw_version"] = (
                        f"lcd: {lcd_version.get('version', lcd_version)['firmware']} | "
                        f"fw: {rf_version.get('version', rf_version)['firmware']}"
                    )

        except AttributeError:
            # Raw payloads can miss attributes entirely; return whatever was
            # assembled so far rather than failing entity setup.
            pass

        return device
class Flap(SurePetcareSensor):
    """Sure Petcare Flap: reports the flap's lock mode as its state."""

    def __init__(self, coordinator, _id: int, spc: SurePetcareAPI) -> None:
        super().__init__(coordinator, _id, spc)

        self._surepy_entity: SureFlap

        self._attr_entity_picture = self._surepy_entity.icon
        self._attr_unit_of_measurement = None

        if self._state:
            self._attr_extra_state_attributes = {
                "learn_mode": bool(self._state["learn_mode"]),
                **self._surepy_entity.raw_data(),
            }

            if locking := self._state.get("locking"):
                self._attr_state = LockState(locking["mode"]).name.casefold()

    @property
    def state(self) -> str | None:
        """Return the flap's lock mode (lower-cased LockState name),
        or None when no status payload is available."""
        if (
            state := cast(SureFlap, self._coordinator.data[self._id])
            .raw_data()
            .get("status")
        ):
            return LockState(state["locking"]["mode"]).name.casefold()
class Felaqua(SurePetcareSensor):
    """Sure Petcare Felaqua water fountain."""

    def __init__(self, coordinator, _id: int, spc: SurePetcareAPI):
        super().__init__(coordinator, _id, spc)

        self._surepy_entity: SureFelaqua
        self._attr_entity_picture = self._surepy_entity.icon
        self._attr_unit_of_measurement = VOLUME_MILLILITERS

    @property
    def state(self) -> float | None:
        """Return the remaining water in milliliters (None when unknown)."""
        if felaqua := cast(SureFelaqua, self._coordinator.data[self._id]):
            return int(felaqua.water_remaining) if felaqua.water_remaining else None
class FeederBowl(SurePetcareSensor):
    """Sure Petcare Feeder Bowl: weight of food in a single bowl."""

    def __init__(
        self,
        coordinator,
        _id: int,
        spc: SurePetcareAPI,
        bowl_data: dict[str, int | str],
    ):
        """Initialize a Bowl sensor."""
        super().__init__(coordinator, _id, spc)

        self.feeder_id = _id
        self.bowl_id = int(bowl_data["index"])

        # Composite id: feeder id with the bowl index appended keeps the
        # per-bowl sensors unique while remembering the parent feeder.
        self._id = int(f"{_id}{str(self.bowl_id)}")
        self._spc: SurePetcareAPI = spc

        self._surepy_feeder_entity: SurepyEntity = self._coordinator.data[_id]
        self._surepy_entity: SureFeederBowl = self._coordinator.data[_id].bowls[
            self.bowl_id
        ]
        self._state: dict[str, Any] = bowl_data

        # https://github.com/PyCQA/pylint/issues/2062
        # pylint: disable=no-member
        self._attr_name = (
            f"{EntityType.FEEDER.name.replace('_', ' ').title()} "
            f"{self._surepy_entity.name.capitalize()}"
        )
        self._attr_icon = "mdi:bowl"
        self._attr_state = int(self._surepy_entity.weight)
        self._attr_unique_id = (
            f"{self._surepy_feeder_entity.household_id}-{self.feeder_id}-{self.bowl_id}"
        )
        self._attr_unit_of_measurement = MASS_GRAMS

    @property
    def state(self) -> float | None:
        """Return the food weight currently in this bowl in grams
        (None when the weight is missing or non-positive)."""
        if (feeder := cast(SureFeeder, self._coordinator.data[self.feeder_id])) and (
            weight := feeder.bowls[self.bowl_id].weight
        ):
            return int(weight) if weight and weight > 0 else None
class Feeder(SurePetcareSensor):
    """Sure Petcare Feeder: total food weight across all bowls."""

    def __init__(self, coordinator, _id: int, spc: SurePetcareAPI):
        super().__init__(coordinator, _id, spc)

        self._surepy_entity: SureFeeder
        self._attr_entity_picture = self._surepy_entity.icon
        self._attr_unit_of_measurement = MASS_GRAMS

    @property
    def state(self) -> float | None:
        """Return the total remaining food in grams (None when unknown)."""
        if feeder := cast(SureFeeder, self._coordinator.data[self._id]):
            return int(feeder.total_weight) if feeder.total_weight else None
class Battery(SurePetcareSensor):
    """Battery-level sensor for a battery-powered Sure Petcare device."""

    def __init__(
        self,
        coordinator,
        _id: int,
        spc: SurePetcareAPI,
        voltage_full: float,
        voltage_low: float,
    ):
        """Initialize the sensor with the user-configured voltage range
        used to scale raw voltage into a percentage."""
        super().__init__(coordinator, _id, spc)

        self._surepy_entity: SurepyDevice
        self._attr_name = f"{self._attr_name} Battery Level"
        self.voltage_low = voltage_low
        self.voltage_full = voltage_full

        self._attr_unit_of_measurement = PERCENTAGE
        self._attr_device_class = DEVICE_CLASS_BATTERY

        self._attr_unique_id = (
            f"{self._surepy_entity.household_id}-{self._surepy_entity.id}-battery"
        )

    @property
    def state(self) -> int | None:
        """Return battery level in percent."""
        if battery := cast(SurepyDevice, self._coordinator.data[self._id]):
            self._surepy_entity = battery
            battery_level = battery.calculate_battery_level(
                voltage_full=self.voltage_full, voltage_low=self.voltage_low
            )

            # return battery level between 0 and 100
            return battery_level

    @property
    def extra_state_attributes(self) -> dict[str, Any]:
        """Return the additional attrs."""

        attrs = {}

        if (device := cast(SurepyDevice, self._coordinator.data[self._id])) and (
            state := device.raw_data().get("status")
        ):

            self._surepy_entity = device
            voltage = float(state["battery"])

            attrs = {
                "battery_level": device.battery_level,
                ATTR_VOLTAGE: f"{voltage:.2f}",
                # divided by 4 — presumably four cells per device; confirm
                f"{ATTR_VOLTAGE}_per_battery": f"{voltage / 4:.2f}",
            }

        return attrs
| 31.397695 | 88 | 0.608261 | from __future__ import annotations
from typing import Any, cast
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_VOLTAGE,
DEVICE_CLASS_BATTERY,
MASS_GRAMS,
PERCENTAGE,
VOLUME_MILLILITERS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from surepy.entities import SurepyEntity
from surepy.entities.devices import (
Feeder as SureFeeder,
FeederBowl as SureFeederBowl,
Felaqua as SureFelaqua,
Flap as SureFlap,
SurepyDevice,
)
from surepy.enums import EntityType, LockState
from . import SurePetcareAPI
from .const import ATTR_VOLTAGE_FULL, ATTR_VOLTAGE_LOW, DOMAIN, SPC, SURE_MANUFACTURER
PARALLEL_UPDATES = 2
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigEntry,
async_add_entities: Any,
discovery_info: Any = None,
) -> None:
await async_setup_entry(hass, config, async_add_entities)
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: Any
) -> None:
entities: list[Flap | Felaqua | Feeder | FeederBowl | Battery] = []
spc: SurePetcareAPI = hass.data[DOMAIN][SPC]
for surepy_entity in spc.coordinator.data.values():
if surepy_entity.type in [
EntityType.CAT_FLAP,
EntityType.PET_FLAP,
]:
entities.append(Flap(spc.coordinator, surepy_entity.id, spc))
elif surepy_entity.type == EntityType.FELAQUA:
entities.append(Felaqua(spc.coordinator, surepy_entity.id, spc))
elif surepy_entity.type == EntityType.FEEDER:
for bowl in surepy_entity.bowls.values():
entities.append(
FeederBowl(spc.coordinator, surepy_entity.id, spc, bowl.raw_data())
)
entities.append(Feeder(spc.coordinator, surepy_entity.id, spc))
if surepy_entity.type in [
EntityType.CAT_FLAP,
EntityType.PET_FLAP,
EntityType.FEEDER,
EntityType.FELAQUA,
]:
voltage_batteries_full = cast(
float, config_entry.options.get(ATTR_VOLTAGE_FULL)
)
voltage_batteries_low = cast(
float, config_entry.options.get(ATTR_VOLTAGE_LOW)
)
entities.append(
Battery(
spc.coordinator,
surepy_entity.id,
spc,
voltage_full=voltage_batteries_full,
voltage_low=voltage_batteries_low,
)
)
async_add_entities(entities)
class SurePetcareSensor(CoordinatorEntity, SensorEntity):
    """Common base for all Sure Petcare sensors.

    Snapshots the raw device status from the shared data-update coordinator
    and derives entity name, unique id, availability and state attributes.
    """
    # State is pushed via the coordinator, so Home Assistant must not poll.
    _attr_should_poll = False
    def __init__(self, coordinator, _id: int, spc: SurePetcareAPI):
        """Store handles and snapshot the current device status."""
        super().__init__(coordinator)
        self._id = _id
        self._spc: SurePetcareAPI = spc
        self._coordinator = coordinator
        self._surepy_entity: SurepyEntity = self._coordinator.data[_id]
        # Raw "status" sub-dict; an empty/falsy status marks the entity unavailable.
        self._state: dict[str, Any] = self._surepy_entity.raw_data()["status"]
        self._attr_available = bool(self._state)
        self._attr_unique_id = f"{self._surepy_entity.household_id}-{self._id}"
        self._attr_extra_state_attributes = (
            {**self._surepy_entity.raw_data()} if self._state else {}
        )
        # e.g. "Cat Flap Kitchen" (device type + user-given name)
        self._attr_name: str = (
            f"{self._surepy_entity.type.name.replace('_', ' ').title()} "
            f"{self._surepy_entity.name.capitalize()}"
        )
    @property
    def device_info(self):
        """Return the device-registry entry for the physical device."""
        device = {}
        try:
            model = f"{self._surepy_entity.type.name.replace('_', ' ').title()}"
            # Prefer serial number, then MAC address, then tag id as model suffix.
            if serial := self._surepy_entity.raw_data().get("serial_number"):
                model = f"{model} ({serial})"
            elif mac_address := self._surepy_entity.raw_data().get("mac_address"):
                model = f"{model} ({mac_address})"
            elif tag_id := self._surepy_entity.raw_data().get("tag_id"):
                model = f"{model} ({tag_id})"
            device = {
                "identifiers": {(DOMAIN, self._id)},
                "name": self._surepy_entity.name.capitalize(),
                "manufacturer": SURE_MANUFACTURER,
                "model": model,
            }
            if self._state:
                versions = self._state.get("version", {})
                if dev_fw_version := versions.get("device", {}).get("firmware"):
                    device["sw_version"] = dev_fw_version
                # NOTE(review): when both lcd and rf versions are present this
                # intentionally(?) overwrites the device firmware set above.
                if (lcd_version := versions.get("lcd", {})) and (
                    rf_version := versions.get("rf", {})
                ):
                    device["sw_version"] = (
                        f"lcd: {lcd_version.get('version', lcd_version)['firmware']} | "
                        f"fw: {rf_version.get('version', rf_version)['firmware']}"
                    )
        except AttributeError:
            # Devices without the expected raw-data shape yield an empty entry.
            pass
        return device
class Flap(SurePetcareSensor):
    """Sensor exposing the locking mode of a cat/pet flap."""

    def __init__(self, coordinator, _id: int, spc: SurePetcareAPI) -> None:
        """Set picture, attributes and the initial lock state."""
        super().__init__(coordinator, _id, spc)

        self._surepy_entity: SureFlap
        self._attr_entity_picture = self._surepy_entity.icon
        self._attr_unit_of_measurement = None

        if self._state:
            # Keep the merge order: raw_data() entries win over "learn_mode".
            self._attr_extra_state_attributes = {
                "learn_mode": bool(self._state["learn_mode"]),
                **self._surepy_entity.raw_data(),
            }

            locking = self._state.get("locking")
            if locking:
                self._attr_state = LockState(locking["mode"]).name.casefold()

    @property
    def state(self) -> str | None:
        """Return the lower-cased LockState name, or None when no status."""
        flap = cast(SureFlap, self._coordinator.data[self._id])
        status = flap.raw_data().get("status")
        if status:
            return LockState(status["locking"]["mode"]).name.casefold()
        return None
class Felaqua(SurePetcareSensor):
    """Sensor for the remaining water (ml) in a Felaqua dispenser."""

    def __init__(self, coordinator, _id: int, spc: SurePetcareAPI):
        super().__init__(coordinator, _id, spc)
        self._surepy_entity: SureFelaqua
        self._attr_entity_picture = self._surepy_entity.icon
        self._attr_unit_of_measurement = VOLUME_MILLILITERS

    @property
    def state(self) -> float | None:
        """Return remaining water in whole millilitres, or None when unknown."""
        felaqua = cast(SureFelaqua, self._coordinator.data[self._id])
        if not felaqua:
            return None
        remaining = felaqua.water_remaining
        return int(remaining) if remaining else None
class FeederBowl(SurePetcareSensor):
    """Sensor for the current weight (g) of a single bowl in a feeder."""
    def __init__(
        self,
        coordinator,
        _id: int,
        spc: SurePetcareAPI,
        bowl_data: dict[str, int | str],
    ):
        """Initialise a bowl sensor.

        `_id` is the parent feeder's id; `bowl_data` is the raw per-bowl
        dict and must contain an "index" key identifying the bowl.
        """
        super().__init__(coordinator, _id, spc)
        self.feeder_id = _id
        self.bowl_id = int(bowl_data["index"])
        # Synthetic entity id: the feeder id with the bowl index appended
        # as a decimal suffix (e.g. feeder 123, bowl 0 -> 1230).
        self._id = int(f"{_id}{str(self.bowl_id)}")
        self._spc: SurePetcareAPI = spc
        self._surepy_feeder_entity: SurepyEntity = self._coordinator.data[_id]
        self._surepy_entity: SureFeederBowl = self._coordinator.data[_id].bowls[
            self.bowl_id
        ]
        self._state: dict[str, Any] = bowl_data
        # Override the base-class name/unique id, which were derived from the
        # parent feeder entity rather than this bowl.
        self._attr_name = (
            f"{EntityType.FEEDER.name.replace('_', ' ').title()} "
            f"{self._surepy_entity.name.capitalize()}"
        )
        self._attr_icon = "mdi:bowl"
        self._attr_state = int(self._surepy_entity.weight)
        self._attr_unique_id = (
            f"{self._surepy_feeder_entity.household_id}-{self.feeder_id}-{self.bowl_id}"
        )
        self._attr_unit_of_measurement = MASS_GRAMS
    @property
    def state(self) -> float | None:
        """Return the bowl weight in grams; None for missing or negative readings."""
        if (feeder := cast(SureFeeder, self._coordinator.data[self.feeder_id])) and (
            weight := feeder.bowls[self.bowl_id].weight
        ):
            return int(weight) if weight and weight > 0 else None
class Feeder(SurePetcareSensor):
    """Sensor for the total food weight (g) currently in a feeder."""

    def __init__(self, coordinator, _id: int, spc: SurePetcareAPI):
        super().__init__(coordinator, _id, spc)
        self._surepy_entity: SureFeeder
        self._attr_entity_picture = self._surepy_entity.icon
        self._attr_unit_of_measurement = MASS_GRAMS

    @property
    def state(self) -> float | None:
        """Return the total weight across all bowls in grams, or None."""
        feeder = cast(SureFeeder, self._coordinator.data[self._id])
        if not feeder:
            return None
        total = feeder.total_weight
        return int(total) if total else None
class Battery(SurePetcareSensor):
    """Battery-level sensor (percent) for a battery-powered Sure Petcare device."""
    def __init__(
        self,
        coordinator,
        _id: int,
        spc: SurePetcareAPI,
        voltage_full: float,
        voltage_low: float,
    ):
        """Initialise the sensor.

        voltage_full / voltage_low are the user-configured thresholds used
        to map the measured pack voltage onto a 0-100 % scale.
        """
        super().__init__(coordinator, _id, spc)
        self._surepy_entity: SurepyDevice
        self._attr_name = f"{self._attr_name} Battery Level"
        self.voltage_low = voltage_low
        self.voltage_full = voltage_full
        self._attr_unit_of_measurement = PERCENTAGE
        self._attr_device_class = DEVICE_CLASS_BATTERY
        self._attr_unique_id = (
            f"{self._surepy_entity.household_id}-{self._surepy_entity.id}-battery"
        )
    @property
    def state(self) -> int | None:
        """Return the battery level in percent (implicitly None when unavailable)."""
        if battery := cast(SurepyDevice, self._coordinator.data[self._id]):
            # Refresh the cached entity so attribute reads stay in sync.
            self._surepy_entity = battery
            battery_level = battery.calculate_battery_level(
                voltage_full=self.voltage_full, voltage_low=self.voltage_low
            )
            return battery_level
    @property
    def extra_state_attributes(self) -> dict[str, Any]:
        """Expose the raw pack voltage alongside the battery level."""
        attrs = {}
        if (device := cast(SurepyDevice, self._coordinator.data[self._id])) and (
            state := device.raw_data().get("status")
        ):
            self._surepy_entity = device
            voltage = float(state["battery"])
            attrs = {
                "battery_level": device.battery_level,
                ATTR_VOLTAGE: f"{voltage:.2f}",
                # Divides by 4: assumes a 4-cell battery pack -- TODO confirm
                # this holds for every supported device type.
                f"{ATTR_VOLTAGE}_per_battery": f"{voltage / 4:.2f}",
            }
        return attrs
| true | true |
f7118c03a435f2a29e604414e5c30c3bbf8bfdb0 | 22,879 | py | Python | toc_trends_analysis.py | JamesSample/icpw | 47562f601fc8fe23720267d083dabc540889565e | [
"MIT"
] | 1 | 2020-05-06T21:18:33.000Z | 2020-05-06T21:18:33.000Z | toc_trends_analysis.py | JamesSample/icpw | 47562f601fc8fe23720267d083dabc540889565e | [
"MIT"
] | 1 | 2020-02-05T16:50:23.000Z | 2020-03-26T16:16:37.000Z | toc_trends_analysis.py | JamesSample/icpw | 47562f601fc8fe23720267d083dabc540889565e | [
"MIT"
] | null | null | null | #------------------------------------------------------------------------------
# Name: toc_trends_analysis.py
# Purpose: Analyse RESA2 data for trends.
#
# Author: James Sample
#
# Created: Fri Jul 15 11:35:12 2016
# Copyright: (c) James Sample and NIVA
# Licence:
#------------------------------------------------------------------------------
""" Tore has previously written code to perform trend analyses on the data in
RESA2. I haven't been able to find the code, but it appears to shift data
between RESA2, Excel and Access, which seems a bit messy.
In the notebook updated_toc_trends_analysis.ipynb, I tested some code which
refactors all the analysis into Python, interfacing directly with the
database and returning results as dataframes. This seems to have worked
well.
The code below takes the main functions from this notebook and tidies them
up a bit. This file can then be imported into new notebooks, which should
make it easy to re-run trend analyses on different datasets in the future.
"""
def mk_test(x, stn_id, par, alpha=0.05):
    """ Mann-Kendall test for a monotonic trend, using the normal
        approximation for significance (adapted from Sat Kumar Tomer's
        ambhas.stats). Reliable only for series of >= 10 values.
    Args:
        x:      1D array of data
        stn_id: Station ID (used only in the warning message)
        par:    Parameter name (used only in the warning message)
        alpha:  Significance level
    Returns:
        (var_s, s, z, p, trend): variance of the test statistic, the M-K
        statistic S, the normalised statistic Z, the two-sided p-value and
        the trend verdict ('increasing', 'decreasing' or 'no trend').
    """
    import numpy as np
    from scipy.stats import norm
    n = len(x)
    if n < 10:
        print (' Data series for %s at site %s has fewer than 10 non-null values. '
               'Significance estimates may be unreliable.' % (par, int(stn_id)))
    # Test statistic S: sum of pairwise signs over all ordered pairs k < j
    s = 0
    for j in range(1, n):
        for k in range(j):
            s += np.sign(x[j] - x[k])
    # Variance of S, applying the tie correction when values repeat
    unique_vals = np.unique(x)
    if unique_vals.size == n:
        # No ties
        var_s = (n * (n - 1) * (2 * n + 5)) / 18.
    else:
        tie_counts = np.array([np.sum(x == v) for v in unique_vals],
                              dtype=float)
        var_s = (n * (n - 1) * (2 * n + 5)
                 - np.sum(tie_counts * (tie_counts - 1)
                          * (2 * tie_counts + 5))) / 18.
    # Continuity-corrected normalised statistic
    if s > 0:
        z = (s - 1) / np.sqrt(var_s)
    elif s < 0:
        z = (s + 1) / np.sqrt(var_s)
    elif s == 0:
        z = 0
    else:
        z = np.nan
    # Two-sided p-value and significance at the requested level
    p = 2 * (1 - norm.cdf(abs(z)))
    significant = abs(z) > norm.ppf(1 - alpha / 2.)
    if np.isnan(z):
        trend = np.nan
    elif significant and (z < 0):
        trend = 'decreasing'
    elif significant and (z > 0):
        trend = 'increasing'
    else:
        trend = 'no trend'
    return var_s, s, z, p, trend
def wc_stats(raw_df, st_yr=None, end_yr=None, plot=False, fold=None):
    """ Calculate key statistics for the TOC trends analysis:
        'station_id', 'par_id', 'period', 'non_missing', 'n_start', 'n_end',
        'mean', 'median', 'std_dev', 'mk_stat', 'norm_mk_stat', 'mk_p_val',
        'mk_std_dev', 'trend', 'sen_slp'
    Args:
        raw_df: Dataframe with annual data for a single station. Columns must
                be: [STATION_ID, YEAR, par1, par2, ... parn]
        st_yr:  First year to include in analysis. Pass None to start
                at the beginning of the series
        end_yr: Last year to include in analysis. Pass None to include
                data up to the end of the series
        plot:   Whether to generate a PNG plot of the Sen's slope
                regression
        fold:   Folder in which to save PNGs if plot=True
    Returns:
        df of key statistics (one row per parameter), or None if there is
        no data within the period of interest.
    """
    import numpy as np, pandas as pd
    import os
    from scipy.stats import theilslopes
    # Checking
    df = raw_df.copy()
    assert list(df.columns[:2]) == ['STATION_ID', 'YEAR'], 'Columns must be: [STATION_ID, YEAR, par1, par2, ... parn]'
    assert len(df['STATION_ID'].unique()) == 1, 'You can only process data for one site at a time'
    # Get just the period of interest
    if st_yr:
        df = df.query('YEAR >= @st_yr')
    if end_yr:
        df = df.query('YEAR <= @end_yr')
    # Only continue if data
    if len(df) > 0:
        # Get stn_id
        stn_id = df['STATION_ID'].iloc[0]
        # Tidy up df
        df.index = df['YEAR']
        df.sort_index(inplace=True)
        del df['STATION_ID'], df['YEAR']
        # Period actually spanned by the data (used for reporting and for
        # plot file names).
        # BUG FIX: a previous version re-bound the st_yr/end_yr parameters
        # themselves inside the parameter loop below, which silently changed
        # the 'n_start'/'n_end' windows for every parameter after the first.
        data_st_yr = df.index.min()
        data_end_yr = df.index.max()
        period = '%s-%s' % (int(data_st_yr), int(data_end_yr))
        # Container for results
        data_dict = {'station_id':[],
                     'par_id':[],
                     'non_missing':[],
                     'n_start':[],
                     'n_end':[],
                     'median':[],
                     'mean':[],
                     'std_dev':[],
                     'period':[],
                     'mk_std_dev':[],
                     'mk_stat':[],
                     'norm_mk_stat':[],
                     'mk_p_val':[],
                     'trend':[],
                     'sen_slp':[]}
        # Loop over pars
        for col in df.columns:
            # 1. Station ID
            data_dict['station_id'].append(stn_id)
            # 2. Par ID
            data_dict['par_id'].append(col)
            # 3. Non-missing
            data_dict['non_missing'].append(pd.notnull(df[col]).sum())
            # 4. Number of non-nulls near the start of the series
            if st_yr:
                # Non-nulls within 5 years of the user-specified start year
                data_dict['n_start'].append(pd.notnull(df[df.index<(st_yr+5)][col]).sum())
            else:
                # Non-nulls in the first 5 years of the record
                data_dict['n_start'].append(pd.notnull(df[col].head(5)).sum())
            # 5. Number of non-nulls near the end of the series
            if end_yr:
                # Non-nulls within 5 years of the user-specified end year
                data_dict['n_end'].append(pd.notnull(df[df.index>(end_yr-5)][col]).sum())
            else:
                # Non-nulls in the last 5 years of the record
                data_dict['n_end'].append(pd.notnull(df[col].tail(5)).sum())
            # 6. Median
            data_dict['median'].append(df[col].median())
            # 7. Mean
            data_dict['mean'].append(df[col].mean())
            # 8. Std dev
            data_dict['std_dev'].append(df[col].std())
            # 9. Period (identical for every parameter at this station)
            data_dict['period'].append(period)
            # 10. M-K test on the non-missing values
            mk_df = df[[col]].dropna(how='any')
            # Only run stats if more than 1 valid value
            if len(mk_df) > 1:
                var_s, s, z, p, trend = mk_test(mk_df[col].values, stn_id, col)
                data_dict['mk_std_dev'].append(np.sqrt(var_s))
                data_dict['mk_stat'].append(s)
                data_dict['norm_mk_stat'].append(z)
                data_dict['mk_p_val'].append(p)
                data_dict['trend'].append(trend)
                # 11. Sen's slope. theilslopes returns:
                # (median slope, median intercept, 95% CI lower bound,
                #  95% CI upper bound)
                sslp, icpt, lb, ub = theilslopes(mk_df[col].values,
                                                 mk_df.index, 0.95)
                data_dict['sen_slp'].append(sslp)
                # 12. Plot if desired
                if plot:
                    # Plotting libraries are imported lazily so the purely
                    # numeric path carries no matplotlib/seaborn dependency
                    import matplotlib.pyplot as plt
                    import seaborn as sn
                    sn.set_context('poster')
                    fig = plt.figure()
                    plt.plot(mk_df.index, mk_df[col].values, 'bo-')
                    plt.plot(mk_df.index, mk_df.index*sslp + icpt, 'k-')
                    if col in ('Al', 'TOC'):
                        plt.ylabel('%s (mg/l)' % col, fontsize=24)
                    else:
                        plt.ylabel('%s (ueq/l)' % col, fontsize=24)
                    plt.title('%s at station %s' % (col, int(stn_id)),
                              fontsize=32)
                    plt.tight_layout()
                    # Save fig
                    out_path = os.path.join(fold,
                                            '%s_%s_%s-%s.png' % (int(stn_id), col,
                                                                 data_st_yr, data_end_yr))
                    plt.savefig(out_path, dpi=150)
                    plt.close()
            # Otherwise all NaN
            else:
                for par in ['mk_std_dev', 'mk_stat', 'norm_mk_stat',
                            'mk_p_val', 'trend', 'sen_slp']:
                    data_dict[par].append(np.nan)
        # Build to df
        res_df = pd.DataFrame(data_dict)
        res_df = res_df[['station_id', 'par_id', 'period', 'non_missing', 'n_start',
                         'n_end', 'mean', 'median', 'std_dev', 'mk_stat',
                         'norm_mk_stat', 'mk_p_val', 'mk_std_dev', 'trend', 'sen_slp']]
        return res_df
def read_resa2(proj_list, engine):
    """ Reads raw data for the specified projects from RESA2. Extracts only
        the parameters required for the trends analysis and calculates
        aggregated annual values by taking medians of near-surface samples.
    Args:
        proj_list: List of RESA2 project names for which to extract data
        engine:    SQLAlchemy 'engine' object already connected to RESA2
    Returns:
        [stn_df, wc_df, dup_df]. Dataframe of stations; dataframe of annual
        water chemistry values (indexed by station_id, year); dataframe of
        duplicated records for manual checking
    """
    import pandas as pd
    # NOTE(review): SQL is assembled by %-interpolation rather than bound
    # parameters. Acceptable only because proj_list comes from trusted
    # internal code, never from user input.
    # Get par IDs etc. for pars of interest
    par_list = ['SO4', 'Cl', 'Ca', 'Mg', 'NO3-N', 'TOC',
                'Al', 'K', 'Na', 'NH4-N', 'pH']
    sql = ('SELECT * FROM resa2.parameter_definitions '
           'WHERE name in %s' % str(tuple(par_list)))
    par_df = pd.read_sql_query(sql, engine)
    # Get stations for a specified list of projects.
    # A 1-tuple renders as "(x,)", which is invalid SQL, hence the special
    # case for single-project lists here (and for single stations below).
    if len(proj_list) == 1:
        sql = ("SELECT station_id, station_code "
               "FROM resa2.stations "
               "WHERE station_id IN (SELECT UNIQUE(station_id) "
               "FROM resa2.projects_stations "
               "WHERE project_id IN (SELECT project_id "
               "FROM resa2.projects "
               "WHERE project_name = '%s'))"
               % proj_list[0])
    else:
        sql = ('SELECT station_id, station_code '
               'FROM resa2.stations '
               'WHERE station_id IN (SELECT UNIQUE(station_id) '
               'FROM resa2.projects_stations '
               'WHERE project_id IN (SELECT project_id '
               'FROM resa2.projects '
               'WHERE project_name IN %s))'
               % str(tuple(proj_list)))
    stn_df = pd.read_sql(sql, engine)
    # Get results for ALL pars for these sites
    if len(stn_df)==1:
        sql = ("SELECT * FROM resa2.water_chemistry_values2 "
               "WHERE sample_id IN (SELECT water_sample_id FROM resa2.water_samples "
               "WHERE station_id = %s)"
               % stn_df['station_id'].iloc[0])
    else:
        sql = ("SELECT * FROM resa2.water_chemistry_values2 "
               "WHERE sample_id IN (SELECT water_sample_id FROM resa2.water_samples "
               "WHERE station_id IN %s)"
               % str(tuple(stn_df['station_id'].values)))
    wc_df = pd.read_sql_query(sql, engine)
    # Get all sample dates for sites
    if len(stn_df)==1:
        sql = ("SELECT water_sample_id, station_id, sample_date, depth1, depth2 "
               "FROM resa2.water_samples "
               "WHERE station_id = %s "
               % stn_df['station_id'].iloc[0])
    else:
        sql = ("SELECT water_sample_id, station_id, sample_date, depth1, depth2 "
               "FROM resa2.water_samples "
               "WHERE station_id IN %s "
               % str(tuple(stn_df['station_id'].values)))
    samp_df = pd.read_sql_query(sql, engine)
    # Join in par IDs based on method IDs
    sql = ('SELECT * FROM resa2.wc_parameters_methods')
    meth_par_df = pd.read_sql_query(sql, engine)
    wc_df = pd.merge(wc_df, meth_par_df, how='left',
                     left_on='method_id', right_on='wc_method_id')
    # Get just the parameters of interest
    wc_df = wc_df.query('wc_parameter_id in %s'
                        % str(tuple(par_df['parameter_id'].values)))
    # Join in sample dates
    wc_df = pd.merge(wc_df, samp_df, how='left',
                     left_on='sample_id', right_on='water_sample_id')
    # Get just the near-surface samples (both depths within 1 m)
    wc_df = wc_df.query('(depth1 <= 1) and (depth2 <= 1)')
    # Join in parameter units
    sql = ('SELECT * FROM resa2.parameter_definitions')
    all_par_df = pd.read_sql_query(sql, engine)
    wc_df = pd.merge(wc_df, all_par_df, how='left',
                     left_on='wc_parameter_id', right_on='parameter_id')
    # Join in station codes
    wc_df = pd.merge(wc_df, stn_df, how='left',
                     left_on='station_id', right_on='station_id')
    # Convert units to those defined in RESA2.PARAMETER_DEFINITIONS
    wc_df['value'] = wc_df['value'] * wc_df['conversion_factor']
    # Extract columns of interest
    wc_df = wc_df[['station_id', 'sample_date', 'name',
                   'value', 'entered_date_x']]
    # Check for duplicates
    dup_df = wc_df[wc_df.duplicated(subset=['station_id',
                                            'sample_date',
                                            'name'],
                                    keep=False)].sort_values(by=['station_id',
                                                                 'sample_date',
                                                                 'name'])
    if len(dup_df) > 0:
        print (' The database contains duplicate values for some station-'
               'date-parameter combinations.\n Only the most recent values '
               'will be used, but you should check the repeated values are not '
               'errors.\n The duplicated entries are returned in a separate '
               'dataframe.\n')
        # Choose most recent record for each duplicate: sort ascending on
        # entry date, then keep the last (newest) of each group
        wc_df.sort_values(by='entered_date_x', inplace=True, ascending=True)
        # Drop duplicates
        wc_df.drop_duplicates(subset=['station_id', 'sample_date', 'name'],
                              keep='last', inplace=True)
        # Sort
        wc_df.sort_values(by=['station_id', 'sample_date', 'name'],
                          inplace=True)
        # Tidy
        del wc_df['entered_date_x']
        wc_df.reset_index(inplace=True, drop=True)
    # Unstack to one column per parameter
    wc_df.set_index(['station_id', 'sample_date', 'name'], inplace=True)
    wc_df = wc_df.unstack(level='name')
    wc_df.columns = wc_df.columns.droplevel()
    wc_df.reset_index(inplace=True)
    wc_df.columns.name = None
    # Extract year from date column
    wc_df['year'] = wc_df['sample_date'].map(lambda x: x.year)
    del wc_df['sample_date']
    # Groupby station_id and year
    grpd = wc_df.groupby(['station_id', 'year'])
    # Calculate median of each parameter per station-year
    wc_df = grpd.agg('median')
    return stn_df, wc_df, dup_df
def conv_units_and_correct(wc_df):
    """ Convert a dataframe of aggregated annual values (in the units given
        by RESA2.PARAMETERS) to ueq/l and apply the sea-salt correction
        where relevant.
    Args:
        wc_df: Dataframe in original units
    Returns:
        Dataframe in converted units (also modified in place)
    """
    import pandas as pd
    # Chemical properties keyed by parameter name. 'resa2_ref_ratio' is the
    # sea-salt reference ratio relative to Cl ('N/A' where no correction
    # applies).
    props = pd.DataFrame(
        {'molar_mass': [96, 35, 40, 24, 14, 39, 23, 14],
         'valency': [2, 1, 2, 2, 1, 1, 1, 1],
         'resa2_ref_ratio': [0.103, 1., 0.037, 0.196,
                             'N/A', 0.018, 0.859, 'N/A']},
        index=['SO4', 'Cl', 'Ca', 'Mg', 'NO3-N', 'K', 'Na', 'NH4-N'])
    # NH4 is assumed negligible when absent; otherwise gaps are filled with 0
    # so that the ANC calculation never propagates NaN.
    if 'NH4-N' in wc_df.columns:
        wc_df['NH4-N'].fillna(value=0, inplace=True)
    else:
        wc_df['NH4-N'] = 0
    # 1. Convert to ueq/l
    # 1.1. pH to H+ concentration
    wc_df['EH'] = 1E6 * 10**(-wc_df['pH'])
    # 1.2. Other parameters. The N species are already expressed per unit N,
    # so they skip the mg -> ug factor of 1000.
    n_species = {'NO3-N': 'ENO3', 'NH4-N': 'ENH4'}
    for par in ['SO4', 'Cl', 'Mg', 'Ca', 'NO3-N', 'K', 'Na', 'NH4-N']:
        valency = props.at[par, 'valency']
        molar_mass = props.at[par, 'molar_mass']
        if par in n_species:
            wc_df[n_species[par]] = wc_df[par] * valency / molar_mass
        else:
            wc_df['E%s' % par] = wc_df[par] * valency * 1000. / molar_mass
    # 2. Apply sea-salt correction (the 'X' columns)
    for ion in ['ESO4', 'EMg', 'ECa']:
        ratio = props.at[ion[1:], 'resa2_ref_ratio']
        wc_df[ion + 'X'] = wc_df[ion] - ratio * wc_df['ECl']
    # 3. Derived combinations
    wc_df['ESO4_ECl'] = wc_df['ESO4'] + wc_df['ECl']
    wc_df['ECa_EMg'] = wc_df['ECa'] + wc_df['EMg']
    wc_df['ECaX_EMgX'] = wc_df['ECaX'] + wc_df['EMgX']
    # ANC = (base cations) - (strong acid anions)
    wc_df['ANC'] = ((wc_df['ECa'] + wc_df['EMg'] + wc_df['EK'] +
                     wc_df['ENa'] + wc_df['ENH4']) -
                    (wc_df['ECl'] + wc_df['ESO4'] + wc_df['ENO3']))
    # ANCX: as ANC, but with the sea-salt-corrected Ca, Mg and SO4
    wc_df['ANCX'] = ((wc_df['ECaX'] + wc_df['EMgX'] + wc_df['EK'] +
                      wc_df['ENa'] + wc_df['ENH4']) -
                     (wc_df['ECl'] + wc_df['ESO4X'] + wc_df['ENO3']))
    # 4. Drop the raw and intermediate columns, then tidy the index
    wc_df.drop(columns=['SO4', 'Cl', 'Mg', 'Ca', 'NO3-N', 'K', 'Na',
                        'NH4-N', 'pH', 'EMg', 'ECa', 'EK', 'ENa', 'ENH4',
                        'EMgX', 'ECaX'],
               inplace=True)
    wc_df.reset_index(inplace=True)
    return wc_df
def run_trend_analysis(proj_list, engine, st_yr=None, end_yr=None,
                       plot=False, fold=None):
    """ Run the trend analysis for the specified projects and time period.
    Args:
        proj_list: List of RESA2 project names for which to extract data
        engine:    SQLAlchemy 'engine' object already connected to RESA2
        st_yr:     First year to include in analysis. Pass None to start
                   at the beginning of the series
        end_yr:    Last year to include in analysis. Pass None to include
                   data up to the end of the series
        plot:      Whether to generate a PNG plot of the Sen's slope
                   regression for each station-parameter pair
        fold:      Folder in which to save PNGs if plot=True
    Returns:
        [res_df, dup_df, no_data_df]. Dataframe of statistics; dataframe of
        duplicated water chemistry values for investigation; dataframe of
        stations with no relevant data in the period of interest
    """
    import pandas as pd, os
    # Check paths valid before doing any (slow) database work
    if plot:
        assert os.path.isdir(fold), 'The specified folder does not exist.'
    # Get raw data from db
    print ('Extracting data from RESA2...')
    stn_df, wc_df, dup_df = read_resa2(proj_list, engine)
    # Identify stations with no relevant records
    stns_no_data = (set(stn_df['station_id'].values) -
                    set(wc_df.index.get_level_values('station_id')))
    if len(stns_no_data) > 0:
        print (' Some stations have no relevant data in the period '
               'specified. Their IDs are returned in a separate dataframe.\n')
        no_data_df = pd.DataFrame({'station_id':list(stns_no_data)})
    else:
        no_data_df = None
    print (' Done.')
    # Convert units and apply sea-salt correction
    print ('\nConverting units and applying sea-salt correction...')
    wc_df = conv_units_and_correct(wc_df)
    print (' Done.')
    # Calculate stats, one station at a time
    df_list = []
    print ('\nCalculating statistics...')
    for stn_id in wc_df['station_id'].unique():
        # Extract data for this site
        df = wc_df.query('station_id == @stn_id')
        # wc_stats() expects the first two columns to be named
        # [STATION_ID, YEAR]
        names = list(df.columns)
        names[:2] = ['STATION_ID', 'YEAR']
        df.columns = names
        # Run analysis
        df_list.append(wc_stats(df, st_yr=st_yr, end_yr=end_yr,
                                plot=plot, fold=fold))
    res_df = pd.concat(df_list, axis=0)
    # Convert station_id cols to ints for consistent joining/reporting
    res_df['station_id'] = res_df['station_id'].map(int)
    dup_df['station_id'] = dup_df['station_id'].map(int)
    if no_data_df is not None:
        no_data_df['station_id'] = no_data_df['station_id'].map(int)
    print (' Done.')
    print ('\nFinished.')
    return res_df, dup_df, no_data_df
def mk_test(x, stn_id, par, alpha=0.05):
import numpy as np
from scipy.stats import norm
n = len(x)
if n < 10:
print (' Data series for %s at site %s has fewer than 10 non-null values. '
'Significance estimates may be unreliable.' % (par, int(stn_id)))
s = 0
for k in range(n-1):
for j in range(k+1,n):
s += np.sign(x[j] - x[k])
unique_x = np.unique(x)
g = len(unique_x)
if n == g:
var_s = (n*(n-1)*(2*n+5))/18.
else:
tp = np.zeros(unique_x.shape)
for i in range(len(unique_x)):
tp[i] = sum(unique_x[i] == x)
var_s = (n*(n-1)*(2*n+5) - np.sum(tp*(tp-1)*(2*tp+5)))/18.
if s>0:
z = (s - 1)/np.sqrt(var_s)
elif s == 0:
z = 0
elif s<0:
z = (s + 1)/np.sqrt(var_s)
else:
z = np.nan
# calculate the p_value
p = 2*(1-norm.cdf(abs(z))) # two tail test
h = abs(z) > norm.ppf(1-alpha/2.)
if (z<0) and h:
trend = 'decreasing'
elif (z>0) and h:
trend = 'increasing'
elif np.isnan(z):
trend = np.nan
else:
trend = 'no trend'
return var_s, s, z, p, trend
def wc_stats(raw_df, st_yr=None, end_yr=None, plot=False, fold=None):
import numpy as np, pandas as pd
import seaborn as sn, matplotlib.pyplot as plt, os
from scipy.stats import theilslopes
sn.set_context('poster')
# Checking
df = raw_df.copy()
assert list(df.columns[:2]) == ['STATION_ID', 'YEAR'], 'Columns must be: [STATION_ID, YEAR, par1, par2, ... parn]'
assert len(df['STATION_ID'].unique()) == 1, 'You can only process data for one site at a time'
# Get just the period of interest
if st_yr:
df = df.query('YEAR >= @st_yr')
if end_yr:
df = df.query('YEAR <= @end_yr')
# Only continue if data
if len(df) > 0:
# Get stn_id
stn_id = df['STATION_ID'].iloc[0]
# Tidy up df
df.index = df['YEAR']
df.sort_index(inplace=True)
del df['STATION_ID'], df['YEAR']
# Container for results
data_dict = {'station_id':[],
'par_id':[],
'non_missing':[],
'n_start':[],
'n_end':[],
'median':[],
'mean':[],
'std_dev':[],
'period':[],
'mk_std_dev':[],
'mk_stat':[],
'norm_mk_stat':[],
'mk_p_val':[],
'trend':[],
'sen_slp':[]}
# Loop over pars
for col in df.columns:
# 1. Station ID
data_dict['station_id'].append(stn_id)
# 2. Par ID
data_dict['par_id'].append(col)
# 3. Non-missing
data_dict['non_missing'].append(pd.notnull(df[col]).sum())
# 4. Number of non nulls at start
if st_yr:
# Record the number of non-nulls within 5 years of start year
data_dict['n_start'].append(pd.notnull(df[df.index<(st_yr+5)][col]).sum())
else:
# Record the number of non-nulls in first 5 years of record
data_dict['n_start'].append(pd.notnull(df[col].head(5)).sum())
# 5. Number of non nulls at end
if end_yr:
# Record the number of non-nulls within 5 years of end year
data_dict['n_end'].append(pd.notnull(df[df.index>(end_yr-5)][col]).sum())
else:
# Record the number of non-nulls in last 5 years of record
data_dict['n_end'].append(pd.notnull(df[col].tail(5)).sum())
# 6. Median
data_dict['median'].append(df[col].median())
# 7. Mean
data_dict['mean'].append(df[col].mean())
# 8. Std dev
data_dict['std_dev'].append(df[col].std())
# 9. Period
st_yr = df.index.min()
end_yr = df.index.max()
per = '%s-%s' % (int(st_yr), int(end_yr))
data_dict['period'].append(per)
# 10. M-K test
# Drop missing values
mk_df = df[[col]].dropna(how='any')
# Only run stats if more than 1 valid value
if len(mk_df) > 1:
var_s, s, z, p, trend = mk_test(mk_df[col].values, stn_id, col)
data_dict['mk_std_dev'].append(np.sqrt(var_s))
data_dict['mk_stat'].append(s)
data_dict['norm_mk_stat'].append(z)
data_dict['mk_p_val'].append(p)
data_dict['trend'].append(trend)
# 11. Sen's slope. Returns:
sslp, icpt, lb, ub = theilslopes(mk_df[col].values,
mk_df.index, 0.95)
data_dict['sen_slp'].append(sslp)
if plot:
fig = plt.figure()
plt.plot(mk_df.index, mk_df[col].values, 'bo-')
plt.plot(mk_df.index, mk_df.index*sslp + icpt, 'k-')
if col in ('Al', 'TOC'):
plt.ylabel('%s (mg/l)' % col, fontsize=24)
else:
plt.ylabel('%s (ueq/l)' % col, fontsize=24)
plt.title('%s at station %s' % (col, int(stn_id)),
fontsize=32)
plt.tight_layout()
out_path = os.path.join(fold,
'%s_%s_%s-%s.png' % (int(stn_id), col,
st_yr, end_yr))
plt.savefig(out_path, dpi=150)
plt.close()
else:
for par in ['mk_std_dev', 'mk_stat', 'norm_mk_stat',
'mk_p_val', 'trend', 'sen_slp']:
data_dict[par].append(np.nan)
res_df = pd.DataFrame(data_dict)
res_df = res_df[['station_id', 'par_id', 'period', 'non_missing', 'n_start',
'n_end', 'mean', 'median', 'std_dev', 'mk_stat',
'norm_mk_stat', 'mk_p_val', 'mk_std_dev', 'trend', 'sen_slp']]
return res_df
def read_resa2(proj_list, engine):
import pandas as pd
par_list = ['SO4', 'Cl', 'Ca', 'Mg', 'NO3-N', 'TOC',
'Al', 'K', 'Na', 'NH4-N', 'pH']
sql = ('SELECT * FROM resa2.parameter_definitions '
'WHERE name in %s' % str(tuple(par_list)))
par_df = pd.read_sql_query(sql, engine)
if len(proj_list) == 1:
sql = ("SELECT station_id, station_code "
"FROM resa2.stations "
"WHERE station_id IN (SELECT UNIQUE(station_id) "
"FROM resa2.projects_stations "
"WHERE project_id IN (SELECT project_id "
"FROM resa2.projects "
"WHERE project_name = '%s'))"
% proj_list[0])
else:
sql = ('SELECT station_id, station_code '
'FROM resa2.stations '
'WHERE station_id IN (SELECT UNIQUE(station_id) '
'FROM resa2.projects_stations '
'WHERE project_id IN (SELECT project_id '
'FROM resa2.projects '
'WHERE project_name IN %s))'
% str(tuple(proj_list)))
stn_df = pd.read_sql(sql, engine)
if len(stn_df)==1:
sql = ("SELECT * FROM resa2.water_chemistry_values2 "
"WHERE sample_id IN (SELECT water_sample_id FROM resa2.water_samples "
"WHERE station_id = %s)"
% stn_df['station_id'].iloc[0])
else:
sql = ("SELECT * FROM resa2.water_chemistry_values2 "
"WHERE sample_id IN (SELECT water_sample_id FROM resa2.water_samples "
"WHERE station_id IN %s)"
% str(tuple(stn_df['station_id'].values)))
wc_df = pd.read_sql_query(sql, engine)
if len(stn_df)==1:
sql = ("SELECT water_sample_id, station_id, sample_date, depth1, depth2 "
"FROM resa2.water_samples "
"WHERE station_id = %s "
% stn_df['station_id'].iloc[0])
else:
sql = ("SELECT water_sample_id, station_id, sample_date, depth1, depth2 "
"FROM resa2.water_samples "
"WHERE station_id IN %s "
% str(tuple(stn_df['station_id'].values)))
samp_df = pd.read_sql_query(sql, engine)
sql = ('SELECT * FROM resa2.wc_parameters_methods')
meth_par_df = pd.read_sql_query(sql, engine)
wc_df = pd.merge(wc_df, meth_par_df, how='left',
left_on='method_id', right_on='wc_method_id')
wc_df = wc_df.query('wc_parameter_id in %s'
% str(tuple(par_df['parameter_id'].values)))
wc_df = pd.merge(wc_df, samp_df, how='left',
left_on='sample_id', right_on='water_sample_id')
wc_df = wc_df.query('(depth1 <= 1) and (depth2 <= 1)')
sql = ('SELECT * FROM resa2.parameter_definitions')
all_par_df = pd.read_sql_query(sql, engine)
wc_df = pd.merge(wc_df, all_par_df, how='left',
left_on='wc_parameter_id', right_on='parameter_id')
wc_df = pd.merge(wc_df, stn_df, how='left',
left_on='station_id', right_on='station_id')
wc_df['value'] = wc_df['value'] * wc_df['conversion_factor']
wc_df = wc_df[['station_id', 'sample_date', 'name',
'value', 'entered_date_x']]
dup_df = wc_df[wc_df.duplicated(subset=['station_id',
'sample_date',
'name'],
keep=False)].sort_values(by=['station_id',
'sample_date',
'name'])
if len(dup_df) > 0:
print (' The database contains duplicate values for some station-'
'date-parameter combinations.\n Only the most recent values '
'will be used, but you should check the repeated values are not '
'errors.\n The duplicated entries are returned in a separate '
'dataframe.\n')
wc_df.sort_values(by='entered_date_x', inplace=True, ascending=True)
wc_df.drop_duplicates(subset=['station_id', 'sample_date', 'name'],
keep='last', inplace=True)
wc_df.sort_values(by=['station_id', 'sample_date', 'name'],
inplace=True)
del wc_df['entered_date_x']
wc_df.reset_index(inplace=True, drop=True)
wc_df.set_index(['station_id', 'sample_date', 'name'], inplace=True)
wc_df = wc_df.unstack(level='name')
wc_df.columns = wc_df.columns.droplevel()
wc_df.reset_index(inplace=True)
wc_df.columns.name = None
wc_df['year'] = wc_df['sample_date'].map(lambda x: x.year)
del wc_df['sample_date']
grpd = wc_df.groupby(['station_id', 'year'])
wc_df = grpd.agg('median')
return stn_df, wc_df, dup_df
def conv_units_and_correct(wc_df):
import pandas as pd
chem_dict = {'molar_mass':[96, 35, 40, 24, 14, 39, 23, 14],
'valency':[2, 1, 2, 2, 1, 1, 1, 1],
'resa2_ref_ratio':[0.103, 1., 0.037, 0.196,
'N/A', 0.018, 0.859, 'N/A']}
chem_df = pd.DataFrame(chem_dict, index=['SO4', 'Cl', 'Ca', 'Mg',
'NO3-N', 'K', 'Na', 'NH4-N'])
chem_df = chem_df[['molar_mass', 'valency', 'resa2_ref_ratio']]
if 'NH4-N' in wc_df.columns:
wc_df['NH4-N'].fillna(value=0, inplace=True)
else:
wc_df['NH4-N'] = 0
wc_df['EH'] = 1E6 * 10**(-wc_df['pH'])
for par in ['SO4', 'Cl', 'Mg', 'Ca', 'NO3-N', 'K', 'Na', 'NH4-N']:
val = chem_df.at[par, 'valency']
mm = chem_df.at[par, 'molar_mass']
if par == 'NO3-N':
wc_df['ENO3'] = wc_df[par] * val / mm
elif par == 'NH4-N':
wc_df['ENH4'] = wc_df[par] * val / mm
else:
wc_df['E%s' % par] = wc_df[par] * val * 1000. / mm
for par in ['ESO4', 'EMg', 'ECa']:
ref = chem_df.at[par[1:], 'resa2_ref_ratio']
wc_df['%sX' % par] = wc_df[par] - (ref*wc_df['ECl'])
wc_df['ESO4_ECl'] = wc_df['ESO4'] + wc_df['ECl']
wc_df['ECa_EMg'] = wc_df['ECa'] + wc_df['EMg']
wc_df['ECaX_EMgX'] = wc_df['ECaX'] + wc_df['EMgX']
wc_df['ANC'] = ((wc_df['ECa'] + wc_df['EMg'] + wc_df['EK'] +
wc_df['ENa'] + wc_df['ENH4']) -
(wc_df['ECl'] + wc_df['ESO4'] + wc_df['ENO3']))
wc_df['ANCX'] = ((wc_df['ECaX'] + wc_df['EMgX'] + wc_df['EK'] +
wc_df['ENa'] + wc_df['ENH4']) -
(wc_df['ECl'] + wc_df['ESO4X'] + wc_df['ENO3']))
for col in ['SO4', 'Cl', 'Mg', 'Ca', 'NO3-N', 'K', 'Na', 'NH4-N', 'pH',
'EMg', 'ECa', 'EK', 'ENa', 'ENH4', 'EMgX', 'ECaX']:
del wc_df[col]
wc_df.reset_index(inplace=True)
return wc_df
def run_trend_analysis(proj_list, engine, st_yr=None, end_yr=None,
                       plot=False, fold=None):
    """Extract water chemistry for the given RESA2 projects, convert units
    and compute per-station trend statistics.

    Parameters
    ----------
    proj_list : list
        Project names, passed through to read_resa2().
    engine : database engine/connection
        Open connection to the RESA2 database.
    st_yr, end_yr : int, optional
        First/last year passed to wc_stats().
    plot : bool
        If True, wc_stats() saves trend plots into `fold`.
    fold : str, optional
        Existing output folder for plots; required when plot=True.

    Returns
    -------
    (res_df, dup_df, no_data_df)
        Trend statistics, duplicated samples, and either a dataframe of
        station IDs with no data in the period or None.
    """
    import pandas as pd, os
    if plot:
        assert os.path.isdir(fold), 'The specified folder does not exist.'
    print ('Extracting data from RESA2...')
    stn_df, wc_df, dup_df = read_resa2(proj_list, engine)
    # Stations requested but absent from the extracted chemistry data.
    stns_no_data = (set(stn_df['station_id'].values) -
                    set(wc_df.index.get_level_values('station_id')))
    if len(stns_no_data) > 0:
        print (' Some stations have no relevant data in the period '
               'specified. Their IDs are returned in a separate dataframe.\n')
        no_data_df = pd.DataFrame({'station_id':list(stns_no_data)})
    else:
        no_data_df = None
    print (' Done.')
    print ('\nConverting units and applying sea-salt correction...')
    wc_df = conv_units_and_correct(wc_df)
    print (' Done.')
    df_list = []
    print ('\nCalculating statistics...')
    # Compute statistics separately for each station and collect the results.
    for stn_id in wc_df['station_id'].unique():
        df = wc_df.query('station_id == @stn_id')
        # Rename the first two columns to the names wc_stats() expects.
        # NOTE(review): query() returns a copy, so assigning .columns here
        # may trigger pandas' SettingWithCopyWarning -- verify behaviour on
        # the pandas version in use.
        names = list(df.columns)
        names[:2] = ['STATION_ID', 'YEAR']
        df.columns = names
        df_list.append(wc_stats(df, st_yr=st_yr, end_yr=end_yr,
                                plot=plot, fold=fold))
    res_df = pd.concat(df_list, axis=0)
    # Normalise ID columns to plain ints in all returned frames.
    res_df['station_id'] = res_df['station_id'].map(int)
    dup_df['station_id'] = dup_df['station_id'].map(int)
    if no_data_df is not None:
        no_data_df['station_id'] = no_data_df['station_id'].map(int)
    print (' Done.')
    print ('\nFinished.')
    return res_df, dup_df, no_data_df
f7118ca81a64318023c6f86e4904b3df0227a3a8 | 468 | py | Python | blogcbv/contrib/sites/migrations/0004_alter_options_ordering_domain.py | saaddine/BlogApp_CookieCutter | 9df5539adae62843c2d41449fe52acfab8e120d5 | [
"MIT"
] | 602 | 2021-10-24T02:17:35.000Z | 2022-03-31T14:45:45.000Z | blogcbv/contrib/sites/migrations/0004_alter_options_ordering_domain.py | saaddine/BlogApp_CookieCutter | 9df5539adae62843c2d41449fe52acfab8e120d5 | [
"MIT"
] | 162 | 2021-10-24T02:31:20.000Z | 2022-03-31T23:39:16.000Z | gestao_rh/contrib/sites/migrations/0004_alter_options_ordering_domain.py | andreFSilva/gestao_rh | 9e0bfce3c594186ed8b9acea12e4dff13337148e | [
"MIT"
] | 204 | 2021-11-02T12:18:07.000Z | 2022-03-31T08:31:53.000Z | # Generated by Django 3.1.7 on 2021-02-04 14:49
from django.db import migrations
class Migration(migrations.Migration):
    # Migration for django.contrib.sites: orders Site records by their
    # domain name and sets human-readable names for the admin.
    dependencies = [
        ("sites", "0003_set_site_domain_and_name"),
    ]
    operations = [
        migrations.AlterModelOptions(
            name="site",
            options={
                "ordering": ["domain"],  # list sites alphabetically by domain
                "verbose_name": "site",
                "verbose_name_plural": "sites",
            },
        ),
    ]
| 21.272727 | 51 | 0.527778 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("sites", "0003_set_site_domain_and_name"),
]
operations = [
migrations.AlterModelOptions(
name="site",
options={
"ordering": ["domain"],
"verbose_name": "site",
"verbose_name_plural": "sites",
},
),
]
| true | true |
f7118cb0bdc5e5841720741a7b7171647cf7632a | 2,886 | py | Python | trax/layers/normalization_test.py | zfisher/trax | c8187944fc036418a5c3b0491fc53c223e73faa6 | [
"Apache-2.0"
] | 1 | 2020-04-19T01:40:08.000Z | 2020-04-19T01:40:08.000Z | trax/layers/normalization_test.py | zfisher/trax | c8187944fc036418a5c3b0491fc53c223e73faa6 | [
"Apache-2.0"
] | null | null | null | trax/layers/normalization_test.py | zfisher/trax | c8187944fc036418a5c3b0491fc53c223e73faa6 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for normalization layers."""
from absl.testing import absltest
import numpy as onp
from trax.layers import base
from trax.layers import normalization
from trax.math import numpy as np
from trax.shapes import ShapeDtype
class NormalizationLayerTest(absltest.TestCase):
  """Shape and numeric checks for trax normalization layers."""
  def test_batch_norm_shape(self):
    """BatchNorm must preserve the input shape."""
    input_signature = ShapeDtype((29, 5, 7, 20))
    result_shape = base.check_shape_agreement(normalization.BatchNorm(),
                                              input_signature)
    self.assertEqual(result_shape, input_signature.shape)
  def test_batch_norm(self):
    """BatchNorm output and running-statistics update for a known input."""
    input_shape = (2, 3, 4)
    input_dtype = np.float32
    input_signature = ShapeDtype(input_shape, input_dtype)
    eps = 1e-5
    inp1 = np.reshape(np.arange(np.prod(input_shape), dtype=input_dtype),
                      input_shape)
    m1 = 11.5 # Mean of this random input.
    v1 = 47.9167 # Variance of this random input.
    layer = normalization.BatchNorm(axis=(0, 1, 2))
    _, _ = layer.init(input_signature)
    state = layer.state
    # Freshly initialised state: running mean 0, running variance 1, step 0.
    onp.testing.assert_allclose(state[0], 0)
    onp.testing.assert_allclose(state[1], 1)
    self.assertEqual(state[2], 0)
    out = layer(inp1)
    state = layer.state
    # After one call the running stats move towards the batch statistics
    # with factor 0.001, and the step counter increments.
    onp.testing.assert_allclose(state[0], m1 * 0.001)
    onp.testing.assert_allclose(state[1], 0.999 + v1 * 0.001, rtol=1e-6)
    self.assertEqual(state[2], 1)
    # The output is the input normalised by the batch mean and variance.
    onp.testing.assert_allclose(out, (inp1 - m1) / np.sqrt(v1 + eps),
                                rtol=1e-6)
  def test_layer_norm_shape(self):
    """LayerNorm must preserve the input shape."""
    input_signature = ShapeDtype((29, 5, 7, 20))
    result_shape = base.check_shape_agreement(
        normalization.LayerNorm(), input_signature)
    self.assertEqual(result_shape, input_signature.shape)
  def test_frn_shape(self):
    """FilterResponseNorm must preserve the input shape in both epsilon modes."""
    B, H, W, C = 64, 5, 7, 3 # pylint: disable=invalid-name
    input_signature = ShapeDtype((B, H, W, C))
    result_shape = base.check_shape_agreement(
        normalization.FilterResponseNorm(), input_signature)
    self.assertEqual(result_shape, input_signature.shape)
    result_shape = base.check_shape_agreement(
        normalization.FilterResponseNorm(learn_epsilon=False),
        input_signature)
    self.assertEqual(result_shape, input_signature.shape)
if __name__ == '__main__':
absltest.main()
| 36.075 | 74 | 0.70201 |
from absl.testing import absltest
import numpy as onp
from trax.layers import base
from trax.layers import normalization
from trax.math import numpy as np
from trax.shapes import ShapeDtype
class NormalizationLayerTest(absltest.TestCase):
def test_batch_norm_shape(self):
input_signature = ShapeDtype((29, 5, 7, 20))
result_shape = base.check_shape_agreement(normalization.BatchNorm(),
input_signature)
self.assertEqual(result_shape, input_signature.shape)
def test_batch_norm(self):
input_shape = (2, 3, 4)
input_dtype = np.float32
input_signature = ShapeDtype(input_shape, input_dtype)
eps = 1e-5
inp1 = np.reshape(np.arange(np.prod(input_shape), dtype=input_dtype),
input_shape)
m1 = 11.5
v1 = 47.9167
layer = normalization.BatchNorm(axis=(0, 1, 2))
_, _ = layer.init(input_signature)
state = layer.state
onp.testing.assert_allclose(state[0], 0)
onp.testing.assert_allclose(state[1], 1)
self.assertEqual(state[2], 0)
out = layer(inp1)
state = layer.state
onp.testing.assert_allclose(state[0], m1 * 0.001)
onp.testing.assert_allclose(state[1], 0.999 + v1 * 0.001, rtol=1e-6)
self.assertEqual(state[2], 1)
onp.testing.assert_allclose(out, (inp1 - m1) / np.sqrt(v1 + eps),
rtol=1e-6)
def test_layer_norm_shape(self):
input_signature = ShapeDtype((29, 5, 7, 20))
result_shape = base.check_shape_agreement(
normalization.LayerNorm(), input_signature)
self.assertEqual(result_shape, input_signature.shape)
def test_frn_shape(self):
B, H, W, C = 64, 5, 7, 3
input_signature = ShapeDtype((B, H, W, C))
result_shape = base.check_shape_agreement(
normalization.FilterResponseNorm(), input_signature)
self.assertEqual(result_shape, input_signature.shape)
result_shape = base.check_shape_agreement(
normalization.FilterResponseNorm(learn_epsilon=False),
input_signature)
self.assertEqual(result_shape, input_signature.shape)
if __name__ == '__main__':
absltest.main()
| true | true |
f7118d0dbc86cd00f058a109d84a4b6e7666a2d7 | 11,575 | py | Python | main.py | chicanica/TeleBot | 5f3c197fea22b812016a31f645db8178d459c4a5 | [
"MIT"
] | null | null | null | main.py | chicanica/TeleBot | 5f3c197fea22b812016a31f645db8178d459c4a5 | [
"MIT"
] | null | null | null | main.py | chicanica/TeleBot | 5f3c197fea22b812016a31f645db8178d459c4a5 | [
"MIT"
] | null | null | null | # Телеграм-бот v.002 - бот создаёт меню, присылает собачку, и анекдот
import telebot # pyTelegramBotAPI 4.3.1
from telebot import types
import requests
import bs4
import json
# NOTE(review): hard-coded bot token committed to the repository -- revoke it
# and load the token from an environment variable or config file instead.
bot = telebot.TeleBot('5193117811:AAH0hWHVx0kH08sub52IFj2SAdJi1eugY-k') # create the bot instance
# -----------------------------------------------------------------------
# Handler for the /start command
@bot.message_handler(commands=["start"])
def start(message, res=False):
    """Greet the user and show the entry reply keyboard (main menu / help)."""
    chat_id = message.chat.id
    markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
    btn1 = types.KeyboardButton("👋 Главное меню")
    btn2 = types.KeyboardButton("❓ Помощь")
    markup.add(btn1, btn2)
    bot.send_message(chat_id,
                     text="Привет, {0.first_name}! Я тестовый бот для курса программирования на языке ПаЙтон".format(
                         message.from_user), reply_markup=markup)
# -----------------------------------------------------------------------
# Handler for all other text messages: implements the bot's menu tree
@bot.message_handler(content_types=['text'])
def get_text_messages(message):
    """Route an incoming text message to the matching menu branch.

    NOTE(review): the nested functions below are additionally decorated with
    @bot.message_handler, which registers each of them as a new *global*
    text handler every time its branch runs; presumably only
    register_next_step_handler is intended -- verify against the
    pyTelegramBotAPI documentation.
    """
    chat_id = message.chat.id
    ms_text = message.text
    if ms_text == "Главное меню" or ms_text == "👋 Главное меню" or ms_text == "Вернуться в главное меню": # main menu
        markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
        btn1 = types.KeyboardButton("Задачи")
        btn2 = types.KeyboardButton("Развлечения")
        btn3 = types.KeyboardButton("Помощь")
        back = types.KeyboardButton("Вернуться в главное меню")
        markup.add(btn1, btn2, btn3, back)
        bot.send_message(chat_id, text="Вы в главном меню", reply_markup=markup)
    elif ms_text == "Развлечения": # entertainment submenu
        markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
        btn1 = types.KeyboardButton("Прислать анекдот c anekdotme.ru")
        btn2 = types.KeyboardButton("Прислать анекдот c nekdo.ru")
        btn3 = types.KeyboardButton("Прислать собаку")
        btn4 = types.KeyboardButton("Прислать случайного пользователя")
        back = types.KeyboardButton("Вернуться в главное меню")
        markup.add(btn1, btn2, btn3, btn4, back)
        bot.send_message(chat_id, text="Развлечения", reply_markup=markup)
    elif ms_text == "Прислать анекдот c anekdotme.ru": # joke from anekdotme.ru
        bot.send_message(chat_id, text=get_anekdot('http://anekdotme.ru/random', '.anekdot_text'))
    elif ms_text == "Прислать анекдот c nekdo.ru": # joke from nekdo.ru
        bot.send_message(chat_id, text=get_anekdot('https://nekdo.ru/random', '.text'))
    elif ms_text == "Прислать собаку" or ms_text == "/dog": # random dog photo
        contents = requests.get('https://random.dog/woof.json').json()
        urlDOG = contents['url']
        bot.send_photo(chat_id, photo=urlDOG, caption='Твоя собачка:)')
    elif ms_text == "Прислать случайного пользователя" or ms_text == "/user": # random user card
        contents = requests.get('https://randomuser.me/api/').json()
        img = contents['results'][0]['picture']['large']
        name = contents['results'][0]['name']['title'] + ' ' + contents['results'][0]['name']['first'] + ' ' + contents['results'][0]['name']['last']
        age = contents['results'][0]['dob']['age']
        place = contents['results'][0]['location']['timezone']['description']
        place = place.split(',')[0]
        info = name + ', ' + str(age) + '\n' + place
        bot.send_photo(chat_id, photo=img, caption=info)
    elif ms_text == "Задачи": # exercises submenu (lesson 1)
        markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
        btn1 = types.KeyboardButton("1")
        btn2 = types.KeyboardButton("2")
        btn3 = types.KeyboardButton("3")
        btn4 = types.KeyboardButton("4")
        btn5 = types.KeyboardButton("5")
        btn6 = types.KeyboardButton("6")
        btn7 = types.KeyboardButton("7")
        btn8 = types.KeyboardButton("8")
        btn9 = types.KeyboardButton("9")
        btn10 = types.KeyboardButton("10")
        back = types.KeyboardButton("Вернуться в главное меню")
        markup.add(btn1, btn2, btn3, btn4, btn5, btn6, btn7, btn8, btn9, btn10, back)
        bot.send_message(chat_id, text="Задачи с 1-го занятия", reply_markup=markup)
    elif ms_text == "1": # exercise 1: send a fixed name
        name = 'Вероника'
        bot.send_message(chat_id, text=name)
    elif ms_text == "2": # exercise 2: string concatenation
        name = 'Вероника'
        age = 20
        # NOTE(review): this rebinds the handler argument `message`; harmless
        # here because the argument is not used again in this branch.
        message = 'Привет, меня зовут ' + name + '. Мне ' + str(age) + ' лет.'
        bot.send_message(chat_id, text=message)
    elif ms_text == "3": # exercise 3: string repetition
        name = 'Вероника'
        name5 = name * 5
        bot.send_message(chat_id, text=name5)
    elif ms_text == "4": # exercise 4: two-step dialogue (name, then age)
        bot.send_message(chat_id, text='Ваше имя?')
        @bot.message_handler(content_types=['text'])
        def inputName(message):
            userName = message.text
            bot.send_message(chat_id, text='Сколько Вам лет?')
            @bot.message_handler(content_types=['text'])
            def inputAge(message):
                userAge = message.text
                userMessage = 'Привет, ' + userName + '! Тебе уже ' + userAge + ' лет?! Это так круто!'
                bot.send_message(chat_id, text=userMessage)
            bot.register_next_step_handler(message, inputAge)
        bot.register_next_step_handler(message, inputName)
    elif ms_text == "5": # exercise 5: age threshold check
        bot.send_message(chat_id, text='Сколько Вам лет?')
        @bot.message_handler(content_types=['text'])
        def inputAge(message):
            userAge = message.text
            userAge = int(userAge)
            if userAge < 18:
                ageMessage = 'Ты не достиг еще совершеннолетия, возращайся позже'
            else:
                ageMessage = 'Ты уже достаточно взрослый, присоединяйся к нам!'
            bot.send_message(chat_id, text=ageMessage)
        bot.register_next_step_handler(message, inputAge)
    elif ms_text == "6": # exercise 6: string slicing
        bot.send_message(chat_id, text='Ваше имя?')
        @bot.message_handler(content_types=['text'])
        def inputName(message):
            userName = message.text
            bot.send_message(chat_id, text=userName[1:-1])
            bot.send_message(chat_id, text=userName[::-1])
            bot.send_message(chat_id, text=userName[-3:])
            bot.send_message(chat_id, text=userName[0:5])
        bot.register_next_step_handler(message, inputName)
    elif ms_text == "7": # exercise 7: name length and digit arithmetic
        bot.send_message(chat_id, text='Ваше имя?')
        @bot.message_handler(content_types=['text'])
        def inputName(message):
            userName = message.text
            nameMessage = 'Кол-во букв в имени: ' + str(len(userName))
            bot.send_message(chat_id, text=nameMessage)
            bot.send_message(chat_id, text='Сколько Вам лет?')
            @bot.message_handler(content_types=['text'])
            def inputAge(message):
                userAge = message.text
                userAge = int(userAge)
                import math
                ageNum1 = math.floor(userAge / 10) # tens digit (for two-digit ages)
                ageNum2 = userAge % 10 # ones digit
                sum = ageNum1 + ageNum2
                ageMessage1 = 'Сумма цифр возраста: ' + str(sum)
                bot.send_message(chat_id, text=ageMessage1)
                if ageNum1 < 1:
                    comp = ageNum2
                else:
                    comp = ageNum1 * ageNum2
                ageMessage2 = 'Произведение цифр возраста: ' + str(comp)
                bot.send_message(chat_id, text=ageMessage2)
            bot.register_next_step_handler(message, inputAge)
        bot.register_next_step_handler(message, inputName)
    elif ms_text == "8": # exercise 8: case conversions
        bot.send_message(chat_id, text='Ваше имя?')
        @bot.message_handler(content_types=['text'])
        def inputName(message):
            userName = message.text
            bot.send_message(chat_id, text=userName.upper())
            bot.send_message(chat_id, text=userName.lower())
            bot.send_message(chat_id, text=userName.capitalize())
        bot.register_next_step_handler(message, inputName)
    elif ms_text == "9": # exercise 9: input validation
        bot.send_message(chat_id, text='Ваше имя?')
        @bot.message_handler(content_types=['text'])
        def inputName(message):
            userName = message.text
            if " " in userName:
                nameMessage = 'Error userName value'
            else:
                nameMessage = 'Correct userName value'
            bot.send_message(chat_id, text=nameMessage)
            bot.send_message(chat_id, text='Сколько Вам лет?')
            @bot.message_handler(content_types=['text'])
            def inputAge(message):
                userAge = message.text
                userAge = int(userAge)
                if (userAge < 0) or (userAge > 150):
                    ageMessage = 'Error userAge value'
                else:
                    ageMessage = 'Correct userAge value'
                bot.send_message(chat_id, text=ageMessage)
            bot.register_next_step_handler(message, inputAge)
        bot.register_next_step_handler(message, inputName)
    elif ms_text == "10": # exercise 10: arithmetic quiz
        bot.send_message(chat_id, text='Сколько будет 8+2*3?')
        @bot.message_handler(content_types=['text'])
        def inputAnswer(message):
            userAnswer = message.text
            userAnswer = int(userAnswer)
            if userAnswer == 14:
                userMessage = 'Правильно!'
            else:
                userMessage = 'Неверно!'
            bot.send_message(chat_id, text=userMessage)
        bot.register_next_step_handler(message, inputAnswer)
    elif ms_text == "Помощь" or ms_text == "/help": # help / author card
        bot.send_message(chat_id, "Автор: Яковлева Вероника")
        key1 = types.InlineKeyboardMarkup()
        btn1 = types.InlineKeyboardButton(text="Напишите автору", url="https://t.me/chicanica")
        key1.add(btn1)
        img = open('foto.jpg', 'rb') # NOTE(review): file handle is never closed
        bot.send_photo(message.chat.id, img, reply_markup=key1)
    else: # fallback: echo the message back
        bot.send_message(chat_id, text="Вы написали: " + ms_text)
def get_anekdot(link, className):
    """Fetch a random joke page and return the text of the first element
    matching the CSS selector *className*.

    Parameters
    ----------
    link : str
        URL of the random-joke page to fetch.
    className : str
        CSS selector for the joke text element (e.g. '.anekdot_text').

    Returns
    -------
    str
        The stripped joke text, or a fallback message when the page yields
        no matching element (the previous version raised IndexError there).
    """
    req_anek = requests.get(link)
    soup = bs4.BeautifulSoup(req_anek.text, "html.parser")
    # select_one() returns only the first match, so we no longer collect
    # every joke on the page just to use one of them.
    result = soup.select_one(className)
    if result is None:
        return "Не удалось получить анекдот, попробуйте ещё раз."
    return result.getText().strip()
# -----------------------------------------------------------------------
bot.polling(none_stop=True, interval=0) # start the blocking long-polling loop
print()
| 42.555147 | 149 | 0.539611 |
import telebot
from telebot import types
import requests
import bs4
import json
bot = telebot.TeleBot('5193117811:AAH0hWHVx0kH08sub52IFj2SAdJi1eugY-k')
@bot.message_handler(commands=["start"])
def start(message, res=False):
chat_id = message.chat.id
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
btn1 = types.KeyboardButton("👋 Главное меню")
btn2 = types.KeyboardButton("❓ Помощь")
markup.add(btn1, btn2)
bot.send_message(chat_id,
text="Привет, {0.first_name}! Я тестовый бот для курса программирования на языке ПаЙтон".format(
message.from_user), reply_markup=markup)
@bot.message_handler(content_types=['text'])
def get_text_messages(message):
chat_id = message.chat.id
ms_text = message.text
if ms_text == "Главное меню" or ms_text == "👋 Главное меню" or ms_text == "Вернуться в главное меню":
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
btn1 = types.KeyboardButton("Задачи")
btn2 = types.KeyboardButton("Развлечения")
btn3 = types.KeyboardButton("Помощь")
back = types.KeyboardButton("Вернуться в главное меню")
markup.add(btn1, btn2, btn3, back)
bot.send_message(chat_id, text="Вы в главном меню", reply_markup=markup)
elif ms_text == "Развлечения":
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
btn1 = types.KeyboardButton("Прислать анекдот c anekdotme.ru")
btn2 = types.KeyboardButton("Прислать анекдот c nekdo.ru")
btn3 = types.KeyboardButton("Прислать собаку")
btn4 = types.KeyboardButton("Прислать случайного пользователя")
back = types.KeyboardButton("Вернуться в главное меню")
markup.add(btn1, btn2, btn3, btn4, back)
bot.send_message(chat_id, text="Развлечения", reply_markup=markup)
elif ms_text == "Прислать анекдот c anekdotme.ru":
bot.send_message(chat_id, text=get_anekdot('http://anekdotme.ru/random', '.anekdot_text'))
elif ms_text == "Прислать анекдот c nekdo.ru":
bot.send_message(chat_id, text=get_anekdot('https://nekdo.ru/random', '.text'))
elif ms_text == "Прислать собаку" or ms_text == "/dog":
contents = requests.get('https://random.dog/woof.json').json()
urlDOG = contents['url']
bot.send_photo(chat_id, photo=urlDOG, caption='Твоя собачка:)')
elif ms_text == "Прислать случайного пользователя" or ms_text == "/user":
contents = requests.get('https://randomuser.me/api/').json()
img = contents['results'][0]['picture']['large']
name = contents['results'][0]['name']['title'] + ' ' + contents['results'][0]['name']['first'] + ' ' + contents['results'][0]['name']['last']
age = contents['results'][0]['dob']['age']
place = contents['results'][0]['location']['timezone']['description']
place = place.split(',')[0]
info = name + ', ' + str(age) + '\n' + place
bot.send_photo(chat_id, photo=img, caption=info)
elif ms_text == "Задачи":
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
btn1 = types.KeyboardButton("1")
btn2 = types.KeyboardButton("2")
btn3 = types.KeyboardButton("3")
btn4 = types.KeyboardButton("4")
btn5 = types.KeyboardButton("5")
btn6 = types.KeyboardButton("6")
btn7 = types.KeyboardButton("7")
btn8 = types.KeyboardButton("8")
btn9 = types.KeyboardButton("9")
btn10 = types.KeyboardButton("10")
back = types.KeyboardButton("Вернуться в главное меню")
markup.add(btn1, btn2, btn3, btn4, btn5, btn6, btn7, btn8, btn9, btn10, back)
bot.send_message(chat_id, text="Задачи с 1-го занятия", reply_markup=markup)
elif ms_text == "1":
name = 'Вероника'
bot.send_message(chat_id, text=name)
elif ms_text == "2":
name = 'Вероника'
age = 20
message = 'Привет, меня зовут ' + name + '. Мне ' + str(age) + ' лет.'
bot.send_message(chat_id, text=message)
elif ms_text == "3":
name = 'Вероника'
name5 = name * 5
bot.send_message(chat_id, text=name5)
elif ms_text == "4":
bot.send_message(chat_id, text='Ваше имя?')
@bot.message_handler(content_types=['text'])
def inputName(message):
userName = message.text
bot.send_message(chat_id, text='Сколько Вам лет?')
@bot.message_handler(content_types=['text'])
def inputAge(message):
userAge = message.text
userMessage = 'Привет, ' + userName + '! Тебе уже ' + userAge + ' лет?! Это так круто!'
bot.send_message(chat_id, text=userMessage)
bot.register_next_step_handler(message, inputAge)
bot.register_next_step_handler(message, inputName)
elif ms_text == "5":
bot.send_message(chat_id, text='Сколько Вам лет?')
@bot.message_handler(content_types=['text'])
def inputAge(message):
userAge = message.text
userAge = int(userAge)
if userAge < 18:
ageMessage = 'Ты не достиг еще совершеннолетия, возращайся позже'
else:
ageMessage = 'Ты уже достаточно взрослый, присоединяйся к нам!'
bot.send_message(chat_id, text=ageMessage)
bot.register_next_step_handler(message, inputAge)
elif ms_text == "6":
bot.send_message(chat_id, text='Ваше имя?')
@bot.message_handler(content_types=['text'])
def inputName(message):
userName = message.text
bot.send_message(chat_id, text=userName[1:-1])
bot.send_message(chat_id, text=userName[::-1])
bot.send_message(chat_id, text=userName[-3:])
bot.send_message(chat_id, text=userName[0:5])
bot.register_next_step_handler(message, inputName)
elif ms_text == "7":
bot.send_message(chat_id, text='Ваше имя?')
@bot.message_handler(content_types=['text'])
def inputName(message):
userName = message.text
nameMessage = 'Кол-во букв в имени: ' + str(len(userName))
bot.send_message(chat_id, text=nameMessage)
bot.send_message(chat_id, text='Сколько Вам лет?')
@bot.message_handler(content_types=['text'])
def inputAge(message):
userAge = message.text
userAge = int(userAge)
import math
ageNum1 = math.floor(userAge / 10)
ageNum2 = userAge % 10
sum = ageNum1 + ageNum2
ageMessage1 = 'Сумма цифр возраста: ' + str(sum)
bot.send_message(chat_id, text=ageMessage1)
if ageNum1 < 1:
comp = ageNum2
else:
comp = ageNum1 * ageNum2
ageMessage2 = 'Произведение цифр возраста: ' + str(comp)
bot.send_message(chat_id, text=ageMessage2)
bot.register_next_step_handler(message, inputAge)
bot.register_next_step_handler(message, inputName)
elif ms_text == "8":
bot.send_message(chat_id, text='Ваше имя?')
@bot.message_handler(content_types=['text'])
def inputName(message):
userName = message.text
bot.send_message(chat_id, text=userName.upper())
bot.send_message(chat_id, text=userName.lower())
bot.send_message(chat_id, text=userName.capitalize())
bot.register_next_step_handler(message, inputName)
elif ms_text == "9":
bot.send_message(chat_id, text='Ваше имя?')
@bot.message_handler(content_types=['text'])
def inputName(message):
userName = message.text
if " " in userName:
nameMessage = 'Error userName value'
else:
nameMessage = 'Correct userName value'
bot.send_message(chat_id, text=nameMessage)
bot.send_message(chat_id, text='Сколько Вам лет?')
@bot.message_handler(content_types=['text'])
def inputAge(message):
userAge = message.text
userAge = int(userAge)
if (userAge < 0) or (userAge > 150):
ageMessage = 'Error userAge value'
else:
ageMessage = 'Correct userAge value'
bot.send_message(chat_id, text=ageMessage)
bot.register_next_step_handler(message, inputAge)
bot.register_next_step_handler(message, inputName)
elif ms_text == "10":
bot.send_message(chat_id, text='Сколько будет 8+2*3?')
@bot.message_handler(content_types=['text'])
def inputAnswer(message):
userAnswer = message.text
userAnswer = int(userAnswer)
if userAnswer == 14:
userMessage = 'Правильно!'
else:
userMessage = 'Неверно!'
bot.send_message(chat_id, text=userMessage)
bot.register_next_step_handler(message, inputAnswer)
elif ms_text == "Помощь" or ms_text == "/help":
bot.send_message(chat_id, "Автор: Яковлева Вероника")
key1 = types.InlineKeyboardMarkup()
btn1 = types.InlineKeyboardButton(text="Напишите автору", url="https://t.me/chicanica")
key1.add(btn1)
img = open('foto.jpg', 'rb')
bot.send_photo(message.chat.id, img, reply_markup=key1)
else:
bot.send_message(chat_id, text="Вы написали: " + ms_text)
def get_anekdot(link, className):
array_anekdots = []
req_anek = requests.get(link)
soup = bs4.BeautifulSoup(req_anek.text, "html.parser")
result_find = soup.select(className)
for result in result_find:
array_anekdots.append(result.getText().strip())
return array_anekdots[0]
bot.polling(none_stop=True, interval=0)
print()
| true | true |
f7118d7f8a48baf47c123ea7acecff67d1c87396 | 460 | py | Python | meetscheduler/settings/developments.py | leejooy96/meetscheduler | e29a96d5bcaa62a32ae240fb20e85379f37000e5 | [
"MIT"
] | null | null | null | meetscheduler/settings/developments.py | leejooy96/meetscheduler | e29a96d5bcaa62a32ae240fb20e85379f37000e5 | [
"MIT"
] | 6 | 2020-02-13T00:29:51.000Z | 2021-06-10T17:59:43.000Z | meetscheduler/settings/developments.py | leejooy96/meetscheduler | e29a96d5bcaa62a32ae240fb20e85379f37000e5 | [
"MIT"
] | 1 | 2019-12-12T02:19:20.000Z | 2019-12-12T02:19:20.000Z | from .base import *
ALLOWED_HOSTS = ['127.0.0.1', 'localhost']
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': get_value_from_secret('DB_NAME'),
'HOST': get_value_from_secret('DB_HOST'),
'USER': get_value_from_secret('DB_USER'),
'PASSWORD': get_value_from_secret('DB_PASSWORD'),
'PORT': get_value_from_secret('DB_PORT'),
'OPTIONS': {'charset': 'utf8'}
}
}
| 27.058824 | 57 | 0.608696 | from .base import *
ALLOWED_HOSTS = ['127.0.0.1', 'localhost']
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': get_value_from_secret('DB_NAME'),
'HOST': get_value_from_secret('DB_HOST'),
'USER': get_value_from_secret('DB_USER'),
'PASSWORD': get_value_from_secret('DB_PASSWORD'),
'PORT': get_value_from_secret('DB_PORT'),
'OPTIONS': {'charset': 'utf8'}
}
}
| true | true |
f7118dacceb344ed0d2633d7e6dd0d0119901228 | 5,711 | py | Python | backend/pyrogram/methods/chats/iter_chat_members.py | appheap/social-media-analyzer | 0f9da098bfb0b4f9eb38e0244aa3a168cf97d51c | [
"Apache-2.0"
] | 5 | 2021-09-11T22:01:15.000Z | 2022-03-16T21:33:42.000Z | backend/pyrogram/methods/chats/iter_chat_members.py | iamatlasss/social-media-analyzer | 429d1d2bbd8bfce80c50c5f8edda58f87ace668d | [
"Apache-2.0"
] | null | null | null | backend/pyrogram/methods/chats/iter_chat_members.py | iamatlasss/social-media-analyzer | 429d1d2bbd8bfce80c50c5f8edda58f87ace668d | [
"Apache-2.0"
] | 3 | 2022-01-18T11:06:22.000Z | 2022-02-26T13:39:28.000Z | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from string import ascii_lowercase
from typing import Union, AsyncGenerator, Optional
from pyrogram import raw
from pyrogram import types
from pyrogram.scaffold import Scaffold
class Filters:
    # String constants accepted by iter_chat_members()'s ``filter`` argument.
    ALL = "all"
    KICKED = "kicked"
    RESTRICTED = "restricted"
    BOTS = "bots"
    RECENT = "recent"
    ADMINISTRATORS = "administrators"
# One query per character ("", "0"-"9", "a"-"z") used to sweep large member
# lists past the server-side page limits.
QUERIES = [""] + [str(i) for i in range(10)] + list(ascii_lowercase)
# Filters that support a non-empty search query string.
QUERYABLE_FILTERS = (Filters.ALL, Filters.KICKED, Filters.RESTRICTED)
class IterChatMembers(Scaffold):
    async def iter_chat_members(
        self,
        chat_id: Union[int, str],
        limit: int = 0,
        query: str = "",
        filter: str = Filters.RECENT,
        last_member_count: int = 0,  # to speed up iteration for small chats
    ) -> Optional[AsyncGenerator["types.ChatMember", None]]:
        """Iterate through the members of a chat sequentially.

        This convenience method does the same as repeatedly calling
        :meth:`~pyrogram.Client.get_chat_members` in a loop, thus saving you
        from the hassle of setting up boilerplate code. It is useful for
        getting the whole members list of a chat with a single call.

        Parameters:
            chat_id (``int`` | ``str``):
                Unique identifier (int) or username (str) of the target chat.

            limit (``int``, *optional*):
                Limits the number of members to be retrieved.
                By default, no limit is applied and all members are returned.

            query (``str``, *optional*):
                Query string to filter members based on their display names
                and usernames. Applicable only for the "all", "kicked" and
                "restricted" filters. Defaults to "" (empty string).

            filter (``str``, *optional*):
                Kind of members to retrieve; one of "all", "kicked",
                "restricted", "bots", "recent" or "administrators".
                Defaults to "recent".

            last_member_count (``int``):
                Known member count of the chat; when 200 or fewer, a single
                empty query is used instead of sweeping the alphabet.

        Returns:
            ``Generator``: A generator yielding
            :obj:`~pyrogram.types.ChatMember` objects.
        """
        current = 0
        yielded = set()
        # Small chats fit in one page; large chats are swept with one query
        # per character to work around the server-side page limits.
        if query:
            queries = [query]
        elif last_member_count > 200:
            queries = QUERIES
        else:
            queries = [query]
        total = limit or (1 << 31) - 1
        limit = min(200, total)
        resolved_chat_id = await self.resolve_peer(chat_id)
        # Only some filters support a query string; for the rest, force a
        # single unfiltered pass.
        if filter not in QUERYABLE_FILTERS:
            queries = [""]
        for q in queries:
            offset = 0
            while True:
                chat_members = await self.get_chat_members(
                    chat_id=chat_id,
                    offset=offset,
                    limit=limit,
                    query=q,
                    filter=filter
                )
                if not chat_members:
                    break
                # Basic (small) group chats return every member at once.
                if isinstance(resolved_chat_id, raw.types.InputPeerChat):
                    total = len(chat_members)
                offset += len(chat_members)
                for chat_member in chat_members:
                    user_id = chat_member.user.id
                    # Different queries can match the same member twice;
                    # yield each user only once.
                    if user_id in yielded:
                        continue
                    yield chat_member
                    yielded.add(user_id)
                    current += 1
                    if current >= total:
                        return
| 35.69375 | 120 | 0.575031 |
from string import ascii_lowercase
from typing import Union, AsyncGenerator, Optional
from pyrogram import raw
from pyrogram import types
from pyrogram.scaffold import Scaffold
class Filters:
    """Names of the member filters accepted by chat-member queries."""
    ALL = "all"  # all kinds of members
    KICKED = "kicked"  # kicked (banned) members only
    RESTRICTED = "restricted"  # restricted members only
    BOTS = "bots"  # bots only
    RECENT = "recent"  # recent members only (the default filter)
    ADMINISTRATORS = "administrators"  # chat administrators only
# Per-prefix search queries used to page through very large member lists:
# the empty query, the digits 0-9 and the letters a-z.
QUERIES = [""] + [str(i) for i in range(10)] + list(ascii_lowercase)
# Filters that honour a free-text query string; all others ignore it.
QUERYABLE_FILTERS = (Filters.ALL, Filters.KICKED, Filters.RESTRICTED)
class IterChatMembers(Scaffold):
    async def iter_chat_members(
        self,
        chat_id: Union[int, str],
        limit: int = 0,
        query: str = "",
        filter: str = Filters.RECENT,
        last_member_count: int = 0,
    ) -> Optional[AsyncGenerator["types.ChatMember", None]]:
        """Iterate over the members of a chat, one by one.

        Pages through :meth:`get_chat_members` (up to 200 members per
        request) and, for large chats with no explicit ``query``, fans
        out over the per-prefix ``QUERIES`` list.

        Parameters:
            chat_id: Unique identifier (int) or username (str) of the chat.
            limit: Maximum number of members to yield; 0 means no limit.
            query: Free-text query; only honoured by ``QUERYABLE_FILTERS``.
            filter: One of the :class:`Filters` constants.
            last_member_count: Known member count; when greater than 200
                and no query is given, the per-prefix fan-out is used.

        Yields:
            :obj:`~pyrogram.types.ChatMember` objects, deduplicated by
            user id (prefix queries can return overlapping results).
        """
        current = 0
        yielded = set()  # user ids already produced
        if query:
            queries = [query]
        else:
            # Small chats fit in a single (empty-query) pass; large ones
            # need the per-prefix fan-out to page past the result cap.
            if last_member_count > 200:
                queries = QUERIES
            else:
                queries = [query]
        total = limit or (1 << 31) - 1
        limit = min(200, total)  # per-request page size
        resolved_chat_id = await self.resolve_peer(chat_id)
        if filter not in QUERYABLE_FILTERS:
            # Non-queryable filters ignore text queries: single pass only.
            queries = [""]
        for q in queries:
            offset = 0
            while True:
                chat_members = await self.get_chat_members(
                    chat_id=chat_id,
                    offset=offset,
                    limit=limit,
                    query=q,
                    filter=filter
                )
                if not chat_members:
                    break
                if isinstance(resolved_chat_id, raw.types.InputPeerChat):
                    # Basic groups return the whole list in one response.
                    total = len(chat_members)
                offset += len(chat_members)
                for chat_member in chat_members:
                    user_id = chat_member.user.id
                    if user_id in yielded:
                        continue
                    yield chat_member
                    yielded.add(chat_member.user.id)
                    current += 1
                    if current >= total:
                        return
| true | true |
f7118e03c63e8796cb463067d621da592fba5da6 | 2,513 | py | Python | stackedcoil/cmulti_c1243_3c.py | anon-coil/coil_gecco | 6b8aa410a944e1db26c3acdc77af71b3b5d4fe74 | [
"MIT"
] | 2 | 2022-02-15T08:39:26.000Z | 2022-02-17T11:51:06.000Z | stackedcoil/cmulti_c1243_3c.py | anon-coil/coil_gecco | 6b8aa410a944e1db26c3acdc77af71b3b5d4fe74 | [
"MIT"
] | null | null | null | stackedcoil/cmulti_c1243_3c.py | anon-coil/coil_gecco | 6b8aa410a944e1db26c3acdc77af71b3b5d4fe74 | [
"MIT"
] | null | null | null | # generate data: specify number of data points generated
NUM_DATA_POINTS = 5000
# the minimum number of variables required for the equation to work
MIN_VARIABLES = 4
# specify objective function
def func(x, num_var):
    """Sphere objective: sum of x[i]**2 over the first num_var entries."""
    return sum((x[i] ** 2 for i in range(num_var)), 0.0)
# specify whether to minimise or maximise function, 0 for min 1 for max
MIN_OR_MAX_FLAG = 0
# set the min and max range for the variables
X_MIN_RANGE = -50.0
X_MAX_RANGE = 50.0
# specify constraints (return 0 if constraint is met, otherwise return absolute distance)
def c0(x, num_var):
    """Constraint (80 - x[0] - x[1]) <= 0: return 0.0 when met, else the violation."""
    violation = 80 - x[0] - x[1]
    return violation if violation > 0.0 else 0.0
def c1(x, num_var):
    """Constraint x[2] + 45 <= 0: return 0.0 when met, else the violation."""
    violation = x[2] + 45
    return violation if violation > 0.0 else 0.0
def c2(x, num_var):
    """Constraint -7 <= x[2] + x[3] <= -5: return 0.0 inside the bounds,
    otherwise the absolute distance to the nearest violated bound."""
    LOWER_BOUND = -7.0
    UPPER_BOUND = -5.0
    value = x[2] + x[3]
    # Distance below the lower bound, distance above the upper bound, or zero.
    return max(LOWER_BOUND - value, value - UPPER_BOUND, 0.0)
# list of constraints: add specified constraints to this list in order for them to be considered
# (each entry is a callable (x, num_var) -> float that returns 0.0 when satisfied)
CONSTRAINTS = [
    c0,
    c1,
    c2
]
# calculate the optimal result for the function for the constraint(s) to be met
# (optimal_point satisfies every constraint: 80 - 40 - 40 = 0 for c0,
#  -45 + 45 = 0 for c1, and -45 + 40 = -5 lies on c2's upper bound)
optimal_point = [40.0, 40.0, -45.0, 40.0]
def calculate_optimal(num_var):
    # Objective value at the known optimum, over the first num_var coordinates.
    return func(optimal_point, num_var)
# generate data: specify num gen and num pop for the data generator GA
DATAGEN_GEN = 200 #500
DATAGEN_POP = 200
# generate data: specify min and max range for data
DATAGEN_MIN_RANGE = -1.0
DATAGEN_MAX_RANGE = 1.0
# learn representation: specify the number of latent variables and epochs for the vae
# NUM_LATENT = NUM_VARIABLES
NUM_EPOCHS = 200
# optimise: specify num gen and num pop for the optimiser GA
VAEGA_GEN = 50
VAEGA_POP = 20
# optimse: the range for the GA to generate random numbers for the latent variable
VAEGA_MIN_RANGE = -2.0
VAEGA_MAX_RANGE = 2.0
# comparison GA: specify num gen and num pop for the GA
# GA_NUM_INDIVIDUALS = NUM_VARIABLES # the number of individuals for the GA is the number of variables
GA_GEN = 50
GA_POP = 20
| 25.907216 | 102 | 0.670115 |
NUM_DATA_POINTS = 5000
MIN_VARIABLES = 4
def func(x, num_var):
result = 0.0
for i in range(num_var):
result += x[i]**2
return result
MIN_OR_MAX_FLAG = 0
X_MIN_RANGE = -50.0
X_MAX_RANGE = 50.0
def c0(x, num_var):
result = 80 - x[0] - x[1]
if result <= 0.0:
return 0.0
else:
return result
def c1(x, num_var):
result = x[2] + 45
if result <= 0.0:
return 0.0
else:
return result
def c2(x, num_var):
LOWER_BOUND = -7.0
UPPER_BOUND = -5.0
result = x[2] + x[3]
if (result <= UPPER_BOUND) and (result >= LOWER_BOUND):
return 0.0
else:
if result < LOWER_BOUND:
distance = result - LOWER_BOUND
else:
distance = result - UPPER_BOUND
if distance >= 0:
return distance
else:
return (-distance)
CONSTRAINTS = [
c0,
c1,
c2
]
optimal_point = [40.0, 40.0, -45.0, 40.0]
def calculate_optimal(num_var):
return func(optimal_point, num_var)
DATAGEN_GEN = 200
DATAGEN_POP = 200
DATAGEN_MIN_RANGE = -1.0
DATAGEN_MAX_RANGE = 1.0
NUM_EPOCHS = 200
VAEGA_GEN = 50
VAEGA_POP = 20
VAEGA_MIN_RANGE = -2.0
VAEGA_MAX_RANGE = 2.0
| true | true |
f7118f37fa1e83d2b81be644d00dbe24bcf186ed | 5,814 | py | Python | file_utils.py | watanka/CRAFTS-implementation | bc514638755fe798a0d5eb583d6d477e8eb55bff | [
"Apache-2.0"
] | 1 | 2021-11-16T15:52:58.000Z | 2021-11-16T15:52:58.000Z | file_utils.py | watanka/CRAFTS-implementation | bc514638755fe798a0d5eb583d6d477e8eb55bff | [
"Apache-2.0"
] | null | null | null | file_utils.py | watanka/CRAFTS-implementation | bc514638755fe798a0d5eb583d6d477e8eb55bff | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import numpy as np
import cv2
import imgproc
from PIL import Image, ImageDraw
# borrowed from https://github.com/lengstrom/fast-style-transfer/blob/master/src/utils.py
def get_files(img_dir):
    """Return the (image_files, mask_files, gt_files) lists found under img_dir."""
    return list_files(img_dir)
def list_files(in_path):
    """Recursively partition the files under ``in_path`` by extension.

    Args:
        in_path (str): root directory to walk.

    Returns:
        tuple: ``(img_files, mask_files, gt_files)`` — full paths of
        image files (.jpg/.jpeg/.gif/.png/.pgm), mask files (.bmp) and
        ground-truth files (.xml/.gt/.txt). Other extensions are ignored.
    """
    img_exts = ('.jpg', '.jpeg', '.gif', '.png', '.pgm')
    mask_exts = ('.bmp',)
    gt_exts = ('.xml', '.gt', '.txt')
    img_files = []
    mask_files = []
    gt_files = []
    for dirpath, _, filenames in os.walk(in_path):
        for name in filenames:
            ext = os.path.splitext(name)[1].lower()
            full_path = os.path.join(dirpath, name)
            if ext in img_exts:
                img_files.append(full_path)
            elif ext in mask_exts:
                mask_files.append(full_path)
            elif ext in gt_exts:
                gt_files.append(full_path)
    return img_files, mask_files, gt_files
def saveResult(img_file, img, boxes, font, dirname='./result/', verticals=None, texts=None):
    """Save one image's text detection result (coordinate file + annotated image).

    Args:
        img_file (str): source image file name; used to name the outputs.
        img (array): raw image content (array or PIL-convertible).
        boxes (array): detected polygons; each box flattens to x1,y1,x2,y2,...
        font: PIL ImageFont used to render recognized text, when given.
        dirname (str): output directory; created if missing.
        verticals: kept for interface compatibility (unused).
        texts (list or None): recognized string per box; drawn and written
            to the result file when provided.

    Writes ``res_<name>.txt`` (one comma-separated polygon per line, with
    the recognized text appended when available) and ``res_<name>.jpg``.
    """
    img = np.array(img)
    img_pil = Image.fromarray(img)
    imgdraw = ImageDraw.Draw(img_pil)
    filename, _ = os.path.splitext(os.path.basename(img_file))
    # os.path.join also tolerates a dirname given without a trailing slash.
    res_file = os.path.join(dirname, "res_" + filename + '.txt')
    res_img_file = os.path.join(dirname, "res_" + filename + '.jpg')
    if not os.path.isdir(dirname):
        os.mkdir(dirname)
    with open(res_file, 'w') as f:
        for i, box in enumerate(boxes):
            poly = np.array(box).astype(np.int32).reshape((-1))
            coords = ','.join([str(p) for p in poly])
            if texts is not None:
                f.write(coords + ',' + texts[i] + '\r\n')
            else:
                f.write(coords + '\r\n')
            pts = poly.reshape(-1, 2)
            # PIL's polygon expects a flat [x1, y1, x2, y2, ...] sequence;
            # the old no-text branch passed a nested ndarray, which fails.
            imgdraw.polygon(pts.flatten().tolist(), fill=None, outline=(0, 0, 255))
            if texts is not None:
                imgdraw.text(tuple(pts[1]), texts[i], font=font, fill=(0, 0, 255))
    # Save the annotated image.
    cv2.imwrite(res_img_file, np.array(img_pil))
def load_txt(file, delimiter=','):
    """Parse a character-box ground-truth text file.

    Character boxes belonging to one group are written on consecutive
    lines; groups are separated by a blank line (``\\n\\n``). Each
    non-empty line holds one box's coordinates separated by
    ``delimiter``; these files carry no text labels.

    Args:
        file (str): path to the ground-truth text file.
        delimiter (str): coordinate separator within a line.

    Returns:
        list of np.ndarray: one array per group, shaped
        (num_chars, num_points, 2).
    """
    coords_ls = []
    with open(file, 'r', encoding='utf-8-sig') as f:
        boxes_list = f.read().split('\n\n')
    for boxes in boxes_list:
        if boxes.strip() == '':
            continue
        charbox_ls = []
        for charbox in boxes.split('\n'):
            # Drop empty fragments (blank lines, trailing delimiters).
            parts = [c for c in charbox.split(delimiter) if c != '']
            if len(parts) == 0:
                continue
            coords = np.array([float(c) for c in parts]).reshape(-1, 2)
            charbox_ls.append(coords)
        if len(charbox_ls) != 0:
            coords_ls.append(np.array(charbox_ls))
    return coords_ls
| 40.943662 | 136 | 0.484348 |
import os
import numpy as np
import cv2
import imgproc
from PIL import Image, ImageDraw
def get_files(img_dir):
imgs, masks, xmls = list_files(img_dir)
return imgs, masks, xmls
def list_files(in_path):
img_files = []
mask_files = []
gt_files = []
for (dirpath, dirnames, filenames) in os.walk(in_path):
for file in filenames:
filename, ext = os.path.splitext(file)
ext = str.lower(ext)
if ext == '.jpg' or ext == '.jpeg' or ext == '.gif' or ext == '.png' or ext == '.pgm':
img_files.append(os.path.join(dirpath, file))
elif ext == '.bmp':
mask_files.append(os.path.join(dirpath, file))
elif ext == '.xml' or ext == '.gt' or ext == '.txt':
gt_files.append(os.path.join(dirpath, file))
elif ext == '.zip':
continue
return img_files, mask_files, gt_files
def saveResult(img_file, img, boxes, font,dirname='./result/', verticals=None, texts=None):
img = np.array(img)
img_pil = Image.fromarray(img)
imgdraw = ImageDraw.Draw(img_pil)
filename, file_ext = os.path.splitext(os.path.basename(img_file))
res_file = dirname + "res_" + filename + '.txt'
res_img_file = dirname + "res_" + filename + '.jpg'
if not os.path.isdir(dirname):
os.mkdir(dirname)
with open(res_file, 'w') as f:
if texts is not None :
for i, (box, text) in enumerate(zip(boxes, texts)):
poly = np.array(box).astype(np.int32).reshape((-1))
strResult = ','.join([str(p) for p in poly]) +','+text +'\r\n'
f.write(strResult)
poly = poly.reshape(-1, 2)
imgdraw.polygon(poly.flatten().tolist(), fill = None, outline = (0,0,255))
imgdraw.text(tuple(poly[1]), text,font = font, fill = (0,0,255))
ptColor = (0, 255, 255)
if verticals is not None:
if verticals[i]:
ptColor = (255, 0, 0)
else :
for i, box in enumerate(boxes):
poly = np.array(box).astype(np.int32).reshape((-1))
strResult = ','.join([str(p) for p in poly]) + '\r\n'
f.write(strResult)
poly = poly.reshape(-1, 2)
imgdraw.polygon([poly.reshape((-1,1,2))], fill = None, outline =(0,0,255))
ptColor = (0, 255, 255)
if verticals is not None:
if verticals[i]:
ptColor = (255, 0, 0)
te(res_img_file, np.array(img_pil))
def load_txt(file, delimiter = ',') :
n(file, 'r', encoding = 'utf-8-sig') as f :
boxes_list = f.read().split('\n\n')
for boxes in boxes_list :
if boxes.strip() == '' :
continue
char_boxes = boxes.split('\n')
charbox_ls = []
for charbox in char_boxes :
if len(char_boxes) == 0 :
continue
coords = charbox.split(delimiter)
coords = [float(c) for c in coords if c != '']
if len(coords) == 0 :
continue
coords = np.array(coords).reshape(-1,2)
charbox_ls.append(coords)
if len(charbox_ls) != 0 :
coords_ls.append(np.array(charbox_ls))
return coords_ls
| true | true |
f7118f84164b64de1d5f666d410c1b51fe89dbb3 | 985 | py | Python | migrations/versions/5b2f27493d7e_.py | NeverLeft/FLASKTPP | 3480131f3386bfc86e45a914f2140949863641dd | [
"Apache-2.0"
] | null | null | null | migrations/versions/5b2f27493d7e_.py | NeverLeft/FLASKTPP | 3480131f3386bfc86e45a914f2140949863641dd | [
"Apache-2.0"
] | null | null | null | migrations/versions/5b2f27493d7e_.py | NeverLeft/FLASKTPP | 3480131f3386bfc86e45a914f2140949863641dd | [
"Apache-2.0"
] | null | null | null | """empty message
Revision ID: 5b2f27493d7e
Revises: 1d17bfa8fe08
Create Date: 2018-06-14 14:54:29.224338
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5b2f27493d7e'
down_revision = '1d17bfa8fe08'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``movie_plan`` table linking a movie to a hall at a time."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('movie_plan',
    sa.Column('id', sa.String(length=128), nullable=False),
    sa.Column('mp_movie', sa.Integer(), nullable=True),  # FK -> movies.id
    sa.Column('mp_hall', sa.Integer(), nullable=True),  # FK -> hall.id
    sa.Column('mp_time', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['mp_hall'], ['hall.id'], ),
    sa.ForeignKeyConstraint(['mp_movie'], ['movies.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``movie_plan`` table (reverts :func:`upgrade`)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('movie_plan')
    # ### end Alembic commands ###
| 26.621622 | 65 | 0.675127 | from alembic import op
import sqlalchemy as sa
revision = '5b2f27493d7e'
down_revision = '1d17bfa8fe08'
branch_labels = None
depends_on = None
def upgrade():
nteger(), nullable=True),
sa.Column('mp_time', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['mp_hall'], ['hall.id'], ),
sa.ForeignKeyConstraint(['mp_movie'], ['movies.id'], ),
sa.PrimaryKeyConstraint('id')
)
| true | true |
f711900c4366c1277c1a4b4099d698e25beaf991 | 5,029 | py | Python | chainer/links/connection/peephole.py | yuhonghong66/chainer | 15d475f54fc39587abd7264808c5e4b33782df9e | [
"MIT"
] | 1 | 2019-02-12T23:10:16.000Z | 2019-02-12T23:10:16.000Z | chainer/links/connection/peephole.py | nolfwin/chainer | 8d776fcc1e848cb9d3800a6aab356eb91ae9d088 | [
"MIT"
] | 2 | 2019-05-14T15:45:01.000Z | 2019-05-15T07:12:49.000Z | chainer/links/connection/peephole.py | nolfwin/chainer | 8d776fcc1e848cb9d3800a6aab356eb91ae9d088 | [
"MIT"
] | 1 | 2018-05-28T22:43:34.000Z | 2018-05-28T22:43:34.000Z | import chainer
from chainer.backends import cuda
from chainer.functions.activation import sigmoid
from chainer.functions.activation import tanh
from chainer.functions.array import reshape
from chainer.functions.array import split_axis
from chainer import link
from chainer.links.connection import linear
from chainer import variable
class StatefulPeepholeLSTM(link.Chain):
    """Fully-connected LSTM layer with peephole connections.
    This is a fully-connected LSTM layer with peephole connections as a chain.
    Unlike the :class:`~chainer.links.LSTM` link, this chain holds ``peep_i``,
    ``peep_f`` and ``peep_o`` as child links besides ``upward`` and
    ``lateral``.
    Given an input vector :math:`x`, Peephole returns the next hidden vector
    :math:`h'` defined as
    .. math::
        a &=& \\tanh(upward x + lateral h), \\\\
        i &=& \\sigma(upward x + lateral h + peep_i c), \\\\
        f &=& \\sigma(upward x + lateral h + peep_f c), \\\\
        c' &=& a \\odot i + f \\odot c, \\\\
        o &=& \\sigma(upward x + lateral h + peep_o c'), \\\\
        h' &=& o \\tanh(c'),
    where :math:`\\sigma` is the sigmoid function, :math:`\\odot` is the
    element-wise product, :math:`c` is the current cell state, :math:`c'`
    is the next cell state and :math:`h` is the current hidden vector.
    Args:
        in_size(int): Dimension of the input vector :math:`x`.
        out_size(int): Dimension of the hidden vector :math:`h`.
    Attributes:
        upward (~chainer.links.Linear): Linear layer of upward connections.
        lateral (~chainer.links.Linear): Linear layer of lateral connections.
        peep_i (~chainer.links.Linear): Linear layer of peephole connections
            to the input gate.
        peep_f (~chainer.links.Linear): Linear layer of peephole connections
            to the forget gate.
        peep_o (~chainer.links.Linear): Linear layer of peephole connections
            to the output gate.
        c (~chainer.Variable): Cell states of LSTM units.
        h (~chainer.Variable): Output at the current time step.
    """
    def __init__(self, in_size, out_size):
        super(StatefulPeepholeLSTM, self).__init__()
        self.state_size = out_size
        self.reset_state()
        with self.init_scope():
            # ``upward`` projects x to all four gates at once (hence the
            # 4 * out_size width); ``lateral`` does the same for h.
            self.upward = linear.Linear(in_size, 4 * out_size)
            self.lateral = linear.Linear(out_size, 4 * out_size, nobias=True)
            self.peep_i = linear.Linear(out_size, out_size, nobias=True)
            self.peep_f = linear.Linear(out_size, out_size, nobias=True)
            self.peep_o = linear.Linear(out_size, out_size, nobias=True)
    def _to_device(self, device, skip_between_cupy_devices=False):
        # Overrides Link._to_device
        # TODO(niboshi): Avoid forcing concrete links to override _to_device
        device = chainer.get_device(device)
        super(StatefulPeepholeLSTM, self)._to_device(
            device, skip_between_cupy_devices=skip_between_cupy_devices)
        # Also migrate the recurrent state (c, h), which are plain
        # attributes (set in reset_state) and not registered parameters.
        if self.c is not None:
            if not (skip_between_cupy_devices
                    and device.xp is cuda.cupy
                    and isinstance(self.c, cuda.ndarray)):
                self.c.to_device(device)
        if self.h is not None:
            if not (skip_between_cupy_devices
                    and device.xp is cuda.cupy
                    and isinstance(self.h, cuda.ndarray)):
                self.h.to_device(device)
        return self
    def reset_state(self):
        """Resets the internal states.
        It sets ``None`` to the :attr:`c` and :attr:`h` attributes.
        """
        self.c = self.h = None
    def forward(self, x):
        """Updates the internal state and returns the LSTM outputs.
        Args:
            x (~chainer.Variable): A new batch from the input sequence.
        Returns:
            ~chainer.Variable: Outputs of updated LSTM units.
        """
        lstm_in = self.upward(x)
        if self.h is not None:
            lstm_in += self.lateral(self.h)
        if self.c is None:
            # Lazily initialize the cell state with zeros on the first call.
            xp = self.xp
            with chainer.using_device(self.device):
                self.c = variable.Variable(
                    xp.zeros((len(x), self.state_size), dtype=x.dtype))
        # Split the pre-activation into the four gate slices a, i, f, o.
        lstm_in = reshape.reshape(
            lstm_in, (len(lstm_in), lstm_in.shape[1] // 4, 4))
        a, i, f, o = split_axis.split_axis(lstm_in, 4, 2)
        a = reshape.reshape(a, a.shape[:2])
        i = reshape.reshape(i, i.shape[:2])
        f = reshape.reshape(f, f.shape[:2])
        o = reshape.reshape(o, o.shape[:2])
        # Peephole terms for i and f use the *previous* cell state ...
        peep_in_i = self.peep_i(self.c)
        peep_in_f = self.peep_f(self.c)
        a = tanh.tanh(a)
        i = sigmoid.sigmoid(i + peep_in_i)
        f = sigmoid.sigmoid(f + peep_in_f)
        self.c = a * i + f * self.c
        # ... while the output gate peeks at the *updated* cell state.
        peep_in_o = self.peep_o(self.c)
        o = sigmoid.sigmoid(o + peep_in_o)
        self.h = o * tanh.tanh(self.c)
        return self.h
| 39.289063 | 78 | 0.601114 | import chainer
from chainer.backends import cuda
from chainer.functions.activation import sigmoid
from chainer.functions.activation import tanh
from chainer.functions.array import reshape
from chainer.functions.array import split_axis
from chainer import link
from chainer.links.connection import linear
from chainer import variable
class StatefulPeepholeLSTM(link.Chain):
def __init__(self, in_size, out_size):
super(StatefulPeepholeLSTM, self).__init__()
self.state_size = out_size
self.reset_state()
with self.init_scope():
self.upward = linear.Linear(in_size, 4 * out_size)
self.lateral = linear.Linear(out_size, 4 * out_size, nobias=True)
self.peep_i = linear.Linear(out_size, out_size, nobias=True)
self.peep_f = linear.Linear(out_size, out_size, nobias=True)
self.peep_o = linear.Linear(out_size, out_size, nobias=True)
def _to_device(self, device, skip_between_cupy_devices=False):
device = chainer.get_device(device)
super(StatefulPeepholeLSTM, self)._to_device(
device, skip_between_cupy_devices=skip_between_cupy_devices)
if self.c is not None:
if not (skip_between_cupy_devices
and device.xp is cuda.cupy
and isinstance(self.c, cuda.ndarray)):
self.c.to_device(device)
if self.h is not None:
if not (skip_between_cupy_devices
and device.xp is cuda.cupy
and isinstance(self.h, cuda.ndarray)):
self.h.to_device(device)
return self
def reset_state(self):
self.c = self.h = None
def forward(self, x):
lstm_in = self.upward(x)
if self.h is not None:
lstm_in += self.lateral(self.h)
if self.c is None:
xp = self.xp
with chainer.using_device(self.device):
self.c = variable.Variable(
xp.zeros((len(x), self.state_size), dtype=x.dtype))
lstm_in = reshape.reshape(
lstm_in, (len(lstm_in), lstm_in.shape[1] // 4, 4))
a, i, f, o = split_axis.split_axis(lstm_in, 4, 2)
a = reshape.reshape(a, a.shape[:2])
i = reshape.reshape(i, i.shape[:2])
f = reshape.reshape(f, f.shape[:2])
o = reshape.reshape(o, o.shape[:2])
peep_in_i = self.peep_i(self.c)
peep_in_f = self.peep_f(self.c)
a = tanh.tanh(a)
i = sigmoid.sigmoid(i + peep_in_i)
f = sigmoid.sigmoid(f + peep_in_f)
self.c = a * i + f * self.c
peep_in_o = self.peep_o(self.c)
o = sigmoid.sigmoid(o + peep_in_o)
self.h = o * tanh.tanh(self.c)
return self.h
| true | true |
f71190292a7421e463a92607ba0d07147a7f68d5 | 3,338 | py | Python | xenia_python_client_library/models/organization_user_update.py | DutchAnalytics/xenia-python-client-library | 60dc3e21094086124b552ff5bed5895fee826b57 | [
"Apache-2.0"
] | null | null | null | xenia_python_client_library/models/organization_user_update.py | DutchAnalytics/xenia-python-client-library | 60dc3e21094086124b552ff5bed5895fee826b57 | [
"Apache-2.0"
] | null | null | null | xenia_python_client_library/models/organization_user_update.py | DutchAnalytics/xenia-python-client-library | 60dc3e21094086124b552ff5bed5895fee826b57 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Xenia Python Client Library
Python Client Library to interact with the Xenia API. # noqa: E501
The version of the OpenAPI document: v2.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from xenia_python_client_library.configuration import Configuration
class OrganizationUserUpdate(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """

    # attribute name -> declared OpenAPI type
    openapi_types = {
        'admin': 'bool'
    }
    # attribute name -> JSON key in the API definition
    attribute_map = {
        'admin': 'admin'
    }

    def __init__(self, admin=False, local_vars_configuration=None):  # noqa: E501
        """OrganizationUserUpdate - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration()
            if local_vars_configuration is None
            else local_vars_configuration
        )
        self._admin = None
        self.discriminator = None
        if admin is not None:
            self.admin = admin

    @property
    def admin(self):
        """Gets the admin of this OrganizationUserUpdate.  # noqa: E501

        :rtype: bool
        """
        return self._admin

    @admin.setter
    def admin(self, admin):
        """Sets the admin of this OrganizationUserUpdate.

        :type admin: bool
        """
        self._admin = admin

    def to_dict(self):
        """Return the model properties as a dict."""
        def convert(value):
            # Recursively serialize nested models, lists and dicts.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {attr: convert(getattr(self, attr))
                for attr, _ in six.iteritems(self.openapi_types)}

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when both objects are equal."""
        return (isinstance(other, OrganizationUserUpdate)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Return True when both objects are not equal."""
        return (not isinstance(other, OrganizationUserUpdate)
                or self.to_dict() != other.to_dict())
| 27.586777 | 81 | 0.579089 |
import pprint
import re
import six
from xenia_python_client_library.configuration import Configuration
class OrganizationUserUpdate(object):
openapi_types = {
'admin': 'bool'
}
attribute_map = {
'admin': 'admin'
}
def __init__(self, admin=False, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._admin = None
self.discriminator = None
if admin is not None:
self.admin = admin
@property
def admin(self):
return self._admin
@admin.setter
def admin(self, admin):
self._admin = admin
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, OrganizationUserUpdate):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, OrganizationUserUpdate):
return True
return self.to_dict() != other.to_dict()
| true | true |
f71192f1a6d47ff40b424445a4968b83f2e91a89 | 9,469 | py | Python | tests/test_generate_files.py | bittner/cookiecutter | 3a3f4b247a5f5f326a614907ba915af773791fdf | [
"BSD-3-Clause"
] | 1 | 2020-12-19T08:02:33.000Z | 2020-12-19T08:02:33.000Z | tests/test_generate_files.py | imbi7py/cookiecutter | 7f4bc4929ec6dc4e77b389d698b576b8a089a091 | [
"BSD-3-Clause"
] | null | null | null | tests/test_generate_files.py | imbi7py/cookiecutter | 7f4bc4929ec6dc4e77b389d698b576b8a089a091 | [
"BSD-3-Clause"
] | 1 | 2020-01-15T16:44:21.000Z | 2020-01-15T16:44:21.000Z | # -*- coding: utf-8 -*-
"""
test_generate_files
-------------------
Tests formerly known from a unittest residing in test_generate.py named
TestGenerateFiles.test_generate_files_nontemplated_exception
TestGenerateFiles.test_generate_files
TestGenerateFiles.test_generate_files_with_trailing_newline
TestGenerateFiles.test_generate_files_binaries
TestGenerateFiles.test_generate_files_absolute_path
TestGenerateFiles.test_generate_files_output_dir
TestGenerateFiles.test_generate_files_permissions
Use the global clean_system fixture and run additional teardown code to remove
some special folders.
For a better understanding - order of fixture calls:
clean_system setup code
remove_additional_folders setup code
remove_additional_folders teardown code
clean_system teardown code
"""
from __future__ import unicode_literals
import os
import io
import pytest
from cookiecutter import generate
from cookiecutter import exceptions
from cookiecutter import utils
@pytest.mark.parametrize('invalid_dirname', ['', '{foo}', '{{foo', 'bar}}'])
def test_ensure_dir_is_templated_raises(invalid_dirname):
    # Dirnames lacking a complete {{...}} template must be rejected.
    with pytest.raises(exceptions.NonTemplatedInputDirException):
        generate.ensure_dir_is_templated(invalid_dirname)
@pytest.fixture(scope='function')
def remove_additional_folders(request):
    """
    Remove some special folders which are created by the tests.
    """
    # All output folders any test in this module may leave behind.
    test_folders = [
        'inputpizzä',
        'inputgreen',
        'inputbinary_files',
        'tests/custom_output_dir',
        'inputpermissions',
    ]

    def fin_remove_additional_folders():
        # Teardown: delete whichever folders the test actually created.
        for folder in test_folders:
            if os.path.exists(folder):
                utils.rmtree(folder)
    request.addfinalizer(fin_remove_additional_folders)
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files_nontemplated_exception():
    # A repo whose input dir carries no template must raise, not generate.
    with pytest.raises(exceptions.NonTemplatedInputDirException):
        generate.generate_files(
            context={
                'cookiecutter': {'food': 'pizza'}
            },
            repo_dir='tests/test-generate-files-nontemplated'
        )
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files():
    # Rendering must handle non-ASCII context values in paths and content.
    generate.generate_files(
        context={
            'cookiecutter': {'food': 'pizzä'}
        },
        repo_dir='tests/test-generate-files'
    )
    simple_file = 'inputpizzä/simple.txt'
    assert os.path.isfile(simple_file)
    simple_text = io.open(simple_file, 'rt', encoding='utf-8').read()
    assert simple_text == u'I eat pizzä'
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files_with_trailing_newline():
    # A trailing newline in the template file must survive rendering.
    generate.generate_files(
        context={
            'cookiecutter': {'food': 'pizzä'}
        },
        repo_dir='tests/test-generate-files'
    )
    newline_file = 'inputpizzä/simple-with-newline.txt'
    assert os.path.isfile(newline_file)
    with io.open(newline_file, 'r', encoding='utf-8') as f:
        simple_text = f.read()
    assert simple_text == u'I eat pizzä\n'
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files_binaries():
    """Binary files, including nested ones, are copied through untouched."""
    generate.generate_files(
        context={
            'cookiecutter': {'binary_test': 'binary_files'}
        },
        repo_dir='tests/test-generate-binaries'
    )

    # Every file from the template tree must exist in the output tree.
    expected_files = [
        'inputbinary_files/logo.png',
        'inputbinary_files/.DS_Store',
        'inputbinary_files/readme.txt',
        'inputbinary_files/some_font.otf',
        'inputbinary_files/binary_files/logo.png',
        'inputbinary_files/binary_files/.DS_Store',
        'inputbinary_files/binary_files/readme.txt',
        'inputbinary_files/binary_files/some_font.otf',
        'inputbinary_files/binary_files/binary_files/logo.png',
    ]
    for path in expected_files:
        assert os.path.isfile(path), path
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files_absolute_path():
    # repo_dir may be absolute; output still lands in the current directory.
    generate.generate_files(
        context={
            'cookiecutter': {'food': 'pizzä'}
        },
        repo_dir=os.path.abspath('tests/test-generate-files')
    )
    assert os.path.isfile('inputpizzä/simple.txt')
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files_output_dir():
    # An explicit output_dir relocates the rendered project under it.
    os.mkdir('tests/custom_output_dir')
    generate.generate_files(
        context={
            'cookiecutter': {'food': 'pizzä'}
        },
        repo_dir=os.path.abspath('tests/test-generate-files'),
        output_dir='tests/custom_output_dir'
    )
    assert os.path.isfile('tests/custom_output_dir/inputpizzä/simple.txt')
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_return_rendered_project_dir():
    # generate_files returns the absolute path of the rendered project dir.
    os.mkdir('tests/custom_output_dir')
    project_dir = generate.generate_files(
        context={
            'cookiecutter': {'food': 'pizzä'}
        },
        repo_dir=os.path.abspath('tests/test-generate-files'),
        output_dir='tests/custom_output_dir'
    )
    assert project_dir == os.path.abspath(
        'tests/custom_output_dir/inputpizzä/'
    )
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files_permissions():
    """
    simple.txt and script.sh should retain their respective 0o644 and
    0o755 permissions
    """
    generate.generate_files(
        context={
            'cookiecutter': {'permissions': 'permissions'}
        },
        repo_dir='tests/test-generate-files-permissions'
    )

    template_dir = os.path.join(
        'tests',
        'test-generate-files-permissions',
        'input{{cookiecutter.permissions}}',
    )

    for filename in ('simple.txt', 'script.sh'):
        generated_file = os.path.join('inputpermissions', filename)
        assert os.path.isfile(generated_file)
        # The generated file must keep its template's permission bits.
        template_mode = os.stat(
            os.path.join(template_dir, filename)).st_mode & 0o777
        generated_mode = os.stat(generated_file).st_mode & 0o777
        assert template_mode == generated_mode, filename
@pytest.fixture
def undefined_context():
    """Context that omits the variables referenced by the test templates."""
    cookiecutter_vars = {
        'project_slug': 'testproject',
        'github_username': 'hackebrot',
    }
    return {'cookiecutter': cookiecutter_vars}
def test_raise_undefined_variable_file_name(tmpdir, undefined_context):
    """An undefined variable in a file name should abort generation."""
    output_dir = tmpdir.mkdir('output')
    with pytest.raises(exceptions.UndefinedVariableInTemplate) as excinfo:
        generate.generate_files(
            repo_dir='tests/undefined-variable/file-name/',
            output_dir=str(output_dir),
            context=undefined_context
        )
    raised = excinfo.value
    assert raised.message == "Unable to create file '{{cookiecutter.foobar}}'"
    assert raised.context == undefined_context
    # Nothing should have been left behind after the failure.
    assert not output_dir.join('testproject').exists()
def test_raise_undefined_variable_file_content(tmpdir, undefined_context):
    """An undefined variable inside file content should abort generation."""
    output_dir = tmpdir.mkdir('output')
    with pytest.raises(exceptions.UndefinedVariableInTemplate) as excinfo:
        generate.generate_files(
            repo_dir='tests/undefined-variable/file-content/',
            output_dir=str(output_dir),
            context=undefined_context
        )
    raised = excinfo.value
    assert raised.message == "Unable to create file 'README.rst'"
    assert raised.context == undefined_context
    # Nothing should have been left behind after the failure.
    assert not output_dir.join('testproject').exists()
def test_raise_undefined_variable_dir_name(tmpdir, undefined_context):
    """An undefined variable in a directory name should abort generation."""
    output_dir = tmpdir.mkdir('output')
    with pytest.raises(exceptions.UndefinedVariableInTemplate) as excinfo:
        generate.generate_files(
            repo_dir='tests/undefined-variable/dir-name/',
            output_dir=str(output_dir),
            context=undefined_context
        )
    raised = excinfo.value
    undefined_dir = os.path.join('testproject', '{{cookiecutter.foobar}}')
    expected_msg = "Unable to create directory '{}'".format(undefined_dir)
    assert raised.message == expected_msg
    assert raised.context == undefined_context
    # Nothing should have been left behind after the failure.
    assert not output_dir.join('testproject').exists()
def test_raise_undefined_variable_project_dir(tmpdir):
    """An empty context should fail while naming the project directory."""
    output_dir = tmpdir.mkdir('output')
    with pytest.raises(exceptions.UndefinedVariableInTemplate) as excinfo:
        generate.generate_files(
            repo_dir='tests/undefined-variable/dir-name/',
            output_dir=str(output_dir),
            context={}
        )
    raised = excinfo.value
    expected_msg = (
        "Unable to create project directory '{{cookiecutter.project_slug}}'"
    )
    assert raised.message == expected_msg
    assert raised.context == {}
    # Nothing should have been left behind after the failure.
    assert not output_dir.join('testproject').exists()
| 32.317406 | 78 | 0.699018 |
from __future__ import unicode_literals
import os
import io
import pytest
from cookiecutter import generate
from cookiecutter import exceptions
from cookiecutter import utils
@pytest.mark.parametrize('invalid_dirname', ['', '{foo}', '{{foo', 'bar}}'])
def test_ensure_dir_is_templated_raises(invalid_dirname):
    """Directory names that are not valid Jinja templates are rejected."""
    with pytest.raises(exceptions.NonTemplatedInputDirException):
        generate.ensure_dir_is_templated(invalid_dirname)
@pytest.fixture(scope='function')
def remove_additional_folders(request):
    """Register a finalizer that deletes directories created by the tests.

    The original repeated the same exists/rmtree pair five times; the
    cleanup is now a single loop over the known output locations.
    """
    generated_folders = (
        'inputpizzä',
        'inputgreen',
        'inputbinary_files',
        'tests/custom_output_dir',
        'inputpermissions',
    )

    def fin_remove_additional_folders():
        # Best effort: only remove the directories that were created.
        for folder in generated_folders:
            if os.path.exists(folder):
                utils.rmtree(folder)

    request.addfinalizer(fin_remove_additional_folders)
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files_nontemplated_exception():
    """A repo without a templated input directory should raise."""
    context = {'cookiecutter': {'food': 'pizza'}}
    with pytest.raises(exceptions.NonTemplatedInputDirException):
        generate.generate_files(
            context=context,
            repo_dir='tests/test-generate-files-nontemplated'
        )
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files():
    """Rendered text files should contain the substituted content."""
    generate.generate_files(
        context={
            'cookiecutter': {'food': 'pizzä'}
        },
        repo_dir='tests/test-generate-files'
    )
    simple_file = 'inputpizzä/simple.txt'
    assert os.path.isfile(simple_file)
    # Fixed: the original `io.open(...).read()` never closed the file
    # handle; use a context manager like the sibling tests do.
    with io.open(simple_file, 'rt', encoding='utf-8') as f:
        simple_text = f.read()
    assert simple_text == u'I eat pizzä'
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files_with_trailing_newline():
    """A trailing newline in the template must survive rendering."""
    generate.generate_files(
        context={'cookiecutter': {'food': 'pizzä'}},
        repo_dir='tests/test-generate-files'
    )
    newline_file = 'inputpizzä/simple-with-newline.txt'
    assert os.path.isfile(newline_file)
    with io.open(newline_file, 'r', encoding='utf-8') as f:
        rendered_text = f.read()
    assert rendered_text == u'I eat pizzä\n'
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files_binaries():
generate.generate_files(
context={
'cookiecutter': {'binary_test': 'binary_files'}
},
repo_dir='tests/test-generate-binaries'
)
assert os.path.isfile('inputbinary_files/logo.png')
assert os.path.isfile('inputbinary_files/.DS_Store')
assert os.path.isfile('inputbinary_files/readme.txt')
assert os.path.isfile('inputbinary_files/some_font.otf')
assert os.path.isfile('inputbinary_files/binary_files/logo.png')
assert os.path.isfile('inputbinary_files/binary_files/.DS_Store')
assert os.path.isfile('inputbinary_files/binary_files/readme.txt')
assert os.path.isfile('inputbinary_files/binary_files/some_font.otf')
assert os.path.isfile(
'inputbinary_files/binary_files/binary_files/logo.png'
)
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files_absolute_path():
generate.generate_files(
context={
'cookiecutter': {'food': 'pizzä'}
},
repo_dir=os.path.abspath('tests/test-generate-files')
)
assert os.path.isfile('inputpizzä/simple.txt')
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files_output_dir():
os.mkdir('tests/custom_output_dir')
generate.generate_files(
context={
'cookiecutter': {'food': 'pizzä'}
},
repo_dir=os.path.abspath('tests/test-generate-files'),
output_dir='tests/custom_output_dir'
)
assert os.path.isfile('tests/custom_output_dir/inputpizzä/simple.txt')
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_return_rendered_project_dir():
os.mkdir('tests/custom_output_dir')
project_dir = generate.generate_files(
context={
'cookiecutter': {'food': 'pizzä'}
},
repo_dir=os.path.abspath('tests/test-generate-files'),
output_dir='tests/custom_output_dir'
)
assert project_dir == os.path.abspath(
'tests/custom_output_dir/inputpizzä/'
)
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files_permissions():
generate.generate_files(
context={
'cookiecutter': {'permissions': 'permissions'}
},
repo_dir='tests/test-generate-files-permissions'
)
assert os.path.isfile('inputpermissions/simple.txt')
tests_simple_file = os.path.join(
'tests',
'test-generate-files-permissions',
'input{{cookiecutter.permissions}}',
'simple.txt'
)
tests_simple_file_mode = os.stat(tests_simple_file).st_mode & 0o777
input_simple_file = os.path.join(
'inputpermissions',
'simple.txt'
)
input_simple_file_mode = os.stat(input_simple_file).st_mode & 0o777
assert tests_simple_file_mode == input_simple_file_mode
assert os.path.isfile('inputpermissions/script.sh')
tests_script_file = os.path.join(
'tests',
'test-generate-files-permissions',
'input{{cookiecutter.permissions}}',
'script.sh'
)
tests_script_file_mode = os.stat(tests_script_file).st_mode & 0o777
input_script_file = os.path.join(
'inputpermissions',
'script.sh'
)
input_script_file_mode = os.stat(input_script_file).st_mode & 0o777
assert tests_script_file_mode == input_script_file_mode
@pytest.fixture
def undefined_context():
return {
'cookiecutter': {
'project_slug': 'testproject',
'github_username': 'hackebrot'
}
}
def test_raise_undefined_variable_file_name(tmpdir, undefined_context):
output_dir = tmpdir.mkdir('output')
with pytest.raises(exceptions.UndefinedVariableInTemplate) as err:
generate.generate_files(
repo_dir='tests/undefined-variable/file-name/',
output_dir=str(output_dir),
context=undefined_context
)
error = err.value
assert "Unable to create file '{{cookiecutter.foobar}}'" == error.message
assert error.context == undefined_context
assert not output_dir.join('testproject').exists()
def test_raise_undefined_variable_file_content(tmpdir, undefined_context):
output_dir = tmpdir.mkdir('output')
with pytest.raises(exceptions.UndefinedVariableInTemplate) as err:
generate.generate_files(
repo_dir='tests/undefined-variable/file-content/',
output_dir=str(output_dir),
context=undefined_context
)
error = err.value
assert "Unable to create file 'README.rst'" == error.message
assert error.context == undefined_context
assert not output_dir.join('testproject').exists()
def test_raise_undefined_variable_dir_name(tmpdir, undefined_context):
output_dir = tmpdir.mkdir('output')
with pytest.raises(exceptions.UndefinedVariableInTemplate) as err:
generate.generate_files(
repo_dir='tests/undefined-variable/dir-name/',
output_dir=str(output_dir),
context=undefined_context
)
error = err.value
directory = os.path.join('testproject', '{{cookiecutter.foobar}}')
msg = "Unable to create directory '{}'".format(directory)
assert msg == error.message
assert error.context == undefined_context
assert not output_dir.join('testproject').exists()
def test_raise_undefined_variable_project_dir(tmpdir):
output_dir = tmpdir.mkdir('output')
with pytest.raises(exceptions.UndefinedVariableInTemplate) as err:
generate.generate_files(
repo_dir='tests/undefined-variable/dir-name/',
output_dir=str(output_dir),
context={}
)
error = err.value
msg = "Unable to create project directory '{{cookiecutter.project_slug}}'"
assert msg == error.message
assert error.context == {}
assert not output_dir.join('testproject').exists()
| true | true |
f71192fabb5e746aede1a9c9b5b7d8a1f18a4159 | 4,888 | py | Python | torch/jit/_monkeytype_config.py | rpindale/pytorch | b07264301d19869ab223957a15e2ce8825886d21 | [
"Intel"
] | 1 | 2021-06-21T09:43:33.000Z | 2021-06-21T09:43:33.000Z | torch/jit/_monkeytype_config.py | rpindale/pytorch | b07264301d19869ab223957a15e2ce8825886d21 | [
"Intel"
] | null | null | null | torch/jit/_monkeytype_config.py | rpindale/pytorch | b07264301d19869ab223957a15e2ce8825886d21 | [
"Intel"
] | null | null | null | import inspect
import typing
import torch
from typing import Optional, Iterable, List, Dict
from collections import defaultdict
_IS_MONKEYTYPE_INSTALLED = True
try:
import monkeytype # type: ignore[import]
from monkeytype import trace as monkeytype_trace
from monkeytype.db.base import CallTraceThunk, CallTraceStore, CallTraceStoreLogger # type: ignore[import]
from monkeytype.config import default_code_filter # type: ignore[import]
from monkeytype.tracing import CallTrace, CodeFilter # type: ignore[import]
except ImportError:
_IS_MONKEYTYPE_INSTALLED = False
def get_qualified_name(func):
    """Return the dotted qualified name (``__qualname__``) of *func*."""
    return func.__qualname__
if _IS_MONKEYTYPE_INSTALLED:
class JitTypeTraceStoreLogger(CallTraceStoreLogger):
"""A JitTypeCallTraceLogger that stores logged traces in a CallTraceStore."""
def __init__(self, store: CallTraceStore):
super().__init__(store)
def log(self, trace: CallTrace) -> None:
self.traces.append(trace)
class JitTypeTraceStore(CallTraceStore):
def __init__(self):
super().__init__()
# A dictionary keeping all collected CallTrace
# key is fully qualified name of called function
# value is list of all CallTrace
self.trace_records: Dict[str, list] = defaultdict(list)
def add(self, traces: Iterable[CallTrace]):
for t in traces:
qualified_name = get_qualified_name(t.func)
self.trace_records[qualified_name].append(t)
def filter(
self,
qualified_name: str,
qualname_prefix: Optional[str] = None,
limit: int = 2000
) -> List[CallTraceThunk]:
return self.trace_records[qualified_name]
def analyze(self, qualified_name: str) -> Dict:
# Analyze the types for the given module
# and create a dictionary of all the types
# for arguments.
records = self.trace_records[qualified_name]
all_args = defaultdict(set)
for record in records:
for arg, arg_type in record.arg_types.items():
all_args[arg].add(arg_type)
return all_args
def consolidate_types(self, qualified_name: str) -> Dict:
all_args = self.analyze(qualified_name)
# If there are more types for an argument,
# then consolidate the type to `Any` and replace the entry
# by type `Any`.
for arg, types in all_args.items():
_all_type = " "
for _type in types:
# If the type is a type imported from typing
# like Tuple, List, Dict then replace "typing."
# with a null string.
if inspect.getmodule(_type) == typing:
_type_to_string = str(_type)
_all_type += _type_to_string.replace('typing.', '') + ','
elif _type is torch.nn.parameter.Parameter:
# Check if the type is torch.nn.parameter.Parameter,
# use the entire quaalified name `torch.nn.parameter.Parameter`
# for type
_all_type += 'torch.nn.parameter.Parameter' + ','
else:
_all_type += _type.__name__ + ','
_all_type = _all_type.lstrip(" ") # Remove any trailing spaces
if len(types) > 1:
all_args[arg] = {'Any'}
else:
all_args[arg] = {_all_type[:-1]}
return all_args
def get_args_types(self, qualified_name: str) -> Dict:
return self.consolidate_types(qualified_name)
class JitTypeTraceConfig(monkeytype.config.Config):
def __init__(self, s: JitTypeTraceStore):
super().__init__()
self.s = s
def trace_logger(self) -> JitTypeTraceStoreLogger:
"""
Returns a JitCallTraceStoreLogger that logs to the configured
trace store.
"""
return JitTypeTraceStoreLogger(self.trace_store())
def trace_store(self) -> CallTraceStore:
return self.s
def code_filter(self) -> Optional[CodeFilter]:
return default_code_filter
else:
    # When MonkeyType is not installed, we provide dummy class definitions
    # for the below classes.
    class JitTypeTraceStoreLogger:  # type: ignore[no-redef]
        """Stub logger used when MonkeyType is not installed."""
        def __init__(self):
            pass

    class JitTypeTraceStore:  # type: ignore[no-redef]
        """Stub store; ``trace_records`` stays None when tracing is off."""
        def __init__(self):
            self.trace_records = None

    class JitTypeTraceConfig:  # type: ignore[no-redef]
        """Stub config used when MonkeyType is not installed."""
        def __init__(self):
            pass

    # Sentinel callers must check before attempting to trace.
    monkeytype_trace = None  # noqa: F811
| 38.488189 | 111 | 0.594926 | import inspect
import typing
import torch
from typing import Optional, Iterable, List, Dict
from collections import defaultdict
_IS_MONKEYTYPE_INSTALLED = True
try:
import monkeytype
from monkeytype import trace as monkeytype_trace
from monkeytype.db.base import CallTraceThunk, CallTraceStore, CallTraceStoreLogger
from monkeytype.config import default_code_filter
from monkeytype.tracing import CallTrace, CodeFilter
except ImportError:
_IS_MONKEYTYPE_INSTALLED = False
def get_qualified_name(func):
    """Return the dotted qualified name (``__qualname__``) of *func*."""
    return func.__qualname__
if _IS_MONKEYTYPE_INSTALLED:
class JitTypeTraceStoreLogger(CallTraceStoreLogger):
def __init__(self, store: CallTraceStore):
super().__init__(store)
def log(self, trace: CallTrace) -> None:
self.traces.append(trace)
class JitTypeTraceStore(CallTraceStore):
def __init__(self):
super().__init__()
self.trace_records: Dict[str, list] = defaultdict(list)
def add(self, traces: Iterable[CallTrace]):
for t in traces:
qualified_name = get_qualified_name(t.func)
self.trace_records[qualified_name].append(t)
def filter(
self,
qualified_name: str,
qualname_prefix: Optional[str] = None,
limit: int = 2000
) -> List[CallTraceThunk]:
return self.trace_records[qualified_name]
def analyze(self, qualified_name: str) -> Dict:
records = self.trace_records[qualified_name]
all_args = defaultdict(set)
for record in records:
for arg, arg_type in record.arg_types.items():
all_args[arg].add(arg_type)
return all_args
def consolidate_types(self, qualified_name: str) -> Dict:
all_args = self.analyze(qualified_name)
for arg, types in all_args.items():
_all_type = " "
for _type in types:
if inspect.getmodule(_type) == typing:
_type_to_string = str(_type)
_all_type += _type_to_string.replace('typing.', '') + ','
elif _type is torch.nn.parameter.Parameter:
_all_type += 'torch.nn.parameter.Parameter' + ','
else:
_all_type += _type.__name__ + ','
_all_type = _all_type.lstrip(" ")
if len(types) > 1:
all_args[arg] = {'Any'}
else:
all_args[arg] = {_all_type[:-1]}
return all_args
def get_args_types(self, qualified_name: str) -> Dict:
return self.consolidate_types(qualified_name)
class JitTypeTraceConfig(monkeytype.config.Config):
def __init__(self, s: JitTypeTraceStore):
super().__init__()
self.s = s
def trace_logger(self) -> JitTypeTraceStoreLogger:
return JitTypeTraceStoreLogger(self.trace_store())
def trace_store(self) -> CallTraceStore:
return self.s
def code_filter(self) -> Optional[CodeFilter]:
return default_code_filter
else:
class JitTypeTraceStoreLogger:
def __init__(self):
pass
class JitTypeTraceStore:
def __init__(self):
self.trace_records = None
class JitTypeTraceConfig:
def __init__(self):
pass
monkeytype_trace = None
| true | true |
f71193d757e833203e1e5b18a48ec2bdf4f531a0 | 1,029 | py | Python | edu/class8/config.py | h1063135843/PaddleViT | 6f150b82d801b082cc7af09af396bfe2f6bf9987 | [
"Apache-2.0"
] | 1 | 2021-12-12T12:34:01.000Z | 2021-12-12T12:34:01.000Z | edu/class8/config.py | h1063135843/PaddleViT | 6f150b82d801b082cc7af09af396bfe2f6bf9987 | [
"Apache-2.0"
] | null | null | null | edu/class8/config.py | h1063135843/PaddleViT | 6f150b82d801b082cc7af09af396bfe2f6bf9987 | [
"Apache-2.0"
] | null | null | null | from yacs.config import CfgNode as CN
import yaml
_C = CN()
_C.DATA = CN()
_C.DATA.DATASET = 'Cifar10'
_C.DATA.BATCH_SIZE = 128
_C.MODEL = CN()
_C.MODEL.NUM_CLASSES = 1000
_C.MODEL.TRANS = CN()
_C.MODEL.TRANS.EMBED_DIM = 96
_C.MODEL.TRANS.DEPTHS = [2, 2, 6, 2]
_C.MODEL.TRANS.QKV_BIAS = False
def _update_config_from_file(config, cfg_file):
config.defrost()
config.merge_from_file(cfg_file)
#config.freeze()
def update_config(config, args):
    """Override fields of *config* from parsed command-line *args*.

    Returns the (mutated) config for convenience.
    """
    if args.cfg:
        # Fixed typo: the original called `_update_config_form_file`,
        # which raised NameError whenever --cfg was supplied.
        _update_config_from_file(config, args.cfg)
    if args.dataset:
        config.DATA.DATASET = args.dataset
    if args.batch_size:
        config.DATA.BATCH_SIZE = args.batch_size
    return config
def get_config(cfg_file=None):
    """Return a fresh clone of the default config, optionally merged
    with the YAML file *cfg_file*.
    """
    cloned = _C.clone()
    if cfg_file:
        _update_config_from_file(cloned, cfg_file)
    return cloned
def main():
    """Smoke-test the config: print it plus two nested fields."""
    cfg = get_config()
    separator = '-----'
    print(cfg)
    print(separator)
    print(cfg.MODEL.NUM_CLASSES)
    print(separator)
    print(cfg.MODEL.TRANS.QKV_BIAS)
if __name__ == "__main__":
main()
| 18.709091 | 50 | 0.665695 | from yacs.config import CfgNode as CN
import yaml
_C = CN()
_C.DATA = CN()
_C.DATA.DATASET = 'Cifar10'
_C.DATA.BATCH_SIZE = 128
_C.MODEL = CN()
_C.MODEL.NUM_CLASSES = 1000
_C.MODEL.TRANS = CN()
_C.MODEL.TRANS.EMBED_DIM = 96
_C.MODEL.TRANS.DEPTHS = [2, 2, 6, 2]
_C.MODEL.TRANS.QKV_BIAS = False
def _update_config_from_file(config, cfg_file):
config.defrost()
config.merge_from_file(cfg_file)
def update_config(config, args):
    """Override fields of *config* from parsed command-line *args*.

    Returns the (mutated) config for convenience.
    """
    if args.cfg:
        # Fixed typo: the original called `_update_config_form_file`,
        # which raised NameError whenever --cfg was supplied.
        _update_config_from_file(config, args.cfg)
    if args.dataset:
        config.DATA.DATASET = args.dataset
    if args.batch_size:
        config.DATA.BATCH_SIZE = args.batch_size
    return config
def get_config(cfg_file=None):
    """Return a fresh clone of the default config, optionally merged
    with the YAML file *cfg_file*.
    """
    cloned = _C.clone()
    if cfg_file:
        _update_config_from_file(cloned, cfg_file)
    return cloned
def main():
    """Smoke-test the config: print it plus two nested fields."""
    cfg = get_config()
    separator = '-----'
    print(cfg)
    print(separator)
    print(cfg.MODEL.NUM_CLASSES)
    print(separator)
    print(cfg.MODEL.TRANS.QKV_BIAS)
main()
| true | true |
f71193ef3fbfbec2cf3575c19fdbffbc9b4086b6 | 7,520 | py | Python | test/integration/ggrc/review/test_api.py | sfarbotka/ggrc-core | ef7aae6bc09ad2f53a2414f643572e07d689784a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/integration/ggrc/review/test_api.py | sfarbotka/ggrc-core | ef7aae6bc09ad2f53a2414f643572e07d689784a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/integration/ggrc/review/test_api.py | sfarbotka/ggrc-core | ef7aae6bc09ad2f53a2414f643572e07d689784a | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-02-13T12:32:45.000Z | 2020-02-13T12:32:45.000Z | # Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Base TestCase for proposal api."""
import ddt
from ggrc.models import all_models
from integration.ggrc import TestCase, generator
from integration.ggrc.models import factories
from integration.ggrc.api_helper import Api
from integration.ggrc.review import build_reviewer_acl
@ddt.ddt
class TestReviewApi(TestCase):
"""Base TestCase class proposal api tests."""
def setUp(self):
super(TestReviewApi, self).setUp()
self.api = Api()
self.api.client.get("/login")
self.generator = generator.ObjectGenerator()
def test_simple_get(self):
"""Test simple get"""
with factories.single_commit():
program = factories.ProgramFactory()
review = factories.ReviewFactory(
email_message="test email message",
notification_type="email",
reviewable=program,
status=all_models.Review.STATES.UNREVIEWED,
)
resp = self.api.get(all_models.Review, review.id)
self.assert200(resp)
self.assertIn("review", resp.json)
resp_review = resp.json["review"]
self.assertEqual(all_models.Review.STATES.UNREVIEWED,
resp_review["status"])
self.assertEqual(all_models.Review.NotificationTypes.EMAIL_TYPE,
resp_review["notification_type"])
self.assertEqual("test email message",
resp_review["email_message"])
def test_collection_get(self):
"""Test simple collection get"""
with factories.single_commit():
review1 = factories.ReviewFactory(
status=all_models.Review.STATES.UNREVIEWED
)
review2 = factories.ReviewFactory(
status=all_models.Review.STATES.REVIEWED
)
resp = self.api.get_collection(all_models.Review,
[review1.id, review2.id])
self.assert200(resp)
self.assertIn("reviews_collection", resp.json)
self.assertIn("reviews", resp.json["reviews_collection"])
self.assertEquals(2, len(resp.json["reviews_collection"]["reviews"]))
def test_create_review(self):
"""Create review via API, check that single relationship is created"""
program = factories.ProgramFactory()
program_id = program.id
resp = self.api.post(
all_models.Review,
{
"review": {
"reviewable": {
"type": program.type,
"id": program.id,
},
"context": None,
"notification_type": "email",
"status": all_models.Review.STATES.UNREVIEWED,
"access_control_list": build_reviewer_acl()
},
},
)
self.assertEqual(201, resp.status_code)
review_id = resp.json["review"]["id"]
review = all_models.Review.query.get(review_id)
self.assertEqual(all_models.Review.STATES.UNREVIEWED, review.status)
self.assertEqual(program.type, review.reviewable_type)
self.assertEqual(program_id, review.reviewable_id)
control_review_rel_count = all_models.Relationship.query.filter(
all_models.Relationship.source_id == review.id,
all_models.Relationship.source_type == review.type,
all_models.Relationship.destination_id == program_id,
all_models.Relationship.destination_type == program.type,
).union(
all_models.Relationship.query.filter(
all_models.Relationship.destination_id == review.id,
all_models.Relationship.destination_type == review.type,
all_models.Relationship.source_id == program_id,
all_models.Relationship.source_type == program.type,
)
).count()
self.assertEqual(1, control_review_rel_count)
def test_delete_review(self):
"""Test delete review via API"""
with factories.single_commit():
program = factories.ProgramFactory()
program_id = program.id
review = factories.ReviewFactory(reviewable=program)
review_id = review.id
resp = self.api.delete(review)
self.assert200(resp)
review = all_models.Review.query.get(review_id)
program = all_models.Program.query.get(program_id)
self.assertIsNone(review)
self.assertEquals(0, len(program.related_objects(_types=["Review"])))
def test_last_reviewed(self):
"""last_reviewed_by, last_reviewed_by should be set if reviewed"""
program = factories.ProgramFactory()
resp, review = self.generator.generate_object(
all_models.Review,
{
"reviewable": {
"type": program.type,
"id": program.id,
},
"context": None,
"status": all_models.Review.STATES.UNREVIEWED,
"access_control_list": build_reviewer_acl(),
"notification_type": all_models.Review.NotificationTypes.EMAIL_TYPE
},
)
review_id = review.id
resp = self.api.put(
review,
{
"status": all_models.Review.STATES.REVIEWED,
},
)
self.assert200(resp)
self.assertIsNotNone(resp.json["review"]["last_reviewed_by"])
self.assertIsNotNone(resp.json["review"]["last_reviewed_at"])
review = all_models.Review.query.get(review_id)
self.assertIsNotNone(review.last_reviewed_by)
self.assertIsNotNone(review.last_reviewed_at)
def test_reviewable_revisions(self):
"""Check that proper revisions are created"""
program = factories.ProgramFactory()
resp, review = self.generator.generate_object(
all_models.Review,
{
"reviewable": {
"type": program.type,
"id": program.id,
},
"context": None,
"status": all_models.Review.STATES.UNREVIEWED,
"access_control_list": build_reviewer_acl(),
"notification_type": all_models.Review.NotificationTypes.EMAIL_TYPE
},
)
program_id = program.id
reviewable = review.reviewable
program_revisions = all_models.Revision.query.filter_by(
resource_id=program_id,
resource_type=program.type
).order_by(
all_models.Revision.id,
).all()
self.assertEquals(2, len(program_revisions))
self.assertEquals(all_models.Review.STATES.UNREVIEWED,
program_revisions[0].content["review_status"])
self.assertEquals(all_models.Review.STATES.UNREVIEWED,
program_revisions[1].content["review_status"])
resp = self.api.put(
review,
{
"status": all_models.Review.STATES.REVIEWED,
},
)
self.assert200(resp)
program_revisions = all_models.Revision.query.filter_by(
resource_id=program_id,
resource_type=program.type
).order_by(
all_models.Revision.id,
).all()
self.assertEquals(3, len(program_revisions))
self.assertEquals(all_models.Review.STATES.REVIEWED,
program_revisions[2].content["review_status"])
resp = self.api.put(
reviewable,
{
"description": "some new description"
}
)
self.assert200(resp)
program_revisions = all_models.Revision.query.filter_by(
resource_id=program_id,
resource_type=program.type
).order_by(
all_models.Revision.id,
).all()
self.assertEquals(4, len(program_revisions))
self.assertEquals(all_models.Review.STATES.UNREVIEWED,
program_revisions[3].content["review_status"])
| 34.814815 | 79 | 0.648404 |
import ddt
from ggrc.models import all_models
from integration.ggrc import TestCase, generator
from integration.ggrc.models import factories
from integration.ggrc.api_helper import Api
from integration.ggrc.review import build_reviewer_acl
@ddt.ddt
class TestReviewApi(TestCase):
def setUp(self):
super(TestReviewApi, self).setUp()
self.api = Api()
self.api.client.get("/login")
self.generator = generator.ObjectGenerator()
def test_simple_get(self):
with factories.single_commit():
program = factories.ProgramFactory()
review = factories.ReviewFactory(
email_message="test email message",
notification_type="email",
reviewable=program,
status=all_models.Review.STATES.UNREVIEWED,
)
resp = self.api.get(all_models.Review, review.id)
self.assert200(resp)
self.assertIn("review", resp.json)
resp_review = resp.json["review"]
self.assertEqual(all_models.Review.STATES.UNREVIEWED,
resp_review["status"])
self.assertEqual(all_models.Review.NotificationTypes.EMAIL_TYPE,
resp_review["notification_type"])
self.assertEqual("test email message",
resp_review["email_message"])
def test_collection_get(self):
with factories.single_commit():
review1 = factories.ReviewFactory(
status=all_models.Review.STATES.UNREVIEWED
)
review2 = factories.ReviewFactory(
status=all_models.Review.STATES.REVIEWED
)
resp = self.api.get_collection(all_models.Review,
[review1.id, review2.id])
self.assert200(resp)
self.assertIn("reviews_collection", resp.json)
self.assertIn("reviews", resp.json["reviews_collection"])
self.assertEquals(2, len(resp.json["reviews_collection"]["reviews"]))
def test_create_review(self):
program = factories.ProgramFactory()
program_id = program.id
resp = self.api.post(
all_models.Review,
{
"review": {
"reviewable": {
"type": program.type,
"id": program.id,
},
"context": None,
"notification_type": "email",
"status": all_models.Review.STATES.UNREVIEWED,
"access_control_list": build_reviewer_acl()
},
},
)
self.assertEqual(201, resp.status_code)
review_id = resp.json["review"]["id"]
review = all_models.Review.query.get(review_id)
self.assertEqual(all_models.Review.STATES.UNREVIEWED, review.status)
self.assertEqual(program.type, review.reviewable_type)
self.assertEqual(program_id, review.reviewable_id)
control_review_rel_count = all_models.Relationship.query.filter(
all_models.Relationship.source_id == review.id,
all_models.Relationship.source_type == review.type,
all_models.Relationship.destination_id == program_id,
all_models.Relationship.destination_type == program.type,
).union(
all_models.Relationship.query.filter(
all_models.Relationship.destination_id == review.id,
all_models.Relationship.destination_type == review.type,
all_models.Relationship.source_id == program_id,
all_models.Relationship.source_type == program.type,
)
).count()
self.assertEqual(1, control_review_rel_count)
def test_delete_review(self):
with factories.single_commit():
program = factories.ProgramFactory()
program_id = program.id
review = factories.ReviewFactory(reviewable=program)
review_id = review.id
resp = self.api.delete(review)
self.assert200(resp)
review = all_models.Review.query.get(review_id)
program = all_models.Program.query.get(program_id)
self.assertIsNone(review)
self.assertEquals(0, len(program.related_objects(_types=["Review"])))
def test_last_reviewed(self):
program = factories.ProgramFactory()
resp, review = self.generator.generate_object(
all_models.Review,
{
"reviewable": {
"type": program.type,
"id": program.id,
},
"context": None,
"status": all_models.Review.STATES.UNREVIEWED,
"access_control_list": build_reviewer_acl(),
"notification_type": all_models.Review.NotificationTypes.EMAIL_TYPE
},
)
review_id = review.id
resp = self.api.put(
review,
{
"status": all_models.Review.STATES.REVIEWED,
},
)
self.assert200(resp)
self.assertIsNotNone(resp.json["review"]["last_reviewed_by"])
self.assertIsNotNone(resp.json["review"]["last_reviewed_at"])
review = all_models.Review.query.get(review_id)
self.assertIsNotNone(review.last_reviewed_by)
self.assertIsNotNone(review.last_reviewed_at)
def test_reviewable_revisions(self):
program = factories.ProgramFactory()
resp, review = self.generator.generate_object(
all_models.Review,
{
"reviewable": {
"type": program.type,
"id": program.id,
},
"context": None,
"status": all_models.Review.STATES.UNREVIEWED,
"access_control_list": build_reviewer_acl(),
"notification_type": all_models.Review.NotificationTypes.EMAIL_TYPE
},
)
program_id = program.id
reviewable = review.reviewable
program_revisions = all_models.Revision.query.filter_by(
resource_id=program_id,
resource_type=program.type
).order_by(
all_models.Revision.id,
).all()
self.assertEquals(2, len(program_revisions))
self.assertEquals(all_models.Review.STATES.UNREVIEWED,
program_revisions[0].content["review_status"])
self.assertEquals(all_models.Review.STATES.UNREVIEWED,
program_revisions[1].content["review_status"])
resp = self.api.put(
review,
{
"status": all_models.Review.STATES.REVIEWED,
},
)
self.assert200(resp)
program_revisions = all_models.Revision.query.filter_by(
resource_id=program_id,
resource_type=program.type
).order_by(
all_models.Revision.id,
).all()
self.assertEquals(3, len(program_revisions))
self.assertEquals(all_models.Review.STATES.REVIEWED,
program_revisions[2].content["review_status"])
resp = self.api.put(
reviewable,
{
"description": "some new description"
}
)
self.assert200(resp)
program_revisions = all_models.Revision.query.filter_by(
resource_id=program_id,
resource_type=program.type
).order_by(
all_models.Revision.id,
).all()
self.assertEquals(4, len(program_revisions))
self.assertEquals(all_models.Review.STATES.UNREVIEWED,
program_revisions[3].content["review_status"])
| true | true |
f7119443cc493f06975a851156fbc3ec8010dca5 | 998 | py | Python | flaskr/__init__.py | wiky-avis/flask_app | 7ea6964bb9dcd4a919161d062ddaff2a3c20ae43 | [
"MIT"
] | null | null | null | flaskr/__init__.py | wiky-avis/flask_app | 7ea6964bb9dcd4a919161d062ddaff2a3c20ae43 | [
"MIT"
] | null | null | null | flaskr/__init__.py | wiky-avis/flask_app | 7ea6964bb9dcd4a919161d062ddaff2a3c20ae43 | [
"MIT"
] | 1 | 2021-07-15T08:39:17.000Z | 2021-07-15T08:39:17.000Z | import os
from flask import Flask
def create_app(test_config=None):
    """Application factory for the flaskr blog.

    When *test_config* is given it overrides the on-disk instance
    configuration, so tests can inject their own settings.
    """
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_mapping(
        SECRET_KEY='dev',
        DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),
    )

    if test_config is not None:
        # Tests supply their configuration directly.
        app.config.from_mapping(test_config)
    else:
        # Otherwise pull the optional per-instance config from disk.
        app.config.from_pyfile('config.py', silent=True)

    # The instance folder holds the SQLite file; it may not exist yet.
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass

    @app.route('/hello')
    def hello():
        # Smoke-test endpoint.
        return 'Hello, World!'

    # Wire up the database hooks and the two blueprints
    # (import order matches the original module initialisation order).
    from . import db, auth, blog
    db.init_app(app)
    app.register_blueprint(auth.bp)
    app.register_blueprint(blog.bp)
    app.add_url_rule('/', endpoint='index')

    return app
| 23.209302 | 66 | 0.650301 | import os
from flask import Flask
def create_app(test_config=None):
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY='dev',
DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),
)
if test_config is None:
app.config.from_pyfile('config.py', silent=True)
else:
app.config.from_mapping(test_config)
try:
os.makedirs(app.instance_path)
except OSError:
pass
@app.route('/hello')
def hello():
return 'Hello, World!'
from . import db
db.init_app(app)
from . import auth
app.register_blueprint(auth.bp)
from . import blog
app.register_blueprint(blog.bp)
app.add_url_rule('/', endpoint='index')
return app
| true | true |
f7119504da3f4521c3aee2052b77a4d48632efc2 | 8,573 | py | Python | json_commander.py | LairdCP/BT510-Python | a7c5b2825a3919b3b261524763ec0a5ed76d54da | [
"Apache-2.0"
] | 1 | 2021-05-16T17:53:33.000Z | 2021-05-16T17:53:33.000Z | json_commander.py | LairdCP/BT510-Python | a7c5b2825a3919b3b261524763ec0a5ed76d54da | [
"Apache-2.0"
] | null | null | null | json_commander.py | LairdCP/BT510-Python | a7c5b2825a3919b3b261524763ec0a5ed76d54da | [
"Apache-2.0"
] | 1 | 2021-04-23T22:33:21.000Z | 2021-04-23T22:33:21.000Z |
"""
BT510 commands (serial or BLE).
This is a subset of the jtester commands used for verification.
"""
import time
import json
import random
import string
import logging
from jsonrpcclient.requests import Request
class jtester:
    """Transport-agnostic JSON-RPC test driver for BT510 commands.

    A transport ("protocol") object exposing ``send_json(text, delay)`` and
    ``get_json(timeout)`` is attached with :meth:`set_protocol`.  Each
    ``Expect*`` helper pulls one response from the transport and bumps the
    ``ok``/``fail`` counters; the command helpers (``Send*``, ``Dump``,
    ``SetAttributes``, log accessors, ...) issue a JSON-RPC request and
    validate the reply.
    """

    def __init__(self, fname="config.json"):
        """JSON tester that is independent of the transport."""
        print("jtester init")
        self.protocol = None
        self.inter_message_delay = 0.01   # pause between outgoing messages (s)
        self.reset_delay = 10             # wait after a device reset (s)
        self.reset_after_write_delay = 2  # settle time before reset commands (s)
        self.get_queue_timeout = 2.0      # response-queue poll timeout (s)
        self.ok = 0                       # passed-check counter
        self.fail = 0                     # failed-check counter
        self._LoadConfig(fname)
        self.logger = logging.getLogger('jtester')

    def _LoadConfig(self, fname: str) -> None:
        """Override the timing defaults from the JSON config file *fname*."""
        with open(fname, 'r') as f:
            c = json.load(f)
            if "inter_message_delay" in c:
                self.inter_message_delay = c["inter_message_delay"]
            if "reset_delay" in c:
                self.reset_delay = c["reset_delay"]

    def _send_json(self, text):
        """Send one request over the transport; warn if none is attached."""
        if self.protocol is not None:
            self.logger.debug(text)
            self.protocol.send_json(text, self.inter_message_delay)
        else:
            self.logger.warning("Transport not available")

    def _get_json(self):
        """Return the next decoded response, or None without a transport."""
        if self.protocol is not None:
            result = self.protocol.get_json(self.get_queue_timeout)
            self.logger.debug(json.dumps(result))
            return result
        else:
            return None

    def set_protocol(self, protocol) -> None:
        """Attach the transport used by all subsequent commands."""
        self.protocol = protocol

    def IncrementOkCount(self) -> None:
        self.ok += 1

    def IncrementFailCount(self) -> None:
        self.fail += 1
        self.logger.error("Test Fail")

    def ExpectOk(self) -> None:
        """Pass iff the next response is {"result": "ok"}."""
        response = self._get_json()
        if response is not None:
            if "result" in response:
                if response["result"] == "ok":
                    self.IncrementOkCount()
                    return
        self.IncrementFailCount()

    def ExpectError(self) -> None:
        """Pass iff the next response carries a JSON-RPC "error" member."""
        response = self._get_json()
        if response is not None:
            if "error" in response:
                self.IncrementOkCount()
                return
        self.IncrementFailCount()

    def ExpectValue(self, name, value) -> None:
        """Pass iff an "ok" response contains *name* matching *value*.

        value None only checks for the presence of the key; string values
        are compared with surrounding double quotes stripped (the device
        returns quoted strings).
        """
        response = self._get_json()
        if response is not None:
            if "result" in response:
                if response["result"] == "ok":
                    if value is None:
                        if name in response:
                            self.IncrementOkCount()
                            return
                    elif isinstance(value, str):
                        if response[name] == value.strip('\"'):
                            self.IncrementOkCount()
                            return
                    else:
                        if response[name] == value:
                            self.IncrementOkCount()
                            return
        self.IncrementFailCount()

    def ExpectValues(self, **pairs) -> None:
        """Like ExpectValue, but checks every name/value pair in one reply."""
        responseFound = False
        error = 0
        response = self._get_json()
        if response is not None:
            if "result" in response:
                if response["result"] == "ok":
                    responseFound = True
                    for (name, value) in pairs.items():
                        if value is None:
                            if name not in response:
                                error += 1
                        elif isinstance(value, str):
                            if response[name] != value.strip('\"'):
                                error += 1
                        elif response[name] != value:
                            error += 1
        if not responseFound or error:
            self.IncrementFailCount()
        else:
            self.IncrementOkCount()

    def ExpectRange(self, name, imin, imax) -> None:
        """Pass iff the integer "result" lies in [imin, imax].

        NOTE(review): *name* is unused - the value is read from "result",
        not response[name].  Kept for signature compatibility; confirm
        against the firmware's getEpoch reply format.
        """
        response = self._get_json()
        if response is not None:
            if "result" in response:
                x = response["result"]
                if isinstance(x, int):
                    if x >= imin and x <= imax:
                        self.IncrementOkCount()
                        return
        self.IncrementFailCount()

    def ExpectInt(self) -> int:
        """Return an integer "result" (counts ok); -1 on any other reply."""
        response = self._get_json()
        if response is not None:
            if "result" in response:
                value = response["result"]
                if isinstance(value, int):
                    self.IncrementOkCount()
                    return value
        self.IncrementFailCount()
        return -1

    def ExpectStr(self) -> str:
        """Return a string "result" (counts ok); "" on any other reply."""
        response = self._get_json()
        if response is not None:
            if "result" in response:
                value = response["result"]
                if isinstance(value, str):
                    self.IncrementOkCount()
                    return value
        self.IncrementFailCount()
        return ""

    def ExpectLog(self) -> list:
        """Return a list "result" (counts ok); [0, ""] on any other reply."""
        response = self._get_json()
        if response is not None:
            if "result" in response:
                value = response["result"]
                if isinstance(value, list):
                    self.IncrementOkCount()
                    return value
        self.IncrementFailCount()
        return [0, ""]

    def SendFactoryReset(self) -> None:
        """Issue factoryReset and wait for the device to come back."""
        time.sleep(self.reset_after_write_delay)
        self._send_json(str(Request("factoryReset")))
        self.ExpectOk()
        time.sleep(self.reset_delay)

    def SendReboot(self) -> None:
        """Issue reboot and wait for the device to come back."""
        time.sleep(self.reset_after_write_delay)
        self._send_json(str(Request("reboot")))
        self.ExpectOk()
        time.sleep(self.reset_delay)

    def SendEnterBootloader(self) -> None:
        """Reboot the device into its bootloader (reboot with argument 1)."""
        time.sleep(self.reset_after_write_delay)
        self._send_json(str(Request("reboot", 1)))
        self.ExpectOk()
        time.sleep(self.reset_delay)

    def EpochTest(self, epoch: int) -> None:
        """Set the device epoch, wait, and verify it advanced accordingly."""
        delay = 3
        self._send_json(str(Request("setEpoch", epoch)))
        self.ExpectOk()
        time.sleep(delay)
        self._send_json(str(Request("getEpoch")))
        # Allow one second of slack either way for transport latency.
        self.ExpectRange("epoch", epoch + delay - 1, epoch + delay + 1)

    def LedTest(self) -> None:
        """Run the on-device LED test pattern for 1000 ms."""
        self._send_json(str(Request("ledTest", 1000)))
        self.ExpectOk()

    def Dump(self) -> None:
        """Test dump command without any parameters.

        Fix: a missing or malformed reply previously incremented neither
        counter (dead local ``ok`` and no else branch); it now counts as a
        failure, matching every other Expect helper.
        """
        self._send_json(str(Request("dump")))
        response = self._get_json()
        if response is not None and "result" in response \
                and response["result"] == "ok":
            self.IncrementOkCount()
        else:
            self.IncrementFailCount()

    def Unlock(self) -> None:
        """Clear the configuration lock attribute."""
        kwargs = {"lock": 0}
        self._send_json(str(Request("set", **kwargs)))
        self.ExpectOk()

    def Lock(self) -> None:
        """Engage the configuration lock attribute."""
        kwargs = {"lock": 1}
        self._send_json(str(Request("set", **kwargs)))
        self.ExpectOk()

    def GetAttribute(self, name: str):
        """Get an attribute by its name - doesn't affect test ok count."""
        self._send_json(str(Request("get", name)))
        response = self._get_json()
        result = None
        if response is not None:
            if "result" in response:
                if response["result"] == "ok":
                    if name in response:
                        value = response[name]
                        if isinstance(value, str):
                            # Device strings arrive wrapped in double quotes.
                            result = value.strip('\"')
                        else:
                            result = value
        self.logger.info(f'"{name}": {result}')
        return result

    def SetAttributes(self, **kwargs) -> None:
        """Set one or more attributes given as keyword arguments."""
        self._send_json(str(Request("set", **kwargs)))
        self.ExpectOk()

    def SetEpoch(self, epoch: int) -> None:
        """Set the device epoch without the round-trip check of EpochTest."""
        self._send_json(str(Request("setEpoch", epoch)))
        self.ExpectOk()

    def PrepareLog(self) -> int:
        """Prepare the event log for reading; returns the integer reply
        (presumably the number of stored entries - confirm with firmware)."""
        self._send_json(str(Request("prepareLog", 0)))  # fifo mode
        return self.ExpectInt()

    def ReadLog(self, count: int) -> list:
        """Read up to *count* entries from the prepared log."""
        self._send_json(str(Request("readLog", count)))
        return self.ExpectLog()

    def AckLog(self, count: int) -> int:
        """Acknowledge *count* log entries; returns the integer reply."""
        self._send_json(str(Request("ackLog", count)))
        return self.ExpectInt()

    def LogResults(self):
        """Emit the final pass/fail tally."""
        self.logger.info(f"Pass: {self.ok} Fail: {self.fail}")
# No CLI behavior: this module is meant to be imported and driven by a
# transport-specific harness.
if __name__ == "__main__":
    pass
| 32.721374 | 73 | 0.523737 |
import time
import json
import random
import string
import logging
from jsonrpcclient.requests import Request
class jtester:
def __init__(self, fname="config.json"):
print("jtester init")
self.protocol = None
self.inter_message_delay = 0.01
self.reset_delay = 10
self.reset_after_write_delay = 2
self.get_queue_timeout = 2.0
self.ok = 0
self.fail = 0
self._LoadConfig(fname)
self.logger = logging.getLogger('jtester')
def _LoadConfig(self, fname: str) -> None:
with open(fname, 'r') as f:
c = json.load(f)
if "inter_message_delay" in c:
self.inter_message_delay = c["inter_message_delay"]
if "reset_delay" in c:
self.reset_delay = c["reset_delay"]
def _send_json(self, text):
if self.protocol is not None:
self.logger.debug(text)
self.protocol.send_json(text, self.inter_message_delay)
else:
self.logger.warning("Transport not available")
def _get_json(self):
if self.protocol is not None:
result = self.protocol.get_json(self.get_queue_timeout)
self.logger.debug(json.dumps(result))
return result
else:
return None
def set_protocol(self, protocol) -> None:
self.protocol = protocol
def IncrementOkCount(self) -> None:
self.ok += 1
def IncrementFailCount(self) -> None:
self.fail += 1
self.logger.error("Test Fail")
def ExpectOk(self) -> None:
response = self._get_json()
if response is not None:
if "result" in response:
if response["result"] == "ok":
self.IncrementOkCount()
return
self.IncrementFailCount()
def ExpectError(self) -> None:
response = self._get_json()
if response is not None:
if "error" in response:
self.IncrementOkCount()
return
self.IncrementFailCount()
def ExpectValue(self, name, value) -> None:
response = self._get_json()
if response is not None:
if "result" in response:
if response["result"] == "ok":
if value is None:
if name in response:
self.IncrementOkCount()
return
elif isinstance(value, str):
if response[name] == value.strip('\"'):
self.IncrementOkCount()
return
else:
if response[name] == value:
self.IncrementOkCount()
return
self.IncrementFailCount()
def ExpectValues(self, **pairs) -> None:
responseFound = False
error = 0
response = self._get_json()
if response is not None:
if "result" in response:
if response["result"] == "ok":
responseFound = True
for (name, value) in pairs.items():
if value is None:
if name not in response:
error += 1
elif isinstance(value, str):
if response[name] != value.strip('\"'):
error += 1
elif response[name] != value:
error += 1
if not responseFound or error:
self.IncrementFailCount()
else:
self.IncrementOkCount()
def ExpectRange(self, name, imin, imax) -> None:
response = self._get_json()
if response is not None:
if "result" in response:
x = response["result"]
if isinstance(x, int):
if x >= imin and x <= imax:
self.IncrementOkCount()
return
self.IncrementFailCount()
def ExpectInt(self) -> int:
response = self._get_json()
if response is not None:
if "result" in response:
value = response["result"]
if isinstance(value, int):
self.IncrementOkCount()
return value
self.IncrementFailCount()
return -1
def ExpectStr(self) -> str:
response = self._get_json()
if response is not None:
if "result" in response:
value = response["result"]
if isinstance(value, str):
self.IncrementOkCount()
return value
self.IncrementFailCount()
return ""
def ExpectLog(self) -> list:
response = self._get_json()
if response is not None:
if "result" in response:
value = response["result"]
if isinstance(value, list):
self.IncrementOkCount()
return value
self.IncrementFailCount()
return [0, ""]
def SendFactoryReset(self) -> None:
time.sleep(self.reset_after_write_delay)
self._send_json(str(Request("factoryReset")))
self.ExpectOk()
time.sleep(self.reset_delay)
def SendReboot(self) -> None:
time.sleep(self.reset_after_write_delay)
self._send_json(str(Request("reboot")))
self.ExpectOk()
time.sleep(self.reset_delay)
def SendEnterBootloader(self) -> None:
time.sleep(self.reset_after_write_delay)
self._send_json(str(Request("reboot", 1)))
self.ExpectOk()
time.sleep(self.reset_delay)
def EpochTest(self, epoch: int) -> None:
delay = 3
self._send_json(str(Request(f"setEpoch", epoch)))
self.ExpectOk()
time.sleep(delay)
self._send_json(str(Request("getEpoch")))
self.ExpectRange("epoch", epoch + delay - 1, epoch + delay + 1)
def LedTest(self) -> None:
self._send_json(str(Request("ledTest", 1000)))
self.ExpectOk()
def Dump(self) -> None:
self._send_json(str(Request("dump")))
response = self._get_json()
ok = False
if response is not None:
if "result" in response:
if response["result"] == "ok":
self.IncrementOkCount()
else:
self.IncrementFailCount()
def Unlock(self) -> None:
kwargs = {"lock": 0}
self._send_json(str(Request("set", **kwargs)))
self.ExpectOk()
def Lock(self) -> None:
kwargs = {"lock": 1}
self._send_json(str(Request("set", **kwargs)))
self.ExpectOk()
def GetAttribute(self, name: str):
self._send_json(str(Request("get", name)))
response = self._get_json()
result = None
if response is not None:
if "result" in response:
if response["result"] == "ok":
if name in response:
value = response[name]
if isinstance(value, str):
result = value.strip('\"')
else:
result = value
self.logger.info(f'"{name}": {result}')
return result
def SetAttributes(self, **kwargs) -> None:
self._send_json(str(Request("set", **kwargs)))
self.ExpectOk()
def SetEpoch(self, epoch: int) -> None:
self._send_json(str(Request("setEpoch", epoch)))
self.ExpectOk()
def PrepareLog(self) -> int:
self._send_json(str(Request("prepareLog", 0))) # fifo mode
return self.ExpectInt()
def ReadLog(self, count: int) -> list:
self._send_json(str(Request("readLog", count)))
return self.ExpectLog()
def AckLog(self, count: int) -> int:
self._send_json(str(Request("ackLog", count)))
result = self.ExpectInt()
return result
def LogResults(self):
self.logger.info(f"Pass: {self.ok} Fail: {self.fail}")
if __name__ == "__main__":
pass
| true | true |
f711955d7814146fa3409fdcd71c37026ca25fdd | 11,405 | py | Python | augmentation/methods/cyclegan/utils.py | SaraR-1/model-patching | 97b30bad4bb4575a5f3a4cc23fbd333b10a057a8 | [
"Apache-2.0"
] | 28 | 2020-08-19T02:59:37.000Z | 2022-03-17T18:10:24.000Z | augmentation/methods/cyclegan/utils.py | SaraR-1/model-patching | 97b30bad4bb4575a5f3a4cc23fbd333b10a057a8 | [
"Apache-2.0"
] | null | null | null | augmentation/methods/cyclegan/utils.py | SaraR-1/model-patching | 97b30bad4bb4575a5f3a4cc23fbd333b10a057a8 | [
"Apache-2.0"
] | 3 | 2021-01-29T10:20:14.000Z | 2021-11-15T17:06:27.000Z | import datetime
import tensorflow as tf
import random
import wandb
from tensorflow_examples.models.pix2pix import pix2pix
from augmentation.dataflows.utils import create_paired_direct_dataflow, \
create_paired_parallel_dataflow_via_numpy
from augmentation.methods.cyclegan.models import mnist_unet_generator, mnist_discriminator, unet_generator
from augmentation.utilities.optim import build_lr_scheduler
from augmentation.utilities.visualize import gallery
# Other places to look for training GANs
# https://github.com/eriklindernoren/Keras-GAN
def gradient_penalty(f, real, fake, mode, scale=10.0):
    """Gradient penalty regularizer for a GAN critic/discriminator ``f``.

    mode selects the scheme: 'none' (returns 0), 'dragan' (perturb around
    the real batch only) or 'wgan-gp' (interpolate between real and fake).
    The penalty drives the per-sample gradient norm of ``f`` towards 1 and
    is multiplied by ``scale`` before being returned.
    """
    # https://github.com/LynnHo/CycleGAN-Tensorflow-2/blob/master/tf2gan/loss.py
    def _gradient_penalty(f, real, fake=None):
        def _interpolate(a, b=None):
            if b is None:  # interpolation in DRAGAN
                # DRAGAN perturbs the real sample with uniform noise scaled
                # by the batch standard deviation.
                beta = tf.random.uniform(shape=tf.shape(a), minval=0., maxval=1.)
                b = a + 0.5 * tf.math.reduce_std(a) * beta
            # One interpolation coefficient per batch element, broadcast
            # over the remaining dimensions.
            shape = [tf.shape(a)[0]] + [1] * (a.shape.ndims - 1)
            alpha = tf.random.uniform(shape=shape, minval=0., maxval=1.)
            inter = a + alpha * (b - a)
            inter.set_shape(a.shape)
            return inter

        x = _interpolate(real, fake)
        with tf.GradientTape() as t:
            t.watch(x)  # x is not a Variable, so it must be watched explicitly
            pred = tf.reduce_mean(tf.reshape(f(x), [tf.shape(real)[0], -1]), axis=1)
        grad = t.gradient(pred, x)
        norm = tf.norm(tf.reshape(grad, [tf.shape(grad)[0], -1]), axis=1)
        # Penalize deviation of the per-sample gradient norm from 1.
        gp = tf.reduce_mean((norm - 1.) ** 2)
        return gp

    if mode == 'none':
        gp = tf.constant(0, dtype=real.dtype)
    elif mode == 'dragan':
        gp = _gradient_penalty(f, real)
    elif mode == 'wgan-gp':
        gp = _gradient_penalty(f, real, fake)
    else:
        raise NotImplementedError

    return gp * scale
class ReplayBuffer(object):
    """Fixed-capacity store of past items (e.g. generated images).

    Entries live in a dict keyed by slot index.  While there is room, new
    items fill slots in order; once full, randomly chosen occupied slots
    are overwritten.

    Adapted from
    https://github.com/tensorflow/models/blob/master/research/pcl_rl/replay_buffer.py
    """

    def __init__(self, max_size):
        self.max_size = max_size   # capacity in items
        self.cur_size = 0          # number of occupied slots
        self.buffer = {}           # slot index -> item
        self.oldest_idx = 0
        self.init_length = 0       # slots below this index are never evicted

    def __len__(self):
        return self.cur_size

    def add(self, images):
        """Insert *images*, evicting random entries once the buffer is full."""
        pos = 0
        # Phase 1: fill any remaining empty slots in order.
        while self.cur_size < self.max_size and pos < len(images):
            self.buffer[self.cur_size] = images[pos]
            self.cur_size += 1
            pos += 1
        # Phase 2: overwrite randomly selected occupied slots.
        if pos < len(images):
            for slot in self.remove_n(len(images) - pos):
                self.buffer[slot] = images[pos]
                pos += 1
        assert len(self.buffer) == self.cur_size

    def remove_n(self, n):
        """Pick *n* distinct evictable slot indices uniformly at random."""
        return random.sample(range(self.init_length, self.cur_size), n)

    def get_batch(self, n):
        """Return *n* items sampled without replacement, as a list."""
        chosen = random.sample(range(self.cur_size), n)
        return [self.buffer[slot] for slot in chosen]

    def get_tf_batch(self, n):
        """Return *n* sampled items stacked into a single tf tensor."""
        chosen = random.sample(range(self.cur_size), n)
        return tf.convert_to_tensor([self.buffer[slot] for slot in chosen])
def wgan_loss(targets, predictions):
    """WGAN critic loss: targets in {0, 1} are mapped to {+1, -1} signs."""
    signs = -2 * targets + 1.
    return tf.reduce_mean(signs * predictions)
def build_gan_loss_fn(loss_name):
    """Return the adversarial loss callable for 'bce', 'lsgan' or 'wgan'."""
    if loss_name == 'wgan':
        return wgan_loss
    if loss_name == 'bce':
        return tf.keras.losses.BinaryCrossentropy(from_logits=True)
    if loss_name == 'lsgan':
        return tf.keras.losses.MeanSquaredError()
    raise NotImplementedError
def discriminator_loss(real, generated, loss_fn):
    """Discriminator loss: real scores toward 1, generated toward 0.

    The sum is halved so the discriminator learns at half the pace of
    the generators.
    """
    loss_on_real = loss_fn(tf.ones_like(real), real)
    loss_on_fake = loss_fn(tf.zeros_like(generated), generated)
    return 0.5 * (loss_on_real + loss_on_fake)
def generator_loss(generated, loss_fn):
    """Adversarial generator loss: push D's score on fakes toward 'real'."""
    all_real = tf.ones_like(generated)
    return loss_fn(all_real, generated)
def cycle_loss(real_image, cycled_image, scale):
    """Cycle-consistency: scaled mean absolute (L1) reconstruction error."""
    l1 = tf.reduce_mean(tf.abs(real_image - cycled_image))
    return scale * l1
def identity_loss(real_image, same_image, scale):
    """Identity-mapping loss: half the scaled L1 between image and G(image)."""
    l1 = tf.reduce_mean(tf.abs(real_image - same_image))
    return scale * 0.5 * l1
def build_cyclegan_models(n_channels, norm_type):
    """Build the two generators and two discriminators of a CycleGAN.

    Returns (generator_g, generator_f, discriminator_x, discriminator_y),
    all using pix2pix building blocks with the given normalization.
    """
    assert norm_type in ['instancenorm', 'batchnorm']

    def _gen():
        return pix2pix.unet_generator(n_channels, norm_type=norm_type)

    def _disc():
        return pix2pix.discriminator(norm_type=norm_type, target=False)

    # Construction order matches the returned tuple: G, F, D_X, D_Y.
    return _gen(), _gen(), _disc(), _disc()
def build_mnist_cyclegan_models(norm_type):
    """Build CycleGAN generators/discriminators sized for MNIST images.

    Returns (generator_g, generator_f, discriminator_x, discriminator_y).
    """
    assert norm_type in ['instancenorm', 'batchnorm']

    def _gen():
        return mnist_unet_generator(norm_type=norm_type)

    def _disc():
        return mnist_discriminator(norm_type=norm_type, target=False)

    # Construction order matches the returned tuple: G, F, D_X, D_Y.
    return _gen(), _gen(), _disc(), _disc()
def get_models_from_input_shape(input_shape, norm_type, output_init=0.02, residual_output=False):
    """Return a (generator, discriminator) pair suited to *input_shape*.

    (28, 28, 1) selects the MNIST architectures; (256, 256, 3) selects the
    stock pix2pix models (which do not support output_init/residual_output);
    any other shape falls through to the local, configurable unet_generator.
    """
    if input_shape == (28, 28, 1):
        # MNIST-like data
        return mnist_unet_generator(norm_type=norm_type), \
               mnist_discriminator(norm_type=norm_type, target=False)
    elif input_shape == (256, 256, 3):
        # TODO: just use our unet_generator fn
        # The pix2pix generator cannot honor these options, so reject
        # non-default values rather than silently ignoring them.
        if residual_output is True or output_init != 0.02:
            raise NotImplementedError
        return pix2pix.unet_generator(output_channels=3, norm_type=norm_type), \
               pix2pix.discriminator(norm_type=norm_type, target=False)
    else:
        return unet_generator(output_channels=3, input_shape=input_shape, norm_type=norm_type,
                              output_init=output_init, residual_output=residual_output), \
               pix2pix.discriminator(norm_type=norm_type, target=False)
def build_models(source_input_shape, target_input_shape, norm_type, output_init=0.02, residual_output=False):
    """Build both translation directions of a CycleGAN.

    Returns (generator_s_to_t, generator_t_to_s, discriminator_s,
    discriminator_t).  Each generator/discriminator pair is sized from one
    domain's input shape.  NOTE(review): with differing source/target
    shapes the generator output shape may not match the opposite domain -
    confirm intended usage with unequal shapes.
    """
    assert norm_type in ['instancenorm', 'batchnorm']
    generator_s_to_t, discriminator_s = get_models_from_input_shape(source_input_shape, norm_type, output_init, residual_output)
    generator_t_to_s, discriminator_t = get_models_from_input_shape(target_input_shape, norm_type, output_init, residual_output)
    return generator_s_to_t, generator_t_to_s, discriminator_s, discriminator_t
def build_optimizers(lr_gen=2e-4, lr_disc=2e-4,
                     beta_1_gen=0.5, beta_1_disc=0.5,
                     lr_scheduler='constant', lr_decay_steps=None):
    """Create the four Adam optimizers used for CycleGAN training.

    Returns (generator_g_opt, generator_f_opt, discriminator_x_opt,
    discriminator_y_opt).  Generators and discriminators get independent
    learning rates and beta_1 values; every optimizer receives its own
    schedule instance.
    """
    def _adam(lr, beta_1):
        schedule = build_lr_scheduler(lr_scheduler, 0, 0, lr,
                                      lr_decay_steps=lr_decay_steps)
        return tf.keras.optimizers.Adam(schedule, beta_1=beta_1)

    return (_adam(lr_gen, beta_1_gen),
            _adam(lr_gen, beta_1_gen),
            _adam(lr_disc, beta_1_disc),
            _adam(lr_disc, beta_1_disc))
def create_cyclegan_data_generator(source_dataset, target_dataset, batch_size, augmentations,
                                   dataflow, cache_dir):
    """Build the paired (source, target) batch generator for training.

    dataflow 'disk_cached' streams through an on-disk cache stamped with
    the current time (so each run gets a fresh cache); 'in_memory' loads
    both datasets through numpy.
    """
    if dataflow == 'disk_cached':
        # Timestamp the cache path so separate runs never share caches.
        cache_dir = cache_dir + datetime.datetime.now().strftime('%d_%m_%y__%H_%M_%S')
        # Shuffle hangs sometimes (e.g. for horse2zebra)
        return create_paired_direct_dataflow(source_dataset, target_dataset, batch_size,
                                             augmentations, x_only=True,
                                             cache_dir1=cache_dir + '1',
                                             cache_dir2=cache_dir + '2',
                                             shuffle=True)
    elif dataflow == 'in_memory':
        return create_paired_parallel_dataflow_via_numpy(source_dataset, target_dataset,
                                                         batch_size, augmentations, x_only=True)
    else:
        raise NotImplementedError
def generate_and_log_one_image_batch(data_generator,
                                     generator_g,
                                     generator_f,
                                     step):
    """Translate one paired batch in both directions and log images to W&B.

    Logs real, fake (translated), cycled and identity-mapped images for
    both domains at the given step.  Pixel values are assumed to be in
    [-1, 1] and are rescaled to [0, 1] for display - TODO confirm against
    the dataflow's normalization.
    """
    # Grab a batch from the dataset
    for real_x, real_y in data_generator:
        # Convert to tensors
        real_x, real_y = tf.convert_to_tensor(real_x), tf.convert_to_tensor(real_y)

        # Compute the fake examples
        fake_y = generator_g(real_x, training=True)
        fake_x = generator_f(real_y, training=True)

        # Cycle the fake examples
        cycled_x = generator_f(fake_y, training=True)
        cycled_y = generator_g(fake_x, training=True)

        # Compute the identity examples
        same_x = generator_f(real_x, training=True)
        same_y = generator_g(real_y, training=True)

        # Log everything to Weights and Biases
        wandb.log({'test/real_x': wandb.Image(gallery(real_x.numpy() * 0.5 + 0.5)),
                   'test/fake_x': wandb.Image(gallery(fake_x.numpy() * 0.5 + 0.5)),
                   'test/cycled_x': wandb.Image(gallery(cycled_x.numpy() * 0.5 + 0.5)),
                   'test/same_x': wandb.Image(gallery(same_x.numpy() * 0.5 + 0.5)),
                   'test/real_y': wandb.Image(gallery(real_y.numpy() * 0.5 + 0.5)),
                   'test/fake_y': wandb.Image(gallery(fake_y.numpy() * 0.5 + 0.5)),
                   'test/cycled_y': wandb.Image(gallery(cycled_y.numpy() * 0.5 + 0.5)),
                   'test/same_y': wandb.Image(gallery(same_y.numpy() * 0.5 + 0.5))}, step=step)

        # Break after a single batch: note, this will not run if you remove the break due to wandb reasons (ask Karan)
        break
# Manual smoke test of ReplayBuffer eviction/sampling with plain ints and
# with tf tensors; run this module directly to inspect the printed output.
if __name__ == '__main__':
    buffer = ReplayBuffer(1)
    buffer.add([1])
    buffer.add([2])
    buffer.add([3])
    print(buffer.get_batch(1))
    print(buffer.get_batch(1))
    print(buffer.get_batch(1))
    buffer.add([4])
    print(buffer.get_batch(1))
    print(buffer.buffer)
    buffer = ReplayBuffer(1)
    buffer.add(tf.convert_to_tensor([1]))
    buffer.add(tf.convert_to_tensor([2]))
    buffer.add(tf.convert_to_tensor([3]))
    print(tf.convert_to_tensor(buffer.get_batch(1)))
    print(buffer.get_batch(1))
    print(buffer.get_batch(1))
    buffer.add(tf.convert_to_tensor([4]))
    print(buffer.get_batch(1))
    print(buffer.buffer)
| 41.624088 | 128 | 0.635949 | import datetime
import tensorflow as tf
import random
import wandb
from tensorflow_examples.models.pix2pix import pix2pix
from augmentation.dataflows.utils import create_paired_direct_dataflow, \
create_paired_parallel_dataflow_via_numpy
from augmentation.methods.cyclegan.models import mnist_unet_generator, mnist_discriminator, unet_generator
from augmentation.utilities.optim import build_lr_scheduler
from augmentation.utilities.visualize import gallery
def gradient_penalty(f, real, fake, mode, scale=10.0):
def _gradient_penalty(f, real, fake=None):
def _interpolate(a, b=None):
if b is None:
beta = tf.random.uniform(shape=tf.shape(a), minval=0., maxval=1.)
b = a + 0.5 * tf.math.reduce_std(a) * beta
shape = [tf.shape(a)[0]] + [1] * (a.shape.ndims - 1)
alpha = tf.random.uniform(shape=shape, minval=0., maxval=1.)
inter = a + alpha * (b - a)
inter.set_shape(a.shape)
return inter
x = _interpolate(real, fake)
with tf.GradientTape() as t:
t.watch(x)
pred = tf.reduce_mean(tf.reshape(f(x), [tf.shape(real)[0], -1]), axis=1)
grad = t.gradient(pred, x)
norm = tf.norm(tf.reshape(grad, [tf.shape(grad)[0], -1]), axis=1)
gp = tf.reduce_mean((norm - 1.) ** 2)
return gp
if mode == 'none':
gp = tf.constant(0, dtype=real.dtype)
elif mode == 'dragan':
gp = _gradient_penalty(f, real)
elif mode == 'wgan-gp':
gp = _gradient_penalty(f, real, fake)
else:
raise NotImplementedError
return gp * scale
class ReplayBuffer(object):
def __init__(self, max_size):
self.max_size = max_size
self.cur_size = 0
self.buffer = {}
self.oldest_idx = 0
self.init_length = 0
def __len__(self):
return self.cur_size
def add(self, images):
idx = 0
while self.cur_size < self.max_size and idx < len(images):
self.buffer[self.cur_size] = images[idx]
self.cur_size += 1
idx += 1
if idx < len(images):
remove_idxs = self.remove_n(len(images) - idx)
for remove_idx in remove_idxs:
self.buffer[remove_idx] = images[idx]
idx += 1
assert len(self.buffer) == self.cur_size
def remove_n(self, n):
return random.sample(range(self.init_length, self.cur_size), n)
def get_batch(self, n):
idxs = random.sample(range(self.cur_size), n)
return [self.buffer[idx] for idx in idxs]
def get_tf_batch(self, n):
idxs = random.sample(range(self.cur_size), n)
return tf.convert_to_tensor([self.buffer[idx] for idx in idxs])
def wgan_loss(targets, predictions):
return tf.reduce_mean((-2 * targets + 1.) * predictions)
def build_gan_loss_fn(loss_name):
if loss_name == 'bce':
return tf.keras.losses.BinaryCrossentropy(from_logits=True)
elif loss_name == 'lsgan':
return tf.keras.losses.MeanSquaredError()
elif loss_name == 'wgan':
return wgan_loss
else:
raise NotImplementedError
def discriminator_loss(real, generated, loss_fn):
real_loss = loss_fn(tf.ones_like(real), real)
generated_loss = loss_fn(tf.zeros_like(generated), generated)
total_disc_loss = real_loss + generated_loss
return total_disc_loss * 0.5
def generator_loss(generated, loss_fn):
return loss_fn(tf.ones_like(generated), generated)
def cycle_loss(real_image, cycled_image, scale):
# Cycle-consistency using an L! loss
return scale * tf.reduce_mean(tf.abs(real_image - cycled_image))
def identity_loss(real_image, same_image, scale):
# Map the image to itself and compute the L1 loss
return scale * 0.5 * tf.reduce_mean(tf.abs(real_image - same_image))
def build_cyclegan_models(n_channels, norm_type):
assert norm_type in ['instancenorm', 'batchnorm']
generator_g = pix2pix.unet_generator(n_channels, norm_type=norm_type)
generator_f = pix2pix.unet_generator(n_channels, norm_type=norm_type)
discriminator_x = pix2pix.discriminator(norm_type=norm_type, target=False)
discriminator_y = pix2pix.discriminator(norm_type=norm_type, target=False)
return generator_g, generator_f, discriminator_x, discriminator_y
def build_mnist_cyclegan_models(norm_type):
assert norm_type in ['instancenorm', 'batchnorm']
generator_g = mnist_unet_generator(norm_type=norm_type)
generator_f = mnist_unet_generator(norm_type=norm_type)
discriminator_x = mnist_discriminator(norm_type=norm_type, target=False)
discriminator_y = mnist_discriminator(norm_type=norm_type, target=False)
return generator_g, generator_f, discriminator_x, discriminator_y
def get_models_from_input_shape(input_shape, norm_type, output_init=0.02, residual_output=False):
if input_shape == (28, 28, 1):
# MNIST-like data
return mnist_unet_generator(norm_type=norm_type), \
mnist_discriminator(norm_type=norm_type, target=False)
elif input_shape == (256, 256, 3):
# TODO: just use our unet_generator fn
if residual_output is True or output_init != 0.02:
raise NotImplementedError
return pix2pix.unet_generator(output_channels=3, norm_type=norm_type), \
pix2pix.discriminator(norm_type=norm_type, target=False)
else:
return unet_generator(output_channels=3, input_shape=input_shape, norm_type=norm_type,
output_init=output_init, residual_output=residual_output), \
pix2pix.discriminator(norm_type=norm_type, target=False)
def build_models(source_input_shape, target_input_shape, norm_type, output_init=0.02, residual_output=False):
assert norm_type in ['instancenorm', 'batchnorm']
generator_s_to_t, discriminator_s = get_models_from_input_shape(source_input_shape, norm_type, output_init, residual_output)
generator_t_to_s, discriminator_t = get_models_from_input_shape(target_input_shape, norm_type, output_init, residual_output)
return generator_s_to_t, generator_t_to_s, discriminator_s, discriminator_t
def build_optimizers(lr_gen=2e-4, lr_disc=2e-4,
beta_1_gen=0.5, beta_1_disc=0.5,
lr_scheduler='constant', lr_decay_steps=None):
generator_g_optimizer = tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_gen,
lr_decay_steps=lr_decay_steps),
beta_1=beta_1_gen)
generator_f_optimizer = tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_gen,
lr_decay_steps=lr_decay_steps),
beta_1=beta_1_gen)
discriminator_x_optimizer = tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_disc,
lr_decay_steps=lr_decay_steps),
beta_1=beta_1_disc)
discriminator_y_optimizer = tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_disc,
lr_decay_steps=lr_decay_steps),
beta_1=beta_1_disc)
return generator_g_optimizer, generator_f_optimizer, discriminator_x_optimizer, discriminator_y_optimizer
def create_cyclegan_data_generator(source_dataset, target_dataset, batch_size, augmentations,
dataflow, cache_dir):
if dataflow == 'disk_cached':
cache_dir = cache_dir + datetime.datetime.now().strftime('%d_%m_%y__%H_%M_%S')
# Shuffle hangs sometimes (e.g. for horse2zebra)
return create_paired_direct_dataflow(source_dataset, target_dataset, batch_size,
augmentations, x_only=True,
cache_dir1=cache_dir + '1',
cache_dir2=cache_dir + '2',
shuffle=True)
elif dataflow == 'in_memory':
return create_paired_parallel_dataflow_via_numpy(source_dataset, target_dataset,
batch_size, augmentations, x_only=True)
else:
raise NotImplementedError
def generate_and_log_one_image_batch(data_generator,
generator_g,
generator_f,
step):
# Grab a batch from the dataset
for real_x, real_y in data_generator:
# Convert to tensors
real_x, real_y = tf.convert_to_tensor(real_x), tf.convert_to_tensor(real_y)
# Compute the fake examples
fake_y = generator_g(real_x, training=True)
fake_x = generator_f(real_y, training=True)
# Cycle the fake examples
cycled_x = generator_f(fake_y, training=True)
cycled_y = generator_g(fake_x, training=True)
# Compute the identity examples
same_x = generator_f(real_x, training=True)
same_y = generator_g(real_y, training=True)
# Log everything to Weights and Biases
wandb.log({'test/real_x': wandb.Image(gallery(real_x.numpy() * 0.5 + 0.5)),
'test/fake_x': wandb.Image(gallery(fake_x.numpy() * 0.5 + 0.5)),
'test/cycled_x': wandb.Image(gallery(cycled_x.numpy() * 0.5 + 0.5)),
'test/same_x': wandb.Image(gallery(same_x.numpy() * 0.5 + 0.5)),
'test/real_y': wandb.Image(gallery(real_y.numpy() * 0.5 + 0.5)),
'test/fake_y': wandb.Image(gallery(fake_y.numpy() * 0.5 + 0.5)),
'test/cycled_y': wandb.Image(gallery(cycled_y.numpy() * 0.5 + 0.5)),
'test/same_y': wandb.Image(gallery(same_y.numpy() * 0.5 + 0.5))}, step=step)
# Break after a single batch: note, this will not run if you remove the break due to wandb reasons (ask Karan)
break
if __name__ == '__main__':
buffer = ReplayBuffer(1)
buffer.add([1])
buffer.add([2])
buffer.add([3])
print(buffer.get_batch(1))
print(buffer.get_batch(1))
print(buffer.get_batch(1))
buffer.add([4])
print(buffer.get_batch(1))
print(buffer.buffer)
buffer = ReplayBuffer(1)
buffer.add(tf.convert_to_tensor([1]))
buffer.add(tf.convert_to_tensor([2]))
buffer.add(tf.convert_to_tensor([3]))
print(tf.convert_to_tensor(buffer.get_batch(1)))
print(buffer.get_batch(1))
print(buffer.get_batch(1))
buffer.add(tf.convert_to_tensor([4]))
print(buffer.get_batch(1))
print(buffer.buffer)
| true | true |
f71195d43ec5aff59399866c4085c5cb7a9cb696 | 433 | py | Python | collect.py | foamliu/Facial-Expression-Prediction-v2 | cb29f7cfbc2136e13adc2b35bbff95fdfbe063f6 | [
"MIT"
] | 4 | 2019-09-17T10:00:51.000Z | 2021-04-24T03:10:43.000Z | collect.py | foamliu/Facial-Expression-Prediction-v2 | cb29f7cfbc2136e13adc2b35bbff95fdfbe063f6 | [
"MIT"
] | 1 | 2019-09-26T08:08:41.000Z | 2019-09-26T08:22:08.000Z | collect.py | foamliu/Facial-Expression-Prediction-v2 | cb29f7cfbc2136e13adc2b35bbff95fdfbe063f6 | [
"MIT"
] | null | null | null | import pickle
import cv2 as cv
if __name__ == "__main__":
    # Load the pre-processed FER2013 dataset: expects a dict with a 'train'
    # split whose samples carry 'image_path' and 'label' keys (see usage below).
    with open('fer2013.pkl', 'rb') as file:
        data = pickle.load(file)
    train = data['train']
    # Only process the first 10 samples.
    train = train[:10]
    for i, sample in enumerate(train):
        filename = sample['image_path']
        img = cv.imread(filename)  # NOTE(review): returns None for a missing path — confirm paths exist
        new_name = 'images/{}.jpg'.format(i)
        cv.imwrite(new_name, img)  # assumes the 'images/' directory already exists — TODO confirm
        label = sample['label']
        print(label)
| 22.789474 | 44 | 0.581986 | import pickle
import cv2 as cv
if __name__ == "__main__":
with open('fer2013.pkl', 'rb') as file:
data = pickle.load(file)
train = data['train']
train = train[:10]
for i, sample in enumerate(train):
filename = sample['image_path']
img = cv.imread(filename)
new_name = 'images/{}.jpg'.format(i)
cv.imwrite(new_name, img)
label = sample['label']
print(label)
| true | true |
f711965dc8523212746c3ea30163a0260f421dd0 | 24 | py | Python | torchlight/xp/__init__.py | l3robot/torchlight | e9a809aad0b5e75f97bf0cb50c9c799ea7b98eab | [
"MIT"
] | null | null | null | torchlight/xp/__init__.py | l3robot/torchlight | e9a809aad0b5e75f97bf0cb50c9c799ea7b98eab | [
"MIT"
] | null | null | null | torchlight/xp/__init__.py | l3robot/torchlight | e9a809aad0b5e75f97bf0cb50c9c799ea7b98eab | [
"MIT"
] | null | null | null | from .splitter import *
| 12 | 23 | 0.75 | from .splitter import *
| true | true |
f71199626d1f05e87cb35c323e9b5e8cccb1c230 | 1,888 | py | Python | tuiuiu/tuiuiutenant/migration_executors/base.py | caputomarcos/tuiuiu.io | d8fb57cf95487e7fe1454b2130ef18acc916da46 | [
"BSD-3-Clause"
] | 3 | 2019-08-08T09:09:35.000Z | 2020-12-15T18:04:17.000Z | tuiuiu/tuiuiutenant/migration_executors/base.py | caputomarcos/tuiuiu.io | d8fb57cf95487e7fe1454b2130ef18acc916da46 | [
"BSD-3-Clause"
] | null | null | null | tuiuiu/tuiuiutenant/migration_executors/base.py | caputomarcos/tuiuiu.io | d8fb57cf95487e7fe1454b2130ef18acc916da46 | [
"BSD-3-Clause"
] | 1 | 2017-09-09T20:10:40.000Z | 2017-09-09T20:10:40.000Z | import sys
from django.core.management.commands.migrate import Command as MigrateCommand
from django.db import transaction
from tuiuiu.tuiuiutenant.utils import get_public_schema_name
def run_migrations(args, options, executor_codename, schema_name, allow_atomic=True):
    """Run Django's ``migrate`` command against a single tenant schema.

    All command output is prefixed with ``[executor_codename:schema_name]`` so
    interleaved logs from concurrent executors stay readable.

    :param args: positional arguments forwarded to ``MigrateCommand``
    :param options: keyword options forwarded to ``MigrateCommand``
    :param executor_codename: label of the executor driving this migration
    :param schema_name: tenant schema to migrate
    :param allow_atomic: when False, ``TransactionManagementError`` raised
        during cleanup is re-raised instead of being tolerated
    """
    # Imported locally so importing this module does not require a fully
    # configured Django setup.
    from django.core.management import color
    from django.core.management.base import OutputWrapper
    from django.db import connection
    style = color.color_style()
    def style_func(msg):
        # Prefix every line with "[executor:schema]".
        return '[%s:%s] %s' % (
            style.NOTICE(executor_codename),
            style.NOTICE(schema_name),
            msg
        )
    stdout = OutputWrapper(sys.stdout)
    stdout.style_func = style_func
    stderr = OutputWrapper(sys.stderr)
    stderr.style_func = style_func
    if int(options.get('verbosity', 1)) >= 1:
        stdout.write(style.NOTICE("=== Running migrate for schema %s" % schema_name))
    connection.set_schema(schema_name)
    MigrateCommand(stdout=stdout, stderr=stderr).execute(*args, **options)
    try:
        # Flush the work and drop the connection so the next schema starts clean.
        transaction.commit()
        connection.close()
        connection.connection = None
    except transaction.TransactionManagementError:
        if not allow_atomic:
            raise
        # We are inside an atomic transaction: don't commit/close connections.
        pass
    connection.set_schema_to_public()
class MigrationExecutor(object):
    """Base class for strategies that migrate a list of tenant schemas.

    Subclasses set ``codename`` and implement ``run_tenant_migrations``.
    """
    codename = None
    def __init__(self, args, options):
        """Remember the command-line ``args``/``options`` to forward later."""
        self.args = args
        self.options = options
    def run_migrations(self, tenants):
        """Migrate the public schema first (synchronously), then the tenants.

        The public schema is removed from ``tenants`` in place before the
        remaining schemas are handed to ``run_tenant_migrations``.
        """
        public_schema = get_public_schema_name()
        if public_schema in tenants:
            run_migrations(self.args, self.options, self.codename, public_schema)
            tenants.remove(public_schema)
        self.run_tenant_migrations(tenants)
    def run_tenant_migrations(self, tenant):
        """Migrate the given tenant schemas; must be provided by subclasses."""
        raise NotImplementedError
| 29.046154 | 86 | 0.692797 | import sys
from django.core.management.commands.migrate import Command as MigrateCommand
from django.db import transaction
from tuiuiu.tuiuiutenant.utils import get_public_schema_name
def run_migrations(args, options, executor_codename, schema_name, allow_atomic=True):
from django.core.management import color
from django.core.management.base import OutputWrapper
from django.db import connection
style = color.color_style()
def style_func(msg):
return '[%s:%s] %s' % (
style.NOTICE(executor_codename),
style.NOTICE(schema_name),
msg
)
stdout = OutputWrapper(sys.stdout)
stdout.style_func = style_func
stderr = OutputWrapper(sys.stderr)
stderr.style_func = style_func
if int(options.get('verbosity', 1)) >= 1:
stdout.write(style.NOTICE("=== Running migrate for schema %s" % schema_name))
connection.set_schema(schema_name)
MigrateCommand(stdout=stdout, stderr=stderr).execute(*args, **options)
try:
transaction.commit()
connection.close()
connection.connection = None
except transaction.TransactionManagementError:
if not allow_atomic:
raise
pass
connection.set_schema_to_public()
class MigrationExecutor(object):
codename = None
def __init__(self, args, options):
self.args = args
self.options = options
def run_migrations(self, tenants):
public_schema_name = get_public_schema_name()
if public_schema_name in tenants:
run_migrations(self.args, self.options, self.codename, public_schema_name)
tenants.pop(tenants.index(public_schema_name))
self.run_tenant_migrations(tenants)
def run_tenant_migrations(self, tenant):
raise NotImplementedError
| true | true |
f7119983a202a347052dbee89c37d91f383faf43 | 904 | py | Python | qiskit_machine_learning/algorithms/distribution_learners/__init__.py | Zoufalc/qiskit-machine-learning | aae3941214cd9667a53b643f229d11d0bff32c60 | [
"Apache-2.0"
] | 1 | 2021-07-07T21:23:38.000Z | 2021-07-07T21:23:38.000Z | qiskit_machine_learning/algorithms/distribution_learners/__init__.py | Zoufalc/qiskit-machine-learning | aae3941214cd9667a53b643f229d11d0bff32c60 | [
"Apache-2.0"
] | null | null | null | qiskit_machine_learning/algorithms/distribution_learners/__init__.py | Zoufalc/qiskit-machine-learning | aae3941214cd9667a53b643f229d11d0bff32c60 | [
"Apache-2.0"
] | 1 | 2021-04-11T14:30:32.000Z | 2021-04-11T14:30:32.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Distribution Learners Package """
from .qgan import (DiscriminativeNetwork,
GenerativeNetwork,
NumPyDiscriminator,
PyTorchDiscriminator,
QuantumGenerator,
QGAN)
__all__ = [
'DiscriminativeNetwork',
'GenerativeNetwork',
'NumPyDiscriminator',
'PyTorchDiscriminator',
'QuantumGenerator',
'QGAN',
]
| 30.133333 | 77 | 0.670354 |
from .qgan import (DiscriminativeNetwork,
GenerativeNetwork,
NumPyDiscriminator,
PyTorchDiscriminator,
QuantumGenerator,
QGAN)
__all__ = [
'DiscriminativeNetwork',
'GenerativeNetwork',
'NumPyDiscriminator',
'PyTorchDiscriminator',
'QuantumGenerator',
'QGAN',
]
| true | true |
f7119988b8ce9a26197da88008895fe3d1beeded | 908 | py | Python | image_converter/urls.py | dumrauf/web_tools | df2efff4c3435bfb2df131a6de572ecd50cc034d | [
"MIT"
] | null | null | null | image_converter/urls.py | dumrauf/web_tools | df2efff4c3435bfb2df131a6de572ecd50cc034d | [
"MIT"
] | null | null | null | image_converter/urls.py | dumrauf/web_tools | df2efff4c3435bfb2df131a6de572ecd50cc034d | [
"MIT"
] | null | null | null | """web_tools URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from image_converter.views import upload_and_convert_image_file
__author__ = 'dominic'
urlpatterns = [
url(r'', upload_and_convert_image_file, name='upload_and_convert_image_file'),
]
| 34.923077 | 82 | 0.73348 | from django.conf.urls import url, include
from django.contrib import admin
from image_converter.views import upload_and_convert_image_file
__author__ = 'dominic'
urlpatterns = [
url(r'', upload_and_convert_image_file, name='upload_and_convert_image_file'),
]
| true | true |
f7119aad378282c16b985f4ad98bfd62252bbb62 | 22 | py | Python | builtinUI/__init__.py | BlackCatDevel0per/s2txt | fe1cf551057be5777eb8f27e9d56dd2ae3cbb514 | [
"Apache-2.0"
] | null | null | null | builtinUI/__init__.py | BlackCatDevel0per/s2txt | fe1cf551057be5777eb8f27e9d56dd2ae3cbb514 | [
"Apache-2.0"
] | null | null | null | builtinUI/__init__.py | BlackCatDevel0per/s2txt | fe1cf551057be5777eb8f27e9d56dd2ae3cbb514 | [
"Apache-2.0"
] | null | null | null | from .main import UIS
| 11 | 21 | 0.772727 | from .main import UIS
| true | true |
f7119b37d5164e1883c48596812221294dc20231 | 906 | py | Python | pws/asymmetric/rsa/helpers.py | pqlx/pws-crypto | 23dcf59d0b37d811d0a9bda995a3ea7d09051416 | [
"MIT"
] | 1 | 2020-12-10T01:14:29.000Z | 2020-12-10T01:14:29.000Z | pws/asymmetric/rsa/helpers.py | pqlx/pws-crypto | 23dcf59d0b37d811d0a9bda995a3ea7d09051416 | [
"MIT"
] | null | null | null | pws/asymmetric/rsa/helpers.py | pqlx/pws-crypto | 23dcf59d0b37d811d0a9bda995a3ea7d09051416 | [
"MIT"
] | null | null | null | from typing import Union
import math
AbstractText = Union[int, bytes]
def byte_length(i: int) -> int:
    """Returns the minimal amount of bytes needed to represent unsigned integer `i`.

    Note: yields 0 for i == 0 (no bytes needed), matching the original contract.
    """
    # Pure integer arithmetic. The previous math.ceil(math.log(i + 1, 0x100))
    # breaks for large `i`: the float logarithm can round up across an integer
    # boundary and report one byte too many (floats cannot even represent
    # integers >= 2**53 exactly).
    return (i.bit_length() + 7) // 8
def bit_length(i: int) -> int:
    """Returns the minimal amount of bits needed to represent unsigned integer `i`.

    Note: yields 0 for i == 0, matching the original contract.
    """
    # int.bit_length() is exact and agrees with ceil(log2(i + 1)) for every
    # non-negative integer; the previous math.ceil(math.log(i + 1, 2)) can be
    # off by one for large `i` because of floating-point rounding.
    return i.bit_length()
def int_to_bytes(i: int, length: int=-1) -> bytes:
    """Converts integer to a MSB-first byte sequence using the least amount of bytes possible.

    :param i: unsigned integer to encode
    :param length: if >= 0, zero-pad to exactly this many bytes (OverflowError
        if `i` does not fit); with the default -1 the minimal length is used
        (0 encodes to b'', as before).
    """
    # The byte count is computed with exact integer arithmetic instead of the
    # float-log-based byte_length() helper, which can be off by one for huge
    # integers due to floating-point rounding.
    size = (i.bit_length() + 7) // 8 if length == -1 else length
    return i.to_bytes(size, "big")
def bytes_to_int(b: bytes) -> int:
    """Interpret `b` as an MSB-first (big-endian) unsigned integer."""
    return int.from_bytes(b, byteorder="big")
| 32.357143 | 96 | 0.677704 | from typing import Union
import math
AbstractText = Union[int, bytes]
def byte_length(i: int) -> int:
    # Minimal bytes for unsigned `i` (0 for 0). Exact integer arithmetic: the
    # previous math.ceil(math.log(i + 1, 0x100)) can round across an integer
    # boundary for large `i` and report one byte too many.
    return (i.bit_length() + 7) // 8
def bit_length(i: int) -> int:
    # Minimal bits for unsigned `i` (0 for 0); exact, unlike the float-based
    # math.ceil(math.log(i + 1, 2)).
    return i.bit_length()
def int_to_bytes(i: int, length: int=-1) -> bytes:
    # MSB-first encoding; minimal length when `length` == -1 (0 -> b'').
    return i.to_bytes(byte_length(i) if length == -1 else length, "big")
def bytes_to_int(b: bytes) -> int:
    # MSB-first (big-endian) decoding.
    return int.from_bytes(b, "big")
| true | true |
f7119bbdc9310255f89817b25365b0356a1b07f4 | 960 | py | Python | lib/galaxy/jobs/runners/lwr_client/transport/__init__.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | 1 | 2016-08-17T06:36:03.000Z | 2016-08-17T06:36:03.000Z | lib/galaxy/jobs/runners/lwr_client/transport/__init__.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | 1 | 2015-02-21T18:48:19.000Z | 2015-02-27T15:50:32.000Z | lib/galaxy/jobs/runners/lwr_client/transport/__init__.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | 3 | 2015-02-22T13:34:16.000Z | 2020-10-01T01:28:04.000Z | from .standard import Urllib2Transport
from .curl import PycurlTransport
import os
def get_transport(transport_type=None, os_module=os):
transport_type = __get_transport_type(transport_type, os_module)
if transport_type == 'urllib':
transport = Urllib2Transport()
else:
transport = PycurlTransport()
return transport
def __get_transport_type(transport_type, os_module):
if not transport_type:
use_curl = os_module.getenv('LWR_CURL_TRANSPORT', "0")
# If LWR_CURL_TRANSPORT is unset or set to 0, use default,
# else use curl.
if use_curl.isdigit() and not int(use_curl):
transport_type = 'urllib'
else:
transport_type = 'curl'
return transport_type
# TODO: Provide urllib implementation if these unavailable,
# also explore a requests+poster option.
from .curl import get_file
from .curl import post_file
__all__ = [get_transport, get_file, post_file]
| 30 | 68 | 0.713542 | from .standard import Urllib2Transport
from .curl import PycurlTransport
import os
def get_transport(transport_type=None, os_module=os):
transport_type = __get_transport_type(transport_type, os_module)
if transport_type == 'urllib':
transport = Urllib2Transport()
else:
transport = PycurlTransport()
return transport
def __get_transport_type(transport_type, os_module):
if not transport_type:
use_curl = os_module.getenv('LWR_CURL_TRANSPORT', "0")
if use_curl.isdigit() and not int(use_curl):
transport_type = 'urllib'
else:
transport_type = 'curl'
return transport_type
from .curl import get_file
from .curl import post_file
__all__ = [get_transport, get_file, post_file]
| true | true |
f7119cd11533daac3bec25c889502995c3aaed94 | 2,212 | py | Python | api/contests/migrations/0001_squashed_0003_auto_20200410_1511.py | selelab/contests | 5a239e306aeb84882fdee56ed8ba64721308cbb6 | [
"MIT"
] | null | null | null | api/contests/migrations/0001_squashed_0003_auto_20200410_1511.py | selelab/contests | 5a239e306aeb84882fdee56ed8ba64721308cbb6 | [
"MIT"
] | 6 | 2021-06-04T22:53:39.000Z | 2022-02-18T23:53:51.000Z | api/contests/migrations/0001_squashed_0003_auto_20200410_1511.py | selelab/contests | 5a239e306aeb84882fdee56ed8ba64721308cbb6 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-04-11 03:48
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
replaces = [('contests', '0001_initial'), ('contests', '0002_auto_20200410_1324'), ('contests', '0003_auto_20200410_1511')]
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contests',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=50)),
('start', models.DateTimeField(null=True)),
('end', models.DateTimeField(null=True)),
],
options={
'db_table': 'contests',
},
),
migrations.CreateModel(
name='Tasks',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('title', models.CharField(max_length=50)),
('sub_title', models.CharField(max_length=50)),
('text', models.CharField(max_length=2048)),
],
options={
'db_table': 'tasks',
'ordering': ['id'],
},
),
migrations.CreateModel(
name='Teams',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
('ip_address', models.CharField(max_length=15)),
('vs_liveshare_link', models.CharField(max_length=2048)),
('github_branch_name', models.CharField(max_length=50)),
('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contests.Contests')),
],
options={
'db_table': 'teams',
'ordering': ['id'],
},
),
migrations.AddField(
model_name='contests',
name='tasks',
field=models.ManyToManyField(to='contests.Tasks'),
),
]
| 34.5625 | 127 | 0.529385 |
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
replaces = [('contests', '0001_initial'), ('contests', '0002_auto_20200410_1324'), ('contests', '0003_auto_20200410_1511')]
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contests',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=50)),
('start', models.DateTimeField(null=True)),
('end', models.DateTimeField(null=True)),
],
options={
'db_table': 'contests',
},
),
migrations.CreateModel(
name='Tasks',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('title', models.CharField(max_length=50)),
('sub_title', models.CharField(max_length=50)),
('text', models.CharField(max_length=2048)),
],
options={
'db_table': 'tasks',
'ordering': ['id'],
},
),
migrations.CreateModel(
name='Teams',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
('ip_address', models.CharField(max_length=15)),
('vs_liveshare_link', models.CharField(max_length=2048)),
('github_branch_name', models.CharField(max_length=50)),
('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contests.Contests')),
],
options={
'db_table': 'teams',
'ordering': ['id'],
},
),
migrations.AddField(
model_name='contests',
name='tasks',
field=models.ManyToManyField(to='contests.Tasks'),
),
]
| true | true |
f7119d599e25a3f54a577d53867e596bb48c317c | 1,556 | py | Python | azure-cognitiveservices-vision-customvision/azure/cognitiveservices/vision/customvision/training/models/tag.py | bulentelmaci/azure-sdk-for-python | 9723d1baf6dde51c069e172ff0f21eb925d362f1 | [
"MIT"
] | null | null | null | azure-cognitiveservices-vision-customvision/azure/cognitiveservices/vision/customvision/training/models/tag.py | bulentelmaci/azure-sdk-for-python | 9723d1baf6dde51c069e172ff0f21eb925d362f1 | [
"MIT"
] | null | null | null | azure-cognitiveservices-vision-customvision/azure/cognitiveservices/vision/customvision/training/models/tag.py | bulentelmaci/azure-sdk-for-python | 9723d1baf6dde51c069e172ff0f21eb925d362f1 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Tag(Model):
"""Represents a Tag.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Gets the Tag ID
:vartype id: str
:param name: Gets or sets the name of the tag
:type name: str
:param description: Gets or sets the description of the tag
:type description: str
:ivar image_count: Gets the number of images with this tag
:vartype image_count: int
"""
_validation = {
'id': {'readonly': True},
'image_count': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'Id', 'type': 'str'},
'name': {'key': 'Name', 'type': 'str'},
'description': {'key': 'Description', 'type': 'str'},
'image_count': {'key': 'ImageCount', 'type': 'int'},
}
def __init__(self, **kwargs):
super(Tag, self).__init__(**kwargs)
self.id = None
self.name = kwargs.get('name', None)
self.description = kwargs.get('description', None)
self.image_count = None
| 31.755102 | 76 | 0.568123 |
from msrest.serialization import Model
class Tag(Model):
    """Represents a Tag (msrest serialization model).

    `id` and `image_count` are read-only: populated by the server and
    ignored when sending a request.
    """
    # Fields the client may never set.
    _validation = {
        'id': {'readonly': True},
        'image_count': {'readonly': True},
    }
    # Wire-format key and type for each attribute.
    _attribute_map = {
        'id': {'key': 'Id', 'type': 'str'},
        'name': {'key': 'Name', 'type': 'str'},
        'description': {'key': 'Description', 'type': 'str'},
        'image_count': {'key': 'ImageCount', 'type': 'int'},
    }
    def __init__(self, **kwargs):
        super(Tag, self).__init__(**kwargs)
        self.id = None
        self.name = kwargs.get('name', None)
        self.description = kwargs.get('description', None)
        self.image_count = None
| true | true |
f7119d7c59ca6eac52878c1e416f7ea7327c9543 | 4,445 | py | Python | IOTSocket/IOTSocketClient.py | AbhijithAJ/IOTSocket | 1a27c12491edc31b1c4fab8bcda34c643a5ef21c | [
"MIT"
] | 51 | 2020-02-19T16:46:32.000Z | 2022-03-19T08:51:35.000Z | IOTSocket/IOTSocketClient.py | AbhijithAJ/IOTSocket | 1a27c12491edc31b1c4fab8bcda34c643a5ef21c | [
"MIT"
] | null | null | null | IOTSocket/IOTSocketClient.py | AbhijithAJ/IOTSocket | 1a27c12491edc31b1c4fab8bcda34c643a5ef21c | [
"MIT"
] | 6 | 2020-02-19T16:46:43.000Z | 2021-11-23T13:37:03.000Z | '''
Developed by Abhijith Boppe - linkedin.com/in/abhijith-boppe/
'''
import socket
import ssl
import time
data_maxLength = 65535
fields_maxLength =1024
sock = ''
device_id = ''
device_key = ''
time_stamps = []
def connectionSet(host, port, id_, key, Encrypt=1, cert_path=None):
    """Open the (optionally TLS-wrapped) socket and store the device identity
    in the module globals used by sendData/recvData."""
    global sock, device_id, device_key, time_stamps
    device_id = id_
    device_key = key
    time_stamps = []  # reset replay-protection history for the new session
    sock = socket.create_connection((host, port))
    if Encrypt == 1:
        # NOTE(review): this SSLContext is created, given the CA file, then
        # discarded — it is never used for the wrap below. Combined with
        # cert_reqs=ssl.CERT_NONE the server certificate is NOT verified;
        # confirm whether that is intentional.
        ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT).load_verify_locations(cert_path)
        # NOTE(review): ssl.wrap_socket is deprecated (removed in Python 3.12);
        # SSLContext.wrap_socket is the supported replacement.
        sock = ssl.wrap_socket(sock, keyfile=None, certfile=None, server_side=False, cert_reqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_SSLv23)
    sock.settimeout(1)  # 1 s timeout drives the non-blocking poll in recvData
def chkTime(server_time, device_time):
    """
    Validate a server timestamp against the device clock and make sure it has
    not been seen before (replay protection).

    Returns 1 when the timestamp is fresh and within ``time_drop_max`` seconds
    of the device time, 0 otherwise; raises on a replayed timestamp or when
    more than 100 requests accumulate inside the tracking window.
    """
    global time_stamps
    time_drop_max = 3  # packets more than 3 seconds off are rejected
    device_time = float(device_time)
    server_time = float(server_time)
    if server_time in time_stamps:
        # Fix: the original had an unreachable `return False` after this
        # raise; the exception is (and remains) the effective behaviour.
        raise Exception(f"ERROR: Replay attack observer. Time stamps:{time_stamps}, Replayed time: {server_time}")
    if len(time_stamps) >= 100:
        # Too many requests tracked inside the window: treat as a flood.
        raise Exception(
            "ERROR: DOS attack more than 100 requests from server in 30sec")
    time_diff = abs(device_time - server_time)
    if len(time_stamps) > 1:
        # Drop stale history once the newest recorded stamp is out of range
        # (keeps memory usage bounded).
        if abs(time_stamps[-1] - server_time) > time_drop_max:
            time_stamps = []
    if time_diff < time_drop_max:
        time_stamps.append(server_time)
        return 1
    # Fix: the original returned None (falsy only by accident) when
    # time_diff == time_drop_max; out-of-range now uniformly returns 0.
    return 0
def recvData():
    """Poll the socket once; return the payload of the first complete IOT
    message, '' when nothing arrived, raising on protocol violations."""
    time_now = f'{time.time():.4f}'
    try:
        # 65535 max data (including headers)
        data = sock.recv(data_maxLength)
    except socket.timeout as _:
        # 1 s timeout set in connectionSet: no data during this poll.
        data = b''
        pass
    except Exception as _:
        raise Exception("socket closed/refused by server")
    data = data.decode()
    if not data:
        return ''
    else:
        data = data.split('|#|')  # split data at the message delimiter
        while '' in data:
            data.remove('')
        if data[0]:  # clear the remaining queue/buffer and read only first element/data
            data = data[0]
            # split headers and data
            fields, data = data.split("\r\n\r\n", 1)
            # NOTE(review): when the header section is oversized, `fields`
            # becomes the int 0 and the split('\r\n') below raises
            # AttributeError instead of a protocol error — confirm intended.
            fields, data = fields.strip() if len(
                fields) < fields_maxLength else 0, data.strip() if len(data) < (data_maxLength-3000) else ''
            headers = {}
            for field in fields.split('\r\n'):
                # split each line into header field name and value
                key, value = field.split(':')
                headers[key] = value
                if len(headers) > 10:
                    break
            if len(headers) != 5 or len(data) < 5:
                raise Exception("ERROR: Header length issue ")
            else:
                if(headers['IOT'] == '1.1'):
                    # Reject stale/replayed timestamps (see chkTime).
                    time_chk = chkTime(headers['TIME'], time_now)
                    if(time_chk):
                        return data
                    else:
                        raise Exception(
                            f"ERROR: Incorrect time stamp. server time {headers['TIME']} client time {time_now}")
                else:
                    raise Exception(
                        f"ERROR: Incorrect IOT version detected {headers['IOT']}")
def _headers():
    """Assemble the protocol header block (trailing newline included);
    sendData converts the newlines to CRLF before transmission."""
    stamp = f'{time.time():.4f}'
    return (
        "IOT:1.1\n"
        "DATE:12/12/2019\n"
        f"TIME:{stamp}\n"
        f"DEVICE:{device_id}\n"
        f"KEY:{device_key}\n"
    )
def sendData(data):
    """Frame `data` with the protocol headers and the '|#|' terminator and
    send it; payloads outside 6..59999 characters are silently dropped."""
    if len(data) > 5 and len(data) < 60000:
        try:
            headers = _headers()
            # CRLF header block + payload (embedded delimiters stripped)
            # + '|#|' end-of-message marker.
            data = headers.replace('\n','\r\n') + data.replace('|#|','') + '|#|'
            # NOTE(review): sock.send may transmit fewer bytes than given;
            # sendall would guarantee full delivery — confirm.
            sock.send(data.encode())
        except socket.timeout as e:
            raise Exception("Socket time out")
        except Exception as e:
            raise Exception("Socket closed by server")
    # ConnectionResetError(10054, 'An existing connection was forcibly closed by the remote host', None, 10054, None)
| 36.434426 | 142 | 0.575928 | import socket
import ssl
import time
data_maxLength = 65535
fields_maxLength =1024
sock = ''
device_id = ''
device_key = ''
time_stamps = []
def connectionSet(host, port, id_, key, Encrypt=1, cert_path=None):
global sock, device_id, device_key, time_stamps
device_id = id_
device_key = key
time_stamps = []
sock = socket.create_connection((host, port))
if Encrypt == 1:
ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT).load_verify_locations(cert_path)
sock = ssl.wrap_socket(sock, keyfile=None, certfile=None, server_side=False, cert_reqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_SSLv23)
sock.settimeout(1)
def chkTime(server_time, device_time):
global time_stamps
time_drop_max = 3
device_time = float(device_time)
server_time = float(server_time)
if(server_time in time_stamps):
raise Exception(f"ERROR: Replay attack observer. Time stamps:{time_stamps}, Replayed time: {server_time}")
return False
else:
if len(time_stamps) < 100:
time_diff = abs(device_time - server_time)
if len(time_stamps) > 1:
if (abs(time_stamps[-1] - server_time) > time_drop_max):
time_stamps = []
if (time_diff > time_drop_max):
return 0
elif (time_diff < time_drop_max):
time_stamps.append(server_time)
return 1
else:
raise Exception(
"ERROR: DOS attack more than 100 requests from server in 30sec")
def recvData():
time_now = f'{time.time():.4f}'
try:
data = sock.recv(data_maxLength)
except socket.timeout as _:
data = b''
pass
except Exception as _:
raise Exception("socket closed/refused by server")
data = data.decode()
if not data:
return ''
else:
data = data.split('|#|')
while '' in data:
data.remove('')
if data[0]:
data = data[0]
fields, data = data.split("\r\n\r\n", 1)
fields, data = fields.strip() if len(
fields) < fields_maxLength else 0, data.strip() if len(data) < (data_maxLength-3000) else ''
headers = {}
for field in fields.split('\r\n'):
key, value = field.split(':')
headers[key] = value
if len(headers) > 10:
break
if len(headers) != 5 or len(data) < 5:
raise Exception("ERROR: Header length issue ")
else:
if(headers['IOT'] == '1.1'):
time_chk = chkTime(headers['TIME'], time_now)
if(time_chk):
return data
else:
raise Exception(
f"ERROR: Incorrect time stamp. server time {headers['TIME']} client time {time_now}")
else:
raise Exception(
f"ERROR: Incorrect IOT version detected {headers['IOT']}")
def _headers():
time_now = f'{time.time():.4f}'
headers = '''IOT:1.1
DATE:12/12/2019
TIME:{time_now}
DEVICE:{device_id}
KEY:{device_key}
'''.format(time_now=time_now, device_id= device_id, device_key=device_key)
return headers
def sendData(data):
if len(data) > 5 and len(data) < 60000:
try:
headers = _headers()
data = headers.replace('\n','\r\n') + data.replace('|#|','') + '|#|'
sock.send(data.encode())
except socket.timeout as e:
raise Exception("Socket time out")
except Exception as e:
raise Exception("Socket closed by server")
| true | true |
f7119ee2963ec0639fd70a9f4c454952354ead62 | 299 | py | Python | arrow/commands/cannedvalues/findAllValues.py | trstickland/python-apollo | 04cccf2923e6977b2cfb6ebb2ff7e5227b740bcb | [
"MIT"
] | null | null | null | arrow/commands/cannedvalues/findAllValues.py | trstickland/python-apollo | 04cccf2923e6977b2cfb6ebb2ff7e5227b740bcb | [
"MIT"
] | null | null | null | arrow/commands/cannedvalues/findAllValues.py | trstickland/python-apollo | 04cccf2923e6977b2cfb6ebb2ff7e5227b740bcb | [
"MIT"
] | null | null | null | import click
from arrow.cli import pass_context
from arrow.decorators import custom_exception, dict_output
@click.command('findAllValues')
@pass_context
@custom_exception
@dict_output
def cli(ctx):
"""TODO: Undocumented
Output:
???
"""
return ctx.gi.cannedvalues.findAllValues()
| 16.611111 | 58 | 0.749164 | import click
from arrow.cli import pass_context
from arrow.decorators import custom_exception, dict_output
@click.command('findAllValues')
@pass_context
@custom_exception
@dict_output
def cli(ctx):
return ctx.gi.cannedvalues.findAllValues()
| true | true |
f7119f18095bc966c56c2eadd6c672d850c9ddfc | 828 | py | Python | tests/models/test_deployment.py | Hydrospheredata/mist-cli | 1eb6b2c660e5c1718ecaa5b58648821d9cacc6a9 | [
"Apache-2.0"
] | 3 | 2017-10-12T13:04:10.000Z | 2018-08-06T08:37:04.000Z | tests/models/test_deployment.py | Hydrospheredata/mist-cli | 1eb6b2c660e5c1718ecaa5b58648821d9cacc6a9 | [
"Apache-2.0"
] | 20 | 2017-10-05T13:40:36.000Z | 2019-09-05T13:56:01.000Z | tests/models/test_deployment.py | Hydrospheredata/mist-cli | 1eb6b2c660e5c1718ecaa5b58648821d9cacc6a9 | [
"Apache-2.0"
] | 5 | 2018-01-28T09:55:24.000Z | 2021-10-04T06:30:34.000Z | from unittest import TestCase
from pyhocon import ConfigTree
from mist.models import Deployment
class DeploymentTest(TestCase):
def test_create_deployment(self):
Deployment('test', 'Artifact', ConfigTree(), '0.0.1')
def test_get_name(self):
d = Deployment('test', 'Artifact', ConfigTree({
'file-path': 'test-name.py'
}), '0.0.1')
self.assertEqual(d.get_name(), 'test_0.0.1.py')
def test_with_user_name(self):
d = Deployment('test', 'Function', ConfigTree({
'context': 'foo',
'path': 'test-name.jar'
}), '0.0.1')
d.with_user('test_name')
self.assertEqual(d.name, 'test_name_test')
self.assertEqual(d.data['path'], 'test_name_test-name.jar')
self.assertEqual(d.data['context'], 'test_name_foo')
| 29.571429 | 67 | 0.612319 | from unittest import TestCase
from pyhocon import ConfigTree
from mist.models import Deployment
class DeploymentTest(TestCase):
def test_create_deployment(self):
Deployment('test', 'Artifact', ConfigTree(), '0.0.1')
def test_get_name(self):
d = Deployment('test', 'Artifact', ConfigTree({
'file-path': 'test-name.py'
}), '0.0.1')
self.assertEqual(d.get_name(), 'test_0.0.1.py')
def test_with_user_name(self):
d = Deployment('test', 'Function', ConfigTree({
'context': 'foo',
'path': 'test-name.jar'
}), '0.0.1')
d.with_user('test_name')
self.assertEqual(d.name, 'test_name_test')
self.assertEqual(d.data['path'], 'test_name_test-name.jar')
self.assertEqual(d.data['context'], 'test_name_foo')
| true | true |
f7119f62d7de298128757da8c1cef7e04a214f8c | 2,449 | py | Python | examples/AdafruitDHT.py | rewisyoung/Adafruit_Python_DHT | d3f525fea0ed2b5dc899eb1584015b6bd092a46a | [
"MIT"
] | null | null | null | examples/AdafruitDHT.py | rewisyoung/Adafruit_Python_DHT | d3f525fea0ed2b5dc899eb1584015b6bd092a46a | [
"MIT"
] | null | null | null | examples/AdafruitDHT.py | rewisyoung/Adafruit_Python_DHT | d3f525fea0ed2b5dc899eb1584015b6bd092a46a | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import Adafruit_DHT
import time
# Parse command line parameters.
sensor_args = { '11': Adafruit_DHT.DHT11,
'22': Adafruit_DHT.DHT22,
'2302': Adafruit_DHT.AM2302 }
if len(sys.argv) == 3 and sys.argv[1] in sensor_args:
sensor = sensor_args[sys.argv[1]]
pin = sys.argv[2]
else:
print('Usage: sudo ./Adafruit_DHT.py [11|22|2302] <GPIO pin number>')
print('Example: sudo ./Adafruit_DHT.py 2302 4 - Read from an AM2302 connected to GPIO pin #4')
sys.exit(1)
# Try to grab a sensor reading. Use the read_retry method which will retry up
# to 15 times to get a sensor reading (waiting 2 seconds between each retry).
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
# Un-comment the line below to convert the temperature to Fahrenheit.
# temperature = temperature * 9/5.0 + 32
# Note that sometimes you won't get a reading and
# the results will be null (because Linux can't
# guarantee the timing of calls to read the sensor).
# If this happens try again!
while (1):
time.sleep(1)
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
if humidity is not None and temperature is not None:
print('Temp={0:0.1f}* Humidity={1:0.1f}%'.format(temperature, humidity))
else:
print('Failed to get reading. Try again!')
sys.exit(1)
| 41.508475 | 98 | 0.740302 |
import sys
import Adafruit_DHT
import time
sensor_args = { '11': Adafruit_DHT.DHT11,
'22': Adafruit_DHT.DHT22,
'2302': Adafruit_DHT.AM2302 }
if len(sys.argv) == 3 and sys.argv[1] in sensor_args:
sensor = sensor_args[sys.argv[1]]
pin = sys.argv[2]
else:
print('Usage: sudo ./Adafruit_DHT.py [11|22|2302] <GPIO pin number>')
print('Example: sudo ./Adafruit_DHT.py 2302 4 - Read from an AM2302 connected to GPIO pin #4')
sys.exit(1)
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
# the results will be null (because Linux can't
while (1):
time.sleep(1)
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
if humidity is not None and temperature is not None:
print('Temp={0:0.1f}* Humidity={1:0.1f}%'.format(temperature, humidity))
else:
print('Failed to get reading. Try again!')
sys.exit(1)
| true | true |
f7119fbceb12284f90eb8b90d388f4fb365e1026 | 510 | py | Python | leetcode/053/53.py | shankar-shiv/CS1010E_Kattis_practice | 9a8597b7ab61d5afa108a8b943ca2bb3603180c6 | [
"MIT"
] | null | null | null | leetcode/053/53.py | shankar-shiv/CS1010E_Kattis_practice | 9a8597b7ab61d5afa108a8b943ca2bb3603180c6 | [
"MIT"
] | null | null | null | leetcode/053/53.py | shankar-shiv/CS1010E_Kattis_practice | 9a8597b7ab61d5afa108a8b943ca2bb3603180c6 | [
"MIT"
] | null | null | null | '''The thought follows a simple rule:
If the sum of a subarray is positive, it has possible to make the next value bigger, so we keep do it until it turn to negative.
If the sum is negative, it has no use to the next element, so we break.
it is a game of sum, not the elements.'''
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
def Solution(nums):
for i in range(1, len(nums)):
if nums[i - 1] > 0:
nums[i] += nums[i - 1]
print(nums)
return max(nums)
print(Solution(nums))
| 26.842105 | 128 | 0.615686 |
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
def Solution(nums):
for i in range(1, len(nums)):
if nums[i - 1] > 0:
nums[i] += nums[i - 1]
print(nums)
return max(nums)
print(Solution(nums))
| true | true |
f7119fd3687878f90fc548a161cfe2f3cf9d90a5 | 12,909 | py | Python | tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lsmr.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | 1 | 2021-02-05T13:19:58.000Z | 2021-02-05T13:19:58.000Z | tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lsmr.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | 1 | 2018-04-15T22:59:15.000Z | 2018-04-15T22:59:15.000Z | tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lsmr.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | null | null | null | """
Copyright (C) 2010 David Fong and Michael Saunders
LSMR uses an iterative method.
07 Jun 2010: Documentation updated
03 Jun 2010: First release version in Python
David Chin-lung Fong clfong@stanford.edu
Institute for Computational and Mathematical Engineering
Stanford University
Michael Saunders saunders@stanford.edu
Systems Optimization Laboratory
Dept of MS&E, Stanford University.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['lsmr']
from numpy import zeros, infty
from numpy.linalg import norm
from math import sqrt
from scipy.sparse.linalg.interface import aslinearoperator
from .lsqr import _sym_ortho
def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
maxiter=None, show=False):
"""Iterative solver for least-squares problems.
lsmr solves the system of linear equations ``Ax = b``. If the system
is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.
A is a rectangular matrix of dimension m-by-n, where all cases are
allowed: m = n, m > n, or m < n. B is a vector of length m.
The matrix A may be dense or sparse (usually sparse).
.. versionadded:: 0.11.0
Parameters
----------
A : {matrix, sparse matrix, ndarray, LinearOperator}
Matrix A in the linear system.
b : (m,) ndarray
Vector b in the linear system.
damp : float
Damping factor for regularized least-squares. `lsmr` solves
the regularized least-squares problem::
min ||(b) - ( A )x||
||(0) (damp*I) ||_2
where damp is a scalar. If damp is None or 0, the system
is solved without regularization.
atol, btol : float
Stopping tolerances. `lsmr` continues iterations until a
certain backward error estimate is smaller than some quantity
depending on atol and btol. Let ``r = b - Ax`` be the
residual vector for the current approximate solution ``x``.
If ``Ax = b`` seems to be consistent, ``lsmr`` terminates
when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
Otherwise, lsmr terminates when ``norm(A^{T} r) <=
atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (say),
the final ``norm(r)`` should be accurate to about 6
digits. (The final x will usually have fewer correct digits,
depending on ``cond(A)`` and the size of LAMBDA.) If `atol`
or `btol` is None, a default value of 1.0e-6 will be used.
Ideally, they should be estimates of the relative error in the
entries of A and B respectively. For example, if the entries
of `A` have 7 correct digits, set atol = 1e-7. This prevents
the algorithm from doing unnecessary work beyond the
uncertainty of the input data.
conlim : float
`lsmr` terminates if an estimate of ``cond(A)`` exceeds
`conlim`. For compatible systems ``Ax = b``, conlim could be
as large as 1.0e+12 (say). For least-squares problems,
`conlim` should be less than 1.0e+8. If `conlim` is None, the
default value is 1e+8. Maximum precision can be obtained by
setting ``atol = btol = conlim = 0``, but the number of
iterations may then be excessive.
maxiter : int
`lsmr` terminates if the number of iterations reaches
`maxiter`. The default is ``maxiter = min(m, n)``. For
ill-conditioned systems, a larger value of `maxiter` may be
needed.
show : bool
Print iterations logs if ``show=True``.
Returns
-------
x : ndarray of float
Least-square solution returned.
istop : int
istop gives the reason for stopping::
istop = 0 means x=0 is a solution.
= 1 means x is an approximate solution to A*x = B,
according to atol and btol.
= 2 means x approximately solves the least-squares problem
according to atol.
= 3 means COND(A) seems to be greater than CONLIM.
= 4 is the same as 1 with atol = btol = eps (machine
precision)
= 5 is the same as 2 with atol = eps.
= 6 is the same as 3 with CONLIM = 1/eps.
= 7 means ITN reached maxiter before the other stopping
conditions were satisfied.
itn : int
Number of iterations used.
normr : float
``norm(b-Ax)``
normar : float
``norm(A^T (b - Ax))``
norma : float
``norm(A)``
conda : float
Condition number of A.
normx : float
``norm(x)``
References
----------
.. [1] D. C.-L. Fong and M. A. Saunders,
"LSMR: An iterative algorithm for sparse least-squares problems",
SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.
http://arxiv.org/abs/1006.0758
.. [2] LSMR Software, http://www.stanford.edu/~clfong/lsmr.html
"""
A = aslinearoperator(A)
b = b.squeeze()
msg=('The exact solution is x = 0 ',
'Ax - b is small enough, given atol, btol ',
'The least-squares solution is good enough, given atol ',
'The estimate of cond(Abar) has exceeded conlim ',
'Ax - b is small enough for this machine ',
'The least-squares solution is good enough for this machine',
'Cond(Abar) seems to be too large for this machine ',
'The iteration limit has been reached ')
hdg1 = ' itn x(1) norm r norm A''r'
hdg2 = ' compatible LS norm A cond A'
pfreq = 20 # print frequency (for repeating the heading)
pcount = 0 # print counter
m, n = A.shape
# stores the num of singular values
minDim = min([m, n])
if maxiter is None:
maxiter = minDim
if show:
print(' ')
print('LSMR Least-squares solution of Ax = b\n')
print('The matrix A has %8g rows and %8g cols' % (m, n))
print('damp = %20.14e\n' % (damp))
print('atol = %8.2e conlim = %8.2e\n' % (atol, conlim))
print('btol = %8.2e maxiter = %8g\n' % (btol, maxiter))
u = b
beta = norm(u)
v = zeros(n)
alpha = 0
if beta > 0:
u = (1 / beta) * u
v = A.rmatvec(u)
alpha = norm(v)
if alpha > 0:
v = (1 / alpha) * v
# Initialize variables for 1st iteration.
itn = 0
zetabar = alpha * beta
alphabar = alpha
rho = 1
rhobar = 1
cbar = 1
sbar = 0
h = v.copy()
hbar = zeros(n)
x = zeros(n)
# Initialize variables for estimation of ||r||.
betadd = beta
betad = 0
rhodold = 1
tautildeold = 0
thetatilde = 0
zeta = 0
d = 0
# Initialize variables for estimation of ||A|| and cond(A)
normA2 = alpha * alpha
maxrbar = 0
minrbar = 1e+100
normA = sqrt(normA2)
condA = 1
normx = 0
# Items for use in stopping rules.
normb = beta
istop = 0
ctol = 0
if conlim > 0:
ctol = 1 / conlim
normr = beta
# Reverse the order here from the original matlab code because
# there was an error on return when arnorm==0
normar = alpha * beta
if normar == 0:
if show:
print(msg[0])
return x, istop, itn, normr, normar, normA, condA, normx
if show:
print(' ')
print(hdg1, hdg2)
test1 = 1
test2 = alpha / beta
str1 = '%6g %12.5e' % (itn, x[0])
str2 = ' %10.3e %10.3e' % (normr, normar)
str3 = ' %8.1e %8.1e' % (test1, test2)
print(''.join([str1, str2, str3]))
# Main iteration loop.
while itn < maxiter:
itn = itn + 1
# Perform the next step of the bidiagonalization to obtain the
# next beta, u, alpha, v. These satisfy the relations
# beta*u = a*v - alpha*u,
# alpha*v = A'*u - beta*v.
u = A.matvec(v) - alpha * u
beta = norm(u)
if beta > 0:
u = (1 / beta) * u
v = A.rmatvec(u) - beta * v
alpha = norm(v)
if alpha > 0:
v = (1 / alpha) * v
# At this point, beta = beta_{k+1}, alpha = alpha_{k+1}.
# Construct rotation Qhat_{k,2k+1}.
chat, shat, alphahat = _sym_ortho(alphabar, damp)
# Use a plane rotation (Q_i) to turn B_i to R_i
rhoold = rho
c, s, rho = _sym_ortho(alphahat, beta)
thetanew = s*alpha
alphabar = c*alpha
# Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar
rhobarold = rhobar
zetaold = zeta
thetabar = sbar * rho
rhotemp = cbar * rho
cbar, sbar, rhobar = _sym_ortho(cbar * rho, thetanew)
zeta = cbar * zetabar
zetabar = - sbar * zetabar
# Update h, h_hat, x.
hbar = h - (thetabar * rho / (rhoold * rhobarold)) * hbar
x = x + (zeta / (rho * rhobar)) * hbar
h = v - (thetanew / rho) * h
# Estimate of ||r||.
# Apply rotation Qhat_{k,2k+1}.
betaacute = chat * betadd
betacheck = -shat * betadd
# Apply rotation Q_{k,k+1}.
betahat = c * betaacute
betadd = -s * betaacute
# Apply rotation Qtilde_{k-1}.
# betad = betad_{k-1} here.
thetatildeold = thetatilde
ctildeold, stildeold, rhotildeold = _sym_ortho(rhodold, thetabar)
thetatilde = stildeold* rhobar
rhodold = ctildeold * rhobar
betad = - stildeold * betad + ctildeold * betahat
# betad = betad_k here.
# rhodold = rhod_k here.
tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold
taud = (zeta - thetatilde * tautildeold) / rhodold
d = d + betacheck * betacheck
normr = sqrt(d + (betad - taud)**2 + betadd * betadd)
# Estimate ||A||.
normA2 = normA2 + beta * beta
normA = sqrt(normA2)
normA2 = normA2 + alpha * alpha
# Estimate cond(A).
maxrbar = max(maxrbar, rhobarold)
if itn > 1:
minrbar= min(minrbar, rhobarold)
condA = max(maxrbar, rhotemp) / min(minrbar, rhotemp)
# Test for convergence.
# Compute norms for convergence testing.
normar = abs(zetabar)
normx = norm(x)
# Now use these norms to estimate certain other quantities,
# some of which will be small near a solution.
test1 = normr / normb
if (normA * normr) != 0:
test2 = normar / (normA * normr)
else:
test2 = infty
test3 = 1 / condA
t1 = test1 / (1 + normA * normx / normb)
rtol = btol + atol * normA * normx / normb
# The following tests guard against extremely small values of
# atol, btol or ctol. (The user may have set any or all of
# the parameters atol, btol, conlim to 0.)
# The effect is equivalent to the normAl tests using
# atol = eps, btol = eps, conlim = 1/eps.
if itn >= maxiter:
istop = 7
if 1 + test3 <= 1:
istop = 6
if 1 + test2 <= 1:
istop = 5
if 1 + t1 <= 1:
istop = 4
# Allow for tolerances set by the user.
if test3 <= ctol:
istop = 3
if test2 <= atol:
istop = 2
if test1 <= rtol:
istop = 1
# See if it is time to print something.
if show:
if (n <= 40) or (itn <= 10) or (itn >= maxiter - 10) or \
(itn % 10 == 0) or (test3 <= 1.1 * ctol) or \
(test2 <= 1.1 * atol) or (test1 <= 1.1 * rtol) or \
(istop != 0):
if pcount >= pfreq:
pcount = 0
print(' ')
print(hdg1, hdg2)
pcount = pcount + 1
str1 = '%6g %12.5e' % (itn, x[0])
str2 = ' %10.3e %10.3e' % (normr, normar)
str3 = ' %8.1e %8.1e' % (test1, test2)
str4 = ' %8.1e %8.1e' % (normA, condA)
print(''.join([str1, str2, str3, str4]))
if istop > 0:
break
# Print the stopping condition.
if show:
print(' ')
print('LSMR finished')
print(msg[istop])
print('istop =%8g normr =%8.1e' % (istop, normr))
print(' normA =%8.1e normAr =%8.1e' % (normA, normar))
print('itn =%8g condA =%8.1e' % (itn, condA))
print(' normx =%8.1e' % (normx))
print(str1, str2)
print(str3, str4)
return x, istop, itn, normr, normar, normA, condA, normx
| 31.874074 | 79 | 0.539778 |
from __future__ import division, print_function, absolute_import
__all__ = ['lsmr']
from numpy import zeros, infty
from numpy.linalg import norm
from math import sqrt
from scipy.sparse.linalg.interface import aslinearoperator
from .lsqr import _sym_ortho
def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
maxiter=None, show=False):
A = aslinearoperator(A)
b = b.squeeze()
msg=('The exact solution is x = 0 ',
'Ax - b is small enough, given atol, btol ',
'The least-squares solution is good enough, given atol ',
'The estimate of cond(Abar) has exceeded conlim ',
'Ax - b is small enough for this machine ',
'The least-squares solution is good enough for this machine',
'Cond(Abar) seems to be too large for this machine ',
'The iteration limit has been reached ')
hdg1 = ' itn x(1) norm r norm A''r'
hdg2 = ' compatible LS norm A cond A'
pfreq = 20
pcount = 0
m, n = A.shape
minDim = min([m, n])
if maxiter is None:
maxiter = minDim
if show:
print(' ')
print('LSMR Least-squares solution of Ax = b\n')
print('The matrix A has %8g rows and %8g cols' % (m, n))
print('damp = %20.14e\n' % (damp))
print('atol = %8.2e conlim = %8.2e\n' % (atol, conlim))
print('btol = %8.2e maxiter = %8g\n' % (btol, maxiter))
u = b
beta = norm(u)
v = zeros(n)
alpha = 0
if beta > 0:
u = (1 / beta) * u
v = A.rmatvec(u)
alpha = norm(v)
if alpha > 0:
v = (1 / alpha) * v
itn = 0
zetabar = alpha * beta
alphabar = alpha
rho = 1
rhobar = 1
cbar = 1
sbar = 0
h = v.copy()
hbar = zeros(n)
x = zeros(n)
betadd = beta
betad = 0
rhodold = 1
tautildeold = 0
thetatilde = 0
zeta = 0
d = 0
normA2 = alpha * alpha
maxrbar = 0
minrbar = 1e+100
normA = sqrt(normA2)
condA = 1
normx = 0
normb = beta
istop = 0
ctol = 0
if conlim > 0:
ctol = 1 / conlim
normr = beta
normar = alpha * beta
if normar == 0:
if show:
print(msg[0])
return x, istop, itn, normr, normar, normA, condA, normx
if show:
print(' ')
print(hdg1, hdg2)
test1 = 1
test2 = alpha / beta
str1 = '%6g %12.5e' % (itn, x[0])
str2 = ' %10.3e %10.3e' % (normr, normar)
str3 = ' %8.1e %8.1e' % (test1, test2)
print(''.join([str1, str2, str3]))
while itn < maxiter:
itn = itn + 1
u = A.matvec(v) - alpha * u
beta = norm(u)
if beta > 0:
u = (1 / beta) * u
v = A.rmatvec(u) - beta * v
alpha = norm(v)
if alpha > 0:
v = (1 / alpha) * v
# At this point, beta = beta_{k+1}, alpha = alpha_{k+1}.
# Construct rotation Qhat_{k,2k+1}.
chat, shat, alphahat = _sym_ortho(alphabar, damp)
# Use a plane rotation (Q_i) to turn B_i to R_i
rhoold = rho
c, s, rho = _sym_ortho(alphahat, beta)
thetanew = s*alpha
alphabar = c*alpha
# Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar
rhobarold = rhobar
zetaold = zeta
thetabar = sbar * rho
rhotemp = cbar * rho
cbar, sbar, rhobar = _sym_ortho(cbar * rho, thetanew)
zeta = cbar * zetabar
zetabar = - sbar * zetabar
# Update h, h_hat, x.
hbar = h - (thetabar * rho / (rhoold * rhobarold)) * hbar
x = x + (zeta / (rho * rhobar)) * hbar
h = v - (thetanew / rho) * h
# Estimate of ||r||.
# Apply rotation Qhat_{k,2k+1}.
betaacute = chat * betadd
betacheck = -shat * betadd
# Apply rotation Q_{k,k+1}.
betahat = c * betaacute
betadd = -s * betaacute
# Apply rotation Qtilde_{k-1}.
# betad = betad_{k-1} here.
thetatildeold = thetatilde
ctildeold, stildeold, rhotildeold = _sym_ortho(rhodold, thetabar)
thetatilde = stildeold* rhobar
rhodold = ctildeold * rhobar
betad = - stildeold * betad + ctildeold * betahat
# betad = betad_k here.
# rhodold = rhod_k here.
tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold
taud = (zeta - thetatilde * tautildeold) / rhodold
d = d + betacheck * betacheck
normr = sqrt(d + (betad - taud)**2 + betadd * betadd)
# Estimate ||A||.
normA2 = normA2 + beta * beta
normA = sqrt(normA2)
normA2 = normA2 + alpha * alpha
# Estimate cond(A).
maxrbar = max(maxrbar, rhobarold)
if itn > 1:
minrbar= min(minrbar, rhobarold)
condA = max(maxrbar, rhotemp) / min(minrbar, rhotemp)
# Test for convergence.
# Compute norms for convergence testing.
normar = abs(zetabar)
normx = norm(x)
# Now use these norms to estimate certain other quantities,
# some of which will be small near a solution.
test1 = normr / normb
if (normA * normr) != 0:
test2 = normar / (normA * normr)
else:
test2 = infty
test3 = 1 / condA
t1 = test1 / (1 + normA * normx / normb)
rtol = btol + atol * normA * normx / normb
# The following tests guard against extremely small values of
# atol, btol or ctol. (The user may have set any or all of
# the parameters atol, btol, conlim to 0.)
# The effect is equivalent to the normAl tests using
# atol = eps, btol = eps, conlim = 1/eps.
if itn >= maxiter:
istop = 7
if 1 + test3 <= 1:
istop = 6
if 1 + test2 <= 1:
istop = 5
if 1 + t1 <= 1:
istop = 4
# Allow for tolerances set by the user.
if test3 <= ctol:
istop = 3
if test2 <= atol:
istop = 2
if test1 <= rtol:
istop = 1
# See if it is time to print something.
if show:
if (n <= 40) or (itn <= 10) or (itn >= maxiter - 10) or \
(itn % 10 == 0) or (test3 <= 1.1 * ctol) or \
(test2 <= 1.1 * atol) or (test1 <= 1.1 * rtol) or \
(istop != 0):
if pcount >= pfreq:
pcount = 0
print(' ')
print(hdg1, hdg2)
pcount = pcount + 1
str1 = '%6g %12.5e' % (itn, x[0])
str2 = ' %10.3e %10.3e' % (normr, normar)
str3 = ' %8.1e %8.1e' % (test1, test2)
str4 = ' %8.1e %8.1e' % (normA, condA)
print(''.join([str1, str2, str3, str4]))
if istop > 0:
break
# Print the stopping condition.
if show:
print(' ')
print('LSMR finished')
print(msg[istop])
print('istop =%8g normr =%8.1e' % (istop, normr))
print(' normA =%8.1e normAr =%8.1e' % (normA, normar))
print('itn =%8g condA =%8.1e' % (itn, condA))
print(' normx =%8.1e' % (normx))
print(str1, str2)
print(str3, str4)
return x, istop, itn, normr, normar, normA, condA, normx
| true | true |
f711a06a366032c815f82e1ea837be756e3e5b25 | 36,627 | py | Python | .modules/.sqlmap/lib/core/settings.py | termux-one/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 1,103 | 2018-04-20T14:08:11.000Z | 2022-03-29T06:22:43.000Z | .modules/.sqlmap/lib/core/settings.py | sshourya948/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 29 | 2019-04-03T14:52:38.000Z | 2022-03-24T12:33:05.000Z | .modules/.sqlmap/lib/core/settings.py | sshourya948/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 161 | 2018-04-20T15:57:12.000Z | 2022-03-15T19:16:16.000Z | #!/usr/bin/env python
"""
Copyright (c) 2006-2018 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import os
import random
import re
import subprocess
import string
import sys
import types
from lib.core.datatype import AttribDict
from lib.core.enums import DBMS
from lib.core.enums import DBMS_DIRECTORY_NAME
from lib.core.enums import OS
# sqlmap version (<major>.<minor>.<month>.<monthly commit>)
VERSION = "1.2.11.12"
TYPE = "dev" if VERSION.count('.') > 2 and VERSION.split('.')[-1] != '0' else "stable"
TYPE_COLORS = {"dev": 33, "stable": 90, "pip": 34}
VERSION_STRING = "sqlmap/%s#%s" % ('.'.join(VERSION.split('.')[:-1]) if VERSION.count('.') > 2 and VERSION.split('.')[-1] == '0' else VERSION, TYPE)
DESCRIPTION = "automatic SQL injection and database takeover tool"
SITE = "http://sqlmap.org"
DEV_EMAIL_ADDRESS = "dev@sqlmap.org"
ISSUES_PAGE = "https://github.com/sqlmapproject/sqlmap/issues/new"
GIT_REPOSITORY = "https://github.com/sqlmapproject/sqlmap.git"
GIT_PAGE = "https://github.com/sqlmapproject/sqlmap"
ZIPBALL_PAGE = "https://github.com/sqlmapproject/sqlmap/zipball/master"
# colorful banner
BANNER = """\033[01;33m\
___
__H__
___ ___[.]_____ ___ ___ \033[01;37m{\033[01;%dm%s\033[01;37m}\033[01;33m
|_ -| . [.] | .'| . |
|___|_ [.]_|_|_|__,| _|
|_|V |_| \033[0m\033[4;37m%s\033[0m\n
""" % (TYPE_COLORS.get(TYPE, 31), VERSION_STRING.split('/')[-1], SITE)
# Minimum distance of ratio from kb.matchRatio to result in True
DIFF_TOLERANCE = 0.05
CONSTANT_RATIO = 0.9
# Ratio used in heuristic check for WAF/IPS protected targets
IDS_WAF_CHECK_RATIO = 0.5
# Timeout used in heuristic check for WAF/IPS protected targets
IDS_WAF_CHECK_TIMEOUT = 10
# Lower and upper values for match ratio in case of stable page
LOWER_RATIO_BOUND = 0.02
UPPER_RATIO_BOUND = 0.98
# Markers for special cases when parameter values contain html encoded characters
PARAMETER_AMP_MARKER = "__AMP__"
PARAMETER_SEMICOLON_MARKER = "__SEMICOLON__"
BOUNDARY_BACKSLASH_MARKER = "__BACKSLASH__"
PARTIAL_VALUE_MARKER = "__PARTIAL_VALUE__"
PARTIAL_HEX_VALUE_MARKER = "__PARTIAL_HEX_VALUE__"
URI_QUESTION_MARKER = "__QUESTION_MARK__"
ASTERISK_MARKER = "__ASTERISK_MARK__"
REPLACEMENT_MARKER = "__REPLACEMENT_MARK__"
BOUNDED_INJECTION_MARKER = "__BOUNDED_INJECTION_MARK__"
SAFE_VARIABLE_MARKER = "__SAFE__"
RANDOM_INTEGER_MARKER = "[RANDINT]"
RANDOM_STRING_MARKER = "[RANDSTR]"
SLEEP_TIME_MARKER = "[SLEEPTIME]"
INFERENCE_MARKER = "[INFERENCE]"
SINGLE_QUOTE_MARKER = "[SINGLE_QUOTE]"
PAYLOAD_DELIMITER = "__PAYLOAD_DELIMITER__"
CHAR_INFERENCE_MARK = "%c"
PRINTABLE_CHAR_REGEX = r"[^\x00-\x1f\x7f-\xff]"
# Regular expression used for extraction of table names (useful for (e.g.) MsAccess)
SELECT_FROM_TABLE_REGEX = r"\bSELECT\b.+?\bFROM\s+(?P<result>([\w.]|`[^`<>]+`)+)"
# Regular expression used for recognition of textual content-type
TEXT_CONTENT_TYPE_REGEX = r"(?i)(text|form|message|xml|javascript|ecmascript|json)"
# Regular expression used for recognition of generic permission messages
PERMISSION_DENIED_REGEX = r"(?P<result>(command|permission|access)\s*(was|is)?\s*denied)"
# Regular expression used in recognition of generic protection mechanisms
GENERIC_PROTECTION_REGEX = r"(?i)\b(rejected|blocked|protection|incident|denied|detected|dangerous|firewall)\b"
# Regular expression used for recognition of generic maximum connection messages
MAX_CONNECTIONS_REGEX = r"\bmax.+?\bconnection"
# Maximum consecutive connection errors before asking the user if he wants to continue
MAX_CONSECUTIVE_CONNECTION_ERRORS = 15
# Timeout before the pre-connection candidate is being disposed (because of high probability that the web server will reset it)
PRECONNECT_CANDIDATE_TIMEOUT = 10
# Servers known to cause issue with pre-connection mechanism (because of lack of multi-threaded support)
PRECONNECT_INCOMPATIBLE_SERVERS = ("SimpleHTTP",)
# Maximum sleep time in "Murphy" (testing) mode
MAX_MURPHY_SLEEP_TIME = 3
# Regular expression used for extracting results from Google search
GOOGLE_REGEX = r"webcache\.googleusercontent\.com/search\?q=cache:[^:]+:([^+]+)\+&cd=|url\?\w+=((?![^>]+webcache\.googleusercontent\.com)http[^>]+)&(sa=U|rct=j)"
# Regular expression used for extracting results from DuckDuckGo search
DUCKDUCKGO_REGEX = r'"u":"([^"]+)'
# Regular expression used for extracting results from Bing search
BING_REGEX = r'<h2><a href="([^"]+)" h='
# Dummy user agent for search (if default one returns different results)
DUMMY_SEARCH_USER_AGENT = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0"
# Regular expression used for extracting content from "textual" tags
TEXT_TAG_REGEX = r"(?si)<(abbr|acronym|b|blockquote|br|center|cite|code|dt|em|font|h\d|i|li|p|pre|q|strong|sub|sup|td|th|title|tt|u)(?!\w).*?>(?P<result>[^<]+)"
# Regular expression used for recognition of IP addresses
IP_ADDRESS_REGEX = r"\b(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\b"
# Regular expression used for recognition of generic "your ip has been blocked" messages
BLOCKED_IP_REGEX = r"(?i)(\A|\b)ip\b.*\b(banned|blocked|block list|firewall)"
# Dumping characters used in GROUP_CONCAT MySQL technique
CONCAT_ROW_DELIMITER = ','
CONCAT_VALUE_DELIMITER = '|'
# Coefficient used for a time-based query delay checking (must be >= 7)
TIME_STDEV_COEFF = 7
# Minimum response time that can be even considered as delayed (not a complete requirement)
MIN_VALID_DELAYED_RESPONSE = 0.5
# Standard deviation after which a warning message should be displayed about connection lags
WARN_TIME_STDEV = 0.5
# Minimum length of usable union injected response (quick defense against substr fields)
UNION_MIN_RESPONSE_CHARS = 10
# Coefficient used for a union-based number of columns checking (must be >= 7)
UNION_STDEV_COEFF = 7
# Length of queue for candidates for time delay adjustment
TIME_DELAY_CANDIDATES = 3
# Default value for HTTP Accept header
HTTP_ACCEPT_HEADER_VALUE = "*/*"
# Default value for HTTP Accept-Encoding header
HTTP_ACCEPT_ENCODING_HEADER_VALUE = "gzip,deflate"
# Default timeout for running commands over backdoor
BACKDOOR_RUN_CMD_TIMEOUT = 5
# Number of seconds to wait for thread finalization at program end
THREAD_FINALIZATION_TIMEOUT = 1
# Maximum number of techniques used in inject.py/getValue() per one value
MAX_TECHNIQUES_PER_VALUE = 2
# In case of missing piece of partial union dump, buffered array must be flushed after certain size
MAX_BUFFERED_PARTIAL_UNION_LENGTH = 1024
# Suffix used for naming meta databases in DBMS(es) without explicit database name
METADB_SUFFIX = "_masterdb"
# Number of times to retry the pushValue during the exceptions (e.g. KeyboardInterrupt)
PUSH_VALUE_EXCEPTION_RETRY_COUNT = 3
# Minimum time response set needed for time-comparison based on standard deviation
MIN_TIME_RESPONSES = 30
# Minimum comparison ratio set needed for searching valid union column number based on standard deviation
MIN_UNION_RESPONSES = 5
# After these number of blanks at the end inference should stop (just in case)
INFERENCE_BLANK_BREAK = 10
# Use this replacement character for cases when inference is not able to retrieve the proper character value
INFERENCE_UNKNOWN_CHAR = '?'
# Character used for operation "greater" in inference
INFERENCE_GREATER_CHAR = ">"
# Character used for operation "greater or equal" in inference
INFERENCE_GREATER_EQUALS_CHAR = ">="
# Character used for operation "equals" in inference
INFERENCE_EQUALS_CHAR = "="
# Character used for operation "not-equals" in inference
INFERENCE_NOT_EQUALS_CHAR = "!="
# String used for representation of unknown DBMS
UNKNOWN_DBMS = "Unknown"
# String used for representation of unknown DBMS version
UNKNOWN_DBMS_VERSION = "Unknown"
# Dynamicity boundary length used in dynamicity removal engine
DYNAMICITY_BOUNDARY_LENGTH = 20
# Dummy user prefix used in dictionary attack
DUMMY_USER_PREFIX = "__dummy__"
# Reference: http://en.wikipedia.org/wiki/ISO/IEC_8859-1
DEFAULT_PAGE_ENCODING = "iso-8859-1"
try:
unicode(DEFAULT_PAGE_ENCODING, DEFAULT_PAGE_ENCODING)
except LookupError:
DEFAULT_PAGE_ENCODING = "utf8"
# URL used in dummy runs
DUMMY_URL = "http://foo/bar?id=1"
# System variables
IS_WIN = subprocess.mswindows
# The name of the operating system dependent module imported. The following names have currently been registered: 'posix', 'nt', 'mac', 'os2', 'ce', 'java', 'riscos'
PLATFORM = os.name
PYVERSION = sys.version.split()[0]
# DBMS system databases
MSSQL_SYSTEM_DBS = ("Northwind", "master", "model", "msdb", "pubs", "tempdb")
MYSQL_SYSTEM_DBS = ("information_schema", "mysql", "performance_schema")
PGSQL_SYSTEM_DBS = ("information_schema", "pg_catalog", "pg_toast", "pgagent")
ORACLE_SYSTEM_DBS = ('ANONYMOUS', 'APEX_030200', 'APEX_PUBLIC_USER', 'APPQOSSYS', 'BI', 'CTXSYS', 'DBSNMP', 'DIP', 'EXFSYS', 'FLOWS_%', 'FLOWS_FILES', 'HR', 'IX', 'LBACSYS', 'MDDATA', 'MDSYS', 'MGMT_VIEW', 'OC', 'OE', 'OLAPSYS', 'ORACLE_OCM', 'ORDDATA', 'ORDPLUGINS', 'ORDSYS', 'OUTLN', 'OWBSYS', 'PM', 'SCOTT', 'SH', 'SI_INFORMTN_SCHEMA', 'SPATIAL_CSW_ADMIN_USR', 'SPATIAL_WFS_ADMIN_USR', 'SYS', 'SYSMAN', 'SYSTEM', 'WKPROXY', 'WKSYS', 'WK_TEST', 'WMSYS', 'XDB', 'XS$NULL')
SQLITE_SYSTEM_DBS = ("sqlite_master", "sqlite_temp_master")
ACCESS_SYSTEM_DBS = ("MSysAccessObjects", "MSysACEs", "MSysObjects", "MSysQueries", "MSysRelationships", "MSysAccessStorage", "MSysAccessXML", "MSysModules", "MSysModules2")
FIREBIRD_SYSTEM_DBS = ("RDB$BACKUP_HISTORY", "RDB$CHARACTER_SETS", "RDB$CHECK_CONSTRAINTS", "RDB$COLLATIONS", "RDB$DATABASE", "RDB$DEPENDENCIES", "RDB$EXCEPTIONS", "RDB$FIELDS", "RDB$FIELD_DIMENSIONS", " RDB$FILES", "RDB$FILTERS", "RDB$FORMATS", "RDB$FUNCTIONS", "RDB$FUNCTION_ARGUMENTS", "RDB$GENERATORS", "RDB$INDEX_SEGMENTS", "RDB$INDICES", "RDB$LOG_FILES", "RDB$PAGES", "RDB$PROCEDURES", "RDB$PROCEDURE_PARAMETERS", "RDB$REF_CONSTRAINTS", "RDB$RELATIONS", "RDB$RELATION_CONSTRAINTS", "RDB$RELATION_FIELDS", "RDB$ROLES", "RDB$SECURITY_CLASSES", "RDB$TRANSACTIONS", "RDB$TRIGGERS", "RDB$TRIGGER_MESSAGES", "RDB$TYPES", "RDB$USER_PRIVILEGES", "RDB$VIEW_RELATIONS")
MAXDB_SYSTEM_DBS = ("SYSINFO", "DOMAIN")
SYBASE_SYSTEM_DBS = ("master", "model", "sybsystemdb", "sybsystemprocs")
DB2_SYSTEM_DBS = ("NULLID", "SQLJ", "SYSCAT", "SYSFUN", "SYSIBM", "SYSIBMADM", "SYSIBMINTERNAL", "SYSIBMTS", "SYSPROC", "SYSPUBLIC", "SYSSTAT", "SYSTOOLS")
HSQLDB_SYSTEM_DBS = ("INFORMATION_SCHEMA", "SYSTEM_LOB")
H2_SYSTEM_DBS = ("INFORMATION_SCHEMA")
INFORMIX_SYSTEM_DBS = ("sysmaster", "sysutils", "sysuser", "sysadmin")
MSSQL_ALIASES = ("microsoft sql server", "mssqlserver", "mssql", "ms")
MYSQL_ALIASES = ("mysql", "my", "mariadb", "maria")
PGSQL_ALIASES = ("postgresql", "postgres", "pgsql", "psql", "pg")
ORACLE_ALIASES = ("oracle", "orcl", "ora", "or")
SQLITE_ALIASES = ("sqlite", "sqlite3")
ACCESS_ALIASES = ("msaccess", "access", "jet", "microsoft access")
FIREBIRD_ALIASES = ("firebird", "mozilla firebird", "interbase", "ibase", "fb")
MAXDB_ALIASES = ("maxdb", "sap maxdb", "sap db")
SYBASE_ALIASES = ("sybase", "sybase sql server")
DB2_ALIASES = ("db2", "ibm db2", "ibmdb2")
HSQLDB_ALIASES = ("hsql", "hsqldb", "hs", "hypersql")
H2_ALIASES = ("h2",)
INFORMIX_ALIASES = ("informix", "ibm informix", "ibminformix")
DBMS_DIRECTORY_DICT = dict((getattr(DBMS, _), getattr(DBMS_DIRECTORY_NAME, _)) for _ in dir(DBMS) if not _.startswith("_"))
SUPPORTED_DBMS = MSSQL_ALIASES + MYSQL_ALIASES + PGSQL_ALIASES + ORACLE_ALIASES + SQLITE_ALIASES + ACCESS_ALIASES + FIREBIRD_ALIASES + MAXDB_ALIASES + SYBASE_ALIASES + DB2_ALIASES + HSQLDB_ALIASES + H2_ALIASES + INFORMIX_ALIASES
SUPPORTED_OS = ("linux", "windows")
DBMS_ALIASES = ((DBMS.MSSQL, MSSQL_ALIASES), (DBMS.MYSQL, MYSQL_ALIASES), (DBMS.PGSQL, PGSQL_ALIASES), (DBMS.ORACLE, ORACLE_ALIASES), (DBMS.SQLITE, SQLITE_ALIASES), (DBMS.ACCESS, ACCESS_ALIASES), (DBMS.FIREBIRD, FIREBIRD_ALIASES), (DBMS.MAXDB, MAXDB_ALIASES), (DBMS.SYBASE, SYBASE_ALIASES), (DBMS.DB2, DB2_ALIASES), (DBMS.HSQLDB, HSQLDB_ALIASES), (DBMS.H2, H2_ALIASES), (DBMS.INFORMIX, INFORMIX_ALIASES))
USER_AGENT_ALIASES = ("ua", "useragent", "user-agent")
REFERER_ALIASES = ("ref", "referer", "referrer")
HOST_ALIASES = ("host",)
H2_DEFAULT_SCHEMA = HSQLDB_DEFAULT_SCHEMA = "PUBLIC"
# Names that can't be used to name files on Windows OS
WINDOWS_RESERVED_NAMES = ("CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9")
# Items displayed in basic help (-h) output (option destination names)
BASIC_HELP_ITEMS = (
"url",
"googleDork",
"data",
"cookie",
"randomAgent",
"proxy",
"testParameter",
"dbms",
"level",
"risk",
"tech",
"getAll",
"getBanner",
"getCurrentUser",
"getCurrentDb",
"getPasswordHashes",
"getTables",
"getColumns",
"getSchema",
"dumpTable",
"dumpAll",
"db",
"tbl",
"col",
"osShell",
"osPwn",
"batch",
"checkTor",
"flushSession",
"tor",
"sqlmapShell",
"wizard",
)
# Tags used for value replacements inside shell scripts
SHELL_WRITABLE_DIR_TAG = "%WRITABLE_DIR%"
SHELL_RUNCMD_EXE_TAG = "%RUNCMD_EXE%"
# String representation for NULL value
NULL = "NULL"
# String representation for blank ('') value
BLANK = "<blank>"
# String representation for current database
CURRENT_DB = "CD"
# Name of SQLite file used for storing session data
SESSION_SQLITE_FILE = "session.sqlite"
# Regular expressions used for finding file paths in error messages (each must capture group "result")
FILE_PATH_REGEXES = (r"<b>(?P<result>[^<>]+?)</b> on line \d+", r"in (?P<result>[^<>'\"]+?)['\"]? on line \d+", r"(?:[>(\[\s])(?P<result>[A-Za-z]:[\\/][\w. \\/-]*)", r"(?:[>(\[\s])(?P<result>/\w[/\w.~-]+)", r"href=['\"]file://(?P<result>/[^'\"]+)")
# Regular expressions used for parsing error messages (--parse-errors)
ERROR_PARSING_REGEXES = (
r"\[Microsoft\]\[ODBC SQL Server Driver\]\[SQL Server\](?P<result>[^<]+)",
r"<b>[^<]*(fatal|error|warning|exception)[^<]*</b>:?\s*(?P<result>.+?)<br\s*/?\s*>",
r"(?m)^\s*(fatal|error|warning|exception):?\s*(?P<result>[^\n]+?)$",
r"(?P<result>[^\n>]*SQL Syntax[^\n<]+)",
r"<li>Error Type:<br>(?P<result>.+?)</li>",
r"CDbCommand (?P<result>[^<>\n]*SQL[^<>\n]+)",
r"error '[0-9a-f]{8}'((<[^>]+>)|\s)+(?P<result>[^<>]+)",
r"\[[^\n\]]+(ODBC|JDBC)[^\n\]]+\](\[[^\]]+\])?(?P<result>[^\n]+(in query expression|\(SQL| at /[^ ]+pdo)[^\n<]+)"
)
# Regular expression used for parsing charset info from meta html headers
META_CHARSET_REGEX = r'(?si)<head>.*<meta[^>]+charset="?(?P<result>[^"> ]+).*</head>'
# Regular expression used for parsing refresh info from meta html headers
META_REFRESH_REGEX = r'(?si)<head>(?!.*?<noscript.*?</head).*?<meta http-equiv="?refresh"?[^>]+content="?[^">]+url=["\']?(?P<result>[^\'">]+).*</head>'
# Regular expression used for parsing empty fields in tested form data
EMPTY_FORM_FIELDS_REGEX = r'(&|\A)(?P<result>[^=]+=(&|\Z))'
# Most common password suffixes, ordered by observed frequency
# Reference: http://www.cs.ru.nl/bachelorscripties/2010/Martin_Devillers___0437999___Analyzing_password_strength.pdf
COMMON_PASSWORD_SUFFIXES = ("1", "123", "2", "12", "3", "13", "7", "11", "5", "22", "23", "01", "4", "07", "21", "14", "10", "06", "08", "8", "15", "69", "16", "6", "18")
# Reference: http://www.the-interweb.com/serendipity/index.php?/archives/94-A-brief-analysis-of-40,000-leaked-MySpace-passwords.html
COMMON_PASSWORD_SUFFIXES += ("!", ".", "*", "!!", "?", ";", "..", "!!!", ", ", "@")
# Splitter used between requests in WebScarab log files
WEBSCARAB_SPLITTER = "### Conversation"
# Splitter used between requests in BURP log files
BURP_REQUEST_REGEX = r"={10,}\s+[^=]+={10,}\s(.+?)\s={10,}"
# Regex used for parsing XML Burp saved history items
BURP_XML_HISTORY_REGEX = r'<port>(\d+)</port>.+?<request base64="true"><!\[CDATA\[([^]]+)'
# Encoding used for Unicode data
UNICODE_ENCODING = "utf8"
# Reference: http://www.w3.org/Protocols/HTTP/Object_Headers.html#uri
URI_HTTP_HEADER = "URI"
# Uri format which could be injectable (e.g. www.site.com/id82)
URI_INJECTABLE_REGEX = r"//[^/]*/([^\.*?]+)\Z"
# Regex used for masking sensitive data (the %s placeholder is filled with the value to mask)
SENSITIVE_DATA_REGEX = r"(\s|=)(?P<result>[^\s=]*%s[^\s]*)\s"
# Options to explicitly mask in anonymous (unhandled exception) reports (along with anything carrying the <hostname> inside)
# Fix: removed duplicated "authCred" entry (it was listed twice in the tuple)
SENSITIVE_OPTIONS = ("hostname", "answers", "data", "dnsDomain", "googleDork", "authCred", "proxyCred", "tbl", "db", "col", "user", "cookie", "proxy", "fileRead", "fileWrite", "fileDest", "testParameter")
# Maximum number of threads (avoiding connection issues and/or DoS)
MAX_NUMBER_OF_THREADS = 10
# Minimum range between minimum and maximum of statistical set
MIN_STATISTICAL_RANGE = 0.01
# Minimum value for comparison ratio
MIN_RATIO = 0.0
# Maximum value for comparison ratio
MAX_RATIO = 1.0
# Minimum length of sentence for automatic choosing of --string (in case of high matching ratio)
CANDIDATE_SENTENCE_MIN_LENGTH = 10
# Character used for marking injectable position inside provided data
CUSTOM_INJECTION_MARK_CHAR = '*'
# Other way to declare injection position
INJECT_HERE_REGEX = r"(?i)%INJECT[_ ]?HERE%"
# Minimum chunk length used for retrieving data over error based payloads
MIN_ERROR_CHUNK_LENGTH = 8
# Maximum chunk length used for retrieving data over error based payloads
MAX_ERROR_CHUNK_LENGTH = 1024
# Do not escape the injected statement if it contains any of the following SQL keywords
EXCLUDE_UNESCAPE = ("WAITFOR DELAY ", " INTO DUMPFILE ", " INTO OUTFILE ", "CREATE ", "BULK ", "EXEC ", "RECONFIGURE ", "DECLARE ", "'%s'" % CHAR_INFERENCE_MARK)
# Mark used for replacement of reflected values
REFLECTED_VALUE_MARKER = "__REFLECTED_VALUE__"
# Regular expression used for replacing border non-alphanum characters
REFLECTED_BORDER_REGEX = r"[^A-Za-z]+"
# Regular expression used for replacing non-alphanum characters
REFLECTED_REPLACEMENT_REGEX = r"[^\n]{1,100}"
# Maximum time (in seconds) spent per reflective value(s) replacement
REFLECTED_REPLACEMENT_TIMEOUT = 3
# Maximum number of alpha-numerical parts in reflected regex (for speed purposes)
REFLECTED_MAX_REGEX_PARTS = 10
# Chars which can be used as a failsafe values in case of too long URL encoding value
URLENCODE_FAILSAFE_CHARS = "()|,"
# Maximum length of URL encoded value after which failsafe procedure takes away
URLENCODE_CHAR_LIMIT = 2000
# Default schema for Microsoft SQL Server DBMS
DEFAULT_MSSQL_SCHEMA = "dbo"
# Display hash attack info every mod number of items
HASH_MOD_ITEM_DISPLAY = 11
# Display marker for (cracked) empty password
HASH_EMPTY_PASSWORD_MARKER = "<empty>"
# Maximum integer value (NOTE: sys.maxint exists only in Python 2; it was removed in Python 3 in favor of sys.maxsize)
MAX_INT = sys.maxint
# Replacement for unsafe characters in dump table filenames
UNSAFE_DUMP_FILEPATH_REPLACEMENT = '_'
# Options that need to be restored in multiple targets run mode
RESTORE_MERGED_OPTIONS = ("col", "db", "dnsDomain", "privEsc", "tbl", "regexp", "string", "textOnly", "threads", "timeSec", "tmpPath", "uChar", "user")
# Parameters to be ignored in detection phase (upper case)
IGNORE_PARAMETERS = ("__VIEWSTATE", "__VIEWSTATEENCRYPTED", "__VIEWSTATEGENERATOR", "__EVENTARGUMENT", "__EVENTTARGET", "__EVENTVALIDATION", "ASPSESSIONID", "ASP.NET_SESSIONID", "JSESSIONID", "CFID", "CFTOKEN")
# Regular expression used for recognition of ASP.NET control parameters
ASP_NET_CONTROL_REGEX = r"(?i)\Actl\d+\$"
# Prefix for Google analytics cookie names
GOOGLE_ANALYTICS_COOKIE_PREFIX = "__UTM"
# Prefix for configuration overriding environment variables
SQLMAP_ENVIRONMENT_PREFIX = "SQLMAP_"
# Turn off resume console info to avoid potential slowdowns
TURN_OFF_RESUME_INFO_LIMIT = 20
# Strftime format for results file used in multiple target mode
RESULTS_FILE_FORMAT = "results-%m%d%Y_%I%M%p.csv"
# Official web page with the list of Python supported codecs
CODECS_LIST_PAGE = "http://docs.python.org/library/codecs.html#standard-encodings"
# Simple regular expression used to distinguish scalar from multiple-row commands (not sole condition)
SQL_SCALAR_REGEX = r"\A(SELECT(?!\s+DISTINCT\(?))?\s*\w*\("
# Option/switch values to ignore during configuration save
IGNORE_SAVE_OPTIONS = ("saveConfig",)
# IP address of the localhost
LOCALHOST = "127.0.0.1"
# Default SOCKS ports used by Tor
DEFAULT_TOR_SOCKS_PORTS = (9050, 9150)
# Default HTTP ports used by Tor
DEFAULT_TOR_HTTP_PORTS = (8123, 8118)
# Percentage below which comparison engine could have problems
LOW_TEXT_PERCENT = 20
# These MySQL keywords can't go (alone) into versioned comment form (/*!...*/)
# Reference: http://dev.mysql.com/doc/refman/5.1/en/function-resolution.html
IGNORE_SPACE_AFFECTED_KEYWORDS = ("CAST", "COUNT", "EXTRACT", "GROUP_CONCAT", "MAX", "MID", "MIN", "SESSION_USER", "SUBSTR", "SUBSTRING", "SUM", "SYSTEM_USER", "TRIM")
# Keywords expected to be in UPPERCASE in getValue()
GET_VALUE_UPPERCASE_KEYWORDS = ("SELECT", "FROM", "WHERE", "DISTINCT", "COUNT")
# Legal disclaimer displayed to the user
LEGAL_DISCLAIMER = "Usage of sqlmap for attacking targets without prior mutual consent is illegal. It is the end user's responsibility to obey all applicable local, state and federal laws. Developers assume no liability and are not responsible for any misuse or damage caused by this program"
# After this number of misses reflective removal mechanism is turned off (for speed up reasons)
REFLECTIVE_MISS_THRESHOLD = 20
# Regular expression used for extracting HTML title
HTML_TITLE_REGEX = r"<title>(?P<result>[^<]+)</title>"
# Table used for Base64 conversion in WordPress hash cracking routine
ITOA64 = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
# Types considered safe for unpickling (Python 2 `types` module constants; removed in Python 3)
PICKLE_REDUCE_WHITELIST = (types.BooleanType, types.DictType, types.FloatType, types.IntType, types.ListType, types.LongType, types.NoneType, types.StringType, types.TupleType, types.UnicodeType, types.XRangeType, type(AttribDict()), type(set()))
# Chars used to quickly distinguish if the user provided tainted parameter values
DUMMY_SQL_INJECTION_CHARS = ";()'"
# Simple check against dummy users
DUMMY_USER_INJECTION = r"(?i)[^\w](AND|OR)\s+[^\s]+[=><]|\bUNION\b.+\bSELECT\b|\bSELECT\b.+\bFROM\b|\b(CONCAT|information_schema|SLEEP|DELAY|FLOOR\(RAND)\b"
# Extensions skipped by crawler
CRAWL_EXCLUDE_EXTENSIONS = ("3ds", "3g2", "3gp", "7z", "DS_Store", "a", "aac", "adp", "ai", "aif", "aiff", "apk", "ar", "asf", "au", "avi", "bak", "bin", "bk", "bmp", "btif", "bz2", "cab", "caf", "cgm", "cmx", "cpio", "cr2", "dat", "deb", "djvu", "dll", "dmg", "dmp", "dng", "doc", "docx", "dot", "dotx", "dra", "dsk", "dts", "dtshd", "dvb", "dwg", "dxf", "ear", "ecelp4800", "ecelp7470", "ecelp9600", "egg", "eol", "eot", "epub", "exe", "f4v", "fbs", "fh", "fla", "flac", "fli", "flv", "fpx", "fst", "fvt", "g3", "gif", "gz", "h261", "h263", "h264", "ico", "ief", "image", "img", "ipa", "iso", "jar", "jpeg", "jpg", "jpgv", "jpm", "jxr", "ktx", "lvp", "lz", "lzma", "lzo", "m3u", "m4a", "m4v", "mar", "mdi", "mid", "mj2", "mka", "mkv", "mmr", "mng", "mov", "movie", "mp3", "mp4", "mp4a", "mpeg", "mpg", "mpga", "mxu", "nef", "npx", "o", "oga", "ogg", "ogv", "otf", "pbm", "pcx", "pdf", "pea", "pgm", "pic", "png", "pnm", "ppm", "pps", "ppt", "pptx", "ps", "psd", "pya", "pyc", "pyo", "pyv", "qt", "rar", "ras", "raw", "rgb", "rip", "rlc", "rz", "s3m", "s7z", "scm", "scpt", "sgi", "shar", "sil", "smv", "so", "sub", "swf", "tar", "tbz2", "tga", "tgz", "tif", "tiff", "tlz", "ts", "ttf", "uvh", "uvi", "uvm", "uvp", "uvs", "uvu", "viv", "vob", "war", "wav", "wax", "wbmp", "wdp", "weba", "webm", "webp", "whl", "wm", "wma", "wmv", "wmx", "woff", "woff2", "wvx", "xbm", "xif", "xls", "xlsx", "xlt", "xm", "xpi", "xpm", "xwd", "xz", "z", "zip", "zipx")
# Patterns often seen in HTTP headers containing custom injection marking character '*'
PROBLEMATIC_CUSTOM_INJECTION_PATTERNS = r"(;q=[^;']+)|(\*/\*)"
# Template used for common table existence check
BRUTE_TABLE_EXISTS_TEMPLATE = "EXISTS(SELECT %d FROM %s)"
# Template used for common column existence check
BRUTE_COLUMN_EXISTS_TEMPLATE = "EXISTS(SELECT %s FROM %s)"
# Payload used for checking of existence of IDS/IPS/WAF (dummier the better)
IDS_WAF_CHECK_PAYLOAD = "AND 1=1 UNION ALL SELECT 1,NULL,'<script>alert(\"XSS\")</script>',table_name FROM information_schema.tables WHERE 2>1--/**/; EXEC xp_cmdshell('cat ../../../etc/passwd')#"
# Data inside shellcodeexec to be filled with random string
SHELLCODEEXEC_RANDOM_STRING_MARKER = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
# Generic address for checking the Internet connection while using switch --check-internet
CHECK_INTERNET_ADDRESS = "https://ipinfo.io/"
# Value to look for in response to CHECK_INTERNET_ADDRESS
CHECK_INTERNET_VALUE = "IP Address Details"
# Vectors used for provoking specific WAF/IPS behavior(s)
WAF_ATTACK_VECTORS = (
"", # NIL
"search=<script>alert(1)</script>",
"file=../../../../etc/passwd",
"q=<invalid>foobar",
"id=1 %s" % IDS_WAF_CHECK_PAYLOAD
)
# Used for status representation in dictionary attack phase
ROTATING_CHARS = ('\\', '|', '|', '/', '-')
# Approximate chunk length (in bytes) used by BigArray objects (only last chunk and cached one are held in memory)
BIGARRAY_CHUNK_SIZE = 1024 * 1024
# Compress level used for storing BigArray chunks to disk (0-9)
BIGARRAY_COMPRESS_LEVEL = 9
# Maximum number of socket pre-connects
SOCKET_PRE_CONNECT_QUEUE_SIZE = 3
# Only console display last n table rows
TRIM_STDOUT_DUMP_SIZE = 256
# Reference: http://stackoverflow.com/a/3168436
# Reference: https://support.microsoft.com/en-us/kb/899149
DUMP_FILE_BUFFER_SIZE = 1024
# Parse response headers only first couple of times
PARSE_HEADERS_LIMIT = 3
# Step used in ORDER BY technique used for finding the right number of columns in UNION query injections
ORDER_BY_STEP = 10
# Maximum number of times for revalidation of a character in inference (as required)
MAX_REVALIDATION_STEPS = 5
# Characters that can be used to split parameter values in provided command line (e.g. in --tamper)
PARAMETER_SPLITTING_REGEX = r"[,|;]"
# Regular expression describing possible union char value (e.g. used in --union-char)
UNION_CHAR_REGEX = r"\A\w+\Z"
# Attribute used for storing original parameter value in special cases (e.g. POST)
UNENCODED_ORIGINAL_VALUE = "original"
# Common column names containing usernames (used for hash cracking in some cases)
COMMON_USER_COLUMNS = ("login", "user", "username", "user_name", "user_login", "benutzername", "benutzer", "utilisateur", "usager", "consommateur", "utente", "utilizzatore", "usufrutuario", "korisnik", "usuario", "consumidor", "client", "cuser")
# Default delimiter in GET/POST values
DEFAULT_GET_POST_DELIMITER = '&'
# Default delimiter in cookie values
DEFAULT_COOKIE_DELIMITER = ';'
# Unix timestamp used for forcing cookie expiration when provided with --load-cookies
FORCE_COOKIE_EXPIRATION_TIME = "9999999999"
# Github OAuth token used for creating an automatic Issue for unhandled exceptions
GITHUB_REPORT_OAUTH_TOKEN = "NTYzYjhmZWJjYzc0Njg2ODJhNzhmNDg1YzM0YzlkYjk3N2JiMzE3Nw=="
# Skip unforced HashDB flush requests below the threshold number of cached items
HASHDB_FLUSH_THRESHOLD = 32
# Number of retries for unsuccessful HashDB flush attempts
HASHDB_FLUSH_RETRIES = 3
# Number of retries for unsuccessful HashDB retrieve attempts
HASHDB_RETRIEVE_RETRIES = 3
# Number of retries for unsuccessful HashDB end transaction attempts
HASHDB_END_TRANSACTION_RETRIES = 3
# Unique milestone value used for forced deprecation of old HashDB values (e.g. when changing hash/pickle mechanism)
HASHDB_MILESTONE_VALUE = "BZzRotigLX" # python -c 'import random, string; print "".join(random.sample(string.ascii_letters, 10))'
# Warn user of possible delay due to large page dump in full UNION query injections
LARGE_OUTPUT_THRESHOLD = 1024 ** 2
# On huge tables there is a considerable slowdown if every row retrieval requires ORDER BY (most noticeable in table dumping using ERROR injections)
SLOW_ORDER_COUNT_THRESHOLD = 10000
# Give up on hash recognition if nothing was found in first given number of rows
HASH_RECOGNITION_QUIT_THRESHOLD = 10000
# Maximum number of redirections to any single URL - this is needed because of the state that cookies introduce
MAX_SINGLE_URL_REDIRECTIONS = 4
# Maximum total number of redirections (regardless of URL) - before assuming we're in a loop
MAX_TOTAL_REDIRECTIONS = 10
# Reference: http://www.tcpipguide.com/free/t_DNSLabelsNamesandSyntaxRules.htm
MAX_DNS_LABEL = 63
# Alphabet used for prefix and suffix strings of name resolution requests in DNS technique (excluding hexadecimal chars for not mixing with inner content)
DNS_BOUNDARIES_ALPHABET = re.sub(r"[a-fA-F]", "", string.ascii_letters)
# Alphabet used for heuristic checks
HEURISTIC_CHECK_ALPHABET = ('"', '\'', ')', '(', ',', '.')
# Minor artistic touch (highlights "[.]" placeholders in the banner with a random heuristic-check character)
BANNER = re.sub(r"\[.\]", lambda _: "[\033[01;41m%s\033[01;49m]" % random.sample(HEURISTIC_CHECK_ALPHABET, 1)[0], BANNER)
# String used for dummy non-SQLi (e.g. XSS) heuristic checks of a tested parameter value
DUMMY_NON_SQLI_CHECK_APPENDIX = "<'\">"
# Regular expression used for recognition of file inclusion errors
FI_ERROR_REGEX = r"(?i)[^\n]{0,100}(no such file|failed (to )?open)[^\n]{0,100}"
# Length of prefix and suffix used in non-SQLI heuristic checks
NON_SQLI_CHECK_PREFIX_SUFFIX_LENGTH = 6
# Connection chunk size (processing large responses in chunks to avoid MemoryError crashes - e.g. large table dump in full UNION injections)
MAX_CONNECTION_CHUNK_SIZE = 10 * 1024 * 1024
# Maximum response total page size (trimmed if larger)
MAX_CONNECTION_TOTAL_SIZE = 100 * 1024 * 1024
# For preventing MemoryError exceptions (caused when using large sequences in difflib.SequenceMatcher)
MAX_DIFFLIB_SEQUENCE_LENGTH = 10 * 1024 * 1024
# Maximum (multi-threaded) length of entry in bisection algorithm
MAX_BISECTION_LENGTH = 50 * 1024 * 1024
# Mark used for trimming unnecessary content in large chunks
LARGE_CHUNK_TRIM_MARKER = "__TRIMMED_CONTENT__"
# Generic SQL comment formation ("[RANDSTR]" is replaced at runtime)
GENERIC_SQL_COMMENT = "-- [RANDSTR]"
# Threshold value for turning back on time auto-adjustment mechanism
VALID_TIME_CHARS_RUN_THRESHOLD = 100
# Check for empty columns only if table is sufficiently large
CHECK_ZERO_COLUMNS_THRESHOLD = 10
# Boldify all logger messages containing these "patterns"
BOLD_PATTERNS = ("' injectable", "provided empty", "leftover chars", "might be injectable", "' is vulnerable", "is not injectable", "does not seem to be", "test failed", "test passed", "live test final result", "test shows that", "the back-end DBMS is", "created Github", "blocked by the target server", "protection is involved", "CAPTCHA", "specific response", "NULL connection is supported")
# Generic www root directory names
GENERIC_DOC_ROOT_DIRECTORY_NAMES = ("htdocs", "httpdocs", "public", "wwwroot", "www")
# Maximum length of a help part containing switch/option name(s)
MAX_HELP_OPTION_LENGTH = 18
# Maximum number of connection retries (to prevent problems with recursion)
MAX_CONNECT_RETRIES = 100
# Strings for detecting formatting errors
FORMAT_EXCEPTION_STRINGS = ("Type mismatch", "Error converting", "Conversion failed", "String or binary data would be truncated", "Failed to convert", "unable to interpret text value", "Input string was not in a correct format", "System.FormatException", "java.lang.NumberFormatException", "ValueError: invalid literal", "TypeMismatchException", "CF_SQL_INTEGER", " for CFSQLTYPE ", "cfqueryparam cfsqltype", "InvalidParamTypeException", "Invalid parameter type", "is not of type numeric", "<cfif Not IsNumeric(", "invalid input syntax for integer", "invalid input syntax for type", "invalid number", "character to number conversion error", "unable to interpret text value", "String was not recognized as a valid", "Convert.ToInt", "cannot be converted to a ", "InvalidDataException")
# Regular expression used for extracting ASP.NET view state values
VIEWSTATE_REGEX = r'(?i)(?P<name>__VIEWSTATE[^"]*)[^>]+value="(?P<result>[^"]+)'
# Regular expression used for extracting ASP.NET event validation values
EVENTVALIDATION_REGEX = r'(?i)(?P<name>__EVENTVALIDATION[^"]*)[^>]+value="(?P<result>[^"]+)'
# Number of rows to generate inside the full union test for limited output (mustn't be too large to prevent payload length problems)
LIMITED_ROWS_TEST_NUMBER = 15
# Default adapter to use for bottle server
RESTAPI_DEFAULT_ADAPTER = "wsgiref"
# Default REST-JSON API server listen address
RESTAPI_DEFAULT_ADDRESS = "127.0.0.1"
# Default REST-JSON API server listen port
RESTAPI_DEFAULT_PORT = 8775
# Format used for representing invalid unicode characters
INVALID_UNICODE_CHAR_FORMAT = r"\x%02x"
# Regular expression for XML POST data
XML_RECOGNITION_REGEX = r"(?s)\A\s*<[^>]+>(.+>)?\s*\Z"
# Regular expression used for detecting JSON POST data
JSON_RECOGNITION_REGEX = r'(?s)\A(\s*\[)*\s*\{.*"[^"]+"\s*:\s*("[^"]*"|\d+|true|false|null).*\}\s*(\]\s*)*\Z'
# Regular expression used for detecting JSON-like POST data
JSON_LIKE_RECOGNITION_REGEX = r"(?s)\A(\s*\[)*\s*\{.*'[^']+'\s*:\s*('[^']+'|\d+).*\}\s*(\]\s*)*\Z"
# Regular expression used for detecting multipart POST data
MULTIPART_RECOGNITION_REGEX = r"(?i)Content-Disposition:[^;]+;\s*name="
# Regular expression used for detecting Array-like POST data
ARRAY_LIKE_RECOGNITION_REGEX = r"(\A|%s)(\w+)\[\]=.+%s\2\[\]=" % (DEFAULT_GET_POST_DELIMITER, DEFAULT_GET_POST_DELIMITER)
# Default POST data content-type
DEFAULT_CONTENT_TYPE = "application/x-www-form-urlencoded; charset=utf-8"
# Raw text POST data content-type
PLAIN_TEXT_CONTENT_TYPE = "text/plain; charset=utf-8"
# Length used while checking for existence of Suhosin-patch (like) protection mechanism
SUHOSIN_MAX_VALUE_LENGTH = 512
# Minimum size of an (binary) entry before it can be considered for dumping to disk
MIN_BINARY_DISK_DUMP_SIZE = 100
# Filenames of payloads xml files (in order of loading)
PAYLOAD_XML_FILES = ("boolean_blind.xml", "error_based.xml", "inline_query.xml", "stacked_queries.xml", "time_blind.xml", "union_query.xml")
# Regular expression used for extracting form tags
FORM_SEARCH_REGEX = r"(?si)<form(?!.+<form).+?</form>"
# Maximum number of lines to save in history file
MAX_HISTORY_LENGTH = 1000
# Minimum field entry length needed for encoded content (hex, base64,...) check
MIN_ENCODED_LEN_CHECK = 5
# Timeout in seconds in which Metasploit remote session has to be initialized
METASPLOIT_SESSION_TIMEOUT = 120
# Reference: http://www.postgresql.org/docs/9.0/static/catalog-pg-largeobject.html
LOBLKSIZE = 2048
# Suffix used to mark variables having keyword names
EVALCODE_KEYWORD_SUFFIX = "_KEYWORD"
# Reference: http://www.cookiecentral.com/faq/#3.5
NETSCAPE_FORMAT_HEADER_COOKIES = "# Netscape HTTP Cookie File."
# Infixes used for automatic recognition of parameters carrying anti-CSRF tokens
CSRF_TOKEN_PARAMETER_INFIXES = ("csrf", "xsrf", "token")
# Prefixes used in brute force search for web server document root (keyed by target OS; "%TARGET%" is replaced with the target name)
BRUTE_DOC_ROOT_PREFIXES = {
OS.LINUX: ("/var/www", "/usr/local/apache", "/usr/local/apache2", "/usr/local/www/apache22", "/usr/local/www/apache24", "/usr/local/httpd", "/var/www/nginx-default", "/srv/www", "/var/www/%TARGET%", "/var/www/vhosts/%TARGET%", "/var/www/virtual/%TARGET%", "/var/www/clients/vhosts/%TARGET%", "/var/www/clients/virtual/%TARGET%"),
OS.WINDOWS: ("/xampp", "/Program Files/xampp", "/wamp", "/Program Files/wampp", "/apache", "/Program Files/Apache Group/Apache", "/Program Files/Apache Group/Apache2", "/Program Files/Apache Group/Apache2.2", "/Program Files/Apache Group/Apache2.4", "/Inetpub/wwwroot", "/Inetpub/wwwroot/%TARGET%", "/Inetpub/vhosts/%TARGET%")
}
# Suffixes used in brute force search for web server document root
BRUTE_DOC_ROOT_SUFFIXES = ("", "html", "htdocs", "httpdocs", "php", "public", "src", "site", "build", "web", "www", "data", "sites/all", "www/build")
# String used for marking target name inside used brute force web server document root
BRUTE_DOC_ROOT_TARGET_MARK = "%TARGET%"
# Character used as a boundary in kb.chars (preferably less frequent letter)
KB_CHARS_BOUNDARY_CHAR = 'q'
# Letters of lower frequency used in kb.chars
KB_CHARS_LOW_FREQUENCY_ALPHABET = "zqxjkvbp"
# CSS style used in HTML dump format (embedded verbatim into generated HTML dump files)
HTML_DUMP_CSS_STYLE = """<style>
table{
margin:10;
background-color:#FFFFFF;
font-family:verdana;
font-size:12px;
align:center;
}
thead{
font-weight:bold;
background-color:#4F81BD;
color:#FFFFFF;
}
tr:nth-child(even) {
background-color: #D3DFEE
}
td{
font-size:12px;
}
th{
font-size:12px;
}
</style>"""
| 45.499379 | 1,450 | 0.72523 |
import os
import random
import re
import subprocess
import string
import sys
import types
from lib.core.datatype import AttribDict
from lib.core.enums import DBMS
from lib.core.enums import DBMS_DIRECTORY_NAME
from lib.core.enums import OS
VERSION = "1.2.11.12"
TYPE = "dev" if VERSION.count('.') > 2 and VERSION.split('.')[-1] != '0' else "stable"
TYPE_COLORS = {"dev": 33, "stable": 90, "pip": 34}
VERSION_STRING = "sqlmap/%s#%s" % ('.'.join(VERSION.split('.')[:-1]) if VERSION.count('.') > 2 and VERSION.split('.')[-1] == '0' else VERSION, TYPE)
DESCRIPTION = "automatic SQL injection and database takeover tool"
SITE = "http://sqlmap.org"
DEV_EMAIL_ADDRESS = "dev@sqlmap.org"
ISSUES_PAGE = "https://github.com/sqlmapproject/sqlmap/issues/new"
GIT_REPOSITORY = "https://github.com/sqlmapproject/sqlmap.git"
GIT_PAGE = "https://github.com/sqlmapproject/sqlmap"
ZIPBALL_PAGE = "https://github.com/sqlmapproject/sqlmap/zipball/master"
BANNER = """\033[01;33m\
___
__H__
___ ___[.]_____ ___ ___ \033[01;37m{\033[01;%dm%s\033[01;37m}\033[01;33m
|_ -| . [.] | .'| . |
|___|_ [.]_|_|_|__,| _|
|_|V |_| \033[0m\033[4;37m%s\033[0m\n
""" % (TYPE_COLORS.get(TYPE, 31), VERSION_STRING.split('/')[-1], SITE)
# Minimum distance of ratio from kb.matchRatio to result in True
DIFF_TOLERANCE = 0.05
CONSTANT_RATIO = 0.9
# Ratio used in heuristic check for WAF/IPS protected targets
IDS_WAF_CHECK_RATIO = 0.5
# Timeout used in heuristic check for WAF/IPS protected targets
IDS_WAF_CHECK_TIMEOUT = 10
# Lower and upper values for match ratio in case of stable page
LOWER_RATIO_BOUND = 0.02
UPPER_RATIO_BOUND = 0.98
# Markers for special cases when parameter values contain html encoded characters
PARAMETER_AMP_MARKER = "__AMP__"
PARAMETER_SEMICOLON_MARKER = "__SEMICOLON__"
BOUNDARY_BACKSLASH_MARKER = "__BACKSLASH__"
PARTIAL_VALUE_MARKER = "__PARTIAL_VALUE__"
PARTIAL_HEX_VALUE_MARKER = "__PARTIAL_HEX_VALUE__"
URI_QUESTION_MARKER = "__QUESTION_MARK__"
ASTERISK_MARKER = "__ASTERISK_MARK__"
REPLACEMENT_MARKER = "__REPLACEMENT_MARK__"
BOUNDED_INJECTION_MARKER = "__BOUNDED_INJECTION_MARK__"
SAFE_VARIABLE_MARKER = "__SAFE__"
RANDOM_INTEGER_MARKER = "[RANDINT]"
RANDOM_STRING_MARKER = "[RANDSTR]"
SLEEP_TIME_MARKER = "[SLEEPTIME]"
INFERENCE_MARKER = "[INFERENCE]"
SINGLE_QUOTE_MARKER = "[SINGLE_QUOTE]"
PAYLOAD_DELIMITER = "__PAYLOAD_DELIMITER__"
CHAR_INFERENCE_MARK = "%c"
PRINTABLE_CHAR_REGEX = r"[^\x00-\x1f\x7f-\xff]"
# Regular expression used for extraction of table names (useful for (e.g.) MsAccess)
SELECT_FROM_TABLE_REGEX = r"\bSELECT\b.+?\bFROM\s+(?P<result>([\w.]|`[^`<>]+`)+)"
# Regular expression used for recognition of textual content-type
TEXT_CONTENT_TYPE_REGEX = r"(?i)(text|form|message|xml|javascript|ecmascript|json)"
# Regular expression used for recognition of generic permission messages
PERMISSION_DENIED_REGEX = r"(?P<result>(command|permission|access)\s*(was|is)?\s*denied)"
# Regular expression used in recognition of generic protection mechanisms
GENERIC_PROTECTION_REGEX = r"(?i)\b(rejected|blocked|protection|incident|denied|detected|dangerous|firewall)\b"
# Regular expression used for recognition of generic maximum connection messages
MAX_CONNECTIONS_REGEX = r"\bmax.+?\bconnection"
# Maximum consecutive connection errors before asking the user if he wants to continue
MAX_CONSECUTIVE_CONNECTION_ERRORS = 15
# Timeout before the pre-connection candidate is being disposed (because of high probability that the web server will reset it)
PRECONNECT_CANDIDATE_TIMEOUT = 10
# Servers known to cause issue with pre-connection mechanism (because of lack of multi-threaded support)
PRECONNECT_INCOMPATIBLE_SERVERS = ("SimpleHTTP",)
# Maximum sleep time in "Murphy" (testing) mode
MAX_MURPHY_SLEEP_TIME = 3
# Regular expression used for extracting results from Google search
GOOGLE_REGEX = r"webcache\.googleusercontent\.com/search\?q=cache:[^:]+:([^+]+)\+&cd=|url\?\w+=((?![^>]+webcache\.googleusercontent\.com)http[^>]+)&(sa=U|rct=j)"
# Regular expression used for extracting results from DuckDuckGo search
DUCKDUCKGO_REGEX = r'"u":"([^"]+)'
# Regular expression used for extracting results from Bing search
BING_REGEX = r'<h2><a href="([^"]+)" h='
# Dummy user agent for search (if default one returns different results)
DUMMY_SEARCH_USER_AGENT = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0"
# Regular expression used for extracting content from "textual" tags
TEXT_TAG_REGEX = r"(?si)<(abbr|acronym|b|blockquote|br|center|cite|code|dt|em|font|h\d|i|li|p|pre|q|strong|sub|sup|td|th|title|tt|u)(?!\w).*?>(?P<result>[^<]+)"
# Regular expression used for recognition of IP addresses
IP_ADDRESS_REGEX = r"\b(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\b"
# Regular expression used for recognition of generic "your ip has been blocked" messages
BLOCKED_IP_REGEX = r"(?i)(\A|\b)ip\b.*\b(banned|blocked|block list|firewall)"
# Dumping characters used in GROUP_CONCAT MySQL technique
CONCAT_ROW_DELIMITER = ','
CONCAT_VALUE_DELIMITER = '|'
# Coefficient used for a time-based query delay checking (must be >= 7)
TIME_STDEV_COEFF = 7
# Minimum response time that can be even considered as delayed (not a complete requirement)
MIN_VALID_DELAYED_RESPONSE = 0.5
# Standard deviation after which a warning message should be displayed about connection lags
WARN_TIME_STDEV = 0.5
# Minimum length of usable union injected response (quick defense against substr fields)
UNION_MIN_RESPONSE_CHARS = 10
# Coefficient used for a union-based number of columns checking (must be >= 7)
UNION_STDEV_COEFF = 7
# Length of queue for candidates for time delay adjustment
TIME_DELAY_CANDIDATES = 3
# Default value for HTTP Accept header
HTTP_ACCEPT_HEADER_VALUE = "*/*"
# Default value for HTTP Accept-Encoding header
HTTP_ACCEPT_ENCODING_HEADER_VALUE = "gzip,deflate"
# Default timeout for running commands over backdoor
BACKDOOR_RUN_CMD_TIMEOUT = 5
# Number of seconds to wait for thread finalization at program end
THREAD_FINALIZATION_TIMEOUT = 1
# Maximum number of techniques used in inject.py/getValue() per one value
MAX_TECHNIQUES_PER_VALUE = 2
# In case of missing piece of partial union dump, buffered array must be flushed after certain size
MAX_BUFFERED_PARTIAL_UNION_LENGTH = 1024
# Suffix used for naming meta databases in DBMS(es) without explicit database name
METADB_SUFFIX = "_masterdb"
# Number of times to retry the pushValue during the exceptions (e.g. KeyboardInterrupt)
PUSH_VALUE_EXCEPTION_RETRY_COUNT = 3
# Minimum time response set needed for time-comparison based on standard deviation
MIN_TIME_RESPONSES = 30
# Minimum comparison ratio set needed for searching valid union column number based on standard deviation
MIN_UNION_RESPONSES = 5
# After these number of blanks at the end inference should stop (just in case)
INFERENCE_BLANK_BREAK = 10
# Use this replacement character for cases when inference is not able to retrieve the proper character value
INFERENCE_UNKNOWN_CHAR = '?'
# Character used for operation "greater" in inference
INFERENCE_GREATER_CHAR = ">"
# Character used for operation "greater or equal" in inference
INFERENCE_GREATER_EQUALS_CHAR = ">="
# Character used for operation "equals" in inference
INFERENCE_EQUALS_CHAR = "="
# Character used for operation "not-equals" in inference
INFERENCE_NOT_EQUALS_CHAR = "!="
# String used for representation of unknown DBMS
UNKNOWN_DBMS = "Unknown"
# String used for representation of unknown DBMS version
UNKNOWN_DBMS_VERSION = "Unknown"
# Dynamicity boundary length used in dynamicity removal engine
DYNAMICITY_BOUNDARY_LENGTH = 20
# Dummy user prefix used in dictionary attack
DUMMY_USER_PREFIX = "__dummy__"
# Reference: http://en.wikipedia.org/wiki/ISO/IEC_8859-1
DEFAULT_PAGE_ENCODING = "iso-8859-1"
try:
unicode(DEFAULT_PAGE_ENCODING, DEFAULT_PAGE_ENCODING)
except LookupError:
DEFAULT_PAGE_ENCODING = "utf8"
# URL used in dummy runs
DUMMY_URL = "http://foo/bar?id=1"
# System variables
IS_WIN = subprocess.mswindows
# The name of the operating system dependent module imported. The following names have currently been registered: 'posix', 'nt', 'mac', 'os2', 'ce', 'java', 'riscos'
PLATFORM = os.name
PYVERSION = sys.version.split()[0]
# DBMS system databases
MSSQL_SYSTEM_DBS = ("Northwind", "master", "model", "msdb", "pubs", "tempdb")
MYSQL_SYSTEM_DBS = ("information_schema", "mysql", "performance_schema")
PGSQL_SYSTEM_DBS = ("information_schema", "pg_catalog", "pg_toast", "pgagent")
ORACLE_SYSTEM_DBS = ('ANONYMOUS', 'APEX_030200', 'APEX_PUBLIC_USER', 'APPQOSSYS', 'BI', 'CTXSYS', 'DBSNMP', 'DIP', 'EXFSYS', 'FLOWS_%', 'FLOWS_FILES', 'HR', 'IX', 'LBACSYS', 'MDDATA', 'MDSYS', 'MGMT_VIEW', 'OC', 'OE', 'OLAPSYS', 'ORACLE_OCM', 'ORDDATA', 'ORDPLUGINS', 'ORDSYS', 'OUTLN', 'OWBSYS', 'PM', 'SCOTT', 'SH', 'SI_INFORMTN_SCHEMA', 'SPATIAL_CSW_ADMIN_USR', 'SPATIAL_WFS_ADMIN_USR', 'SYS', 'SYSMAN', 'SYSTEM', 'WKPROXY', 'WKSYS', 'WK_TEST', 'WMSYS', 'XDB', 'XS$NULL')
SQLITE_SYSTEM_DBS = ("sqlite_master", "sqlite_temp_master")
ACCESS_SYSTEM_DBS = ("MSysAccessObjects", "MSysACEs", "MSysObjects", "MSysQueries", "MSysRelationships", "MSysAccessStorage", "MSysAccessXML", "MSysModules", "MSysModules2")
FIREBIRD_SYSTEM_DBS = ("RDB$BACKUP_HISTORY", "RDB$CHARACTER_SETS", "RDB$CHECK_CONSTRAINTS", "RDB$COLLATIONS", "RDB$DATABASE", "RDB$DEPENDENCIES", "RDB$EXCEPTIONS", "RDB$FIELDS", "RDB$FIELD_DIMENSIONS", " RDB$FILES", "RDB$FILTERS", "RDB$FORMATS", "RDB$FUNCTIONS", "RDB$FUNCTION_ARGUMENTS", "RDB$GENERATORS", "RDB$INDEX_SEGMENTS", "RDB$INDICES", "RDB$LOG_FILES", "RDB$PAGES", "RDB$PROCEDURES", "RDB$PROCEDURE_PARAMETERS", "RDB$REF_CONSTRAINTS", "RDB$RELATIONS", "RDB$RELATION_CONSTRAINTS", "RDB$RELATION_FIELDS", "RDB$ROLES", "RDB$SECURITY_CLASSES", "RDB$TRANSACTIONS", "RDB$TRIGGERS", "RDB$TRIGGER_MESSAGES", "RDB$TYPES", "RDB$USER_PRIVILEGES", "RDB$VIEW_RELATIONS")
MAXDB_SYSTEM_DBS = ("SYSINFO", "DOMAIN")
SYBASE_SYSTEM_DBS = ("master", "model", "sybsystemdb", "sybsystemprocs")
DB2_SYSTEM_DBS = ("NULLID", "SQLJ", "SYSCAT", "SYSFUN", "SYSIBM", "SYSIBMADM", "SYSIBMINTERNAL", "SYSIBMTS", "SYSPROC", "SYSPUBLIC", "SYSSTAT", "SYSTOOLS")
HSQLDB_SYSTEM_DBS = ("INFORMATION_SCHEMA", "SYSTEM_LOB")
H2_SYSTEM_DBS = ("INFORMATION_SCHEMA")
INFORMIX_SYSTEM_DBS = ("sysmaster", "sysutils", "sysuser", "sysadmin")
MSSQL_ALIASES = ("microsoft sql server", "mssqlserver", "mssql", "ms")
MYSQL_ALIASES = ("mysql", "my", "mariadb", "maria")
PGSQL_ALIASES = ("postgresql", "postgres", "pgsql", "psql", "pg")
ORACLE_ALIASES = ("oracle", "orcl", "ora", "or")
SQLITE_ALIASES = ("sqlite", "sqlite3")
ACCESS_ALIASES = ("msaccess", "access", "jet", "microsoft access")
FIREBIRD_ALIASES = ("firebird", "mozilla firebird", "interbase", "ibase", "fb")
MAXDB_ALIASES = ("maxdb", "sap maxdb", "sap db")
SYBASE_ALIASES = ("sybase", "sybase sql server")
DB2_ALIASES = ("db2", "ibm db2", "ibmdb2")
HSQLDB_ALIASES = ("hsql", "hsqldb", "hs", "hypersql")
H2_ALIASES = ("h2",)
INFORMIX_ALIASES = ("informix", "ibm informix", "ibminformix")
DBMS_DIRECTORY_DICT = dict((getattr(DBMS, _), getattr(DBMS_DIRECTORY_NAME, _)) for _ in dir(DBMS) if not _.startswith("_"))
SUPPORTED_DBMS = MSSQL_ALIASES + MYSQL_ALIASES + PGSQL_ALIASES + ORACLE_ALIASES + SQLITE_ALIASES + ACCESS_ALIASES + FIREBIRD_ALIASES + MAXDB_ALIASES + SYBASE_ALIASES + DB2_ALIASES + HSQLDB_ALIASES + H2_ALIASES + INFORMIX_ALIASES
SUPPORTED_OS = ("linux", "windows")
DBMS_ALIASES = ((DBMS.MSSQL, MSSQL_ALIASES), (DBMS.MYSQL, MYSQL_ALIASES), (DBMS.PGSQL, PGSQL_ALIASES), (DBMS.ORACLE, ORACLE_ALIASES), (DBMS.SQLITE, SQLITE_ALIASES), (DBMS.ACCESS, ACCESS_ALIASES), (DBMS.FIREBIRD, FIREBIRD_ALIASES), (DBMS.MAXDB, MAXDB_ALIASES), (DBMS.SYBASE, SYBASE_ALIASES), (DBMS.DB2, DB2_ALIASES), (DBMS.HSQLDB, HSQLDB_ALIASES), (DBMS.H2, H2_ALIASES), (DBMS.INFORMIX, INFORMIX_ALIASES))
USER_AGENT_ALIASES = ("ua", "useragent", "user-agent")
REFERER_ALIASES = ("ref", "referer", "referrer")
HOST_ALIASES = ("host",)
H2_DEFAULT_SCHEMA = HSQLDB_DEFAULT_SCHEMA = "PUBLIC"
# Names that can't be used to name files on Windows OS
WINDOWS_RESERVED_NAMES = ("CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9")
# Items displayed in basic help (-h) output
BASIC_HELP_ITEMS = (
"url",
"googleDork",
"data",
"cookie",
"randomAgent",
"proxy",
"testParameter",
"dbms",
"level",
"risk",
"tech",
"getAll",
"getBanner",
"getCurrentUser",
"getCurrentDb",
"getPasswordHashes",
"getTables",
"getColumns",
"getSchema",
"dumpTable",
"dumpAll",
"db",
"tbl",
"col",
"osShell",
"osPwn",
"batch",
"checkTor",
"flushSession",
"tor",
"sqlmapShell",
"wizard",
)
# Tags used for value replacements inside shell scripts
SHELL_WRITABLE_DIR_TAG = "%WRITABLE_DIR%"
SHELL_RUNCMD_EXE_TAG = "%RUNCMD_EXE%"
# String representation for NULL value
NULL = "NULL"
# String representation for blank ('') value
BLANK = "<blank>"
# String representation for current database
CURRENT_DB = "CD"
# Name of SQLite file used for storing session data
SESSION_SQLITE_FILE = "session.sqlite"
# Regular expressions used for finding file paths in error messages
FILE_PATH_REGEXES = (r"<b>(?P<result>[^<>]+?)</b> on line \d+", r"in (?P<result>[^<>'\"]+?)['\"]? on line \d+", r"(?:[>(\[\s])(?P<result>[A-Za-z]:[\\/][\w. \\/-]*)", r"(?:[>(\[\s])(?P<result>/\w[/\w.~-]+)", r"href=['\"]file://(?P<result>/[^'\"]+)")
# Regular expressions used for parsing error messages (--parse-errors)
ERROR_PARSING_REGEXES = (
r"\[Microsoft\]\[ODBC SQL Server Driver\]\[SQL Server\](?P<result>[^<]+)",
r"<b>[^<]*(fatal|error|warning|exception)[^<]*</b>:?\s*(?P<result>.+?)<br\s*/?\s*>",
r"(?m)^\s*(fatal|error|warning|exception):?\s*(?P<result>[^\n]+?)$",
r"(?P<result>[^\n>]*SQL Syntax[^\n<]+)",
r"<li>Error Type:<br>(?P<result>.+?)</li>",
r"CDbCommand (?P<result>[^<>\n]*SQL[^<>\n]+)",
r"error '[0-9a-f]{8}'((<[^>]+>)|\s)+(?P<result>[^<>]+)",
r"\[[^\n\]]+(ODBC|JDBC)[^\n\]]+\](\[[^\]]+\])?(?P<result>[^\n]+(in query expression|\(SQL| at /[^ ]+pdo)[^\n<]+)"
)
# Regular expression used for parsing charset info from meta html headers
META_CHARSET_REGEX = r'(?si)<head>.*<meta[^>]+charset="?(?P<result>[^"> ]+).*</head>'
# Regular expression used for parsing refresh info from meta html headers
META_REFRESH_REGEX = r'(?si)<head>(?!.*?<noscript.*?</head).*?<meta http-equiv="?refresh"?[^>]+content="?[^">]+url=["\']?(?P<result>[^\'">]+).*</head>'
# Regular expression used for parsing empty fields in tested form data
EMPTY_FORM_FIELDS_REGEX = r'(&|\A)(?P<result>[^=]+=(&|\Z))'
# Reference: http://www.cs.ru.nl/bachelorscripties/2010/Martin_Devillers___0437999___Analyzing_password_strength.pdf
COMMON_PASSWORD_SUFFIXES = ("1", "123", "2", "12", "3", "13", "7", "11", "5", "22", "23", "01", "4", "07", "21", "14", "10", "06", "08", "8", "15", "69", "16", "6", "18")
# Reference: http://www.the-interweb.com/serendipity/index.php?/archives/94-A-brief-analysis-of-40,000-leaked-MySpace-passwords.html
COMMON_PASSWORD_SUFFIXES += ("!", ".", "*", "!!", "?", ";", "..", "!!!", ", ", "@")
# Splitter used between requests in WebScarab log files
WEBSCARAB_SPLITTER = "sts in BURP log files
BURP_REQUEST_REGEX = r"={10,}\s+[^=]+={10,}\s(.+?)\s={10,}"
# Regex used for parsing XML Burp saved history items
BURP_XML_HISTORY_REGEX = r'<port>(\d+)</port>.+?<request base64="true"><!\[CDATA\[([^]]+)'
# Encoding used for Unicode data
UNICODE_ENCODING = "utf8"
# Reference: http://www.w3.org/Protocols/HTTP/Object_Headers.html#uri
URI_HTTP_HEADER = "URI"
# Uri format which could be injectable (e.g. www.site.com/id82)
URI_INJECTABLE_REGEX = r"//[^/]*/([^\.*?]+)\Z"
# Regex used for masking sensitive data
SENSITIVE_DATA_REGEX = r"(\s|=)(?P<result>[^\s=]*%s[^\s]*)\s"
# Options to explicitly mask in anonymous (unhandled exception) reports (along with anything carrying the <hostname> inside)
SENSITIVE_OPTIONS = ("hostname", "answers", "data", "dnsDomain", "googleDork", "authCred", "proxyCred", "tbl", "db", "col", "user", "cookie", "proxy", "fileRead", "fileWrite", "fileDest", "testParameter", "authCred")
# Maximum number of threads (avoiding connection issues and/or DoS)
MAX_NUMBER_OF_THREADS = 10
# Minimum range between minimum and maximum of statistical set
MIN_STATISTICAL_RANGE = 0.01
# Minimum value for comparison ratio
MIN_RATIO = 0.0
# Maximum value for comparison ratio
MAX_RATIO = 1.0
# Minimum length of sentence for automatic choosing of --string (in case of high matching ratio)
CANDIDATE_SENTENCE_MIN_LENGTH = 10
# Character used for marking injectable position inside provided data
CUSTOM_INJECTION_MARK_CHAR = '*'
# Other way to declare injection position
INJECT_HERE_REGEX = r"(?i)%INJECT[_ ]?HERE%"
# Minimum chunk length used for retrieving data over error based payloads
MIN_ERROR_CHUNK_LENGTH = 8
# Maximum chunk length used for retrieving data over error based payloads
MAX_ERROR_CHUNK_LENGTH = 1024
# Do not escape the injected statement if it contains any of the following SQL keywords
EXCLUDE_UNESCAPE = ("WAITFOR DELAY ", " INTO DUMPFILE ", " INTO OUTFILE ", "CREATE ", "BULK ", "EXEC ", "RECONFIGURE ", "DECLARE ", "'%s'" % CHAR_INFERENCE_MARK)
# Mark used for replacement of reflected values
REFLECTED_VALUE_MARKER = "__REFLECTED_VALUE__"
# Regular expression used for replacing border non-alphanum characters
REFLECTED_BORDER_REGEX = r"[^A-Za-z]+"
# Regular expression used for replacing non-alphanum characters
REFLECTED_REPLACEMENT_REGEX = r"[^\n]{1,100}"
# Maximum time (in seconds) spent per reflective value(s) replacement
REFLECTED_REPLACEMENT_TIMEOUT = 3
# Maximum number of alpha-numerical parts in reflected regex (for speed purposes)
REFLECTED_MAX_REGEX_PARTS = 10
# Chars which can be used as a failsafe values in case of too long URL encoding value
URLENCODE_FAILSAFE_CHARS = "()|,"
# Maximum length of URL encoded value after which failsafe procedure takes away
URLENCODE_CHAR_LIMIT = 2000
# Default schema for Microsoft SQL Server DBMS
DEFAULT_MSSQL_SCHEMA = "dbo"
# Display hash attack info every mod number of items
HASH_MOD_ITEM_DISPLAY = 11
# Display marker for (cracked) empty password
HASH_EMPTY_PASSWORD_MARKER = "<empty>"
# Maximum integer value
MAX_INT = sys.maxint
# Replacement for unsafe characters in dump table filenames
UNSAFE_DUMP_FILEPATH_REPLACEMENT = '_'
# Options that need to be restored in multiple targets run mode
RESTORE_MERGED_OPTIONS = ("col", "db", "dnsDomain", "privEsc", "tbl", "regexp", "string", "textOnly", "threads", "timeSec", "tmpPath", "uChar", "user")
# Parameters to be ignored in detection phase (upper case)
IGNORE_PARAMETERS = ("__VIEWSTATE", "__VIEWSTATEENCRYPTED", "__VIEWSTATEGENERATOR", "__EVENTARGUMENT", "__EVENTTARGET", "__EVENTVALIDATION", "ASPSESSIONID", "ASP.NET_SESSIONID", "JSESSIONID", "CFID", "CFTOKEN")
# Regular expression used for recognition of ASP.NET control parameters
ASP_NET_CONTROL_REGEX = r"(?i)\Actl\d+\$"
# Prefix for Google analytics cookie names
GOOGLE_ANALYTICS_COOKIE_PREFIX = "__UTM"
# Prefix for configuration overriding environment variables
SQLMAP_ENVIRONMENT_PREFIX = "SQLMAP_"
# Turn off resume console info to avoid potential slowdowns
TURN_OFF_RESUME_INFO_LIMIT = 20
# Strftime format for results file used in multiple target mode
RESULTS_FILE_FORMAT = "results-%m%d%Y_%I%M%p.csv"
# Official web page with the list of Python supported codecs
CODECS_LIST_PAGE = "http://docs.python.org/library/codecs.html
# Simple regular expression used to distinguish scalar from multiple-row commands (not sole condition)
SQL_SCALAR_REGEX = r"\A(SELECT(?!\s+DISTINCT\(?))?\s*\w*\("
# Option/switch values to ignore during configuration save
IGNORE_SAVE_OPTIONS = ("saveConfig",)
# IP address of the localhost
LOCALHOST = "127.0.0.1"
# Default SOCKS ports used by Tor
DEFAULT_TOR_SOCKS_PORTS = (9050, 9150)
# Default HTTP ports used by Tor
DEFAULT_TOR_HTTP_PORTS = (8123, 8118)
# Percentage below which comparison engine could have problems
LOW_TEXT_PERCENT = 20
# These MySQL keywords can't go (alone) into versioned comment form (/*!...*/)
# Reference: http://dev.mysql.com/doc/refman/5.1/en/function-resolution.html
IGNORE_SPACE_AFFECTED_KEYWORDS = ("CAST", "COUNT", "EXTRACT", "GROUP_CONCAT", "MAX", "MID", "MIN", "SESSION_USER", "SUBSTR", "SUBSTRING", "SUM", "SYSTEM_USER", "TRIM")
# Keywords expected to be in UPPERCASE in getValue()
GET_VALUE_UPPERCASE_KEYWORDS = ("SELECT", "FROM", "WHERE", "DISTINCT", "COUNT")
LEGAL_DISCLAIMER = "Usage of sqlmap for attacking targets without prior mutual consent is illegal. It is the end user's responsibility to obey all applicable local, state and federal laws. Developers assume no liability and are not responsible for any misuse or damage caused by this program"
# After this number of misses reflective removal mechanism is turned off (for speed up reasons)
REFLECTIVE_MISS_THRESHOLD = 20
# Regular expression used for extracting HTML title
HTML_TITLE_REGEX = r"<title>(?P<result>[^<]+)</title>"
# Table used for Base64 conversion in WordPress hash cracking routine
ITOA64 = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
PICKLE_REDUCE_WHITELIST = (types.BooleanType, types.DictType, types.FloatType, types.IntType, types.ListType, types.LongType, types.NoneType, types.StringType, types.TupleType, types.UnicodeType, types.XRangeType, type(AttribDict()), type(set()))
# Chars used to quickly distinguish if the user provided tainted parameter values
DUMMY_SQL_INJECTION_CHARS = ";()'"
# Simple check against dummy users
DUMMY_USER_INJECTION = r"(?i)[^\w](AND|OR)\s+[^\s]+[=><]|\bUNION\b.+\bSELECT\b|\bSELECT\b.+\bFROM\b|\b(CONCAT|information_schema|SLEEP|DELAY|FLOOR\(RAND)\b"
# Extensions skipped by crawler
CRAWL_EXCLUDE_EXTENSIONS = ("3ds", "3g2", "3gp", "7z", "DS_Store", "a", "aac", "adp", "ai", "aif", "aiff", "apk", "ar", "asf", "au", "avi", "bak", "bin", "bk", "bmp", "btif", "bz2", "cab", "caf", "cgm", "cmx", "cpio", "cr2", "dat", "deb", "djvu", "dll", "dmg", "dmp", "dng", "doc", "docx", "dot", "dotx", "dra", "dsk", "dts", "dtshd", "dvb", "dwg", "dxf", "ear", "ecelp4800", "ecelp7470", "ecelp9600", "egg", "eol", "eot", "epub", "exe", "f4v", "fbs", "fh", "fla", "flac", "fli", "flv", "fpx", "fst", "fvt", "g3", "gif", "gz", "h261", "h263", "h264", "ico", "ief", "image", "img", "ipa", "iso", "jar", "jpeg", "jpg", "jpgv", "jpm", "jxr", "ktx", "lvp", "lz", "lzma", "lzo", "m3u", "m4a", "m4v", "mar", "mdi", "mid", "mj2", "mka", "mkv", "mmr", "mng", "mov", "movie", "mp3", "mp4", "mp4a", "mpeg", "mpg", "mpga", "mxu", "nef", "npx", "o", "oga", "ogg", "ogv", "otf", "pbm", "pcx", "pdf", "pea", "pgm", "pic", "png", "pnm", "ppm", "pps", "ppt", "pptx", "ps", "psd", "pya", "pyc", "pyo", "pyv", "qt", "rar", "ras", "raw", "rgb", "rip", "rlc", "rz", "s3m", "s7z", "scm", "scpt", "sgi", "shar", "sil", "smv", "so", "sub", "swf", "tar", "tbz2", "tga", "tgz", "tif", "tiff", "tlz", "ts", "ttf", "uvh", "uvi", "uvm", "uvp", "uvs", "uvu", "viv", "vob", "war", "wav", "wax", "wbmp", "wdp", "weba", "webm", "webp", "whl", "wm", "wma", "wmv", "wmx", "woff", "woff2", "wvx", "xbm", "xif", "xls", "xlsx", "xlt", "xm", "xpi", "xpm", "xwd", "xz", "z", "zip", "zipx")
# Patterns often seen in HTTP headers containing custom injection marking character '*'
PROBLEMATIC_CUSTOM_INJECTION_PATTERNS = r"(;q=[^;']+)|(\*/\*)"
# Template used for common table existence check
BRUTE_TABLE_EXISTS_TEMPLATE = "EXISTS(SELECT %d FROM %s)"
# Template used for common column existence check
BRUTE_COLUMN_EXISTS_TEMPLATE = "EXISTS(SELECT %s FROM %s)"
# Payload used for checking of existence of IDS/IPS/WAF (dummier the better)
IDS_WAF_CHECK_PAYLOAD = "AND 1=1 UNION ALL SELECT 1,NULL,'<script>alert(\"XSS\")</script>',table_name FROM information_schema.tables WHERE 2>1--/**/; EXEC xp_cmdshell('cat ../../../etc/passwd')
# Data inside shellcodeexec to be filled with random string
SHELLCODEEXEC_RANDOM_STRING_MARKER = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
# Generic address for checking the Internet connection while using switch --check-internet
CHECK_INTERNET_ADDRESS = "https://ipinfo.io/"
# Value to look for in response to CHECK_INTERNET_ADDRESS
CHECK_INTERNET_VALUE = "IP Address Details"
# Vectors used for provoking specific WAF/IPS behavior(s)
WAF_ATTACK_VECTORS = (
"", # NIL
"search=<script>alert(1)</script>",
"file=../../../../etc/passwd",
"q=<invalid>foobar",
"id=1 %s" % IDS_WAF_CHECK_PAYLOAD
)
# Used for status representation in dictionary attack phase
ROTATING_CHARS = ('\\', '|', '|', '/', '-')
# Approximate chunk length (in bytes) used by BigArray objects (only last chunk and cached one are held in memory)
BIGARRAY_CHUNK_SIZE = 1024 * 1024
# Compress level used for storing BigArray chunks to disk (0-9)
BIGARRAY_COMPRESS_LEVEL = 9
# Maximum number of socket pre-connects
SOCKET_PRE_CONNECT_QUEUE_SIZE = 3
# Only console display last n table rows
TRIM_STDOUT_DUMP_SIZE = 256
# Reference: http://stackoverflow.com/a/3168436
# Reference: https://support.microsoft.com/en-us/kb/899149
DUMP_FILE_BUFFER_SIZE = 1024
# Parse response headers only first couple of times
PARSE_HEADERS_LIMIT = 3
# Step used in ORDER BY technique used for finding the right number of columns in UNION query injections
ORDER_BY_STEP = 10
# Maximum number of times for revalidation of a character in inference (as required)
MAX_REVALIDATION_STEPS = 5
# Characters that can be used to split parameter values in provided command line (e.g. in --tamper)
PARAMETER_SPLITTING_REGEX = r"[,|;]"
# Regular expression describing possible union char value (e.g. used in --union-char)
UNION_CHAR_REGEX = r"\A\w+\Z"
# Attribute used for storing original parameter value in special cases (e.g. POST)
UNENCODED_ORIGINAL_VALUE = "original"
# Common column names containing usernames (used for hash cracking in some cases)
COMMON_USER_COLUMNS = ("login", "user", "username", "user_name", "user_login", "benutzername", "benutzer", "utilisateur", "usager", "consommateur", "utente", "utilizzatore", "usufrutuario", "korisnik", "usuario", "consumidor", "client", "cuser")
# Default delimiter in GET/POST values
DEFAULT_GET_POST_DELIMITER = '&'
# Default delimiter in cookie values
DEFAULT_COOKIE_DELIMITER = ';'
# Unix timestamp used for forcing cookie expiration when provided with --load-cookies
FORCE_COOKIE_EXPIRATION_TIME = "9999999999"
# Github OAuth token used for creating an automatic Issue for unhandled exceptions
GITHUB_REPORT_OAUTH_TOKEN = "NTYzYjhmZWJjYzc0Njg2ODJhNzhmNDg1YzM0YzlkYjk3N2JiMzE3Nw=="
# Skip unforced HashDB flush requests below the threshold number of cached items
HASHDB_FLUSH_THRESHOLD = 32
# Number of retries for unsuccessful HashDB flush attempts
HASHDB_FLUSH_RETRIES = 3
# Number of retries for unsuccessful HashDB retrieve attempts
HASHDB_RETRIEVE_RETRIES = 3
# Number of retries for unsuccessful HashDB end transaction attempts
HASHDB_END_TRANSACTION_RETRIES = 3
# Unique milestone value used for forced deprecation of old HashDB values (e.g. when changing hash/pickle mechanism)
HASHDB_MILESTONE_VALUE = "BZzRotigLX" # python -c 'import random, string; print "".join(random.sample(string.ascii_letters, 10))'
# Warn user of possible delay due to large page dump in full UNION query injections
LARGE_OUTPUT_THRESHOLD = 1024 ** 2
# On huge tables there is a considerable slowdown if every row retrieval requires ORDER BY (most noticable in table dumping using ERROR injections)
SLOW_ORDER_COUNT_THRESHOLD = 10000
# Give up on hash recognition if nothing was found in first given number of rows
HASH_RECOGNITION_QUIT_THRESHOLD = 10000
# Maximum number of redirections to any single URL - this is needed because of the state that cookies introduce
MAX_SINGLE_URL_REDIRECTIONS = 4
# Maximum total number of redirections (regardless of URL) - before assuming we're in a loop
MAX_TOTAL_REDIRECTIONS = 10
# Reference: http://www.tcpipguide.com/free/t_DNSLabelsNamesandSyntaxRules.htm
MAX_DNS_LABEL = 63
# Alphabet used for prefix and suffix strings of name resolution requests in DNS technique (excluding hexadecimal chars for not mixing with inner content)
DNS_BOUNDARIES_ALPHABET = re.sub(r"[a-fA-F]", "", string.ascii_letters)
# Alphabet used for heuristic checks
HEURISTIC_CHECK_ALPHABET = ('"', '\'', ')', '(', ',', '.')
BANNER = re.sub(r"\[.\]", lambda _: "[\033[01;41m%s\033[01;49m]" % random.sample(HEURISTIC_CHECK_ALPHABET, 1)[0], BANNER)
DUMMY_NON_SQLI_CHECK_APPENDIX = "<'\">"
# Regular expression used for recognition of file inclusion errors
FI_ERROR_REGEX = r"(?i)[^\n]{0,100}(no such file|failed (to )?open)[^\n]{0,100}"
# Length of prefix and suffix used in non-SQLI heuristic checks
NON_SQLI_CHECK_PREFIX_SUFFIX_LENGTH = 6
# Connection chunk size (processing large responses in chunks to avoid MemoryError crashes - e.g. large table dump in full UNION injections)
MAX_CONNECTION_CHUNK_SIZE = 10 * 1024 * 1024
# Maximum response total page size (trimmed if larger)
MAX_CONNECTION_TOTAL_SIZE = 100 * 1024 * 1024
# For preventing MemoryError exceptions (caused when using large sequences in difflib.SequenceMatcher)
MAX_DIFFLIB_SEQUENCE_LENGTH = 10 * 1024 * 1024
# Maximum (multi-threaded) length of entry in bisection algorithm
MAX_BISECTION_LENGTH = 50 * 1024 * 1024
# Mark used for trimming unnecessary content in large chunks
LARGE_CHUNK_TRIM_MARKER = "__TRIMMED_CONTENT__"
# Generic SQL comment formation
GENERIC_SQL_COMMENT = "-- [RANDSTR]"
# Threshold value for turning back on time auto-adjustment mechanism
VALID_TIME_CHARS_RUN_THRESHOLD = 100
# Check for empty columns only if table is sufficiently large
CHECK_ZERO_COLUMNS_THRESHOLD = 10
# Boldify all logger messages containing these "patterns"
BOLD_PATTERNS = ("' injectable", "provided empty", "leftover chars", "might be injectable", "' is vulnerable", "is not injectable", "does not seem to be", "test failed", "test passed", "live test final result", "test shows that", "the back-end DBMS is", "created Github", "blocked by the target server", "protection is involved", "CAPTCHA", "specific response", "NULL connection is supported")
# Generic www root directory names
GENERIC_DOC_ROOT_DIRECTORY_NAMES = ("htdocs", "httpdocs", "public", "wwwroot", "www")
# Maximum length of a help part containing switch/option name(s)
MAX_HELP_OPTION_LENGTH = 18
# Maximum number of connection retries (to prevent problems with recursion)
MAX_CONNECT_RETRIES = 100
# Strings for detecting formatting errors
FORMAT_EXCEPTION_STRINGS = ("Type mismatch", "Error converting", "Conversion failed", "String or binary data would be truncated", "Failed to convert", "unable to interpret text value", "Input string was not in a correct format", "System.FormatException", "java.lang.NumberFormatException", "ValueError: invalid literal", "TypeMismatchException", "CF_SQL_INTEGER", " for CFSQLTYPE ", "cfqueryparam cfsqltype", "InvalidParamTypeException", "Invalid parameter type", "is not of type numeric", "<cfif Not IsNumeric(", "invalid input syntax for integer", "invalid input syntax for type", "invalid number", "character to number conversion error", "unable to interpret text value", "String was not recognized as a valid", "Convert.ToInt", "cannot be converted to a ", "InvalidDataException")
# Regular expression used for extracting ASP.NET view state values
VIEWSTATE_REGEX = r'(?i)(?P<name>__VIEWSTATE[^"]*)[^>]+value="(?P<result>[^"]+)'
# Regular expression used for extracting ASP.NET event validation values
EVENTVALIDATION_REGEX = r'(?i)(?P<name>__EVENTVALIDATION[^"]*)[^>]+value="(?P<result>[^"]+)'
# Number of rows to generate inside the full union test for limited output (mustn't be too large to prevent payload length problems)
LIMITED_ROWS_TEST_NUMBER = 15
# Default adapter to use for bottle server
RESTAPI_DEFAULT_ADAPTER = "wsgiref"
# Default REST-JSON API server listen address
RESTAPI_DEFAULT_ADDRESS = "127.0.0.1"
# Default REST-JSON API server listen port
RESTAPI_DEFAULT_PORT = 8775
# Format used for representing invalid unicode characters
INVALID_UNICODE_CHAR_FORMAT = r"\x%02x"
# Regular expression for XML POST data
XML_RECOGNITION_REGEX = r"(?s)\A\s*<[^>]+>(.+>)?\s*\Z"
# Regular expression used for detecting JSON POST data
JSON_RECOGNITION_REGEX = r'(?s)\A(\s*\[)*\s*\{.*"[^"]+"\s*:\s*("[^"]*"|\d+|true|false|null).*\}\s*(\]\s*)*\Z'
# Regular expression used for detecting JSON-like POST data
JSON_LIKE_RECOGNITION_REGEX = r"(?s)\A(\s*\[)*\s*\{.*'[^']+'\s*:\s*('[^']+'|\d+).*\}\s*(\]\s*)*\Z"
# Regular expression used for detecting multipart POST data
MULTIPART_RECOGNITION_REGEX = r"(?i)Content-Disposition:[^;]+;\s*name="
# Regular expression used for detecting Array-like POST data
ARRAY_LIKE_RECOGNITION_REGEX = r"(\A|%s)(\w+)\[\]=.+%s\2\[\]=" % (DEFAULT_GET_POST_DELIMITER, DEFAULT_GET_POST_DELIMITER)
# Default POST data content-type
DEFAULT_CONTENT_TYPE = "application/x-www-form-urlencoded; charset=utf-8"
# Raw text POST data content-type
PLAIN_TEXT_CONTENT_TYPE = "text/plain; charset=utf-8"
# Length used while checking for existence of Suhosin-patch (like) protection mechanism
SUHOSIN_MAX_VALUE_LENGTH = 512
# Minimum size of an (binary) entry before it can be considered for dumping to disk
MIN_BINARY_DISK_DUMP_SIZE = 100
# Filenames of payloads xml files (in order of loading)
PAYLOAD_XML_FILES = ("boolean_blind.xml", "error_based.xml", "inline_query.xml", "stacked_queries.xml", "time_blind.xml", "union_query.xml")
# Regular expression used for extracting form tags
FORM_SEARCH_REGEX = r"(?si)<form(?!.+<form).+?</form>"
# Maximum number of lines to save in history file
MAX_HISTORY_LENGTH = 1000
# Minimum field entry length needed for encoded content (hex, base64,...) check
MIN_ENCODED_LEN_CHECK = 5
# Timeout in seconds in which Metasploit remote session has to be initialized
METASPLOIT_SESSION_TIMEOUT = 120
# Reference: http://www.postgresql.org/docs/9.0/static/catalog-pg-largeobject.html
LOBLKSIZE = 2048
# Suffix used to mark variables having keyword names
EVALCODE_KEYWORD_SUFFIX = "_KEYWORD"
# Reference: http://www.cookiecentral.com/faq/#3.5
NETSCAPE_FORMAT_HEADER_COOKIES = "
# Infixes used for automatic recognition of parameters carrying anti-CSRF tokens
CSRF_TOKEN_PARAMETER_INFIXES = ("csrf", "xsrf", "token")
# Prefixes used in brute force search for web server document root
BRUTE_DOC_ROOT_PREFIXES = {
OS.LINUX: ("/var/www", "/usr/local/apache", "/usr/local/apache2", "/usr/local/www/apache22", "/usr/local/www/apache24", "/usr/local/httpd", "/var/www/nginx-default", "/srv/www", "/var/www/%TARGET%", "/var/www/vhosts/%TARGET%", "/var/www/virtual/%TARGET%", "/var/www/clients/vhosts/%TARGET%", "/var/www/clients/virtual/%TARGET%"),
OS.WINDOWS: ("/xampp", "/Program Files/xampp", "/wamp", "/Program Files/wampp", "/apache", "/Program Files/Apache Group/Apache", "/Program Files/Apache Group/Apache2", "/Program Files/Apache Group/Apache2.2", "/Program Files/Apache Group/Apache2.4", "/Inetpub/wwwroot", "/Inetpub/wwwroot/%TARGET%", "/Inetpub/vhosts/%TARGET%")
}
# Suffixes used in brute force search for web server document root
BRUTE_DOC_ROOT_SUFFIXES = ("", "html", "htdocs", "httpdocs", "php", "public", "src", "site", "build", "web", "www", "data", "sites/all", "www/build")
# String used for marking target name inside used brute force web server document root
BRUTE_DOC_ROOT_TARGET_MARK = "%TARGET%"
# Character used as a boundary in kb.chars (preferably less frequent letter)
KB_CHARS_BOUNDARY_CHAR = 'q'
# Letters of lower frequency used in kb.chars
KB_CHARS_LOW_FREQUENCY_ALPHABET = "zqxjkvbp"
# CSS style used in HTML dump format
HTML_DUMP_CSS_STYLE = """<style>
table{
margin:10;
background-color:#FFFFFF;
font-family:verdana;
font-size:12px;
align:center;
}
thead{
font-weight:bold;
background-color:#4F81BD;
color:#FFFFFF;
}
tr:nth-child(even) {
background-color: #D3DFEE
}
td{
font-size:12px;
}
th{
font-size:12px;
}
</style>"""
| true | true |
f711a1707f5dbf4044a9357dbfc2ae691a4ada14 | 5,598 | py | Python | mechamodlearn/rigidbody.py | sisl/mechamodlearn | ed514b5d1193ce546b0221ba9222b0228d6c319a | [
"MIT"
] | 22 | 2019-02-22T00:11:55.000Z | 2022-01-01T22:13:17.000Z | mechamodlearn/rigidbody.py | sisl/mechamodlearn | ed514b5d1193ce546b0221ba9222b0228d6c319a | [
"MIT"
] | 2 | 2019-04-12T05:17:53.000Z | 2021-04-14T21:12:59.000Z | mechamodlearn/rigidbody.py | sisl/mechamodlearn | ed514b5d1193ce546b0221ba9222b0228d6c319a | [
"MIT"
] | 5 | 2019-06-10T14:28:26.000Z | 2019-12-06T02:03:21.000Z | # File: rigidbody.py
import abc
import torch
from mechamodlearn import nn, utils
from mechamodlearn.models import CholeskyMMNet, PotentialNet, GeneralizedForceNet
class AbstractRigidBody:
@property
@abc.abstractmethod
def thetamask(self):
"""Returns theta mask for configuration q.
These should use utils.diffangles to compute differences
"""
@abc.abstractmethod
def mass_matrix(self, q):
"""Return mass matrix for configuration q"""
@abc.abstractmethod
def potential(self, q):
"""Return potential for configuration q"""
@abc.abstractmethod
def generalized_force(self, q, v, u):
"""Return generalized force for configuration q, velocity v, external torque u"""
def kinetic_energy(self, q, v):
mass_matrix = self.mass_matrix(q)
# TODO(jkg): Check if this works correctly for batched
kenergy = 0.5 * (v.unsqueeze(1) @ (mass_matrix @ v.unsqueeze(2))).squeeze(2)
return kenergy
def lagrangian(self, q, v):
""" Returns the Lagrangian of a mechanical system
"""
kenergy = self.kinetic_energy(q, v)
pot = self.potential(q)
lag = kenergy - pot
return lag
def hamiltonian(self, q, v):
""" Returns the Hamiltonian of a mechanical system
"""
kenergy = self.kinetic_energy(q, v)
pot = self.potential(q)
ham = kenergy + pot
return ham
def corriolisforce(self, q, v, mass_matrix=None):
""" Computes the corriolis matrix times v
"""
with torch.enable_grad():
if mass_matrix is None:
mass_matrix = self.mass_matrix(q)
Mv = mass_matrix @ v.unsqueeze(2)
KE = 0.5 * v.unsqueeze(1) @ Mv
Cv_KE = torch.autograd.grad(KE.sum(), q, retain_graph=True, create_graph=True)[0]
gMv = torch.stack([
torch.autograd.grad(Mv[:, i].sum(), q, retain_graph=True, create_graph=True)[0]
for i in range(q.size(1))
], dim=1)
Cv = gMv @ v.unsqueeze(2) - Cv_KE.unsqueeze(2)
return Cv
def corriolis(self, q, v, mass_matrix=None):
""" Computes the corriolis matrix
"""
with torch.enable_grad():
if mass_matrix is None:
mass_matrix = self.mass_matrix(q)
qdim = q.size(1)
B = mass_matrix.size(0)
mass_matrix = mass_matrix.reshape(-1, qdim, qdim)
# TODO vectorize
rows = []
for i in range(qdim):
cols = []
for j in range(qdim):
qgrad = torch.autograd.grad(
torch.sum(mass_matrix[:, i, j]), q, retain_graph=True, create_graph=True)[0]
cols.append(qgrad)
rows.append(torch.stack(cols, dim=1))
dMijk = torch.stack(rows, dim=1)
corriolis = 0.5 * ((dMijk + dMijk.transpose(2, 3) - dMijk.transpose(1, 3)
) @ v.reshape(B, 1, qdim, 1)).squeeze(3)
return corriolis
def gradpotential(self, q):
""" Returns the conservative forces acting on the system
"""
with torch.enable_grad():
pot = self.potential(q)
gvec = torch.autograd.grad(torch.sum(pot), q, retain_graph=True, create_graph=True)[0]
return gvec
def solve_euler_lagrange(self, q, v, u=None):
""" Computes `qddot` (generalized acceleration) by solving
the Euler-Lagrange equation (Eq 7 in the paper)
\qddot = M^-1 (F - Cv - G)
"""
with torch.enable_grad():
with utils.temp_require_grad((q, v)):
M = self.mass_matrix(q)
Cv = self.corriolisforce(q, v, M)
G = self.gradpotential(q)
F = torch.zeros_like(Cv)
if u is not None:
F = self.generalized_force(q, v, u)
# Solve M \qddot = F - Cv - G
qddot = torch.gesv(F - Cv - G.unsqueeze(2), M)[0].squeeze(2)
return qddot
class LearnedRigidBody(AbstractRigidBody, torch.nn.Module):
def __init__(self, qdim: int, udim: int, thetamask: torch.tensor, mass_matrix=None,
potential=None, generalized_force=None, hidden_sizes=None):
"""
Arguments:
- `qdim`:
- `udim`: [int]
- `thetamask`: [torch.Tensor (1, qdim)] 1 if angle, 0 otherwise
- `mass_matrix`: [torch.nn.Module]
- `potential`: [torch.nn.Module]
- `generalized_force`: [torch.nn.Module]
- hidden_sizes: [list]
"""
self._qdim = qdim
self._udim = udim
self._thetamask = thetamask
super().__init__()
if mass_matrix is None:
mass_matrix = CholeskyMMNet(qdim, hidden_sizes=hidden_sizes)
self._mass_matrix = mass_matrix
if potential is None:
potential = PotentialNet(qdim, hidden_sizes=hidden_sizes)
self._potential = potential
if generalized_force is None:
generalized_force = GeneralizedForceNet(qdim, udim, hidden_sizes)
self._generalized_force = generalized_force
def mass_matrix(self, q):
return self._mass_matrix(q)
def potential(self, q):
return self._potential(q)
def generalized_force(self, q, v, u):
return self._generalized_force(q, v, u)
@property
def thetamask(self):
return self._thetamask
def forward(self, q, v, u=None):
return self.solve_euler_lagrange(q, v, u)
| 30.096774 | 100 | 0.576099 |
import abc
import torch
from mechamodlearn import nn, utils
from mechamodlearn.models import CholeskyMMNet, PotentialNet, GeneralizedForceNet
class AbstractRigidBody:
@property
@abc.abstractmethod
def thetamask(self):
@abc.abstractmethod
def mass_matrix(self, q):
@abc.abstractmethod
def potential(self, q):
@abc.abstractmethod
def generalized_force(self, q, v, u):
def kinetic_energy(self, q, v):
mass_matrix = self.mass_matrix(q)
kenergy = 0.5 * (v.unsqueeze(1) @ (mass_matrix @ v.unsqueeze(2))).squeeze(2)
return kenergy
def lagrangian(self, q, v):
kenergy = self.kinetic_energy(q, v)
pot = self.potential(q)
lag = kenergy - pot
return lag
def hamiltonian(self, q, v):
kenergy = self.kinetic_energy(q, v)
pot = self.potential(q)
ham = kenergy + pot
return ham
def corriolisforce(self, q, v, mass_matrix=None):
with torch.enable_grad():
if mass_matrix is None:
mass_matrix = self.mass_matrix(q)
Mv = mass_matrix @ v.unsqueeze(2)
KE = 0.5 * v.unsqueeze(1) @ Mv
Cv_KE = torch.autograd.grad(KE.sum(), q, retain_graph=True, create_graph=True)[0]
gMv = torch.stack([
torch.autograd.grad(Mv[:, i].sum(), q, retain_graph=True, create_graph=True)[0]
for i in range(q.size(1))
], dim=1)
Cv = gMv @ v.unsqueeze(2) - Cv_KE.unsqueeze(2)
return Cv
def corriolis(self, q, v, mass_matrix=None):
with torch.enable_grad():
if mass_matrix is None:
mass_matrix = self.mass_matrix(q)
qdim = q.size(1)
B = mass_matrix.size(0)
mass_matrix = mass_matrix.reshape(-1, qdim, qdim)
rows = []
for i in range(qdim):
cols = []
for j in range(qdim):
qgrad = torch.autograd.grad(
torch.sum(mass_matrix[:, i, j]), q, retain_graph=True, create_graph=True)[0]
cols.append(qgrad)
rows.append(torch.stack(cols, dim=1))
dMijk = torch.stack(rows, dim=1)
corriolis = 0.5 * ((dMijk + dMijk.transpose(2, 3) - dMijk.transpose(1, 3)
) @ v.reshape(B, 1, qdim, 1)).squeeze(3)
return corriolis
def gradpotential(self, q):
with torch.enable_grad():
pot = self.potential(q)
gvec = torch.autograd.grad(torch.sum(pot), q, retain_graph=True, create_graph=True)[0]
return gvec
def solve_euler_lagrange(self, q, v, u=None):
with torch.enable_grad():
with utils.temp_require_grad((q, v)):
M = self.mass_matrix(q)
Cv = self.corriolisforce(q, v, M)
G = self.gradpotential(q)
F = torch.zeros_like(Cv)
if u is not None:
F = self.generalized_force(q, v, u)
qddot = torch.gesv(F - Cv - G.unsqueeze(2), M)[0].squeeze(2)
return qddot
class LearnedRigidBody(AbstractRigidBody, torch.nn.Module):
def __init__(self, qdim: int, udim: int, thetamask: torch.tensor, mass_matrix=None,
potential=None, generalized_force=None, hidden_sizes=None):
self._qdim = qdim
self._udim = udim
self._thetamask = thetamask
super().__init__()
if mass_matrix is None:
mass_matrix = CholeskyMMNet(qdim, hidden_sizes=hidden_sizes)
self._mass_matrix = mass_matrix
if potential is None:
potential = PotentialNet(qdim, hidden_sizes=hidden_sizes)
self._potential = potential
if generalized_force is None:
generalized_force = GeneralizedForceNet(qdim, udim, hidden_sizes)
self._generalized_force = generalized_force
def mass_matrix(self, q):
return self._mass_matrix(q)
def potential(self, q):
return self._potential(q)
def generalized_force(self, q, v, u):
return self._generalized_force(q, v, u)
@property
def thetamask(self):
return self._thetamask
def forward(self, q, v, u=None):
return self.solve_euler_lagrange(q, v, u)
| true | true |
f711a20ba72a69cc04b9b1a002911d1710c385ab | 9,423 | py | Python | fiftyone/core/validation.py | williamcorsel/fiftyone | 22e34e91deb1d2e2fe6316ec81714e0c55015523 | [
"Apache-2.0"
] | null | null | null | fiftyone/core/validation.py | williamcorsel/fiftyone | 22e34e91deb1d2e2fe6316ec81714e0c55015523 | [
"Apache-2.0"
] | null | null | null | fiftyone/core/validation.py | williamcorsel/fiftyone | 22e34e91deb1d2e2fe6316ec81714e0c55015523 | [
"Apache-2.0"
] | null | null | null | """
Validation utilities.
| Copyright 2017-2022, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import eta.core.utils as etau
import fiftyone.core.media as fom
import fiftyone.core.utils as fou
foc = fou.lazy_import("fiftyone.core.collections")
fov = fou.lazy_import("fiftyone.core.video")
def validate_image_sample(sample):
"""Validates that the sample's media is an image.
Args:
sample: a :class:`fiftyone.core.sample.Sample`
Raises:
ValueError if the sample's media is not an image
"""
if sample.media_type != fom.IMAGE:
raise ValueError(
"Expected media type '%s' but found '%s' for filepath '%s'"
% (fom.IMAGE, sample.media_type, sample.filepath)
)
if isinstance(sample, fov.FrameView):
_validate_image(sample.filepath)
def validate_video_sample(sample):
"""Validates that the sample's media is a video.
Args:
sample: a :class:`fiftyone.core.sample.Sample`
Raises:
ValueError if the sample's media is not a video
"""
if sample.media_type != fom.VIDEO:
raise ValueError(
"Expected media type '%s' but found '%s' for filepath '%s'"
% (fom.VIDEO, sample.media_type, sample.filepath)
)
def validate_collection(sample_collection):
"""Validates that the provided samples are a
:class:`fiftyone.core.collections.SampleCollection`.
Args:
sample_collection: a sample collection
Raises:
ValueError: if ``samples`` is not a
:class:`fiftyone.core.collections.SampleCollection`
"""
if not isinstance(sample_collection, foc.SampleCollection):
raise ValueError(
"Expected samples to be a %s; found %s"
% (foc.SampleCollection, sample_collection.__class__)
)
def validate_image_collection(sample_collection):
"""Validates that the provided samples are an image
:class:`fiftyone.core.collections.SampleCollection`.
Args:
sample_collection: a sample collection
Raises:
ValueError: if ``samples`` is not an image
:class:`fiftyone.core.collections.SampleCollection`
"""
validate_collection(sample_collection)
if sample_collection.media_type != fom.IMAGE:
raise ValueError(
"Expected collection to have media type %s; found %s"
% (fom.IMAGE, sample_collection.media_type)
)
if sample_collection._dataset._is_frames:
try:
filepath = sample_collection[:1].values("filepath")[0]
except:
return # empty
_validate_image(filepath)
def validate_video_collection(sample_collection):
"""Validates that the provided samples are a video
:class:`fiftyone.core.collections.SampleCollection`.
Args:
sample_collection: a sample collection
Raises:
ValueError: if ``samples`` is not a video
:class:`fiftyone.core.collections.SampleCollection`
"""
validate_collection(sample_collection)
if sample_collection.media_type != fom.VIDEO:
raise ValueError(
"Expected collection to have media type %s; found %s"
% (fom.VIDEO, sample_collection.media_type)
)
def validate_collection_label_fields(
sample_collection, field_names, allowed_label_types, same_type=False
):
"""Validates that the :class:`fiftyone.core.collections.SampleCollection`
has fields with the specified :class:`fiftyone.core.labels.Label` types.
Args:
sample_collection: a
:class:`fiftyone.core.collections.SampleCollection`
field_names: a field name or iterable of field names
allowed_label_types: a :class:`fiftyone.core.labels.Label` type or
iterable of allowed :class:`fiftyone.core.labels.Label` types
same_type (False): whether to enforce that all fields have same type.
This condition is enforced separately for sample- and frame-level
fields
Raises:
ValueError if the required conditions are not met
"""
validate_collection(sample_collection)
if etau.is_str(field_names):
field_names = [field_names]
if not etau.is_container(allowed_label_types):
allowed_label_types = [allowed_label_types]
if sample_collection.media_type == fom.VIDEO:
sample_fields, frame_fields = fou.split_frame_fields(field_names)
else:
sample_fields = field_names
frame_fields = []
if frame_fields:
_validate_fields(
sample_collection,
frame_fields,
allowed_label_types,
same_type,
frames=True,
)
if sample_fields:
_validate_fields(
sample_collection,
sample_fields,
allowed_label_types,
same_type,
)
def _validate_image(filepath):
actual_media_type = fom.get_media_type(filepath)
if actual_media_type != fom.IMAGE:
raise ValueError(
"The requested operation requires samples whose filepaths are "
"images, but we found a sample whose filepath '%s' has media type "
"'%s'.\n\nIf you are working with a frames view that was created "
"via `to_frames(..., sample_frames=False)`, then re-create the "
"view without `sample_frames=False` so that the necessary images "
"will be available." % (filepath, actual_media_type)
)
def _validate_fields(
sample_collection,
field_names,
allowed_label_types,
same_type,
frames=False,
):
if frames:
schema = sample_collection.get_frame_field_schema()
else:
schema = sample_collection.get_field_schema()
label_types = {}
for field_name in field_names:
if field_name not in schema:
ftype = "frame field" if frames else "sample field"
raise ValueError(
"%s has no %s '%s'"
% (sample_collection.__class__.__name__, ftype, field_name)
)
field = schema[field_name]
try:
label_type = field.document_type
except:
label_type = field
if label_type not in allowed_label_types:
ftype = "Frame field" if frames else "Sample field"
raise ValueError(
"%s '%s' is not a %s instance; found %s"
% (ftype, field_name, allowed_label_types, label_type)
)
label_types[field_name] = label_type
if same_type and len(set(label_types.values())) > 1:
ftype = "Frame fields" if frames else "Sample fields"
raise ValueError(
"%s %s must have the same type; found %s"
% (ftype, field_names, label_types)
)
def get_field(sample, field_name, allowed_types=None, allow_none=True):
"""Gets the given sample field and optionally validates its type and value.
Args:
sample: a :class:`fiftyone.core.sample.Sample`
field_name: the name of the field to get
allowed_types (None): an optional iterable of
:class:`fiftyone.core.labels.Label` types to enforce that the field
value has
allow_none (True): whether to allow the field to be None
Returns:
the field value
Raises:
ValueError if the field does not exist or does not meet the specified
criteria
"""
try:
value = sample[field_name]
except KeyError:
raise ValueError(
"Sample '%s' has no field '%s'" % (sample.id, field_name)
)
if not allow_none and value is None:
raise ValueError(
"Sample '%s' field '%s' is None" % (sample.id, field_name)
)
if allowed_types is not None:
field_type = type(value)
if field_type not in allowed_types:
raise ValueError(
"Sample '%s' field '%s' is not a %s instance; found %s"
% (sample.id, field_name, allowed_types, field_type)
)
return value
def get_fields(
sample, field_names, allowed_types=None, same_type=False, allow_none=True
):
"""Gets the given sample fields and optionally validates their types and
values.
Args:
sample: a :class:`fiftyone.core.sample.Sample`
field_names: an iterable of field names to get
allowed_types (None): an optional iterable of
:class:`fiftyone.core.labels.Label` types to enforce that the
field values have
same_type (False): whether to enforce that all fields have same type
allow_none (True): whether to allow the fields to be None
Returns:
a tuple of field values
Raises:
ValueError if a field does not exist or does not meet the specified
criteria
"""
label_types = {}
values = []
for field_name in field_names:
value = get_field(
sample,
field_name,
allowed_types=allowed_types,
allow_none=allow_none,
)
if same_type:
label_types[field_name] = type(value)
values.append(value)
if same_type and len(set(label_types.values())) > 1:
raise ValueError(
"Sample '%s' fields %s must have the same type; found %s"
% (sample.id, field_names, label_types)
)
return tuple(values)
| 30.299035 | 79 | 0.632495 | import eta.core.utils as etau
import fiftyone.core.media as fom
import fiftyone.core.utils as fou
foc = fou.lazy_import("fiftyone.core.collections")
fov = fou.lazy_import("fiftyone.core.video")
def validate_image_sample(sample):
if sample.media_type != fom.IMAGE:
raise ValueError(
"Expected media type '%s' but found '%s' for filepath '%s'"
% (fom.IMAGE, sample.media_type, sample.filepath)
)
if isinstance(sample, fov.FrameView):
_validate_image(sample.filepath)
def validate_video_sample(sample):
if sample.media_type != fom.VIDEO:
raise ValueError(
"Expected media type '%s' but found '%s' for filepath '%s'"
% (fom.VIDEO, sample.media_type, sample.filepath)
)
def validate_collection(sample_collection):
if not isinstance(sample_collection, foc.SampleCollection):
raise ValueError(
"Expected samples to be a %s; found %s"
% (foc.SampleCollection, sample_collection.__class__)
)
def validate_image_collection(sample_collection):
validate_collection(sample_collection)
if sample_collection.media_type != fom.IMAGE:
raise ValueError(
"Expected collection to have media type %s; found %s"
% (fom.IMAGE, sample_collection.media_type)
)
if sample_collection._dataset._is_frames:
try:
filepath = sample_collection[:1].values("filepath")[0]
except:
return
_validate_image(filepath)
def validate_video_collection(sample_collection):
validate_collection(sample_collection)
if sample_collection.media_type != fom.VIDEO:
raise ValueError(
"Expected collection to have media type %s; found %s"
% (fom.VIDEO, sample_collection.media_type)
)
def validate_collection_label_fields(
sample_collection, field_names, allowed_label_types, same_type=False
):
validate_collection(sample_collection)
if etau.is_str(field_names):
field_names = [field_names]
if not etau.is_container(allowed_label_types):
allowed_label_types = [allowed_label_types]
if sample_collection.media_type == fom.VIDEO:
sample_fields, frame_fields = fou.split_frame_fields(field_names)
else:
sample_fields = field_names
frame_fields = []
if frame_fields:
_validate_fields(
sample_collection,
frame_fields,
allowed_label_types,
same_type,
frames=True,
)
if sample_fields:
_validate_fields(
sample_collection,
sample_fields,
allowed_label_types,
same_type,
)
def _validate_image(filepath):
actual_media_type = fom.get_media_type(filepath)
if actual_media_type != fom.IMAGE:
raise ValueError(
"The requested operation requires samples whose filepaths are "
"images, but we found a sample whose filepath '%s' has media type "
"'%s'.\n\nIf you are working with a frames view that was created "
"via `to_frames(..., sample_frames=False)`, then re-create the "
"view without `sample_frames=False` so that the necessary images "
"will be available." % (filepath, actual_media_type)
)
def _validate_fields(
sample_collection,
field_names,
allowed_label_types,
same_type,
frames=False,
):
if frames:
schema = sample_collection.get_frame_field_schema()
else:
schema = sample_collection.get_field_schema()
label_types = {}
for field_name in field_names:
if field_name not in schema:
ftype = "frame field" if frames else "sample field"
raise ValueError(
"%s has no %s '%s'"
% (sample_collection.__class__.__name__, ftype, field_name)
)
field = schema[field_name]
try:
label_type = field.document_type
except:
label_type = field
if label_type not in allowed_label_types:
ftype = "Frame field" if frames else "Sample field"
raise ValueError(
"%s '%s' is not a %s instance; found %s"
% (ftype, field_name, allowed_label_types, label_type)
)
label_types[field_name] = label_type
if same_type and len(set(label_types.values())) > 1:
ftype = "Frame fields" if frames else "Sample fields"
raise ValueError(
"%s %s must have the same type; found %s"
% (ftype, field_names, label_types)
)
def get_field(sample, field_name, allowed_types=None, allow_none=True):
try:
value = sample[field_name]
except KeyError:
raise ValueError(
"Sample '%s' has no field '%s'" % (sample.id, field_name)
)
if not allow_none and value is None:
raise ValueError(
"Sample '%s' field '%s' is None" % (sample.id, field_name)
)
if allowed_types is not None:
field_type = type(value)
if field_type not in allowed_types:
raise ValueError(
"Sample '%s' field '%s' is not a %s instance; found %s"
% (sample.id, field_name, allowed_types, field_type)
)
return value
def get_fields(
sample, field_names, allowed_types=None, same_type=False, allow_none=True
):
label_types = {}
values = []
for field_name in field_names:
value = get_field(
sample,
field_name,
allowed_types=allowed_types,
allow_none=allow_none,
)
if same_type:
label_types[field_name] = type(value)
values.append(value)
if same_type and len(set(label_types.values())) > 1:
raise ValueError(
"Sample '%s' fields %s must have the same type; found %s"
% (sample.id, field_names, label_types)
)
return tuple(values)
| true | true |
f711a2afb6e9dd431868b09d890d7264429b6980 | 31,867 | py | Python | venv/lib/python3.6/tempfile.py | KorynLA/dwellinglyapi | 752d9aff1b203966df04de38caad2fd3e885a512 | [
"MIT"
] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | venv/lib/python3.6/tempfile.py | KorynLA/dwellinglyapi | 752d9aff1b203966df04de38caad2fd3e885a512 | [
"MIT"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | venv/lib/python3.6/tempfile.py | KorynLA/dwellinglyapi | 752d9aff1b203966df04de38caad2fd3e885a512 | [
"MIT"
] | 1 | 2020-07-17T12:49:49.000Z | 2020-07-17T12:49:49.000Z | """Temporary files.
This module provides generic, low- and high-level interfaces for
creating temporary files and directories. All of the interfaces
provided by this module can be used without fear of race conditions
except for 'mktemp'. 'mktemp' is subject to race conditions and
should not be used; it is provided for backward compatibility only.
The default path names are returned as str. If you supply bytes as
input, all return values will be in bytes. Ex:
>>> tempfile.mkstemp()
(4, '/tmp/tmptpu9nin8')
>>> tempfile.mkdtemp(suffix=b'')
b'/tmp/tmppbi8f0hy'
This module also provides some data items to the user:
TMP_MAX - maximum number of names that will be tried before
giving up.
tempdir - If this is set to a string before the first use of
any routine from this module, it will be considered as
another candidate location to store temporary files.
"""
__all__ = [
"NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
"SpooledTemporaryFile", "TemporaryDirectory",
"mkstemp", "mkdtemp", # low level safe interfaces
"mktemp", # deprecated unsafe interface
"TMP_MAX", "gettempprefix", # constants
"tempdir", "gettempdir",
"gettempprefixb", "gettempdirb",
]
# Imports.
import functools as _functools
import warnings as _warnings
import io as _io
import os as _os
try:
import shutil as _shutil
_rmtree = _shutil.rmtree
except ImportError:
import sys as _sys
import stat as _stat
# version vulnerable to race conditions
def _rmtree_unsafe(path, onerror):
try:
if _os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(_os.path.islink, path, _sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = _os.listdir(path)
except OSError:
onerror(_os.listdir, path, _sys.exc_info())
for name in names:
fullname = _os.path.join(path, name)
try:
mode = _os.lstat(fullname).st_mode
except OSError:
mode = 0
if _stat.S_ISDIR(mode):
_rmtree_unsafe(fullname, onerror)
else:
try:
_os.unlink(fullname)
except OSError:
onerror(_os.unlink, fullname, _sys.exc_info())
try:
_os.rmdir(path)
except OSError:
onerror(_os.rmdir, path, _sys.exc_info())
# Version using fd-based APIs to protect against races
def _rmtree_safe_fd(topfd, path, onerror):
names = []
try:
names = _os.listdir(topfd)
except OSError as err:
err.filename = path
onerror(_os.listdir, path, _sys.exc_info())
for name in names:
fullname = _os.path.join(path, name)
try:
orig_st = _os.stat(name, dir_fd=topfd, follow_symlinks=False)
mode = orig_st.st_mode
except OSError:
mode = 0
if _stat.S_ISDIR(mode):
try:
dirfd = _os.open(name, _os.O_RDONLY, dir_fd=topfd)
except OSError:
onerror(_os.open, fullname, _sys.exc_info())
else:
try:
if _os.path.samestat(orig_st, _os.fstat(dirfd)):
_rmtree_safe_fd(dirfd, fullname, onerror)
try:
_os.rmdir(name, dir_fd=topfd)
except OSError:
onerror(_os.rmdir, fullname, _sys.exc_info())
else:
try:
# This can only happen if someone replaces
# a directory with a symlink after the call to
# stat.S_ISDIR above.
raise OSError("Cannot call rmtree on a symbolic "
"link")
except OSError:
onerror(_os.path.islink, fullname, _sys.exc_info())
finally:
_os.close(dirfd)
else:
try:
_os.unlink(name, dir_fd=topfd)
except OSError:
onerror(_os.unlink, fullname, _sys.exc_info())
_use_fd_functions = ({_os.open, _os.stat, _os.unlink, _os.rmdir} <=
_os.supports_dir_fd and
_os.listdir in _os.supports_fd and
_os.stat in _os.supports_follow_symlinks)
def _rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is platform and implementation dependent;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
if _use_fd_functions:
# While the unsafe rmtree works fine on bytes, the fd based does not.
if isinstance(path, bytes):
path = _os.fsdecode(path)
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
try:
orig_st = _os.lstat(path)
except Exception:
onerror(_os.lstat, path, _sys.exc_info())
return
try:
fd = _os.open(path, _os.O_RDONLY)
except Exception:
onerror(_os.lstat, path, _sys.exc_info())
return
try:
if _os.path.samestat(orig_st, _os.fstat(fd)):
_rmtree_safe_fd(fd, path, onerror)
try:
_os.rmdir(path)
except OSError:
onerror(_os.rmdir, path, _sys.exc_info())
else:
try:
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(_os.path.islink, path, _sys.exc_info())
finally:
_os.close(fd)
else:
return _rmtree_unsafe(path, onerror)
import errno as _errno
from random import Random as _Random
import weakref as _weakref
try:
import _thread
except ImportError:
import _dummy_thread as _thread
_allocate_lock = _thread.allocate_lock
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOFOLLOW'):
_text_openflags |= _os.O_NOFOLLOW
_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
_bin_openflags |= _os.O_BINARY
if hasattr(_os, 'TMP_MAX'):
TMP_MAX = _os.TMP_MAX
else:
TMP_MAX = 10000
# This variable _was_ unused for legacy reasons, see issue 10354.
# But as of 3.5 we actually use it at runtime so changing it would
# have a possibly desirable side effect... But we do not want to support
# that as an API. It is undocumented on purpose. Do not depend on this.
template = "tmp"
# Internal routines.
_once_lock = _allocate_lock()
if hasattr(_os, "lstat"):
_stat = _os.lstat
elif hasattr(_os, "stat"):
_stat = _os.stat
else:
# Fallback. All we need is something that raises OSError if the
# file doesn't exist.
def _stat(fn):
fd = _os.open(fn, _os.O_RDONLY)
_os.close(fd)
def _exists(fn):
try:
_stat(fn)
except OSError:
return False
else:
return True
def _infer_return_type(*args):
"""Look at the type of all args and divine their implied return type."""
return_type = None
for arg in args:
if arg is None:
continue
if isinstance(arg, bytes):
if return_type is str:
raise TypeError("Can't mix bytes and non-bytes in "
"path components.")
return_type = bytes
else:
if return_type is bytes:
raise TypeError("Can't mix bytes and non-bytes in "
"path components.")
return_type = str
if return_type is None:
return str # tempfile APIs return a str by default.
return return_type
def _sanitize_params(prefix, suffix, dir):
    """Common parameter processing for most APIs in this module.

    Fills in defaults for any of prefix/suffix/dir that are None and
    returns (prefix, suffix, dir, output_type) with all three path parts
    converted to a single type (str or bytes) as decided by
    _infer_return_type().
    """
    output_type = _infer_return_type(prefix, suffix, dir)
    if suffix is None:
        suffix = output_type()  # empty str or empty bytes
    if prefix is None:
        prefix = template if output_type is str else _os.fsencode(template)
    if dir is None:
        dir = gettempdir() if output_type is str else gettempdirb()
    return prefix, suffix, dir, output_type
class _RandomNameSequence:
"""An instance of _RandomNameSequence generates an endless
sequence of unpredictable strings which can safely be incorporated
into file names. Each string is eight characters long. Multiple
threads can safely use the same instance at the same time.
_RandomNameSequence is an iterator."""
characters = "abcdefghijklmnopqrstuvwxyz0123456789_"
@property
def rng(self):
cur_pid = _os.getpid()
if cur_pid != getattr(self, '_rng_pid', None):
self._rng = _Random()
self._rng_pid = cur_pid
return self._rng
def __iter__(self):
return self
def __next__(self):
c = self.characters
choose = self.rng.choice
letters = [choose(c) for dummy in range(8)]
return ''.join(letters)
def _candidate_tempdir_list():
"""Generate a list of candidate temporary directories which
_get_default_tempdir will try."""
dirlist = []
# First, try the environment.
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = _os.getenv(envname)
if dirname: dirlist.append(dirname)
# Failing that, try OS-specific locations.
if _os.name == 'nt':
dirlist.extend([ _os.path.expanduser(r'~\AppData\Local\Temp'),
_os.path.expandvars(r'%SYSTEMROOT%\Temp'),
r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
else:
dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
# As a last resort, the current directory.
try:
dirlist.append(_os.getcwd())
except (AttributeError, OSError):
dirlist.append(_os.curdir)
return dirlist
def _get_default_tempdir():
    """Calculate the default directory to use for temporary files.
    This routine should be called exactly once.

    We determine whether or not a candidate temp dir is usable by
    trying to create and write to a file in that directory.  If this
    is successful, the test file is deleted.  To prevent denial of
    service, the name of the test file must be randomized.

    Raises FileNotFoundError if no candidate directory is writable.
    """
    namer = _RandomNameSequence()
    dirlist = _candidate_tempdir_list()
    for dir in dirlist:
        if dir != _os.curdir:
            dir = _os.path.abspath(dir)
        # Try only a few names per directory.
        for seq in range(100):
            name = next(namer)
            filename = _os.path.join(dir, name)
            try:
                # O_EXCL (part of _bin_openflags) guarantees we created the
                # file ourselves, so a name collision raises FileExistsError.
                fd = _os.open(filename, _bin_openflags, 0o600)
                try:
                    try:
                        # closefd=False: the raw fd is closed explicitly below.
                        with _io.open(fd, 'wb', closefd=False) as fp:
                            fp.write(b'blat')
                    finally:
                        _os.close(fd)
                finally:
                    _os.unlink(filename)
                return dir
            except FileExistsError:
                pass
            except PermissionError:
                # This exception is thrown when a directory with the chosen name
                # already exists on windows.
                if (_os.name == 'nt' and _os.path.isdir(dir) and
                    _os.access(dir, _os.W_OK)):
                    continue
                break   # no point trying more names in this directory
            except OSError:
                break   # no point trying more names in this directory
    raise FileNotFoundError(_errno.ENOENT,
                            "No usable temporary directory found in %s" %
                            dirlist)
# Process-wide singleton, created lazily by _get_candidate_names().
_name_sequence = None
def _get_candidate_names():
    """Common setup sequence for all user-callable interfaces.

    Returns the shared _RandomNameSequence, creating it on first use.
    """
    # Double-checked locking: skip the lock entirely once the singleton
    # exists; take _once_lock only for first-time creation.
    global _name_sequence
    if _name_sequence is None:
        _once_lock.acquire()
        try:
            if _name_sequence is None:
                _name_sequence = _RandomNameSequence()
        finally:
            _once_lock.release()
    return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags, output_type):
    """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile.

    Tries up to TMP_MAX random names and returns (fd, absolute_path).
    Raises FileExistsError when no unused name can be found.
    """
    names = _get_candidate_names()
    if output_type is bytes:
        names = map(_os.fsencode, names)
    for seq in range(TMP_MAX):
        name = next(names)
        file = _os.path.join(dir, pre + name + suf)
        try:
            # O_CREAT|O_EXCL in flags makes creation atomic: a name that
            # already exists raises FileExistsError instead of being reused.
            fd = _os.open(file, flags, 0o600)
        except FileExistsError:
            continue    # try again
        except PermissionError:
            # This exception is thrown when a directory with the chosen name
            # already exists on windows.
            if (_os.name == 'nt' and _os.path.isdir(dir) and
                _os.access(dir, _os.W_OK)):
                continue
            else:
                raise
        return (fd, _os.path.abspath(file))
    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary file name found")
# User visible interfaces.
def gettempprefix():
    """The default prefix for temporary files and directories (str).

    This is the bare "tmp" template; the docstring previously said
    "directories" only, but the same prefix feeds mkstemp via
    _sanitize_params as well.
    """
    return template
def gettempprefixb():
    """The default prefix for temporary files and directories, as bytes."""
    return _os.fsencode(gettempprefix())
# Module-level override; resolved lazily by gettempdir() when left as None.
tempdir = None
def gettempdir():
    """Accessor for tempfile.tempdir.

    Computes and caches the default directory on first call; thereafter
    returns the cached (or user-assigned) value.
    """
    # Double-checked locking so the potentially slow probing in
    # _get_default_tempdir() runs at most once per process.
    global tempdir
    if tempdir is None:
        _once_lock.acquire()
        try:
            if tempdir is None:
                tempdir = _get_default_tempdir()
        finally:
            _once_lock.release()
    return tempdir
def gettempdirb():
    """A bytes version of tempfile.gettempdir()."""
    return _os.fsencode(gettempdir())
def mkstemp(suffix=None, prefix=None, dir=None, text=False):
    """User-callable function to create and return a unique temporary
    file.  The return value is a pair (fd, name) where fd is the
    file descriptor returned by os.open, and name is the filename.

    If 'suffix' is not None, the file name will end with that suffix,
    otherwise there will be no suffix.

    If 'prefix' is not None, the file name will begin with that prefix,
    otherwise a default prefix is used.

    If 'dir' is not None, the file will be created in that directory,
    otherwise a default directory is used.

    If 'text' is specified and true, the file is opened in text
    mode.  Else (the default) the file is opened in binary mode.  On
    some operating systems, this makes no difference.

    If any of 'suffix', 'prefix' and 'dir' are not None, they must be the
    same type.  If they are bytes, the returned name will be bytes; str
    otherwise.

    The file is readable and writable only by the creating user ID.
    If the operating system uses permission bits to indicate whether a
    file is executable, the file is executable by no one.  The file
    descriptor is not inherited by children of this process.

    Caller is responsible for deleting the file when done with it.
    """
    prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
    # Both flag sets include O_RDWR|O_CREAT|O_EXCL; they differ only by
    # O_BINARY on platforms that define it.
    if text:
        flags = _text_openflags
    else:
        flags = _bin_openflags
    return _mkstemp_inner(dir, prefix, suffix, flags, output_type)
def mkdtemp(suffix=None, prefix=None, dir=None):
    """User-callable function to create and return a unique temporary
    directory.  The return value is the pathname of the directory.

    Arguments are as for mkstemp, except that the 'text' argument is
    not accepted.

    The directory is readable, writable, and searchable only by the
    creating user.

    Caller is responsible for deleting the directory when done with it.
    """
    prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
    names = _get_candidate_names()
    if output_type is bytes:
        names = map(_os.fsencode, names)
    for seq in range(TMP_MAX):
        name = next(names)
        file = _os.path.join(dir, prefix + name + suffix)
        try:
            # 0o700: owner-only access; mkdir is atomic, so a collision
            # raises FileExistsError rather than reusing the directory.
            _os.mkdir(file, 0o700)
        except FileExistsError:
            continue    # try again
        except PermissionError:
            # This exception is thrown when a directory with the chosen name
            # already exists on windows.
            if (_os.name == 'nt' and _os.path.isdir(dir) and
                _os.access(dir, _os.W_OK)):
                continue
            else:
                raise
        return file
    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary directory name found")
def mktemp(suffix="", prefix=template, dir=None):
    """User-callable function to return a unique temporary file name.  The
    file is not created.

    Arguments are similar to mkstemp, except that the 'text' argument is
    not accepted, and suffix=None, prefix=None and bytes file names are not
    supported.

    THIS FUNCTION IS UNSAFE AND SHOULD NOT BE USED.  The file name may
    refer to a file that did not exist at some point, but by the time
    you get around to creating it, someone else may have beaten you to
    the punch.
    """
##    from warnings import warn as _warn
##    _warn("mktemp is a potential security risk to your program",
##          RuntimeWarning, stacklevel=2)
    if dir is None:
        dir = gettempdir()
    names = _get_candidate_names()
    for seq in range(TMP_MAX):
        name = next(names)
        file = _os.path.join(dir, prefix + name + suffix)
        # Race window: the name may be taken between this check and the
        # caller's create -- that is the documented unsafety above.
        if not _exists(file):
            return file
    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary filename found")
class _TemporaryFileCloser:
    """A separate object allowing proper closing of a temporary file's
    underlying file object, without adding a __del__ method to the
    temporary file."""
    file = None  # Set here since __del__ checks it
    close_called = False
    def __init__(self, file, name, delete=True):
        self.file = file
        self.name = name
        self.delete = delete
    # NT provides delete-on-close as a primitive, so we don't need
    # the wrapper to do anything special.  We still use it so that
    # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
    if _os.name != 'nt':
        # Cache the unlinker so we don't get spurious errors at
        # shutdown when the module-level "os" is None'd out.  Note
        # that this must be referenced as self.unlink, because the
        # name TemporaryFileWrapper may also get None'd out before
        # __del__ is called.
        def close(self, unlink=_os.unlink):
            # Close at most once; unlink afterwards if requested, even
            # when file.close() raises (hence the finally).
            if not self.close_called and self.file is not None:
                self.close_called = True
                try:
                    self.file.close()
                finally:
                    if self.delete:
                        unlink(self.name)
        # Need to ensure the file is deleted on __del__
        def __del__(self):
            self.close()
    else:
        def close(self):
            # On NT the O_TEMPORARY open flag (set by NamedTemporaryFile)
            # handles deletion, so no explicit unlink here.
            if not self.close_called:
                self.close_called = True
                self.file.close()
class _TemporaryFileWrapper:
    """Temporary file wrapper

    This class provides a wrapper around files opened for
    temporary use.  In particular, it seeks to automatically
    remove the file when it is no longer needed.
    """
    def __init__(self, file, name, delete=True):
        self.file = file
        self.name = name
        self.delete = delete
        # The closer carries the actual close+unlink logic so this class
        # needs no __del__ of its own.
        self._closer = _TemporaryFileCloser(file, name, delete)
    def __getattr__(self, name):
        # Attribute lookups are delegated to the underlying file
        # and cached for non-numeric results
        # (i.e. methods are cached, closed and friends are not)
        file = self.__dict__['file']
        a = getattr(file, name)
        if hasattr(a, '__call__'):
            func = a
            @_functools.wraps(func)
            def func_wrapper(*args, **kwargs):
                return func(*args, **kwargs)
            # Avoid closing the file as long as the wrapper is alive,
            # see issue #18879.
            func_wrapper._closer = self._closer
            a = func_wrapper
        if not isinstance(a, int):
            setattr(self, name, a)
        return a
    # The underlying __enter__ method returns the wrong object
    # (self.file) so override it to return the wrapper
    def __enter__(self):
        self.file.__enter__()
        return self
    # Need to trap __exit__ as well to ensure the file gets
    # deleted when used in a with statement
    def __exit__(self, exc, value, tb):
        result = self.file.__exit__(exc, value, tb)
        self.close()
        return result
    def close(self):
        """
        Close the temporary file, possibly deleting it.
        """
        self._closer.close()
    # iter() doesn't use __getattr__ to find the __iter__ method
    def __iter__(self):
        # Don't return iter(self.file), but yield from it to avoid closing
        # file as long as it's being used as iterator (see issue #23700). We
        # can't use 'yield from' here because iter(file) returns the file
        # object itself, which has a close method, and thus the file would get
        # closed when the generator is finalized, due to PEP380 semantics.
        for line in self.file:
            yield line
def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None,
                       newline=None, suffix=None, prefix=None,
                       dir=None, delete=True):
    """Create and return a temporary file.
    Arguments:
    'prefix', 'suffix', 'dir' -- as for mkstemp.
    'mode' -- the mode argument to io.open (default "w+b").
    'buffering' -- the buffer size argument to io.open (default -1).
    'encoding' -- the encoding argument to io.open (default None)
    'newline' -- the newline argument to io.open (default None)
    'delete' -- whether the file is deleted on close (default True).
    The file is created as mkstemp() would do it.

    Returns an object with a file-like interface; the name of the file
    is accessible as its 'name' attribute.  The file will be automatically
    deleted when it is closed unless the 'delete' argument is set to False.
    """
    prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
    flags = _bin_openflags
    # Setting O_TEMPORARY in the flags causes the OS to delete
    # the file when it is closed.  This is only supported by Windows.
    if _os.name == 'nt' and delete:
        flags |= _os.O_TEMPORARY
    (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
    try:
        file = _io.open(fd, mode, buffering=buffering,
                        newline=newline, encoding=encoding)
        return _TemporaryFileWrapper(file, name, delete)
    except BaseException:
        # Anything failing after the fd exists must not leak the on-disk
        # file or the descriptor.
        _os.unlink(name)
        _os.close(fd)
        raise
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
    # On non-POSIX and Cygwin systems, assume that we cannot unlink a file
    # while it is open.
    TemporaryFile = NamedTemporaryFile
else:
    # Is the O_TMPFILE flag available and does it work?
    # The flag is set to False if os.open(dir, os.O_TMPFILE) raises an
    # IsADirectoryError exception
    _O_TMPFILE_WORKS = hasattr(_os, 'O_TMPFILE')
    def TemporaryFile(mode='w+b', buffering=-1, encoding=None,
                      newline=None, suffix=None, prefix=None,
                      dir=None):
        """Create and return a temporary file.
        Arguments:
        'prefix', 'suffix', 'dir' -- as for mkstemp.
        'mode' -- the mode argument to io.open (default "w+b").
        'buffering' -- the buffer size argument to io.open (default -1).
        'encoding' -- the encoding argument to io.open (default None)
        'newline' -- the newline argument to io.open (default None)
        The file is created as mkstemp() would do it.

        Returns an object with a file-like interface.  The file has no
        name, and will cease to exist when it is closed.
        """
        global _O_TMPFILE_WORKS
        prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
        flags = _bin_openflags
        if _O_TMPFILE_WORKS:
            try:
                # O_TMPFILE creates an anonymous file directly in 'dir';
                # O_CREAT must be cleared for the combination to be valid.
                flags2 = (flags | _os.O_TMPFILE) & ~_os.O_CREAT
                fd = _os.open(dir, flags2, 0o600)
            except IsADirectoryError:
                # Linux kernel older than 3.11 ignores the O_TMPFILE flag:
                # O_TMPFILE is read as O_DIRECTORY. Trying to open a directory
                # with O_RDWR|O_DIRECTORY fails with IsADirectoryError, a
                # directory cannot be open to write. Set flag to False to not
                # try again.
                _O_TMPFILE_WORKS = False
            except OSError:
                # The filesystem of the directory does not support O_TMPFILE.
                # For example, OSError(95, 'Operation not supported').
                #
                # On Linux kernel older than 3.11, trying to open a regular
                # file (or a symbolic link to a regular file) with O_TMPFILE
                # fails with NotADirectoryError, because O_TMPFILE is read as
                # O_DIRECTORY.
                pass
            else:
                try:
                    return _io.open(fd, mode, buffering=buffering,
                                    newline=newline, encoding=encoding)
                except:
                    _os.close(fd)
                    raise
            # Fallback to _mkstemp_inner().
        (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
        try:
            # Unlink immediately: the file lives on only through the open fd,
            # giving the same anonymous semantics as O_TMPFILE.
            _os.unlink(name)
            return _io.open(fd, mode, buffering=buffering,
                            newline=newline, encoding=encoding)
        except:
            _os.close(fd)
            raise
class SpooledTemporaryFile:
    """Temporary file wrapper, specialized to switch from BytesIO
    or StringIO to a real file when it exceeds a certain size or
    when a fileno is needed.
    """
    _rolled = False
    def __init__(self, max_size=0, mode='w+b', buffering=-1,
                 encoding=None, newline=None,
                 suffix=None, prefix=None, dir=None):
        if 'b' in mode:
            self._file = _io.BytesIO()
        else:
            # Setting newline="\n" avoids newline translation;
            # this is important because otherwise on Windows we'd
            # get double newline translation upon rollover().
            self._file = _io.StringIO(newline="\n")
        self._max_size = max_size
        self._rolled = False
        # Kept only until rollover(); these become the TemporaryFile args.
        self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering,
                                   'suffix': suffix, 'prefix': prefix,
                                   'encoding': encoding, 'newline': newline,
                                   'dir': dir}
    def _check(self, file):
        # Called after every write; spills to disk once max_size is exceeded
        # (max_size == 0 means never spill automatically).
        if self._rolled: return
        max_size = self._max_size
        if max_size and file.tell() > max_size:
            self.rollover()
    def rollover(self):
        # Move the in-memory contents into a real TemporaryFile, preserving
        # the current seek position.  Idempotent.
        if self._rolled: return
        file = self._file
        newfile = self._file = TemporaryFile(**self._TemporaryFileArgs)
        del self._TemporaryFileArgs
        newfile.write(file.getvalue())
        newfile.seek(file.tell(), 0)
        self._rolled = True
    # The method caching trick from NamedTemporaryFile
    # won't work here, because _file may change from a
    # BytesIO/StringIO instance to a real file.  So we list
    # all the methods directly.
    # Context management protocol
    def __enter__(self):
        if self._file.closed:
            raise ValueError("Cannot enter context with closed file")
        return self
    def __exit__(self, exc, value, tb):
        self._file.close()
    # file protocol
    def __iter__(self):
        return self._file.__iter__()
    def close(self):
        self._file.close()
    @property
    def closed(self):
        return self._file.closed
    @property
    def encoding(self):
        try:
            return self._file.encoding
        except AttributeError:
            # BytesIO/StringIO have no .encoding; before rollover fall back
            # to the value the caller asked for (text mode only).
            if 'b' in self._TemporaryFileArgs['mode']:
                raise
            return self._TemporaryFileArgs['encoding']
    def fileno(self):
        # A real OS-level fd requires a real file, so force the rollover.
        self.rollover()
        return self._file.fileno()
    def flush(self):
        self._file.flush()
    def isatty(self):
        return self._file.isatty()
    @property
    def mode(self):
        try:
            return self._file.mode
        except AttributeError:
            return self._TemporaryFileArgs['mode']
    @property
    def name(self):
        try:
            return self._file.name
        except AttributeError:
            return None
    @property
    def newlines(self):
        try:
            return self._file.newlines
        except AttributeError:
            if 'b' in self._TemporaryFileArgs['mode']:
                raise
            return self._TemporaryFileArgs['newline']
    def read(self, *args):
        return self._file.read(*args)
    def readline(self, *args):
        return self._file.readline(*args)
    def readlines(self, *args):
        return self._file.readlines(*args)
    def seek(self, *args):
        # NOTE(review): unlike later CPython versions, this does not return
        # the new position -- confirm before relying on the return value.
        self._file.seek(*args)
    @property
    def softspace(self):
        # NOTE(review): file objects in Python 3 have no .softspace; this
        # looks like a Python 2 leftover and would raise AttributeError.
        return self._file.softspace
    def tell(self):
        return self._file.tell()
    def truncate(self, size=None):
        if size is None:
            self._file.truncate()
        else:
            if size > self._max_size:
                self.rollover()
            self._file.truncate(size)
    def write(self, s):
        file = self._file
        rv = file.write(s)
        self._check(file)
        return rv
    def writelines(self, iterable):
        file = self._file
        rv = file.writelines(iterable)
        self._check(file)
        return rv
class TemporaryDirectory(object):
    """Create and return a temporary directory.  This has the same
    behavior as mkdtemp but can be used as a context manager.  For
    example:

        with TemporaryDirectory() as tmpdir:
            ...

    Upon exiting the context, the directory and everything contained
    in it are removed.
    """
    def __init__(self, suffix=None, prefix=None, dir=None):
        self.name = mkdtemp(suffix, prefix, dir)
        # weakref.finalize guarantees cleanup (with a ResourceWarning) even
        # if the user never calls cleanup() or uses the context manager.
        self._finalizer = _weakref.finalize(
            self, self._cleanup, self.name,
            warn_message="Implicitly cleaning up {!r}".format(self))
    @classmethod
    def _cleanup(cls, name, warn_message):
        # Runs only via the finalizer, i.e. when the object was garbage
        # collected without an explicit cleanup().
        _rmtree(name)
        _warnings.warn(warn_message, ResourceWarning)
    def __repr__(self):
        return "<{} {!r}>".format(self.__class__.__name__, self.name)
    def __enter__(self):
        return self.name
    def __exit__(self, exc, value, tb):
        self.cleanup()
    def cleanup(self):
        # detach() returns the callback args only the first time, making
        # cleanup() idempotent and suppressing the finalizer's warning.
        if self._finalizer.detach():
            _rmtree(self.name)
| 33.438615 | 80 | 0.600151 |
__all__ = [
"NamedTemporaryFile", "TemporaryFile",
"SpooledTemporaryFile", "TemporaryDirectory",
"mkstemp", "mkdtemp",
"mktemp",
"TMP_MAX", "gettempprefix",
"tempdir", "gettempdir",
"gettempprefixb", "gettempdirb",
]
import functools as _functools
import warnings as _warnings
import io as _io
import os as _os
try:
import shutil as _shutil
_rmtree = _shutil.rmtree
except ImportError:
import sys as _sys
import stat as _stat
    def _rmtree_unsafe(path, onerror):
        """Recursively delete *path* using path names only (race-prone).

        Every OSError is routed through onerror(func, path, exc_info)
        instead of propagating, mirroring shutil.rmtree's contract.
        """
        try:
            # symlinks to directories are forbidden (see shutil.rmtree).
            if _os.path.islink(path):
                raise OSError("Cannot call rmtree on a symbolic link")
        except OSError:
            onerror(_os.path.islink, path, _sys.exc_info())
            return
        names = []
        try:
            names = _os.listdir(path)
        except OSError:
            onerror(_os.listdir, path, _sys.exc_info())
        for name in names:
            fullname = _os.path.join(path, name)
            try:
                mode = _os.lstat(fullname).st_mode
            except OSError:
                mode = 0
            # NOTE(review): _stat here must be the 'stat' module imported
            # above, but the module later rebinds _stat = _os.lstat -- at
            # call time S_ISDIR would not resolve.  Verify which binding
            # is live when this fallback is actually exercised.
            if _stat.S_ISDIR(mode):
                _rmtree_unsafe(fullname, onerror)
            else:
                try:
                    _os.unlink(fullname)
                except OSError:
                    onerror(_os.unlink, fullname, _sys.exc_info())
        try:
            _os.rmdir(path)
        except OSError:
            onerror(_os.rmdir, path, _sys.exc_info())
# Version using fd-based APIs to protect against races
    def _rmtree_safe_fd(topfd, path, onerror):
        """Recursively delete the tree under the open directory fd *topfd*.

        Uses dir_fd-relative operations plus the stat/open/fstat comparison
        to defend against symlink races; *path* is only used for error
        reporting.  Errors go through onerror(func, path, exc_info).
        """
        names = []
        try:
            names = _os.listdir(topfd)
        except OSError as err:
            # listdir(fd) reports no filename; patch it in for the caller.
            err.filename = path
            onerror(_os.listdir, path, _sys.exc_info())
        for name in names:
            fullname = _os.path.join(path, name)
            try:
                orig_st = _os.stat(name, dir_fd=topfd, follow_symlinks=False)
                mode = orig_st.st_mode
            except OSError:
                mode = 0
            if _stat.S_ISDIR(mode):
                try:
                    dirfd = _os.open(name, _os.O_RDONLY, dir_fd=topfd)
                except OSError:
                    onerror(_os.open, fullname, _sys.exc_info())
                else:
                    try:
                        # fstat(dirfd) must match the earlier lstat result;
                        # otherwise the entry was swapped for a symlink.
                        if _os.path.samestat(orig_st, _os.fstat(dirfd)):
                            _rmtree_safe_fd(dirfd, fullname, onerror)
                            try:
                                _os.rmdir(name, dir_fd=topfd)
                            except OSError:
                                onerror(_os.rmdir, fullname, _sys.exc_info())
                        else:
                            try:
                                # This can only happen if someone replaces
                                # a directory with a symlink after the call to
                                # stat.S_ISDIR above.
                                raise OSError("Cannot call rmtree on a symbolic "
                                              "link")
                            except OSError:
                                onerror(_os.path.islink, fullname, _sys.exc_info())
                    finally:
                        _os.close(dirfd)
            else:
                try:
                    _os.unlink(name, dir_fd=topfd)
                except OSError:
                    onerror(_os.unlink, fullname, _sys.exc_info())
_use_fd_functions = ({_os.open, _os.stat, _os.unlink, _os.rmdir} <=
_os.supports_dir_fd and
_os.listdir in _os.supports_fd and
_os.stat in _os.supports_follow_symlinks)
    def _rmtree(path, ignore_errors=False, onerror=None):
        """Recursively delete a directory tree.

        If ignore_errors is set, errors are ignored; otherwise, if onerror
        is set, it is called to handle the error with arguments (func,
        path, exc_info) where func is platform and implementation dependent;
        path is the argument to that function that caused it to fail; and
        exc_info is a tuple returned by sys.exc_info().  If ignore_errors
        is false and onerror is None, an exception is raised.
        """
        if ignore_errors:
            def onerror(*args):
                pass
        elif onerror is None:
            def onerror(*args):
                raise
        if _use_fd_functions:
            # While the unsafe rmtree works fine on bytes, the fd based does not.
            if isinstance(path, bytes):
                path = _os.fsdecode(path)
            # Note: To guard against symlink races, we use the standard
            # lstat()/open()/fstat() trick.
            try:
                orig_st = _os.lstat(path)
            except Exception:
                onerror(_os.lstat, path, _sys.exc_info())
                return
            try:
                fd = _os.open(path, _os.O_RDONLY)
            except Exception:
                # NOTE(review): reports _os.lstat as the failing function
                # even though _os.open raised -- matches the historical
                # shutil.rmtree quirk, kept byte-identical here.
                onerror(_os.lstat, path, _sys.exc_info())
                return
            try:
                if _os.path.samestat(orig_st, _os.fstat(fd)):
                    _rmtree_safe_fd(fd, path, onerror)
                    try:
                        _os.rmdir(path)
                    except OSError:
                        onerror(_os.rmdir, path, _sys.exc_info())
                else:
                    try:
                        # symlinks to directories are forbidden, see bug #1669
                        raise OSError("Cannot call rmtree on a symbolic link")
                    except OSError:
                        onerror(_os.path.islink, path, _sys.exc_info())
            finally:
                _os.close(fd)
        else:
            return _rmtree_unsafe(path, onerror)
import errno as _errno
from random import Random as _Random
import weakref as _weakref
try:
import _thread
except ImportError:
import _dummy_thread as _thread
_allocate_lock = _thread.allocate_lock
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOFOLLOW'):
_text_openflags |= _os.O_NOFOLLOW
_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
_bin_openflags |= _os.O_BINARY
if hasattr(_os, 'TMP_MAX'):
TMP_MAX = _os.TMP_MAX
else:
TMP_MAX = 10000
# This variable _was_ unused for legacy reasons, see issue 10354.
# But as of 3.5 we actually use it at runtime so changing it would
# have a possibly desirable side effect... But we do not want to support
# that as an API. It is undocumented on purpose. Do not depend on this.
template = "tmp"
# Internal routines.
_once_lock = _allocate_lock()
if hasattr(_os, "lstat"):
_stat = _os.lstat
elif hasattr(_os, "stat"):
_stat = _os.stat
else:
# Fallback. All we need is something that raises OSError if the
# file doesn't exist.
def _stat(fn):
fd = _os.open(fn, _os.O_RDONLY)
_os.close(fd)
def _exists(fn):
try:
_stat(fn)
except OSError:
return False
else:
return True
def _infer_return_type(*args):
return_type = None
for arg in args:
if arg is None:
continue
if isinstance(arg, bytes):
if return_type is str:
raise TypeError("Can't mix bytes and non-bytes in "
"path components.")
return_type = bytes
else:
if return_type is bytes:
raise TypeError("Can't mix bytes and non-bytes in "
"path components.")
return_type = str
if return_type is None:
return str
return return_type
def _sanitize_params(prefix, suffix, dir):
output_type = _infer_return_type(prefix, suffix, dir)
if suffix is None:
suffix = output_type()
if prefix is None:
if output_type is str:
prefix = template
else:
prefix = _os.fsencode(template)
if dir is None:
if output_type is str:
dir = gettempdir()
else:
dir = gettempdirb()
return prefix, suffix, dir, output_type
class _RandomNameSequence:
characters = "abcdefghijklmnopqrstuvwxyz0123456789_"
@property
def rng(self):
cur_pid = _os.getpid()
if cur_pid != getattr(self, '_rng_pid', None):
self._rng = _Random()
self._rng_pid = cur_pid
return self._rng
def __iter__(self):
return self
def __next__(self):
c = self.characters
choose = self.rng.choice
letters = [choose(c) for dummy in range(8)]
return ''.join(letters)
def _candidate_tempdir_list():
dirlist = []
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = _os.getenv(envname)
if dirname: dirlist.append(dirname)
if _os.name == 'nt':
dirlist.extend([ _os.path.expanduser(r'~\AppData\Local\Temp'),
_os.path.expandvars(r'%SYSTEMROOT%\Temp'),
r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
else:
dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
try:
dirlist.append(_os.getcwd())
except (AttributeError, OSError):
dirlist.append(_os.curdir)
return dirlist
def _get_default_tempdir():
namer = _RandomNameSequence()
dirlist = _candidate_tempdir_list()
for dir in dirlist:
if dir != _os.curdir:
dir = _os.path.abspath(dir)
for seq in range(100):
name = next(namer)
filename = _os.path.join(dir, name)
try:
fd = _os.open(filename, _bin_openflags, 0o600)
try:
try:
with _io.open(fd, 'wb', closefd=False) as fp:
fp.write(b'blat')
finally:
_os.close(fd)
finally:
_os.unlink(filename)
return dir
except FileExistsError:
pass
except PermissionError:
if (_os.name == 'nt' and _os.path.isdir(dir) and
_os.access(dir, _os.W_OK)):
continue
break
except OSError:
break
raise FileNotFoundError(_errno.ENOENT,
"No usable temporary directory found in %s" %
dirlist)
_name_sequence = None
def _get_candidate_names():
global _name_sequence
if _name_sequence is None:
_once_lock.acquire()
try:
if _name_sequence is None:
_name_sequence = _RandomNameSequence()
finally:
_once_lock.release()
return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags, output_type):
names = _get_candidate_names()
if output_type is bytes:
names = map(_os.fsencode, names)
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, pre + name + suf)
try:
fd = _os.open(file, flags, 0o600)
except FileExistsError:
continue
except PermissionError:
if (_os.name == 'nt' and _os.path.isdir(dir) and
_os.access(dir, _os.W_OK)):
continue
else:
raise
return (fd, _os.path.abspath(file))
raise FileExistsError(_errno.EEXIST,
"No usable temporary file name found")
def gettempprefix():
return template
def gettempprefixb():
return _os.fsencode(gettempprefix())
tempdir = None
def gettempdir():
global tempdir
if tempdir is None:
_once_lock.acquire()
try:
if tempdir is None:
tempdir = _get_default_tempdir()
finally:
_once_lock.release()
return tempdir
def gettempdirb():
return _os.fsencode(gettempdir())
def mkstemp(suffix=None, prefix=None, dir=None, text=False):
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
if text:
flags = _text_openflags
else:
flags = _bin_openflags
return _mkstemp_inner(dir, prefix, suffix, flags, output_type)
def mkdtemp(suffix=None, prefix=None, dir=None):
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
names = _get_candidate_names()
if output_type is bytes:
names = map(_os.fsencode, names)
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, prefix + name + suffix)
try:
_os.mkdir(file, 0o700)
except FileExistsError:
continue
except PermissionError:
if (_os.name == 'nt' and _os.path.isdir(dir) and
_os.access(dir, _os.W_OK)):
continue
else:
raise
return file
raise FileExistsError(_errno.EEXIST,
"No usable temporary directory name found")
def mktemp(suffix="", prefix=template, dir=None):
    """User-callable function to return a unique temporary file name.  The
    file is not created.

    THIS FUNCTION IS UNSAFE AND SHOULD NOT BE USED: the returned name may
    be taken by another process before the caller creates the file.

    Bug fix: this copy of the function had lost its setup and candidate
    loop, so 'dir' was never defaulted and 'name' was referenced without
    ever being bound (NameError on every call).  Restored from the intact
    definition of mktemp elsewhere in this file.
    """
    if dir is None:
        dir = gettempdir()
    names = _get_candidate_names()
    for seq in range(TMP_MAX):
        name = next(names)
        file = _os.path.join(dir, prefix + name + suffix)
        if not _exists(file):
            return file
    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary filename found")
class _TemporaryFileCloser:
file = None
close_called = False
def __init__(self, file, name, delete=True):
self.file = file
self.name = name
self.delete = delete
# the wrapper to do anything special. We still use it so that
# file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
if _os.name != 'nt':
# Cache the unlinker so we don't get spurious errors at
# that this must be referenced as self.unlink, because the
# name TemporaryFileWrapper may also get None'd out before
def close(self, unlink=_os.unlink):
if not self.close_called and self.file is not None:
self.close_called = True
try:
self.file.close()
finally:
if self.delete:
unlink(self.name)
def __del__(self):
self.close()
else:
def close(self):
if not self.close_called:
self.close_called = True
self.file.close()
class _TemporaryFileWrapper:
def __init__(self, file, name, delete=True):
self.file = file
self.name = name
self.delete = delete
self._closer = _TemporaryFileCloser(file, name, delete)
def __getattr__(self, name):
file = self.__dict__['file']
a = getattr(file, name)
if hasattr(a, '__call__'):
func = a
@_functools.wraps(func)
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
func_wrapper._closer = self._closer
a = func_wrapper
if not isinstance(a, int):
setattr(self, name, a)
return a
def __enter__(self):
self.file.__enter__()
return self
def __exit__(self, exc, value, tb):
result = self.file.__exit__(exc, value, tb)
self.close()
return result
def close(self):
self._closer.close()
def __iter__(self):
# Don't return iter(self.file), but yield from it to avoid closing
# can't use 'yield from' here because iter(file) returns the file
for line in self.file:
yield line
def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None,
newline=None, suffix=None, prefix=None,
dir=None, delete=True):
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
flags = _bin_openflags
if _os.name == 'nt' and delete:
flags |= _os.O_TEMPORARY
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
try:
file = _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
return _TemporaryFileWrapper(file, name, delete)
except BaseException:
_os.unlink(name)
_os.close(fd)
raise
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
TemporaryFile = NamedTemporaryFile
else:
_O_TMPFILE_WORKS = hasattr(_os, 'O_TMPFILE')
def TemporaryFile(mode='w+b', buffering=-1, encoding=None,
newline=None, suffix=None, prefix=None,
dir=None):
"""Create and return a temporary file.
Arguments:
'prefix', 'suffix', 'dir' -- as for mkstemp.
'mode' -- the mode argument to io.open (default "w+b").
'buffering' -- the buffer size argument to io.open (default -1).
'encoding' -- the encoding argument to io.open (default None)
'newline' -- the newline argument to io.open (default None)
The file is created as mkstemp() would do it.
Returns an object with a file-like interface. The file has no
name, and will cease to exist when it is closed.
"""
global _O_TMPFILE_WORKS
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
flags = _bin_openflags
if _O_TMPFILE_WORKS:
try:
flags2 = (flags | _os.O_TMPFILE) & ~_os.O_CREAT
fd = _os.open(dir, flags2, 0o600)
except IsADirectoryError:
_O_TMPFILE_WORKS = False
except OSError:
pass
else:
try:
return _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
except:
_os.close(fd)
raise
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
try:
_os.unlink(name)
return _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
except:
_os.close(fd)
raise
class SpooledTemporaryFile:
    """Temporary file wrapper, specialized to switch from BytesIO
    or StringIO to a real file when it exceeds a certain size or
    when a fileno is needed.

    Data is buffered in memory until it grows beyond max_size (or
    fileno() is called), at which point the contents are "rolled over"
    into a real temporary file on disk.  Most of the file API simply
    delegates to the current underlying file object.
    """
    # Becomes True once the in-memory buffer has been replaced by a
    # real temporary file.
    _rolled = False

    def __init__(self, max_size=0, mode='w+b', buffering=-1,
                 encoding=None, newline=None,
                 suffix=None, prefix=None, dir=None):
        if 'b' in mode:
            self._file = _io.BytesIO()
        else:
            # Setting newline="\n" here avoids newline translation in the
            # memory buffer, which would otherwise cause a double newline
            # translation upon rollover().
            self._file = _io.StringIO(newline="\n")
        self._max_size = max_size
        self._rolled = False
        # Arguments forwarded to TemporaryFile() when rollover happens.
        self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering,
                                   'suffix': suffix, 'prefix': prefix,
                                   'encoding': encoding, 'newline': newline,
                                   'dir': dir}

    def _check(self, file):
        """Roll over to a real file if the buffer exceeded max_size."""
        if self._rolled: return
        max_size = self._max_size
        if max_size and file.tell() > max_size:
            self.rollover()

    def rollover(self):
        """Move the buffered contents into a real temporary file.

        Preserves both contents and the current file position; a no-op
        if the rollover already happened.
        """
        if self._rolled: return
        file = self._file
        newfile = self._file = TemporaryFile(**self._TemporaryFileArgs)
        del self._TemporaryFileArgs
        newfile.write(file.getvalue())
        newfile.seek(file.tell(), 0)
        self._rolled = True

    # The method caching trick from NamedTemporaryFile
    # won't work here, because _file may change from an in-memory
    # buffer to a real file object at any time.

    def __enter__(self):
        if self._file.closed:
            raise ValueError("Cannot enter context with closed file")
        return self

    def __exit__(self, exc, value, tb):
        # Note: unlike regular files, exiting the context closes the
        # file, so the object cannot be re-entered.
        self._file.close()

    def __iter__(self):
        return self._file.__iter__()

    def close(self):
        self._file.close()

    @property
    def closed(self):
        return self._file.closed

    @property
    def encoding(self):
        try:
            return self._file.encoding
        except AttributeError:
            # A binary buffer genuinely has no encoding; re-raise.
            if 'b' in self._TemporaryFileArgs['mode']:
                raise
            return self._TemporaryFileArgs['encoding']

    def fileno(self):
        # A real OS-level descriptor requires a real file.
        self.rollover()
        return self._file.fileno()

    def flush(self):
        self._file.flush()

    def isatty(self):
        return self._file.isatty()

    @property
    def mode(self):
        try:
            return self._file.mode
        except AttributeError:
            # In-memory buffers have no .mode; report the requested one.
            return self._TemporaryFileArgs['mode']

    @property
    def name(self):
        try:
            return self._file.name
        except AttributeError:
            # No name until rolled over to a real file.
            return None

    @property
    def newlines(self):
        try:
            return self._file.newlines
        except AttributeError:
            if 'b' in self._TemporaryFileArgs['mode']:
                raise
            return self._TemporaryFileArgs['newline']

    def read(self, *args):
        return self._file.read(*args)

    def readline(self, *args):
        return self._file.readline(*args)

    def readlines(self, *args):
        return self._file.readlines(*args)

    def seek(self, *args):
        # Bug fix: IOBase.seek() must return the new absolute position;
        # previously the delegate's return value was discarded.
        return self._file.seek(*args)

    @property
    def softspace(self):
        # NOTE(review): Python 2 relic; _io objects have no softspace,
        # so this raises AttributeError in practice. Kept for
        # compatibility.
        return self._file.softspace

    def tell(self):
        return self._file.tell()

    def truncate(self, size=None):
        """Truncate to at most 'size' bytes and return the new size
        (matching IOBase.truncate)."""
        if size is None:
            return self._file.truncate()
        # Truncating *larger* than max_size may extend the file, so
        # move to a real file first.
        if size > self._max_size:
            self.rollover()
        return self._file.truncate(size)

    def write(self, s):
        file = self._file
        rv = file.write(s)
        self._check(file)
        return rv

    def writelines(self, iterable):
        file = self._file
        rv = file.writelines(iterable)
        self._check(file)
        return rv
class TemporaryDirectory(object):
    """Create and return a temporary directory.  This has the same
    behavior as mkdtemp but can be used as a context manager.  For
    example:

        with TemporaryDirectory() as tmpdir:
            ...

    Upon exiting the context, the directory and everything contained
    in it are removed.
    """

    def __init__(self, suffix=None, prefix=None, dir=None):
        self.name = mkdtemp(suffix, prefix, dir)
        # The finalizer guarantees removal even when cleanup() is never
        # called; in that case it also emits a ResourceWarning.
        message = "Implicitly cleaning up {!r}".format(self)
        self._finalizer = _weakref.finalize(
            self, self._cleanup, self.name, warn_message=message)

    @classmethod
    def _cleanup(cls, name, warn_message):
        # Invoked by the finalizer only: remove the tree and warn that
        # the user forgot to clean up explicitly.
        _rmtree(name)
        _warnings.warn(warn_message, ResourceWarning)

    def __repr__(self):
        return "<{} {!r}>".format(type(self).__name__, self.name)

    def __enter__(self):
        return self.name

    def __exit__(self, exc, value, tb):
        self.cleanup()

    def cleanup(self):
        # detach() disarms the finalizer and is truthy only the first
        # time, so the directory is removed at most once.
        if self._finalizer.detach():
            _rmtree(self.name)
| true | true |
f711a4bb1a6410ceb126fa5124c0048c868bc502 | 958 | py | Python | Records/admin.py | RamonvdW/nhb-apps | 5a9f840bfe066cd964174515c06b806a7b170c69 | [
"BSD-3-Clause-Clear"
] | 1 | 2021-12-22T13:11:12.000Z | 2021-12-22T13:11:12.000Z | Records/admin.py | RamonvdW/nhb-apps | 5a9f840bfe066cd964174515c06b806a7b170c69 | [
"BSD-3-Clause-Clear"
] | 9 | 2020-10-28T07:07:05.000Z | 2021-06-28T20:05:37.000Z | Records/admin.py | RamonvdW/nhb-apps | 5a9f840bfe066cd964174515c06b806a7b170c69 | [
"BSD-3-Clause-Clear"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2019-2021 Ramon van der Winkel.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
from django.contrib import admin
from .models import IndivRecord, BesteIndivRecords
class IndivRecordAdmin(admin.ModelAdmin):
    """ Admin configuration for the Records.IndivRecord model """
    # fields the admin search box matches against
    search_fields = ('naam', 'plaats', 'score', 'volg_nr')
    # sidebar filter options
    list_filter = ('discipline', 'soort_record', 'geslacht', 'leeftijdscategorie', 'materiaalklasse',
                   'is_european_record', 'is_world_record')
    # use a search widget instead of a huge dropdown for the sporter FK
    autocomplete_fields = ('sporter',)
class BesteIndivRecordsAdmin(admin.ModelAdmin):
    """ Admin configuration for the Records.BesteIndivRecords model """
    # sidebar filter options
    list_filter = ('discipline', 'soort_record', 'geslacht', 'leeftijdscategorie', 'materiaalklasse')
# make both models manageable through the Django admin site
admin.site.register(IndivRecord, IndivRecordAdmin)
admin.site.register(BesteIndivRecords, BesteIndivRecordsAdmin)
# end of file
| 28.176471 | 101 | 0.726514 |
from django.contrib import admin
from .models import IndivRecord, BesteIndivRecords
class IndivRecordAdmin(admin.ModelAdmin):
search_fields = ('naam', 'plaats', 'score', 'volg_nr')
list_filter = ('discipline', 'soort_record', 'geslacht', 'leeftijdscategorie', 'materiaalklasse',
'is_european_record', 'is_world_record')
autocomplete_fields = ('sporter',)
class BesteIndivRecordsAdmin(admin.ModelAdmin):
list_filter = ('discipline', 'soort_record', 'geslacht', 'leeftijdscategorie', 'materiaalklasse')
admin.site.register(IndivRecord, IndivRecordAdmin)
admin.site.register(BesteIndivRecords, BesteIndivRecordsAdmin)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.