hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
91194441358a27c33e53ee2da572dc209f82f5aa | 7,173 | py | Python | venv/Lib/site-packages/Token/generated/provider/configuration.py | The-Fragment/FragmentFembot | bca0027b423753eb162590e8fd440a2c1e65d133 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/Token/generated/provider/configuration.py | The-Fragment/FragmentFembot | bca0027b423753eb162590e8fd440a2c1e65d133 | [
"MIT"
] | 5 | 2020-06-06T00:40:42.000Z | 2021-06-10T22:36:12.000Z | venv/Lib/site-packages/Token/generated/provider/configuration.py | The-Fragment/FragmentFembot | bca0027b423753eb162590e8fd440a2c1e65d133 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import base64
import urllib3
try:
import httplib
except ImportError:
# for python3
import http.client as httplib
import sys
import logging
from six import iteritems
def singleton(cls, *args, **kw):
    """Class decorator: every call to the decorated class yields one shared instance.

    Constructor arguments are captured at decoration time; the instance is
    created lazily on the first call and cached for all later calls.
    """
    cache = {}

    def _singleton():
        # Build the instance on first use, then keep handing back the same one.
        if cls in cache:
            return cache[cls]
        cache[cls] = cls(*args, **kw)
        return cache[cls]

    return _singleton
@singleton
class Configuration(object):
    """
    Global client configuration: base URL, auth credentials, logging and
    SSL/TLS options. Decorated with @singleton, so every call returns the
    same shared instance.

    NOTE: This class is auto generated by the swagger code generator program.
    Ref: https://github.com/swagger-api/swagger-codegen
    Do not edit the class manually.
    """
    def __init__(self):
        """
        Constructor
        """
        # Default Base url
        self.host = "http://localhost/"
        # Default api client
        self.api_client = None
        # Temp file folder for downloading files
        self.temp_folder_path = None

        # Authentication Settings
        # dict to store API key(s), keyed by auth identifier
        self.api_key = {}
        # dict to store API prefix (e.g. Bearer)
        self.api_key_prefix = {}
        # Username for HTTP basic authentication
        self.username = ""
        # Password for HTTP basic authentication
        self.password = ""

        # Logging Settings
        self.logger = {}
        self.logger["package_logger"] = logging.getLogger("swagger_client")
        self.logger["urllib3_logger"] = logging.getLogger("urllib3")
        # Log format.  Assigning this runs the logger_format setter below,
        # which also builds self.logger_formatter used by the handlers.
        self.logger_format = '%(asctime)s %(levelname)s %(message)s'
        # Log stream handler
        self.logger_stream_handler = None
        # Log file handler
        self.logger_file_handler = None
        # Debug file location.  Assigning None runs the logger_file setter,
        # which installs a StreamHandler on both loggers.
        self.logger_file = None
        # Debug switch.  The debug setter pushes the log level to both loggers.
        self.debug = False

        # SSL/TLS verification
        # Set this to false to skip verifying SSL certificate when calling API from https server.
        self.verify_ssl = True
        # Set this to customize the certificate file to verify the peer.
        self.ssl_ca_cert = None
        # client certificate file
        self.cert_file = None
        # client key file
        self.key_file = None

    @property
    def logger_file(self):
        """
        Gets the logger_file.
        """
        return self.__logger_file

    @logger_file.setter
    def logger_file(self, value):
        """
        Sets the logger_file.

        If the logger_file is None, then add stream handler and remove file handler.
        Otherwise, add file handler and remove stream handler.

        :param value: The logger_file path.
        :type: str
        """
        self.__logger_file = value
        if self.__logger_file:
            # If set logging file,
            # then add file handler and remove stream handler.
            self.logger_file_handler = logging.FileHandler(self.__logger_file)
            self.logger_file_handler.setFormatter(self.logger_formatter)
            for _, logger in iteritems(self.logger):
                logger.addHandler(self.logger_file_handler)
                if self.logger_stream_handler:
                    logger.removeHandler(self.logger_stream_handler)
        else:
            # If not set logging file,
            # then add stream handler and remove file handler.
            self.logger_stream_handler = logging.StreamHandler()
            self.logger_stream_handler.setFormatter(self.logger_formatter)
            for _, logger in iteritems(self.logger):
                logger.addHandler(self.logger_stream_handler)
                if self.logger_file_handler:
                    logger.removeHandler(self.logger_file_handler)

    @property
    def debug(self):
        """
        Gets the debug status.
        """
        return self.__debug

    @debug.setter
    def debug(self, value):
        """
        Sets the debug status.

        :param value: The debug status, True or False.
        :type: bool
        """
        self.__debug = value
        if self.__debug:
            # if debug status is True, turn on debug logging
            for _, logger in iteritems(self.logger):
                logger.setLevel(logging.DEBUG)
            # turn on httplib debug
            httplib.HTTPConnection.debuglevel = 1
        else:
            # if debug status is False, turn off debug logging,
            # setting log level to default `logging.WARNING`
            for _, logger in iteritems(self.logger):
                logger.setLevel(logging.WARNING)
            # turn off httplib debug
            httplib.HTTPConnection.debuglevel = 0

    @property
    def logger_format(self):
        """
        Gets the logger_format.
        """
        return self.__logger_format

    @logger_format.setter
    def logger_format(self, value):
        """
        Sets the logger_format.

        The logger_formatter will be updated when sets logger_format.

        :param value: The format string.
        :type: str
        """
        self.__logger_format = value
        self.logger_formatter = logging.Formatter(self.__logger_format)

    def get_api_key_with_prefix(self, identifier):
        """
        Gets API key (with prefix if set).

        Implicitly returns None when no key is configured for `identifier`.

        :param identifier: The identifier of apiKey.
        :return: The token for api key authentication.
        """
        if self.api_key.get(identifier) and self.api_key_prefix.get(identifier):
            return self.api_key_prefix[identifier] + ' ' + self.api_key[identifier]
        elif self.api_key.get(identifier):
            return self.api_key[identifier]

    def get_basic_auth_token(self):
        """
        Gets HTTP basic authentication header (string).

        :return: The token for basic HTTP authentication.
        """
        return urllib3.util.make_headers(basic_auth=self.username + ':' + self.password)\
            .get('authorization')

    def auth_settings(self):
        """
        Gets Auth Settings dict for api client.

        Empty here: the generated spec declares no auth schemes.

        :return: The Auth Settings information dict.
        """
        return {
        }

    def to_debug_report(self):
        """
        Gets the essential information for debugging.

        :return: The report for debugging.
        """
        return "Python SDK Debug Report:\n"\
               "OS: {env}\n"\
               "Python Version: {pyversion}\n"\
               "Version of the API: 1.0\n"\
               "SDK Package Version: 1.0.0".\
               format(env=sys.platform, pyversion=sys.version)
| 30.918103 | 97 | 0.612714 |
3a9ac568ca69dd1c348fa856f7f190f5456a7c88 | 4,658 | py | Python | examples/box_pivoting/analysis/box_pivoting_bundle.py | lujieyang/irs_lqr | bc9cade6a3bb2fa2d76bdd5fe453030a7b28700f | [
"MIT"
] | 6 | 2021-11-20T19:05:06.000Z | 2022-01-31T00:10:41.000Z | examples/box_pivoting/analysis/box_pivoting_bundle.py | lujieyang/irs_lqr | bc9cade6a3bb2fa2d76bdd5fe453030a7b28700f | [
"MIT"
] | 10 | 2021-07-24T19:50:36.000Z | 2021-11-20T19:06:40.000Z | examples/box_pivoting/analysis/box_pivoting_bundle.py | lujieyang/irs_lqr | bc9cade6a3bb2fa2d76bdd5fe453030a7b28700f | [
"MIT"
] | 1 | 2021-12-15T22:09:31.000Z | 2021-12-15T22:09:31.000Z | import time
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from pydrake.all import PiecewisePolynomial
from quasistatic_simulator.core.quasistatic_simulator import (
QuasistaticSimulator, QuasistaticSimParameters)
from quasistatic_simulator.core.quasistatic_system import (
cpp_params_from_py_params)
from quasistatic_simulator.examples.setup_simulation_diagram import (
create_dict_keyed_by_model_instance_index)
from quasistatic_simulator_py import (QuasistaticSimulatorCpp)
from irs_lqr.quasistatic_dynamics import QuasistaticDynamics
from irs_lqr.irs_lqr_quasistatic import IrsLqrQuasistatic
from box_pivoting_setup import *
#%% sim setup
# NOTE(review): h, gravity, contact_detection_tolerance, the model/SDF paths,
# robot_stiffness_dict and gradient_lstsq_tolerance all come from the star
# import of box_pivoting_setup — confirm against that module.
T = int(round(0.1 / h))  # num of time steps to simulate forward.
duration = T * h  # total simulated time (currently unused below).
sim_params = QuasistaticSimParameters(
    gravity=gravity,
    nd_per_contact=2,
    contact_detection_tolerance=contact_detection_tolerance,
    is_quasi_dynamic=True)

# trajectory and initial conditions.
nq_a = 2  # number of actuated (robot) generalized positions.
z_now = 1.11

robot_name = "hand"
object_name = "box"

# Python-backend quasistatic simulator (with visualization).
q_sim_py = QuasistaticSimulator(
    model_directive_path=model_directive_path,
    robot_stiffness_dict=robot_stiffness_dict,
    object_sdf_paths=object_sdf_dict,
    sim_params=sim_params,
    internal_vis=True)

# construct C++ backend.
sim_params_cpp = cpp_params_from_py_params(sim_params)
sim_params_cpp.gradient_lstsq_tolerance = gradient_lstsq_tolerance

q_sim_cpp = QuasistaticSimulatorCpp(
    model_directive_path=model_directive_path,
    robot_stiffness_str=robot_stiffness_dict,
    object_sdf_paths=object_sdf_dict,
    sim_params=sim_params_cpp)

plant = q_sim_cpp.get_plant()
q_sim_py.get_robot_name_to_model_instance_dict()
idx_a = plant.GetModelInstanceByName(robot_name)   # actuated model instance
idx_u = plant.GetModelInstanceByName(object_name)  # unactuated model instance

#%%
# Sample 1000 input perturbations w ~ N(0, diag([0.001^2, 0.05^2])) and record
# the perturbed next state and one Jacobian entry for each sample.
w = np.random.normal(0, (0.001, 0.05), size=(1000, 2))
x_lst = np.zeros((w.shape[0], 5))
q_dynamics = QuasistaticDynamics(h=h, q_sim_py=q_sim_py, q_sim=q_sim_cpp)
data_lst = []
for i in range(w.shape[0]):
    # Straight-line robot trajectory at constant height z_now.
    qa_knots = np.zeros((2, nq_a))
    qa_knots[0] = [0.0, z_now]
    qa_knots[1] = [0.05, z_now]
    q_robot_traj = PiecewisePolynomial.FirstOrderHold(
        [0, T * h], qa_knots.T)

    q_u0 = np.array([0.0, 0.505, 0.0])  # initial box pose.

    q0_dict_str = {object_name: q_u0,
                   robot_name: qa_knots[0]}

    q0_dict = create_dict_keyed_by_model_instance_index(
        q_sim_py.plant, q_dict_str=q0_dict_str)

    q_a_traj_dict_str = {robot_name: q_robot_traj}  # currently unused.

    dim_x = q_dynamics.dim_x
    dim_u = q_dynamics.dim_u

    #%% try running the dynamics.
    q_dict_traj = [q0_dict]
    # print('--------------------------------')
    x0 = q_dynamics.get_x_from_q_dict(q0_dict)
    u_traj_0 = np.zeros((T, dim_u))
    x = np.copy(x0)

    # Overwrite the trajectory with a shorter 0.02 m push for the actual query.
    qa_knots = np.zeros((2, nq_a))
    qa_knots[0] = [0.0, z_now]
    qa_knots[1] = [0.02, z_now]
    q_robot_traj = PiecewisePolynomial.FirstOrderHold(
        [0, T * h], qa_knots.T)

    # NOTE(review): t = h * i grows with the sample index and quickly exceeds
    # the 0.1 s trajectory horizon — presumably relies on PiecewisePolynomial
    # clamping at the final knot; confirm this is intended.
    t = h * i
    q_cmd_dict = {idx_a: q_robot_traj.value(t + h).ravel()}
    u = q_dynamics.get_u_from_q_cmd_dict(q_cmd_dict)
    # One perturbed dynamics step plus its Jacobian w.r.t. (x, u).
    xp = q_dynamics.dynamics(x, u + w[i], requires_grad=True)
    ABhat = q_dynamics.jacobian_xu(x, u + w[i])
    data_lst.append([(u + w[i])[1], ABhat[1,6]])
    u_traj_0[0] = u
    q_dict_traj = [q0_dict, q_dynamics.get_q_dict_from_x(xp)]
    q_sim_py.animate_system_trajectory(h, q_dict_traj)
    x_lst[i,:] = xp

# Scatter of the sampled Jacobian entry vs. the nominal commanded input.
plt.figure()
data_lst = np.array(data_lst)
plt.scatter(data_lst[:,1], data_lst[:,0], marker='o', color='springgreen')
plt.ylim([1.1-0.02, 1.1+0.1])
plt.xlabel('dx_x_box / du_y_ball')
plt.ylabel('nominal uy')

print(np.mean(x_lst, axis=0)[1])
print(np.std(x_lst, axis=0)[1])

# First-order (gradient-averaged) bundled linearization with 1000 samples.
x = np.copy(x0)
u = q_dynamics.get_u_from_q_cmd_dict(q_cmd_dict)
print(x)
print(u)
ABhat_first_order = \
    q_dynamics.calc_AB_first_order(x, u, 1000, np.array([0.1, 0.1]))

# Zeroth-order (finite-difference) bundled linearization with 1000 samples.
x = np.copy(x0)
u = q_dynamics.get_u_from_q_cmd_dict(q_cmd_dict)
print(x)
print(u)
ABhat_zero_order = \
    q_dynamics.calc_B_zero_order(x, u, 1000, np.array([0.1, 0.1]))

# Side-by-side heatmaps of the two estimates, annotated with cell values.
plt.figure()
ax = plt.subplot(2,1,1)
plt.imshow(ABhat_zero_order)
for i in range(ABhat_zero_order.shape[0]):
    for j in range(ABhat_zero_order.shape[1]):
        text = ax.text(j, i, "{:02f}".format(ABhat_zero_order[i,j]),
                       ha="center", va="center", color="w")
plt.colorbar()
ax = plt.subplot(2,1,2)
plt.imshow(ABhat_first_order)
for i in range(ABhat_zero_order.shape[0]):
    for j in range(ABhat_zero_order.shape[1]):
        text = ax.text(j, i, "{:02f}".format(ABhat_first_order[i,j]),
                       ha="center", va="center", color="w")
plt.colorbar()
plt.show()

#plt.scatter(w[:,0], x_lst[:,3])
#plt.show()
| 28.931677 | 74 | 0.70717 |
4f15b4954713b96ef16eb592aabe4b803f77bd5f | 1,806 | py | Python | rdkit/Chem/ChemUtils/UnitTestAlignDepict.py | kazuyaujihara/rdkit | 06027dcd05674787b61f27ba46ec0d42a6037540 | [
"BSD-3-Clause"
] | 1,609 | 2015-01-05T02:41:13.000Z | 2022-03-30T21:57:24.000Z | rdkit/Chem/ChemUtils/UnitTestAlignDepict.py | kazuyaujihara/rdkit | 06027dcd05674787b61f27ba46ec0d42a6037540 | [
"BSD-3-Clause"
] | 3,412 | 2015-01-06T12:13:33.000Z | 2022-03-31T17:25:41.000Z | rdkit/Chem/ChemUtils/UnitTestAlignDepict.py | bp-kelley/rdkit | e0de7c9622ce73894b1e7d9568532f6d5638058a | [
"BSD-3-Clause"
] | 811 | 2015-01-11T03:33:48.000Z | 2022-03-28T11:57:49.000Z | from contextlib import contextmanager
import sys
import unittest
from rdkit import Chem
from rdkit.Chem.ChemUtils.AlignDepict import initParser, processArgs, AlignDepict
from io import StringIO
class TestCase(unittest.TestCase):
    """Unit tests for rdkit.Chem.ChemUtils.AlignDepict (CLI path and AlignDepict)."""

    def test1(self):
        # End-to-end CLI path: parse args, run processArgs, and check that a
        # mol block (containing 'RDKit' and '2D') lands on the captured stdout
        # with nothing written to stderr.
        parser = initParser()
        with outputRedirect() as (out, err):
            args = parser.parse_args('--smiles CC CCC'.split())
            args.outF = out
            processArgs(args)
        self.assertIn('RDKit', out.getvalue())
        self.assertIn('2D', out.getvalue())
        self.assertEqual(err.getvalue(), '')

    def test_AlignDepict(self):
        # Mismatched molecule / core / pattern combinations must raise ValueError.
        mol = Chem.MolFromSmiles('CNC')
        core = Chem.MolFromSmiles('CC')
        pattern = Chem.MolFromSmarts('CCC')
        self.assertRaises(ValueError, AlignDepict, mol, core, pattern)
        pattern = Chem.MolFromSmarts('CN')
        self.assertRaises(ValueError, AlignDepict, mol, core, pattern)
        pattern = Chem.MolFromSmarts('CC')
        self.assertRaises(ValueError, AlignDepict, mol, core, pattern)
        # NOTE(review): the next two lines repeat the previous assertion verbatim.
        pattern = Chem.MolFromSmarts('CC')
        self.assertRaises(ValueError, AlignDepict, mol, core, pattern)
        # With 2D coordinates computed on the core, a matching molecule aligns.
        mol = Chem.MolFromSmiles('CCC')
        Chem.rdDepictor.Compute2DCoords(core)
        AlignDepict(mol, core, pattern)
        # acceptFailure=True tolerates a molecule that does not match.
        mol = Chem.MolFromSmiles('CNC')
        AlignDepict(mol, core, pattern, acceptFailure=True)
@contextmanager
def outputRedirect():
    """Temporarily replace sys.stdout / sys.stderr with StringIO buffers.

    Yields the pair (stdout_buffer, stderr_buffer); the original streams are
    restored on exit, even if the body raises.
    """
    saved_out, saved_err = sys.stdout, sys.stderr
    out_buf, err_buf = StringIO(), StringIO()
    sys.stdout, sys.stderr = out_buf, err_buf
    try:
        yield (out_buf, err_buf)
    finally:
        # Always put the real streams back.
        sys.stdout, sys.stderr = saved_out, saved_err
if __name__ == '__main__': # pragma: nocover
unittest.main()
| 30.610169 | 81 | 0.652824 |
37d31090a5f741c30ec7a0f4e780f1675faabeaf | 619 | py | Python | accounts/api/urls.py | CodeMonk263/Live-Mart-Backend | 39f6e6b9dcb686a8b02313b289456f30e4b99f1c | [
"MIT"
] | null | null | null | accounts/api/urls.py | CodeMonk263/Live-Mart-Backend | 39f6e6b9dcb686a8b02313b289456f30e4b99f1c | [
"MIT"
] | null | null | null | accounts/api/urls.py | CodeMonk263/Live-Mart-Backend | 39f6e6b9dcb686a8b02313b289456f30e4b99f1c | [
"MIT"
] | null | null | null | from django.conf.urls import url, include
from django.contrib import admin
from rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token
from .views import *
from rest_framework import routers
# DRF router generating list/detail routes for UserView under /users/.
router = routers.DefaultRouter()
router.register('users', UserView)

urlpatterns = [
    # Authentication endpoints (refresh uses rest_framework_jwt).
    url(r'^login/$', AuthenticationAPIView.as_view()),
    url(r'^refresh/$', refresh_jwt_token),
    url(r'^register/$', RegisterAPIView.as_view()),
    # Password management.
    url(r'^resetpassword/$', ChangePasswordView.as_view()),
    url(r'^sendresetmail/$', password_reset_mail, name='password reset mail'),
    # Router-generated endpoints mounted at the root.
    url(r'^', include(router.urls)),
]
| 32.578947 | 78 | 0.738288 |
a8f598a7c8b82a14750f533c4f22b5fa316c1cf4 | 1,023 | py | Python | Practica7/restaurantes/models.py | adrianmorente/PracticasDAI | 0f0a032a9cade87298c2b7bcbad4ca4d6b867b1d | [
"MIT"
] | null | null | null | Practica7/restaurantes/models.py | adrianmorente/PracticasDAI | 0f0a032a9cade87298c2b7bcbad4ca4d6b867b1d | [
"MIT"
] | null | null | null | Practica7/restaurantes/models.py | adrianmorente/PracticasDAI | 0f0a032a9cade87298c2b7bcbad4ca4d6b867b1d | [
"MIT"
] | null | null | null | # restaurantes/models.py
from pymongo import MongoClient
# Connects to MongoDB on the default host/port (localhost:27017).
client = MongoClient()
# `test` database, `restaurants` collection (sample document shown below).
db = client.test
restaurantes = db.restaurants
# EJEMPLO DE RESTAURANTE ALMACENADO EN LA BBDD
#
# {
# "_id" : ObjectId("5a355b461fc7d55cf59f9722"),
# "address" : {
# "building" : "1007",
# "coord" : [
# -73.856077,
# 40.848447
# ],
# "street" : "Morris Park Ave",
# "zipcode" : "10462"
# },
# "borough" : "Bronx",
# "cuisine" : "Bakery",
# "grades" : [
# {
# "date" : ISODate("2014-03-03T00:00:00Z"),
# "grade" : "A",
# "score" : 2
# },
# {
# "date" : ISODate("2013-09-11T00:00:00Z"),
# "grade" : "A",
# "score" : 6
# },
# {
# "date" : ISODate("2013-01-24T00:00:00Z"),
# "grade" : "A",
# "score" : 10
# },
# {
# "date" : ISODate("2011-11-23T00:00:00Z"),
# "grade" : "A",
# "score" : 9
# },
# {
# "date" : ISODate("2011-03-10T00:00:00Z"),
# "grade" : "B",
# "score" : 14
# }
# ],
# "name" : "Morris Park Bake Shop",
# "restaurant_id" : "30075445"
# }
| 18.944444 | 48 | 0.505376 |
4333c24399528d8331694740c253f5b2b4a9c3ba | 7,122 | py | Python | fastai/callback/fp16.py | mone27/fastai | af8dfc07ca3f333f8c1bdbea1803af669a53738f | [
"Apache-2.0"
] | 5 | 2020-08-27T00:52:27.000Z | 2022-03-31T02:46:05.000Z | fastai/callback/fp16.py | mone27/fastai | af8dfc07ca3f333f8c1bdbea1803af669a53738f | [
"Apache-2.0"
] | 22 | 2021-01-07T23:35:00.000Z | 2022-03-20T00:16:40.000Z | fastai/callback/fp16.py | mone27/fastai | af8dfc07ca3f333f8c1bdbea1803af669a53738f | [
"Apache-2.0"
] | 2 | 2021-04-17T03:33:21.000Z | 2022-02-25T19:32:34.000Z | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/18_callback.fp16.ipynb (unless otherwise specified).
__all__ = ['get_master', 'to_master_grads', 'to_model_params', 'test_overflow', 'grad_overflow', 'copy_clone',
'ModelToHalf', 'MixedPrecision', 'NativeMixedPrecision']
# Cell
from ..basics import *
from .progress import *
from torch.cuda.amp import GradScaler, autocast
# Cell
from ..fp16_utils import convert_network, model_grads_to_master_grads, master_params_to_model_params
# Cell
from torch.nn.utils import parameters_to_vector
# Cell
def get_master(opt, flat_master=False):
    "Return (model params, FP32 master copies), one list per parameter group of `opt`."
    # Keep only trainable parameters, preserving the group structure.
    model_params = []
    for group in opt.param_lists:
        model_params.append([p for p in group if p.requires_grad])
    master_params = []
    if flat_master:
        # One flattened FP32 vector per group, with its grad buffer allocated.
        for group in model_params:
            flat = parameters_to_vector([p.data.float() for p in group])
            flat = nn.Parameter(flat, requires_grad=True)
            if flat.grad is None:
                flat.grad = flat.new(*flat.size())
            master_params.append([flat])
    else:
        # One detached FP32 clone per parameter.
        for group in model_params:
            copies = []
            for p in group:
                copies.append(nn.Parameter(p.data.clone().float().detach(), requires_grad=True))
            master_params.append(copies)
    return model_params, master_params
# Cell
def to_master_grads(model_pgs, master_pgs, flat_master=False):
    "Copy gradients from the (FP16) model params onto the FP32 master params, group by group."
    for (model_params,master_params) in zip(model_pgs,master_pgs):
        model_grads_to_master_grads(model_params, master_params, flat_master=flat_master)
# Cell
def to_model_params(model_pgs, master_pgs, flat_master=False)->None:
    "Copy the FP32 master param values back into the (FP16) model params, group by group."
    for (model_params,master_params) in zip(model_pgs,master_pgs):
        master_params_to_model_params(model_params, master_params, flat_master=flat_master)
# Cell
def test_overflow(x):
s = float(x.float().sum())
return (s == float('inf') or s == float('-inf') or s != s)
# Cell
def grad_overflow(pgs):
    "True if any gradient in any parameter group of `pgs` contains inf/NaN."
    return any(
        test_overflow(p.grad.data)
        for pg in pgs
        for p in pg
        if p.grad is not None
    )
# Cell
def copy_clone(d):
    "Shallow-copy dict `d`, replacing tensor values with detached FP32 clones."
    out = {}
    for key, val in d.items():
        if isinstance(val, Tensor):
            val = val.detach().clone().float()
        out[key] = val
    return out
# Cell
def _copy_state(opt, pgs1, pgs2):
    # Point the optimizer at the param groups in pgs2 and migrate any stored
    # per-param state from the pgs1 params to their pgs2 counterparts
    # (cloning tensors to FP32 via copy_clone).
    opt.param_lists = pgs2
    for pg1,pg2 in zip(pgs1, pgs2):
        for p1,p2 in zip(pg1, pg2): opt.state[p2] = copy_clone(opt.state.pop(p1, {}))
# Cell
class ModelToHalf(Callback):
    "Use with MixedPrecision callback (but it needs to run at the very beginning)"
    # Ordered before TrainEvalCallback so the model is already half precision
    # when training starts.
    run_before=TrainEvalCallback
    def before_fit(self): self.learn.model = convert_network(self.model, dtype=torch.float16)
    def after_fit(self): self.learn.model = convert_network(self.model, dtype=torch.float32)
# Cell
@docs
@log_args
class MixedPrecision(Callback):
    "Run training in mixed precision"
    toward_end=True

    def __init__(self, loss_scale=512, flat_master=False, dynamic=True, max_loss_scale=2.**24,
                 div_factor=2., scale_wait=500, clip=None):
        # loss_scale: fixed scale when dynamic=False; with dynamic=True the
        # scale starts at max_loss_scale and is adjusted during training
        # (divided by div_factor on overflow, multiplied back after
        # scale_wait clean batches). clip: optional gradient-norm clip value.
        assert torch.backends.cudnn.enabled, "Mixed precision training requires cudnn."
        self.flat_master,self.dynamic,self.max_loss_scale = flat_master,dynamic,max_loss_scale
        self.div_factor,self.scale_wait,self.clip = div_factor,scale_wait,clip
        self.loss_scale = max_loss_scale if dynamic else loss_scale

    def before_fit(self):
        assert self.dls.device.type == 'cuda', "Mixed-precision training requires a GPU, remove the call `to_fp16`"
        if self.learn.opt is None: self.learn.create_opt()
        self.model_pgs,self.master_pgs = get_master(self.opt, self.flat_master)
        self.old_pgs = self.opt.param_lists
        #Changes the optimizer so that the optimization step is done in FP32.
        _copy_state(self.learn.opt, self.model_pgs, self.master_pgs)
        if self.dynamic: self.count = 0

    def before_batch(self): self.learn.xb = to_half(self.xb)
    def after_pred(self): self.learn.pred = to_float(self.pred)
    def before_backward(self): self.learn.loss *= self.loss_scale

    def after_backward(self):
        self.learn.loss /= self.loss_scale #To record the real loss
        #First, check for an overflow
        if self.dynamic and grad_overflow(self.model_pgs):
            self.loss_scale /= self.div_factor
            self.model.zero_grad()
            raise CancelBatchException() #skip step and zero_grad
        # Copy (scaled) grads to the FP32 master params, then unscale them.
        to_master_grads(self.model_pgs, self.master_pgs, self.flat_master)
        for master_params in self.master_pgs:
            for param in master_params:
                if param.grad is not None: param.grad.div_(self.loss_scale)
        if self.clip is not None:
            for group in self.master_pgs: nn.utils.clip_grad_norm_(group, self.clip)
        # Check if it's been long enough without overflow
        if self.dynamic:
            self.count += 1
            if self.count == self.scale_wait:
                self.count = 0
                self.loss_scale *= self.div_factor

    def after_step(self):
        self.model.zero_grad() #Zero the gradients of the model manually (optimizer disconnected)
        to_model_params(self.model_pgs, self.master_pgs, self.flat_master)

    def after_fit(self):
        # Restore the optimizer's original (FP16) param groups and state.
        if not hasattr(self,'master_pgs'): return
        _copy_state(self.learn.opt, self.master_pgs, self.model_pgs)
        self.learn.opt.param_lists = self.old_pgs
        delattr(self, "master_pgs")
        delattr(self, "model_pgs")
        delattr(self, "old_pgs")

    _docs = dict(before_fit="Put the model in FP16 and prepare the two copies of the parameters",
                 before_batch="Put the input in FP16",
                 after_pred="Put the output back to FP32 so that the loss is computed in FP32",
                 before_backward="Apply loss scaling to avoid gradient underflow",
                 after_backward="Copy the gradients to the master param and undo the loss scaling",
                 after_step="Copy the master params to the model params",
                 after_fit="Put the model back in FP32"
    )
# Cell
@delegates(MixedPrecision.__init__)
@patch
def to_fp16(self:Learner, **kwargs):
    "Enable mixed-precision training on `Learner`; kwargs are forwarded to `MixedPrecision`."
    self.add_cbs([ModelToHalf(), MixedPrecision(**kwargs)])
    return self
# Cell
@patch
def to_fp32(self: Learner):
    "Disable mixed-precision training: remove `ModelToHalf` and `MixedPrecision`."
    self.remove_cbs([ModelToHalf, MixedPrecision])
    return self
# Cell
class NativeMixedPrecision(Callback):
    "Mixed precision training using Pytorch's `autocast` and `GradScaler`"
    @delegates(GradScaler.__init__)
    def __init__(self, **kwargs): self.scaler_kwargs,self.autocast = kwargs,autocast()
    def before_fit(self):
        # Install a fresh GradScaler and reroute the Learner's step/backward
        # through the scaler-aware versions below.
        self.learn.scaler = GradScaler(**self.scaler_kwargs)
        self.learn._step,self.learn._backward = self._step,self._backward
    def before_batch(self): self.autocast.__enter__()
    def after_step(self): self.learn.scaler.update()
    # NOTE(review): __exit__ is invoked without the usual (exc_type, exc, tb)
    # arguments — this relies on torch's autocast accepting *args; confirm.
    def after_loss(self): self.autocast.__exit__()
    def _backward(self): self.scaler.scale(self.loss).backward()
    def _step(self): self.scaler.step(self.opt)
# Cell
@delegates(GradScaler.__init__)
@patch
def to_native_fp16(self:Learner, **kwargs):
    "Enable native (torch.cuda.amp) mixed precision; kwargs are forwarded to `GradScaler`."
    self.add_cb(NativeMixedPrecision(**kwargs))
    return self
# Cell
@patch
def to_native_fp32(self:Learner):
    "Disable native mixed precision: remove the `NativeMixedPrecision` callback."
    self.remove_cb(NativeMixedPrecision)
    # Dataset-row metadata was fused onto this return line in the dump
    # ("| 39.566667 | 136 | 0.694047 |"), making it a SyntaxError; restored.
    return self
b06249073f960b8870c8f80745361cd58a9dcd0d | 2,719 | py | Python | tools/dataset_utils.py | jbutle55/yolov3-tf2 | f7f60c63fb4cb04b91ecd0ae02a16b10ff74f8b0 | [
"MIT"
] | null | null | null | tools/dataset_utils.py | jbutle55/yolov3-tf2 | f7f60c63fb4cb04b91ecd0ae02a16b10ff74f8b0 | [
"MIT"
] | null | null | null | tools/dataset_utils.py | jbutle55/yolov3-tf2 | f7f60c63fb4cb04b91ecd0ae02a16b10ff74f8b0 | [
"MIT"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for creating TFRecord data sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def int64_feature(value):
  """Wraps a single int in a tf.train.Feature holding an Int64List."""
  return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


def int64_list_feature(value):
  """Wraps an iterable of ints in a tf.train.Feature holding an Int64List."""
  return tf.train.Feature(int64_list=tf.train.Int64List(value=value))


def bytes_feature(value):
  """Wraps a single bytes value in a tf.train.Feature holding a BytesList."""
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


def bytes_list_feature(value):
  """Wraps an iterable of bytes values in a tf.train.Feature holding a BytesList."""
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))


def float_list_feature(value):
  """Wraps an iterable of floats in a tf.train.Feature holding a FloatList."""
  return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def read_examples_list(path):
  """Read list of training or validation examples.

  The file is assumed to contain a single example per line where the first
  token in the line is an identifier that allows us to find the image and
  annotation xml for that example.

  For example, the line:
  xyz 3
  would allow us to find files xyz.jpg and xyz.xml (the 3 would be ignored).

  Args:
    path: absolute path to examples list file.

  Returns:
    list of example identifiers (strings).
  """
  # NOTE(review): tf.gfile is the TF1-era file API (TF2 moved it to
  # tf.io.gfile) — confirm the targeted TensorFlow version.
  with tf.gfile.GFile(path) as fid:
    lines = fid.readlines()
  # Keep only the first whitespace-free token of each line.
  return [line.strip().split(' ')[0] for line in lines]
def recursive_parse_xml_to_dict(xml):
  """Recursively parses XML contents to python dict.

  We assume that `object` tags are the only ones that can appear
  multiple times at the same level of a tree.

  Args:
    xml: xml tree obtained by parsing XML file contents using lxml.etree

  Returns:
    Python dictionary holding XML contents.
  """
  # A leaf element (no children) is falsy; map it to its text content.
  if not xml:
    return {xml.tag: xml.text}
  result = {}
  for child in xml:
    child_result = recursive_parse_xml_to_dict(child)
    if child.tag != 'object':
      result[child.tag] = child_result[child.tag]
    else:
      # 'object' tags may repeat at the same level: accumulate them in a list.
      if child.tag not in result:
        result[child.tag] = []
      result[child.tag].append(child_result[child.tag])
  # Dataset-row metadata fused onto this line in the dump
  # ("| 32.759036 | 80 | 0.719382 |") has been removed.
  return {xml.tag: result}
4045c7fb8d04e831984e9b5915bd1ac8642105d5 | 1,919 | py | Python | tests/tools/stub.py | Navan0/pepy | 0cb739912000f7940f11406353a9487600997893 | [
"MIT"
] | 522 | 2018-05-27T20:31:32.000Z | 2022-03-31T04:39:52.000Z | tests/tools/stub.py | Spratiher9/pepy | a94aa60f505291dcbd33be094e13870101eabff0 | [
"MIT"
] | 174 | 2018-06-05T12:58:54.000Z | 2022-03-22T07:39:33.000Z | tests/tools/stub.py | Spratiher9/pepy | a94aa60f505291dcbd33be094e13870101eabff0 | [
"MIT"
] | 37 | 2018-06-13T18:17:38.000Z | 2022-03-21T07:28:36.000Z | import random
from datetime import date, datetime, timedelta
from typing import List
from pepy.domain.model import ProjectName, Downloads, Project, ProjectDownloads
class ProjectNameStub:
    """Test fixture: builds a ProjectName picked at random from known package names."""
    @staticmethod
    def create() -> ProjectName:
        project_names = ["climoji", "pepy", "commandbus", "flask", "behave"]
        return ProjectName(random.choice(project_names))
class DownloadsStub:
    """Test fixture: builds a Downloads value object with random counts."""
    @staticmethod
    def create(min_value: int = 0, max_value: int = 999999) -> Downloads:
        # Two independent random counts in [min_value, max_value] —
        # presumably the two Downloads fields; confirm against
        # pepy.domain.model.Downloads.
        return Downloads(random.randint(min_value, max_value), random.randint(min_value, max_value))
class ProjectStub:
    """Test fixture: builds Project aggregates, randomizing any omitted field."""
    @staticmethod
    def create(name: ProjectName = None, downloads: Downloads = None) -> Project:
        name = name or ProjectNameStub.create()
        downloads = downloads or DownloadsStub.create()
        return Project(name, downloads)

    @staticmethod
    def from_plain_data(name: str = None, downloads: int = None) -> Project:
        """Like create(), but accepts plain str/int instead of domain objects."""
        name = ProjectName(name) if name is not None else ProjectNameStub.create()
        # The same count is used for both Downloads fields here.
        downloads = Downloads(downloads, downloads) if downloads is not None else DownloadsStub.create()
        return ProjectStub.create(name=name, downloads=downloads)
class ProjectDownloadsStub:
    """Test fixture: builds per-day ProjectDownloads records."""
    @staticmethod
    def create(name: ProjectName = None, downloads: Downloads = None, day: date = None) -> ProjectDownloads:
        name = name or ProjectNameStub.create()
        downloads = downloads or DownloadsStub.create()
        day = day or datetime.now().date()
        return ProjectDownloads(name, downloads, day)

    @staticmethod
    def create_consecutive(name: ProjectName = None, first_day: date = None, days: int = 10) -> List[ProjectDownloads]:
        """Return `days` records for the same project, walking back one day at a time from `first_day`."""
        name = name or ProjectNameStub.create()
        first_day = first_day or datetime.now().date()
        return [ProjectDownloadsStub.create(name=name, day=first_day - timedelta(days=i)) for i in range(days)]
| 39.979167 | 119 | 0.706618 |
d7f1f4e5911cd579acedc77767458e9dcd4c7dff | 1,591 | py | Python | CGAL-5.0.3/surface_reconstruction/src/main.py | pranjal-s/cpp17 | 04b5278ff4d754d6e62f955d49bddf6509f86e73 | [
"MIT"
] | null | null | null | CGAL-5.0.3/surface_reconstruction/src/main.py | pranjal-s/cpp17 | 04b5278ff4d754d6e62f955d49bddf6509f86e73 | [
"MIT"
] | null | null | null | CGAL-5.0.3/surface_reconstruction/src/main.py | pranjal-s/cpp17 | 04b5278ff4d754d6e62f955d49bddf6509f86e73 | [
"MIT"
] | null | null | null | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# CGAL-5.0.3 Tutorial
# A simple program to read, visualize and write geometry files with Open3D
# for analysis with CGAL in C++
# Author: Pranjal Singh, pranjal.officemail@gmail.com, Aug 2020
# Source: https://github.com/pranjal-s/cpp17/blob/master/CGAL-5.0.3/open3d-0.9/off.py
"""
CGAL OFF Mesh Visualizer
========================================================
off files (positions and triangles) are produced by CGAL with Complex_2_in_triangulation_3_file_writer.
Open3D
-----------
Open3D is an open-source library that deals with 3D data.
The Open3D frontend exposes a set of carefully selected data structures
and algorithms in both C++ and Python. The backend is highly optimized
and is set up for parallelization.
Note
----
"""
import numpy as np
import open3d as o3d
# Read the CGAL-produced OFF mesh, round-trip it through Open3D's writer,
# dump its geometry, then open an interactive viewer.
print("Testing IO for meshes ...")
offmesh = o3d.io.read_triangle_mesh("data/atoms_CGAL.off")
print(offmesh)
print("Test passed!")
o3d.io.write_triangle_mesh("data/atoms_Open3D.off", offmesh)
print(offmesh)
print('Vertices:')
print(np.asarray(offmesh.vertices))
print('Triangles:')
print(np.asarray(offmesh.triangles))
print("Visualizing mesh ...")
print("Press 'w' to see wireframe (mesh) and 'b' to switch back and front.")
print("Try to render a mesh with normals (exist: " +
      str(offmesh.has_vertex_normals()) + ") and colors (exist: " +
      str(offmesh.has_vertex_colors()) + ")")
# Blocks until the viewer window is closed.
o3d.visualization.draw_geometries([offmesh])
print("Visualization works!")
quit()
| 33.851064 | 103 | 0.711502 |
f3e567068debce547b32063b4a0d1286387949a8 | 4,953 | py | Python | sdk/python/pulumi_azure_native/sql/v20201101preview/get_database_vulnerability_assessment_rule_baseline.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/sql/v20201101preview/get_database_vulnerability_assessment_rule_baseline.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/sql/v20201101preview/get_database_vulnerability_assessment_rule_baseline.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetDatabaseVulnerabilityAssessmentRuleBaselineResult',
'AwaitableGetDatabaseVulnerabilityAssessmentRuleBaselineResult',
'get_database_vulnerability_assessment_rule_baseline',
]
@pulumi.output_type
class GetDatabaseVulnerabilityAssessmentRuleBaselineResult:
    """
    A database vulnerability assessment rule baseline.
    """
    # Auto-generated Pulumi output type (see the "do not edit by hand" file
    # header): the constructor validates the raw values returned by the
    # engine and stores them via pulumi.set; the typed @property getters
    # below read them back with pulumi.get.
    def __init__(__self__, baseline_results=None, id=None, name=None, type=None):
        if baseline_results and not isinstance(baseline_results, list):
            raise TypeError("Expected argument 'baseline_results' to be a list")
        pulumi.set(__self__, "baseline_results", baseline_results)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="baselineResults")
    def baseline_results(self) -> Sequence['outputs.DatabaseVulnerabilityAssessmentRuleBaselineItemResponse']:
        """
        The rule baseline result
        """
        return pulumi.get(self, "baseline_results")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetDatabaseVulnerabilityAssessmentRuleBaselineResult(GetDatabaseVulnerabilityAssessmentRuleBaselineResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes __await__ a generator function, which
        # is what `await` requires; since nothing is ever yielded the await
        # completes immediately with a plain copy of the result fields.
        if False:
            yield self
        return GetDatabaseVulnerabilityAssessmentRuleBaselineResult(
            baseline_results=self.baseline_results,
            id=self.id,
            name=self.name,
            type=self.type)
def get_database_vulnerability_assessment_rule_baseline(baseline_name: Optional[str] = None,
                                                        database_name: Optional[str] = None,
                                                        resource_group_name: Optional[str] = None,
                                                        rule_id: Optional[str] = None,
                                                        server_name: Optional[str] = None,
                                                        vulnerability_assessment_name: Optional[str] = None,
                                                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatabaseVulnerabilityAssessmentRuleBaselineResult:
    """
    A database vulnerability assessment rule baseline.

    :param str baseline_name: The name of the vulnerability assessment rule baseline (default implies a baseline on a database level rule and master for server level rule).
    :param str database_name: The name of the database for which the vulnerability assessment rule baseline is defined.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str rule_id: The vulnerability assessment rule ID.
    :param str server_name: The name of the server.
    :param str vulnerability_assessment_name: The name of the vulnerability assessment.
    """
    # Map Python snake_case parameters to the provider's camelCase arg names.
    __args__ = dict()
    __args__['baselineName'] = baseline_name
    __args__['databaseName'] = database_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['ruleId'] = rule_id
    __args__['serverName'] = server_name
    __args__['vulnerabilityAssessmentName'] = vulnerability_assessment_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Pin the invoke to this SDK's provider version when the caller did not.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:sql/v20201101preview:getDatabaseVulnerabilityAssessmentRuleBaseline', __args__, opts=opts, typ=GetDatabaseVulnerabilityAssessmentRuleBaselineResult).value

    # Re-wrap in the Awaitable subclass so the result can also be awaited.
    return AwaitableGetDatabaseVulnerabilityAssessmentRuleBaselineResult(
        baseline_results=__ret__.baseline_results,
        id=__ret__.id,
        name=__ret__.name,
        type=__ret__.type)
| 41.974576 | 204 | 0.662831 |
791e5172a41d68c5773fec88f1479377e313896a | 6,017 | py | Python | evennia/server/portal/tests.py | zeitkunst/evennia | 1f254b2542fbefe400c114b3d7029522cdcb37b7 | [
"BSD-3-Clause"
] | 3 | 2019-08-08T16:58:25.000Z | 2019-10-12T07:31:36.000Z | evennia/server/portal/tests.py | zeitkunst/evennia | 1f254b2542fbefe400c114b3d7029522cdcb37b7 | [
"BSD-3-Clause"
] | 9 | 2019-09-06T18:21:59.000Z | 2022-01-13T03:04:11.000Z | evennia/server/portal/tests.py | zeitkunst/evennia | 1f254b2542fbefe400c114b3d7029522cdcb37b7 | [
"BSD-3-Clause"
] | 2 | 2019-09-02T08:39:24.000Z | 2019-09-02T18:39:32.000Z | try:
from django.utils.unittest import TestCase
except ImportError:
from django.test import TestCase
try:
from django.utils import unittest
except ImportError:
import unittest
from mock import Mock
import string
from evennia.server.portal import irc
from twisted.conch.telnet import IAC, WILL, DONT, SB, SE, NAWS, DO
from twisted.test import proto_helpers
from twisted.trial.unittest import TestCase as TwistedTestCase
from .telnet import TelnetServerFactory, TelnetProtocol
from .portal import PORTAL_SESSIONS
from .suppress_ga import SUPPRESS_GA
from .naws import DEFAULT_HEIGHT, DEFAULT_WIDTH
from .ttype import TTYPE, IS
from .mccp import MCCP
from .mssp import MSSP
from .mxp import MXP
from .telnet_oob import MSDP, MSDP_VAL, MSDP_VAR
class TestIRC(TestCase):
    """Round-trip tests for evennia's ANSI <-> IRC markup converters."""
    def test_plain_ansi(self):
        """
        Test that printable characters do not get mangled.
        """
        irc_ansi = irc.parse_ansi_to_irc(string.printable)
        ansi_irc = irc.parse_irc_to_ansi(string.printable)
        self.assertEqual(irc_ansi, string.printable)
        self.assertEqual(ansi_irc, string.printable)

    def test_bold(self):
        # \x02 is the IRC bold toggle; |h is evennia's highlight tag.
        s_irc = "\x02thisisatest"
        s_eve = r'|hthisisatest'
        self.assertEqual(irc.parse_ansi_to_irc(s_eve), s_irc)
        self.assertEqual(s_eve, irc.parse_irc_to_ansi(s_irc))

    def test_italic(self):
        # NOTE(review): identical fixture to test_bold (\x02 is IRC *bold*);
        # presumably a placeholder until italics get a distinct code -- confirm.
        s_irc = "\x02thisisatest"
        s_eve = r'|hthisisatest'
        self.assertEqual(irc.parse_ansi_to_irc(s_eve), s_irc)

    def test_colors(self):
        # Pairs of (mIRC colour code, evennia colour tag). "\003NN" selects
        # foreground colour NN; "99,N" leaves the foreground at the default
        # and sets background N (evennia's |[x background tags).
        color_map = (("\0030", r'|w'),
                     ("\0031", r'|X'),
                     ("\0032", r'|B'),
                     ("\0033", r'|G'),
                     ("\0034", r'|r'),
                     ("\0035", r'|R'),
                     ("\0036", r'|M'),
                     ("\0037", r'|Y'),
                     ("\0038", r'|y'),
                     ("\0039", r'|g'),
                     ("\00310", r'|C'),
                     ("\00311", r'|c'),
                     ("\00312", r'|b'),
                     ("\00313", r'|m'),
                     ("\00314", r'|x'),
                     ("\00315", r'|W'),
                     ("\00399,5", r'|[r'),
                     ("\00399,3", r'|[g'),
                     ("\00399,7", r'|[y'),
                     ("\00399,2", r'|[b'),
                     ("\00399,6", r'|[m'),
                     ("\00399,10", r'|[c'),
                     ("\00399,15", r'|[w'),
                     ("\00399,1", r'|[x'))

        # Check every mapping in both directions.
        for m in color_map:
            self.assertEqual(irc.parse_irc_to_ansi(m[0]), m[1])
            self.assertEqual(m[0], irc.parse_ansi_to_irc(m[1]))

    def test_identity(self):
        """
        Test that the composition of the function and
        its inverse gives the correct string.
        """
        s = r'|wthis|Xis|gis|Ma|C|complex|*string'
        self.assertEqual(irc.parse_irc_to_ansi(irc.parse_ansi_to_irc(s)), s)
class TestTelnet(TwistedTestCase):
    """Exercise telnet option negotiation against a Twisted string transport."""
    def setUp(self):
        super(TestTelnet, self).setUp()
        factory = TelnetServerFactory()
        factory.protocol = TelnetProtocol
        factory.sessionhandler = PORTAL_SESSIONS
        factory.sessionhandler.portal = Mock()
        self.proto = factory.buildProtocol(("localhost", 0))
        self.transport = proto_helpers.StringTransport()
        self.addCleanup(factory.sessionhandler.disconnect_all)

    def test_mudlet_ttype(self):
        # Simulate a Mudlet-like client answering each option the server
        # offers. `handshakes` counts the outstanding negotiations and is
        # asserted to decrease by one after each exchange completes.
        self.transport.client = ["localhost"]
        self.transport.setTcpKeepAlive = Mock()
        d = self.proto.makeConnection(self.transport)
        # test suppress_ga: client refuses, so NOGOAHEAD is cleared
        self.assertTrue(self.proto.protocol_flags["NOGOAHEAD"])
        self.proto.dataReceived(IAC + DONT + SUPPRESS_GA)
        self.assertFalse(self.proto.protocol_flags["NOGOAHEAD"])
        self.assertEqual(self.proto.handshakes, 7)
        # test naws: subnegotiation bytes '\0x' '\0d' encode width 120 ('x'
        # is 0x78) and height 100 ('d' is 0x64)
        self.assertEqual(self.proto.protocol_flags['SCREENWIDTH'], {0: DEFAULT_WIDTH})
        self.assertEqual(self.proto.protocol_flags['SCREENHEIGHT'], {0: DEFAULT_HEIGHT})
        self.proto.dataReceived(IAC + WILL + NAWS)
        self.proto.dataReceived([IAC, SB, NAWS, '', 'x', '', 'd', IAC, SE])
        self.assertEqual(self.proto.protocol_flags['SCREENWIDTH'][0], 120)
        self.assertEqual(self.proto.protocol_flags['SCREENHEIGHT'][0], 100)
        self.assertEqual(self.proto.handshakes, 6)
        # test ttype: three IS answers (client name, term, MTTS bitvector)
        self.assertTrue(self.proto.protocol_flags["FORCEDENDLINE"])
        self.assertFalse(self.proto.protocol_flags["TTYPE"])
        self.assertTrue(self.proto.protocol_flags["ANSI"])
        self.proto.dataReceived(IAC + WILL + TTYPE)
        self.proto.dataReceived([IAC, SB, TTYPE, IS, "MUDLET", IAC, SE])
        self.assertTrue(self.proto.protocol_flags["XTERM256"])
        self.assertEqual(self.proto.protocol_flags["CLIENTNAME"], "MUDLET")
        self.proto.dataReceived([IAC, SB, TTYPE, IS, "XTERM", IAC, SE])
        self.proto.dataReceived([IAC, SB, TTYPE, IS, "MTTS 137", IAC, SE])
        self.assertEqual(self.proto.handshakes, 5)
        # test mccp: client refuses compression
        self.proto.dataReceived(IAC + DONT + MCCP)
        self.assertFalse(self.proto.protocol_flags['MCCP'])
        self.assertEqual(self.proto.handshakes, 4)
        # test mssp: client refuses server status protocol
        self.proto.dataReceived(IAC + DONT + MSSP)
        self.assertEqual(self.proto.handshakes, 3)
        # test oob: client accepts MSDP and requests the command list
        self.proto.dataReceived(IAC + DO + MSDP)
        self.proto.dataReceived([IAC, SB, MSDP, MSDP_VAR, "LIST", MSDP_VAL, "COMMANDS", IAC, SE])
        self.assertTrue(self.proto.protocol_flags['OOB'])
        self.assertEqual(self.proto.handshakes, 2)
        # test mxp: client refuses MXP markup
        self.proto.dataReceived(IAC + DONT + MXP)
        self.assertFalse(self.proto.protocol_flags['MXP'])
        self.assertEqual(self.proto.handshakes, 1)
        # clean up to prevent Unclean reactor
        self.proto.nop_keep_alive.stop()
        self.proto._handshake_delay.cancel()
        return d
| 39.585526 | 97 | 0.590327 |
8df727b59eca1c965693c4ab0ad5e3add777025d | 1,370 | py | Python | pin.py | realjohnward/rgbeads | 9faecc6440bc63582d85d49f733ba076c250abf0 | [
"MIT"
] | null | null | null | pin.py | realjohnward/rgbeads | 9faecc6440bc63582d85d49f733ba076c250abf0 | [
"MIT"
] | null | null | null | pin.py | realjohnward/rgbeads | 9faecc6440bc63582d85d49f733ba076c250abf0 | [
"MIT"
] | null | null | null | import subprocess
import json
import os
from dotenv import load_dotenv
load_dotenv()
NODE_PATH = os.getenv('NODE_PATH')
NUM_BEADS = int(os.getenv('NUM_BEADS'))
def pin(read_path="./beads", save_path="./data"):
    """Pin bead images and their metadata to Pinata via the Node helper scripts.

    For each bead ``i`` the image ``<read_path>/<i>/<i>.png`` is pinned first;
    its IPFS hash is then embedded in the metadata loaded from
    ``<read_path>/<i>/<i>.json`` before the metadata itself is pinned.
    All metadata hashes are written to ``<save_path>/metadata_hashes.json``
    and the gateway URL of each pinned metadata file is printed.
    """
    def pin_img_to_pinata(img_path):
        # The Node script prints the resulting IPFS hash on stdout.
        ipfs_hash = subprocess.check_output([NODE_PATH, './_pinImgToPinata.js', img_path])
        return ipfs_hash.decode().strip()

    def pin_metadata_to_pinata(metadata, img_ipfs_hash, number):
        # Stamp the edition attribute, token name and pinned-image URL
        # into the metadata before uploading it.
        metadata['attributes'].append({'display_type': 'number', 'trait_type': 'Edition', 'max_value': 1, 'value': 1})
        metadata['name'] = f'RGBead #{number} of {NUM_BEADS}'
        metadata['image'] = "https://gateway.pinata.cloud/ipfs/" + img_ipfs_hash
        metadata_ipfs_hash = subprocess.check_output([NODE_PATH, './_pinMetadataToPinata.js', json.dumps(metadata), "1"])
        return metadata_ipfs_hash.decode().strip()

    mhashes = []
    # Fix: was hard-coded range(10); use NUM_BEADS so the loop agrees with the
    # configured collection size used in the "#i of NUM_BEADS" token names.
    for i in range(NUM_BEADS):
        impath = os.path.join(read_path, str(i), f"{i}.png")
        imhash = pin_img_to_pinata(impath)
        # Close file handles deterministically instead of leaking them.
        with open(os.path.join(read_path, str(i), f"{i}.json")) as meta_file:
            m = json.load(meta_file)
        mhash = pin_metadata_to_pinata(m, imhash, str(i + 1))
        mhashes.append(mhash)
        url = f"https://gateway.pinata.cloud/ipfs/{mhash}"
        print(url)
    with open(os.path.join(save_path, "metadata_hashes.json"), "w") as out_file:
        json.dump(mhashes, out_file)
pin() | 38.055556 | 121 | 0.661314 |
45d676499a26cbdc79675e3ac33e7e87628c218f | 1,049 | py | Python | test/clients/IDummyClient.py | pip-services-archive/pip-services-runtime-python | 70eca1ffc44bfdc45c9c65b0ee347fa578368849 | [
"MIT"
] | null | null | null | test/clients/IDummyClient.py | pip-services-archive/pip-services-runtime-python | 70eca1ffc44bfdc45c9c65b0ee347fa578368849 | [
"MIT"
] | null | null | null | test/clients/IDummyClient.py | pip-services-archive/pip-services-runtime-python | 70eca1ffc44bfdc45c9c65b0ee347fa578368849 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
pip_services_runtime.clients.IDummyClient
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Interface for dummy client implementations
:copyright: Digital Living Software Corp. 2015-2016, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from pip_services_runtime.IClient import IClient
class IDummyClient(IClient):
    """Abstract client interface for the Dummy entity (filtered paging + CRUD)."""
    def get_dummies(self, correlation_id, filter, paging):
        """Retrieve a page of dummies matching *filter*. Must be overridden."""
        raise NotImplementedError('Method from interface definition')

    def get_dummy_by_id(self, correlation_id, dummy_id):
        """Retrieve a single dummy by its id. Must be overridden."""
        raise NotImplementedError('Method from interface definition')

    def create_dummy(self, correlation_id, dummy):
        """Create a new dummy. Must be overridden."""
        raise NotImplementedError('Method from interface definition')

    def update_dummy(self, correlation_id, dummy_id, dummy):
        """Update the dummy identified by *dummy_id*. Must be overridden."""
        raise NotImplementedError('Method from interface definition')

    def delete_dummy(self, correlation_id, dummy_id):
        """Delete the dummy identified by *dummy_id*. Must be overridden."""
        raise NotImplementedError('Method from interface definition')
8046b58c3d0645928af9978505e51e9e8f5dc1e5 | 3,766 | py | Python | util/update_copyright/__init__.py | hyu-iot/gem5 | aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5 | [
"BSD-3-Clause"
] | 765 | 2015-01-14T16:17:04.000Z | 2022-03-28T07:46:28.000Z | util/update_copyright/__init__.py | hyu-iot/gem5 | aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5 | [
"BSD-3-Clause"
] | 148 | 2018-07-20T00:58:36.000Z | 2021-11-16T01:52:33.000Z | util/update_copyright/__init__.py | hyu-iot/gem5 | aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5 | [
"BSD-3-Clause"
] | 807 | 2015-01-06T09:55:38.000Z | 2022-03-30T10:23:36.000Z | # Copyright (c) 2020 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Utilities to parse and modify copyright headers in gem5 source.
"""
import re
org_alias_map = {
'arm': b'ARM Limited',
'uc': b'The Regents of the University of California',
}
# Splits a years string into everything before the last year and the last
# year itself, e.g. b'2016, 2018-2019' -> (b'2016, 2018-', b'2019').
_update_copyright_year_regexp = re.compile(b'(.*?)([0-9]+)$')


def _update_copyright_years(m, cur_year, org_bytes):
    '''
    Rebuild a copyright line with its year list extended to cur_year.
    Does e.g.: b'2016, 2018-2019' -> b'2016, 2018-2020'.

    :param m: match containing only the years part of the string
    :type m: re.Match
    :param cur_year: the current year to update the copyright to
    :type cur_year: int
    :param org_bytes: organization name to put back on the rebuilt line
    :type org_bytes: bytes
    :return: the full rebuilt copyright line (not just the years part)
    :rtype: bytes
    '''
    cur_year_bytes = str(cur_year).encode()
    years_match = _update_copyright_year_regexp.match(m.group(1))
    years_prefix = years_match.group(1)
    old_year_bytes = years_match.group(2)
    old_year = int(old_year_bytes.decode())
    if old_year == cur_year:
        # Already up to date.
        new_years_string = old_year_bytes
    elif old_year == cur_year - 1:
        if years_prefix.endswith(b'-'):
            # Extend an existing range: b'2018-2019' -> b'2018-2020'.
            new_years_string = cur_year_bytes
        else:
            # Start a new range: b'2019' -> b'2019-2020'.
            new_years_string = old_year_bytes + b'-' + cur_year_bytes
    else:
        # Non-contiguous: append as a separate entry, b'2016' -> b'2016, 2020'.
        new_years_string = old_year_bytes + b', ' + cur_year_bytes
    new_years_string = years_prefix + new_years_string
    return b' Copyright (c) %b %b\n' % (new_years_string, org_bytes)


def update_copyright(data, cur_year, org_bytes):
    '''
    Update the first copyright line for *org_bytes* in *data* so that its
    year list includes *cur_year*. *data* is returned unchanged if no
    matching copyright line is found.

    :param data: file contents to update
    :type data: bytes
    :param cur_year: the current year to update the copyright to
    :type cur_year: int
    :param org_bytes: organization name exactly as on the copyright line
    :type org_bytes: bytes
    :rtype: bytes
    '''
    update_copyright_regexp = re.compile(
        # re.escape so an organization name containing regex
        # metacharacters can never corrupt the pattern.
        b' Copyright \\(c\\) ([0-9,\\- ]+) ' + re.escape(org_bytes) + b'\n',
        re.IGNORECASE
    )
    return update_copyright_regexp.sub(
        lambda m: _update_copyright_years(m, cur_year, org_bytes),
        data,
        count=1,  # only the first matching line is touched, as before
    )
| 42.795455 | 72 | 0.733139 |
adb63ab69af372168609b01e25eb3fc050bf7925 | 482 | py | Python | activities/models/rnotes.py | aiomi/REQAP | ba9ee8c7be544082c54455a8990782e1affe0e0d | [
"MIT"
] | 1 | 2021-07-08T06:16:33.000Z | 2021-07-08T06:16:33.000Z | activities/models/rnotes.py | adole99/REQAP | 6d95040fa2e5b7de23d4ba230877721f3f17f633 | [
"MIT"
] | 5 | 2020-05-01T19:37:52.000Z | 2021-09-22T18:44:51.000Z | activities/models/rnotes.py | adole99/REQAP | 6d95040fa2e5b7de23d4ba230877721f3f17f633 | [
"MIT"
] | 1 | 2020-03-13T10:15:07.000Z | 2020-03-13T10:15:07.000Z | from django.db import models
from . import Transcript
from users.models import Staff
class TranscriptNote(models.Model):
    # Audit note attached to a transcript request, recording a staff action.
    transcript = models.ForeignKey(Transcript, on_delete=models.CASCADE)
    # Short action keyword (max 10 chars) -- TODO confirm the allowed values.
    action = models.CharField(max_length=10)
    # Optional free-text justification for the action.
    reason = models.CharField(max_length=160, blank=True, null=True)
    # NOTE(review): a ForeignKey named `staff_id` gives DB column `staff_id_id`;
    # consider renaming the field to `staff` in a future migration.
    staff_id = models.ForeignKey(Staff, on_delete=models.DO_NOTHING)

    def __str__(self):
        return f'Transcript {self.action} for {self.transcript.request_by}'
5f6d2182cfe7f60acc99bb6e1ccc4376ac19ea66 | 22,232 | py | Python | src/tensorflow-examples/tensorflow_examples/converted_notebooks/19_training_and_deploying_at_scale.py | wilsonify/tensorflow-examples | 2271c666b33c7a74047c7196783ab04e9aee8362 | [
"MIT"
] | 2 | 2019-11-21T02:43:24.000Z | 2020-08-12T04:48:39.000Z | src/tensorflow-examples/tensorflow_examples/converted_notebooks/19_training_and_deploying_at_scale.py | wilsonify/tensorflow-examples | 2271c666b33c7a74047c7196783ab04e9aee8362 | [
"MIT"
] | null | null | null | src/tensorflow-examples/tensorflow_examples/converted_notebooks/19_training_and_deploying_at_scale.py | wilsonify/tensorflow-examples | 2271c666b33c7a74047c7196783ab04e9aee8362 | [
"MIT"
] | 1 | 2021-02-06T12:36:58.000Z | 2021-02-06T12:36:58.000Z | #!/usr/bin/env python
# coding: utf-8
# **Chapter 19 – Training and Deploying TensorFlow Models at Scale**
# _This notebook contains all the sample code in chapter 19._
# <table align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/ageron/handson-ml2/blob/master/19_training_and_deploying_at_scale.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# </table>
# # Setup
# First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0.
#
# In[1]:
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
try:
# %tensorflow_version only exists in Colab.
get_ipython().run_line_magic('tensorflow_version', '2.x')
get_ipython().system('echo "deb http://storage.googleapis.com/tensorflow-serving-apt stable tensorflow-model-server tensorflow-model-server-universal" > /etc/apt/sources.list.d/tensorflow-serving.list')
get_ipython().system('curl https://storage.googleapis.com/tensorflow-serving-apt/tensorflow-serving.release.pub.gpg | apt-key add -')
get_ipython().system('apt update && apt-get install -y tensorflow-model-server')
get_ipython().system('pip install -q -U tensorflow-serving-api')
IS_COLAB = True
except Exception:
IS_COLAB = False
# TensorFlow ≥2.0 is required
import tensorflow as tf
from tensorflow import keras
assert tf.__version__ >= "2.0"
if not tf.test.is_gpu_available():
print("No GPU was detected. CNNs can be very slow without a GPU.")
if IS_COLAB:
print("Go to Runtime > Change runtime and select a GPU hardware accelerator.")
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
tf.random.set_seed(42)
# To plot pretty figures
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "deploy"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current Matplotlib figure as <fig_id>.<fig_extension> under IMAGES_PATH."""
    filename = "{}.{}".format(fig_id, fig_extension)
    target = os.path.join(IMAGES_PATH, filename)
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(target, format=fig_extension, dpi=resolution)
# # Deploying TensorFlow models to TensorFlow Serving (TFS)
# We will use the REST API or the gRPC API.
# ## Save/Load a `SavedModel`
# In[2]:
# Load MNIST, add a trailing channel axis and scale pixels to [0, 1].
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()
X_train_full = X_train_full[..., np.newaxis].astype(np.float32) / 255.
X_test = X_test[..., np.newaxis].astype(np.float32) / 255.
# Hold out the first 5,000 training images for validation.
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
# Three test images reused as "new" instances throughout the serving examples.
X_new = X_test[:3]
# In[3]:
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28, 1]),
keras.layers.Dense(100, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))
# In[4]:
np.round(model.predict(X_new), 2)
# In[5]:
model_version = "0001"
model_name = "my_mnist_model"
model_path = os.path.join(model_name, model_version)
model_path
# In[6]:
get_ipython().system('rm -rf {model_name}')
# In[7]:
tf.saved_model.save(model, model_path)
# In[8]:
for root, dirs, files in os.walk(model_name):
indent = ' ' * root.count(os.sep)
print('{}{}/'.format(indent, os.path.basename(root)))
for filename in files:
print('{}{}'.format(indent + ' ', filename))
# In[9]:
get_ipython().system('saved_model_cli show --dir {model_path}')
# In[10]:
get_ipython().system('saved_model_cli show --dir {model_path} --tag_set serve')
# In[11]:
get_ipython().system('saved_model_cli show --dir {model_path} --tag_set serve --signature_def serving_default')
# In[12]:
get_ipython().system('saved_model_cli show --dir {model_path} --all')
# Let's write the new instances to a `npy` file so we can pass them easily to our model:
# In[13]:
np.save("my_mnist_tests.npy", X_new)
# In[14]:
input_name = model.input_names[0]
input_name
# And now let's use `saved_model_cli` to make predictions for the instances we just saved:
# In[15]:
get_ipython().system('saved_model_cli run --dir {model_path} --tag_set serve --signature_def serving_default --inputs {input_name}=my_mnist_tests.npy')
# In[16]:
np.round([[1.1739199e-04, 1.1239604e-07, 6.0210604e-04, 2.0804715e-03, 2.5779348e-06,
6.4079795e-05, 2.7411186e-08, 9.9669880e-01, 3.9654213e-05, 3.9471846e-04],
[1.2294615e-03, 2.9207937e-05, 9.8599273e-01, 9.6755642e-03, 8.8930705e-08,
2.9156188e-04, 1.5831805e-03, 1.1311053e-09, 1.1980456e-03, 1.1113169e-07],
[6.4066830e-05, 9.6359509e-01, 9.0598064e-03, 2.9872139e-03, 5.9552520e-04,
3.7478798e-03, 2.5074568e-03, 1.1462728e-02, 5.5553433e-03, 4.2495009e-04]], 2)
# ## TensorFlow Serving
# Install [Docker](https://docs.docker.com/install/) if you don't have it already. Then run:
#
# ```bash
# docker pull tensorflow/serving
#
# export ML_PATH=$HOME/ml # or wherever this project is
# docker run -it --rm -p 8500:8500 -p 8501:8501 \
# -v "$ML_PATH/my_mnist_model:/models/my_mnist_model" \
# -e MODEL_NAME=my_mnist_model \
# tensorflow/serving
# ```
# Once you are finished using it, press Ctrl-C to shut down the server.
# Alternatively, if `tensorflow_model_server` is installed (e.g., if you are running this notebook in Colab), then the following 3 cells will start the server:
# In[17]:
os.environ["MODEL_DIR"] = os.path.split(os.path.abspath(model_path))[0]
# In[18]:
get_ipython().run_cell_magic('bash', '--bg', 'nohup tensorflow_model_server \\\n --rest_api_port=8501 \\\n --model_name=my_mnist_model \\\n --model_base_path="${MODEL_DIR}" >server.log 2>&1')
# In[19]:
get_ipython().system('tail server.log')
# In[20]:
import json
input_data_json = json.dumps({
"signature_name": "serving_default",
"instances": X_new.tolist(),
})
# In[21]:
repr(input_data_json)[:1500] + "..."
# Now let's use TensorFlow Serving's REST API to make predictions:
# In[22]:
import requests
SERVER_URL = 'http://localhost:8501/v1/models/my_mnist_model:predict'
response = requests.post(SERVER_URL, data=input_data_json)
response.raise_for_status() # raise an exception in case of error
response = response.json()
# In[23]:
response.keys()
# In[24]:
y_proba = np.array(response["predictions"])
y_proba.round(2)
# ### Using the gRPC API
# In[25]:
from tensorflow_serving.apis.predict_pb2 import PredictRequest
request = PredictRequest()
request.model_spec.name = model_name
request.model_spec.signature_name = "serving_default"
input_name = model.input_names[0]
request.inputs[input_name].CopyFrom(tf.make_tensor_proto(X_new))
# In[26]:
import grpc
from tensorflow_serving.apis import prediction_service_pb2_grpc
channel = grpc.insecure_channel('localhost:8500')
predict_service = prediction_service_pb2_grpc.PredictionServiceStub(channel)
response = predict_service.Predict(request, timeout=10.0)
# In[27]:
response
# Convert the response to a tensor:
# In[28]:
output_name = model.output_names[0]
outputs_proto = response.outputs[output_name]
y_proba = tf.make_ndarray(outputs_proto)
y_proba.round(2)
# Or to a NumPy array if your client does not include the TensorFlow library:
# In[29]:
output_name = model.output_names[0]
outputs_proto = response.outputs[output_name]
shape = [dim.size for dim in outputs_proto.tensor_shape.dim]
y_proba = np.array(outputs_proto.float_val).reshape(shape)
y_proba.round(2)
# ## Deploying a new model version
# In[30]:
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28, 1]),
keras.layers.Dense(50, activation="relu"),
keras.layers.Dense(50, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))
# In[31]:
model_version = "0002"
model_name = "my_mnist_model"
model_path = os.path.join(model_name, model_version)
model_path
# In[32]:
tf.saved_model.save(model, model_path)
# In[33]:
for root, dirs, files in os.walk(model_name):
indent = ' ' * root.count(os.sep)
print('{}{}/'.format(indent, os.path.basename(root)))
for filename in files:
print('{}{}'.format(indent + ' ', filename))
# **Warning**: You may need to wait a minute before the new model is loaded by TensorFlow Serving.
# In[34]:
import requests
SERVER_URL = 'http://localhost:8501/v1/models/my_mnist_model:predict'
response = requests.post(SERVER_URL, data=input_data_json)
response.raise_for_status()
response = response.json()
# In[35]:
response.keys()
# In[36]:
y_proba = np.array(response["predictions"])
y_proba.round(2)
# # Deploy the model to Google Cloud AI Platform
# Follow the instructions in the book to deploy the model to Google Cloud AI Platform, download the service account's private key and save it to the `my_service_account_private_key.json` in the project directory. Also, update the `project_id`:
# In[37]:
project_id = "onyx-smoke-242003"
# In[38]:
import googleapiclient.discovery
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "my_service_account_private_key.json"
model_id = "my_mnist_model"
model_path = "projects/{}/models/{}".format(project_id, model_id)
model_path += "/versions/v0001/" # if you want to run a specific version
ml_resource = googleapiclient.discovery.build("ml", "v1").projects()
# In[39]:
def predict(X):
    """Send the batch X to the deployed AI Platform model and return predictions.

    X is converted with .tolist() for the JSON payload, so it must be a
    NumPy array (or expose a compatible tolist()). Returns a NumPy array
    with one row per instance, taken from the field named by the
    module-level `output_name` in each prediction dict.
    Raises RuntimeError if the service reports an error.
    """
    input_data_json = {"signature_name": "serving_default",
                       "instances": X.tolist()}
    request = ml_resource.predict(name=model_path, body=input_data_json)
    response = request.execute()  # blocking HTTP call to the AI Platform REST API
    if "error" in response:
        raise RuntimeError(response["error"])
    # NOTE(review): `output_name` comes from an earlier notebook cell (the
    # gRPC section); assumes it matches this model's output key -- confirm.
    return np.array([pred[output_name] for pred in response["predictions"]])
# In[40]:
Y_probas = predict(X_new)
np.round(Y_probas, 2)
# # Using GPUs
# In[41]:
tf.test.is_gpu_available()
# In[42]:
tf.test.gpu_device_name()
# In[43]:
tf.test.is_built_with_cuda()
# In[44]:
from tensorflow.python.client.device_lib import list_local_devices
devices = list_local_devices()
devices
# # Distributed Training
# In[45]:
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
# In[46]:
def create_model():
    """Build the small MNIST CNN used in this section (fresh and uncompiled)."""
    stack = [
        keras.layers.Conv2D(filters=64, kernel_size=7, activation="relu",
                            padding="same", input_shape=[28, 28, 1]),
        keras.layers.MaxPooling2D(pool_size=2),
    ]
    # Two identical 128-filter convolutions before the second pooling stage.
    for _ in range(2):
        stack.append(keras.layers.Conv2D(filters=128, kernel_size=3,
                                         activation="relu", padding="same"))
    stack.extend([
        keras.layers.MaxPooling2D(pool_size=2),
        keras.layers.Flatten(),
        keras.layers.Dense(units=64, activation='relu'),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(units=10, activation='softmax'),
    ])
    return keras.models.Sequential(stack)
# In[47]:
batch_size = 100
model = create_model()
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid), batch_size=batch_size)
# In[48]:
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
distribution = tf.distribute.MirroredStrategy()
# Change the default all-reduce algorithm:
#distribution = tf.distribute.MirroredStrategy(
# cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
# Specify the list of GPUs to use:
#distribution = tf.distribute.MirroredStrategy(devices=["/gpu:0", "/gpu:1"])
# Use the central storage strategy instead:
#distribution = tf.distribute.experimental.CentralStorageStrategy()
#resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
#tf.tpu.experimental.initialize_tpu_system(resolver)
#distribution = tf.distribute.experimental.TPUStrategy(resolver)
with distribution.scope():
model = create_model()
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
# In[49]:
batch_size = 100 # must be divisible by the number of workers
model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid), batch_size=batch_size)
# In[50]:
model.predict(X_new)
# Custom training loop:
# In[51]:
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
K = keras.backend
distribution = tf.distribute.MirroredStrategy()
with distribution.scope():
model = create_model()
optimizer = keras.optimizers.SGD()
with distribution.scope():
dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train)).repeat().batch(batch_size)
input_iterator = distribution.make_dataset_iterator(dataset)
@tf.function
def train_step():
    """Run one distributed training step and return the mean loss over replicas."""
    def step_fn(inputs):
        # Per-replica work: forward pass, loss scaled by the *global* batch
        # size, then backprop and a parameter update.
        X, y = inputs
        with tf.GradientTape() as tape:
            Y_proba = model(X)
            loss = K.sum(keras.losses.sparse_categorical_crossentropy(y, Y_proba)) / batch_size
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        return loss

    # Run step_fn on every replica; summing the per-replica losses gives the
    # global mean because each was already divided by the global batch size.
    per_replica_losses = distribution.experimental_run(step_fn, input_iterator)
    mean_loss = distribution.reduce(tf.distribute.ReduceOp.SUM,
                                    per_replica_losses, axis=None)
    return mean_loss
n_epochs = 10
with distribution.scope():
input_iterator.initialize()
for epoch in range(n_epochs):
print("Epoch {}/{}".format(epoch + 1, n_epochs))
for iteration in range(len(X_train) // batch_size):
print("\rLoss: {:.3f}".format(train_step().numpy()), end="")
print()
# In[52]:
batch_size = 100 # must be divisible by the number of workers
model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid), batch_size=batch_size)
# ## Training across multiple servers
# A TensorFlow cluster is a group of TensorFlow processes running in parallel, usually on different machines, and talking to each other to complete some work, for example training or executing a neural network. Each TF process in the cluster is called a "task" (or a "TF server"). It has an IP address, a port, and a type (also called its role or its job). The type can be `"worker"`, `"chief"`, `"ps"` (parameter server) or `"evaluator"`:
# * Each **worker** performs computations, usually on a machine with one or more GPUs.
# * The **chief** performs computations as well, but it also handles extra work such as writing TensorBoard logs or saving checkpoints. There is a single chief in a cluster. If no chief is specified, then the first worker is the chief.
# * A **parameter server** (ps) only keeps track of variable values, it is usually on a CPU-only machine.
# * The **evaluator** obviously takes care of evaluation. There is usually a single evaluator in a cluster.
#
# The set of tasks that share the same type is often called a "job". For example, the "worker" job is the set of all workers.
#
# To start a TensorFlow cluster, you must first specify it. This means defining all the tasks (IP address, TCP port, and type). For example, the following cluster specification defines a cluster with 3 tasks (2 workers and 1 parameter server). It's a dictionary with one key per job, and the values are lists of task addresses:
#
# ```
# {
# "worker": ["my-worker0.example.com:9876", "my-worker1.example.com:9876"],
# "ps": ["my-ps0.example.com:9876"]
# }
# ```
#
# Every task in the cluster may communicate with every other task in the server, so make sure to configure your firewall to authorize all communications between these machines on these ports (it's usually simpler if you use the same port on every machine).
#
# When a task is started, it needs to be told which one it is: its type and index (the task index is also called the task id). A common way to specify everything at once (both the cluster spec and the current task's type and id) is to set the `TF_CONFIG` environment variable before starting the program. It must be a JSON-encoded dictionary containing a cluster specification (under the `"cluster"` key), and the type and index of the task to start (under the `"task"` key). For example, the following `TF_CONFIG` environment variable defines a simple cluster with 2 workers and 1 parameter server, and specifies that the task to start is the first worker:
# In[53]:
import os
import json
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
"worker": ["my-work0.example.com:9876", "my-work1.example.com:9876"],
"ps": ["my-ps0.example.com:9876"]
},
"task": {"type": "worker", "index": 0}
})
print("TF_CONFIG='{}'".format(os.environ["TF_CONFIG"]))
# Some platforms (e.g., Google Cloud ML Engine) automatically set this environment variable for you.
# Then you would write a short Python script to start a task. The same script can be used on every machine, since it will load the `TF_CONFIG` variable, which will tell it which task to start:
# In[54]:
import tensorflow as tf
resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
worker0 = tf.distribute.Server(resolver.cluster_spec(),
job_name=resolver.task_type,
task_index=resolver.task_id)
# Another way to specify the cluster specification is directly in Python, rather than through an environment variable:
# In[55]:
cluster_spec = tf.train.ClusterSpec({
"worker": ["127.0.0.1:9901", "127.0.0.1:9902"],
"ps": ["127.0.0.1:9903"]
})
# You can then start a server simply by passing it the cluster spec and indicating its type and index. Let's start the two remaining tasks (remember that in general you would only start a single task per machine; we are starting 3 tasks on the localhost just for the purpose of this code example):
# In[56]:
#worker1 = tf.distribute.Server(cluster_spec, job_name="worker", task_index=1)
ps0 = tf.distribute.Server(cluster_spec, job_name="ps", task_index=0)
# In[57]:
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
"worker": ["127.0.0.1:9901", "127.0.0.1:9902"],
"ps": ["127.0.0.1:9903"]
},
"task": {"type": "worker", "index": 1}
})
print(repr(os.environ["TF_CONFIG"]))
# In[58]:
distribution = tf.distribute.experimental.MultiWorkerMirroredStrategy()
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
"worker": ["127.0.0.1:9901", "127.0.0.1:9902"],
"ps": ["127.0.0.1:9903"]
},
"task": {"type": "worker", "index": 1}
})
#CUDA_VISIBLE_DEVICES=0
with distribution.scope():
model = create_model()
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
# In[59]:
import tensorflow as tf
from tensorflow import keras
import numpy as np
# At the beginning of the program (restart the kernel before running this cell)
distribution = tf.distribute.experimental.MultiWorkerMirroredStrategy()
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()
X_train_full = X_train_full[..., np.newaxis] / 255.
X_test = X_test[..., np.newaxis] / 255.
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_new = X_test[:3]
n_workers = 2
batch_size = 32 * n_workers
dataset = tf.data.Dataset.from_tensor_slices((X_train[..., np.newaxis], y_train)).repeat().batch(batch_size)
def create_model():
    """Build the small CNN classifier (28x28x1 inputs, 10-way softmax output)
    used throughout the distribution-strategy examples."""
    layers = [
        # Feature extractor: two conv stages, each followed by 2x2 max-pooling.
        keras.layers.Conv2D(filters=64, kernel_size=7, activation="relu",
                            padding="same", input_shape=[28, 28, 1]),
        keras.layers.MaxPooling2D(pool_size=2),
        keras.layers.Conv2D(filters=128, kernel_size=3, activation="relu",
                            padding="same"),
        keras.layers.Conv2D(filters=128, kernel_size=3, activation="relu",
                            padding="same"),
        keras.layers.MaxPooling2D(pool_size=2),
        # Classifier head with dropout regularization.
        keras.layers.Flatten(),
        keras.layers.Dense(units=64, activation='relu'),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(units=10, activation='softmax'),
    ]
    return keras.models.Sequential(layers)
with distribution.scope():
model = create_model()
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
model.fit(dataset, steps_per_epoch=len(X_train)//batch_size, epochs=10)
# In[60]:
# Hyperparameter tuning
# Only talk to ps server
config_proto = tf.ConfigProto(device_filters=['/job:ps', '/job:worker/task:%d' % tf_config['task']['index']])
config = tf.estimator.RunConfig(session_config=config_proto)
# default since 1.10
# In[61]:
strategy.num_replicas_in_sync
| 27.824781 | 657 | 0.697868 |
88244f49428498e751a29a73403ece6d99551a07 | 5,750 | py | Python | examples/python/PricingVanillaOption.py | suhasghorp/FinancePy | 567486785b33768f229a404e860f92fd042034a2 | [
"BSD-3-Clause"
] | null | null | null | examples/python/PricingVanillaOption.py | suhasghorp/FinancePy | 567486785b33768f229a404e860f92fd042034a2 | [
"BSD-3-Clause"
] | 1 | 2020-05-07T14:38:23.000Z | 2020-05-07T14:38:23.000Z | examples/python/PricingVanillaOption.py | suhasghorp/FinancePy | 567486785b33768f229a404e860f92fd042034a2 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 13 21:52:16 2019
@author: Dominic O'Kane
"""
from
valueDate = FinDate(2015, 1, 1)
expiryDate = FinDate(2015, 7, 1)
stockPrice = 100
volatility = 0.30
interestRate = 0.05
dividendYield = 0.01
model = FinEquityModelBlackScholes(volatility)
discountCurve = FinFlatCurve(valueDate, interestRate)
numPathsList = [10000, 20000, 40000, 80000, 160000, 320000]
testCases.header("NUMPATHS", "VALUE_BS", "VALUE_MC", "TIME")
for numPaths in numPathsList:
callOption = FinVanillaOption(
expiryDate, 100.0, FinOptionTypes.EUROPEAN_CALL)
value = callOption.value(
valueDate,
stockPrice,
discountCurve,
dividendYield,
model)
start = time.time()
valueMC = callOption.valueMC(
valueDate,
stockPrice,
discountCurve,
dividendYield,
model,
numPaths)
end = time.time()
duration = end - start
testCases.print(numPaths, value, valueMC, duration)
##########################################################################
stockPrices = range(80, 120, 2)
numPaths = 100000
testCases.header("NUMPATHS", "VALUE_BS", "VALUE_MC", "TIME")
for stockPrice in stockPrices:
callOption = FinVanillaOption(
expiryDate, 100.0, FinOptionTypes.EUROPEAN_CALL)
value = callOption.value(
valueDate,
stockPrice,
discountCurve,
dividendYield,
model)
start = time.time()
valueMC = callOption.valueMC(
valueDate,
stockPrice,
discountCurve,
dividendYield,
model,
numPaths)
end = time.time()
duration = end - start
testCases.print(numPaths, value, valueMC, duration)
##########################################################################
stockPrices = range(80, 120, 2)
numPaths = 100000
testCases.header("STOCK PRICE", "VALUE_BS", "VALUE_MC", "TIME")
for stockPrice in stockPrices:
putOption = FinVanillaOption(
expiryDate, 100.0, FinOptionTypes.EUROPEAN_PUT)
value = putOption.value(
valueDate,
stockPrice,
discountCurve,
dividendYield,
model)
start = time.time()
valueMC = putOption.valueMC(
valueDate,
stockPrice,
discountCurve,
dividendYield,
model,
numPaths)
end = time.time()
duration = end - start
testCases.print(stockPrice, value, valueMC, duration)
##########################################################################
stockPrices = range(80, 120, 2)
testCases.header(
"STOCK PRICE",
"VALUE_BS",
"DELTA_BS",
"VEGA_BS",
"THETA_BS",
"RHO_BS")
for stockPrice in stockPrices:
callOption = FinVanillaOption(
expiryDate, 100.0, FinOptionTypes.EUROPEAN_CALL)
value = callOption.value(
valueDate,
stockPrice,
discountCurve,
dividendYield,
model)
delta = callOption.delta(
valueDate,
stockPrice,
discountCurve,
dividendYield,
model)
vega = callOption.vega(
valueDate,
stockPrice,
discountCurve,
dividendYield,
model)
theta = callOption.theta(
valueDate,
stockPrice,
discountCurve,
dividendYield,
model)
# callOption.rho(valueDate,stockPrice, interestRate, dividendYield, modelType, modelParams)
rho = 999
testCases.print(stockPrice, value, delta, vega, theta, rho)
testCases.header(
"STOCK PRICE",
"VALUE_BS",
"DELTA_BS",
"VEGA_BS",
"THETA_BS",
"RHO_BS")
for stockPrice in stockPrices:
putOption = FinVanillaOption(
expiryDate, 100.0, FinOptionTypes.EUROPEAN_PUT)
value = putOption.value(
valueDate,
stockPrice,
discountCurve,
dividendYield,
model)
delta = putOption.delta(
valueDate,
stockPrice,
discountCurve,
dividendYield,
model)
vega = putOption.vega(
valueDate,
stockPrice,
discountCurve,
dividendYield,
model)
theta = putOption.theta(
valueDate,
stockPrice,
discountCurve,
dividendYield,
model)
# putOption.rho(valueDate,stockPrice, interestRate, dividendYield,
# modelType, modelParams)
rho = 999
testCases.print(stockPrice, value, delta, vega, theta, rho)
##########################################################################
testCases.header("STOCK PRICE", "VALUE_BS", "VOL_IN", "IMPLD_VOL")
stockPrices = range(60, 150, 2)
for stockPrice in stockPrices:
callOption = FinVanillaOption(
expiryDate, 100.0, FinOptionTypes.EUROPEAN_CALL)
value = callOption.value(
valueDate,
stockPrice,
discountCurve,
dividendYield,
model)
impliedVol = callOption.impliedVolatility(
valueDate, stockPrice, discountCurve, dividendYield, value)
testCases.print(stockPrice, value, volatility, impliedVol)
test_FinVanillaOption()
testCases.compareTestCases()
| 27.777778 | 99 | 0.526435 |
7974542530d3b117e4dc2cf392cc299d27f818dc | 1,409 | py | Python | app/routes.py | ender18g/ew370 | a4a8af973adc37a02eb9148fed67e857a614faaf | [
"MIT"
] | null | null | null | app/routes.py | ender18g/ew370 | a4a8af973adc37a02eb9148fed67e857a614faaf | [
"MIT"
] | null | null | null | app/routes.py | ender18g/ew370 | a4a8af973adc37a02eb9148fed67e857a614faaf | [
"MIT"
] | null | null | null | from app import app
from app import db
from flask import render_template, url_for, request, jsonify, redirect
import json
from app.models import Comment
# def get_mlData():
# with open ('app/static/brain-examples/ml_data.json','r') as f:
# return json.load(f)
# def save_mlData(ml_data):
# with open ('app/static/brain-examples/ml_data.json', 'w') as f:
# json.dump(ml_data,f)
# return True
@app.route('/')
@app.route('/index')
@app.route('/index.html')
def index():
    # Landing page; all three URL variants render the same template.
    return render_template('index.html')
@app.route('/brain')
@app.route('/brain.html')
def brain():
    # Secondary demo page rendered from the 'brainex.html' template.
    return render_template('brainex.html')
# @app.route('/mlData', methods=["GET","POST","DELETE"])
# def mlData():
# #if someone is sending new training data, save it
# if request.method=='POST':
# new_item = {'input':request.form.get('comment'),'output':request.form.get('output')}
# comment = Comment(comment=request.form.get('comment'), output=request.form.get('output'))
# db.session.add(comment)
# db.session.commit()
# return redirect(url_for('brain'))
# #if it's a delete request, delete comment from db
# if request.method=="DELETE":
# delete_id = int(request.args.get('id'))
# comment = Comment.query.get(delete_id)
# db.session.delete(comment)
# db.session.commit()
# mlData = [i.serialize for i in Comment.query.all()]
# #print(mlData)
# return jsonify(mlData)
| 30.630435 | 95 | 0.672818 |
27df010cc1ef623d1994e6e3cc0f657e7f6f9752 | 224 | py | Python | mangum/protocols/__init__.py | tasn/mangum | 6da7e51ca8e7979f41291ab3f0e698882f219814 | [
"MIT"
] | 661 | 2020-06-02T01:06:35.000Z | 2022-03-30T22:40:47.000Z | mangum/protocols/__init__.py | tasn/mangum | 6da7e51ca8e7979f41291ab3f0e698882f219814 | [
"MIT"
] | 116 | 2020-06-02T02:14:14.000Z | 2022-03-25T11:54:38.000Z | mangum/protocols/__init__.py | tasn/mangum | 6da7e51ca8e7979f41291ab3f0e698882f219814 | [
"MIT"
] | 55 | 2020-06-02T02:01:26.000Z | 2022-03-16T16:13:09.000Z | from .http import HTTPCycle
from .websockets import WebSocketCycle
from .lifespan import LifespanCycleState, LifespanCycle
__all__ = [
"HTTPCycle",
"WebSocketCycle",
"LifespanCycleState",
"LifespanCycle",
]
| 20.363636 | 55 | 0.745536 |
d3aa0b628dc8db7172ab6aaf3387523a358363b4 | 2,751 | py | Python | app.py | noorbala7418/ImAlive | f5b26141fc6035b233f18fddc91c34d31a9740ab | [
"MIT"
] | 2 | 2021-11-03T18:33:04.000Z | 2021-12-21T11:16:16.000Z | app.py | noorbala7418/ImAlive | f5b26141fc6035b233f18fddc91c34d31a9740ab | [
"MIT"
] | null | null | null | app.py | noorbala7418/ImAlive | f5b26141fc6035b233f18fddc91c34d31a9740ab | [
"MIT"
] | null | null | null | import os
import json
import subprocess
import re
from dotenv import load_dotenv, find_dotenv
from flask import Flask, request, Response
from flask_httpauth import HTTPBasicAuth
from werkzeug.security import generate_password_hash, check_password_hash
app = Flask(__name__)
app.config["DEBUG"] = True
load_dotenv(find_dotenv(".env"))
auth = HTTPBasicAuth()
@auth.verify_password
def verify_password(username, password):
    """Check HTTP Basic credentials against the USERNAME/PASSWORD env vars.

    Returns:
        bool: True only when both username and password match the configured
        values; False for any mismatch or missing credential/configuration.
    """
    import hmac

    sys_username = os.getenv("USERNAME")
    sys_password = os.getenv("PASSWORD")
    # The original hashed the env password and immediately verified that hash
    # against the submitted password -- an expensive way to spell a plaintext
    # comparison, and it crashed (TypeError) when PASSWORD was unset. Use a
    # direct constant-time comparison instead, guarding all None cases.
    if None in (username, password, sys_username, sys_password):
        return False
    if username == sys_username and hmac.compare_digest(sys_password, password):
        return True
    return False
@app.route('/alive', methods=['GET'])
def alive():
    """Health-check endpoint: confirms the server is reachable (past any
    filtering/network problems) and reports its uptime in seconds."""
    payload = {'message': "Yes! I am Alive :)", 'uptime': str(get_uptime())}
    body = json.dumps(payload)
    return Response(response=body, status=200, mimetype="application/json")
def get_uptime():
    """Return the system uptime in seconds, read from /proc/uptime (Linux)."""
    with open('/proc/uptime', 'r') as handle:
        first_field = handle.readline().split()[0]
    return float(first_field)
@app.route('/service/state/<string:service_name>', methods=['GET'])
@auth.login_required
def show_state_services(service_name):
    ### Gets Service Status from system and returns it as JSON.
    service_state = get_service_full_status(service_name)
    # Helpers signal failure by returning a string prefixed with 'Error:'.
    if service_state.startswith('Error:'):
        srv_not_found = json.dumps({'state': service_state})
        return Response(response=srv_not_found, status=404, mimetype="application/json")
    else:
        # Reduce the raw systemctl output to just the 'Active: ...' fragment.
        service_state = get_service_status(service_state)
        res = json.dumps({'state': service_state})
        response = Response(response=res, status=200, mimetype="application/json")
        return (response)
def get_service_status(service_state):
    """Extract the 'Active: ...' fragment (up to 'CPU') from systemctl output.

    Returns a list containing the matched fragment, or an empty list when no
    'Active: ... CPU' span is present in `service_state`.
    """
    pattern = re.compile(r'(Active:.+)CPU')
    return pattern.findall(service_state)
def get_service_full_status(service_name):
    """Return the raw `systemctl status <service_name>` output as a string.

    On failure an 'Error: ...' string is returned instead; callers detect
    this via `startswith('Error:')`.
    """
    try:
        # str() of the bytes output is intentional: the downstream regex in
        # get_service_status relies on newlines staying escaped ('\\n') so the
        # 'Active: ... CPU' span stays on one logical line.
        return str(subprocess.check_output(["systemctl", "status", service_name]))
    except subprocess.CalledProcessError:
        return f'Error: Service {service_name} Not found.'
    except FileNotFoundError:
        # systemctl binary itself is missing (non-systemd host); report the
        # problem with the same 'Error:' convention instead of crashing.
        return 'Error: systemctl is not available on this system.'
@app.route('/run', methods=['POST'])
@auth.login_required
### Runs commands -- currently a stub; always returns a not-implemented notice.
def get_info():
    # TODO: this func should run light commands
    # return request.get_json()['command']
    return {'message':'This EP hasn\'t implemented.'}
@app.route('/', methods=['GET'])
def home():
    """Root endpoint: plain-text application banner."""
    return 'Im Alive! application. an easy tool for managing your server :)'
app.run(host=os.getenv("HOST_URL"), port=os.getenv("HOST_PORT"))
| 28.071429 | 104 | 0.705198 |
a6cfbb8cb7acf2e4678c0df1ae4266a636a981e8 | 8,057 | py | Python | homeassistant/components/mill/climate.py | ryborg/core | b17120a5113a424cab9c621225956f6c8ec67a87 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/mill/climate.py | ryborg/core | b17120a5113a424cab9c621225956f6c8ec67a87 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/mill/climate.py | ryborg/core | b17120a5113a424cab9c621225956f6c8ec67a87 | [
"Apache-2.0"
] | null | null | null | """Support for mill wifi-enabled home heaters."""
import mill
import voluptuous as vol
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
FAN_ON,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_IP_ADDRESS,
CONF_USERNAME,
PRECISION_WHOLE,
TEMP_CELSIUS,
)
from homeassistant.core import ServiceCall, callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
ATTR_AWAY_TEMP,
ATTR_COMFORT_TEMP,
ATTR_ROOM_NAME,
ATTR_SLEEP_TEMP,
CLOUD,
CONNECTION_TYPE,
DOMAIN,
LOCAL,
MANUFACTURER,
MAX_TEMP,
MIN_TEMP,
SERVICE_SET_ROOM_TEMP,
)
SET_ROOM_TEMP_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ROOM_NAME): cv.string,
vol.Optional(ATTR_AWAY_TEMP): cv.positive_int,
vol.Optional(ATTR_COMFORT_TEMP): cv.positive_int,
vol.Optional(ATTR_SLEEP_TEMP): cv.positive_int,
}
)
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up the Mill climate."""
    # LAN-connected entries expose exactly one heater per config entry.
    if entry.data.get(CONNECTION_TYPE) == LOCAL:
        mill_data_coordinator = hass.data[DOMAIN][LOCAL][entry.data[CONF_IP_ADDRESS]]
        async_add_entities([LocalMillHeater(mill_data_coordinator)])
        return
    # Cloud entries may carry several devices; only Heater instances become
    # climate entities (other device types are handled elsewhere).
    mill_data_coordinator = hass.data[DOMAIN][CLOUD][entry.data[CONF_USERNAME]]
    entities = [
        MillHeater(mill_data_coordinator, mill_device)
        for mill_device in mill_data_coordinator.data.values()
        if isinstance(mill_device, mill.Heater)
    ]
    async_add_entities(entities)
    async def set_room_temp(service: ServiceCall) -> None:
        """Set room temp."""
        # Service handler closure: forwards the room's sleep/comfort/away
        # temperatures to the cloud connection; missing fields stay None.
        room_name = service.data.get(ATTR_ROOM_NAME)
        sleep_temp = service.data.get(ATTR_SLEEP_TEMP)
        comfort_temp = service.data.get(ATTR_COMFORT_TEMP)
        away_temp = service.data.get(ATTR_AWAY_TEMP)
        await mill_data_coordinator.mill_data_connection.set_room_temperatures_by_name(
            room_name, sleep_temp, comfort_temp, away_temp
        )
    hass.services.async_register(
        DOMAIN, SERVICE_SET_ROOM_TEMP, set_room_temp, schema=SET_ROOM_TEMP_SCHEMA
    )
class MillHeater(CoordinatorEntity, ClimateEntity):
    """Representation of a Mill Thermostat device."""
    # Static capability declarations shared by all cloud-connected heaters.
    _attr_fan_modes = [FAN_ON, HVAC_MODE_OFF]
    _attr_max_temp = MAX_TEMP
    _attr_min_temp = MIN_TEMP
    _attr_target_temperature_step = PRECISION_WHOLE
    _attr_temperature_unit = TEMP_CELSIUS
    def __init__(self, coordinator, heater):
        """Initialize the thermostat."""
        super().__init__(coordinator)
        self._available = False
        self._id = heater.device_id
        self._attr_unique_id = heater.device_id
        self._attr_name = heater.name
        self._attr_device_info = DeviceInfo(
            identifiers={(DOMAIN, heater.device_id)},
            manufacturer=MANUFACTURER,
            model=f"Generation {heater.generation}",
            name=self.name,
        )
        # Gen1 exposes only HEAT: power-off is never sent to gen1 devices
        # (see async_set_hvac_mode below).
        if heater.is_gen1:
            self._attr_hvac_modes = [HVAC_MODE_HEAT]
        else:
            self._attr_hvac_modes = [HVAC_MODE_HEAT, HVAC_MODE_OFF]
        # Fan control is offered only for generations below 3.
        if heater.generation < 3:
            self._attr_supported_features = (
                SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE
            )
        else:
            self._attr_supported_features = SUPPORT_TARGET_TEMPERATURE
        self._update_attr(heater)
    async def async_set_temperature(self, **kwargs):
        """Set new target temperature."""
        if (temperature := kwargs.get(ATTR_TEMPERATURE)) is None:
            return
        await self.coordinator.mill_data_connection.set_heater_temp(
            self._id, int(temperature)
        )
        await self.coordinator.async_request_refresh()
    async def async_set_fan_mode(self, fan_mode):
        """Set new target fan mode."""
        # Device API takes 1/0 rather than the HA fan-mode strings.
        fan_status = 1 if fan_mode == FAN_ON else 0
        await self.coordinator.mill_data_connection.heater_control(
            self._id, fan_status=fan_status
        )
        await self.coordinator.async_request_refresh()
    async def async_set_hvac_mode(self, hvac_mode):
        """Set new target hvac mode."""
        heater = self.coordinator.data[self._id]
        if hvac_mode == HVAC_MODE_HEAT:
            await self.coordinator.mill_data_connection.heater_control(
                self._id, power_status=1
            )
            await self.coordinator.async_request_refresh()
        # OFF is silently ignored for gen1 devices (they only declare HEAT).
        elif hvac_mode == HVAC_MODE_OFF and not heater.is_gen1:
            await self.coordinator.mill_data_connection.heater_control(
                self._id, power_status=0
            )
            await self.coordinator.async_request_refresh()
    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        # Both the coordinator and the device snapshot must report available.
        return super().available and self._available
    @callback
    def _handle_coordinator_update(self) -> None:
        """Handle updated data from the coordinator."""
        self._update_attr(self.coordinator.data[self._id])
        self.async_write_ha_state()
    @callback
    def _update_attr(self, heater):
        # Mirror the latest device snapshot into the cached entity attributes.
        self._available = heater.available
        self._attr_extra_state_attributes = {
            "open_window": heater.open_window,
            "heating": heater.is_heating,
            "controlled_by_tibber": heater.tibber_control,
            "heater_generation": heater.generation,
        }
        if heater.room:
            self._attr_extra_state_attributes["room"] = heater.room.name
            self._attr_extra_state_attributes["avg_room_temp"] = heater.room.avg_temp
        else:
            self._attr_extra_state_attributes["room"] = "Independent device"
        self._attr_target_temperature = heater.set_temp
        self._attr_current_temperature = heater.current_temp
        self._attr_fan_mode = FAN_ON if heater.fan_status == 1 else HVAC_MODE_OFF
        # Gen1 is always treated as heating and powered on (no OFF support).
        if heater.is_gen1 or heater.is_heating == 1:
            self._attr_hvac_action = CURRENT_HVAC_HEAT
        else:
            self._attr_hvac_action = CURRENT_HVAC_IDLE
        if heater.is_gen1 or heater.power_status == 1:
            self._attr_hvac_mode = HVAC_MODE_HEAT
        else:
            self._attr_hvac_mode = HVAC_MODE_OFF
class LocalMillHeater(CoordinatorEntity, ClimateEntity):
    """Representation of a Mill Thermostat device."""
    # LAN-connected heaters expose only HEAT mode here: no OFF, no fan control.
    _attr_hvac_mode = HVAC_MODE_HEAT
    _attr_hvac_modes = [HVAC_MODE_HEAT]
    _attr_max_temp = MAX_TEMP
    _attr_min_temp = MIN_TEMP
    _attr_supported_features = SUPPORT_TARGET_TEMPERATURE
    _attr_target_temperature_step = PRECISION_WHOLE
    _attr_temperature_unit = TEMP_CELSIUS
    def __init__(self, coordinator):
        """Initialize the thermostat."""
        super().__init__(coordinator)
        self._attr_name = coordinator.mill_data_connection.name
        self._update_attr()
    async def async_set_temperature(self, **kwargs):
        """Set new target temperature."""
        if (temperature := kwargs.get(ATTR_TEMPERATURE)) is None:
            return
        await self.coordinator.mill_data_connection.set_target_temperature(
            int(temperature)
        )
        await self.coordinator.async_request_refresh()
    @callback
    def _handle_coordinator_update(self) -> None:
        """Handle updated data from the coordinator."""
        self._update_attr()
        self.async_write_ha_state()
    @callback
    def _update_attr(self) -> None:
        # Mirror the latest coordinator snapshot into cached entity attributes.
        data = self.coordinator.data
        self._attr_target_temperature = data["set_temperature"]
        self._attr_current_temperature = data["ambient_temperature"]
        # Any nonzero power draw means the element is actively heating.
        if data["current_power"] > 0:
            self._attr_hvac_action = CURRENT_HVAC_HEAT
        else:
            self._attr_hvac_action = CURRENT_HVAC_IDLE
637ec522aa7c886dcb45028cbb78693adda75d32 | 703 | py | Python | setup.py | amackojc/fatube | 72a59b1f41a5c0a6dda75fe41cc3a7342eee7983 | [
"MIT"
] | null | null | null | setup.py | amackojc/fatube | 72a59b1f41a5c0a6dda75fe41cc3a7342eee7983 | [
"MIT"
] | null | null | null | setup.py | amackojc/fatube | 72a59b1f41a5c0a6dda75fe41cc3a7342eee7983 | [
"MIT"
] | 1 | 2022-01-18T20:57:01.000Z | 2022-01-18T20:57:01.000Z | """
Setup file for FATube.
Use setup.cfg to configure your project.
This file was generated with PyScaffold 4.1.1.
PyScaffold helps you to put up the scaffold of your new Python project.
Learn more under: https://pyscaffold.org/
"""
from setuptools import setup
if __name__ == "__main__":
    try:
        # Version is derived from SCM tags via setuptools_scm; "no-guess-dev"
        # keeps dev builds from inventing the next version number.
        setup(use_scm_version={"version_scheme": "no-guess-dev"})
    except:  # noqa
        print(
            "\n\nAn error occurred while building the project, "
            "please ensure you have the most updated version of setuptools, "
            "setuptools_scm and wheel with:\n"
            " pip install -U setuptools setuptools_scm wheel\n\n"
        )
        raise
d77ff9f26f0bd43f1d0474a71e016a19adf7b88d | 1,968 | py | Python | pygeostat/utility/logging.py | haroldvelasquez/pygeostat | b45294ef432ec0169eb32c7cf42bca871651d8ed | [
"MIT"
] | 22 | 2020-08-04T15:13:53.000Z | 2022-03-31T00:15:07.000Z | pygeostat/utility/logging.py | haroldvelasquez/pygeostat | b45294ef432ec0169eb32c7cf42bca871651d8ed | [
"MIT"
] | 15 | 2020-08-04T15:40:21.000Z | 2021-12-22T15:18:01.000Z | pygeostat/utility/logging.py | haroldvelasquez/pygeostat | b45294ef432ec0169eb32c7cf42bca871651d8ed | [
"MIT"
] | 59 | 2020-08-04T17:09:26.000Z | 2022-03-25T23:19:44.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''utils.py: Contains utility functions for providing status alerts in a script'''
from __future__ import absolute_import, division, print_function
__author__ = 'pygeostat development team'
__date__ = '2015'
__version__ = '1.000'
import textwrap
def printerr(text, width=80, errtype=None):
    """
    Small utility to print custom errors with proper indentation and text wrapping. The only error
    types coded are `error` and `warning`.

    Parameters:
        text (str): String of text without the preceding error flag that will be formated
        width (int): The maximum length of wrapped lines
        errtype (str): Indicate which type of error to format the string as. Case-insensitive.
            The default value of `None` will only text wrap the string to the specified `width`.

    .. codeauthor:: pygeostat development team 2015-11-01
    """
    # Bug fix: str.lower() returns a new string; the original discarded the
    # result, so 'Error'/'WARNING' etc. were never matched below. Also dropped
    # the unused (and circular) `import pygeostat as gs`.
    if isinstance(errtype, str):
        errtype = errtype.lower()
    if errtype == 'error':
        text = 'ERROR: ' + text
        # Indent continuation lines under the text, past the 'ERROR: ' prefix.
        subsequent_indent = "       "
    elif errtype == 'warning':
        text = 'WARNING: ' + text
        subsequent_indent = "         "
    else:
        subsequent_indent = ""
    print(textwrap.fill(text, width=width, subsequent_indent=subsequent_indent))
def log_progress(sequence, size=None, name='Items'):
    """
    Wrap an iterable with a tqdm progress bar (notebook-aware).

    Parameters:
        sequence: The thing that is being iterated over
        size (int): Total number of items, used to draw a determinate bar
        name (str): Label shown next to the bar

    Returns:
        The wrapped iterable. If tqdm is not installed, the original
        `sequence` is returned unchanged so iteration still works.

    Example:

        >>> from pygeostat import log_progress
        >>> from time import sleep
        >>> for i in log_progress(range(200)):
        ...     sleep(0.1)

    .. image:: ./figures/log_progress.gif
    """
    try:
        from tqdm import tqdm, tqdm_notebook
    except ImportError:
        # Bug fix: the original fell through after printing this hint and then
        # referenced the unbound name `tqdm`, raising NameError. Degrade
        # gracefully by returning the raw sequence instead.
        print("pip install tqdm to install the progressbar functionality !")
        return sequence
    try:
        get_ipython()  # only defined inside IPython/Jupyter
        return tqdm_notebook(sequence, desc=name, total=size)
    except Exception:
        # Plain console (get_ipython undefined) or notebook widgets missing.
        return tqdm(sequence, name, total=size, ascii=True)
b810bfed0938840d1cd44df291b50dfd3b37e391 | 139 | py | Python | opwen_email_server/constants/sync.py | kulado/mailserver | d05c0b2e8d8c97b7b7a08028a9eacd4427a59c4b | [
"Apache-2.0"
] | 15 | 2020-07-06T02:33:15.000Z | 2021-08-09T19:24:11.000Z | opwen_email_server/constants/sync.py | kulado/mailserver | d05c0b2e8d8c97b7b7a08028a9eacd4427a59c4b | [
"Apache-2.0"
] | 298 | 2016-12-24T20:19:56.000Z | 2020-04-29T03:13:21.000Z | opwen_email_server/constants/sync.py | sbathgate/lokole | a4e3fa5da5373880f6783379f7ef8d843e4cc79d | [
"Apache-2.0"
] | 12 | 2017-06-05T12:13:35.000Z | 2020-01-23T03:26:05.000Z | from typing_extensions import Final # noqa: F401
EMAILS_FILE = 'emails.jsonl' # type: Final
USERS_FILE = 'zzusers.jsonl' # type: Final
| 27.8 | 49 | 0.733813 |
5eeb0191c287c35df018c87a3cc5714a6a5ab804 | 1,356 | py | Python | readthedocs/builds/utils.py | felixge/readthedocs.org | 41b0243bfd94c9c502c8f7f5f7df3e8f690891a5 | [
"MIT"
] | 1 | 2017-10-02T09:01:42.000Z | 2017-10-02T09:01:42.000Z | readthedocs/builds/utils.py | felixge/readthedocs.org | 41b0243bfd94c9c502c8f7f5f7df3e8f690891a5 | [
"MIT"
] | null | null | null | readthedocs/builds/utils.py | felixge/readthedocs.org | 41b0243bfd94c9c502c8f7f5f7df3e8f690891a5 | [
"MIT"
] | null | null | null | import re
GH_REGEXS = [
re.compile('github.com/(.+)/(.+)(?:\.git){1}'),
re.compile('github.com/(.+)/(.+)'),
re.compile('github.com:(.+)/(.+).git'),
]
BB_REGEXS = [
re.compile('bitbucket.org/(.+)/(.+)/'),
re.compile('bitbucket.org/(.+)/(.+)'),
re.compile('bitbucket.org:(.+)/(.+)\.git'),
]
def get_github_username_repo(version):
    """Return (username, repo) parsed from the project's GitHub remote URL.

    Falls back to (None, None) when the repo URL is not a GitHub URL or
    none of the known URL shapes in GH_REGEXS match.
    """
    repo_url = version.project.repo
    if 'github' not in repo_url:
        return (None, None)
    for pattern in GH_REGEXS:
        found = pattern.search(repo_url)
        if found:
            return found.groups()
    return (None, None)
def get_bitbucket_username_repo(version):
    """Return (username, repo) parsed from the project's Bitbucket remote URL.

    Falls back to (None, None) when the repo URL is not a Bitbucket URL or
    none of the known URL shapes in BB_REGEXS match.
    """
    repo_url = version.project.repo
    if 'bitbucket' not in repo_url:
        return (None, None)
    for pattern in BB_REGEXS:
        found = pattern.search(repo_url)
        if found:
            return found.groups()
    return (None, None)
def get_vcs_version(version):
    """Resolve the VCS identifier (branch/tag name) to build for `version`.

    Non-'latest' versions build their own slug; 'latest' uses the project's
    configured default branch, or the VCS backend's fallback branch.
    """
    if version.slug != 'latest':
        return version.slug
    default_branch = version.project.default_branch
    if default_branch:
        return default_branch
    return version.project.vcs_repo().fallback_branch
def get_conf_py_path(version):
    """Return the docs directory of conf.py relative to the checkout root.

    E.g. a conf.py at '<checkout>/docs/conf.py' yields '/docs/'.
    """
    checkout_root = version.project.checkout_path(version.slug)
    conf_py_path = version.project.conf_file(version.slug)
    relative = conf_py_path.replace(checkout_root, '')
    return relative.replace('conf.py', '')
139548cb3781047aceefc145d3a7a14908ccd550 | 37,058 | py | Python | ssqueezepy/visuals.py | CHEN34-hub/ssqueezepy | d6e6d5b331094d8aeee0c187f7efbfb3c97af986 | [
"MIT"
] | 1 | 2022-03-06T14:18:26.000Z | 2022-03-06T14:18:26.000Z | ssqueezepy/visuals.py | CHEN34-hub/ssqueezepy | d6e6d5b331094d8aeee0c187f7efbfb3c97af986 | [
"MIT"
] | null | null | null | ssqueezepy/visuals.py | CHEN34-hub/ssqueezepy | d6e6d5b331094d8aeee0c187f7efbfb3c97af986 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Convenience visual methods"""
import numpy as np
from pathlib import Path
from .algos import find_closest, find_maximum
from .configs import gdefaults
from . import plt
#### Visualizations ##########################################################
def wavelet_tf(wavelet, N=2048, scale=None, notext=False, width=1.1, height=1):
    """Visualize `wavelet` joint time-frequency resolution. Plots frequency-domain
    wavelet (psih) along y-axis, and time-domain wavelet (psi) along x-axis.

    Orthogonal units (e.g. y-axis for psi) are meaningless; function values
    aren't to scale, but *widths* are, so time-frequency uncertainties are
    accurately captured.

    `wavelet` is instance of `wavelets.Wavelet` or its valid `wavelet` argument.

    See also: https://www.desmos.com/calculator/0nslu0qivv
    """
    def pick_scale(wavelet, N):
        """Pick scale such that both time- & freq-domain wavelets look nice."""
        st_min, st_max = 65 * (N / 2048), 75 * (N / 2048)
        max_iters = 100
        scale = wavelet.scalec_ct
        # generous `min_decay` since we don't care about initial bad cases
        kw = dict(wavelet=wavelet, N=N, min_decay=1, nondim=False)

        # multiplicatively nudge `scale` until std_t lands in the target band
        std_t = time_resolution(scale=scale, **kw)
        i = 0
        while not (st_min < std_t < st_max):
            if std_t > st_max:
                scale /= 1.1
            else:
                scale *= 1.1
            std_t = time_resolution(scale=scale, **kw)
            if i > max_iters:
                raise ValueError(f"couldn't autofind `scale` after {max_iters} "
                                 "iterations, aborting")
            i += 1
        return scale

    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    if scale is None:
        scale = pick_scale(wavelet, N)

    #### Compute psi & psihf #################################################
    psi = asnumpy(wavelet.psifn(scale=scale, N=N))
    apsi = np.abs(psi)
    t = np.arange(-N/2, N/2, step=1)

    w = _xifn(1, N)[:N//2 + 1]
    psih = asnumpy(wavelet(scale * w))

    #### Compute stdevs & respective indices #################################
    wc = center_frequency(wavelet, scale, N)
    std_w = freq_resolution(wavelet, scale, N, nondim=0)
    std_t = time_resolution(wavelet, scale, N, nondim=0, min_decay=1)

    _wc = np.pi - wc
    wlix = np.argmin(np.abs(w - (_wc - std_w)))
    wrix = np.argmin(np.abs(w - (_wc + std_w)))
    wl, wr = w[wlix], w[wrix]

    tlix = np.argmin(np.abs(t - (0 - std_t)))
    trix = np.argmin(np.abs(t - (0 + std_t)))
    tl, tr = t[tlix], t[trix]

    ## Rescale psi so that its y-coords span 1/5 of psih's x-coords, & vice-versa
    frac = 5
    psig = psi * (w.max() / apsi.max()) / frac
    apsig = apsi * (w.max() / apsi.max()) / frac
    psihg = psih * (t.max() / psih.max()) / frac
    # additionally shift psih to psi's left
    psihg += t.min()

    ## Find intersections
    w_xminu, w_xmax = psihg[::-1][wlix], tr
    w_xmind = psihg[::-1][wrix]  # psih not necessarily symmetric
    w_ymin, w_ymax = wl, wr
    t_xmin, t_xmax = tl, tr
    t_yminl, t_ymax = apsig[tlix], wr
    t_yminr = apsig[trix]  # same for psi

    #### Plot ################################################################
    plot(t, psig, complex=1, h=1.5)
    plot(t, apsig, linestyle='--', color='k')
    plot(psihg[::-1], w, color='purple')

    # bounds lines
    lkw = dict(color='k', linewidth=1)
    plot([t_xmin, t_xmin], [t_yminl, t_ymax], **lkw)
    plot([t_xmax, t_xmax], [t_yminr, t_ymax], **lkw)
    plot([w_xminu, w_xmax], [w_ymin, w_ymin], **lkw)
    plot([w_xmind, w_xmax], [w_ymax, w_ymax], **lkw)
    plt.xlim(t.min()*1.02, t.max()*1.02)

    # radians 0 to pi from top to bottom(=psi's mean)
    ylabels = np.round(np.linspace(np.pi, 0, 7), 1)
    plt.yticks(np.linspace(0, np.pi, len(ylabels)), ylabels)

    if notext:
        plt.gcf().set_size_inches(12*width, 12*height)
        plt.show()
        return

    #### Title, annotations, labels, styling #################################
    ## Annotation: info summary
    txt = (" wc = {:<6.5f} rad-c/s\n"
           " std_t = {:<6.4f} s/c-rad\n"
           " std_w = {:<6.5f} rad-c/s\n"
           "area/4 = {:.12f}\n"
           " = std_t * std_w\n\n"
           "(rad-c/s=\n radians*cycles/samples)"
           ).format(wc, std_t, std_w, std_t * std_w)
    _annotate(txt, xy=(.7, .76), fontsize=16)

    ## Title: wavelet name & parameters
    title = wavelet._desc(N=N, scale=scale)
    plt.title(title, loc='left', weight='bold', fontsize=16)

    ## Styling
    plt.xlabel("samples", weight='bold', fontsize=15)
    plt.ylabel("radians", weight='bold', fontsize=15)
    plt.gcf().set_size_inches(12*width, 12*height)
    plt.show()
def wavelet_tf_anim(wavelet, N=2048, scales=None, width=1.1, height=1,
                    savepath='wavanim.gif', testing=False):
    """This method computes same as `wavelet_tf` but for all scales at once,
    and animates 'intelligently'. See help(wavelet_tf).

    `scales=None` will default to 'log:minimal' with (.9*min_scale,
    0.25*max_scale). These are selected to show the wavelet a little outside of
    "well-behaved" range (without slashing max_scale, it's a lot outside such
    range). May not work for every wavelet or all of their configs.
    """
    def _make_anim_scales(scales, wavelet, N):
        if scales is None:
            scales = 'log:minimal'
            mn, mx = cwt_scalebounds(wavelet, N=N, preset='maximal',
                                     use_padded_N=False)
            scales = make_scales(N, 0.90*mn, 0.25*mx, scaletype='log')
        else:
            scales = process_scales(scales, N, wavelet, use_padded_N=False)

        # compute early and late scales more densely as they capture more
        # interesting behavior, so animation will slow down smoothly near ends
        scales = scales.squeeze()
        na = len(scales)
        s0 = (25/253)*na  # empirically-determined good value
        srepl = max(int(s0), 1)  # scales to keep from each end
        srepr = max(int(s0), 1)
        smull = 4  # extension factor
        smulr = 3
        sright = np.linspace(scales[-srepr], scales[-1], srepr * smulr)
        sleft = np.linspace(scales[0], scales[srepl], srepl * smull)
        sright = np.hstack([sright, sright[-1].repeat(smulr*2)])  # smooth loop
        sleft = np.hstack([sleft[0].repeat(smull*2), sleft])

        scales = np.hstack([sleft, scales[srepl:-srepr], sright])
        scales = scales.reshape(-1, 1)
        return scales

    from matplotlib.animation import FuncAnimation
    import matplotlib
    matplotlib.use("Agg")
    NOTE("Switched matplotlib to 'Agg' backend for animating")

    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    scales = _make_anim_scales(scales, wavelet, N)

    #### Compute Psi & Psih ##################################################
    Psi = asnumpy(wavelet.psifn(scale=scales, N=N))
    aPsi = np.abs(Psi)
    t = np.arange(-N/2, N/2, step=1)

    w = _xifn(1, N)[:N//2 + 1]
    Psih = asnumpy(wavelet(scales * w))

    #### Compute stdevs & respective indices #################################
    Wc = np.zeros(len(scales))
    std_W = Wc.copy()
    std_T = Wc.copy()
    for i, scale in enumerate(scales):
        Wc[i] = center_frequency(wavelet, float(scale), N, kind='energy')
        std_W[i] = freq_resolution( wavelet, float(scale), N, nondim=0)
        std_T[i] = time_resolution( wavelet, float(scale), N, nondim=0,
                                   min_decay=1)
    _Wc = np.pi - Wc

    Wlix = find_closest((_Wc - std_W).reshape(-1, 1), w).squeeze()
    Wrix = find_closest((_Wc + std_W).reshape(-1, 1), w).squeeze()
    Wl, Wr = w[Wlix], w[Wrix]

    Tlix = find_closest(0 - std_T.reshape(-1, 1), t).squeeze()
    Trix = find_closest(0 + std_T.reshape(-1, 1), t).squeeze()
    Tl, Tr = t[Tlix], t[Trix]

    ## Rescale Psi so that its y-coords span 1/5 of Psih's x-coords, & vice-versa
    frac = 5
    Psig = Psi * (w.max() / aPsi.max(axis=-1)).reshape(-1, 1) / frac
    aPsig = aPsi * (w.max() / aPsi.max(axis=-1)).reshape(-1, 1) / frac
    Psihg = Psih * (t.max() / Psih.max(axis=-1)).reshape(-1, 1) / frac
    # additionally shift Psih to Psi's left
    Psihg += t.min()

    ## Find intersections ####################################################
    sidx = np.arange(len(scales))
    W_xminu, W_xmax = Psihg[:, ::-1][sidx, Wlix], Tr
    W_xmind = Psihg[:, ::-1][sidx, Wrix]  # Psih not necessarily symmetric
    W_ymin, W_ymax = Wl, Wr
    T_xmin, T_xmax = Tl, Tr
    T_yminl, T_ymax = aPsig[sidx, Tlix], Wr
    T_yminr = aPsig[sidx, Trix]  # same for Psi

    ## Set up plot objects ###################################################
    fig, ax = plt.subplots()
    ax.set_xlim([t.min()*1.02, t.max()*1.02])
    ax.set_ylim([-aPsig.max()*1.05, np.pi*1.02])

    ylabels = np.round(np.linspace(np.pi, 0, 7), 1)
    plt.yticks(np.linspace(0, np.pi, len(ylabels)), ylabels)
    fig.set_size_inches(12*width, 12*height)

    ## Title: wavelet name & parameters
    title = wavelet._desc(N=N)
    ax.set_title(title, loc='left', weight='bold', fontsize=16)

    line1, = ax.plot([], [], color='tab:blue')
    line2, = ax.plot([], [], color='tab:orange')
    line3, = ax.plot([], [], color='k', linestyle='--')
    line4, = ax.plot([], [], color='purple')

    lkw = dict(color='k', linewidth=1)
    line5, = ax.plot([], [], **lkw)
    line6, = ax.plot([], [], **lkw)
    line7, = ax.plot([], [], **lkw)
    line8, = ax.plot([], [], **lkw)

    tkw = dict(horizontalalignment='center', verticalalignment='center',
               transform=ax.transAxes, fontsize=15, weight='bold')
    txt = ax.text(.9, .95, "scale=%.2f" % scales[0], **tkw)
    fig.tight_layout()

    #### Animate #############################################################
    def unique_savepath(savepath):
        """Ensure doesn't overwrite existing"""
        sp = Path(savepath)
        savename = sp.stem
        if sp.is_file():
            paths = [str(p.stem) for p in Path(savepath).parent.iterdir()
                     if savename in p.stem]
            maxnum = 0
            for p in paths:
                num = p.replace(savename, '')
                if num != '' and int(num) > maxnum:
                    maxnum = int(num)
            sp = Path(sp.parent, savename + str(maxnum + 1) + sp.suffix)
        sp = str(sp)
        return sp

    def animate(i):
        line1.set_data(t, Psig[i].real)
        line2.set_data(t, Psig[i].imag)
        line3.set_data(t, aPsig[i])
        line4.set_data(Psihg[i][::-1], w)
        line5.set_data([T_xmin[i], T_xmin[i]], [T_yminl[i], T_ymax[i]])
        line6.set_data([T_xmax[i], T_xmax[i]], [T_yminr[i], T_ymax[i]])
        line7.set_data([W_xminu[i], W_xmax[i]], [W_ymin[i], W_ymin[i]])
        line8.set_data([W_xmind[i], W_xmax[i]], [W_ymax[i], W_ymax[i]])
        txt.set_text("scale=%.2f" % scales[i])
        return line1, line2, line3, line4, line5, line6, line7, line8

    sp = unique_savepath(savepath)
    print(("Successfully computed parameters, scales ranging {:.2f} to {:.2f}; "
           "animating...\nWill save to: {}").format(
               scales.min(), scales.max(), sp), flush=True)

    # forward then backward sweep so the gif loops smoothly
    frames = np.hstack([range(len(scales)), range(len(scales) - 1)[::-1]])
    if testing:  # animation takes long; skip when unit-testing
        print("Passed `testing=True`, won't animate")
        return
    anim = FuncAnimation(fig, animate, frames=frames, interval=60,
                         blit=True, repeat=False)
    anim.save(sp, writer='imagemagick')
    print("Animated and saved to", sp, flush=True)
def wavelet_heatmap(wavelet, scales='log', N=2048):
    """Plot time- and frequency-domain wavelets for all `scales` as heatmaps."""
    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    if not isinstance(scales, np.ndarray):
        scales = process_scales(scales, N, wavelet, use_padded_N=False)

    #### Compute time- & freq-domain wavelets for all scales #################
    Psi = asnumpy(wavelet.psifn(scale=scales, N=N))

    w = _xifn(1, N)[:N//2 + 1]
    Psih = asnumpy(wavelet(scales * w))

    #### Plot ################################################################
    mx = np.abs(Psi).max() * .01
    title0 = wavelet._desc(N=N)
    kw = dict(ylabel="scales", xlabel="samples")

    imshow(Psi.real, norm=(-mx, mx), yticks=scales,
           title=title0 + " | Time-domain; real part", **kw)
    imshow(Psi, abs=1, cmap='bone', norm=(0, mx), yticks=scales,
           title=title0 + " | Time-domain; abs-val", **kw)
    kw['xlabel'] = "radians"
    imshow(Psih, abs=1, yticks=scales, xticks=np.linspace(0, np.pi, N//2),
           title=title0 + " | Freq-domain; abs-val", **kw)
def sweep_std_t(wavelet, N, scales='log', get=False, **kw):
    """Plot time resolution (std_t) vs log2(scales); `get=True` to also return
    the computed values. `kw` forwards to `time_resolution` (validated)."""
    def _process_kw(kw):
        kw = kw.copy()  # don't change external dict
        defaults = dict(min_decay=1, max_mult=2, min_mult=2,
                        nondim=False, force_int=True)
        # reject unknown kwargs (values aren't needed for validation)
        for k in kw:
            if k not in defaults:
                raise ValueError(f"unsupported kwarg '{k}'; must be one of: "
                                 + ', '.join(defaults))
        for k, v in defaults.items():
            kw[k] = kw.get(k, v)
        return kw

    kw = _process_kw(kw)
    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    scales = process_scales(scales, N, wavelet)

    std_ts = np.zeros(scales.size)
    for i, scale in enumerate(scales):
        std_ts[i] = time_resolution(wavelet, scale=scale, N=N, **kw)

    title = "std_t [{}] vs log2(scales) | {} wavelet, {}".format(
        "nondim" if kw['nondim'] else "s/c-rad", wavelet.name, wavelet.config_str)
    # reference lines at N/2 and N/4 samples
    hlines = ([N/2, N/4], dict(color='k', linestyle='--'))
    plot(np.log2(scales), std_ts, title=title, hlines=hlines, show=1)
    if get:
        return std_ts
def sweep_std_w(wavelet, N, scales='log', get=False, **kw):
    """Plot frequency resolution (std_w) vs log2(scales); `get=True` to also
    return the computed values. `kw` forwards to `freq_resolution` (validated)."""
    def _process_kw(kw):
        kw = kw.copy()  # don't change external dict
        defaults = dict(nondim=False, force_int=True)
        # reject unknown kwargs (values aren't needed for validation)
        for k in kw:
            if k not in defaults:
                raise ValueError(f"unsupported kwarg '{k}'; must be one of: "
                                 + ', '.join(defaults))
        for k, v in defaults.items():
            kw[k] = kw.get(k, v)
        return kw

    kw = _process_kw(kw)
    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    scales = process_scales(scales, N, wavelet)

    std_ws = np.zeros(scales.size)
    for i, scale in enumerate(scales):
        std_ws[i] = freq_resolution(wavelet, scale=scale, N=N, **kw)

    title = "std_w [{}] vs log2(scales) | {} wavelet, {}".format(
        "nondim" if kw['nondim'] else "s/c-rad", wavelet.name, wavelet.config_str)
    plot(np.log2(scales), std_ws, title=title, show=1)
    if get:
        return std_ws
def sweep_harea(wavelet, N, scales='log', get=False, kw_w=None, kw_t=None):
    """Plot Heisenberg area (std_w * std_t) vs log2(scales).

    Sub-.5 and near-0 areas will occur for very high scales as a result of
    discretization limitations. Zero-areas have one non-zero frequency-domain,
    and std_t==N/2, with latter more accurately set to infinity (which we don't).
    Sub-.5 are per freq-domain asymmetries degrading time-domain decay,
    and limited bin discretization integrating unreliably (yet largely
    meaningfully; the unreliable-ness appears emergent from discretization).
    """
    kw_w, kw_t = (kw_w or {}), (kw_t or {})
    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    scales = process_scales(scales, N, wavelet)

    std_ws = sweep_std_w(wavelet, N, scales, get=True, **kw_w)
    plt.show()
    std_ts = sweep_std_t(wavelet, N, scales, get=True, **kw_t)
    plt.show()
    hareas = std_ws * std_ts

    # .5 is the theoretical lower bound of the uncertainty product
    hline = (.5, dict(color='tab:red', linestyle='--'))
    title = "(std_w * std_t) vs log2(scales) | {} wavelet, {}".format(
        wavelet.name, wavelet.config_str)
    plot(np.log2(scales), hareas, color='k', hlines=hline, title=title)
    plt.show()
    if get:
        return hareas, std_ws, std_ts
def wavelet_waveforms(wavelet, N, scale, zoom=True):
    """Plot the wavelet's freq-domain sampling, freq-domain waveform, and
    time-domain waveform at `scale`; `zoom=True` trims low-amplitude tails."""
    wavelet = Wavelet._init_if_not_isinstance(wavelet)

    ## Freq-domain sampled #######################
    w_peak, _ = find_maximum(wavelet.fn)
    w_ct = np.linspace(0, w_peak*2, max(4096, p2up(N)[0]))  # 'continuous-time'
    w_dt = np.linspace(0, np.pi, N//2) * scale  # sampling pts at `scale`

    psih_ct = asnumpy(wavelet(w_ct))
    psih_dt = asnumpy(wavelet(w_dt))

    title = ("wavelet(w) sampled by xi at scale={:.2f}, N={} | {} wavelet, {}"
             ).format(scale, N, wavelet.name, wavelet.config_str)
    plot(w_ct, psih_ct, title=title, xlabel="radians")
    scat(w_dt, psih_dt, color='tab:red')
    plt.legend(["psih at scale=1", "sampled at scale=%.2f" % scale], fontsize=13)
    plt.axvline(w_peak, color='tab:red', linestyle='--')
    plt.show()

    ## Freq-domain #######################
    # if peak not near left, don't zoom; same as `if .. (w_peak >= w_dt.max())`
    if not zoom or (np.argmax(psih_dt) > .05 * N/2):
        end = None
    else:
        peak_idx = np.argmax(psih_dt)
        end = np.where(psih_dt[peak_idx:] < 1e-4*psih_dt.max())[0][0]
        end += peak_idx + 3  # +3: give few more indices for visual
    w_dtn = w_dt * (np.pi / w_dt.max())  # norm to span true w

    plot(w_dtn[:end], psih_dt[:end], xlabel="radians",
         title="Freq-domain waveform (psih)" + ", zoomed" * (end is not None))
    scat(w_dtn[:end], psih_dt[:end], color='tab:red', show=1)

    ## Time-domain #######################
    psi = asnumpy(wavelet.psifn(scale=scale, N=N))
    apsi = np.abs(psi)
    t = np.arange(-N/2, N/2, step=1)

    # don't zoom unless there's fast decay
    peak_idx = np.argmax(apsi)
    if not zoom or (apsi.max() / apsi[peak_idx:].min() <= 1e3):
        start, end = 0, None
    else:
        dt = np.where(apsi[peak_idx:] < 1e-3*apsi.max())[0][0]
        start, end = (N//2 - dt, N//2 + dt + 1)

    plot(t[start:end], psi[start:end], complex=1, xlabel="samples",
         title="Time-domain waveform (psi)" + ", zoomed" * (end is not None))
    plot(t[start:end], apsi[start:end], color='k', linestyle='--', show=1)
def _viz_cwt_scalebounds(wavelet, N, min_scale=None, max_scale=None,
                         std_t=None, cutoff=1, stdevs=2, Nt=None):
    """Can be used to visualize time & freq domains separately, where
    `min_scale` refers to scale at which to show the freq-domain wavelet, and
    `max_scale` the time-domain one.
    """
    def _viz_max(wavelet, N, max_scale, std_t, stdevs, Nt):
        # time-domain view at the largest scale, on an extended frame
        if Nt is None:
            Nt = p2up(N)[0]
        if std_t is None:
            # permissive max_mult to not crash visual
            std_t = time_resolution(wavelet, max_scale, N, nondim=False,
                                    min_mult=2, max_mult=2, min_decay=1)
        t = np.arange(-Nt/2, Nt/2, step=1)
        t -= t.mean()
        psi = asnumpy(wavelet.psifn(scale=max_scale, N=len(t)))

        plot(t, np.abs(psi)**2, ylims=(0, None),
             title="|Time-domain wavelet|^2, extended (outside dashed)")

        plt.axvline(std_t, color='tab:red')
        plt.axvline(std_t * stdevs, color='tab:green')
        # mark target (non-extended) frame
        _ = [plt.axvline(v, color='k', linestyle='--') for v in (-N/2, N/2-1)]

        _kw = dict(fontsize=16, xycoords='axes fraction', weight='bold')
        plt.annotate("1 stdev",
                     xy=(.88, .95), color='tab:red', **_kw)
        plt.annotate("%s stdevs" % stdevs,
                     xy=(.88, .90), color='tab:green', **_kw)
        plt.show()

    def _viz_min(wavelet, N, min_scale, cutoff):
        # freq-domain view at the smallest scale, vs the cutoff level
        w = _xifn(1, N)[:N//2 + 1]  # drop negative freqs
        psih = asnumpy(wavelet(min_scale * w, nohalf=True))

        _, mx = find_maximum(wavelet)
        plot(w, psih, title=("Frequency-domain wavelet, positive half "
                             "(cutoff=%s, peak=%.3f)" % (cutoff, mx)))
        plt.axhline(mx * abs(cutoff), color='tab:red')
        plt.show()

    if min_scale is not None:
        _viz_min(wavelet, N, min_scale, cutoff)
    if max_scale is not None:
        _viz_max(wavelet, N, max_scale, std_t, stdevs, Nt)
    if not (min_scale or max_scale):
        raise ValueError("Must set at least one of `min_scale`, `max_scale`")
def wavelet_filterbank(wavelet, N=1024, scales='log', skips=0, title_append=None,
                       positives=False, show=True, get=False):
    """Plot all frequency-domain wavelets, superposed.

    `skips=1` will plot every *other* wavelet, `=2` will skip 2, etc.
    `=0` shows all.

    `title_append`: will `title += title_append` if not None. Must be string.
    Can use to display additional info.

    `positives=True` will show full wavelets as opposed to trimmed at Nyquist.

    `get=True` to return the filter bank (ignores `skip`).
    """
    def _title():
        scaletype = infer_scaletype(scales)[0]
        desc = wavelet._desc(N=N)
        desc = desc.replace(" |", " filterbank |")
        title = "{}, scaletype={}{}".format(desc, scaletype, title_append or '')
        title = _textwrap(title, wrap_len=72)
        return title

    # process `scales` & prepare freq-domain wavelets
    scales = process_scales(scales, N, wavelet)
    wavelet = Wavelet._init_if_not_isinstance(wavelet)
    Psih = asnumpy(wavelet(scale=scales, N=N))

    # process `skips`
    Psih_show, scales_show = [], []
    for i, psih in enumerate(Psih):
        if i % (skips + 1) == 0:
            Psih_show.append(psih)
            scales_show.append(scales[i])
    Psih_show = np.vstack(Psih_show).T

    # prepare plot params
    if positives:
        w = None
        xlims = (-N/100, N*1.01)
    else:
        Psih_show = Psih_show[:N//2]
        w = np.linspace(0, np.pi, N//2, endpoint=True)
        xlims = (-np.pi/100, np.pi*1.01)

    # plot
    if positives:
        plt.axvline(N/2, color='tab:red')  # show Nyquist
    plot(w, Psih_show, color='tab:blue', title=_title(), xlims=xlims, show=0,
         xlabel="radians")

    # style
    _, ymax = plt.gca().get_ylim()
    plt.ylim(-ymax/100, ymax*1.03)
    txt = "(min, max)=(%.3f, %.1f)" % (np.min(scales_show), np.max(scales_show))
    _annotate(txt, xy=(.63, .95), fontsize=17)

    if show:
        plt.show()
    if get:
        return Psih
def viz_cwt_higher_order(Wx_k, scales=None, wavelet=None, **imshow_kw):
    """Show abs of higher-order CWT(s); `Wx_k` is a per-order list (each order
    plotted plus their average) or a single pre-averaged array."""
    if wavelet is not None:
        wavelet = Wavelet._init_if_not_isinstance(wavelet)
        title_append = " | " + wavelet._desc(show_N=False)
    else:
        title_append = ''
    yticks = scales.squeeze() if (scales is not None) else None
    if imshow_kw.get('ticks', 1):
        imshow_kw['yticks'] = imshow_kw.get('yticks', yticks)

    if isinstance(Wx_k, list):
        for k, Wx in enumerate(Wx_k):
            title = "abs(CWT), order={}{}".format(k, title_append)
            imshow(Wx, abs=1, title=title, **imshow_kw)

        Wx_ka = np.mean(np.vstack([Wx_k]), axis=0)
        order_str = ','.join(map(str, range(len(Wx_k))))
        title = "abs(CWT), orders {} avg{}".format(order_str, title_append)
        imshow(Wx_ka, abs=1, title=title, **imshow_kw)
    else:
        title = "abs(CWT), higher-order avg{}".format(title_append)
        imshow(Wx_k, abs=1, title=title, **imshow_kw)
def viz_gmw_orders(N=1024, n_orders=3, scale=5, gamma=3, beta=60,
                   norm='bandpass'):
    """Plot freq- and time-domain waveforms of the first `n_orders` GMW
    (Generalized Morse Wavelet) orders at `scale`."""
    wavs = []
    for k in range(n_orders):
        wav = Wavelet(('gmw', dict(gamma=gamma, beta=beta, norm=norm, order=k)))
        wavs.append(wav)
    psihs = [wav(scale=scale)[:N//2 + 1] for wav in wavs]
    psis = [wav.psifn(scale=scale) for wav in wavs]
    w = np.linspace(0, np.pi, N//2 + 1, endpoint=True)

    desc = wavs[0]._desc(show_N=False)
    orders_str = ','.join(map(str, range(n_orders)))
    for psih in psihs:
        plot(w, psih, title="Freq-domain, orders=%s | %s" % (orders_str, desc))
    plot([], show=1)  # flush superposed freq-domain plots

    for k, psi in enumerate(psis):
        plot(psi, complex=1)
        plot(psi, abs=1, color='k', linestyle='--', show=1,
             title=f"Time-domain, order={k} | {desc}")
#### Visual tools ## messy code ##############################################
def imshow(data, title=None, show=1, cmap=None, norm=None, complex=None, abs=0,
           w=None, h=None, ridge=0, ticks=1, borders=1, aspect='auto', ax=None,
           fig=None, yticks=None, xticks=None, xlabel=None, ylabel=None, **kw):
    """
    norm: color norm, tuple of (vmin, vmax)
    abs: take abs(data) before plotting
    complex: plot real & imag parts side by side
    ticks: False to not plot x & y ticks
    borders: False to not display plot borders
    w, h: rescale width & height
    kw: passed to `plt.imshow()`
    """
    # axes
    if (ax or fig) and complex:
        NOTE("`ax` and `fig` ignored if `complex`")
    if complex:
        fig, ax = plt.subplots(1, 2)
    else:
        ax = ax or plt.gca()
        fig = fig or plt.gcf()

    # norm
    if norm is None:
        mx = np.max(np.abs(data))
        vmin, vmax = ((-mx, mx) if not abs else
                      (0, mx))
    else:
        vmin, vmax = norm

    # colormap
    import matplotlib as mpl
    # parse major/minor robustly; `float(version[:3])` misreads "3.10" as 3.1
    try:
        mpl33 = tuple(int(s) for s in mpl.__version__.split('.')[:2]) >= (3, 3)
    except ValueError:
        mpl33 = True  # unrecognized (e.g. dev) version string; assume recent
    if cmap is None:
        cmap = (('turbo' if mpl33 else 'jet') if abs else
                'bwr')
    elif cmap == 'turbo':
        if not mpl33:
            from .utils import WARN
            WARN("'turbo' colormap requires matplotlib>=3.3; using 'jet' instead")
            cmap = 'jet'
    _kw = dict(vmin=vmin, vmax=vmax, cmap=cmap, aspect=aspect, **kw)

    if abs:
        ax.imshow(np.abs(data), **_kw)
    elif complex:
        ax[0].imshow(data.real, **_kw)
        ax[1].imshow(data.imag, **_kw)
        plt.subplots_adjust(left=0, right=1, bottom=0, top=1,
                            wspace=0, hspace=0)
    else:
        ax.imshow(data.real, **_kw)

    if w or h:
        fig.set_size_inches(12 * (w or 1), 12 * (h or 1))

    if ridge:
        # overlay per-column argmax of |data|
        data_mx = np.where(np.abs(data) == np.abs(data).max(axis=0))
        ax.scatter(data_mx[1], data_mx[0], color='r', s=4)

    if not ticks:
        ax.set_xticks([])
        ax.set_yticks([])
    if xticks is not None or yticks is not None:
        _ticks(xticks, yticks, ax)
    if not borders:
        for spine in ax.spines:
            ax.spines[spine].set_visible(False)
    if xlabel is not None:
        ax.set_xlabel(xlabel, weight='bold', fontsize=15)
    if ylabel is not None:
        ax.set_ylabel(ylabel, weight='bold', fontsize=15)

    _maybe_title(title, ax=ax)
    if show:
        plt.show()
def plot(x, y=None, title=None, show=0, ax_equal=False, complex=0, abs=0,
         c_annot=False, w=None, h=None, dx1=False, xlims=None, ylims=None,
         vert=False, vlines=None, hlines=None, xlabel=None, ylabel=None,
         xticks=None, yticks=None, ax=None, fig=None, ticks=True, squeeze=True,
         auto_xlims=True, **kw):
    """
    abs: take abs(data) before plotting
    complex: plot `x.real` & `x.imag`
    ticks: False to not plot x & y ticks
    w, h: rescale width & height
    kw: passed to `plt.plot()`
    """
    ax = ax or plt.gca()
    fig = fig or plt.gcf()

    if auto_xlims is None:
        auto_xlims = bool((x is not None and len(x) != 0) or
                          (y is not None and len(y) != 0))

    # fill in the missing axis with an index ramp
    if x is None and y is None:
        raise Exception("`x` and `y` cannot both be None")
    elif x is None:
        y = y if isinstance(y, list) or not squeeze else y.squeeze()
        x = np.arange(len(y))
    elif y is None:
        x = x if isinstance(x, list) or not squeeze else x.squeeze()
        y = x
        x = np.arange(len(x))
    x = x if isinstance(x, list) or not squeeze else x.squeeze()
    y = y if isinstance(y, list) or not squeeze else y.squeeze()

    if vert:
        x, y = y, x

    if complex:
        ax.plot(x, y.real, color='tab:blue', **kw)
        ax.plot(x, y.imag, color='tab:orange', **kw)
        if c_annot:
            _kw = dict(fontsize=15, xycoords='axes fraction', weight='bold')
            ax.annotate("real", xy=(.93, .95), color='tab:blue', **_kw)
            ax.annotate("imag", xy=(.93, .90), color='tab:orange', **_kw)
    else:
        if abs:
            y = np.abs(y)
        ax.plot(x, y, **kw)

    if dx1:
        ax.set_xticks(np.arange(len(x)))
    if vlines:
        vhlines(vlines, kind='v')
    if hlines:
        vhlines(hlines, kind='h')

    ticks = ticks if isinstance(ticks, (list, tuple)) else (ticks, ticks)
    if not ticks[0]:
        ax.set_xticks([])
    if not ticks[1]:
        ax.set_yticks([])
    # (was duplicated; one call suffices)
    if xticks is not None or yticks is not None:
        _ticks(xticks, yticks, ax)

    _maybe_title(title, ax=ax)
    _scale_plot(fig, ax, show=show, ax_equal=ax_equal, w=w, h=h,
                xlims=xlims, ylims=ylims, dx1=(len(x) if dx1 else 0),
                xlabel=xlabel, ylabel=ylabel, auto_xlims=auto_xlims)
def plots(X, Y=None, nrows=None, ncols=None, tight=True, sharex=False,
          sharey=False, skw=None, pkw=None, _scat=0, show=1, **kw):
    """Example:
        X = [[None, np.arange(xc, xc + wl)],
             [None, np.arange(xc + hop, xc + hop + wl)],
             None,
             None]
        Y = [[x, window],
             [x, window],
             xbuf[:, xbc],
             xbuf[:, xbc + 1]]
        pkw = [[{}]*2, [{}]*2, *[{'color': 'tab:green'}]*2]
        plots(X, Y, nrows=2, ncols=2, sharey='row', tight=tight, pkw=pkw)
    """
    def _process_args(X, Y, nrows, ncols, tight, skw, pkw, kw):
        X = X if isinstance(X, list) else [X]
        Y = Y if isinstance(Y, list) else [Y]
        skw = skw or {}
        pkw = pkw or [{}] * len(X)

        if nrows is None and ncols is None:
            nrows, ncols = len(X), 1
        elif nrows is None:
            nrows = max(len(X) // ncols, 1)
        elif ncols is None:
            ncols = max(len(X) // nrows, 1)

        default = dict(left=0, right=1, bottom=0, top=1, hspace=.1, wspace=.05)
        if tight:
            if not isinstance(tight, dict):
                tight = default.copy()
            else:
                # fill in only the missing spacing keys
                for name in default:
                    if name not in tight:
                        tight[name] = default[name]
            kw['w'] = kw.get('w', .8)
            kw['h'] = kw.get('h', .8)  # default 'tight' enlarges plot
        return X, Y, nrows, ncols, tight, skw, pkw, kw

    X, Y, nrows, ncols, tight, skw, pkw, kw = _process_args(
        X, Y, nrows, ncols, tight, skw, pkw, kw)

    _, axes = plt.subplots(nrows, ncols, sharex=sharex, sharey=sharey, **skw)
    # `plt.subplots(1, 1)` returns a bare Axes with no `.flat`; normalize
    axes = np.atleast_1d(axes)
    for ax, x, y, _pkw in zip(axes.flat, X, Y, pkw):
        if isinstance(x, list):
            # multiple curves on one subplot
            for _x, _y, __pkw in zip(x, y, _pkw):
                plot(_x, _y, ax=ax, **__pkw, **kw)
                if _scat:
                    scat(_x, _y, ax=ax, **__pkw, **kw)
        else:
            plot(x, y, ax=ax, **_pkw, **kw)
            if _scat:
                scat(x, y, ax=ax, **_pkw, **kw)
    if tight:
        plt.subplots_adjust(**tight)
    if show:
        plt.show()
def scat(x, y=None, title=None, show=0, ax_equal=False, s=18, w=None, h=None,
         xlims=None, ylims=None, dx1=False, vlines=None, hlines=None, ticks=1,
         complex=False, abs=False, xlabel=None, ylabel=None, ax=None, fig=None,
         auto_xlims=True, **kw):
    """Scatter-plot counterpart of `plot`; `s` is marker size, `kw` passes to
    `ax.scatter()`."""
    ax = ax or plt.gca()
    fig = fig or plt.gcf()

    if auto_xlims is None:
        auto_xlims = bool((x is not None and len(x) != 0) or
                          (y is not None and len(y) != 0))

    # fill in the missing axis with an index ramp
    if x is None and y is None:
        raise Exception("`x` and `y` cannot both be None")
    elif x is None:
        x = np.arange(len(y))
    elif y is None:
        y = x
        x = np.arange(len(x))

    if complex:
        ax.scatter(x, y.real, s=s, **kw)
        ax.scatter(x, y.imag, s=s, **kw)
    else:
        if abs:
            y = np.abs(y)
        ax.scatter(x, y, s=s, **kw)

    if not ticks:
        ax.set_xticks([])
        ax.set_yticks([])

    _maybe_title(title, ax=ax)
    if vlines:
        vhlines(vlines, kind='v')
    if hlines:
        vhlines(hlines, kind='h')
    _scale_plot(fig, ax, show=show, ax_equal=ax_equal, w=w, h=h,
                xlims=xlims, ylims=ylims, dx1=(len(x) if dx1 else 0),
                xlabel=xlabel, ylabel=ylabel, auto_xlims=auto_xlims)
def plotscat(*args, **kw):
    """`plot` then `scat` with the same arguments; `show=True` displays once
    both are drawn."""
    show = kw.pop('show', False)
    plot(*args, **kw)
    scat(*args, **kw)
    if show:
        plt.show()
def hist(x, bins=500, title=None, show=0, stats=0, ax=None, fig=None,
         w=1, h=1, xlims=None, ylims=None, xlabel=None, ylabel=None):
    """Histogram. `stats=True` to print mean, std, min, max of `x` (and return
    them)."""
    ax = ax or plt.gca()
    fig = fig or plt.gcf()

    x = np.asarray(x)
    _ = ax.hist(x.ravel(), bins=bins)
    _maybe_title(title, ax=ax)
    _scale_plot(fig, ax, show=show, w=w, h=h, xlims=xlims, ylims=ylims,
                xlabel=xlabel, ylabel=ylabel)
    if show:
        plt.show()

    if stats:
        mu, std, mn, mx = (x.mean(), x.std(), x.min(), x.max())
        # uses the module-level `_fmt` (previously duplicated as a nested def)
        print("(mean, std, min, max) = ({}, {}, {}, {})".format(
            *_fmt(mu, std, mn, mx)))
        return mu, std, mn, mx
def vhlines(lines, kind='v'):
    """Draw vertical (`kind='v'`) or horizontal (`kind='h'`) lines.

    `lines` may be a scalar, a list/array of positions, or a
    `(positions, style_dict)` tuple whose dict forwards to `axvline`/`axhline`.
    """
    lfn = plt.axvline if kind=='v' else plt.axhline

    if not isinstance(lines, (list, tuple)):
        lines, lkw = [lines], {}
    elif isinstance(lines, (list, np.ndarray)):
        lkw = {}
    elif isinstance(lines, tuple):
        lines, lkw = lines
        lines = lines if isinstance(lines, (list, np.ndarray)) else [lines]
    else:
        # NOTE(review): unreachable given the branches above; kept as a guard
        raise ValueError("`lines` must be list or (list, dict) "
                         "(got %s)" % lines)

    for line in lines:
        lfn(line, **lkw)
def _fmt(*nums):
return [(("%.3e" % n) if (abs(n) > 1e3 or abs(n) < 1e-3) else
("%.3f" % n)) for n in nums]
def _ticks(xticks, yticks, ax):
def fmt(ticks):
return ("%.d" if all(float(h).is_integer() for h in ticks) else
"%.2f")
if yticks is not None:
if not hasattr(yticks, '__len__') and not yticks:
ax.set_yticks([])
else:
idxs = np.linspace(0, len(yticks) - 1, 8).astype('int32')
yt = [fmt(yticks) % h for h in np.asarray(yticks)[idxs]]
ax.set_yticks(idxs)
ax.set_yticklabels(yt)
if xticks is not None:
if not hasattr(xticks, '__len__') and not xticks:
ax.set_xticks([])
else:
idxs = np.linspace(0, len(xticks) - 1, 8).astype('int32')
xt = [fmt(xticks) % h for h in np.asarray(xticks)[idxs]]
ax.set_xticks(idxs)
ax.set_xticklabels(xt)
def _maybe_title(title, ax=None):
    """Set plot title if given; `title` may be a string or `(string, kwargs)`.
    Missing style kwargs are filled from configured defaults."""
    if title is None:
        return
    title, kw = (title if isinstance(title, tuple) else
                 (title, {}))
    defaults = gdefaults('visuals._maybe_title', get_all=True, as_dict=True)
    for name in defaults:
        kw[name] = kw.get(name, defaults[name])

    if ax:
        ax.set_title(str(title), **kw)
    else:
        plt.title(str(title), **kw)
def _scale_plot(fig, ax, show=False, ax_equal=False, w=None, h=None,
                xlims=None, ylims=None, dx1=False, xlabel=None, ylabel=None,
                auto_xlims=True):
    """Apply shared axis-limit / sizing / label styling after plotting."""
    if xlims:
        ax.set_xlim(*xlims)
    elif auto_xlims:
        # trim the default margins slightly
        xmin, xmax = ax.get_xlim()
        rng = xmax - xmin
        ax.set_xlim(xmin + .018 * rng, xmax - .018 * rng)

    if ax_equal:
        # NOTE(review): `xmin`/`xmax` are unbound here when `xlims` was passed
        # (the elif above didn't run) -- confirm callers never combine the two
        yabsmax = max(np.abs([*ax.get_ylim()]))
        mx = max(yabsmax, max(np.abs([xmin, xmax])))
        ax.set_xlim(-mx, mx)
        ax.set_ylim(-mx, mx)
        fig.set_size_inches(8*(w or 1), 8*(h or 1))
    if xlims:
        ax.set_xlim(*xlims)
    if ylims:
        ax.set_ylim(*ylims)
    if dx1:
        plt.xticks(np.arange(dx1))
    if w or h:
        fig.set_size_inches(14*(w or 1), 8*(h or 1))
    if xlabel is not None:
        plt.xlabel(xlabel, weight='bold', fontsize=15)
    if ylabel is not None:
        plt.ylabel(ylabel, weight='bold', fontsize=15)
    if show:
        plt.show()
def _annotate(txt, xy=(.85, .9), weight='bold', fontsize=16):
    """Annotate current axes at fraction coords `xy`, preferring a monospace
    font so multi-line summaries align vertically."""
    _kw = dict(xycoords='axes fraction', xy=xy, weight=weight, fontsize=fontsize)
    try:
        # 'Consolas' for vertical align
        plt.annotate(txt, family='Consolas', **_kw)
    except Exception:  # narrowed from bare `except`
        plt.annotate(txt, **_kw)  # in case platform lacks 'Consolas'
#############################################################################
from .wavelets import Wavelet, _xifn
from .wavelets import center_frequency, freq_resolution, time_resolution
from .utils.common import NOTE, _textwrap, p2up
from .utils.cwt_utils import process_scales, cwt_scalebounds, make_scales
from .utils.cwt_utils import infer_scaletype
from .utils.backend import asnumpy
| 36.083739 | 82 | 0.561417 |
fbda44705b5b1dcfa785223b65530aef0cf42b93 | 4,282 | py | Python | huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/list_privateips_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/list_privateips_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/list_privateips_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListPrivateipsRequest:
    """Request model for the VPC v2 "list private IPs of a subnet" API (auto-generated).
    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" in to_dict() output.
    sensitive_list = []
    openapi_types = {
        'subnet_id': 'str',
        'limit': 'int',
        'marker': 'str'
    }
    attribute_map = {
        'subnet_id': 'subnet_id',
        'limit': 'limit',
        'marker': 'marker'
    }
    def __init__(self, subnet_id=None, limit=None, marker=None):
        """ListPrivateipsRequest - a model defined in huaweicloud sdk"""
        self._subnet_id = None
        self._limit = None
        self._marker = None
        self.discriminator = None
        # subnet_id is always assigned; limit and marker are optional paging controls.
        self.subnet_id = subnet_id
        if limit is not None:
            self.limit = limit
        if marker is not None:
            self.marker = marker
    @property
    def subnet_id(self):
        """Gets the subnet_id of this ListPrivateipsRequest.
        Unique identifier of the subnet the private IPs belong to.
        :return: The subnet_id of this ListPrivateipsRequest.
        :rtype: str
        """
        return self._subnet_id
    @subnet_id.setter
    def subnet_id(self, subnet_id):
        """Sets the subnet_id of this ListPrivateipsRequest.
        Unique identifier of the subnet the private IPs belong to.
        :param subnet_id: The subnet_id of this ListPrivateipsRequest.
        :type: str
        """
        self._subnet_id = subnet_id
    @property
    def limit(self):
        """Gets the limit of this ListPrivateipsRequest.
        Number of records returned per page.
        :return: The limit of this ListPrivateipsRequest.
        :rtype: int
        """
        return self._limit
    @limit.setter
    def limit(self, limit):
        """Sets the limit of this ListPrivateipsRequest.
        Number of records returned per page.
        :param limit: The limit of this ListPrivateipsRequest.
        :type: int
        """
        self._limit = limit
    @property
    def marker(self):
        """Gets the marker of this ListPrivateipsRequest.
        Resource ID the paged query starts from; the first page is returned when empty.
        :return: The marker of this ListPrivateipsRequest.
        :rtype: str
        """
        return self._marker
    @marker.setter
    def marker(self, marker):
        """Sets the marker of this ListPrivateipsRequest.
        Resource ID the paged query starts from; the first page is returned when empty.
        :param marker: The marker of this ListPrivateipsRequest.
        :type: str
        """
        self._marker = marker
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask values of attributes flagged as sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        # NOTE(review): Python 2 default-encoding hack kept from the generator;
        # it is a no-op under Python 3.
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListPrivateipsRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 25.488095 | 79 | 0.556049 |
ef3087da4b325c14ea422766752521a10fcd9098 | 5,046 | py | Python | RBC_Python_Numba.py | leitec/Comparison-Programming-Languages-Economics | dcff2d7135fb84c8f389f3ed2bea8500f7f31d11 | [
"MIT"
] | 197 | 2015-01-04T10:13:44.000Z | 2022-01-06T19:01:46.000Z | RBC_Python_Numba.py | leitec/Comparison-Programming-Languages-Economics | dcff2d7135fb84c8f389f3ed2bea8500f7f31d11 | [
"MIT"
] | 15 | 2015-05-24T17:29:18.000Z | 2022-02-25T20:54:46.000Z | RBC_Python_Numba.py | leitec/Comparison-Programming-Languages-Economics | dcff2d7135fb84c8f389f3ed2bea8500f7f31d11 | [
"MIT"
] | 119 | 2015-01-01T17:08:21.000Z | 2021-09-21T17:41:39.000Z | # Basic RBC model with full depreciation (Alternate 1)
#
# Jesus Fernandez-Villaverde
# Haverford, July 3, 2013
import numpy as np
import math
import time
from numba import autojit
# - Start Inner Loop - #
# - bbeta float
# - nGridCapital: int64
# - gridCapitalNextPeriod: int64
# - mOutput: float (17820 x 5)
# - nProductivity: int64
# - vGridCapital: float (17820, )
# - mValueFunction: float (17820 x 5)
# - mPolicyFunction: float (17820 x 5)
@autojit
def innerloop(bbeta, nGridCapital, gridCapitalNextPeriod, mOutput, nProductivity, vGridCapital, expectedValueFunction, mValueFunction, mValueFunctionNew, mPolicyFunction):
    """Bellman-update inner loop for one productivity state (Numba-jitted, Python 2).
    For every capital grid point, searches over next-period capital for the
    value-maximizing choice. The search starts from the previous optimum and
    breaks as soon as the objective stops improving, relying on monotonicity
    of the policy function (see the caller's comment in main_func).
    Writes results into mValueFunctionNew and mPolicyFunction in place and
    also returns them.
    NOTE(review): mValueFunction is accepted but never read in this body —
    presumably kept for signature symmetry; confirm before removing.
    """
    for nCapital in xrange(nGridCapital):
        valueHighSoFar = -100000.0
        capitalChoice = vGridCapital[0]
        for nCapitalNextPeriod in xrange(gridCapitalNextPeriod, nGridCapital):
            consumption = mOutput[nCapital,nProductivity] - vGridCapital[nCapitalNextPeriod]
            valueProvisional = (1-bbeta)*np.log(consumption)+bbeta*expectedValueFunction[nCapitalNextPeriod,nProductivity];
            if valueProvisional > valueHighSoFar:
                valueHighSoFar = valueProvisional
                capitalChoice = vGridCapital[nCapitalNextPeriod]
                gridCapitalNextPeriod = nCapitalNextPeriod
            else:
                # Objective started decreasing: stop searching this row.
                break
        mValueFunctionNew[nCapital,nProductivity] = valueHighSoFar
        mPolicyFunction[nCapital,nProductivity] = capitalChoice
    return mValueFunctionNew, mPolicyFunction
def main_func():
    """Solve the basic RBC model by value-function iteration (Python 2).
    Returns a tuple (maxDifference, iteration, mValueFunction, mPolicyFunction):
    the final sup-norm change, iteration count, and the converged value and
    policy functions over the capital x productivity grid.
    """
    # 1. Calibration
    aalpha = 1.0/3.0 # Elasticity of output w.r.t. capital
    bbeta = 0.95 # Discount factor
    # Productivity values
    vProductivity = np.array([0.9792, 0.9896, 1.0000, 1.0106, 1.0212],float)
    # Transition matrix
    mTransition = np.array([[0.9727, 0.0273, 0.0000, 0.0000, 0.0000],
                            [0.0041, 0.9806, 0.0153, 0.0000, 0.0000],
                            [0.0000, 0.0082, 0.9837, 0.0082, 0.0000],
                            [0.0000, 0.0000, 0.0153, 0.9806, 0.0041],
                            [0.0000, 0.0000, 0.0000, 0.0273, 0.9727]],float)
    ## 2. Steady State
    capitalSteadyState = (aalpha*bbeta)**(1/(1-aalpha))
    outputSteadyState = capitalSteadyState**aalpha
    consumptionSteadyState = outputSteadyState-capitalSteadyState
    print "Output = ", outputSteadyState, " Capital = ", capitalSteadyState, " Consumption = ", consumptionSteadyState
    # We generate the grid of capital
    vGridCapital = np.arange(0.5*capitalSteadyState,1.5*capitalSteadyState,0.00001)
    nGridCapital = len(vGridCapital)
    nGridProductivity = len(vProductivity)
    ## 3. Required matrices and vectors
    mOutput = np.zeros((nGridCapital,nGridProductivity),dtype=float)
    mValueFunction = np.zeros((nGridCapital,nGridProductivity),dtype=float)
    mValueFunctionNew = np.zeros((nGridCapital,nGridProductivity),dtype=float)
    mPolicyFunction = np.zeros((nGridCapital,nGridProductivity),dtype=float)
    expectedValueFunction = np.zeros((nGridCapital,nGridProductivity),dtype=float)
    # 4. We pre-build output for each point in the grid
    for nProductivity in range(nGridProductivity):
        mOutput[:,nProductivity] = vProductivity[nProductivity]*(vGridCapital**aalpha)
    ## 5. Main iteration
    maxDifference = 10.0
    tolerance = 0.0000001
    iteration = 0
    # Bind frequently used callables to locals for speed inside the loop.
    # NOTE(review): `log` is bound but never used afterwards.
    log = math.log
    zeros = np.zeros
    dot = np.dot
    while(maxDifference > tolerance):
        # Expected continuation value: E[V(k', z') | z] via the transition matrix.
        expectedValueFunction = dot(mValueFunction,mTransition.T)
        for nProductivity in xrange(nGridProductivity):
            # We start from previous choice (monotonicity of policy function)
            gridCapitalNextPeriod = 0
            # - Start Inner Loop - #
            mValueFunctionNew, mPolicyFunction = innerloop(bbeta, nGridCapital, gridCapitalNextPeriod, mOutput, nProductivity, vGridCapital, expectedValueFunction, mValueFunction, mValueFunctionNew, mPolicyFunction)
            # - End Inner Loop - #
        maxDifference = (abs(mValueFunctionNew-mValueFunction)).max()
        mValueFunction = mValueFunctionNew
        mValueFunctionNew = zeros((nGridCapital,nGridProductivity),dtype=float)
        iteration += 1
        if(iteration%10 == 0 or iteration == 1):
            print " Iteration = ", iteration, ", Sup Diff = ", maxDifference
    return (maxDifference, iteration, mValueFunction, mPolicyFunction)
if __name__ == '__main__':
    # Script entry point: time the full value-function iteration and print
    # a spot-check of the converged policy function.
    # - Start Timer - #
    t1=time.time()
    # - Call Main Function - #
    maxDiff, iterate, mValueF, mPolicyFunction = main_func()
    # - End Timer - #
    t2 = time.time()
    # NOTE(review): "Sup Duff" below looks like a typo for "Sup Diff";
    # left untouched because it is runtime output.
    print " Iteration = ", iterate, ", Sup Duff = ", maxDiff
    print " "
    print " My Check = ", mPolicyFunction[1000-1,3-1]
    print " "
    print "Elapse time = is ", t2-t1
c7f98b55400f0b9fabd2188d22fb6daaee6282f9 | 27 | py | Python | sketchy/terminal/feature/merge/__init__.py | mbhall88/sketchy | 5ed26d28f104710f6d425053eae41fd0e99f8760 | [
"MIT"
] | null | null | null | sketchy/terminal/feature/merge/__init__.py | mbhall88/sketchy | 5ed26d28f104710f6d425053eae41fd0e99f8760 | [
"MIT"
] | null | null | null | sketchy/terminal/feature/merge/__init__.py | mbhall88/sketchy | 5ed26d28f104710f6d425053eae41fd0e99f8760 | [
"MIT"
] | null | null | null | from .commands import merge | 27 | 27 | 0.851852 |
4fae268ce23144aa877c5a5341720f051d9f4ae0 | 211 | py | Python | src/year2021/day01a.py | lancelote/advent_of_code | 06dda6ca034bc1e86addee7798bb9b2a34ff565b | [
"Unlicense"
] | 10 | 2017-12-11T17:54:52.000Z | 2021-12-09T20:16:30.000Z | src/year2021/day01a.py | lancelote/advent_of_code | 06dda6ca034bc1e86addee7798bb9b2a34ff565b | [
"Unlicense"
] | 260 | 2015-12-09T11:03:03.000Z | 2021-12-12T14:32:23.000Z | src/year2021/day01a.py | lancelote/advent_of_code | 06dda6ca034bc1e86addee7798bb9b2a34ff565b | [
"Unlicense"
] | null | null | null | """2021 - Day 1 Part 1: Sonar Sweep."""
from itertools import pairwise
def solve(task: str) -> int:
depths = (int(x) for x in task.strip().split("\n"))
return sum(a < b for (a, b) in pairwise(depths))
| 26.375 | 55 | 0.620853 |
f2fe58c1a326668393a3edad0c123383c6e0878e | 274 | py | Python | metadeploy/api/migrations/0023_merge_20181206_1541.py | sfdc-qbranch/MetaDeploy | d22547b3814dbec6aefa4d86b9f81c6f175c1b67 | [
"BSD-3-Clause"
] | 33 | 2019-03-20T15:34:39.000Z | 2022-03-30T15:59:40.000Z | metadeploy/api/migrations/0023_merge_20181206_1541.py | sfdc-qbranch/MetaDeploy | d22547b3814dbec6aefa4d86b9f81c6f175c1b67 | [
"BSD-3-Clause"
] | 2,718 | 2019-02-27T19:46:07.000Z | 2022-03-11T23:18:09.000Z | metadeploy/api/migrations/0023_merge_20181206_1541.py | sfdc-qbranch/MetaDeploy | d22547b3814dbec6aefa4d86b9f81c6f175c1b67 | [
"BSD-3-Clause"
] | 28 | 2019-03-28T04:57:16.000Z | 2022-02-04T16:49:25.000Z | # Generated by Django 2.1.4 on 2018-12-06 15:41
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration reconciling two divergent 0022 migrations of the api app."""
    dependencies = [
        ("api", "0022_allow_canceled_result"),
        ("api", "0022_order_products_and_categories"),
    ]
    # Intentionally empty: a merge migration only joins the two history branches
    # and performs no schema changes.
    operations = []
| 19.571429 | 54 | 0.667883 |
feb5193f4ce0f33686c4464d123170276bb22f52 | 3,720 | py | Python | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/GLU/glustruct.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/GLU/glustruct.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/GLU/glustruct.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | """Base class for GLU callback-caching structures"""
import ctypes
import weakref
from OpenGL._bytes import long, integer_types
class GLUStruct( object ):
    """Mix-in class for GLU Structures that want to retain references to callbacks
    Also provides original-object-return for the "datapointer" style parameters
    Each sub-class must override:
        CALLBACK_TYPES -- maps a "which" constant to a function type
        CALLBACK_FUNCTION_REGISTRARS -- maps a "which" constant to the
            registration function for functions of that type
        WRAPPER_METHODS -- maps a "which" constant to a method of the structure
            that produces a callable around the function which takes care of
            input/output arguments, data conversions, error handling and the
            like.
    Creates a dictionary member dataPointers if original-object-return is used
    Creates a dictionary member callbacks if callback registration is used
    """
    def getAsParam( self ):
        """Gets as a ctypes pointer to the underlying structure"""
        return ctypes.pointer( self )
    # ctypes consults _as_parameter_ when the structure is passed to a C function.
    _as_parameter_ = property( getAsParam )
    CALLBACK_TYPES = None
    CALLBACK_FUNCTION_REGISTRARS = None
    WRAPPER_METHODS = None
    def noteObject( self, object ):
        """Note object for later retrieval as a Python object pointer
        This is the registration point for "original object return", returns
        a void pointer to the Python object, though this is, effectively, an
        opaque value.
        """
        identity = id(object)
        try:
            self.dataPointers[ identity ] = object
        except AttributeError as err:
            # First registration: lazily create the id -> object mapping.
            self.dataPointers = { identity: object }
        return identity
    def originalObject( self, voidPointer ):
        """Given a void-pointer, try to find our original Python object"""
        if isinstance( voidPointer, integer_types):
            identity = voidPointer
        elif voidPointer is None:
            return None
        else:
            # Accept either a ctypes simple value (.value) or an array-like
            # pointer (index 0) as the identity source.
            try:
                identity = voidPointer.value
            except AttributeError as err:
                identity = voidPointer[0]
        try:
            return self.dataPointers[ identity ]
        except (KeyError,AttributeError) as err:
            # Unknown id, or nothing ever registered: hand back the raw pointer.
            return voidPointer
    def addCallback( self, which, function ):
        """Register a callback for this structure object"""
        callbackType = self.CALLBACK_TYPES.get( which )
        if not callbackType:
            raise ValueError(
                """Don't have a registered callback type for %r"""%(
                    which,
                )
            )
        wrapperMethod = self.WRAPPER_METHODS.get( which )
        if wrapperMethod is not None:
            # Wrap the user function (argument conversion, error handling, ...).
            function = getattr(self,wrapperMethod)( function )
        cCallback = callbackType( function )
        # XXX this is ugly, query to ctypes list on how to fix it...
        try:
            self.CALLBACK_FUNCTION_REGISTRARS[which]( self, which, cCallback )
        except ctypes.ArgumentError as err:
            err.args += (which,cCallback)
            raise
        #gluTessCallbackBase( self, which, cCallback)
        # XXX catch errors!
        # Keep a reference so the ctypes thunk outlives the C-side registration.
        if getattr( self, 'callbacks', None ) is None:
            self.callbacks = {}
        self.callbacks[ which ] = cCallback
        return cCallback
    def ptrAsArray( self, ptr, length, type ):
        """Copy length values from ptr into new array of given type"""
        result = type.zeros( (length,) )
        for i in range(length):
            result[i] = ptr[i]
        return result
7c8475b2bd5948c05b213222fa506ea5a494fa40 | 108 | py | Python | tests/samples/project/vendor/fooba/models/config_dependency_inheritance.py | machinable-org/machinable | 9d96e942dde05d68699bc7bc0c3d062ee18652ad | [
"MIT"
] | 23 | 2020-02-28T14:29:04.000Z | 2021-12-23T20:50:54.000Z | tests/samples/project/vendor/fooba/models/config_dependency_inheritance.py | machinable-org/machinable | 9d96e942dde05d68699bc7bc0c3d062ee18652ad | [
"MIT"
] | 172 | 2020-02-24T12:12:11.000Z | 2022-03-29T03:08:24.000Z | tests/samples/project/vendor/fooba/models/config_dependency_inheritance.py | machinable-org/machinable | 9d96e942dde05d68699bc7bc0c3d062ee18652ad | [
"MIT"
] | 1 | 2020-11-23T22:42:20.000Z | 2020-11-23T22:42:20.000Z | from .extended import ExtendedTestModel
class DependencyInhertanceTestModel(ExtendedTestModel):
pass
| 15.428571 | 55 | 0.833333 |
d1fea66e8c8da10d52738a32fafea13cc59eae8a | 957 | py | Python | backend/tests/improvements/test_mine.py | aecobb53/civ_vi_city_planner | bac9517507f0f50a172fe821a8c08d7945fdf74f | [
"MIT"
] | 2 | 2020-12-10T03:10:52.000Z | 2020-12-27T06:17:01.000Z | backend/tests/improvements/test_mine.py | aecobb53/civ_vi_city_planner | bac9517507f0f50a172fe821a8c08d7945fdf74f | [
"MIT"
] | 8 | 2020-12-10T06:38:10.000Z | 2021-05-01T22:27:37.000Z | backend/tests/improvements/test_mine.py | aecobb53/civ_vi_city_planner | bac9517507f0f50a172fe821a8c08d7945fdf74f | [
"MIT"
] | null | null | null | from backend.improvements.mine import Mine
import pytest
@pytest.fixture(scope="function")
def setup_improvement():
    """Provide a fresh Mine improvement instance for each test function."""
    imp = Mine()
    return imp
# Init
# Expected default attribute values of a freshly constructed Mine improvement.
# Each (attribute_name, expected_value) pair is consumed via parametrize.
testdata = [
    ('food', 0),
    ('production', 1),
    ('gold', 0),
    ('science', 0),
    ('culture', 0),
    ('faith', 0),
    ('housing', 0),
    ('appeal', -1),
    ('power', 0),
    ('acceptable_terrain', [
        'deserth',
        'grasslandh',
        'plainsh',
        'snowh',
        'tundrah',
    ]),
    ('acceptable_features', None),
    ('resources', [
        'copper',
        'diamonds',
        'gold_ore',
        'iron',
        'jade',
        'mercury',
        'salt',
        'niter',
        'coal',
        'aluminum',
        'uranium',
        'amber',
    ]),
]
@pytest.mark.parametrize("resource, value", testdata)
def test_init(setup_improvement, resource, value):
    """Each Mine attribute should equal its expected default value."""
    test_improvement = setup_improvement
    assert getattr(test_improvement, resource) == value
| 20.361702 | 55 | 0.524556 |
166b090810123ffc24f006e7ad0cc51fc8ab4bb2 | 1,950 | py | Python | src/sim/06-allegheny-05-school-work-flu/sim-test-02.py | momacs/pram | d2de43ea447d13a65d814f781ec86889754f76fe | [
"BSD-3-Clause"
] | 10 | 2019-01-18T19:11:54.000Z | 2022-03-16T08:39:36.000Z | src/sim/06-allegheny-05-school-work-flu/sim-test-02.py | momacs/pram | d2de43ea447d13a65d814f781ec86889754f76fe | [
"BSD-3-Clause"
] | 2 | 2019-02-19T15:10:44.000Z | 2019-02-26T04:26:24.000Z | src/sim/06-allegheny-05-school-work-flu/sim-test-02.py | momacs/pram | d2de43ea447d13a65d814f781ec86889754f76fe | [
"BSD-3-Clause"
] | 3 | 2019-02-19T15:11:08.000Z | 2021-08-20T11:51:04.000Z | '''
A test simulation involving the SEIR flu model that is run on an hourly basis (instead of the default hourly
basis).
'''
import pram.util as util
from pram.data import GroupSizeProbe, ProbeMsgMode
from pram.entity import Group, Site
from pram.rule import Rule, SEIRFluRule, TimeAlways
from pram.sim import Simulation
# ----------------------------------------------------------------------------------------------------------------------
class HourRule(Rule):
    """No-op rule whose only purpose is to declare an hourly time unit,
    making the simulation step hourly (see the module docstring)."""
    # One hour, expressed in milliseconds.
    T_UNIT_MS = util.Time.MS.h
    def __init__(self):
        super().__init__('hour-rule', TimeAlways())
    def apply(self, pop, group, iter, t):
        # Intentionally does nothing; the rule contributes no dynamics.
        pass
# ----------------------------------------------------------------------------------------------------------------------
rand_seed = 1928
# Probe reporting the population mass in each SEIR flu state at every step.
probe_grp_size_flu = GroupSizeProbe.by_attr('flu', SEIRFluRule.ATTR, SEIRFluRule.State, msg_mode=ProbeMsgMode.DISP, memo='Mass distribution across flu states')
# Build and run the simulation: 1000 agents, SEIR flu dynamics plus the hourly
# no-op rule, with auto-compact and auto-stop pragmas enabled (t=5, n=1);
# run for eight days and print summaries before and after.
(Simulation().
    set().
        rand_seed(rand_seed).
        pragma_autocompact(True).
        pragma_autostop(True).
        pragma_autostop_t(5).
        pragma_autostop_n(1).
        # pragma_autostop_p(0.001).
        done().
    add().
        rule(SEIRFluRule()).
        rule(HourRule()).
        probe(probe_grp_size_flu).
        done().
    new_group(1000).
        done().
    summary(True, 0,0,0,0, (0,1)).
    run('8d')
    .summary(False, 8,0,0,0, (1,0))
)
# (Simulation().
# set().
# # iter_cnt(24 * 3).
# rand_seed(rand_seed).
# pragma_analyze(False).
# pragma_autocompact(True).
# done().
# add().
# rule(SEIRFluRule()).
# rule(HourRule()).
# probe(probe_grp_size_flu).
# done().
# new_group(1000).
# done().
# run().summary(False, 8,0,0,0).
# run().summary(False, 8,0,0,0).
# run().summary(False, 8,0,0,0).
# run().summary(False, 8,0,0,0).
# run().summary(False, 8,0,0,0)
# )
| 27.857143 | 159 | 0.521026 |
47e179db41e8ea9c1fb17338e8315c927fc73de6 | 12,164 | py | Python | django/db/backends/mysql/operations.py | crdoconnor/django | c530428d360daf904c9e98515ea836643a73a54c | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2019-04-27T20:12:45.000Z | 2019-04-27T20:12:45.000Z | django/db/backends/mysql/operations.py | crdoconnor/django | c530428d360daf904c9e98515ea836643a73a54c | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | django/db/backends/mysql/operations.py | crdoconnor/django | c530428d360daf904c9e98515ea836643a73a54c | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2018-12-10T03:06:36.000Z | 2018-12-10T03:06:36.000Z | import uuid
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.utils import timezone
from django.utils.duration import duration_microseconds
from django.utils.encoding import force_text
class DatabaseOperations(BaseDatabaseOperations):
    """MySQL-specific SQL generation and Python<->database value conversion."""
    compiler_module = "django.db.backends.mysql.compiler"
    # MySQL stores positive fields as UNSIGNED ints.
    integer_field_ranges = {
        **BaseDatabaseOperations.integer_field_ranges,
        'PositiveSmallIntegerField': (0, 65535),
        'PositiveIntegerField': (0, 4294967295),
    }
    cast_data_types = {
        'CharField': 'char(%(max_length)s)',
        'TextField': 'char',
        'IntegerField': 'signed integer',
        'BigIntegerField': 'signed integer',
        'SmallIntegerField': 'signed integer',
        'PositiveIntegerField': 'unsigned integer',
        'PositiveSmallIntegerField': 'unsigned integer',
    }
    cast_char_field_without_max_length = 'char'
    explain_prefix = 'EXPLAIN'
    def date_extract_sql(self, lookup_type, field_name):
        """Return SQL extracting the given date part from a date column."""
        # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
        if lookup_type == 'week_day':
            # DAYOFWEEK() returns an integer, 1-7, Sunday=1.
            # Note: WEEKDAY() returns 0-6, Monday=0.
            return "DAYOFWEEK(%s)" % field_name
        elif lookup_type == 'week':
            # Override the value of default_week_format for consistency with
            # other database backends.
            # Mode 3: Monday, 1-53, with 4 or more days this year.
            return "WEEK(%s, 3)" % field_name
        else:
            # EXTRACT returns 1-53 based on ISO-8601 for the week number.
            return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
    def date_trunc_sql(self, lookup_type, field_name):
        """Return SQL truncating a date value to the given precision."""
        fields = {
            'year': '%%Y-01-01',
            'month': '%%Y-%%m-01',
        } # Use double percents to escape.
        if lookup_type in fields:
            format_str = fields[lookup_type]
            return "CAST(DATE_FORMAT(%s, '%s') AS DATE)" % (field_name, format_str)
        elif lookup_type == 'quarter':
            return "MAKEDATE(YEAR(%s), 1) + INTERVAL QUARTER(%s) QUARTER - INTERVAL 1 QUARTER" % (
                field_name, field_name
            )
        elif lookup_type == 'week':
            return "DATE_SUB(%s, INTERVAL WEEKDAY(%s) DAY)" % (
                field_name, field_name
            )
        else:
            return "DATE(%s)" % (field_name)
    def _convert_field_to_tz(self, field_name, tzname):
        # Only wrap in CONVERT_TZ when time-zone support is active; values are
        # stored in UTC in that case.
        if settings.USE_TZ:
            field_name = "CONVERT_TZ(%s, 'UTC', '%s')" % (field_name, tzname)
        return field_name
    def datetime_cast_date_sql(self, field_name, tzname):
        field_name = self._convert_field_to_tz(field_name, tzname)
        return "DATE(%s)" % field_name
    def datetime_cast_time_sql(self, field_name, tzname):
        field_name = self._convert_field_to_tz(field_name, tzname)
        return "TIME(%s)" % field_name
    def datetime_extract_sql(self, lookup_type, field_name, tzname):
        field_name = self._convert_field_to_tz(field_name, tzname)
        return self.date_extract_sql(lookup_type, field_name)
    def datetime_trunc_sql(self, lookup_type, field_name, tzname):
        """Return SQL truncating a datetime value to the given precision."""
        field_name = self._convert_field_to_tz(field_name, tzname)
        fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
        format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
        format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
        if lookup_type == 'quarter':
            return (
                "CAST(DATE_FORMAT(MAKEDATE(YEAR({field_name}), 1) + "
                "INTERVAL QUARTER({field_name}) QUARTER - " +
                "INTERVAL 1 QUARTER, '%%Y-%%m-01 00:00:00') AS DATETIME)"
            ).format(field_name=field_name)
        if lookup_type == 'week':
            return (
                "CAST(DATE_FORMAT(DATE_SUB({field_name}, "
                "INTERVAL WEEKDAY({field_name}) DAY), "
                "'%%Y-%%m-%%d 00:00:00') AS DATETIME)"
            ).format(field_name=field_name)
        try:
            i = fields.index(lookup_type) + 1
        except ValueError:
            # Unknown precision: leave the value untouched.
            sql = field_name
        else:
            # Keep the requested leading components, zero out the rest.
            format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
            sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
        return sql
    def time_trunc_sql(self, lookup_type, field_name):
        """Return SQL truncating a time value to the given precision."""
        fields = {
            'hour': '%%H:00:00',
            'minute': '%%H:%%i:00',
            'second': '%%H:%%i:%%s',
        } # Use double percents to escape.
        if lookup_type in fields:
            format_str = fields[lookup_type]
            return "CAST(DATE_FORMAT(%s, '%s') AS TIME)" % (field_name, format_str)
        else:
            return "TIME(%s)" % (field_name)
    def date_interval_sql(self, timedelta):
        return 'INTERVAL %s MICROSECOND' % duration_microseconds(timedelta)
    def format_for_duration_arithmetic(self, sql):
        return 'INTERVAL %s MICROSECOND' % sql
    def force_no_ordering(self):
        """
        "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
        columns. If no ordering would otherwise be applied, we don't want any
        implicit sorting going on.
        """
        return [(None, ("NULL", [], False))]
    def last_executed_query(self, cursor, sql, params):
        # With MySQLdb, cursor objects have an (undocumented) "_last_executed"
        # attribute where the exact query sent to the database is saved.
        # See MySQLdb/cursors.py in the source distribution.
        return force_text(getattr(cursor, '_last_executed', None), errors='replace')
    def no_limit_value(self):
        # 2**64 - 1, as recommended by the MySQL documentation
        return 18446744073709551615
    def quote_name(self, name):
        """Backquote-quote an identifier, avoiding double quoting."""
        if name.startswith("`") and name.endswith("`"):
            return name # Quoting once is enough.
        return "`%s`" % name
    def random_function_sql(self):
        return 'RAND()'
    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        """Return statements that empty the given tables (MySQL TRUNCATE)."""
        # NB: The generated SQL below is specific to MySQL
        # 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
        # to clear all tables of all data
        if tables:
            # Foreign key checks are disabled so truncation order doesn't matter.
            sql = ['SET FOREIGN_KEY_CHECKS = 0;']
            for table in tables:
                sql.append('%s %s;' % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    style.SQL_FIELD(self.quote_name(table)),
                ))
            sql.append('SET FOREIGN_KEY_CHECKS = 1;')
            sql.extend(self.sequence_reset_by_name_sql(style, sequences))
            return sql
        else:
            return []
    def validate_autopk_value(self, value):
        # MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
        if value == 0:
            raise ValueError('The database backend does not accept 0 as a '
                             'value for AutoField.')
        return value
    def adapt_datetimefield_value(self, value):
        """Convert a datetime to the string MySQL expects for storage."""
        if value is None:
            return None
        # Expression values are adapted by the database.
        if hasattr(value, 'resolve_expression'):
            return value
        # MySQL doesn't support tz-aware datetimes
        if timezone.is_aware(value):
            if settings.USE_TZ:
                value = timezone.make_naive(value, self.connection.timezone)
            else:
                raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
        return str(value)
    def adapt_timefield_value(self, value):
        """Convert a time to the string MySQL expects for storage."""
        if value is None:
            return None
        # Expression values are adapted by the database.
        if hasattr(value, 'resolve_expression'):
            return value
        # MySQL doesn't support tz-aware times
        if timezone.is_aware(value):
            raise ValueError("MySQL backend does not support timezone-aware times.")
        return str(value)
    def max_name_length(self):
        # MySQL identifier length limit.
        return 64
    def bulk_insert_sql(self, fields, placeholder_rows):
        """Return the VALUES clause for a multi-row INSERT."""
        placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
        values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql)
        return "VALUES " + values_sql
    def combine_expression(self, connector, sub_expressions):
        """Combine sub-expressions with MySQL-specific operator handling."""
        if connector == '^':
            return 'POW(%s)' % ','.join(sub_expressions)
        # Convert the result to a signed integer since MySQL's binary operators
        # return an unsigned integer.
        elif connector in ('&', '|', '<<'):
            return 'CONVERT(%s, SIGNED)' % connector.join(sub_expressions)
        elif connector == '>>':
            lhs, rhs = sub_expressions
            return 'FLOOR(%(lhs)s / POW(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}
        return super().combine_expression(connector, sub_expressions)
    def get_db_converters(self, expression):
        """Append MySQL-specific result converters based on the field type."""
        converters = super().get_db_converters(expression)
        internal_type = expression.output_field.get_internal_type()
        if internal_type == 'TextField':
            converters.append(self.convert_textfield_value)
        elif internal_type in ['BooleanField', 'NullBooleanField']:
            converters.append(self.convert_booleanfield_value)
        elif internal_type == 'DateTimeField':
            if settings.USE_TZ:
                converters.append(self.convert_datetimefield_value)
        elif internal_type == 'UUIDField':
            converters.append(self.convert_uuidfield_value)
        return converters
    def convert_textfield_value(self, value, expression, connection):
        if value is not None:
            value = force_text(value)
        return value
    def convert_booleanfield_value(self, value, expression, connection):
        # MySQL returns booleans as 0/1 integers.
        if value in (0, 1):
            value = bool(value)
        return value
    def convert_datetimefield_value(self, value, expression, connection):
        # Re-attach the connection time zone to the naive value from MySQL.
        if value is not None:
            value = timezone.make_aware(value, self.connection.timezone)
        return value
    def convert_uuidfield_value(self, value, expression, connection):
        if value is not None:
            value = uuid.UUID(value)
        return value
    def binary_placeholder_sql(self, value):
        # Use the _binary introducer for literal binary values (not expressions).
        return '_binary %s' if value is not None and not hasattr(value, 'as_sql') else '%s'
    def subtract_temporals(self, internal_type, lhs, rhs):
        """Return SQL computing lhs - rhs in microseconds for temporal fields."""
        lhs_sql, lhs_params = lhs
        rhs_sql, rhs_params = rhs
        if internal_type == 'TimeField':
            return (
                "((TIME_TO_SEC(%(lhs)s) * 1000000 + MICROSECOND(%(lhs)s)) -"
                " (TIME_TO_SEC(%(rhs)s) * 1000000 + MICROSECOND(%(rhs)s)))"
            ) % {'lhs': lhs_sql, 'rhs': rhs_sql}, lhs_params * 2 + rhs_params * 2
        else:
            return "TIMESTAMPDIFF(MICROSECOND, %s, %s)" % (rhs_sql, lhs_sql), rhs_params + lhs_params
    def explain_query_prefix(self, format=None, **options):
        """Return the EXPLAIN prefix, mapping format names to MySQL's."""
        # Alias MySQL's TRADITIONAL to TEXT for consistency with other backends.
        if format and format.upper() == 'TEXT':
            format = 'TRADITIONAL'
        prefix = super().explain_query_prefix(format, **options)
        if format:
            prefix += ' FORMAT=%s' % format
        if self.connection.features.needs_explain_extended and format is None:
            # EXTENDED and FORMAT are mutually exclusive options.
            prefix += ' EXTENDED'
        return prefix
    def regex_lookup(self, lookup_type):
        """Return the SQL template for regex/iregex lookups per server version."""
        # REGEXP BINARY doesn't work correctly in MySQL 8+ and REGEXP_LIKE
        # doesn't exist in MySQL 5.6.
        if self.connection.mysql_version < (8, 0, 0):
            if lookup_type == 'regex':
                return '%s REGEXP BINARY %s'
            return '%s REGEXP %s'
        match_option = 'c' if lookup_type == 'regex' else 'i'
        return "REGEXP_LIKE(%%s, %%s, '%s')" % match_option
| 41.094595 | 113 | 0.605886 |
a0b5918d1dd1ea976c8f51901ddddb3226d6e815 | 6,263 | py | Python | muskit/svs/feats_extract/dio.py | A-Quarter-Mile/Muskits | 60d80727d2ec6b8ec405502d67796e8df319ea82 | [
"Apache-2.0"
] | 74 | 2021-04-15T15:39:32.000Z | 2022-03-23T03:34:25.000Z | muskit/svs/feats_extract/dio.py | A-Quarter-Mile/Muskits | 60d80727d2ec6b8ec405502d67796e8df319ea82 | [
"Apache-2.0"
] | 33 | 2021-04-30T18:24:47.000Z | 2022-03-29T13:27:40.000Z | muskit/svs/feats_extract/dio.py | A-Quarter-Mile/Muskits | 60d80727d2ec6b8ec405502d67796e8df319ea82 | [
"Apache-2.0"
] | 24 | 2021-04-15T15:20:32.000Z | 2022-03-19T04:03:25.000Z | # Copyright 2020 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""F0 extractor using DIO + Stonemask algorithm."""
import logging
from typing import Any
from typing import Dict
from typing import Tuple
from typing import Union
import humanfriendly
import numpy as np
import pyworld
import torch
import torch.nn.functional as F
from scipy.interpolate import interp1d
from typeguard import check_argument_types
from muskit.torch_utils.nets_utils import pad_list
from muskit.svs.feats_extract.abs_feats_extract import AbsFeatsExtract
class Dio(AbsFeatsExtract):
"""F0 estimation with dio + stonemask algorithm.
This is f0 extractor based on dio + stonmask algorithm introduced in `WORLD:
a vocoder-based high-quality speech synthesis system for real-time applications`_.
.. _`WORLD: a vocoder-based high-quality speech synthesis system for real-time
applications`: https://doi.org/10.1587/transinf.2015EDP7457
Note:
This module is based on NumPy implementation. Therefore, the computational graph
is not connected.
Todo:
Replace this module with PyTorch-based implementation.
"""
def __init__(
self,
fs: Union[int, str] = 22050,
n_fft: int = 1024,
hop_length: int = 256,
f0min: int = 80,
f0max: int = 400,
use_token_averaged_f0: bool = False,
use_continuous_f0: bool = True,
use_log_f0: bool = True,
reduction_factor: int = 1,
):
assert check_argument_types()
super().__init__()
if isinstance(fs, str):
fs = humanfriendly.parse_size(fs)
self.fs = fs
self.n_fft = n_fft
self.hop_length = hop_length
self.frame_period = 1000 * hop_length / fs
self.f0min = f0min
self.f0max = f0max
self.use_token_averaged_f0 = use_token_averaged_f0
self.use_continuous_f0 = use_continuous_f0
self.use_log_f0 = use_log_f0
if use_token_averaged_f0:
assert reduction_factor >= 1
self.reduction_factor = reduction_factor
def output_size(self) -> int:
return 1
def get_parameters(self) -> Dict[str, Any]:
return dict(
fs=self.fs,
n_fft=self.n_fft,
hop_length=self.hop_length,
f0min=self.f0min,
f0max=self.f0max,
use_token_averaged_f0=self.use_token_averaged_f0,
use_continuous_f0=self.use_continuous_f0,
use_log_f0=self.use_log_f0,
reduction_factor=self.reduction_factor,
)
def forward(
self,
input: torch.Tensor,
input_lengths: torch.Tensor = None,
feats_lengths: torch.Tensor = None,
durations: torch.Tensor = None,
durations_lengths: torch.Tensor = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
# If not provide, we assume that the inputs have the same length
if input_lengths is None:
input_lengths = (
input.new_ones(input.shape[0], dtype=torch.long) * input.shape[1]
)
# F0 extraction
pitch = [self._calculate_f0(x[:xl]) for x, xl in zip(input, input_lengths)]
# (Optional): Adjust length to match with the mel-spectrogram
if feats_lengths is not None:
pitch = [
self._adjust_num_frames(p, fl).view(-1)
for p, fl in zip(pitch, feats_lengths)
]
# (Optional): Average by duration to calculate token-wise f0
if self.use_token_averaged_f0:
durations = durations * self.reduction_factor
pitch = [
self._average_by_duration(p, d).view(-1)
for p, d in zip(pitch, durations)
]
pitch_lengths = durations_lengths
else:
pitch_lengths = input.new_tensor([len(p) for p in pitch], dtype=torch.long)
# Padding
pitch = pad_list(pitch, 0.0)
# Return with the shape (B, T, 1)
return pitch.unsqueeze(-1), pitch_lengths
    def _calculate_f0(self, input: torch.Tensor) -> torch.Tensor:
        """Extract a frame-level F0 contour from one waveform using pyworld.

        Args:
            input: 1-D waveform tensor.

        Returns:
            1-D float tensor of per-frame F0 values; 0 marks unvoiced frames
            (unless continuous F0 is enabled, which fills them in).
        """
        # pyworld operates on float64 numpy arrays.
        x = input.cpu().numpy().astype(np.double)
        # DIO: fast initial F0 trajectory estimation.
        f0, timeaxis = pyworld.dio(
            x,
            self.fs,
            f0_floor=self.f0min,
            f0_ceil=self.f0max,
            frame_period=self.frame_period,
        )
        # StoneMask refines the DIO estimate against the waveform.
        f0 = pyworld.stonemask(x, f0, timeaxis, self.fs)
        if self.use_continuous_f0:
            f0 = self._convert_to_continuous_f0(f0)
        if self.use_log_f0:
            # Only voiced (non-zero) frames are mapped to log scale; zeros stay 0.
            nonzero_idxs = np.where(f0 != 0)[0]
            f0[nonzero_idxs] = np.log(f0[nonzero_idxs])
        return input.new_tensor(f0.reshape(-1), dtype=torch.float)
@staticmethod
def _adjust_num_frames(x: torch.Tensor, num_frames: torch.Tensor) -> torch.Tensor:
if num_frames > len(x):
x = F.pad(x, (0, num_frames - len(x)))
elif num_frames < len(x):
x = x[:num_frames]
return x
@staticmethod
def _convert_to_continuous_f0(f0: np.array) -> np.array:
if (f0 == 0).all():
logging.warn("All frames seems to be unvoiced.")
return f0
# padding start and end of f0 sequence
start_f0 = f0[f0 != 0][0]
end_f0 = f0[f0 != 0][-1]
start_idx = np.where(f0 == start_f0)[0][0]
end_idx = np.where(f0 == end_f0)[0][-1]
f0[:start_idx] = start_f0
f0[end_idx:] = end_f0
# get non-zero frame index
nonzero_idxs = np.where(f0 != 0)[0]
# perform linear interpolation
interp_fn = interp1d(nonzero_idxs, f0[nonzero_idxs])
f0 = interp_fn(np.arange(0, f0.shape[0]))
return f0
def _average_by_duration(self, x: torch.Tensor, d: torch.Tensor) -> torch.Tensor:
assert 0 <= len(x) - d.sum() < self.reduction_factor
d_cumsum = F.pad(d.cumsum(dim=0), (1, 0))
x_avg = [
x[start:end].masked_select(x[start:end].gt(0.0)).mean(dim=0)
if len(x[start:end].masked_select(x[start:end].gt(0.0))) != 0
else x.new_tensor(0.0)
for start, end in zip(d_cumsum[:-1], d_cumsum[1:])
]
return torch.stack(x_avg)
| 34.224044 | 88 | 0.608654 |
11eabff3021d40aa4ce1808e915123def7af4bba | 4,320 | py | Python | tools/run_sim.py | ptracton/wb_soc_template | ddd2e8460fa996551ef2d1fa5be2550c66d96bf3 | [
"MIT"
] | null | null | null | tools/run_sim.py | ptracton/wb_soc_template | ddd2e8460fa996551ef2d1fa5be2550c66d96bf3 | [
"MIT"
] | null | null | null | tools/run_sim.py | ptracton/wb_soc_template | ddd2e8460fa996551ef2d1fa5be2550c66d96bf3 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
"""
run_sim.py
Run FPGA simulations via Icarus, NCVerilog, Modelsim or Isim.
"""
import json
import os
import shlex
import subprocess
import sys
import argparse
import string
def which(program):
    """Return the path to an executable program, or None if it cannot be found.

    Mirrors the Unix ``which`` command: if ``program`` contains a directory
    component it is checked directly; otherwise every entry of the PATH
    environment variable is searched in order.
    """
    def is_exe(fpath):
        """
        Return True if the fpath exists and is executable. This is needed since
        executables are specifed in the JSON files, but not the path to them.
        The executables may be in different locations based on which PC is
        running this.
        """
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

    # Fix: the split tuple's filename half was never used; dirname is clearer.
    fpath = os.path.dirname(program)
    if fpath:
        if is_exe(program):
            return program
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            # Some platforms quote PATH entries; strip the quotes before joining.
            path = path.strip('"')
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file
    return None
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Run FPGA Simulation')
    parser.add_argument("-D", "--debug",
                        help="Debug this script",
                        action="store_true")
    parser.add_argument("--icarus",
                        help="Use Icarus Verilog",
                        action="store_true")
    parser.add_argument("--ncverilog",
                        help="Use NCVerilog",
                        action="store_true")
    parser.add_argument("--xsim",
                        help="Use Xilinx Vivado XSim",
                        action="store_true")
    parser.add_argument("--modelsim",
                        help="Use Altera Modelsim",
                        action="store_true")
    parser.add_argument("--simulation",
                        help="Which simulation test case to run",
                        required=True,
                        action="store")
    parser.add_argument("--application",
                        help="Which code to run in this test",
                        required=False,
                        action="store")
    parser.add_argument("--cpu",
                        help="Which CPU to use",
                        required=False,
                        default="LM32",
                        action="store")
    parser.add_argument("--technology",
                        help="Which technology to use, RTL, ALTERA or XILINX",
                        required=False,
                        default="RTL",
                        action="store")
    args = parser.parse_args()

    if args.debug:
        print(os.environ['PATH'])
        print(args)

    # Map the chosen simulator flag to its flow-configuration file.  When
    # several flags are given, the last assignment wins (modelsim highest),
    # matching the original precedence.
    json_file = None
    tool = None
    if args.icarus:
        json_file = "../configurations/simulate_iverilog.json"
        tool = "icarus"
    if args.ncverilog:
        json_file = "../configurations/simulate_ncverilog.json"
        tool = "ncverilog"
    if args.xsim:
        json_file = "../configurations/simulate_xsim.json"
        tool = "xsim"
    if args.modelsim:
        json_file = "../configurations/simulate_modelsim.json"
        tool = "modelsim"
    if json_file is None:
        # Fix: previously this fell through to a NameError on json_file.
        parser.error("one of --icarus, --ncverilog, --xsim or --modelsim is required")

    try:
        # Fix: 'with' guarantees the file is closed, and only file/JSON errors
        # are caught instead of a bare except swallowing everything.
        with open(json_file, "r") as f:
            json_data = json.load(f)
    except (OSError, ValueError):
        print("Failed to open %s" % (json_file))
        sys.exit(-1)

    flow_steps = json_data['flow_steps']
    if args.debug:
        print(flow_steps)

    # Execute each configured flow step in order, expanding the command-line
    # template with the user's selections.
    for step in sorted(flow_steps.keys()):
        print("\nRunning Step: %s " % step)
        executable = json_data['flow'][flow_steps[step]]['executable']
        arguments = string.Template(
            json_data['flow'][flow_steps[step]]['arguments'])
        arguments_str = arguments.safe_substitute(simulation=args.simulation,
                                                  application=args.application,
                                                  tool=tool,
                                                  cpu=args.cpu,
                                                  technology=args.technology)
        if args.debug:
            print(executable)
        # `arguments` is always a Template here, so the old `arguments is None`
        # branch was dead code; the command always includes the expanded args.
        command = executable + " " + arguments_str
        print(command)
        command = shlex.split(command)
        p = subprocess.Popen(command)
        p.communicate()
213602451fd8941d0f5d0d762712d59591c86c3b | 1,665 | py | Python | sarsa/sarsa_mc.py | AgentMaker/Paddle-RLBooks | 2e879f7ec3befa2058f0181e205b790d47770a85 | [
"Apache-2.0"
] | 127 | 2021-03-22T07:34:43.000Z | 2022-02-04T13:33:15.000Z | sarsa/sarsa_mc.py | WhiteFireFox/Paddle-RLBooks | 1a6add1d01b1bab08bb9d246fcd6ab852a43c18c | [
"Apache-2.0"
] | 1 | 2021-05-16T09:51:07.000Z | 2021-05-16T09:51:07.000Z | sarsa/sarsa_mc.py | WhiteFireFox/Paddle-RLBooks | 1a6add1d01b1bab08bb9d246fcd6ab852a43c18c | [
"Apache-2.0"
] | 16 | 2021-04-03T05:31:30.000Z | 2022-03-26T07:53:49.000Z | import numpy as np
from cliffwalk.cliffwalk import GridWorld
def egreedy_policy(q_values, state, epsilon=0.1):
if np.random.random() < epsilon:
return np.random.choice(4)
else:
return np.argmax(q_values[state])
def sarsa_mc(env,
actions=['UP', 'DOWN', 'RIGHT', 'LEFT'],
num_states=4*12,
num_actions=4,
epochs=1500,
render=True,
exploration_rate=0.1,
learning_rate=0.5,
gamma=0.9):
q = np.zeros((num_states, num_actions))
reward_sum_list = []
for i in range(epochs):
state = env.reset()
done = False
reward_sum = 0
action = egreedy_policy(q, state, exploration_rate)
Gt = 0
step = 0
while not done:
next_state, reward, done = env.step(action)
reward_sum += reward
next_action = egreedy_policy(q, next_state, exploration_rate)
Gt += gamma ** step * reward
state = next_state
action = next_action
step += 1
if i % 100 == 0:
env.render(q, action=actions[action], colorize_q=True)
if step % 20 == 0:
break
error = Gt - q[state][action]
q[state][action] += learning_rate * error
reward_sum_list.append(reward_sum)
if i % 3 == 0:
print('Average scores = ', np.mean(reward_sum_list))
reward_sum_list = []
return q
def train():
    """Train a tabular agent on the cliff-walk grid world with fixed hyper-parameters."""
    environment = GridWorld()
    sarsa_mc(environment, render=False, learning_rate=0.5, gamma=0.99)
# Script entry point.
if __name__ == '__main__':
    train()
| 26.015625 | 73 | 0.538739 |
e77964d875066ad3ed8a2d286cd851bccbcf85c0 | 807 | py | Python | backend/recipe_backend/recipe_backend/urls.py | Nadin007/foodgram-project-react | 969f07e4cc74321fbdb566d4cb160ac8eefdde3e | [
"MIT"
] | null | null | null | backend/recipe_backend/recipe_backend/urls.py | Nadin007/foodgram-project-react | 969f07e4cc74321fbdb566d4cb160ac8eefdde3e | [
"MIT"
] | null | null | null | backend/recipe_backend/recipe_backend/urls.py | Nadin007/foodgram-project-react | 969f07e4cc74321fbdb566d4cb160ac8eefdde3e | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from django.views.generic import TemplateView
# URL routing table: admin site, the two REST API apps, and the redoc schema views.
urlpatterns = [
    path('admin/', admin.site.urls),
    # Both API apps are mounted under the shared /api/ prefix.
    path('api/', include('recipe_features.urls')),
    path('api/', include('users.urls')),
    # Human-readable API docs rendered from the OpenAPI schema.
    path('redoc/', TemplateView.as_view(
        template_name='redoc.html',
        extra_context={'schema_url': 'openapi-schema'}
    ), name='redoc'),
    # Raw schema file referenced by the redoc page.
    path('redoc/openapi-schema.yml', TemplateView.as_view(
        template_name='openapi-schema.yml',
    ))
]
if settings.DEBUG:
    # Serve uploaded media and static files directly in development; in
    # production a front-end web server is expected to handle these paths.
    urlpatterns += static(
        settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    urlpatterns += static(
        settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 32.28 | 64 | 0.702602 |
59444ec2836128266d785c3f1502e917a05f4217 | 10,634 | py | Python | models/common.py | MinkaiXu/GeoDiff | c6f26dc250308bff8923a19884e601e0bb0f975a | [
"MIT"
] | 9 | 2022-03-08T12:32:29.000Z | 2022-03-31T10:39:45.000Z | models/common.py | MinkaiXu/GeoDiff | c6f26dc250308bff8923a19884e601e0bb0f975a | [
"MIT"
] | 1 | 2022-03-30T23:03:07.000Z | 2022-03-31T00:12:07.000Z | models/common.py | MinkaiXu/GeoDiff | c6f26dc250308bff8923a19884e601e0bb0f975a | [
"MIT"
] | 3 | 2022-03-01T06:45:40.000Z | 2022-03-30T13:12:20.000Z | # coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import radius_graph, radius
from torch_scatter import scatter_mean, scatter_add, scatter_max
from torch_sparse import coalesce
from torch_geometric.utils import to_dense_adj, dense_to_sparse
from utils.chem import BOND_TYPES
class MeanReadout(nn.Module):
    """Mean readout operator over graphs with variadic sizes."""
    def forward(self, data, input):
        """
        Perform readout over the graph(s).
        Parameters:
            data (torch_geometric.data.Data): batched graph
            input (Tensor): node representations
        Returns:
            Tensor: graph representations
        """
        # Per-graph mean of node features; dim_size pins the first output
        # dimension to data.num_graphs.
        output = scatter_mean(input, data.batch, dim=0, dim_size=data.num_graphs)
        return output
class SumReadout(nn.Module):
    """Sum readout operator over graphs with variadic sizes."""
    def forward(self, data, input):
        """
        Perform readout over the graph(s).
        Parameters:
            data (torch_geometric.data.Data): batched graph
            input (Tensor): node representations
        Returns:
            Tensor: graph representations
        """
        # Per-graph sum of node features; dim_size pins the first output
        # dimension to data.num_graphs.
        output = scatter_add(input, data.batch, dim=0, dim_size=data.num_graphs)
        return output
class MultiLayerPerceptron(nn.Module):
    """
    Multi-layer Perceptron.
    Note there is no activation or dropout in the last layer.

    Parameters:
        input_dim (int): input dimension
        hidden_dims (list of int): hidden dimensions
        activation (str or function, optional): name of a function in
            ``torch.nn.functional``, or a callable applied between layers
        dropout (float, optional): dropout rate
    """

    def __init__(self, input_dim, hidden_dims, activation="relu", dropout=0):
        super(MultiLayerPerceptron, self).__init__()

        self.dims = [input_dim] + hidden_dims
        if isinstance(activation, str):
            self.activation = getattr(F, activation)
        elif callable(activation):
            # Fix: a callable activation was previously discarded (set to
            # None), silently making the network purely linear even though the
            # docstring advertises "str or function".
            self.activation = activation
        else:
            self.activation = None
        if dropout:
            self.dropout = nn.Dropout(dropout)
        else:
            self.dropout = None

        # One Linear layer per consecutive pair of dimensions.
        self.layers = nn.ModuleList()
        for i in range(len(self.dims) - 1):
            self.layers.append(nn.Linear(self.dims[i], self.dims[i + 1]))

    def forward(self, input):
        """Apply each layer; activation and dropout are skipped after the last layer."""
        x = input
        for i, layer in enumerate(self.layers):
            x = layer(x)
            if i < len(self.layers) - 1:
                if self.activation:
                    x = self.activation(x)
                if self.dropout:
                    x = self.dropout(x)
        return x
def assemble_atom_pair_feature(node_attr, edge_index, edge_attr):
    """Build per-edge features from endpoint node embeddings and edge embeddings.

    The two endpoint embeddings are combined multiplicatively and concatenated
    with the edge embedding, giving an (E, 2H) tensor for (., H) inputs.
    """
    src, dst = edge_index[0], edge_index[1]
    pairwise = node_attr[src] * node_attr[dst]
    return torch.cat([pairwise, edge_attr], dim=-1)
def generate_symmetric_edge_noise(num_nodes_per_graph, edge_index, edge2graph, device):
    """Sample Gaussian noise that is identical for both directions of every edge.

    One noise slot exists for each unordered node pair (per graph); the two
    directed copies of an edge index into the same slot, so the returned noise
    is symmetric.

    Args:
        num_nodes_per_graph: (G,) node count of each graph in the batch.
        edge_index: (2, E) directed edges in batched node indexing.
        edge2graph: (E,) graph id of every edge.
        device: device on which the noise is allocated.

    Returns:
        (E, 1) tensor of per-edge noise, equal for (i, j) and (j, i).
    """
    cum_nodes = num_nodes_per_graph.cumsum(0)                     # (G, )
    node_offset = cum_nodes - num_nodes_per_graph                 # (G, )
    edge_offset = node_offset[edge2graph]                         # (E, )

    # Every graph reserves a contiguous slab of n_g**2 noise slots.
    nodes_sqr = num_nodes_per_graph ** 2                          # (G, )
    cum_sqr = nodes_sqr.cumsum(-1)                                # (G, )
    slab_start = (cum_sqr - nodes_sqr)[edge2graph]                # (E, )
    total_slots = cum_sqr[-1]

    # Map each directed edge to an id that depends only on the unordered pair.
    local_nodes = edge_index.t() - edge_offset.unsqueeze(-1)
    hi = local_nodes.max(dim=-1)[0]
    lo = local_nodes.min(dim=-1)[0]
    pair_id = hi * (hi + 1) + lo + slab_start

    pool = torch.zeros(size=[total_slots.item()], device=device)
    pool.normal_()
    return pool[pair_id].unsqueeze(-1)                            # (E, 1)
def _extend_graph_order(num_nodes, edge_index, edge_type, order=3):
    """
    Extend the bond graph with virtual edges between atoms up to `order` hops apart.

    Multi-hop pairs receive new edge-type ids past the real bond types, so the
    hop distance is recoverable from the type.

    Args:
        num_nodes: Number of atoms.
        edge_index: Bond indices of the original graph.
        edge_type: Bond types of the original graph.
        order: Extension order.
    Returns:
        new_edge_index: Extended edge indices.
        new_edge_type: Extended edge types.
    """
    def binarize(x):
        # Clamp a count matrix to a 0/1 adjacency matrix.
        return torch.where(x > 0, torch.ones_like(x), torch.zeros_like(x))

    def get_higher_order_adj_matrix(adj, order):
        """
        Args:
            adj: (N, N) binary adjacency matrix.
        Returns:
            (N, N) matrix whose entry (i, j) is the smallest hop count
            (1..order) connecting i and j, or 0 if they are farther apart.
        """
        # adj_mats[k] marks pairs reachable within k hops (including self).
        adj_mats = [torch.eye(adj.size(0), dtype=torch.long, device=adj.device), \
                    binarize(adj + torch.eye(adj.size(0), dtype=torch.long, device=adj.device))]

        for i in range(2, order+1):
            adj_mats.append(binarize(adj_mats[i-1] @ adj_mats[1]))
        order_mat = torch.zeros_like(adj)

        # Pairs first reachable at hop i get label i.
        for i in range(1, order+1):
            order_mat += (adj_mats[i] - adj_mats[i-1]) * i

        return order_mat

    num_types = len(BOND_TYPES)

    N = num_nodes
    adj = to_dense_adj(edge_index).squeeze(0)
    adj_order = get_higher_order_adj_matrix(adj, order)  # (N, N)

    type_mat = to_dense_adj(edge_index, edge_attr=edge_type).squeeze(0)   # (N, N)
    # Multi-hop pairs get fresh type ids beyond the real bond types; true
    # bonds (hop 1) keep their original type.
    type_highorder = torch.where(adj_order > 1, num_types + adj_order - 1, torch.zeros_like(adj_order))
    # Real-bond entries and high-order entries must not overlap.
    assert (type_mat * type_highorder == 0).all()
    type_new = type_mat + type_highorder

    new_edge_index, new_edge_type = dense_to_sparse(type_new)
    _, edge_order = dense_to_sparse(adj_order)

    # data.bond_edge_index = data.edge_index  # Save original edges
    new_edge_index, new_edge_type = coalesce(new_edge_index, new_edge_type.long(), N, N) # modify data

    # [Note] This is not necessary
    # data.is_bond = (data.edge_type < num_types)

    # [Note] In earlier versions, `edge_order` attribute will be added.
    # However, it doesn't seem to be necessary anymore so I removed it.
    # edge_index_1, data.edge_order = coalesce(new_edge_index, edge_order.long(), N, N) # modify data
    # assert (data.edge_index == edge_index_1).all()

    return new_edge_index, new_edge_type
def _extend_to_radius_graph(pos, edge_index, edge_type, cutoff, batch, unspecified_type_number=0, is_sidechain=None):
    """Merge spatial (radius) edges into an existing typed edge set.

    Pairs of atoms closer than ``cutoff`` gain an edge of type
    ``unspecified_type_number``; existing typed edges keep their type (the two
    sparse adjacencies are summed and coalesced).  When ``is_sidechain`` is
    given, only pairs involving a sidechain atom are considered for the radius
    edges.
    """
    assert edge_type.dim() == 1
    N = pos.size(0)

    # NOTE(review): torch.sparse.LongTensor is a legacy constructor; newer
    # torch versions prefer torch.sparse_coo_tensor — confirm before upgrading.
    bgraph_adj = torch.sparse.LongTensor(
        edge_index,
        edge_type,
        torch.Size([N, N])
    )

    if is_sidechain is None:
        rgraph_edge_index = radius_graph(pos, r=cutoff, batch=batch)  # (2, E_r)
    else:
        # fetch sidechain and its batch index
        is_sidechain = is_sidechain.bool()
        dummy_index = torch.arange(pos.size(0), device=pos.device)
        sidechain_pos = pos[is_sidechain]
        sidechain_index = dummy_index[is_sidechain]
        sidechain_batch = batch[is_sidechain]

        # Neighbours of each sidechain atom within the cutoff radius.
        assign_index = radius(x=pos, y=sidechain_pos, r=cutoff, batch_x=batch, batch_y=sidechain_batch)
        r_edge_index_x = assign_index[1]
        r_edge_index_y = assign_index[0]
        # Map local sidechain indices back to global node indices.
        r_edge_index_y = sidechain_index[r_edge_index_y]

        # Add both directions so the resulting graph stays symmetric.
        rgraph_edge_index1 = torch.stack((r_edge_index_x, r_edge_index_y))  # (2, E)
        rgraph_edge_index2 = torch.stack((r_edge_index_y, r_edge_index_x))  # (2, E)
        rgraph_edge_index = torch.cat((rgraph_edge_index1, rgraph_edge_index2), dim=-1)  # (2, 2E)
        # delete self loop
        rgraph_edge_index = rgraph_edge_index[:, (rgraph_edge_index[0] != rgraph_edge_index[1])]

    rgraph_adj = torch.sparse.LongTensor(
        rgraph_edge_index,
        torch.ones(rgraph_edge_index.size(1)).long().to(pos.device) * unspecified_type_number,
        torch.Size([N, N])
    )

    composed_adj = (bgraph_adj + rgraph_adj).coalesce()  # Sparse (N, N, T)
    # edge_index = composed_adj.indices()
    # dist = (pos[edge_index[0]] - pos[edge_index[1]]).norm(dim=-1)

    new_edge_index = composed_adj.indices()
    new_edge_type = composed_adj.values().long()

    return new_edge_index, new_edge_type
def extend_graph_order_radius(num_nodes, pos, edge_index, edge_type, batch, order=3, cutoff=10.0,
                              extend_order=True, extend_radius=True, is_sidechain=None):
    """Optionally augment a bond graph with higher-order bonds and radius edges.

    When enabled, bonds are first extended to ``order``-hop connectivity, then
    spatial neighbours within ``cutoff`` are merged in on top of that.

    Returns:
        Tuple (edge_index, edge_type) of the augmented connectivity.
    """
    if extend_order:
        edge_index, edge_type = _extend_graph_order(
            num_nodes=num_nodes,
            edge_index=edge_index,
            edge_type=edge_type,
            order=order,
        )

    if extend_radius:
        edge_index, edge_type = _extend_to_radius_graph(
            pos=pos,
            edge_index=edge_index,
            edge_type=edge_type,
            cutoff=cutoff,
            batch=batch,
            is_sidechain=is_sidechain,
        )

    return edge_index, edge_type
def coarse_grain(pos, node_attr, subgraph_index, batch):
    """Collapse nodes into clusters: mean position, summed attributes, cluster batch ids."""
    cluster_pos = scatter_mean(pos, index=subgraph_index, dim=0)        # (num_clusters, 3)
    cluster_attr = scatter_add(node_attr, index=subgraph_index, dim=0)  # (num_clusters, H)
    cluster_batch = scatter_max(batch, index=subgraph_index, dim=0)[0]  # (num_clusters, )
    return cluster_pos, cluster_attr, cluster_batch
def batch_to_natoms(batch):
    """Count how many nodes each graph in the batch owns, from per-node graph ids."""
    ones = torch.ones_like(batch)
    return scatter_add(ones, index=batch, dim=0)
def get_complete_graph(natoms):
    """Build the fully-connected, self-loop-free directed edge list for a batch.

    Args:
        natoms: (B,) number of nodes of each graph in the batch.

    Returns:
        edge_index: (2, sum_i N_i * (N_i - 1)) concatenated directed edges in
            the batch's global node indexing.
        num_edges: (B,) number of edges (N_i * (N_i - 1)) per graph.
    """
    natoms_sqr = (natoms ** 2).long()
    total_pairs = torch.sum(natoms_sqr)

    # For every ordered node pair of every graph, recover the graph-local
    # row/column and shift by the graph's node offset within the batch.
    n_per_pair = torch.repeat_interleave(natoms, natoms_sqr)
    node_off = torch.cumsum(natoms, dim=0) - natoms
    node_off_per_pair = torch.repeat_interleave(node_off, natoms_sqr)
    sqr_off = torch.cumsum(natoms_sqr, dim=0) - natoms_sqr
    sqr_off_per_pair = torch.repeat_interleave(sqr_off, natoms_sqr)

    pair_rank = torch.arange(total_pairs, device=total_pairs.device) - sqr_off_per_pair
    src = (pair_rank // n_per_pair).long() + node_off_per_pair
    dst = (pair_rank % n_per_pair).long() + node_off_per_pair

    edge_index = torch.cat([src.view(1, -1), dst.view(1, -1)])
    keep = torch.logical_not(src == dst)  # drop self-loops
    edge_index = edge_index[:, keep]

    num_edges = natoms_sqr - natoms  # Number of edges per graph
    return edge_index, num_edges
| 35.684564 | 117 | 0.653282 |
688d27831e4acceb1f167858d0e57a307f9c234e | 2,494 | py | Python | mq+mcp3008.py | mcthoren/mq_gases | e26ac3ba6d0ca321afdaaf80047b632824454d80 | [
"0BSD"
] | null | null | null | mq+mcp3008.py | mcthoren/mq_gases | e26ac3ba6d0ca321afdaaf80047b632824454d80 | [
"0BSD"
] | null | null | null | mq+mcp3008.py | mcthoren/mq_gases | e26ac3ba6d0ca321afdaaf80047b632824454d80 | [
"0BSD"
] | null | null | null | #!/usr/bin/python3
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
# thanks to Adafruit for all the docs, boards and code.
# docs and example code from here:
# https://learn.adafruit.com/mcp3008-spi-adc?view=all
# datasheet here:
# https://cdn-shop.adafruit.com/datasheets/MCP3008.pdf
import board, busio, digitalio, time, sys
import adafruit_mcp3xxx.mcp3008 as MCP
from adafruit_mcp3xxx.analog_in import AnalogIn
sys.path.append('/home/ghz/wxlib')
import wxlib as wx
wx_dir = "/home/ghz/repos/mq_gases"
dat_fname = "gas_levels"
# SPI bus shared by both MCP3008 ADCs; each chip gets its own chip-select pin.
spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI)
cs0 = digitalio.DigitalInOut(board.D5)
cs1 = digitalio.DigitalInOut(board.D6)
mcp0 = MCP.MCP3008(spi, cs0)
mcp1 = MCP.MCP3008(spi, cs1)
# gas_vals maps sensor index -> ADC channel; gas_vals_sum accumulates the
# sampled voltages between log writes.
gas_vals = {}
gas_vals_sum = {}
itr = 0
num_sens = 16
for i in range(num_sens):
	gas_vals_sum[i] = 0
gas_vals[0] = AnalogIn(mcp0, MCP.P0) # MQ-136 Hydrogen Sulfide
gas_vals[1] = AnalogIn(mcp0, MCP.P1) # MQ-2 Methane, Butane, LPG, Smoke
gas_vals[2] = AnalogIn(mcp0, MCP.P2) # MQ-8 Hydrogen
gas_vals[3] = AnalogIn(mcp0, MCP.P3) # MQ-135 Air Quality (Benzene, Alcohol, Smoke)
gas_vals[4] = AnalogIn(mcp0, MCP.P4) # MQ-7 Carbon Monoxide
gas_vals[5] = AnalogIn(mcp0, MCP.P5) # MQ-3 Alcohol, Ethanol, Smoke
gas_vals[6] = AnalogIn(mcp0, MCP.P6) # MQ-5 Natural Gas, LPG
gas_vals[7] = AnalogIn(mcp0, MCP.P7) # MQ-4 Methane, CNG
gas_vals[8] = AnalogIn(mcp1, MCP.P0) # MQ-6 LPG, Butane
gas_vals[9] = AnalogIn(mcp1, MCP.P1) # MQ-9 "Methane, Propane, etc. Combustible Gas" (only running at 5VDC)
# NOTE(review): indices 10-15 all read the same channel (mcp1, MCP.P1) as
# index 9 — this looks like a copy-paste placeholder; confirm the intended
# channels (e.g. MCP.P2..P7) against the actual wiring.
gas_vals[10] = AnalogIn(mcp1, MCP.P1) # MQ-137 Ammonia
gas_vals[11] = AnalogIn(mcp1, MCP.P1)
gas_vals[12] = AnalogIn(mcp1, MCP.P1)
gas_vals[13] = AnalogIn(mcp1, MCP.P1)
gas_vals[14] = AnalogIn(mcp1, MCP.P1)
gas_vals[15] = AnalogIn(mcp1, MCP.P1)
# Sample every second; every 57th sample, write the window-averaged voltages
# (plus the Pi's core temperature) to the data file and reset the window.
while True:
	# print("Raw ADC Value: ", chan.value)
	ts = time.strftime("%FT%TZ", time.gmtime())
	print(ts, end="")
	for i in range(num_sens):
		gas_vals_sum[i] += gas_vals[i].voltage
		print("\t", end='')
		print(str(i) + ": {0:0.4f} V".format(gas_vals[i].voltage), end="")
	print("")
	itr += 1
	if itr >= 57:
		dat_s = "{0:s}".format(ts)
		for i in range(num_sens):
			dat_s += "\t"
			dat_s += str(i) + ": {0:0.4f} V".format(gas_vals_sum[i] / itr)
		dat_s += "\tpi_temp: {0:0.2f} °C\n".format(float(wx.pi_temp_read()) / 1000)
		# Reset counters for the next averaging window (dat_s is already built).
		itr = 0
		for i in range(num_sens):
			gas_vals_sum[i] = 0
		wx.write_out_dat_stamp_iso(ts, dat_fname, dat_s, wx_dir)
	time.sleep(1)
4f8d8d3aae7ea0c73c9796c697bedd1f5e2521ee | 960 | py | Python | var/spack/repos/builtin/packages/fish/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2020-10-15T01:08:42.000Z | 2021-10-18T01:28:18.000Z | var/spack/repos/builtin/packages/fish/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2019-07-30T10:12:28.000Z | 2019-12-17T09:02:27.000Z | var/spack/repos/builtin/packages/fish/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5 | 2019-07-30T09:42:14.000Z | 2021-01-25T05:39:20.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Fish(AutotoolsPackage):
    """fish is a smart and user-friendly command line shell for OS X, Linux, and
    the rest of the family.
    """

    homepage = "https://fishshell.com/"
    url = "https://github.com/fish-shell/fish-shell/releases/download/2.7.1/fish-2.7.1.tar.gz"
    # Page Spack scans when listing/checksumming available versions.
    list_url = "https://fishshell.com/"

    depends_on('ncurses')

    # Release tarball checksums, newest first.
    version('3.0.0', sha256='ea9dd3614bb0346829ce7319437c6a93e3e1dfde3b7f6a469b543b0d2c68f2cf')
    version('2.7.1', sha256='e42bb19c7586356905a58578190be792df960fa81de35effb1ca5a5a981f0c5a')
    version('2.7.0', sha256='3a76b7cae92f9f88863c35c832d2427fb66082f98e92a02203dc900b8fa87bcb')
    version('2.2.0', sha256='a76339fd14ce2ec229283c53e805faac48c3e99d9e3ede9d82c0554acfc7b77a')
7431ca707606785d71c417172c9adc4daeccc68c | 2,892 | py | Python | test/www/url-encoding.py | Vanuan/phantomjs | 54f331b1b5bba48a2624b2d7c288f8cade101e5e | [
"BSD-3-Clause"
] | 1 | 2021-05-18T13:32:05.000Z | 2021-05-18T13:32:05.000Z | test/www/url-encoding.py | Vanuan/phantomjs | 54f331b1b5bba48a2624b2d7c288f8cade101e5e | [
"BSD-3-Clause"
] | null | null | null | test/www/url-encoding.py | Vanuan/phantomjs | 54f331b1b5bba48a2624b2d7c288f8cade101e5e | [
"BSD-3-Clause"
] | null | null | null | # -*- encoding: utf-8 -*-
import urlparse
from cStringIO import StringIO
import time
def html_esc(s):
    """Escape the HTML metacharacters &, < and > in *s*.

    Fix: the replacement strings had been de-entitized into no-ops
    (e.g. replacing '&' with '&').  The ampersand must be escaped first so
    the entities introduced for < and > are not double-escaped.
    """
    return s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
def do_response(req, body, code=200, headers=None):
    """Send status line and headers on *req*, returning a file-like body stream.

    Content-Length is always computed from *body*; a caller-supplied
    Content-Length header is ignored.  Content-Type defaults to text/html
    unless the caller provides one.

    Fix: the mutable default ``headers={}`` is replaced with the None
    sentinel so a shared dict can never leak state between calls.
    """
    if headers is None:
        headers = {}
    req.send_response(code)
    req.send_header('Content-Length', str(len(body)))
    if 'Content-Type' not in headers:
        req.send_header('Content-Type', 'text/html')
    for k, v in headers.items():
        if k != 'Content-Length':
            req.send_header(k, v)
    req.end_headers()
    return StringIO(body)
def do_redirect(req, target):
    """Reply with a 302 redirect to *target*, including a fallback HTML link."""
    body = '<!doctype html><a href="{}">Go here</a>'.format(target)
    return do_response(req, body, code=302, headers={'Location': target})
def handle_request(req):
    """Dispatch *req* by query string for a URL-encoding test server.

    This handler returns one of several different documents, depending on the
    query string.  Many of the URLs involved contain text encoded in
    Shift_JIS, and will not round-trip correctly if misinterpreted as UTF-8.
    Comments indicate the Unicode equivalent of each encoded path.
    """
    url = urlparse.urlparse(req.path)
    if url.query == '/':
        # Entry point: redirect to the Shift_JIS-encoded "page" URL.
        return do_redirect(req, '?/%83y%81[%83W')
    elif url.query == '/f':
        # Frameset referencing two encoded frame URLs.
        return do_response(req,
            '<!doctype html public "-//W3C//DTD HTML 4.01 Frameset//EN"'
            ' "http://www.w3.org/TR/html4/frameset.dtd">'
            '<html><head><title>framed</title></head>'
            '<frameset cols="50%,50%">'
            '<frame src="?/%83y%81[%83W" name="a">'
            '<frame src="?/%95s%96%D1%82%C8%98_%91%88" name="b">'
            '</frameset>')
    elif url.query == "/r":
        # Page loading an encoded external script.
        return do_response(req,
            '<!doctype html><script src="?/%8F%91"></script>')
    elif url.query == "/re":
        # Page with one failing (500) and one hanging (delayed 204) image.
        return do_response(req,
            '<!doctype html>'
            '<img src="?/%8C%CC%8F%E1">'
            '<img src="?/%89i%8Bv">')
    elif url.query == "/%83y%81[%83W":  # ページ
        return do_response(req, '<!doctype html><h1>PASS</h1>')
    elif url.query == "/%98g":  # 枠
        return do_response(req, '<!doctype html><h1>PASS</h1>')
    elif url.query == "/%95s%96%D1%82%C8%98_%91%88":  # 不毛な論争
        return do_response(req, '<!doctype html><h1>FRAME</h1>')
    elif url.query == "/%8F%91":  # 書
        return do_response(req,
            'window.onload=function(){'
            'document.body.innerHTML="<h1>PASS</h1>";};',
            headers={'Content-Type': 'application/javascript'})
    elif url.query == "/%8C%CC%8F%E1":  # 故障
        return do_response(req,
            '<!doctype html>internal server error',
            code=500)
    elif url.query == "/%89i%8Bv":  # 永久
        time.sleep(5)
        # Fix: the request object was previously omitted ("do_response('',
        # code=204)"), which raised a TypeError instead of sending the
        # intended empty 204 response.
        return do_response(req, '', code=204)
    else:
        return do_response(req,
            '<!doctype html><title>404 Not Found</title>'
            '<p>URL not found: {}</p>'
            .format(html_esc(req.path)),
            code=404)
| 33.241379 | 73 | 0.561895 |
07b133eac2957888817a68b2def6d0e3aac3c5d3 | 4,870 | py | Python | examples/smri_ants_build_template.py | sebastientourbier/nipype | 99c5904176481520c5bf42a501aae1a12184e672 | [
"Apache-2.0"
] | 2 | 2019-01-25T18:20:51.000Z | 2019-07-30T20:51:51.000Z | examples/smri_ants_build_template.py | sebastientourbier/nipype | 99c5904176481520c5bf42a501aae1a12184e672 | [
"Apache-2.0"
] | null | null | null | examples/smri_ants_build_template.py | sebastientourbier/nipype | 99c5904176481520c5bf42a501aae1a12184e672 | [
"Apache-2.0"
] | 2 | 2018-01-25T19:48:17.000Z | 2019-01-25T18:20:52.000Z | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
===============================================
sMRI: Using new ANTS for creating a T1 template
===============================================
In this tutorial we will use ANTS (old version aka "ANTS") based workflow to
create a template out of multiple T1 volumes.
1. Tell python where to find the appropriate functions.
"""
from __future__ import print_function, unicode_literals
from builtins import open
from future import standard_library
standard_library.install_aliases()
import os
import nipype.interfaces.utility as util
import nipype.interfaces.ants as ants
import nipype.interfaces.io as io
import nipype.pipeline.engine as pe # pypeline engine
from nipype.workflows.smri.ants import ANTSTemplateBuildSingleIterationWF
"""
2. Download T1 volumes into home directory
"""
import urllib.request
import urllib.error
import urllib.parse
homeDir = os.getenv("HOME")
requestedPath = os.path.join(homeDir, 'nipypeTestPath')
mydatadir = os.path.realpath(requestedPath)
if not os.path.exists(mydatadir):
os.makedirs(mydatadir)
print(mydatadir)
MyFileURLs = [
('http://slicer.kitware.com/midas3/download?bitstream=13121', '01_T1_half.nii.gz'),
('http://slicer.kitware.com/midas3/download?bitstream=13122', '02_T1_half.nii.gz'),
('http://slicer.kitware.com/midas3/download?bitstream=13124', '03_T1_half.nii.gz'),
('http://slicer.kitware.com/midas3/download?bitstream=13128', '01_T1_inv_half.nii.gz'),
('http://slicer.kitware.com/midas3/download?bitstream=13123', '02_T1_inv_half.nii.gz'),
('http://slicer.kitware.com/midas3/download?bitstream=13125', '03_T1_inv_half.nii.gz'),
]
for tt in MyFileURLs:
myURL = tt[0]
localFilename = os.path.join(mydatadir, tt[1])
if not os.path.exists(localFilename):
remotefile = urllib.request.urlopen(myURL)
localFile = open(localFilename, 'wb')
localFile.write(remotefile.read())
localFile.close()
print("Downloaded file: {0}".format(localFilename))
else:
print("File previously downloaded {0}".format(localFilename))
input_images = [
os.path.join(mydatadir, '01_T1_half.nii.gz'),
os.path.join(mydatadir, '02_T1_half.nii.gz'),
os.path.join(mydatadir, '03_T1_half.nii.gz')
]
input_passive_images = [
{'INV_T1': os.path.join(mydatadir, '01_T1_inv_half.nii.gz')},
{'INV_T1': os.path.join(mydatadir, '02_T1_inv_half.nii.gz')},
{'INV_T1': os.path.join(mydatadir, '03_T1_inv_half.nii.gz')}
]
"""
3. Define the workflow and its working directory
"""
tbuilder = pe.Workflow(name="ANTSTemplateBuilder")
tbuilder.base_dir = requestedPath
"""
4. Define data sources. In real life these would be replace by DataGrabbers
"""
datasource = pe.Node(interface=util.IdentityInterface(fields=['imageList', 'passiveImagesDictionariesList']),
run_without_submitting=True,
name='InputImages')
datasource.inputs.imageList = input_images
datasource.inputs.passiveImagesDictionariesList = input_passive_images
datasource.inputs.sort_filelist = True
"""
5. Template is initialized by a simple average
"""
initAvg = pe.Node(interface=ants.AverageImages(), name='initAvg')
initAvg.inputs.dimension = 3
initAvg.inputs.normalize = True
tbuilder.connect(datasource, "imageList", initAvg, "images")
"""
6. Define the first iteration of template building
"""
buildTemplateIteration1 = ANTSTemplateBuildSingleIterationWF('iteration01')
tbuilder.connect(initAvg, 'output_average_image', buildTemplateIteration1, 'inputspec.fixed_image')
tbuilder.connect(datasource, 'imageList', buildTemplateIteration1, 'inputspec.images')
tbuilder.connect(datasource, 'passiveImagesDictionariesList', buildTemplateIteration1, 'inputspec.ListOfPassiveImagesDictionaries')
"""
7. Define the second iteration of template building
"""
buildTemplateIteration2 = ANTSTemplateBuildSingleIterationWF('iteration02')
tbuilder.connect(buildTemplateIteration1, 'outputspec.template', buildTemplateIteration2, 'inputspec.fixed_image')
tbuilder.connect(datasource, 'imageList', buildTemplateIteration2, 'inputspec.images')
tbuilder.connect(datasource, 'passiveImagesDictionariesList', buildTemplateIteration2, 'inputspec.ListOfPassiveImagesDictionaries')
"""
8. Move selected files to a designated results folder
"""
datasink = pe.Node(io.DataSink(), name="datasink")
datasink.inputs.base_directory = os.path.join(requestedPath, "results")
tbuilder.connect(buildTemplateIteration2, 'outputspec.template', datasink, 'PrimaryTemplate')
tbuilder.connect(buildTemplateIteration2, 'outputspec.passive_deformed_templates', datasink, 'PassiveTemplate')
tbuilder.connect(initAvg, 'output_average_image', datasink, 'PreRegisterAverage')
"""
8. Run the workflow
"""
tbuilder.run()
| 35.547445 | 131 | 0.744559 |
1eb9690860bc156e5f18c96cb70cab862d775043 | 88 | py | Python | python/git_manage/scripts/__init__.py | danb0b/code_git_tools | 3661e90ea5e93012c203ed9569ad956b322729f2 | [
"MIT"
] | null | null | null | python/git_manage/scripts/__init__.py | danb0b/code_git_tools | 3661e90ea5e93012c203ed9569ad956b322729f2 | [
"MIT"
] | null | null | null | python/git_manage/scripts/__init__.py | danb0b/code_git_tools | 3661e90ea5e93012c203ed9569ad956b322729f2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 22 19:45:01 2019
@author: danaukes
"""
| 11 | 35 | 0.579545 |
ac08faee093de8f578577414e56d4047c5ec4888 | 1,816 | py | Python | native_client_sdk/src/build_tools/dsc_info.py | kjthegod/chromium | cf940f7f418436b77e15b1ea23e6fa100ca1c91a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2019-11-28T10:46:52.000Z | 2019-11-28T10:46:52.000Z | native_client_sdk/src/build_tools/dsc_info.py | kjthegod/chromium | cf940f7f418436b77e15b1ea23e6fa100ca1c91a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | native_client_sdk/src/build_tools/dsc_info.py | kjthegod/chromium | cf940f7f418436b77e15b1ea23e6fa100ca1c91a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2015-03-27T11:15:39.000Z | 2016-08-17T14:19:56.000Z | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Extracts information from a library.dsc file."""
import optparse
import os
import sys
import parse_dsc
def Error(msg):
print >> sys.stderr, 'dsc_info: %s' % msg
sys.exit(1)
def FindTarget(tree, target_name):
targets = tree['TARGETS']
for target in targets:
if target['NAME'] == target_name:
return target
Error('Target %s not found' % target_name)
def GetSources(lib_dir, tree, target_name):
result = []
target = FindTarget(tree, target_name)
for filename in target['SOURCES']:
result.append('/'.join([lib_dir, filename]))
return result
def DoMain(argv):
"Entry point for gyp's pymod_do_main command."
parser = optparse.OptionParser(usage='%prog: [OPTIONS] TARGET')
# Give a clearer error message when this is used as a module.
parser.prog = 'dsc_info'
parser.add_option('-s', '--sources',
help='Print a list of source files for the target',
action='store_true', default=False)
parser.add_option('-l', '--libdir',
help='Directory where the library.dsc file is located',
metavar='DIR')
options, args = parser.parse_args(argv)
if len(args) != 1:
parser.error('Expecting exactly one argument.')
target = args[0]
libdir = options.libdir or ''
tree = parse_dsc.LoadProject(os.path.join(libdir, 'library.dsc'))
if options.sources:
return '\n'.join(GetSources(libdir, tree, target))
parser.error('No action specified')
def main(args):
print DoMain(args)
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except KeyboardInterrupt:
Error('interrupted')
| 27.515152 | 75 | 0.670705 |
595fc5cc61fa53462a6ee74873f999226c5a2338 | 6,234 | py | Python | torchlib/generative_model/gan/infogan/trainer.py | vermouth1992/torchlib | 63b2bedb40f670b2d9fbfc0daeab4a8d44623095 | [
"MIT"
] | 3 | 2019-07-23T21:32:36.000Z | 2022-02-04T23:13:30.000Z | torchlib/generative_model/gan/infogan/trainer.py | vermouth1992/torchlib | 63b2bedb40f670b2d9fbfc0daeab4a8d44623095 | [
"MIT"
] | null | null | null | torchlib/generative_model/gan/infogan/trainer.py | vermouth1992/torchlib | 63b2bedb40f670b2d9fbfc0daeab4a8d44623095 | [
"MIT"
] | 1 | 2019-07-23T21:32:23.000Z | 2019-07-23T21:32:23.000Z | """
Trainer for infogan
"""
import numpy as np
import torch
from torchlib.common import FloatTensor
from torchlib.utils.plot import get_visdom_line_plotter
from torch.autograd import Variable
from tqdm import tqdm
class Trainer(object):
def __init__(self, trick_dict=None):
if trick_dict is None:
self.trick_dict = {}
else:
self.trick_dict = trick_dict
self.global_step = 0
self.plotter = get_visdom_line_plotter('main')
def _create_real_data(self, raw_real_data):
noisy_input = self.trick_dict.get('noisy_input', None)
if noisy_input:
raw_real_data = raw_real_data + torch.from_numpy(
np.random.randn(*raw_real_data.shape) * noisy_input['sigma']).type(torch.FloatTensor)
noisy_input['sigma'] = max(0, noisy_input['sigma'] - noisy_input['decay'])
real_data = Variable(raw_real_data.type(FloatTensor))
return real_data
def _create_valid(self, batch_size):
soft_label = self.trick_dict.get('label_smooth', None)
if soft_label:
valid_range = soft_label['valid_range']
else:
valid_range = 1.
if isinstance(valid_range, list):
valid = Variable(FloatTensor(batch_size, 1).uniform_(*valid_range), requires_grad=False)
else:
valid = Variable(FloatTensor(batch_size, 1).fill_(valid_range), requires_grad=False)
return valid
def _create_fake(self, batch_size):
soft_label = self.trick_dict.get('label_smooth', None)
if soft_label:
fake_range = soft_label['fake_range']
else:
fake_range = 0.
if isinstance(fake_range, list):
fake = Variable(FloatTensor(batch_size, 1).uniform_(*fake_range), requires_grad=False)
else:
fake = Variable(FloatTensor(batch_size, 1).fill_(fake_range), requires_grad=False)
return fake
def train(self, num_epoch, data_loader, gan_model, disc_iter, checkpoint_path, epoch_per_save, callbacks):
assert disc_iter > 0, 'Discriminator update iteration must be greater than zero'
for epoch in range(num_epoch):
gan_model._set_to_train()
dis_loss_lst = []
gen_loss_lst = []
info_loss_lst = []
D_x_lst = []
D_G_z1_lst = []
D_G_z2_lst = []
# plot smoothing
smooth_factor = 0.95
plot_dis_s = 0
plot_gen_s = 0
plot_info_s = 0
plot_D_x = 0
plot_D_G_z1 = 0
plot_D_G_z2 = 0
plot_ws = 0
print('Epoch {}'.format(epoch + 1))
for input_and_aux in tqdm(data_loader):
# We assume the input_and_label is a tuple containing data and auxiliary information
# Adversarial ground truths
batch_size = input_and_aux[0].shape[0]
valid = self._create_valid(batch_size)
fake = self._create_fake(batch_size)
flip_label = self.trick_dict.get('flip_label', None)
if flip_label and (self.global_step + 1) % flip_label['num_steps_per_flip'] == 0:
valid, fake = fake, valid
# train discriminator
real_data = self._create_real_data(input_and_aux[0])
for _ in range(disc_iter):
d_real_loss, D_x = gan_model._train_dis_with_real(real_data, valid)
d_fake_loss, D_G_z1 = gan_model._train_dis_with_fake(fake)
# train generator
valid = self._create_valid(2 * batch_size)
g_loss, D_G_z2 = gan_model._train_gen(valid)
i_loss = gan_model._train_info(batch_size)
# gan_model.update_parameters()
dis_loss = (d_real_loss.item() + d_fake_loss.item()) / 2
gen_loss = g_loss.item()
info_loss = i_loss.item()
plot_dis_s = plot_dis_s * smooth_factor + dis_loss * (1 - smooth_factor)
plot_gen_s = plot_gen_s * smooth_factor + gen_loss * (1 - smooth_factor)
plot_info_s = plot_info_s * smooth_factor + info_loss * (1 - smooth_factor)
plot_D_x = plot_D_x * smooth_factor + D_x.item() * (1 - smooth_factor)
plot_D_G_z1 = plot_D_G_z1 * smooth_factor + D_G_z1.item() * (1 - smooth_factor)
plot_D_G_z2 = plot_D_G_z2 * smooth_factor + D_G_z2.item() * (1 - smooth_factor)
plot_ws = plot_ws * smooth_factor + (1 - smooth_factor)
dis_loss_lst.append(plot_dis_s / plot_ws)
gen_loss_lst.append(plot_gen_s / plot_ws)
info_loss_lst.append(plot_info_s / plot_ws)
D_x_lst.append(plot_D_x / plot_ws)
D_G_z1_lst.append(plot_D_G_z1 / plot_ws)
D_G_z2_lst.append(plot_D_G_z2 / plot_ws)
self.global_step += 1
if gan_model.optimizer_G_scheduler:
gan_model.optimizer_G_scheduler.step()
if gan_model.optimizer_D_scheduler:
gan_model.optimizer_D_scheduler.step()
noisy_input = self.trick_dict.get('noisy_input', None)
if noisy_input:
print('Noisy input sigma: {:.4f}'.format(noisy_input['sigma']))
if checkpoint_path and (epoch + 1) % epoch_per_save == 0:
gan_model.save_checkpoint(checkpoint_path)
# plot loss figure
step = [a for a in range(self.global_step - len(dis_loss_lst), self.global_step)]
data = np.array([dis_loss_lst, gen_loss_lst, info_loss_lst]).transpose()
legend = ['dis_loss', 'gen_loss', 'info_loss']
self.plotter.plot('gan_loss', legend, step, data)
data = np.array([D_x_lst, D_G_z1_lst, D_G_z2_lst]).transpose()
legend = ['D_x', 'D_G_z1', 'D_G_z2']
self.plotter.plot('gan_output', legend, step, data)
# callbacks
for callback in callbacks:
callback(self, gan_model)
if checkpoint_path:
gan_model.save_checkpoint(checkpoint_path)
| 41.013158 | 110 | 0.598813 |
02c032e1c3cacae4c5f1e83dbac8cc0d8f2929fe | 32,679 | py | Python | test/IECore/Imath.py | gcodebackups/cortex-vfx | 72fa6c6eb3327fce4faf01361c8fcc2e1e892672 | [
"BSD-3-Clause"
] | 5 | 2016-07-26T06:09:28.000Z | 2022-03-07T03:58:51.000Z | test/IECore/Imath.py | turbosun/cortex | 4bdc01a692652cd562f3bfa85f3dae99d07c0b15 | [
"BSD-3-Clause"
] | null | null | null | test/IECore/Imath.py | turbosun/cortex | 4bdc01a692652cd562f3bfa85f3dae99d07c0b15 | [
"BSD-3-Clause"
] | 3 | 2015-03-25T18:45:24.000Z | 2020-02-15T15:37:18.000Z | ##########################################################################
#
# Copyright (c) 2007-2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
"""Unit test for Imath binding"""
import math
import unittest
import random
from IECore import *
class ImathV2f(unittest.TestCase):
def testConstructors(self):
"""Test V2f constructors"""
v = V2f()
v = V2f(1)
self.assertEqual(v.x, 1)
self.assertEqual(v.y, 1)
v = V2f(2, 3)
self.assertEqual(v.x, 2)
self.assertEqual(v.y, 3)
self.assertEqual( V2f( V2i( 1, 2 ) ), V2f( 1, 2 ) )
self.assertEqual( V2f( V2f( 1, 2 ) ), V2f( 1, 2 ) )
self.assertEqual( V2f( V2d( 1, 2 ) ), V2f( 1, 2 ) )
self.assertEqual( V2d( V2i( 1, 2 ) ), V2d( 1, 2 ) )
self.assertEqual( V2d( V2f( 1, 2 ) ), V2d( 1, 2 ) )
self.assertEqual( V2d( V2d( 1, 2 ) ), V2d( 1, 2 ) )
self.assertEqual( V2i( V2i( 1, 2 ) ), V2i( 1, 2 ) )
self.assertEqual( V2i( V2f( 1, 2 ) ), V2i( 1, 2 ) )
self.assertEqual( V2i( V2d( 1, 2 ) ), V2i( 1, 2 ) )
self.assertEqual( V3f( V3i( 1, 2, 3 ) ), V3f( 1, 2, 3 ) )
self.assertEqual( V3f( V3f( 1, 2, 3 ) ), V3f( 1, 2, 3 ) )
self.assertEqual( V3f( V3d( 1, 2, 3 ) ), V3f( 1, 2, 3 ) )
self.assertEqual( V3d( V3i( 1, 2, 3 ) ), V3d( 1, 2, 3 ) )
self.assertEqual( V3d( V3f( 1, 2, 3 ) ), V3d( 1, 2, 3 ) )
self.assertEqual( V3d( V3d( 1, 2, 3 ) ), V3d( 1, 2, 3 ) )
self.assertEqual( V3i( V3i( 1, 2, 3 ) ), V3i( 1, 2, 3 ) )
self.assertEqual( V3i( V3f( 1, 2, 3 ) ), V3i( 1, 2, 3 ) )
self.assertEqual( V3i( V3d( 1, 2, 3 ) ), V3i( 1, 2, 3 ) )
v = V2f( [ 1, 1 ] )
self.assertEqual(v.x, 1)
self.assertEqual(v.y, 1)
self.assertRaises( RuntimeError, V2f, [ 1 ] )
self.assertRaises( RuntimeError, V2f, [ 1, 2, 3 ] )
def testDimensions(self):
"""Test V2f dimensions"""
v = V2f()
self.assertEqual( v.dimensions(), 2 )
def testIndexing(self):
"""Test V2f indexing via operator[]"""
v1 = V2f(1.0, 2.0)
self.assertEqual(v1[0], 1.0)
self.assertEqual(v1[1], 2.0)
v1[0] = 12.0
v1[1] = 15.0
self.assertEqual(v1[0], 12.0)
self.assertEqual(v1[1], 15.0)
def testCopyAndAssign(self):
"""Test V2f copy construction and assignment"""
v1 = V2f(2.0)
v2 = V2f(3.0)
v2 = v1
# v2 should now contain contents of v1, v1 should be unchanged
self.assertEqual(v2.x, 2)
self.assertEqual(v2.y, 2)
self.assertEqual(v1.x, 2)
self.assertEqual(v1.y, 2)
def testEquality(self):
"""Test V2f comparison for equality"""
v1 = V2f(1.00)
v2 = V2f(1.01)
self.assert_( v1.equalWithAbsError(v2, 0.01) )
v1 = V2f(2.0)
v2 = V2f(3.0)
self.assert_( v1.equalWithRelError(v2, 0.5) )
v1 = V2f(1.0)
v2 = V2f(1.0)
self.assert_( v1 == v2 )
v1 = V2f(1.0)
v2 = V2f(1.1)
self.assert_( v1 != v2 )
def testDotProduct(self):
"""Test V2f dot product"""
v1 = V2f(3.0)
v2 = V2f(4.0)
# By definition
self.assertEqual( v1.dot(v2), 3*4 + 3*4)
# Commutative
self.assertEqual( v1 ^ v2, v2 ^ v1)
self.assertEqual( v1.dot(v2), v2.dot(v1) )
# Operator/method equivalence
self.assertEqual( v1.dot(v2), v1 ^ v2)
# Because cos( angleBetween(v1, v2) ) == 1:
self.assertAlmostEqual( v1 ^ v2, v1.length() * v2.length(), 3 )
def testCrossProduct(self):
"""Test V2f cross product"""
v1 = V2f(2.0, 2.0)
v2 = V2f(0.0, 2.0)
# Area of parallelogram, by definition
self.assertEqual( v1.cross(v2), 4.0 )
# Operator/method equivalence
self.assertEqual( v1.cross(v2), v1 % v2 )
# ImathVec.h comment validity
self.assertEqual( v1.cross(v2), (V3f(v1.x, v1.y, 0.0) % V3f(v2.x, v2.y, 0.0)).z )
def testOperators(self):
"""Test V2f arithmetic operators"""
v1 = V2f(3.4, 9.2)
v2 = V2f(5.3, -0.4)
# ADDITION
# By definition
self.assertEqual( v1 + v2, V2f( v1.x + v2.x, v1.y + v2.y ) )
# Commutative
self.assertEqual( v1 + v2, v2 + v1 )
# Assignment
v1_copy = V2f(v1)
temp = v1
temp += v2
self.assert_( temp is v1 )
self.assertEqual( temp, (v1_copy + v2))
# SUBTRACTION
# By definition
self.assertEqual( v1 - v2, V2f( v1.x - v2.x, v1.y - v2.y ) )
self.assertEqual( v1 - v2, -v2 + v1 )
# Assignment
v1_copy = V2f(v1)
temp = v1
temp -= v2
self.assert_( temp is v1 )
self.assertEqual( temp, v1_copy - v2)
# NEGATION
self.assertEqual( -v1, V2f( -v1.x, -v1.y) )
self.assertEqual( -v1, v1.negate() )
self.assertEqual( -( -v1), v1 )
# MULTIPLICATION
# By definition
self.assertEqual( v1 * v2, V2f(v1.x * v2.x, v1.y * v2.y) )
c = 3
self.assertEqual( v1 * c, V2f(v1.x * c, v1.y * c) )
# Commutative
self.assertEqual( v1 * v2, v2 * v1 )
self.assertEqual( c * v1, V2f(v1.x * c, v1.y * c) )
# Assignment
v1_copy = V2f(v1)
temp = v1
temp *= v2
self.assert_( temp is v1 )
self.assertEqual( temp, v1_copy * v2)
v1_copy = V2f(v1)
temp = v1
temp *= c
self.assert_( temp is v1 )
self.assertEqual( temp, v1_copy * c)
# DIVISION
# By definition
self.assertEqual( v1 / v2, V2f(v1.x / v2.x, v1.y / v2.y) )
self.assertEqual( v1 / c, V2f(v1.x / c, v1.y / c) )
# Assignment
v1_copy = V2f(v1)
temp = v1
temp /= v2
self.assert_( temp is v1 )
self.assertEqual( temp, v1_copy / v2)
v1_copy = V2f(v1)
temp = v1
temp /= c
self.assert_( temp is v1 )
self.assertEqual( temp, v1_copy / c)
# matrix multiplication
v1 = V2f( 1, 2 )
m = M33f.createTranslated( V2f( 1, 2 ) )
v2 = v1 * m
v1 *= m
self.assertEqual( v1, v2 )
self.assertEqual( v1, V2f( 2, 4 ) )
def testMiscMethods(self):
"""Test V2f miscellaneous methods"""
v1 = V2f(2.3, -4.98)
self.assertAlmostEqual( v1.length2(), v1.dot(v1), 3 )
self.assertAlmostEqual( v1.length(), math.sqrt(v1.dot(v1)), 3 )
self.assertAlmostEqual( v1.length() * v1.length(), v1.length2(), 3 )
v1 = V2f(10.0, 0.0)
self.assertEqual( v1.normalized(), v1 / v1.length() )
self.assertEqual( v1, V2f(10.0, 0.0) )
v1.normalize()
self.assertEqual( v1, V2f(1.0, 0.0) )
class ImathV3f(unittest.TestCase):
def testConstructors(self):
"""Test V3f constructors"""
v = V3f()
v = V3f(1)
self.assertEqual(v.x, 1)
self.assertEqual(v.y, 1)
self.assertEqual(v.z, 1)
v = V3f(2, 3, 4)
self.assertEqual(v.x, 2)
self.assertEqual(v.y, 3)
self.assertEqual(v.z, 4)
v = V3f( [ 1, 1, 1 ] )
self.assertEqual(v.x, 1)
self.assertEqual(v.y, 1)
self.assertEqual(v.z, 1)
self.assertRaises( RuntimeError, V3f, [ 1 ] )
self.assertRaises( RuntimeError, V3f, [ 1, 2 ] )
self.assertRaises( RuntimeError, V3f, [ 1, 2, 3, 4 ] )
def testDimensions(self):
"""Test V3f dimensions"""
v = V3f()
self.assertEqual( v.dimensions(), 3 )
def testIndexing(self):
"""Test V3f indexing via operator[]"""
v1 = V3f(1.0, 2.0, 3.0)
self.assertEqual(v1[0], 1.0)
self.assertEqual(v1[1], 2.0)
self.assertEqual(v1[2], 3.0)
v1[0] = 12.0
v1[1] = 15.0
v1[2] = -25.0
self.assertEqual(v1[0], 12.0)
self.assertEqual(v1[1], 15.0)
self.assertEqual(v1[2], -25.0)
def testCopyAndAssign(self):
"""Test V3f copy construction and assignment"""
v1 = V3f(2.0)
v2 = V3f(3.0)
v2 = v1
# v2 should now contain contents of v1, v1 should be unchanged
self.assertEqual(v2.x, 2)
self.assertEqual(v2.y, 2)
self.assertEqual(v2.z, 2)
self.assertEqual(v1.x, 2)
self.assertEqual(v1.y, 2)
self.assertEqual(v1.z, 2)
def testEquality(self):
"""Test V3f comparison for equality"""
v1 = V3f(1.00)
v2 = V3f(1.01)
self.assert_( v1.equalWithAbsError(v2, 0.01) )
v1 = V3f(2.0)
v2 = V3f(3.0)
self.assert_( v1.equalWithRelError(v2, 0.5) )
v1 = V3f(1.0)
v2 = V3f(1.0)
self.assert_( v1 == v2 )
v1 = V3f(1.0)
v2 = V3f(1.1)
self.assert_( v1 != v2 )
def testDotProduct(self):
"""Test V3f dot product"""
v1 = V3f(3.0)
v2 = V3f(4.0)
# By definition
self.assertEqual( v1.dot(v2), 3*4 + 3*4 + 3*4)
# Commutative
self.assertEqual( v1 ^ v2, v2 ^ v1)
self.assertEqual( v1.dot(v2), v2.dot(v1) )
# Operator/method equivalence
self.assertEqual( v1.dot(v2), v1 ^ v2)
# Because cos( angleBetween(v1, v2) ) == 1:
self.assertAlmostEqual( v1 ^ v2, v1.length() * v2.length(), 3 )
def testCrossProduct(self):
"""Test V3f cross product"""
v1 = V3f(1.0, 0.0, 0.0)
v2 = V3f(0.0, 1.0, 0.0)
# Area of "parallelogram", by definition
self.assertEqual( v1.cross(v2), V3f(0.0, 0.0, 1.0) )
# Operator/method equivalence
self.assertEqual( v1.cross(v2), v1 % v2 )
def testOperators(self):
"""Test V3f arithmetic operators"""
v1 = V3f(3.4, 9.2, 18.05)
v2 = V3f(5.3, -0.4, -5.7 )
# ADDITION
# By definition
self.assertEqual( v1 + v2, V3f( v1.x + v2.x, v1.y + v2.y, v1.z + v2.z ) )
# Commutative
self.assertEqual( v1 + v2, v2 + v1 )
# Assignment
v1_copy = V3f(v1)
temp = v1
temp += v2
self.assert_( temp is v1 )
self.assertEqual( temp, (v1_copy + v2))
# SUBTRACTION
# By definition
self.assertEqual( v1 - v2, V3f( v1.x - v2.x, v1.y - v2.y, v1.z - v2.z ) )
self.assertEqual( v1 - v2, -v2 + v1 )
# Assignment
v1_copy = V3f(v1)
temp = v1
temp -= v2
self.assert_( temp is v1 )
self.assertEqual( temp, v1_copy - v2)
# NEGATION
self.assertEqual( -v1, V3f( -v1.x, -v1.y, -v1.z) )
self.assertEqual( -v1, v1.negate() )
self.assertEqual( -( -v1), v1 )
# MULTIPLICATION
# By definition
self.assertEqual( v1 * v2, V3f(v1.x * v2.x, v1.y * v2.y, v1.z * v2.z ) )
c = 3
self.assertEqual( v1 * c, V3f(v1.x * c, v1.y * c, v1.z * c) )
# Commutative
self.assertEqual( v1 * v2, v2 * v1 )
self.assertEqual( c * v1, V3f(v1.x * c, v1.y * c, v1.z * c) )
# Assignment
v1_copy = V3f(v1)
temp = v1
temp *= v2
self.assert_( temp is v1 )
self.assertEqual( temp, v1_copy * v2)
v1_copy = V3f(v1)
temp = v1
temp *= c
self.assert_( temp is v1 )
self.assertEqual( temp, v1_copy * c)
# DIVISION
# By definition
self.assertEqual( v1 / v2, V3f(v1.x / v2.x, v1.y / v2.y, v1.z / v2.z) )
self.assertEqual( v1 / c, V3f(v1.x / c, v1.y / c, v1.z / c) )
# Assignment
v1_copy = V3f(v1)
temp = v1
temp /= v2
self.assert_( temp is v1 )
self.assertEqual( temp, v1_copy / v2)
v1_copy = V3f(v1)
temp = v1
temp /= c
self.assert_( temp is v1 )
self.assertEqual( temp, v1_copy / c)
# matrix multiplication
v1 = V3f( 1, 2, 3 )
m = M44f.createTranslated( V3f( 1, 2, 3 ) )
v2 = v1 * m
v1 *= m
self.assertEqual( v1, v2 )
self.assertEqual( v1, V3f( 2, 4, 6 ) )
def testMiscMethods(self):
"""Test V3f miscellaneous methods"""
v1 = V3f(41.4, 2.3, -4.98)
self.assertAlmostEqual( v1.length2(), v1.dot(v1), 3 )
self.assertAlmostEqual( v1.length(), math.sqrt(v1.dot(v1)), 3 )
self.assertAlmostEqual( v1.length() * v1.length(), v1.length2(), 3 )
v1 = V3f(10.0, 0.0, 0.0)
self.assertEqual( v1.normalized(), v1 / v1.length() )
self.assertEqual( v1, V3f(10.0, 0.0, 0.0) )
v1.normalize()
self.assertEqual( v1, V3f(1.0, 0.0, 0.0) )
class ImathBox3f(unittest.TestCase):
def testConstructors(self):
"""Test Box3f constructors"""
b = Box3f()
self.assert_( b.isEmpty() )
b = Box3f( V3f(1.0, 1.0, 1.0) )
self.assertEqual( b.min, V3f(1.0, 1.0, 1.0) )
self.assertEqual( b.max, V3f(1.0, 1.0, 1.0) )
b = Box3f( V3f(-1.0, -1.0, -1.0), V3f(1.0, 1.0, 1.0) )
self.assertEqual( b.min, V3f(-1.0, -1.0, -1.0) )
self.assertEqual( b.max, V3f( 1.0, 1.0, 1.0) )
def testEquality(self):
"""Test Box3f comparison for equality"""
b1 = Box3f( V3f(1.0, 2.0, 3.0) )
b2 = Box3f( V3f(1.0, 2.0, 3.0) )
self.assert_( b1 == b2 )
b2 = Box3f( V3f(3.0, 2.0, 1.0) )
self.assert_( b1 != b2 )
def testMiscMethods(self):
"""Test Box3f miscellaneous methods"""
b1 = Box3f( V3f(-1.0, -1.0, -1.0), V3f(2.0, 2.0, 2.0) )
self.assertEqual( b1.isEmpty(), False )
self.assert_( b1.hasVolume() )
b1.makeEmpty()
self.assert_( b1.isEmpty() )
self.assertEqual( b1.hasVolume(), False )
b1 = Box3f( V3f(-1.0, -1.0, -1.0), V3f(10.0, 2.0, 2.0) )
X_AXIS = 0
self.assertEqual( b1.majorAxis(), X_AXIS )
self.assertEqual( b1.center(), (b1.min + b1.max) / 2.0 )
b2 = Box3f( V3f(-0.5), V3f(1.0) )
self.assert_( b2.intersects(b1) )
b2 = Box3f( V3f(-5.0), V3f(-2.0) )
self.failIf( b2.intersects(b1) )
self.assertEqual( b2.size(), b2.max - b2.min )
b = Box3f( V3f(1), V3f(2) )
m = M44f()
m[0,0]=2
m[1,1]=2
m[2,2]=2
self.assertEqual( b.transform( m ), Box3f( V3f(2), V3f(4) ) )
m = M44d()
m[0,0]=2
m[1,1]=2
m[2,2]=2
self.assertEqual( b.transform( m ), Box3f( V3f(2), V3f(4) ) )
def testContains( self ) :
b1 = Box3f( V3f( -1 ), V3f( 1 ) )
b2 = Box3f( V3f( 0, -0.5, 0.5 ), V3f( 0.1, 0, 0.9 ) )
b3 = Box3f( V3f( -1.2, -0.6, 0.4 ), V3f( 0.2, 0.1, 1 ) )
self.assert_( b1.contains( b2 ) )
self.assert_( not b2.contains( b1 ) )
self.assert_( not b2.contains( b3 ) )
self.assert_( b3.contains( b2 ) )
self.assert_( not b3.contains( b1 ) )
self.assert_( not b1.contains( b3 ) )
def testSplit( self ) :
r = Rand32()
for i in range( 0, 100 ) :
b = Box3f()
b.extendBy( r.nextV3f() )
b.extendBy( r.nextV3f() )
major = b.majorAxis()
low, high = b.split()
low2, high2 = b.split( major )
self.assertEqual( low, low2 )
self.assertEqual( high, high2 )
b2 = Box3f()
b2.extendBy( low )
b2.extendBy( high )
self.assertEqual( b, b2 )
class ImathQuatf(unittest.TestCase):
def testConstructors(self):
"""Test Quatf constructors"""
q = Quatf()
q = Quatf(q)
q = Quatf(0.1, 0.2, 0.3, 0.4)
q = Quatf(0.1, V3f(0.2, 0.3, 0.4))
q = Quatf.identity()
self.assertEqual( q, Quatf(1,0,0,0) )
def testIndexing(self):
"""Test Quatf indexing via operator[]"""
q = Quatf( 1, 2, 3, 4 )
self.assertEqual( q[0], 1 )
self.assertEqual( q[1], 2 )
self.assertEqual( q[2], 3 )
self.assertEqual( q[3], 4 )
self.assertEqual( q[0], q.r )
self.assertEqual( q[1], q.v.x )
self.assertEqual( q[2], q.v.y )
self.assertEqual( q[3], q.v.z )
def testEquality(self):
"""Test Quatf comparison for equality"""
q1 = Quatf( 1, 2, 3, 4 )
q2 = Quatf( 1, 2, 3, 4 )
self.assertEqual(q1, q1)
self.assertEqual(q1, q2)
q2 = Quatf( 5, 2, 3, 4 )
self.assert_( q1 != q2 )
def testMiscMethods(self):
"""Test Quatf miscellaneous methods"""
q1 = Quatf( 1, 2, 3, 4 )
self.assertAlmostEqual( q1.length(), math.sqrt(q1[0]*q1[0]+(q1.v^q1.v)), 3 )
# axis/angle
axis = V3f( 1, 2, 3 )
axis.normalize()
q1.setAxisAngle( axis, 0.5 )
self.assertAlmostEqual( q1.axis().x, axis.x, 3 )
self.assertAlmostEqual( q1.axis().y, axis.y, 3 )
self.assertAlmostEqual( q1.axis().z, axis.z, 3 )
self.assertAlmostEqual( q1.angle(), 0.5, 3 )
# Rotate x axis onto y axis
q1.setRotation( V3f(1,0,0), V3f(0,1,0) )
#We should have gone 90 degrees about the +ve z-axis
self.assertAlmostEqual( q1.angle(), 90.0 * math.pi / 180.0, 3 )
self.assertAlmostEqual( q1.axis().x, 0.0, 3 )
self.assertAlmostEqual( q1.axis().y, 0.0, 3 )
self.assertAlmostEqual( q1.axis().z, 1.0, 3 )
#inversion
q1 = Quatf( 1, 2, 3, 4 )
qdot = q1 ^ q1
qi_test = Quatf( q1.r / qdot, -q1.v / qdot)
qi = q1.inverse()
self.assertAlmostEqual(qi[0], qi_test[0], 3)
self.assertAlmostEqual(qi[1], qi_test[1], 3)
self.assertAlmostEqual(qi[2], qi_test[2], 3)
self.assertAlmostEqual(qi[3], qi_test[3], 3)
q1.invert()
self.assertAlmostEqual(qi[0], qi_test[0], 3)
self.assertAlmostEqual(qi[1], qi_test[1], 3)
self.assertAlmostEqual(qi[2], qi_test[2], 3)
self.assertAlmostEqual(qi[3], qi_test[3], 3)
#slerp
q2 = Quatf( 0.5, 0.6, 0.7, 0.8 )
qs = slerp(q1, q2, 0.5)
# normalization
qn = qi.normalized()
qn_test = Quatf( qi.r / qi.length(), qi.v / qi.length() )
self.assertAlmostEqual(qn[0], qn_test[0], 3)
self.assertAlmostEqual(qn[1], qn_test[1], 3)
self.assertAlmostEqual(qn[2], qn_test[2], 3)
self.assertAlmostEqual(qn[3], qn_test[3], 3)
qn = qi.normalize()
self.assertAlmostEqual(qn[0], qn_test[0], 3)
self.assertAlmostEqual(qn[1], qn_test[1], 3)
self.assertAlmostEqual(qn[2], qn_test[2], 3)
self.assertAlmostEqual(qn[3], qn_test[3], 3)
#matrix conversion
fromDir = V3f(1,0,0)
toDir = V3f(0,1,0)
q1.setRotation( fromDir, toDir )
m = q1.toMatrix33()
m = q1.toMatrix44()
def testOperators(self):
"""Test Quatf operators"""
q1 = Quatf( 1, 2, 3, 4 )
q2 = Quatf( 5, 6, 7, 8 )
self.assertAlmostEqual( q1 ^ q2, q1.r * q2.r + (q1.v ^ q2.v ), 3 )
def testSlerpStability( self ) :
q1 = Quatd( 0.60477471085951961527, 0.19082800913200048676, -0.73048263950686898038, 0.25343112163777203882, )
q2 = Quatd( 0.6047747108595192822, 0.190828009132000459, -0.73048263950686909141, 0.25343112163777264945, )
q3 = slerp( q1, q2, 0.5 )
self.assert_( q1.v.equalWithAbsError( q3.v, 0.000000000000001 ) )
self.assertAlmostEqual( q1.r, q3.r, 14 )
class ImathM33f(unittest.TestCase):
def testConstructors(self):
"""Test M33f constructors"""
m = M33f()
m = M33f(2)
m = M33f(1, 0, 0,
0, 1, 0,
0, 0, 1);
m = M33f( [ 1, 0, 0, 0, 1, 0, 0, 0, 1 ] )
self.assertRaises( RuntimeError, M33f, [ 1 ] )
self.assertRaises( RuntimeError, M33f, [ 1, 2 ] )
self.assertRaises( RuntimeError, M33f, [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ] )
def testDimensions(self):
"""Test M33f dimensions"""
m1 = M33f()
d = m1.dimensions()
self.assertEqual( d[0], 3 )
self.assertEqual( d[1], 3 )
def testCopyAndAssign(self):
"""Test M33f copy construction and assignment"""
m1 = M33f()
m2 = M33f(m1)
self.failIf(m1 is m2)
def testIndexing(self):
"""Test M33f indexing via [] operator"""
def testOperators(self):
"""Test M33f operators"""
x = 10
y = 2
m1 = M33f(x)
m2 = M33f(y)
self.assertEqual(m1 + m2, M33f(x + y))
self.assertEqual(m1 - m2, M33f(x - y))
self.assertEqual(m1 * y, M33f(x * y))
self.assertEqual(m1 / y, M33f(x / y))
def testMiscellaneousMethods(self):
"""Test M33f miscellaneous methods"""
m1 = M33f()
m1.makeIdentity()
m1 = M33f(3)
m2 = M33f(3.1)
self.assert_( m1.equalWithAbsError(m2, 0.1) )
m1 = M33f(2)
m2 = M33f(3)
self.assert_( m1.equalWithRelError(m2, 0.51) )
m1 = M33f(1, 0, 0,
0, 2, 0,
0, 0, 3)
self.assertEqual( m1.transposed().transposed(), m1)
def testEquality(self):
"""Test M33f comparison for equality"""
m1 = M33f(3)
m2 = M33f(3)
self.assertEqual(m1, m2)
def testCreate(self ) :
self.assertEqual( M33f(), M33f.createScaled( V2f( 1 ) ) )
m = M33f()
m.scale( V2f( 2, 3 ) )
self.assertEqual( m, M33f.createScaled( V2f( 2, 3 ) ) )
self.assertEqual( M33f(), M33f.createTranslated( V2f( 0 ) ) )
m = M33f()
m.translate( V2f( 2, 3 ) )
self.assertEqual( m, M33f.createTranslated( V2f( 2, 3 ) ) )
self.assertEqual( M33f(), M33f.createRotated( 0 ) )
m = M33f()
m.rotate( 2 )
self.assertEqual( m, M33f.createRotated( 2 ) )
def testMultMethods( self ) :
v = M33f.createTranslated( V2f( 1, 2 ) ).multVecMatrix( V2f( 0 ) )
self.assertEqual( v, V2f( 1, 2 ) )
v = M33f.createTranslated( V2f( 1, 2 ) ).multDirMatrix( V2f( 1 ) )
self.assertEqual( v, V2f( 1 ) )
def testDeterminant( self ) :
m = M33f()
self.assertAlmostEqual( m.determinant(), 1, 10 )
m.scale( V2f( -1, 1 ) )
self.assertAlmostEqual( m.determinant(), -1, 10 )
m.scale( V2f( 1, -1 ) )
self.assertAlmostEqual( m.determinant(), 1, 10 )
m.scale( V2f( 3, -1 ) )
self.assertAlmostEqual( m.determinant(), -3, 10 )
m.scale( V2f( 3, 3 ) )
self.assertAlmostEqual( m.determinant(), -27, 10 )
r = curry( random.uniform, -10, 10 )
for i in range( 0, 1000 ) :
m = M33f( r(), r(), r(), r(), r(), r(), r(), r(), r() )
d = m.determinant()
if math.fabs( d ) > 0.00001 :
mi = m.inverse()
di = mi.determinant()
self.assertAlmostEqual( d, 1/di, 1 )
mt = m.transposed()
self.assertAlmostEqual( d, mt.determinant(), 3 )
def testConstructFromOtherType( self ) :
md = M33d( 1, 2, 3, 4, 5, 6, 7, 8, 9 )
mf = M33f( 1, 2, 3, 4, 5, 6, 7, 8, 9 )
mf2 = M33f( md )
self.assertEqual( mf2, mf )
md2 = M33d( mf )
self.assertEqual( md2, md )
class ImathM44f(unittest.TestCase):
def testConstructors(self):
"""Test M44f constructors"""
m = M44f(1., 0., 0., 0.,
0., 1., 0., 0.,
0., 0., 1., 0.,
0., 0., 0., 1.);
m3 = M33f(1., 0., 0.,
0., 1., 0.,
0., 0., 1.)
t = V3f(5., 5., 5.)
m = M44f(m3, t)
m = M44f( [ 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 ] )
self.assertRaises( RuntimeError, M44f, [ 1 ] )
self.assertRaises( RuntimeError, M44f, [ 1, 2, 3, 4, 5, 6, 7, 8, 9 ] )
self.assertRaises( RuntimeError, M44f, [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 ] )
def testDimensions(self):
"""Test M44f dimensions"""
m1 = M44f()
d = m1.dimensions()
self.assertEqual( d[0], 4 )
self.assertEqual( d[1], 4 )
def testCopyAndAssign(self):
"""Test M44f copy construction and assignment"""
m1 = M44f()
m2 = M44f(m1)
self.failIf(m1 is m2)
m1 = m2
def testIndexing(self):
"""Test M44f indexing via [] operator"""
pass
def testOperators(self):
"""Test M44f operators"""
x = 10
y = 2
m1 = M44f(x)
m2 = M44f(y)
self.assertEqual(m1 + m2, M44f(x + y))
self.assertEqual(m1 - m2, M44f(x - y))
self.assertEqual(m1 * y, M44f(x * y))
self.assertEqual(m1 / y, M44f(x / y))
def testMiscellaneousMethods(self):
"""Test M44f miscellaneous methods"""
m1 = M44f()
m1.makeIdentity()
m1 = M44f(3)
m2 = M44f(3.1)
self.assert_( m1.equalWithAbsError(m2, 0.1) )
m1 = M44f(2)
m2 = M44f(3)
self.assert_( m1.equalWithRelError(m2, 0.51) )
m1 = M44f(1, 0, 0, 0,
0, 2, 0, 0,
0, 0, 3, 0,
0, 0, 0, 4)
self.assertEqual( m1.transposed().transposed(), m1)
def testEquality(self):
"""Test M44f comparison for equality"""
m1 = M44f(3)
m2 = M44f(3)
self.assertEqual(m1, m2)
def testCreate(self ) :
self.assertEqual( M44f(), M44f.createScaled( V3f( 1 ) ) )
m = M44f()
m.scale( V3f( 2, 3, 4 ) )
self.assertEqual( m, M44f.createScaled( V3f( 2, 3, 4 ) ) )
self.assertEqual( M44f(), M44f.createTranslated( V3f( 0 ) ) )
m = M44f()
m.translate( V3f( 2, 3, 4 ) )
self.assertEqual( m, M44f.createTranslated( V3f( 2, 3, 4 ) ) )
self.assertEqual( M44f(), M44f.createRotated( V3f( 0 ) ) )
m = M44f()
m.rotate( V3f( 1, 2, 3 ) )
self.assertEqual( m, M44f.createRotated( V3f( 1, 2, 3 ) ) )
m = M44f.createAimed( V3f( 1, 0, 0 ), V3f( 0, 1, 0 ) )
self.assert_( V3f( 0, 1, 0 ).equalWithAbsError( V3f( 1, 0, 0 ) * m, 0.0000001 ) )
m = M44f.createAimed( V3f( 1, 0, 0 ), V3f( 0, 0, 1 ), V3f( 0, 1, 0 ) )
self.assert_( V3f( 0, 0, 1 ).equalWithAbsError( V3f( 1, 0, 0 ) * m, 0.0000001 ) )
self.assert_( V3f( 0, 1, 0 ).equalWithAbsError( V3f( 0, 1, 0 ) * m, 0.0000001 ) )
def testMultMethods( self ) :
v = M44f.createTranslated( V3f( 1, 2, 3 ) ).multVecMatrix( V3f( 0 ) )
self.assertEqual( v, V3f( 1, 2, 3 ) )
v = M44f.createTranslated( V3f( 1, 2, 3 ) ).multDirMatrix( V3f( 1 ) )
self.assertEqual( v, V3f( 1 ) )
def testFromBasis( self ) :
for i in range( 0, 10000 ) :
m = M44f()
m.translate( V3f( random.uniform( -1000, 1000 ), random.uniform( -1000, 1000 ), random.uniform( -1000, 1000 ) ) )
m.rotate( V3f( random.uniform( -1000, 1000 ), random.uniform( -1000, 1000 ), random.uniform( -1000, 1000 ) ) )
m.scale( V3f( random.uniform( -100, 100 ), random.uniform( -100, 100 ), random.uniform( -100, 100 ) ) )
x = m.multDirMatrix( V3f( 1, 0, 0 ) )
y = m.multDirMatrix( V3f( 0, 1, 0 ) )
z = m.multDirMatrix( V3f( 0, 0, 1 ) )
o = V3f( 0, 0, 0 ) * m
self.assertEqual( M44f.createFromBasis( x, y, z, o ), m )
def testDeterminant( self ) :
m = M44f()
self.assertAlmostEqual( m.determinant(), 1, 10 )
m.scale( V3f( -1, 1, 1 ) )
self.assertAlmostEqual( m.determinant(), -1, 10 )
m.scale( V3f( 1, -1, 1 ) )
self.assertAlmostEqual( m.determinant(), 1, 10 )
m.scale( V3f( 3, -1, 1 ) )
self.assertAlmostEqual( m.determinant(), -3, 10 )
m.scale( V3f( 3, 3, 1 ) )
self.assertAlmostEqual( m.determinant(), -27, 10 )
random.seed( 42 )
r = curry( random.uniform, -2, 2 )
for i in range( 0, 1000 ) :
m = M44f( r(), r(), r(), r(), r(), r(), r(), r(), r(), r(), r(), r(), r(), r(), r(), r() )
d = m.determinant()
if math.fabs( d ) > 0.00001 :
mi = m.inverse()
di = mi.determinant()
self.assertAlmostEqual( d, 1/di, 4 )
mt = m.transposed()
self.assertAlmostEqual( d, mt.determinant(), 4 )
for i in range( 0, 1000 ) :
m = M44f()
m.translate( V3f( r(), r(), r() ) )
self.assertAlmostEqual( m.determinant(), 1, 10 )
def testConstructFromOtherType( self ) :
md = M44d( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 )
mf = M44f( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 )
mf2 = M44f( md )
self.assertEqual( mf2, mf )
md2 = M44d( mf )
self.assertEqual( md2, md )
class ImathColor3Test( unittest.TestCase ) :
	"""Tests for the three component colour type.

	Uses assertTrue in place of the failUnless alias, which was
	deprecated in Python 2.7/3.1 and removed in Python 3.12.
	"""

	def test( self ) :
		"""Exercise construction, conversion, arithmetic and luminance."""

		# scalar constructor broadcasts to every channel
		c = Color3f( 1 )
		self.assertEqual( c.r, 1 )
		self.assertEqual( c.g, 1 )
		self.assertEqual( c.b, 1 )

		c = Color3f( 1, 2, 3 )
		self.assertEqual( c.r, 1 )
		self.assertEqual( c.g, 2 )
		self.assertEqual( c.b, 3 )

		# copy constructor
		cc = Color3f( c )
		self.assertEqual( c, cc )

		# construction from float and double vectors
		c = Color3f( V3f( 1, 2, 3 ) )
		self.assertEqual( c.r, 1 )
		self.assertEqual( c.g, 2 )
		self.assertEqual( c.b, 3 )

		c = Color3f( V3d( 1, 2, 3 ) )
		self.assertEqual( c.r, 1 )
		self.assertEqual( c.g, 2 )
		self.assertEqual( c.b, 3 )

		# negation and scalar multiplication
		cm = -c * 2
		self.assertEqual( cm.r, -2 )
		self.assertEqual( cm.g, -4 )
		self.assertEqual( cm.b, -6 )

		# in place component-wise multiplication
		cm *= c
		self.assertEqual( cm.r, -2 )
		self.assertEqual( cm.g, -8 )
		self.assertEqual( cm.b, -18 )

		# in place subtraction
		cm -= Color3f( 2 )
		self.assertEqual( cm, Color3f( -4, -10, -20 ) )

		self.assertEqual( c.dimensions(), 3 )

		# luminance with default and explicit channel weights
		self.assertAlmostEqual( Color3f( 1, 1, 1 ).luminance(), 1 )
		self.assertAlmostEqual( Color3f( 1, 2, 3 ).luminance( V3f( 10, 1, 3 ) ), 21 )

	def testSaturation( self ) :
		"""Fully desaturating must collapse all channels to one value."""

		c = Color3f( 0.1, 0.5, 0.9 )
		c.adjustSaturation( 0 )
		self.assertEqual( c[0], c[1] )
		self.assertEqual( c[0], c[2] )

	def testHSVTransforms( self ) :
		"""rgbToHSV/hsvToRGB must round trip without mutating the source."""

		c = Color3f( 0.1, 0.2, 0.3 )

		chsv = c.rgbToHSV()
		# the source colour must not be modified in place
		self.assertEqual( c, Color3f( 0.1, 0.2, 0.3 ) )
		self.assertTrue( isinstance( chsv, Color3f ) )
		self.assertTrue( chsv.equalWithAbsError( Color3f( 0.5833, 0.6667, 0.3 ), 0.001 ) )

		crgb = chsv.hsvToRGB()
		self.assertTrue( chsv.equalWithAbsError( Color3f( 0.5833, 0.6667, 0.3 ), 0.001 ) )
		self.assertTrue( crgb.equalWithAbsError( c, 0.001 ) )
class ImathColor4Test( unittest.TestCase ) :
	"""Tests for the four component colour type."""

	def testSaturation( self ) :
		"""Desaturating must grey out rgb while leaving alpha untouched."""

		colour = Color4f( 0.1, 0.5, 0.9, 0.95 )
		colour.adjustSaturation( 0 )

		# fully desaturated : every colour channel collapses to one value
		self.assertEqual( colour[0], colour[1] )
		self.assertEqual( colour[0], colour[2] )
		# alpha must remain unchanged
		self.assertAlmostEqual( colour[3], 0.95 )
class ImathEulerfTest( unittest.TestCase ) :
	"""Tests for the Eulerf angle class.

	Uses assertTrue in place of the assert_ alias, which was deprecated
	in Python 2.7/3.1 and removed in Python 3.12.
	"""

	def testConstructors( self ) :
		"""Test Eulerf constructors"""

		# default construction: zero angles, default (XYZ) order
		e = Eulerf()
		self.assertEqual( e.x, 0 )
		self.assertEqual( e.y, 0 )
		self.assertEqual( e.z, 0 )
		self.assertEqual( e.order(), Eulerf.Order.Default )
		self.assertEqual( e.order(), Eulerf.Order.XYZ )

		# copy construction
		ecopy = Eulerf( e )
		self.assertEqual( ecopy.x, 0 )
		self.assertEqual( ecopy.y, 0 )
		self.assertEqual( ecopy.z, 0 )
		self.assertEqual( ecopy.order(), Eulerf.Order.Default )
		self.assertEqual( ecopy.order(), Eulerf.Order.XYZ )

		# construction from an order alone
		e = Eulerf( Eulerf.Order.ZYX )
		self.assertEqual( e.order(), Eulerf.Order.ZYX )

		# construction from a vector, with and without an order
		e = Eulerf( V3f( 0, 0, 0 ) )
		self.assertEqual( e.order(), Eulerf.Order.Default )
		self.assertEqual( e.order(), Eulerf.Order.XYZ )
		e = Eulerf( V3f( 0, 0, 0 ), Eulerf.Order.ZYX )
		self.assertEqual( e.order(), Eulerf.Order.ZYX )

		# construction from three angles, optionally with order and layout
		e = Eulerf( 0, 0, 0 )
		e = Eulerf( V3f( 0, 0, 0 ) )
		self.assertEqual( e.order(), Eulerf.Order.Default )
		self.assertEqual( e.order(), Eulerf.Order.XYZ )
		e = Eulerf( 0, 0, 0, Eulerf.Order.ZYX )
		self.assertEqual( e.order(), Eulerf.Order.ZYX )
		e = Eulerf( 0, 0, 0, Eulerf.Order.ZYX, Eulerf.InputLayout.XYZLayout )
		self.assertEqual( e.order(), Eulerf.Order.ZYX )

		# construction from a 3x3 matrix
		e = Eulerf( M33f() )
		e = Eulerf( V3f( 0, 0, 0 ) )
		self.assertEqual( e.order(), Eulerf.Order.Default )
		self.assertEqual( e.order(), Eulerf.Order.XYZ )
		e = Eulerf( M33f(), Eulerf.Order.ZYX )
		self.assertEqual( e.order(), Eulerf.Order.ZYX )

		# construction from a 4x4 matrix
		e = Eulerf( M44f() )
		e = Eulerf( V3f( 0, 0, 0 ) )
		self.assertEqual( e.order(), Eulerf.Order.Default )
		self.assertEqual( e.order(), Eulerf.Order.XYZ )
		e = Eulerf( M44f(), Eulerf.Order.ZYX )
		self.assertEqual( e.order(), Eulerf.Order.ZYX )

	def testOrder( self ) :
		"""Test Eulerf order"""

		self.assertEqual( len( Eulerf.Order.values ), 24 )
		e = Eulerf()
		# every published order must be legal and settable
		for order in Eulerf.Order.values.values() :
			self.assertTrue( Eulerf.legal( order ) )
			e.setOrder( order )
			self.assertEqual( e.order(), order )

	def testMisc( self ) :
		"""Test Eulerf miscellaneous"""

		self.assertEqual( len( Eulerf.Axis.values ), 3 )
		self.assertEqual( len( Eulerf.InputLayout.values ), 2 )
		# Eulerf derives from V3f
		self.assertTrue( V3f in Eulerf.__bases__ )

	def testExtract( self ) :
		"""Test Eulerf extract"""

		e = Eulerf()
		e.extract( M33f() )
		e.extract( M44f() )
		e.extract( Quatf() )
		# conversions back out must not raise
		m = e.toMatrix33()
		m = e.toMatrix44()
		q = e.toQuat()
		v = e.toXYZVector()

	def testAngleOrder( self ) :
		"""Test Eulerf angleOrder"""

		e = Eulerf()
		o = e.angleOrder()
		self.assertTrue( type( o ) is tuple )
		self.assertEqual( len( o ), 3 )

	def testAngleMapping( self ) :
		"""Test Eulerf angleMapping"""

		e = Eulerf()
		m = e.angleMapping()
		self.assertTrue( type( m ) is tuple )
		self.assertEqual( len( m ), 3 )

	def testStr( self ) :
		"""Test Eulerf str"""

		e = Eulerf()
		self.assertEqual( str( e ), "0 0 0" )

	def testRepr( self ) :
		"""Test Eulerf repr"""

		e = Eulerf()
		self.assertEqual( repr( e ), "IECore.Eulerf( 0, 0, 0 )" )

	def testSimpleXYZRotation( self ) :
		"""simpleXYZRotation must clamp angles without mutating its input."""

		e = Eulerf( math.pi * 6, math.pi * 10, -math.pi * 20 )
		ee = Eulerf( e )
		t = Eulerf( 0, 0, 0 )
		es = Eulerf.simpleXYZRotation( e, t )
		# check that the simple rotations are in an appropriate range
		for r in es :
			self.assertTrue( math.fabs( r ) <= math.pi )
		# and that the original vector isn't modified in place
		self.assertEqual( ee, e )

	def testNearestRotation( self ) :
		"""nearestRotation must not mutate its input."""

		e = Eulerf( math.pi * 6, math.pi * 10, -math.pi * 20 )
		ee = Eulerf( e )
		t = Eulerf( 0, 0, 0 )
		en = Eulerf.nearestRotation( e, t )
		# check that the original vector isn't modified in place
		self.assertEqual( ee, e )
class ImathPlane3fTest( unittest.TestCase ) :
	"""Checks the point/normal and three point Plane3f constructors."""

	def testConstructors( self ) :

		# every case below describes the plane x == distance, so the
		# normal is always the x axis
		cases = [
			# ( plane, expected distance )
			( Plane3f( V3f( 0, 0, 0 ), V3f( 1, 0, 0 ) ), 0 ),
			( Plane3f( V3f( 0, 0, 0 ), V3f( 0, 1, 0 ), V3f( 0, 0, 1 ) ), 0 ),
			( Plane3f( V3f( 2, 2, 2 ), V3f( 2, 3, 2 ), V3f( 2, 2, 3 ) ), 2 ),
		]
		for plane, expectedDistance in cases :
			self.assertEqual( plane.normal, V3f( 1, 0, 0 ) )
			self.assertEqual( plane.distance, expectedDistance )
# Allow the test suite to be run directly as a script.
if __name__ == "__main__":
    unittest.main()
| 25.041379 | 116 | 0.601793 |
d4c0ea9454c9cbe2f952f88ed7788fe77deba877 | 2,515 | py | Python | tasks.py | RTBHOUSE/here-be-pythons | c066768c3afca4e2aff2dcdcf5ead36ed1ba9827 | [
"MIT"
] | null | null | null | tasks.py | RTBHOUSE/here-be-pythons | c066768c3afca4e2aff2dcdcf5ead36ed1ba9827 | [
"MIT"
] | null | null | null | tasks.py | RTBHOUSE/here-be-pythons | c066768c3afca4e2aff2dcdcf5ead36ed1ba9827 | [
"MIT"
] | 1 | 2019-07-03T15:11:14.000Z | 2019-07-03T15:11:14.000Z | import invoke
from utils import cowsay
##############
# Core Tasks #
##############
@invoke.task
def coverage_report(c):
    """Open refreshed coverage report in a browser."""
    command = 'coverage html && open htmlcov/index.html'
    c.run(command, pty=True)
@invoke.task
def flake8_report(c):
    """Open refreshed Flake8 report in a browser."""
    # Regenerate the HTML report first, then open it.
    # NOTE(review): `open` is macOS specific -- confirm target platform.
    c.run(
        'python -m flake8 --format=html --htmldir=flake-report; '
        'open flake-report/index.html',
        pty=True
    )
@invoke.task
def linters(c):
    """Lint source code using Isort, YAPF and Flake8 (with various plugins)."""
    # Each stage prints a cowsay banner before running its tool.
    print(cowsay('Sort Python imports with Isort'))  # noqa: T001
    c.run('python -m isort --apply --quiet', pty=True)
    print(cowsay('Enforce Python style guide with YAPF'))  # noqa: T001
    c.run('python -m yapf --in-place --recursive .', pty=True)
    print(cowsay('Apply dozens of linters with Flake8'), end='\n\n')  # noqa: T001
    c.run('python -m flake8', pty=True)
@invoke.task
def set_precommit(c):
    """Set pre-commit Git hook saved in `$PROJECT_ROOT/githooks/pre-commit`."""
    # NOTE(review): the adjacent string literals concatenate without a space
    # before the last `&&`; the shell still parses `&&` as an operator so the
    # command works, but this is fragile if edited.
    c.run(
        'cp githooks/pre-commit .git/hooks/pre-commit '
        '&& chmod +x .git/hooks/pre-commit'
        '&& git config --bool flake8.strict true',
        pty=True
    )
@invoke.task
def tests(c):
    """Run pytests with coverage report."""
    command = 'python -m pytest --cov=find_broken_links --cov-branch'
    c.run(command, pty=True)
#################
# Building Docs #
#################
@invoke.task
def build_docs(c):
    """Build Sphinx HTML docs and save them in `$PROJECT_ROOT/docs/build/html/`."""
    # -E -a forces a full rebuild rather than an incremental one
    c.run('sphinx-build -E -a -b html docs/source/ docs/build/html/')
@invoke.task
def develop_docs(c):
    """
    Build Sphinx HTML docs and open them in the browser with hot reloading.

    The browser opens after 2 seconds.
    """
    c.run('sphinx-autobuild -b html --open-browser --delay 2 docs/source/ docs/build/html/')
################################
# Organise Invoke's namespaces #
################################
# The main namespace MUST be named `namespace` or `ns`.
# See: http://docs.pyinvoke.org/en/1.2/concepts/namespaces.html
namespace = invoke.Collection()
namespace.add_task(coverage_report)
namespace.add_task(flake8_report)
namespace.add_task(linters)
namespace.add_task(set_precommit)
namespace.add_task(tests)
docs_namespace = invoke.Collection('docs')
docs_namespace.add_task(build_docs, 'build')
docs_namespace.add_task(develop_docs, 'develop')
namespace.add_collection(docs_namespace)
| 26.473684 | 92 | 0.647714 |
78f8cf8892e66efffc7010a037073d847eaa3cb4 | 3,497 | py | Python | runway/cfngin/lookups/registry.py | cmilam87/runway | e1b2aca8e9468bf246ae5afa878ed4a36fb36ab1 | [
"Apache-2.0"
] | null | null | null | runway/cfngin/lookups/registry.py | cmilam87/runway | e1b2aca8e9468bf246ae5afa878ed4a36fb36ab1 | [
"Apache-2.0"
] | null | null | null | runway/cfngin/lookups/registry.py | cmilam87/runway | e1b2aca8e9468bf246ae5afa878ed4a36fb36ab1 | [
"Apache-2.0"
] | null | null | null | """CFNgin lookup registry."""
import logging
import warnings
from six import string_types
from runway.util import load_object_from_string
from ..exceptions import FailedVariableLookup, UnknownLookupType
from .handlers import ami, default, dynamodb, envvar
from .handlers import file as file_handler
from .handlers import hook_data, kms, output, rxref, split, ssmstore, xref
CFNGIN_LOOKUP_HANDLERS = {}
def register_lookup_handler(lookup_type, handler_or_path):
    """Register a lookup handler.

    Args:
        lookup_type (str): Name to register the handler under.
        handler_or_path (Union[Callable, str]): A function or a path to a
            handler.

    """
    handler = handler_or_path
    if isinstance(handler_or_path, string_types):
        # dotted-path form; import the object it names
        handler = load_object_from_string(handler_or_path)
    CFNGIN_LOOKUP_HANDLERS[lookup_type] = handler
    if not isinstance(handler, type):
        # Handler is not a new-style (class-based) handler; warn the caller
        # both via logging and via DeprecationWarning.
        logger = logging.getLogger(__name__)
        logger.warning("Registering lookup `%s`: Please upgrade to use the "
                       "new style of Lookups.", lookup_type)
        warnings.warn(
            # For some reason, this does not show up...
            # Leaving it in anyway
            "Lookup `%s`: Please upgrade to use the new style of Lookups"
            "." % lookup_type,
            DeprecationWarning,
            stacklevel=2,
        )
def unregister_lookup_handler(lookup_type):
    """Unregister the specified lookup type.

    This is useful when testing various lookup types if you want to unregister
    the lookup type after the test runs.

    Args:
        lookup_type (str): Name of the lookup type to unregister.

    """
    try:
        del CFNGIN_LOOKUP_HANDLERS[lookup_type]
    except KeyError:
        # absent entries are ignored, mirroring dict.pop(key, None)
        pass
def resolve_lookups(variable, context, provider):
    """Resolve a set of lookups.

    Args:
        variable (:class:`runway.cfngin.variables.Variable`): The variable
            resolving it's lookups.
        context (:class:`runway.cfngin.context.Context`): Context instance.
        provider (:class:`runway.cfngin.providers.base.BaseProvider`): Provider
            instance.

    Returns:
        Dict[str, Any]: Lookup -> resolved value

    Raises:
        UnknownLookupType: The lookup's type has no registered handler.
        FailedVariableLookup: A handler raised while resolving.

    """
    resolved_lookups = {}
    for lookup in variable.lookups:
        try:
            handler = CFNGIN_LOOKUP_HANDLERS[lookup.type]
        except KeyError:
            raise UnknownLookupType(lookup)
        try:
            resolved_lookups[lookup] = handler(
                value=lookup.input,
                context=context,
                provider=provider,
            )
        except Exception as err:
            # wrap so callers know which variable/lookup failed
            raise FailedVariableLookup(variable.name, lookup, err)
    return resolved_lookups
# Register the built-in lookup handlers at import time so they are always
# available to the config parser.
register_lookup_handler(ami.TYPE_NAME, ami.AmiLookup)
register_lookup_handler(default.TYPE_NAME, default.DefaultLookup)
register_lookup_handler(dynamodb.TYPE_NAME, dynamodb.DynamodbLookup)
register_lookup_handler(envvar.TYPE_NAME, envvar.EnvvarLookup)
register_lookup_handler(file_handler.TYPE_NAME, file_handler.FileLookup)
register_lookup_handler(hook_data.TYPE_NAME, hook_data.HookDataLookup)
register_lookup_handler(kms.TYPE_NAME, kms.KmsLookup)
register_lookup_handler(output.TYPE_NAME, output.OutputLookup)
register_lookup_handler(rxref.TYPE_NAME, rxref.RxrefLookup)
register_lookup_handler(split.TYPE_NAME, split.SplitLookup)
register_lookup_handler(ssmstore.TYPE_NAME, ssmstore.SsmstoreLookup)
register_lookup_handler(xref.TYPE_NAME, xref.XrefLookup)
| 34.623762 | 79 | 0.706892 |
535c97a2c3bd9238db06791e2a52b26922f0879d | 110,031 | py | Python | tests/validation/tests/v3_api/common.py | dveni/rancher | 6a0a991109ca2e88aff9687300b2fe5a70ff8e49 | [
"Apache-2.0"
] | 2 | 2021-11-01T05:49:31.000Z | 2021-11-01T05:49:37.000Z | tests/validation/tests/v3_api/common.py | dveni/rancher | 6a0a991109ca2e88aff9687300b2fe5a70ff8e49 | [
"Apache-2.0"
] | null | null | null | tests/validation/tests/v3_api/common.py | dveni/rancher | 6a0a991109ca2e88aff9687300b2fe5a70ff8e49 | [
"Apache-2.0"
] | null | null | null | from ..common import * # NOQA
import inspect
import json
import os
import random
import subprocess
import ssl
import time
import requests
import ast
import paramiko
import rancher
import pytest
from urllib.parse import urlparse
from rancher import ApiError
from lib.aws import AmazonWebServices
from copy import deepcopy
from threading import Lock
from threading import Thread
import websocket
import base64
DEFAULT_CATALOG_TIMEOUT = 15
DEFAULT_MONITORING_TIMEOUT = 180
DEFAULT_CLUSTER_STATE_TIMEOUT = 320
DEFAULT_MULTI_CLUSTER_APP_TIMEOUT = 300
DEFAULT_APP_DELETION_TIMEOUT = 360
DEFAULT_APP_V2_TIMEOUT = 60
CATTLE_API_URL = CATTLE_TEST_URL + "/v3"
CATTLE_AUTH_URL = \
CATTLE_TEST_URL + "/v3-public/localproviders/local?action=login"
DNS_REGEX = "(https*://)(.*[^/])"
USER_PASSWORD = os.environ.get('USER_PASSWORD', "None")
ADMIN_PASSWORD = os.environ.get('ADMIN_PASSWORD', "None")
kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"k8s_kube_config")
MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', "1200"))
HARDENED_CLUSTER = ast.literal_eval(
os.environ.get('RANCHER_HARDENED_CLUSTER', "False"))
TEST_OS = os.environ.get('RANCHER_TEST_OS', "linux")
TEST_IMAGE = os.environ.get('RANCHER_TEST_IMAGE', "ranchertest/mytestcontainer")
TEST_IMAGE_PORT = os.environ.get('RANCHER_TEST_IMAGE_PORT', "80")
TEST_IMAGE_NGINX = os.environ.get('RANCHER_TEST_IMAGE_NGINX', "nginx")
TEST_IMAGE_OS_BASE = os.environ.get('RANCHER_TEST_IMAGE_OS_BASE', "ubuntu")
if TEST_OS == "windows":
DEFAULT_TIMEOUT = 300
skip_test_windows_os = pytest.mark.skipif(
TEST_OS == "windows",
reason='Tests Skipped for including Windows nodes cluster')
skip_test_hardened = pytest.mark.skipif(
HARDENED_CLUSTER,
reason='Tests Skipped due to being a hardened cluster')
UPDATE_KDM = ast.literal_eval(os.environ.get('RANCHER_UPDATE_KDM', "False"))
KDM_URL = os.environ.get("RANCHER_KDM_URL", "")
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "")
RANCHER_CLEANUP_CLUSTER = \
ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True"))
env_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"rancher_env.config")
AWS_SSH_KEY_NAME = os.environ.get("AWS_SSH_KEY_NAME")
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_REGION = os.environ.get("AWS_REGION")
AWS_SUBNET = os.environ.get("AWS_SUBNET")
AWS_VPC = os.environ.get("AWS_VPC")
AWS_SG = os.environ.get("AWS_SG")
AWS_ZONE = os.environ.get("AWS_ZONE")
AWS_IAM_PROFILE = os.environ.get("AWS_IAM_PROFILE", "")
AWS_S3_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME", "")
AWS_S3_BUCKET_FOLDER_NAME = os.environ.get("AWS_S3_BUCKET_FOLDER_NAME", "")
LINODE_ACCESSKEY = os.environ.get('RANCHER_LINODE_ACCESSKEY', "None")
NFS_SERVER_MOUNT_PATH = "/nfs"
TEST_RBAC = ast.literal_eval(os.environ.get('RANCHER_TEST_RBAC', "False"))
if_test_rbac = pytest.mark.skipif(TEST_RBAC is False,
reason='rbac tests are skipped')
TEST_ALL_SNAPSHOT = ast.literal_eval(
os.environ.get('RANCHER_TEST_ALL_SNAPSHOT', "False")
)
if_test_all_snapshot = \
pytest.mark.skipif(TEST_ALL_SNAPSHOT is False,
reason='Snapshots check tests are skipped')
DATA_SUBDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resource')
# As of release 2.4 default rke scan profile is "rke-cis-1.4"
CIS_SCAN_PROFILE = os.environ.get('RANCHER_CIS_SCAN_PROFILE', "rke-cis-1.4")
# here are all supported roles for RBAC testing
CLUSTER_MEMBER = "cluster-member"
CLUSTER_OWNER = "cluster-owner"
PROJECT_MEMBER = "project-member"
PROJECT_OWNER = "project-owner"
PROJECT_READ_ONLY = "read-only"
rbac_data = {
"project": None,
"namespace": None,
"workload": None,
"p_unshared": None,
"ns_unshared": None,
"wl_unshared": None,
"users": {
CLUSTER_OWNER: {},
CLUSTER_MEMBER: {},
PROJECT_OWNER: {},
PROJECT_MEMBER: {},
PROJECT_READ_ONLY: {},
}
}
auth_rbac_data = {
"project": None,
"namespace": None,
"users": {}
}
# here are the global role templates used for
# testing globalRoleBinding and groupRoleBinding
TEMPLATE_MANAGE_CATALOG = {
"newUserDefault": "false",
"rules": [
{
"type": "/v3/schemas/policyRule",
"apiGroups": [
"management.cattle.io"
],
"verbs": [
"*"
],
"resources": [
"catalogs",
"templates",
"templateversions"
]
}
],
"name": "gr-test-manage-catalog",
}
TEMPLATE_LIST_CLUSTER = {
"newUserDefault": "false",
"rules": [
{
"type": "/v3/schemas/policyRule",
"apiGroups": [
"management.cattle.io"
],
"verbs": [
"get",
"list",
"watch"
],
"resources": [
"clusters"
]
}
],
"name": "gr-test-list-cluster",
}
# this is used when testing users from a auth provider
AUTH_PROVIDER = os.environ.get('RANCHER_AUTH_PROVIDER', "")
if AUTH_PROVIDER not in ["activeDirectory", "freeIpa", "openLdap", ""]:
pytest.fail("Invalid RANCHER_AUTH_PROVIDER. Please provide one of: "
"activeDirectory, freeIpa, or openLdap (case sensitive).")
NESTED_GROUP_ENABLED = ast.literal_eval(
os.environ.get('RANCHER_NESTED_GROUP_ENABLED', "False"))
# Admin Auth username and the shared password for all auth users
AUTH_USER_PASSWORD = os.environ.get('RANCHER_AUTH_USER_PASSWORD', "")
# the link to log in as an auth user
LOGIN_AS_AUTH_USER_URL = \
CATTLE_TEST_URL + "/v3-public/" \
+ AUTH_PROVIDER + "Providers/" \
+ AUTH_PROVIDER.lower() + "?action=login"
CATTLE_AUTH_PRINCIPAL_URL = CATTLE_TEST_URL + "/v3/principals?action=search"
# This is used for nested group when a third part Auth is enabled
nested_group = {
"auth_info": None,
"users": None,
"group_dic": None,
"groups": None
}
auth_requirements = not AUTH_PROVIDER or not AUTH_USER_PASSWORD
if_test_group_rbac = pytest.mark.skipif(
auth_requirements,
reason='Group RBAC tests are skipped.'
'Required AUTH env variables '
'have not been set.'
)
# -----------------------------------------------------------------------------
# global variables from test_create_ha.py
test_run_id = "test" + str(random.randint(10000, 99999))
RANCHER_HOSTNAME_PREFIX = os.environ.get("RANCHER_HOSTNAME_PREFIX",
test_run_id)
CERT_MANAGER_VERSION = os.environ.get("RANCHER_CERT_MANAGER_VERSION", "v1.0.1")
# -----------------------------------------------------------------------------
# this is used for testing rbac v2
test_rbac_v2 = os.environ.get("RANCHER_TEST_RBAC_V2", "False")
if_test_rbac_v2 = pytest.mark.skipif(test_rbac_v2 != "True",
reason='test for rbac v2 is skipped')
def is_windows(os_type=TEST_OS):
    # True when the node OS under test is Windows; defaults to the
    # RANCHER_TEST_OS environment setting captured in TEST_OS.
    return os_type == "windows"
def get_cluster_client_for_token_v1(cluster_id, token):
url = CATTLE_TEST_URL + "/k8s/clusters/" + cluster_id + "/v1/schemas"
return rancher.Client(url=url, token=token, verify=False)
def get_admin_client():
return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False)
def get_user_client():
return rancher.Client(url=CATTLE_API_URL, token=USER_TOKEN, verify=False)
def get_client_for_token(token, url=CATTLE_API_URL):
return rancher.Client(url=url, token=token, verify=False)
def get_project_client_for_token(project, token):
    """Return a rancher API client scoped to the given project."""
    schema_url = project.links['self'] + '/schemas'
    return rancher.Client(url=schema_url, token=token, verify=False)
def get_cluster_client_for_token(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def up(cluster, token):
    """Return a rancher API client scoped to the given cluster.

    This was a byte-for-byte duplicate of get_cluster_client_for_token();
    delegate to it so the two cannot drift apart.
    """
    return get_cluster_client_for_token(cluster, token)
def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT):
    """Block until `obj` reaches `state` (via wait_for), then return a
    freshly reloaded copy of the object."""
    wait_for(lambda: client.reload(obj).state == state, timeout)
    return client.reload(obj)
def wait_for_condition(client, resource, check_function, fail_handler=None,
                       timeout=DEFAULT_TIMEOUT):
    """Poll `resource` until `check_function(resource)` is truthy.

    Args:
        client: rancher API client used to reload the resource.
        resource: the API object being watched.
        check_function: predicate evaluated against the reloaded resource.
        fail_handler: optional callable producing extra text for the
            timeout error message.
        timeout: seconds to wait before raising.

    Returns:
        The reloaded resource that satisfied the condition.

    Raises:
        Exception: when the timeout elapses first.
    """
    start = time.time()
    resource = client.reload(resource)
    while not check_function(resource):
        if time.time() - start > timeout:
            # include the predicate's source so the failure is readable
            exceptionMsg = 'Timeout waiting for ' + resource.baseType + \
                           ' to satisfy condition: ' + \
                           inspect.getsource(check_function)
            if fail_handler:
                exceptionMsg = exceptionMsg + fail_handler(resource)
            raise Exception(exceptionMsg)
        time.sleep(.5)
        resource = client.reload(resource)
    return resource
def get_setting_value_by_name(name):
    """Fetch a rancher server setting's current value via the /v3 API,
    authenticated with the admin token."""
    settings_url = CATTLE_API_URL + "/settings/" + name
    head = {'Authorization': 'Bearer ' + ADMIN_TOKEN}
    response = requests.get(settings_url, verify=False, headers=head)
    return response.json()["value"]
# Return value is negative if v1 < v2, zero if v1 == v2 and positive if v1 > v2
def compare_versions(v1, v2):
    """Three-way compare two dotted numeric version strings.

    Args:
        v1, v2 (str): versions such as "1.17.4"; every dot-separated
            component must parse as an integer (no "v" prefix or tags).

    Returns:
        int: 1 if v1 > v2, -1 if v1 < v2, 0 when equal.
    """
    # Build each tuple once (the previous code converted each string twice).
    t1 = tuple(map(int, v1.split(".")))
    t2 = tuple(map(int, v2.split(".")))
    if t1 > t2:
        return 1
    if t1 < t2:
        return -1
    return 0
def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
server_url = cluster.links['self'].split("/clusters")[0]
client = get_client_for_token(token, server_url)
p = create_project(client, cluster, project_name)
c_client = get_cluster_client_for_token(cluster, token)
ns = create_ns(c_client, cluster, p, ns_name)
return p, ns
def create_project(client, cluster, project_name=None):
if project_name is None:
project_name = random_name()
p = client.create_project(name=project_name,
clusterId=cluster.id)
time.sleep(5)
p = wait_until_available(client, p)
assert p.state == 'active'
return p
def create_project_with_pspt(client, cluster, pspt):
p = client.create_project(name=random_name(),
clusterId=cluster.id)
p = wait_until_available(client, p)
assert p.state == 'active'
return set_pspt_for_project(p, client, pspt)
def set_pspt_for_project(project, client, pspt):
project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
project = wait_until_available(client, project)
assert project.state == 'active'
return project
def create_ns(client, cluster, project, ns_name=None):
if ns_name is None:
ns_name = random_name()
ns = client.create_namespace(name=ns_name,
clusterId=cluster.id,
projectId=project.id)
wait_for_ns_to_become_active(client, ns)
ns = client.reload(ns)
assert ns.state == 'active'
return ns
def assign_members_to_cluster(client, user, cluster, role_template_id):
crtb = client.create_cluster_role_template_binding(
clusterId=cluster.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return crtb
def assign_members_to_project(client, user, project, role_template_id):
prtb = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return prtb
def change_member_role_in_cluster(client, user, crtb, role_template_id):
crtb = client.update(
crtb,
roleTemplateId=role_template_id,
userId=user.id)
return crtb
def change_member_role_in_project(client, user, prtb, role_template_id):
prtb = client.update(
prtb,
roleTemplateId=role_template_id,
userId=user.id)
return prtb
def create_kubeconfig(cluster, file_name=kube_fname):
    """Generate the cluster's kubeconfig and write it to `file_name`.

    Args:
        cluster: rancher cluster object supporting generateKubeconfig().
        file_name (str): destination path; defaults to the shared
            k8s_kube_config file used by execute_kubectl_cmd().
    """
    generateKubeConfigOutput = cluster.generateKubeconfig()
    print(generateKubeConfigOutput.config)
    # Context manager ensures the handle is closed even if write() raises
    # (the previous open/write/close sequence leaked on failure).
    with open(file_name, "w") as kubeconfig_file:
        kubeconfig_file.write(generateKubeConfigOutput.config)
def validate_psp_error_worklaod(p_client, workload, error_message):
workload = wait_for_wl_transitioning(p_client, workload)
assert workload.state == "updating"
assert workload.transitioning == "error"
print(workload.transitioningMessage)
assert error_message in workload.transitioningMessage
def validate_all_workload_image_from_rancher(project_client, ns, pod_count=1,
ignore_pod_count=False,
deployment_list=None,
daemonset_list=None,
cronjob_list=None, job_list=None):
if cronjob_list is None:
cronjob_list = []
if daemonset_list is None:
daemonset_list = []
if deployment_list is None:
deployment_list = []
if job_list is None:
job_list = []
workload_list = deployment_list + daemonset_list + cronjob_list + job_list
wls = [dep.name for dep in project_client.list_workload(namespaceId=ns.id).data]
assert len(workload_list) == len(wls), \
"Expected {} workload(s) to be present in {} namespace " \
"but there were {}".format(len(workload_list), ns.name, len(wls))
for workload_name in workload_list:
workloads = project_client.list_workload(name=workload_name,
namespaceId=ns.id).data
assert len(workloads) == workload_list.count(workload_name), \
"Expected {} workload(s) to be present with name {} " \
"but there were {}".format(workload_list.count(workload_name),
workload_name, len(workloads))
for workload in workloads:
for container in workload.containers:
assert str(container.image).startswith("rancher/")
if workload_name in deployment_list:
validate_workload(project_client, workload, "deployment",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
deployment_list.remove(workload_name)
if workload_name in daemonset_list:
validate_workload(project_client, workload, "daemonSet",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
daemonset_list.remove(workload_name)
if workload_name in cronjob_list:
validate_workload(project_client, workload, "cronJob",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
cronjob_list.remove(workload_name)
if workload_name in job_list:
validate_workload(project_client, workload, "job",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
job_list.remove(workload_name)
# Final assertion to ensure all expected workloads have been validated
assert not deployment_list + daemonset_list + cronjob_list
def validate_workload(p_client, workload, type, ns_name, pod_count=1,
wait_for_cron_pods=60, ignore_pod_count=False):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
# For cronjob, wait for the first pod to get created after
# scheduled wait time
if type == "cronJob":
time.sleep(wait_for_cron_pods)
if ignore_pod_count:
pods = p_client.list_pod(workloadId=workload.id).data
else:
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
pods = p_client.list_pod(workloadId=workload.id).data
assert len(pods) == pod_count
for pod in pods:
if type == "job":
job_type = True
expected_status = "Succeeded"
else:
job_type = False
expected_status = "Running"
p = wait_for_pod_to_running(p_client, pod, job_type=job_type)
assert p["status"]["phase"] == expected_status
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
if type == "deployment" or type == "statefulSet":
assert wl_result["status"]["readyReplicas"] == len(pods)
if type == "daemonSet":
assert wl_result["status"]["currentNumberScheduled"] == len(pods)
if type == "cronJob":
assert len(wl_result["status"]["active"]) >= len(pods)
if type == "job":
assert wl_result["status"]["succeeded"] == len(pods)
def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
pod_count=1):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
assert wl_result["status"]["readyReplicas"] == pod_count
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
execute_kubectl_cmd(get_pods)
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
assert len(pod["status"]["containerStatuses"]) == 2
assert "running" in pod["status"]["containerStatuses"][0]["state"]
assert "running" in pod["status"]["containerStatuses"][1]["state"]
def validate_workload_paused(p_client, workload, expectedstatus):
workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused
assert workloadStatus == expectedstatus
def validate_pod_images(expectedimage, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for pod in pods["items"]:
assert pod["spec"]["containers"][0]["image"] == expectedimage
def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
curpodnames = []
for pod in pods["items"]:
curpodnames.append(pod["metadata"]["name"])
for expectedpod in expectedpods["items"]:
assert expectedpod["metadata"]["name"] in curpodnames
def validate_workload_image(client, workload, expectedImage, ns):
workload = client.list_workload(uuid=workload.uuid).data[0]
assert workload.containers[0].image == expectedImage
validate_pod_images(expectedImage, workload, ns.name)
def execute_kubectl_cmd(cmd, json_out=True, stderr=False,
                        kubeconfig=kube_fname):
    """Run a kubectl command against the test kubeconfig.

    Args:
        cmd (str): kubectl arguments, e.g. "get pods -n default".
        json_out (bool): append "-o json" and parse stdout into a dict.
        stderr (bool): capture stderr via run_command_with_stderr instead
            of run_command.
        kubeconfig (str): path of the kubeconfig file to use.

    Returns:
        dict when json_out is True, otherwise the raw command output.
    """
    command = 'kubectl --kubeconfig {0} {1}'.format(
        kubeconfig, cmd)
    if json_out:
        command += ' -o json'
    print("run cmd: \t{0}".format(command))
    if stderr:
        result = run_command_with_stderr(command, False)
    else:
        result = run_command(command, False)
    print("returns: \t{0}".format(result))
    if json_out:
        result = json.loads(result)
    return result
def run_command(command, log_out=True):
    """Run a shell command and return its stdout as text.

    Returns None when the command exits non-zero -- failures are
    deliberately swallowed (best-effort behaviour relied on by callers).
    """
    if log_out:
        print("run cmd: \t{0}".format(command))
    try:
        return subprocess.check_output(command, shell=True, text=True)
    except subprocess.CalledProcessError as e:
        return None
def run_command_with_stderr(command, log_out=True):
    """Run a shell command, capturing stderr.

    Returns stdout (bytes) on success, or the command's captured stderr
    (bytes) on a non-zero exit; the failure is logged, never raised.
    """
    if log_out:
        print("run cmd: \t{0}".format(command))
    try:
        output = subprocess.check_output(command, shell=True,
                                         stderr=subprocess.PIPE)
        returncode = 0
    except subprocess.CalledProcessError as e:
        # swap in the captured stderr so callers can inspect the error text
        output = e.stderr
        returncode = e.returncode
    if log_out:
        print("return code: \t{0}".format(returncode))
    if returncode != 0:
        print("output: \t{0}".format(output))
    return output
def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
    """Poll a workload (matched by uuid) until its state is "active".

    Returns the refreshed workload; raises AssertionError on timeout.
    """
    start = time.time()
    workloads = client.list_workload(uuid=workload.uuid).data
    assert len(workloads) == 1
    wl = workloads[0]
    while wl.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        workloads = client.list_workload(uuid=workload.uuid).data
        assert len(workloads) == 1
        wl = workloads[0]
    return wl
def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
    """Poll an ingress (matched by uuid) until its state is "active".

    Returns the refreshed ingress; raises AssertionError on timeout.
    """
    start = time.time()
    ingresses = client.list_ingress(uuid=ingress.uuid).data
    assert len(ingresses) == 1
    wl = ingresses[0]
    while wl.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        ingresses = client.list_ingress(uuid=ingress.uuid).data
        assert len(ingresses) == 1
        wl = ingresses[0]
    return wl
def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
                              state="error"):
    """Poll a workload until its `transitioning` field equals `state`.

    Args:
        client: project API client.
        workload: workload object to watch (matched by uuid).
        timeout (int): seconds to wait before failing.
        state (str): expected value of `transitioning`, "error" by default.

    Returns:
        The latest workload object once it matches.
    """
    start = time.time()
    workloads = client.list_workload(uuid=workload.uuid).data
    assert len(workloads) == 1
    wl = workloads[0]
    while wl.transitioning != state:
        if time.time() - start > timeout:
            # The old message ("state to get to active") was copy-pasted
            # from wait_for_wl_to_active and misleading here.
            raise AssertionError(
                "Timed out waiting for transitioning to reach " + state)
        time.sleep(.5)
        workloads = client.list_workload(uuid=workload.uuid).data
        assert len(workloads) == 1
        wl = workloads[0]
    return wl
def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT,
                            job_type=False):
    """Poll a pod until it reaches the state it is expected to settle in.

    Regular pods are waited on until "running"; job pods (job_type=True)
    until "succeeded".

    Returns:
        The latest pod object once it reaches the expected state.
    """
    start = time.time()
    pods = client.list_pod(uuid=pod.uuid).data
    assert len(pods) == 1
    p = pods[0]
    expected_state = "succeeded" if job_type else "running"
    while p.state != expected_state:
        if time.time() - start > timeout:
            # Report the state actually being waited for instead of the
            # copy-pasted "active" message.
            raise AssertionError(
                "Timed out waiting for state to get to " + expected_state)
        time.sleep(.5)
        pods = client.list_pod(uuid=pod.uuid).data
        assert len(pods) == 1
        p = pods[0]
    return p
def get_schedulable_nodes(cluster, client=None, os_type=TEST_OS):
    """Return the cluster's nodes that can receive pods of OS *os_type*.

    A node qualifies when it is a schedulable worker whose OS label
    (`kubernetes.io/os` or the legacy `beta.kubernetes.io/os`) matches
    *os_type*. On k3s/rke2 clusters control-plane nodes are also counted,
    since they are schedulable there.
    """
    if not client:
        client = get_user_client()
    nodes = client.list_node(clusterId=cluster.id).data
    schedulable_nodes = []
    for node in nodes:
        if node.worker and (not node.unschedulable):
            for key, val in node.labels.items():
                # Either one of the labels should be present on the node
                if key == 'kubernetes.io/os' or key == 'beta.kubernetes.io/os':
                    if val == os_type:
                        schedulable_nodes.append(node)
                    break
        # Including master in list of nodes as master is also schedulable
        if ('k3s' in cluster.version["gitVersion"] or 'rke2' in cluster.version["gitVersion"]) and node.controlPlane:
            schedulable_nodes.append(node)
    return schedulable_nodes
def get_etcd_nodes(cluster, client=None):
    """Return every node in *cluster* that carries the etcd role."""
    if client is None:
        client = get_user_client()
    cluster_nodes = client.list_node(clusterId=cluster.id).data
    return [n for n in cluster_nodes if n.etcd]
def get_role_nodes(cluster, role, client=None):
    """Return the cluster's nodes matching *role*.

    *role* is one of "etcd", "control" or "worker"; any other value
    yields an empty list.
    """
    if not client:
        client = get_user_client()
    nodes = client.list_node(clusterId=cluster.id).data
    nodes_by_role = {
        "etcd": [n for n in nodes if n.etcd],
        "control": [n for n in nodes if n.controlPlane],
        "worker": [n for n in nodes if n.worker],
    }
    return nodes_by_role.get(role, [])
def validate_ingress(p_client, cluster, workloads, host, path,
                     insecure_redirect=False):
    """Hit the ingress on every schedulable Linux node and check responses.

    Curls http://<node_ip><path> (with a Host header when *host* is set)
    on each node and asserts every backing pod name is eventually seen in
    the responses. *insecure_redirect* adds `-L --insecure` for ingresses
    that redirect to self-signed HTTPS.
    """
    # Grace period for the ingress controller to program its rules.
    time.sleep(10)
    curl_args = " "
    if (insecure_redirect):
        curl_args = " -L --insecure "
    if len(host) > 0:
        curl_args += " --header 'Host: " + host + "'"
    nodes = get_schedulable_nodes(cluster, os_type="linux")
    target_name_list = get_target_names(p_client, workloads)
    for node in nodes:
        host_ip = resolve_node_ip(node)
        url = "http://" + host_ip + path
        if not insecure_redirect:
            wait_until_ok(url, timeout=300, headers={
                "Host": host
            })
        cmd = curl_args + " " + url
        validate_http_response(cmd, target_name_list)
def validate_ingress_using_endpoint(p_client, ingress, workloads,
                                    timeout=300,
                                    certcheck=False, is_insecure=False):
    """Wait for the ingress to publish an endpoint, then validate it.

    Polls until a public endpoint whose hostname starts with the ingress
    name appears (or any endpoint when *certcheck* is set), builds the URL
    from protocol/hostname/path and asserts every backing pod answers.
    """
    target_name_list = get_target_names(p_client, workloads)
    start = time.time()
    fqdn_available = False
    url = None
    while not fqdn_available:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for endpoint to be available")
        time.sleep(.5)
        ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
        assert len(ingress_list) == 1
        ingress = ingress_list[0]
        if hasattr(ingress, 'publicEndpoints'):
            for public_endpoint in ingress.publicEndpoints:
                if public_endpoint["hostname"].startswith(ingress.name) \
                        or certcheck:
                    fqdn_available = True
                    url = \
                        public_endpoint["protocol"].lower() + "://" + \
                        public_endpoint["hostname"]
                    if "path" in public_endpoint.keys():
                        url += public_endpoint["path"]
    # Give DNS/routing a moment before asserting on responses.
    time.sleep(10)
    validate_http_response(url, target_name_list, insecure=is_insecure)
def get_target_names(p_client, workloads):
    """Collect the names of all pods backing *workloads*.

    These names are the expected response bodies when curling the
    test image's /name.html endpoint.
    """
    pod_names = [pod.name
                 for workload in workloads
                 for pod in p_client.list_pod(workloadId=workload.id).data]
    print("target name list:" + str(pod_names))
    return pod_names
def get_endpoint_url_for_workload(p_client, workload, timeout=600):
    """Wait for the workload's first public endpoint and return its URL.

    Returns "http://<address>:<port>" built from publicEndpoints[0];
    raises AssertionError if no endpoint appears within *timeout* seconds.
    """
    fqdn_available = False
    url = ""
    start = time.time()
    while not fqdn_available:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for endpoint to be available")
        time.sleep(.5)
        workload_list = p_client.list_workload(uuid=workload.uuid).data
        assert len(workload_list) == 1
        workload = workload_list[0]
        if hasattr(workload, 'publicEndpoints'):
            assert len(workload.publicEndpoints) > 0
            url = "http://"
            url = url + workload.publicEndpoints[0]["addresses"][0] + ":"
            url = url + str(workload.publicEndpoints[0]["port"])
            fqdn_available = True
    return url
def wait_until_lb_is_active(url, timeout=300):
    """Block until the load balancer at *url* accepts connections.

    Raises Exception after *timeout* seconds of connection failures.
    """
    start = time.time()
    while check_for_no_access(url):
        time.sleep(.5)
        print("No access yet")
        if time.time() - start > timeout:
            raise Exception('Timed out waiting for LB to become active')
    return
def check_for_no_access(url, verify=False):
    """Return True when *url* is unreachable (connection error), else False.

    Only connection failures count as "no access"; HTTP error statuses
    still return False because a response was received.
    """
    try:
        requests.get(url, verify=verify)
        return False
    except requests.ConnectionError:
        print("Connection Error - " + url)
        return True
def wait_until_active(url, timeout=120):
    """Block until *url* accepts TCP connections; raise after *timeout*s."""
    start = time.time()
    while check_for_no_access(url):
        time.sleep(.5)
        print("No access yet")
        if time.time() - start > timeout:
            raise Exception('Timed out waiting for url '
                            'to become active')
    return
def wait_until_ok(url, timeout=120, headers={}):
    """Block until a HEAD of *url* returns HTTP 200; raise after *timeout*s.

    NOTE(review): the mutable default `headers={}` is shared across calls;
    it is only read here, so this is safe as written.
    """
    start = time.time()
    while not check_if_ok(url, headers=headers):
        time.sleep(.5)
        if time.time() - start > timeout:
            raise Exception(
                'Timed out waiting for {0} to become ok'.format(url)
            )
    return
def wait_for_status_code(url, expected_code=200, timeout=DEFAULT_TIMEOUT):
    """Poll *url* (TLS verification disabled) until it returns *expected_code*.

    Raises Exception with the last observed status on timeout.
    """
    start = time.time()
    r = requests.get(url, verify=False)
    while r.status_code != expected_code:
        time.sleep(1)
        r = requests.get(url, verify=False)
        if time.time() - start > timeout:
            raise Exception(
                'Timed out waiting for status code {0}'
                ', actual code {1}'.format(
                    expected_code, r.status_code
                )
            )
    return
def check_if_ok(url, verify=False, headers={}):
    """Return True when a HEAD of *url* answers HTTP 200, else False.

    Connection errors are treated as "not ok" rather than raised.
    """
    try:
        res = requests.head(url, verify=verify, headers=headers)
        if res.status_code == 200:
            return True
        return False
    except requests.ConnectionError:
        print("Connection Error - " + url)
        return False
def validate_http_response(cmd, target_name_list, client_pod=None,
                           insecure=False):
    """Assert that repeated requests eventually hit every expected pod.

    *cmd* is either a URL/curl argument string (run with local curl when
    *client_pod* is None) or a URL fetched from inside *client_pod* via
    wget/Invoke-WebRequest. Each response body must be one of
    *target_name_list*; the loop runs up to 5x the number of targets and
    asserts that every target was seen at least once.
    """
    if client_pod is None and cmd.startswith("http://"):
        wait_until_active(cmd, 60)
    target_hit_list = target_name_list[:]
    count = 5 * len(target_name_list)
    for i in range(1, count):
        if len(target_hit_list) == 0:
            break
        if client_pod is None:
            curl_cmd = "curl " + cmd
            if insecure:
                curl_cmd += "\t--insecure"
            result = run_command(curl_cmd)
        else:
            if is_windows():
                wget_cmd = 'powershell -NoLogo -NonInteractive -Command ' \
                           '"& {{ (Invoke-WebRequest -UseBasicParsing -Uri ' \
                           '{0}).Content }}"'.format(cmd)
            else:
                wget_cmd = "wget -qO- " + cmd
            result = kubectl_pod_exec(client_pod, wget_cmd)
            result = result.decode()
        if result is not None:
            result = result.rstrip()
            assert result in target_name_list
            if result in target_hit_list:
                target_hit_list.remove(result)
    print("After removing all, the rest is: ", target_hit_list)
    assert len(target_hit_list) == 0
def validate_cluster(client, cluster, intermediate_state="provisioning",
                     check_intermediate_state=True, skipIngresscheck=True,
                     nodes_not_in_active_state=[], k8s_version="",
                     userToken=USER_TOKEN, timeout=MACHINE_TIMEOUT):
    """End-to-end validation of a freshly provisioned/updated cluster.

    Waits for the cluster (and its nodes) to become active, optionally
    checks the k8s version and component statuses, verifies all System
    project workloads are active, then deploys a DaemonSet test workload
    (plus an ingress when *skipIngresscheck* is False) and validates it.
    Returns the refreshed cluster object.
    """
    # Allow sometime for the "cluster_owner" CRTB to take effect
    time.sleep(5)
    cluster = validate_cluster_state(
        client, cluster,
        check_intermediate_state=check_intermediate_state,
        intermediate_state=intermediate_state,
        nodes_not_in_active_state=nodes_not_in_active_state,
        timeout=timeout)
    create_kubeconfig(cluster)
    if k8s_version != "":
        check_cluster_version(cluster, k8s_version)
    if hasattr(cluster, 'rancherKubernetesEngineConfig'):
        check_cluster_state(len(get_role_nodes(cluster, "etcd", client)))
    # check all workloads under the system project are active
    # wait for workloads to be active
    # time.sleep(DEFAULT_TIMEOUT)
    print("checking if workloads under the system project are active")
    sys_project = client.list_project(name='System',
                                      clusterId=cluster.id).data[0]
    sys_p_client = get_project_client_for_token(sys_project, userToken)
    for wl in sys_p_client.list_workload().data:
        """to help run KDM job faster (when there are many clusters),
        timeout=300 is set"""
        wait_for_wl_to_active(sys_p_client, wl, timeout=300)
    # Create Daemon set workload and have an Ingress with Workload
    # rule pointing to this daemonSet
    project, ns = create_project_and_ns(userToken, cluster)
    p_client = get_project_client_for_token(project, userToken)
    con = [{"name": "test1",
            "image": TEST_IMAGE}]
    name = random_test_name("default")
    workload = p_client.create_workload(name=name,
                                        containers=con,
                                        namespaceId=ns.id,
                                        daemonSetConfig={})
    # DaemonSet: expect one pod per schedulable node.
    validate_workload(p_client, workload, "daemonSet", ns.name,
                      len(get_schedulable_nodes(cluster, client)))
    if not skipIngresscheck:
        pods = p_client.list_pod(workloadId=workload["id"]).data
        scale = len(pods)
        # test service discovery
        validate_service_discovery(workload, scale, p_client, ns, pods)
        host = "test" + str(random_int(10000, 99999)) + ".com"
        path = "/name.html"
        rule = {"host": host,
                "paths":
                    [{"workloadIds": [workload.id],
                      "targetPort": TEST_IMAGE_PORT}]}
        ingress = p_client.create_ingress(name=name,
                                          namespaceId=ns.id,
                                          rules=[rule])
        wait_for_ingress_to_active(p_client, ingress)
        validate_ingress(p_client, cluster, [workload], host, path)
    return cluster
def check_cluster_version(cluster, version):
    """Assert the cluster's applied k8s version matches *version*.

    Also cross-checks `kubectl version`'s server gitVersion against the
    expected value with the "-rancherN" suffix stripped.
    """
    cluster_k8s_version = \
        cluster.appliedSpec["rancherKubernetesEngineConfig"][
            "kubernetesVersion"]
    assert cluster_k8s_version == version, \
        "cluster_k8s_version: " + cluster_k8s_version + \
        " Expected: " + version
    # Rancher versions look like "v1.20.4-rancher1-1"; kubectl reports
    # only the upstream part before "-rancher".
    expected_k8s_version = version[:version.find("-rancher")]
    k8s_version = execute_kubectl_cmd("version")
    kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"]
    assert kubectl_k8s_version == expected_k8s_version, \
        "kubectl version: " + kubectl_k8s_version + \
        " Expected: " + expected_k8s_version
def check_cluster_state(etcd_count):
    """Assert `kubectl get cs` reports exactly the expected healthy components.

    Expects scheduler, controller-manager and one etcd-N entry per etcd
    node, each with a Healthy=True condition.
    NOTE(review): componentstatuses is deprecated in recent Kubernetes
    releases — confirm this still works on the tested versions.
    """
    css_resp = execute_kubectl_cmd("get cs")
    css = css_resp["items"]
    components = ["scheduler", "controller-manager"]
    for i in range(0, etcd_count):
        components.append("etcd-" + str(i))
    print("components to check - " + str(components))
    for cs in css:
        component_name = cs["metadata"]["name"]
        assert component_name in components
        components.remove(component_name)
        assert cs["conditions"][0]["status"] == "True"
        assert cs["conditions"][0]["type"] == "Healthy"
    assert len(components) == 0
def validate_dns_record(pod, record, expected, port=TEST_IMAGE_PORT):
    """Resolve the service DNS name for *record* from *pod* and validate it.

    Builds the <name>.<namespace>.svc.cluster.local FQDN and delegates to
    validate_dns_entry.
    """
    # requires pod with `dig` available - TEST_IMAGE
    host = '{0}.{1}.svc.cluster.local'.format(
        record["name"], record["namespaceId"])
    validate_dns_entry(pod, host, expected, port=port)
def validate_dns_entry(pod, host, expected, port=TEST_IMAGE_PORT):
    """From inside *pod*, verify *host* resolves to one of *expected* values.

    Uses ping (or curl on hardened clusters) for connectivity, then `dig`
    to check the resolved addresses. Windows pods are delegated to
    validate_dns_entry_windows.
    """
    if is_windows():
        validate_dns_entry_windows(pod, host, expected)
        return

    # requires pod with `dig` available - TEST_IMAGE
    if HARDENED_CLUSTER:
        cmd = 'curl -vs {}:{} 2>&1'.format(host, port)
    else:
        cmd = 'ping -c 1 -W 1 {0}'.format(host)
    cmd_output = kubectl_pod_exec(pod, cmd)

    connectivity_validation_pass = False
    for expected_value in expected:
        if expected_value in str(cmd_output):
            connectivity_validation_pass = True
            break

    assert connectivity_validation_pass is True
    if HARDENED_CLUSTER:
        assert " 200 OK" in str(cmd_output)
    else:
        assert " 0% packet loss" in str(cmd_output)

    dig_cmd = 'dig {0} +short'.format(host)
    dig_output = kubectl_pod_exec(pod, dig_cmd)

    for expected_value in expected:
        assert expected_value in str(dig_output)
def validate_dns_entry_windows(pod, host, expected):
    """Windows-pod variant of validate_dns_entry.

    Retries ping (0% loss plus an expected value in the output) and
    Resolve-DnsName (all expected values present) until they pass or
    wait_for times out.
    """
    def ping_check():
        # One ping attempt; success requires an expected value AND no loss.
        ping_cmd = 'ping -w 1 -n 1 {0}'.format(host)
        ping_output = kubectl_pod_exec(pod, ping_cmd)

        ping_validation_pass = False
        for expected_value in expected:
            if expected_value in str(ping_output):
                ping_validation_pass = True
                break

        return ping_validation_pass and (" (0% loss)" in str(ping_output))
    wait_for(callback=ping_check,
             timeout_message="Failed to ping {0}".format(host))

    def dig_check():
        # PowerShell DNS lookup; every expected value must be resolved.
        dig_cmd = 'powershell -NoLogo -NonInteractive -Command ' \
                  '"& {{ (Resolve-DnsName {0}).IPAddress }}"'.format(host)
        dig_output = kubectl_pod_exec(pod, dig_cmd)
        dig_validation_pass = True
        for expected_value in expected:
            if expected_value not in str(dig_output):
                dig_validation_pass = False
                break
        return dig_validation_pass
    wait_for(callback=dig_check,
             timeout_message="Failed to resolve {0}".format(host))
def validate_dns_record_deleted(client, dns_record, timeout=DEFAULT_TIMEOUT):
    """
    Checks whether dns_record got deleted successfully.
    Validates if dns_record is null in for current object client.
    @param client: Object client use to create dns_record
    @param dns_record: record object subjected to be deleted
    @param timeout: Max time to keep checking whether record is deleted or not
    """
    # Small grace period so the delete request has a chance to propagate.
    time.sleep(2)
    start = time.time()
    records = client.list_dns_record(name=dns_record.name, ).data
    while len(records) != 0:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for record {} to be deleted"
                "".format(dns_record.name))
        time.sleep(.5)
        records = client.list_dns_record(name=dns_record.name, ).data
def wait_for_nodes_to_become_active(client, cluster, exception_list=[],
                                    retry_count=0):
    """Wait for every cluster node (minus *exception_list*) to become active.

    Nodes in nodepools can be auto-deleted while we wait; when that happens
    the node list is re-evaluated by recursing, up to 5 attempts.
    """
    nodes = client.list_node(clusterId=cluster.id).data
    node_auto_deleted = False
    for node in nodes:
        if node.requestedHostname not in exception_list:
            node = wait_for_node_status(client, node, "active")
            if node is None:
                print("Need to re-evalauate new node list")
                node_auto_deleted = True
                retry_count += 1
                print("Retry Count:" + str(retry_count))
    if node_auto_deleted and retry_count < 5:
        wait_for_nodes_to_become_active(client, cluster, exception_list,
                                        retry_count)
def wait_for_node_status(client, node, state):
    """Poll until *node* reaches *state*; return it, or None if it vanished.

    Nodes belonging to nodepools may be auto-deleted while waiting, in
    which case None is returned so the caller can refresh its node list.
    Raises AssertionError after MACHINE_TIMEOUT seconds.
    """
    uuid = node.uuid
    start = time.time()
    nodes = client.list_node(uuid=uuid).data
    node_count = len(nodes)
    # Handle the case of nodes getting auto deleted when they are part of
    # nodepools
    if node_count == 1:
        node_status = nodes[0].state
    else:
        print("Node does not exist anymore -" + uuid)
        return None
    while node_status != state:
        if time.time() - start > MACHINE_TIMEOUT:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(5)
        nodes = client.list_node(uuid=uuid).data
        node_count = len(nodes)
        if node_count == 1:
            node_status = nodes[0].state
        else:
            print("Node does not exist anymore -" + uuid)
            return None
    return node
def wait_for_node_to_be_deleted(client, node, timeout=300):
    """Block until *node* no longer appears in the API; raise on timeout."""
    uuid = node.uuid
    start = time.time()
    nodes = client.list_node(uuid=uuid).data
    node_count = len(nodes)
    while node_count != 0:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for node delete")
        time.sleep(.5)
        nodes = client.list_node(uuid=uuid).data
        node_count = len(nodes)
def wait_for_cluster_node_count(client, cluster, expected_node_count,
                                timeout=300):
    """Block until the cluster lists exactly *expected_node_count* nodes."""
    start = time.time()
    nodes = client.list_node(clusterId=cluster.id).data
    node_count = len(nodes)
    while node_count != expected_node_count:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        nodes = client.list_node(clusterId=cluster.id).data
        node_count = len(nodes)
def get_custom_host_registration_cmd(client, cluster, roles, node):
    """Build the agent registration command for a custom-cluster *node*.

    Reuses the cluster's existing registration token (creating one if
    absent) and appends --address/--internal-address plus one flag per
    role. Windows nodes (detected by ssh_user == 'Administrator') get the
    PowerShell command with the worker role injected before `| iex`.
    """
    allowed_roles = ["etcd", "worker", "controlplane"]
    cluster_tokens = client.list_cluster_registration_token(
        clusterId=cluster.id).data
    if len(cluster_tokens) > 0:
        cluster_token = cluster_tokens[0]
    else:
        cluster_token = create_custom_host_registration_token(client, cluster)

    additional_options = " --address " + node.public_ip_address + \
                         " --internal-address " + node.private_ip_address
    if 'Administrator' == node.ssh_user:
        cmd = cluster_token.windowsNodeCommand
        cmd = cmd.replace('| iex', '--worker' + additional_options + ' | iex ')
    else:
        cmd = cluster_token.nodeCommand
        for role in roles:
            assert role in allowed_roles
            cmd += " --" + role
        cmd += additional_options
    return cmd
def create_custom_host_registration_token(client, cluster):
    """Create and return an active cluster registration token."""
    # Allow sometime for the "cluster_owner" CRTB to take effect
    time.sleep(5)
    cluster_token = client.create_cluster_registration_token(
        clusterId=cluster.id)
    cluster_token = client.wait_success(cluster_token)
    assert cluster_token.state == 'active'
    return cluster_token
def get_cluster_by_name(client, name):
    """Return the unique cluster called *name*; assert it exists."""
    matches = client.list_cluster(name=name).data
    assert len(matches) == 1, "Cluster " + name + " does not exist"
    return matches[0]
def get_cluster_type(client, cluster):
    """Classify *cluster*.

    Returns the matching provider config key (EKS/AKS/GKE/RKE), "Custom"
    for an RKE cluster whose first node has no node template, or
    "Imported" when no provider config is present.
    """
    known_configs = (
        "amazonElasticContainerServiceConfig",
        "azureKubernetesServiceConfig",
        "googleKubernetesEngineConfig",
        "rancherKubernetesEngineConfig",
    )
    if "rancherKubernetesEngineConfig" in cluster:
        nodes = client.list_node(clusterId=cluster.id).data
        if len(nodes) > 0 and nodes[0].nodeTemplateId is None:
            return "Custom"
    matched = next((cfg for cfg in known_configs if cfg in cluster), None)
    return "Imported" if matched is None else matched
def delete_cluster(client, cluster):
    """Delete *cluster*, first terminating its AWS instances when needed.

    For Imported/Custom clusters the backing EC2 instances are looked up
    by name pattern (falling back to public-IP match) and deleted before
    the Rancher cluster object itself is removed.
    """
    nodes = client.list_node(clusterId=cluster.id).data
    # Delete nodes(in cluster) from AWS for Imported and Custom Cluster
    if len(nodes) > 0:
        # Fix: compute the cluster type once — the original called
        # get_cluster_type() a second time, re-listing all nodes.
        cluster_type = get_cluster_type(client, cluster)
        print(cluster_type)
        if cluster_type in ["Imported", "Custom"]:
            filters = [
                {'Name': 'tag:Name',
                 'Values': ['testcustom*', 'teststress*', 'testsa*']}]
            ip_filter = {}
            ip_list = []
            ip_filter['Name'] = \
                'network-interface.addresses.association.public-ip'
            ip_filter['Values'] = ip_list
            filters.append(ip_filter)
            for node in nodes:
                host_ip = resolve_node_ip(node)
                ip_list.append(host_ip)
            assert len(ip_filter) > 0
            print(ip_filter)
            aws_nodes = AmazonWebServices().get_nodes(filters)
            if aws_nodes is None:
                # search instances by IPs in case names do not follow patterns
                aws_nodes = AmazonWebServices().get_nodes(filters=[ip_filter])
            if aws_nodes is None:
                print("no instance is found in AWS")
            else:
                for node in aws_nodes:
                    print(node.public_ip_address)
                AmazonWebServices().delete_nodes(aws_nodes)
    # Delete Cluster
    client.delete(cluster)
def check_connectivity_between_workloads(p_client1, workload1, p_client2,
                                         workload2, allow_connectivity=True):
    """Check pod-to-pod connectivity between two workloads (cross product).

    Asserts every pod of *workload1* can (or, when *allow_connectivity*
    is False, cannot) reach every pod of *workload2*.
    """
    wl1_pods = p_client1.list_pod(workloadId=workload1.id).data
    wl2_pods = p_client2.list_pod(workloadId=workload2.id).data
    for pod in wl1_pods:
        for o_pod in wl2_pods:
            check_connectivity_between_pods(pod, o_pod, allow_connectivity)
def check_connectivity_between_workload_pods(p_client, workload):
    """Assert every pod of *workload* can reach every pod (incl. itself)."""
    pods = p_client.list_pod(workloadId=workload.id).data
    for pod in pods:
        for o_pod in pods:
            check_connectivity_between_pods(pod, o_pod)
def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
    """Assert *pod1* can (or cannot) reach *pod2* by pod IP.

    Uses ping on Linux/Windows or curl on hardened clusters, and inspects
    the platform-specific loss/status markers in the command output.
    """
    pod_ip = pod2.status.podIp

    if is_windows():
        cmd = 'ping -w 1 -n 1 {0}'.format(pod_ip)
    elif HARDENED_CLUSTER:
        cmd = 'curl -I {}:{}'.format(pod_ip, TEST_IMAGE_PORT)
    else:
        cmd = "ping -c 1 -W 1 " + pod_ip
    response = kubectl_pod_exec(pod1, cmd)
    if not HARDENED_CLUSTER:
        assert pod_ip in str(response)
    if allow_connectivity:
        if is_windows():
            assert " (0% loss)" in str(response)
        elif HARDENED_CLUSTER:
            assert " 200 OK" in str(response)
        else:
            assert " 0% packet loss" in str(response)
    else:
        if is_windows():
            assert " (100% loss)" in str(response)
        elif HARDENED_CLUSTER:
            assert " 200 OK" not in str(response)
        else:
            assert " 100% packet loss" in str(response)
def kubectl_pod_exec(pod, cmd):
    """Run *cmd* inside *pod* via `kubectl exec` and return the raw output."""
    full_cmd = "exec {0} -n {1} -- {2}".format(pod.name, pod.namespaceId, cmd)
    return execute_kubectl_cmd(full_cmd, json_out=False, stderr=True)
def exec_shell_command(ip, port, cmd, password, user="root", sshKey=None):
    """Run *cmd* over SSH on ip:port and return stdout as a list of lines.

    Authenticates with *sshKey* when given, otherwise with *password*.
    The client is always closed, fixing the connection leak in the
    original implementation.
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if sshKey:
        ssh.connect(ip, username=user, key_filename=sshKey, port=port)
    else:
        ssh.connect(ip, username=user, password=password, port=port)
    try:
        stdin, stdout, stderr = ssh.exec_command(cmd)
        response = stdout.readlines()
    finally:
        # Release the transport even if exec/read fails.
        ssh.close()
    return response
def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
    """Poll until namespace *ns* reports state "active"; return it.

    Raises AssertionError on timeout.
    """
    start = time.time()
    # Initial grace period before the first lookup.
    time.sleep(10)
    nss = client.list_namespace(uuid=ns.uuid).data
    assert len(nss) == 1
    ns = nss[0]
    while ns.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        nss = client.list_namespace(uuid=ns.uuid).data
        assert len(nss) == 1
        ns = nss[0]
    return ns
def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
                        timeout=DEFAULT_TIMEOUT):
    """Wait until the first *numofpods* pods of *workload* run *expectedimage*.

    Pods are selected with the workload's last label via kubectl; each
    pod's first container image is polled until it matches.
    Raises AssertionError on timeout.
    """
    start = time.time()

    # NOTE: only the last label in workloadLabels ends up in the selector,
    # matching the original behavior.
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods = execute_kubectl_cmd(get_pods)

    # Fix: iterate over all numofpods pods — the original
    # range(0, numofpods - 1) silently skipped the last pod.
    for x in range(0, numofpods):
        pod = pods["items"][x]
        podimage = pod["spec"]["containers"][0]["image"]
        while podimage != expectedimage:
            if time.time() - start > timeout:
                raise AssertionError(
                    "Timed out waiting for correct pod images")
            time.sleep(.5)
            pods = execute_kubectl_cmd(get_pods)
            pod = pods["items"][x]
            podimage = pod["spec"]["containers"][0]["image"]
def wait_for_pods_in_workload(p_client, workload, pod_count,
                              timeout=DEFAULT_TIMEOUT):
    """Poll until *workload* has exactly *pod_count* pods; return them."""
    start = time.time()
    pods = p_client.list_pod(workloadId=workload.id).data
    while len(pods) != pod_count:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for pods in workload {}. Expected {}. "
                "Got {}".format(workload.name, pod_count, len(pods)))
        time.sleep(.5)
        pods = p_client.list_pod(workloadId=workload.id).data
    return pods
def get_user_client_and_cluster(client=None):
    """Return (user client, target cluster).

    Picks the cluster named CLUSTER_NAME, or the first listed cluster when
    CLUSTER_NAME is empty.
    """
    if not client:
        client = get_user_client()
    if CLUSTER_NAME == "":
        clusters = client.list_cluster().data
    else:
        clusters = client.list_cluster(name=CLUSTER_NAME).data
    assert len(clusters) > 0
    cluster = clusters[0]
    return client, cluster
def get_global_admin_client_and_cluster():
    """Return (admin client, target cluster); see get_user_client_and_cluster."""
    client = get_admin_client()
    if CLUSTER_NAME == "":
        clusters = client.list_cluster().data
    else:
        clusters = client.list_cluster(name=CLUSTER_NAME).data
    assert len(clusters) > 0
    cluster = clusters[0]
    return client, cluster
def validate_cluster_state(client, cluster,
                           check_intermediate_state=True,
                           intermediate_state="provisioning",
                           nodes_not_in_active_state=[],
                           timeout=MACHINE_TIMEOUT):
    """Wait for a cluster to pass through provisioning into "active".

    Optionally asserts the intermediate state first, then waits for
    "active", for all nodes (minus *nodes_not_in_active_state*) to become
    active, and finally for the k8s version field to be synced. Returns
    the refreshed cluster and logs total elapsed time.
    """
    start_time = time.time()
    if check_intermediate_state:
        cluster = wait_for_condition(
            client, cluster,
            lambda x: x.state == intermediate_state,
            lambda x: 'State is: ' + x.state,
            timeout=timeout)
        assert cluster.state == intermediate_state
    cluster = wait_for_condition(
        client, cluster,
        lambda x: x.state == "active",
        lambda x: 'State is: ' + x.state,
        timeout=timeout)
    assert cluster.state == "active"
    wait_for_nodes_to_become_active(client, cluster,
                                    exception_list=nodes_not_in_active_state)
    # The "version" key appears only after the server syncs the k8s
    # version; give it up to 60s.
    timeout = 60
    start = time.time()
    while "version" not in cluster.keys():
        time.sleep(1)
        cluster = client.reload(cluster)
        delta = time.time() - start
        if delta > timeout:
            msg = "Timeout waiting for K8s version to be synced"
            raise Exception(msg)
    end_time = time.time()
    diff = time.strftime("%H:%M:%S", time.gmtime(end_time - start_time))
    print("The total time for provisioning/updating the cluster {} : {}".
          format(cluster.name, diff))
    return cluster
def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
    """Reload *obj* with exponential backoff until the API stops returning 403.

    A 403 means the object is not yet visible to this client (e.g. RBAC
    still propagating). Any other ApiError is re-raised immediately.
    Returns the reloaded object or raises Exception on timeout.
    """
    start = time.time()
    sleep = 0.01
    while True:
        time.sleep(sleep)
        # Backoff doubles up to a 2-second cap.
        sleep *= 2
        if sleep > 2:
            sleep = 2
        try:
            obj = client.reload(obj)
        except ApiError as e:
            if e.error.status != 403:
                raise e
        else:
            return obj
        delta = time.time() - start
        if delta > timeout:
            msg = 'Timeout waiting for [{}:{}] for condition after {}' \
                  ' seconds'.format(obj.type, obj.id, delta)
            raise Exception(msg)
def delete_node(aws_nodes):
    """Terminate every AWS instance in *aws_nodes*."""
    for aws_node in aws_nodes:
        AmazonWebServices().delete_node(aws_node)
def cluster_cleanup(client, cluster, aws_nodes=None):
    """Tear down *cluster* (and its AWS nodes) when cleanup is enabled.

    When RANCHER_CLEANUP_CLUSTER is false, instead persists the env
    details to a config file so a later job can reuse the cluster.
    """
    if RANCHER_CLEANUP_CLUSTER:
        client.delete(cluster)
        if aws_nodes is not None:
            delete_node(aws_nodes)
    else:
        env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
        env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
        env_details += "env.USER_TOKEN='" + USER_TOKEN + "'\n"
        env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
        create_config_file(env_details)
def create_config_file(env_details):
    """Overwrite the env config file with *env_details*.

    Uses a context manager so the handle is closed even if the write
    raises (the original open/close pair could leak on error).
    """
    with open(env_file, "w") as config_file:
        config_file.write(env_details)
def validate_hostPort(p_client, workload, source_port, cluster):
    """Validate a hostPort-exposed workload on every schedulable node.

    Asserts the published endpoint port equals *source_port*, then curls
    http://<node_ip>:<port>/name.html on each node that hosts a pod and
    checks the response names.
    """
    get_endpoint_url_for_workload(p_client, workload)
    wl = p_client.list_workload(uuid=workload.uuid).data[0]
    source_port_wk = wl.publicEndpoints[0]["port"]
    assert source_port == source_port_wk, "Source ports do not match"
    pods = p_client.list_pod(workloadId=workload.id).data
    nodes = get_schedulable_nodes(cluster)
    for node in nodes:
        target_name_list = []
        for pod in pods:
            print(pod.nodeId + " check " + node.id)
            # hostPort: only the pod scheduled on this node can answer.
            if pod.nodeId == node.id:
                target_name_list.append(pod.name)
                break
        if len(target_name_list) > 0:
            host_ip = resolve_node_ip(node)
            curl_cmd = " http://" + host_ip + ":" + \
                       str(source_port) + "/name.html"
            validate_http_response(curl_cmd, target_name_list)
def validate_lb(p_client, workload, source_port):
    """Validate an L4 load-balancer service fronting *workload*.

    Checks the published port, waits for the LB to accept connections,
    then asserts every backing pod answers at <url>/name.html.
    """
    url = get_endpoint_url_for_workload(p_client, workload)
    wl = p_client.list_workload(uuid=workload.uuid).data[0]
    source_port_wk = wl.publicEndpoints[0]["port"]
    assert source_port == source_port_wk, "Source ports do not match"
    target_name_list = get_target_names(p_client, [workload])
    wait_until_lb_is_active(url)
    validate_http_response(url + "/name.html", target_name_list)
def validate_nodePort(p_client, workload, cluster, source_port):
    """Validate a NodePort service on every schedulable node.

    Checks the published port, then curls
    http://<node_ip>:<nodePort>/name.html on each node and asserts all
    backing pod names are seen.
    """
    get_endpoint_url_for_workload(p_client, workload, 600)
    wl = p_client.list_workload(uuid=workload.uuid).data[0]
    source_port_wk = wl.publicEndpoints[0]["port"]
    assert source_port == source_port_wk, "Source ports do not match"
    nodes = get_schedulable_nodes(cluster)
    pods = p_client.list_pod(workloadId=wl.id).data
    target_name_list = []
    for pod in pods:
        target_name_list.append(pod.name)
    print("target name list:" + str(target_name_list))
    for node in nodes:
        host_ip = resolve_node_ip(node)
        curl_cmd = " http://" + host_ip + ":" + \
                   str(source_port_wk) + "/name.html"
        validate_http_response(curl_cmd, target_name_list)
def validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port):
    """Validate a ClusterIP service from inside each of *test_pods*.

    Each test pod wgets http://<cluster_ip>:<port>/name.html and the
    responses must cover every pod backing *workload*.
    """
    pods = p_client.list_pod(workloadId=workload.id).data
    target_name_list = []
    for pod in pods:
        target_name_list.append(pod["name"])
    curl_cmd = "http://" + cluster_ip + ":" + \
               str(source_port) + "/name.html"
    for pod in test_pods:
        validate_http_response(curl_cmd, target_name_list, pod)
def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
    """Poll until the persistent volume reaches "available"; return it.

    Raises AssertionError on timeout.
    """
    start = time.time()
    # Short grace period before the first lookup.
    time.sleep(2)
    while True:
        volumes = c_client.list_persistent_volume(uuid=pv_object.uuid).data
        assert len(volumes) == 1
        pv = volumes[0]
        if pv.state == "available":
            return pv
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to available")
        time.sleep(.5)
def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
    """Poll until the persistent volume claim reaches "bound"; return it.

    Raises AssertionError on timeout.
    """
    start = time.time()
    # Short grace period before the first lookup.
    time.sleep(2)
    while True:
        claims = p_client.list_persistent_volume_claim(
            uuid=pvc_object.uuid).data
        assert len(claims) == 1
        pvc = claims[0]
        if pvc.state == "bound":
            return pvc
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to bound")
        time.sleep(.5)
def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
                       mount_path, sub_path, is_daemonSet=False):
    """Create a test workload that mounts PVC *pvc_name* at *mount_path*.

    The single container runs TEST_IMAGE with the claim mounted
    read-write at mount_path/sub_path. Set *is_daemonSet* to deploy one
    pod per node instead of a deployment. Returns the created workload.
    """
    volumes = [{"type": "volume",
                "name": "vol1",
                "persistentVolumeClaim": {
                    "readOnly": "false",
                    "type": "persistentVolumeClaimVolumeSource",
                    "persistentVolumeClaimId": pvc_name
                }}]
    volumeMounts = [{"readOnly": "False",
                     "type": "volumeMount",
                     "mountPath": mount_path,
                     "subPath": sub_path,
                     "name": "vol1"
                     }]
    con = [{"name": "test1",
            "image": TEST_IMAGE,
            "volumeMounts": volumeMounts
            }]
    if is_daemonSet:
        workload = p_client.create_workload(name=wl_name,
                                            containers=con,
                                            namespaceId=ns_id,
                                            volumes=volumes,
                                            daemonSetConfig={})
    else:
        workload = p_client.create_workload(name=wl_name,
                                            containers=con,
                                            namespaceId=ns_id,
                                            volumes=volumes)
    return workload
def write_content_to_file(pod, content, filename):
    """Write *content* into *filename* inside *pod* and assert empty output.

    Uses bash on Linux pods and PowerShell on Windows pods.
    """
    cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
    if is_windows():
        # Fix: the PowerShell script-block braces must be doubled ({{ }})
        # or str.format raises ValueError on the unmatched '{'.
        cmd_write = \
            'powershell -NoLogo -NonInteractive -Command ' \
            '"& {{ echo {1} > {0} }}"'.format(filename, content)
    output = kubectl_pod_exec(pod, cmd_write)
    assert output.strip().decode('utf-8') == ""
def validate_file_content(pod, content, filename):
    """Assert *filename* inside *pod* contains exactly *content*."""
    cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
    if is_windows():
        # Fix: the PowerShell script-block braces must be doubled ({{ }})
        # or str.format raises ValueError on the unmatched '{'.
        cmd_get_content = 'powershell -NoLogo -NonInteractive -Command ' \
                          '"& {{ cat {0} }}"'.format(filename)
    output = kubectl_pod_exec(pod, cmd_get_content)
    assert output.strip().decode('utf-8') == content
def wait_for_mcapp_to_active(client, multiClusterApp,
                             timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
    """Poll until the multi-cluster app settles in "active"; return it."""
    time.sleep(5)
    # When the app is deployed it goes into Active state for a short
    # period of time and then into installing/deploying.
    mcapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid,
                                         name=multiClusterApp.name).data
    start = time.time()
    assert len(mcapps) == 1, "Cannot find multi cluster app"
    mapp = mcapps[0]
    while mapp.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        multiclusterapps = client.list_multiClusterApp(
            uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
        assert len(multiclusterapps) == 1
        mapp = multiclusterapps[0]
    return mapp
def wait_for_app_to_active(client, app_id,
                           timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
    """
    First wait for app to come in deployment state, then wait for it get
    in active state. This is to avoid wrongly conclude that app is active
    as app goes to state installing > active > deploying > active
    @param client: Project client
    @param app_id: App id of deployed app.
    @param timeout: Max time allowed to wait for app to become active.
    @return: app object
    """
    start = time.time()
    # Phase 1: wait (up to timeout/10) for the app to be listed at all.
    app_data = client.list_app(id=app_id).data
    while len(app_data) == 0:
        if time.time() - start > timeout / 10:
            raise AssertionError(
                "Timed out waiting for listing the app from API")
        time.sleep(.2)
        app_data = client.list_app(id=app_id).data
    application = app_data[0]
    # Phase 2: wait (up to timeout/3, best-effort) for "deploying" so the
    # transient early "active" state is not mistaken for completion.
    while application.state != "deploying":
        if time.time() - start > timeout / 3:
            break
        time.sleep(.2)
        app_data = client.list_app(id=app_id).data
        application = app_data[0]
    # Phase 3: wait for the final "active" state.
    while application.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for {0} to get to active,"
                " the actual state: {1}".format(application.name,
                                                application.state))
        time.sleep(.5)
        app = client.list_app(id=app_id).data
        assert len(app) >= 1
        application = app[0]
    return application
def wait_for_app_to_remove(client, app_id,
                           timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
    """Wait until app *app_id* is gone or no longer active/removing.

    Returns as soon as the app disappears from the API or leaves the
    "removing"/"active" states; raises AssertionError after timeout/10.
    """
    start = time.time()
    app_data = client.list_app(id=app_id).data
    if len(app_data) == 0:
        return
    application = app_data[0]
    while application.state == "removing" or application.state == "active":
        if time.time() - start > timeout / 10:
            raise AssertionError(
                "Timed out waiting for app to not be installed")
        time.sleep(.2)
        app_data = client.list_app(id=app_id).data
        if len(app_data) == 0:
            break
        application = app_data[0]
def validate_response_app_endpoint(p_client, appId,
                                   timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
    """Poll the app's ingress endpoints until one answers HTTP 200.

    Fails the test on connection errors or when no endpoint returns 200
    within *timeout* seconds.
    """
    ingress_list = p_client.list_ingress(namespaceId=appId).data
    assert len(ingress_list) == 1
    ingress = ingress_list[0]
    if hasattr(ingress, 'publicEndpoints'):
        for public_endpoint in ingress.publicEndpoints:
            url = \
                public_endpoint["protocol"].lower() + "://" + \
                public_endpoint["hostname"]
            print(url)
            start = time.time()
            try:
                while True:
                    r = requests.head(url)
                    print(r.status_code)
                    if r.status_code == 200:
                        return
                    if time.time() - start > timeout:
                        raise AssertionError(
                            "Timed out waiting response to be 200.")
                    time.sleep(.5)
            except requests.ConnectionError:
                print("failed to connect")
                assert False, "failed to connect to the app"
def resolve_node_ip(node):
    """Return the node's external IP when present, else its internal one."""
    if hasattr(node, 'externalIpAddress'):
        return node.externalIpAddress
    return node.ipAddress
def provision_nfs_server():
    """Create an AWS node, run the NFS setup script on it, return the node.

    Reads tests/v3_api/scripts/nfs-setup.sh relative to the current
    working directory and executes it over SSH on the new node.
    """
    node = AmazonWebServices().create_node(random_test_name("nfs-server"))
    node.wait_for_ssh_ready()
    c_path = os.getcwd()
    cmd_path = c_path + "/tests/v3_api/scripts/nfs-setup.sh"
    # Context manager closes the script file (the original leaked the
    # open handle).
    with open(cmd_path, 'r') as script:
        command = script.read()
    node.execute_command(command)
    return node
def get_defaut_question_answers(client, externalId):
    """Build the default {variable: answer} map for a catalog template.

    Fetches the template version identified by *externalId* and answers
    every applicable question with its default, or a synthesized value
    for required questions without one (first enum option, a fixed
    password, or "fake" for strings). Questions gated by `showIf` are
    only answered when the referenced earlier answers match, and
    subquestions are answered when `showSubquestionIf` matches.
    """
    def get_answer(quest):
        # Default answer, or a synthesized value for required questions.
        if "default" in quest.keys():
            answer = quest["default"]
        else:
            answer = ""
        # If required and no default value is available, set fake value
        # only for type string . For other types error out
        if "required" in quest.keys():
            if quest["required"]:
                if quest["type"] == "enum" and "options" in quest.keys():
                    answer = quest["options"][0]
                elif quest["type"] == "password":
                    answer = "R@ncher135"
                elif quest["type"] == "string":
                    answer = "fake"
                else:
                    assert False, \
                        "Cannot set default for types {}" \
                        "".format(quest["type"])
        return answer

    def check_if_question_needed(questions_and_answers, ques):
        # Evaluate the "var=value&&var2=value2" showIf expression against
        # the answers collected so far; a single mismatch rejects it.
        add_question = False
        match_string = ques["showIf"]
        match_q_as = match_string.split("&&")
        for q_a in match_q_as:
            items = q_a.split("=")
            if len(items) == 1:
                items.append("")
            if items[0] in questions_and_answers.keys():
                if questions_and_answers[items[0]] == items[1]:
                    add_question = True
                else:
                    add_question = False
                    break
        return add_question

    questions_and_answers = {}
    print("external id = {}".format(externalId))
    template_revs = client.list_template_version(externalId=externalId).data
    assert len(template_revs) == 1
    template_rev = template_revs[0]
    questions = template_rev.questions
    for ques in questions:
        add_question = True
        if "showIf" in ques.keys():
            add_question = \
                check_if_question_needed(questions_and_answers, ques)
        if add_question:
            question = ques["variable"]
            answer = get_answer(ques)
            # Fix: reuse the already-computed answer instead of calling
            # get_answer() a second time.
            questions_and_answers[question] = answer
            if "showSubquestionIf" in ques.keys():
                if ques["showSubquestionIf"] == answer:
                    sub_questions = ques["subquestions"]
                    for sub_question in sub_questions:
                        question = sub_question["variable"]
                        questions_and_answers[question] = \
                            get_answer(sub_question)
    print("questions_and_answers = {}".format(questions_and_answers))
    return questions_and_answers
def validate_app_deletion(client, app_id,
                          timeout=DEFAULT_APP_DELETION_TIMEOUT):
    """Block until the app with *app_id* is gone or leaves "removing".

    Raises AssertionError when *timeout* seconds elapse first.
    """
    deadline = time.time() + timeout
    apps = client.list_app(id=app_id).data
    if not apps:
        return
    app = apps[0]
    while app.state == "removing":
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for app to delete")
        time.sleep(.5)
        apps = client.list_app(id=app_id).data
        if not apps:
            break
        app = apps[0]
def validate_catalog_app(proj_client, app, external_id, answer=None):
    """
    This method validates all the workloads deployed are in active state,
    have correct version and validates the answers.
    @param proj_client: Project client object of a existing project.
    @param app: Deployed app object.
    @param external_id: URl of app API.
    @param answer: answer, app seek while deploying, body of the post call.
    @return: Deployed app object.
    """
    if answer is None:
        answers = get_defaut_question_answers(get_user_client(), external_id)
    else:
        answers = answer
    # validate app is active
    app = wait_for_app_to_active(proj_client, app.id)
    assert app.externalId == external_id, \
        "the version of the app is not correct"
    # check if associated workloads are active
    ns = app.targetNamespace
    # the last two query params of the external id are expected to be
    # "template=<name>" and "version=<version>"
    parameters = external_id.split('&')
    assert len(parameters) > 1, \
        "Incorrect list of parameters from catalog external ID"
    chart_prefix = parameters[len(parameters) - 2].split("=")[1]
    chart_suffix = parameters[len(parameters) - 1].split("=")[1]
    chart = chart_prefix + "-" + chart_suffix
    # NOTE(review): app_name is the same expression as chart_prefix
    app_name = parameters[len(parameters) - 2].split("=")[1]
    workloads = proj_client.list_workload(namespaceId=ns).data
    # For longhorn app, only active state of workloads is verified as longhorn
    # workloads do not have the field workloadLabels
    # For all other apps active state of workloads & chart version are verified
    if "longhorn" in app.externalId:
        print("validating the Longhorn app, it may take longer than others")
        for wl in workloads:
            wait_for_wl_to_active(proj_client, wl)
    else:
        for wl in workloads:
            print("Workload {} , state - {}".format(wl.id, wl.state))
            assert wl.state == "active"
            chart_deployed = get_chart_info(wl.workloadLabels)
            print("Chart detail of app - {}".format(chart_deployed))
            # '-' check is to make sure chart has both app name and version.
            if app_name in chart_deployed and '-' in chart_deployed:
                assert chart_deployed == chart, "the chart version is wrong"
    # Validate_app_answers
    assert len(answers.items() - app["answers"].items()) == 0, \
        "Answers are not same as the original catalog answers"
    return app
def get_chart_info(workloadlabels):
    """Return the chart label ("app_name-version") of a workload.

    Checks the legacy 'chart' label first, then 'helm.sh/chart', and
    falls back to an empty string when neither label is present.
    @param workloadlabels: workloadLabels object of a workload
    """
    labels = workloadlabels.keys()
    if "chart" in labels:
        return workloadlabels.chart
    if "helm.sh/chart" in labels:
        return workloadlabels["helm.sh/chart"]
    return ''
def create_user(client, cattle_auth_url=CATTLE_AUTH_URL):
    """Create a randomly-named user with the global "user" role and log in.

    @return: (user object, login token) tuple
    """
    user = client.create_user(username=random_name(),
                              password=USER_PASSWORD)
    # grant the built-in global "user" role so the account is usable
    client.create_global_role_binding(globalRoleId="user",
                                      subjectKind="User",
                                      userId=user.id)
    token = get_user_token(user.username, USER_PASSWORD, cattle_auth_url)
    return user, token
def get_user_token(username, password, cattle_auth_url=CATTLE_AUTH_URL):
    """Log *username* in against the auth endpoint and return the token."""
    payload = {
        'username': username,
        'password': password,
        'responseType': 'json',
    }
    resp = requests.post(cattle_auth_url, json=payload, verify=False)
    print(resp.json())
    return resp.json()["token"]
def rbac_get_user_by_role(role):
    """Return the pre-provisioned user object for *role*, or None."""
    entry = rbac_data["users"].get(role)
    return entry["user"] if entry is not None else None
def rbac_get_user_token_by_role(role):
    """Return the pre-provisioned API token for *role*, or None."""
    entry = rbac_data["users"].get(role)
    return entry["token"] if entry is not None else None
def rbac_get_kubeconfig_by_role(role):
    """Return the kubeconfig file path created for *role*, or None."""
    entry = rbac_data["users"].get(role)
    return entry["kubeconfig"] if entry is not None else None
def rbac_get_project():
    """Return the shared project created by rbac_prepare()."""
    return rbac_data["project"]
def rbac_get_namespace():
    """Return the shared namespace created by rbac_prepare()."""
    return rbac_data["namespace"]
def rbac_get_workload():
    """Return the shared workload created by rbac_prepare()."""
    return rbac_data["workload"]
def rbac_get_unshared_project():
    """Return the project none of the RBAC test users are assigned to."""
    return rbac_data["p_unshared"]
def rbac_get_unshared_ns():
    """Return the namespace of the unshared RBAC test project."""
    return rbac_data["ns_unshared"]
def rbac_get_unshared_workload():
    """Return the workload deployed in the unshared RBAC test project."""
    return rbac_data["wl_unshared"]
def rbac_prepare():
    """Provision everything the RBAC tests rely on.

    Creates a project + namespace with one workload, one user per role key
    in rbac_data["users"], assigns each user its role, writes a kubeconfig
    file per user, and finally creates a second ("unshared") project that
    none of these users can access.  All results are stored in rbac_data.
    """
    admin_client, cluster = get_global_admin_client_and_cluster()
    create_kubeconfig(cluster)
    # create a new project in the cluster
    project, ns = create_project_and_ns(ADMIN_TOKEN,
                                        cluster,
                                        random_test_name("p-test-rbac"))
    con = [{"name": "test1",
            "image": TEST_IMAGE}]
    name = random_test_name("default")
    p_client = get_project_client_for_token(project, ADMIN_TOKEN)
    workload = p_client.create_workload(name=name,
                                        containers=con,
                                        namespaceId=ns.id)
    validate_workload(p_client, workload, "deployment", ns.name)
    rbac_data["workload"] = workload
    rbac_data["project"] = project
    rbac_data["namespace"] = ns
    # create new users
    for key in rbac_data["users"]:
        user1, token1 = create_user(admin_client)
        rbac_data["users"][key]["user"] = user1
        rbac_data["users"][key]["token"] = token1
    # assign different role to each user
    assign_members_to_cluster(admin_client,
                              rbac_data["users"][CLUSTER_OWNER]["user"],
                              cluster,
                              CLUSTER_OWNER)
    assign_members_to_cluster(admin_client,
                              rbac_data["users"][CLUSTER_MEMBER]["user"],
                              cluster,
                              CLUSTER_MEMBER)
    assign_members_to_project(admin_client,
                              rbac_data["users"][PROJECT_MEMBER]["user"],
                              project,
                              PROJECT_MEMBER)
    assign_members_to_project(admin_client,
                              rbac_data["users"][PROJECT_OWNER]["user"],
                              project,
                              PROJECT_OWNER)
    assign_members_to_project(admin_client,
                              rbac_data["users"][PROJECT_READ_ONLY]["user"],
                              project,
                              PROJECT_READ_ONLY)
    # create kubeconfig files for each user
    for key in rbac_data["users"]:
        user_client = get_client_for_token(rbac_data["users"][key]["token"])
        _, user_cluster = get_user_client_and_cluster(user_client)
        rbac_data["users"][key]["kubeconfig"] = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            key + "_kubeconfig")
        create_kubeconfig(user_cluster, rbac_data["users"][key]["kubeconfig"])
    # create another project that none of the above users are assigned to
    p2, ns2 = create_project_and_ns(ADMIN_TOKEN,
                                    cluster,
                                    random_test_name("p-unshared"))
    name = random_test_name("default")
    p_client = get_project_client_for_token(p2, ADMIN_TOKEN)
    workload = p_client.create_workload(name=name,
                                        containers=con,
                                        namespaceId=ns2.id)
    validate_workload(p_client, workload, "deployment", ns2.name)
    rbac_data["p_unshared"] = p2
    rbac_data["ns_unshared"] = ns2
    rbac_data["wl_unshared"] = workload
def rbac_cleanup():
    """Remove the users, projects and workload created for the RBAC tests."""
    try:
        client = get_admin_client()
    except Exception:
        print("Not able to get admin client. Not performing RBAC cleanup")
        return
    # best-effort user deletion: ignore users that are already gone
    for entry in rbac_data["users"].values():
        try:
            client.delete(entry["user"])
        except Exception:
            pass
    for resource_key in ("project", "wl_unshared", "p_unshared"):
        client.delete(rbac_data[resource_key])
def check_condition(condition_type, status):
    """Build a predicate testing whether a resource carries a condition of
    *condition_type* with the given *status*.

    The returned callable yields False for resources without conditions.
    """
    def _find_condition(resource):
        conditions = getattr(resource, "conditions", None)
        if conditions is None:
            return False
        return any(cond.type == condition_type and cond.status == status
                   for cond in conditions)
    return _find_condition
def create_catalog_external_id(catalog_name, template, version,
                               project_cluster_id=None, catalog_type=None):
    """Compose the "catalog://..." external id used when deploying an app.

    A global catalog (catalog_type None) uses only the catalog name;
    project- and cluster-scoped catalogs are prefixed with their owner id.
    Returns None for any other catalog_type.
    """
    if catalog_type is None:
        return "catalog://?catalog={}&template={}&version={}".format(
            catalog_name, template, version)
    if catalog_type in ("project", "cluster"):
        return ("catalog://?catalog={}/{}&type={}Catalog"
                "&template={}&version={}").format(
            project_cluster_id, catalog_name, catalog_type,
            template, version)
def wait_for_catalog_active(client, catalog, timeout=DEFAULT_CATALOG_TIMEOUT):
    """Poll the named catalog until its state is "active".

    Raises AssertionError when the catalog cannot be found or does not
    become active within *timeout* seconds; returns the fresh catalog.
    """
    # give the controller a moment to register the new catalog
    time.sleep(2)
    catalog_data = client.list_catalog(name=catalog.name)
    print(catalog_data)
    start = time.time()
    assert len(catalog_data["data"]) >= 1, "Cannot find catalog"
    catalog = catalog_data["data"][0]
    while catalog.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        catalog_data = client.list_catalog(name=catalog.name)
        assert len(catalog_data["data"]) >= 1
        catalog = catalog_data["data"][0]
    return catalog
def readDataFile(data_dir, name):
    """Return the full text of the file *name* under *data_dir*.

    Asserts that the path is an existing regular file before reading.
    """
    fname = os.path.join(data_dir, name)
    print("File: " + fname)
    assert os.path.isfile(fname)
    with open(fname) as handle:
        return handle.read()
def set_url_password_token(rancher_url, server_url=None, version=""):
    """Returns a ManagementContext for the default global admin user.

    Logs in as the local "admin" user (2.6+/master ships a different
    bootstrap password), resets the admin password to ADMIN_PASSWORD and
    configures the server-url setting.

    @param rancher_url: base URL of the rancher server
    @param server_url: value for the server-url setting; defaults to
        *rancher_url* when falsy
    @param version: rancher version string used to pick the bootstrap
        password
    @return: the admin API token
    """
    auth_url = \
        rancher_url + "/v3-public/localproviders/local?action=login"
    rpassword = 'admin'
    print(auth_url)
    if version.find("master") > -1 or version.find("2.6") > -1:
        rpassword = ADMIN_PASSWORD
        print("on 2.6 or later")
    retries = 5
    r = None
    # retry the login in case the server is still coming up;
    # range(1, retries + 1) so all 5 attempts are actually made
    # (the old range(1, retries) only tried 4 times)
    for attempt in range(1, retries + 1):
        try:
            r = requests.post(auth_url, json={
                'username': 'admin',
                'password': rpassword,
                'responseType': 'json',
            }, verify=False)
        except requests.exceptions.RequestException:
            print("password request failed. Retry attempt: ",
                  "{} of {}".format(attempt, retries))
            time.sleep(2)
        else:
            break
    if r is None:
        # previously this fell through and crashed on an unbound local
        raise AssertionError(
            "Failed to reach {} after {} attempts".format(auth_url, retries))
    print(r.json())
    token = r.json()['token']
    print(token)
    # Change admin password
    client = rancher.Client(url=rancher_url + "/v3",
                            token=token, verify=False)
    admin_user = client.list_user(username="admin").data
    admin_user[0].setpassword(newPassword=ADMIN_PASSWORD)
    # Set server-url settings
    serverurl = client.list_setting(name="server-url").data
    if server_url:
        client.update(serverurl[0], value=server_url)
    else:
        client.update(serverurl[0], value=rancher_url)
    return token
def validate_create_catalog(token, catalog_name, branch, url, permission=True):
    """
    This function validates if the user has the permission to create a
    global catalog.
    :param token: user's token
    :param catalog_name: the name of the catalog
    :param branch: the branch of the git repo
    :param url: the url of the git repo
    :param permission: boolean value, True if the user can create catalog
    :return: the catalog object or None
    """
    client = get_client_for_token(token)
    if not permission:
        # the API must reject the request with 403 Forbidden
        with pytest.raises(ApiError) as e:
            client.create_catalog(name=catalog_name,
                                  branch=branch,
                                  url=url)
        error_msg = "user with no permission should receive 403: Forbidden"
        error_code = e.value.error.code
        error_status = e.value.error.status
        assert error_status == 403 and error_code == 'Forbidden', error_msg
        return None
    else:
        try:
            client.create_catalog(name=catalog_name,
                                  branch=branch,
                                  url=url)
        except ApiError as e:
            assert False, "user with permission should receive no exception:" \
                          + str(e.error.status) + " " + e.error.code
    # re-list to return the server-side representation of the new catalog
    catalog_list = client.list_catalog(name=catalog_name).data
    assert len(catalog_list) == 1
    return catalog_list[0]
def generate_template_global_role(name, new_user_default=False, template=None):
    """Build the request body for creating a global role.

    Starts from *template* (TEMPLATE_MANAGE_CATALOG when omitted) without
    mutating the caller's dict, then fills in the newUserDefault flag and
    a name (random when *name* is None).
    """
    base = TEMPLATE_MANAGE_CATALOG if template is None else template
    result = deepcopy(base)
    result["newUserDefault"] = "true" if new_user_default else "false"
    result["name"] = name if name is not None else random_name()
    return result
def wait_for_backup_to_active(cluster, backupname,
                              timeout=DEFAULT_TIMEOUT):
    """Poll the named etcd backup until its state is "active".

    Raises AssertionError on timeout; returns the final state string.
    """
    deadline = time.time() + timeout
    while True:
        backups = cluster.etcdBackups(name=backupname)
        assert len(backups) == 1
        state = backups['data'][0]['state']
        if state == "active":
            break
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
    print("BACKUP STATE")
    print(state)
    return state
def wait_for_backup_to_delete(cluster, backupname,
                              timeout=DEFAULT_TIMEOUT):
    """Poll until the named etcd backup disappears, or raise on timeout."""
    deadline = time.time() + timeout
    while len(cluster.etcdBackups(name=backupname)) == 1:
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for backup to be deleted")
        time.sleep(.5)
def validate_backup_create(namespace, backup_info, backup_mode=None):
    """Deploy a daemonSet + ingress, take an etcd backup and verify it.

    @param namespace: dict holding p_client/ns/cluster (and 'nodes' for
        filesystem mode); this function also stores 'host' into it
    @param backup_info: dict populated with workload/backupname/backup_id/
        etcdbackupdata (and backupfilename for s3 mode)
    @param backup_mode: None, "s3" or "filesystem" — selects where the
        snapshot file is checked for existence
    @return: (namespace, backup_info) with the collected backup details
    """
    p_client = namespace["p_client"]
    ns = namespace["ns"]
    cluster = namespace["cluster"]
    name = random_test_name("default")
    # etcd snapshots are an RKE feature only
    if not hasattr(cluster, 'rancherKubernetesEngineConfig'):
        assert False, "Cluster is not of type RKE"
    con = [{"name": "test1",
            "image": TEST_IMAGE}]
    backup_info["workload"] = p_client.create_workload(name=name,
                                                       containers=con,
                                                       namespaceId=ns.id,
                                                       daemonSetConfig={})
    validate_workload(p_client, backup_info["workload"], "daemonSet", ns.name,
                      len(get_schedulable_nodes(cluster)))
    host = "test" + str(random_int(10000, 99999)) + ".com"
    namespace["host"] = host
    path = "/name.html"
    rule = {"host": host,
            "paths": [{"workloadIds": [backup_info["workload"].id],
                       "targetPort": TEST_IMAGE_PORT}]}
    p_client.create_ingress(name=name,
                            namespaceId=ns.id,
                            rules=[rule])
    validate_ingress(p_client, cluster, [backup_info["workload"]], host, path)
    # Perform Backup
    backup = cluster.backupEtcd()
    backup_info["backupname"] = backup['metadata']['name']
    wait_for_backup_to_active(cluster, backup_info["backupname"])
    # Get all the backup info
    etcdbackups = cluster.etcdBackups(name=backup_info["backupname"])
    backup_info["etcdbackupdata"] = etcdbackups['data']
    backup_info["backup_id"] = backup_info["etcdbackupdata"][0]['id']
    if backup_mode == "s3":
        backupfileurl = backup_info["etcdbackupdata"][0]['filename']
        # Check the backup filename exists in S3
        parseurl = urlparse(backupfileurl)
        backup_info["backupfilename"] = os.path.basename(parseurl.path)
        backup_found = AmazonWebServices().s3_backup_check(
            backup_info["backupfilename"])
        assert backup_found, "the backup was not found in the S3 bucket"
    elif backup_mode == 'filesystem':
        # every etcd node must hold the snapshot file locally
        for node in namespace['nodes']:
            if 'etcd' not in node.roles:
                continue
            get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots'
            response = node.execute_command(get_filesystem_snapshots)[0]
            assert backup_info["etcdbackupdata"][0]['filename'] in response, \
                "The filename doesn't match any of the files locally"
    return namespace, backup_info
def validate_backup_restore(namespace, backup_info):
    """Restore the cluster from an etcd backup and verify its state.

    Creates a marker workload AFTER the snapshot, restores, then checks:
    the pre-snapshot ingress still works and the post-snapshot workload is
    gone (proving the restore rolled state back).
    @return: (namespace, backup_info) unchanged
    """
    p_client = namespace["p_client"]
    ns = namespace["ns"]
    client = get_user_client()
    cluster = namespace["cluster"]
    name = random_test_name("default")
    host = namespace["host"]
    path = "/name.html"
    con = [{"name": "test1",
            "image": TEST_IMAGE}]
    # Create workload after backup
    testworkload = p_client.create_workload(name=name,
                                            containers=con,
                                            namespaceId=ns.id)
    validate_workload(p_client, testworkload, "deployment", ns.name)
    # Perform Restore
    cluster.restoreFromEtcdBackup(etcdBackupId=backup_info["backup_id"])
    # After restore, validate cluster
    validate_cluster(client, cluster, intermediate_state="updating",
                     check_intermediate_state=True,
                     skipIngresscheck=False)
    # Verify the ingress created before taking the snapshot
    validate_ingress(p_client, cluster, [backup_info["workload"]], host, path)
    # Verify the workload created after getting a snapshot does not exist
    # after restore
    workload_list = p_client.list_workload(uuid=testworkload.uuid).data
    print(len(workload_list))
    assert len(workload_list) == 0, "workload shouldn't exist after restore"
    return namespace, backup_info
def validate_backup_delete(namespace, backup_info, backup_mode=None):
    """Delete the etcd backup and verify its artifacts are gone.

    For "s3" mode the S3 object must be removed as well; for "filesystem"
    mode the snapshot file must be gone from every etcd node.
    """
    client = get_user_client()
    cluster = namespace["cluster"]
    client.delete(
        cluster.etcdBackups(name=backup_info["backupname"])['data'][0]
    )
    wait_for_backup_to_delete(cluster, backup_info["backupname"])
    assert len(cluster.etcdBackups(name=backup_info["backupname"])) == 0, \
        "backup shouldn't be listed in the Cluster backups"
    if backup_mode == "s3":
        # Check the backup reference is deleted in Rancher and S3
        backup_found = AmazonWebServices().s3_backup_check(
            backup_info["backupfilename"])
        assert_message = "The backup should't exist in the S3 bucket"
        assert backup_found is False, assert_message
    elif backup_mode == 'filesystem':
        for node in namespace['nodes']:
            if 'etcd' not in node.roles:
                continue
            get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots'
            response = node.execute_command(get_filesystem_snapshots)[0]
            filename = backup_info["etcdbackupdata"][0]['filename']
            assert filename not in response, \
                "The file still exist in the filesystem"
def apply_crd(ns, file, kubectl_context):
    """kubectl-apply *file* in namespace *ns*; return decoded output."""
    cmd = 'apply -f ' + file + ' -n ' + ns.name
    return execute_kubectl_cmd(cmd, json_out=False, stderr=True,
                               kubeconfig=kubectl_context).decode("ascii")
def get_crd(ns, crd_name, kubectl_context):
    """kubectl-get *crd_name* in namespace *ns*; return decoded output."""
    cmd = 'get ' + crd_name + ' -n ' + ns.name
    return execute_kubectl_cmd(cmd, json_out=False, stderr=True,
                               kubeconfig=kubectl_context).decode("ascii")
def delete_crd(ns, file, kubectl_context):
    """kubectl-delete *file* in namespace *ns*; return decoded output."""
    cmd = 'delete -f ' + file + ' -n ' + ns.name
    return execute_kubectl_cmd(cmd, json_out=False, stderr=True,
                               kubeconfig=kubectl_context).decode("ascii")
def prepare_auth_data():
    """Load the auth provider's nested-group fixture into *nested_group*.

    Reads resource/<provider>.json next to this file and splits its
    "nested_group_info" section into the user list and the group map.
    """
    resource_dir = os.path.dirname(os.path.realpath(__file__)) + "/resource"
    path = os.path.join(resource_dir, AUTH_PROVIDER.lower() + ".json")
    with open(path) as reader:
        raw = json.load(reader).get("nested_group_info")
    nested_group["auth_info"] = raw.copy()
    nested_group["users"] = raw.get("users")
    raw.pop("users")
    nested_group["group_dic"] = raw
    nested_group["groups"] = raw.keys()
def is_nested():
    """ check if the provided groups are nested groups,
    return True if at least one of the groups contains other groups
    """
    group_dic = nested_group["group_dic"]
    empty_entries = sum(1 for info in group_dic.values() if len(info) == 0)
    # nested as long as not every entry is empty
    return empty_entries < len(group_dic)
def get_group(nested=False):
    """ return a group or a nested group"""
    if nested:
        # return the name of a group that contains at least one other group
        for item in nested_group["groups"]:
            # NOTE(review): 'pass' makes this check a no-op — 'continue'
            # may have been intended here; confirm before changing
            if len(nested_group["group_dic"].get(item).get("users")) == 0:
                pass
            sub_groups = nested_group["group_dic"].get(item).get("groups")
            # NOTE(review): same no-op 'pass' pattern as above
            if len(sub_groups) == 0:
                pass
            for g in sub_groups:
                if len(nested_group["group_dic"].get(g).get("users")) > 0:
                    return item
        assert False, "cannot find any valid nested group"
    else:
        # return the name of a group that has at least one direct user
        for group in nested_group["groups"]:
            if len(nested_group["group_dic"].get(group).get("users")) > 0:
                return group
        assert False, "cannot find any valid non-nested group"
def get_user_by_group(group, nested=False):
    """Return the list of users in the group.

    If *nested* is False, return the direct users of the group; otherwise
    return all users, including those reachable through nested groups.
    """
    def get_user_in_nested_group(group, source):
        # empty group name contributes no users (recursion base case)
        if group == "":
            return []
        # copy the fixture's user list: appending to the shared list (as
        # the previous version did) permanently mutated
        # nested_group["group_dic"] and inflated later lookups
        users = list(source["group_dic"].get(group).get("users"))
        for sub_group in source["group_dic"].get(group).get("groups"):
            for user in get_user_in_nested_group(sub_group, source):
                if user not in users:
                    users.append(user)
        return users
    if nested:
        users = get_user_in_nested_group(group, nested_group)
        assert len(users) > 0, "no user in the group"
    else:
        users = nested_group["group_dic"].get(group).get("users")
        assert users is not None, "no user in the group"
    print("group: {}, users: {}".format(group, users))
    return users
def get_a_group_and_a_user_not_in_it(nested=False):
    """Return (group, user) where the user is not a member of the group."""
    candidates = nested_group["users"]
    for group in nested_group["groups"]:
        members = get_user_by_group(group, nested)
        outsider = next((u for u in candidates if u not in members), None)
        if outsider is not None:
            print("group: {}, user not in it: {}".format(group, outsider))
            return group, outsider
    assert False, "cannot find a group and a user not in it"
def get_group_principal_id(group_name, token=ADMIN_TOKEN, expected_status=200):
    """Look up the group's principal id from the auth provider.

    Asserts the API answers with *expected_status* and returns the id of
    the first matching principal.
    """
    headers = {'Authorization': 'Bearer ' + token}
    payload = {'name': group_name,
               'principalType': 'group',
               'responseType': 'json'}
    r = requests.post(CATTLE_AUTH_PRINCIPAL_URL, json=payload,
                      verify=False, headers=headers)
    assert r.status_code == expected_status
    return r.json()['data'][0]["id"]
def login_as_auth_user(username, password, login_url=LOGIN_AS_AUTH_USER_URL):
    """Log in via the auth provider endpoint.

    @return: the JSON response body (contains the user token)
    """
    body = {'username': username,
            'password': password,
            'responseType': 'json'}
    r = requests.post(login_url, json=body, verify=False)
    assert r.status_code in [200, 201]
    return r.json()
def validate_service_discovery(workload, scale,
                               p_client=None, ns=None, testclient_pods=None):
    """Check cluster DNS resolves the workload's service to all pod IPs.

    Asserts the workload runs exactly *scale* pods, then verifies from
    each test-client pod that <workload>.<ns>.svc.cluster.local resolves
    to the full set of pod IPs.
    """
    pods = p_client.list_pod(workloadId=workload["id"]).data
    assert len(pods) == scale
    expected_ips = [pod["status"]["podIp"] for pod in pods]
    fqdn = '{0}.{1}.svc.cluster.local'.format(workload.name, ns.id)
    for client_pod in testclient_pods:
        validate_dns_entry(client_pod, fqdn, expected_ips)
def auth_get_project():
    """Return the shared project created for the auth tests."""
    return auth_rbac_data["project"]
def auth_get_namespace():
    """Return the shared namespace created for the auth tests."""
    return auth_rbac_data["namespace"]
def auth_get_user_token(username):
    """Return the cached token for *username*, or None if unknown."""
    user = auth_rbac_data["users"].get(username)
    return user.token if user is not None else None
def add_role_to_user(user, role):
    """this function adds a user from the auth provider to given cluster

    Lazily creates the shared auth test project/namespace on first use,
    then binds *user* either at project level (for project roles) or at
    cluster level, and records the user in auth_rbac_data["users"].
    """
    admin_client, cluster = get_global_admin_client_and_cluster()
    project = auth_get_project()
    ns = auth_get_namespace()
    if not (project and ns):
        project, ns = create_project_and_ns(ADMIN_TOKEN, cluster,
                                            random_test_name("p-test-auth"))
        auth_rbac_data["project"] = project
        auth_rbac_data["namespace"] = ns
    if role in [PROJECT_OWNER, PROJECT_MEMBER, PROJECT_READ_ONLY]:
        assign_members_to_project(admin_client, user, project, role)
    else:
        assign_members_to_cluster(admin_client, user, cluster, role)
    auth_rbac_data["users"][user.username] = user
def auth_resource_cleanup():
    """ remove the project and namespace created for the AUTH tests"""
    client, cluster = get_global_admin_client_and_cluster()
    client.delete(auth_rbac_data["project"])
    auth_rbac_data["project"] = None
    # NOTE(review): clears key "ns" but the namespace is stored under
    # "namespace" elsewhere (see add_role_to_user) — confirm intended
    auth_rbac_data["ns"] = None
    # drop every cluster role binding granted to the auth test users
    for username, user in auth_rbac_data["users"].items():
        user_crtbs = client.list_cluster_role_template_binding(userId=user.id)
        for crtb in user_crtbs:
            client.delete(crtb)
class WebsocketLogParse:
    """
    the class is used for receiving and parsing the message
    received from the websocket
    """
    def __init__(self):
        # lock guards _last_message, which is appended to from a
        # background receiver thread and read/reset from the test thread
        self.lock = Lock()
        self._last_message = ''

    def receiver(self, socket, skip, b64=True):
        """
        run a thread to receive and save the message from the web socket
        :param socket: the socket connection
        :param skip: if True skip the first char of the received message
        :param b64: if True base64-decode each received frame
        """
        while True and socket.connected:
            try:
                data = socket.recv()
                # the message from the kubectl contains an extra char
                if skip:
                    data = data[1:]
                # NOTE(review): this length check is a no-op ('pass');
                # short frames are still processed below — confirm intent
                if len(data) < 5:
                    pass
                if b64:
                    data = base64.b64decode(data).decode()
                self.lock.acquire()
                self._last_message += data
                self.lock.release()
            except websocket.WebSocketConnectionClosedException:
                print("Connection closed")
                break
            except websocket.WebSocketProtocolException as wpe:
                print("Error: {}".format(wpe))
                break

    @staticmethod
    def start_thread(target, args):
        """Start *target* as a daemon thread and give it a second to spin up."""
        thread = Thread(target=target, args=args)
        thread.daemon = True
        thread.start()
        time.sleep(1)

    @property
    def last_message(self):
        """Accumulated text received so far (unsynchronized read)."""
        return self._last_message

    @last_message.setter
    def last_message(self, value):
        # setter is synchronized so tests can reset the buffer while the
        # receiver thread is appending
        self.lock.acquire()
        self._last_message = value
        self.lock.release()
def wait_for_cluster_delete(client, cluster_name, timeout=DEFAULT_TIMEOUT):
    """Poll until no cluster named *cluster_name* remains, or raise."""
    deadline = time.time() + timeout
    while len(client.list_cluster(name=cluster_name).data) != 0:
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for cluster to get deleted")
        time.sleep(.5)
def create_connection(url, subprotocols):
    """Open a websocket to *url*, authenticating via the USER_TOKEN cookie.

    :param url: the url to connect to
    :param subprotocols: the list of subprotocols
    :return: the connected websocket object
    """
    conn = websocket.create_connection(
        url=url,
        sslopt={"cert_reqs": ssl.CERT_NONE},
        subprotocols=subprotocols,
        timeout=10,
        cookie="R_SESS=" + USER_TOKEN,
    )
    assert conn.connected, "failed to build the websocket"
    return conn
def wait_for_hpa_to_active(client, hpa, timeout=DEFAULT_TIMEOUT):
    """Poll the HPA until it reaches "active"; return the fresh object."""
    deadline = time.time() + timeout
    matches = client.list_horizontalPodAutoscaler(uuid=hpa.uuid).data
    assert len(matches) == 1
    current = matches[0]
    while current.state != "active":
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        matches = client.list_horizontalPodAutoscaler(uuid=current.uuid).data
        assert len(matches) == 1
        current = matches[0]
    return current
def create_pv_pvc(client, ns, nfs_ip, cluster_client):
    """Create an NFS-backed PV plus a bound 10Gi PVC in namespace *ns*.

    @param client: project client used to create the PVC
    @param ns: namespace the PVC is created in
    @param nfs_ip: IP of the NFS server backing the PV
    @param cluster_client: cluster-scoped client used to create the PV
    @return: (pv_object, pvc_object) with the PVC already bound
    """
    pv_object = create_pv(cluster_client, nfs_ip)
    pvc_name = random_test_name("pvc")
    pvc_config = {"accessModes": ["ReadWriteOnce"],
                  "name": pvc_name,
                  "volumeId": pv_object.id,
                  "namespaceId": ns.id,
                  "storageClassId": "",
                  "resources": {"requests": {"storage": "10Gi"}}
                  }
    pvc_object = client.create_persistent_volume_claim(pvc_config)
    pvc_object = wait_for_pvc_to_be_bound(client, pvc_object, timeout=300)
    return pv_object, pvc_object
def create_pv(client, nfs_ip):
    """Create an NFS-backed PersistentVolume (50Gi, ReadWriteOnce).

    Asserts the created object echoes the requested capacity and type.
    """
    spec = {
        "type": "persistentVolume",
        "accessModes": ["ReadWriteOnce"],
        "name": random_test_name("pv"),
        "nfs": {
            "readOnly": "false",
            "type": "nfsvolumesource",
            "path": NFS_SERVER_MOUNT_PATH,
            "server": nfs_ip,
        },
        "capacity": {"storage": "50Gi"},
    }
    pv_object = client.create_persistent_volume(spec)
    assert pv_object['capacity']['storage'] == '50Gi'
    assert pv_object['type'] == 'persistentVolume'
    return pv_object
def delete_resource_in_AWS_by_prefix(resource_prefix):
    """
    :param resource_prefix: the prefix of resource name
    :return: None

    Best-effort teardown of every AWS resource the tests create under
    *resource_prefix*: EC2 nodes, NLBs and their target groups, the RDS
    database, and the route53 records.
    """
    # delete nodes of both local and custom clusters
    node_filter = [{
        'Name': 'tag:Name',
        'Values': [resource_prefix + "-*"]
    }]
    nodes = AmazonWebServices().get_nodes(filters=node_filter)
    if nodes is None:
        print("deleting the following instances: None")
    else:
        print("deleting the following instances: {}"
              .format([node.public_ip_address for node in nodes]))
        AmazonWebServices().delete_nodes(nodes)
    # delete load balancer and target groups
    tg_list = []
    lb_list = []
    lb_names = [resource_prefix + '-nlb',
                resource_prefix + '-k3s-nlb',
                resource_prefix + '-internal-nlb']
    for name in lb_names:
        lb_arn = AmazonWebServices().get_lb(name)
        if lb_arn is not None:
            lb_list.append(lb_arn)
            # collect the target groups before the LB itself is removed
            res = AmazonWebServices().get_target_groups(lb_arn)
            tg_list.extend(res)
    print("deleting the following load balancers: {}".format(lb_list))
    print("deleting the following target groups: {}".format(tg_list))
    for lb in lb_list:
        AmazonWebServices().delete_lb(lb)
    for tg in tg_list:
        AmazonWebServices().delete_target_group(tg)
    # delete rds
    db_name = resource_prefix + "-db"
    print("deleting the database (if it exists): {}".format(db_name))
    AmazonWebServices().delete_db(db_name)
    # delete the route 53 record
    route53_names = [resource_prefix + ".qa.rancher.space.",
                     resource_prefix + "-internal.qa.rancher.space."]
    for name in route53_names:
        print("deleting the route53 record (if it exists): {}".format(name))
        AmazonWebServices().delete_route_53_record(name)
    print("deletion is done")
    return None
def configure_cis_requirements(aws_nodes, profile, node_roles, client,
                               cluster):
    """Apply CIS benchmark host prerequisites and register the nodes.

    Per node: sets the sysctls the chosen CIS profile requires, creates
    the etcd user (and, for 1.5, its fixed-gid group) on etcd nodes, then
    runs the cluster registration command.  For the 1.5 profile it also
    applies an allow-all network policy in every namespace and patches
    default service accounts after the cluster is up.
    @return: the active cluster object
    """
    i = 0
    if profile == 'rke-cis-1.4':
        for aws_node in aws_nodes:
            aws_node.execute_command("sudo sysctl -w vm.overcommit_memory=1")
            aws_node.execute_command("sudo sysctl -w kernel.panic=10")
            aws_node.execute_command("sudo sysctl -w kernel.panic_on_oops=1")
            if node_roles[i] == ["etcd"]:
                aws_node.execute_command("sudo useradd etcd")
            docker_run_cmd = \
                get_custom_host_registration_cmd(client,
                                                 cluster,
                                                 node_roles[i],
                                                 aws_node)
            aws_node.execute_command(docker_run_cmd)
            i += 1
    elif profile == 'rke-cis-1.5':
        for aws_node in aws_nodes:
            aws_node.execute_command("sudo sysctl -w vm.overcommit_memory=1")
            aws_node.execute_command("sudo sysctl -w kernel.panic=10")
            aws_node.execute_command("sudo sysctl -w vm.panic_on_oom=0")
            aws_node.execute_command("sudo sysctl -w kernel.panic_on_oops=1")
            aws_node.execute_command("sudo sysctl -w "
                                     "kernel.keys.root_maxbytes=25000000")
            if node_roles[i] == ["etcd"]:
                # CIS 1.5 requires the fixed uid/gid 52034 for etcd
                aws_node.execute_command("sudo groupadd -g 52034 etcd")
                aws_node.execute_command("sudo useradd -u 52034 -g 52034 etcd")
            docker_run_cmd = \
                get_custom_host_registration_cmd(client,
                                                 cluster,
                                                 node_roles[i],
                                                 aws_node)
            aws_node.execute_command(docker_run_cmd)
            i += 1
    time.sleep(5)
    cluster = validate_cluster_state(client, cluster)
    # the workloads under System project to get active
    time.sleep(20)
    if profile == 'rke-cis-1.5':
        create_kubeconfig(cluster)
        network_policy_file = DATA_SUBDIR + "/default-allow-all.yaml"
        account_update_file = DATA_SUBDIR + "/account_update.yaml"
        items = execute_kubectl_cmd("get namespaces -A")["items"]
        all_ns = [item["metadata"]["name"] for item in items]
        for ns in all_ns:
            execute_kubectl_cmd("apply -f {0} -n {1}".
                                format(network_policy_file, ns))
        namespace = ["default", "kube-system"]
        for ns in namespace:
            execute_kubectl_cmd('patch serviceaccount default'
                                ' -n {0} -p "$(cat {1})"'.
                                format(ns, account_update_file))
    return cluster
def get_node_details(cluster, client):
    """
    lists the nodes from the cluster. This cluster has only 1 node.
    :return: client and node object
    """
    create_kubeconfig(cluster)
    nodes = client.list_node(clusterId=cluster.id).data
    assert len(nodes) > 0
    # NOTE(review): if no worker node exists, the loop falls through and
    # the LAST node in the list is returned — confirm intended
    for node in nodes:
        if node.worker:
            break
    return client, node
def create_service_account_configfile():
    """Create a service account and write a kubeconfig for it.

    Creates a randomly-named SA in the default namespace, extracts the CA
    and (base64-decoded) token from its secret, points the config at the
    first control-plane node's public endpoint, and writes <name>.yaml
    next to this file.
    :return: the generated name (also the kubeconfig file stem)
    """
    client, cluster = get_user_client_and_cluster()
    create_kubeconfig(cluster)
    name = random_name()
    # create a service account
    execute_kubectl_cmd(cmd="create sa {}".format(name), json_out=False)
    # get the ca and token
    res = execute_kubectl_cmd(cmd="get secret -o name", json_out=False)
    secret_name = ""
    for item in res.split("\n"):
        if name in item:
            secret_name = item.split("/")[1]
            break
    res = execute_kubectl_cmd(cmd="get secret {}".format(secret_name))
    ca = res["data"]["ca.crt"]
    token = res["data"]["token"]
    token = base64.b64decode(token).decode()
    server = None
    nodes = client.list_node(clusterId=cluster.id).data
    for node in nodes:
        if node.controlPlane:
            server = "https://" + node.externalIpAddress + ":6443"
            break
    assert server is not None, 'failed to get the public ip of control plane'
    config = """
apiVersion: v1
kind: Config
clusters:
- name: test-cluster
  cluster:
    server: {server}
    certificate-authority-data: {ca}
contexts:
- name: default-context
  context:
    cluster: test-cluster
    namespace: default
    user: test-user
current-context: default-context
users:
- name: test-user
  user:
    token: {token}
"""
    config = config.format(server=server, ca=ca, token=token)
    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               name + ".yaml")
    with open(config_file, "w") as file:
        file.write(config)
    return name
def rbac_test_file_reader(file_path=None):
    """
    This method generates test cases from an input file and return the result
    that can be used to parametrize pytest cases
    :param file_path: the path to the JSON file for test cases
    :return: a list of tuples of
    (cluster_role, command, authorization, service account name)
    """
    if test_rbac_v2 == "False":
        return []
    if file_path is None:
        pytest.fail("no file is provided")
    with open(file_path) as fp:
        raw = fp.read().replace("{resource_root}", DATA_SUBDIR)
    test_cases = json.loads(raw)
    params = []
    for cluster_role, checks in test_cases.items():
        # One dedicated service account (and kubeconfig) per cluster role.
        sa_name = create_service_account_configfile()
        # Bind the service account to the cluster role under test.
        bind_cmd = ("create clusterrolebinding {} "
                    "--clusterrole {} "
                    "--serviceaccount {}").format(sa_name, cluster_role,
                                                  "default:" + sa_name)
        execute_kubectl_cmd(bind_cmd, json_out=False)
        params.extend((cluster_role, command, True, sa_name)
                      for command in checks["should_pass"])
        params.extend((cluster_role, command, False, sa_name)
                      for command in checks["should_fail"])
    return params
def validate_cluster_role_rbac(cluster_role, command, authorization, name):
    """
    This methods creates a new service account to validate the permissions
    both before and after creating the cluster role binding between the
    service account and the cluster role
    :param cluster_role: the cluster role
    :param command: the kubectl command to run
    :param authorization: if the service account has the permission: True/False
    :param name: the name of the service account, cluster role binding, and the
    kubeconfig file
    """
    kubeconfig_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), name + ".yaml")
    output = execute_kubectl_cmd(command,
                                 json_out=False,
                                 kubeconfig=kubeconfig_path,
                                 stderr=True).decode('utf_8')
    # kubectl reports missing RBAC permission on stderr with this marker.
    forbidden = "Error from server (Forbidden)" in output
    if authorization:
        assert not forbidden, \
            "{} should have the authorization to run {}".format(cluster_role,
                                                                command)
    else:
        assert forbidden, \
            "{} should NOT have the authorization to run {}".format(
                cluster_role, command)
def wait_until_app_v2_deployed(client, app_name, timeout=DEFAULT_APP_V2_TIMEOUT):
    """
    Poll the installed v2 apps and wait for "app_name" to reach the
    "deployed" state.

    :param client: cluster client for the user
    :param app_name: app which is being installed
    :param timeout: seconds to wait for the app to come to Deployed state
    :return: the list of app names seen up to and including "app_name"
    :raises AssertionError: if the app is not deployed within the timeout
    """
    start = time.time()
    while True:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to Deployed")
        time.sleep(.5)
        # Fix: use a distinct name for the API response. The original code
        # reused `app` for both the response dict and the loop variable,
        # shadowing the response and forcing a confusing re-fetch at the
        # bottom of the loop.
        response = client.list_catalog_cattle_io_app()
        app_list = []
        for app in response["data"]:
            app_list.append(app["metadata"]["name"])
            if app["metadata"]["name"] == app_name:
                if app["status"]["summary"]["state"] == "deployed":
                    return app_list
def wait_until_app_v2_uninstall(client, app_name, timeout=DEFAULT_APP_V2_TIMEOUT):
    """
    Poll the installed v2 apps until "app_name" is no longer present,
    which indicates the app has been uninstalled successfully.

    :param client: cluster client for the user
    :param app_name: app which is being uninstalled
    :param timeout: seconds to wait for the app to be uninstalled
    :return: the list of remaining installed app names
    :raises AssertionError: if the app is still present after the timeout
    """
    start = time.time()
    while True:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to Uninstalled")
        time.sleep(.5)
        # Fix: keep the API response in its own variable instead of
        # shadowing it with the loop variable as the original code did.
        response = client.list_catalog_cattle_io_app()
        app_list = [entry["metadata"]["name"] for entry in response["data"]]
        if app_name not in app_list:
            return app_list
def check_v2_app_and_uninstall(client, chart_name):
    """
    If "chart_name" is installed as a v2 app, trigger its uninstall action
    and wait until it disappears from the app listing.

    :param client: cluster client for the user
    :param chart_name: name of the installed chart/app to remove
    """
    # Fix: the original reused `app` for both the listing response and the
    # loop variable; keep them separate. The unused `response` assignment
    # from client.action() is also dropped.
    listing = client.list_catalog_cattle_io_app()
    for app in listing["data"]:
        if app["metadata"]["name"] == chart_name:
            client.action(obj=app, action_name="uninstall")
            app_list = wait_until_app_v2_uninstall(client, chart_name)
            assert chart_name not in app_list, \
                "App has not uninstalled"
def update_and_validate_kdm(kdm_url, admin_token=ADMIN_TOKEN,
                            rancher_api_url=CATTLE_API_URL):
    """Point Rancher's KDM (Kubernetes Driver Metadata) at a new URL.

    Updates the ``rke-metadata-config`` setting via the Rancher API,
    verifies the setting round-trips, then triggers a kontainer-driver
    refresh so the new metadata is loaded.

    :param kdm_url: URL of the KDM data to use
    :param admin_token: bearer token of an admin user
    :param rancher_api_url: base URL of the Rancher v3 API
    :raises AssertionError: if either API call fails or the stored
        setting does not match what was sent
    """
    print("Updating KDM to use {}".format(kdm_url))
    header = {'Authorization': 'Bearer ' + admin_token}
    api_url = rancher_api_url + "/settings/rke-metadata-config"
    # The setting's value is itself a JSON document, stored as a string.
    kdm_json = {
        "name": "rke-metadata-config",
        "value": json.dumps({
            "refresh-interval-minutes": "1440",
            "url": kdm_url
        })
    }
    # verify=False: test environments use self-signed certificates.
    r = requests.put(api_url, verify=False, headers=header, json=kdm_json)
    r_content = json.loads(r.content)
    assert r.ok
    assert r_content['name'] == kdm_json['name']
    assert r_content['value'] == kdm_json['value']
    time.sleep(2)
    # Refresh Kubernetes Metadata
    kdm_refresh_url = rancher_api_url + "/kontainerdrivers?action=refresh"
    response = requests.post(kdm_refresh_url, verify=False, headers=header)
    assert response.ok
| 36.910768 | 117 | 0.615726 |
ec9a61eca11175ab9b5c6a3173863a806c7d91a4 | 1,303 | py | Python | monk/system_unit_tests/gluon/test_activation_gelu.py | Shreyashwaghe/monk_v1 | 4ee4d9483e8ffac9b73a41f3c378e5abf5fc799b | [
"Apache-2.0"
] | 7 | 2020-07-26T08:37:29.000Z | 2020-10-30T10:23:11.000Z | monk/system_unit_tests/gluon/test_activation_gelu.py | mursalfk/monk_v1 | 62f34a52f242772186ffff7e56764e958fbcd920 | [
"Apache-2.0"
] | 9 | 2020-01-28T21:40:39.000Z | 2022-02-10T01:24:06.000Z | monk/system_unit_tests/gluon/test_activation_gelu.py | mursalfk/monk_v1 | 62f34a52f242772186ffff7e56764e958fbcd920 | [
"Apache-2.0"
] | 1 | 2020-10-07T12:57:44.000Z | 2020-10-07T12:57:44.000Z | import os
import sys
sys.path.append("../../../monk/");
import psutil
from gluon_prototype import prototype
from compare_prototype import compare
from common import print_start
from common import print_status
import mxnet as mx
import numpy as np
from gluon.losses.return_loss import load_loss
def test_activation_gelu(system_dict):
    """Smoke-test the GELU activation layer through the gluon prototype.

    Builds a one-layer network containing only a GELU activation, compiles
    it, and runs a random forward pass. Pass/fail/skip bookkeeping is
    recorded in the shared ``system_dict`` accumulator.

    :param system_dict: mutable dict of test counters and failure lists
    :return: the updated ``system_dict``
    """
    # NOTE(review): `forward` is always True here, so the `else` (Skipped)
    # branch below is unreachable — this looks like a copied test template.
    forward = True;
    test = "test_activation_gelu";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            gtf = prototype(verbose=0);
            gtf.Prototype("sample-project-1", "sample-experiment-1");
            network = [];
            network.append(gtf.gelu());
            gtf.Compile_Network(network);
            # Random (batch=1, 64, 4) input pushed through the compiled model.
            x = np.random.rand(1, 64, 4);
            x = mx.nd.array(x);
            y = gtf.system_dict["local"]["model"].forward(x);
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            # Record the exception and mark the test as failed.
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");
    return system_dict
| 26.591837 | 71 | 0.61934 |
567a4544633008e6c6956e45c7bf2aec156faddf | 10,041 | py | Python | tests/contenttypes_tests/test_views.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/contenttypes_tests/test_views.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/contenttypes_tests/test_views.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | import datetime
from unittest import mock
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.views import shortcut
from django.contrib.sites.models import Site
from django.contrib.sites.shortcuts import get_current_site
from django.http import Http404, HttpRequest
from django.test import TestCase, override_settings
from .models import (
Article, Author, FooWithBrokenAbsoluteUrl, FooWithoutUrl, FooWithUrl,
ModelWithM2MToSite, ModelWithNullFKToSite, SchemeIncludedURL,
Site as MockSite,
)
@override_settings(ROOT_URLCONF='contenttypes_tests.urls')
class ContentTypesViewsTests(TestCase):
    """Tests for the contenttypes ``shortcut`` view reached through URLs.

    Covers redirects for models with and without ``get_absolute_url()``,
    absolute URLs that already include a scheme, and 404 behavior for
    malformed content-type ids and primary keys.
    """

    @classmethod
    def setUpTestData(cls):
        # Don't use the manager to ensure the site exists with pk=1, regardless
        # of whether or not it already exists.
        cls.site1 = Site(pk=1, domain='testserver', name='testserver')
        cls.site1.save()
        cls.author1 = Author.objects.create(name='Boris')
        cls.article1 = Article.objects.create(
            title='Old Article', slug='old_article', author=cls.author1,
            date_created=datetime.datetime(2001, 1, 1, 21, 22, 23),
        )
        cls.article2 = Article.objects.create(
            title='Current Article', slug='current_article', author=cls.author1,
            date_created=datetime.datetime(2007, 9, 17, 21, 22, 23),
        )
        cls.article3 = Article.objects.create(
            title='Future Article', slug='future_article', author=cls.author1,
            date_created=datetime.datetime(3000, 1, 1, 21, 22, 23),
        )
        cls.scheme1 = SchemeIncludedURL.objects.create(url='http://test_scheme_included_http/')
        cls.scheme2 = SchemeIncludedURL.objects.create(url='https://test_scheme_included_https/')
        cls.scheme3 = SchemeIncludedURL.objects.create(url='//test_default_scheme_kept/')

    def setUp(self):
        # Per-test isolation: the Site framework caches lookups globally.
        Site.objects.clear_cache()

    def test_shortcut_with_absolute_url(self):
        "Can view a shortcut for an Author object that has a get_absolute_url method"
        for obj in Author.objects.all():
            with self.subTest(obj=obj):
                short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, obj.pk)
                response = self.client.get(short_url)
                self.assertRedirects(response, 'http://testserver%s' % obj.get_absolute_url(), target_status_code=404)

    def test_shortcut_with_absolute_url_including_scheme(self):
        """
        Can view a shortcut when object's get_absolute_url returns a full URL
        the tested URLs are: "http://...", "https://..." and "//..."
        """
        for obj in SchemeIncludedURL.objects.all():
            with self.subTest(obj=obj):
                short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(SchemeIncludedURL).id, obj.pk)
                response = self.client.get(short_url)
                self.assertRedirects(response, obj.get_absolute_url(), fetch_redirect_response=False)

    def test_shortcut_no_absolute_url(self):
        """
        Shortcuts for an object that has no get_absolute_url() method raise
        404.
        """
        for obj in Article.objects.all():
            with self.subTest(obj=obj):
                short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Article).id, obj.pk)
                response = self.client.get(short_url)
                self.assertEqual(response.status_code, 404)

    def test_wrong_type_pk(self):
        # Non-numeric (and slash-containing) primary key -> 404.
        short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, 'nobody/expects')
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)

    def test_shortcut_bad_pk(self):
        # Numeric but nonexistent primary key -> 404.
        short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, '42424242')
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)

    def test_nonint_content_type(self):
        # Non-integer content-type id -> 404.
        an_author = Author.objects.all()[0]
        short_url = '/shortcut/%s/%s/' % ('spam', an_author.pk)
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)

    def test_bad_content_type(self):
        # Integer but nonexistent content-type id -> 404.
        an_author = Author.objects.all()[0]
        short_url = '/shortcut/%s/%s/' % (42424242, an_author.pk)
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)
@override_settings(ROOT_URLCONF='contenttypes_tests.urls')
class ContentTypesViewsSiteRelTests(TestCase):
    """Shortcut-view tests for models related to Site (FK and M2M).

    ``apps.get_model`` is patched so that lookups of ``sites.Site``
    resolve to the test-local ``MockSite`` model instead.
    """

    def setUp(self):
        Site.objects.clear_cache()

    @classmethod
    def setUpTestData(cls):
        cls.site_2 = Site.objects.create(domain='example2.com', name='example2.com')
        cls.site_3 = Site.objects.create(domain='example3.com', name='example3.com')

    @mock.patch('django.apps.apps.get_model')
    def test_shortcut_view_with_null_site_fk(self, get_model):
        """
        The shortcut view works if a model's ForeignKey to site is None.
        """
        get_model.side_effect = lambda *args, **kwargs: MockSite if args[0] == 'sites.Site' else ModelWithNullFKToSite
        obj = ModelWithNullFKToSite.objects.create(title='title')
        url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(ModelWithNullFKToSite).id, obj.pk)
        response = self.client.get(url)
        expected_url = 'http://example.com%s' % obj.get_absolute_url()
        self.assertRedirects(response, expected_url, fetch_redirect_response=False)

    @mock.patch('django.apps.apps.get_model')
    def test_shortcut_view_with_site_m2m(self, get_model):
        """
        When the object has a ManyToManyField to Site, redirect to the current
        site if it's attached to the object or to the domain of the first site
        found in the m2m relationship.
        """
        get_model.side_effect = lambda *args, **kwargs: MockSite if args[0] == 'sites.Site' else ModelWithM2MToSite
        # get_current_site() will lookup a Site object, so these must match the
        # domains in the MockSite model.
        MockSite.objects.bulk_create([
            MockSite(pk=1, domain='example.com'),
            MockSite(pk=self.site_2.pk, domain=self.site_2.domain),
            MockSite(pk=self.site_3.pk, domain=self.site_3.domain),
        ])
        ct = ContentType.objects.get_for_model(ModelWithM2MToSite)
        site_3_obj = ModelWithM2MToSite.objects.create(title='Not Linked to Current Site')
        site_3_obj.sites.add(MockSite.objects.get(pk=self.site_3.pk))
        expected_url = 'http://%s%s' % (self.site_3.domain, site_3_obj.get_absolute_url())
        with self.settings(SITE_ID=self.site_2.pk):
            # Redirects to the domain of the first Site found in the m2m
            # relationship (ordering is arbitrary).
            response = self.client.get('/shortcut/%s/%s/' % (ct.pk, site_3_obj.pk))
            self.assertRedirects(response, expected_url, fetch_redirect_response=False)
        obj_with_sites = ModelWithM2MToSite.objects.create(title='Linked to Current Site')
        obj_with_sites.sites.set(MockSite.objects.all())
        shortcut_url = '/shortcut/%s/%s/' % (ct.pk, obj_with_sites.pk)
        expected_url = 'http://%s%s' % (self.site_2.domain, obj_with_sites.get_absolute_url())
        with self.settings(SITE_ID=self.site_2.pk):
            # Redirects to the domain of the Site matching the current site's
            # domain.
            response = self.client.get(shortcut_url)
            self.assertRedirects(response, expected_url, fetch_redirect_response=False)
        with self.settings(SITE_ID=None, ALLOWED_HOSTS=[self.site_2.domain]):
            # Redirects to the domain of the Site matching the request's host
            # header.
            response = self.client.get(shortcut_url, SERVER_NAME=self.site_2.domain)
            self.assertRedirects(response, expected_url, fetch_redirect_response=False)
class ShortcutViewTests(TestCase):
    """Unit tests that call the ``shortcut`` view function directly."""

    def setUp(self):
        self.request = HttpRequest()
        self.request.META = {'SERVER_NAME': 'Example.com', 'SERVER_PORT': '80'}

    @override_settings(ALLOWED_HOSTS=['example.com'])
    def test_not_dependent_on_sites_app(self):
        """
        The view returns a complete URL regardless of whether the sites
        framework is installed.
        """
        user_ct = ContentType.objects.get_for_model(FooWithUrl)
        obj = FooWithUrl.objects.create(name='john')
        with self.modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'}):
            response = shortcut(self.request, user_ct.id, obj.id)
            # Fix: read the header through the public response[...] API
            # instead of the private ``_headers`` attribute (which was
            # removed in Django 3.2). Header lookup is case-insensitive.
            self.assertEqual(
                'http://%s/users/john/' % get_current_site(self.request).domain,
                response['Location']
            )
        with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
            response = shortcut(self.request, user_ct.id, obj.id)
            self.assertEqual('http://Example.com/users/john/', response['Location'])

    def test_model_without_get_absolute_url(self):
        """The view returns 404 when Model.get_absolute_url() isn't defined."""
        user_ct = ContentType.objects.get_for_model(FooWithoutUrl)
        obj = FooWithoutUrl.objects.create(name='john')
        with self.assertRaises(Http404):
            shortcut(self.request, user_ct.id, obj.id)

    def test_model_with_broken_get_absolute_url(self):
        """
        The view doesn't catch an AttributeError raised by
        Model.get_absolute_url() (#8997).
        """
        user_ct = ContentType.objects.get_for_model(FooWithBrokenAbsoluteUrl)
        obj = FooWithBrokenAbsoluteUrl.objects.create(name='john')
        with self.assertRaises(AttributeError):
            shortcut(self.request, user_ct.id, obj.id)
| 48.043062 | 119 | 0.65601 |
eec372270713adc4becfebb98ad601a30fbb0b58 | 521 | py | Python | core/migrations/0006_appconfig.py | aashreyj/Audition-Management-System | 7bb3ad014920b9f7db5f9a6d6a93d7fc96e16926 | [
"MIT"
] | 3 | 2019-01-29T19:07:21.000Z | 2022-03-10T08:33:31.000Z | core/migrations/0006_appconfig.py | aashreyj/Audition-Management-System | 7bb3ad014920b9f7db5f9a6d6a93d7fc96e16926 | [
"MIT"
] | 18 | 2019-01-29T19:10:51.000Z | 2019-02-08T06:55:35.000Z | core/migrations/0006_appconfig.py | JayjeetAtGithub/Audition-Management-System | c10a48733f74022e52b5cf4729dac8622a57c0d5 | [
"MIT"
] | 8 | 2019-01-31T14:42:33.000Z | 2021-02-24T19:10:40.000Z | # Generated by Django 2.1.5 on 2019-01-24 13:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``AppConfig`` model with a ``show_results`` flag
    (default ``False``) used to toggle result visibility app-wide."""

    dependencies = [
        ('core', '0005_student_stopped'),
    ]

    operations = [
        migrations.CreateModel(
            name='AppConfig',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('show_results', models.BooleanField(default=False)),
            ],
        ),
    ]
39b6d60b0077d8a2d2a0ea4b350131c055dd69e9 | 5,276 | py | Python | scripts/quad_walk_mult.py | WPI-MMR/quadrupedal_walking_controller | 952bc350b2204f2a742bf469af1386a2b80ab4bd | [
"Apache-2.0"
] | null | null | null | scripts/quad_walk_mult.py | WPI-MMR/quadrupedal_walking_controller | 952bc350b2204f2a742bf469af1386a2b80ab4bd | [
"Apache-2.0"
] | null | null | null | scripts/quad_walk_mult.py | WPI-MMR/quadrupedal_walking_controller | 952bc350b2204f2a742bf469af1386a2b80ab4bd | [
"Apache-2.0"
] | null | null | null | import gym
import gym_solo
from gym_solo.envs import solo8v2vanilla_realtime
from gym_solo.core import rewards
from gym_solo import testing
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import argparse
import math
import threading
import time
import wandb
class DistanceReward(rewards.Reward):
  """Reward equal to the planar distance of the robot base from the origin."""

  def __init__(self, robot_id):
    # PyBullet body id of the robot whose base position is measured.
    self.robot = robot_id

  def compute(self) -> float:
    """Return the XY-plane distance of the robot's base from (0, 0)."""
    position, _ = self.client.getBasePositionAndOrientation(self.robot)
    x, y = position[0], position[1]
    return math.sqrt(x ** 2 + y ** 2)
# Accumulators filled by the background scorer thread below.
epi_times, epi_rewards = [], []
def episode_listener():
  """Sample the walk reward every ``args.dt`` seconds until ``end``.

  Runs on a background thread; appends (simulated) timestamps to
  ``epi_times`` and reward samples to ``epi_rewards``. The tqdm total is
  an estimate (~90% of the remaining samples) since wall-clock drift
  makes the exact count unknowable in advance.

  :return: the (epi_times, epi_rewards) lists, also visible as globals
  """
  global epi_times
  global epi_rewards
  global end
  curr_timestep = 0.
  with tqdm(total=int(0.9 * math.floor((end - time.time())/args.dt)),
            desc='Evaluating Episode') as t:
    while time.time() < end:
      epi_times.append(curr_timestep)
      epi_rewards.append(walk_reward.compute())
      curr_timestep += args.dt
      t.update()
      # Sleep defines the sampling period; actual spacing will drift
      # slightly because compute() time is not subtracted.
      time.sleep(args.dt)
  return epi_times, epi_rewards
def FLHR_HFE(joints, value):
  """Command the front-left / hind-right hip pair (diagonal trot pair).

  FL receives a 1.1x-scaled, mirrored target; HR receives ``value`` as-is.
  """
  joints.update(FL_HFE=-1.1 * value, HR_HFE=value)
def FLHR_KFE(joints, value):
  """Command the front-left / hind-right knee pair; HR mirrors FL."""
  joints.update(FL_KFE=value, HR_KFE=-value)
def FRHL_HFE(joints, value):
  """Command the front-right / hind-left hip pair (diagonal trot pair).

  FR receives a 1.1x-scaled target; HL receives the mirrored value.
  """
  joints.update(FR_HFE=1.1 * value, HL_HFE=-value)
def FRHL_KFE(joints, value):
  """Command the front-right / hind-left knee pair; both get ``-value``."""
  mirrored = -value
  joints['FR_KFE'] = mirrored
  joints['HL_KFE'] = mirrored
# Command-line interface. Unknown flags are registered on the fly so that
# arbitrary hyper-parameters can be supplied and end up on `args`.
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--length', default=10, type=int,
                    help='how many seconds to run the simulation for.')
# Fix: the reward sampling period is fractional (default 0.05 s) but was
# declared with type=int, which truncates integral inputs and raises a
# ValueError on inputs such as "0.05". Parse it as a float.
parser.add_argument('-dt', '--reward_dt', default=.05, type=float, dest='dt',
                    help='how often to sample the reward.')
args, unknown = parser.parse_known_args()
for arg in unknown:
  if arg.startswith(("-", "--")):
    parser.add_argument(arg.split('=')[0])
args = parser.parse_args()
# Reward configuration: targets and tolerance margins for the torso
# flatness, torso height, and horizontal speed reward terms below.
args.flat_reward_hard_margin = 0
args.flat_reward_soft_margin = 0.3
args.height_reward_target = 0.2
args.height_reward_hard_margin = 0.005
args.height_reward_soft_margin = 0.15
args.speed_reward_target = .25
args.speed_reward_hard_margin = 0
args.speed_reward_soft_margin = 0.1
# Trot configuration: hand-tuned joint targets (radians) and phase
# durations (seconds) for the open-loop trot gait in the main loop.
args.trot_hip_launch = -0.8
args.trot_knee_launch = 1.4
args.trot_launch_dur = 0.25
args.trot_knee_clearance = 2
args.trot_clearance_dur = 0.1
args.trot_hip_step = -0.05
args.trot_knee_step = 1.5
args.trot_step_dur = 0.1
# NOTE(review): wandb.init is commented out, but the script still calls
# wandb.log at the end — that call will fail without an active run.
# Either re-enable init or guard/remove the final log.
# wandb.init(
#   project='quadrupedal-walking',
#   entity='wpi-mmr',
#   config=args,
#   tags=['multiplicative_reward'],
# )
# Simulation environment: realtime Solo8 v2 vanilla with the local URDF.
config = solo8v2vanilla_realtime.RealtimeSolo8VanillaConfig()
config.urdf_path = 'assets/solo8_URDF_v2/solo8_URDF_v2.urdf'
# Set the robot to quadrupedal standing
config.starting_joint_pos = {
  'FL_HFE': -np.pi / 4,
  'FL_KFE': -np.pi / 2,
  'FL_ANKLE': 0,
  'FR_HFE': np.pi / 4,
  'FR_KFE': np.pi / 2,
  'FR_ANKLE': 0,
  'HL_HFE': -np.pi / 4,
  'HL_KFE': np.pi / 2,
  'HL_ANKLE': np.pi / 2,
  'HR_HFE': np.pi / 4,
  'HR_KFE': np.pi / 2,
  'HR_ANKLE': np.pi / 2
}
env = gym.make('solo8vanilla-realtime-v0', config=config)
env.obs_factory.register_observation(testing.CompliantObs(env.robot))
# Reward terms: keep the torso level, at the target height, and moving at
# the target horizontal speed; combined multiplicatively so all three
# conditions must hold for a high reward.
flat_reward = rewards.FlatTorsoReward(
  env.robot, args.flat_reward_hard_margin, args.flat_reward_soft_margin)
height_reward = rewards.TorsoHeightReward(
  env.robot, args.height_reward_target, args.height_reward_hard_margin,
  args.height_reward_soft_margin)
speed_reward = rewards.HorizontalMoveSpeedReward(
  env.robot, args.speed_reward_target, hard_margin=args.speed_reward_hard_margin,
  soft_margin=args.speed_reward_soft_margin)
walk_reward = rewards.MultiplicitiveReward(1, flat_reward, height_reward,
                                           speed_reward)
# The reward needs the bullet client to query robot state directly.
walk_reward.client = env.client
# Actions are offsets relative to the standing pose, ordered per the env.
to_action = lambda d: [d[j] + config.starting_joint_pos[j]
                       for j in env.joint_ordering]
# Initial joint offsets: mirror of the standing pose (net zero command).
joints = {
  'FL_HFE': np.pi / 4,
  'FL_KFE': np.pi / 2,
  'FL_ANKLE': 0,
  'FR_HFE': -np.pi / 4,
  'FR_KFE': -np.pi / 2,
  'FR_ANKLE': 0,
  'HL_HFE': np.pi / 4,
  'HL_KFE': -np.pi / 2,
  'HL_ANKLE': 0,
  'HR_HFE': -np.pi / 4,
  'HR_KFE': -np.pi / 2,
  'HR_ANKLE': 0
}
env.step(to_action(joints))
# Run the open-loop trot for `length` seconds while a background thread
# samples the reward every `dt` seconds.
end = time.time() + args.length
scorer = threading.Thread(target=episode_listener)
scorer.start()
# Alternate the two diagonal leg pairs (FR+HL, then FL+HR): crouch/launch
# one pair, then step it forward, then repeat with the other pair.
while time.time() < end:
  # Get ready to launch FR and HL
  FLHR_HFE(joints, -1)
  FLHR_KFE(joints, 1.2)
  FRHL_KFE(joints, 1.8)
  env.step(to_action(joints))
  time.sleep(args.trot_launch_dur)
  # Make the FR and HL Movement
  FRHL_HFE(joints, -.4)
  FRHL_KFE(joints, 1.5)
  env.step(to_action(joints))
  time.sleep(args.trot_step_dur)
  # Get ready to launch FL and HR
  FRHL_HFE(joints, -1)
  FRHL_KFE(joints, 1.2)
  FLHR_KFE(joints, 1.8)
  env.step(to_action(joints))
  time.sleep(args.trot_launch_dur)
  # Make the FL and HR Movement
  FLHR_HFE(joints, -.4)
  FLHR_KFE(joints, 1.5)
  env.step(to_action(joints))
  time.sleep(args.trot_step_dur)
scorer.join()
env.close()
# Summarize and plot the sampled rewards.
scores = np.array(epi_rewards)
print(f'Average Score: {np.array(epi_rewards).mean()}')
print(f'Cum Score: {np.array(epi_rewards).sum()}')
plt.plot(epi_times, epi_rewards)
plt.title('Reward over Episode')
plt.xlabel('Simulation Time (seconds)')
plt.ylabel('Rewards')
# NOTE(review): this log call requires an active wandb run, but the
# wandb.init call earlier in this script is commented out — confirm.
wandb.log({
  'mean_reward': scores.mean(),
  'cum_reward': scores.sum(),
  'rewards_vs_time': wandb.Image(plt)
})
c0fb1e896dfaf267de9b9410b69542268e20185a | 3,996 | py | Python | hubspot/cms/hubdb/__init__.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | null | null | null | hubspot/cms/hubdb/__init__.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | null | null | null | hubspot/cms/hubdb/__init__.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# flake8: noqa
"""
HubDB endpoints
HubDB is a relational data store that presents data as rows, columns, and cells in a table, much like a spreadsheet. HubDB tables can be added or modified [in the HubSpot CMS](https://knowledge.hubspot.com/cos-general/how-to-edit-hubdb-tables), but you can also use the API endpoints documented here. For more information on HubDB tables and using their data on a HubSpot site, see the [CMS developers site](https://designers.hubspot.com/docs/tools/hubdb). You can also see the [documentation for dynamic pages](https://designers.hubspot.com/docs/tutorials/how-to-build-dynamic-pages-with-hubdb) for more details about the `useForPages` field. HubDB tables support `draft` and `live` versions and you can publish and unpublish the live version. This allows you to update data in the table, either for testing or to allow for a manual approval process, without affecting any live pages using the existing data. Draft data can be reviewed, pushed to live version, and published by a user working in HubSpot or published via the API. Draft data can also be discarded, allowing users to go back to the live version of the data without disrupting it. If a table is set to be `allowed for public access`, you can access the published version of the table and rows without any authentication. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
__version__ = "1.0.0"
# import apis into sdk package
from hubspot.cms.hubdb.api.rows_api import RowsApi
from hubspot.cms.hubdb.api.rows_batch_api import RowsBatchApi
from hubspot.cms.hubdb.api.tables_api import TablesApi
# import ApiClient
from hubspot.cms.hubdb.api_client import ApiClient
from hubspot.cms.hubdb.configuration import Configuration
from hubspot.cms.hubdb.exceptions import OpenApiException
from hubspot.cms.hubdb.exceptions import ApiTypeError
from hubspot.cms.hubdb.exceptions import ApiValueError
from hubspot.cms.hubdb.exceptions import ApiKeyError
from hubspot.cms.hubdb.exceptions import ApiException
# import models into sdk package
from hubspot.cms.hubdb.models.batch_input_hub_db_table_row_v3 import (
BatchInputHubDbTableRowV3,
)
from hubspot.cms.hubdb.models.batch_input_json_node import BatchInputJsonNode
from hubspot.cms.hubdb.models.batch_input_string import BatchInputString
from hubspot.cms.hubdb.models.batch_response_hub_db_table_row_v3_with_errors import (
BatchResponseHubDbTableRowV3WithErrors,
)
from hubspot.cms.hubdb.models.collection_response_with_total_hub_db_table_row_v3_forward_paging import (
CollectionResponseWithTotalHubDbTableRowV3ForwardPaging,
)
from hubspot.cms.hubdb.models.collection_response_with_total_hub_db_table_v3_forward_paging import (
CollectionResponseWithTotalHubDbTableV3ForwardPaging,
)
from hubspot.cms.hubdb.models.column import Column
from hubspot.cms.hubdb.models.column_input import ColumnInput
from hubspot.cms.hubdb.models.error import Error
from hubspot.cms.hubdb.models.error_detail import ErrorDetail
from hubspot.cms.hubdb.models.foreign_id import ForeignId
from hubspot.cms.hubdb.models.forward_paging import ForwardPaging
from hubspot.cms.hubdb.models.hub_db_table_clone_request import HubDbTableCloneRequest
from hubspot.cms.hubdb.models.hub_db_table_row_v3 import HubDbTableRowV3
from hubspot.cms.hubdb.models.hub_db_table_row_v3_input import HubDbTableRowV3Input
from hubspot.cms.hubdb.models.hub_db_table_v3 import HubDbTableV3
from hubspot.cms.hubdb.models.hub_db_table_v3_input import HubDbTableV3Input
from hubspot.cms.hubdb.models.hub_db_table_v3_live_input import HubDbTableV3LiveInput
from hubspot.cms.hubdb.models.import_result import ImportResult
from hubspot.cms.hubdb.models.next_page import NextPage
from hubspot.cms.hubdb.models.option import Option
from hubspot.cms.hubdb.models.simple_user import SimpleUser
from hubspot.cms.hubdb.models.standard_error import StandardError
| 61.476923 | 1,304 | 0.835335 |
942906e319b8e14e5317ed0a1af3b7e5abbfac34 | 132 | py | Python | connect4/apps.py | ozzy92/JaysConnect4 | 831eee406594348b0ecfea325873bd9c47bece91 | [
"MIT"
] | null | null | null | connect4/apps.py | ozzy92/JaysConnect4 | 831eee406594348b0ecfea325873bd9c47bece91 | [
"MIT"
] | null | null | null | connect4/apps.py | ozzy92/JaysConnect4 | 831eee406594348b0ecfea325873bd9c47bece91 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.apps import AppConfig
class Connect4Config(AppConfig):
    """Django application configuration for the ``connect4`` app."""
    name = 'connect4'
| 16.5 | 39 | 0.795455 |
217ee6f60e2d962c01e38823e89950092847abd4 | 5,158 | py | Python | tests/test_miner_sim.py | gitter-badger/py-flexpoolapi | feccb28321575afde1f64643a5d96724f09a9214 | [
"MIT"
] | null | null | null | tests/test_miner_sim.py | gitter-badger/py-flexpoolapi | feccb28321575afde1f64643a5d96724f09a9214 | [
"MIT"
] | null | null | null | tests/test_miner_sim.py | gitter-badger/py-flexpoolapi | feccb28321575afde1f64643a5d96724f09a9214 | [
"MIT"
] | null | null | null | #
# Software distrubuted under MIT License (MIT)
#
# Copyright (c) 2020 Flexpool
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import flexpoolapi
import math
from . import simdata
from . import utils
class TestMinerSimulated:
    """Exercise the flexpoolapi miner endpoints against a local simulator.

    ``setup_class`` points the client at ``http://localhost:5000`` where a
    simulated API server is expected to serve the fixtures in ``simdata``;
    each test compares a live API response to the corresponding fixture.
    """

    def setup_class(self):
        # Route all API calls to the local simulated server.
        flexpoolapi.set_base_endpoint("http://localhost:5000/api/v1")
        self.miner_api = flexpoolapi.miner(simdata.MINER_ADDRESS)

    def test_balance(self):
        assert self.miner_api.balance() == simdata.MINER_BALANCE

    def test_current_hashrate(self):
        effective, reported = self.miner_api.current_hashrate()
        assert effective == simdata.CURRENT_EFFECTIVE_HASHRATE
        assert reported == simdata.CURRENT_REPORTED_HASHRATE

    def test_daily_hashrate(self):
        got = self.miner_api.daily_average_stats()
        assert got.effective_hashrate == simdata.DAILY_EFFECTIVE_HASHRATE
        assert got.reported_hashrate == simdata.DAILY_REPORTED_HASHRATE
        assert got.valid_shares == simdata.DAILY_VALID_SHARES
        assert got.stale_shares == simdata.DAILY_STALE_SHARES
        assert got.invalid_shares == simdata.DAILY_INVALID_SHARES

    def test_stats(self):
        got = self.miner_api.stats()
        assert got.current_effective_hashrate == simdata.CURRENT_EFFECTIVE_HASHRATE
        assert got.current_reported_hashrate == simdata.CURRENT_REPORTED_HASHRATE
        assert got.average_effective_hashrate == simdata.DAILY_EFFECTIVE_HASHRATE
        assert got.average_reported_hashrate == simdata.DAILY_REPORTED_HASHRATE
        assert got.valid_shares == simdata.DAILY_VALID_SHARES
        assert got.stale_shares == simdata.DAILY_STALE_SHARES
        assert got.invalid_shares == simdata.DAILY_INVALID_SHARES

    def test_worker_count(self):
        assert self.miner_api.worker_count() == simdata.WORKER_COUNT

    def test_workers(self):
        got = self.miner_api.workers()
        for i, expected_worker in enumerate(simdata.WORKERS):
            assert got[i].worker_name == expected_worker["name"]
            assert got[i].is_online == expected_worker["online"]

    def test_chart(self):
        # The simulator serves a fixed 144-point chart (24h at 10 min).
        got = self.miner_api.chart()
        for i in range(0, 144):
            assert got[i].reported_hashrate == simdata.MINER_CHART[i]["reported_hashrate"]
            assert got[i].effective_hashrate == simdata.MINER_CHART[i]["effective_hashrate"]
            assert got[i].valid_shares == simdata.MINER_CHART[i]["valid_shares"]
            assert got[i].stale_shares == simdata.MINER_CHART[i]["stale_shares"]
            assert got[i].invalid_shares == simdata.MINER_CHART[i]["invalid_shares"]

    def test_payment_count(self):
        assert self.miner_api.payment_count() == simdata.PAYMENT_COUNT

    def test_payments_pages(self):
        # Walk every page (10 payments per page) and compare field by field.
        page_count = math.ceil(simdata.PAYMENT_COUNT / 10)
        for i in range(0, page_count):
            got = self.miner_api.payments_paged(i)
            expected = simdata.PAYMENTS[i * 10:i * 10 + 10]
            for j in range(0, len(expected)):
                assert expected[j]["amount"] == got[j].amount
                assert expected[j]["timestamp"] == got[j].time.timestamp()
                assert expected[j]["duration"] == got[j].duration
                assert expected[j]["txid"] == got[j].txid

    def test_block_count(self):
        assert self.miner_api.block_count() == simdata.MINER_BLOCK_COUNT

    def test_blocks_pages(self):
        # Walk every page (10 blocks per page); utils does the deep compare.
        page_count = math.ceil(simdata.MINER_BLOCK_COUNT / 10)
        for i in range(0, page_count):
            got = self.miner_api.blocks_paged(i)
            expected = simdata.MINER_BLOCKS[i * 10:i * 10 + 10]
            for j in range(0, len(expected)):
                utils.compare_blocks(expected[j], got[j])

    def test_details(self):
        got = self.miner_api.details()
        assert got.censored_email == simdata.MINER_CENSORED_EMAIL
        assert got.censored_ip == simdata.MINER_CENSORED_IP
        assert got.pool_donation == simdata.MINER_POOL_DONATION
        assert got.min_payout_threshold == simdata.MINER_MIN_PAYOUT_THRESHOLD
        assert got.first_joined_date.timestamp() == simdata.MINER_FIRST_JOINED
c27cedd35cb43b55cf4605836a232139f9577786 | 8,038 | py | Python | neutron/db/migration/alembic_migrations/cisco_init_ops.py | gampel/neutron | 51a6260266dc59c066072ca890ad9c40b1aad6cf | [
"Apache-2.0"
] | 10 | 2015-09-22T10:22:53.000Z | 2016-02-25T06:12:05.000Z | neutron/db/migration/alembic_migrations/cisco_init_ops.py | gampel/neutron | 51a6260266dc59c066072ca890ad9c40b1aad6cf | [
"Apache-2.0"
] | 12 | 2015-01-08T18:30:45.000Z | 2015-03-13T21:04:15.000Z | neutron/db/migration/alembic_migrations/cisco_init_ops.py | gampel/neutron | 51a6260266dc59c066072ca890ad9c40b1aad6cf | [
"Apache-2.0"
] | 7 | 2015-02-05T10:23:52.000Z | 2019-05-18T17:11:19.000Z | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial schema operations for cisco plugin
from alembic import op
import sqlalchemy as sa
# Enum types used by the tables below.  They are declared at module scope so
# that downgrade() can emit the matching DROP TYPE statements via .drop().
segment_type = sa.Enum('vlan', 'overlay', 'trunk', 'multi-segment',
                       name='segment_type')
profile_type = sa.Enum('network', 'policy', name='profile_type')
def upgrade():
    """Create the initial schema for the Cisco plugin (N1KV, Nexus,
    credentials, QoS).

    Creation order matters: tables referenced through foreign keys
    (cisco_policy_profiles, cisco_network_profiles, and the core
    networks/ports tables) must exist before the tables pointing at them.
    """
    op.create_table(
        'cisco_policy_profiles',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.PrimaryKeyConstraint('id'))
    op.create_table(
        'cisco_n1kv_vlan_allocations',
        sa.Column('physical_network', sa.String(length=64), nullable=False),
        sa.Column('vlan_id', sa.Integer(), autoincrement=False,
                  nullable=False),
        sa.Column('allocated', sa.Boolean(), autoincrement=False,
                  nullable=False),
        sa.PrimaryKeyConstraint('physical_network', 'vlan_id'))
    op.create_table(
        'cisco_network_profiles',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('segment_type', segment_type, nullable=False),
        sa.Column('sub_type', sa.String(length=255), nullable=True),
        sa.Column('segment_range', sa.String(length=255), nullable=True),
        sa.Column('multicast_ip_index', sa.Integer(), nullable=True),
        sa.Column('multicast_ip_range', sa.String(length=255), nullable=True),
        sa.Column('physical_network', sa.String(length=255), nullable=True),
        sa.PrimaryKeyConstraint('id'))
    op.create_table(
        'cisco_n1kv_vxlan_allocations',
        sa.Column('vxlan_id', sa.Integer(), autoincrement=False,
                  nullable=False),
        sa.Column('allocated', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint('vxlan_id'))
    op.create_table(
        'cisco_credentials',
        sa.Column('credential_id', sa.String(length=255), nullable=True),
        sa.Column('credential_name', sa.String(length=255), nullable=False),
        sa.Column('user_name', sa.String(length=255), nullable=True),
        sa.Column('password', sa.String(length=255), nullable=True),
        sa.Column('type', sa.String(length=255), nullable=True),
        sa.PrimaryKeyConstraint('credential_name'))
    op.create_table(
        'cisco_qos_policies',
        sa.Column('qos_id', sa.String(length=255), nullable=True),
        sa.Column('tenant_id', sa.String(length=255), nullable=False),
        sa.Column('qos_name', sa.String(length=255), nullable=False),
        sa.Column('qos_desc', sa.String(length=255), nullable=True),
        sa.PrimaryKeyConstraint('tenant_id', 'qos_name'))
    op.create_table(
        'cisco_nexusport_bindings',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('port_id', sa.String(length=255), nullable=True),
        sa.Column('vlan_id', sa.Integer(), nullable=False),
        sa.Column('switch_ip', sa.String(length=255), nullable=False),
        sa.Column('instance_id', sa.String(length=255), nullable=False),
        sa.PrimaryKeyConstraint('id'))
    op.create_table(
        'cisco_n1kv_profile_bindings',
        sa.Column('profile_type', profile_type, nullable=True),
        sa.Column('tenant_id', sa.String(length=36), nullable=False),
        sa.Column('profile_id', sa.String(length=36), nullable=False),
        sa.PrimaryKeyConstraint('tenant_id', 'profile_id'))
    # The remaining tables carry foreign keys into the tables created above
    # and into the core networks/ports tables.
    op.create_table(
        'cisco_n1kv_vmnetworks',
        sa.Column('name', sa.String(length=80), nullable=False),
        sa.Column('profile_id', sa.String(length=36), nullable=True),
        sa.Column('network_id', sa.String(length=36), nullable=True),
        sa.Column('port_count', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['profile_id'],
                                ['cisco_policy_profiles.id'], ),
        sa.PrimaryKeyConstraint('name'))
    op.create_table(
        'cisco_n1kv_trunk_segments',
        sa.Column('trunk_segment_id', sa.String(length=36), nullable=False),
        sa.Column('segment_id', sa.String(length=36), nullable=False),
        sa.Column('dot1qtag', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['trunk_segment_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('trunk_segment_id', 'segment_id', 'dot1qtag'))
    op.create_table(
        'cisco_provider_networks',
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('network_type', sa.String(length=255), nullable=False),
        sa.Column('segmentation_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('network_id'))
    op.create_table(
        'cisco_n1kv_multi_segments',
        sa.Column('multi_segment_id', sa.String(length=36), nullable=False),
        sa.Column('segment1_id', sa.String(length=36), nullable=False),
        sa.Column('segment2_id', sa.String(length=36), nullable=False),
        sa.Column('encap_profile_name', sa.String(length=36), nullable=True),
        sa.ForeignKeyConstraint(['multi_segment_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('multi_segment_id', 'segment1_id',
                                'segment2_id'))
    op.create_table(
        'cisco_n1kv_network_bindings',
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('network_type', sa.String(length=32), nullable=False),
        sa.Column('physical_network', sa.String(length=64), nullable=True),
        sa.Column('segmentation_id', sa.Integer(), nullable=True),
        sa.Column('multicast_ip', sa.String(length=32), nullable=True),
        sa.Column('profile_id', sa.String(length=36), nullable=True),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['profile_id'],
                                ['cisco_network_profiles.id']),
        sa.PrimaryKeyConstraint('network_id'))
    op.create_table(
        'cisco_n1kv_port_bindings',
        sa.Column('port_id', sa.String(length=36), nullable=False),
        sa.Column('profile_id', sa.String(length=36), nullable=True),
        sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['profile_id'], ['cisco_policy_profiles.id']),
        sa.PrimaryKeyConstraint('port_id'))
def downgrade():
    """Drop every table created by upgrade(), children before parents,
    then drop the enum types themselves.
    """
    op.drop_table('cisco_n1kv_port_bindings')
    op.drop_table('cisco_n1kv_network_bindings')
    op.drop_table('cisco_n1kv_multi_segments')
    op.drop_table('cisco_provider_networks')
    op.drop_table('cisco_n1kv_trunk_segments')
    op.drop_table('cisco_n1kv_vmnetworks')
    op.drop_table('cisco_n1kv_profile_bindings')
    op.drop_table('cisco_nexusport_bindings')
    op.drop_table('cisco_qos_policies')
    op.drop_table('cisco_credentials')
    op.drop_table('cisco_n1kv_vxlan_allocations')
    op.drop_table('cisco_network_profiles')
    op.drop_table('cisco_n1kv_vlan_allocations')
    op.drop_table('cisco_policy_profiles')
    # generate DDL for dropping enums
    segment_type.drop(op.get_bind(), checkfirst=False)
    profile_type.drop(op.get_bind(), checkfirst=False)
| 45.670455 | 78 | 0.654143 |
8d95189d02606f9195921321a95976c50ffd5b87 | 8,440 | py | Python | all_repos/autofix_lib.py | charlievieth/all-repos | 279d2910c56567d9518ab41bd8894216b9f649e5 | [
"MIT"
] | null | null | null | all_repos/autofix_lib.py | charlievieth/all-repos | 279d2910c56567d9518ab41bd8894216b9f649e5 | [
"MIT"
] | null | null | null | all_repos/autofix_lib.py | charlievieth/all-repos | 279d2910c56567d9518ab41bd8894216b9f649e5 | [
"MIT"
] | null | null | null | import argparse
import contextlib
import functools
import os
import shlex
import subprocess
import tempfile
import traceback
from typing import Any
from typing import Callable
from typing import Generator
from typing import Iterable
from typing import NamedTuple
from typing import NoReturn
from typing import Optional
from typing import Tuple
import pkg_resources
from all_repos import cli
from all_repos import color
from all_repos import git
from all_repos import mapper
from all_repos.config import Config
from all_repos.config import load_config
def add_fixer_args(parser: argparse.ArgumentParser) -> None:
    """Register the command-line flags shared by all autofixers on *parser*.

    ``--dry-run``, ``--interactive`` and the jobs flag are mutually
    exclusive (interactive prompting cannot be parallelised).
    """
    cli.add_common_args(parser)
    mutex = parser.add_mutually_exclusive_group()
    mutex.add_argument(
        '--dry-run', action='store_true',
        help='show what would happen but do not push.',
    )
    mutex.add_argument(
        '-i', '--interactive', action='store_true',
        help='interactively approve / deny fixes.',
    )
    cli.add_jobs_arg(mutex, default=1)
    parser.add_argument(
        '--limit', type=int, default=None,
        help='maximum number of repos to process (default: unlimited).',
    )
    parser.add_argument(
        '--author',
        help=(
            'override commit author. '
            'This is passed directly to `git commit`. '
            "An example: `--author='Herp Derp <herp.derp@umich.edu>'`."
        ),
    )
    parser.add_argument(
        '--repos', nargs='*',
        help=(
            'run against specific repositories instead. This is especially '
            'useful with `xargs autofixer ... --repos`. This can be used to '
            'specify repositories which are not managed by `all-repos`.'
        ),
    )
class Commit(NamedTuple):
    """Description of the commit an autofixer creates in each repository."""
    # commit message body (a "Committed via ..." trailer is appended later)
    msg: str
    # suffix of the work branch: "all-repos_autofix_<branch_name>"
    branch_name: str
    # optional value for `git commit --author`; None keeps the default author
    author: Optional[str]
class AutofixSettings(NamedTuple):
    """Runtime options controlling how the autofixer processes repositories."""
    # parallel workers; must be 1 when `interactive` is set (see fix())
    jobs: int
    # whether to colourise terminal output
    color: bool
    # maximum number of repositories to process; None means unlimited
    limit: Optional[int]
    # apply and show fixes but never push
    dry_run: bool
    # prompt the user before committing each repository
    interactive: bool
    @classmethod
    def from_cli(cls, args: Any) -> 'AutofixSettings':
        """Build settings from arguments parsed via add_fixer_args()."""
        return cls(
            jobs=args.jobs, color=args.color, limit=args.limit,
            dry_run=args.dry_run, interactive=args.interactive,
        )
def filter_repos(
        config: Config,
        cli_repos: Optional[Iterable[str]],
        find_repos: Callable[[Config], Iterable[str]],
) -> Iterable[str]:
    """Prefer the repositories given on the command line; otherwise
    discover them via *find_repos*.

    Note an explicitly-passed empty list is honoured (it is not None).
    """
    if cli_repos is None:
        return find_repos(config)
    return cli_repos
def from_cli(
        args: Any,
        *,
        find_repos: Callable[[Config], Iterable[str]],
        msg: str,
        branch_name: str,
) -> Tuple[Iterable[str], Config, Commit, AutofixSettings]:
    """Turn parsed fixer CLI arguments into (repos, config, commit, settings).

    *find_repos* is only consulted when the user did not pass ``--repos``
    (see filter_repos).
    """
    config = load_config(args.config_filename)
    return (
        filter_repos(config, args.repos, find_repos),
        config,
        Commit(msg=msg, branch_name=branch_name, author=args.author),
        AutofixSettings.from_cli(args),
    )
def run(*cmd: str, **kwargs: Any) -> 'subprocess.CompletedProcess[str]':
    """Echo *cmd* (shell-quoted) to stdout, then execute it.

    ``check=True`` is applied unless the caller overrides it.
    """
    quoted = ' '.join(map(shlex.quote, cmd))
    print('$ ' + quoted, flush=True)
    kwargs.setdefault('check', True)
    return subprocess.run(cmd, **kwargs)
@contextlib.contextmanager
def cwd(path: str) -> Generator[None, None, None]:
    """Temporarily chdir into *path*, restoring the old directory on exit
    (even when the body raises)."""
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)
def assert_importable(module: str, *, install: str) -> None:
    """Exit with an installation hint unless *module* can be imported."""
    try:
        __import__(module)
    except ImportError:
        # Raised inside the handler so the ImportError stays chained.
        message = (
            f'This tool requires the `{module}` module to be installed.\n'
            f'Try installing it via `pip install {install}`.'
        )
        raise SystemExit(message)
def require_version_gte(pkg_name: str, version: str) -> None:
    """Exit with an upgrade hint unless installed *pkg_name* is at least
    *version* (compared with pkg_resources version semantics)."""
    dist = pkg_resources.get_distribution(pkg_name)
    installed = pkg_resources.parse_version(dist.version)
    required = pkg_resources.parse_version(version)
    if installed >= required:
        return
    raise SystemExit(
        f'This tool requires the `{pkg_name}` package is at least version '
        f'{version}. '
        f'The currently installed version is {dist.version}.\n\n'
        f'Try `pip install --upgrade {pkg_name}`',
    )
def target_branch() -> str:
    """Return the upstream tracking branch (`@{u}`) without its `origin/`
    prefix; asserts the upstream really is on origin."""
    raw = subprocess.check_output(
        ('git', 'rev-parse', '--abbrev-ref', '--symbolic', '@{u}'),
    )
    upstream = raw.decode().strip()
    assert upstream.startswith('origin/')
    return upstream[len('origin/'):]
@contextlib.contextmanager
def repo_context(repo: str, *, use_color: bool) -> Generator[None, None, None]:
    """Clone *repo* into a temp directory and run the body chdir'd into it.

    The clone's origin is re-pointed at the remote from git.remote(repo).
    Any exception raised by the body is swallowed and printed with a
    traceback, so one broken repository does not abort a multi-repo run.
    """
    print(color.fmt(f'***{repo}', color.TURQUOISE_H, use_color=use_color))
    try:
        remote = git.remote(repo)
        with tempfile.TemporaryDirectory() as tmpdir:
            run('git', 'clone', '--quiet', repo, tmpdir)
            with cwd(tmpdir):
                run('git', 'remote', 'set-url', 'origin', remote)
                run('git', 'fetch', '--prune', '--quiet')
                yield
    except Exception:
        print(color.fmt('***Errored', color.RED_H, use_color=use_color))
        traceback.print_exc()
def shell() -> None:
    """Drop the user into an interactive shell ($SHELL, default bash);
    whatever they change will be committed afterwards."""
    print(
        'Opening an interactive shell, type `exit` to continue.\n'
        'Any modifications will be committed.'
    )
    subprocess.call(os.environ.get('SHELL', 'bash'))
def _interactive_check(*, use_color: bool) -> bool:
    """Prompt the user to approve the current fix.

    Returns True to commit, False to skip this repository.  ``s`` opens a
    shell in the checkout, ``q`` (or EOF / Ctrl-C at the prompt) exits the
    whole autofixer via SystemExit, and any other input (re)prints the
    help text.
    """
    def _quit() -> NoReturn:
        print('Goodbye!')
        raise SystemExit()
    while True:
        try:
            s = input(
                color.fmt(
                    '***Looks good [y,n,s,q,?]? ',
                    color.BLUE_B, use_color=use_color,
                ),
            )
        except (EOFError, KeyboardInterrupt):
            _quit()
        # Accept both short and long forms, case-insensitively.
        s = s.strip().lower()
        if s in {'y', 'yes'}:
            return True
        elif s in {'n', 'no'}:
            return False
        elif s in {'s', 'shell'}:
            shell()
        elif s in {'q', 'quit'}:
            _quit()
        else:
            if s not in {'?', 'help'}:
                print(
                    color.fmt(
                        f'Unexpected input: {s}',
                        color.RED, use_color=use_color,
                    ),
                )
            print('y (yes): yes it looks good, commit and continue.')
            print('n (no): no, do not commit this repository.')
            print('s (shell): open an interactive shell in the repo.')
            print('q (quit, ^C): early exit from the autofixer.')
            print('? (help): show this help message.')
def _fix_inner(
        repo: str,
        apply_fix: Callable[[], None],
        check_fix: Callable[[], None],
        config: Config,
        commit: Commit,
        autofix_settings: AutofixSettings,
) -> None:
    """Clone, fix, verify and (optionally) push a single repository.

    Steps: check out a work branch off origin/HEAD, run *apply_fix*, bail
    out when the fix produced no diff, run *check_fix*, optionally ask the
    user (interactive mode), commit, and push unless dry_run is set.
    Must stay module-level and picklable for the process mapper in fix().
    """
    with repo_context(repo, use_color=autofix_settings.color):
        branch_name = f'all-repos_autofix_{commit.branch_name}'
        run('git', 'checkout', '--quiet', 'origin/HEAD', '-b', branch_name)
        apply_fix()
        # check=False: `git diff --exit-code` returns 1 when there IS a diff.
        diff = run('git', 'diff', 'origin/HEAD', '--exit-code', check=False)
        if not diff.returncode:
            return
        check_fix()
        if (
                autofix_settings.interactive and
                not _interactive_check(use_color=autofix_settings.color)
        ):
            return
        commit_message = (
            f'{commit.msg}\n\n'
            f'Committed via https://github.com/asottile/all-repos'
        )
        commit_cmd: Tuple[str, ...] = (
            'git', 'commit', '--quiet', '-a', '-m', commit_message,
        )
        if commit.author:
            commit_cmd += ('--author', commit.author)
        run(*commit_cmd)
        if autofix_settings.dry_run:
            return
        config.push(config.push_settings, branch_name)
def _noop_check_fix() -> None:
"""A lambda is not pickleable, this must be a module-level function"""
def fix(
        repos: Iterable[str],
        *,
        apply_fix: Callable[[], None],
        check_fix: Callable[[], None] = _noop_check_fix,
        config: Config,
        commit: Commit,
        autofix_settings: AutofixSettings,
) -> None:
    """Run the autofix flow (_fix_inner) over *repos*, jobs at a time.

    Interactive mode requires jobs == 1.  `autofix_settings.limit`
    truncates the repo list; *check_fix* defaults to a no-op.
    """
    assert not autofix_settings.interactive or autofix_settings.jobs == 1
    # tuple(...)[:None] keeps everything when no limit was given.
    repos = tuple(repos)[:autofix_settings.limit]
    func = functools.partial(
        _fix_inner,
        apply_fix=apply_fix, check_fix=check_fix,
        config=config, commit=commit, autofix_settings=autofix_settings,
    )
    with mapper.process_mapper(autofix_settings.jobs) as do_map:
        mapper.exhaust(do_map(func, repos))
| 29.407666 | 79 | 0.59372 |
5295066e967236d3a0aaf1f00c316b1008b3f527 | 4,461 | py | Python | src/main.py | tonifuc3m/drugprot-evaluation-library | 6b939554de38dbb69c462cc0d45c1cb17c3eaf57 | [
"MIT"
] | 3 | 2021-07-14T13:51:10.000Z | 2021-11-18T05:56:39.000Z | src/main.py | tonifuc3m/drugprot-evaluation-library | 6b939554de38dbb69c462cc0d45c1cb17c3eaf57 | [
"MIT"
] | 1 | 2021-08-04T00:41:18.000Z | 2021-08-12T22:54:33.000Z | src/main.py | tonifuc3m/drugprot-evaluation-library | 6b939554de38dbb69c462cc0d45c1cb17c3eaf57 | [
"MIT"
] | 2 | 2021-07-25T12:06:50.000Z | 2021-08-12T18:16:58.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 29 15:32:08 2021
@author: tonifuc3m
"""
import argparse
import warnings
import pandas as pd
import os
import compute_metrics
from utils import load_entities_dict, prepro_relations, \
format_relations, get_chemical_gene_combinations, filter_pred
def warning_on_one_line(message, category, filename, lineno, file=None, line=None):
    """Render a warning as a single `file:line: Category: message` line."""
    return f'{filename}:{lineno}: {category.__name__}: {message}\n'
# Route every warning through the compact single-line formatter above.
warnings.formatwarning = warning_on_one_line
def parse_arguments():
    """Build the CLI parser and parse sys.argv; every option has a default."""
    parser = argparse.ArgumentParser(description='process user given parameters')
    # (short flag, long flag, dest, default, help) for the three path options
    path_options = (
        ('-g', '--gs_path', 'gs_path', '../gs-data/gs_relations.tsv',
         'path to GS relations file (TSV)'),
        ('-e', '--ent_path', 'ent_path', '../gs-data/gs_entities.tsv',
         'path to GS entities file (TSV)'),
        ('-p', '--pred_path', 'pred_path', '../toy-data/pred_relations.tsv',
         'path to predictions file (TSV)'),
    )
    for short_flag, long_flag, dest, default, help_text in path_options:
        parser.add_argument(short_flag, long_flag, required=False, dest=dest,
                            default=default, help=help_text)
    parser.add_argument('--pmids', required=False, dest='pmids',
                        default='../gs-data/pmids.txt',
                        help='path to list of valid pubmed IDs. One PMID per line')
    return parser.parse_args()
def main(args):
    '''
    Load GS and Predictions; format them; compute precision, recall and
    F1-score and print them.
    Parameters
    ----------
    args : argparse.Namespace
        Uses the following attributes:
        gs_path : str
            Path to GS Relations TSV file.
        pred_path : str
            Path to Predictions Relations TSV file.
        ent_path : str
            Path to GS Entities TSV file
        pmids : str
            Path to file with valid pubmed IDs
    Returns
    -------
    None.
    '''
    rel_types = ['INDIRECT-DOWNREGULATOR','INDIRECT-UPREGULATOR','DIRECT-REGULATOR',
                 'ACTIVATOR','INHIBITOR','AGONIST','AGONIST-ACTIVATOR',
                 'AGONIST-INHIBITOR','ANTAGONIST','PRODUCT-OF','SUBSTRATE',
                 'SUBSTRATE_PRODUCT-OF','PART-OF']
    # Tag 0 is implicitly "no relation"; real relation types start at 1.
    reltype2tag = {w: i+1 for i, w in enumerate(rel_types)}
    NREL = len(reltype2tag)
    # Load GS
    print("Loading GS files...")
    _dict_, genes, chemicals = load_entities_dict(args.ent_path)
    combinations, NCOMB = get_chemical_gene_combinations(_dict_)
    # Fix: close the PMID file deterministically instead of leaking the
    # handle returned by a bare open().
    with open(args.pmids) as pmids_file:
        pmids = {str(line.strip()) for line in pmids_file}
    gs = pd.read_csv(args.gs_path, sep='\t', header=None, dtype=str, skip_blank_lines=True,
                     names = ['pmid', 'rel_type', 'arg1', 'arg2'], encoding = 'utf-8')
    # Load predictions
    print("Loading prediction files...")
    tmp_pred_path = filter_pred(args.pred_path, pmids) # Create tmp predictions with only GS files
    pred = pd.read_csv(tmp_pred_path, sep='\t', header=None, dtype=str, skip_blank_lines=True,
                       names = ['pmid', 'rel_type', 'arg1', 'arg2'], encoding = 'utf-8')
    # Format data
    print("Checking GS files...")
    gs_valid,gs_rel_list = prepro_relations(gs, chemicals, rel_types, is_gs=True)
    print("Checking Predictions files...")
    pred_valid,pred_rel_list = prepro_relations(pred, chemicals, rel_types, is_gs=False, gs_files=pmids)
    print("Formatting data...")
    y_true, y_pred = format_relations(gs_valid, pred_valid, combinations,
                                      NCOMB, NREL, reltype2tag)
    # Compute metrics
    print("Computing DrugProt (BioCreative VII) metrics ...\n(p = Precision, r=Recall, f1 = F1 score)")
    compute_metrics.main(y_true, y_pred, reltype2tag, gs_rel_list, pred_rel_list)
if __name__ == '__main__':
    args = parse_arguments()
    # Validate every input path up front, failing on the first missing one
    # (same order and messages as before).
    required_paths = (
        (args.gs_path, 'Gold Standard path {} does not exist'),
        (args.pred_path, 'Predictions path {} does not exist'),
        (args.ent_path, 'Gold Standard entities path {} does not exist'),
        (args.pmids, 'PMIDs file list path {} does not exist'),
    )
    for path, message in required_paths:
        if not os.path.exists(path):
            raise Exception(message.format(path))
    main(args)
| 38.791304 | 104 | 0.629455 |
f60df4417d42939c2f2ec9a12e6964b2a0424164 | 1,478 | py | Python | 19.2.py | StPluto/Test19 | 8d33e53fe302bf750b88ff1ba5231f9b966e344c | [
"MIT"
] | null | null | null | 19.2.py | StPluto/Test19 | 8d33e53fe302bf750b88ff1ba5231f9b966e344c | [
"MIT"
] | null | null | null | 19.2.py | StPluto/Test19 | 8d33e53fe302bf750b88ff1ba5231f9b966e344c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# перепишите программу из пункта 8 так, чтобы интерфейс выглядел
# примерно следующим образом:
from tkinter import *
class Window(Tk):
    """Main window: a row of rainbow buttons; clicking one shows its hex
    code in the entry and its (Russian) colour name in the label."""

    def __init__(self):
        super().__init__()
        self.title('Радуга')
        self.geometry("280x220")
        # Label shows the colour name, entry shows the hex code.
        self.lbl = Label(text="Радуга", width=280)
        self.e1 = Entry(width=280, justify=CENTER)
        self.lbl.pack()
        self.e1.pack()
        # hex code -> colour name, in rainbow order (buttons pack RIGHT,
        # so the row ends up red..violet left to right as before)
        palette = {
            '#ff0000': 'Красный',
            '#ff7d00': 'Оранжевый',
            '#ffff00': 'Желтый',
            '#00ff00': 'Зеленый',
            '#007dff': 'Голубой',
            '#0000ff': 'Синий',
            '#7d00ff': 'Фиолетовый',
        }
        button_row = Frame(self)
        button_row.pack()
        for code, label in palette.items():
            handler = self._make_handler(code, label)
            btn = Button(button_row, text='', command=handler, bg=code,
                         width=1, height=1)
            btn.pack(side=RIGHT)

    def _make_handler(self, code, label):
        """Bind the loop variables by value (avoids late-binding bugs)."""
        return lambda: self.onclick(code, label)

    def onclick(self, colour, ru_colour):
        """Replace the entry text with *colour* and the label with its name."""
        self.e1.delete(0, END)
        self.e1.insert(0, colour)
        self.lbl['text'] = ru_colour
if __name__ == '__main__':
    # Create the window and hand control to the Tk event loop.
    app = Window()
    app.mainloop()
| 31.446809 | 97 | 0.548038 |
1b78cd893112a87a1bde1f813faa929a6819048f | 22,293 | py | Python | libs/sol-REL-1.7.5.0/sensorobjectlibrary/SolDefines.py | realms-team/solmanager | 95fa049df041add5f8d37c053ef560d0e5d06dff | [
"BSD-3-Clause"
] | null | null | null | libs/sol-REL-1.7.5.0/sensorobjectlibrary/SolDefines.py | realms-team/solmanager | 95fa049df041add5f8d37c053ef560d0e5d06dff | [
"BSD-3-Clause"
] | 104 | 2016-04-10T19:22:20.000Z | 2018-11-20T15:47:14.000Z | libs/sol-REL-1.7.5.0/sensorobjectlibrary/SolDefines.py | realms-team/solmanager | 95fa049df041add5f8d37c053ef560d0e5d06dff | [
"BSD-3-Clause"
] | null | null | null | from math import sqrt as sqrt
import sys
# NOTE(review): presumably the well-known UDP port used for SOL transport --
# confirm against the SOL specification.
SOL_PORT = 0xf0ba
# type names
# SOL object type identifiers.  sol_type_to_type_name()/sol_name_to_type()
# below rely on the SOL_TYPE_ prefix to map between names and values, so the
# naming convention must be kept.
SOL_TYPE_DISTANCE_JUDD_RS232_RAW = 0x01
SOL_TYPE_DISTANCE_JUDD_RS232_STATS = 0x02
SOL_TYPE_SNOW_MAXBOTIX_MB7554_RS232_RAW = 0x03
SOL_TYPE_SNOW_MAXBOTIX_MB7554_RS232_STATS = 0x04
SOL_TYPE_TEMPRH_SENSERION_SHT15_RS232_RAW = 0x05
SOL_TYPE_TEMPRH_SENSERION_SHT15_RS232_STATS = 0x06
SOL_TYPE_TEMPRH_SENSERION_SHT25_RS232_RAW = 0x07
SOL_TYPE_TEMPRH_SENSERION_SHT25_RS232_STATS = 0x08
SOL_TYPE_SOLAR_HUKSEFLUX_LP25_AV_RAW = 0x09
SOL_TYPE_SOLAR_HUKSEFLUX_LP25_AV_STATS = 0x0a
SOL_TYPE_SOIL_DECAGON_GS3_RS232_RAW = 0x0b
SOL_TYPE_SOIL_DECAGON_GS3_RS232_STATS = 0x0c
SOL_TYPE_DUST_NOTIFLOG = 0x0d
SOL_TYPE_DUST_NOTIFDATA = 0x0e
SOL_TYPE_DUST_NOTIFIPDATA = 0x0f
SOL_TYPE_DUST_NOTIF_HRDEVICE = 0x10
SOL_TYPE_DUST_NOTIF_HRNEIGHBORS = 0x11
SOL_TYPE_DUST_NOTIF_HRDISCOVERED = 0x12
SOL_TYPE_DUST_EVENTCOMMANDFINISHED = 0x13
SOL_TYPE_DUST_EVENTPATHCREATE = 0x14
SOL_TYPE_DUST_EVENTPATHDELETE = 0x15
SOL_TYPE_DUST_EVENTPING = 0x16
SOL_TYPE_DUST_EVENTNETWORKTIME = 0x17
SOL_TYPE_DUST_EVENTNETWORKRESET = 0x18
SOL_TYPE_DUST_EVENTMOTEJOIN = 0x19
SOL_TYPE_DUST_EVENTMOTECREATE = 0x1a
SOL_TYPE_DUST_EVENTMOTEDELETE = 0x1b
SOL_TYPE_DUST_EVENTMOTELOST = 0x1c
SOL_TYPE_DUST_EVENTMOTEOPERATIONAL = 0x1d
SOL_TYPE_DUST_EVENTMOTERESET = 0x1e
SOL_TYPE_DUST_EVENTPACKETSENT = 0x1f
SOL_TYPE_DUST_SNAPSHOT = 0x20
SOL_TYPE_DUST_SNAPSHOT_2 = 0x21
SOL_TYPE_JUDD_T2D2R1N1 = 0x22
SOL_TYPE_MB7554_DTYPE_D2SD2N1NL1NG1 = 0x24
SOL_TYPE_SHT15_T4RH4N1 = 0x25
SOL_TYPE_DUST_OAP_TEMPSAMPLE = 0x27
SOL_TYPE_SOLMANAGER_STATS = 0x28
SOL_TYPE_SENS_MB7363_D2S2N1L1G1 = 0x29
SOL_TYPE_SENS_GS3_I1D4T4E4N1 = 0x30
SOL_TYPE_SENS_SHT25_T2N1H2N1 = 0x31
SOL_TYPE_SENS_NEOVBAT_V2N1 = 0x32
SOL_TYPE_SENS_GS3_I1D4T4E4N1_0 = 0x33
SOL_TYPE_SENS_GS3_I1D4T4E4N1_1 = 0x34
SOL_TYPE_SENS_GS3_I1D4T4E4N1_2 = 0x35
SOL_TYPE_SENS_LP02_R4N1 = 0x36
SOL_TYPE_SENS_ECTM = 0x37
SOL_TYPE_SENS_MPS1 = 0x38
SOL_TYPE_ADXL362_FFT_Z = 0x39
SOL_TYPE_DUST_EVENTJOINFAILED = 0x3a
SOL_TYPE_SOLMANAGER_STATS_2 = 0x3b
SOL_TYPE_TEMPRH_SHT31 = 0x40
SOL_TYPE_DUST_OAP_ANALOG = 0x41
SOL_TYPE_DUST_OAP_DIGITAL_IN = 0x42
SOL_TYPE_TEMPRH_SHT3X = 0x43
SOL_TYPE_DUST_NOTIF_HREXTENDED = 0x44
SOL_TYPE_SENS_MPS6_ID1P4T4N1 = 0x45
SOL_TYPE_SENS_GS1_I1MV2 = 0x46
SOL_TYPE_SENS_MICROWAVE_MOTION = 0x47
SOL_TYPE_SENS_INDUCTION_CURRENT_C_SOURCE = 0x48
SOL_TYPE_SENS_INDUCTION_CURRENT_V_SOURCE = 0x49
def sol_type_to_type_name(type_id):
    """Reverse lookup: return the SOL_TYPE_* constant name whose value is
    *type_id*; raise ValueError when no such type exists."""
    module = sys.modules[__name__]
    for attr in dir(module):
        if attr.startswith('SOL_TYPE_') and getattr(module, attr) == type_id:
            return attr
    raise ValueError("SOL type %s does not exist" % type_id)
def sol_name_to_type(type_name):
    """Return the value of the module attribute called *type_name*
    (e.g. a SOL_TYPE_* constant); raise ValueError when it is unknown."""
    module = sys.modules[__name__]
    if type_name in dir(module):
        return getattr(module, type_name)
    raise ValueError("SOL object name %s does not exist" % type_name)
def solStructure(type_id):
    """
    Return the SOL structure according to the given type id
    If the element is not found, it raises a ValueError.
    :param int|str type_id:
    :return: a dictionary that contains the following keys:
        type, description, structure, fields
    """
    # Accept a type *name* as well as a numeric id.  Fix: the original
    # tested `basestring`, which does not exist on Python 3 and raised
    # NameError for every string argument; `str` works on both 2 and 3.
    if isinstance(type_id, str):
        type_id = sol_name_to_type(type_id)
    # Return the first match directly instead of scanning the whole list
    # and testing the accumulator afterwards.
    for item in sol_types:
        if item['type'] == type_id:
            return item
    raise ValueError("SOL structure not found for given id:%s" % type_id)
### Dust Constants
# Cap on neighbours handled per HR-neighbours notification -- assumed from
# the name; TODO confirm against the SmartMesh IP manager API.
MAX_NUM_NEIGHBORS = 100
### Header
# NOTE(review): SOL_HEADER_SIZE below is 1 byte, so the *_OFFSET values
# (6, 5, 4, 3, 2, 0) look like bit positions inside that single header
# byte -- confirm against the SOL specification.
# version
SOL_HDR_V_OFFSET = 6
SOL_HDR_V = 0
# Type: single or multi MTtlv
SOL_HDR_T_OFFSET = 5
SOL_HDR_T_SINGLE = 0
SOL_HDR_T_MULTI = 1
# MAC
SOL_HDR_M_OFFSET = 4
SOL_HDR_M_NOMAC = 0
SOL_HDR_M_8BMAC = 1
# timestamp encoding
SOL_HDR_S_OFFSET = 3
SOL_HDR_S_EPOCH = 0
SOL_HDR_S_ELIDED = 1
SOL_HDR_S_SIZE = 1
# Type encoding
SOL_HDR_Y_OFFSET = 2
SOL_HDR_Y_1B = 0
SOL_HDR_Y_2B = 1
# Length encoding
SOL_HDR_L_OFFSET = 0
SOL_HDR_L_WK = 0
SOL_HDR_L_1B = 1
SOL_HDR_L_2B = 2
SOL_HDR_L_ELIDED = 3
### SOL Object
# Sizes/offsets (in bytes) of the fields of a serialized SOL object.
SOL_HEADER_SIZE = 1
SOL_HEADER_OFFSET = 0
SOL_TIMESTAMP_SIZE = 4
SOL_TIMESTAMP_OFFSET = 1
SOL_OBJNUMBER_SIZE = 1
### type definitions
sol_types = [
{
'type': SOL_TYPE_DISTANCE_JUDD_RS232_RAW,
'description': '',
'structure': '>HHHB',
'fields': ['airtemp', 'travel_time', 'distance', 'retries'],
},
{
'type': SOL_TYPE_DISTANCE_JUDD_RS232_STATS,
'description': '',
'structure': '>HHHBBI',
'fields': ['airtemp', 'travel_time', 'distance', 'retries', 'count', 'std'],
},
{
'type': SOL_TYPE_SNOW_MAXBOTIX_MB7554_RS232_RAW,
'description': '',
'structure': '>H',
'fields': ['distance'],
},
{
'type': SOL_TYPE_SNOW_MAXBOTIX_MB7554_RS232_STATS,
'description': '',
'structure': '>HBI',
'fields': ['distance', 'count', 'std'],
},
{
'type': SOL_TYPE_TEMPRH_SENSERION_SHT15_RS232_RAW,
'description': '',
'structure': '>II',
'fields': ['temp', 'rH'],
},
{
'type': SOL_TYPE_TEMPRH_SENSERION_SHT15_RS232_STATS,
'description': '',
'structure': '>IIBBII',
'fields': ['temp', 'rH', 'count', 'std_temp', 'std_rH'],
},
{
'type': SOL_TYPE_TEMPRH_SENSERION_SHT25_RS232_RAW,
'description': '',
'structure': '>II',
'fields': ['temp', 'rH'],
},
{
'type': SOL_TYPE_TEMPRH_SENSERION_SHT25_RS232_STATS,
'description': '',
'structure': '>IIBII',
'fields': ['temp', 'rH', 'count', 'std_temp', 'std_rH'],
},
{
'type': SOL_TYPE_SOLAR_HUKSEFLUX_LP25_AV_RAW,
'description': '',
'structure': '>I',
'fields': ['Vout'],
},
{
'type': SOL_TYPE_SOLAR_HUKSEFLUX_LP25_AV_STATS,
'description': '',
'structure': '>IBI',
'fields': ['Vout', 'count', 'std'],
},
{
'type': SOL_TYPE_SOIL_DECAGON_GS3_RS232_RAW,
'description': '',
'structure': '>III',
'fields': ['moisture', 'soil_temp', 'soil_ec'],
},
{
'type': SOL_TYPE_SOIL_DECAGON_GS3_RS232_STATS,
'description': '',
'structure': '>IIIBI',
'fields': ['moisture', 'soil_temp', 'soil_ec', 'count', 'std'],
},
{
'type': SOL_TYPE_DUST_NOTIFDATA,
'description': '',
'structure': '>HH',
'fields': ['srcPort', 'dstPort'],
'extrafields': 'data',
},
{
'type': SOL_TYPE_DUST_NOTIF_HRDEVICE,
'description': '',
'structure': '>IBbHHHHHBBBIBBBB',
'fields': [
'charge',
'queueOcc',
'temperature',
'batteryVoltage',
'numTxOk',
'numTxFail',
'numRxOk',
'numRxLost',
'numMacDropped',
'numTxBad',
'badLinkFrameId',
'badLinkSlot',
'badLinkOffset',
'numNetMicErr',
'numMacMicErr',
'numMacCrcErr',
],
},
{
'type': SOL_TYPE_DUST_EVENTCOMMANDFINISHED,
'description': '',
'structure': '>IB',
'fields': ['callbackId', 'rc'],
},
{
'type': SOL_TYPE_DUST_EVENTPATHCREATE,
'description': '',
'structure': '>QQB',
'fields': ['source', 'dest', 'direction'],
},
{
'type': SOL_TYPE_DUST_EVENTPATHDELETE,
'description': '',
'structure': '>QQB',
'fields': ['source', 'dest', 'direction'],
},
{
'type': SOL_TYPE_DUST_EVENTPING,
'description': '',
'structure': '>IQIHB',
'fields': ['callbackId', 'macAddress', 'delay', 'voltage', 'temperature'],
},
{
'type': SOL_TYPE_DUST_EVENTNETWORKTIME,
'description': '',
'structure': '>IQ5pH',
'fields': ['uptime', 'utcTime', 'asn', 'asnOffset'],
},
{
'type': SOL_TYPE_DUST_EVENTNETWORKRESET,
'description': '',
'structure': '>',
'fields': [],
},
{
'type': SOL_TYPE_DUST_EVENTMOTEJOIN,
'description': '',
'structure': '>Q',
'fields': ['macAddress'],
},
{
'type': SOL_TYPE_DUST_EVENTMOTECREATE,
'description': '',
'structure': '>QH',
'fields': ['macAddress', 'moteId'],
},
{
'type': SOL_TYPE_DUST_EVENTMOTEDELETE,
'description': '',
'structure': '>QH',
'fields': ['macAddress', 'moteId'],
},
{
'type': SOL_TYPE_DUST_EVENTMOTELOST,
'description': '',
'structure': '>Q',
'fields': ['macAddress'],
},
{
'type': SOL_TYPE_DUST_EVENTMOTEOPERATIONAL,
'description': '',
'structure': '>Q',
'fields': ['macAddress'],
},
{
'type': SOL_TYPE_DUST_EVENTMOTERESET,
'description': '',
'structure': '>Q',
'fields': ['macAddress'],
},
{
'type': SOL_TYPE_DUST_EVENTPACKETSENT,
'description': '',
'structure': '>IB',
'fields': ['callbackId', 'rc'],
},
{
'type': SOL_TYPE_JUDD_T2D2R1N1,
'description': 'ultrasonic snow depth and temperature sensor',
'structure': '>hHBB',
'fields': ['temperature', 'depth', 'numReadings', 'retries'],
},
{
'type': SOL_TYPE_MB7554_DTYPE_D2SD2N1NL1NG1,
'description': 'mean & stddev of Nval d2g readings',
'structure': '<HHBBB',
'fields': ['mean_d2g', 'stdev', 'Nval', 'Nltm', 'NgtM'],
'apply': [
{
'tag': "mean_d2g",
'function': lambda x: x,
'args': ['mean_d2g'],
},
{
'tag': "Nval",
'function': lambda x: x,
'args': ['Nval'],
},
{
'tag': "stdev",
'function': lambda x: x,
'args': ['stdev'],
}
],
},
{
'type': SOL_TYPE_SHT15_T4RH4N1,
'description': 'temperature and relative humidity sensor',
'structure': '<ffB',
'fields': ['temperature', 'rH', 'numReadings'],
},
{
'type': SOL_TYPE_DUST_OAP_TEMPSAMPLE,
'description': '',
'structure': '>h',
'fields': ['temperature'],
},
{
'type': SOL_TYPE_SOLMANAGER_STATS,
'description': '',
'structure': '>III',
'fields': ['sol_version', 'solmanager_version', 'sdk_version'],
},
{
'type': SOL_TYPE_SENS_MB7363_D2S2N1L1G1,
'description': 'mean & stddev of Nval d2g readings',
'structure': '<HHBBB',
'fields': ['mean_d2g', 'stdev', 'Nval', 'Nltm', 'NgtM'],
'apply': [
{
'tag': "mean_d2g",
'function': lambda x: x,
'args': ['mean_d2g'],
},
{
'tag': "Nval",
'function': lambda x: x,
'args': ['Nval'],
},
{
'tag': "stdev",
'function': lambda x: x,
'args': ['stdev'],
}
],
},
{
'type': SOL_TYPE_SENS_GS3_I1D4T4E4N1,
'description': 'soil moisture. sub_id indicates depth',
'structure': '<BfffB',
'fields': ['sub_id', 'dielect', 'temp', 'eleCond', 'Nval'],
'apply': [
{
'tag': "sub_id",
'function': lambda x: x,
'args': ['sub_id'],
},
{
'tag': "Nval",
'function': lambda x: x,
'args': ['Nval'],
}
],
},
{
'type': SOL_TYPE_SENS_SHT25_T2N1H2N1,
'description': 'temperature and humidity sensor',
'structure': '<HBHB',
'fields': ['temp_raw', 't_Nval', 'rh_raw', 'rh_Nval'],
'apply': [
{
'field': "temp_phys",
'function': lambda x: -46.85 + 175.72*(float(x)/65536),
'args': ['temp_raw'],
},
{
'field': "rh_phys",
'function': lambda x: -6 + 125*(float(x)/65536),
'args': ['rh_raw'],
},
{
'tag': "t_Nval",
'function': lambda x: x,
'args': ['t_Nval'],
},
{
'tag': "rh_Nval",
'function': lambda x: x,
'args': ['rh_Nval'],
}
]
},
{
'type': SOL_TYPE_SENS_NEOVBAT_V2N1,
'description': 'raw battery voltage of Neomote',
'structure': '<hB',
'fields': ['voltage', 'N'],
'apply': [
{
'field': "vol_phys",
'function': lambda x: float(x)*0.11,
'args': ['voltage'],
},
{
'tag': "N",
'function': lambda x: x,
'args': ['N'],
}
]
},
{
'type': SOL_TYPE_SENS_GS3_I1D4T4E4N1_0,
'description': 'soil moisture at depth 0',
'structure': '<fffB',
'fields': ['dielect', 'temp', 'eleCond', 'Nval'],
'apply': [
{
'tag': "Nval",
'function': lambda x: x,
'args': ['Nval'],
}
],
},
{
'type': SOL_TYPE_SENS_GS3_I1D4T4E4N1_1,
'description': 'soil moisture at depth 1',
'structure': '<fffB',
'fields': ['dielect', 'temp', 'eleCond', 'Nval'],
'apply': [
{
'tag': "Nval",
'function': lambda x: x,
'args': ['Nval'],
}
],
},
{
'type': SOL_TYPE_SENS_GS3_I1D4T4E4N1_2,
'description': 'soil moisture at depth 2',
'structure': '<fffB',
'fields': ['dielect', 'temp', 'eleCond', 'Nval'],
'apply': [
{
'tag': "Nval",
'function': lambda x: x,
'args': ['Nval'],
}
],
},
{
'type': SOL_TYPE_SENS_LP02_R4N1,
'description': 'radiation sensor',
'structure': '<iB',
'fields': ['irradiance', 'N'],
},
{
'type': SOL_TYPE_SENS_ECTM,
'description': 'Decagon ECTM soil moisture and temp',
'structure': '<iiif',
'fields': ['die_raw', 'EC_raw', 'temp_raw', 'depth'],
'apply': [
{
'tag': "depth",
'function': lambda x: x,
'args': ['depth'],
}
],
},
{
'type': SOL_TYPE_SENS_MPS1,
'description': 'Decagon MPS1 soil matric potential',
'structure': '<ff',
'fields': ['die_raw', 'depth'],
'apply': [
{
'tag': "depth",
'function': lambda x: x,
'args': ['depth'],
}
],
},
{
'type': SOL_TYPE_ADXL362_FFT_Z,
'description': 'highest 5 frequency bins and magnitudes',
'structure': '<BBHHHHHHHHHH',
'fields': ['conf1', 'conf2', 'f0', 'f1', 'f2', 'f3', 'f4', 'm0', 'm1', 'm2', 'm3', 'm4'],
},
{
'type': SOL_TYPE_DUST_EVENTJOINFAILED,
'description': 'generated when a mote sends a join request to the manager but the request can not be validated',
'structure': '>QB',
'fields': ['macAddress', 'reason'],
},
{
'type': SOL_TYPE_SOLMANAGER_STATS_2,
'description': '',
'structure': '>IIIII',
'fields': ['sol_version', 'solmanager_version', 'sdk_version',
'ram_usage', 'disk_usage'],
},
{
'type': SOL_TYPE_TEMPRH_SHT31,
'description': 'temperature and humidity sensor',
'structure': '>HHB',
'fields': ['temp_raw', 'rh_raw', 'id'],
'apply': [
{
'tag': "id",
'function': lambda x: x,
'args': ['id'],
},
{
'field': "temp_phys",
'function': lambda x: (x*175/0xffff)-45,
'args': ['temp_raw'],
},
{
'field': "rh_phys",
'function': lambda x: x*100/0xffff,
'args': ['rh_raw'],
},
],
},
{
'type': SOL_TYPE_TEMPRH_SHT3X,
'description': 'temperature and humidity sensor',
'structure': '<HBHB',
'fields': ['temp_raw', 't_Nval', 'rh_raw', 'rh_Nval'],
'apply': [
{
'field': "temp_phys",
'function': lambda x: (x*175.0/0xffff)-45,
'args': ['temp_raw'],
},
{
'field': "rh_phys",
'function': lambda x: x*100.0/0xffff,
'args': ['rh_raw'],
},
],
},
{
'type': SOL_TYPE_SENS_MPS6_ID1P4T4N1,
'description': 'soil temp and matric potential',
'structure': '<BffB',
'fields': ['id', 'pot', 'temp', 'Nval'],
},
{
'type': SOL_TYPE_SENS_GS1_I1MV2,
'description': 'analog soil moisture',
'structure': '<BH',
'fields': ['id', 'NmVolts'],
'apply': [
{
'tag': "id",
'function': lambda x: x,
'args': ['id'],
},
{
'field': "soil_moist",
'function': lambda x: (0.000494 * x -0.554),
'args': ['NmVolts'],
},
],
},
{
'type': SOL_TYPE_DUST_OAP_ANALOG,
'description': 'OAP analog sample',
'structure': '>Bh',
'fields': ['input','voltage'],
},
{
'type': SOL_TYPE_DUST_OAP_DIGITAL_IN,
'description': 'OAP digital_in sample',
'structure': '>BB',
'fields': ['input','state'],
},
{
'type': SOL_TYPE_SENS_MICROWAVE_MOTION,
'description': 'microwave motion sensor with digital output',
'structure': '<HB',
'fields': ['edge_count', 'sensor_id'],
'apply': [
{
'tag': "id",
'function': lambda x: x,
'args': ['sensor_id'],
}
],
},
{
'type': SOL_TYPE_SENS_INDUCTION_CURRENT_C_SOURCE,
'description': 'clamp on current sensor with digital output',
'structure': '<HB',
'fields': ['tick_count', 'sensor_id'],
'apply': [
{
'tag': "id",
'function': lambda x: x,
'args': ['sensor_id'],
}
],
},
{
'type': SOL_TYPE_SENS_INDUCTION_CURRENT_V_SOURCE,
'description': 'clamp on current sensor with analog output, raw counts are reported',
'structure': '<LLHB',
'fields': ['accu_sum','accu_sum_of_squares', 'sample_count', 'sensor_id'],
'apply': [
{
'tag': "id",
'function': lambda x: x,
'args': ['sensor_id'],
},
{
'field': "current_A",
'function': lambda x,y,z: sqrt(y/float(z)-x*x/float(z)/float(z))*0.001*98.464318,
'args': ['accu_sum', 'accu_sum_of_squares', 'sample_count'],
},
],
},
]
| 32.592105 | 121 | 0.451801 |
bd21a9ab2e1b09e120414df555e030c0e26aaaca | 9,791 | py | Python | ObfuscateCommands.py | CyberMatters/ObfuscateCommands | ca3f857303b45bb7cd89e897ab25aae48184f562 | [
"Apache-2.0"
] | null | null | null | ObfuscateCommands.py | CyberMatters/ObfuscateCommands | ca3f857303b45bb7cd89e897ab25aae48184f562 | [
"Apache-2.0"
] | null | null | null | ObfuscateCommands.py | CyberMatters/ObfuscateCommands | ca3f857303b45bb7cd89e897ab25aae48184f562 | [
"Apache-2.0"
] | null | null | null | # Copyright [2021] [Intrinsec]
# Program written by Dany GIANG aka CyberMatters
import argparse
import sys
import logging
import base64
import re
import pandas as pd
# Module-level logger; handlers/level are configured in main() via logging.basicConfig().
logger = logging.getLogger(__name__)
def clean_dataset(df):
    """Filter the raw dataset down to well-formed command rows.

    A row is kept only when its ``command`` value
      * is wrapped in exactly one leading and one trailing wildcard
        (``*<text>*`` with no inner ``*``), and
      * is at least 7 characters long including the wildcards.
    The wildcards are then stripped and only the ``command`` and
    ``description`` columns are returned, re-indexed 0..n-1.

    Improvements over the previous revision: the row-by-row while loop is
    replaced by vectorized pandas string operations, and a non-string
    ``command`` value (e.g. NaN) is now silently dropped instead of raising.
    """
    logger = logging.getLogger(__name__)  # same module logger, fetched locally
    logger.info("Cleaning dataset ...\n")
    commands = df['command'].astype(str)
    # fullmatch anchors both ends, so this is the old r'^\*[^\*]+\*$' check:
    # one leading '*', one trailing '*', no inner '*'.
    well_formed = commands.str.fullmatch(r'\*[^*]+\*') & (commands.str.len() >= 7)
    df = df.loc[well_formed, ['command', 'description']].copy()
    # Matched commands contain exactly the two wrapping wildcards; drop them.
    df['command'] = df['command'].str.replace('*', '', regex=False)
    df.reset_index(drop=True, inplace=True)
    logger.info("=> OK\n")
    return df
def base64_encode(df_clean, df_final, cpt_clean, cpt_final):
    """Append the base64-encoded variants of one command to ``df_final``.

    For the command in ``df_clean`` row ``cpt_clean`` this writes 61 rows:
    the bare command, the command preceded or followed by one of the shell
    separator characters, the command with two preceding separators, and the
    command wrapped in one preceding and one following separator.  This
    mirrors how the command may appear inside a larger encoded command line.

    Defect fixed: the previous revision repeated the same encode-and-store
    loop five times; the variants are now built once and encoded in a single
    loop, in exactly the same row order as before.

    Returns the index of the next free row in ``df_final``.
    """
    add_char = [' ', '&', '|', ';', ',']
    command = df_clean.at[cpt_clean, 'command']
    # Variant order is identical to the previous revision.
    variants = [command]
    variants += [pre + command for pre in add_char]
    variants += [command + suf for suf in add_char]
    variants += [p1 + p2 + command for p1 in add_char for p2 in add_char]
    variants += [pre + command + suf for pre in add_char for suf in add_char]
    plain = '*' + command + '*'
    for variant in variants:
        encoded = base64.b64encode(variant.encode('utf-8')).decode('utf-8')
        df_final.at[cpt_final, 'obfuscated_command'] = '*' + encoded + '*'
        df_final.at[cpt_final, 'encoding_type'] = 'base_64'
        df_final.at[cpt_final, 'plain_text_command'] = plain
        cpt_final += 1
    return cpt_final
def hex_encode(df_clean, df_final, cpt_clean, cpt_final):
    """Append one hex-encoded variant of the current command to ``df_final``.

    Reads ``df_clean`` row ``cpt_clean``, writes ``df_final`` row
    ``cpt_final`` and returns the index of the next free row.
    """
    command = df_clean.at[cpt_clean, 'command']
    hex_digits = command.encode('utf-8').hex()
    df_final.at[cpt_final, 'obfuscated_command'] = f'*{hex_digits}*'
    df_final.at[cpt_final, 'encoding_type'] = 'hex'
    df_final.at[cpt_final, 'plain_text_command'] = f'*{command}*'
    return cpt_final + 1
def ascii_encode(df_clean, df_final, cpt_clean, cpt_final):
    """Append a decimal-codepoint ('ascii') variant of the command.

    Each character becomes its ``ord()`` value; values are separated by a
    single space.  Returns the index of the next free row in ``df_final``.
    """
    command = df_clean.at[cpt_clean, 'command']
    codes = ' '.join(str(ord(ch)) for ch in command)
    df_final.at[cpt_final, 'obfuscated_command'] = f'*{codes}*'
    df_final.at[cpt_final, 'encoding_type'] = 'ascii'
    df_final.at[cpt_final, 'plain_text_command'] = f'*{command}*'
    return cpt_final + 1
def rot_encode(df_clean, df_final, cpt_clean, cpt_final):
    """Append the 25 Caesar-cipher (rot_1 .. rot_25) variants of one command.

    The command is lowercased first; only the letters a-z are rotated, any
    other character is copied through unchanged.

    Bug fixed: the previous revision called ``alphabet.index(char)`` for
    every ``char.isalpha()`` character, so any non-ASCII letter (e.g. 'é',
    which is alphabetic but not in a-z) raised ValueError.  ``str.translate``
    maps only a-z and passes everything else through.

    Returns the index of the next free row in ``df_final``.
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    command = df_clean.at[cpt_clean, 'command']
    lowered = command.lower()
    plain = '*' + command + '*'
    for rot in range(1, 26):
        # Map each letter to the letter `rot` positions later (wrapping).
        table = str.maketrans(alphabet, alphabet[rot:] + alphabet[:rot])
        df_final.at[cpt_final, 'obfuscated_command'] = '*' + lowered.translate(table) + '*'
        df_final.at[cpt_final, 'encoding_type'] = 'rot_' + str(rot)
        df_final.at[cpt_final, 'plain_text_command'] = plain
        cpt_final += 1
    return cpt_final
def encode(df_clean, df_final):
    """Fill ``df_final`` with every obfuscated variant of every command.

    For each row of ``df_clean`` this appends the plain-text row followed by
    the base64, hex, ascii and rot_1..rot_25 variants, tracking the next free
    row index across all encoders.  Returns the populated ``df_final``.
    """
    logger.info("Encoding data ...\n")
    cpt_final = 0
    for cpt_clean in range(len(df_clean)):
        wrapped = '*' + df_clean.at[cpt_clean, 'command'] + '*'
        # First row for this command: the un-obfuscated form.
        df_final.at[cpt_final, 'obfuscated_command'] = wrapped
        df_final.at[cpt_final, 'encoding_type'] = 'plain_text'
        df_final.at[cpt_final, 'plain_text_command'] = wrapped
        cpt_final += 1
        # Each encoder appends its rows and returns the next free index.
        for encoder in (base64_encode, hex_encode, ascii_encode, rot_encode):
            cpt_final = encoder(df_clean, df_final, cpt_clean, cpt_final)
    logger.info("=> OK\n")
    return df_final
def retrieve_description(filePath1, df_final):
    """Attach ``description``, ``FE`` and ``TTP`` columns to each row.

    Re-reads the original CSV and maps every ``plain_text_command`` back to
    the matching ``command`` row of the input file, copying its
    ``description``, ``FE`` and ``TTP`` values.

    Improvements over the previous revision:
      * the O(n*m) nested while loop is replaced by a single pandas map;
      * a row with no match no longer crashes (the old inner loop ran past
        the end of the input frame and raised KeyError) -- it now simply
        gets NaN in the three columns;
      * duplicate commands in the input keep first-match semantics,
        matching the old linear scan.
    """
    logger = logging.getLogger(__name__)  # same module logger, fetched locally
    logger.info("Retrieving description ...\n")
    df_initial = pd.read_csv(filePath1)
    # keep='first' (the default) reproduces the old first-match behavior.
    lookup = df_initial.drop_duplicates(subset='command').set_index('command')
    for column in ('description', 'FE', 'TTP'):
        df_final[column] = df_final['plain_text_command'].map(lookup[column])
    logger.info("=> OK\n")
    return df_final
#************************ MAIN *****************************
def main(argv):
    """CLI entry point: read the input CSV, obfuscate every command and
    write the enriched dataset to the output CSV.

    Args:
        argv: command-line arguments without the program name (e.g.
            ``sys.argv[1:]``).  Previously this parameter was ignored and
            argparse read ``sys.argv`` itself; it is now forwarded to
            ``parse_args`` (behaviorally identical for the existing caller,
            and makes main() callable programmatically).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", help="-i is followed by the path of the input file which contains the plaintext commands and description", required=True)
    parser.add_argument("-o", "--output", help="-o is followed by the path of the output file path which will contain the obfuscated commands and description", required=True)
    args = parser.parse_args(argv)
    filePath1 = args.input
    finalFilePath = args.output

    logging.basicConfig(level=logging.INFO)

    df = pd.read_csv(filePath1)
    # Drop malformed rows and strip the wrapping wildcards.
    df_clean = clean_dataset(df)

    # Seed frame defining the output columns; the dummy first row is
    # overwritten by encode(), which starts writing at index 0.
    data = {'obfuscated_command': ['a'], 'encoding_type': ['b'], 'plain_text_command': ['c'], 'description': ['d'], 'FE': ['e'], 'TTP': ['f']}
    df_final = pd.DataFrame(data)
    df_final = encode(df_clean, df_final)
    df_final = retrieve_description(filePath1, df_final)
    df_final.to_csv(finalFilePath, index=False)
    logger.info("The program terminated successfully ;)")


if __name__ == "__main__":
    main(sys.argv[1:])
1de18e8a3c1266e9e12bc063b5c4998b2a2b7d75 | 21,564 | py | Python | infra/cifuzz/cifuzz_test.py | liamjm/oss-fuzz | edf81080a87410ddf5f5e7e705173c2dd6a14ff9 | [
"Apache-2.0"
] | 1 | 2021-07-01T17:32:41.000Z | 2021-07-01T17:32:41.000Z | infra/cifuzz/cifuzz_test.py | liamjm/oss-fuzz | edf81080a87410ddf5f5e7e705173c2dd6a14ff9 | [
"Apache-2.0"
] | 2 | 2021-09-28T05:43:27.000Z | 2022-02-26T10:20:18.000Z | infra/cifuzz/cifuzz_test.py | liamjm/oss-fuzz | edf81080a87410ddf5f5e7e705173c2dd6a14ff9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the functionality of the cifuzz module's functions:
1. Building fuzzers.
2. Running fuzzers.
"""
import json
import os
import pickle
import shutil
import sys
import tempfile
import unittest
from unittest import mock
# pylint: disable=wrong-import-position
# Make the repository's infra/ directory importable before pulling in the
# modules under test.
INFRA_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(INFRA_DIR)

OSS_FUZZ_DIR = os.path.dirname(INFRA_DIR)

import cifuzz
import fuzz_target

# NOTE: This integration test relies on
# https://github.com/google/oss-fuzz/tree/master/projects/example project.
EXAMPLE_PROJECT = 'example'

# Location of files used for testing.
TEST_FILES_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                               'test_files')

# An example fuzzer that triggers a crash.
# Binary is a copy of the example project's do_stuff_fuzzer and can be
# generated by running "python3 infra/helper.py build_fuzzers example".
EXAMPLE_CRASH_FUZZER = 'example_crash_fuzzer'

# An example fuzzer that does not trigger a crash.
# Binary is a modified version of example project's do_stuff_fuzzer. It is
# created by removing the bug in my_api.cpp.
EXAMPLE_NOCRASH_FUZZER = 'example_nocrash_fuzzer'

# A fuzzer to be built in build_fuzzers integration tests.
EXAMPLE_BUILD_FUZZER = 'do_stuff_fuzzer'

# Prebuilt fuzzer binaries used by the sanitizer-specific run tests.
MEMORY_FUZZER_DIR = os.path.join(TEST_FILES_PATH, 'memory')
MEMORY_FUZZER = 'curl_fuzzer_memory'
UNDEFINED_FUZZER_DIR = os.path.join(TEST_FILES_PATH, 'undefined')
UNDEFINED_FUZZER = 'curl_fuzzer_undefined'
# pylint: disable=no-self-use
class BuildFuzzersIntegrationTest(unittest.TestCase):
  """Integration tests for cifuzz.build_fuzzers."""
  # NOTE(review): these tests build real OSS-Fuzz projects, so they
  # presumably need Docker and network access -- confirm the CI environment.

  def test_valid_commit(self):
    """Test building fuzzers with valid inputs."""
    with tempfile.TemporaryDirectory() as tmp_dir:
      out_path = os.path.join(tmp_dir, 'out')
      os.mkdir(out_path)
      self.assertTrue(
          cifuzz.build_fuzzers(
              EXAMPLE_PROJECT,
              'oss-fuzz',
              tmp_dir,
              commit_sha='0b95fe1039ed7c38fea1f97078316bfc1030c523'))
      self.assertTrue(
          os.path.exists(os.path.join(out_path, EXAMPLE_BUILD_FUZZER)))

  def test_valid_pull_request(self):
    """Test building fuzzers with valid pull request."""
    with tempfile.TemporaryDirectory() as tmp_dir:
      out_path = os.path.join(tmp_dir, 'out')
      os.mkdir(out_path)
      self.assertTrue(
          cifuzz.build_fuzzers(EXAMPLE_PROJECT,
                               'oss-fuzz',
                               tmp_dir,
                               pr_ref='refs/pull/1757/merge'))
      self.assertTrue(
          os.path.exists(os.path.join(out_path, EXAMPLE_BUILD_FUZZER)))

  def test_invalid_pull_request(self):
    """Test building fuzzers with invalid pull request."""
    # NOTE(review): this asserts True for a malformed ref, so an invalid PR
    # ref is apparently treated as non-fatal by build_fuzzers -- confirm.
    with tempfile.TemporaryDirectory() as tmp_dir:
      out_path = os.path.join(tmp_dir, 'out')
      os.mkdir(out_path)
      self.assertTrue(
          cifuzz.build_fuzzers(EXAMPLE_PROJECT,
                               'oss-fuzz',
                               tmp_dir,
                               pr_ref='ref-1/merge'))

  def test_invalid_project_name(self):
    """Test building fuzzers with invalid project name."""
    with tempfile.TemporaryDirectory() as tmp_dir:
      self.assertFalse(
          cifuzz.build_fuzzers(
              'not_a_valid_project',
              'oss-fuzz',
              tmp_dir,
              commit_sha='0b95fe1039ed7c38fea1f97078316bfc1030c523'))

  def test_invalid_repo_name(self):
    """Test building fuzzers with invalid repo name."""
    with tempfile.TemporaryDirectory() as tmp_dir:
      self.assertFalse(
          cifuzz.build_fuzzers(
              EXAMPLE_PROJECT,
              'not-real-repo',
              tmp_dir,
              commit_sha='0b95fe1039ed7c38fea1f97078316bfc1030c523'))

  def test_invalid_commit_sha(self):
    """Test building fuzzers with invalid commit SHA."""
    with tempfile.TemporaryDirectory() as tmp_dir:
      # An empty SHA is a programming error, so it raises rather than
      # returning False like the other invalid-argument cases.
      with self.assertRaises(AssertionError):
        cifuzz.build_fuzzers(EXAMPLE_PROJECT,
                             'oss-fuzz',
                             tmp_dir,
                             commit_sha='')

  def test_invalid_workspace(self):
    """Test building fuzzers with invalid workspace."""
    self.assertFalse(
        cifuzz.build_fuzzers(
            EXAMPLE_PROJECT,
            'oss-fuzz',
            'not/a/dir',
            commit_sha='0b95fe1039ed7c38fea1f97078316bfc1030c523',
        ))
class RunMemoryFuzzerIntegrationTest(unittest.TestCase):
  """Integration test for cifuzz.run_fuzzers with the memory sanitizer."""

  def tearDown(self):
    """Remove any existing crashes and test files."""
    out_dir = os.path.join(MEMORY_FUZZER_DIR, 'out')
    for out_file in os.listdir(out_dir):
      out_path = os.path.join(out_dir, out_file)
      #pylint: disable=consider-using-in
      # Keep the prebuilt fuzzer binary itself; delete everything a test
      # run produced next to it.
      if out_file == MEMORY_FUZZER:
        continue
      if os.path.isdir(out_path):
        shutil.rmtree(out_path)
      else:
        os.remove(out_path)

  def test_run_with_memory_sanitizer(self):
    """Test run_fuzzers with a valid MSan build."""
    run_success, bug_found = cifuzz.run_fuzzers(10,
                                                MEMORY_FUZZER_DIR,
                                                'curl',
                                                sanitizer='memory')
    self.assertTrue(run_success)
    self.assertFalse(bug_found)
class RunUndefinedFuzzerIntegrationTest(unittest.TestCase):
  """Integration test for cifuzz.run_fuzzers with the undefined sanitizer."""

  def tearDown(self):
    """Remove any existing crashes and test files."""
    out_dir = os.path.join(UNDEFINED_FUZZER_DIR, 'out')
    for out_file in os.listdir(out_dir):
      out_path = os.path.join(out_dir, out_file)
      #pylint: disable=consider-using-in
      # Keep the prebuilt fuzzer binary itself; delete everything a test
      # run produced next to it.
      if out_file == UNDEFINED_FUZZER:
        continue
      if os.path.isdir(out_path):
        shutil.rmtree(out_path)
      else:
        os.remove(out_path)

  def test_run_with_undefined_sanitizer(self):
    """Test run_fuzzers with a valid UBSan build."""
    run_success, bug_found = cifuzz.run_fuzzers(10,
                                                UNDEFINED_FUZZER_DIR,
                                                'curl',
                                                sanitizer='undefined')
    self.assertTrue(run_success)
    self.assertFalse(bug_found)
class RunAddressFuzzersIntegrationTest(unittest.TestCase):
  """Integration tests for cifuzz.run_fuzzers with the (default) ASan build."""

  def tearDown(self):
    """Remove any existing crashes and test files."""
    out_dir = os.path.join(TEST_FILES_PATH, 'out')
    # Prebuilt fixtures to preserve; everything else came from a test run.
    files_to_keep = [
        'undefined', 'memory', EXAMPLE_CRASH_FUZZER, EXAMPLE_NOCRASH_FUZZER
    ]
    for out_file in os.listdir(out_dir):
      out_path = os.path.join(out_dir, out_file)
      if out_file in files_to_keep:
        continue
      if os.path.isdir(out_path):
        shutil.rmtree(out_path)
      else:
        os.remove(out_path)

  def test_new_bug_found(self):
    """Tests that a crash reproducible only on the PR build is reported."""
    # Set the first return value to True, then the second to False to
    # emulate a bug existing in the current PR but not on the downloaded
    # OSS-Fuzz build.
    with mock.patch.object(fuzz_target.FuzzTarget,
                           'is_reproducible',
                           side_effect=[True, False]):
      run_success, bug_found = cifuzz.run_fuzzers(10, TEST_FILES_PATH,
                                                  EXAMPLE_PROJECT)
      build_dir = os.path.join(TEST_FILES_PATH, 'out', 'oss_fuzz_latest')
      self.assertTrue(os.path.exists(build_dir))
      self.assertNotEqual(0, len(os.listdir(build_dir)))
      self.assertTrue(run_success)
      self.assertTrue(bug_found)

  def test_old_bug_found(self):
    """Test run_fuzzers with a bug found in OSS-Fuzz before."""
    # Both builds reproduce the crash, so it is a pre-existing bug and must
    # not be reported as new (bug_found stays False).
    with mock.patch.object(fuzz_target.FuzzTarget,
                           'is_reproducible',
                           side_effect=[True, True]):
      run_success, bug_found = cifuzz.run_fuzzers(10, TEST_FILES_PATH,
                                                  EXAMPLE_PROJECT)
      build_dir = os.path.join(TEST_FILES_PATH, 'out', 'oss_fuzz_latest')
      self.assertTrue(os.path.exists(build_dir))
      self.assertNotEqual(0, len(os.listdir(build_dir)))
      self.assertTrue(run_success)
      self.assertFalse(bug_found)

  def test_invalid_build(self):
    """Test run_fuzzers with an invalid build."""
    with tempfile.TemporaryDirectory() as tmp_dir:
      out_path = os.path.join(tmp_dir, 'out')
      os.mkdir(out_path)
      run_success, bug_found = cifuzz.run_fuzzers(10, tmp_dir, EXAMPLE_PROJECT)
      self.assertFalse(run_success)
      self.assertFalse(bug_found)

  def test_invalid_fuzz_seconds(self):
    """Tests run_fuzzers with an invalid fuzz seconds."""
    with tempfile.TemporaryDirectory() as tmp_dir:
      out_path = os.path.join(tmp_dir, 'out')
      os.mkdir(out_path)
      run_success, bug_found = cifuzz.run_fuzzers(0, tmp_dir, EXAMPLE_PROJECT)
      self.assertFalse(run_success)
      self.assertFalse(bug_found)

  def test_invalid_out_dir(self):
    """Tests run_fuzzers with an invalid out directory."""
    run_success, bug_found = cifuzz.run_fuzzers(10, 'not/a/valid/path',
                                                EXAMPLE_PROJECT)
    self.assertFalse(run_success)
    self.assertFalse(bug_found)
class ParseOutputUnitTest(unittest.TestCase):
  """Test parse_fuzzer_output function in the cifuzz module."""

  def test_parse_valid_output(self):
    """Checks that the parse fuzzer output can correctly parse output."""
    test_output_path = os.path.join(TEST_FILES_PATH,
                                    'example_crash_fuzzer_output.txt')
    test_summary_path = os.path.join(TEST_FILES_PATH, 'bug_summary_example.txt')
    with tempfile.TemporaryDirectory() as tmp_dir:
      with open(test_output_path, 'rb') as test_fuzz_output:
        cifuzz.parse_fuzzer_output(test_fuzz_output.read(), tmp_dir)
      # Only the bug summary should have been written.
      result_files = ['bug_summary.txt']
      self.assertCountEqual(os.listdir(tmp_dir), result_files)

      # Compare the bug summaries.
      with open(os.path.join(tmp_dir, 'bug_summary.txt'), 'r') as bug_summary:
        detected_summary = bug_summary.read()
      with open(os.path.join(test_summary_path), 'r') as bug_summary:
        real_summary = bug_summary.read()
      self.assertEqual(detected_summary, real_summary)

  def test_parse_invalid_output(self):
    """Checks that no files are created when an invalid input was given."""
    with tempfile.TemporaryDirectory() as tmp_dir:
      cifuzz.parse_fuzzer_output(b'not a valid output_string', tmp_dir)
      self.assertEqual(len(os.listdir(tmp_dir)), 0)
class CheckFuzzerBuildUnitTest(unittest.TestCase):
  """Tests the check_fuzzer_build function in the cifuzz module."""

  def test_correct_fuzzer_build(self):
    """Checks check_fuzzer_build function returns True for valid fuzzers."""
    test_fuzzer_dir = os.path.join(TEST_FILES_PATH, 'out')
    self.assertTrue(cifuzz.check_fuzzer_build(test_fuzzer_dir))

  def test_not_a_valid_fuzz_path(self):
    """Tests that False is returned when a bad path is given."""
    self.assertFalse(cifuzz.check_fuzzer_build('not/a/valid/path'))

  def test_not_a_valid_fuzzer(self):
    """Checks a directory that exists but does not have fuzzers is False."""
    self.assertFalse(cifuzz.check_fuzzer_build(TEST_FILES_PATH))

  @mock.patch.dict(os.environ, {'ALLOWED_BROKEN_TARGETS_PERCENTAGE': '0'})
  @mock.patch('helper.docker_run')
  def test_allow_broken_fuzz_targets_percentage(self, mocked_docker_run):
    """Tests that ALLOWED_BROKEN_TARGETS_PERCENTAGE is set when running
    docker if it is set in the environment."""
    test_fuzzer_dir = os.path.join(TEST_FILES_PATH, 'out')
    mocked_docker_run.return_value = 0
    cifuzz.check_fuzzer_build(test_fuzzer_dir)
    # The env var must be forwarded to the docker invocation via -e.
    self.assertIn('-e ALLOWED_BROKEN_TARGETS_PERCENTAGE=0',
                  ' '.join(mocked_docker_run.call_args[0][0]))
class GetFilesCoveredByTargetUnitTest(unittest.TestCase):
  """Test to get the files covered by a fuzz target in the cifuzz module."""

  # Fixture file names, all relative to TEST_FILES_PATH.
  example_cov_json = 'example_curl_cov.json'
  example_fuzzer_cov_json = 'example_curl_fuzzer_cov.json'
  example_fuzzer = 'curl_fuzzer'
  example_curl_file_list = 'example_curl_file_list'

  def setUp(self):
    # Load the project-wide and per-fuzzer coverage report fixtures.
    with open(os.path.join(TEST_FILES_PATH, self.example_cov_json),
              'r') as file:
      self.proj_cov_report_example = json.loads(file.read())
    with open(os.path.join(TEST_FILES_PATH, self.example_fuzzer_cov_json),
              'r') as file:
      self.fuzzer_cov_report_example = json.loads(file.read())

  def test_valid_target(self):
    """Tests that covered files can be retrieved from a coverage report."""
    with mock.patch.object(cifuzz,
                           'get_target_coverage_report',
                           return_value=self.fuzzer_cov_report_example):
      file_list = cifuzz.get_files_covered_by_target(
          self.proj_cov_report_example, self.example_fuzzer, '/src/curl')

    # The expected file list is a pickled fixture (trusted, repo-local).
    with open(os.path.join(TEST_FILES_PATH, 'example_curl_file_list'),
              'rb') as file_handle:
      true_files_list = pickle.load(file_handle)
    self.assertCountEqual(file_list, true_files_list)

  def test_invalid_target(self):
    """Test asserts an invalid fuzzer returns None."""
    self.assertIsNone(
        cifuzz.get_files_covered_by_target(self.proj_cov_report_example,
                                           'not-a-fuzzer', '/src/curl'))
    self.assertIsNone(
        cifuzz.get_files_covered_by_target(self.proj_cov_report_example, '',
                                           '/src/curl'))

  def test_invalid_project_build_dir(self):
    """Test asserts an invalid build dir returns None."""
    self.assertIsNone(
        cifuzz.get_files_covered_by_target(self.proj_cov_report_example,
                                           self.example_fuzzer, '/no/pe'))
    self.assertIsNone(
        cifuzz.get_files_covered_by_target(self.proj_cov_report_example,
                                           self.example_fuzzer, ''))
class GetTargetCoverageReporUnitTest(unittest.TestCase):
  """Test get_target_coverage_report function in the cifuzz module."""

  example_cov_json = 'example_curl_cov.json'
  example_fuzzer = 'curl_fuzzer'

  def setUp(self):
    # Load the project coverage report fixture used by every test below.
    with open(os.path.join(TEST_FILES_PATH, self.example_cov_json),
              'r') as file:
      self.cov_exmp = json.loads(file.read())

  def test_valid_target(self):
    """Test a target's coverage report can be downloaded and parsed."""
    # get_json_from_url is mocked, so only the requested URL is verified.
    with mock.patch.object(cifuzz, 'get_json_from_url',
                           return_value='{}') as mock_get_json:
      cifuzz.get_target_coverage_report(self.cov_exmp, self.example_fuzzer)
      (url,), _ = mock_get_json.call_args
      self.assertEqual(
          'https://storage.googleapis.com/oss-fuzz-coverage/'
          'curl/fuzzer_stats/20200226/curl_fuzzer.json', url)

  def test_invalid_target(self):
    """Test an invalid target coverage report will be None."""
    self.assertIsNone(
        cifuzz.get_target_coverage_report(self.cov_exmp, 'not-valid-target'))
    self.assertIsNone(cifuzz.get_target_coverage_report(self.cov_exmp, ''))

  def test_invalid_project_json(self):
    """Test a project json coverage report will be None."""
    self.assertIsNone(
        cifuzz.get_target_coverage_report('not-a-proj', self.example_fuzzer))
    self.assertIsNone(cifuzz.get_target_coverage_report('',
                                                        self.example_fuzzer))
class GetLatestCoverageReportUnitTest(unittest.TestCase):
  """Test get_latest_cov_report_info function in the cifuzz module."""

  test_project = 'curl'

  def test_get_valid_project(self):
    """Tests that a project's coverage report can be downloaded and parsed.

    NOTE: This test relies on the test_project repo's coverage report.
    Example was not used because it has no coverage reports.
    """
    # get_json_from_url is mocked, so only the requested URL is verified.
    with mock.patch.object(cifuzz, 'get_json_from_url',
                           return_value='{}') as mock_fun:
      cifuzz.get_latest_cov_report_info(self.test_project)
      (url,), _ = mock_fun.call_args
      self.assertEqual(
          'https://storage.googleapis.com/oss-fuzz-coverage/'
          'latest_report_info/curl.json', url)

  def test_get_invalid_project(self):
    """Tests a project's coverage report will return None if bad project."""
    self.assertIsNone(cifuzz.get_latest_cov_report_info('not-a-proj'))
    self.assertIsNone(cifuzz.get_latest_cov_report_info(''))
class KeepAffectedFuzzersUnitTest(unittest.TestCase):
  """Test the remove_unaffected_fuzzers method in the cifuzz module."""

  test_fuzzer_1 = os.path.join(TEST_FILES_PATH, 'out', 'example_crash_fuzzer')
  test_fuzzer_2 = os.path.join(TEST_FILES_PATH, 'out', 'example_nocrash_fuzzer')
  example_file_changed = 'test.txt'

  def test_keeping_fuzzer_w_no_coverage(self):
    """Tests that a fuzzer with no coverage data is kept (not removed)."""
    with tempfile.TemporaryDirectory() as tmp_dir, mock.patch.object(
        cifuzz, 'get_latest_cov_report_info', return_value=1):
      shutil.copy(self.test_fuzzer_1, tmp_dir)
      shutil.copy(self.test_fuzzer_2, tmp_dir)
      # First fuzzer covers the changed file, second has no coverage info
      # (None) -- both must survive.
      with mock.patch.object(cifuzz,
                             'get_files_covered_by_target',
                             side_effect=[[self.example_file_changed], None]):
        cifuzz.remove_unaffected_fuzzers(EXAMPLE_PROJECT, tmp_dir,
                                         [self.example_file_changed], '')
        self.assertEqual(2, len(os.listdir(tmp_dir)))

  def test_keeping_specific_fuzzer(self):
    """Tests that a specific fuzzer is kept if it is deemed affected."""
    with tempfile.TemporaryDirectory() as tmp_dir, mock.patch.object(
        cifuzz, 'get_latest_cov_report_info', return_value=1):
      shutil.copy(self.test_fuzzer_1, tmp_dir)
      shutil.copy(self.test_fuzzer_2, tmp_dir)
      # Only the first fuzzer covers the changed file; the second covers an
      # unrelated file and must be removed.
      with mock.patch.object(cifuzz,
                             'get_files_covered_by_target',
                             side_effect=[[self.example_file_changed],
                                          ['not/a/real/file']]):
        cifuzz.remove_unaffected_fuzzers(EXAMPLE_PROJECT, tmp_dir,
                                         [self.example_file_changed], '')
        self.assertEqual(1, len(os.listdir(tmp_dir)))

  def test_no_fuzzers_kept_fuzzer(self):
    """Tests that if there is no affected then all fuzzers are kept."""
    with tempfile.TemporaryDirectory() as tmp_dir, mock.patch.object(
        cifuzz, 'get_latest_cov_report_info', return_value=1):
      shutil.copy(self.test_fuzzer_1, tmp_dir)
      shutil.copy(self.test_fuzzer_2, tmp_dir)
      with mock.patch.object(cifuzz,
                             'get_files_covered_by_target',
                             side_effect=[None, None]):
        cifuzz.remove_unaffected_fuzzers(EXAMPLE_PROJECT, tmp_dir,
                                         [self.example_file_changed], '')
        self.assertEqual(2, len(os.listdir(tmp_dir)))

  def test_both_fuzzers_kept_fuzzer(self):
    """Tests that if both fuzzers are affected then both fuzzers are kept."""
    with tempfile.TemporaryDirectory() as tmp_dir, mock.patch.object(
        cifuzz, 'get_latest_cov_report_info', return_value=1):
      shutil.copy(self.test_fuzzer_1, tmp_dir)
      shutil.copy(self.test_fuzzer_2, tmp_dir)
      with mock.patch.object(
          cifuzz,
          'get_files_covered_by_target',
          side_effect=[self.example_file_changed, self.example_file_changed]):
        cifuzz.remove_unaffected_fuzzers(EXAMPLE_PROJECT, tmp_dir,
                                         [self.example_file_changed], '')
        self.assertEqual(2, len(os.listdir(tmp_dir)))
@unittest.skip('Test is too long to be run with presubmit.')
class BuildSantizerIntegrationTest(unittest.TestCase):
  """Integration tests for the build_fuzzers function in the cifuzz module.
  Note: This test relies on the curl project being an OSS-Fuzz project."""

  def test_valid_project_curl_memory(self):
    """Test if sanitizers can be detected from project.yaml"""
    with tempfile.TemporaryDirectory() as tmp_dir:
      self.assertTrue(
          cifuzz.build_fuzzers('curl',
                               'curl',
                               tmp_dir,
                               pr_ref='fake_pr',
                               sanitizer='memory'))

  def test_valid_project_curl_undefined(self):
    """Test if sanitizers can be detected from project.yaml"""
    with tempfile.TemporaryDirectory() as tmp_dir:
      self.assertTrue(
          cifuzz.build_fuzzers('curl',
                               'curl',
                               tmp_dir,
                               pr_ref='fake_pr',
                               sanitizer='undefined'))
if __name__ == '__main__':
  unittest.main()  # Discover and run every test case in this module.
| 40.840909 | 80 | 0.663421 |
bcbd12c9b6d44fdcf49c26485686b31f1b116860 | 4,448 | py | Python | profiles/minkult.py | Zverik/osm_conflate | a7af835ce44b3ac194469b53b7f388bba168cbe4 | [
"Apache-2.0"
] | 42 | 2017-02-16T05:00:44.000Z | 2022-01-02T08:28:38.000Z | profiles/minkult.py | Zverik/osm_conflate | a7af835ce44b3ac194469b53b7f388bba168cbe4 | [
"Apache-2.0"
] | 26 | 2017-05-13T12:36:19.000Z | 2021-03-12T12:56:48.000Z | profiles/minkult.py | Zverik/osm_conflate | a7af835ce44b3ac194469b53b7f388bba168cbe4 | [
"Apache-2.0"
] | 20 | 2017-05-04T08:21:50.000Z | 2021-07-29T15:14:55.000Z | source = 'opendata.mkrf.ru'
dataset_id = 'mkrf_theaters'
query = [('amenity', 'theatre')]
max_distance = 300
master_tags = ('official_name', 'phone', 'opening_hours', 'website')
# Reading the dataset passport to determine an URL of the latest dataset version
def download_url():
    """Resolve the download URL of the newest dataset version.

    Fetches the dataset passport (meta.json) from opendata.mkrf.ru and
    returns the 'source' URL of its latest entry, or None on failure.
    """
    import logging
    import requests
    dataset_id = '7705851331-' + (param or 'museums')
    meta_url = 'http://opendata.mkrf.ru/opendata/{}/meta.json'.format(dataset_id)
    response = requests.get(meta_url)
    if response.status_code == 200 and len(response.content) > 0:
        passport = response.json()
        latest = passport['data'][-1]
        logging.info('Downloading %s from %s', passport['title'], latest['created'])
        return latest['source']
    logging.error('Could not get URL for dataset: %s %s', response.status_code, response.text)
    logging.error('Please check http://opendata.mkrf.ru/opendata/{}'.format(dataset_id))
    return None
# Conflation profile settings. `param` is injected by the osm_conflate
# framework (None when no profile parameter is supplied).
source = 'opendata.mkrf.ru'
dataset_id = 'mkrf_'+(param or 'museums')
# Choose the OSM query matching the selected dataset variant.
if not param or param == 'museums':
    query = [('tourism', 'museum')]
elif param == 'theaters':
    query = [('amenity', 'theatre')]
elif param == 'circuses':
    query = [('amenity', 'circus')]
elif param == 'philharmonic':
    # Philharmonic venues are matched against amenity=theatre objects here.
    query = [('amenity', 'theatre')]
else:
    raise ValueError('Unknown param value: {}'.format(param))
# Maximum matching distance (presumably meters — TODO confirm framework units).
max_distance = 300
master_tags = ('official_name', 'phone', 'opening_hours', 'website')
def dataset(fileobj):
    """Parse the opendata.mkrf.ru JSON dump in *fileobj* into SourcePoint objects.

    SourcePoint is provided by the osm_conflate framework at execution time.
    """
    import json
    import codecs
    def make_wd_ranges(r):
        """Converts e.g. [0,1,4] into 'Mo-Tu, Fr'."""
        # r is a sorted list of weekday indices (0=Mo .. 6=Su).
        wd = ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su']
        res = wd[r[0]]
        in_range = False
        # Iterate one past the end so a trailing run like [.., 5, 6] is closed.
        for i in range(1, len(r)+1):
            if i < len(r) and r[i] == r[i-1] + 1:
                in_range = True
            else:
                if in_range:
                    # Close the consecutive run with its last weekday.
                    res += '-' + wd[r[i-1]]
                    in_range = False
                if i < len(r):
                    res += ', ' + wd[r[i]]
        return res
    def parse_hours(h):
        """Receives a dict {'0': {'from': '10:00:00', 'to': '18:00:00'}, ...}
        and returns a proper opening_hours value."""
        # days maps an 'HH:MM-HH:MM' interval to the set of weekdays using it.
        days = {}
        for wd, d in h.items():
            if not d['from']:
                continue
            # Truncate 'HH:MM:SS' to 'HH:MM' (mutates the caller's dict).
            for i in ('from', 'to'):
                d[i] = d[i][:5]
            if d['to'] == '00:00':
                d['to'] = '24:00'
            elif not d['to']:
                # Unknown closing time: OSM convention for "open-ended".
                d['to'] = '19:00+'
            k = '{}-{}'.format(d['from'], d['to'])
            if k not in days:
                days[k] = set()
            days[k].add(int(wd))
        # Invert: group weekday-sets sharing the same interval.
        days2 = {}
        for op, d in days.items():
            days2[tuple(sorted(d))] = op
        res = []
        # Order rules by their earliest weekday for a stable, readable value.
        for d in sorted(days2.keys(), key=lambda x: min(x)):
            res.append(' '.join([make_wd_ranges(d), days2[d]]))
        return '; '.join(res)
    def wrap(coord, absmax):
        # Wrap a coordinate into [-absmax, absmax] (e.g. longitude to ±180).
        if coord < -absmax:
            return coord + absmax * 2
        if coord > absmax:
            return coord - absmax * 2
        return coord
    def format_phone(ph):
        # Normalize an 11-digit Russian number '7XXXXXXXXXX' to '+7 XXX XXX-XX-XX'.
        if ph and len(ph) == 11 and ph[0] == '7':
            return '+7 {} {}-{}-{}'.format(ph[1:4], ph[4:7], ph[7:9], ph[9:])
        return ph
    source = json.load(codecs.getreader('utf-8')(fileobj))
    data = []
    for el in source:
        d = el['data']['general']
        gid = d['id']
        # coordinates come as [lat, lon]; longitude is wrapped to ±180.
        lon = wrap(d['address']['mapPosition']['coordinates'][1], 180)
        lat = d['address']['mapPosition']['coordinates'][0]
        tags = {
            # NOTE(review): 'amenity' is hard-coded to 'theatre' even though
            # the profile config above supports museums/circuses — confirm
            # this dataset() belongs to the theaters variant.
            'amenity': 'theatre',
            'name': d['name'],
            # 'official_name': d['name'],
            # 'image': d['image']['url'],
            'operator': d['organization']['name'],
            'addr:full': '{}, {}'.format(d['locale']['name'], d['address']['street']),
        }
        # Drop a redundant operator equal to the venue name.
        if tags['operator'] == tags['name']:
            del tags['operator']
        if d.get('workingSchedule'):
            tags['opening_hours'] = parse_hours(d['workingSchedule'])
        if 'email' in d['contacts']:
            tags['email'] = d['contacts']['email']
        if 'website' in d['contacts']:
            tags['website'] = d['contacts']['website']
            # Append a trailing slash to bare '.ru' domains for consistency.
            if tags['website'].endswith('.ru'):
                tags['website'] += '/'
        if 'phones' in d['contacts'] and d['contacts']['phones']:
            tags['phone'] = format_phone(d['contacts']['phones'][0]['value'])
        data.append(SourcePoint(gid, lat, lon, tags))
    return data
| 35.023622 | 92 | 0.505845 |
98e843465164918e5cbf5c523d0c1055f8cbb7ec | 936 | py | Python | ex053.py | lucaspereirag/pythonProject | 15a88762ca94322918474537bbed13e0ed2b60a6 | [
"MIT"
] | null | null | null | ex053.py | lucaspereirag/pythonProject | 15a88762ca94322918474537bbed13e0ed2b60a6 | [
"MIT"
] | null | null | null | ex053.py | lucaspereirag/pythonProject | 15a88762ca94322918474537bbed13e0ed2b60a6 | [
"MIT"
] | null | null | null | palindromo = str(input('Digite uma frase: ')).strip().upper() #espaços eliminados
palavras = palindromo.split() #split() yields a list of the words
frase = ''.join(palavras) #join the list back into one string without spaces
inverso = frase[::-1]
print('O inverso de {} é {}'.format(frase, inverso))
if inverso == frase:
    print('A frase é um palíndromo!')
else:
    print('A frase não é um palíndromo')
#Solution using a FOR loop:
palindromo2 = str(input('Digite uma frase: ')).strip().upper() #whitespace trimmed
palavras2 = palindromo2.split() #yields a list of the words
frase2 = ''.join(palavras2) #joined without spaces
inverso2 = '' #walk the string from back to front
for letra2 in range(len(frase2) - 1, -1, -1):#this range runs from the last index down to 0, stepping by -1
    inverso2 += frase2[letra2]
print('O inverso de {} é {}'.format(frase2, inverso2))
if inverso2 == frase2:
    print('A frase é um palíndromo!')
else:
print('A frase não é um palíndromo') | 37.44 | 105 | 0.679487 |
9841aee11aa0504aba7768a5fe9e573981f6fc33 | 26,471 | py | Python | tests/calendars/test_trading_calendar.py | AustenZhu/zipline | 285d06ba7996fb4959b8dd1c1a72fe802661b7ba | [
"Apache-2.0"
] | 1 | 2018-04-05T05:33:34.000Z | 2018-04-05T05:33:34.000Z | tests/calendars/test_trading_calendar.py | miguelyad26/zipline | 52b332919e133750dfd8b726b474179f5a989430 | [
"Apache-2.0"
] | null | null | null | tests/calendars/test_trading_calendar.py | miguelyad26/zipline | 52b332919e133750dfd8b726b474179f5a989430 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import time
from os.path import (
abspath,
dirname,
join,
)
from unittest import TestCase
import numpy as np
import pandas as pd
from nose_parameterized import parameterized
from pandas import read_csv
from pandas.tslib import Timedelta
from pandas.util.testing import assert_index_equal
from pytz import timezone
from toolz import concat
from zipline.errors import (
CalendarNameCollision,
InvalidCalendarName,
)
from zipline.testing.predicates import assert_equal
from zipline.utils.calendars import (
deregister_calendar,
get_calendar,
register_calendar,
)
from zipline.utils.calendars.calendar_utils import (
_default_calendar_aliases,
_default_calendar_factories,
register_calendar_type,
)
from zipline.utils.calendars.trading_calendar import days_at_time, \
TradingCalendar
class FakeCalendar(TradingCalendar):
    """Minimal concrete TradingCalendar (with deliberately unusual values)
    used to exercise calendar (de)registration in the tests below."""
    @property
    def name(self):
        return "DMY"
    @property
    def tz(self):
        return "Asia/Ulaanbaatar"
    @property
    def open_time(self):
        return time(11, 13)
    @property
    def close_time(self):
        return time(11, 49)
class CalendarRegistrationTestCase(TestCase):
    """Tests for registering/deregistering calendar instances and types."""
    def setUp(self):
        self.dummy_cal_type = FakeCalendar
    def tearDown(self):
        # Always clean the registry so tests don't leak state into each other.
        deregister_calendar('DMY')
    def test_register_calendar(self):
        """Registering an instance makes it retrievable; re-registering collides."""
        # Build a fake calendar
        dummy_cal = self.dummy_cal_type()
        # Try to register and retrieve the calendar
        register_calendar('DMY', dummy_cal)
        retr_cal = get_calendar('DMY')
        self.assertEqual(dummy_cal, retr_cal)
        # Try to register again, expecting a name collision
        with self.assertRaises(CalendarNameCollision):
            register_calendar('DMY', dummy_cal)
        # Deregister the calendar and ensure that it is removed
        deregister_calendar('DMY')
        with self.assertRaises(InvalidCalendarName):
            get_calendar('DMY')
    def test_register_calendar_type(self):
        """Registering a type lazily constructs an instance on first lookup."""
        register_calendar_type("DMY", self.dummy_cal_type)
        retr_cal = get_calendar("DMY")
        self.assertEqual(self.dummy_cal_type, type(retr_cal))
    def test_both_places_are_checked(self):
        """Instance and type registries share one namespace for collisions."""
        dummy_cal = self.dummy_cal_type()
        # if instance is registered, can't register type with same name
        register_calendar('DMY', dummy_cal)
        with self.assertRaises(CalendarNameCollision):
            register_calendar_type('DMY', type(dummy_cal))
        deregister_calendar('DMY')
        # if type is registered, can't register instance with same name
        register_calendar_type('DMY', type(dummy_cal))
        with self.assertRaises(CalendarNameCollision):
            register_calendar('DMY', dummy_cal)
    def test_force_registration(self):
        """force=True replaces a previously registered calendar."""
        register_calendar("DMY", self.dummy_cal_type())
        first_dummy = get_calendar("DMY")
        # force-register a new instance
        register_calendar("DMY", self.dummy_cal_type(), force=True)
        second_dummy = get_calendar("DMY")
        self.assertNotEqual(first_dummy, second_dummy)
class DefaultsTestCase(TestCase):
    """Sanity check that every built-in calendar name and alias resolves."""
    def test_default_calendars(self):
        for name in concat([_default_calendar_factories,
                            _default_calendar_aliases]):
            self.assertIsNotNone(get_calendar(name),
                                 "get_calendar(%r) returned None" % name)
class DaysAtTimeTestCase(TestCase):
    """Tests for days_at_time, including days adjacent to DST transitions."""
    @parameterized.expand([
        # Each case: (day, day_offset, time_offset, tz, expected local stamp).
        # NYSE standard day
        (
            '2016-07-19', 0, time(9, 31), timezone('US/Eastern'),
            '2016-07-19 9:31',
        ),
        # CME standard day
        (
            '2016-07-19', -1, time(17, 1), timezone('America/Chicago'),
            '2016-07-18 17:01',
        ),
        # CME day after DST start
        (
            '2004-04-05', -1, time(17, 1), timezone('America/Chicago'),
            '2004-04-04 17:01'
        ),
        # ICE day after DST start
        (
            '1990-04-02', -1, time(19, 1), timezone('America/Chicago'),
            '1990-04-01 19:01',
        ),
    ])
    def test_days_at_time(self, day, day_offset, time_offset, tz, expected):
        days = pd.DatetimeIndex([pd.Timestamp(day, tz=tz)])
        result = days_at_time(days, time_offset, tz, day_offset)[0]
        expected = pd.Timestamp(expected, tz=tz).tz_convert('UTC')
        self.assertEqual(result, expected)
class ExchangeCalendarTestBase(object):
    """Shared test mixin for concrete exchange calendars.

    Subclasses set `answer_key_filename` (a CSV of expected session
    opens/closes) and `calendar_class`; each test compares the calendar's
    computed schedule against that answer key.
    """
    # Override in subclasses.
    answer_key_filename = None
    calendar_class = None
    GAPS_BETWEEN_SESSIONS = True
    MAX_SESSION_HOURS = 0
    @staticmethod
    def load_answer_key(filename):
        """
        Load the expected-sessions CSV from tests/resources/calendars/<filename>.csv
        """
        fullpath = join(
            dirname(abspath(__file__)),
            '../resources',
            'calendars',
            filename + '.csv',
        )
        return read_csv(
            fullpath,
            index_col=0,
            # NOTE: Merely passing parse_dates=True doesn't cause pandas to set
            # the dtype correctly, and passing all reasonable inputs to the
            # dtype kwarg cause read_csv to barf.
            parse_dates=[0, 1, 2],
            date_parser=lambda x: pd.Timestamp(x, tz='UTC')
        )
    # NOTE(review): unittest expects `setUpClass` (capital U); this spelling
    # relies on the nose runner recognizing it — confirm the intended runner.
    @classmethod
    def setupClass(cls):
        cls.answers = cls.load_answer_key(cls.answer_key_filename)
        cls.start_date = cls.answers.index[0]
        cls.end_date = cls.answers.index[-1]
        cls.calendar = cls.calendar_class(cls.start_date, cls.end_date)
        cls.one_minute = pd.Timedelta(minutes=1)
        cls.one_hour = pd.Timedelta(hours=1)
    def test_sanity_check_session_lengths(self):
        # make sure that no session is longer than self.MAX_SESSION_HOURS hours
        for session in self.calendar.all_sessions:
            o, c = self.calendar.open_and_close_for_session(session)
            delta = c - o
            self.assertTrue((delta.seconds / 3600) <= self.MAX_SESSION_HOURS)
    def test_calculated_against_csv(self):
        assert_index_equal(self.calendar.schedule.index, self.answers.index)
    def test_is_open_on_minute(self):
        one_minute = pd.Timedelta(minutes=1)
        for market_minute in self.answers.market_open:
            market_minute_utc = market_minute
            # The exchange should be classified as open on its first minute
            self.assertTrue(self.calendar.is_open_on_minute(market_minute_utc))
            if self.GAPS_BETWEEN_SESSIONS:
                # Decrement minute by one, to minute where the market was not
                # open
                pre_market = market_minute_utc - one_minute
                self.assertFalse(self.calendar.is_open_on_minute(pre_market))
        for market_minute in self.answers.market_close:
            close_minute_utc = market_minute
            # should be open on its last minute
            self.assertTrue(self.calendar.is_open_on_minute(close_minute_utc))
            if self.GAPS_BETWEEN_SESSIONS:
                # increment minute by one minute, should be closed
                post_market = close_minute_utc + one_minute
                self.assertFalse(self.calendar.is_open_on_minute(post_market))
    def _verify_minute(self, calendar, minute,
                       next_open_answer, prev_open_answer,
                       next_close_answer, prev_close_answer):
        # Helper: assert all four next/previous open/close lookups for `minute`.
        self.assertEqual(
            calendar.next_open(minute),
            next_open_answer
        )
        self.assertEqual(
            self.calendar.previous_open(minute),
            prev_open_answer
        )
        self.assertEqual(
            self.calendar.next_close(minute),
            next_close_answer
        )
        self.assertEqual(
            self.calendar.previous_close(minute),
            prev_close_answer
        )
    def test_next_prev_open_close(self):
        # for each session, check:
        # - the minute before the open (if gaps exist between sessions)
        # - the first minute of the session
        # - the second minute of the session
        # - the minute before the close
        # - the last minute of the session
        # - the first minute after the close (if gaps exist between sessions)
        answers_to_use = self.answers[1:-2]
        for idx, info in enumerate(answers_to_use.iterrows()):
            open_minute = info[1].iloc[0]
            close_minute = info[1].iloc[1]
            minute_before_open = open_minute - self.one_minute
            # answers_to_use starts at the second element of self.answers,
            # so self.answers.iloc[idx] is one element before, and
            # self.answers.iloc[idx + 2] is one element after the current
            # element
            previous_open = self.answers.iloc[idx].market_open
            next_open = self.answers.iloc[idx + 2].market_open
            previous_close = self.answers.iloc[idx].market_close
            next_close = self.answers.iloc[idx + 2].market_close
            # minute before open
            if self.GAPS_BETWEEN_SESSIONS:
                self._verify_minute(
                    self.calendar, minute_before_open, open_minute,
                    previous_open, close_minute, previous_close
                )
            # open minute
            self._verify_minute(
                self.calendar, open_minute, next_open, previous_open,
                close_minute, previous_close
            )
            # second minute of session
            self._verify_minute(
                self.calendar, open_minute + self.one_minute, next_open,
                open_minute, close_minute, previous_close
            )
            # minute before the close
            self._verify_minute(
                self.calendar, close_minute - self.one_minute, next_open,
                open_minute, close_minute, previous_close
            )
            # the close
            self._verify_minute(
                self.calendar, close_minute, next_open, open_minute,
                next_close, previous_close
            )
            # minute after the close
            if self.GAPS_BETWEEN_SESSIONS:
                self._verify_minute(
                    self.calendar, close_minute + self.one_minute, next_open,
                    open_minute, next_close, close_minute
                )
    def test_next_prev_minute(self):
        all_minutes = self.calendar.all_minutes
        # test 20,000 minutes because it takes too long to do the rest.
        for idx, minute in enumerate(all_minutes[1:20000]):
            self.assertEqual(
                all_minutes[idx + 2],
                self.calendar.next_minute(minute)
            )
            self.assertEqual(
                all_minutes[idx],
                self.calendar.previous_minute(minute)
            )
        # test a couple of non-market minutes
        if self.GAPS_BETWEEN_SESSIONS:
            for open_minute in self.answers.market_open[1:]:
                hour_before_open = open_minute - self.one_hour
                self.assertEqual(
                    open_minute,
                    self.calendar.next_minute(hour_before_open)
                )
            for close_minute in self.answers.market_close[1:]:
                hour_after_close = close_minute + self.one_hour
                self.assertEqual(
                    close_minute,
                    self.calendar.previous_minute(hour_after_close)
                )
    def test_minute_to_session_label(self):
        for idx, info in enumerate(self.answers[1:-2].iterrows()):
            session_label = info[1].name
            open_minute = info[1].iloc[0]
            close_minute = info[1].iloc[1]
            hour_into_session = open_minute + self.one_hour
            minute_before_session = open_minute - self.one_minute
            minute_after_session = close_minute + self.one_minute
            next_session_label = self.answers.iloc[idx + 2].name
            previous_session_label = self.answers.iloc[idx].name
            # verify that minutes inside a session resolve correctly
            minutes_that_resolve_to_this_session = [
                self.calendar.minute_to_session_label(open_minute),
                self.calendar.minute_to_session_label(open_minute,
                                                      direction="next"),
                self.calendar.minute_to_session_label(open_minute,
                                                      direction="previous"),
                self.calendar.minute_to_session_label(open_minute,
                                                      direction="none"),
                self.calendar.minute_to_session_label(hour_into_session),
                self.calendar.minute_to_session_label(hour_into_session,
                                                      direction="next"),
                self.calendar.minute_to_session_label(hour_into_session,
                                                      direction="previous"),
                self.calendar.minute_to_session_label(hour_into_session,
                                                      direction="none"),
                self.calendar.minute_to_session_label(close_minute),
                self.calendar.minute_to_session_label(close_minute,
                                                      direction="next"),
                self.calendar.minute_to_session_label(close_minute,
                                                      direction="previous"),
                self.calendar.minute_to_session_label(close_minute,
                                                      direction="none"),
                session_label
            ]
            if self.GAPS_BETWEEN_SESSIONS:
                minutes_that_resolve_to_this_session.append(
                    self.calendar.minute_to_session_label(
                        minute_before_session
                    )
                )
                minutes_that_resolve_to_this_session.append(
                    self.calendar.minute_to_session_label(
                        minute_before_session,
                        direction="next"
                    )
                )
                minutes_that_resolve_to_this_session.append(
                    self.calendar.minute_to_session_label(
                        minute_after_session,
                        direction="previous"
                    )
                )
            self.assertTrue(all(x == minutes_that_resolve_to_this_session[0]
                                for x in minutes_that_resolve_to_this_session))
            minutes_that_resolve_to_next_session = [
                self.calendar.minute_to_session_label(minute_after_session),
                self.calendar.minute_to_session_label(minute_after_session,
                                                      direction="next"),
                next_session_label
            ]
            self.assertTrue(all(x == minutes_that_resolve_to_next_session[0]
                                for x in minutes_that_resolve_to_next_session))
            self.assertEqual(
                self.calendar.minute_to_session_label(minute_before_session,
                                                      direction="previous"),
                previous_session_label
            )
            # make sure that exceptions are raised at the right time
            with self.assertRaises(ValueError):
                self.calendar.minute_to_session_label(open_minute, "asdf")
            if self.GAPS_BETWEEN_SESSIONS:
                with self.assertRaises(ValueError):
                    self.calendar.minute_to_session_label(
                        minute_before_session,
                        direction="none"
                    )
    @parameterized.expand([
        # (interval, offset): sample every `interval`-th minute from `offset`.
        (1, 0),
        (2, 0),
        (2, 1),
    ])
    def test_minute_index_to_session_labels(self, interval, offset):
        minutes = self.calendar.minutes_for_sessions_in_range(
            pd.Timestamp('2011-01-04', tz='UTC'),
            pd.Timestamp('2011-04-04', tz='UTC'),
        )
        minutes = minutes[range(offset, len(minutes), interval)]
        np.testing.assert_array_equal(
            np.array(minutes.map(self.calendar.minute_to_session_label),
                     dtype='datetime64[ns]'),
            self.calendar.minute_index_to_session_labels(minutes)
        )
    def test_next_prev_session(self):
        session_labels = self.answers.index[1:-2]
        max_idx = len(session_labels) - 1
        # the very first session
        first_session_label = self.answers.index[0]
        with self.assertRaises(ValueError):
            self.calendar.previous_session_label(first_session_label)
        # all the sessions in the middle
        for idx, session_label in enumerate(session_labels):
            if idx < max_idx:
                self.assertEqual(
                    self.calendar.next_session_label(session_label),
                    session_labels[idx + 1]
                )
            if idx > 0:
                self.assertEqual(
                    self.calendar.previous_session_label(session_label),
                    session_labels[idx - 1]
                )
        # the very last session
        last_session_label = self.answers.index[-1]
        with self.assertRaises(ValueError):
            self.calendar.next_session_label(last_session_label)
    @staticmethod
    def _find_full_session(calendar):
        # Return the first session that is not an early close, or None.
        for session_label in calendar.schedule.index:
            if session_label not in calendar.early_closes:
                return session_label
        return None
    def test_minutes_for_period(self):
        # full session
        # find a session that isn't an early close. start from the first
        # session, should be quick.
        full_session_label = self._find_full_session(self.calendar)
        if full_session_label is None:
            raise ValueError("Cannot find a full session to test!")
        minutes = self.calendar.minutes_for_session(full_session_label)
        _open, _close = self.calendar.open_and_close_for_session(
            full_session_label
        )
        np.testing.assert_array_equal(
            minutes,
            pd.date_range(start=_open, end=_close, freq="min")
        )
        # early close period
        early_close_session_label = self.calendar.early_closes[0]
        minutes_for_early_close = \
            self.calendar.minutes_for_session(early_close_session_label)
        _open, _close = self.calendar.open_and_close_for_session(
            early_close_session_label
        )
        np.testing.assert_array_equal(
            minutes_for_early_close,
            pd.date_range(start=_open, end=_close, freq="min")
        )
    def test_sessions_in_range(self):
        # pick two sessions
        session_count = len(self.calendar.schedule.index)
        first_idx = session_count // 3
        second_idx = 2 * first_idx
        first_session_label = self.calendar.schedule.index[first_idx]
        second_session_label = self.calendar.schedule.index[second_idx]
        answer_key = \
            self.calendar.schedule.index[first_idx:second_idx + 1]
        np.testing.assert_array_equal(
            answer_key,
            self.calendar.sessions_in_range(first_session_label,
                                            second_session_label)
        )
    def _get_session_block(self):
        # find and return a (full session, early close session, full session)
        # block
        shortened_session = self.calendar.early_closes[0]
        shortened_session_idx = \
            self.calendar.schedule.index.get_loc(shortened_session)
        session_before = self.calendar.schedule.index[
            shortened_session_idx - 1
        ]
        session_after = self.calendar.schedule.index[shortened_session_idx + 1]
        return [session_before, shortened_session, session_after]
    def test_minutes_in_range(self):
        sessions = self._get_session_block()
        first_open, first_close = self.calendar.open_and_close_for_session(
            sessions[0]
        )
        minute_before_first_open = first_open - self.one_minute
        middle_open, middle_close = \
            self.calendar.open_and_close_for_session(sessions[1])
        last_open, last_close = self.calendar.open_and_close_for_session(
            sessions[-1]
        )
        minute_after_last_close = last_close + self.one_minute
        # get all the minutes between first_open and last_close
        minutes1 = self.calendar.minutes_in_range(
            first_open,
            last_close
        )
        minutes2 = self.calendar.minutes_in_range(
            minute_before_first_open,
            minute_after_last_close
        )
        if self.GAPS_BETWEEN_SESSIONS:
            np.testing.assert_array_equal(minutes1, minutes2)
        else:
            # if no gaps, then minutes2 should have 2 extra minutes
            np.testing.assert_array_equal(minutes1, minutes2[1:-1])
        # manually construct the minutes
        all_minutes = np.concatenate([
            pd.date_range(
                start=first_open,
                end=first_close,
                freq="min"
            ),
            pd.date_range(
                start=middle_open,
                end=middle_close,
                freq="min"
            ),
            pd.date_range(
                start=last_open,
                end=last_close,
                freq="min"
            )
        ])
        np.testing.assert_array_equal(all_minutes, minutes1)
    def test_minutes_for_sessions_in_range(self):
        sessions = self._get_session_block()
        minutes = self.calendar.minutes_for_sessions_in_range(
            sessions[0],
            sessions[-1]
        )
        # do it manually
        session0_minutes = self.calendar.minutes_for_session(sessions[0])
        session1_minutes = self.calendar.minutes_for_session(sessions[1])
        session2_minutes = self.calendar.minutes_for_session(sessions[2])
        concatenated_minutes = np.concatenate([
            session0_minutes.values,
            session1_minutes.values,
            session2_minutes.values
        ])
        np.testing.assert_array_equal(
            concatenated_minutes,
            minutes.values
        )
    def test_sessions_window(self):
        sessions = self._get_session_block()
        np.testing.assert_array_equal(
            self.calendar.sessions_window(sessions[0], len(sessions) - 1),
            self.calendar.sessions_in_range(sessions[0], sessions[-1])
        )
        np.testing.assert_array_equal(
            self.calendar.sessions_window(
                sessions[-1],
                -1 * (len(sessions) - 1)),
            self.calendar.sessions_in_range(sessions[0], sessions[-1])
        )
    def test_session_distance(self):
        sessions = self._get_session_block()
        forward_distance = self.calendar.session_distance(
            sessions[0],
            sessions[-1],
        )
        self.assertEqual(forward_distance, len(sessions))
        backward_distance = self.calendar.session_distance(
            sessions[-1],
            sessions[0],
        )
        self.assertEqual(backward_distance, -len(sessions))
        one_day_distance = self.calendar.session_distance(
            sessions[0],
            sessions[0],
        )
        self.assertEqual(one_day_distance, 1)
    def test_open_and_close_for_session(self):
        for index, row in self.answers.iterrows():
            session_label = row.name
            open_answer = row.iloc[0]
            close_answer = row.iloc[1]
            found_open, found_close = \
                self.calendar.open_and_close_for_session(session_label)
            # Test that the methods for just session open and close produce the
            # same values as the method for getting both.
            alt_open = self.calendar.session_open(session_label)
            self.assertEqual(alt_open, found_open)
            alt_close = self.calendar.session_close(session_label)
            self.assertEqual(alt_close, found_close)
            self.assertEqual(open_answer, found_open)
            self.assertEqual(close_answer, found_close)
    def test_session_opens_in_range(self):
        found_opens = self.calendar.session_opens_in_range(
            self.answers.index[0],
            self.answers.index[-1],
        )
        assert_equal(found_opens, self.answers['market_open'])
    def test_session_closes_in_range(self):
        found_closes = self.calendar.session_closes_in_range(
            self.answers.index[0],
            self.answers.index[-1],
        )
        assert_equal(found_closes, self.answers['market_close'])
    def test_daylight_savings(self):
        # 2004 daylight savings switches:
        # Sunday 2004-04-04 and Sunday 2004-10-31
        # make sure there's no weirdness around calculating the next day's
        # session's open time.
        for date in ["2004-04-05", "2004-11-01"]:
            next_day = pd.Timestamp(date, tz='UTC')
            open_date = next_day + Timedelta(days=self.calendar.open_offset)
            the_open = self.calendar.schedule.loc[next_day].market_open
            localized_open = the_open.tz_localize("UTC").tz_convert(
                self.calendar.tz
            )
            self.assertEqual(
                (open_date.year, open_date.month, open_date.day),
                (localized_open.year, localized_open.month, localized_open.day)
            )
            self.assertEqual(
                self.calendar.open_time.hour,
                localized_open.hour
            )
            self.assertEqual(
                self.calendar.open_time.minute,
                localized_open.minute
            )
| 35.341789 | 79 | 0.601451 |
eea0e208fa585f82dc6321906ad0b2dca20ec1b5 | 991 | py | Python | library/script/dcp_pad.py | NYCPlanning/db-data-library | 2e824434683216978c5333a37df0efe51152e16d | [
"MIT"
] | 1 | 2022-01-24T22:05:11.000Z | 2022-01-24T22:05:11.000Z | library/script/dcp_pad.py | NYCPlanning/db-data-library | 2e824434683216978c5333a37df0efe51152e16d | [
"MIT"
] | 112 | 2021-01-21T17:20:02.000Z | 2022-03-30T20:42:41.000Z | library/script/dcp_pad.py | NYCPlanning/db-data-library | 2e824434683216978c5333a37df0efe51152e16d | [
"MIT"
] | null | null | null | import os
from zipfile import ZipFile
import pandas as pd
import requests
from . import df_to_tempfile
class Scriptor:
    """Downloads a DCP PAD release, extracts the ``bobaadr.txt`` address
    table and stages it as a local temp CSV for ingestion.

    Construction keyword arguments are promoted to attributes; they are
    expected to include ``config`` with ``config["dataset"]["version"]``.
    """

    def __init__(self, **kwargs):
        # Promote all keyword arguments (notably `config`) to attributes.
        self.__dict__.update(kwargs)

    @property
    def version(self):
        """PAD release version string taken from the dataset config."""
        return self.config["dataset"]["version"]

    def ingest(self) -> pd.DataFrame:
        """Download the PAD zip for ``self.version`` and return bobaadr.txt
        as a DataFrame.

        Side effects: writes ``pad<version>.zip`` and ``bobaadr.txt`` into
        the current working directory; ``runner`` removes them afterwards.
        """
        url = f"https://www1.nyc.gov/assets/planning/download/zip/data-maps/open-data/pad{self.version}.zip"
        r = requests.get(url, stream=True)
        # Fail fast on HTTP errors; without this, an HTML error page would be
        # written to disk and ZipFile would fail later with a confusing error.
        r.raise_for_status()
        with open(f"pad{self.version}.zip", "wb") as fd:
            for chunk in r.iter_content(chunk_size=128):
                fd.write(chunk)
        with ZipFile(f"pad{self.version}.zip", "r") as archive:
            archive.extract("bobaadr.txt")
        # Read every column as str to preserve leading zeros in code fields.
        return pd.read_csv("bobaadr.txt", dtype=str)

    def runner(self) -> str:
        """Ingest PAD, stage it in a temp file, clean up, return the path."""
        df = self.ingest()
        local_path = df_to_tempfile(df)
        os.remove(f"pad{self.version}.zip")
        os.remove("bobaadr.txt")
        return local_path
| 27.527778 | 108 | 0.614531 |
e2e637ef3e16c9b551178325b15b9743bdb1c05c | 3,700 | py | Python | processing/pre_extract_database.py | shizsun0609tw/visil | b06c235eb76d715f0ff2277210fd5b692a5a52ed | [
"Apache-2.0"
] | null | null | null | processing/pre_extract_database.py | shizsun0609tw/visil | b06c235eb76d715f0ff2277210fd5b692a5a52ed | [
"Apache-2.0"
] | null | null | null | processing/pre_extract_database.py | shizsun0609tw/visil | b06c235eb76d715f0ff2277210fd5b692a5a52ed | [
"Apache-2.0"
] | null | null | null | import json
import argparse
import tensorflow as tf
import numpy as np
import sys
import pickle
sys.path.append('/home/zhenhua/Project/Fake_News/visil')
from tqdm import tqdm
from model.visil import ViSiL
from datasets import VideoGenerator
# python3 processing/pre_extract_database.py --query_file queries.txt --database_file database.txt --model_dir ckpt/resnet/
if __name__ == '__main__':
    # Pre-extract ViSiL features for all database videos and cache them
    # as per-video .npy files plus a pickled {video_id: path} index.
    parser = argparse.ArgumentParser()
    # NOTE(review): --query_file and --output_file are parsed but never used
    # in this script (it only processes the database side) — confirm whether
    # they are kept for CLI compatibility with the sibling scripts.
    parser.add_argument('-q', '--query_file', type=str, required=True,
                        help='Path to file that contains the query videos')
    parser.add_argument('-d', '--database_file', type=str, required=True,
                        help='Path to file that contains the database videos')
    parser.add_argument('-o', '--output_file', type=str, default='results.json',
                        help='Name of the output file. Default: \"results.json\"')
    parser.add_argument('-n', '--network', type=str, default='resnet',
                        help='Backbone network used for feature extraction. '
                             'Options: \"resnet\" or \"i3d\". Default: \"resnet\"')
    parser.add_argument('-m', '--model_dir', type=str, default='ckpt/resnet',
                        help='Path to the directory of the pretrained model. Default: \"ckpt/resnet\"')
    parser.add_argument('-s', '--similarity_function', type=str, default='chamfer',
                        help='Function that will be used to calculate similarity '
                             'between query-target frames and videos.'
                             'Options: \"chamfer\" or \"symmetric_chamfer\". Default: \"chamfer\"')
    parser.add_argument('-b', '--batch_sz', type=int, default=128,
                        help='Number of frames contained in each batch during feature extraction. Default: 128')
    parser.add_argument('-g', '--gpu_id', type=int, default=0,
                        help='Id of the GPU used. Default: 0')
    parser.add_argument('-l', '--load_queries', action='store_true',
                        help='Flag that indicates that the queries will be loaded to the GPU memory.')
    parser.add_argument('-t', '--threads', type=int, default=8,
                        help='Number of threads used for video loading. Default: 8')
    args = parser.parse_args()
    # Initialize ViSiL model
    model = ViSiL(args.model_dir, net=args.network,
                  load_queries=args.load_queries, gpu_id=args.gpu_id,
                  similarity_function=args.similarity_function,
                  queries_number=None)
    # Create a video generator for the database video
    enqueuer = tf.keras.utils.OrderedEnqueuer(VideoGenerator(args.database_file, all_frames='i3d' in args.network),
                                              use_multiprocessing=True, shuffle=False)
    enqueuer.start(workers=args.threads, max_queue_size=args.threads*2)
    generator = enqueuer.get()
    # Extract database features
    features = dict()
    pbar = tqdm(range(len(enqueuer.sequence)))
    for _ in pbar:
        frames, video_id = next(generator)
        # Skip videos that yielded at most one frame; they get no feature file.
        if frames.shape[0] > 1:
            feature = model.extract_features(frames, args.batch_sz)
            filename = 'processing/database/' + video_id + '.npy'
            with open(filename, 'wb') as f:
                np.save(f, feature)
            features[video_id] = filename
            pbar.set_postfix(video_id=video_id)
    enqueuer.stop()
    # Persist the video_id -> feature-file index for later lookup.
    with open('processing/database/database_features.pk', 'wb') as f:
        pickle.dump(features, f, protocol=pickle.HIGHEST_PROTOCOL)
    #with open('processing/database_features.json', 'w') as f:
    #    json.dump(features, f, indent=1)
| 46.25 | 123 | 0.625405 |
15ed94a962e6c8cc418e937aa0988b15abc6da9c | 4,100 | py | Python | pay-api/tests/unit/services/test_invoice.py | nitheesh-aot/sbc-pay | dcb9c1bd3d2954f11c8d643aa6618d8470e3b0f7 | [
"Apache-2.0"
] | null | null | null | pay-api/tests/unit/services/test_invoice.py | nitheesh-aot/sbc-pay | dcb9c1bd3d2954f11c8d643aa6618d8470e3b0f7 | [
"Apache-2.0"
] | null | null | null | pay-api/tests/unit/services/test_invoice.py | nitheesh-aot/sbc-pay | dcb9c1bd3d2954f11c8d643aa6618d8470e3b0f7 | [
"Apache-2.0"
] | null | null | null | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the FeeSchedule Service.
Test-Suite to ensure that the FeeSchedule Service is working as expected.
"""
import pytest
from pay_api.exceptions import BusinessException
from pay_api.models import FeeSchedule
from pay_api.services.invoice import Invoice as Invoice_service
from tests.utilities.base_test import (
factory_invoice, factory_payment, factory_payment_account, factory_payment_line_item)
def test_invoice_saved_from_new(session):
    """A freshly created invoice with a line item is retrievable by id."""
    payment_account = factory_payment_account()
    payment = factory_payment()
    payment_account.save()
    payment.save()
    invoice_model = factory_invoice(payment=payment, payment_account=payment_account)
    invoice_model.save()
    fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
    factory_payment_line_item(
        invoice_model.id, fee_schedule_id=fee_schedule.fee_schedule_id
    ).save()
    invoice = Invoice_service.find_by_id(invoice_model.id, skip_auth_check=True)
    assert invoice is not None
    # Populated fields on a brand-new invoice.
    for attribute in ('id', 'payment_id', 'invoice_status_code', 'total',
                      'payment_line_items', 'folio_number'):
        assert getattr(invoice, attribute) is not None
    # Fields that stay empty until payment actually happens.
    for attribute in ('refund', 'payment_date', 'paid'):
        assert getattr(invoice, attribute) is None
def test_invoice_invalid_lookup(session):
    """Looking up a non-existent invoice id raises BusinessException."""
    with pytest.raises(BusinessException) as exc_info:
        Invoice_service.find_by_id(999, skip_auth_check=True)
    assert exc_info.type == BusinessException
def test_invoice_find_by_valid_payment_id(session):
    """A saved invoice can be looked up through its payment identifier."""
    account = factory_payment_account()
    pay = factory_payment()
    account.save()
    pay.save()
    created = factory_invoice(payment=pay, payment_account=account)
    created.save()

    invoice = Invoice_service.find_by_payment_identifier(pay.id, skip_auth_check=True)

    assert invoice is not None
    assert invoice.id is not None
    assert invoice.payment_id is not None
    assert invoice.invoice_status_code is not None
    assert invoice.refund is None
    assert invoice.payment_date is None
    assert invoice.total is not None
    assert invoice.paid is None
    # No line items were attached to this invoice.
    assert not invoice.payment_line_items
def test_invoice_get_invoices(session):
    """get_invoices returns the single invoice attached to a payment."""
    account = factory_payment_account()
    pay = factory_payment()
    account.save()
    pay.save()
    created = factory_invoice(payment=pay, payment_account=account)
    created.save()

    result = Invoice_service.get_invoices(pay.id, skip_auth_check=True)

    assert result is not None
    assert len(result.get('items')) == 1
    # The listing endpoint does not expand line items.
    assert not result.get('items')[0].get('line_items')
def test_invoice_get_invoices_with_no_invoice(session):
    """get_invoices yields an empty item list when the payment has no invoices."""
    account = factory_payment_account()
    pay = factory_payment()
    account.save()
    pay.save()

    result = Invoice_service.get_invoices(pay.id, skip_auth_check=True)

    assert result is not None
    assert len(result.get('items')) == 0
def test_invoice_find_by_invalid_payment_id(session):
    """Lookup by an unknown payment id returns an empty invoice shell, not None."""
    invoice = Invoice_service.find_by_payment_identifier(999, skip_auth_check=True)
    assert invoice is not None
    assert invoice.id is None
| 34.745763 | 90 | 0.75878 |
bae8eb1f4df73a8d5b1ad9cecfbd7bbb9d16afcd | 5,387 | py | Python | examples/eq_van_der_pol.py | ZhaozhiQIAN/torchdiffeq | 5409011d64630618aad2a154f6b7a823376a8687 | [
"MIT"
] | null | null | null | examples/eq_van_der_pol.py | ZhaozhiQIAN/torchdiffeq | 5409011d64630618aad2a154f6b7a823376a8687 | [
"MIT"
] | null | null | null | examples/eq_van_der_pol.py | ZhaozhiQIAN/torchdiffeq | 5409011d64630618aad2a154f6b7a823376a8687 | [
"MIT"
] | null | null | null | import os
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from ode_models import *
# Command-line configuration for the ODE-fitting demo.
parser = argparse.ArgumentParser('ODE demo')
parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
parser.add_argument('--data_size', type=int, default=1000)
parser.add_argument('--batch_time', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=20)
parser.add_argument('--niters', type=int, default=2000)
parser.add_argument('--test_freq', type=int, default=20)
parser.add_argument('--viz', action='store_true')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--adjoint', action='store_true')
args = parser.parse_args()

# The adjoint solver trades compute for O(1) memory during backprop.
if args.adjoint:
    from torchdiffeq import odeint_adjoint as odeint
else:
    from torchdiffeq import odeint

device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')

# initial value (2, 1)
true_y0 = torch.tensor([[2.0, 1.0]])
# Time grid on which the ground-truth trajectory is evaluated.
t = torch.linspace(-2.5, 2.5, args.data_size)
# Stiffness parameter of the van der Pol system below.
eps = 0.01
class Lambda(nn.Module):
    """Ground-truth stiff van der Pol vector field (Lienard form).

    For state y = (x, z):
        dx/dt = (z - x^3/3 + x) / eps
        dz/dt = -x
    where ``eps`` is the module-level stiffness constant.
    """

    def forward(self, t, y):
        x, z = y[:, 0], y[:, 1]
        dx_dt = 1 / eps * (z - 1.0 / 3.0 * x**3 + x)
        dz_dt = -x
        # Stack the two 1-D derivative vectors into an (N, 2) state derivative.
        return torch.stack((dx_dt, dz_dt), dim=1)
# This is not the actual solution!
# Numerically integrate the ground-truth field once, up front, to produce
# the training trajectory; no gradients are needed for this.
with torch.no_grad():
    # data_size (1000) x 1 x dimension (2)
    true_y = odeint(Lambda(), true_y0, t, method='dopri5')
def get_batch():
    """Sample a mini-batch of short trajectory segments from ``true_y``.

    Returns:
        batch_y0: initial states, one per sampled segment.
        batch_t: shared time stamps — the first ``batch_time`` points of the
            global grid (segments are treated as time-translation invariant).
        batch_y: the target trajectories, time-major.
    """
    # Sample segment start indices without replacement, leaving room for a
    # full batch_time-long window at the end of the trajectory.
    s = torch.from_numpy(
        np.random.choice(
            np.arange(args.data_size - args.batch_time, dtype=np.int64),
            args.batch_size, replace=False))
    # batch_size x 1 x dimension (D)
    batch_y0 = true_y[s]  # (M, D)
    # T time steps to evaluate at
    batch_t = t[:args.batch_time]  # (T)
    # T x batch_size x 1 x dimension
    batch_y = torch.stack([true_y[s + i] for i in range(args.batch_time)], dim=0)  # (T, M, D)
    return batch_y0, batch_t, batch_y
def makedirs(dirname):
    """Create *dirname* (including missing parents); no-op if it exists.

    Uses ``exist_ok=True`` instead of the previous check-then-create pattern,
    which was racy if the directory appeared between the check and the call.
    """
    os.makedirs(dirname, exist_ok=True)
# Lazily set up matplotlib only when visualization was requested, so the
# script runs headless without the dependency.
if args.viz:
    makedirs('png')
    import matplotlib.pyplot as plt

    # Three panels: trajectories over time, phase portrait, learned field.
    fig = plt.figure(figsize=(12, 4), facecolor='white')
    ax_traj = fig.add_subplot(131, frameon=False)
    ax_phase = fig.add_subplot(132, frameon=False)
    ax_vecfield = fig.add_subplot(133, frameon=False)
    plt.show(block=False)
def visualize(true_y, pred_y, odefunc, itr):
    """Render trajectory, phase-portrait and vector-field panels.

    No-op unless ``--viz`` was passed. Draws into the module-level axes
    created in the viz setup block and saves the figure to ``png/<itr>``.
    """
    if args.viz:
        ax_traj.cla()
        ax_traj.set_title('Trajectories')
        ax_traj.set_xlabel('t')
        ax_traj.set_ylabel('x,y')
        # Ground truth in solid green, prediction in dashed blue.
        ax_traj.plot(t.numpy(), true_y.numpy()[:, 0, 0], t.numpy(), true_y.numpy()[:, 0, 1], 'g-')
        ax_traj.plot(t.numpy(), pred_y.numpy()[:, 0, 0], '--', t.numpy(), pred_y.numpy()[:, 0, 1], 'b--')
        ax_traj.set_xlim(t.min(), t.max())
        ax_traj.set_ylim(-2, 2)
        ax_traj.legend()

        ax_phase.cla()
        ax_phase.set_title('Phase Portrait')
        ax_phase.set_xlabel('x')
        ax_phase.set_ylabel('y')
        ax_phase.plot(true_y.numpy()[:, 0, 0], true_y.numpy()[:, 0, 1], 'g-')
        ax_phase.plot(pred_y.numpy()[:, 0, 0], pred_y.numpy()[:, 0, 1], 'b--')
        ax_phase.set_xlim(t.min(), t.max())
        ax_phase.set_ylim(-2, 2)

        ax_vecfield.cla()
        ax_vecfield.set_title('Learned Vector Field')
        ax_vecfield.set_xlabel('x')
        ax_vecfield.set_ylabel('y')

        # Evaluate the learned field on a 21x21 grid and normalize each
        # arrow to unit length so the stream plot shows direction only.
        y, x = np.mgrid[-2:2:21j, -2:2:21j]
        dydt = odefunc(0, torch.Tensor(np.stack([x, y], -1).reshape(21 * 21, 2))).cpu().detach().numpy()
        mag = np.sqrt(dydt[:, 0]**2 + dydt[:, 1]**2).reshape(-1, 1)
        dydt = (dydt / mag)
        dydt = dydt.reshape(21, 21, 2)

        ax_vecfield.streamplot(x, y, dydt[:, :, 0], dydt[:, :, 1], color="black")
        ax_vecfield.set_xlim(t.min(), t.max())
        ax_vecfield.set_ylim(-2, 2)

        fig.tight_layout()
        plt.savefig('png/{:03d}'.format(itr))
        plt.draw()
        plt.pause(0.001)
class RunningAverageMeter(object):
    """Tracks the most recent value and an exponential moving average of it."""

    def __init__(self, momentum=0.99):
        self.momentum = momentum
        self.reset()

    def reset(self):
        """Forget all history; the next update seeds the average."""
        self.val = None
        self.avg = 0

    def update(self, val):
        """Record *val* and fold it into the running average."""
        if self.val is None:
            # First observation after a reset seeds the average directly.
            self.avg = val
        else:
            m = self.momentum
            self.avg = self.avg * m + val * (1 - m)
        self.val = val
if __name__ == '__main__':
    ii = 0  # visualization frame counter

    # Learnable approximation of the vector field (defined in ode_models).
    func = ODEFuncLayeredResidual()
    optimizer = optim.Adam(func.parameters(), lr=1e-3)
    end = time.time()

    time_meter = RunningAverageMeter(0.97)
    loss_meter = RunningAverageMeter(0.97)

    for itr in range(1, args.niters + 1):
        optimizer.zero_grad()
        batch_y0, batch_t, batch_y = get_batch()
        # Integrate the learned field over the batch window and fit it to
        # the ground-truth segments with an L1 loss.
        pred_y = odeint(func, batch_y0, batch_t)
        loss = torch.mean(torch.abs(pred_y - batch_y))
        loss.backward()
        optimizer.step()

        time_meter.update(time.time() - end)
        loss_meter.update(loss.item())

        if itr % args.test_freq == 0:
            # Periodic evaluation: integrate over the full time grid from
            # the true initial condition and report total L1 error.
            with torch.no_grad():
                pred_y = odeint(func, true_y0, t)
                loss = torch.mean(torch.abs(pred_y - true_y))
                print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
                visualize(true_y, pred_y, func, ii)
                ii += 1

        end = time.time()
| 30.95977 | 105 | 0.601634 |
8ad02af5f994380047cf12fcda8255279c7d9d61 | 14,842 | py | Python | fedlearner/data_join/joiner_impl/stream_joiner.py | WuLC/fedlearner | 7a9dbbecb1388bbb02dc04dfae1ddb810a427c4b | [
"Apache-2.0"
] | null | null | null | fedlearner/data_join/joiner_impl/stream_joiner.py | WuLC/fedlearner | 7a9dbbecb1388bbb02dc04dfae1ddb810a427c4b | [
"Apache-2.0"
] | null | null | null | fedlearner/data_join/joiner_impl/stream_joiner.py | WuLC/fedlearner | 7a9dbbecb1388bbb02dc04dfae1ddb810a427c4b | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import logging
import time
from fedlearner.common import metrics
import fedlearner.data_join.common as common
from fedlearner.data_join.joiner_impl.example_joiner import ExampleJoiner
class _CmpCtnt(object):
def __init__(self, item):
self._event_time = item.event_time
self._example_id = item.example_id
def __lt__(self, other):
assert isinstance(other, _CmpCtnt)
if self._event_time != other._event_time:
return self._event_time < other._event_time
return self._example_id < other._example_id
def __eq__(self, other):
assert isinstance(other, _CmpCtnt)
return self._event_time == other._event_time and \
self._example_id == other._example_id
class _JoinWindow(object):
    """Sliding buffer of (index, item) pairs with percentile watermarks.

    ``pt_rate``/``qt_rate`` are quantile positions in [0, 1] used to compute
    the committed progress watermark (pt) and the lookahead watermark (qt)
    over the buffered items, ordered by (event_time, example_id).
    """

    def __init__(self, pt_rate, qt_rate):
        assert 0.0 <= pt_rate <= 1.0, \
            "pt_rate {} should in [0.0, 1.0]".format(pt_rate)
        assert 0.0 <= qt_rate <= 1.0, \
            "qt_rate {} should in [0.0, 1.0]".format(qt_rate)
        self._buffer = []          # list of (index, item) in arrival order
        self._cmp_ctnt = []        # parallel list of _CmpCtnt sort keys
        self._cmp_ctnt_sorted = True  # lazily maintained sortedness flag
        self._pt_rate = pt_rate
        self._qt_rate = qt_rate
        self._committed_pt = None  # highest pt watermark committed so far

    def __iter__(self):
        return iter(self._buffer)

    def append(self, index, item):
        """Append an item; mark keys unsorted if arrival order regresses."""
        if len(self._cmp_ctnt) > 0 and \
                self._cmp_ctnt[-1] > _CmpCtnt(item):
            self._cmp_ctnt_sorted = False
        self._cmp_ctnt.append(_CmpCtnt(item))
        self._buffer.append((index, item))

    def size(self):
        return len(self._buffer)

    def forward_pt(self):
        """Advance the committed pt watermark; return True if it moved."""
        if len(self._buffer) == 0:
            return False
        new_pt = self._cal_pt(self._pt_rate)
        if self._committed_pt is None or new_pt > self._committed_pt:
            self._committed_pt = new_pt
            return True
        return False

    def committed_pt(self):
        return self._committed_pt

    def qt(self):
        """Current qt-quantile watermark, or None if the buffer is empty."""
        return self._cal_pt(self._qt_rate)

    def reset(self, new_buffer, state_stale):
        """Replace the buffer; drop the committed pt only on stale state."""
        self._cmp_ctnt = [_CmpCtnt(item[1]) for item in new_buffer]
        self._buffer = new_buffer
        if state_stale:
            self._committed_pt = None
        # Recompute sortedness of the new key sequence (non-strict order).
        self._cmp_ctnt_sorted = \
            all((self._cmp_ctnt[i] < self._cmp_ctnt[i+1] or
                 self._cmp_ctnt[i] == self._cmp_ctnt[i+1])
                for i in range(len(self._cmp_ctnt)-1))

    def __getitem__(self, index):
        return self._buffer[index]

    def _cal_pt(self, rate):
        """Return the key at quantile *rate*, sorting lazily if needed."""
        if not self._buffer:
            return None
        if not self._cmp_ctnt_sorted:
            self._cmp_ctnt.sort()
            self._cmp_ctnt_sorted = True
        pos = int(len(self._buffer) * rate)
        # Clamp rate == 1.0 onto the last element.
        if pos == len(self._buffer):
            pos = len(self._buffer) - 1
        return self._cmp_ctnt[pos]
class StreamExampleJoiner(ExampleJoiner):
    """Joins leader example ids with follower raw data using sliding windows.

    The leader window holds synced example ids; the follower window holds raw
    data items. Items are matched by example_id, buffered in a join cache and
    dumped into data blocks once the follower watermark catches up with the
    leader watermark.
    """

    def __init__(self, example_joiner_options, raw_data_options,
                 data_block_builder_options, etcd, data_source, partition_id):
        super(StreamExampleJoiner, self).__init__(example_joiner_options,
                                                  raw_data_options,
                                                  data_block_builder_options,
                                                  etcd, data_source,
                                                  partition_id)
        self._min_window_size = example_joiner_options.min_matching_window
        self._max_window_size = example_joiner_options.max_matching_window
        # (pt_rate, qt_rate) quantile watermarks for each side.
        self._leader_join_window = _JoinWindow(0.05, 0.99)
        self._follower_join_window = _JoinWindow(0.05, 0.90)
        self._joined_cache = {}  # example_id -> (follower index, item)
        self._leader_unjoined_example_ids = []
        self._follower_example_cache = {}  # example_id -> (index, item)
        self._fill_leader_enough = False
        self._reset_joiner_state(True)

    @classmethod
    def name(cls):
        return 'STREAM_JOINER'

    def _inner_joiner(self, state_stale):
        """Generator driving the join; yields finished data block metas."""
        if self.is_join_finished():
            return
        sync_example_id_finished, raw_data_finished = \
                self._prepare_join(state_stale)
        join_data_finished = False
        while self._fill_leader_join_window(sync_example_id_finished):
            # A side is "exhausted" when its input stream has ended and its
            # window holds less than half the minimum matching window.
            leader_exhausted = sync_example_id_finished and \
                    self._leader_join_window.size() <= \
                    self._min_window_size / 2
            follower_exhausted = False
            delay_dump = True
            while delay_dump and \
                    self._fill_follower_join_window(raw_data_finished):
                follower_exhausted = raw_data_finished and \
                        self._follower_join_window.size() <= \
                        self._min_window_size / 2
                delay_dump = self._need_delay_dump(raw_data_finished)
                if delay_dump:
                    # Follower lags behind the leader: only cache matches.
                    self._update_join_cache()
                else:
                    for meta in self._dump_joined_items():
                        yield meta
                self._evit_stale_follower_cache()
            if not delay_dump:
                self._reset_joiner_state(False)
            if leader_exhausted:
                join_data_finished = not delay_dump
            elif follower_exhausted:
                join_data_finished = True
            if delay_dump or join_data_finished:
                break
        if self._get_data_block_builder(False) is not None and \
                (self._need_finish_data_block_since_interval() or
                    join_data_finished):
            yield self._finish_data_block()
        if join_data_finished:
            self._set_join_finished()
            logging.warning("finish join example for partition %d by %s",
                            self._partition_id, self.name())

    def _prepare_join(self, state_stale):
        """Reset in-memory state on stale restart, then defer to the base."""
        if state_stale:
            self._reset_joiner_state(True)
        return super(StreamExampleJoiner, self)._prepare_join(state_stale)

    def _need_delay_dump(self, raw_data_finished):
        """Dump must be delayed until the follower qt reaches the leader qt."""
        if self._follower_visitor.finished() and raw_data_finished:
            return False
        leader_qt = self._leader_join_window.qt()
        follower_qt = self._follower_join_window.qt()
        if leader_qt is not None and follower_qt is not None and \
                not follower_qt < leader_qt:
            return False
        return True

    def _update_join_cache(self):
        """Move newly matched leader ids into the joined cache."""
        start_tm = time.time()
        new_unjoined_example_ids = []
        for example_id in self._leader_unjoined_example_ids:
            if example_id in self._follower_example_cache:
                self._joined_cache[example_id] = \
                        self._follower_example_cache[example_id]
            else:
                new_unjoined_example_ids.append(example_id)
        self._leader_unjoined_example_ids = new_unjoined_example_ids
        metrics.emit_timer(name='stream_joiner_update_join_cache',
                           value=int(time.time()-start_tm),
                           tags=self._metrics_tags)

    def _dump_joined_items(self):
        """Append joined items to the data block builder in leader order."""
        start_tm = time.time()
        for (li, le) in self._leader_join_window:
            eid = le.example_id
            if eid not in self._follower_example_cache and \
                    eid not in self._joined_cache:
                continue
            if eid not in self._joined_cache:
                self._joined_cache[eid] = \
                        self._follower_example_cache[eid]
            builder = self._get_data_block_builder(True)
            assert builder is not None, "data block builder must be "\
                                        "not None if before dummping"
            fi, item = self._joined_cache[eid]
            builder.append_item(item, li, fi)
            if builder.check_data_block_full():
                yield self._finish_data_block()
        metrics.emit_timer(name='stream_joiner_dump_joined_items',
                           value=int(time.time()-start_tm),
                           tags=self._metrics_tags)

    def _reset_joiner_state(self, state_stale):
        """Clear leader-side state; also clear follower state if stale."""
        self._leader_join_window.reset([], state_stale)
        self._fill_leader_enough = False
        self._joined_cache = {}
        self._leader_unjoined_example_ids = []
        if state_stale:
            self._follower_join_window.reset([], True)
            self._follower_example_cache = {}

    def _fill_leader_join_window(self, sync_example_id_finished):
        """Fill the leader window; return True once it holds enough items."""
        if not self._fill_leader_enough:
            start_tm = time.time()
            start_pos = self._leader_join_window.size()
            if not self._fill_join_windows(self._leader_visitor,
                                           self._leader_join_window,
                                           None):
                # Window not full: only "enough" if the id stream has ended.
                self._fill_leader_enough = sync_example_id_finished
            else:
                self._fill_leader_enough = True
            if self._fill_leader_enough:
                self._leader_unjoined_example_ids = \
                        [item.example_id for _, item in self._leader_join_window]
            end_pos = self._leader_join_window.size()
            eids = [(self._leader_join_window[idx][0],
                     self._leader_join_window[idx][1].example_id)
                    for idx in range(start_pos, end_pos)]
            self._joiner_stats.fill_leader_example_ids(eids)
            metrics.emit_timer(name='stream_joiner_fill_leader_join_window',
                               value=int(time.time()-start_tm),
                               tags=self._metrics_tags)
        return self._fill_leader_enough

    def _fill_follower_join_window(self, raw_data_finished):
        """Fill the follower window from raw data; cache items by example id."""
        start_tm = time.time()
        start_pos = self._follower_join_window.size()
        follower_enough = self._fill_join_windows(self._follower_visitor,
                                                  self._follower_join_window,
                                                  self._follower_example_cache)
        end_pos = self._follower_join_window.size()
        eids = [(self._follower_join_window[idx][0],
                 self._follower_join_window[idx][1].example_id)
                for idx in range(start_pos, end_pos)]
        self._joiner_stats.fill_follower_example_ids(eids)
        # Fixed metric name: previously emitted under the (copy-pasted)
        # leader metric 'stream_joiner_fill_leader_join_window'.
        metrics.emit_timer(name='stream_joiner_fill_follower_join_window',
                           value=int(time.time()-start_tm),
                           tags=self._metrics_tags)
        return follower_enough or raw_data_finished

    def _fill_join_windows(self, visitor, join_window, join_cache):
        """Consume the visitor until the window's watermark advances or fills."""
        while not visitor.finished() and \
                join_window.size() < self._max_window_size:
            # Grow the fill target geometrically, capped at the max window.
            required_item_count = self._min_window_size
            if join_window.size() >= self._min_window_size:
                required_item_count *= 2
            if required_item_count >= self._max_window_size:
                required_item_count = self._max_window_size
            self._consume_item_until_count(
                    visitor, join_window,
                    required_item_count, join_cache
                )
            if join_window.forward_pt():
                return True
        return join_window.size() >= self._max_window_size

    def _evict_if_useless(self, item):
        """An item is useless once joined or behind the committed watermark."""
        return item.example_id in self._joined_cache or \
                self._leader_join_window.committed_pt() is None or \
                _CmpCtnt(item) < self._leader_join_window.committed_pt()

    def _evict_if_force(self, item):
        """Forced eviction threshold: items behind the leader qt watermark."""
        return self._leader_join_window.qt() is None or \
                _CmpCtnt(item) < self._leader_join_window.qt()

    def _evict_impl(self, candidates, filter_fn):
        """Drop candidates matching *filter_fn*; return the survivors."""
        reserved_items = []
        for (index, item) in candidates:
            example_id = item.example_id
            if filter_fn(item):
                self._follower_example_cache.pop(example_id, None)
            else:
                reserved_items.append((index, item))
        return reserved_items

    def _evit_stale_follower_cache(self):
        # NOTE: method name keeps the historical "evit" (sic) spelling since
        # it is referenced by name inside this class.
        start_tm = time.time()
        reserved_items = self._evict_impl(self._follower_join_window,
                                          self._evict_if_useless)
        if len(reserved_items) < self._max_window_size:
            self._follower_join_window.reset(reserved_items, False)
            return
        # Still over capacity: apply the stronger qt-based eviction.
        reserved_items = self._evict_impl(reserved_items,
                                          self._evict_if_force)
        self._follower_join_window.reset(reserved_items, False)
        metrics.emit_timer(name='stream_joiner_evit_stale_follower_cache',
                           value=int(time.time()-start_tm),
                           tags=self._metrics_tags)

    def _consume_item_until_count(self, visitor, windows,
                                  required_item_count, cache=None):
        """Pull items from *visitor* into *windows* until the count is met."""
        for (index, item) in visitor:
            if item.example_id == common.InvalidExampleId:
                logging.warning("ignore item indexed as %d from %s since "\
                                "invalid example id", index, visitor.name())
            elif item.event_time == common.InvalidEventTime:
                logging.warning("ignore item indexed as %d from %s since "\
                                "invalid event time", index, visitor.name())
            else:
                windows.append(index, item)
                if cache is not None:
                    cache[item.example_id] = (index, item)
                if windows.size() >= required_item_count:
                    return
        assert visitor.finished(), "visitor shoud be finished of "\
                                   "required_item is not satisfied"

    def _finish_data_block(self):
        """Finish the current block and record the follower restart index."""
        meta = super(StreamExampleJoiner, self)._finish_data_block()
        self._follower_restart_index = self._follower_visitor.get_index()
        if self._follower_join_window.size() > 0:
            self._follower_restart_index = \
                    self._follower_join_window[0][0]
        # Restart must replay anything still pending in the joined cache.
        for index, _ in self._joined_cache.values():
            if index < self._follower_restart_index:
                self._follower_restart_index = index
        return meta
| 43.145349 | 79 | 0.601065 |
a9777f0294ec9fae6d7e6279fec7e86c81925a47 | 21,283 | py | Python | mlmodels/model_tf/misc/tf_nlp/chatbot/48.transformer-xl-lstm.py | gitter-badger/mlmodels | f08cc9b6ec202d4ad25ecdda2f44487da387569d | [
"MIT"
] | 1 | 2022-03-11T07:57:48.000Z | 2022-03-11T07:57:48.000Z | mlmodels/model_tf/misc/tf_nlp/chatbot/48.transformer-xl-lstm.py | whitetiger1002/mlmodels | f70f1da7434e8855eed50adc67b49cc169f2ea24 | [
"MIT"
] | null | null | null | mlmodels/model_tf/misc/tf_nlp/chatbot/48.transformer-xl-lstm.py | whitetiger1002/mlmodels | f70f1da7434e8855eed50adc67b49cc169f2ea24 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import collections
import os
import re
import time
import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
# In[2]:
def build_dataset(words, n_words, atleast=1):
    """Build a vocabulary over *words* and encode the corpus with it.

    Args:
        words: iterable of tokens forming the corpus.
        n_words: maximum number of distinct tokens to keep (most frequent).
        atleast: minimum frequency for a token to enter the vocabulary.

    Returns:
        Tuple ``(data, count, dictionary, reversed_dictionary)`` where
        ``data`` is the corpus encoded as ids, ``count`` lists
        [token, frequency] pairs with the specials PAD/GO/EOS/UNK first,
        ``dictionary`` maps token -> id and ``reversed_dictionary`` is the
        inverse mapping.
    """
    count = [["PAD", 0], ["GO", 1], ["EOS", 2], ["UNK", 3]]
    counter = collections.Counter(words).most_common(n_words)
    counter = [i for i in counter if i[1] >= atleast]
    count.extend(counter)
    dictionary = dict()
    for word, _ in count:
        dictionary[word] = len(dictionary)
    unk_id = dictionary["UNK"]
    data = list()
    unk_count = 0
    for word in words:
        # Fixed: out-of-vocabulary words now map to the UNK id (index 3);
        # previously they were encoded as 0, which is the PAD id here.
        index = dictionary.get(word, unk_id)
        if index == unk_id:
            unk_count += 1
        data.append(index)
    # Record the OOV count on the UNK entry (previously clobbered PAD's).
    count[3][1] = unk_count
    reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reversed_dictionary
# In[3]:
# Load the Cornell Movie-Dialogs corpus: raw utterances and the
# conversation structure that links them.
lines = open("movie_lines.txt", encoding="utf-8", errors="ignore").read().split("\n")
conv_lines = open("movie_conversations.txt", encoding="utf-8", errors="ignore").read().split("\n")

# Map line id -> utterance text (fields are separated by " +++$+++ ").
id2line = {}
for line in lines:
    _line = line.split(" +++$+++ ")
    if len(_line) == 5:
        id2line[_line[0]] = _line[4]

# Parse each conversation into its ordered list of line ids.
convs = []
for line in conv_lines[:-1]:
    _line = line.split(" +++$+++ ")[-1][1:-1].replace("'", "").replace(" ", "")
    convs.append(_line.split(","))

# Build (question, answer) pairs from consecutive utterances.
questions = []
answers = []
for conv in convs:
    for i in range(len(conv) - 1):
        questions.append(id2line[conv[i]])
        answers.append(id2line[conv[i + 1]])
def clean_text(text):
    """Normalize *text*: lowercase, expand common English contractions,
    strip punctuation, and collapse whitespace runs into single spaces."""
    text = text.lower()
    text = re.sub(r"i'm", "i am", text)
    text = re.sub(r"he's", "he is", text)
    text = re.sub(r"she's", "she is", text)
    text = re.sub(r"it's", "it is", text)
    text = re.sub(r"that's", "that is", text)
    # Fixed: previously expanded "what's" to "that is".
    text = re.sub(r"what's", "what is", text)
    text = re.sub(r"where's", "where is", text)
    text = re.sub(r"how's", "how is", text)
    text = re.sub(r"\'ll", " will", text)
    text = re.sub(r"\'ve", " have", text)
    text = re.sub(r"\'re", " are", text)
    text = re.sub(r"\'d", " would", text)
    text = re.sub(r"won't", "will not", text)
    text = re.sub(r"can't", "cannot", text)
    text = re.sub(r"n't", " not", text)
    # Restore dropped final g: "doin'" -> "doing".
    text = re.sub(r"n'", "ng", text)
    text = re.sub(r"'bout", "about", text)
    text = re.sub(r"'til", "until", text)
    text = re.sub(r"[-()\"#/@;:<>{}`+=~|.!?,]", "", text)
    return " ".join([i.strip() for i in filter(None, text.split())])
# Normalize every question/answer with clean_text.
clean_questions = []
for question in questions:
    clean_questions.append(clean_text(question))

clean_answers = []
for answer in answers:
    clean_answers.append(clean_text(answer))

# Keep only short pairs: both sides must have 2..5 tokens.
min_line_length = 2
max_line_length = 5
short_questions_temp = []
short_answers_temp = []

i = 0
for question in clean_questions:
    if len(question.split()) >= min_line_length and len(question.split()) <= max_line_length:
        short_questions_temp.append(question)
        short_answers_temp.append(clean_answers[i])
    i += 1

short_questions = []
short_answers = []

i = 0
for answer in short_answers_temp:
    if len(answer.split()) >= min_line_length and len(answer.split()) <= max_line_length:
        short_answers.append(answer)
        short_questions.append(short_questions_temp[i])
    i += 1

# Small train/test split: first 500 pairs train, next 50 held out.
question_test = short_questions[500:550]
answer_test = short_answers[500:550]
short_questions = short_questions[:500]
short_answers = short_answers[:500]
# In[4]:
concat_from = " ".join(short_questions + question_test).split()
vocabulary_size_from = len(list(set(concat_from)))
data_from, count_from, dictionary_from, rev_dictionary_from = build_dataset(
concat_from, vocabulary_size_from
)
print("vocab from size: %d" % (vocabulary_size_from))
print("Most common words", count_from[4:10])
print("Sample data", data_from[:10], [rev_dictionary_from[i] for i in data_from[:10]])
print("filtered vocab size:", len(dictionary_from))
print("% of vocab used: {}%".format(round(len(dictionary_from) / vocabulary_size_from, 4) * 100))
# In[5]:
concat_to = " ".join(short_answers + answer_test).split()
vocabulary_size_to = len(list(set(concat_to)))
data_to, count_to, dictionary_to, rev_dictionary_to = build_dataset(concat_to, vocabulary_size_to)
print("vocab from size: %d" % (vocabulary_size_to))
print("Most common words", count_to[4:10])
print("Sample data", data_to[:10], [rev_dictionary_to[i] for i in data_to[:10]])
print("filtered vocab size:", len(dictionary_to))
print("% of vocab used: {}%".format(round(len(dictionary_to) / vocabulary_size_to, 4) * 100))
# In[6]:
GO = dictionary_from["GO"]
PAD = dictionary_from["PAD"]
EOS = dictionary_from["EOS"]
UNK = dictionary_from["UNK"]
# In[7]:
for i in range(len(short_answers)):
short_answers[i] += " EOS"
# In[8]:
def str_idx(corpus, dic):
    """Encode each sentence in *corpus* as a list of token ids using *dic*,
    mapping unseen tokens to the module-level UNK id."""
    return [[dic.get(token, UNK) for token in sentence.split()]
            for sentence in corpus]
def pad_sentence_batch(sentence_batch, pad_int, maxlen):
    """Right-pad every sentence to *maxlen* with *pad_int*.

    Returns the padded batch plus a parallel list of lengths, all equal to
    *maxlen* (the fixed sequence length fed to the model).
    """
    padded_seqs = [sentence + [pad_int] * (maxlen - len(sentence))
                   for sentence in sentence_batch]
    seq_lens = [maxlen for _ in sentence_batch]
    return padded_seqs, seq_lens
# In[9]:
# Encode the corpora as id sequences.
X = str_idx(short_questions, dictionary_from)
Y = str_idx(short_answers, dictionary_to)
X_test = str_idx(question_test, dictionary_from)
Y_test = str_idx(answer_test, dictionary_from)

# Fixed sequence lengths used for padding (double the longest sentence).
maxlen_question = max([len(x) for x in X]) * 2
maxlen_answer = max([len(y) for y in Y]) * 2

# Model/training hyperparameters (Transformer-XL encoder/decoder sizes).
learning_rate = 1e-3
batch_size = 16
epoch = 20
n_layer = 3
d_model = 256
d_embed = 256
n_head = 10
d_head = 50
d_inner = 512
# In[12]:
def positional_embedding(pos_seq, inv_freq, bsz=None):
    """Sinusoidal positional embedding for relative positions *pos_seq*.

    Concatenates sin and cos components along the feature axis, then adds a
    singleton batch axis (tiled to *bsz* copies when given).
    """
    sinusoid_inp = tf.einsum("i,j->ij", pos_seq, inv_freq)
    pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1)
    if bsz is not None:
        return tf.tile(pos_emb[:, None, :], [1, bsz, 1])
    else:
        return pos_emb[:, None, :]
def positionwise_FF(inp, d_model, d_inner, kernel_initializer, scope="ff"):
    """Position-wise two-layer feed-forward block with a residual connection
    and post-layer-norm (d_model -> d_inner -> d_model)."""
    output = inp
    with tf.variable_scope(scope):
        output = tf.layers.dense(
            inp,
            d_inner,
            activation=tf.nn.relu,
            kernel_initializer=kernel_initializer,
            name="layer_1",
        )
        output = tf.layers.dense(
            output, d_model, kernel_initializer=kernel_initializer, name="layer_2"
        )
        # Residual add, then layer norm over the last axis.
        output = tf.contrib.layers.layer_norm(output + inp, begin_norm_axis=-1)
    return output
def rel_shift(x):
    """Relative-attention shift trick (Transformer-XL).

    Pads one zero column on the second axis, reshapes so the pad rotates the
    rows, then slices it off — realigning scores from relative-position
    indexing to absolute query/key indexing without an explicit gather.
    """
    x_size = tf.shape(x)

    x = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])
    x = tf.reshape(x, [x_size[1] + 1, x_size[0], x_size[2], x_size[3]])
    x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1])
    x = tf.reshape(x, x_size)

    return x
def rel_multihead_attn(
    w,
    r,
    r_w_bias,
    r_r_bias,
    attn_mask,
    mems,
    d_model,
    n_head,
    d_head,
    kernel_initializer,
    scope="rel_attn",
):
    """Multi-head attention with Transformer-XL relative position encoding.

    Args:
        w: input sequence, time-major [qlen, bsz, d_model].
        r: relative positional embeddings [rlen, 1, d_model].
        r_w_bias, r_r_bias: learned content/position biases ("u" and "v").
        attn_mask: [qlen, klen] mask, 1 where attention is disallowed.
        mems: cached states from previous segments, prepended to keys/values.
    Returns the attended, residual-added and layer-normed output.
    """
    scale = 1 / (d_head ** 0.5)
    with tf.variable_scope(scope):
        qlen = tf.shape(w)[0]
        rlen = tf.shape(r)[0]
        bsz = tf.shape(w)[1]

        # Prepend memory so keys/values span memory + current segment.
        cat = tf.concat([mems, w], 0) if mems is not None and mems.shape.ndims > 1 else w
        w_heads = tf.layers.dense(
            cat,
            3 * n_head * d_head,
            use_bias=False,
            kernel_initializer=kernel_initializer,
            name="qkv",
        )
        r_head_k = tf.layers.dense(
            r, n_head * d_head, use_bias=False, kernel_initializer=kernel_initializer, name="r"
        )
        w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, -1)
        # Queries come only from the current segment (last qlen positions).
        w_head_q = w_head_q[-qlen:]
        klen = tf.shape(w_head_k)[0]
        w_head_q = tf.reshape(w_head_q, [qlen, bsz, n_head, d_head])
        w_head_k = tf.reshape(w_head_k, [klen, bsz, n_head, d_head])
        w_head_v = tf.reshape(w_head_v, [klen, bsz, n_head, d_head])
        r_head_k = tf.reshape(r_head_k, [rlen, n_head, d_head])
        rw_head_q = w_head_q + r_w_bias
        rr_head_q = w_head_q + r_r_bias
        # AC: content-based scores; BD: position-based scores (then shifted).
        AC = tf.einsum("ibnd,jbnd->ijbn", rw_head_q, w_head_k)
        BD = tf.einsum("ibnd,jnd->ijbn", rr_head_q, r_head_k)
        BD = rel_shift(BD)
        # NOTE(review): `paddings` is computed but never used — dead code.
        paddings = tf.fill(tf.shape(BD), float("-inf"))
        attn_score = (AC + BD) * scale
        attn_mask_t = attn_mask[:, :, None, None]
        # Masked positions get a large negative score before the softmax.
        attn_score = attn_score * (1 - attn_mask_t) - 1e30 * attn_mask_t
        attn_prob = tf.nn.softmax(attn_score, 1)
        attn_vec = tf.einsum("ijbn,jbnd->ibnd", attn_prob, w_head_v)
        size_t = tf.shape(attn_vec)
        attn_vec = tf.reshape(attn_vec, [size_t[0], size_t[1], n_head * d_head])
        attn_out = tf.layers.dense(
            attn_vec, d_model, use_bias=False, kernel_initializer=kernel_initializer, name="o"
        )
        # Residual connection and post-layer-norm.
        output = tf.contrib.layers.layer_norm(attn_out + w, begin_norm_axis=-1)
    return output
def embedding_lookup(lookup_table, x):
    """Thin wrapper over tf.nn.embedding_lookup for the adaptive embedding."""
    return tf.nn.embedding_lookup(lookup_table, x)
def mask_adaptive_embedding_lookup(
    x,
    n_token,
    d_embed,
    d_proj,
    cutoffs,
    initializer,
    proj_initializer,
    div_val=1,
    proj_same_dim=True,
    scope="adaptive_embed",
    **kwargs,
):
    """Adaptive input embedding lookup (Transformer-XL style).

    With ``div_val == 1`` a single [n_token, d_embed] table is used (plus an
    optional projection to d_proj). Otherwise the vocabulary is split at
    *cutoffs* into frequency bins whose embedding widths shrink by div_val
    per bin, each projected back to d_proj and scattered into place.

    Returns the scaled embeddings and the created parameters.
    """
    emb_scale = d_proj ** 0.5
    with tf.variable_scope(scope):
        if div_val == 1:
            lookup_table = tf.get_variable(
                "lookup_table", [n_token, d_embed], initializer=initializer
            )
            y = embedding_lookup(lookup_table, x)
            if d_proj != d_embed:
                proj_W = tf.get_variable("proj_W", [d_embed, d_proj], initializer=proj_initializer)
                y = tf.einsum("ibe,ed->ibd", y, proj_W)
            else:
                proj_W = None
            ret_params = [lookup_table, proj_W]
        else:
            tables, projs = [], []
            cutoff_ends = [0] + cutoffs + [n_token]
            x_size = tf.shape(x)
            y = tf.zeros([x_size[0], x_size[1], d_proj])
            for i in range(len(cutoff_ends) - 1):
                with tf.variable_scope("cutoff_{}".format(i)):
                    l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]
                    # Tokens belonging to this frequency bin, re-based to 0.
                    mask = (x >= l_idx) & (x < r_idx)
                    cur_x = tf.boolean_mask(x, mask) - l_idx
                    cur_d_embed = d_embed // (div_val ** i)
                    lookup_table = tf.get_variable(
                        "lookup_table", [r_idx - l_idx, cur_d_embed], initializer=initializer
                    )
                    cur_y = embedding_lookup(lookup_table, cur_x)
                    if d_proj == cur_d_embed and not proj_same_dim:
                        proj_W = None
                    else:
                        proj_W = tf.get_variable(
                            "proj_W", [cur_d_embed, d_proj], initializer=proj_initializer
                        )
                        cur_y = tf.einsum("id,de->ie", cur_y, proj_W)
                    # Scatter this bin's embeddings back into the output.
                    mask_idx = tf.to_int64(tf.where(mask))
                    y += tf.scatter_nd(mask_idx, cur_y, tf.to_int64(tf.shape(y)))
                    tables.append(lookup_table)
                    projs.append(proj_W)
            ret_params = [tables, projs]
        y *= emb_scale
    return y, ret_params
def _create_mask(qlen, mlen, same_length=False):
    """Build the [qlen, mlen+qlen] causal attention mask (1 = disallowed).

    Memory positions are always attendable; future positions within the
    current segment are masked. With *same_length*, each query additionally
    attends the same total span by masking the oldest positions too.
    """
    attn_mask = tf.ones([qlen, qlen])
    mask_u = tf.matrix_band_part(attn_mask, 0, -1)
    mask_dia = tf.matrix_band_part(attn_mask, 0, 0)
    attn_mask_pad = tf.zeros([qlen, mlen])
    ret = tf.concat([attn_mask_pad, mask_u - mask_dia], 1)
    if same_length:
        mask_l = tf.matrix_band_part(attn_mask, -1, 0)
        ret = tf.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1)
    return ret
def _cache_mem(curr_out, prev_mem, mem_len=None):
    """Build the next segment's memory from current output and old memory.

    Keeps the last *mem_len* positions (or everything if mem_len is None),
    detached from the graph so no gradients flow into cached segments.
    """
    if mem_len is None or prev_mem is None:
        new_mem = curr_out
    elif mem_len == 0:
        return prev_mem
    else:
        new_mem = tf.concat([prev_mem, curr_out], 0)[-mem_len:]

    return tf.stop_gradient(new_mem)
def transformer(
    dec_inp,
    mems,
    n_token,
    n_layer,
    d_model,
    d_embed,
    n_head,
    d_head,
    d_inner,
    initializer,
    proj_initializer=None,
    mem_len=None,
    cutoffs=[],
    div_val=1,
    tie_projs=[],
    same_length=False,
    clamp_len=-1,
    untie_r=False,
    proj_same_dim=True,
    scope="transformer",
    reuse=tf.AUTO_REUSE,
):
    """Transformer-XL stack over time-major token ids *dec_inp*.

    Returns (output, new_mems): the top-layer hidden states and the list of
    per-layer memories to feed into the next segment.

    cutoffs: a list of python int. Cutoffs for adaptive softmax.
    tie_projs: a list of python bools. Whether to tie the projections.
    """
    new_mems = []
    with tf.variable_scope(scope, reuse=reuse):
        # Content/position attention biases, optionally untied per layer.
        if untie_r:
            r_w_bias = tf.get_variable(
                "r_w_bias", [n_layer, n_head, d_head], initializer=initializer
            )
            r_r_bias = tf.get_variable(
                "r_r_bias", [n_layer, n_head, d_head], initializer=initializer
            )
        else:
            r_w_bias = tf.get_variable("r_w_bias", [n_head, d_head], initializer=initializer)
            r_r_bias = tf.get_variable("r_r_bias", [n_head, d_head], initializer=initializer)

        qlen = tf.shape(dec_inp)[0]
        mlen = tf.shape(mems[0])[0] if mems is not None else 0
        klen = mlen + qlen

        if proj_initializer is None:
            proj_initializer = initializer
        lookup_fn = mask_adaptive_embedding_lookup
        embeddings, shared_params = lookup_fn(
            x=dec_inp,
            n_token=n_token,
            d_embed=d_embed,
            d_proj=d_model,
            cutoffs=cutoffs,
            initializer=initializer,
            proj_initializer=proj_initializer,
            div_val=div_val,
            proj_same_dim=proj_same_dim,
        )

        attn_mask = _create_mask(qlen, mlen, same_length)

        # Relative positions run klen-1 .. 0, optionally clamped.
        pos_seq = tf.range(klen - 1, -1, -1.0)
        if clamp_len > 0:
            pos_seq = tf.minimum(pos_seq, clamp_len)
        inv_freq = 1 / (10000 ** (tf.range(0, d_model, 2.0) / d_model))
        pos_emb = positional_embedding(pos_seq, inv_freq)

        if mems is None:
            mems = [None] * n_layer
        output = embeddings
        for i in range(n_layer):
            # cache new mems
            new_mems.append(_cache_mem(output, mems[i], mem_len))

            with tf.variable_scope("layer_{}".format(i)):
                output = rel_multihead_attn(
                    w=output,
                    r=pos_emb,
                    r_w_bias=r_w_bias if not untie_r else r_w_bias[i],
                    r_r_bias=r_r_bias if not untie_r else r_r_bias[i],
                    attn_mask=attn_mask,
                    mems=mems[i],
                    d_model=d_model,
                    n_head=n_head,
                    d_head=d_head,
                    kernel_initializer=initializer,
                )
                output = positionwise_FF(
                    inp=output, d_model=d_model, d_inner=d_inner, kernel_initializer=initializer
                )
    return output, new_mems
# In[13]:
class Chatbot:
def __init__(self):
self.X = tf.placeholder(tf.int32, [None, None])
self.Y = tf.placeholder(tf.int32, [None, None])
self.X_seq_len = tf.count_nonzero(self.X, 1, dtype=tf.int32)
self.Y_seq_len = tf.count_nonzero(self.Y, 1, dtype=tf.int32)
batch_size = tf.shape(self.X)[0]
main = tf.strided_slice(self.Y, [0, 0], [batch_size, -1], [1, 1])
decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)
initializer = tf.initializers.random_normal(stddev=0.1)
memory = tf.fill([n_layer, tf.shape(self.X)[0], tf.shape(self.X)[1], d_model], PAD)
memory = tf.cast(memory, tf.float32)
logits, next_memory = transformer(
self.X,
memory,
len(dictionary_from),
n_layer,
d_model,
d_embed,
n_head,
d_head,
d_inner,
initializer,
scope="encoder",
reuse=False,
)
logits, next_memory = transformer(
self.X,
next_memory,
len(dictionary_from),
n_layer,
d_model,
d_embed,
n_head,
d_head,
d_inner,
initializer,
scope="decoder",
reuse=False,
)
embedding = tf.Variable(tf.random_uniform([len(dictionary_to), d_model], -1, 1))
init_state = tf.reduce_mean(tf.reduce_mean(next_memory, axis=0), axis=1)
cell = tf.nn.rnn_cell.LSTMCell(d_model)
vocab_proj = tf.layers.Dense(len(dictionary_to))
helper = tf.contrib.seq2seq.TrainingHelper(
inputs=tf.nn.embedding_lookup(embedding, decoder_input),
sequence_length=tf.to_int32(self.Y_seq_len),
)
encoder_state = tf.nn.rnn_cell.LSTMStateTuple(c=init_state, h=init_state)
decoder = tf.contrib.seq2seq.BasicDecoder(
cell=cell, helper=helper, initial_state=encoder_state, output_layer=vocab_proj
)
decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
decoder=decoder, maximum_iterations=tf.reduce_max(self.Y_seq_len)
)
helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
embedding=embedding,
start_tokens=tf.tile(tf.constant([GO], dtype=tf.int32), [tf.shape(init_state)[0]]),
end_token=EOS,
)
decoder = tf.contrib.seq2seq.BasicDecoder(
cell=cell, helper=helper, initial_state=encoder_state, output_layer=vocab_proj
)
predicting_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
decoder=decoder, maximum_iterations=2 * tf.reduce_max(self.X_seq_len)
)
self.training_logits = decoder_output.rnn_output
self.predicting_ids = predicting_decoder_output.sample_id
self.logits = decoder_output.sample_id
masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)
self.cost = tf.contrib.seq2seq.sequence_loss(
logits=self.training_logits, targets=self.Y, weights=masks
)
self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)
y_t = tf.argmax(self.training_logits, axis=2)
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(y_t, masks)
mask_label = tf.boolean_mask(self.Y, masks)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# In[14]:
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Chatbot()
sess.run(tf.global_variables_initializer())
# In[15]:
def pad_sentence_batch_dynamic(sentence_batch, pad_int):
padded_seqs = []
seq_lens = []
max_sentence_len = max([len(sentence) for sentence in sentence_batch])
for sentence in sentence_batch:
padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence)))
seq_lens.append(len(sentence))
return padded_seqs, seq_lens
# In[16]:
for i in range(epoch):
total_loss, total_accuracy = 0, 0
for k in range(0, len(short_questions), batch_size):
index = min(k + batch_size, len(short_questions))
batch_x, seq_x = pad_sentence_batch_dynamic(X[k:index], PAD)
batch_y, seq_y = pad_sentence_batch_dynamic(Y[k:index], PAD)
predicted, accuracy, loss, _ = sess.run(
[model.predicting_ids, model.accuracy, model.cost, model.optimizer],
feed_dict={model.X: batch_x, model.Y: batch_y},
)
total_loss += loss
total_accuracy += accuracy
total_loss /= len(short_questions) / batch_size
total_accuracy /= len(short_questions) / batch_size
print("epoch: %d, avg loss: %f, avg accuracy: %f" % (i + 1, total_loss, total_accuracy))
# In[17]:
for i in range(len(batch_x)):
print("row %d" % (i + 1))
print(
"QUESTION:", " ".join([rev_dictionary_from[n] for n in batch_x[i] if n not in [0, 1, 2, 3]])
)
print(
"REAL ANSWER:",
" ".join([rev_dictionary_to[n] for n in batch_y[i] if n not in [0, 1, 2, 3]]),
)
print(
"PREDICTED ANSWER:",
" ".join([rev_dictionary_to[n] for n in predicted[i] if n not in [0, 1, 2, 3]]),
"\n",
)
# In[18]:
batch_x, seq_x = pad_sentence_batch(X_test[:batch_size], PAD, maxlen_question)
batch_y, seq_y = pad_sentence_batch(Y_test[:batch_size], PAD, maxlen_answer)
predicted = sess.run(model.predicting_ids, feed_dict={model.X: batch_x})
for i in range(len(batch_x)):
print("row %d" % (i + 1))
print(
"QUESTION:", " ".join([rev_dictionary_from[n] for n in batch_x[i] if n not in [0, 1, 2, 3]])
)
print(
"REAL ANSWER:",
" ".join([rev_dictionary_to[n] for n in batch_y[i] if n not in [0, 1, 2, 3]]),
)
print(
"PREDICTED ANSWER:",
" ".join([rev_dictionary_to[n] for n in predicted[i] if n not in [0, 1, 2, 3]]),
"\n",
)
# In[ ]:
| 31.024781 | 100 | 0.599634 |
7b988aef834ce4222c3c0cae78bf5cbf2d98afbc | 4,810 | py | Python | tests/test_user_add.py | ScilifelabDataCentre/Data-Delivery-System | 52cbb6ccce03123546062d02ee1b13d2049b3581 | [
"BSD-3-Clause"
] | 1 | 2021-04-27T08:29:29.000Z | 2021-04-27T08:29:29.000Z | tests/test_user_add.py | ScilifelabDataCentre/Data-Delivery-System | 52cbb6ccce03123546062d02ee1b13d2049b3581 | [
"BSD-3-Clause"
] | 308 | 2020-06-02T13:24:11.000Z | 2021-05-11T14:37:01.000Z | tests/test_user_add.py | ScilifelabDataCentre/Data-Delivery-System | 52cbb6ccce03123546062d02ee1b13d2049b3581 | [
"BSD-3-Clause"
] | null | null | null | import json
from dds_web import db
from dds_web.database import models
import tests
first_new_email = {"email": "first_test_email@mailtrap.io"}
first_new_user = {**first_new_email, "role": "Researcher"}
first_new_user_extra_args = {**first_new_user, "extra": "test"}
first_new_user_invalid_role = {**first_new_email, "role": "Invalid Role"}
first_new_user_invalid_email = {"email": "first_invalid_email", "role": first_new_user["role"]}
existing_invite = {"email": "existing_invite_email@mailtrap.io", "role": "Researcher"}
new_unit_admin = {"email": "new_unit_admin@mailtrap.io", "role": "Super Admin"}
def test_add_user_with_researcher(client):
response = client.post(
tests.DDSEndpoint.USER_ADD,
headers=tests.UserAuth(tests.USER_CREDENTIALS["researchuser"]).post_headers(),
data=json.dumps(first_new_user),
content_type="application/json",
)
assert response.status == "403 FORBIDDEN"
invited_user = (
db.session.query(models.Invite).filter_by(email=first_new_user["email"]).one_or_none()
)
assert invited_user is None
def test_add_user_with_unituser_no_role(client):
response = client.post(
tests.DDSEndpoint.USER_ADD,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unitadmin"]).post_headers(),
data=json.dumps(first_new_email),
content_type="application/json",
)
assert response.status == "400 BAD REQUEST"
invited_user = (
db.session.query(models.Invite).filter_by(email=first_new_email["email"]).one_or_none()
)
assert invited_user is None
def test_add_user_with_unitadmin_with_extraargs(client):
response = client.post(
tests.DDSEndpoint.USER_ADD,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unitadmin"]).post_headers(),
data=json.dumps(first_new_user_extra_args),
content_type="application/json",
)
assert response.status == "400 BAD REQUEST"
invited_user = (
db.session.query(models.Invite)
.filter_by(email=first_new_user_extra_args["email"])
.one_or_none()
)
assert invited_user is None
def test_add_user_with_unitadmin_and_invalid_role(client):
response = client.post(
tests.DDSEndpoint.USER_ADD,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unitadmin"]).post_headers(),
data=json.dumps(first_new_user_invalid_role),
content_type="application/json",
)
assert response.status == "400 BAD REQUEST"
invited_user = (
db.session.query(models.Invite)
.filter_by(email=first_new_user_invalid_role["email"])
.one_or_none()
)
assert invited_user is None
def test_add_user_with_unitadmin_and_invalid_email(client):
response = client.post(
tests.DDSEndpoint.USER_ADD,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unitadmin"]).post_headers(),
data=json.dumps(first_new_user_invalid_email),
content_type="application/json",
)
assert response.status == "400 BAD REQUEST"
invited_user = (
db.session.query(models.Invite)
.filter_by(email=first_new_user_invalid_email["email"])
.one_or_none()
)
assert invited_user is None
def test_add_user_with_unitadmin(client):
response = client.post(
tests.DDSEndpoint.USER_ADD,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unitadmin"]).post_headers(),
data=json.dumps(first_new_user),
content_type="application/json",
)
assert response.status == "200 OK"
invited_user = (
db.session.query(models.Invite).filter_by(email=first_new_user["email"]).one_or_none()
)
assert invited_user
assert invited_user.email == first_new_user["email"]
assert invited_user.role == first_new_user["role"]
def test_add_user_existing_email(client):
invited_user = (
db.session.query(models.Invite)
.filter_by(email=existing_invite["email"], role=existing_invite["role"])
.one_or_none()
)
assert invited_user
response = client.post(
tests.DDSEndpoint.USER_ADD,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unitadmin"]).post_headers(),
data=json.dumps(existing_invite),
content_type="application/json",
)
assert response.status == "400 BAD REQUEST"
def test_add_user_with_unitpersonnel_permission_denied(client):
response = client.post(
tests.DDSEndpoint.USER_ADD,
headers=tests.UserAuth(tests.USER_CREDENTIALS["unituser"]).post_headers(),
data=json.dumps(new_unit_admin),
content_type="application/json",
)
assert response.status == "403 FORBIDDEN"
invited_user = (
db.session.query(models.Invite).filter_by(email=new_unit_admin["email"]).one_or_none()
)
assert invited_user is None
| 35.109489 | 95 | 0.702287 |
c683082c992fe0d98fc14a72f31e8c5c10479091 | 1,055 | py | Python | test/runtime/operators_test/clipped_relu_test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | 1 | 2021-04-09T15:55:35.000Z | 2021-04-09T15:55:35.000Z | test/runtime/operators_test/clipped_relu_test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | test/runtime/operators_test/clipped_relu_test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | import numpy as np
from test.util import generate_kernel_test_case, wrap_template
from webdnn.graph.graph import Graph
from webdnn.graph.operators.clipped_relu import ClippedRelu
from webdnn.graph.order import OrderNHWC, OrderNCHW
from webdnn.graph.variable import Variable
@wrap_template
def template(x_order=OrderNHWC, y_order=OrderNHWC, cap=0.25, description: str = ""):
vx = np.random.rand(2, 3, 4, 5) - 0.5
vy = np.clip(vx, 0.0, cap)
x = Variable(vx.shape, order=OrderNHWC)
y, = ClippedRelu(None, cap=cap)(x)
y.change_order(x_order)
y.change_order(y_order)
generate_kernel_test_case(
description=f"ClippedRelu {description}",
graph=Graph([x], [y]),
inputs={x: np.transpose(vx, [OrderNHWC.axes_dict[a] for a in x.order.axes])},
expected={y: np.transpose(vy, [OrderNHWC.axes_dict[a] for a in y.order.axes])},
)
def test():
template()
def test_different_order():
template(x_order=OrderNCHW)
def test_cap_1():
template(cap=1)
def test_cap_0():
template(cap=0)
| 24.534884 | 87 | 0.696682 |
186e39cc2e949bf2e726e3f99f5236068090b614 | 158 | py | Python | apps/config.py | mrf-foundation/ckios_v1 | 3556a99ba5e01f00e137fd124903ace77d2cba28 | [
"Apache-2.0"
] | 1 | 2021-12-27T10:03:30.000Z | 2021-12-27T10:03:30.000Z | apps/config.py | mrf-foundation/ckios_v1 | 3556a99ba5e01f00e137fd124903ace77d2cba28 | [
"Apache-2.0"
] | null | null | null | apps/config.py | mrf-foundation/ckios_v1 | 3556a99ba5e01f00e137fd124903ace77d2cba28 | [
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
class AppsConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'apps'
label = 'apps' | 22.571429 | 56 | 0.721519 |
02c7f99d47cf898a6bfeda3ade11eb0b59e4f17b | 724 | py | Python | var/spack/repos/builtin/packages/py-docutils-stubs/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 9 | 2018-04-18T07:51:40.000Z | 2021-09-10T03:56:57.000Z | var/spack/repos/builtin/packages/py-docutils-stubs/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 907 | 2018-04-18T11:17:57.000Z | 2022-03-31T13:20:25.000Z | var/spack/repos/builtin/packages/py-docutils-stubs/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29 | 2018-11-05T16:14:23.000Z | 2022-02-03T16:07:09.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyDocutilsStubs(PythonPackage):
"""PEP 561 based Type information for docutils."""
homepage = "https://github.com/tk0miya/docutils-stubs"
url = "https://pypi.io/packages/source/d/docutils-stubs/docutils-stubs-0.0.21.tar.gz"
version('0.0.21', sha256='e0d3d2588a0c0b47bf66b917bf4ff2c100cf4cf77bbe2f518d97b8f4d63e735c')
depends_on('python@3.5:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-docutils@0.14', type=('build', 'run'))
| 36.2 | 96 | 0.720994 |
8f8ddb33e58046843c93082b5f6e42c084cf606c | 1,286 | py | Python | backpack/extensions/secondorder/mngd/batchnorm1d.py | maryamhgf/backpack | 63d2717656df2e0f18b3b6ee50320e82ce7358b6 | [
"MIT"
] | null | null | null | backpack/extensions/secondorder/mngd/batchnorm1d.py | maryamhgf/backpack | 63d2717656df2e0f18b3b6ee50320e82ce7358b6 | [
"MIT"
] | null | null | null | backpack/extensions/secondorder/mngd/batchnorm1d.py | maryamhgf/backpack | 63d2717656df2e0f18b3b6ee50320e82ce7358b6 | [
"MIT"
] | 2 | 2021-06-11T14:15:28.000Z | 2021-06-16T11:19:11.000Z | import backpack.utils.linear as LinUtils
from backpack.core.derivatives.batchnorm1d import BatchNorm1dDerivatives
from backpack.extensions.secondorder.mngd.mngd_base import MNGDBaseModule
from torch import einsum
import torch
class MNGDBatchNorm1d(MNGDBaseModule):
def __init__(self):
super().__init__(derivatives=BatchNorm1dDerivatives(), params=["bias", "weight"])
# TODO: FIX these functions for NGD
def weight(self, ext, module, grad_inp, grad_out, backproped):
# dgamma = self.derivatives._weight_jac_t_mat_prod(module, grad_inp, grad_out, backproped, sum_batch=False)
# # fake
# new_bp = self.derivatives._my_jac_t_mat_prod(module, grad_inp, grad_out, backproped)
# print('new_bp :\n', new_bp)
# return einsum("vni,zqi->vnzq", (dgamma, dgamma))
return None
def bias(self, ext, module, grad_inp, grad_out, backproped):
# dbeta = self.derivatives._bias_jac_t_mat_prod(module, grad_inp, grad_out, backproped, sum_batch=False)
# print(torch.norm(dbeta))
# fake
# new_bp = self.derivatives._my_jac_t_mat_prod(module, grad_inp, grad_out, backproped)
# print('new_bp bias:\n', new_bp)
# return einsum("vni,zqi->vnzq", (dbeta, dbeta))
return None | 44.344828 | 115 | 0.700622 |
945958989888a2b615e7eed9a30f2ed819bd0681 | 3,191 | py | Python | test_corpus.py | jakubkulhan/bayesian-spam-filter | 640917b8f93173c9286402fccd5ccc40a4c048dc | [
"MIT",
"Unlicense"
] | 1 | 2015-12-19T13:26:57.000Z | 2015-12-19T13:26:57.000Z | test_corpus.py | jakubkulhan/bayesian-spam-filter | 640917b8f93173c9286402fccd5ccc40a4c048dc | [
"MIT",
"Unlicense"
] | null | null | null | test_corpus.py | jakubkulhan/bayesian-spam-filter | 640917b8f93173c9286402fccd5ccc40a4c048dc | [
"MIT",
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test the Corpus class."""
import os
import shutil
import unittest
from test_readClassificationFromFile import (
random_filename,
random_string,
FNAME_CHARS)
from corpus import Corpus
SPECIAL_FILENAME = '!special.txt'
CORPUS_DIR = 'testing_corpus_delete_me'
FCONTENTS_CHARS = FNAME_CHARS + '\n '
N_EMAILS = 20
class TestCorpus(unittest.TestCase):
def setUp(self):
"""Prepare classification file for the test."""
self.expected = create_corpus_dictionary(N_EMAILS)
create_corpus_dir_from_dictionary(self.expected)
def tearDown(self):
delete_corpus_directory()
def test_corpusContainsOnlyEmails(self):
"""Test reading the corpus with email messages only."""
corpus = Corpus(CORPUS_DIR)
# Exercise the SUT
observed = {}
for fname, contents in corpus.emails_as_string():
observed[fname] = contents
# Verify the results
self.assertEqual(len(self.expected), len(observed),
'The emails_as_string() method did not generate all the corpus files.')
self.assertEqual(self.expected, observed,
'The read file contents are not equal to the expected contents.')
def test_corpusContainsAlsoSpecialFiles(self):
"""Test reading the corpus with special files."""
# Add a special file into the corpus dir
save_file_to_corpus_dir(
fname=SPECIAL_FILENAME, contents='fake', dirname=CORPUS_DIR)
corpus = Corpus(CORPUS_DIR)
# Exercise the SUT
observed = {}
for fname, contents in corpus.emails_as_string():
observed[fname] = contents
# Verify the results
self.assertEqual(len(self.expected), len(observed),
'The emails_as_string() method did not generate all the corpus files.')
self.assertEqual(self.expected, observed,
'The read file contents are not equal to the expected contents.')
def create_corpus_dictionary(nitems=N_EMAILS):
"""Create a random dictionary of email file names and their contents."""
d = {}
for i in range(nitems):
filename = random_filename()
contents = random_string(200, chars=FCONTENTS_CHARS)
d[filename] = contents
return d
def create_corpus_dir_from_dictionary(d, dirname=CORPUS_DIR):
"""Save the dictionary to a directory."""
os.makedirs(dirname, exist_ok=True)
for fname, contents in d.items():
save_file_to_corpus_dir(fname, contents, dirname)
def save_file_to_corpus_dir(fname, contents, dirname=CORPUS_DIR):
"""Save the contents to the file into the dirname directory."""
fpath = os.path.join(dirname, fname)
with open(fpath, 'wt', encoding='utf-8') as f:
f.write(contents)
def delete_corpus_directory(dirname=CORPUS_DIR):
"""Delete the directory with testing corpus."""
shutil.rmtree(dirname, ignore_errors=True)
if __name__ == "__main__":
unittest.main()
| 36.678161 | 97 | 0.642432 |
507739b347bc9d246161243a3ee5395fa04457f4 | 463 | py | Python | configs/segformer/segformer_mit-b3_512x512_20k_Road.py | kkkkkaiai/mmsegmentation | f597e8404417c46ff87e636a87d41e0af95abfa4 | [
"Apache-2.0"
] | null | null | null | configs/segformer/segformer_mit-b3_512x512_20k_Road.py | kkkkkaiai/mmsegmentation | f597e8404417c46ff87e636a87d41e0af95abfa4 | [
"Apache-2.0"
] | null | null | null | configs/segformer/segformer_mit-b3_512x512_20k_Road.py | kkkkkaiai/mmsegmentation | f597e8404417c46ff87e636a87d41e0af95abfa4 | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../_base_/models/segformer_mit-b0.py', '../_base_/datasets/road.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
# model settings
model = dict(
# pretrained='models/road_v1.pth'
backbone=dict(
embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[3, 4, 18, 3]),
decode_head=dict(num_classes=5, norm_cfg=norm_cfg, in_channels=[64, 128, 320, 512]),
)
| 30.866667 | 88 | 0.665227 |
6ee371d2b5dacdb1f6111c74646e403fb645e729 | 6,073 | py | Python | kube_scheduler/datadog_checks/kube_scheduler/config_models/defaults.py | OuesFa/integrations-core | 0ffe4ca306580a2e775b515152384034c2dfdc03 | [
"BSD-3-Clause"
] | null | null | null | kube_scheduler/datadog_checks/kube_scheduler/config_models/defaults.py | OuesFa/integrations-core | 0ffe4ca306580a2e775b515152384034c2dfdc03 | [
"BSD-3-Clause"
] | null | null | null | kube_scheduler/datadog_checks/kube_scheduler/config_models/defaults.py | OuesFa/integrations-core | 0ffe4ca306580a2e775b515152384034c2dfdc03 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# This file is autogenerated.
# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
# ddev -x validate config -s <INTEGRATION_NAME>
# ddev -x validate models -s <INTEGRATION_NAME>
from datadog_checks.base.utils.models.fields import get_default_field_value
def shared_proxy(field, value):
return get_default_field_value(field, value)
def shared_service(field, value):
return get_default_field_value(field, value)
def shared_skip_proxy(field, value):
return False
def shared_timeout(field, value):
return 10
def instance_allow_redirects(field, value):
return True
def instance_auth_token(field, value):
return get_default_field_value(field, value)
def instance_auth_type(field, value):
return 'basic'
def instance_aws_host(field, value):
return get_default_field_value(field, value)
def instance_aws_region(field, value):
return get_default_field_value(field, value)
def instance_aws_service(field, value):
return get_default_field_value(field, value)
def instance_bearer_token_auth(field, value):
return get_default_field_value(field, value)
def instance_bearer_token_path(field, value):
return get_default_field_value(field, value)
def instance_bearer_token_refresh_interval(field, value):
return 60
def instance_connect_timeout(field, value):
return get_default_field_value(field, value)
def instance_disable_generic_tags(field, value):
return False
def instance_empty_default_hostname(field, value):
return False
def instance_exclude_labels(field, value):
return get_default_field_value(field, value)
def instance_extra_headers(field, value):
return get_default_field_value(field, value)
def instance_headers(field, value):
return get_default_field_value(field, value)
def instance_health_service_check(field, value):
return True
def instance_health_url(field, value):
return 'http://localhost:10251/healthz'
def instance_ignore_metrics(field, value):
return get_default_field_value(field, value)
def instance_ignore_metrics_by_labels(field, value):
return get_default_field_value(field, value)
def instance_ignore_tags(field, value):
return get_default_field_value(field, value)
def instance_include_labels(field, value):
return get_default_field_value(field, value)
def instance_kerberos_auth(field, value):
return 'disabled'
def instance_kerberos_cache(field, value):
return get_default_field_value(field, value)
def instance_kerberos_delegate(field, value):
return False
def instance_kerberos_force_initiate(field, value):
return False
def instance_kerberos_hostname(field, value):
return get_default_field_value(field, value)
def instance_kerberos_keytab(field, value):
return get_default_field_value(field, value)
def instance_kerberos_principal(field, value):
return get_default_field_value(field, value)
def instance_label_joins(field, value):
return get_default_field_value(field, value)
def instance_label_to_hostname(field, value):
return get_default_field_value(field, value)
def instance_labels_mapper(field, value):
return get_default_field_value(field, value)
def instance_leader_election(field, value):
return True
def instance_leader_election_kind(field, value):
return 'auto'
def instance_log_requests(field, value):
return False
def instance_metric_patterns(field, value):
return get_default_field_value(field, value)
def instance_metrics(field, value):
return get_default_field_value(field, value)
def instance_min_collection_interval(field, value):
return 15
def instance_namespace(field, value):
return 'service'
def instance_ntlm_domain(field, value):
return get_default_field_value(field, value)
def instance_password(field, value):
return get_default_field_value(field, value)
def instance_persist_connections(field, value):
return False
def instance_prometheus_metrics_prefix(field, value):
return get_default_field_value(field, value)
def instance_proxy(field, value):
return get_default_field_value(field, value)
def instance_read_timeout(field, value):
return get_default_field_value(field, value)
def instance_request_size(field, value):
return 10
def instance_send_distribution_buckets(field, value):
return False
def instance_send_distribution_counts_as_monotonic(field, value):
return False
def instance_send_distribution_sums_as_monotonic(field, value):
return False
def instance_send_histograms_buckets(field, value):
return True
def instance_send_monotonic_counter(field, value):
return True
def instance_send_monotonic_with_gauge(field, value):
return False
def instance_service(field, value):
return get_default_field_value(field, value)
def instance_skip_proxy(field, value):
return False
def instance_tags(field, value):
return get_default_field_value(field, value)
def instance_timeout(field, value):
return 10
def instance_tls_ca_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_ignore_warning(field, value):
return False
def instance_tls_private_key(field, value):
return get_default_field_value(field, value)
def instance_tls_protocols_allowed(field, value):
return get_default_field_value(field, value)
def instance_tls_use_host_header(field, value):
return False
def instance_tls_verify(field, value):
return True
def instance_type_overrides(field, value):
return get_default_field_value(field, value)
def instance_use_legacy_auth_encoding(field, value):
return True
def instance_use_process_start_time(field, value):
return False
def instance_username(field, value):
return get_default_field_value(field, value)
| 20.869416 | 105 | 0.783303 |
6dc3880a61d8d83b66cb032386032d9f5ac6705c | 5,465 | py | Python | experiments/first_attempts/models.py | lstrgar/experimental-rnns | c522ae373237811411f14212bceb97cda8ddcf36 | [
"MIT"
] | null | null | null | experiments/first_attempts/models.py | lstrgar/experimental-rnns | c522ae373237811411f14212bceb97cda8ddcf36 | [
"MIT"
] | null | null | null | experiments/first_attempts/models.py | lstrgar/experimental-rnns | c522ae373237811411f14212bceb97cda8ddcf36 | [
"MIT"
] | null | null | null | import numpy as np
https://github.com/cknd/pyESNimport torch
import torch.nnT
from torch.distributions import normal
from torch.distributions import uniform
import matplotlib.pyplot as plt
'''
Naive reservoir model with 'n' neurons, 'p' fractional
connectivity, initial weight distribution from Normal(0, sig).
'''
class OriginalReservoir():
def __init__(self,n=100,p=0.2,sig=0.1,bias=True,nl=np.tanh):
self.n = n
self.p = p
self.sig = sig
self.v = np.zeros(self.n) ## State Vector
self.w = np.zeros([self.n,self.n]) ## Weight Matrix
if bias: ## Network Bias
self.b = np.random.randn(self.n)
else:
self.b = np.zeros(self.n)
self.nl = nl ## Non-Linear Activation
## Populate Weight Matrix
for i in range(self.n):
for j in range(self.n):
uni_draw = np.random.uniform()
if uni_draw < self.p:
self.w[i,j] = np.random.normal(loc=0,scale=self.sig)
'''
Update state vector, given current state vector,
weight matrix, and bias.
'''
def forward(self):
z = np.matmul(self.w,self.v) + self.b
y = self.nl(z)
self.v = y
return y
import torch, torch.nn as nn
from torch.distributions import normal
from torch.distributions import uniform
'''
PyTorch based reservoir model with 'n' neurons, 'p' fractional
connectivity, initial weight distribution from Normal(0, sig).
'''
class CUDAvoir(nn.Module):
def __init__(self,n,p,sig,o=0.0,bias=True):
super(CUDAvoir,self).__init__()
self.n = torch.tensor(n)
self.p = torch.tensor(p)
self.sig = torch.tensor(sig)
self.v = torch.zeros(self.n) ## Recurrent Layer State Vector
self.w = torch.zeros(self.n,self.n) ## Recurrent Layer Weight Matrix
self.ol = nn.Linear(self.n, 1, bias=False) ## Linear Output Layer
self.o = torch.tensor([o]) ## Initalize Output Neuron
self.fb = nn.Linear(1, self.n, bias=False) ## Linear Feedback Layer
if bias: ## Recurrent Layer Bias
self.b = torch.FloatTensor(n).uniform_(0,1)
else:
self.b = torch.zeros(self.n)
## Populate Recurrent Layer Weight Matrix
norm = normal.Normal(loc=0,scale=self.sig)
uni = uniform.Uniform(0,1)
for i in range(self.n):
for j in range(self.n):
uni_draw = uni.sample()
if uni_draw < self.p:
self.w[i,j] = norm.sample()
'''
Update state vector and output vector given
current state vector and weight matrix, feedback
layer, output, and bias
'''
def forward(self):
z = torch.matmul(self.w,self.v) + self.fb(self.o) + self.b
nl = nn.Tanh()
y = nl(z)
self.v = y
self.o = self.ol(y)
return y
class Reservoir():
def __init__(self,n,p,g,init_pattern='random',bias=True,feedback=True,fb_scale=0.01,seed=1):
torch.manual_seed(seed)
if init_pattern == 'random':
self.v = torch.randn(n)
elif init_pattern == 'single':
self.v = torch.zeros(n)
self.v[0]=torch.tensor(1)
else:
self.v = torch.zeros(n)
w = torch.zeros(n,n) ## Recurrent Layer Weight Matrix
self.readout_w = torch.randn([n,1],requires_grad=True) ## Linear Output Layer
self.y = torch.tensor([0.]) ## Initalize Output Neuron
if bias: ## Recurrent Layer Bias
self.b = torch.randn(n)
else:
self.b = torch.zeros(n)
if feedback:
self.fb_w = torch.randn([1,n]) * torch.tensor(fb_scale)
else:
self.fb_w = torch.zeros([1,n])
## Populate Recurrent Layer Weight Matrix
scale_factor = g / np.sqrt(n)
self.scale_factor = scale_factor
norm = normal.Normal(loc=0,scale=scale_factor)
uni = uniform.Uniform(0,1)
for i in range(n):
for j in range(n):
uni_draw = uni.sample()
if uni_draw < p:
w[i,j] = norm.sample()
self.w = w
def forward(self):
z = torch.matmul(self.w,self.v) + self.b + torch.matmul(self.y,self.fb_w)
nl = nn.Tanh()
v = nl(z)
y = torch.matmul(v,self.readout_w)
# y = nl(y)
self.v = v
self.y = y
return y
def run(self,steps=200,plot=True,return_data=False):
# Empty arrays for data collegtion
vs = []
ys = []
vs.append(self.v.detach().numpy())
ys.append(self.y.detach().numpy())
for t in range(steps):
y = self.forward()
v = self.v.detach().numpy()
vs.append(v)
ys.append(y.detach().numpy())
vs = np.asarray(vs)
if plot:
plt.figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
plt.imshow(vs.T,cmap='viridis')
plt.show()
plt.plot(ys)
if return_data:
return vs, ys
| 28.170103 | 96 | 0.524245 |
6a62bf97d00887873913a55dda9e27c9617fdb7f | 168 | py | Python | lab_7/main_15.py | MrLuckUA/python_course | 50a87bc54550aedaac3afcce5b8b5c132fb6ec98 | [
"MIT"
] | null | null | null | lab_7/main_15.py | MrLuckUA/python_course | 50a87bc54550aedaac3afcce5b8b5c132fb6ec98 | [
"MIT"
] | null | null | null | lab_7/main_15.py | MrLuckUA/python_course | 50a87bc54550aedaac3afcce5b8b5c132fb6ec98 | [
"MIT"
] | null | null | null | import random
letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
numbers = "01234567890"
symbols = "!@#$%^&*()?"
pass_len = 8
password = random.sample()
| 21 | 64 | 0.744048 |
de1a8f0819c606e5b95f2adcc31c342eadfd1fc8 | 73,939 | py | Python | fbgemm_gpu/fbgemm_gpu/split_table_batched_embeddings_ops.py | RenfeiChen-FB/FBGEMM | 96e7cd84463db486f3945d50f399ac578d7443e1 | [
"BSD-3-Clause"
] | null | null | null | fbgemm_gpu/fbgemm_gpu/split_table_batched_embeddings_ops.py | RenfeiChen-FB/FBGEMM | 96e7cd84463db486f3945d50f399ac578d7443e1 | [
"BSD-3-Clause"
] | null | null | null | fbgemm_gpu/fbgemm_gpu/split_table_batched_embeddings_ops.py | RenfeiChen-FB/FBGEMM | 96e7cd84463db486f3945d50f399ac578d7443e1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# pyre-ignore-all-errors[56]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import enum
import logging
from dataclasses import dataclass
from itertools import accumulate
from math import log2
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Type
import fbgemm_gpu.split_embedding_codegen_lookup_invokers as invokers
import torch
from fbgemm_gpu.split_embedding_configs import EmbOptimType as OptimType
from fbgemm_gpu.split_embedding_configs import SparseType
from torch import Tensor, nn
# Rows per cache set (associativity) of the software-managed LXU cache.
ASSOC = 32
# Maximum number of times prefetch() can be called without
# a corresponding forward() call
MAX_PREFETCH_DEPTH = 100
# Extra columns appended to each INT8 row (used by the fused 8-bit rowwise
# quantized layout produced by FloatToFused8BitRowwiseQuantized).
INT8_EMB_ROW_DIM_OFFSET = 8
class DoesNotHavePrefix(Exception):
    """Raised when no state tensors are registered under a requested prefix."""
class EmbeddingLocation(enum.IntEnum):
    """Where an embedding table's rows are physically stored."""

    # Device (GPU) memory.
    DEVICE = 0
    # Unified (managed) memory.
    MANAGED = 1
    # Unified memory fronted by the on-device LXU software cache.
    MANAGED_CACHING = 2
    # Host (CPU) memory.
    HOST = 3
class ComputeDevice(enum.IntEnum):
    """Device on which the embedding kernels execute."""

    CPU = 0
    CUDA = 1
class CacheAlgorithm(enum.Enum):
    """Replacement policy used by the software-managed LXU cache."""

    LRU = 0
    LFU = 1
class PoolingMode(enum.IntEnum):
    """How the embeddings of one bag are combined into a single vector."""

    SUM = 0
    MEAN = 1
    # No pooling is applied.
    NONE = 2
class BoundsCheckMode(enum.IntEnum):
    """What bounds_check_indices does when an index is out of range."""

    # Raise an exception (CPU) or device-side assert (CUDA).
    FATAL = 0
    # Log the first out-of-bounds instance per kernel, and set to zero.
    WARNING = 1
    # Silently set the offending index to zero.
    IGNORE = 2
    # Skip bounds checking entirely.
    NONE = 3
class RecordCacheMetrics(NamedTuple):
    """Flags selecting which LXU-cache miss statistics get recorded."""

    record_cache_miss_counter: bool
    record_tablewise_cache_miss: bool
@dataclass
class SplitState:
    """Sizes, placements, and offsets for one set of dev/host/uvm state tensors."""

    # Total number of elements placed in device memory.
    dev_size: int
    # Total number of elements placed in host memory.
    host_size: int
    # Total number of elements placed in UVM (managed) memory.
    uvm_size: int
    # Per-physical-table placement.
    placements: List[EmbeddingLocation]
    # Per-physical-table element offset into its placement's backing tensor.
    offsets: List[int]
def construct_split_state(
    embedding_specs: List[Tuple[int, int, EmbeddingLocation, ComputeDevice]],
    rowwise: bool,
    cacheable: bool,
    precision: SparseType = SparseType.FP32,
    int8_emb_row_dim_offset: int = INT8_EMB_ROW_DIM_OFFSET,
) -> SplitState:
    """Compute per-table placements and offsets for a dev/host/uvm split.

    Args:
        embedding_specs: (rows, dim, location, compute_device) per table.
        rowwise: if True, size one element per row (row-wise optimizer state).
        cacheable: if True, honor MANAGED_CACHING placements; otherwise they
            fall back to plain MANAGED.
        precision: element precision; INT8 rows carry extra fused columns.
        int8_emb_row_dim_offset: number of extra columns per INT8 row.
    """
    placements: List[EmbeddingLocation] = []
    offsets: List[int] = []
    dev_size = 0
    host_size = 0
    uvm_size = 0
    for num_embeddings, embedding_dim, location, _ in embedding_specs:
        assert embedding_dim % 4 == 0, f"{embedding_dim}"
        if precision == SparseType.INT8:
            embedding_dim += int8_emb_row_dim_offset
        # Row-wise states keep one element per row; otherwise the full matrix.
        state_size = num_embeddings if rowwise else num_embeddings * embedding_dim
        if location == EmbeddingLocation.HOST:
            placements.append(EmbeddingLocation.HOST)
            offsets.append(host_size)
            host_size += state_size
        elif rowwise or location == EmbeddingLocation.DEVICE:
            # Device tables keep state on device; row-wise optimizer state
            # always lives on device even for managed tables.
            placements.append(EmbeddingLocation.DEVICE)
            offsets.append(dev_size)
            dev_size += state_size
        else:
            wants_cache = cacheable and location == EmbeddingLocation.MANAGED_CACHING
            placements.append(
                EmbeddingLocation.MANAGED_CACHING
                if wants_cache
                else EmbeddingLocation.MANAGED
            )
            offsets.append(uvm_size)
            uvm_size += state_size
    assert len(placements) == len(offsets)
    return SplitState(
        dev_size=dev_size,
        host_size=host_size,
        uvm_size=uvm_size,
        placements=placements,
        offsets=offsets,
    )
@dataclass
class CacheState:
    """Precomputed index structures describing the LXU cacheable address space."""

    # T + 1 elements and cache_hash_size_cumsum[-1] == total_cache_hash_size
    cache_hash_size_cumsum: List[int]
    # Maps each linear cache index back to its (logical) feature index.
    cache_index_table_map: List[int]
    # Total number of cacheable rows across all MANAGED_CACHING tables.
    total_cache_hash_size: int
def construct_cache_state(
    embedding_specs: List[Tuple[int, int, EmbeddingLocation, ComputeDevice]],
    feature_table_map: List[int],
) -> CacheState:
    """Build the cache index structures for all MANAGED_CACHING tables.

    Only tables with EmbeddingLocation.MANAGED_CACHING contribute rows to the
    cacheable address space; non-cached features get a -1 sentinel cumsum.
    """
    # Physical-table cumulative sums of cacheable row counts (T_ + 1 entries).
    physical_cumsum = [0]
    total_rows = 0
    for num_embeddings, _, location, _ in embedding_specs:
        if location == EmbeddingLocation.MANAGED_CACHING:
            total_rows += num_embeddings
        physical_cumsum.append(total_rows)
    # [total_rows]: linear cache index -> feature index (last feature wins
    # when several features share one physical table).
    index_to_feature = [-1] * total_rows
    # [T] (+ final total): per-feature cumsum, -1 for non-cached tables.
    feature_cumsum: List[int] = []
    for feature, table in enumerate(feature_table_map):
        for i in range(physical_cumsum[table], physical_cumsum[table + 1]):
            index_to_feature[i] = feature
        location = embedding_specs[table][2]
        feature_cumsum.append(
            physical_cumsum[table]
            if location == EmbeddingLocation.MANAGED_CACHING
            else -1
        )
    feature_cumsum.append(total_rows)
    return CacheState(
        cache_hash_size_cumsum=feature_cumsum,
        cache_index_table_map=index_to_feature,
        total_cache_hash_size=total_rows,
    )
class SplitTableBatchedEmbeddingBagsCodegen(nn.Module):
"""
Multiple sparse features can share one embedding table.
'feature_table_map' specifies the feature-table mapping.
T: number of logical tables
T_: number of physical tables
T >= T_
"""
embedding_specs: List[Tuple[int, int, EmbeddingLocation, ComputeDevice]]
optimizer_args: invokers.lookup_args.OptimizerArgs
lxu_cache_locations_list: List[Tensor]
lxu_cache_locations_empty: Tensor
timesteps_prefetched: List[int]
record_cache_metrics: RecordCacheMetrics
    def __init__( # noqa C901
        self,
        embedding_specs: List[
            Tuple[int, int, EmbeddingLocation, ComputeDevice]
        ],  # tuple of (rows, dims, placements, compute_devices)
        feature_table_map: Optional[List[int]] = None,  # [T]
        cache_algorithm: CacheAlgorithm = CacheAlgorithm.LRU,
        cache_load_factor: float = 0.2,
        cache_sets: int = 0,
        cache_reserved_memory: float = 0.0,
        cache_precision: SparseType = SparseType.FP32,
        weights_precision: SparseType = SparseType.FP32,
        enforce_hbm: bool = False,  # place all weights/momentums in HBM when using cache
        optimizer: OptimType = OptimType.EXACT_SGD,
        record_cache_metrics: Optional[RecordCacheMetrics] = None,
        # General Optimizer args
        stochastic_rounding: bool = False,
        gradient_clipping: bool = False,
        max_gradient: float = 1.0,
        learning_rate: float = 0.01,
        eps: float = 1.0e-8,  # used by Adagrad, LAMB, and Adam
        momentum: float = 0.9,  # used by LARS-SGD
        weight_decay: float = 0.0,  # used by LARS-SGD, LAMB, and ADAM
        eta: float = 0.001,  # used by LARS-SGD,
        beta1: float = 0.9,  # used by LAMB and ADAM
        beta2: float = 0.999,  # used by LAMB and ADAM
        pooling_mode: PoolingMode = PoolingMode.SUM,
        device: Optional[torch.device] = None,
        bounds_check_mode: BoundsCheckMode = BoundsCheckMode.WARNING,
    ) -> None:
        """Set up the fused table-batched embedding module.

        Validates the table specs, computes per-feature dimension/row-count
        metadata, materializes the weight and optimizer-state tensors across
        dev/host/uvm placements, and configures the optional LXU cache.
        """
        super(SplitTableBatchedEmbeddingBagsCodegen, self).__init__()
        self.pooling_mode = pooling_mode
        self.bounds_check_mode_int: int = bounds_check_mode.value
        self.weights_precision = weights_precision
        if record_cache_metrics is not None:
            self.record_cache_metrics = record_cache_metrics
        else:
            self.record_cache_metrics = RecordCacheMetrics(False, False)
        # NOTE: a placeholder to avoid multi-construction and make TorchScript work!
        self.dummy_tensor: Tensor = torch.zeros(0, device=device)
        self.embedding_specs = embedding_specs
        (rows, dims, locations, compute_devices) = zip(*embedding_specs)
        T_ = len(self.embedding_specs)
        assert T_ > 0
        # All tables must target the same compute device.
        assert all(
            cd == compute_devices[0] for cd in compute_devices
        ), "Heterogenous compute_devices are NOT supported!"
        self.use_cpu: bool = all(cd == ComputeDevice.CPU for cd in compute_devices)
        assert not self.use_cpu or all(
            loc == EmbeddingLocation.HOST for loc in locations
        ), "ComputeDevice.CPU is only for EmbeddingLocation.HOST!"
        if device is not None:
            self.current_device: torch.device = device
        else:
            # NOTE(review): torch.cuda.current_device() returns an int, not a
            # torch.device; most torch APIs accept it, but the annotation is
            # technically inaccurate — confirm before tightening types here.
            self.current_device: torch.device = (
                torch.device("cpu") if self.use_cpu else torch.cuda.current_device()
            )
        # add placeholder require_grad param tensor to enable autograd with int8 weights
        self.placeholder_autograd_tensor = nn.Parameter(
            torch.zeros(0, device=self.current_device, dtype=torch.float)
        )
        self.int8_emb_row_dim_offset: int = INT8_EMB_ROW_DIM_OFFSET
        # Default feature->table mapping is the identity (one feature per table).
        self.feature_table_map: List[int] = (
            feature_table_map if feature_table_map is not None else list(range(T_))
        )
        T = len(self.feature_table_map)
        assert T_ <= T
        table_has_feature = [False] * T_
        for t in self.feature_table_map:
            table_has_feature[t] = True
        assert all(table_has_feature), "Each table must have at least one feature!"
        # Per-feature embedding-dimension prefix sums ([0, d0, d0+d1, ...]).
        D_offsets = [dims[t] for t in self.feature_table_map]
        D_offsets = [0] + list(accumulate(D_offsets))
        self.total_D: int = D_offsets[-1]
        self.max_D: int = max(dims)
        cached_dims = [
            embedding_spec[1]
            for embedding_spec in embedding_specs
            if embedding_spec[2] == EmbeddingLocation.MANAGED_CACHING
        ]
        self.max_D_cache: int = max(cached_dims) if len(cached_dims) > 0 else 0
        self.register_buffer(
            "D_offsets",
            torch.tensor(D_offsets, device=self.current_device, dtype=torch.int32),
        )
        hash_size_cumsum = [0] + list(accumulate(rows))
        self.total_hash_size_bits = int(log2(float(hash_size_cumsum[-1])) + 1)
        # The last element is to easily access # of rows of each table by
        # hash_size_cumsum[t + 1] - hash_size_cumsum[t]
        hash_size_cumsum = [hash_size_cumsum[t] for t in self.feature_table_map] + [
            hash_size_cumsum[-1]
        ]
        self.register_buffer(
            "hash_size_cumsum",
            torch.tensor(
                hash_size_cumsum, device=self.current_device, dtype=torch.int64
            ),
        )
        self.register_buffer(
            "rows_per_table",
            torch.tensor(
                [rows[t] for t in self.feature_table_map],
                device=self.current_device,
                dtype=torch.int64,
            ),
        )
        # Flag tensor written by the bounds-check kernel in WARNING mode.
        self.register_buffer(
            "bounds_check_warning",
            torch.tensor([0], device=self.current_device, dtype=torch.int64),
        )
        weight_split = construct_split_state(
            embedding_specs,
            rowwise=False,
            cacheable=True,
            precision=weights_precision,
        )
        table_embedding_dtype = torch.float32
        if weights_precision == SparseType.FP16:
            table_embedding_dtype = torch.float16
        elif weights_precision == SparseType.INT8:
            table_embedding_dtype = torch.uint8
        self._apply_split(
            weight_split,
            prefix="weights",
            # pyre-fixme[6]: Expected `Type[Type[torch._dtype]]` for 3rd param but
            # got `Type[typing.Union[torch.float16, torch.float32, torch.uint8]]`.
            dtype=table_embedding_dtype,
            enforce_hbm=enforce_hbm,
        )
        if self.use_cpu:
            # Construct optimizer states
            assert optimizer in (
                OptimType.EXACT_ADAGRAD,
                OptimType.EXACT_ROWWISE_ADAGRAD,
                OptimType.EXACT_SGD,
                OptimType.ROWWISE_ADAGRAD,
                OptimType.SGD,
            ), f"Optimizer {optimizer} is not supported in cpu mode."
        else:
            assert optimizer in (
                OptimType.ADAM,
                OptimType.EXACT_ADAGRAD,
                OptimType.EXACT_ROWWISE_ADAGRAD,
                OptimType.EXACT_SGD,
                OptimType.LAMB,
                OptimType.LARS_SGD,
                OptimType.PARTIAL_ROWWISE_ADAM,
                OptimType.PARTIAL_ROWWISE_LAMB,
                OptimType.SGD,
            ), f"Optimizer {optimizer} is not supported."
        self.stochastic_rounding = stochastic_rounding
        self.optimizer = optimizer
        self.optimizer_args = invokers.lookup_args.OptimizerArgs(
            stochastic_rounding=stochastic_rounding,
            gradient_clipping=gradient_clipping,
            max_gradient=max_gradient,
            learning_rate=learning_rate,
            eps=eps,
            beta1=beta1,
            beta2=beta2,
            weight_decay=weight_decay,
            eta=eta,
            momentum=momentum,
        )
        # SGD variants keep no momentum1 state; register dummy buffers so the
        # attribute names exist for TorchScript.
        if optimizer in (
            OptimType.SGD,
            OptimType.EXACT_SGD,
        ):
            # NOTE: make TorchScript work!
            self.register_buffer(
                "momentum1_dev", torch.tensor([0], dtype=torch.int64), persistent=False
            )
            self.register_buffer(
                "momentum1_host", torch.tensor([0], dtype=torch.int64), persistent=False
            )
            self.register_buffer(
                "momentum1_uvm", torch.tensor([0], dtype=torch.int64), persistent=False
            )
            self.register_buffer(
                "momentum1_placements",
                torch.tensor([0], dtype=torch.int64),
                persistent=False,
            )
            self.register_buffer(
                "momentum1_offsets",
                torch.tensor([0], dtype=torch.int64),
                persistent=False,
            )
        else:
            self._apply_split(
                construct_split_state(
                    embedding_specs,
                    rowwise=optimizer
                    in [OptimType.EXACT_ROWWISE_ADAGRAD, OptimType.ROWWISE_ADAGRAD],
                    cacheable=False,
                ),
                prefix="momentum1",
                # pyre-fixme[6]: Expected `Type[Type[torch._dtype]]` for 3rd param
                # but got `Type[torch.float32]`.
                dtype=torch.float32,
                enforce_hbm=enforce_hbm,
            )
        # Second-moment (momentum2) state only exists for the Adam/LAMB family.
        if optimizer in (
            OptimType.ADAM,
            OptimType.PARTIAL_ROWWISE_ADAM,
            OptimType.LAMB,
            OptimType.PARTIAL_ROWWISE_LAMB,
        ):
            self._apply_split(
                construct_split_state(
                    embedding_specs,
                    rowwise=optimizer
                    in (OptimType.PARTIAL_ROWWISE_ADAM, OptimType.PARTIAL_ROWWISE_LAMB),
                    cacheable=False,
                ),
                prefix="momentum2",
                # pyre-fixme[6]: Expected `Type[Type[torch._dtype]]` for 3rd param
                # but got `Type[torch.float32]`.
                dtype=torch.float32,
            )
            self.register_buffer(
                "iter", torch.zeros(1, dtype=torch.int64, device=self.current_device)
            )
        else:
            # NOTE: make TorchScript work!
            self.register_buffer(
                "momentum2_dev",
                torch.zeros(1, dtype=torch.int64, device=self.current_device),
                persistent=False,
            )
            self.register_buffer(
                "momentum2_host",
                torch.zeros(1, dtype=torch.int64, device=self.current_device),
                persistent=False,
            )
            self.register_buffer(
                "momentum2_uvm",
                torch.zeros(1, dtype=torch.int64, device=self.current_device),
                persistent=False,
            )
            self.register_buffer(
                "momentum2_placements",
                torch.zeros(1, dtype=torch.int64, device=self.current_device),
                persistent=False,
            )
            self.register_buffer(
                "momentum2_offsets",
                torch.zeros(1, dtype=torch.int64, device=self.current_device),
                persistent=False,
            )
            self.register_buffer(
                "iter",
                torch.zeros(1, dtype=torch.int64, device=self.current_device),
                persistent=False,
            )
        cache_state = construct_cache_state(embedding_specs, self.feature_table_map)
        # Add table-wise cache miss counter
        if self.record_cache_metrics.record_tablewise_cache_miss:
            num_tables = len(cache_state.cache_hash_size_cumsum) - 1
            self.register_buffer(
                "table_wise_cache_miss",
                torch.zeros(
                    num_tables,
                    device=self.current_device,
                    dtype=torch.int64,
                ),
            )
        # NOTE: make TorchScript work!
        else:
            self.register_buffer(
                "table_wise_cache_miss",
                torch.zeros(
                    0,
                    device=self.current_device,
                    dtype=torch.int64,
                ),
            )
        if cache_precision == SparseType.FP32:
            cache_embedding_dtype = torch.float32
        elif cache_precision == SparseType.FP16:
            cache_embedding_dtype = torch.float16
        else:
            raise AssertionError(f"cache_precision {cache_precision} not supported!")
        self._apply_cache_state(
            cache_state,
            cache_algorithm,
            cache_load_factor,
            cache_sets,
            cache_reserved_memory,
            dtype=cache_embedding_dtype,
        )
        logging.debug(
            f"Using fused {optimizer} with optimizer_args={self.optimizer_args}"
        )
        self.step = 0
def get_states(self, prefix: str) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
if not hasattr(self, f"{prefix}_physical_placements"):
raise DoesNotHavePrefix()
dev_param = getattr(self, f"{prefix}_dev")
host_param = getattr(self, f"{prefix}_host")
uvm_param = getattr(self, f"{prefix}_uvm")
placements = getattr(self, f"{prefix}_physical_placements")
offsets = getattr(self, f"{prefix}_physical_offsets")
return (
dev_param,
host_param,
uvm_param,
torch.tensor(placements, dtype=torch.int32),
torch.tensor(offsets, dtype=torch.int64),
)
def get_all_states(self) -> List[Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]]:
all_states = []
for prefix in ["weights", "momentum1", "momentum2"]:
try:
all_states.append(self.get_states(prefix))
except DoesNotHavePrefix:
pass
return all_states
@torch.jit.export
def get_cache_miss_counter(self) -> Tensor:
# cache_miss_counter contains two items:
# The first one is cache_miss_forward_count which records the total number of forwards which has at least one cache miss
# The second one is the unique_cache_miss_count which records to total number of unique (dedup) cache misses
# pyre-fixme[7]: Expected `Tensor` but got `typing.Union[Tensor,
# nn.Module]`.
return self.cache_miss_counter
@torch.jit.export
def get_table_wise_cache_miss(self) -> Tensor:
# table_wise_cache_miss contains all the cache miss count for each table in this embedding table object:
return self.table_wise_cache_miss
    def forward(
        self,
        indices: Tensor,
        offsets: Tensor,
        per_sample_weights: Optional[Tensor] = None,
        feature_requires_grad: Optional[Tensor] = None,
    ) -> Tensor:
        """Run the table-batched embedding lookup, dispatching to the fused
        kernel for the configured optimizer.

        Args:
            indices: indices into the per-feature tables (cast to int64).
            offsets: CSR-style bag boundaries into `indices` (cast to int64).
            per_sample_weights: optional per-index weights.
            feature_requires_grad: optional per-feature gradient mask.

        Returns:
            The pooled embedding output tensor.
        """
        (indices, offsets) = indices.long(), offsets.long()
        if self.bounds_check_mode_int != BoundsCheckMode.NONE.value:
            torch.ops.fb.bounds_check_indices(self.rows_per_table, indices, offsets, self.bounds_check_mode_int, self.bounds_check_warning)
        self.step += 1
        # If no explicit prefetch() preceded this forward, do it inline now.
        if len(self.timesteps_prefetched) == 0:
            self.prefetch(indices, offsets)
        self.timesteps_prefetched.pop(0)
        # Consume the cache locations queued by the matching prefetch().
        lxu_cache_locations = (
            self.lxu_cache_locations_empty
            if len(self.lxu_cache_locations_list) == 0
            else self.lxu_cache_locations_list.pop(0)
        )
        common_args = invokers.lookup_args.CommonArgs(
            placeholder_autograd_tensor=self.placeholder_autograd_tensor,
            # pyre-fixme[6]: Expected `Tensor` for 2nd param but got `Union[Tensor,
            # nn.Module]`.
            dev_weights=self.weights_dev,
            # pyre-fixme[6]: Expected `Tensor` for 3rd param but got `Union[Tensor,
            # nn.Module]`.
            host_weights=self.weights_host,
            # pyre-fixme[6]: Expected `Tensor` for 4th param but got `Union[Tensor,
            # nn.Module]`.
            uvm_weights=self.weights_uvm,
            # pyre-fixme[6]: Expected `Tensor` for 5th param but got `Union[Tensor,
            # nn.Module]`.
            lxu_cache_weights=self.lxu_cache_weights,
            # pyre-fixme[6]: Expected `Tensor` for 6th param but got `Union[Tensor,
            # nn.Module]`.
            weights_placements=self.weights_placements,
            # pyre-fixme[6]: Expected `Tensor` for 7th param but got `Union[Tensor,
            # nn.Module]`.
            weights_offsets=self.weights_offsets,
            D_offsets=self.D_offsets,
            total_D=self.total_D,
            max_D=self.max_D,
            hash_size_cumsum=self.hash_size_cumsum,
            total_hash_size_bits=self.total_hash_size_bits,
            indices=indices,
            offsets=offsets,
            pooling_mode=self.pooling_mode,
            indice_weights=per_sample_weights,
            feature_requires_grad=feature_requires_grad,
            lxu_cache_locations=lxu_cache_locations,
        )
        # SGD variants take no momentum state; dispatch before building it.
        if self.optimizer == OptimType.EXACT_SGD:
            return invokers.lookup_sgd.invoke(common_args, self.optimizer_args)
        elif self.optimizer == OptimType.SGD:
            assert self.use_cpu, "Approx SGD is only supported in CPU mode"
            return invokers.lookup_approx_sgd.invoke(common_args, self.optimizer_args)
        momentum1 = invokers.lookup_args.Momentum(
            dev=self.momentum1_dev,
            host=self.momentum1_host,
            uvm=self.momentum1_uvm,
            offsets=self.momentum1_offsets,
            placements=self.momentum1_placements,
        )
        if self.optimizer == OptimType.LARS_SGD:
            return invokers.lookup_lars_sgd.invoke(
                common_args, self.optimizer_args, momentum1
            )
        if self.optimizer == OptimType.EXACT_ADAGRAD:
            return invokers.lookup_adagrad.invoke(
                common_args, self.optimizer_args, momentum1
            )
        if self.optimizer == OptimType.EXACT_ROWWISE_ADAGRAD:
            return invokers.lookup_rowwise_adagrad.invoke(
                common_args, self.optimizer_args, momentum1
            )
        if self.optimizer == OptimType.ROWWISE_ADAGRAD:
            assert self.use_cpu, "Approx rowwise AdaGrad is only supported in CPU mode"
            return invokers.lookup_approx_rowwise_adagrad.invoke(
                common_args, self.optimizer_args, momentum1
            )
        # Remaining optimizers (Adam/LAMB family) also need momentum2 and iter.
        momentum2 = invokers.lookup_args.Momentum(
            dev=self.momentum2_dev,
            host=self.momentum2_host,
            uvm=self.momentum2_uvm,
            offsets=self.momentum2_offsets,
            placements=self.momentum2_placements,
        )
        # Ensure iter is always on CPU so the increment doesn't synchronize.
        if self.iter.is_cuda:
            self.iter = self.iter.cpu()
        self.iter[0] += 1
        if self.optimizer == OptimType.ADAM:
            return invokers.lookup_adam.invoke(
                common_args,
                self.optimizer_args,
                momentum1,
                momentum2,
                # pyre-fixme[6]: Expected `int` for 5th param but got `Union[float,
                # int]`.
                self.iter.item(),
            )
        if self.optimizer == OptimType.PARTIAL_ROWWISE_ADAM:
            return invokers.lookup_partial_rowwise_adam.invoke(
                common_args,
                self.optimizer_args,
                momentum1,
                momentum2,
                # pyre-fixme[6]: Expected `int` for 5th param but got `Union[float,
                # int]`.
                self.iter.item(),
            )
        if self.optimizer == OptimType.LAMB:
            return invokers.lookup_lamb.invoke(
                common_args,
                self.optimizer_args,
                momentum1,
                momentum2,
                # pyre-fixme[6]: Expected `int` for 5th param but got `Union[float,
                # int]`.
                self.iter.item(),
            )
        if self.optimizer == OptimType.PARTIAL_ROWWISE_LAMB:
            return invokers.lookup_partial_rowwise_lamb.invoke(
                common_args,
                self.optimizer_args,
                momentum1,
                momentum2,
                # pyre-fixme[6]: Expected `int` for 5th param but got `Union[float,
                # int]`.
                self.iter.item(),
            )
        raise ValueError(f"Invalid OptimType: {self.optimizer}")
    def prefetch(self, indices: Tensor, offsets: Tensor) -> None:
        """Populate the LXU cache for this batch and queue the cache locations
        that the matching forward() call will consume.

        Each call pushes one timestep onto `timesteps_prefetched` and (when a
        cache exists) one lookup result onto `lxu_cache_locations_list`; calls
        may run ahead of forward() by at most `max_prefetch_depth`.
        """
        self.timestep += 1
        self.timesteps_prefetched.append(self.timestep)
        # No LXU cache configured (CPU mode or no MANAGED_CACHING tables).
        # pyre-fixme[29]:
        # `Union[BoundMethod[typing.Callable(Tensor.numel)[[Named(self, Tensor)],
        # int], Tensor], Tensor, nn.Module]` is not a function.
        if not self.lxu_cache_weights.numel():
            return
        (indices, offsets) = indices.long(), offsets.long()
        linear_cache_indices = torch.ops.fb.linearize_cache_indices(
            self.cache_hash_size_cumsum,
            indices,
            offsets,
        )
        # Optionally record miss statistics before the cache is (re)populated.
        if (
            self.record_cache_metrics.record_cache_miss_counter
            or self.record_cache_metrics.record_tablewise_cache_miss
        ):
            lxu_cache_locations = torch.ops.fb.lxu_cache_lookup(
                linear_cache_indices,
                self.lxu_cache_state,
            )
            if self.record_cache_metrics.record_cache_miss_counter:
                self._update_cache_miss_counter(
                    lxu_cache_locations, linear_cache_indices
                )
            if self.record_cache_metrics.record_tablewise_cache_miss:
                self._update_tablewise_cache_miss(
                    lxu_cache_locations, linear_cache_indices, offsets
                )
        if self.cache_algorithm == CacheAlgorithm.LRU:
            torch.ops.fb.lru_cache_populate(
                self.weights_uvm,
                self.cache_hash_size_cumsum,
                self.total_cache_hash_size,
                self.cache_index_table_map,
                self.weights_offsets,
                self.D_offsets,
                linear_cache_indices,
                self.lxu_cache_state,
                self.lxu_cache_weights,
                self.timestep,
                self.lxu_state,
                self.stochastic_rounding,
            )
        elif self.cache_algorithm == CacheAlgorithm.LFU:
            torch.ops.fb.lfu_cache_populate(
                self.weights_uvm,
                self.cache_hash_size_cumsum,
                self.total_cache_hash_size,
                self.cache_index_table_map,
                self.weights_offsets,
                self.D_offsets,
                linear_cache_indices,
                self.lxu_cache_state,
                self.lxu_cache_weights,
                self.lxu_state,
                self.stochastic_rounding,
            )
        assert (
            len(self.lxu_cache_locations_list) < self.max_prefetch_depth
        ), f"self.lxu_cache_locations_list has grown to size: {len(self.lxu_cache_locations_list)}, this exceeds the maximum: {self.max_prefetch_depth}. This probably indicates an error in logic where prefetch() is being called more frequently than forward()"
        self.lxu_cache_locations_list.append(
            torch.ops.fb.lxu_cache_lookup(
                linear_cache_indices,
                self.lxu_cache_state,
            )
        )
def _update_cache_miss_counter(
self,
lxu_cache_locations: Tensor,
linear_cache_indices: Tensor,
) -> None:
CACHE_MISS = -1
CACHE_HIT = -2
cache_missed_locations = torch.where(
lxu_cache_locations == CACHE_MISS, linear_cache_indices, CACHE_HIT
)
unique_ids_list = torch.unique(cache_missed_locations)
unique_ids_count_list = torch.where(unique_ids_list == CACHE_HIT, 0, 1)
miss_count = torch.sum(unique_ids_count_list)
# pyre-fixme[29]:
# `Union[BoundMethod[typing.Callable(Tensor.__getitem__)[[Named(self,
# Tensor), Named(item, typing.Any)], typing.Any], Tensor], Tensor,
# nn.Module]` is not a function.
self.cache_miss_counter[0] += (miss_count > 0).to(torch.int64)
# pyre-fixme[29]:
# `Union[BoundMethod[typing.Callable(Tensor.__getitem__)[[Named(self,
# Tensor), Named(item, typing.Any)], typing.Any], Tensor], Tensor,
# nn.Module]` is not a function.
self.cache_miss_counter[1] += miss_count
def _update_tablewise_cache_miss(
self,
lxu_cache_locations: Tensor,
linear_cache_indices: Tensor,
offsets: Tensor,
) -> None:
CACHE_MISS = -1
CACHE_HIT = -2
# pyre-ignore[6]:
# Incompatible parameter type [6]: Expected `typing.Sized` for 1st
# positional only parameter to call `len` but got `typing.Union[Tensor, nn.Module]`.
num_tables = len(self.cache_hash_size_cumsum) - 1
num_offsets_per_table = (len(offsets) - 1) // num_tables
cache_missed_locations = torch.where(
lxu_cache_locations == CACHE_MISS, linear_cache_indices, CACHE_HIT
)
for i in range(num_tables):
start = offsets[i * num_offsets_per_table]
end = offsets[(i + 1) * num_offsets_per_table]
current_cache_missed_locations = cache_missed_locations[start:end]
unique_ids_list = torch.unique(current_cache_missed_locations)
unique_ids_count_list = torch.where(unique_ids_list == CACHE_HIT, 0, 1)
miss_count = torch.sum(unique_ids_count_list)
self.table_wise_cache_miss[i] += miss_count
def init_embedding_weights_uniform(self, min_val: float, max_val: float) -> None:
splits = self.split_embedding_weights()
if self.weights_precision == SparseType.INT8:
# TODO: add in-place FloatToFused8BitRowwiseQuantized conversion
for emb in splits:
assert (
len(emb.shape) == 2
), "Int8 embedding only supported for 2D weight tensors."
shape = [emb.shape[0], emb.shape[1] - self.int8_emb_row_dim_offset]
tmp_emb = torch.zeros(shape, device=self.current_device)
tmp_emb.uniform_(min_val, max_val)
tmp_emb_i8 = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(tmp_emb)
emb.data.copy_(tmp_emb_i8)
else:
for param in splits:
param.uniform_(min_val, max_val)
@torch.jit.ignore
def split_embedding_weights(self) -> List[Tensor]:
"""
Returns a list of weights, split by table
"""
splits = []
for t, (rows, dim, _, _) in enumerate(self.embedding_specs):
if self.weights_precision == SparseType.INT8:
dim += self.int8_emb_row_dim_offset
# pyre-fixme[29]:
# `Union[BoundMethod[typing.Callable(Tensor.__getitem__)[[Named(self,
# Tensor), Named(item, typing.Any)], typing.Any], Tensor], Tensor,
# nn.Module]` is not a function.
placement = self.weights_physical_placements[t]
# pyre-fixme[29]:
# `Union[BoundMethod[typing.Callable(Tensor.__getitem__)[[Named(self,
# Tensor), Named(item, typing.Any)], typing.Any], Tensor], Tensor,
# nn.Module]` is not a function.
offset = self.weights_physical_offsets[t]
if placement == EmbeddingLocation.DEVICE.value:
weights = self.weights_dev
elif placement == EmbeddingLocation.HOST.value:
weights = self.weights_host
else:
weights = self.weights_uvm
splits.append(
# pyre-fixme[29]:
# `Union[BoundMethod[typing.Callable(Tensor.detach)[[Named(self,
# Tensor)], Tensor], Tensor], Tensor, nn.Module]` is not a function.
weights.detach()[offset : offset + rows * dim].view(rows, dim)
)
return splits
@torch.jit.ignore
def get_optimizer_buffer(self, state: str) -> torch.Tensor:
for name, buffer in self.named_buffers():
if name == state:
return buffer
return torch.tensor(0)
@torch.jit.export
def get_optimizer_state(self) -> List[Dict[str, torch.Tensor]]:
r"""
Get the optimizer state dict that matches the OSS Pytorch optims
TODO: populate the supported list of optimizers
"""
if (
self.optimizer == OptimType.EXACT_ROWWISE_ADAGRAD
or self.optimizer == OptimType.ROWWISE_ADAGRAD
):
list_of_state_dict = [
{"sum": _sum[0]} for _sum in self.split_optimizer_states()
]
else:
raise NotImplementedError(
f"Getting optimizer state {self.optimizer} is not implmeneted"
)
return list_of_state_dict
    @torch.jit.ignore
    def split_optimizer_states(self) -> List[Tuple[torch.Tensor]]:
        """
        Returns a list of states, split by table
        """

        def get_optimizer_states(
            state_dev: Tensor,
            state_host: Tensor,
            state_uvm: Tensor,
            state_offsets: Tensor,
            state_placements: Tensor,
            rowwise: bool,
        ) -> List[torch.Tensor]:
            # Slice one detached state view per physical table out of the
            # dev/host/uvm backing tensor selected by its placement.
            splits = []
            for t, (rows, dim, _, _) in enumerate(self.embedding_specs):
                offset = state_offsets[t]
                placement = state_placements[t]
                if placement == EmbeddingLocation.DEVICE:
                    state = state_dev
                elif placement == EmbeddingLocation.HOST:
                    state = state_host
                else:
                    state = state_uvm
                if not rowwise:
                    splits.append(
                        state.detach()[offset : offset + rows * dim].view(rows, dim)
                    )
                else:
                    # Row-wise states hold one value per row.
                    splits.append(state.detach()[offset : offset + rows].view(rows))
            return splits

        states: List[List[torch.Tensor]] = []
        # SGD variants have no optimizer state at all.
        if self.optimizer not in (
            OptimType.SGD,
            OptimType.EXACT_SGD,
        ):
            states.append(
                get_optimizer_states(
                    self.momentum1_dev,
                    self.momentum1_host,
                    self.momentum1_uvm,
                    # pyre-fixme[6]: Expected `Tensor` for 4th param but got
                    # `Union[Tensor, nn.Module]`.
                    self.momentum1_physical_offsets,
                    # pyre-fixme[6]: Expected `Tensor` for 5th param but got
                    # `Union[Tensor, nn.Module]`.
                    self.momentum1_physical_placements,
                    rowwise=self.optimizer
                    in [OptimType.EXACT_ROWWISE_ADAGRAD, OptimType.ROWWISE_ADAGRAD],
                )
            )
        # Only the Adam/LAMB family keeps a second-moment state.
        if self.optimizer in (
            OptimType.ADAM,
            OptimType.PARTIAL_ROWWISE_ADAM,
            OptimType.LAMB,
            OptimType.PARTIAL_ROWWISE_LAMB,
        ):
            states.append(
                get_optimizer_states(
                    self.momentum2_dev,
                    self.momentum2_host,
                    self.momentum2_uvm,
                    # pyre-fixme[6]: Expected `Tensor` for 4th param but got
                    # `Union[Tensor, nn.Module]`.
                    self.momentum2_physical_offsets,
                    # pyre-fixme[6]: Expected `Tensor` for 5th param but got
                    # `Union[Tensor, nn.Module]`.
                    self.momentum2_physical_placements,
                    rowwise=self.optimizer
                    in (OptimType.PARTIAL_ROWWISE_ADAM, OptimType.PARTIAL_ROWWISE_LAMB),
                )
            )
        # Transpose from per-state lists into per-table tuples.
        return list(zip(*states))
@torch.jit.export
def set_learning_rate(self, lr: float) -> None:
"""
Sets the learning rate.
"""
self._set_learning_rate(lr)
@torch.jit.ignore
def _set_learning_rate(self, lr: float) -> float:
"""
Helper function to script `set_learning_rate`.
Note that returning None does not work.
"""
self.optimizer_args = self.optimizer_args._replace(learning_rate=lr)
return 0.0
    @torch.jit.export
    def flush(self) -> None:
        """Flush the LXU cache contents back into the backing UVM weights
        via torch.ops.fb.lxu_cache_flush; no-op when no cache is configured."""
        # pyre-fixme[29]:
        # `Union[BoundMethod[typing.Callable(Tensor.numel)[[Named(self, Tensor)],
        # int], Tensor], Tensor, nn.Module]` is not a function.
        if not self.lxu_cache_weights.numel():
            return
        torch.ops.fb.lxu_cache_flush(
            self.weights_uvm,
            self.cache_hash_size_cumsum,
            self.cache_index_table_map,
            self.weights_offsets,
            self.D_offsets,
            self.total_D,
            self.lxu_cache_state,
            self.lxu_cache_weights,
            self.stochastic_rounding,
        )
    def _apply_split(
        self,
        split: SplitState,
        prefix: str,
        dtype: Type[torch.dtype],
        enforce_hbm: bool = False,
    ) -> None:
        """Materialize the dev/host/uvm backing tensors for one state `prefix`.

        Registers `{prefix}_dev`, `{prefix}_host`, and `{prefix}_uvm` (the host
        tensor becomes an nn.Parameter for non-uint8 dtypes) plus per-feature
        `{prefix}_offsets`/`{prefix}_placements` buffers, using the sizes and
        placements computed in `split`. Empty tensors are registered for
        unused placements so the attribute names always exist.
        """
        setattr(self, f"{prefix}_physical_placements", split.placements)
        setattr(self, f"{prefix}_physical_offsets", split.offsets)
        # Per-feature (logical table) views of the physical placements/offsets.
        offsets = [split.offsets[t] for t in self.feature_table_map]
        placements = [split.placements[t] for t in self.feature_table_map]
        self.register_buffer(
            f"{prefix}_offsets",
            torch.tensor(offsets, device=self.current_device, dtype=torch.int64),
        )
        self.register_buffer(
            f"{prefix}_placements",
            torch.tensor(placements, device=self.current_device, dtype=torch.int32),
        )
        if split.dev_size > 0:
            self.register_buffer(
                f"{prefix}_dev",
                # pyre-fixme[6]: Expected `Optional[Type[torch._dtype]]` for 3rd
                # param but got `Type[Type[torch._dtype]]`.
                torch.zeros(split.dev_size, device=self.current_device, dtype=dtype),
            )
        else:
            self.register_buffer(
                f"{prefix}_dev",
                torch.empty(0, device=self.current_device, dtype=dtype),
            )
        if split.host_size > 0:
            if dtype == torch.uint8:
                # Quantized host state stays a plain buffer (not trainable).
                self.register_buffer(
                    f"{prefix}_host",
                    torch.zeros(
                        # pyre-fixme[6]: Expected `Optional[Type[torch._dtype]]` for
                        # 3rd param but got `Type[Type[torch._dtype]]`.
                        split.host_size, device=self.current_device, dtype=dtype
                    ),
                )
            else:
                # Float host state is exposed as a trainable Parameter.
                setattr(
                    self,
                    f"{prefix}_host",
                    nn.Parameter(
                        torch.zeros(
                            # pyre-fixme[6]: Expected `Optional[Type[torch._dtype]]`
                            # for 3rd param but got `Type[Type[torch._dtype]]`.
                            split.host_size, device=self.current_device, dtype=dtype
                        )
                    ),
                )
        else:
            self.register_buffer(
                f"{prefix}_host",
                torch.empty(0, device=self.current_device, dtype=dtype),
            )
        if split.uvm_size > 0:
            assert not self.use_cpu
            if enforce_hbm:
                # Keep "UVM" state pinned in device memory instead.
                self.register_buffer(
                    f"{prefix}_uvm",
                    torch.zeros(
                        # pyre-fixme[6]: Expected `Optional[Type[torch._dtype]]` for
                        # 3rd param but got `Type[Type[torch._dtype]]`.
                        split.uvm_size, device=self.current_device, dtype=dtype
                    ),
                )
            else:
                # Allocate through the managed-tensor op so the state lives in UVM.
                self.register_buffer(
                    f"{prefix}_uvm",
                    torch.zeros(
                        split.uvm_size,
                        out=torch.ops.fb.new_managed_tensor(
                            # pyre-fixme[6]: Expected `Optional[Type[torch._dtype]]`
                            # for 3rd param but got `Type[Type[torch._dtype]]`.
                            torch.zeros(1, device=self.current_device, dtype=dtype),
                            [split.uvm_size],
                        ),
                    ),
                )
        else:
            self.register_buffer(
                f"{prefix}_uvm",
                torch.empty(0, device=self.current_device, dtype=dtype),
            )
    def _apply_cache_state(
        self,
        cache_state: CacheState,
        cache_algorithm: CacheAlgorithm,
        cache_load_factor: float,
        cache_sets: int,
        cache_reserved_memory: float,
        dtype: torch.dtype,
    ) -> None:
        """Set up the software-managed LXU cache buffers on this module.

        When caching is effectively disabled (``total_cache_hash_size == 0``
        or CPU mode) only small placeholder buffers are registered so that
        TorchScript can still resolve the attribute accesses.  Otherwise the
        number of cache sets is either taken from ``cache_sets`` or derived
        from ``cache_load_factor`` and the currently free GPU memory.

        Args:
            cache_state: per-table cache layout (hash-size cumsum, index map).
            cache_algorithm: admission/eviction policy; must be LRU or LFU.
            cache_load_factor: fraction of the total hash space to cache;
                only consulted when ``cache_sets <= 0``.
            cache_sets: number of cache sets; ``<= 0`` means auto-size.
            cache_reserved_memory: bytes of GPU memory to keep free when
                auto-sizing the cache.
            dtype: element type of the cached embedding rows.
        """
        self.cache_algorithm = cache_algorithm
        self.timestep = 1
        self.timesteps_prefetched = []
        self.max_prefetch_depth = MAX_PREFETCH_DEPTH
        self.lxu_cache_locations_list = []
        # Sentinel "no locations" tensor; -1 marks a cache miss/invalid slot.
        self.lxu_cache_locations_empty = torch.empty(
            0, device=self.current_device, dtype=torch.int32
        ).fill_(-1)
        # NOTE: no cache for CPU mode!
        if cache_state.total_cache_hash_size == 0 or self.use_cpu:
            self.register_buffer(
                "lxu_cache_weights",
                torch.zeros(0, 0, device=self.current_device, dtype=dtype),
            )
            # NOTE: make TorchScript work!  These 1-element placeholders are
            # non-persistent so they never leak into checkpoints.
            self.register_buffer(
                "cache_hash_size_cumsum",
                torch.zeros(1, dtype=torch.int64, device=self.current_device),
                persistent=False,
            )
            self.register_buffer(
                "total_cache_hash_size",
                torch.zeros(1, dtype=torch.int64, device=self.current_device),
                persistent=False,
            )
            self.register_buffer(
                "cache_index_table_map",
                torch.zeros(1, dtype=torch.int64, device=self.current_device),
                persistent=False,
            )
            self.register_buffer(
                "lxu_cache_state",
                torch.zeros(1, dtype=torch.int64, device=self.current_device),
                persistent=False,
            )
            self.register_buffer(
                "lxu_state",
                torch.zeros(1, dtype=torch.int64, device=self.current_device),
                persistent=False,
            )
            self.register_buffer(
                "cache_miss_counter",
                torch.tensor([0, 0], dtype=torch.int64),
                persistent=False,
            )
            return
        assert cache_load_factor > 0
        element_size = 2 if dtype == torch.float16 else 4
        if cache_sets <= 0:
            # Auto-size: derive cache_sets from the GPU memory still free.
            total_memory = torch.cuda.get_device_properties(
                self.current_device
            ).total_memory
            free_memory = (
                total_memory
                - torch.cuda.memory_reserved(self.current_device)
                - int(cache_reserved_memory)
            )
            assert free_memory > 0
            cache_sets = (
                int(cache_state.total_cache_hash_size * cache_load_factor) + ASSOC - 1
            ) // ASSOC
            cache_size = cache_sets * ASSOC * element_size * self.max_D_cache
            if cache_size > free_memory:
                # Requested size does not fit: shrink to what fits and
                # recompute the effective load factor for logging below.
                cache_sets = (
                    int(1.0 * free_memory / self.max_D_cache / element_size) + ASSOC - 1
                ) // ASSOC
            cache_load_factor = (
                1.0 * cache_sets * ASSOC / int(cache_state.total_cache_hash_size)
            )
        assert cache_sets > 0
        if cache_algorithm == CacheAlgorithm.LFU:
            # NOTE(review): presumably the LFU state packs the set index into
            # 24 bits, hence the bound — confirm against the LFU kernel.
            assert cache_sets < 2 ** 24 - 1
        cache_size = cache_sets * 32 * element_size * self.max_D_cache
        logging.info(
            f"Using on-device cache with admission algorithm "
            f"{cache_algorithm}, {cache_sets} sets, "
            f"load_factor: {cache_load_factor : .3f}, "
            f"{cache_size / 1024.0 / 1024.0 / 1024.0 : .2f}GB"
        )
        self.total_cache_hash_size = cache_state.total_cache_hash_size
        self.register_buffer(
            "cache_hash_size_cumsum",
            torch.tensor(
                cache_state.cache_hash_size_cumsum,
                device=self.current_device,
                dtype=torch.int64,
            ),
        )
        self.register_buffer(
            "cache_index_table_map",
            torch.tensor(
                cache_state.cache_index_table_map,
                device=self.current_device,
                dtype=torch.int32,
            ),
        )
        # -1 marks an empty (invalid) cache slot.
        self.register_buffer(
            "lxu_cache_state",
            torch.zeros(
                cache_sets, ASSOC, device=self.current_device, dtype=torch.int64
            ).fill_(-1),
        )
        self.register_buffer(
            "lxu_cache_weights",
            torch.zeros(
                cache_sets * ASSOC,
                self.max_D_cache,
                device=self.current_device,
                dtype=dtype,
            ),
        )
        # LFU keeps one counter per hashed row; LRU keeps per-slot state
        # (see the size expression below).
        self.register_buffer(
            "lxu_state",
            # pyre-fixme[28]: Unexpected keyword argument `size`.
            torch.zeros(
                size=(self.total_cache_hash_size + 1,)
                if cache_algorithm == CacheAlgorithm.LFU
                else (cache_sets, ASSOC),
                device=self.current_device,
                dtype=torch.int64,
            ),
        )
        self.register_buffer(
            "cache_miss_counter",
            torch.tensor([0, 0], device=self.current_device, dtype=torch.int64),
        )
        # Validate the algorithm last; buffers above are shape-compatible
        # with both supported policies.
        if cache_algorithm not in (CacheAlgorithm.LFU, CacheAlgorithm.LRU):
            raise ValueError(
                f"cache_algorithm must be {CacheAlgorithm.LRU} "
                f"or {CacheAlgorithm.LFU}"
            )
def reset_cache_states(self) -> None:
# pyre-fixme[29]:
# `Union[BoundMethod[typing.Callable(Tensor.numel)[[Named(self, Tensor)],
# int], Tensor], Tensor, nn.Module]` is not a function.
if not self.lxu_cache_weights.numel():
return
self.lxu_cache_state.fill_(-1)
self.lxu_state.fill_(0)
self.timestep = 1
class DenseTableBatchedEmbeddingBagsCodegen(nn.Module):
    """
    Table-batched version of nn.EmbeddingBag(sparse=False)

    All tables share one flat ``weights`` parameter; per-table/per-feature
    offset buffers let the fused lookup op address each table's slice.
    """
    weights: Tensor  # flat parameter holding every table's rows back-to-back
    weights_offsets: Tensor  # per-feature start offset into `weights`
    D_offsets: Tensor  # cumulative embedding dims, one entry per feature + 1
    total_D: int  # sum of all feature dims (width of the pooled output)
    max_D: int  # largest single-table embedding dim
    hash_size_cumsum: Tensor  # cumulative row counts, per feature, + total
    total_hash_size_bits: int  # bits needed to address any global row index
    embedding_specs: List[Tuple[int, int]]
    def __init__(
        self,
        embedding_specs: List[Tuple[int, int]],  # tuple of (rows, dims)
        feature_table_map: Optional[List[int]] = None,  # [T]
        pooling_mode: PoolingMode = PoolingMode.SUM,
        use_cpu: bool = False,
    ) -> None:  # noqa C901  # tuple of (rows, dims,)
        super(DenseTableBatchedEmbeddingBagsCodegen, self).__init__()
        self.pooling_mode = pooling_mode
        self.use_cpu = use_cpu
        self.current_device: torch.device = (
            torch.device("cpu") if self.use_cpu else torch.cuda.current_device()
        )
        self.embedding_specs = embedding_specs
        (rows, dims) = zip(*embedding_specs)
        # T_ = number of physical tables; T = number of (possibly shared)
        # features, each mapped onto one table via feature_table_map.
        T_ = len(self.embedding_specs)
        assert T_ > 0
        feature_table_map = (
            feature_table_map if feature_table_map is not None else list(range(T_))
        )
        T = len(feature_table_map)
        assert T_ <= T
        D_offsets = [dims[t] for t in feature_table_map]
        D_offsets = [0] + list(accumulate(D_offsets))
        self.total_D = D_offsets[-1]
        self.max_D = max(dims)
        self.register_buffer(
            "D_offsets",
            torch.tensor(D_offsets, device=self.current_device, dtype=torch.int32),
        )
        assert self.D_offsets.numel() == T + 1
        hash_size_cumsum = [0] + list(accumulate(rows))
        self.total_hash_size_bits = int(log2(float(hash_size_cumsum[-1])) + 1)
        # The last element is to easily access # of rows of each table by
        # hash_size_cumsum[t + 1] - hash_size_cumsum[t]
        hash_size_cumsum = [hash_size_cumsum[t] for t in feature_table_map] + [
            hash_size_cumsum[-1]
        ]
        self.register_buffer(
            "hash_size_cumsum",
            torch.tensor(
                hash_size_cumsum, device=self.current_device, dtype=torch.int64
            ),
        )
        # Physical (per-table) offsets into the flat weights parameter.
        weights_offsets = [0] + list(
            accumulate([row * dim for (row, dim) in embedding_specs])
        )
        self.weights = nn.Parameter(
            torch.randn(
                weights_offsets[-1],
                device=self.current_device,
            )
        )
        # Sanity-check that every feature's table slice has the expected size
        # and that hash_size_cumsum agrees with the spec-derived row counts.
        for feature in range(T):
            t = feature_table_map[feature]
            row, dim = embedding_specs[t]
            if (
                self.weights[weights_offsets[t] : weights_offsets[t + 1]].numel()
                != row * dim
            ):
                logging.info(
                    f"row {row} dim {dim} feature {feature} t {t} {self.weights[weights_offsets[t] : weights_offsets[t + 1]].numel()}"
                )
            assert (
                self.weights[weights_offsets[t] : weights_offsets[t + 1]].numel()
                == row * dim
            )
            assert self.hash_size_cumsum[feature] == sum(
                row for (row, _) in embedding_specs[:t]
            )
        self.weights_physical_offsets: List[int] = weights_offsets
        # Re-index physical offsets per feature for the lookup kernel.
        weights_offsets = [weights_offsets[t] for t in feature_table_map]
        self.register_buffer(
            "weights_offsets",
            torch.tensor(
                weights_offsets, device=self.current_device, dtype=torch.int64
            ),
        )
    def forward(
        self,
        indices: Tensor,
        offsets: Tensor,
        per_sample_weights: Optional[Tensor] = None,
        feature_requires_grad: Optional[Tensor] = None,
    ) -> Tensor:
        """Run the fused dense embedding-bag lookup.

        Args:
            indices: flattened lookup indices for all features/bags.
            offsets: bag boundaries into ``indices`` (EmbeddingBag-style).
            per_sample_weights: optional per-index scaling weights.
            feature_requires_grad: optional per-feature gradient mask.

        Returns:
            Pooled embeddings produced by the fused codegen op.
        """
        (indices, offsets) = indices.long(), offsets.long()
        return torch.ops.fb.dense_embedding_codegen_lookup_function(
            dev_weights=self.weights,
            weights_offsets=self.weights_offsets,
            D_offsets=self.D_offsets,
            total_D=self.total_D,
            max_D=self.max_D,
            hash_size_cumsum=self.hash_size_cumsum,
            total_hash_size_bits=self.total_hash_size_bits,
            indices=indices,
            offsets=offsets,
            pooling_mode=self.pooling_mode,
            indice_weights=per_sample_weights,
            feature_requires_grad=feature_requires_grad,
        )
    @torch.jit.export
    def split_embedding_weights(self) -> List[Tensor]:
        """
        Returns a list of weights, split by table.

        Each entry is a (rows, dim) view into the flat ``weights`` parameter
        (detached, so in-place edits do not record gradients).
        """
        splits = []
        for t, (rows, dim) in enumerate(self.embedding_specs):
            offset = self.weights_physical_offsets[t]
            splits.append(
                self.weights.detach()[offset : offset + rows * dim].view(rows, dim)
            )
        return splits
    def init_embedding_weights_uniform(self, min_val: float, max_val: float) -> None:
        """Re-initialize every table in-place from U(min_val, max_val)."""
        splits = self.split_embedding_weights()
        for param in splits:
            param.uniform_(min_val, max_val)
class SequenceEmbeddingCodegen(SplitTableBatchedEmbeddingBagsCodegen):
    """Sequence (one-row-per-index) embedding lookup built on
    SplitTableBatchedEmbeddingBagsCodegen, i.e. the sparse
    nn.EmbeddingBag analogue where every index forms its own bag.
    """

    def __init__(
        self,
        **kwargs: Any,
    ) -> None:
        # Sequence mode supports exactly one embedding table (T == 1).
        assert "embedding_specs" in kwargs
        assert len(kwargs["embedding_specs"]) == 1
        super().__init__(**kwargs)

    # @torch.jit.ignore
    def forward(
        self,
        indices: Tensor,
        offsets: Optional[Tensor] = None,
        per_sample_weights: Optional[Tensor] = None,
        feature_requires_grad: Optional[Tensor] = None,
    ) -> Tensor:
        # Each index is its own bag, so any caller-provided `offsets` are
        # replaced by [0, 1, ..., numel].
        one_per_index = torch.arange(
            0,
            indices.numel() + 1,
            device=indices.device,
            dtype=torch.int64,
        )
        return super().forward(
            indices,
            one_per_index,
            per_sample_weights,
            feature_requires_grad,
        )
class DenseSequenceEmbeddingCodegen(DenseTableBatchedEmbeddingBagsCodegen):
    """Sequence (one-row-per-index) embedding lookup built on
    DenseTableBatchedEmbeddingBagsCodegen, i.e. the dense
    nn.EmbeddingBag analogue where every index forms its own bag.
    """

    def __init__(
        self,
        **kwargs: Any,
    ) -> None:
        # Sequence mode supports exactly one embedding table (T == 1).
        assert "embedding_specs" in kwargs
        assert len(kwargs["embedding_specs"]) == 1
        super().__init__(**kwargs)

    # @torch.jit.ignore
    def forward(
        self,
        indices: Tensor,
        offsets: Optional[Tensor] = None,
        per_sample_weights: Optional[Tensor] = None,
        feature_requires_grad: Optional[Tensor] = None,
    ) -> Tensor:
        # Each index is its own bag, so any caller-provided `offsets` are
        # replaced by [0, 1, ..., numel].
        one_per_index = torch.arange(
            0,
            indices.numel() + 1,
            device=indices.device,
            dtype=torch.int64,
        )
        return super().forward(
            indices,
            one_per_index,
            per_sample_weights,
            feature_requires_grad,
        )
def round_up(a: int, b: int) -> int:
    """Round ``a`` up to the nearest multiple of ``b`` (``b`` > 0).

    Uses the standard ceiling-division identity ``ceil(a / b) * b``.
    """
    # `//` on ints already yields an int, so the old `int(...)` cast was
    # redundant and has been dropped.
    return ((a + b - 1) // b) * b
def rounded_row_size_in_bytes(dim: int, weight_ty: SparseType) -> int:
    """Byte size of one row of width ``dim`` stored as ``weight_ty``,
    padded up so each row starts on a 16-byte boundary.
    """
    unpadded = unpadded_row_size_in_bytes(dim, weight_ty)
    return round_up(unpadded, 16)
def unpadded_row_size_in_bytes(dim: int, weight_ty: SparseType) -> int:
    """Exact (unpadded) byte size of one row of width ``dim`` stored as
    ``weight_ty``.  The int types carry 4 extra bytes per row (presumably
    the scale/shift metadata used by the quantized kernels).
    """
    ty = weight_ty.value
    if ty == SparseType.FP32.value:
        return dim * 4
    if ty == SparseType.FP16.value:
        return dim * 2
    if ty == SparseType.INT8.value:
        return dim + 4
    if ty == SparseType.INT4.value:
        return dim // 2 + 4
    if ty == SparseType.INT2.value:
        return dim // 4 + 4
    # Match the original dict-lookup behavior on unknown types.
    raise KeyError(ty)
def intn_construct_split_state(
    embedding_specs: List[Tuple[str, int, int, SparseType, EmbeddingLocation]],
    cacheable: bool,
) -> SplitState:
    """Compute per-table placements/offsets and the total byte sizes of the
    device, host, and UVM weight buffers for int-N embedding tables.

    Args:
        embedding_specs: (name, num_embeddings, embedding_dim, weight_ty,
            location) per table.
        cacheable: whether MANAGED_CACHING placement is allowed (it is
            declared but not implemented, so hitting it raises).

    Returns:
        A SplitState with one placement/offset per table and the summed
        sizes per memory space.

    Raises:
        AssertionError: if a table requests MANAGED_CACHING placement.
    """

    # Fix: this helper used to be re-defined on every loop iteration;
    # hoisted out of the loop (behavior unchanged).
    def align_to_cacheline(a: int) -> int:
        # align each table to 128b cache line boundary.
        return round_up(a, 128)

    placements = []
    offsets = []
    dev_size = 0
    host_size = 0
    uvm_size = 0
    for (_, num_embeddings, embedding_dim, weight_ty, location) in embedding_specs:
        # Row size includes per-row padding to a 16-byte boundary.
        row_size = rounded_row_size_in_bytes(embedding_dim, weight_ty)
        state_size = align_to_cacheline(num_embeddings * row_size)
        if location == EmbeddingLocation.HOST:
            placements.append(EmbeddingLocation.HOST)
            offsets.append(host_size)
            host_size += state_size
        elif location == EmbeddingLocation.DEVICE:
            placements.append(EmbeddingLocation.DEVICE)
            offsets.append(dev_size)
            dev_size += state_size
        else:
            if cacheable and location == EmbeddingLocation.MANAGED_CACHING:
                placements.append(
                    EmbeddingLocation.MANAGED_CACHING
                )  # Note: this isn't supported yet.
                raise AssertionError("MANAGED_CACHING is not supported yet")
            else:
                placements.append(EmbeddingLocation.MANAGED)
                offsets.append(uvm_size)
                uvm_size += state_size
    assert len(placements) == len(offsets)
    return SplitState(
        dev_size=dev_size,
        host_size=host_size,
        uvm_size=uvm_size,
        placements=placements,
        offsets=offsets,
    )
class IntNBitTableBatchedEmbeddingBagsCodegen(nn.Module):
    """
    Table-batched version of nn.EmbeddingBag(sparse=False)
    Inference version, with FP16/INT8/INT4 supports

    All tables live in flat uint8 buffers (device / host / UVM); per-table
    placements and byte offsets address each table's slice.  Optional index
    remapping supports pruned tables (array- or hashmap-based).
    """
    def __init__(
        self,
        embedding_specs: List[
            Tuple[str, int, int, SparseType, EmbeddingLocation]
        ],  # tuple of (feature_names, rows, dims, SparseType, EmbeddingLocation/placement)
        feature_table_map: Optional[List[int]] = None,  # [T]
        index_remapping: Optional[List[Tensor]] = None,
        pooling_mode: PoolingMode = PoolingMode.SUM,
        use_cpu: bool = False,
        bounds_check_mode: BoundsCheckMode = BoundsCheckMode.WARNING,
        weight_lists: Optional[List[Tuple[Tensor, Tensor]]] = None,
        load_factor: float = 0.5,
        use_array_for_index_remapping: bool = True,
    ) -> None:  # noqa C901  # tuple of (rows, dims,)
        super(IntNBitTableBatchedEmbeddingBagsCodegen, self).__init__()
        self.use_cpu = use_cpu
        self.current_device: torch.device = (
            torch.device("cpu") if self.use_cpu else torch.cuda.current_device()
        )
        self.pooling_mode = pooling_mode
        # Stored as a plain int so TorchScript can compare it in forward().
        self.bounds_check_mode_int: int = bounds_check_mode.value
        self.embedding_specs = embedding_specs
        # (feature_names, rows, dims, weights_tys, locations) = zip(*embedding_specs)
        # Pyre workaround
        self.feature_names: List[str] = [e[0] for e in embedding_specs]
        rows: List[int] = [e[1] for e in embedding_specs]
        dims: List[int] = [e[2] for e in embedding_specs]
        self.dims: List[int] = dims
        weights_tys: List[SparseType] = [e[3] for e in embedding_specs]
        locations: List[EmbeddingLocation] = [e[4] for e in embedding_specs]
        assert not self.use_cpu or all(
            loc == EmbeddingLocation.HOST for loc in locations
        ), "ComputeDevice.CPU is only for EmbeddingLocation.HOST!"
        # T_ = number of physical tables; T = number of features, each mapped
        # onto one table via feature_table_map.
        T_ = len(self.embedding_specs)
        assert T_ > 0
        for (dim, weight_ty) in zip(dims, weights_tys):
            assert dim % weight_ty.align_size() == 0
        self.feature_table_map: List[int] = (
            feature_table_map if feature_table_map is not None else list(range(T_))
        )
        T = len(self.feature_table_map)
        assert T_ <= T
        table_has_feature = [False] * T_
        for t in self.feature_table_map:
            table_has_feature[t] = True
        assert all(table_has_feature), "Each table must have at least one feature!"
        D_offsets = [dims[t] for t in self.feature_table_map]
        D_offsets = [0] + list(accumulate(D_offsets))
        self.total_D: int = D_offsets[-1]
        def max_ty_D(ty: SparseType) -> int:
            # Widest dim among the tables stored with this sparse type.
            return max(
                [dim for dim, weight_ty in zip(dims, weights_tys) if weight_ty == ty],
                default=0,
            )
        self.max_int2_D: int = max_ty_D(SparseType.INT2)
        self.max_int4_D: int = max_ty_D(SparseType.INT4)
        self.max_int8_D: int = max_ty_D(SparseType.INT8)
        self.max_float16_D: int = max_ty_D(SparseType.FP16)
        self.max_float32_D: int = max_ty_D(SparseType.FP32)
        self.register_buffer(
            "D_offsets",
            torch.tensor(D_offsets, device=self.current_device, dtype=torch.int32),
        )
        assert self.D_offsets.numel() == T + 1
        self.register_buffer(
            "rows_per_table",
            torch.tensor(
                [rows[t] for t in self.feature_table_map],
                device=self.current_device,
                dtype=torch.int64,
            ),
        )
        self.register_buffer(
            "bounds_check_warning",
            torch.tensor([0], device=self.current_device, dtype=torch.int64),
        )
        # NOTE(review): align_to_cacheline appears unused in __init__
        # (intn_construct_split_state defines its own copy) — candidate
        # for removal.
        def align_to_cacheline(a: int) -> int:
            # align each table to 128b cache line boundary.
            return round_up(a, 128)
        weights_tys_int = [weights_tys[t].as_int() for t in self.feature_table_map]
        self.register_buffer(
            "weights_tys",
            torch.tensor(
                weights_tys_int, device=self.current_device, dtype=torch.uint8
            ),
        )
        # Weight storage starts empty; _apply_split() allocates for real.
        self.weight_initialized: bool = False
        self.weights_dev: torch.Tensor = torch.zeros(
            0,
            device=self.current_device,
            dtype=torch.uint8,
        )
        self.weights_host: torch.Tensor = torch.zeros(
            0, device=self.current_device, dtype=torch.uint8
        )
        self.weights_uvm: torch.Tensor = torch.empty(0, device=self.current_device, dtype=torch.uint8)
        weight_split: SplitState = intn_construct_split_state(
            self.embedding_specs,
            cacheable=True,
        )
        self.weights_physical_placements: List[int] = [
            t.value for t in weight_split.placements
        ]
        self.weights_physical_offsets: List[int] = weight_split.offsets
        self.host_size: int = weight_split.host_size
        self.dev_size: int = weight_split.dev_size
        self.uvm_size: int = weight_split.uvm_size
        # Assign weights after weights and weights_offsets are initialized.
        if weight_lists:
            self._apply_split(
                self.dev_size,
                self.host_size,
                self.uvm_size,
                self.weights_physical_placements,
                self.weights_physical_offsets,
            )
            self.assign_embedding_weights(weight_lists)  # type: ignore
        # Handle index remapping for embedding pruning.  Empty buffers mean
        # "no remapping"; set_index_remappings() fills them in when needed.
        self.register_buffer(
            "index_remappings_array_offsets",
            torch.empty(0, device=self.current_device, dtype=torch.int64),
        )
        self.register_buffer(
            "index_remappings_array",
            torch.empty(0, device=self.current_device, dtype=torch.int32),
        )
        self.register_buffer(
            "index_remapping_hash_table_offsets",
            torch.empty(0, device=self.current_device, dtype=torch.int64),
        )
        self.register_buffer(
            "index_remapping_hash_table",
            torch.empty(0, device=self.current_device, dtype=torch.int32),
        )
        # pyre-fixme[4]: Attribute must be annotated.
        self.index_remapping_hash_table_cpu = None
        if index_remapping:
            self.set_index_remappings(
                index_remapping, load_factor, use_array_for_index_remapping
            )
    def forward(
        self,
        indices: Tensor,
        offsets: Tensor,
        per_sample_weights: Optional[Tensor] = None,
        feature_requires_grad: Optional[Tensor] = None,
    ) -> Tensor:
        """Run the fused int-N embedding-bag lookup.

        Applies pruning remapping (if configured) and optional bounds
        checking before dispatching to the codegen op.

        Args:
            indices: flattened lookup indices for all features/bags.
            offsets: bag boundaries into ``indices`` (EmbeddingBag-style).
            per_sample_weights: optional per-index scaling weights.
            feature_requires_grad: optional per-feature gradient mask
                (accepted for interface parity; not forwarded to the op).

        Returns:
            Pooled embeddings produced by the fused codegen op.
        """
        assert self.weight_initialized
        if self.index_remapping_hash_table_cpu is not None:
            indices = self.index_remapping_hash_table_cpu.lookup(indices, offsets)
        elif self.index_remapping_hash_table.numel() > 0:
            # Convert from raw indices to pruned indices
            indices = torch.ops.fb.pruned_hashmap_lookup(
                indices,
                offsets,
                self.index_remapping_hash_table,
                self.index_remapping_hash_table_offsets,
            )
        elif self.index_remappings_array.numel() > 0:
            indices = torch.ops.fb.pruned_array_lookup(
                indices,
                offsets,
                self.index_remappings_array,
                self.index_remappings_array_offsets,
            )
        # We cast to int as a TorchScript workaround.
        if self.bounds_check_mode_int != BoundsCheckMode.NONE.value:
            torch.ops.fb.bounds_check_indices(
                self.rows_per_table,
                indices,
                offsets,
                self.bounds_check_mode_int,
                self.bounds_check_warning,
            )
        # Note: CPU and CUDA ops use the same interface to facilitate JIT IR
        # generation for CUDA/CPU. For CPU op, we don't need weights_uvm and
        # weights_placements
        return torch.ops.fb.int_nbit_split_embedding_codegen_lookup_function(
            dev_weights=self.weights_host if self.host_size > 0 else self.weights_dev,
            uvm_weights=self.weights_uvm,
            weights_placements=self.weights_placements,
            weights_offsets=self.weights_offsets,
            weights_tys=self.weights_tys,
            D_offsets=self.D_offsets,
            total_D=self.total_D,
            max_int2_D=self.max_int2_D,
            max_int4_D=self.max_int4_D,
            max_int8_D=self.max_int8_D,
            max_float16_D=self.max_float16_D,
            max_float32_D=self.max_float32_D,
            indices=indices,
            offsets=offsets,
            pooling_mode=self.pooling_mode,
            indice_weights=per_sample_weights,
        )
    def _apply_split(
        self,
        dev_size: int,
        host_size: int,
        uvm_size: int,
        placements: List[int],
        offsets: List[int],
    ) -> None:
        """Allocate the flat device/host/UVM weight buffers and record the
        per-table placements and byte offsets.  Must be called exactly once.
        """
        assert not self.weight_initialized, "Weights have already been initialized."
        self.weight_initialized = True
        self.weights_physical_placements = placements
        self.weights_physical_offsets = offsets
        self.host_size = host_size
        self.dev_size = dev_size
        self.uvm_size = uvm_size
        # Re-index per-table values per feature for the lookup kernel.
        offsets = [offsets[t] for t in self.feature_table_map]
        placements = [placements[t] for t in self.feature_table_map]
        self.weights_offsets = torch.tensor(
            offsets, device=self.D_offsets.device, dtype=torch.int64
        )
        self.weights_placements = torch.tensor(
            placements, device=self.D_offsets.device, dtype=torch.int32
        )
        if dev_size > 0:
            self.weights_dev = torch.zeros(
                dev_size,
                device=self.D_offsets.device,
                dtype=torch.uint8,
            )
        if host_size > 0:
            self.weights_host = torch.zeros(
                host_size, device=self.D_offsets.device, dtype=torch.uint8
            )
        if uvm_size > 0:
            assert not self.use_cpu
            # UVM buffer is allocated via the managed-tensor op so it can be
            # paged between host and device.
            self.weights_uvm = torch.zeros(
                uvm_size,
                out=torch.ops.fb.new_managed_tensor(
                    torch.zeros(1, device=self.D_offsets.device, dtype=torch.uint8),
                    [uvm_size],
                ),
            )
    @torch.jit.export
    def split_embedding_weights(self) -> List[Tuple[Tensor, Optional[Tensor]]]:
        """
        Returns a list of weights, split by table

        Each entry is (weights, scale_shift) where scale_shift is the first
        4 bytes of each row for int-quantized types and None for FP types.
        """
        assert self.weight_initialized
        splits: List[Tuple[Tensor, Optional[Tensor]]] = []
        for t, (_, rows, dim, weight_ty, _) in enumerate(self.embedding_specs):
            placement = self.weights_physical_placements[t]
            if placement == EmbeddingLocation.DEVICE.value:
                weights = self.weights_dev
            elif placement == EmbeddingLocation.HOST.value:
                weights = self.weights_host
            else:
                weights = self.weights_uvm
            offset = self.weights_physical_offsets[t]
            weights_shifts = weights.detach()[
                offset : offset + rows * rounded_row_size_in_bytes(dim, weight_ty)
            ].view(rows, rounded_row_size_in_bytes(dim, weight_ty))
            # remove the padding at the end of each row.
            weights_shifts = weights_shifts[
                :, : unpadded_row_size_in_bytes(dim, weight_ty)
            ]
            if (
                weight_ty == SparseType.INT8
                or weight_ty == SparseType.INT4
                or weight_ty == SparseType.INT2
            ):
                splits.append(
                    (
                        weights_shifts[:, 4:],
                        weights_shifts[:, :4],
                    )
                )
            else:
                assert weight_ty == SparseType.FP16 or weight_ty == SparseType.FP32
                splits.append(
                    (
                        weights_shifts,
                        None,
                    )
                )
        return splits
    def initialize_weights(self) -> None:
        """Allocate the weight buffers if they have not been allocated yet
        (no-op when weights were already applied, e.g. via weight_lists).
        """
        if not self.weight_initialized:
            self._apply_split(
                self.dev_size,
                self.host_size,
                self.uvm_size,
                self.weights_physical_placements,
                self.weights_physical_offsets,
            )
            self.weight_initialized: bool = True
    def fill_random_weights(self) -> None:
        """
        Fill the buffer with random weights, table by table
        FIXME: make it in-place fill.
        """
        self.initialize_weights()
        weights = self.split_embedding_weights()
        for dest_weight in weights:
            dest_weight[0].copy_(
                torch.randint(
                    0,
                    255,
                    size=dest_weight[0].shape,
                    dtype=torch.uint8,
                    device=self.current_device,
                )
            )
    def assign_embedding_weights(
        self, q_weight_list: List[Tuple[Tensor, Optional[Tensor]]]
    ) -> None:
        """
        Assigns self.split_embedding_weights() with values from the input list of weights and scale_shifts.
        """
        weights = self.split_embedding_weights()
        assert len(q_weight_list) == len(weights)
        for (dest_weight, input_weight) in zip(weights, q_weight_list):
            dest_weight[0].copy_(input_weight[0])
            # scale_shift must be present exactly when the table is
            # int-quantized (see split_embedding_weights()).
            if input_weight[1] is not None:
                assert dest_weight[1] is not None
                dest_weight[1].copy_(input_weight[1])
            else:
                assert dest_weight[1] is None
    def set_index_remappings(
        self,
        index_remapping: List[Tensor],
        load_factor: float = 0.5,
        use_array_for_index_remapping: bool = True,
    ) -> None:
        """Install the pruning index remapping, either as dense per-table
        arrays (default) or as a hash table (CPU class or GPU op).

        Args:
            index_remapping: one tensor per table mapping raw -> pruned
                indices; a None entry means the table is unpruned.
            load_factor: hash-table fill factor (hashmap variant only).
            use_array_for_index_remapping: choose array vs. hashmap storage.
        """
        rows: List[int] = [e[1] for e in self.embedding_specs]
        T = len(self.embedding_specs)
        if not use_array_for_index_remapping:
            # Hashmap variant: size each table's bucket region from the
            # load factor (0 capacity for unpruned tables).
            capacities = [
                round_up(int(row * 1.0 / load_factor), 32)
                if index_remap is not None
                else 0
                for (index_remap, row) in zip(index_remapping, rows)
            ]
            hash_table = torch.empty(
                (sum(capacities), 2),
                dtype=torch.int32,
            )
            hash_table[:, :] = -1
            hash_table_offsets = torch.tensor(
                [0] + list(accumulate(capacities))
            ).long()
            # Unpruned tables get an identity mapping.
            merged_index_remappings = [
                mapping if mapping is not None else Tensor(list(range(spec[1])))
                for (mapping, spec) in zip(index_remapping, self.embedding_specs)
            ]
            original_feature_rows = [
                mapping.numel() for mapping in merged_index_remappings
            ]
            dense_indices = torch.cat(merged_index_remappings, dim=0).int()
            indices = torch.cat(
                [torch.arange(row) for row in original_feature_rows], dim=0
            ).int()
            offsets = torch.tensor(
                [0] + list(accumulate(original_feature_rows))
            ).int()
            if self.use_cpu:
                self.index_remapping_hash_table_cpu = torch.classes.fb.PrunedMapCPU()
                self.index_remapping_hash_table_cpu.insert(
                    indices, dense_indices, offsets, T
                )
            else:
                torch.ops.fb.pruned_hashmap_insert(
                    indices, dense_indices, offsets, hash_table, hash_table_offsets
                )
                self.index_remapping_hash_table = hash_table.to(self.current_device)
                self.index_remapping_hash_table_offsets = hash_table_offsets.to(
                    self.current_device
                )
                self.index_remapping_hash_table_cpu = None
        else:
            # Array variant: concatenate the per-table mappings and record
            # cumulative offsets (None entries contribute zero length).
            index_remappings_array_offsets = [0]
            last_offset = 0
            for mapping in index_remapping:
                if mapping is not None:
                    last_offset += mapping.numel()
                index_remappings_array_offsets.append(last_offset)
            self.index_remappings_array_offsets = torch.tensor(
                index_remappings_array_offsets,
                device=self.current_device,
                dtype=torch.int64,
            )
            self.index_remappings_array = (
                torch.empty(0, dtype=torch.int32, device=self.current_device)
                if self.index_remappings_array_offsets[-1] == 0
                else torch.cat(
                    [mapping for mapping in index_remapping if mapping is not None]
                ).to(self.current_device)
            )
| 37.589731 | 259 | 0.580925 |
9e1c35c7cd801823750aeb8595b547fb6d823174 | 4,404 | py | Python | AdelaiDet/adet/modeling/blendmask/basis_module.py | km1562/AdelaiDet2 | 293cd6410631d36145f9ae4eb06a63520c66b92d | [
"Apache-2.0"
] | null | null | null | AdelaiDet/adet/modeling/blendmask/basis_module.py | km1562/AdelaiDet2 | 293cd6410631d36145f9ae4eb06a63520c66b92d | [
"Apache-2.0"
] | null | null | null | AdelaiDet/adet/modeling/blendmask/basis_module.py | km1562/AdelaiDet2 | 293cd6410631d36145f9ae4eb06a63520c66b92d | [
"Apache-2.0"
] | null | null | null | from typing import Dict
from torch import nn
from torch.nn import functional as F
from detectron2.utils.registry import Registry
from detectron2.layers import ShapeSpec
from adet.layers import conv_with_kaiming_uniform
BASIS_MODULE_REGISTRY = Registry("BASIS_MODULE")
BASIS_MODULE_REGISTRY.__doc__ = """
Registry for basis module, which produces global bases from feature maps.
The registered object will be called with `obj(cfg, input_shape)`.
The call should return a `nn.Module` object.
"""
def build_basis_module(cfg, input_shape):
    """Instantiate the basis module named by ``cfg.MODEL.BASIS_MODULE.NAME``.

    The class is resolved from ``BASIS_MODULE_REGISTRY`` and called as
    ``cls(cfg, input_shape)``.
    """
    module_cls = BASIS_MODULE_REGISTRY.get(cfg.MODEL.BASIS_MODULE.NAME)
    return module_cls(cfg, input_shape)
@BASIS_MODULE_REGISTRY.register()
class ProtoNet(nn.Module):
    """Basis module that fuses several FPN levels into a set of global
    basis masks (the BlendMask "bases"), optionally supervised by an
    auxiliary semantic-segmentation head during training.
    """
    def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
        """
        TODO: support deconv and variable channel width
        """
        # official protonet has a relu after each conv
        super().__init__()
        # fmt: off
        mask_dim = cfg.MODEL.BASIS_MODULE.NUM_BASES
        planes = cfg.MODEL.BASIS_MODULE.CONVS_DIM
        self.in_features = cfg.MODEL.BASIS_MODULE.IN_FEATURES
        self.loss_on = cfg.MODEL.BASIS_MODULE.LOSS_ON
        norm = cfg.MODEL.BASIS_MODULE.NORM
        num_convs = cfg.MODEL.BASIS_MODULE.NUM_CONVS
        self.visualize = cfg.MODEL.BLENDMASK.VISUALIZE
        # fmt: on
        feature_channels = {k: v.channels for k, v in input_shape.items()}
        conv_block = conv_with_kaiming_uniform(norm, True)  # conv relu bn
        # One 3x3 refine conv per input level, projecting to `planes` channels.
        self.refine = nn.ModuleList()
        for in_feature in self.in_features:
            self.refine.append(conv_block(
                feature_channels[in_feature], planes, 3, 1))
        # Tower: num_convs 3x3 convs, 2x upsample, one more conv, then a
        # 1x1 conv producing `mask_dim` basis channels.
        tower = []
        for i in range(num_convs):
            tower.append(
                conv_block(planes, planes, 3, 1))
        tower.append(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False))
        tower.append(
            conv_block(planes, planes, 3, 1))
        tower.append(
            nn.Conv2d(planes, mask_dim, 1))
        self.add_module('tower', nn.Sequential(*tower))
        if self.loss_on:
            # fmt: off
            self.common_stride = cfg.MODEL.BASIS_MODULE.COMMON_STRIDE
            num_classes = cfg.MODEL.BASIS_MODULE.NUM_CLASSES + 1
            self.sem_loss_weight = cfg.MODEL.BASIS_MODULE.LOSS_WEIGHT
            # fmt: on
            # Auxiliary semantic head over the highest-resolution input level.
            inplanes = feature_channels[self.in_features[0]]
            self.seg_head = nn.Sequential(nn.Conv2d(inplanes, planes, kernel_size=3,
                                                    stride=1, padding=1, bias=False),
                                          nn.BatchNorm2d(planes),
                                          nn.ReLU(),
                                          nn.Conv2d(planes, planes, kernel_size=3,
                                                    stride=1, padding=1, bias=False),
                                          nn.BatchNorm2d(planes),
                                          nn.ReLU(),
                                          nn.Conv2d(planes, num_classes, kernel_size=1,
                                                    stride=1))
    def forward(self, features, targets=None):
        """Fuse the configured feature levels and emit the basis masks.

        Args:
            features: dict of level name -> feature map tensor.
            targets: semantic-seg ground truth; only used when training
                with the auxiliary loss enabled.

        Returns:
            (outputs, losses): outputs["bases"] holds the basis tensor;
            losses carries 'loss_basis_sem' during training (else empty).
        """
        # Sum all refined levels, upsampled to the first level's resolution.
        for i, f in enumerate(self.in_features):
            if i == 0:
                x = self.refine[i](features[f])
            else:
                x_p = self.refine[i](features[f])
                x_p = F.interpolate(x_p, x.size()[2:], mode="bilinear", align_corners=False)
                # x_p = aligned_bilinear(x_p, features.size(3) // x_p.size(3))
                x = x + x_p
        outputs = {"bases": [self.tower(x)]}
        losses = {}
        # auxiliary thing semantic loss
        if self.training and self.loss_on:
            sem_out = self.seg_head(features[self.in_features[0]])
            # resize target to reduce memory
            gt_sem = targets.unsqueeze(1).float()
            gt_sem = F.interpolate(
                gt_sem, scale_factor=1 / self.common_stride)
            seg_loss = F.cross_entropy(
                sem_out, gt_sem.squeeze(1).long())
            losses['loss_basis_sem'] = seg_loss * self.sem_loss_weight
        elif self.visualize and hasattr(self, "seg_head"):
            outputs["seg_thing_out"] = self.seg_head(features[self.in_features[0]])
        return outputs, losses
| 41.942857 | 92 | 0.570618 |
6eb99c36725beb0bd21d85f5afa25b1f50af0ab5 | 2,582 | py | Python | examples/ensemble/tree_embedding.py | synapticarbors/sk-dist | e5729e62fbdb7b8513be1c4fd0d463d8aec5b837 | [
"Apache-2.0"
] | 292 | 2019-08-29T20:31:05.000Z | 2022-03-25T23:14:48.000Z | examples/ensemble/tree_embedding.py | synapticarbors/sk-dist | e5729e62fbdb7b8513be1c4fd0d463d8aec5b837 | [
"Apache-2.0"
] | 20 | 2019-09-05T08:39:05.000Z | 2021-07-18T23:35:14.000Z | examples/ensemble/tree_embedding.py | synapticarbors/sk-dist | e5729e62fbdb7b8513be1c4fd0d463d8aec5b837 | [
"Apache-2.0"
] | 61 | 2019-09-02T21:40:03.000Z | 2022-02-17T18:10:29.000Z | """
===============================================================
Encode circles data with tree embedding and compare classifiers
===============================================================
Here we look at an example modelled of of scikit-learn's hashing
feature transformation example:
https://scikit-learn.org/stable/auto_examples/ensemble/plot_random_forest_embedding.html
We use more training data and more trees in the extra trees
ensemble. We also score the estimators, both naive bayes and
extra trees on both the raw feature data and the tree embedding
transformed data.
This illustrates the power of using tree embedding for hashing
feature transformation. Both the naive bayes and extra trees
classifiers do better with the transformed data. Of particular
interest is the naive bayes model performing no better than
random on the original data but very high scoring with the
hashing transformed data.
Here is a sample output run:
Naive Bayes -- Transformed: 0.9733504
Naive Bayes -- Original: 0.4964787
Extra Trees -- Transformed: 0.98369
Extra Trees -- Original: 0.9469593
"""
print(__doc__)
import numpy as np
from sklearn.datasets import make_circles
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import BernoulliNB
from skdist.distribute.ensemble import DistRandomTreesEmbedding
from pyspark.sql import SparkSession
# instantiate spark session; the SparkContext is handed to skdist so that
# the tree-embedding ensemble is fit in a distributed fashion
spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
# make a synthetic dataset: two concentric noisy circles, which are not
# linearly separable in the raw 2-D feature space
X, y = make_circles(n_samples=10000, factor=0.5, random_state=0, noise=0.15)
# use DistRandomTreesEmbedding to transform data into a sparse, high-
# dimensional one-hot leaf encoding (hashing feature transformation)
hasher = DistRandomTreesEmbedding(n_estimators=1000, random_state=0, max_depth=3, sc=sc)
X_transformed = hasher.fit_transform(X)
# score a Naive Bayes classifier on the original and transformed data
# (Bernoulli NB suits the binary leaf-indicator features)
nb = BernoulliNB()
print(
    "Naive Bayes -- Transformed: {0}".format(
        np.mean(cross_val_score(nb, X_transformed, y, cv=5, scoring="roc_auc"))
    )
)
print(
    "Naive Bayes -- Original: {0}".format(
        np.mean(cross_val_score(nb, X, y, cv=5, scoring="roc_auc"))
    )
)
# score an Extra Trees classifier on the original and transformed data
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
print(
    "Extra Trees -- Transformed: {0}".format(
        np.mean(cross_val_score(trees, X_transformed, y, cv=5, scoring="roc_auc"))
    )
)
print(
    "Extra Trees -- Original: {0}".format(
        np.mean(cross_val_score(trees, X, y, cv=5, scoring="roc_auc"))
    )
)
| 33.973684 | 88 | 0.730442 |
d4681dd2eec9abdac25889a83dff2ea68cf89fc5 | 2,655 | py | Python | qiskit/providers/baseprovider.py | Roshan-Thomas/qiskit-terra | 77219b5c7b7146b1545c5e5190739b36f4064b2f | [
"Apache-2.0"
] | 1,599 | 2018-07-10T10:59:12.000Z | 2022-03-31T23:56:25.000Z | qiskit/providers/baseprovider.py | Roshan-Thomas/qiskit-terra | 77219b5c7b7146b1545c5e5190739b36f4064b2f | [
"Apache-2.0"
] | 5,244 | 2018-07-10T06:20:13.000Z | 2022-03-31T22:18:48.000Z | qiskit/providers/baseprovider.py | Roshan-Thomas/qiskit-terra | 77219b5c7b7146b1545c5e5190739b36f4064b2f | [
"Apache-2.0"
] | 1,409 | 2018-07-10T02:16:12.000Z | 2022-03-31T09:01:32.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Base class for a backend provider."""
from abc import ABC, abstractmethod
import warnings
from .exceptions import QiskitBackendNotFoundError
class BaseProvider(ABC):
    """Base class for a Backend Provider.

    .. deprecated:: 0.18.0
        Build providers on the ``ProviderV1`` abstract class instead; this
        interface only remains for backward compatibility.
    """

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        warnings.warn(
            "The BaseProvider abstract interface is deprecated as of "
            "the 0.18.0 release and will be removed in a future "
            "release. Instead you should build your backends using "
            "the ProviderV1 abstract class (which is the current "
            "latest version of the provider interface).",
            DeprecationWarning,
            stacklevel=2,
        )
        # Fix: removed the redundant trailing `pass` statement.

    def get_backend(self, name=None, **kwargs):
        """Return a single backend matching the specified filtering.

        Args:
            name (str): name of the backend.
            **kwargs: dict used for filtering.

        Returns:
            BaseBackend: a backend matching the filtering.

        Raises:
            QiskitBackendNotFoundError: if no backend could be found or
                more than one backend matches the filtering criteria.
        """
        backends = self.backends(name, **kwargs)
        if len(backends) > 1:
            raise QiskitBackendNotFoundError("More than one backend matches the criteria")
        if not backends:
            raise QiskitBackendNotFoundError("No backend matches the criteria")
        return backends[0]

    @abstractmethod
    def backends(self, name=None, **kwargs):
        """Return a list of backends matching the specified filtering.

        Args:
            name (str): name of the backend.
            **kwargs: dict used for filtering.

        Returns:
            list[BaseBackend]: a list of Backends that match the filtering
                criteria.
        """
        # Abstract: the docstring alone suffices as the body (the old
        # trailing `pass` was redundant).

    def __eq__(self, other):
        """Equality comparison.

        By default, it is assumed that two `Providers` from the same class are
        equal. Subclassed providers can override this behavior.
        """
        return type(self).__name__ == type(other).__name__
7848e0fbf17d1f2ea4a1a8d11b058af7fa2cd9d0 | 1,944 | py | Python | tests/run/test_global_permission.py | dramich/rio | a9a39e498df52258096f4aeb265d0d5606ee4617 | [
"Apache-2.0"
] | 2 | 2019-10-14T18:47:23.000Z | 2019-10-26T18:54:56.000Z | tests/run/test_global_permission.py | jjasghar/rio | b686eddc3a639b5920cf186f9cb887871d632140 | [
"Apache-2.0"
] | null | null | null | tests/run/test_global_permission.py | jjasghar/rio | b686eddc3a639b5920cf186f9cb887871d632140 | [
"Apache-2.0"
] | null | null | null | from random import randint
import util
def rio_permission_setup(stack, *rpermission):
    """Run a randomly named nginx service in *stack* with the given
    --global-permission flags and return the service's short name."""
    svc_name = "tsrv" + str(randint(1000, 5000))
    qualified = "%s/%s" % (stack, svc_name)
    parts = [f'rio run -n {qualified}']
    parts.extend("--global-permission " + perm for perm in rpermission)
    parts.append("nginx")
    command = " ".join(parts)
    util.runwait(command, qualified)
    print(command)
    return svc_name
def riotest(stack, sname):
    """Return the verbs of the first global permission reported by rio inspect."""
    inspected = util.rioInspect(f"{stack}/{sname}")
    return inspected['globalPermissions'][0]['verbs']
def kubesatest(stack, sname):
    """Return the metadata name of the service account kubectl reports for the service."""
    service_id = util.rioInspect(f"{stack}/{sname}", "id")
    namespace = service_id.split(":")[0]
    sa_obj = util.kubectl(namespace, "sa", sname)
    return sa_obj['metadata']['name']
def kubeclusterrolebindingtest(stack, sname):
    """Return the first subject name of the service's clusterrolebinding, or None."""
    service_id = util.rioInspect(f"{stack}/{sname}", "id")
    namespace = service_id.split(":")[0].split("-")[1]
    listing = util.runToJson('rio kubectl get -o=json clusterrolebinding')
    expected_name = f'{sname}-{namespace}'
    for entry in listing['items']:
        if entry['metadata']['name'] == expected_name:
            return entry['subjects'][0]['name']
    return None
def kubeclusterroletest(stack, sname):
    """Return the verbs of the first rule of the service's clusterrole, or None."""
    service_id = util.rioInspect(f"{stack}/{sname}", "id")
    namespace = service_id.split(":")[0].split("-")[1]
    listing = util.runToJson('rio kubectl get -o=json clusterrole')
    expected_name = f'{sname}-{namespace}'
    for entry in listing['items']:
        if entry['metadata']['name'] == expected_name:
            return entry['rules'][0]['verbs']
    return None
def test_name1(stack):
    """End-to-end check that an 'update cluster' global permission is reflected
    in rio inspect output and in the kubernetes SA/clusterrole/binding objects."""
    svc = rio_permission_setup(stack, "'update cluster'")
    assert riotest(stack, svc) == ['update']
    assert svc in kubesatest(stack, svc)
    assert kubeclusterroletest(stack, svc) == ['update']
    assert svc in kubeclusterrolebindingtest(stack, svc)
5c94343b53ab2a00411bacd98965e2e5b71e1d5d | 1,253 | py | Python | Python/python-practice/visible_date/highs_lows.py | jiaoqiyuan/Tests | a3595b0e4b430d910f90e428d6b6b4465f67a059 | [
"Apache-2.0"
] | null | null | null | Python/python-practice/visible_date/highs_lows.py | jiaoqiyuan/Tests | a3595b0e4b430d910f90e428d6b6b4465f67a059 | [
"Apache-2.0"
] | null | null | null | Python/python-practice/visible_date/highs_lows.py | jiaoqiyuan/Tests | a3595b0e4b430d910f90e428d6b6b4465f67a059 | [
"Apache-2.0"
] | null | null | null | import csv
from datetime import datetime
from matplotlib import pyplot as plt
# Get dates, high, and low temperatures from file.
filename = 'death_valley_2014.csv'
with open(filename) as f:
    reader = csv.reader(f)
    header_row = next(reader)  # skip the column-header row

    # Parse each remaining row; a row with a missing/invalid value is
    # reported and skipped instead of aborting the whole run.
    dates, highs, lows = [], [], []
    for row in reader:
        try:
            current_date = datetime.strptime(row[0], "%Y-%m-%d")
            high = int(row[1])
            low = int(row[3])
        except ValueError:
            # Bug fix: report the date string of the *current* row. The
            # previous code printed `current_date`, which still held the
            # previous row's date — and raised NameError when the very first
            # data row was the malformed one.
            print(row[0], 'missing data')
        else:
            dates.append(current_date)
            highs.append(high)
            lows.append(low)

# Plot data.
fig = plt.figure(dpi=128, figsize=(10, 6))
plt.plot(dates, highs, c='red', alpha=0.5)
plt.plot(dates, lows, c='blue', alpha=0.5)
plt.fill_between(dates, highs, lows, facecolor='blue', alpha=0.1)

# Format plot.
title = "Daily high and low temperatures - 2014\nDeath Valley, CA"
plt.title(title, fontsize=20)
plt.xlabel('', fontsize=16)
fig.autofmt_xdate()
plt.ylabel("Temperature (F)", fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=16)
plt.show()
| 28.477273 | 66 | 0.640862 |
ea3e699a49973d70e9955f218df90476d137a7c2 | 229 | py | Python | exercices/questao23.py | LBarros77/Python | 283b383d9d14c8d7b907b80f03f7cdc5dbd1e8af | [
"MIT"
] | null | null | null | exercices/questao23.py | LBarros77/Python | 283b383d9d14c8d7b907b80f03f7cdc5dbd1e8af | [
"MIT"
] | null | null | null | exercices/questao23.py | LBarros77/Python | 283b383d9d14c8d7b907b80f03f7cdc5dbd1e8af | [
"MIT"
] | null | null | null | from sys import path
# Sum the price column of the shopping-list file that sits next to the
# startup script. Each line looks like "<item>:<qty> <price>"; the price is
# the second space-separated token after the colon.
with open(path[0] + "/lista-de-compras.txt", "r") as shopping_file:
    raw_lines = shopping_file.readlines()

prices = [float(line[:-1].split(":")[1].split(" ")[1]) for line in raw_lines]
print(sum(prices))
c6c664bffa65a6db2b914b9fa42a6407ee3b639b | 10,291 | py | Python | heat/engine/resources/openstack/magnum/baymodel.py | grebennikov/heat1 | 6a11bd0b5984c8f880d1a24ed324620020032b5a | [
"Apache-2.0"
] | 1 | 2015-12-18T21:46:55.000Z | 2015-12-18T21:46:55.000Z | heat/engine/resources/openstack/magnum/baymodel.py | grebennikov/heat1 | 6a11bd0b5984c8f880d1a24ed324620020032b5a | [
"Apache-2.0"
] | null | null | null | heat/engine/resources/openstack/magnum/baymodel.py | grebennikov/heat1 | 6a11bd0b5984c8f880d1a24ed324620020032b5a | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
from heat.engine import translation
class BayModel(resource.Resource):
    """A resource for the BayModel in Magnum.

    BayModel is an object that stores template information about the bay which
    is used to create new bays consistently.
    """

    support_status = support.SupportStatus(version='5.0.0')

    # Parallel tuples: property keys (left) unpack to their string names
    # (right), so e.g. self.NAME == 'name'.
    PROPERTIES = (
        NAME, IMAGE, FLAVOR, MASTER_FLAVOR, KEYPAIR,
        EXTERNAL_NETWORK, FIXED_NETWORK, DNS_NAMESERVER,
        DOCKER_VOLUME_SIZE, SSH_AUTHORIZED_KEY, COE, NETWORK_DRIVER,
        HTTP_PROXY, HTTPS_PROXY, NO_PROXY, LABELS, TLS_DISABLED, PUBLIC,
        REGISTRY_ENABLED, VOLUME_DRIVER
    ) = (
        'name', 'image', 'flavor', 'master_flavor', 'keypair',
        'external_network', 'fixed_network', 'dns_nameserver',
        'docker_volume_size', 'ssh_authorized_key', 'coe', 'network_driver',
        'http_proxy', 'https_proxy', 'no_proxy', 'labels', 'tls_disabled',
        'public', 'registry_enabled', 'volume_driver'
    )

    # Allowed volume drivers per container orchestration engine (used by
    # validate() below).
    # Change it when magnum supports more function in the future.
    SUPPORTED_VOLUME_DRIVER = {'kubernetes': ['cinder'], 'swarm': ['rexray'],
                               'mesos': ['rexray']}

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('The bay model name.'),
        ),
        IMAGE: properties.Schema(
            properties.Schema.STRING,
            _('The image name or UUID to use as a base image for this '
              'baymodel.'),
            constraints=[
                constraints.CustomConstraint('glance.image')
            ],
            required=True
        ),
        FLAVOR: properties.Schema(
            properties.Schema.STRING,
            _('The flavor of this bay model.'),
            constraints=[
                constraints.CustomConstraint('nova.flavor')
            ]
        ),
        MASTER_FLAVOR: properties.Schema(
            properties.Schema.STRING,
            _('The flavor of the master node for this bay model.'),
            constraints=[
                constraints.CustomConstraint('nova.flavor')
            ]
        ),
        KEYPAIR: properties.Schema(
            properties.Schema.STRING,
            _('The name or id of the nova ssh keypair.'),
            constraints=[
                constraints.CustomConstraint('nova.keypair')
            ],
            required=True
        ),
        EXTERNAL_NETWORK: properties.Schema(
            properties.Schema.STRING,
            _('The external network to attach the Bay.'),
            constraints=[
                constraints.CustomConstraint('neutron.network')
            ],
            required=True
        ),
        FIXED_NETWORK: properties.Schema(
            properties.Schema.STRING,
            _('The fixed network to attach the Bay.'),
            constraints=[
                constraints.CustomConstraint('neutron.network')
            ]
        ),
        DNS_NAMESERVER: properties.Schema(
            properties.Schema.STRING,
            _('The DNS nameserver address.'),
            constraints=[
                constraints.CustomConstraint('ip_addr')
            ]
        ),
        DOCKER_VOLUME_SIZE: properties.Schema(
            properties.Schema.INTEGER,
            _('The size in GB of the docker volume.'),
            constraints=[
                constraints.Range(min=1),
            ]
        ),
        # Removed upstream in Magnum; translation_rules() below deletes it
        # from incoming templates.
        SSH_AUTHORIZED_KEY: properties.Schema(
            properties.Schema.STRING,
            _('The SSH Authorized Key.'),
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='6.0.0',
                message=_('This attribute has been removed in Magnum'),
                previous_status=support.SupportStatus(version='5.0.0')
            )
        ),
        COE: properties.Schema(
            properties.Schema.STRING,
            _('The Container Orchestration Engine for this bay model.'),
            constraints=[
                constraints.AllowedValues(['kubernetes', 'swarm', 'mesos'])
            ],
            required=True
        ),
        NETWORK_DRIVER: properties.Schema(
            properties.Schema.STRING,
            _('The name of the driver used for instantiating '
              'container networks. By default, Magnum will choose the '
              'pre-configured network driver based on COE type.'),
            support_status=support.SupportStatus(version='6.0.0')
        ),
        HTTP_PROXY: properties.Schema(
            properties.Schema.STRING,
            _('The http_proxy address to use for nodes in bay.'),
            support_status=support.SupportStatus(version='6.0.0')
        ),
        HTTPS_PROXY: properties.Schema(
            properties.Schema.STRING,
            _('The https_proxy address to use for nodes in bay.'),
            support_status=support.SupportStatus(version='6.0.0')
        ),
        NO_PROXY: properties.Schema(
            properties.Schema.STRING,
            _('A comma separated list of addresses for which proxies should '
              'not be used in the bay.'),
            support_status=support.SupportStatus(version='6.0.0')
        ),
        LABELS: properties.Schema(
            properties.Schema.MAP,
            _('Arbitrary labels in the form of key=value pairs to '
              'associate with a baymodel.'),
            support_status=support.SupportStatus(version='6.0.0')
        ),
        TLS_DISABLED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Disable TLS in the bay.'),
            default=False,
            support_status=support.SupportStatus(version='6.0.0')
        ),
        PUBLIC: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Make the baymodel public.'),
            default=False,
            support_status=support.SupportStatus(version='6.0.0')
        ),
        REGISTRY_ENABLED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Enable the docker registry in the bay.'),
            default=False,
            support_status=support.SupportStatus(version='6.0.0')
        ),
        VOLUME_DRIVER: properties.Schema(
            properties.Schema.STRING,
            _('The volume driver name for instantiating container volume.'),
            support_status=support.SupportStatus(version='7.0.0'),
            constraints=[
                constraints.AllowedValues(['cinder', 'rexray'])
            ]
        ),
    }

    # Client plugin name / entity used by the heat resource machinery.
    default_client_name = 'magnum'

    entity = 'baymodels'

    def translation_rules(self, props):
        """Drop the removed ssh_authorized_key property from templates.

        Returns None (no rules) when the property is absent or falsy.
        """
        if props.get(self.SSH_AUTHORIZED_KEY):
            return [
                translation.TranslationRule(
                    props,
                    translation.TranslationRule.DELETE,
                    [self.SSH_AUTHORIZED_KEY]
                )
            ]

    def validate(self):
        """Validate the provided params."""
        super(BayModel, self).validate()

        coe = self.properties[self.COE]
        volume_driver = self.properties[self.VOLUME_DRIVER]

        # Confirm that volume driver is supported by Magnum COE per
        # SUPPORTED_VOLUME_DRIVER.
        value = self.SUPPORTED_VOLUME_DRIVER[coe]
        if volume_driver is not None and volume_driver not in value:
            msg = (_('Volume driver type %(driver)s is not supported by '
                     'COE:%(coe)s, expecting a %(supported_volume_driver)s '
                     'volume driver.') % {
                'driver': volume_driver, 'coe': coe,
                'supported_volume_driver': value})
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Create the baymodel; optional properties are only sent when truthy."""
        args = {
            'name': self.properties[self.NAME],
            'image_id': self.properties[self.IMAGE],
            'flavor_id': self.properties[self.FLAVOR],
            'master_flavor_id': self.properties[self.MASTER_FLAVOR],
            'keypair_id': self.properties[self.KEYPAIR],
            'external_network_id': self.properties[self.EXTERNAL_NETWORK],
            'fixed_network': self.properties[self.FIXED_NETWORK],
            'dns_nameserver': self.properties[self.DNS_NAMESERVER],
            'docker_volume_size': self.properties[self.DOCKER_VOLUME_SIZE],
            'coe': self.properties[self.COE],
        }
        if self.properties[self.NETWORK_DRIVER]:
            args['network_driver'] = self.properties[self.NETWORK_DRIVER]
        if self.properties[self.HTTP_PROXY]:
            args['http_proxy'] = self.properties[self. HTTP_PROXY]
        if self.properties[self.HTTPS_PROXY]:
            args['https_proxy'] = self.properties[self.HTTPS_PROXY]
        if self.properties[self.NO_PROXY]:
            args['no_proxy'] = self.properties[self.NO_PROXY]
        if self.properties[self.LABELS]:
            args['labels'] = self.properties[self.LABELS]
        if self.properties[self.TLS_DISABLED]:
            args['tls_disabled'] = self.properties[self.TLS_DISABLED]
        if self.properties[self.PUBLIC]:
            args['public'] = self.properties[self.PUBLIC]
        if self.properties[self.REGISTRY_ENABLED]:
            args['registry_enabled'] = self.properties[self.REGISTRY_ENABLED]
        if self.properties[self.VOLUME_DRIVER]:
            args['volume_driver'] = self.properties[self.VOLUME_DRIVER]
        bm = self.client().baymodels.create(**args)
        self.resource_id_set(bm.uuid)
def resource_mapping():
    """Return the mapping of Heat resource type names to implementing classes."""
    return {'OS::Magnum::BayModel': BayModel}
| 39.129278 | 78 | 0.595569 |
83b8ea3d13842ca36c037e8b49ce57dcca05a1dc | 8,718 | py | Python | homeassistant/components/samsungtv/bridge.py | squirrel289/core | 6c5bcbfc3ee40927458e9188d6b79bf63933d3f9 | [
"Apache-2.0"
] | 5 | 2020-09-17T21:47:23.000Z | 2021-06-04T04:37:29.000Z | homeassistant/components/samsungtv/bridge.py | squirrel289/core | 6c5bcbfc3ee40927458e9188d6b79bf63933d3f9 | [
"Apache-2.0"
] | 47 | 2020-07-23T07:13:11.000Z | 2022-03-31T06:01:46.000Z | homeassistant/components/samsungtv/bridge.py | CrossEyeORG/homeassistant | 6c5bcbfc3ee40927458e9188d6b79bf63933d3f9 | [
"Apache-2.0"
] | 1 | 2021-11-19T19:01:57.000Z | 2021-11-19T19:01:57.000Z | """samsungctl and samsungtvws bridge classes."""
from abc import ABC, abstractmethod
from samsungctl import Remote
from samsungctl.exceptions import AccessDenied, ConnectionClosed, UnhandledResponse
from samsungtvws import SamsungTVWS
from samsungtvws.exceptions import ConnectionFailure
from websocket import WebSocketException
from homeassistant.const import (
CONF_HOST,
CONF_ID,
CONF_METHOD,
CONF_NAME,
CONF_PORT,
CONF_TIMEOUT,
CONF_TOKEN,
)
from .const import (
CONF_DESCRIPTION,
LOGGER,
METHOD_LEGACY,
RESULT_AUTH_MISSING,
RESULT_NOT_SUCCESSFUL,
RESULT_NOT_SUPPORTED,
RESULT_SUCCESS,
VALUE_CONF_ID,
VALUE_CONF_NAME,
)
class SamsungTVBridge(ABC):
    """The Base Bridge abstract class.

    Holds connection state shared by the legacy (samsungctl) and websocket
    (samsungtvws) bridges and centralizes exception handling around sending
    keys and probing the TV.
    """

    @staticmethod
    def get_bridge(method, host, port=None, token=None):
        """Get Bridge instance (legacy or websocket, depending on *method*)."""
        if method == METHOD_LEGACY:
            return SamsungTVLegacyBridge(method, host, port)
        return SamsungTVWSBridge(method, host, port, token)

    def __init__(self, method, host, port):
        """Initialize Bridge."""
        self.port = port
        self.method = method
        self.host = host
        self.token = None
        self.default_port = None
        self._remote = None      # cached remote connection, lazily created
        self._callback = None    # reauth callback, see register_reauth_callback

    def register_reauth_callback(self, func):
        """Register a callback function invoked when the TV denies access."""
        self._callback = func

    @abstractmethod
    def try_connect(self):
        """Try to connect to the TV."""

    def is_on(self):
        """Tells if the TV is on.

        Forces a fresh connection attempt; any auth/protocol response counts
        as "on", only a transport-level failure counts as "off".
        """
        self.close_remote()
        try:
            return self._get_remote() is not None
        except (
            UnhandledResponse,
            AccessDenied,
            ConnectionFailure,
        ):
            # We got a response so it's working.
            return True
        except OSError:
            # Different reasons, e.g. hostname not resolveable
            return False

    def send_key(self, key):
        """Send a key to the tv and handles exceptions.

        Retries once (retry_count = 1) after dropping the cached connection
        if the socket died mid-send.
        """
        try:
            # recreate connection if connection was dead
            retry_count = 1
            for _ in range(retry_count + 1):
                try:
                    self._send_key(key)
                    break
                except (
                    ConnectionClosed,
                    BrokenPipeError,
                    WebSocketException,
                ):
                    # BrokenPipe can occur when the commands is sent to fast
                    # WebSocketException can occur when timed out
                    self._remote = None
        except (UnhandledResponse, AccessDenied):
            # We got a response so it's on.
            LOGGER.debug("Failed sending command %s", key, exc_info=True)
        except OSError:
            # Different reasons, e.g. hostname not resolveable
            pass

    @abstractmethod
    def _send_key(self, key):
        """Send the key."""

    @abstractmethod
    def _get_remote(self):
        """Get Remote object."""

    def close_remote(self):
        """Close remote object and drop the cached connection."""
        try:
            if self._remote is not None:
                # Close the current remote connection
                self._remote.close()
            self._remote = None
        except OSError:
            LOGGER.debug("Could not establish connection")

    def _notify_callback(self):
        """Notify access denied callback."""
        if self._callback:
            self._callback()
class SamsungTVLegacyBridge(SamsungTVBridge):
    """The Bridge for Legacy TVs."""

    def __init__(self, method, host, port):
        """Initialize Bridge."""
        super().__init__(method, host, None)
        # Persistent connection config; a short timeout is enough once paired.
        self.config = {
            CONF_NAME: VALUE_CONF_NAME,
            CONF_DESCRIPTION: VALUE_CONF_NAME,
            CONF_ID: VALUE_CONF_ID,
            CONF_HOST: host,
            CONF_METHOD: method,
            CONF_PORT: None,
            CONF_TIMEOUT: 1,
        }

    def try_connect(self):
        """Try to connect to the Legacy TV."""
        probe_config = {
            CONF_NAME: VALUE_CONF_NAME,
            CONF_DESCRIPTION: VALUE_CONF_NAME,
            CONF_ID: VALUE_CONF_ID,
            CONF_HOST: self.host,
            CONF_METHOD: self.method,
            CONF_PORT: None,
            # We need this high timeout because waiting for auth popup is just an open socket
            CONF_TIMEOUT: 31,
        }
        try:
            LOGGER.debug("Try config: %s", probe_config)
            with Remote(probe_config.copy()):
                LOGGER.debug("Working config: %s", probe_config)
        except AccessDenied:
            LOGGER.debug("Working but denied config: %s", probe_config)
            return RESULT_AUTH_MISSING
        except UnhandledResponse:
            LOGGER.debug("Working but unsupported config: %s", probe_config)
            return RESULT_NOT_SUPPORTED
        except OSError as err:
            LOGGER.debug("Failing config: %s, error: %s", probe_config, err)
            return RESULT_NOT_SUCCESSFUL
        return RESULT_SUCCESS

    def _get_remote(self):
        """Create or return a remote control instance."""
        if self._remote is not None:
            return self._remote
        # We need to create a new instance to reconnect.
        try:
            LOGGER.debug("Create SamsungRemote")
            self._remote = Remote(self.config.copy())
        # This is only happening when the auth was switched to DENY
        # A removed auth will lead to socket timeout because waiting for auth popup is just an open socket
        except AccessDenied:
            self._notify_callback()
            raise
        return self._remote

    def _send_key(self, key):
        """Send the key using legacy protocol."""
        remote = self._get_remote()
        remote.control(key)
class SamsungTVWSBridge(SamsungTVBridge):
    """The Bridge for WebSocket TVs."""

    def __init__(self, method, host, port, token=None):
        """Initialize Bridge."""
        super().__init__(method, host, port)
        self.token = token
        self.default_port = 8001  # non-TLS websocket port; 8002 is the TLS one

    def try_connect(self):
        """Try to connect to the Websocket TV.

        Probes port 8001 then 8002. Success on either port returns
        immediately; otherwise the `else` on the *loop* below returns the
        last recorded non-success result (hence the pylint disable).
        """
        for self.port in (8001, 8002):
            config = {
                CONF_NAME: VALUE_CONF_NAME,
                CONF_HOST: self.host,
                CONF_METHOD: self.method,
                CONF_PORT: self.port,
                # We need this high timeout because waiting for auth popup is just an open socket
                CONF_TIMEOUT: 31,
            }
            result = None
            try:
                LOGGER.debug("Try config: %s", config)
                with SamsungTVWS(
                    host=self.host,
                    port=self.port,
                    token=self.token,
                    timeout=config[CONF_TIMEOUT],
                    name=config[CONF_NAME],
                ) as remote:
                    remote.open()
                self.token = remote.token
                if self.token:
                    # Mask the token before logging the working config.
                    config[CONF_TOKEN] = "*****"
                LOGGER.debug("Working config: %s", config)
                return RESULT_SUCCESS
            except WebSocketException:
                LOGGER.debug("Working but unsupported config: %s", config)
                result = RESULT_NOT_SUPPORTED
            except (OSError, ConnectionFailure) as err:
                LOGGER.debug("Failing config: %s, error: %s", config, err)
        # pylint: disable=useless-else-on-loop
        else:
            if result:
                return result

        return RESULT_NOT_SUCCESSFUL

    def _send_key(self, key):
        """Send the key using websocket protocol.

        KEY_POWEROFF is mapped to KEY_POWER for websocket TVs.
        """
        if key == "KEY_POWEROFF":
            key = "KEY_POWER"
        self._get_remote().send_key(key)

    def _get_remote(self):
        """Create or return a remote control instance."""
        if self._remote is None:
            # We need to create a new instance to reconnect.
            try:
                LOGGER.debug("Create SamsungTVWS")
                self._remote = SamsungTVWS(
                    host=self.host,
                    port=self.port,
                    token=self.token,
                    timeout=10,
                    name=VALUE_CONF_NAME,
                )
                self._remote.open()
            # This is only happening when the auth was switched to DENY
            # A removed auth will lead to socket timeout because waiting for auth popup is just an open socket
            except ConnectionFailure:
                self._notify_callback()
                raise
            except WebSocketException:
                self._remote = None
        return self._remote
| 32.898113 | 110 | 0.560106 |
f78383cfcf49ed3f683c774e8b3a2cd30d034957 | 54 | py | Python | Medium/testimp2.py | Nahalius/PythonBasics | f59f167cb7c3edf962cb381ee7d66394da500a11 | [
"MIT"
] | null | null | null | Medium/testimp2.py | Nahalius/PythonBasics | f59f167cb7c3edf962cb381ee7d66394da500a11 | [
"MIT"
] | null | null | null | Medium/testimp2.py | Nahalius/PythonBasics | f59f167cb7c3edf962cb381ee7d66394da500a11 | [
"MIT"
] | null | null | null | import lib.inportfunc
# Smoke test of the nested-package import: print cube(31)
# (presumably 31**3 = 29791 — confirm against lib/inportfunc.py).
print(lib.inportfunc.cube(31))
| 13.5 | 30 | 0.796296 |
1cd6050bf2dd11e2eaeefbc3187e007d3c3e8f98 | 17,025 | py | Python | pyramid/tests/test_config/test_predicates.py | danielpronych/pyramid-doxygen | ad95a8c151c2c4e029e03aed2feda2993380f36f | [
"BSD-2-Clause"
] | null | null | null | pyramid/tests/test_config/test_predicates.py | danielpronych/pyramid-doxygen | ad95a8c151c2c4e029e03aed2feda2993380f36f | [
"BSD-2-Clause"
] | null | null | null | pyramid/tests/test_config/test_predicates.py | danielpronych/pyramid-doxygen | ad95a8c151c2c4e029e03aed2feda2993380f36f | [
"BSD-2-Clause"
] | null | null | null | import unittest
from pyramid import testing
from pyramid.compat import text_
class TestXHRPredicate(unittest.TestCase):
    def _makeOne(self, val):
        from pyramid.config.predicates import XHRPredicate
        return XHRPredicate(val, None)

    def test___call___true(self):
        req = Dummy()
        req.is_xhr = True
        self.assertTrue(self._makeOne(True)(None, req))

    def test___call___false(self):
        req = Dummy()
        req.is_xhr = False
        self.assertFalse(self._makeOne(True)(None, req))

    def test_text(self):
        self.assertEqual(self._makeOne(True).text(), 'xhr = True')

    def test_phash(self):
        self.assertEqual(self._makeOne(True).phash(), 'xhr = True')
class TestRequestMethodPredicate(unittest.TestCase):
    def _makeOne(self, val):
        from pyramid.config.predicates import RequestMethodPredicate
        return RequestMethodPredicate(val, None)

    def test_ctor_get_but_no_head(self):
        # Supplying GET implies HEAD as well.
        self.assertEqual(self._makeOne('GET').val, ('GET', 'HEAD'))

    def test___call___true_single(self):
        req = Dummy()
        req.method = 'GET'
        self.assertTrue(self._makeOne('GET')(None, req))

    def test___call___true_multi(self):
        req = Dummy()
        req.method = 'GET'
        self.assertTrue(self._makeOne(('GET', 'HEAD'))(None, req))

    def test___call___false(self):
        req = Dummy()
        req.method = 'POST'
        self.assertFalse(self._makeOne(('GET', 'HEAD'))(None, req))

    def test_text(self):
        self.assertEqual(self._makeOne(('HEAD', 'GET')).text(),
                         'request_method = GET,HEAD')

    def test_phash(self):
        self.assertEqual(self._makeOne(('HEAD', 'GET')).phash(),
                         'request_method = GET,HEAD')
class TestPathInfoPredicate(unittest.TestCase):
    def _makeOne(self, val):
        from pyramid.config.predicates import PathInfoPredicate
        return PathInfoPredicate(val, None)

    def test_ctor_compilefail(self):
        from pyramid.exceptions import ConfigurationError
        self.assertRaises(ConfigurationError, self._makeOne, '\\')

    def test___call___true(self):
        req = Dummy()
        req.upath_info = text_('/12')
        self.assertTrue(self._makeOne(r'/\d{2}')(None, req))

    def test___call___false(self):
        req = Dummy()
        req.upath_info = text_('/n12')
        self.assertFalse(self._makeOne(r'/\d{2}')(None, req))

    def test_text(self):
        self.assertEqual(self._makeOne('/').text(), 'path_info = /')

    def test_phash(self):
        self.assertEqual(self._makeOne('/').phash(), 'path_info = /')
class TestRequestParamPredicate(unittest.TestCase):
    def _makeOne(self, val):
        from pyramid.config.predicates import RequestParamPredicate
        return RequestParamPredicate(val, None)

    def _call(self, spec, params):
        # Build a request carrying *params* and evaluate the predicate.
        req = Dummy()
        req.params = params
        return self._makeOne(spec)(None, req)

    def test___call___true_exists(self):
        self.assertTrue(self._call('abc', {'abc': 1}))

    def test___call___true_withval(self):
        self.assertTrue(self._call('abc=1', {'abc': '1'}))

    def test___call___true_multi(self):
        self.assertTrue(self._call(('abc', 'def =2 '), {'abc': '1', 'def': '2'}))

    def test___call___false_multi(self):
        self.assertFalse(self._call(('abc=3', 'def =2 '), {'abc': '3', 'def': '1'}))

    def test___call___false(self):
        self.assertFalse(self._call('abc', {}))

    def test_text_exists(self):
        self.assertEqual(self._makeOne('abc').text(), 'request_param abc')

    def test_text_withval(self):
        self.assertEqual(self._makeOne('abc= 1').text(), 'request_param abc=1')

    def test_text_multi(self):
        self.assertEqual(self._makeOne(('abc= 1', 'def')).text(),
                         'request_param abc=1,def')

    def test_phash_exists(self):
        self.assertEqual(self._makeOne('abc').phash(), 'request_param abc')

    def test_phash_withval(self):
        self.assertEqual(self._makeOne('abc= 1').phash(), "request_param abc=1")
class TestMatchParamPredicate(unittest.TestCase):
    def _makeOne(self, val):
        from pyramid.config.predicates import MatchParamPredicate
        return MatchParamPredicate(val, None)

    def _call(self, spec, matchdict):
        # Build a request carrying *matchdict* and evaluate the predicate.
        req = Dummy()
        req.matchdict = matchdict
        return self._makeOne(spec)(None, req)

    def test___call___true_single(self):
        self.assertTrue(self._call('abc=1', {'abc': '1'}))

    def test___call___true_multi(self):
        self.assertTrue(self._call(('abc=1', 'def=2'), {'abc': '1', 'def': '2'}))

    def test___call___false(self):
        self.assertFalse(self._call('abc=1', {}))

    def test___call___matchdict_is_None(self):
        self.assertFalse(self._call('abc=1', None))

    def test_text(self):
        self.assertEqual(self._makeOne(('def= 1', 'abc =2')).text(),
                         'match_param abc=2,def=1')

    def test_phash(self):
        self.assertEqual(self._makeOne(('def= 1', 'abc =2')).phash(),
                         'match_param abc=2,def=1')
class TestCustomPredicate(unittest.TestCase):
    def _makeOne(self, val):
        from pyramid.config.predicates import CustomPredicate
        return CustomPredicate(val, None)

    def test___call___true(self):
        def always_true(context, request):
            self.assertEqual(context, None)
            self.assertEqual(request, None)
            return True
        self.assertTrue(self._makeOne(always_true)(None, None))

    def test___call___false(self):
        def always_false(context, request):
            self.assertEqual(context, None)
            self.assertEqual(request, None)
            return False
        self.assertFalse(self._makeOne(always_false)(None, None))

    def test_text_func_has___text__(self):
        pred = predicate()
        pred.__text__ = 'text'
        self.assertEqual(self._makeOne(pred).text(), 'text')

    def test_text_func_repr(self):
        self.assertEqual(self._makeOne(predicate()).text(),
                         'custom predicate: object predicate')

    def test_phash(self):
        self.assertEqual(self._makeOne(predicate()).phash(), 'custom:1')
class TestTraversePredicate(unittest.TestCase):
    def _makeOne(self, val):
        from pyramid.config.predicates import TraversePredicate
        return TraversePredicate(val, None)

    def test___call__traverse_has_remainder_already(self):
        # An existing 'traverse' key must be left untouched.
        pred = self._makeOne('/1/:a/:b')
        info = {'traverse': 'abc'}
        self.assertEqual(pred(info, Dummy()), True)
        self.assertEqual(info, {'traverse': 'abc'})

    def test___call__traverse_matches(self):
        pred = self._makeOne('/1/:a/:b')
        info = {'match': {'a': 'a', 'b': 'b'}}
        self.assertEqual(pred(info, Dummy()), True)
        self.assertEqual(
            info,
            {'match': {'a': 'a', 'b': 'b', 'traverse': ('1', 'a', 'b')}},
        )

    def test___call__traverse_matches_with_highorder_chars(self):
        pred = self._makeOne(text_(b'/La Pe\xc3\xb1a/{x}', 'utf-8'))
        info = {'match': {'x': text_(b'Qu\xc3\xa9bec', 'utf-8')}}
        self.assertEqual(pred(info, Dummy()), True)
        self.assertEqual(
            info['match']['traverse'],
            (text_(b'La Pe\xc3\xb1a', 'utf-8'), text_(b'Qu\xc3\xa9bec', 'utf-8')),
        )

    def test_text(self):
        self.assertEqual(self._makeOne('/abc').text(),
                         'traverse matchdict pseudo-predicate')

    def test_phash(self):
        self.assertEqual(self._makeOne('/abc').phash(), '')
class Test_CheckCSRFTokenPredicate(unittest.TestCase):
    def _makeOne(self, val, config):
        from pyramid.config.predicates import CheckCSRFTokenPredicate
        return CheckCSRFTokenPredicate(val, config)

    def test_text(self):
        self.assertEqual(self._makeOne(True, None).text(), 'check_csrf = True')

    def test_phash(self):
        self.assertEqual(self._makeOne(True, None).phash(), 'check_csrf = True')

    def test_it_call_val_True(self):
        # val=True must check the default 'csrf_token' param, non-raising.
        pred = self._makeOne(True, None)
        req = Dummy()
        def fake_check(request, val, raises=True):
            self.assertEqual(request, req)
            self.assertEqual(val, 'csrf_token')
            self.assertEqual(raises, False)
            return True
        pred.check_csrf_token = fake_check
        self.assertEqual(pred(None, req), True)

    def test_it_call_val_str(self):
        # A string val is used as the parameter name directly.
        pred = self._makeOne('abc', None)
        req = Dummy()
        def fake_check(request, val, raises=True):
            self.assertEqual(request, req)
            self.assertEqual(val, 'abc')
            self.assertEqual(raises, False)
            return True
        pred.check_csrf_token = fake_check
        self.assertEqual(pred(None, req), True)

    def test_it_call_val_False(self):
        # val=False disables the check entirely.
        self.assertEqual(self._makeOne(False, None)(None, Dummy()), True)
class TestHeaderPredicate(unittest.TestCase):
    def _makeOne(self, val):
        from pyramid.config.predicates import HeaderPredicate
        return HeaderPredicate(val, None)

    def _call(self, spec, headers):
        # Build a request carrying *headers* and evaluate the predicate.
        req = Dummy()
        req.headers = headers
        return self._makeOne(spec)(None, req)

    def test___call___true_exists(self):
        self.assertTrue(self._call('abc', {'abc': 1}))

    def test___call___true_withval(self):
        self.assertTrue(self._call('abc:1', {'abc': '1'}))

    def test___call___true_withregex(self):
        self.assertTrue(self._call(r'abc:\d+', {'abc': '1'}))

    def test___call___false_withregex(self):
        self.assertFalse(self._call(r'abc:\d+', {'abc': 'a'}))

    def test___call___false(self):
        self.assertFalse(self._call('abc', {}))

    def test_text_exists(self):
        self.assertEqual(self._makeOne('abc').text(), 'header abc')

    def test_text_withval(self):
        self.assertEqual(self._makeOne('abc:1').text(), 'header abc=1')

    def test_text_withregex(self):
        self.assertEqual(self._makeOne(r'abc:\d+').text(), r'header abc=\d+')

    def test_phash_exists(self):
        self.assertEqual(self._makeOne('abc').phash(), 'header abc')

    def test_phash_withval(self):
        self.assertEqual(self._makeOne('abc:1').phash(), "header abc=1")

    def test_phash_withregex(self):
        self.assertEqual(self._makeOne(r'abc:\d+').phash(), r'header abc=\d+')
class Test_PhysicalPathPredicate(unittest.TestCase):
    """Tests for pyramid.config.predicates.PhysicalPathPredicate."""

    def _makeOne(self, val, config):
        from pyramid.config.predicates import PhysicalPathPredicate
        return PhysicalPathPredicate(val, config)

    def _makeContext(self):
        # Build a two-node traversal tree: root '' -> child 'abc'.
        root = Dummy(__name__='', __parent__=None)
        return Dummy(__name__='abc', __parent__=root)

    def test_text(self):
        pred = self._makeOne('/', None)
        self.assertEqual("physical_path = ('',)", pred.text())

    def test_phash(self):
        pred = self._makeOne('/', None)
        self.assertEqual("physical_path = ('',)", pred.phash())

    def test_it_call_val_tuple_True(self):
        pred = self._makeOne(('', 'abc'), None)
        self.assertTrue(pred(self._makeContext(), None))

    def test_it_call_val_list_True(self):
        pred = self._makeOne(['', 'abc'], None)
        self.assertTrue(pred(self._makeContext(), None))

    def test_it_call_val_str_True(self):
        pred = self._makeOne('/abc', None)
        self.assertTrue(pred(self._makeContext(), None))

    def test_it_call_False(self):
        # Predicate path '/' does not match the '/abc' context.
        pred = self._makeOne('/', None)
        self.assertFalse(pred(self._makeContext(), None))

    def test_it_call_context_has_no_name(self):
        # A context without __name__/__parent__ can never match.
        pred = self._makeOne('/', None)
        self.assertFalse(pred(Dummy(), None))
class Test_EffectivePrincipalsPredicate(unittest.TestCase):
    """Tests for pyramid.config.predicates.EffectivePrincipalsPredicate."""

    def setUp(self):
        self.config = testing.setUp()

    def tearDown(self):
        testing.tearDown()

    def _makeOne(self, val, config):
        from pyramid.config.predicates import EffectivePrincipalsPredicate
        return EffectivePrincipalsPredicate(val, config)

    def test_text(self):
        pred = self._makeOne(('verna', 'fred'), None)
        self.assertEqual(
            "effective_principals = ['fred', 'verna']", pred.text())

    def test_text_noniter(self):
        # A bare string is wrapped into a one-element list.
        pred = self._makeOne('verna', None)
        self.assertEqual("effective_principals = ['verna']", pred.text())

    def test_phash(self):
        pred = self._makeOne(('verna', 'fred'), None)
        self.assertEqual(
            "effective_principals = ['fred', 'verna']", pred.phash())

    def test_it_call_no_authentication_policy(self):
        req = testing.DummyRequest()
        pred = self._makeOne(('verna', 'fred'), None)
        self.assertFalse(pred(Dummy(), req))

    def test_it_call_authentication_policy_provides_superset(self):
        req = testing.DummyRequest()
        self.config.testing_securitypolicy('fred', groupids=('verna', 'bambi'))
        pred = self._makeOne(('verna', 'fred'), None)
        self.assertTrue(pred(Dummy(), req))

    def test_it_call_authentication_policy_provides_superset_implicit(self):
        from pyramid.security import Authenticated
        req = testing.DummyRequest()
        self.config.testing_securitypolicy('fred', groupids=('verna', 'bambi'))
        pred = self._makeOne(Authenticated, None)
        self.assertTrue(pred(Dummy(), req))

    def test_it_call_authentication_policy_doesnt_provide_superset(self):
        req = testing.DummyRequest()
        self.config.testing_securitypolicy('fred')
        pred = self._makeOne(('verna', 'fred'), None)
        self.assertFalse(pred(Dummy(), req))
class predicate(object):
    """Hashable stand-in predicate used by the configuration tests."""

    def __hash__(self):
        return 1

    def __repr__(self):
        return 'predicate'
class Dummy(object):
    """Attribute bag: every keyword argument becomes an instance attribute."""

    def __init__(self, **kw):
        self.__dict__.update(kw)
| 33.058252 | 80 | 0.610162 |
a77ce546fbe33b5bb39d46e7fd0198ef2ded1d6f | 9,559 | py | Python | toontown/coghq/DistributedMaze.py | TheFamiliarScoot/open-toontown | 678313033174ea7d08e5c2823bd7b473701ff547 | [
"BSD-3-Clause"
] | 99 | 2019-11-02T22:25:00.000Z | 2022-02-03T03:48:00.000Z | toontown/coghq/DistributedMaze.py | TheFamiliarScoot/open-toontown | 678313033174ea7d08e5c2823bd7b473701ff547 | [
"BSD-3-Clause"
] | 42 | 2019-11-03T05:31:08.000Z | 2022-03-16T22:50:32.000Z | toontown/coghq/DistributedMaze.py | TheFamiliarScoot/open-toontown | 678313033174ea7d08e5c2823bd7b473701ff547 | [
"BSD-3-Clause"
] | 57 | 2019-11-03T07:47:37.000Z | 2022-03-22T00:41:49.000Z | from otp.level.BasicEntities import DistributedNodePathEntity
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from toontown.toonbase.ToontownGlobals import *
import random
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.ClockDelta import globalClockDelta
from . import DistributedBarrelBase
from otp.level.BasicEntities import DistributedNodePathEntity
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownTimer
from direct.task import Task
from direct.gui.DirectGui import DGG, DirectFrame, DirectLabel
class DistributedMaze(DistributedNodePathEntity):
    """Client-side hedge-maze minigame entity (Cog HQ / country club).

    Builds the maze geometry from the owning room, wires collision spheres
    for the entrance, hints ("dead ends") and the finish, and runs a
    countdown timer for the maze game.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedMaze')
    ScheduleTaskName = 'mazeScheduler'
    # Maps the number of maze sections to the set(s) of hedge wall nodes to
    # delete; for 4 sections one of the three variants is chosen at random.
    RemoveBlocksDict = {2: ('HedgeBlock_0_1',),
                        4: (('HedgeBlock_0_1', 'HedgeBlock_1_3', 'HedgeBlock_2_3'), ('HedgeBlock_0_2', 'HedgeBlock_2_3', 'HedgeBlock_1_3'), ('HedgeBlock_0_1', 'HedgeBlock_0_2', 'HedgeBlock_1_3', 'HedgeBlock_2_3'))}
    def __init__(self, cr):
        """Set up default game state; real setup happens in announceGenerate."""
        DistributedNodePathEntity.__init__(self, cr)
        self.numSections = 0
        # Duration grows with maze size; recomputed in gotRoom once the
        # actual section count is known.
        self.GameDuration = 35.0 + self.numSections * 15.0
        self.timer = None
        self.frame2D = None
        self.gameLabel = None
        self.gameStarted = 0
        self.finished = 0
        self.timedOut = 0
        self.toonFinishedText = TTLocalizer.toonFinishedHedgeMaze
        self.toonEnteredText = TTLocalizer.enterHedgeMaze
        return
    def announceGenerate(self):
        """Called once the entity is fully created; attach collisions and GUI."""
        DistributedNodePathEntity.announceGenerate(self)
        self.addHints(self.roomHold)
        self.loadGui()
    def disable(self):
        """Tear down GUI, timer and event hooks when the entity goes away."""
        DistributedNodePathEntity.disable(self)
        self.unloadGui()
        self.cleanupTimer()
        self.ignoreAll()
    def delete(self):
        self.cleanupTimer()
        DistributedNodePathEntity.delete(self)
    def setRoomDoId(self, roomDoId):
        """Distributed field: remember the owning room and hook it when ready."""
        self.roomDoId = roomDoId
        room = self.cr.doId2do.get(roomDoId)
        if room:
            self.gotRoom([room])
        else:
            # Room object not generated yet; wait for it (5 s timeout).
            self.roomRequest = self.cr.relatedObjectMgr.requestObjects([roomDoId], allCallback=self.gotRoom, timeout=5)
    def gotRoom(self, rooms):
        """Randomize maze section rotations and finalize the game duration."""
        self.roomRequest = None
        room = rooms[0]
        self.roomHold = room
        # Two of each heading so sections come in matching pairs.
        rotations = [0,
         0,
         90,
         90,
         180,
         180,
         270,
         270]
        self.getRng().shuffle(rotations)
        self.numSections = 0
        # Count the maze sections present in this room and spin each one.
        for i in range(0, 4):
            maze = room.getGeom().find('**/Maze_Inside_%d' % i)
            if not maze.isEmpty():
                self.numSections += 1
                if rotations:
                    maze.setH(rotations.pop())
        self.GameDuration = 35.0 + self.numSections * 15.0
        self.removeHedgeBlocks(room)
        return
    def addHints(self, room):
        """Create collision spheres for dead-end hints, the entrance and the finish."""
        self.focusPoint = self.attachNewNode('GolfGreenGameFrame')
        # One hint sphere per 'dead*' node; entering one zooms the camera out.
        hintList = room.getGeom().findAllMatches('**/dead*')
        for hint in hintList:
            self.actSphere = CollisionSphere(0, 0, 0, 7.0)
            self.actSphereNode = CollisionNode('mazegame_hint-%s-%s' % (self.level.getLevelId(), self.entId))
            self.actSphereNode.addSolid(self.actSphere)
            self.actSphereNodePath = hint.attachNewNode(self.actSphereNode)
            self.actSphereNode.setCollideMask(WallBitmask)
            self.actSphere.setTangible(0)
            self.enterEvent = 'enter' + self.actSphereNode.getName()
            self.accept(self.enterEvent, self.__handleToonEnterHint)
            self.exitEvent = 'exit' + self.actSphereNode.getName()
            self.accept(self.exitEvent, self.__handleToonExitHint)
        # Entrance trigger starts the game for this client.
        enterance = room.getGeom().find('**/ENTRANCE')
        self.enterSphere = CollisionSphere(0, 0, 0, 8.0)
        self.enterSphereNode = CollisionNode('mazegame_enter-%s-%s' % (self.level.getLevelId(), self.entId))
        self.enterSphereNode.addSolid(self.enterSphere)
        self.enterSphereNodePath = enterance.attachNewNode(self.enterSphereNode)
        self.enterSphereNode.setCollideMask(WallBitmask)
        self.enterSphere.setTangible(0)
        self.enteranceEvent = 'enter' + self.enterSphereNode.getName()
        self.accept(self.enteranceEvent, self.__handleToonEnterance)
        # Finish trigger reports completion to the AI.
        finish = room.getGeom().find('**/finish')
        self.finishSphere = CollisionSphere(0, 0, 0, 15.0)
        self.finishSphereNode = CollisionNode('mazegame_finish-%s-%s' % (self.level.getLevelId(), self.entId))
        self.finishSphereNode.addSolid(self.finishSphere)
        self.finishSphereNodePath = finish.attachNewNode(self.finishSphereNode)
        self.finishSphereNode.setCollideMask(WallBitmask)
        self.finishSphere.setTangible(0)
        self.finishEvent = 'enter' + self.finishSphereNode.getName()
        self.accept(self.finishEvent, self.__handleToonFinish)
    def __handleToonEnterance(self, collEntry):
        """Local toon walked into the entrance sphere: ask the AI to start."""
        if not self.gameStarted:
            self.notify.debug('sending clientTriggered for %d' % self.doId)
            self.sendUpdate('setClientTriggered', [])
            self.level.countryClub.showInfoText(self.toonEnteredText)
    def __handleToonFinish(self, collEntry):
        """Local toon reached the finish sphere: report it to the AI."""
        self.sendUpdate('setFinishedMaze', [])
        self.finished = 1
    def __handleToonEnterHint(self, collEntry):
        """Raise the camera while the toon stands in a dead-end hint zone."""
        camHeight = base.localAvatar.getClampedAvatarHeight()
        heightScaleFactor = camHeight * 0.3333333333
        defLookAt = Point3(0.0, 1.5, camHeight)
        # High overhead position so the player can see the maze layout.
        cameraPoint = Point3(0.0, -22.0 * heightScaleFactor, camHeight + 54.0)
        base.localAvatar.stopUpdateSmartCamera()
        base.localAvatar.startUpdateSmartCamera(push=0)
        base.localAvatar.setIdealCameraPos(cameraPoint)
    def __handleToonExitHint(self, collEntry):
        """Restore the normal smart-camera behaviour on leaving the hint zone."""
        base.localAvatar.stopUpdateSmartCamera()
        base.localAvatar.startUpdateSmartCamera()
        base.localAvatar.setCameraPositionByIndex(base.localAvatar.cameraIndex)
        self.cameraHold = None
        return
    def getRng(self):
        # Deterministic per-entity RNG so all clients agree on the layout.
        return random.Random(self.entId * self.doId)
    def removeHedgeBlocks(self, room):
        """Delete hedge walls per RemoveBlocksDict to open up the maze."""
        if self.numSections in self.RemoveBlocksDict:
            blocksToRemove = self.getRng().choice(self.RemoveBlocksDict[self.numSections])
            for blockName in blocksToRemove:
                block = room.getGeom().find('**/%s' % blockName)
                if not block.isEmpty():
                    block.removeNode()
    def setGameStart(self, timestamp):
        """Distributed field: start the countdown from the AI's start time."""
        self.notify.debug('%d setGameStart: Starting game' % self.doId)
        self.gameStartTime = globalClockDelta.networkToLocalTime(timestamp)
        self.gameStarted = True
        curGameTime = self.getCurrentGameTime()
        timeLeft = self.GameDuration - curGameTime
        self.cleanupTimer()
        self.timer = ToontownTimer.ToontownTimer()
        self.timer.posBelowTopRightCorner()
        self.timer.setTime(timeLeft)
        self.timer.countdown(timeLeft, self.timerExpired)
        self.startScheduleTask()
        self.frame2D.show()
    def setGameOver(self):
        """Distributed field: time ran out; damage laggards and warp them out."""
        self.timedOut = 1
        if not self.finished:
            # Toons that never reached the finish take damage.
            self.sendUpdate('damageMe', [])
        roomNum = self.level.roomNum
        club = self.level.countryClub
        # Teleport the local toon to the finish point.
        self.gameOverTrack = Sequence()
        self.gameOverTrack.append(localAvatar.getTeleportOutTrack())
        self.gameOverTrack.append(Func(localAvatar.setPos, self.finishSphereNodePath.getPos(render)))
        self.gameOverTrack.append(Func(localAvatar.play, 'jump'))
        self.gameOverTrack.append(Func(self.level.countryClub.camEnterRoom, roomNum))
        self.gameOverTrack.start()
        self.timerExpired()
    def local2GameTime(self, timestamp):
        return timestamp - self.gameStartTime
    def game2LocalTime(self, timestamp):
        return timestamp + self.gameStartTime
    def getCurrentGameTime(self):
        return self.local2GameTime(globalClock.getFrameTime())
    def startScheduleTask(self):
        taskMgr.add(self.scheduleTask, self.ScheduleTaskName)
    def stopScheduleTask(self):
        taskMgr.remove(self.ScheduleTaskName)
    def scheduleTask(self, task):
        # NOTE(review): computes the game time but does nothing with it, and
        # returns None (so the task runs only once) -- looks vestigial.
        curTime = self.getCurrentGameTime()
    def cleanupTimer(self):
        if self.timer:
            self.timer.stop()
            self.timer.destroy()
            self.timer = None
        return
    def timerExpired(self):
        self.cleanupTimer()
        self.unloadGui()
    def loadGui(self):
        """Build the (initially hidden) 2-D banner shown during the game."""
        self.frame2D = DirectFrame(scale=1.0, pos=(0.0, 0, 0.9), relief=DGG.FLAT, parent=aspect2d, frameSize=(-0.3,
         0.3,
         -0.05,
         0.05), frameColor=(0.737, 0.573, 0.345, 0.3))
        self.frame2D.hide()
        self.gameLabel = DirectLabel(parent=self.frame2D, relief=None, pos=(0, 0, 0), scale=1.0, text=TTLocalizer.mazeLabel, text_font=ToontownGlobals.getSignFont(), text0_fg=(1, 1, 1, 1), text_scale=0.075, text_pos=(0, -0.02))
        return
    def unloadGui(self):
        if self.frame2D:
            self.frame2D.destroy()
            self.frame2D = None
        if self.gameLabel:
            self.gameLabel.destroy()
            self.gameLabel = None
        return
    def toonFinished(self, avId, place, lastToon):
        """Distributed field: announce a toon's placement; end game on the last one."""
        toon = base.cr.doId2do.get(avId)
        if toon and not self.timedOut:
            self.level.countryClub.showInfoText(self.toonFinishedText % (toon.getName(), TTLocalizer.hedgeMazePlaces[place]))
        if lastToon:
            self.setGameOver()
| 40.504237 | 227 | 0.662726 |
93adc288c228687845d5bd9e2d214c966d41e0c6 | 3,934 | py | Python | BatchDatsetReader.py | sunyunan1999/FCN | 3e4ea6eef5d26e3540c6ab273725eeb5b9aad20f | [
"MIT"
] | null | null | null | BatchDatsetReader.py | sunyunan1999/FCN | 3e4ea6eef5d26e3540c6ab273725eeb5b9aad20f | [
"MIT"
] | null | null | null | BatchDatsetReader.py | sunyunan1999/FCN | 3e4ea6eef5d26e3540c6ab273725eeb5b9aad20f | [
"MIT"
] | null | null | null | # coding=utf-8
import numpy as np
import scipy.misc as misc
from PIL import Image
# Class that reads a segmentation data set (images + label maps) in batches.
class BatchDatset:
    # Class-level defaults; instances overwrite these in __init__/_read_images.
    files = []           # list of record dicts: {'image', 'annotation', 'filename'}
    images = []          # ndarray holding every training image
    annotations = []     # ndarray holding every label map, expanded to HxWx1
    image_options = {}   # resize options, see __init__
    batch_offset = 0     # cursor into the (shuffled) data set
    epochs_completed = 0
    def __init__(self, records_list, image_options={}):
        """
        Initialize a generic file reader with batching for a list of files.
        NOTE(review): the mutable default for image_options is harmless here
        because it is only read, never mutated.
        :param records_list: list of file records to read -
            sample record:
            {'image': f, 'annotation': annotation_file, 'filename': filename}
        :param image_options: A dictionary of options for modifying the output image
            Available options:
            resize = True/ False
            resize_size = #size of output image - does bilinear resize
            color=True/False
        """
        print("Initializing Batch Dataset Reader...")
        print(image_options)
        self.files = records_list
        self.image_options = image_options
        self._read_images()
    def _read_images(self):
        """Load every image and annotation referenced in self.files into memory."""
        self.__channels = True
        # Read the training images.
        self.images = np.array([self._transform(filename['image']) for filename in self.files])
        self.__channels = False
        # Read the label images; labels are 2-D, so add a channel axis (HxW -> HxWx1).
        self.annotations = np.array(
            [np.expand_dims(self._transform(filename['annotation']), axis=2) for filename in self.files])
        print(self.images.shape)
        print(self.annotations.shape)
    # Previous misc.imread-based version of _transform, kept for reference.
    # It also replicated grayscale images to three channels via self.__channels.
    # def _transform(self, filename):
    #     image = misc.imread(filename)
    #     if self.__channels and len(image.shape) < 3:  # make sure images are of shape(h,w,3)
    #         image = np.array([image for i in range(3)])
    #
    #     if self.image_options.get("resize", False) and self.image_options["resize"]:
    #         resize_size = int(self.image_options["resize_size"])
    #         resize_image = misc.imresize(image, [resize_size, resize_size], interp='nearest')  # nearest-neighbour resize
    #     else:
    #         resize_image = image
    #
    #     return np.array(resize_image)
    def _transform(self, filename):
        """Read *filename* with PIL, optionally resize, and return a numpy array.
        NOTE(review): unlike the commented-out version above, this one ignores
        self.__channels, so grayscale inputs are NOT expanded to 3 channels --
        confirm that is intended.
        """
        image = Image.open(filename)
        if self.image_options.get("resize", False) and self.image_options["resize"]:
            resize_size = int(self.image_options["resize_size"])
            resize_image = misc.imresize(image, [resize_size, resize_size], interp='nearest')  # resize with nearest-neighbour interpolation
        else:
            resize_image = image
        return np.array(resize_image)
    def get_records(self):
        return self.images, self.annotations  # the loaded image / annotation arrays
    def reset_batch_offset(self, offset=0):
        # Rewind (or reposition) the batch cursor.
        self.batch_offset = offset
    def next_batch(self, batch_size):
        """Return the next batch; reshuffles and starts a new epoch when exhausted."""
        start = self.batch_offset  # where the current batch begins
        self.batch_offset += batch_size  # advance the cursor past this batch
        if self.batch_offset > self.images.shape[0]:  # ran past the data: one epoch done
            # Finished epoch
            self.epochs_completed += 1  # bump the completed-epoch counter
            print("****************** Epochs completed: " + str(self.epochs_completed) + "******************")
            # Shuffle the data
            perm = np.arange(self.images.shape[0])  # indices 0..N-1 of all images
            np.random.shuffle(perm)  # shuffle the index order in place
            self.images = self.images[perm]  # reorder images by the shuffled indices
            self.annotations = self.annotations[perm]
            # Start next epoch
            start = 0  # the new epoch starts at the beginning
            self.batch_offset = batch_size  # cursor position after taking this batch
        end = self.batch_offset  # batch spans [start, end) == [start, start+batch_size)
        return self.images[start:end], self.annotations[start:end]  # slice out the batch
    def get_random_batch(self, batch_size):  # sample a random batch of indices (with replacement)
        indexes = np.random.randint(0, self.images.shape[0], size=[batch_size]).tolist()
        return self.images[indexes], self.annotations[indexes]
| 38.950495 | 115 | 0.629385 |
712254dfa37e97be5c4bc4ba488d782627ce9348 | 9,013 | py | Python | QWeb/internal/decorators.py | sthagen/qentinelqi-qweb | e372729514e124a36cd41ee1ec0cff091e11ff8d | [
"Apache-2.0"
] | 1 | 2021-11-08T09:26:44.000Z | 2021-11-08T09:26:44.000Z | QWeb/internal/decorators.py | sthagen/qentinelqi-qweb | e372729514e124a36cd41ee1ec0cff091e11ff8d | [
"Apache-2.0"
] | null | null | null | QWeb/internal/decorators.py | sthagen/qentinelqi-qweb | e372729514e124a36cd41ee1ec0cff091e11ff8d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# --------------------------
# Copyright © 2014 - Qentinel Group.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------
from __future__ import annotations
from types import MappingProxyType
from typing import Callable, Any, Union
import time
from inspect import signature
from functools import wraps
from robot.utils import timestr_to_secs
from robot.api import logger
from selenium.common.exceptions import InvalidSelectorException, \
NoSuchElementException, StaleElementReferenceException, WebDriverException, \
UnexpectedAlertPresentException, InvalidSessionIdException
from QWeb.keywords import config
from QWeb.internal import frame
from QWeb.internal.config_defaults import CONFIG, SHORT_DELAY, LONG_DELAY
from QWeb.internal.exceptions import QWebElementNotFoundError, \
QWebStalingElementError, QWebDriverError, QWebTimeoutError, QWebValueError, \
QWebUnexpectedConditionError, QWebValueMismatchError, QWebSearchingMode, QWebUnexpectedAlert, \
QWebIconNotFoundError, QWebBrowserError, FATAL_MESSAGES
# pylint: disable=too-many-statements
# pylint: disable=too-many-branches
def timeout_decorator(fn: Callable[..., Any]) -> Callable[..., Any]:
    """Retry the wrapped element-lookup keyword until success or timeout.

    Normalizes the call's arguments, resolves the effective timeout, then
    keeps calling *fn* in a loop, swallowing transient lookup/staleness
    errors and sleeping briefly between attempts. Fatal browser errors are
    re-raised immediately; on expiry the most relevant saved error (or an
    element-not-found error) is raised instead.
    """
    @wraps(fn)
    def get_elements_from_dom_content(  # type: ignore[return] # pylint: disable=R1710
            *args: Any, **kwargs: Any) -> Union[Callable[..., Any], int, bool, None]:
        try:
            # Resolve the locator for error messages and normalize args.
            args, kwargs, locator = _equal_sign_handler(args, kwargs, fn)
            msg = None
            params = signature(fn).parameters
            args, kwargs = _args_to_kwargs(params, args, kwargs)
            timeout = get_timeout(**kwargs)
            logger.debug('Timeout is {} sec'.format(timeout))
            try:
                # Navigation keywords must not wait for the *current* page.
                if 'go_to' not in str(fn) and 'switch_window' not in str(fn):
                    frame.wait_page_loaded()
            except UnexpectedAlertPresentException as e:
                if not CONFIG["HandleAlerts"]:
                    raise QWebUnexpectedAlert(str(e)) from e
                logger.debug('Got {}. Trying to retry..'.format(e))
                time.sleep(SHORT_DELAY)
            start = time.time()
            # Retry loop: keep calling fn with the remaining time budget.
            while time.time() < timeout + start:
                try:
                    kwargs['timeout'] = float(timeout + start - time.time())
                    config.set_config('FrameTimeout', float(timeout + start - time.time()))
                    return fn(*args, **kwargs)
                except (QWebUnexpectedConditionError, QWebTimeoutError) as e:
                    # Retry immediately without sleeping.
                    logger.debug('Got {}'.format(e))
                except (InvalidSelectorException, NoSuchElementException, QWebElementNotFoundError,
                        UnexpectedAlertPresentException, QWebStalingElementError,
                        StaleElementReferenceException, QWebIconNotFoundError) as e:
                    # Transient lookup failure: sleep briefly, then retry.
                    time.sleep(SHORT_DELAY)
                    logger.debug('Got exception: {}. Trying to retry..'.format(e))
                except InvalidSessionIdException as e:
                    # Browser session is gone; no point retrying.
                    CONFIG.set_value("OSScreenshots", True)
                    raise QWebBrowserError("Browser session lost. Did browser crash?") from e
                except (WebDriverException, QWebDriverError) as e:
                    # Fatal driver errors abort immediately; others are retried
                    # but remembered so they can be raised on expiry.
                    if any(s in str(e) for s in FATAL_MESSAGES):
                        CONFIG.set_value("OSScreenshots", True)
                        raise QWebBrowserError(e) # pylint: disable=W0707
                    logger.info('From timeout decorator: Webdriver exception. Retrying..')
                    logger.info(e)
                    time.sleep(SHORT_DELAY)
                    err = QWebDriverError
                    msg = e
                except QWebValueError as ve:
                    # Value mismatch: retry, but raise this error if time runs out.
                    logger.debug('Got QWebValueError: {}. Trying to retry..'.format(ve))
                    err = QWebValueError # type: ignore[assignment]
                    msg = ve
                    time.sleep(SHORT_DELAY)
            # Timed out: raise the last remembered error, or a sensible default.
            if msg:
                raise err(msg)
            if 'count' in str(fn):
                return 0
            if 'is_text' in str(fn) or 'is_no_text' in str(fn):
                return False
            raise QWebElementNotFoundError('Unable to find element for locator {} in {} sec'.format(
                locator, timeout))
        except QWebSearchingMode:
            # Searching mode deliberately suppresses the failure.
            pass
    return get_elements_from_dom_content
def timeout_decorator_for_actions(fn: Callable[..., Any]) -> Callable[..., Any]:
    """Retry the wrapped action keyword (click, type, ...) until it succeeds.

    Unlike timeout_decorator, this wrapper is for keywords that *act* on an
    element: staleness after a click usually means the action fired, and is
    translated accordingly instead of being retried forever. On expiry the
    last remembered error is raised, else a plain timeout error.
    """
    @wraps(fn)
    def perform(*args: Any, **kwargs: Any) -> Callable[..., Any]:
        params = signature(fn).parameters
        args, kwargs = _args_to_kwargs(params, args, kwargs)
        timeout = get_timeout(**kwargs)
        err = None
        msg = None
        # NOTE(review): `performed` is never set to True anywhere in this
        # function, so the `if performed: break` branch looks unreachable.
        performed = False
        logger.debug('time to run {}'.format(timeout))
        start = time.time()
        while time.time() < timeout + start:
            try:
                return fn(*args, **kwargs)
            except QWebValueMismatchError as mismatch:
                # Remember the mismatch (except for text-comparison keywords
                # that handle it themselves) and retry immediately.
                if 'text_appearance' not in str(fn) and 'get_or_compare_text' not in str(fn):
                    err = QWebValueError
                    msg = mismatch
                logger.trace('Value mismatch: {}'.format(mismatch))
                continue
            except (QWebElementNotFoundError, UnexpectedAlertPresentException):
                # Element not there yet: sleep briefly and retry.
                logger.debug('Not found')
                time.sleep(SHORT_DELAY)
            except QWebValueError as ve:
                if performed:
                    break
                raise ve
            except (QWebStalingElementError, StaleElementReferenceException) as S:
                # Staleness right after a click usually means the click landed.
                if 'execute_click' in str(fn) or 'text_appearance' in str(fn):
                    logger.info('Got staling element err from retry click.'
                                'Action is probably triggered.')
                    raise QWebUnexpectedConditionError(S) # pylint: disable=W0707
                raise QWebStalingElementError('Staling element') # pylint: disable=W0707
            except (WebDriverException, QWebDriverError) as wde:
                # Alert-related keywords tolerate driver errors and retry;
                # everything else fails fast.
                if 'alert' in str(fn):
                    time.sleep(LONG_DELAY)
                    logger.info("Got webdriver exception..{}. Retrying..".format(wde))
                    err = QWebDriverError # type: ignore[assignment]
                    msg = wde # type: ignore[assignment]
                else:
                    raise QWebDriverError(wde) # pylint: disable=W0707
        if msg:
            raise err(msg) # type: ignore[misc]
        raise QWebTimeoutError('Timeout exceeded')
    return perform
def get_timeout(**kwargs: Any) -> Union[int, float]:
    """Resolve the effective timeout in seconds.

    A 'timeout' keyword that converts to a non-zero value overrides the
    configured DefaultTimeout; a missing or zero 'timeout' falls back to
    the configuration value.
    """
    fallback = timestr_to_secs(CONFIG["DefaultTimeout"])
    if 'timeout' not in kwargs:
        return fallback
    requested = timestr_to_secs(kwargs['timeout'])
    return requested if requested != 0 else fallback
def _args_to_kwargs(params: MappingProxyType[str, Any], args: tuple,
                    kwargs: dict) -> tuple[tuple, dict]:
    """Normalize positional arguments into keyword arguments.

    When no explicit 'timeout' keyword is present, every declared parameter
    of the wrapped keyword is resolved -- from *kwargs* if already given,
    otherwise positionally from *args*, otherwise from its declared default --
    and the positional tuple is emptied. When 'timeout' is already in
    *kwargs*, the call is assumed to be normalized already and left as-is.

    :param params: ``inspect.signature(fn).parameters`` of the wrapped keyword.
    :param args: positional arguments of the intercepted call.
    :param kwargs: keyword arguments of the intercepted call (mutated in place).
    :return: ``(args, kwargs)`` with args emptied when normalization happened.
    """
    if 'timeout' not in kwargs:
        for index, param in enumerate(params.values()):
            if param.name not in kwargs:
                # Prefer the positional value when one was supplied.
                kwargs[param.name] = args[index] if len(args) > index else param.default
        args = ()  # everything is keyed by name now (was the obscure tuple(''))
    return tuple(args), kwargs
def _equal_sign_handler(args: Union[tuple, list], kwargs: dict,
                        function_name: Union[str, Callable[..., Any]]) -> tuple[tuple, dict, str]:
    """Determine which argument acts as the locator for error reporting.

    The locator is normally the first positional argument. When everything
    was passed by keyword, the first locator-capable keyword (in kwargs
    insertion order) is used, then 'index'; for the scroll_to keyword the
    locator is always 'text_to_find'. A missing locator usually means an
    unescaped '=' inside an xpath, which is reported as such.
    """
    if args:
        return tuple(args), kwargs, args[0]
    locator_keys = ('locator', 'xpath', 'steps', 'image', 'input_texts', 'input_values',
                    'text', 'coordinates', 'texts_to_verify', 'url', 'title')
    missing = object()
    # Respect kwargs insertion order: the first locator-capable keyword wins,
    # even when its value happens to be None.
    locator = next((value for key, value in kwargs.items() if key in locator_keys), missing)
    if locator is missing:
        # index can be an unnamed first argument or a named argument
        locator = kwargs.get('index', None)
    # The only decorated method with 'locator' as NOT the first argument
    if str(function_name) == "scroll_to":
        locator = kwargs.get('text_to_find', None)
    if locator is None:
        logger.console(f"args: {args}, \nkwargs: {kwargs}")
        raise QWebElementNotFoundError("Use \\= instead of = in xpaths")
    return tuple(args), kwargs, locator
| 45.291457 | 100 | 0.594031 |
589bd2e552ec936b88b07f61553459fc8092795a | 5,080 | py | Python | src/app/aws/sqs.py | glucn/tofino | 64c603b2356f22eecbf8fd592f3656a613646c53 | [
"MIT"
] | 1 | 2021-01-04T10:07:41.000Z | 2021-01-04T10:07:41.000Z | src/app/aws/sqs.py | glucn/tofino | 64c603b2356f22eecbf8fd592f3656a613646c53 | [
"MIT"
] | 231 | 2020-11-05T06:37:11.000Z | 2022-03-28T03:02:49.000Z | src/app/aws/sqs.py | glucn/tofino | 64c603b2356f22eecbf8fd592f3656a613646c53 | [
"MIT"
] | null | null | null | import hashlib
import logging
from typing import List
import boto3
from botocore.exceptions import ClientError
import config
class Message:
    """
    A single message received from an AWS SQS queue.

    Wraps the raw ``ReceiveMessage`` response dict, validating the body
    against its MD5 checksum when one is supplied.
    """
    message_id: str       # None when 'MessageId' is absent from the response
    receipt_handle: str   # always present; required for DeleteMessage
    body: str             # None when 'Body' is absent from the response
    _representation: str

    def __init__(self, **kwargs):
        """
        :param kwargs: raw message dict from the SQS API response. Recognized
            keys: ReceiptHandle (required), MessageId, Body, MD5OfBody
            (verified against Body when both are present).
        :raises ValueError: when ReceiptHandle is missing, or the MD5 of the
            body does not match MD5OfBody.
        """
        if 'ReceiptHandle' in kwargs:
            self.receipt_handle = kwargs['ReceiptHandle']
        else:
            raise ValueError('ReceiptHandle is required')
        self.message_id = kwargs['MessageId'] if 'MessageId' in kwargs else None
        # Bug fix: default the body so that building _representation below no
        # longer raises AttributeError when 'Body' is absent.
        self.body = None
        if 'Body' in kwargs:
            if 'MD5OfBody' in kwargs:
                body_hash = hashlib.md5(kwargs['Body'].encode('utf-8')).hexdigest()
                if body_hash != kwargs['MD5OfBody']:
                    raise ValueError(
                        u'MD5 hash of Body %s does not match MD5OfBody %s' % (kwargs['Body'], kwargs['MD5OfBody']))
            self.body = kwargs['Body']
        self._representation = u'Message(message_id: {0}, receipt_handle: {1}, body: {2})'.format(
            self.message_id, self.receipt_handle, self.body)

    def __repr__(self):
        return self._representation

    def __str__(self):
        return self._representation
class SQS:
    """
    The client of AWS SQS
    """
    # Lazily-created boto3 SQS client, shared class-wide (one per process).
    _client = None
    @classmethod
    def _get_client(cls):
        # Create the client on first use and cache it on the class.
        if not cls._client:
            session = boto3.session.Session()
            cls._client = session.client(
                service_name='sqs',
                region_name=config.AWS_REGION
            )
        return cls._client
    @classmethod
    def receive_message(cls,
                        queue_url: str,
                        max_number_of_messages: int = 1,
                        wait_time_seconds: int = 1) -> List[Message]:
        """
        Retrieves one or more messages (up to 10), from the specified queue.
        https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ReceiveMessage.html
        :param queue_url: The URL of the Amazon SQS queue from which messages are received (case-sensitive).
        :param max_number_of_messages: The maximum number of messages to return. Valid values: 1 to 10. Default: 1.
        :param wait_time_seconds: The duration (in seconds) for which the call waits for a message to arrive in
                the queue before returning. Default: 1.
        :return: A list of messages
        :raises ValueError: when queue_url is empty or max_number_of_messages is out of range.
        :raises ClientError: re-raised from boto3 after logging.
        """
        if not queue_url:
            raise ValueError(u'queue_url is required')
        if max_number_of_messages < 1 or max_number_of_messages > 10:
            raise ValueError(u'max_number_of_messages valid values: 1 to 10')
        try:
            receive_message_response = cls._get_client().receive_message(
                QueueUrl=queue_url,
                MaxNumberOfMessages=max_number_of_messages,
                WaitTimeSeconds=wait_time_seconds,
            )
            # 'Messages' is absent from the response when the queue is empty.
            if 'Messages' in receive_message_response:
                return [Message(**message) for message in receive_message_response['Messages']]
            else:
                return []
        except ClientError as e:
            # TODO: change back to logging.error
            logging.warning(e)
            raise e
    @classmethod
    def delete_message(cls, queue_url: str, receipt_handle: str) -> None:
        """
        Deletes the specified message from the specified queue.
        https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_DeleteMessage.html
        :param queue_url: The URL of the Amazon SQS queue from which messages are deleted.
        :param receipt_handle: The receipt handle associated with the message to delete.
        :return: None
        :raises ValueError: when queue_url or receipt_handle is empty.
        :raises ClientError: re-raised from boto3 after logging.
        """
        if not queue_url:
            raise ValueError(u'queue_url is required')
        if not receipt_handle:
            raise ValueError(u'receipt_handle is required')
        try:
            cls._get_client().delete_message(
                QueueUrl=queue_url,
                ReceiptHandle=receipt_handle,
            )
        except ClientError as e:
            # TODO: change back to logging.error
            logging.warning(e)
            raise e
    @classmethod
    def send_message(cls, queue_url: str, message_body: str) -> None:
        """
        Delivers a message to the specified queue.
        :param queue_url: The URL of the Amazon SQS queue to which a message is sent.
        :param message_body: The message to send. The minimum size is one character. The maximum size is 256 KB.
                TODO: validate the message length
        :return: None
        :raises ValueError: when queue_url or message_body is empty.
        :raises ClientError: re-raised from boto3 after logging.
        """
        if not queue_url:
            raise ValueError(u'queue_url is required')
        if not message_body:
            raise ValueError(u'message_body is required')
        try:
            cls._get_client().send_message(
                QueueUrl=queue_url,
                MessageBody=message_body,
            )
        except ClientError as e:
            # TODO: change back to logging.error
            logging.warning(e)
            raise e
| 32.564103 | 115 | 0.600591 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.