hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
88b0cb79676c38c2e63f4aeff9b36c69f7412146 | 1,551 | py | Python | connectfour/agents/computer_player.py | rmit-s3607407-Tony-Huang/ai1901-connectfour | 3faf58296f8bcb6b99e9707d1e17c5a107c39094 | [
"MIT"
] | 2 | 2019-04-17T07:42:28.000Z | 2019-04-24T08:55:17.000Z | connectfour/agents/computer_player.py | rmit-s3607407-Tony-Huang/ai1901-connectfour | 3faf58296f8bcb6b99e9707d1e17c5a107c39094 | [
"MIT"
] | null | null | null | connectfour/agents/computer_player.py | rmit-s3607407-Tony-Huang/ai1901-connectfour | 3faf58296f8bcb6b99e9707d1e17c5a107c39094 | [
"MIT"
] | null | null | null | import random
from connectfour.agents.monte_carlo import Node, MTCS
from connectfour.agents.agent import Agent
MAX_DEPTH = 3
class MonteCarloAgent(Agent):
    """Agent that chooses its moves via Monte Carlo Tree Search (MTCS)."""

    def __init__(self, name):
        super().__init__(name)

    def get_move(self, board):
        """Return the (row, col) of the move chosen by the tree search."""
        chosen = self.find_best_move(board)
        return self._find_move_from_new_board_state(board.board, chosen.state.board)

    def find_best_move(self, board, factor=2.0):
        """
        Returns the best move using MonteCarlo Tree Search
        """
        root = Node(board)
        return MTCS(MAX_DEPTH, root, factor, self.id)

    def _find_move_from_new_board_state(self, old, new):
        """
        Making a move in Connect Four makes exactly one change to the board.
        Scanning the two (equally shaped) grids in lockstep locates the
        single differing cell, which is exactly where the move was made.
        """
        for row_idx, (old_row, new_row) in enumerate(zip(old, new)):
            for col_idx, (before, after) in enumerate(zip(old_row, new_row)):
                if before != after:
                    return row_idx, col_idx
        # the boards are identical: no move was detected
        return -1, -1
class RandomAgent(Agent):
    """Agent that plays a uniformly random valid move."""

    def __init__(self, name):
        super().__init__(name)

    def get_move(self, board):
        """
        RandomAgent always returns a valid (ie. partially empty) column to place token in
        """
        # Fix: random.randrange(board.width) yields a column in
        # [0, board.width - 1].  The original random.randint(0, board.width)
        # has an *inclusive* upper bound and could pick the out-of-range
        # column ``board.width``; correctness then silently relied on
        # try_move rejecting it (returning a negative row).
        while True:
            col = random.randrange(board.width)
            row = board.try_move(col)
            if row >= 0:
                break
        return row, col
| 28.2 | 89 | 0.604771 |
ddf90f4b5c45ace337bfc739e847555805737703 | 1,529 | py | Python | test/test_unit.py | firekg/Explore-vs-Teach | 2b3b6b8fbc61e28fda75555d5bf0e068f0a9ab8a | [
"MIT"
] | null | null | null | test/test_unit.py | firekg/Explore-vs-Teach | 2b3b6b8fbc61e28fda75555d5bf0e068f0a9ab8a | [
"MIT"
] | null | null | null | test/test_unit.py | firekg/Explore-vs-Teach | 2b3b6b8fbc61e28fda75555d5bf0e068f0a9ab8a | [
"MIT"
] | 1 | 2018-10-28T11:52:37.000Z | 2018-10-28T11:52:37.000Z | """ unit test """
import numpy as np

from utils_pattern import genMasterPermSet
from e_vs_t import model
from simulate import example_model
from simulate import perf_all_configs
from simulate import perf_all_learner_configs
def setup_test_hypo(guy):
    """Check that every posterior value P(h|X,Y) produced by *guy* is one
    of the analytically possible posterior values for the model."""
    allowed = guy.getPossPostVals()
    xs = np.arange(guy.nx)
    for hypo_idx in range(guy.nhypo):
        for cfg_idx in range(guy.nperm[hypo_idx]):
            permutation = guy.perm[hypo_idx][cfg_idx]
            ys = [guy.gety(permutation, x) for x in xs]
            joint = guy.posteriorJoint(xs, ys)
            for prob in guy.posteriorHypo(joint):
                assert prob in allowed
def test_2hypos():
    """Run the posterior-value check on a two-hypothesis model."""
    base = genMasterPermSet()
    perms = [
        base[0:2] + base[6:10],
        base[2:4] + base[10:14],
    ]
    setup_test_hypo(model(perms))
def test_3hypo():
    """Run the posterior-value check on the full three-hypothesis model."""
    guy = example_model("full")
    setup_test_hypo(guy)
def test_smoke_perf_all():
    """Smoke test: the performance sweeps run end to end without raising."""
    max_step = 5
    perf_all_configs(example_model("full"), max_step, "explore")
    perf_all_learner_configs(
        example_model("simple"), example_model("full"), max_step
    )
    # Reaching this point without an exception is the whole test.
    assert True
def test_is_isomorphic():
    """Two extended hypotheses differing only by a label swap are isomorphic."""
    # NOTE(review): ``extended_hypo_ind`` and ``is_hypo_isomorphic`` are not
    # imported at the top of this module, so this test fails with NameError
    # as written.  TODO: confirm which module provides them and import it.
    a = extended_hypo_ind(3, ([0], [0], [1], [2]))
    b = extended_hypo_ind(3, ([0], [0], [2], [1]))
    # Direct truthiness assert instead of ``== True`` (flake8 E712); the
    # commented-out debug prints were removed.
    assert is_hypo_isomorphic(a, b)
| 28.849057 | 59 | 0.653368 |
64f0fdc1537d90ae3be33c4db7a06c6765cc2e7a | 4,031 | py | Python | setup.py | corentinravoux/lelantos | 8736999e734058e66de5cc7cb4d0d302fc3d793b | [
"MIT"
] | 4 | 2021-09-29T07:49:57.000Z | 2022-03-16T16:30:38.000Z | setup.py | corentinravoux/lelantos | 8736999e734058e66de5cc7cb4d0d302fc3d793b | [
"MIT"
] | null | null | null | setup.py | corentinravoux/lelantos | 8736999e734058e66de5cc7cb4d0d302fc3d793b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import io
import os
import sys
import glob
from shutil import rmtree
from setuptools import setup, Command
# Package meta-data.
NAME = 'lelantos'
DESCRIPTION = 'Package for tomographic reconstruction of large-scale structure based on Lyman-alpha forest data'
URL = 'https://github.com/corentinravoux/lelantos'
EMAIL = 'corentin.ravoux01@hotmail.fr'
AUTHOR = 'Corentin Ravoux'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = '1.0'
# Every file found under scripts/ is installed as an executable script.
scripts = glob.glob('scripts/*')
packages = ['lelantos','lelantos.saclaymocks','lelantos.exec']
# What packages are required for this module to be executed?
REQUIRED = [
    'fitsio', 'numpy', 'scipy','matplotlib','picca'
]
# What packages are optional?
EXTRAS = {
    # 'fancy feature': ['pyevtk'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
    with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
        long_description = '\n' + f.read()
except FileNotFoundError:
    long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
# NOTE: VERSION is hard-coded above, so the exec() branch is currently dead;
# it only runs if VERSION is ever cleared to fall back to __version__.py.
about = {}
if not VERSION:
    with open(os.path.join(here, NAME, '__version__.py')) as f:
        exec(f.read(), about)
else:
    about['__version__'] = VERSION
class UploadCommand(Command):
    """Support setup.py upload."""

    description = 'Build and publish the package.'
    user_options = []

    @staticmethod
    def status(s):
        """Prints things in bold."""
        print('\033[1m{0}\033[0m'.format(s))

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Best-effort removal of any previous build output.
        try:
            self.status('Removing previous builds…')
            rmtree(os.path.join(here, 'dist'))
        except OSError:
            pass

        # Each step is a status banner followed by its shell command(s),
        # executed strictly in order.
        steps = (
            ('Building Source and Wheel (universal) distribution…',
             ['{0} setup.py sdist bdist_wheel --universal'.format(sys.executable)]),
            ('Uploading the package to PyPI via Twine…',
             ['twine upload dist/*']),
            ('Pushing git tags…',
             ['git tag v{0}'.format(about['__version__']),
              'git push --tags']),
        )
        for banner, commands in steps:
            self.status(banner)
            for command in commands:
                os.system(command)

        sys.exit()
# Where the magic happens:
setup(
    name=NAME,
    version=about['__version__'],
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    # packages=find_packages(exclude=('tests',)),
    packages = packages,
    # If your package is a single module, use this instead of 'packages':
    # py_modules=['mypackage'],
    scripts = scripts,
    #entry_points={
    #    'console_scripts': ['mycli=mymodule:cli'],
    #},
    # package_data={'lelantos' :}
    install_requires=REQUIRED,
    extras_require=EXTRAS,
    # Ship the bundled Dachshund tomography binary inside the package.
    package_data={'lelantos.exec': ['dachshund.exe']},
    include_package_data=True,
    license='MIT',
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ],
    # $ setup.py publish support.
    cmdclass={
        'upload': UploadCommand,
    },
)
| 28.792857 | 112 | 0.64773 |
ce8c3526b50f026ed21fb2597aed5f38c57d68be | 1,733 | py | Python | python_web_exam/web_app/models.py | NikolaKolew/softuni-python-web-exam | dfa8bf561a75fdae0083798f953a75afb8e820b9 | [
"MIT"
] | null | null | null | python_web_exam/web_app/models.py | NikolaKolew/softuni-python-web-exam | dfa8bf561a75fdae0083798f953a75afb8e820b9 | [
"MIT"
] | null | null | null | python_web_exam/web_app/models.py | NikolaKolew/softuni-python-web-exam | dfa8bf561a75fdae0083798f953a75afb8e820b9 | [
"MIT"
] | null | null | null | from django.core.validators import MinLengthValidator, MinValueValidator
from django.db import models
class Profile(models.Model):
    """User profile with a display name, email and optional age."""
    # Length bounds for ``user_name``.
    USER_NAME_MAX_CHARS = 15
    USER_NAME_MIN_CHARS = 2
    # Lower bound for ``age``.
    MIN_AGE = 0
    user_name = models.CharField(
        max_length=USER_NAME_MAX_CHARS,
        validators=(
            MinLengthValidator(USER_NAME_MIN_CHARS),
        )
    )
    email = models.EmailField()
    # Optional; must be non-negative when supplied.
    age = models.IntegerField(
        null=True,
        blank=True,
        validators=(
            MinValueValidator(MIN_AGE),
        )
    )
class Album(models.Model):
    """Music album with a fixed genre choice list and a non-negative price."""
    # Field length limits.
    ALBUM_NAME_MAX_CHARS = 30
    ARTIST_NAME_MAX_CHARS = 30
    GENRE_MAX_CHARS = 30
    # Price cannot be negative.
    MIN_PRICE = 0.0
    # Genre constants; the stored DB value equals the human-readable label.
    POPMUSIC = 'Pop Music'
    JAZZMUSIC = 'Jazz Music'
    RBMUSIC = 'R&B Music'
    ROCKMUSIC = 'Rock Music'
    COUNTRYMUSIC = 'Country Music'
    DANCEMUSIC = 'Dance Music'
    HIPHOPMUSIC = 'Hip Hop Music'
    OTHER = 'Other'
    GENRE_CHOICES = [
        (POPMUSIC, 'Pop Music'),
        (JAZZMUSIC, 'Jazz Music'),
        (RBMUSIC, 'R&B Music'),
        (ROCKMUSIC, 'Rock Music'),
        (COUNTRYMUSIC, 'Country Music'),
        (DANCEMUSIC, 'Dance Music'),
        (HIPHOPMUSIC, 'Hip Hop Music'),
        (OTHER, 'Other'),
    ]
    # Album names are globally unique.
    name = models.CharField(
        max_length=ALBUM_NAME_MAX_CHARS,
        unique=True,
    )
    artist = models.CharField(
        max_length=ARTIST_NAME_MAX_CHARS,
    )
    genre = models.CharField(
        max_length=GENRE_MAX_CHARS,
        choices=GENRE_CHOICES,
    )
    # Optional free-text description.
    description = models.TextField(
        null=True,
        blank=True,
    )
    # External URL of the cover image (not an uploaded file).
    image = models.URLField()
    price = models.FloatField(
        validators=(
            MinValueValidator(MIN_PRICE),
        )
    )
| 21.6625 | 72 | 0.600115 |
788314d4a73895ea1c771fb87b985876d6ff475a | 7,643 | py | Python | tests/mesh/test_basemesh.py | KyuboNoh/HY | 8ba9815137c2cff2f1931a1940e1b762e8df0b02 | [
"MIT"
] | 1 | 2020-11-27T03:26:22.000Z | 2020-11-27T03:26:22.000Z | tests/mesh/test_basemesh.py | KyuboNoh/HY | 8ba9815137c2cff2f1931a1940e1b762e8df0b02 | [
"MIT"
] | null | null | null | tests/mesh/test_basemesh.py | KyuboNoh/HY | 8ba9815137c2cff2f1931a1940e1b762e8df0b02 | [
"MIT"
] | null | null | null | import unittest
import sys
from SimPEG.Mesh.BaseMesh import BaseRectangularMesh
import numpy as np
class TestBaseMesh(unittest.TestCase):
    """Checks cell/face/edge counts and the ``r`` reshape helper of
    ``BaseRectangularMesh`` on a 3D mesh of 6 x 2 x 3 cells."""

    def setUp(self):
        self.mesh = BaseRectangularMesh([6, 2, 3])

    def test_meshDimensions(self):
        # Fixed: the original called ``assertTrue(self.mesh.dim, 3)``, which
        # treats 3 as the failure *message* and passes for any truthy dim.
        self.assertEqual(self.mesh.dim, 3)

    def test_mesh_nc(self):
        self.assertTrue(self.mesh.nC == 36)
        self.assertTrue(np.all(self.mesh.vnC == [6, 2, 3]))

    def test_mesh_nc_xyz(self):
        self.assertTrue(np.all(self.mesh.nCx == 6))
        self.assertTrue(np.all(self.mesh.nCy == 2))
        self.assertTrue(np.all(self.mesh.nCz == 3))

    def test_mesh_nf(self):
        # Face counts per direction: one extra node along the face normal.
        self.assertTrue(np.all(self.mesh.vnFx == [7, 2, 3]))
        self.assertTrue(np.all(self.mesh.vnFy == [6, 3, 3]))
        self.assertTrue(np.all(self.mesh.vnFz == [6, 2, 4]))

    def test_mesh_ne(self):
        self.assertTrue(np.all(self.mesh.vnEx == [6, 3, 4]))
        self.assertTrue(np.all(self.mesh.vnEy == [7, 2, 4]))
        self.assertTrue(np.all(self.mesh.vnEz == [7, 3, 3]))

    def test_mesh_numbers(self):
        self.assertTrue(self.mesh.nC == 36)
        self.assertTrue(np.all(self.mesh.vnF == [42, 54, 48]))
        self.assertTrue(np.all(self.mesh.vnE == [72, 56, 63]))
        self.assertTrue(np.all(self.mesh.nF == np.sum([42, 54, 48])))
        self.assertTrue(np.all(self.mesh.nE == np.sum([72, 56, 63])))

    def test_mesh_r_E_V(self):
        # Split a stacked edge vector back into per-direction components.
        ex = np.ones(self.mesh.nEx)
        ey = np.ones(self.mesh.nEy)*2
        ez = np.ones(self.mesh.nEz)*3
        e = np.r_[ex, ey, ez]
        tex = self.mesh.r(e, 'E', 'Ex', 'V')
        tey = self.mesh.r(e, 'E', 'Ey', 'V')
        tez = self.mesh.r(e, 'E', 'Ez', 'V')
        self.assertTrue(np.all(tex == ex))
        self.assertTrue(np.all(tey == ey))
        self.assertTrue(np.all(tez == ez))
        tex, tey, tez = self.mesh.r(e, 'E', 'E', 'V')
        self.assertTrue(np.all(tex == ex))
        self.assertTrue(np.all(tey == ey))
        self.assertTrue(np.all(tez == ez))

    def test_mesh_r_F_V(self):
        fx = np.ones(self.mesh.nFx)
        fy = np.ones(self.mesh.nFy)*2
        fz = np.ones(self.mesh.nFz)*3
        f = np.r_[fx, fy, fz]
        tfx = self.mesh.r(f, 'F', 'Fx', 'V')
        tfy = self.mesh.r(f, 'F', 'Fy', 'V')
        tfz = self.mesh.r(f, 'F', 'Fz', 'V')
        self.assertTrue(np.all(tfx == fx))
        self.assertTrue(np.all(tfy == fy))
        self.assertTrue(np.all(tfz == fz))
        tfx, tfy, tfz = self.mesh.r(f, 'F', 'F', 'V')
        self.assertTrue(np.all(tfx == fx))
        self.assertTrue(np.all(tfy == fy))
        self.assertTrue(np.all(tfz == fz))

    def test_mesh_r_E_M(self):
        # 'M' (matrix) form reshapes each component to the grid shape.
        g = np.ones((np.prod(self.mesh.vnEx), 3))
        g[:, 1] = 2
        g[:, 2] = 3
        Xex, Yex, Zex = self.mesh.r(g, 'Ex', 'Ex', 'M')
        self.assertTrue(np.all(Xex.shape == self.mesh.vnEx))
        self.assertTrue(np.all(Yex.shape == self.mesh.vnEx))
        self.assertTrue(np.all(Zex.shape == self.mesh.vnEx))
        self.assertTrue(np.all(Xex == 1))
        self.assertTrue(np.all(Yex == 2))
        self.assertTrue(np.all(Zex == 3))

    def test_mesh_r_F_M(self):
        g = np.ones((np.prod(self.mesh.vnFx), 3))
        g[:, 1] = 2
        g[:, 2] = 3
        Xfx, Yfx, Zfx = self.mesh.r(g, 'Fx', 'Fx', 'M')
        self.assertTrue(np.all(Xfx.shape == self.mesh.vnFx))
        self.assertTrue(np.all(Yfx.shape == self.mesh.vnFx))
        self.assertTrue(np.all(Zfx.shape == self.mesh.vnFx))
        self.assertTrue(np.all(Xfx == 1))
        self.assertTrue(np.all(Yfx == 2))
        self.assertTrue(np.all(Zfx == 3))

    def test_mesh_r_CC_M(self):
        g = np.ones((self.mesh.nC, 3))
        g[:, 1] = 2
        g[:, 2] = 3
        Xc, Yc, Zc = self.mesh.r(g, 'CC', 'CC', 'M')
        self.assertTrue(np.all(Xc.shape == self.mesh.vnC))
        self.assertTrue(np.all(Yc.shape == self.mesh.vnC))
        self.assertTrue(np.all(Zc.shape == self.mesh.vnC))
        self.assertTrue(np.all(Xc == 1))
        self.assertTrue(np.all(Yc == 2))
        self.assertTrue(np.all(Zc == 3))
class TestMeshNumbers2D(unittest.TestCase):
    """Checks counts and the ``r`` reshape helper of
    ``BaseRectangularMesh`` on a 2D mesh of 6 x 2 cells."""

    def setUp(self):
        self.mesh = BaseRectangularMesh([6, 2])

    def test_meshDimensions(self):
        # Fixed: ``assertTrue(self.mesh.dim, 2)`` treated 2 as the failure
        # message and passed for any truthy dim.
        self.assertEqual(self.mesh.dim, 2)

    def test_mesh_nc(self):
        self.assertTrue(np.all(self.mesh.vnC == [6, 2]))

    def test_mesh_nc_xyz(self):
        self.assertTrue(np.all(self.mesh.nCx == 6))
        self.assertTrue(np.all(self.mesh.nCy == 2))
        # no z dimension on a 2D mesh
        self.assertTrue(self.mesh.nCz is None)

    def test_mesh_nf(self):
        self.assertTrue(np.all(self.mesh.vnFx == [7, 2]))
        self.assertTrue(np.all(self.mesh.vnFy == [6, 3]))
        self.assertTrue(self.mesh.vnFz is None)

    def test_mesh_ne(self):
        self.assertTrue(np.all(self.mesh.vnEx == [6, 3]))
        self.assertTrue(np.all(self.mesh.vnEy == [7, 2]))
        self.assertTrue(self.mesh.vnEz is None)

    def test_mesh_numbers(self):
        # Fixed: the original assigned the comparison to an unused local
        # (``c = self.mesh.nC == 12``), so the cell count was never checked.
        self.assertEqual(self.mesh.nC, 12)
        self.assertTrue(np.all(self.mesh.vnF == [14, 18]))
        self.assertTrue(np.all(self.mesh.nFx == 14))
        self.assertTrue(np.all(self.mesh.nFy == 18))
        self.assertTrue(np.all(self.mesh.nEx == 18))
        self.assertTrue(np.all(self.mesh.nEy == 14))
        # (the original asserted vnE == [18, 14] twice; once is enough)
        self.assertTrue(np.all(self.mesh.vnE == [18, 14]))
        self.assertTrue(np.all(self.mesh.nF == np.sum([14, 18])))
        self.assertTrue(np.all(self.mesh.nE == np.sum([18, 14])))

    def test_mesh_r_E_V(self):
        ex = np.ones(self.mesh.nEx)
        ey = np.ones(self.mesh.nEy)*2
        e = np.r_[ex, ey]
        tex = self.mesh.r(e, 'E', 'Ex', 'V')
        tey = self.mesh.r(e, 'E', 'Ey', 'V')
        self.assertTrue(np.all(tex == ex))
        self.assertTrue(np.all(tey == ey))
        tex, tey = self.mesh.r(e, 'E', 'E', 'V')
        self.assertTrue(np.all(tex == ex))
        self.assertTrue(np.all(tey == ey))
        # requesting a z component must fail on a 2D mesh
        self.assertRaises(AssertionError, self.mesh.r, e, 'E', 'Ez', 'V')

    def test_mesh_r_F_V(self):
        fx = np.ones(self.mesh.nFx)
        fy = np.ones(self.mesh.nFy)*2
        f = np.r_[fx, fy]
        tfx = self.mesh.r(f, 'F', 'Fx', 'V')
        tfy = self.mesh.r(f, 'F', 'Fy', 'V')
        self.assertTrue(np.all(tfx == fx))
        self.assertTrue(np.all(tfy == fy))
        tfx, tfy = self.mesh.r(f, 'F', 'F', 'V')
        self.assertTrue(np.all(tfx == fx))
        self.assertTrue(np.all(tfy == fy))
        self.assertRaises(AssertionError, self.mesh.r, f, 'F', 'Fz', 'V')

    def test_mesh_r_E_M(self):
        g = np.ones((np.prod(self.mesh.vnEx), 2))
        g[:, 1] = 2
        Xex, Yex = self.mesh.r(g, 'Ex', 'Ex', 'M')
        self.assertTrue(np.all(Xex.shape == self.mesh.vnEx))
        self.assertTrue(np.all(Yex.shape == self.mesh.vnEx))
        self.assertTrue(np.all(Xex == 1))
        self.assertTrue(np.all(Yex == 2))

    def test_mesh_r_F_M(self):
        g = np.ones((np.prod(self.mesh.vnFx), 2))
        g[:, 1] = 2
        Xfx, Yfx = self.mesh.r(g, 'Fx', 'Fx', 'M')
        self.assertTrue(np.all(Xfx.shape == self.mesh.vnFx))
        self.assertTrue(np.all(Yfx.shape == self.mesh.vnFx))
        self.assertTrue(np.all(Xfx == 1))
        self.assertTrue(np.all(Yfx == 2))

    def test_mesh_r_CC_M(self):
        g = np.ones((self.mesh.nC, 2))
        g[:, 1] = 2
        Xc, Yc = self.mesh.r(g, 'CC', 'CC', 'M')
        self.assertTrue(np.all(Xc.shape == self.mesh.vnC))
        self.assertTrue(np.all(Yc.shape == self.mesh.vnC))
        self.assertTrue(np.all(Xc == 1))
        self.assertTrue(np.all(Yc == 2))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 37.650246 | 75 | 0.55541 |
d42ff521a4ffef837a6470ad14dec23f5ae0e885 | 3,512 | py | Python | tests/parsers/mac_appfirewall.py | berggren/plaso | 2658c80c5076f97a9a27272e73997bde8c39e875 | [
"Apache-2.0"
] | 27 | 2019-04-05T12:01:49.000Z | 2022-02-08T02:26:25.000Z | tests/parsers/mac_appfirewall.py | berggren/plaso | 2658c80c5076f97a9a27272e73997bde8c39e875 | [
"Apache-2.0"
] | null | null | null | tests/parsers/mac_appfirewall.py | berggren/plaso | 2658c80c5076f97a9a27272e73997bde8c39e875 | [
"Apache-2.0"
] | 8 | 2019-11-28T08:06:34.000Z | 2020-08-29T13:53:30.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for Mac AppFirewall log file parser."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import mac_appfirewall as _ # pylint: disable=unused-import
from plaso.parsers import mac_appfirewall
from tests.parsers import test_lib
class MacAppFirewallUnitTest(test_lib.ParserTestCase):
  """Tests for Mac AppFirewall log file parser."""

  def testParseFile(self):
    """Tests parsing of a Mac AppFirewall log file."""
    parser = mac_appfirewall.MacAppFirewallParser()
    # The log format has no year; it must be supplied via the knowledge base.
    knowledge_base_values = {'year': 2013}
    storage_writer = self._ParseFile(
        ['appfirewall.log'], parser,
        knowledge_base_values=knowledge_base_values)
    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 47)
    events = list(storage_writer.GetEvents())
    # First event: log-file creation entry with Error status.
    event = events[0]
    self.CheckTimestamp(event.timestamp, '2013-11-02 04:07:35.000000')
    event_data = self._GetEventDataOfEvent(storage_writer, event)
    self.assertEqual(event_data.agent, 'socketfilterfw[112]')
    self.assertEqual(event_data.computer_name, 'DarkTemplar-2.local')
    self.assertEqual(event_data.status, 'Error')
    self.assertEqual(event_data.process_name, 'Logging')
    self.assertEqual(event_data.action, 'creating /var/log/appfirewall.log')
    expected_message = (
        'Computer: DarkTemplar-2.local '
        'Agent: socketfilterfw[112] '
        'Status: Error '
        'Process name: Logging '
        'Log: creating /var/log/appfirewall.log')
    expected_short_message = (
        'Process name: Logging '
        'Status: Error')
    self._TestGetMessageStrings(
        event_data, expected_message, expected_short_message)
    # A regular allow-listen entry for an application process.
    event = events[9]
    self.CheckTimestamp(event.timestamp, '2013-11-03 13:25:15.000000')
    event_data = self._GetEventDataOfEvent(storage_writer, event)
    self.assertEqual(event_data.agent, 'socketfilterfw[87]')
    self.assertEqual(event_data.computer_name, 'DarkTemplar-2.local')
    self.assertEqual(event_data.status, 'Info')
    self.assertEqual(event_data.process_name, 'Dropbox')
    self.assertEqual(event_data.action, 'Allow TCP LISTEN (in:0 out:1)')
    expected_message = (
        'Computer: DarkTemplar-2.local '
        'Agent: socketfilterfw[87] '
        'Status: Info '
        'Process name: Dropbox '
        'Log: Allow TCP LISTEN (in:0 out:1)')
    expected_short_message = (
        'Process name: Dropbox '
        'Status: Info')
    self._TestGetMessageStrings(
        event_data, expected_message, expected_short_message)
    # Check repeated lines.
    # A "repeated N times" log line should yield an event whose fields match
    # the entry it repeats.
    event = events[38]
    event_data = self._GetEventDataOfEvent(storage_writer, event)
    repeated_event = events[39]
    repeated_event_data = self._GetEventDataOfEvent(
        storage_writer, repeated_event)
    self.assertEqual(event_data.agent, repeated_event_data.agent)
    self.assertEqual(
        event_data.computer_name, repeated_event_data.computer_name)
    self.assertEqual(event_data.status, repeated_event_data.status)
    self.assertEqual(
        event_data.process_name, repeated_event_data.process_name)
    self.assertEqual(event_data.action, repeated_event_data.action)
    # Year changes.
    # The parser must roll the year over from 2013 to 2014 at the boundary.
    event = events[45]
    self.CheckTimestamp(event.timestamp, '2013-12-31 23:59:23.000000')
    event = events[46]
    self.CheckTimestamp(event.timestamp, '2014-01-01 01:13:23.000000')
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| 32.82243 | 82 | 0.714977 |
1cd2c347614b4b014a6aaa831e3b9dd721e025d0 | 3,406 | py | Python | src/sqlfluff/rules/L026.py | r0fls/sqlfluff | 3bc658e26758d1eb1ce35dade2e2cf064a4d6675 | [
"MIT"
] | null | null | null | src/sqlfluff/rules/L026.py | r0fls/sqlfluff | 3bc658e26758d1eb1ce35dade2e2cf064a4d6675 | [
"MIT"
] | 8 | 2022-01-26T21:43:03.000Z | 2022-01-31T10:22:02.000Z | src/sqlfluff/rules/L026.py | r0fls/sqlfluff | 3bc658e26758d1eb1ce35dade2e2cf064a4d6675 | [
"MIT"
] | 1 | 2022-01-24T10:10:43.000Z | 2022-01-24T10:10:43.000Z | """Implementation of Rule L026."""
from sqlfluff.core.rules.analysis.select import get_aliases_from_select
from sqlfluff.core.rules.base import EvalResultType, LintResult, RuleContext
from sqlfluff.core.rules.doc_decorators import document_configuration
from sqlfluff.rules.L020 import Rule_L020
@document_configuration
class Rule_L026(Rule_L020):
    """References cannot reference objects not present in ``FROM`` clause.
    NB: This rule is disabled by default for BigQuery due to its use of
    structs which trigger false positives. It can be enabled with the
    ``force_enable = True`` flag.
    | **Anti-pattern**
    | In this example, the reference ``vee`` has not been declared.
    .. code-block:: sql
        SELECT
            vee.a
        FROM foo
    | **Best practice**
    | Remove the reference.
    .. code-block:: sql
        SELECT
            a
        FROM foo
    """
    config_keywords = ["force_enable"]
    @staticmethod
    def _is_bad_tbl_ref(table_aliases, parent_select, tbl_ref):
        """Given a table reference, try to find what it's referring to.

        Returns True when the reference resolves neither to a FROM-clause
        alias nor to a correlated (parent) subquery alias.
        """
        # Is it referring to one of the table aliases?
        if tbl_ref[0] in [a.ref_str for a in table_aliases]:
            # Yes. Therefore okay.
            return False
        # Not a table alias. Is it referring to a correlated subquery?
        if parent_select:
            parent_aliases, _ = get_aliases_from_select(parent_select)
            if parent_aliases and tbl_ref[0] in [a[0] for a in parent_aliases]:
                # Yes. Therefore okay.
                return False
        # It's not referring to an alias or a correlated subquery. Looks like a
        # bad reference (i.e. referring to something unknown.)
        return True
    def _lint_references_and_aliases(
        self,
        table_aliases,
        standalone_aliases,
        references,
        col_aliases,
        using_cols,
        parent_select,
    ):
        """Flag every reference whose possible table names all fail to
        resolve against the visible aliases."""
        # A buffer to keep any violations.
        violation_buff = []
        # Check all the references that we have, do they reference present aliases?
        for r in references:
            tbl_refs = r.extract_possible_references(level=r.ObjectReferenceLevel.TABLE)
            # Only a violation when *every* candidate table name is bad.
            if tbl_refs and all(
                self._is_bad_tbl_ref(table_aliases, parent_select, tbl_ref)
                for tbl_ref in tbl_refs
            ):
                violation_buff.append(
                    LintResult(
                        # Return the first segment rather than the string
                        anchor=tbl_refs[0].segments[0],
                        description=f"Reference {r.raw!r} refers to table/view "
                        "not found in the FROM clause or found in parent "
                        "subquery.",
                    )
                )
        return violation_buff or None
    def _eval(self, context: RuleContext) -> EvalResultType:
        """Override Rule L020 for dialects that use structs.
        Some dialects use structs (e.g. column.field) which look like
        table references and so incorrectly trigger this rule.
        """
        # Config type hints
        self.force_enable: bool
        # Skip struct-using dialects unless the user explicitly opted in.
        if (
            context.dialect.name in ["bigquery", "hive", "redshift"]
            and not self.force_enable
        ):
            return LintResult()
        return super()._eval(context=context)
| 32.75 | 88 | 0.608045 |
bd47124098c74efe48c2a648371e9c72bc012173 | 809 | py | Python | backend/blog/handler/open/review.py | o8oo8o/blog | 2a6f44f86469bfbb472dfd1bec4238587d8402bf | [
"MIT"
] | null | null | null | backend/blog/handler/open/review.py | o8oo8o/blog | 2a6f44f86469bfbb472dfd1bec4238587d8402bf | [
"MIT"
] | null | null | null | backend/blog/handler/open/review.py | o8oo8o/blog | 2a6f44f86469bfbb472dfd1bec4238587d8402bf | [
"MIT"
] | null | null | null | #!/usr/bin/evn python3
# coding=utf-8
from handler.open.base import WebBaseHandler
from service.blog import BlogSrv
from service.review import ReviewSrv
from handler.open.verifycode import check_verify_code
class ReviewHandler(WebBaseHandler):
    """
    # Add a new comment to a blog post.
    """
    # Decorator that validates the CAPTCHA before the handler body runs.
    @check_verify_code
    async def post(self):
        """
        # Create a new comment.
        :return: success_data or error_data
        """
        blog_id = self.get_argument_int("blog_id")
        email = self.get_argument_email("email")
        name = self.get_argument_str("name", max_len=127)
        text = self.get_argument_str("text")
        ReviewSrv.add_review(blog_id, name, email, text)
        # Respond with the refreshed comment list for this post.
        self.send_json({
            "code": 0,
            "review_list": BlogSrv.get_blog_review_list(blog_id)
        })
| 24.515152 | 64 | 0.640297 |
46552e20822fa6fd305833a6e4dc9d22ce83a066 | 1,242 | py | Python | adaptive_attention_XELoss/inference_wrapper.py | vanpersie32/adaptive_attention | 4417f7d10dcf1c7b8188ee4fc4efc36c987f72f3 | [
"MIT"
] | 1 | 2018-06-01T01:37:46.000Z | 2018-06-01T01:37:46.000Z | adaptive_attention_XELoss/inference_wrapper.py | vanpersie32/adaptive_attention | 4417f7d10dcf1c7b8188ee4fc4efc36c987f72f3 | [
"MIT"
] | null | null | null | adaptive_attention_XELoss/inference_wrapper.py | vanpersie32/adaptive_attention | 4417f7d10dcf1c7b8188ee4fc4efc36c987f72f3 | [
"MIT"
] | null | null | null | from language_model import LanguageModel
import tensorflow as tf
class inference_wrapper(object):
    """Wrapper exposing the single-step decoding ops of a LanguageModel
    graph for inference (TF1 session API).  All tensors are addressed by
    name under the 'inference' name scope."""
    def __init__(self,opt,reuse = False):
        # build the model
        # separate inference op with train op, especially in train and validation steps
        with tf.name_scope('inference'):
            LM = LanguageModel(opt,'test',reuse = reuse)
            LM.build()
            self.model = LM
    def inference_step(self,sess,objects_features,attributes,input_feed,state_feed):
        """Run one decoding step.

        Returns (prob, new_state, None, top20_weights); the third slot is
        always None in this implementation.
        """
        feed_dict = {'inference/objects_features:0':objects_features,
                     'inference/input_feed:0':input_feed,
                     'inference/attributes:0':attributes,
                     'inference/state_feed:0':state_feed}
        prob, new_state,top20_weights = sess.run(['inference/prob:0','inference/new_states:0',self.model.top20_weights],feed_dict)
        return prob, new_state, None, top20_weights
    def init_state(self,sess,image_features,input_feed):
        """Compute the initial recurrent state from image features; fed
        once before stepwise decoding begins."""
        feed_dict = {'inference/image_features:0':image_features,
                     'inference/input_feed:0':input_feed}
        init_state = sess.run('inference/init_states:0',feed_dict)
        return init_state
| 38.8125 | 130 | 0.641707 |
38d379a4c83c1bad3baf274f9e847431da6e8974 | 89,259 | py | Python | lib/sqlalchemy/orm/strategies.py | aadel/sqlalchemy | 380f4389922004589bfa7cb4f9b8c8208aa68659 | [
"MIT"
] | null | null | null | lib/sqlalchemy/orm/strategies.py | aadel/sqlalchemy | 380f4389922004589bfa7cb4f9b8c8208aa68659 | [
"MIT"
] | null | null | null | lib/sqlalchemy/orm/strategies.py | aadel/sqlalchemy | 380f4389922004589bfa7cb4f9b8c8208aa68659 | [
"MIT"
] | null | null | null | # orm/strategies.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""sqlalchemy.orm.interfaces.LoaderStrategy
implementations, and related MapperOptions."""
from __future__ import absolute_import
import collections
import itertools
from . import attributes
from . import exc as orm_exc
from . import interfaces
from . import loading
from . import properties
from . import query
from . import unitofwork
from . import util as orm_util
from .base import _DEFER_FOR_STATE
from .base import _RAISE_FOR_STATE
from .base import _SET_DEFERRED_EXPIRED
from .interfaces import LoaderStrategy
from .interfaces import StrategizedProperty
from .session import _state_session
from .state import InstanceState
from .util import _none_set
from .util import aliased
from .. import event
from .. import exc as sa_exc
from .. import inspect
from .. import log
from .. import sql
from .. import util
from ..sql import util as sql_util
from ..sql import visitors
def _register_attribute(
    prop,
    mapper,
    useobject,
    compare_function=None,
    typecallable=None,
    callable_=None,
    proxy_property=None,
    active_history=False,
    impl_class=None,
    **kw
):
    """Instrument attribute ``prop.key`` on *mapper* and each of its
    descendant mappers, attaching the relevant event listeners
    (single-parent validator, mapper-level validators, cascade tracking,
    backref handlers).

    ``useobject`` is True for object-holding (relationship-style)
    attributes and False for scalar/column attributes.
    """
    # Hooks are collected first, then applied per-descriptor below;
    # their order in this list is significant.
    listen_hooks = []
    uselist = useobject and prop.uselist
    if useobject and prop.single_parent:
        listen_hooks.append(single_parent_validator)
    if prop.key in prop.parent.validators:
        fn, opts = prop.parent.validators[prop.key]
        listen_hooks.append(
            lambda desc, prop: orm_util._validator_events(
                desc, prop.key, fn, **opts
            )
        )
    if useobject:
        listen_hooks.append(unitofwork.track_cascade_events)
    # need to assemble backref listeners
    # after the singleparentvalidator, mapper validator
    if useobject:
        backref = prop.back_populates
        if backref:
            listen_hooks.append(
                lambda desc, prop: attributes.backref_listeners(
                    desc, backref, uselist
                )
            )
    # a single MapperProperty is shared down a class inheritance
    # hierarchy, so we set up attribute instrumentation and backref event
    # for each mapper down the hierarchy.
    # typically, "mapper" is the same as prop.parent, due to the way
    # the configure_mappers() process runs, however this is not strongly
    # enforced, and in the case of a second configure_mappers() run the
    # mapper here might not be prop.parent; also, a subclass mapper may
    # be called here before a superclass mapper.  That is, can't depend
    # on mappers not already being set up so we have to check each one.
    for m in mapper.self_and_descendants:
        if prop is m._props.get(
            prop.key
        ) and not m.class_manager._attr_has_impl(prop.key):
            desc = attributes.register_attribute_impl(
                m.class_,
                prop.key,
                parent_token=prop,
                uselist=uselist,
                compare_function=compare_function,
                useobject=useobject,
                trackparent=useobject
                and (
                    prop.single_parent
                    or prop.direction is interfaces.ONETOMANY
                ),
                typecallable=typecallable,
                callable_=callable_,
                active_history=active_history,
                impl_class=impl_class,
                send_modified_events=not useobject or not prop.viewonly,
                doc=prop.doc,
                **kw
            )
            for hook in listen_hooks:
                hook(desc, prop)
@properties.ColumnProperty.strategy_for(instrument=False, deferred=False)
class UninstrumentedColumnLoader(LoaderStrategy):
    """Strategy for a MapperProperty with no attribute instrumentation.

    The polymorphic_on argument of mapper() often results in this,
    if the argument is against the with_polymorphic selectable: the
    column participates in the SELECT but populates no attribute.
    """

    __slots__ = ("columns",)

    def __init__(self, parent, strategy_key):
        super(UninstrumentedColumnLoader, self).__init__(parent, strategy_key)
        self.columns = self.parent_property.columns

    def setup_query(
        self,
        context,
        query_entity,
        path,
        loadopt,
        adapter,
        column_collection=None,
        **kwargs
    ):
        # Contribute each mapped column to the SELECT, translated through
        # the adapter when one is present.
        for source_col in self.columns:
            target_col = adapter.columns[source_col] if adapter else source_col
            column_collection.append(target_col)

    def create_row_processor(
        self, context, path, loadopt, mapper, result, adapter, populators
    ):
        # Nothing is instrumented, so there is no population step.
        pass
@log.class_logger
@properties.ColumnProperty.strategy_for(instrument=True, deferred=False)
class ColumnLoader(LoaderStrategy):
    """Provide loading behavior for a :class:`.ColumnProperty`."""
    __slots__ = "columns", "is_composite"
    def __init__(self, parent, strategy_key):
        super(ColumnLoader, self).__init__(parent, strategy_key)
        self.columns = self.parent_property.columns
        self.is_composite = hasattr(self.parent_property, "composite_class")
    def setup_query(
        self,
        context,
        query_entity,
        path,
        loadopt,
        adapter,
        column_collection,
        memoized_populators,
        **kwargs
    ):
        # Add every mapped column to the SELECT, translated through the
        # adapter (e.g. for aliased entities) when one is present.
        for c in self.columns:
            if adapter:
                c = adapter.columns[c]
            column_collection.append(c)
        # Memoize the first column as the one to populate the attribute
        # from at row-processing time.
        fetch = self.columns[0]
        if adapter:
            fetch = adapter.columns[fetch]
        memoized_populators[self.parent_property] = fetch
    def init_class_attribute(self, mapper):
        self.is_class_level = True
        coltype = self.columns[0].type
        # TODO: check all columns ?  check for foreign key as well?
        # Active history (loading the old value before replacement) is
        # enabled for primary key and version id columns.
        active_history = (
            self.parent_property.active_history
            or self.columns[0].primary_key
            or mapper.version_id_col in set(self.columns)
        )
        _register_attribute(
            self.parent_property,
            mapper,
            useobject=False,
            compare_function=coltype.compare_values,
            active_history=active_history,
        )
    def create_row_processor(
        self, context, path, loadopt, mapper, result, adapter, populators
    ):
        # look through list of columns represented here
        # to see which, if any, is present in the row.
        for col in self.columns:
            if adapter:
                col = adapter.columns[col]
            getter = result._getter(col, False)
            if getter:
                populators["quick"].append((self.key, getter))
                break
        else:
            # for/else: no column was present in the result; mark the
            # attribute expired so it loads on first access.
            populators["expire"].append((self.key, True))
@log.class_logger
@properties.ColumnProperty.strategy_for(query_expression=True)
class ExpressionColumnLoader(ColumnLoader):
    """Loader for a column property whose SQL expression is supplied
    per-query via a loader option (the "expression" entry in
    ``loadopt.local_opts``), rather than being a fixed mapped column.
    """
    def __init__(self, parent, strategy_key):
        super(ExpressionColumnLoader, self).__init__(parent, strategy_key)
    def setup_query(
        self,
        context,
        query_entity,
        path,
        loadopt,
        adapter,
        column_collection,
        memoized_populators,
        **kwargs
    ):
        # only add the expression to the SELECT if an option supplied
        # one for this query; otherwise the attribute stays unloaded
        if loadopt and "expression" in loadopt.local_opts:
            columns = [loadopt.local_opts["expression"]]
            for c in columns:
                if adapter:
                    c = adapter.columns[c]
                column_collection.append(c)
            fetch = columns[0]
            if adapter:
                fetch = adapter.columns[fetch]
            memoized_populators[self.parent_property] = fetch
    def create_row_processor(
        self, context, path, loadopt, mapper, result, adapter, populators
    ):
        # look through list of columns represented here
        # to see which, if any, is present in the row.
        if loadopt and "expression" in loadopt.local_opts:
            columns = [loadopt.local_opts["expression"]]
            for col in columns:
                if adapter:
                    col = adapter.columns[col]
                getter = result._getter(col, False)
                if getter:
                    populators["quick"].append((self.key, getter))
                    break
            else:
                populators["expire"].append((self.key, True))
    def init_class_attribute(self, mapper):
        # accepts_scalar_loader=False: the expression is per-query, so a
        # single-attribute refresh cannot reproduce it
        self.is_class_level = True
        _register_attribute(
            self.parent_property,
            mapper,
            useobject=False,
            compare_function=self.columns[0].type.compare_values,
            accepts_scalar_loader=False,
        )
@log.class_logger
@properties.ColumnProperty.strategy_for(deferred=True, instrument=True)
@properties.ColumnProperty.strategy_for(
    deferred=True, instrument=True, raiseload=True
)
@properties.ColumnProperty.strategy_for(do_nothing=True)
class DeferredColumnLoader(LoaderStrategy):
    """Provide loading behavior for a deferred :class:`.ColumnProperty`."""
    __slots__ = "columns", "group", "raiseload"
    def __init__(self, parent, strategy_key):
        super(DeferredColumnLoader, self).__init__(parent, strategy_key)
        # composite properties are rejected outright; deferral of
        # composites is not implemented
        if hasattr(self.parent_property, "composite_class"):
            raise NotImplementedError(
                "Deferred loading for composite " "types not implemented yet"
            )
        self.raiseload = self.strategy_opts.get("raiseload", False)
        self.columns = self.parent_property.columns
        self.group = self.parent_property.group
    def create_row_processor(
        self, context, path, loadopt, mapper, result, adapter, populators
    ):
        # for a DeferredColumnLoader, this method is only used during a
        # "row processor only" query; see test_deferred.py ->
        # tests with "rowproc_only" in their name. As of the 1.0 series,
        # loading._instance_processor doesn't use a "row processing" function
        # to populate columns, instead it uses data in the "populators"
        # dictionary. Normally, the DeferredColumnLoader.setup_query()
        # sets up that data in the "memoized_populators" dictionary
        # and "create_row_processor()" here is never invoked.
        if not self.is_class_level:
            # not the class-level manager: install a per-instance deferred
            # (or raising) loader callable on each new instance
            if self.raiseload:
                set_deferred_for_local_state = (
                    self.parent_property._raise_column_loader
                )
            else:
                set_deferred_for_local_state = (
                    self.parent_property._deferred_column_loader
                )
            populators["new"].append((self.key, set_deferred_for_local_state))
        else:
            populators["expire"].append((self.key, False))
    def init_class_attribute(self, mapper):
        """Install the class-level attribute, using :meth:`._load_for_state`
        as its deferred loader callable.
        """
        self.is_class_level = True
        _register_attribute(
            self.parent_property,
            mapper,
            useobject=False,
            compare_function=self.columns[0].type.compare_values,
            callable_=self._load_for_state,
            load_on_unexpire=False,
        )
    def setup_query(
        self,
        context,
        query_entity,
        path,
        loadopt,
        adapter,
        column_collection,
        memoized_populators,
        only_load_props=None,
        **kw
    ):
        # undefer in three cases: "undefer_pks" option covering one of our
        # columns, an "undefer_group_<name>" option matching our group, or
        # the key being named explicitly in only_load_props; delegate to
        # the non-deferred ColumnLoader in that case
        if (
            (
                loadopt
                and "undefer_pks" in loadopt.local_opts
                and set(self.columns).intersection(
                    self.parent._should_undefer_in_wildcard
                )
            )
            or (
                loadopt
                and self.group
                and loadopt.local_opts.get(
                    "undefer_group_%s" % self.group, False
                )
            )
            or (only_load_props and self.key in only_load_props)
        ):
            self.parent_property._get_strategy(
                (("deferred", False), ("instrument", True))
            ).setup_query(
                context,
                query_entity,
                path,
                loadopt,
                adapter,
                column_collection,
                memoized_populators,
                **kw
            )
        elif self.is_class_level:
            memoized_populators[self.parent_property] = _SET_DEFERRED_EXPIRED
        elif not self.raiseload:
            memoized_populators[self.parent_property] = _DEFER_FOR_STATE
        else:
            memoized_populators[self.parent_property] = _RAISE_FOR_STATE
    def _load_for_state(self, state, passive):
        """Emit the deferred load for the given state, loading the whole
        deferral group at once when one is configured.
        """
        if not state.key:
            return attributes.ATTR_EMPTY
        if not passive & attributes.SQL_OK:
            return attributes.PASSIVE_NO_RESULT
        localparent = state.manager.mapper
        if self.group:
            # collect every deferred property sharing our group so the
            # whole group loads in a single statement
            toload = [
                p.key
                for p in localparent.iterate_properties
                if isinstance(p, StrategizedProperty)
                and isinstance(p.strategy, DeferredColumnLoader)
                and p.group == self.group
            ]
        else:
            toload = [self.key]
        # narrow the keys down to just those which have no history
        group = [k for k in toload if k in state.unmodified]
        session = _state_session(state)
        if session is None:
            raise orm_exc.DetachedInstanceError(
                "Parent instance %s is not bound to a Session; "
                "deferred load operation of attribute '%s' cannot proceed"
                % (orm_util.state_str(state), self.key)
            )
        if self.raiseload:
            self._invoke_raise_load(state, passive, "raise")
        query = session.query(localparent)
        if (
            loading.load_on_ident(
                query, state.key, only_load_props=group, refresh_state=state
            )
            is None
        ):
            # identity no longer present in the database
            raise orm_exc.ObjectDeletedError(state)
        return attributes.ATTR_WAS_SET
    def _invoke_raise_load(self, state, passive, lazy):
        # raiseload=True: accessing the attribute is an error
        raise sa_exc.InvalidRequestError(
            "'%s' is not available due to raiseload=True" % (self,)
        )
class LoadDeferredColumns(object):
    """Serializable callable used by DeferredColumnLoader as the
    per-instance loader for a deferred column attribute.
    """
    def __init__(self, key, raiseload=False):
        self.key = key
        self.raiseload = raiseload
    def __call__(self, state, passive=attributes.PASSIVE_OFF):
        # resolve the deferred strategy for this property on the state's
        # mapper and delegate the actual load to it
        prop = state.manager.mapper._props[self.key]
        strategy_key = (("deferred", True), ("instrument", True))
        if self.raiseload:
            strategy_key += (("raiseload", True),)
        strategy = prop._get_strategy(strategy_key)
        return strategy._load_for_state(state, passive)
class AbstractRelationshipLoader(LoaderStrategy):
    """Base class for LoaderStrategies that deal with related objects."""
    __slots__ = "mapper", "target", "uselist", "entity"
    def __init__(self, parent, strategy_key):
        super(AbstractRelationshipLoader, self).__init__(parent, strategy_key)
        # memoize the commonly-used relationship attributes locally
        prop = self.parent_property
        self.mapper = prop.mapper
        self.entity = prop.entity
        self.target = prop.target
        self.uselist = prop.uselist
@log.class_logger
@properties.RelationshipProperty.strategy_for(do_nothing=True)
class DoNothingLoader(LoaderStrategy):
    """Relationship loader that leaves the object's state untouched.

    Unlike NoLoader, the collection/attribute is not initialized to
    empty/None here; the usual default LazyLoader remains in effect.
    """
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="noload")
@properties.RelationshipProperty.strategy_for(lazy=None)
class NoLoader(AbstractRelationshipLoader):
    """Loading behavior for a :class:`.RelationshipProperty` configured
    with "lazy=None" / "noload".
    """
    __slots__ = ()
    def init_class_attribute(self, mapper):
        self.is_class_level = True
        _register_attribute(
            self.parent_property,
            mapper,
            useobject=True,
            typecallable=self.parent_property.collection_class,
        )
    def create_row_processor(
        self, context, path, loadopt, mapper, result, adapter, populators
    ):
        # Initialize the attribute on each new instance rather than
        # leaving a loader in place: an empty collection for uselist
        # relationships, None for scalar ones.
        if self.uselist:
            def invoke_no_load(state, dict_, row):
                attributes.init_state_collection(state, dict_, self.key)
        else:
            def invoke_no_load(state, dict_, row):
                dict_[self.key] = None
        populators["new"].append((self.key, invoke_no_load))
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy=True)
@properties.RelationshipProperty.strategy_for(lazy="select")
@properties.RelationshipProperty.strategy_for(lazy="raise")
@properties.RelationshipProperty.strategy_for(lazy="raise_on_sql")
@properties.RelationshipProperty.strategy_for(lazy="baked_select")
class LazyLoader(AbstractRelationshipLoader, util.MemoizedSlots):
    """Provide loading behavior for a :class:`.RelationshipProperty`
    with "lazy=True", that is loads when first accessed.
    """
    __slots__ = (
        "_lazywhere",
        "_rev_lazywhere",
        "use_get",
        "is_aliased_class",
        "_bind_to_col",
        "_equated_columns",
        "_rev_bind_to_col",
        "_rev_equated_columns",
        "_simple_lazy_clause",
        "_raise_always",
        "_raise_on_sql",
        "_bakery",
    )
    def __init__(self, parent, strategy_key):
        super(LazyLoader, self).__init__(parent, strategy_key)
        # lazy="raise" raises on any lazy access; lazy="raise_on_sql"
        # raises only when a SQL statement would actually be emitted
        self._raise_always = self.strategy_opts["lazy"] == "raise"
        self._raise_on_sql = self.strategy_opts["lazy"] == "raise_on_sql"
        self.is_aliased_class = inspect(self.entity).is_aliased_class
        join_condition = self.parent_property._join_condition
        # forward and reverse-direction versions of the lazy clause plus
        # the bindparam-to-column mappings used to parameterize them
        (
            self._lazywhere,
            self._bind_to_col,
            self._equated_columns,
        ) = join_condition.create_lazy_clause()
        (
            self._rev_lazywhere,
            self._rev_bind_to_col,
            self._rev_equated_columns,
        ) = join_condition.create_lazy_clause(reverse_direction=True)
        self.logger.info("%s lazy loading clause %s", self, self._lazywhere)
        # determine if our "lazywhere" clause is the same as the mapper's
        # get() clause. then we can just use mapper.get()
        #
        # TODO: the "not self.uselist" can be taken out entirely; a m2o
        # load that populates for a list (very unusual, but is possible with
        # the API) can still set for "None" and the attribute system will
        # populate as an empty list.
        self.use_get = (
            not self.is_aliased_class
            and not self.uselist
            and self.entity._get_clause[0].compare(
                self._lazywhere,
                use_proxies=True,
                equivalents=self.mapper._equivalent_columns,
            )
        )
        if self.use_get:
            # expand _equated_columns to cover equivalent columns as well
            for col in list(self._equated_columns):
                if col in self.mapper._equivalent_columns:
                    for c in self.mapper._equivalent_columns[col]:
                        self._equated_columns[c] = self._equated_columns[col]
            self.logger.info(
                "%s will use query.get() to " "optimize instance loads", self
            )
    def init_class_attribute(self, mapper):
        """Install the class-level attribute with :meth:`._load_for_state`
        as its loader callable.
        """
        self.is_class_level = True
        active_history = (
            self.parent_property.active_history
            or self.parent_property.direction is not interfaces.MANYTOONE
            or not self.use_get
        )
        # MANYTOONE currently only needs the
        # "old" value for delete-orphan
        # cascades. the required _SingleParentValidator
        # will enable active_history
        # in that case. otherwise we don't need the
        # "old" value during backref operations.
        _register_attribute(
            self.parent_property,
            mapper,
            useobject=True,
            callable_=self._load_for_state,
            typecallable=self.parent_property.collection_class,
            active_history=active_history,
        )
    def _memoized_attr__simple_lazy_clause(self):
        # memoized: build the parameterized lazy clause along with the
        # (bind key, parent column, literal value) triples used to fill
        # it in per-state
        criterion, bind_to_col = (self._lazywhere, self._bind_to_col)
        params = []
        # first pass mutates the bindparams in place, marking them
        # non-unique so their keys are stable
        def visit_bindparam(bindparam):
            bindparam.unique = False
        visitors.traverse(criterion, {}, {"bindparam": visit_bindparam})
        # second definition (used on a cloned traversal) records which
        # bind keys correspond to parent columns vs. literal values
        def visit_bindparam(bindparam):
            if bindparam._identifying_key in bind_to_col:
                params.append(
                    (
                        bindparam.key,
                        bind_to_col[bindparam._identifying_key],
                        None,
                    )
                )
            elif bindparam.callable is None:
                params.append((bindparam.key, None, bindparam.value))
        criterion = visitors.cloned_traverse(
            criterion, {}, {"bindparam": visit_bindparam}
        )
        return criterion, params
    def _generate_lazy_clause(self, state, passive):
        """Return (criterion, params) for the lazy load of *state*,
        pulling current or committed attribute values per *passive*.
        """
        criterion, param_keys = self._simple_lazy_clause
        if state is None:
            # no parent state: render the criterion against NULL
            return sql_util.adapt_criterion_to_null(
                criterion, [key for key, ident, value in param_keys]
            )
        mapper = self.parent_property.parent
        o = state.obj()  # strong ref
        dict_ = attributes.instance_dict(o)
        if passive & attributes.INIT_OK:
            passive ^= attributes.INIT_OK
        params = {}
        for key, ident, value in param_keys:
            if ident is not None:
                if passive and passive & attributes.LOAD_AGAINST_COMMITTED:
                    value = mapper._get_committed_state_attr_by_column(
                        state, dict_, ident, passive
                    )
                else:
                    value = mapper._get_state_attr_by_column(
                        state, dict_, ident, passive
                    )
            params[key] = value
        return criterion, params
    def _invoke_raise_load(self, state, passive, lazy):
        raise sa_exc.InvalidRequestError(
            "'%s' is not available due to lazy='%s'" % (self, lazy)
        )
    def _load_for_state(self, state, passive):
        """Load the related value(s) for the given instance state,
        honoring the *passive* flags; may return a sentinel such as
        PASSIVE_NO_RESULT rather than emitting SQL.
        """
        if not state.key and (
            (
                not self.parent_property.load_on_pending
                and not state._load_pending
            )
            or not state.session_id
        ):
            return attributes.ATTR_EMPTY
        pending = not state.key
        primary_key_identity = None
        if (not passive & attributes.SQL_OK and not self.use_get) or (
            not passive & attributes.NON_PERSISTENT_OK and pending
        ):
            return attributes.PASSIVE_NO_RESULT
        if (
            # we were given lazy="raise"
            self._raise_always
            # the no_raise history-related flag was not passed
            and not passive & attributes.NO_RAISE
            and (
                # if we are use_get and related_object_ok is disabled,
                # which means we are at most looking in the identity map
                # for history purposes or otherwise returning
                # PASSIVE_NO_RESULT, don't raise. This is also a
                # history-related flag
                not self.use_get
                or passive & attributes.RELATED_OBJECT_OK
            )
        ):
            self._invoke_raise_load(state, passive, "raise")
        session = _state_session(state)
        if not session:
            if passive & attributes.NO_RAISE:
                return attributes.PASSIVE_NO_RESULT
            raise orm_exc.DetachedInstanceError(
                "Parent instance %s is not bound to a Session; "
                "lazy load operation of attribute '%s' cannot proceed"
                % (orm_util.state_str(state), self.key)
            )
        # if we have a simple primary key load, check the
        # identity map without generating a Query at all
        if self.use_get:
            primary_key_identity = self._get_ident_for_use_get(
                session, state, passive
            )
            if attributes.PASSIVE_NO_RESULT in primary_key_identity:
                return attributes.PASSIVE_NO_RESULT
            elif attributes.NEVER_SET in primary_key_identity:
                return attributes.NEVER_SET
            if _none_set.issuperset(primary_key_identity):
                # all-None primary key: related object cannot exist
                return None
            if self.key in state.dict:
                return attributes.ATTR_WAS_SET
            # look for this identity in the identity map. Delegate to the
            # Query class in use, as it may have special rules for how it
            # does this, including how it decides what the correct
            # identity_token would be for this identity.
            instance = session._identity_lookup(
                self.entity,
                primary_key_identity,
                passive=passive,
                lazy_loaded_from=state,
            )
            if instance is not None:
                return instance
            elif (
                not passive & attributes.SQL_OK
                or not passive & attributes.RELATED_OBJECT_OK
            ):
                return attributes.PASSIVE_NO_RESULT
        return self._emit_lazyload(
            session, state, primary_key_identity, passive
        )
    def _get_ident_for_use_get(self, session, state, passive):
        """Return the primary key identity values for a use_get load,
        read from the parent state's equated columns.
        """
        instance_mapper = state.manager.mapper
        if passive & attributes.LOAD_AGAINST_COMMITTED:
            get_attr = instance_mapper._get_committed_state_attr_by_column
        else:
            get_attr = instance_mapper._get_state_attr_by_column
        dict_ = state.dict
        return [
            get_attr(state, dict_, self._equated_columns[pk], passive=passive)
            for pk in self.mapper.primary_key
        ]
    @util.dependencies("sqlalchemy.ext.baked")
    def _memoized_attr__bakery(self, baked):
        # per-strategy LRU cache of baked queries
        return baked.bakery(size=50)
    @util.dependencies("sqlalchemy.orm.strategy_options")
    def _emit_lazyload(
        self, strategy_options, session, state, primary_key_identity, passive
    ):
        # emit lazy load now using BakedQuery, to cut way down on the overhead
        # of generating queries.
        # there are two big things we are trying to guard against here:
        #
        # 1. two different lazy loads that need to have a different result,
        # being cached on the same key. The results between two lazy loads
        # can be different due to the options passed to the query, which
        # take effect for descendant objects. Therefore we have to make
        # sure paths and load options generate good cache keys, and if they
        # don't, we don't cache.
        # 2. a lazy load that gets cached on a key that includes some
        # "throwaway" object, like a per-query AliasedClass, meaning
        # the cache key will never be seen again and the cache itself
        # will fill up. (the cache is an LRU cache, so while we won't
        # run out of memory, it will perform terribly when it's full. A
        # warning is emitted if this occurs.) We must prevent the
        # generation of a cache key that is including a throwaway object
        # in the key.
        # note that "lazy='select'" and "lazy=True" make two separate
        # lazy loaders. Currently the LRU cache is local to the LazyLoader,
        # however add ourselves to the initial cache key just to future
        # proof in case it moves
        q = self._bakery(lambda session: session.query(self.entity), self)
        q.add_criteria(
            lambda q: q._adapt_all_clauses()._with_invoke_all_eagers(False),
            self.parent_property,
        )
        if not self.parent_property.bake_queries:
            # caching disabled for this relationship
            q.spoil(full=True)
        if self.parent_property.secondary is not None:
            q.add_criteria(
                lambda q: q.select_from(
                    self.mapper, self.parent_property.secondary
                )
            )
        pending = not state.key
        # don't autoflush on pending
        if pending or passive & attributes.NO_AUTOFLUSH:
            q.add_criteria(lambda q: q.autoflush(False))
        if state.load_options:
            # here, if any of the options cannot return a cache key,
            # the BakedQuery "spoils" and caching will not occur. a path
            # that features Cls.attribute.of_type(some_alias) will cancel
            # caching, for example, since "some_alias" is user-defined and
            # is usually a throwaway object.
            effective_path = state.load_path[self.parent_property]
            q._add_lazyload_options(state.load_options, effective_path)
        if self.use_get:
            if self._raise_on_sql:
                self._invoke_raise_load(state, passive, "raise_on_sql")
            # simple primary-key lookup path
            return (
                q(session)
                .with_post_criteria(lambda q: q._set_lazyload_from(state))
                ._load_on_pk_identity(
                    session.query(self.mapper), primary_key_identity
                )
            )
        if self.parent_property.order_by:
            q.add_criteria(
                lambda q: q.order_by(
                    *util.to_list(self.parent_property.order_by)
                )
            )
        for rev in self.parent_property._reverse_property:
            # reverse props that are MANYTOONE are loading *this*
            # object from get(), so don't need to eager out to those.
            if (
                rev.direction is interfaces.MANYTOONE
                and rev._use_get
                and not isinstance(rev.strategy, LazyLoader)
            ):
                q.add_criteria(
                    lambda q: q.options(
                        strategy_options.Load.for_existing_path(
                            q._current_path[rev.parent]
                        ).lazyload(rev.key)
                    )
                )
        lazy_clause, params = self._generate_lazy_clause(state, passive)
        if self.key in state.dict:
            # attribute was populated as a side effect of generating the
            # lazy clause (attribute access can be re-entrant)
            return attributes.ATTR_WAS_SET
        if pending:
            # pending objects with unset/never-set params can't match
            if util.has_intersection(orm_util._none_set, params.values()):
                return None
            elif util.has_intersection(orm_util._never_set, params.values()):
                return None
        if self._raise_on_sql:
            self._invoke_raise_load(state, passive, "raise_on_sql")
        q.add_criteria(lambda q: q.filter(lazy_clause))
        # set parameters in the query such that we don't overwrite
        # parameters that are already set within it
        def set_default_params(q):
            params.update(q._params)
            q._params = params
            return q
        result = (
            q(session)
            .with_post_criteria(lambda q: q._set_lazyload_from(state))
            .with_post_criteria(set_default_params)
            .all()
        )
        if self.uselist:
            return result
        else:
            l = len(result)
            if l:
                if l > 1:
                    util.warn(
                        "Multiple rows returned with "
                        "uselist=False for lazily-loaded attribute '%s' "
                        % self.parent_property
                    )
                return result[0]
            else:
                return None
    def create_row_processor(
        self, context, path, loadopt, mapper, result, adapter, populators
    ):
        key = self.key
        if not self.is_class_level:
            # we are not the primary manager for this attribute
            # on this class - set up a
            # per-instance lazyloader, which will override the
            # class-level behavior.
            # this currently only happens when using a
            # "lazyload" option on a "no load"
            # attribute - "eager" attributes always have a
            # class-level lazyloader installed.
            set_lazy_callable = (
                InstanceState._instance_level_callable_processor
            )(mapper.class_manager, LoadLazyAttribute(key, self), key)
            populators["new"].append((self.key, set_lazy_callable))
        elif context.populate_existing or mapper.always_refresh:
            def reset_for_lazy_callable(state, dict_, row):
                # we are the primary manager for this attribute on
                # this class - reset its
                # per-instance attribute state, so that the class-level
                # lazy loader is
                # executed when next referenced on this instance.
                # this is needed in
                # populate_existing() types of scenarios to reset
                # any existing state.
                state._reset(dict_, key)
            populators["new"].append((self.key, reset_for_lazy_callable))
class LoadLazyAttribute(object):
    """Serializable callable used by LazyLoader as the per-instance
    loader for a lazily-loaded relationship attribute.
    """
    def __init__(self, key, initiating_strategy):
        self.key = key
        self.strategy_key = initiating_strategy.strategy_key
    def __call__(self, state, passive=attributes.PASSIVE_OFF):
        # look up the originating strategy on the state's mapper and
        # delegate the load to it
        prop = state.manager.mapper._props[self.key]
        strategy = prop._strategies[self.strategy_key]
        return strategy._load_for_state(state, passive)
class PostLoader(AbstractRelationshipLoader):
    """A relationship loader that emits a second SELECT statement."""
    def _immediateload_create_row_processor(
        self, context, path, loadopt, mapper, result, adapter, populators
    ):
        # fall back to the "immediate" load strategy for row processing
        immediate = self.parent_property._get_strategy(
            (("lazy", "immediate"),)
        )
        return immediate.create_row_processor(
            context, path, loadopt, mapper, result, adapter, populators
        )
@properties.RelationshipProperty.strategy_for(lazy="immediate")
class ImmediateLoader(PostLoader):
    """Load the related attribute as soon as the parent row is
    processed, by invoking the attribute's getter in a "delayed"
    populator.
    """
    __slots__ = ()
    def init_class_attribute(self, mapper):
        # class-level attribute setup is delegated to the plain
        # lazy="select" strategy
        strategy = self.parent_property._get_strategy((("lazy", "select"),))
        strategy.init_class_attribute(mapper)
    def setup_query(
        self,
        context,
        entity,
        path,
        loadopt,
        adapter,
        column_collection=None,
        parentmapper=None,
        **kwargs
    ):
        # nothing is added to the main query; the load happens per-row
        pass
    def create_row_processor(
        self, context, path, loadopt, mapper, result, adapter, populators
    ):
        def _fetch_immediately(state, dict_, row):
            state.get_impl(self.key).get(state, dict_)
        populators["delayed"].append((self.key, _fetch_immediately))
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="subquery")
class SubqueryLoader(PostLoader):
__slots__ = ("join_depth",)
def __init__(self, parent, strategy_key):
super(SubqueryLoader, self).__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def setup_query(
self,
context,
entity,
path,
loadopt,
adapter,
column_collection=None,
parentmapper=None,
**kwargs
):
if not context.query._enable_eagerloads or context.refresh_state:
return
elif context.query._yield_per:
context.query._no_yield_per("subquery")
path = path[self.parent_property]
# build up a path indicating the path from the leftmost
# entity to the thing we're subquery loading.
with_poly_info = path.get(
context.attributes, "path_with_polymorphic", None
)
if with_poly_info is not None:
effective_entity = with_poly_info.entity
else:
effective_entity = self.entity
subq_path = context.attributes.get(
("subquery_path", None), orm_util.PathRegistry.root
)
subq_path = subq_path + path
# if not via query option, check for
# a cycle
if not path.contains(context.attributes, "loader"):
if self.join_depth:
if (
(
context.query._current_path.length
if context.query._current_path
else 0
)
+ path.length
) / 2 > self.join_depth:
return
elif subq_path.contains_mapper(self.mapper):
return
(
leftmost_mapper,
leftmost_attr,
leftmost_relationship,
) = self._get_leftmost(subq_path)
orig_query = context.attributes.get(
("orig_query", SubqueryLoader), context.query
)
# generate a new Query from the original, then
# produce a subquery from it.
left_alias = self._generate_from_original_query(
orig_query,
leftmost_mapper,
leftmost_attr,
leftmost_relationship,
entity.entity_zero,
)
# generate another Query that will join the
# left alias to the target relationships.
# basically doing a longhand
# "from_self()". (from_self() itself not quite industrial
# strength enough for all contingencies...but very close)
q = orig_query.session.query(effective_entity)
q._attributes = {
("orig_query", SubqueryLoader): orig_query,
("subquery_path", None): subq_path,
}
q = q._set_enable_single_crit(False)
to_join, local_attr, parent_alias = self._prep_for_joins(
left_alias, subq_path
)
q = q.order_by(*local_attr)
q = q.add_columns(*local_attr)
q = self._apply_joins(
q, to_join, left_alias, parent_alias, effective_entity
)
q = self._setup_options(q, subq_path, orig_query, effective_entity)
q = self._setup_outermost_orderby(q)
# add new query to attributes to be picked up
# by create_row_processor
path.set(context.attributes, "subquery", q)
def _get_leftmost(self, subq_path):
subq_path = subq_path.path
subq_mapper = orm_util._class_to_mapper(subq_path[0])
# determine attributes of the leftmost mapper
if (
self.parent.isa(subq_mapper)
and self.parent_property is subq_path[1]
):
leftmost_mapper, leftmost_prop = self.parent, self.parent_property
else:
leftmost_mapper, leftmost_prop = subq_mapper, subq_path[1]
leftmost_cols = leftmost_prop.local_columns
leftmost_attr = [
getattr(
subq_path[0].entity, leftmost_mapper._columntoproperty[c].key
)
for c in leftmost_cols
]
return leftmost_mapper, leftmost_attr, leftmost_prop
def _generate_from_original_query(
self,
orig_query,
leftmost_mapper,
leftmost_attr,
leftmost_relationship,
orig_entity,
):
# reformat the original query
# to look only for significant columns
q = orig_query._clone().correlate(None)
# set the query's "FROM" list explicitly to what the
# FROM list would be in any case, as we will be limiting
# the columns in the SELECT list which may no longer include
# all entities mentioned in things like WHERE, JOIN, etc.
if not q._from_obj:
q._set_select_from(
list(
set(
[
ent["entity"]
for ent in orig_query.column_descriptions
if ent["entity"] is not None
]
)
),
False,
)
# select from the identity columns of the outer (specifically, these
# are the 'local_cols' of the property). This will remove
# other columns from the query that might suggest the right entity
# which is why we do _set_select_from above.
target_cols = q._adapt_col_list(leftmost_attr)
q._set_entities(target_cols)
distinct_target_key = leftmost_relationship.distinct_target_key
if distinct_target_key is True:
q._distinct = True
elif distinct_target_key is None:
# if target_cols refer to a non-primary key or only
# part of a composite primary key, set the q as distinct
for t in set(c.table for c in target_cols):
if not set(target_cols).issuperset(t.primary_key):
q._distinct = True
break
if q._order_by is False:
q._order_by = leftmost_mapper.order_by
# don't need ORDER BY if no limit/offset
if q._limit is None and q._offset is None:
q._order_by = None
# the original query now becomes a subquery
# which we'll join onto.
embed_q = q.with_labels().subquery()
left_alias = orm_util.AliasedClass(
leftmost_mapper, embed_q, use_mapper_path=True
)
return left_alias
def _prep_for_joins(self, left_alias, subq_path):
# figure out what's being joined. a.k.a. the fun part
to_join = []
pairs = list(subq_path.pairs())
for i, (mapper, prop) in enumerate(pairs):
if i > 0:
# look at the previous mapper in the chain -
# if it is as or more specific than this prop's
# mapper, use that instead.
# note we have an assumption here that
# the non-first element is always going to be a mapper,
# not an AliasedClass
prev_mapper = pairs[i - 1][1].mapper
to_append = prev_mapper if prev_mapper.isa(mapper) else mapper
else:
to_append = mapper
to_join.append((to_append, prop.key))
# determine the immediate parent class we are joining from,
# which needs to be aliased.
if len(to_join) < 2:
# in the case of a one level eager load, this is the
# leftmost "left_alias".
parent_alias = left_alias
else:
info = inspect(to_join[-1][0])
if info.is_aliased_class:
parent_alias = info.entity
else:
# alias a plain mapper as we may be
# joining multiple times
parent_alias = orm_util.AliasedClass(
info.entity, use_mapper_path=True
)
local_cols = self.parent_property.local_columns
local_attr = [
getattr(parent_alias, self.parent._columntoproperty[c].key)
for c in local_cols
]
return to_join, local_attr, parent_alias
def _apply_joins(
self, q, to_join, left_alias, parent_alias, effective_entity
):
ltj = len(to_join)
if ltj == 1:
to_join = [
getattr(left_alias, to_join[0][1]).of_type(effective_entity)
]
elif ltj == 2:
to_join = [
getattr(left_alias, to_join[0][1]).of_type(parent_alias),
getattr(parent_alias, to_join[-1][1]).of_type(
effective_entity
),
]
elif ltj > 2:
middle = [
(
orm_util.AliasedClass(item[0])
if not inspect(item[0]).is_aliased_class
else item[0].entity,
item[1],
)
for item in to_join[1:-1]
]
inner = []
while middle:
item = middle.pop(0)
attr = getattr(item[0], item[1])
if middle:
attr = attr.of_type(middle[0][0])
else:
attr = attr.of_type(parent_alias)
inner.append(attr)
to_join = (
[getattr(left_alias, to_join[0][1]).of_type(inner[0].parent)]
+ inner
+ [
getattr(parent_alias, to_join[-1][1]).of_type(
effective_entity
)
]
)
for attr in to_join:
q = q.join(attr, from_joinpoint=True)
return q
def _setup_options(self, q, subq_path, orig_query, effective_entity):
# propagate loader options etc. to the new query.
# these will fire relative to subq_path.
q = q._with_current_path(subq_path)
q = q._conditional_options(*orig_query._with_options)
if orig_query._populate_existing:
q._populate_existing = orig_query._populate_existing
return q
def _setup_outermost_orderby(self, q):
if self.parent_property.order_by:
# if there's an ORDER BY, alias it the same
# way joinedloader does, but we have to pull out
# the "eagerjoin" from the query.
# this really only picks up the "secondary" table
# right now.
eagerjoin = q._from_obj[0]
eager_order_by = eagerjoin._target_adapter.copy_and_process(
util.to_list(self.parent_property.order_by)
)
q = q.order_by(*eager_order_by)
return q
class _SubqCollections(object):
"""Given a :class:`.Query` used to emit the "subquery load",
provide a load interface that executes the query at the
first moment a value is needed.
"""
_data = None
def __init__(self, subq):
self.subq = subq
def get(self, key, default):
if self._data is None:
self._load()
return self._data.get(key, default)
def _load(self):
self._data = dict(
(k, [vv[0] for vv in v])
for k, v in itertools.groupby(self.subq, lambda x: x[1:])
)
def loader(self, state, dict_, row):
if self._data is None:
self._load()
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
if context.refresh_state:
return self._immediateload_create_row_processor(
context, path, loadopt, mapper, result, adapter, populators
)
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." % self
)
path = path[self.parent_property]
subq = path.get(context.attributes, "subquery")
if subq is None:
return
assert subq.session is context.session, (
"Subquery session doesn't refer to that of "
"our context. Are there broken context caching "
"schemes being used?"
)
local_cols = self.parent_property.local_columns
# cache the loaded collections in the context
# so that inheriting mappers don't re-load when they
# call upon create_row_processor again
collections = path.get(context.attributes, "collections")
if collections is None:
collections = self._SubqCollections(subq)
path.set(context.attributes, "collections", collections)
if adapter:
local_cols = [adapter.columns[c] for c in local_cols]
if self.uselist:
self._create_collection_loader(
context, result, collections, local_cols, populators
)
else:
self._create_scalar_loader(
context, result, collections, local_cols, populators
)
def _create_collection_loader(
    self, context, result, collections, local_cols, populators
):
    """Register populators that set a list-valued attribute from the
    grouped subquery results, keyed by the parent's ``local_cols``
    values in the row.
    """
    tuple_getter = result._tuple_getter(local_cols)

    def load_collection_from_subq(state, dict_, row):
        # look up this parent's related rows; default to empty collection
        collection = collections.get(tuple_getter(row), ())
        state.get_impl(self.key).set_committed_value(
            state, dict_, collection
        )

    def load_collection_from_subq_existing_row(state, dict_, row):
        # for an already-seen instance, only populate if not yet loaded
        if self.key not in dict_:
            load_collection_from_subq(state, dict_, row)

    populators["new"].append((self.key, load_collection_from_subq))
    populators["existing"].append(
        (self.key, load_collection_from_subq_existing_row)
    )
    if context.invoke_all_eagers:
        # ensure the subquery executes even when no row needed it yet
        populators["eager"].append((self.key, collections.loader))
def _create_scalar_loader(
    self, context, result, collections, local_cols, populators
):
    """Register populators for a scalar (uselist=False) attribute loaded
    via subquery; warns if the subquery yields more than one row per
    parent.
    """
    tuple_getter = result._tuple_getter(local_cols)

    def load_scalar_from_subq(state, dict_, row):
        # default to a one-element (None,) tuple so collection[0] below
        # yields None when no related row exists
        collection = collections.get(tuple_getter(row), (None,))
        if len(collection) > 1:
            util.warn(
                "Multiple rows returned with "
                "uselist=False for eagerly-loaded attribute '%s' " % self
            )
        scalar = collection[0]
        state.get_impl(self.key).set_committed_value(state, dict_, scalar)

    def load_scalar_from_subq_existing_row(state, dict_, row):
        # only populate an already-seen instance if not yet loaded
        if self.key not in dict_:
            load_scalar_from_subq(state, dict_, row)

    populators["new"].append((self.key, load_scalar_from_subq))
    populators["existing"].append(
        (self.key, load_scalar_from_subq_existing_row)
    )
    if context.invoke_all_eagers:
        # ensure the subquery executes even when no row needed it yet
        populators["eager"].append((self.key, collections.loader))
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="joined")
@properties.RelationshipProperty.strategy_for(lazy=False)
class JoinedLoader(AbstractRelationshipLoader):
    """Provide loading behavior for a :class:`.RelationshipProperty`
    using joined eager loading.
    """

    __slots__ = "join_depth", "_aliased_class_pool"

    def __init__(self, parent, strategy_key):
        super(JoinedLoader, self).__init__(parent, strategy_key)
        self.join_depth = self.parent_property.join_depth
        # pool of AliasedClass objects re-used across queries; see
        # _gen_pooled_aliased_class
        self._aliased_class_pool = []

    def init_class_attribute(self, mapper):
        # class-level attribute instrumentation still lazy-loads when
        # accessed outside of a joined-eager query
        self.parent_property._get_strategy(
            (("lazy", "select"),)
        ).init_class_attribute(mapper)

    def setup_query(
        self,
        context,
        query_entity,
        path,
        loadopt,
        adapter,
        column_collection=None,
        parentmapper=None,
        chained_from_outerjoin=False,
        **kwargs
    ):
        """Add a left outer join to the statement that's being constructed."""
        if not context.query._enable_eagerloads:
            return
        elif context.query._yield_per and self.uselist:
            # yield_per cannot be combined with collection eager loads
            context.query._no_yield_per("joined collection")
        path = path[self.parent_property]
        with_polymorphic = None
        # a user-supplied contains_eager() alias takes precedence over
        # generating our own aliased join
        user_defined_adapter = (
            self._init_user_defined_eager_proc(loadopt, context)
            if loadopt
            else False
        )
        if user_defined_adapter is not False:
            (
                clauses,
                adapter,
                add_to_collection,
            ) = self._setup_query_on_user_defined_adapter(
                context, query_entity, path, adapter, user_defined_adapter
            )
        else:
            # if not via query option, check for
            # a cycle
            if not path.contains(context.attributes, "loader"):
                if self.join_depth:
                    # path length counts (mapper, property) pairs, hence /2
                    if path.length / 2 > self.join_depth:
                        return
                elif path.contains_mapper(self.mapper):
                    return
            (
                clauses,
                adapter,
                add_to_collection,
                chained_from_outerjoin,
            ) = self._generate_row_adapter(
                context,
                query_entity,
                path,
                loadopt,
                adapter,
                column_collection,
                parentmapper,
                chained_from_outerjoin,
            )
        with_poly_info = path.get(
            context.attributes, "path_with_polymorphic", None
        )
        if with_poly_info is not None:
            with_polymorphic = with_poly_info.with_polymorphic_mappers
        else:
            with_polymorphic = None
        path = path[self.entity]
        # recursively set up columns (and further eager loads) for the
        # target entity of the join
        loading._setup_entity_query(
            context,
            self.mapper,
            query_entity,
            path,
            clauses,
            add_to_collection,
            with_polymorphic=with_polymorphic,
            parentmapper=self.mapper,
            chained_from_outerjoin=chained_from_outerjoin,
        )
        if with_poly_info is not None and None in set(
            context.secondary_columns
        ):
            raise sa_exc.InvalidRequestError(
                "Detected unaliased columns when generating joined "
                "load. Make sure to use aliased=True or flat=True "
                "when using joined loading with with_polymorphic()."
            )

    def _init_user_defined_eager_proc(self, loadopt, context):
        """Return (and memoize on the path) the row adapter for a
        ``contains_eager()`` option, or ``False`` if the option does
        not apply here.
        """
        # check if the opt applies at all
        if "eager_from_alias" not in loadopt.local_opts:
            # nope
            return False
        path = loadopt.path.parent
        # the option applies. check if the "user_defined_eager_row_processor"
        # has been built up.
        adapter = path.get(
            context.attributes, "user_defined_eager_row_processor", False
        )
        if adapter is not False:
            # just return it
            return adapter
        # otherwise figure it out.
        alias = loadopt.local_opts["eager_from_alias"]
        root_mapper, prop = path[-2:]
        if alias is not None:
            if isinstance(alias, str):
                # a string alias name: alias the target table ourselves
                alias = prop.target.alias(alias)
            adapter = sql_util.ColumnAdapter(
                alias, equivalents=prop.mapper._equivalent_columns
            )
        else:
            if path.contains(context.attributes, "path_with_polymorphic"):
                with_poly_info = path.get(
                    context.attributes, "path_with_polymorphic"
                )
                adapter = orm_util.ORMAdapter(
                    with_poly_info.entity,
                    equivalents=prop.mapper._equivalent_columns,
                )
            else:
                # may be None if no adaption is needed
                adapter = context.query._polymorphic_adapters.get(
                    prop.mapper, None
                )
        path.set(
            context.attributes, "user_defined_eager_row_processor", adapter
        )
        return adapter

    def _setup_query_on_user_defined_adapter(
        self, context, entity, path, adapter, user_defined_adapter
    ):
        """Combine a contains_eager() adapter with any query-level
        adapter and record the result on the path; primary columns are
        used since the user supplied the join themselves.
        """
        # apply some more wrapping to the "user defined adapter"
        # if we are setting up the query for SQL render.
        adapter = entity._get_entity_clauses(context.query, context)
        if adapter and user_defined_adapter:
            user_defined_adapter = user_defined_adapter.wrap(adapter)
            path.set(
                context.attributes,
                "user_defined_eager_row_processor",
                user_defined_adapter,
            )
        elif adapter:
            user_defined_adapter = adapter
            path.set(
                context.attributes,
                "user_defined_eager_row_processor",
                user_defined_adapter,
            )
        add_to_collection = context.primary_columns
        return user_defined_adapter, adapter, add_to_collection

    def _gen_pooled_aliased_class(self, context):
        """Return a re-usable AliasedClass for this strategy, one per
        appearance of the entity in the current query.
        """
        # keep a local pool of AliasedClass objects that get re-used.
        # we need one unique AliasedClass per query per appearance of our
        # entity in the query.
        if inspect(self.entity).is_aliased_class:
            alt_selectable = inspect(self.entity).selectable
        else:
            alt_selectable = None
        key = ("joinedloader_ac", self)
        if key not in context.attributes:
            context.attributes[key] = idx = 0
        else:
            context.attributes[key] = idx = context.attributes[key] + 1
        if idx >= len(self._aliased_class_pool):
            to_adapt = orm_util.AliasedClass(
                self.mapper,
                alias=alt_selectable.alias(flat=True)
                if alt_selectable is not None
                else None,
                flat=True,
                use_mapper_path=True,
            )
            # load up the .columns collection on the Alias() before
            # the object becomes shared among threads. this prevents
            # races for column identities.
            inspect(to_adapt).selectable.c
            self._aliased_class_pool.append(to_adapt)
        return self._aliased_class_pool[idx]

    def _generate_row_adapter(
        self,
        context,
        entity,
        path,
        loadopt,
        adapter,
        column_collection,
        parentmapper,
        chained_from_outerjoin,
    ):
        """Create the ORMAdapter for the aliased join target and queue
        the eager join for creation at statement-compile time.
        """
        with_poly_info = path.get(
            context.attributes, "path_with_polymorphic", None
        )
        if with_poly_info:
            to_adapt = with_poly_info.entity
        else:
            to_adapt = self._gen_pooled_aliased_class(context)
        # memoized per (strategy, aliased class) so repeated setup reuses
        # the same adapter
        clauses = inspect(to_adapt)._memo(
            ("joinedloader_ormadapter", self),
            orm_util.ORMAdapter,
            to_adapt,
            equivalents=self.mapper._equivalent_columns,
            adapt_required=True,
            allow_label_resolve=False,
            anonymize_labels=True,
        )
        assert clauses.aliased_class is not None
        if self.parent_property.uselist:
            # flag so the Query knows LIMIT/OFFSET need a subquery wrap
            context.multi_row_eager_loaders = True
        innerjoin = (
            loadopt.local_opts.get("innerjoin", self.parent_property.innerjoin)
            if loadopt is not None
            else self.parent_property.innerjoin
        )
        if not innerjoin:
            # if this is an outer join, all non-nested eager joins from
            # this path must also be outer joins
            chained_from_outerjoin = True
        context.create_eager_joins.append(
            (
                self._create_eager_join,
                context,
                entity,
                path,
                adapter,
                parentmapper,
                clauses,
                innerjoin,
                chained_from_outerjoin,
            )
        )
        # eagerly-joined columns are "secondary": not part of the primary
        # entity's column list
        add_to_collection = context.secondary_columns
        path.set(context.attributes, "eager_row_processor", clauses)
        return clauses, adapter, add_to_collection, chained_from_outerjoin

    def _create_eager_join(
        self,
        context,
        query_entity,
        path,
        adapter,
        parentmapper,
        clauses,
        innerjoin,
        chained_from_outerjoin,
    ):
        """Build the actual JOIN construct and store it in
        ``context.eager_joins``; called at statement compile time.
        """
        if parentmapper is None:
            localparent = query_entity.mapper
        else:
            localparent = parentmapper
        # whether or not the Query will wrap the selectable in a subquery,
        # and then attach eager load joins to that (i.e., in the case of
        # LIMIT/OFFSET etc.)
        should_nest_selectable = (
            context.multi_row_eager_loaders
            and context.query._should_nest_selectable
        )
        query_entity_key = None
        if (
            query_entity not in context.eager_joins
            and not should_nest_selectable
            and context.from_clause
        ):
            indexes = sql_util.find_left_clause_that_matches_given(
                context.from_clause, query_entity.selectable
            )
            if len(indexes) > 1:
                # for the eager load case, I can't reproduce this right
                # now. For query.join() I can.
                raise sa_exc.InvalidRequestError(
                    "Can't identify which query entity in which to joined "
                    "eager load from. Please use an exact match when "
                    "specifying the join path."
                )
            if indexes:
                clause = context.from_clause[indexes[0]]
                # join to an existing FROM clause on the query.
                # key it to its list index in the eager_joins dict.
                # Query._compile_context will adapt as needed and
                # append to the FROM clause of the select().
                query_entity_key, default_towrap = indexes[0], clause
        if query_entity_key is None:
            query_entity_key, default_towrap = (
                query_entity,
                query_entity.selectable,
            )
        towrap = context.eager_joins.setdefault(
            query_entity_key, default_towrap
        )
        if adapter:
            if getattr(adapter, "aliased_class", None):
                # joining from an adapted entity. The adapted entity
                # might be a "with_polymorphic", so resolve that to our
                # specific mapper's entity before looking for our attribute
                # name on it.
                efm = inspect(adapter.aliased_class)._entity_for_mapper(
                    localparent
                    if localparent.isa(self.parent)
                    else self.parent
                )
                # look for our attribute on the adapted entity, else fall back
                # to our straight property
                onclause = getattr(efm.entity, self.key, self.parent_property)
            else:
                onclause = getattr(
                    orm_util.AliasedClass(
                        self.parent, adapter.selectable, use_mapper_path=True
                    ),
                    self.key,
                    self.parent_property,
                )
        else:
            onclause = self.parent_property
        assert clauses.aliased_class is not None
        attach_on_outside = (
            not chained_from_outerjoin
            or not innerjoin
            or innerjoin == "unnested"
            or query_entity.entity_zero.represents_outer_join
        )
        if attach_on_outside:
            # this is the "classic" eager join case.
            eagerjoin = orm_util._ORMJoin(
                towrap,
                clauses.aliased_class,
                onclause,
                isouter=not innerjoin
                or query_entity.entity_zero.represents_outer_join
                or (chained_from_outerjoin and isinstance(towrap, sql.Join)),
                _left_memo=self.parent,
                _right_memo=self.mapper,
            )
        else:
            # all other cases are innerjoin=='nested' approach
            eagerjoin = self._splice_nested_inner_join(
                path, towrap, clauses, onclause
            )
        context.eager_joins[query_entity_key] = eagerjoin
        # send a hint to the Query as to where it may "splice" this join
        eagerjoin.stop_on = query_entity.selectable
        if not parentmapper:
            # for parentclause that is the non-eager end of the join,
            # ensure all the parent cols in the primaryjoin are actually
            # in the
            # columns clause (i.e. are not deferred), so that aliasing applied
            # by the Query propagates those columns outward.
            # This has the effect
            # of "undefering" those columns.
            for col in sql_util._find_columns(
                self.parent_property.primaryjoin
            ):
                if localparent.persist_selectable.c.contains_column(col):
                    if adapter:
                        col = adapter.columns[col]
                    context.primary_columns.append(col)
        if self.parent_property.order_by:
            # apply the relationship's order_by, adapted to the aliased
            # join target
            context.eager_order_by += (
                eagerjoin._target_adapter.copy_and_process
            )(util.to_list(self.parent_property.order_by))

    def _splice_nested_inner_join(
        self, path, join_obj, clauses, onclause, splicing=False
    ):
        """Recursively descend an existing join structure to attach an
        inner join at the correct nesting point (innerjoin='nested').

        Returns the new join, or None when recursing past a branch that
        does not contain the splice point.
        """
        if splicing is False:
            # first call is always handed a join object
            # from the outside
            assert isinstance(join_obj, orm_util._ORMJoin)
        elif isinstance(join_obj, sql.selectable.FromGrouping):
            # unwrap parenthesized groupings and recurse
            return self._splice_nested_inner_join(
                path, join_obj.element, clauses, onclause, splicing
            )
        elif not isinstance(join_obj, orm_util._ORMJoin):
            if path[-2] is splicing:
                # found the attachment point: join our target here
                return orm_util._ORMJoin(
                    join_obj,
                    clauses.aliased_class,
                    onclause,
                    isouter=False,
                    _left_memo=splicing,
                    _right_memo=path[-1].mapper,
                )
            else:
                # only here if splicing == True
                return None
        # try the right side first, then the left
        target_join = self._splice_nested_inner_join(
            path, join_obj.right, clauses, onclause, join_obj._right_memo
        )
        if target_join is None:
            right_splice = False
            target_join = self._splice_nested_inner_join(
                path, join_obj.left, clauses, onclause, join_obj._left_memo
            )
            if target_join is None:
                # should only return None when recursively called,
                # e.g. splicing==True
                assert (
                    splicing is not False
                ), "assertion failed attempting to produce joined eager loads"
                return None
        else:
            right_splice = True
        if right_splice:
            # for a right splice, attempt to flatten out
            # a JOIN b JOIN c JOIN .. to avoid needless
            # parenthesis nesting
            if not join_obj.isouter and not target_join.isouter:
                eagerjoin = join_obj._splice_into_center(target_join)
            else:
                eagerjoin = orm_util._ORMJoin(
                    join_obj.left,
                    target_join,
                    join_obj.onclause,
                    isouter=join_obj.isouter,
                    _left_memo=join_obj._left_memo,
                )
        else:
            eagerjoin = orm_util._ORMJoin(
                target_join,
                join_obj.right,
                join_obj.onclause,
                isouter=join_obj.isouter,
                _right_memo=join_obj._right_memo,
            )
        # propagate the adapter of the spliced-in join outward
        eagerjoin._target_adapter = target_join._target_adapter
        return eagerjoin

    def _create_eager_adapter(self, context, result, adapter, path, loadopt):
        """Return the row adapter used to read eagerly-joined columns
        from the result, or False to degrade to lazy loading.
        """
        user_defined_adapter = (
            self._init_user_defined_eager_proc(loadopt, context)
            if loadopt
            else False
        )
        if user_defined_adapter is not False:
            decorator = user_defined_adapter
            # user defined eagerloads are part of the "primary"
            # portion of the load.
            # the adapters applied to the Query should be honored.
            if context.adapter and decorator:
                decorator = decorator.wrap(context.adapter)
            elif context.adapter:
                decorator = context.adapter
        else:
            decorator = path.get(context.attributes, "eager_row_processor")
            if decorator is None:
                return False
        if self.mapper._result_has_identity_key(result, decorator):
            return decorator
        else:
            # no identity key - don't return a row
            # processor, will cause a degrade to lazy
            return False

    def create_row_processor(
        self, context, path, loadopt, mapper, result, adapter, populators
    ):
        """Install joined-eager-load populators, or fall back to the
        lazy strategy when the joined columns are not present in the
        result.
        """
        if not self.parent.class_manager[self.key].impl.supports_population:
            raise sa_exc.InvalidRequestError(
                "'%s' does not support object "
                "population - eager loading cannot be applied." % self
            )
        our_path = path[self.parent_property]
        eager_adapter = self._create_eager_adapter(
            context, result, adapter, our_path, loadopt
        )
        if eager_adapter is not False:
            key = self.key
            # processor that instantiates the related entity from the
            # joined columns of each row
            _instance = loading._instance_processor(
                self.mapper,
                context,
                result,
                our_path[self.entity],
                eager_adapter,
            )
            if not self.uselist:
                self._create_scalar_loader(context, key, _instance, populators)
            else:
                self._create_collection_loader(
                    context, key, _instance, populators
                )
        else:
            # degrade to lazy loading
            self.parent_property._get_strategy(
                (("lazy", "select"),)
            ).create_row_processor(
                context, path, loadopt, mapper, result, adapter, populators
            )

    def _create_collection_loader(self, context, key, _instance, populators):
        """Register populators assembling a list-valued attribute from
        joined rows; related instances accumulate across rows sharing
        the same parent.
        """

        def load_collection_from_joined_new_row(state, dict_, row):
            # note this must unconditionally clear out any existing collection.
            # an existing collection would be present only in the case of
            # populate_existing().
            collection = attributes.init_state_collection(state, dict_, key)
            result_list = util.UniqueAppender(
                collection, "append_without_event"
            )
            context.attributes[(state, key)] = result_list
            inst = _instance(row)
            if inst is not None:
                result_list.append(inst)

        def load_collection_from_joined_existing_row(state, dict_, row):
            if (state, key) in context.attributes:
                result_list = context.attributes[(state, key)]
            else:
                # appender_key can be absent from context.attributes
                # with isnew=False when self-referential eager loading
                # is used; the same instance may be present in two
                # distinct sets of result columns
                collection = attributes.init_state_collection(
                    state, dict_, key
                )
                result_list = util.UniqueAppender(
                    collection, "append_without_event"
                )
                context.attributes[(state, key)] = result_list
            inst = _instance(row)
            if inst is not None:
                result_list.append(inst)

        def load_collection_from_joined_exec(state, dict_, row):
            # descend into the related entity's own eager loads without
            # populating this attribute
            _instance(row)

        populators["new"].append(
            (self.key, load_collection_from_joined_new_row)
        )
        populators["existing"].append(
            (self.key, load_collection_from_joined_existing_row)
        )
        if context.invoke_all_eagers:
            populators["eager"].append(
                (self.key, load_collection_from_joined_exec)
            )

    def _create_scalar_loader(self, context, key, _instance, populators):
        """Register populators setting a scalar (uselist=False) attribute
        from joined rows; warns on conflicting multiple rows.
        """

        def load_scalar_from_joined_new_row(state, dict_, row):
            # set a scalar object instance directly on the parent
            # object, bypassing InstrumentedAttribute event handlers.
            dict_[key] = _instance(row)

        def load_scalar_from_joined_existing_row(state, dict_, row):
            # call _instance on the row, even though the object has
            # been created, so that we further descend into properties
            existing = _instance(row)

            # conflicting value already loaded, this shouldn't happen
            if key in dict_:
                if existing is not dict_[key]:
                    util.warn(
                        "Multiple rows returned with "
                        "uselist=False for eagerly-loaded attribute '%s' "
                        % self
                    )
            else:
                # this case is when one row has multiple loads of the
                # same entity (e.g. via aliasing), one has an attribute
                # that the other doesn't.
                dict_[key] = existing

        def load_scalar_from_joined_exec(state, dict_, row):
            # descend into the related entity's own eager loads without
            # populating this attribute
            _instance(row)

        populators["new"].append((self.key, load_scalar_from_joined_new_row))
        populators["existing"].append(
            (self.key, load_scalar_from_joined_existing_row)
        )
        if context.invoke_all_eagers:
            populators["eager"].append(
                (self.key, load_scalar_from_joined_exec)
            )
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="selectin")
class SelectInLoader(PostLoader, util.MemoizedSlots):
    """Provide loading behavior for a :class:`.RelationshipProperty`
    using "SELECT IN" eager loading: after the parent rows are loaded,
    a second query loads all related objects for a chunk of parents at
    once using an IN clause on the linking columns.
    """

    __slots__ = (
        "join_depth",
        "omit_join",
        "_parent_alias",
        "_query_info",
        "_fallback_query_info",
        "_bakery",
    )

    # static description of how the second query is to be constructed:
    # - load_only_child: m2o optimization; query the child by PK only
    # - load_with_join: query must JOIN from an aliased parent
    # - in_expr: expression the IN criteria is applied against
    # - pk_cols: columns forming the lookup key in the result
    # - zero_idx: True when the key is a single column (unwrap 1-tuples)
    # - child_lookup_cols: parent-side FK columns for the m2o case
    query_info = collections.namedtuple(
        "queryinfo",
        [
            "load_only_child",
            "load_with_join",
            "in_expr",
            "pk_cols",
            "zero_idx",
            "child_lookup_cols",
        ],
    )

    # number of parent keys per IN clause / emitted SELECT
    _chunksize = 500

    def __init__(self, parent, strategy_key):
        super(SelectInLoader, self).__init__(parent, strategy_key)
        self.join_depth = self.parent_property.join_depth
        is_m2o = self.parent_property.direction is interfaces.MANYTOONE

        if self.parent_property.omit_join is not None:
            # explicit user setting
            self.omit_join = self.parent_property.omit_join
        else:
            # detect whether the join to the parent can be omitted: for
            # m2o, a simple get() suffices; otherwise the lazy clause
            # must line up with the parent's PK criteria
            lazyloader = self.parent_property._get_strategy(
                (("lazy", "select"),)
            )
            if is_m2o:
                self.omit_join = lazyloader.use_get
            else:
                self.omit_join = self.parent._get_clause[0].compare(
                    lazyloader._rev_lazywhere,
                    use_proxies=True,
                    equivalents=self.parent._equivalent_columns,
                )
        if self.omit_join:
            if is_m2o:
                self._query_info = self._init_for_omit_join_m2o()
                # used when parents lack the FK value in memory
                self._fallback_query_info = self._init_for_join()
            else:
                self._query_info = self._init_for_omit_join()
        else:
            self._query_info = self._init_for_join()

    def _init_for_omit_join(self):
        """Build query_info for the one-to-many "omit join" case: filter
        the child table directly on its FK-to-parent columns.
        """
        pk_to_fk = dict(
            self.parent_property._join_condition.local_remote_pairs
        )
        # also map parent-equivalent columns (e.g. inheritance) to the
        # same FK columns
        pk_to_fk.update(
            (equiv, pk_to_fk[k])
            for k in list(pk_to_fk)
            for equiv in self.parent._equivalent_columns.get(k, ())
        )
        pk_cols = fk_cols = [
            pk_to_fk[col] for col in self.parent.primary_key if col in pk_to_fk
        ]
        if len(fk_cols) > 1:
            in_expr = sql.tuple_(*fk_cols)
            zero_idx = False
        else:
            in_expr = fk_cols[0]
            zero_idx = True
        return self.query_info(False, False, in_expr, pk_cols, zero_idx, None)

    def _init_for_omit_join_m2o(self):
        """Build query_info for the many-to-one "omit join" case: load
        children by their own primary key, keyed from the parents' FK
        column values.
        """
        pk_cols = self.mapper.primary_key
        if len(pk_cols) > 1:
            in_expr = sql.tuple_(*pk_cols)
            zero_idx = False
        else:
            in_expr = pk_cols[0]
            zero_idx = True
        lazyloader = self.parent_property._get_strategy((("lazy", "select"),))
        # parent-side columns whose values equate to the child PK
        lookup_cols = [lazyloader._equated_columns[pk] for pk in pk_cols]
        return self.query_info(
            True, False, in_expr, pk_cols, zero_idx, lookup_cols
        )

    def _init_for_join(self):
        """Build query_info for the general case: JOIN from an aliased
        parent entity, filtering on the aliased parent's primary key.
        """
        self._parent_alias = aliased(self.parent.class_)
        pa_insp = inspect(self._parent_alias)
        pk_cols = [
            pa_insp._adapt_element(col) for col in self.parent.primary_key
        ]
        if len(pk_cols) > 1:
            in_expr = sql.tuple_(*pk_cols)
            zero_idx = False
        else:
            in_expr = pk_cols[0]
            zero_idx = True
        return self.query_info(False, True, in_expr, pk_cols, zero_idx, None)

    def init_class_attribute(self, mapper):
        # class-level instrumentation still lazy-loads on plain access
        self.parent_property._get_strategy(
            (("lazy", "select"),)
        ).init_class_attribute(mapper)

    @util.dependencies("sqlalchemy.ext.baked")
    def _memoized_attr__bakery(self, baked):
        # private baked-query cache for this strategy instance
        return baked.bakery(size=50)

    def create_row_processor(
        self, context, path, loadopt, mapper, result, adapter, populators
    ):
        """Register a post-load callable that will emit the SELECT IN
        query once the parent rows have been fetched.
        """
        if context.refresh_state:
            return self._immediateload_create_row_processor(
                context, path, loadopt, mapper, result, adapter, populators
            )
        if not self.parent.class_manager[self.key].impl.supports_population:
            raise sa_exc.InvalidRequestError(
                "'%s' does not support object "
                "population - eager loading cannot be applied." % self
            )
        selectin_path = (
            context.query._current_path or orm_util.PathRegistry.root
        ) + path
        if not orm_util._entity_isa(path[-1], self.parent):
            return
        if loading.PostLoad.path_exists(context, selectin_path, self.key):
            # already registered for this path
            return
        path_w_prop = path[self.parent_property]
        selectin_path_w_prop = selectin_path[self.parent_property]
        # build up a path indicating the path from the leftmost
        # entity to the thing we're subquery loading.
        with_poly_info = path_w_prop.get(
            context.attributes, "path_with_polymorphic", None
        )
        if with_poly_info is not None:
            effective_entity = with_poly_info.entity
        else:
            effective_entity = self.entity
        if not path_w_prop.contains(context.attributes, "loader"):
            # recursion guard for self-referential relationships
            if self.join_depth:
                if selectin_path_w_prop.length / 2 > self.join_depth:
                    return
            elif selectin_path_w_prop.contains_mapper(self.mapper):
                return
        loading.PostLoad.callable_for_path(
            context,
            selectin_path,
            self.parent,
            self.key,
            self._load_for_path,
            effective_entity,
        )

    @util.dependencies("sqlalchemy.ext.baked")
    def _load_for_path(
        self, baked, context, path, states, load_only, effective_entity
    ):
        """Emit the SELECT IN query for a batch of parent states and
        populate the target attribute on each.
        """
        if load_only and self.key not in load_only:
            return
        query_info = self._query_info

        if query_info.load_only_child:
            # m2o "omit join": key parents by the FK values they already
            # hold in memory
            our_states = collections.defaultdict(list)
            none_states = []
            mapper = self.parent
            for state, overwrite in states:
                state_dict = state.dict
                related_ident = tuple(
                    mapper._get_state_attr_by_column(
                        state,
                        state_dict,
                        lk,
                        passive=attributes.PASSIVE_NO_FETCH,
                    )
                    for lk in query_info.child_lookup_cols
                )
                # if the loaded parent objects do not have the foreign key
                # to the related item loaded, then degrade into the joined
                # version of selectinload
                if attributes.PASSIVE_NO_RESULT in related_ident:
                    query_info = self._fallback_query_info
                    break
                # organize states into lists keyed to particular foreign
                # key values.
                if None not in related_ident:
                    our_states[related_ident].append(
                        (state, state_dict, overwrite)
                    )
                else:
                    # For FK values that have None, add them to a
                    # separate collection that will be populated separately
                    none_states.append((state, state_dict, overwrite))

        # note the above conditional may have changed query_info
        if not query_info.load_only_child:
            # key parents by their identity-key primary key value
            our_states = [
                (state.key[1], state, state.dict, overwrite)
                for state, overwrite in states
            ]

        pk_cols = query_info.pk_cols
        in_expr = query_info.in_expr

        if not query_info.load_with_join:
            # in "omit join" mode, the primary key column and the
            # "in" expression are in terms of the related entity.  So
            # if the related entity is polymorphic or otherwise aliased,
            # we need to adapt our "pk_cols" and "in_expr" to that
            # entity.   in non-"omit join" mode, these are against the
            # parent entity and do not need adaption.
            insp = inspect(effective_entity)
            if insp.is_aliased_class:
                # fix: previously pk_cols was adapted a second time after
                # in_expr, re-running _adapt_element on already-adapted
                # columns; a single adaptation is sufficient.
                pk_cols = [insp._adapt_element(col) for col in pk_cols]
                in_expr = insp._adapt_element(in_expr)

        # baked query producing (pk_bundle, entity) rows
        q = self._bakery(
            lambda session: session.query(
                query.Bundle("pk", *pk_cols), effective_entity
            ),
            self,
        )

        if not query_info.load_with_join:
            # the Bundle we have in the "omit_join" case is against raw, non
            # annotated columns, so to ensure the Query knows its primary
            # entity, we add it explicitly.  If we made the Bundle against
            # annotated columns, we hit a performance issue in this specific
            # case, which is detailed in issue #4347.
            q.add_criteria(lambda q: q.select_from(effective_entity))
        else:
            # in the non-omit_join case, the Bundle is against the annotated/
            # mapped column of the parent entity, but the #4347 issue does not
            # occur in this case.
            pa = self._parent_alias
            q.add_criteria(
                lambda q: q.select_from(pa).join(
                    getattr(pa, self.parent_property.key).of_type(
                        effective_entity
                    )
                )
            )

        if query_info.load_only_child:
            q.add_criteria(
                lambda q: q.filter(
                    in_expr.in_(sql.bindparam("primary_keys", expanding=True))
                )
            )
        else:
            # ORDER BY the parent key so itertools.groupby in
            # _load_via_parent sees contiguous groups
            q.add_criteria(
                lambda q: q.filter(
                    in_expr.in_(sql.bindparam("primary_keys", expanding=True))
                ).order_by(*pk_cols)
            )

        orig_query = context.query
        # propagate loader options that apply below this path
        q._add_lazyload_options(
            orig_query._with_options, path[self.parent_property]
        )
        if orig_query._populate_existing:
            q.add_criteria(lambda q: q.populate_existing())

        if self.parent_property.order_by:
            if not query_info.load_with_join:
                eager_order_by = self.parent_property.order_by
                if insp.is_aliased_class:
                    eager_order_by = [
                        insp._adapt_element(elem) for elem in eager_order_by
                    ]
                q.add_criteria(lambda q: q.order_by(*eager_order_by))
            else:

                def _setup_outermost_orderby(q):
                    # imitate the same method that subquery eager loading uses,
                    # looking for the adapted "secondary" table
                    eagerjoin = q._from_obj[0]
                    return q.order_by(
                        *eagerjoin._target_adapter.copy_and_process(
                            util.to_list(self.parent_property.order_by)
                        )
                    )

                q.add_criteria(_setup_outermost_orderby)

        if query_info.load_only_child:
            self._load_via_child(
                our_states, none_states, query_info, q, context
            )
        else:
            self._load_via_parent(our_states, query_info, q, context)

    def _load_via_child(self, our_states, none_states, query_info, q, context):
        """Populate m2o attributes: run the query in chunks keyed by the
        child PK and distribute the loaded children to their parents.
        """
        uselist = self.uselist
        # this sort is really for the benefit of the unit tests
        our_keys = sorted(our_states)
        while our_keys:
            chunk = our_keys[0 : self._chunksize]
            our_keys = our_keys[self._chunksize :]
            data = {
                k: v
                for k, v in q(context.session).params(
                    primary_keys=[
                        key[0] if query_info.zero_idx else key for key in chunk
                    ]
                )
            }
            for key in chunk:
                # for a real foreign key and no concurrent changes to the
                # DB while running this method, "key" is always present in
                # data.  However, for primaryjoins without real foreign keys
                # a non-None primaryjoin condition may still refer to no
                # related object.
                related_obj = data.get(key, None)
                for state, dict_, overwrite in our_states[key]:
                    if not overwrite and self.key in dict_:
                        continue
                    state.get_impl(self.key).set_committed_value(
                        state,
                        dict_,
                        related_obj if not uselist else [related_obj],
                    )
        # populate none states with empty value / collection
        for state, dict_, overwrite in none_states:
            if not overwrite and self.key in dict_:
                continue
            # note it's OK if this is a uselist=True attribute, the empty
            # collection will be populated
            state.get_impl(self.key).set_committed_value(state, dict_, None)

    def _load_via_parent(self, our_states, query_info, q, context):
        """Populate attributes for the general case: group the query
        results by parent key and assign each group to its parent.
        """
        uselist = self.uselist
        _empty_result = () if uselist else None
        while our_states:
            chunk = our_states[0 : self._chunksize]
            our_states = our_states[self._chunksize :]
            primary_keys = [
                key[0] if query_info.zero_idx else key
                for key, state, state_dict, overwrite in chunk
            ]
            # rows arrive ordered by pk_cols, so groupby yields one group
            # per parent key
            data = {
                k: [vv[1] for vv in v]
                for k, v in itertools.groupby(
                    q(context.session).params(primary_keys=primary_keys),
                    lambda x: x[0],
                )
            }
            for key, state, state_dict, overwrite in chunk:
                if not overwrite and self.key in state_dict:
                    continue
                collection = data.get(key, _empty_result)
                if not uselist and collection:
                    if len(collection) > 1:
                        util.warn(
                            "Multiple rows returned with "
                            "uselist=False for eagerly-loaded "
                            "attribute '%s' " % self
                        )
                    state.get_impl(self.key).set_committed_value(
                        state, state_dict, collection[0]
                    )
                else:
                    # note that empty tuple set on uselist=False sets the
                    # value to None
                    state.get_impl(self.key).set_committed_value(
                        state, state_dict, collection
                    )
def single_parent_validator(desc, prop):
    """Attach "append" and "set" event listeners to *desc* that enforce
    the single-parent contract of relationship *prop*: a child instance
    that already has a parent may not be attached to a second one.

    Raises :class:`.InvalidRequestError` from the listener when the
    contract is violated.
    """

    def _do_check(state, value, oldvalue, initiator):
        # guard clauses: None values and unrelated attributes pass through
        if value is None or initiator.key != prop.key:
            return value
        already_parented = initiator.hasparent(
            attributes.instance_state(value)
        )
        if already_parented and oldvalue is not value:
            raise sa_exc.InvalidRequestError(
                "Instance %s is already associated with an instance "
                "of %s via its %s attribute, and is only allowed a "
                "single parent."
                % (orm_util.instance_str(value), state.class_, prop)
            )
        return value

    def append(state, value, initiator):
        # collection append: no prior value to compare against
        return _do_check(state, value, None, initiator)

    def set_(state, value, oldvalue, initiator):
        return _do_check(state, value, oldvalue, initiator)

    event.listen(
        desc, "append", append, raw=True, retval=True, active_history=True
    )
    event.listen(desc, "set", set_, raw=True, retval=True, active_history=True)
| 34.731128 | 79 | 0.578183 |
a80b22a5e0d9307266771aa481a302416331be48 | 6,360 | py | Python | Tut 04 Objects at Rest/ShaderPerspective.py | khok/PyOpenGL-Tutorial | ecdd123b2b79c96c5656453c75b6f5d2bbc11c52 | [
"MIT"
] | 77 | 2017-04-17T10:15:33.000Z | 2021-12-26T00:21:22.000Z | Tut 04 Objects at Rest/ShaderPerspective.py | khok/PyOpenGL-Tutorial | ecdd123b2b79c96c5656453c75b6f5d2bbc11c52 | [
"MIT"
] | 1 | 2020-02-12T11:53:44.000Z | 2020-02-12T11:53:44.000Z | Tut 04 Objects at Rest/ShaderPerspective.py | khok/PyOpenGL-Tutorial | ecdd123b2b79c96c5656453c75b6f5d2bbc11c52 | [
"MIT"
] | 30 | 2017-04-17T14:19:25.000Z | 2021-11-22T08:09:29.000Z | # Mario Rosasco, 2016
# adapted from ShaderPerspective.cpp, Copyright (C) 2010-2012 by Jason L. McKesson
# This file is licensed under the MIT License.
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GL import *
import numpy as np
from framework import *
# A 1-D array of 3 4-D vertices (X,Y,Z,W)
# Note that this must be a numpy array, since as of
# 170111 support for lists has not been implemented.
# Interleaved-by-section vertex buffer: the first 36 rows are (X,Y,Z,W)
# positions for a prism (12 triangles x 3 vertices), the following 36 rows
# are per-vertex RGBA colors, one color per face.
vertexData = np.array(
    [0.25, 0.25, -1.25, 1.0,
     0.25, -0.25, -1.25, 1.0,
     -0.25, 0.25, -1.25, 1.0,
     0.25, -0.25, -1.25, 1.0,
     -0.25, -0.25, -1.25, 1.0,
     -0.25, 0.25, -1.25, 1.0,
     0.25, 0.25, -2.75, 1.0,
     -0.25, 0.25, -2.75, 1.0,
     0.25, -0.25, -2.75, 1.0,
     0.25, -0.25, -2.75, 1.0,
     -0.25, 0.25, -2.75, 1.0,
     -0.25, -0.25, -2.75, 1.0,
     -0.25, 0.25, -1.25, 1.0,
     -0.25, -0.25, -1.25, 1.0,
     -0.25, -0.25, -2.75, 1.0,
     -0.25, 0.25, -1.25, 1.0,
     -0.25, -0.25, -2.75, 1.0,
     -0.25, 0.25, -2.75, 1.0,
     0.25, 0.25, -1.25, 1.0,
     0.25, -0.25, -2.75, 1.0,
     0.25, -0.25, -1.25, 1.0,
     0.25, 0.25, -1.25, 1.0,
     0.25, 0.25, -2.75, 1.0,
     0.25, -0.25, -2.75, 1.0,
     0.25, 0.25, -2.75, 1.0,
     0.25, 0.25, -1.25, 1.0,
     -0.25, 0.25, -1.25, 1.0,
     0.25, 0.25, -2.75, 1.0,
     -0.25, 0.25, -1.25, 1.0,
     -0.25, 0.25, -2.75, 1.0,
     0.25, -0.25, -2.75, 1.0,
     -0.25, -0.25, -1.25, 1.0,
     0.25, -0.25, -1.25, 1.0,
     0.25, -0.25, -2.75, 1.0,
     -0.25, -0.25, -2.75, 1.0,
     -0.25, -0.25, -1.25, 1.0,
     # --- color section begins here (RGBA per vertex, 6 per face) ---
     0.0, 0.0, 1.0, 1.0,
     0.0, 0.0, 1.0, 1.0,
     0.0, 0.0, 1.0, 1.0,
     0.0, 0.0, 1.0, 1.0,
     0.0, 0.0, 1.0, 1.0,
     0.0, 0.0, 1.0, 1.0,
     0.8, 0.8, 0.8, 1.0,
     0.8, 0.8, 0.8, 1.0,
     0.8, 0.8, 0.8, 1.0,
     0.8, 0.8, 0.8, 1.0,
     0.8, 0.8, 0.8, 1.0,
     0.8, 0.8, 0.8, 1.0,
     0.0, 1.0, 0.0, 1.0,
     0.0, 1.0, 0.0, 1.0,
     0.0, 1.0, 0.0, 1.0,
     0.0, 1.0, 0.0, 1.0,
     0.0, 1.0, 0.0, 1.0,
     0.0, 1.0, 0.0, 1.0,
     0.5, 0.5, 0.0, 1.0,
     0.5, 0.5, 0.0, 1.0,
     0.5, 0.5, 0.0, 1.0,
     0.5, 0.5, 0.0, 1.0,
     0.5, 0.5, 0.0, 1.0,
     0.5, 0.5, 0.0, 1.0,
     1.0, 0.0, 0.0, 1.0,
     1.0, 0.0, 0.0, 1.0,
     1.0, 0.0, 0.0, 1.0,
     1.0, 0.0, 0.0, 1.0,
     1.0, 0.0, 0.0, 1.0,
     1.0, 0.0, 0.0, 1.0,
     0.0, 1.0, 1.0, 1.0,
     0.0, 1.0, 1.0, 1.0,
     0.0, 1.0, 1.0, 1.0,
     0.0, 1.0, 1.0, 1.0,
     0.0, 1.0, 1.0, 1.0,
     0.0, 1.0, 1.0, 1.0],
    dtype='float32'
)
# components per vertex attribute (X,Y,Z,W / R,G,B,A)
vertexDim = 4
# 12 triangles x 3 vertices
nVertices = 12*3
# Global variable to represent the compiled shader program, written in GLSL
theProgram = None
# Global variable to represent the buffer that will hold the position vectors
vertexBufferObject = None
# Global variable to store the location of the shader's uniform variables
offsetUniform = None
# Set up the list of shaders, and call functions to compile them
def initializeProgram():
    """Compile and link the shader program, then set its constant uniforms.

    Fills the module-level ``theProgram`` and ``offsetUniform`` globals.
    The frustum scale and near/far planes are uploaded once here because
    they never change during the program's lifetime.
    """
    shaderList = []
    shaderList.append(loadShader(GL_VERTEX_SHADER, "ManualPerspective.vert"))
    shaderList.append(loadShader(GL_FRAGMENT_SHADER, "StandardColors.frag"))
    global theProgram
    theProgram = createProgram(shaderList)
    # The individual shader objects are no longer needed once linked.
    for shader in shaderList:
        glDeleteShader(shader)
    global offsetUniform
    offsetUniform = glGetUniformLocation(theProgram, "offset")
    # note that these uniform variable holders do not need to be global,
    # since they are only set once in this program, in this function
    frustumScaleUnif = glGetUniformLocation(theProgram, "frustumScale")
    zNearUnif = glGetUniformLocation(theProgram, "zNear")
    zFarUnif = glGetUniformLocation(theProgram, "zFar")
    # Uniforms can only be set while the program is bound.
    glUseProgram(theProgram)
    glUniform1f(frustumScaleUnif, 1.0)
    glUniform1f(zNearUnif, 1.0)
    glUniform1f(zFarUnif, 3.0)
    glUseProgram(0)
# Set up the vertex buffer that will store our vertex coordinates for OpenGL's access
def initializeVertexBuffer():
    """Create a GL buffer object and upload the module-level vertexData to it."""
    global vertexBufferObject
    vertexBufferObject = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, vertexBufferObject)
    glBufferData( # PyOpenGL allows for the omission of the size parameter
        GL_ARRAY_BUFFER,
        vertexData,
        GL_STREAM_DRAW
    )
    # Unbind so later code cannot accidentally write into this buffer.
    glBindBuffer(GL_ARRAY_BUFFER, 0)
# Initialize the OpenGL environment
def init():
    """One-time OpenGL setup: shaders, vertex buffer, VAO, and back-face culling."""
    initializeProgram()
    initializeVertexBuffer()
    glBindVertexArray(glGenVertexArrays(1))
    # Cull clockwise-wound back faces (this tutorial's data is wound CW).
    glEnable(GL_CULL_FACE)
    glCullFace(GL_BACK)
    glFrontFace(GL_CW)
# Called to update the display.
# Because we are using double-buffering, glutSwapBuffers is called at the end
# to write the rendered buffer to the display.
def display():
    """Render one frame: clear, draw the mesh with the shader, swap buffers."""
    glClearColor(0.0, 0.0, 0.0, 0.0)
    glClear(GL_COLOR_BUFFER_BIT)
    glUseProgram(theProgram)
    glUniform2f(offsetUniform, 0.5, 0.5)
    glBindBuffer(GL_ARRAY_BUFFER, vertexBufferObject)
    # Attribute 0 = positions, attribute 1 = colors; both interleaved-free
    # (positions first, then colors) in the same buffer.
    glEnableVertexAttribArray(0)
    glEnableVertexAttribArray(1)
    glVertexAttribPointer(0, vertexDim, GL_FLOAT, GL_FALSE, 0, None)
    # a ctype void pointer must be used to pass in the offset into the bound GL_ARRAY_BUFFER
    # also note that python's underlying float type is usally 64-bit, but
    # we have specified that our vertex array contains float32 data.
    colorOffset = c_void_p(vertexDim*nVertices*4)
    glVertexAttribPointer(1, vertexDim, GL_FLOAT, GL_FALSE, 0, colorOffset)
    glDrawArrays(GL_TRIANGLES, 0, nVertices)
    glDisableVertexAttribArray(0)
    glDisableVertexAttribArray(1)
    glUseProgram(0)
    glutSwapBuffers()
    # Request a continuous redraw (simple animation loop).
    glutPostRedisplay()
# keyboard input handler: exits the program if 'esc' is pressed
def keyboard(key, x, y):
    """GLUT keyboard callback: leave the main loop when Escape is pressed."""
    ESC_KEYCODE = 27
    if ord(key) != ESC_KEYCODE:  # ord() converts the raw byte to a keycode
        return
    glutLeaveMainLoop()
# Called whenever the window's size changes (including once when the program starts)
def reshape(w, h):
    """GLUT reshape callback: map the GL viewport onto the whole window."""
    glViewport(0, 0, w, h)
# The main function
def main():
    """Create the GLUT window, register the callbacks, and enter the main loop.

    Cleanup: removed the stray C-style semicolons and normalized call
    spacing to PEP 8; behaviour is unchanged.
    """
    glutInit()
    displayMode = GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH | GLUT_STENCIL
    glutInitDisplayMode(displayMode)
    width = 500
    height = 500
    glutInitWindowSize(width, height)
    glutInitWindowPosition(300, 200)
    window = glutCreateWindow("Tutorial Window")
    init()
    glutDisplayFunc(display)
    glutReshapeFunc(reshape)
    glutKeyboardFunc(keyboard)
    glutMainLoop()
# Run the tutorial only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
| 26.5 | 93 | 0.600786 |
fd76243a6110b567c2c992d30aa7191df474bdc7 | 12,753 | py | Python | keras_detection/utils.py | Walter1218/Self_Driving_Car_ND | 526a9583a2bc616cb19cdfc7921b5e1c0f9711bd | [
"MIT"
] | 2 | 2017-05-25T01:26:41.000Z | 2019-08-16T13:38:57.000Z | keras_detection/utils.py | Walter1218/Self_Driving_Car_ND | 526a9583a2bc616cb19cdfc7921b5e1c0f9711bd | [
"MIT"
] | null | null | null | keras_detection/utils.py | Walter1218/Self_Driving_Car_ND | 526a9583a2bc616cb19cdfc7921b5e1c0f9711bd | [
"MIT"
] | 1 | 2017-05-25T01:26:50.000Z | 2017-05-25T01:26:50.000Z | import numpy as np
import tensorflow as tf
import keras.backend
import random
import bbox_encode, bbox_decode, batch_generate
"""
anchor generate;
shifts Generate;
filters;
nms;
bbox_overlaps;
etc.
"""
def anchor(base_size=16, ratios=None, scales=None):
    """
    Generates a regular grid of multi-aspect and multi-scale anchor boxes.

    Starting from a base_size x base_size reference window, one anchor is
    produced per (ratio, scale) combination.
    """
    ratios = np.array([0.5, 1, 2]) if ratios is None else ratios
    scales = np.array([8, 16, 32]) if scales is None else scales
    reference = np.array([1, 1, base_size, base_size]) - 1
    per_ratio = _ratio_enum(reference, ratios)
    scaled = [_scale_enum(per_ratio[i, :], scales) for i in range(per_ratio.shape[0])]
    return np.vstack(scaled)
def _mkanchors(ws, hs, x_ctr, y_ctr):
"""
Given a vector of widths (ws) and heights (hs) around a center
(x_ctr, y_ctr), output a set of anchors (windows).
"""
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack((x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1)))
return anchors
def _ratio_enum(anchor, ratios):
    """
    Enumerate one anchor per aspect ratio, keeping the reference area
    (approximately, due to rounding) constant.
    """
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    widths = np.round(np.sqrt((w * h) / ratios))
    heights = np.round(widths * ratios)
    return _mkanchors(widths, heights, x_ctr, y_ctr)
def _scale_enum(anchor, scales):
    """
    Enumerate one anchor per scale factor, all sharing the input anchor's
    center.
    """
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    return _mkanchors(w * scales, h * scales, x_ctr, y_ctr)
def _whctrs(anchor):
"""
Return width, height, x center, and y center for an anchor (window).
"""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def clip_cpu(boxes, width=960, height=640):
    """Clip box corners in place to the image frame.

    Only the out-of-frame side of each coordinate is clamped, matching the
    original behaviour: x1/y1 are floored at 0, x2/y2 are capped at the
    frame size.

    Args:
        boxes: (N, 4) array of [x1, y1, x2, y2] boxes; modified in place.
        width: frame width used to cap x2 (default keeps the legacy 960).
        height: frame height used to cap y2 (default keeps the legacy 640).

    Returns:
        The same array, for call chaining.
    """
    boxes[boxes[:, 0] < 0, 0] = 0
    boxes[boxes[:, 1] < 0, 1] = 0
    boxes[boxes[:, 2] > width, 2] = width
    boxes[boxes[:, 3] > height, 3] = height
    return boxes
def clip(boxes, shape):
    """Clamp box coordinates to the feature-map extent using Keras backend ops.

    ``shape`` is (height, width). x coordinates (columns 0 and 2) are clamped
    to [0, shape[1] - 1]; y coordinates (columns 1 and 3) to [0, shape[0] - 1].

    Fix: the previous version clamped y1 with ``shape[1] - 1`` (the width),
    which is wrong for non-square feature maps; the canonical Faster R-CNN
    ``clip_boxes`` uses the image height for the y coordinates.
    """
    proposals = [
        keras.backend.maximum(keras.backend.minimum(boxes[:, 0::4], shape[1] - 1), 0),
        keras.backend.maximum(keras.backend.minimum(boxes[:, 1::4], shape[0] - 1), 0),
        keras.backend.maximum(keras.backend.minimum(boxes[:, 2::4], shape[1] - 1), 0),
        keras.backend.maximum(keras.backend.minimum(boxes[:, 3::4], shape[0] - 1), 0)
    ]
    return keras.backend.concatenate(proposals)
def union(au, bu):
    """Bounding region of two (x1, y1, x2, y2) boxes, returned as (x, y, w, h)."""
    left = min(au[0], bu[0])
    top = min(au[1], bu[1])
    right = max(au[2], bu[2])
    bottom = max(au[3], bu[3])
    return left, top, right - left, bottom - top
def intersection(ai, bi):
    """Overlap region of two (x1, y1, x2, y2) boxes as (x, y, w, h).

    Returns all zeros when the boxes do not overlap.
    """
    left = max(ai[0], bi[0])
    top = max(ai[1], bi[1])
    width = min(ai[2], bi[2]) - left
    height = min(ai[3], bi[3]) - top
    if width < 0 or height < 0:
        return 0, 0, 0, 0
    return left, top, width, height
def iou(a, b):
    """Intersection-over-union of two (x1, y1, x2, y2) boxes.

    Returns 0.0 for malformed (zero/negative extent) boxes.
    """
    malformed = a[0] >= a[2] or a[1] >= a[3] or b[0] >= b[2] or b[1] >= b[3]
    if malformed:
        return 0.0
    _, _, inter_w, inter_h = intersection(a, b)
    _, _, union_w, union_h = union(a, b)
    return float(inter_w * inter_h) / float(union_w * union_h)
def shift(shape, stride):
    """Tile the base anchors over a feature map of size `shape` with the given stride."""
    xs = np.arange(0, shape[0]) * stride
    ys = np.arange(0, shape[1]) * stride
    grid_x, grid_y = np.meshgrid(xs, ys)
    offsets = np.vstack((grid_x.ravel(), grid_y.ravel(),
                         grid_x.ravel(), grid_y.ravel())).transpose()
    base = anchor()
    n_anchors = len(base)
    n_points = len(offsets)  # number of base points = feat_h * feat_w
    # Broadcast every anchor against every grid offset.
    tiled = base.reshape(1, n_anchors, 4) + offsets.reshape(n_points, 1, 4)
    return tiled.reshape(n_points * n_anchors, 4)
def cal_accuracy(gta, bbox, scores):
    """Debug helper: print how many ground-truth boxes have a proposal
    with IoU > 0.45 after NMS.

    `scores` is currently unused; the commented lines are leftover
    debugging scaffolding.

    NOTE(review): `np.float` is a deprecated alias removed in NumPy 1.24;
    this will raise AttributeError on modern NumPy — confirm the pinned
    NumPy version before relying on this helper.
    """
    bbox = bbox[0]
    overlaps = bbox_overlaps(np.ascontiguousarray(gta, dtype=np.float),np.ascontiguousarray(bbox, dtype=np.float))
    argmax_overlaps = overlaps.argmax(axis=1)
    #print(overlaps)
    #pos = np.where(argmax_overlaps > 0.6)[0]
    #print('after nms we have postive samples', len(pos))
    #max_overlaps = np.zeros((output_height * output_width * num_anchors))
    max_overlaps = overlaps[np.arange(len(gta)), argmax_overlaps]
    gt_argmax_overlaps = overlaps.argmax(axis=0)
    #print('max_overlaps',max_overlaps)
    #print('gt_argmax_overlaps',gt_argmax_overlaps)
    gt_max_overlaps = overlaps[gt_argmax_overlaps,np.arange(overlaps.shape[1])]
    #print(gt_max_overlaps)
    gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]
    pos = np.where(gt_max_overlaps > 0.45)[0]
    print('groundtruth', gta.shape)
    print('after nms we have postive samples', len(pos))
def non_max_suppression_fast(boxes, probs, max_boxes, overlap_thresh=0.9):
    """Greedy non-maximum suppression.

    Repeatedly keeps the highest-scoring remaining box and discards every
    other box whose IoU with it exceeds `overlap_thresh`, until `max_boxes`
    boxes have been kept or none remain.

    Cleanup: removed the unused union-coordinate computations, the no-op
    `max_boxes = max_boxes` assignment, and commented-out debug prints;
    the algorithm and outputs are unchanged.

    Args:
        boxes: (N, 4) array of [x1, y1, x2, y2] boxes (inclusive coords).
        probs: N scores (any shape; flattened before use).
        max_boxes: maximum number of boxes to keep.
        overlap_thresh: IoU above which a box is suppressed.

    Returns:
        (kept_boxes, kept_probs): int box array and matching scores, ordered
        best-first; the bare list [] when `boxes` is empty.
    """
    if len(boxes) == 0:
        return []
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    pick = []
    probs = probs.reshape(-1)
    idx = np.argsort(probs)  # ascending: best score is at the end
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    while len(idx) > 0:
        last = len(idx) - 1
        i = idx[last]  # current best remaining box
        pick.append(i)
        # Intersection of the picked box with every other remaining box.
        xx1 = np.maximum(x1[i], x1[idx[:last]])
        yy1 = np.maximum(y1[i], y1[idx[:last]])
        xx2 = np.minimum(x2[i], x2[idx[:last]])
        yy2 = np.minimum(y2[i], y2[idx[:last]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        overlap = inter / (areas[i] + areas[idx[:last]] - inter)
        # Drop the picked box plus anything overlapping it too much.
        idx = np.delete(idx, np.concatenate(([last], np.where(overlap > overlap_thresh)[0])))
        if len(pick) >= max_boxes:
            break
    return boxes[pick].astype("int"), probs[pick]
def propose_cpu(boxes, scores, maximum=300):
    """CPU proposal generation: decode RPN deltas against the anchors,
    clip to the 960x640 frame, and apply NMS.

    Args:
        boxes: RPN regression output, reshaped to (-1, 4) deltas.
        scores: RPN objectness output; only the first 9 channels
            (one per anchor) of the last axis are used.
        maximum: cap on the number of proposals kept by NMS.

    Returns:
        (boxes, scores), each with a leading batch axis of size 1.
    """
    # Feature-map geometry is hard-coded to a 60x40 grid of 9 anchors
    # on a 640x960 image.
    (output_width, output_height) = (60, 40)
    num_anchors = 9
    _allowed_border = 0
    im_info = (640,960)
    anchor_box = batch_generate._generate_all_bbox(output_width, output_height)
    total_anchors = anchor_box.shape[0]
    # Keep only anchors that lie fully inside the image.
    inds_inside = np.where(
        (anchor_box[:, 0] >= -_allowed_border) &
        (anchor_box[:, 1] >= -_allowed_border) &
        (anchor_box[:, 2] < im_info[1] + _allowed_border) & # width
        (anchor_box[:, 3] < im_info[0] + _allowed_border) # height
    )[0]
    #shape = (40, 60)
    #shifted = shift(shape, 16)
    proposals = np.reshape(boxes, (-1, 4))
    #print('before decode',proposals.shape)
    #print(proposals)
    # Decode regression deltas into absolute boxes for the inside anchors.
    proposals[inds_inside] = bbox_decode.bbox_transform_inv_cpu(anchor_box[inds_inside], proposals[inds_inside])
    proposals = np.reshape(proposals, (-1, 4))
    #print(proposals)
    #print('proposals', proposals)
    #print('after decode',proposals.shape)
    proposals = clip_cpu(proposals)
    #indicies = filter_boxes_cpu(proposals, 1)
    #proposals = proposals[indicies]
    #print(proposals.shape)
    #print('input score shape',scores.shape)
    scores = scores[:,:,:,:9]
    scores = np.reshape(scores, (-1, 1))
    #print('reshape score',scores.shape)
    #scores = scores[indicies]
    #print('valid score shape',scores.shape)
    #print('score', scores)
    #print('shape', scores.shape)
    #print(scores[0])
    #idx = np.where(scores[:] > 0.5)[0]
    #print('> 0.7', len(idx))
    #print('> 0. scores',len(np.where(scores>0.7)[0]))
    #print('diplay', np.where(scores[:,0]>0.2)[0])
    boxes, scores = non_max_suppression_fast(proposals[inds_inside], scores[inds_inside,:],maximum)
    #print('> 0.7 scores',len(np.where(scores>0.7)[0]))
    #print('after nms box shape',boxes.shape)
    #print('after nms score display',scores)
    # Add a batch dimension so downstream code sees (1, N, 4) / (1, N).
    boxes = np.expand_dims(boxes, axis = 0)
    scores = np.expand_dims(scores, axis = 0)
    return boxes, scores
def propose(boxes, scores, maximum):
    """Graph-mode (Keras/TF) proposal generation: decode deltas over the
    shifted anchor grid, clip, filter tiny boxes, and apply TF NMS.

    Returns the kept proposals with a leading batch axis of size 1.
    """
    #shape = keras.backend.int_shape(boxes)[1:3]
    # Feature-map shape is hard-coded to 40x60 with a stride of 16.
    shape = (40,60)
    shifted = shift(shape, 16)
    proposals = keras.backend.reshape(boxes, (-1, 4))
    proposals = bbox_decode.bbox_transform_inv(shifted, proposals)
    proposals = clip(proposals, shape)
    # Drop degenerate boxes (side length < 1 pixel).
    indicies = filter_boxes(proposals, 1)
    proposals = keras.backend.gather(proposals, indicies)
    # First 9 channels are the per-anchor objectness scores.
    scores = scores[:, :, :, :9]
    scores = keras.backend.reshape(scores, (-1, 1))
    scores = keras.backend.gather(scores, indicies)
    scores = keras.backend.flatten(scores)
    proposals = keras.backend.cast(proposals, tf.float32)
    scores = keras.backend.cast(scores, tf.float32)
    indicies = non_maximum_suppression(proposals, scores, maximum, 0.7)
    proposals = keras.backend.gather(proposals, indicies)
    return keras.backend.expand_dims(proposals, 0)
def resize_images(images, shape):
    """Thin wrapper over TensorFlow's image resize; `shape` is (height, width)."""
    return tf.image.resize_images(images, shape)
#CPU version
#TODO, ADD
def filter_boxes_cpu(proposals, minimum):
    """Return indices of proposals whose width and height are both >= `minimum`.

    Cleanup: removed the dead commented-out variants; logic is unchanged.

    Args:
        proposals: (N, 4) array of [x1, y1, x2, y2] boxes (inclusive coords).
        minimum: smallest allowed side length, in pixels.

    Returns:
        1-D integer array of row indices into `proposals`.
    """
    ws = proposals[:, 2] - proposals[:, 0] + 1
    hs = proposals[:, 3] - proposals[:, 1] + 1
    keep = np.where((ws >= minimum) & (hs >= minimum))[0]
    return keep
#return np.cast(indicies, np.int32)
#GPU version
def filter_boxes(proposals, minimum):
    """Graph-mode twin of filter_boxes_cpu: indices (int32 tensor) of
    proposals whose width and height are both >= `minimum`."""
    ws = proposals[:, 2] - proposals[:, 0] + 1
    hs = proposals[:, 3] - proposals[:, 1] + 1
    indicies = tf.where((ws >= minimum) & (hs >= minimum))
    indicies = keras.backend.flatten(indicies)
    return keras.backend.cast(indicies, tf.int32)
def non_maximum_suppression(boxes, scores, maximum, threshold=0.5):
    """Thin wrapper over TF's NMS; returns the indices of kept boxes."""
    return tf.image.non_max_suppression(
        boxes=boxes,
        iou_threshold=threshold,
        max_output_size=maximum,
        scores=scores
    )
def bbox_overlaps(boxes, query_boxes):
    """
    Compute the IoU between every pair of boxes.

    Parameters
    ----------
    boxes: (N, 4) ndarray of float, [x1, y1, x2, y2] inclusive coords
    query_boxes: (K, 4) ndarray of float, [x1, y1, x2, y2] inclusive coords

    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes

    Notes
    -----
    Vectorized replacement for the original O(N*K) Python loops. Also drops
    the `np.float` alias, which was removed in NumPy 1.24 and made the
    original raise AttributeError on modern NumPy.
    """
    boxes = np.asarray(boxes, dtype=float)
    query_boxes = np.asarray(query_boxes, dtype=float)
    # Pairwise intersection extents; +1 because coordinates are inclusive.
    iw = (np.minimum(boxes[:, None, 2], query_boxes[None, :, 2])
          - np.maximum(boxes[:, None, 0], query_boxes[None, :, 0]) + 1)
    ih = (np.minimum(boxes[:, None, 3], query_boxes[None, :, 3])
          - np.maximum(boxes[:, None, 1], query_boxes[None, :, 1]) + 1)
    inter = np.clip(iw, 0, None) * np.clip(ih, 0, None)
    box_areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
    query_areas = (query_boxes[:, 2] - query_boxes[:, 0] + 1) * (query_boxes[:, 3] - query_boxes[:, 1] + 1)
    union_areas = box_areas[:, None] + query_areas[None, :] - inter
    overlaps = np.zeros((boxes.shape[0], query_boxes.shape[0]), dtype=float)
    # Divide only where there is a real intersection, mirroring the original
    # guard that left non-overlapping pairs at exactly 0.
    np.divide(inter, union_areas, out=overlaps, where=inter > 0)
    return overlaps
def _unmap(data, count, inds, fill=0):
""" Unmap a subset of item (data) back to the original set of items (of
size count) """
if len(data.shape) == 1:
ret = np.empty((count, ), dtype=np.float32)
ret.fill(fill)
ret[inds] = data
else:
ret = np.empty((count, ) + data.shape[1:], dtype=np.float32)
ret.fill(fill)
ret[inds, :] = data
return ret
def _compute_targets(ex_rois, gt_rois):
    """Compute bounding-box regression targets for an image.

    Encodes the deltas from `ex_rois` (anchors/proposals) to the first four
    columns of `gt_rois` (ground truth), as float32 without copying.
    """
    return bbox_encode.bbox_transform_cpu(ex_rois, gt_rois[:, :4]).astype(np.float32, copy=False)
| 31.488889 | 118 | 0.605269 |
9059ac42b8aad2dbfd426eef03bdd793fcab0ac0 | 462 | py | Python | env/Lib/site-packages/plotly/validators/heatmap/colorbar/tickfont/_size.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | venv/Lib/site-packages/plotly/validators/heatmap/colorbar/tickfont/_size.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | venv/Lib/site-packages/plotly/validators/heatmap/colorbar/tickfont/_size.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Auto-generated validator for the `size` property of
    heatmap.colorbar.tickfont.

    Enforces a minimum font size of 1 and marks edits as requiring a
    redraw of the colorbars layer (unless overridden via kwargs).
    """

    def __init__(
        self, plotly_name="size", parent_name="heatmap.colorbar.tickfont", **kwargs
    ):
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            min=kwargs.pop("min", 1),
            **kwargs
        )
951c7551d4830ba2854e51a98c8d95abcd6728d7 | 13,033 | py | Python | cache_highway_translate.py | RogerTsai917/attention-is-all-you-need-pytorch | 64197e55d275e5c819bc786a9ff19849cdf2f6b9 | [
"MIT"
] | null | null | null | cache_highway_translate.py | RogerTsai917/attention-is-all-you-need-pytorch | 64197e55d275e5c819bc786a9ff19849cdf2f6b9 | [
"MIT"
] | null | null | null | cache_highway_translate.py | RogerTsai917/attention-is-all-you-need-pytorch | 64197e55d275e5c819bc786a9ff19849cdf2f6b9 | [
"MIT"
] | null | null | null | ''' Translate input text with trained model. '''
import os
import time
import math
import torch
import argparse
import dill as pickle
from tqdm import tqdm
import transformer.Constants as Constants
from torchtext.data import Dataset
from transformer.CacheHighWayModels import HighWayTransformer
from transformer.CacheHighWayTranslator import HighWayTranslator
from transformer.Cache import CacheVocabulary
def load_model(opt, device, cache_vocab_dict):
    """Rebuild a HighWayTransformer from a checkpoint and load its weights.

    Architecture hyper-parameters come from the checkpoint's saved
    `settings`; the early-exit switches come from the current command-line
    `opt` so a trained model can be evaluated with different exit policies.

    Args:
        opt: parsed CLI options (provides `model` path and early-exit flags).
        device: torch device the model is loaded onto.
        cache_vocab_dict: per-exit-layer cache vocabularies for the decoder.

    Returns:
        The model on `device` with the checkpointed weights loaded.
    """
    checkpoint = torch.load(opt.model, map_location=device)
    model_opt = checkpoint['settings']
    model = HighWayTransformer(
        model_opt.src_vocab_size,
        model_opt.trg_vocab_size,
        model_opt.src_pad_idx,
        model_opt.trg_pad_idx,
        encoder_early_exit=opt.encoder_early_exit,
        decoder_teacher=model_opt.decoder_teacher,
        decoder_early_exit=opt.decoder_early_exit,
        trg_emb_prj_weight_sharing=model_opt.proj_share_weight,
        emb_src_trg_weight_sharing=model_opt.embs_share_weight,
        encoder_weight_sharing=model_opt.encoder_share_weight,
        decoder_weight_sharing=model_opt.decoder_share_weight,
        d_k=model_opt.d_k,
        d_v=model_opt.d_v,
        d_model=model_opt.d_model,
        d_word_vec=model_opt.d_word_vec,
        d_inner=model_opt.d_inner_hid,
        n_layers=model_opt.n_layers,
        n_head=model_opt.n_head,
        dropout=model_opt.dropout,
        cache_vocab_dict=cache_vocab_dict).to(device)
    model.load_state_dict(checkpoint['model'])
    print('[Info] Trained model state loaded.')
    return model
def creat_count_early_exit_dict(n_layers):
    """Return {1: 0, 2: 0, ..., n_layers: 0} for counting exits per layer."""
    return {layer: 0 for layer in range(1, n_layers + 1)}
def merge_two_dict(origin_dict, new_dict):
    """Add new_dict's counts into origin_dict (in place), keyed by
    origin_dict's keys, and return origin_dict."""
    for key, current in origin_dict.items():
        origin_dict[key] = current + new_dict[key]
    return origin_dict
def encoder_layer(model_opt):
    """Rough multiply-count (FLOPs) for one Transformer encoder layer.

    Counts the Q/K/V projections, the scaled dot-product attention, the
    attention output projection, and the position-wise feed-forward net.
    """
    d_model = model_opt.d_model
    kq_dim = model_opt.n_head * model_opt.d_k
    v_dim = model_opt.n_head * model_opt.d_v
    total_flops = 0.0
    # Q, K and V input projections.
    total_flops += d_model * d_model * (2 * kq_dim + v_dim)
    # Scaled dot-product attention across all heads.
    total_flops += d_model * (2 * kq_dim + v_dim)
    # Attention output projection back to d_model.
    total_flops += d_model * v_dim * d_model
    # Two-layer position-wise feed-forward network.
    total_flops += d_model * 2 * d_model * model_opt.d_inner_hid
    return total_flops
def decoder_layer(model_opt):
    """Rough multiply-count (FLOPs) for one Transformer decoder layer.

    Same components as the encoder layer, except the attention +
    feed-forward block is counted twice (masked self-attention and
    encoder-decoder attention).
    """
    d_model = model_opt.d_model
    kq_dim = model_opt.n_head * model_opt.d_k
    v_dim = model_opt.n_head * model_opt.d_v
    total_flops = 0.0
    # Q, K and V input projections.
    total_flops += d_model * d_model * (2 * kq_dim + v_dim)
    attention = d_model * (2 * kq_dim + v_dim)
    attention_out = d_model * v_dim * d_model
    feed_forward = d_model * 2 * d_model * model_opt.d_inner_hid
    # Self-attention plus encoder-decoder attention.
    total_flops += 2 * (attention + attention_out + feed_forward)
    return total_flops
def predict_layer(model_opt):
    """Multiply-count (FLOPs) of the final projection onto the target vocabulary."""
    attn_dim = model_opt.n_head * model_opt.d_k
    return model_opt.d_model * attn_dim * model_opt.trg_vocab_size
def calculate_FLOPs(model_opt, encoder_exit_layer, decoder_exit_layer_dict, cache_vocab_dict):
    """Total FLOPs for one translated sentence given the exit layers used.

    `encoder_exit_layer` is the layer at which the encoder exited;
    `decoder_exit_layer_dict` maps each decoder exit layer to how many
    tokens exited there. `cache_vocab_dict` is currently unused but kept
    for interface compatibility with callers.
    """
    total = 0.0
    total += encoder_exit_layer * encoder_layer(model_opt)
    total += sum(decoder_exit_layer_dict.values()) * decoder_layer(model_opt)
    total += predict_layer(model_opt)
    return total
def add_lsit_to_dict(_list, _dict):
    """Increment _dict's count for every value in _list (in place).

    Idiom cleanup: one `dict.get` per value replaces the membership test
    plus separate insert/increment branches. Returns the same dict for
    call chaining (callers reassign the result).
    """
    for value in _list:
        _dict[value] = _dict.get(value, 0) + 1
    return _dict
def perpare_cache_vocab(opt):
    """Build progressively larger cache vocabularies from training-set
    word frequencies.

    Counts token-id frequencies over both source and target sides of the
    training split, sorts them by frequency, and builds five
    CacheVocabulary objects of sizes 313/625/1250/2500/5000 (one per
    decoder exit level). Returns ({level: CacheVocabulary}, TRG field).

    NOTE(review): `len_` below is computed but never used — presumably a
    leftover from deriving the vocabulary sizes as powers; confirm before
    removing.
    """
    data = pickle.load(open(opt.data_pkl, 'rb'))
    SRC, TRG = data['vocab']['src'], data['vocab']['trg']
    print('[Info] Get vocabulary size:', len(TRG.vocab))
    fields = {'src': data['vocab']['src'], 'trg':data['vocab']['trg']}
    train = Dataset(examples=data['train'], fields=fields)
    unk_idx = SRC.vocab.stoi[SRC.unk_token]
    words_frequency = {}
    for example in tqdm(train, mininterval=0.1, desc=' - (train)', leave=False):
        # Target side includes BOS/EOS so their counts are cached too.
        sentence = [Constants.BOS_WORD] + example.trg + [Constants.EOS_WORD]
        sentence = [TRG.vocab.stoi.get(word, unk_idx) for word in sentence]
        words_frequency = add_lsit_to_dict(sentence, words_frequency)
        sentence = example.src
        sentence = [SRC.vocab.stoi.get(word, unk_idx) for word in sentence]
        words_frequency = add_lsit_to_dict(sentence, words_frequency)
    sorted_words_frquency = dict(sorted(words_frequency.items(), key=lambda item: item[1], reverse=True))
    print("len(sorted_words_frquency):", len(sorted_words_frquency))
    len_ = math.pow(len(sorted_words_frquency), 1.0/6)
    # cache_vocab_0 = CacheVocabulary(TRG, sorted_words_frquency, 5000, Constants.UNK_WORD, Constants.PAD_WORD)
    # cache_vocab_1 = CacheVocabulary(TRG, sorted_words_frquency, 2500, Constants.UNK_WORD, Constants.PAD_WORD)
    # cache_vocab_2 = CacheVocabulary(TRG, sorted_words_frquency, 1250, Constants.UNK_WORD, Constants.PAD_WORD)
    # cache_vocab_3 = CacheVocabulary(TRG, sorted_words_frquency, 625, Constants.UNK_WORD, Constants.PAD_WORD)
    # cache_vocab_4 = CacheVocabulary(TRG, sorted_words_frquency, 313, Constants.UNK_WORD, Constants.PAD_WORD)
    cache_vocab_0 = CacheVocabulary(TRG, sorted_words_frquency, 313, Constants.UNK_WORD, Constants.PAD_WORD)
    cache_vocab_1 = CacheVocabulary(TRG, sorted_words_frquency, 625, Constants.UNK_WORD, Constants.PAD_WORD)
    cache_vocab_2 = CacheVocabulary(TRG, sorted_words_frquency, 1250, Constants.UNK_WORD, Constants.PAD_WORD)
    cache_vocab_3 = CacheVocabulary(TRG, sorted_words_frquency, 2500, Constants.UNK_WORD, Constants.PAD_WORD)
    cache_vocab_4 = CacheVocabulary(TRG, sorted_words_frquency, 5000, Constants.UNK_WORD, Constants.PAD_WORD)
    # cache_vocab_0 = CacheVocabulary(TRG, sorted_words_frquency, 5000, Constants.UNK_WORD, Constants.PAD_WORD)
    # cache_vocab_1 = CacheVocabulary(TRG, sorted_words_frquency, 6000, Constants.UNK_WORD, Constants.PAD_WORD)
    # cache_vocab_2 = CacheVocabulary(TRG, sorted_words_frquency, 7000, Constants.UNK_WORD, Constants.PAD_WORD)
    # cache_vocab_3 = CacheVocabulary(TRG, sorted_words_frquency, 8000, Constants.UNK_WORD, Constants.PAD_WORD)
    # cache_vocab_4 = CacheVocabulary(TRG, sorted_words_frquency, 9000, Constants.UNK_WORD, Constants.PAD_WORD)
    result_dict = {
        0: cache_vocab_0,
        1: cache_vocab_1,
        2: cache_vocab_2,
        3: cache_vocab_3,
        4: cache_vocab_4}
    return result_dict, TRG
def main(similarity=1.0, entropy=0.0):
    '''Translate the test split with the given early-exit thresholds.

    Loads the model and vocabularies from the CLI arguments, sets the
    encoder similarity / decoder entropy thresholds, translates every test
    sentence, writes the predictions to a per-threshold file, and appends a
    summary (exit-layer histograms, word counts, timing) to
    prediction_record.txt in the save folder.
    '''
    parser = argparse.ArgumentParser(description='translate.py')
    parser.add_argument('-model', required=True,
                        help='Path to model weight file')
    parser.add_argument('-data_pkl', required=True,
                        help='Pickle file with both instances and vocabulary.')
    parser.add_argument('-save_folder', required=True)
    parser.add_argument('-beam_size', type=int, default=1)
    parser.add_argument('-max_seq_len', type=int, default=100)
    parser.add_argument('-no_cuda', action='store_true')
    parser.add_argument('-encoder_early_exit', action='store_true')
    parser.add_argument('-decoder_early_exit', action='store_true')
    opt = parser.parse_args()
    opt.cuda = not opt.no_cuda
    data = pickle.load(open(opt.data_pkl, 'rb'))
    SRC, TRG = data['vocab']['src'], data['vocab']['trg']
    opt.src_pad_idx = SRC.vocab.stoi[Constants.PAD_WORD]
    opt.trg_pad_idx = TRG.vocab.stoi[Constants.PAD_WORD]
    opt.trg_bos_idx = TRG.vocab.stoi[Constants.BOS_WORD]
    opt.trg_eos_idx = TRG.vocab.stoi[Constants.EOS_WORD]
    test_loader = Dataset(examples=data['test'], fields={'src': SRC, 'trg': TRG})
    cache_vocab_dict, TRG = perpare_cache_vocab(opt)
    device = torch.device('cuda' if opt.cuda else 'cpu')
    translator = HighWayTranslator(
        model=load_model(opt, device, cache_vocab_dict),
        beam_size=opt.beam_size,
        max_seq_len=opt.max_seq_len,
        src_pad_idx=opt.src_pad_idx,
        trg_pad_idx=opt.trg_pad_idx,
        trg_bos_idx=opt.trg_bos_idx,
        trg_eos_idx=opt.trg_eos_idx).to(device)
    # set the early exit threshold
    translator.model.encoder.set_early_exit_similarity(similarity)
    translator.model.decoder.set_early_exit_entropy(entropy)
    checkpoint = torch.load(opt.model, map_location=device)
    model_opt = checkpoint['settings']
    n_layers=model_opt.n_layers
    # Histograms of which layer the encoder/decoder exited at.
    tatoal_encoder_exit_layer_dict = creat_count_early_exit_dict(n_layers)
    tatoal_decoder_exit_layer_dict = creat_count_early_exit_dict(n_layers)
    unk_idx = SRC.vocab.stoi[SRC.unk_token]
    output_file_name = os.path.join(opt.save_folder, "prediction_similarity_" + str(similarity) + "_entropy_" + str(entropy) + ".txt")
    with open(output_file_name, 'w') as f:
        total_encoder_words = 0
        total_deocder_words = 0
        total_FLOPs = 0
        start_time = time.time()
        for example in tqdm(test_loader, mininterval=2, desc=' - (Test)', leave=False):
            src_seq = [SRC.vocab.stoi.get(word, unk_idx) for word in example.src]
            total_encoder_words += len(src_seq)
            pred_seq, encoder_exit_layer, decoder_exit_layer_dict = translator.translate_sentence(torch.LongTensor([src_seq]).to(device), n_layers, cache_vocab_dict, TRG)
            # total_FLOPs += calculate_FLOPs(model_opt, encoder_exit_layer, decoder_exit_layer_dict, cache_vocab_dict)
            # print(pred_seq)
            pred_line = ' '.join(TRG.vocab.itos[idx] for idx in pred_seq)
            # print(pred_line)
            # -2 discounts the BOS and EOS tokens from the word count.
            total_deocder_words += len(pred_line.split(" ")) - 2
            pred_line = pred_line.replace(Constants.BOS_WORD, '').replace(Constants.EOS_WORD, '')
            f.write(pred_line.strip() + '\n')
            tatoal_encoder_exit_layer_dict[encoder_exit_layer] += 1
            tatoal_decoder_exit_layer_dict = merge_two_dict(tatoal_decoder_exit_layer_dict, decoder_exit_layer_dict)
        end_time = time.time()
        run_time = end_time - start_time
    print('[Info] Finished.')
    print("[Info] Predict finished with entropy: ", entropy)
    print("[Info] Encoder early exit dict: ", tatoal_encoder_exit_layer_dict)
    print("[Info] Decoder early exit dict: ", tatoal_decoder_exit_layer_dict)
    print("[Info] Total input words: ", total_encoder_words)
    print("[Info] Total time: ", run_time)
    print("[Info] Total predict words: ", total_deocder_words)
    print("[Info] Average predict a word time: ", run_time/total_deocder_words)
    # print("[Info] Total FLOPs: ", int(total_FLOPs/1000000), "M")
    # print("[Info] Average predict a word FLOPs: ", int(total_FLOPs/total_deocder_words/1000000), "M")
    output_record_file_name = os.path.join(opt.save_folder, "prediction_record.txt")
    with open(output_record_file_name, 'a') as f:
        f.write("Predict with similarity: " + str(similarity) + "\n")
        f.write("Predict with entropy: " + str(entropy) + "\n")
        f.write("Encoder early exit dict: " + str(tatoal_encoder_exit_layer_dict) + "\n")
        f.write("Decoder early exit dict: " + str(tatoal_decoder_exit_layer_dict) + "\n")
        f.write("Total input words: " + str(total_encoder_words) + "\n")
        f.write("Total time: " + str(run_time) + "\n")
        f.write("Total predict words: " + str(total_deocder_words) + "\n")
        f.write("Average predict a word time: " + str(run_time/total_deocder_words) + "\n")
        # f.write("Total FLOPs: " + str(int(total_FLOPs/1000000)) + "M" + "\n")
        # f.write("Average predict a word FLOPs: " + str(total_FLOPs/total_deocder_words/1000000) + "M" + "\n")
        f.write("\n")
if __name__ == "__main__":
    '''
    Usage: python hightway_translate.py -model model/base_early_exit/trained_highway.chkpt -data m30k_deen_shr.pkl -save_folder prediction/encoder_3_decoder_early_exit
    '''
    # Sweep the decoder entropy threshold at a fixed encoder similarity.
    encoder_similarity = 1
    entropy_list = [0.0, 0.3, 0.6, 0.9, 1.2, 1.5, 1.8, 2.1, 2.4, 2.7, 3.0, 3.5, 4.0]
    # entropy_list = [0.0]
    for entropy in entropy_list:
        main(encoder_similarity, entropy)
| 45.56993 | 170 | 0.711271 |
1bbc3e3c912b5ec1e04794f7c687fdfe108a6c43 | 6,615 | py | Python | processImage.py | AxonneQ/safety-helmet-opencv | 75b6ca95d2b856227ca52c7bf7e20695dd920fb1 | [
"MIT"
] | 1 | 2021-10-09T17:15:30.000Z | 2021-10-09T17:15:30.000Z | processImage.py | AxonneQ/safety-helmet-opencv | 75b6ca95d2b856227ca52c7bf7e20695dd920fb1 | [
"MIT"
] | null | null | null | processImage.py | AxonneQ/safety-helmet-opencv | 75b6ca95d2b856227ca52c7bf7e20695dd920fb1 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import matplotlib.pyplot as plt
from skinDetector import SkinDetector
def openCloseMask(mask, iterations = 2):
    """Smooth a binary mask with repeated morphological opening and closing.

    Opening removes small blobs; closing fills small gaps. The input mask
    is left untouched; a cleaned copy is returned.
    """
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (6,6))
    cleaned = mask.copy()
    for _ in range(iterations):
        cleaned = cv2.morphologyEx(cleaned, cv2.MORPH_OPEN, kernel)
        cleaned = cv2.morphologyEx(cleaned, cv2.MORPH_CLOSE, kernel)
    return cleaned
def getContours(binary_img):
    """Find the two-level (outer + hole) contours of a binary image and
    return them sorted largest-area first."""
    found, _hierarchy = cv2.findContours(binary_img, mode=cv2.RETR_CCOMP, method=cv2.CHAIN_APPROX_SIMPLE)
    return sorted(found, key=cv2.contourArea, reverse=True)
def getSkinMask(img):
    """Run the SkinDetector over an RGB image and return a cleaned binary skin mask."""
    # SkinDetector expects BGR input, so convert from RGB first.
    image = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    skinD = SkinDetector(image)
    skinD.findSkin()
    skinMask = skinD.getMask()
    # Morphological open/close removes speckle and fills small holes.
    skinMask = openCloseMask(skinMask)
    return skinMask
def preProcess(img):
    """Convert a BGR frame to RGB and apply mild non-local-means denoising."""
    rgb = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2RGB)
    return cv2.fastNlMeansDenoising(rgb, h=6)  # h=6: gentle noise removal
def combineBoundingBox(box1, box2):
    """Return the (x, y, w, h) box that tightly covers both input boxes.

    Boxes are (x, y, w, h) as produced by cv2.boundingRect (this is how
    touchingRect/containsRect in this file interpret them too).

    Fix: the previous version used max(w1, w2) / max(h1, h2) directly,
    which does not cover both boxes whenever their origins differ; the
    union must be computed from the right/bottom extents instead.
    """
    x = min(box1[0], box2[0])
    y = min(box1[1], box2[1])
    w = max(box1[0] + box1[2], box2[0] + box2[2]) - x
    h = max(box1[1] + box1[3], box2[1] + box2[3]) - y
    return (x, y, w, h)
def touchingRect(box1, box2):
    """True when two (x, y, w, h) rectangles overlap (edge-only contact
    does not count, because the comparisons are strict)."""
    x1, y1, w1, h1 = box1
    x2, y2, w2, h2 = box2
    horizontal = x1 < x2 + w2 and x1 + w1 > x2
    vertical = y1 < y2 + h2 and y1 + h1 > y2
    return horizontal and vertical
def containsRect(box1, box2):
    """True when box1's top-left corner lies inside box2; boxes are (x, y, w, h).

    Note: only box1's origin is tested, not its full extent — this matches
    how getFaces uses it to nullify child rectangles.

    Fix: the original fell off the end and implicitly returned None for the
    negative case; callers only use the result in boolean context, so an
    explicit False is behaviour-compatible and clearer.
    """
    x, y, _w, _h = box1
    x2, y2, w2, h2 = box2
    if (x2 <= x <= x2 + w2) and (y2 <= y <= y2 + h2):
        return True
    # Mirrored comparison kept for parity with the original logic; it is
    # only reachable for negative widths/heights.
    if (x2 >= x >= x2 + w2) and (y2 >= y >= y2 + h2):
        return True
    return False
def getFaces(img, skinMask):
    """Derive candidate face/head boxes from the skin mask.

    Pipeline: keep skin contours at least 20% as large as the biggest one,
    merge touching boxes, drop boxes nested inside or directly below a
    larger one, then expand each survivor upward/outward so the box covers
    where a helmet would sit. Returns a list of (x, y, w, h) boxes.

    NOTE(review): `image` and the commented cv2.rectangle calls are debug
    scaffolding; the inner loops deliberately append to `newRects` while
    iterating it so merged boxes are themselves re-merged.
    """
    image = img.copy()
    contours = getContours(skinMask)
    newRects = []
    largestArea = cv2.contourArea(contours[0])
    # Discard irrelevant contours (5x smaller than the biggest area contours)
    for c in range(len(contours)):
        area = cv2.contourArea(contours[c])
        if area > largestArea * 0.20:
            newRects.append(cv2.boundingRect(contours[c]))
            # cv2.rectangle(image, cv2.boundingRect(contours[c]), color=(0,255,155), thickness=4)
    # Merge boxes into one
    mergedRects = []
    for i in range(len(newRects)):
        if i+1 <= len(newRects):
            for j in range(i+1, len(newRects)):
                if touchingRect(newRects[i], newRects[j]) == True:
                    newBox = combineBoundingBox(newRects[i], newRects[j])
                    if not newBox in newRects:
                        # cv2.rectangle(image, newBox, color=(0,255,255), thickness=4) # Comment this in final
                        mergedRects.append(newBox)
                        newRects.append(newBox)
    # nullify rect if its a child of another rect
    for i in range(len(mergedRects)):
        if i+1 <= len(mergedRects):
            for j in range(i+1, len(mergedRects)):
                if containsRect(mergedRects[i], mergedRects[j]):
                    area = mergedRects[i][2] * mergedRects[i][3]
                    area1 = mergedRects[j][2] * mergedRects[j][3]
                    # Keep the larger of the two; zero out the smaller.
                    if area > area1:
                        mergedRects[j] = (0,0,0,0)
                    elif area1 > area:
                        mergedRects[i] = (0,0,0,0)
    # If there were no merged boxes then fallback to base face rects
    if len(mergedRects) == 0:
        mergedRects = newRects
    # remove any rectangles directly below
    for i in range(len(mergedRects)):
        if i+1 <= len(mergedRects):
            for j in range(i+1, len(mergedRects)):
                curr = mergedRects[i]
                comp = mergedRects[j]
                if comp[0] >= curr[0] and comp[0]+comp[2] <= curr[0]+curr[2] and comp[1] >= curr[1]:
                    mergedRects[j] = (0,0,0,0)
    faces = []
    for r in mergedRects: # final array with non empty values
        if r != (0,0,0,0):
            # get the coordinates of the rectangle
            x, y, w, h = r
            # calculate coordinates of top-left corner
            # Y = y - 1.2*h and if it going outside the image, it takes a value of 0
            newY = y-int(1.2*h)
            if newY < 0:
                newY = 0
            # X = x - w - w*0.2 and if it going outside the image, it takes a value of 0
            left = x - int(w*0.2)
            if left < 0:
                left = 0
            width = w + int(w*0.5)
            height = int(2.2*h)
            newFace = (left,newY,width,height)
            # cv2.rectangle(image, newFace, color=(0,255,255), thickness=4)
            faces.append(newFace)
    return faces
def processHelmet(img):
    """Decide whether an image patch contains a helmet.

    Thresholds the patch in HSV space with known helmet colour ranges and
    reports a helmet when the largest colour blob's bounding box covers a
    large enough share of the patch.

    Args:
        img: RGB image patch (numpy array) cropped around a detected face.

    Returns:
        bool: True when a helmet-coloured region covers >= 39% of the patch.
    """
    h, w = img.shape[:2]
    area = h * w
    # Colour thresholds below are defined in HSV space, so convert first.
    hsvImage = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    helmetColors = [
        ((56, 3, 133), (116, 255, 241)),  # green
        ((15, 0, 180), (115, 37, 236)),   # white
    ]
    isHelmet = False
    for lower, upper in helmetColors:
        helmet_mask = cv2.inRange(hsvImage, lower, upper)
        finalMask = openCloseMask(helmet_mask, 4)
        contours = getContours(finalMask)
        if len(contours) == 0:
            # No blob of this colour in the patch; try the next colour.
            continue
        # BUG FIX: the original computed `cv2.boundingRect(...) + finalMask.std()`,
        # which adds a float to the bounding-rect tuple and raises TypeError on
        # every iteration. The bare `except: ''` swallowed it, so no helmet was
        # ever detected. Use the bounding rect directly instead.
        x, y, rect_w, rect_h = cv2.boundingRect(contours[0])
        helmetArea = rect_w * rect_h
        percentage = float(helmetArea / area) * 100
        if percentage >= 39.0:
            isHelmet = True
    return isHelmet
def getHelmets(img, skinMask, faces):
    """Draw one rectangle per face: green when a helmet is detected, red otherwise.

    Returns an RGB copy of ``img`` with the annotations drawn onto it.
    """
    image = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2RGB)
    # Remove skin pixels, then zero out near-white and near-dark pixels so
    # only strongly coloured (helmet-like) regions remain for classification.
    roi_img = cv2.bitwise_and(image, image, mask=cv2.bitwise_not(skinMask))
    roi_img[np.all(roi_img >= 250, axis=2)] = 0
    roi_img[np.all(roi_img <= 150, axis=2)] = 0
    for face in faces:
        x, y, w, h = face
        patch = roi_img[y:y + h, x:x + w]
        # Green box for a detected helmet, red box otherwise.
        box_color = (0, 255, 0) if processHelmet(patch) else (255, 0, 0)
        cv2.rectangle(image, face, color=box_color, thickness=6)
    return image
def process(img):
    """Full pipeline: preprocess, build the skin mask, locate faces, mark helmets."""
    preprocessed = preProcess(img)
    skin_mask = getSkinMask(preprocessed)
    faces = getFaces(preprocessed, skin_mask)
    return getHelmets(img, skin_mask, faces)
e19b5a1f93a8275fdf8f566102ad48606063bd99 | 936 | py | Python | trix/trix_core/tests/test_tagutils.py | Elthan/trix2 | 5fea5eaea8dfd2a6201377fc7775e89ce99f6cbd | [
"BSD-3-Clause"
] | 1 | 2020-03-15T12:29:13.000Z | 2020-03-15T12:29:13.000Z | trix/trix_core/tests/test_tagutils.py | devilry/trix2 | 5bbf15671b4a72ef1ec41240b975950f413735d8 | [
"BSD-3-Clause"
] | 63 | 2015-01-03T17:21:38.000Z | 2020-10-07T13:21:08.000Z | trix/trix_core/tests/test_tagutils.py | Elthan/trix2 | 5fea5eaea8dfd2a6201377fc7775e89ce99f6cbd | [
"BSD-3-Clause"
] | 4 | 2015-01-08T14:49:44.000Z | 2018-06-18T06:55:30.000Z | from django.test import TestCase
from trix.trix_core import models as coremodels
from trix.trix_core.tagutils import bulk_update_assignment_tags
class TestTagUtils(TestCase):
    """Tests for :func:`bulk_update_assignment_tags`."""

    def test_bulk_update_assignment_tags(self):
        # Two assignments; both receive 'duck1000', only the first 'oblig2'.
        first = coremodels.Assignment.objects.create(title='A1', text='text1')
        second = coremodels.Assignment.objects.create(title='A2', text='text2')
        bulk_update_assignment_tags(
            assignments_by_tag={
                'duck1000': [first, second],
                'oblig2': [first],
            },
            existing_assignments=[first, second])
        first_tags = {tagobject.tag for tagobject in first.tags.all()}
        second_tags = {tagobject.tag for tagobject in second.tags.all()}
        self.assertEqual(first_tags, set(['duck1000', 'oblig2']))
        self.assertEqual(second_tags, set(['duck1000']))
| 37.44 | 73 | 0.637821 |
619aebb70f92653f8d415053eb0486a746b83e8f | 26,691 | py | Python | rasa/core/test.py | PLNech/rasa | e6be2fe356677e667473566727dff10112d2a5a6 | [
"Apache-2.0"
] | null | null | null | rasa/core/test.py | PLNech/rasa | e6be2fe356677e667473566727dff10112d2a5a6 | [
"Apache-2.0"
] | null | null | null | rasa/core/test.py | PLNech/rasa | e6be2fe356677e667473566727dff10112d2a5a6 | [
"Apache-2.0"
] | null | null | null | import logging
import os
import warnings
import typing
from collections import defaultdict, namedtuple
from typing import Any, Dict, List, Optional, Text, Tuple
from rasa.core.channels import UserMessage
from rasa.core.training.story_writer.yaml_story_writer import YAMLStoryWriter
import rasa.utils.io as io_utils
from rasa.core.domain import Domain
from rasa.nlu.constants import (
ENTITIES,
EXTRACTOR,
ENTITY_ATTRIBUTE_VALUE,
ENTITY_ATTRIBUTE_TEXT,
ENTITY_ATTRIBUTE_START,
ENTITY_ATTRIBUTE_END,
ENTITY_ATTRIBUTE_TYPE,
INTENT,
)
from rasa.constants import RESULTS_FILE, PERCENTAGE_KEY
from rasa.core.utils import pad_lists_to_size
from rasa.core.events import ActionExecuted, UserUttered
from rasa.core.trackers import DialogueStateTracker
from rasa.nlu.training_data.formats.readerwriter import TrainingDataWriter
from rasa.utils.io import DEFAULT_ENCODING
if typing.TYPE_CHECKING:
from rasa.core.agent import Agent
from rasa.core.processor import MessageProcessor
# Default output file names for story-evaluation artifacts.
CONFUSION_MATRIX_STORIES_FILE = "story_confusion_matrix.png"
REPORT_STORIES_FILE = "story_report.json"
FAILED_STORIES_FILE = "failed_test_stories.yml"
SUCCESSFUL_STORIES_FILE = "successful_test_stories.yml"

logger = logging.getLogger(__name__)

# Aggregated result of evaluating a set of test stories:
# - evaluation_store: EvaluationStore with all targets/predictions
# - failed_stories / successful_stories: predicted trackers per outcome
# - action_list: per-action prediction records (action, predicted, policy, confidence)
# - in_training_data_fraction: share of actions predicted by memoization policies
StoryEvaluation = namedtuple(
    "StoryEvaluation",
    [
        "evaluation_store",
        "failed_stories",
        "successful_stories",
        "action_list",
        "in_training_data_fraction",
    ],
)
class EvaluationStore:
    """Accumulator for action, intent and entity targets vs. predictions."""

    def __init__(
        self,
        action_predictions: Optional[List[Text]] = None,
        action_targets: Optional[List[Text]] = None,
        intent_predictions: Optional[List[Text]] = None,
        intent_targets: Optional[List[Text]] = None,
        entity_predictions: Optional[List[Dict[Text, Any]]] = None,
        entity_targets: Optional[List[Dict[Text, Any]]] = None,
    ) -> None:
        self.action_predictions = action_predictions or []
        self.action_targets = action_targets or []
        self.intent_predictions = intent_predictions or []
        self.intent_targets = intent_targets or []
        self.entity_predictions = entity_predictions or []
        self.entity_targets = entity_targets or []

    def add_to_store(
        self,
        action_predictions: Optional[List[Text]] = None,
        action_targets: Optional[List[Text]] = None,
        intent_predictions: Optional[List[Text]] = None,
        intent_targets: Optional[List[Text]] = None,
        entity_predictions: Optional[List[Dict[Text, Any]]] = None,
        entity_targets: Optional[List[Dict[Text, Any]]] = None,
    ) -> None:
        """Append the given items (if any) to the matching stores."""
        additions = (
            (self.action_predictions, action_predictions),
            (self.action_targets, action_targets),
            (self.intent_predictions, intent_predictions),
            (self.intent_targets, intent_targets),
            (self.entity_predictions, entity_predictions),
            (self.entity_targets, entity_targets),
        )
        for store, new_items in additions:
            store.extend(new_items or [])

    def merge_store(self, other: "EvaluationStore") -> None:
        """Append everything collected in `other` to this store."""
        for attribute in (
            "action_predictions",
            "action_targets",
            "intent_predictions",
            "intent_targets",
            "entity_predictions",
            "entity_targets",
        ):
            getattr(self, attribute).extend(getattr(other, attribute))

    def has_prediction_target_mismatch(self) -> bool:
        """True if any prediction list differs from its target list."""
        comparisons = (
            (self.intent_predictions, self.intent_targets),
            (self.entity_predictions, self.entity_targets),
            (self.action_predictions, self.action_targets),
        )
        return any(predicted != target for predicted, target in comparisons)

    def serialise(self) -> Tuple[List[Text], List[Text]]:
        """Flatten targets and predictions into two equally sized lists."""
        target_entities = [
            TrainingDataWriter.generate_entity(gold.get("text"), gold)
            for gold in self.entity_targets
        ]
        predicted_entities = [
            TrainingDataWriter.generate_entity(predicted.get("text"), predicted)
            for predicted in self.entity_predictions
        ]

        targets = self.action_targets + self.intent_targets + target_entities
        predictions = (
            self.action_predictions + self.intent_predictions + predicted_entities
        )

        # sklearn needs equally long lists without `None` entries.
        return pad_lists_to_size(targets, predictions, padding_value="None")
class WronglyPredictedAction(ActionExecuted):
    """Marks an action the policy got wrong.

    Lets mispredictions be dumped as stories with the wrong
    prediction attached as a comment."""

    type_name = "wrong_action"

    def __init__(
        self,
        action_name_target: Text,
        action_name_prediction: Text,
        policy: Optional[Text] = None,
        confidence: Optional[float] = None,
        timestamp: Optional[float] = None,
        metadata: Optional[Dict] = None,
    ) -> None:
        # Remember what the model actually predicted, then initialise the
        # event with the expected (gold) action name.
        self.action_name_prediction = action_name_prediction
        super().__init__(action_name_target, policy, confidence, timestamp, metadata)

    def inline_comment(self) -> Text:
        """Comment attached to this event when dumping it to a story file."""
        return f"predicted: {self.action_name_prediction}"

    def as_story_string(self) -> Text:
        comment = self.inline_comment()
        return f"{self.action_name} <!-- {comment} -->"
class EndToEndUserUtterance(UserUttered):
    """User message within an end-to-end evaluation story.

    Ensures the full end-to-end user message is printed when dumping
    to the `failed_test_stories.yml` output file."""

    def as_story_string(self, e2e: bool = True) -> Text:
        # Always dump in end-to-end format, whatever flag the caller passed.
        return super().as_story_string(e2e=True)
class WronglyClassifiedUserUtterance(UserUttered):
    """Marks a user utterance the NLU model classified wrongly.

    Lets mispredictions be dumped as stories with the wrong
    prediction attached as a comment."""

    type_name = "wrong_utterance"

    def __init__(self, event: UserUttered, eval_store: EvaluationStore) -> None:
        # Keep the (wrong) NLU output around so it can be dumped as a comment.
        self.predicted_intent = (
            eval_store.intent_predictions[0] if eval_store.intent_predictions else None
        )
        self.predicted_entities = eval_store.entity_predictions

        # The event itself carries the *expected* intent and entities.
        gold_intent = {"name": eval_store.intent_targets[0]}

        super().__init__(
            event.text,
            gold_intent,
            eval_store.entity_targets,
            event.parse_data,
            event.timestamp,
            event.input_channel,
        )

    def inline_comment(self) -> Text:
        """Comment attached to this event when dumping it to a story file."""
        from rasa.core.events import md_format_message

        predicted_message = md_format_message(
            self.text, self.predicted_intent, self.predicted_entities
        )
        return f"predicted: {self.predicted_intent}: {predicted_message}"

    def as_story_string(self, e2e: bool = True) -> Text:
        from rasa.core.events import md_format_message

        correct_message = md_format_message(
            self.text, self.intent.get("name"), self.entities
        )
        comment = self.inline_comment()
        return f"{self.intent.get('name')}: {correct_message} <!-- {comment} -->"
async def _generate_trackers(
    resource_name: Text,
    agent: "Agent",
    max_stories: Optional[int] = None,
    use_e2e: bool = False,
) -> List[Any]:
    """Read the test stories and turn each one into a dialogue tracker."""
    from rasa.core.training.generator import TrainingDataGenerator

    from rasa.core import training

    story_graph = await training.extract_story_graph(
        resource_name, agent.domain, use_e2e
    )

    generator = TrainingDataGenerator(
        story_graph,
        agent.domain,
        # Evaluation needs the stories exactly as written: no concatenation,
        # no augmentation.
        use_story_concatenation=False,
        augmentation_factor=0,
        tracker_limit=max_stories,
    )
    return generator.generate_story_trackers()
def _clean_entity_results(
    text: Text, entity_results: List[Dict[Text, Any]]
) -> List[Dict[Text, Any]]:
    """Extract only the token variables from an entity dict.

    Args:
        text: The full user message the entities were extracted from.
        entity_results: Raw entity dicts as produced by the NLU pipeline.

    Returns:
        Entity dicts reduced to text/start/end/type/value; extracted values
        are normalized to strings so they compare against string targets.
    """
    cleaned_entities = []

    for r in entity_results:
        cleaned_entity = {ENTITY_ATTRIBUTE_TEXT: text}
        for k in (
            ENTITY_ATTRIBUTE_START,
            ENTITY_ATTRIBUTE_END,
            ENTITY_ATTRIBUTE_TYPE,
            ENTITY_ATTRIBUTE_VALUE,
        ):
            # Test membership on the dict directly (the original built a
            # throwaway `set(r)` for every check, and copied the list with
            # `tuple(...)` although the loop never mutates the list itself).
            if k in r:
                if k == ENTITY_ATTRIBUTE_VALUE and EXTRACTOR in r:
                    # convert values to strings for evaluation as
                    # target values are all of type string
                    r[k] = str(r[k])
                cleaned_entity[k] = r[k]
        cleaned_entities.append(cleaned_entity)

    return cleaned_entities
def _collect_user_uttered_predictions(
    event: UserUttered,
    predicted: Dict[Text, Any],
    partial_tracker: DialogueStateTracker,
    fail_on_prediction_errors: bool,
) -> EvaluationStore:
    """Compare one user utterance against the NLU prediction.

    Records intent and entity targets vs. predictions and appends either a
    `WronglyClassifiedUserUtterance` (on mismatch) or an
    `EndToEndUserUtterance` (on match) to `partial_tracker`.

    Raises:
        ValueError: if the prediction is wrong and
            `fail_on_prediction_errors` is True.
    """
    user_uttered_eval_store = EvaluationStore()

    # Gold intent comes from the story event, prediction from the NLU result.
    intent_gold = event.intent.get("name")
    predicted_intent = predicted.get(INTENT, {}).get("name")

    user_uttered_eval_store.add_to_store(
        intent_predictions=[predicted_intent], intent_targets=[intent_gold]
    )

    entity_gold = event.entities
    predicted_entities = predicted.get(ENTITIES)

    # Only record entities when either side produced any.
    if entity_gold or predicted_entities:
        user_uttered_eval_store.add_to_store(
            entity_targets=_clean_entity_results(event.text, entity_gold),
            entity_predictions=_clean_entity_results(event.text, predicted_entities),
        )

    if user_uttered_eval_store.has_prediction_target_mismatch():
        # Keep the wrong prediction in the tracker so the dumped story shows it.
        partial_tracker.update(
            WronglyClassifiedUserUtterance(event, user_uttered_eval_store)
        )
        if fail_on_prediction_errors:
            raise ValueError(
                "NLU model predicted a wrong intent. Failed Story:"
                " \n\n{}".format(
                    YAMLStoryWriter().dumps(partial_tracker.as_story().story_steps)
                )
            )
    else:
        end_to_end_user_utterance = EndToEndUserUtterance(
            event.text, event.intent, event.entities
        )
        partial_tracker.update(end_to_end_user_utterance)

    return user_uttered_eval_store
def _emulate_form_rejection(partial_tracker: DialogueStateTracker) -> None:
    """Append an `ActionExecutionRejected` event for the currently active loop."""
    from rasa.core.events import ActionExecutionRejected

    active_loop_name: Text = partial_tracker.active_loop["name"]
    partial_tracker.update(ActionExecutionRejected(active_loop_name))
def _collect_action_executed_predictions(
    processor: "MessageProcessor",
    partial_tracker: DialogueStateTracker,
    event: ActionExecuted,
    fail_on_prediction_errors: bool,
    circuit_breaker_tripped: bool,
) -> Tuple[EvaluationStore, Optional[Text], Optional[float]]:
    """Compare one expected action against the policy prediction.

    Updates `partial_tracker` with either the gold event (on match) or a
    `WronglyPredictedAction` (on mismatch).

    Returns:
        The evaluation store for this single action, plus the policy name and
        confidence of the prediction (both None when the circuit breaker
        tripped).

    Raises:
        ValueError: if the prediction is wrong and
            `fail_on_prediction_errors` is True.
    """
    from rasa.core.policies.form_policy import FormPolicy

    action_executed_eval_store = EvaluationStore()

    gold = event.action_name

    if circuit_breaker_tripped:
        # Too many actions were predicted in a row; no further prediction is
        # attempted for this step.
        predicted = "circuit breaker tripped"
        policy = None
        confidence = None
    else:
        action, policy, confidence = processor.predict_next_action(partial_tracker)
        predicted = action.name()

        if (
            policy
            and predicted != gold
            and _form_might_have_been_rejected(
                processor.domain, partial_tracker, predicted
            )
        ):
            # Wrong action was predicted,
            # but it might be Ok if form action is rejected.
            _emulate_form_rejection(partial_tracker)
            # try again
            action, policy, confidence = processor.predict_next_action(partial_tracker)

            # Even if the prediction is also wrong, we don't have to undo the emulation
            # of the action rejection as we know that the user explicitly specified
            # that something else than the form was supposed to run.
            predicted = action.name()

    action_executed_eval_store.add_to_store(
        action_predictions=[predicted], action_targets=[gold]
    )

    if action_executed_eval_store.has_prediction_target_mismatch():
        partial_tracker.update(
            WronglyPredictedAction(
                gold, predicted, event.policy, event.confidence, event.timestamp
            )
        )
        if fail_on_prediction_errors:
            error_msg = (
                "Model predicted a wrong action. Failed Story: "
                "\n\n{}".format(
                    YAMLStoryWriter().dumps(partial_tracker.as_story().story_steps)
                )
            )
            # BUG FIX: `policy` is None when the circuit breaker tripped;
            # `FormPolicy.__name__ in None` would raise TypeError and mask the
            # intended ValueError. Guard against a missing policy first.
            if policy and FormPolicy.__name__ in policy:
                error_msg += (
                    "FormAction is not run during "
                    "evaluation therefore it is impossible to know "
                    "if validation failed or this story is wrong. "
                    "If the story is correct, add it to the "
                    "training stories and retrain."
                )
            raise ValueError(error_msg)
    else:
        partial_tracker.update(event)

    return action_executed_eval_store, policy, confidence
def _form_might_have_been_rejected(
    domain: Domain, tracker: DialogueStateTracker, predicted_action_name: Text
) -> bool:
    """Check whether the predicted action is the currently active form.

    If so, a seemingly wrong prediction may still be consistent with the story
    once a form-execution rejection is emulated.
    """
    active_loop_name = tracker.active_loop.get("name")
    if active_loop_name != predicted_action_name:
        return False
    return predicted_action_name in domain.form_names
async def _predict_tracker_actions(
    tracker: DialogueStateTracker,
    agent: "Agent",
    fail_on_prediction_errors: bool = False,
    use_e2e: bool = False,
) -> Tuple[EvaluationStore, DialogueStateTracker, List[Dict[Text, Any]]]:
    """Replay one story tracker, predicting every action (and, for e2e, intent).

    Builds a fresh `partial_tracker` seeded with the first event only, then
    walks the remaining gold events: for each ActionExecuted the policy is
    asked for a prediction, for each UserUttered (in e2e mode) the NLU model
    is asked to parse the raw text. All other events are replayed verbatim.

    Returns:
        The per-tracker evaluation store, the predicted tracker (containing
        Wrongly* events for mismatches) and a list of per-action records.
    """
    processor = agent.create_processor()
    tracker_eval_store = EvaluationStore()

    events = list(tracker.events)

    # Start from only the first event so every following step is predicted.
    partial_tracker = DialogueStateTracker.from_events(
        tracker.sender_id,
        events[:1],
        agent.domain.slots,
        sender_source=tracker.sender_source,
    )

    tracker_actions = []
    should_predict_another_action = True
    num_predicted_actions = 0

    for event in events[1:]:
        if isinstance(event, ActionExecuted):
            # Emulate the processor's action-loop limit so evaluation cannot
            # spin forever on a model that never predicts action_listen.
            circuit_breaker_tripped = processor.is_action_limit_reached(
                num_predicted_actions, should_predict_another_action
            )
            (
                action_executed_result,
                policy,
                confidence,
            ) = _collect_action_executed_predictions(
                processor,
                partial_tracker,
                event,
                fail_on_prediction_errors,
                circuit_breaker_tripped,
            )
            tracker_eval_store.merge_store(action_executed_result)
            tracker_actions.append(
                {
                    "action": action_executed_result.action_targets[0],
                    "predicted": action_executed_result.action_predictions[0],
                    "policy": policy,
                    "confidence": confidence,
                }
            )
            should_predict_another_action = processor.should_predict_another_action(
                action_executed_result.action_predictions[0]
            )
            num_predicted_actions += 1

        elif use_e2e and isinstance(event, UserUttered):
            # End-to-end mode: run the raw message text through NLU and
            # compare against the gold intent/entities of the story event.
            predicted = await processor.parse_message(UserMessage(event.text))
            user_uttered_result = _collect_user_uttered_predictions(
                event, predicted, partial_tracker, fail_on_prediction_errors
            )

            tracker_eval_store.merge_store(user_uttered_result)
        else:
            partial_tracker.update(event)
        # A new user message resets the consecutive-action counter.
        if isinstance(event, UserUttered):
            num_predicted_actions = 0

    return tracker_eval_store, partial_tracker, tracker_actions
def _in_training_data_fraction(action_list: List[Dict[Text, Any]]) -> float:
    """Return the fraction of actions predicted by a memoization-style policy."""
    from rasa.core.policies.ensemble import SimplePolicyEnsemble

    if not action_list:
        return 0

    memo_predicted = sum(
        1
        for action in action_list
        if action["policy"]
        and not SimplePolicyEnsemble.is_not_memo_policy(action["policy"])
    )
    return memo_predicted / len(action_list)
async def _collect_story_predictions(
    completed_trackers: List["DialogueStateTracker"],
    agent: "Agent",
    fail_on_prediction_errors: bool = False,
    use_e2e: bool = False,
) -> Tuple[StoryEvaluation, int]:
    """Test the stories from a file, running them through the stored model.

    Returns the aggregated StoryEvaluation plus the number of stories tested.
    """
    from rasa.test import get_evaluation_metrics
    from tqdm import tqdm

    story_eval_store = EvaluationStore()
    failed = []
    success = []
    # 1 per fully-correct story, 0 per story with at least one mismatch.
    correct_dialogues = []
    number_of_stories = len(completed_trackers)

    logger.info(f"Evaluating {number_of_stories} stories\nProgress:")

    action_list = []

    for tracker in tqdm(completed_trackers):
        (
            tracker_results,
            predicted_tracker,
            tracker_actions,
        ) = await _predict_tracker_actions(
            tracker, agent, fail_on_prediction_errors, use_e2e
        )

        story_eval_store.merge_store(tracker_results)

        action_list.extend(tracker_actions)

        if tracker_results.has_prediction_target_mismatch():
            # there is at least one wrong prediction
            failed.append(predicted_tracker)
            correct_dialogues.append(0)
        else:
            correct_dialogues.append(1)
            success.append(predicted_tracker)

    logger.info("Finished collecting predictions.")
    with warnings.catch_warnings():
        from sklearn.exceptions import UndefinedMetricWarning

        # sklearn warns when a class has no predicted samples; irrelevant here.
        warnings.simplefilter("ignore", UndefinedMetricWarning)
        # Story-level metrics: every story is expected to be fully correct (1).
        report, precision, f1, accuracy = get_evaluation_metrics(
            [1] * len(completed_trackers), correct_dialogues
        )

    in_training_data_fraction = _in_training_data_fraction(action_list)

    _log_evaluation_table(
        [1] * len(completed_trackers),
        "END-TO-END" if use_e2e else "CONVERSATION",
        report,
        precision,
        f1,
        accuracy,
        in_training_data_fraction,
        include_report=False,
    )

    return (
        StoryEvaluation(
            evaluation_store=story_eval_store,
            failed_stories=failed,
            successful_stories=success,
            action_list=action_list,
            in_training_data_fraction=in_training_data_fraction,
        ),
        number_of_stories,
    )
def _log_stories(
    trackers: List[DialogueStateTracker],
    file_path: Text,
    message_if_no_stories: Text = "# None of the test stories failed - all good!",
) -> None:
    """Write the given trackers' stories to a YAML file.

    Args:
        trackers: trackers whose stories should be dumped.
        file_path: destination file.
        message_if_no_stories: comment written instead when `trackers` is
            empty. Defaults to the historical "none failed" note so existing
            callers keep their exact output; callers dumping *successful*
            stories can pass a more accurate message.
    """
    with open(file_path, "w", encoding=DEFAULT_ENCODING) as f:
        if not trackers:
            f.write(message_if_no_stories)
        else:
            stories = [tracker.as_story(include_source=True) for tracker in trackers]
            steps = [step for story in stories for step in story.story_steps]
            f.write(YAMLStoryWriter().dumps(steps))
async def test(
    stories: Text,
    agent: "Agent",
    max_stories: Optional[int] = None,
    out_directory: Optional[Text] = None,
    fail_on_prediction_errors: bool = False,
    e2e: bool = False,
    disable_plotting: bool = False,
    successes: bool = False,
    errors: bool = True,
) -> Dict[Text, Any]:
    """Run the evaluation of the stories, optionally plot the results.

    Args:
        stories: the stories to evaluate on
        agent: the agent
        max_stories: maximum number of stories to consider
        out_directory: path to the directory to write results to
        fail_on_prediction_errors: boolean indicating whether to fail on prediction
            errors or not
        e2e: boolean indicating whether to use end to end evaluation or not
        disable_plotting: boolean indicating whether to disable plotting or not
        successes: boolean indicating whether to write down successful predictions or
            not
        errors: boolean indicating whether to write down incorrect predictions or not

    Returns:
        Evaluation summary.
    """
    from rasa.test import get_evaluation_metrics

    completed_trackers = await _generate_trackers(stories, agent, max_stories, e2e)

    story_evaluation, _ = await _collect_story_predictions(
        completed_trackers, agent, fail_on_prediction_errors, e2e
    )

    evaluation_store = story_evaluation.evaluation_store

    with warnings.catch_warnings():
        from sklearn.exceptions import UndefinedMetricWarning

        # sklearn warns for labels without predicted samples; not useful here.
        warnings.simplefilter("ignore", UndefinedMetricWarning)

        targets, predictions = evaluation_store.serialise()

        if out_directory:
            report, precision, f1, accuracy = get_evaluation_metrics(
                targets, predictions, output_dict=True
            )

            # Persist the per-label classification report next to the plots.
            report_filename = os.path.join(out_directory, REPORT_STORIES_FILE)
            io_utils.dump_obj_as_json_to_file(report_filename, report)
            logger.info(f"Stories report saved to {report_filename}.")
        else:
            report, precision, f1, accuracy = get_evaluation_metrics(
                targets, predictions, output_dict=True
            )

    _log_evaluation_table(
        evaluation_store.action_targets,
        "ACTION",
        report,
        precision,
        f1,
        accuracy,
        story_evaluation.in_training_data_fraction,
        include_report=False,
    )

    if not disable_plotting and out_directory:
        _plot_story_evaluation(
            evaluation_store.action_targets,
            evaluation_store.action_predictions,
            out_directory,
        )

    # Dump failed and/or successful stories as requested.
    if errors and out_directory:
        _log_stories(
            story_evaluation.failed_stories,
            os.path.join(out_directory, FAILED_STORIES_FILE),
        )
    if successes and out_directory:
        _log_stories(
            story_evaluation.successful_stories,
            os.path.join(out_directory, SUCCESSFUL_STORIES_FILE),
        )

    return {
        "report": report,
        "precision": precision,
        "f1": f1,
        "accuracy": accuracy,
        "actions": story_evaluation.action_list,
        "in_training_data_fraction": story_evaluation.in_training_data_fraction,
        "is_end_to_end_evaluation": e2e,
    }
def _log_evaluation_table(
    golds: List[Any],
    name: Text,
    report: Dict[Text, Any],
    precision: float,
    f1: float,
    accuracy: float,
    in_training_data_fraction: float,
    include_report: bool = True,
) -> None:  # pragma: no cover
    """Log the sklearn evaluation metrics."""
    summary_lines = [
        f"Evaluation Results on {name} level:",
        f"\tCorrect: {int(len(golds) * accuracy)} / {len(golds)}",
        f"\tF1-Score: {f1:.3f}",
        f"\tPrecision: {precision:.3f}",
        f"\tAccuracy: {accuracy:.3f}",
        f"\tIn-data fraction: {in_training_data_fraction:.3g}",
    ]
    for line in summary_lines:
        logger.info(line)

    if include_report:
        logger.info(f"\tClassification report: \n{report}")
def _plot_story_evaluation(
    targets: List[Text], predictions: List[Text], output_directory: Optional[Text]
) -> None:
    """Render the story-evaluation confusion matrix to an image file."""
    from sklearn.metrics import confusion_matrix
    from sklearn.utils.multiclass import unique_labels
    from rasa.utils.plotting import plot_confusion_matrix

    if output_directory:
        output_file = os.path.join(output_directory, CONFUSION_MATRIX_STORIES_FILE)
    else:
        output_file = CONFUSION_MATRIX_STORIES_FILE

    plot_confusion_matrix(
        confusion_matrix(targets, predictions),
        classes=unique_labels(targets, predictions),
        title="Action Confusion matrix",
        output_file=output_file,
    )
async def compare_models_in_dir(
    model_dir: Text, stories_file: Text, output: Text
) -> None:
    """Evaluate multiple trained models in a directory on a test set.

    Args:
        model_dir: path to directory that contains the models to evaluate
        stories_file: path to the story file
        output: output directory to store results to
    """
    # Maps config name -> list (one entry per run) of lists of correct-story
    # counts, written to RESULTS_FILE as JSON at the end.
    number_correct = defaultdict(list)

    for run in io_utils.list_subdirectories(model_dir):
        number_correct_in_run = defaultdict(list)

        for model in sorted(io_utils.list_files(run)):
            if not model.endswith("tar.gz"):
                continue

            # The model files are named like <config-name>PERCENTAGE_KEY<number>.tar.gz
            # Remove the percentage key and number from the name to get the config name
            config_name = os.path.basename(model).split(PERCENTAGE_KEY)[0]
            number_of_correct_stories = await _evaluate_core_model(model, stories_file)
            number_correct_in_run[config_name].append(number_of_correct_stories)

        # Collect this run's per-config results into the overall mapping.
        for k, v in number_correct_in_run.items():
            number_correct[k].append(v)

    io_utils.dump_obj_as_json_to_file(
        os.path.join(output, RESULTS_FILE), number_correct
    )
async def compare_models(models: List[Text], stories_file: Text, output: Text) -> None:
    """Evaluate each of the given trained models on the same test stories.

    Args:
        models: list of trained model paths
        stories_file: path to the story file
        output: output directory to store results to
    """
    number_correct = defaultdict(list)

    for model_path in models:
        correct_stories = await _evaluate_core_model(model_path, stories_file)
        number_correct[os.path.basename(model_path)].append(correct_stories)

    io_utils.dump_obj_as_json_to_file(
        os.path.join(output, RESULTS_FILE), number_correct
    )
async def _evaluate_core_model(model: Text, stories_file: Text) -> int:
    """Return how many test stories the given model predicts fully correctly."""
    from rasa.core.agent import Agent

    logger.info(f"Evaluating model '{model}'")

    agent = Agent.load(model)
    test_trackers = await _generate_trackers(stories_file, agent)
    story_evaluation, number_of_stories = await _collect_story_predictions(
        test_trackers, agent
    )
    return number_of_stories - len(story_evaluation.failed_stories)
if __name__ == "__main__":
    # Direct execution of this module was removed in favour of the `rasa` CLI.
    raise RuntimeError(
        "Calling `rasa.core.test` directly is no longer supported. Please use "
        "`rasa test` to test a combined Core and NLU model or `rasa test core` "
        "to test a Core model."
    )
| 34.001274 | 87 | 0.66322 |
c8aa14ff3a56a006a8f56d4c6f257dea05ef1fba | 16,373 | py | Python | mindspore/nn/optim/lamb.py | fufunoyu/mindspore | 704e367ada35653e8144eb0528c714f4b0231508 | [
"Apache-2.0"
] | 2 | 2021-04-22T07:00:59.000Z | 2021-11-08T02:49:09.000Z | mindspore/nn/optim/lamb.py | fufunoyu/mindspore | 704e367ada35653e8144eb0528c714f4b0231508 | [
"Apache-2.0"
] | 1 | 2020-12-29T06:46:38.000Z | 2020-12-29T06:46:38.000Z | mindspore/nn/optim/lamb.py | kungfu-ml/mindspore | 3fa5dd4495f4071b701e7ff490b7085b8824aaaa | [
"Apache-2.0"
] | 1 | 2021-05-10T03:30:36.000Z | 2021-05-10T03:30:36.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""lamb"""
import numpy as np
from mindspore import context
from mindspore.common import dtype as mstype
from mindspore.common.initializer import initializer
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore.ops import functional as F
from mindspore.common.parameter import Parameter
from mindspore.common.tensor import Tensor
from mindspore._checkparam import Validator as validator
from mindspore._checkparam import Rel
from .optimizer import Optimizer
from .. import layer
from .. import graph_kernels as G
# Constant 1.0 reused throughout the LAMB update formulas.
num_one = Tensor(np.ones([1]), mstype.float32)

# Graph dispatcher: registers the per-parameter LAMB update below.
_lamb_opt = C.MultitypeFuncGraph("lamb_opt")


@_lamb_opt.register("Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Number", "Tensor", "Tensor", "Tensor",
                    "Tensor", "Bool", "Bool")
def _update_run_op(beta1, beta2, eps, global_step, lr, weight_decay, param, m, v, gradient, decay_flag, optim_filter):
    """
    Update parameters.

    Args:
        beta1 (Tensor): The exponential decay rate for the 1st moment estimations. Should be in range (0.0, 1.0).
        beta2 (Tensor): The exponential decay rate for the 2nd moment estimations. Should be in range (0.0, 1.0).
        eps (Tensor): Term added to the denominator to improve numerical stability. Should be greater than 0.
        lr (Tensor): Learning rate.
        weight_decay (Number): Weight decay. Should be equal to or greater than 0.
        global_step (Tensor): Global step.
        param (Tensor): Parameters.
        m (Tensor): m value of parameters.
        v (Tensor): v value of parameters.
        gradient (Tensor): Gradient of parameters.
        decay_flag (bool): Specifies whether param update with weight decay.
        optim_filter (bool): Applies parameter update or not.

    Returns:
        Tensor, the updated parameter when `optim_filter` is True, otherwise
        the unmodified gradient.
    """
    if optim_filter:
        op_mul = P.Mul()
        op_sqrt = P.Sqrt()
        op_rsqrt = P.Rsqrt()
        op_square = P.Square()
        op_cast = P.Cast()
        op_reshape = P.Reshape()
        op_shape = P.Shape()
        op_pow = P.Pow()
        op_norm = layer.Norm()
        op_select = P.Select()
        op_greater = P.Greater()
        op_fill = P.Fill()
        op_dtype = P.DType()

        # All arithmetic is done in float32 regardless of the parameter dtype.
        param_fp32 = op_cast(param, mstype.float32)
        m_fp32 = op_cast(m, mstype.float32)
        v_fp32 = op_cast(v, mstype.float32)
        gradient_fp32 = op_cast(gradient, mstype.float32)

        # Adam-style moment updates: m <- b1*m + (1-b1)*g, v <- b2*v + (1-b2)*g^2.
        next_m = op_mul(beta1, m_fp32) + op_mul(op_cast(num_one, mstype.float32) - beta1, gradient_fp32)

        next_v = op_mul(beta2, v_fp32) + op_mul(op_cast(num_one, mstype.float32) - beta2, op_square(gradient_fp32))

        # Bias correction by 1 - beta^(step+1).
        next_mm = next_m / (op_cast(num_one, mstype.float32)
                            - op_pow(beta1, op_cast(global_step + num_one, mstype.float32)))
        next_vv = next_v / (op_cast(num_one, mstype.float32) -
                            op_pow(beta2, op_cast(global_step + num_one, mstype.float32)))
        w_norm = op_norm(param_fp32)
        g_norm = op_norm(gradient_fp32)

        # Layer-wise trust ratio ||w|| / ||update||, defaulting to 1 when
        # either norm is zero, and clipped to [0, 10].
        g_norm_hat = op_norm(op_mul(next_mm, op_rsqrt(next_vv + eps)) + weight_decay * param_fp32)
        zeros = F.zeros_like(w_norm)
        ones = op_fill(op_dtype(w_norm), op_shape(w_norm), 1.0)
        trust_ratio = op_select(
            op_greater(w_norm, zeros),
            op_select(op_greater(g_norm, zeros), w_norm / g_norm_hat, ones),
            ones)
        tens = op_fill(op_dtype(trust_ratio), op_shape(trust_ratio), 10.0)
        trust_ratio = C.clip_by_value(trust_ratio, zeros, tens)
        update = next_mm / (op_sqrt(next_vv) + eps)

        if decay_flag:
            update = update + op_mul(weight_decay, param_fp32)

        update_with_lr = op_mul(op_mul(trust_ratio, lr), update)
        next_param = param_fp32 - op_reshape(update_with_lr, op_shape(param_fp32))

        # Write the new param/m/v back in the original dtype; F.depend keeps
        # the assignments ordered before the returned value is produced.
        next_param = F.depend(next_param, F.assign(param, op_cast(next_param, F.dtype(param))))
        next_param = F.depend(next_param, F.assign(m, op_cast(next_m, F.dtype(m))))
        next_param = F.depend(next_param, F.assign(v, op_cast(next_v, F.dtype(v))))

        return op_cast(next_param, F.dtype(param))
    return gradient
# Graph dispatcher for the fused (graph-kernel) LAMB update variant.
lamb_opt_graph_kernel = C.MultitypeFuncGraph("lamb_opt_graph_kernel")


@lamb_opt_graph_kernel.register("Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Number",
                                "Tensor", "Tensor", "Tensor", "Tensor", "Bool")
def _update_run_op_graph_kernel(beta1, beta2, eps, global_step, lr, weight_decay, param, m, v, gradient, decay_flag):
    """
    Update parameters.

    Args:
        beta1 (Tensor): The exponential decay rate for the 1st moment estimations. Should be in range (0.0, 1.0).
        beta2 (Tensor): The exponential decay rate for the 2nd moment estimations. Should be in range (0.0, 1.0).
        eps (Tensor): Term added to the denominator to improve numerical stability. Should be greater than 0.
        lr (Tensor): Learning rate.
        weight_decay (Number): Weight decay. Should be equal to or greater than 0.
        global_step (Tensor): Global step.
        param (Tensor): Parameters.
        m (Tensor): m value of parameters.
        v (Tensor): v value of parameters.
        gradient (Tensor): Gradient of parameters.
        decay_flag (bool): Specifies whether param update with weight decay.

    Returns:
        Tensor, the new value of v after updating.
    """
    op_mul = P.Mul()
    op_square = P.Square()
    op_cast = P.Cast()
    op_shape = P.Shape()
    op_pow = P.Pow()
    op_norm = layer.Norm()
    op_fill = P.Fill()
    op_dtype = P.DType()

    param_fp32 = op_cast(param, mstype.float32)
    gradient_fp32 = op_cast(gradient, mstype.float32)

    # Scalars for the fused kernel: (1 - beta) factors and the bias
    # corrections 1 - beta^(step+1).
    i6_ex = op_cast(global_step + num_one, mstype.float32)
    i9 = op_cast(num_one, mstype.float32) - beta1
    x1 = op_cast(num_one, mstype.float32) - beta2
    i6 = op_cast(num_one, mstype.float32) - op_pow(beta1, i6_ex)
    i3 = op_cast(num_one, mstype.float32) - op_pow(beta2, i6_ex)
    i1 = op_square(gradient_fp32)
    # Fused kernel computing the moment updates and the raw update direction.
    add3, update = G.LambNextMV()(i1, v, i3, gradient, m, i6, param, beta1, i9, beta2, x1, weight_decay, eps)

    if decay_flag:
        update = update + op_mul(weight_decay, param_fp32)

    w_norm = op_norm(param_fp32)
    g_norm = op_norm(gradient_fp32)
    g_norm_hat = op_norm(add3)

    zeros = F.zeros_like(w_norm)
    ones = op_fill(op_dtype(w_norm), op_shape(w_norm), 1.0)
    tens = op_fill(op_dtype(w_norm), op_shape(w_norm), 10.0)

    # Fused kernel applying trust-ratio scaling and the learning rate.
    next_param = G.LambUpdateWithLR()(g_norm, w_norm, g_norm_hat, lr, update, param, zeros, ones, tens)
    # control_depend orders the moment update before the parameter update.
    next_v = F.control_depend(add3, next_param)
    return next_v
def _check_param_value(beta1, beta2, eps, prim_name):
    """Validate type and range of the Lamb hyper-parameters.

    Each argument must be a float; beta1/beta2 must lie strictly inside
    (0, 1) and eps strictly inside (0, inf). Raises via `validator` on the
    first violation, naming `prim_name` in the message.
    """
    named_args = (("beta1", beta1), ("beta2", beta2), ("eps", eps))
    # Type checks first, in declaration order, matching the range checks below.
    for arg_name, arg_value in named_args:
        validator.check_value_type(arg_name, arg_value, [float], prim_name)
    bounded = (("beta1", beta1, 1.0), ("beta2", beta2, 1.0), ("eps", eps, float("inf")))
    for arg_name, arg_value, upper in bounded:
        validator.check_number_range(arg_name, arg_value, 0.0, upper, Rel.INC_NEITHER, prim_name)
class Lamb(Optimizer):
    """
    Lamb Dynamic Learning Rate.

    LAMB is an optimization algorithm employing a layerwise adaptive large batch
    optimization technique. Refer to the paper `LARGE BATCH OPTIMIZATION FOR DEEP LEARNING: TRAINING BERT IN 76
    MINUTES <https://arxiv.org/abs/1904.00962>`_.

    Note:
        When separating parameter groups, the weight decay in each group will be applied on the parameters if the
        weight decay is positive. When not separating parameter groups, the `weight_decay` in the API will be applied
        on the parameters without 'beta' or 'gamma' in their names if `weight_decay` is positive.

        To improve parameter groups performance, the customized order of parameters can be supported.

    Args:
        params (Union[list[Parameter], list[dict]]): When the `params` is a list of `Parameter` which will be updated,
            the element in `params` must be class `Parameter`. When the `params` is a list of `dict`, the "params",
            "lr", "weight_decay" and "order_params" are the keys can be parsed.

            - params: Required. The value must be a list of `Parameter`.

            - lr: Optional. If "lr" in the keys, the value of corresponding learning rate will be used.
              If not, the `learning_rate` in the API will be used.

            - weight_decay: Optional. If "weight_decay" in the keys, the value of corresponding weight decay
              will be used. If not, the `weight_decay` in the API will be used.

            - order_params: Optional. If "order_params" in the keys, the value must be the order of parameters and
              the order will be followed in optimizer. There are no other keys in the `dict` and the parameters which
              in the value of 'order_params' must be in one of group parameters.

        learning_rate (Union[float, Tensor, Iterable, LearningRateSchedule]): A value or a graph for the learning rate.
            When the learning_rate is an Iterable or a Tensor in a 1D dimension, use dynamic learning rate, then
            the i-th step will take the i-th value as the learning rate. When the learning_rate is LearningRateSchedule,
            use dynamic learning rate, the i-th learning rate will be calculated during the process of training
            according to the formula of LearningRateSchedule. When the learning_rate is a float or a Tensor in a zero
            dimension, use fixed learning rate. Other cases are not supported. The float learning rate must be
            equal to or greater than 0. If the type of `learning_rate` is int, it will be converted to float.
        beta1 (float): The exponential decay rate for the 1st moment estimations. Default: 0.9.
            Should be in range (0.0, 1.0).
        beta2 (float): The exponential decay rate for the 2nd moment estimations. Default: 0.999.
            Should be in range (0.0, 1.0).
        eps (float): Term added to the denominator to improve numerical stability. Default: 1e-6.
            Should be greater than 0.
        weight_decay (float): Weight decay (L2 penalty). Default: 0.0. Should be equal to or greater than 0.

    Inputs:
        - **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`.

    Outputs:
        tuple[bool], all elements are True.

    Examples:
        >>> net = Net()
        >>> #1) All parameters use the same learning rate and weight decay
        >>> optim = nn.Lamb(params=net.trainable_params())
        >>>
        >>> #2) Use parameter groups and set different values
        >>> poly_decay_lr = learning_rate_schedule.PolynomialDecayLR()
        >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
        >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
        >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
        >>>                 {'params': no_conv_params, 'lr': poly_decay_lr},
        >>>                 {'order_params': net.trainable_params(0.01, 0.0001, 10, 0.5)}]
        >>> optim = nn.Lamb(group_params, learning_rate=0.1, weight_decay=0.0)
        >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
        >>> # The no_conv_params's parameters will use dynamic learning rate of poly decay learning rate and default
        >>> # weight decay of 0.0.
        >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
        >>>
        >>> loss = nn.SoftmaxCrossEntropyWithLogits()
        >>> model = Model(net, loss_fn=loss, optimizer=optim)
    """

    def __init__(self, params, learning_rate, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0):
        super(Lamb, self).__init__(learning_rate, params, weight_decay)
        _check_param_value(beta1, beta2, eps, self.cls_name)

        # turn them to scalar when me support scalar/tensor mix operations
        self.beta1 = Tensor(np.array([beta1]).astype(np.float32))
        self.beta2 = Tensor(np.array([beta2]).astype(np.float32))
        self.eps = Tensor(np.array([eps]).astype(np.float32))
        self.params = self.parameters
        # Per-parameter 1st/2nd moment accumulators, zero-initialized.
        self.moments1 = self.params.clone(prefix="lamb_m", init='zeros')
        self.moments2 = self.params.clone(prefix="lamb_v", init='zeros')
        if not self.dynamic_lr:
            # With a fixed learning rate we track the step count ourselves
            # (used for bias correction); incremented in construct().
            self.global_step = Parameter(initializer(0, [1]), name='global_step')
            self.assignadd = P.AssignAdd()
        self.hyper_map = C.HyperMap()
        # Fused graph-kernel path is only available on Ascend with the
        # graph-kernel context flag enabled.
        self.enable_graph_kernel = context.get_context("enable_graph_kernel") and \
                                   context.get_context("device_target") == "Ascend"

    def construct(self, gradients):
        lr = self.get_lr()
        # Dispatch on (graph-kernel, grouped-params, per-group-lr); the only
        # difference between branches is which values are bound via F.partial
        # versus broadcast per-parameter by hyper_map.
        if self.enable_graph_kernel:
            if self.is_group:
                if self.is_group_lr:
                    optim_result = self.hyper_map(F.partial(lamb_opt_graph_kernel, self.beta1, self.beta2, self.eps,
                                                            self.global_step),
                                                  lr, self.weight_decay, self.params, self.moments1, self.moments2,
                                                  gradients, self.decay_flags)
                else:
                    optim_result = self.hyper_map(F.partial(lamb_opt_graph_kernel, self.beta1, self.beta2, self.eps,
                                                            self.global_step, lr),
                                                  self.weight_decay, self.params, self.moments1, self.moments2,
                                                  gradients, self.decay_flags)
            else:
                optim_result = self.hyper_map(F.partial(lamb_opt_graph_kernel, self.beta1, self.beta2, self.eps,
                                                        self.global_step, lr, self.weight_decay),
                                              self.params, self.moments1, self.moments2, gradients, self.decay_flags)
        else:
            if self.is_group:
                if self.is_group_lr:
                    optim_result = self.hyper_map(F.partial(_lamb_opt, self.beta1, self.beta2, self.eps,
                                                            self.global_step),
                                                  lr, self.weight_decay, self.params, self.moments1, self.moments2,
                                                  gradients, self.decay_flags, self.optim_filter)
                else:
                    optim_result = self.hyper_map(F.partial(_lamb_opt, self.beta1, self.beta2, self.eps,
                                                            self.global_step, lr),
                                                  self.weight_decay, self.params, self.moments1, self.moments2,
                                                  gradients, self.decay_flags, self.optim_filter)
            else:
                optim_result = self.hyper_map(F.partial(_lamb_opt, self.beta1, self.beta2, self.eps,
                                                        self.global_step, lr, self.weight_decay),
                                              self.params, self.moments1, self.moments2, gradients,
                                              self.decay_flags, self.optim_filter)
        if self.use_parallel:
            self.broadcast_params(optim_result)
        if not self.dynamic_lr:
            # Bump the step counter after the update; control_depend orders
            # the increment after the lr read in the traced graph.
            F.control_depend(lr, self.assignadd(self.global_step, 1))

        return optim_result
| 50.847826 | 120 | 0.630489 |
0a0e957c98248fd7f25af8cca4906ccde8e634c9 | 636 | py | Python | convOldTorrentStore.py | hydrogen18/fairywren | 2d99c9bd3a645bf7be1a8e14d4afce30513aceaa | [
"MIT"
] | 39 | 2015-04-02T10:54:24.000Z | 2021-12-17T04:01:37.000Z | convOldTorrentStore.py | hydrogen18/fairywren | 2d99c9bd3a645bf7be1a8e14d4afce30513aceaa | [
"MIT"
] | null | null | null | convOldTorrentStore.py | hydrogen18/fairywren | 2d99c9bd3a645bf7be1a8e14d4afce30513aceaa | [
"MIT"
] | 21 | 2015-03-24T13:58:23.000Z | 2021-06-08T11:26:24.000Z | import sys
import gdbm
import torrents
import os
import os.path
import cPickle as pickle
if __name__ == "__main__":
torrentsDir = sys.argv[1]
db = gdbm.open(sys.argv[2],'cf',0600)
for root,dirs,files in os.walk(torrentsDir):
for f in files:
fpath = os.path.join(root,f)
with open(fpath) as fin:
torrentdict = pickle.load(fin)
path = fpath.split(os.sep)
torrentId = int(path[-4] + path[-3] + path[-2] + path[-1],16)
print '%.8x' % torrentId
db['%.8x' % torrentId ] = pickle.dumps(torrentdict,-1)
db[('%.8x' % torrentId) + torrents.TorrentStore.EXTENDED_SUFFIX ] = pickle.dumps({},-1)
db.sync()
| 24.461538 | 90 | 0.650943 |
23d4d34218811b39c1c60367d1e40666368ff267 | 3,264 | py | Python | selia_managers/views/detail_views/data_collection.py | CONABIO-audio/selia-managers | 4d603bd329bd47654839596793e168db7a0ec35f | [
"BSD-4-Clause"
] | null | null | null | selia_managers/views/detail_views/data_collection.py | CONABIO-audio/selia-managers | 4d603bd329bd47654839596793e168db7a0ec35f | [
"BSD-4-Clause"
] | 10 | 2020-04-15T17:54:57.000Z | 2022-03-12T00:11:27.000Z | selia_managers/views/detail_views/data_collection.py | CONABIO-audio/selia-managers | 4d603bd329bd47654839596793e168db7a0ec35f | [
"BSD-4-Clause"
] | null | null | null | from django.views.generic.detail import SingleObjectMixin
from django import forms
from django.db.models import Count, Max
from irekua_database.models import Collection
from irekua_permissions.object_types.data_collections import (
collection_types as permissions)
from selia_templates.views.detail_base import SeliaDetailView
from selia_templates.forms.json_field import JsonField
class CollectionUpdateForm(forms.ModelForm):
    """ModelForm for editing a Collection's basic fields.

    `metadata` is rendered with the project's JsonField widget; its JSON
    schema is injected per-instance by the view (see
    DetailCollectionView.get_form).
    """
    metadata = JsonField()

    class Meta:
        model = Collection
        fields = [
            'name',
            'description',
            'metadata',
            'logo'
        ]
class DetailCollectionView(SeliaDetailView, SingleObjectMixin):
    """Detail/update view for a managed Collection.

    Annotates the collection with usage counts (users, devices, sampling
    events, items, ...) for the summary template and wires the collection
    type's metadata schema into the update form.
    """
    form_class = CollectionUpdateForm
    template_name = 'selia_managers/detail/managed_collection.html'
    help_template = 'selia_managers/help/managed_collection_detail.html'
    detail_template = 'selia_managers/details/managed_collection.html'
    update_form_template = 'selia_managers/update/managed_collection.html'
    summary_template = 'selia_managers/summaries/managed_collection.html'
    delete_redirect_url = 'selia_managers:collections'

    def has_view_permission(self):
        """True if the current user may view this collection type."""
        user = self.request.user
        collection_type = self.object.collection_type
        return permissions.view(user, collection_type=collection_type)

    def has_change_permission(self):
        """True if the current user may change this collection type."""
        # NOTE(review): this calls permissions.view, same as the view check —
        # looks like a copy/paste slip; confirm whether permissions exposes a
        # change() check that should be used here.
        user = self.request.user
        collection_type = self.object.collection_type
        return permissions.view(user, collection_type=collection_type)

    def has_delete_permission(self):
        """True if the current user may delete this collection type."""
        # NOTE(review): also calls permissions.view — confirm against the
        # irekua_permissions API (delete check?).
        user = self.request.user
        collection_type = self.object.collection_type
        return permissions.view(user, collection_type=collection_type)

    def get_object(self, *args, **kwargs):
        """Fetch the collection with aggregate counts for the summary panel."""
        return (
            Collection.objects
            .annotate(
                user_count=Count('users', distinct=True),
                admin_count=Count('administrators', distinct=True),
                device_count=Count('collectiondevice', distinct=True),
                sampling_event_count=Count('samplingevent', distinct=True),
                site_count=Count('collectionsite', distinct=True),
                deployment_count=Count('samplingevent__samplingeventdevice', distinct=True),
                item_count=Count('samplingevent__samplingeventdevice__item', distinct=True),
                annotation_count=Count('samplingevent__samplingeventdevice__item__annotation', distinct=True),
                last_item=Max('samplingevent__samplingeventdevice__item__created_on'),
                last_annotation=Max('samplingevent__samplingeventdevice__item__annotation__created_on')
            ).get(pk=self.kwargs['pk']))

    def post(self, *args, **kwargs):
        # Removed leftover debug branch that printed 'borrar' when
        # 'remove_admin' was in the query string; admin removal is not
        # implemented here yet (TODO).
        return super().post(*args, **kwargs)

    def get_form(self, **kwargs):
        """Bind the collection type's JSON schema to the metadata field."""
        form = super().get_form(**kwargs)
        schema = self.object.collection_type.metadata_schema
        form.fields['metadata'].update_schema(schema)
        return form

    def get_context_data(self, *args, **kwargs):
        """Expose the object under the template name 'collection'."""
        context = super().get_context_data(*args, **kwargs)
        context['collection'] = self.object
        return context
469a418758aa353692009096f558526cfce085fe | 2,510 | py | Python | emol/emol/views/combatant/combatant.py | lrt512/emol | e1dd3462632a525c3b9701d4fd9a332d19c93b85 | [
"MIT"
] | null | null | null | emol/emol/views/combatant/combatant.py | lrt512/emol | e1dd3462632a525c3b9701d4fd9a332d19c93b85 | [
"MIT"
] | null | null | null | emol/emol/views/combatant/combatant.py | lrt512/emol | e1dd3462632a525c3b9701d4fd9a332d19c93b85 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Combatant self-serve views."""
# standard library imports
import uuid
from datetime import datetime
# third-party imports
from flask import Blueprint, render_template, current_app
from flask_login import current_user
# application imports
from emol.models import Combatant, Discipline, UpdateRequest
from emol.models.faux_user import FauxUserSwitch
from emol.utility.hash import Sha256
BLUEPRINT = Blueprint('combatant', __name__)
@BLUEPRINT.route('/card/<card_id>', methods=['GET'])
def view_card(card_id):
    """Render a combatant's authorization card.

    Args:
        card_id: A combatant card ID

    Returns:
        - The combatant's card view when the card ID matches a record
        - The generic message view when it does not
    """
    current_app.logger.info('Card for {}'.format(card_id))

    record = Combatant.query.filter(Combatant.card_id == card_id).one_or_none()
    if record is not None:
        return render_template(
            'combatant/card.html',
            success=True,
            disciplines=Discipline.query.all(),
            combatant=record
        )

    # Unknown card ID: log it and show the generic failure page.
    current_app.logger.error(
        'No combatant record for card ID: {}'.format(card_id)
    )
    return render_template(
        'message/message.html',
        message='Could not find the specified combatant'
    )
@BLUEPRINT.route('/update/<token>', methods=['GET'])
def update_info(token):
    """Handle requests to consume a combatant update info request.

    Args:
        token: An info update request token

    Returns:
        - The message view if the token is invalid (unknown, expired,
          or already consumed)
        - The self-serve info update view if the token is valid
    """
    token_valid = True
    update_request = UpdateRequest.query.filter(UpdateRequest.token == token).one_or_none()
    if update_request is None:
        # Unknown token
        token_valid = False
    elif update_request.expiry < datetime.utcnow():
        # Token has expired
        token_valid = False
    elif update_request.consumed is not None:
        # Token was already used
        token_valid = False

    # BUG FIX: this guard previously read `if False and token_valid is False:`
    # (debug leftover), which made it unreachable and let invalid, expired,
    # and consumed tokens through. Reject them here.
    if not token_valid:
        return render_template(
            'message/message.html',
            message='Invalid token provided'
        )

    # Use the FauxUserSwitch context manager to provide an
    # "authorized user" for Jinja environment
    with FauxUserSwitch():
        return render_template(
            'combatant/combatant_update_info.html',
            combatant=update_request.combatant,
            token=token,
            is_self_serve=True
        )
810371e60ecd816bb2c450d4099885e58df2944f | 12,263 | py | Python | nipy/algorithms/statistics/tests/test_intrinsic_volumes.py | arokem/nipy | d6b2e862c65558bb5747c36140fd6261a7e1ecfe | [
"BSD-3-Clause"
] | 1 | 2016-03-08T15:01:06.000Z | 2016-03-08T15:01:06.000Z | nipy/algorithms/statistics/tests/test_intrinsic_volumes.py | fabianp/nipy | 40e89f3ca7f34df05631623807993026134e6de3 | [
"BSD-3-Clause"
] | 1 | 2015-09-09T07:49:57.000Z | 2015-09-25T01:50:40.000Z | nipy/algorithms/statistics/tests/test_intrinsic_volumes.py | fabianp/nipy | 40e89f3ca7f34df05631623807993026134e6de3 | [
"BSD-3-Clause"
] | null | null | null | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import numpy as np
import numpy.linalg as npl
from .. import intvol, utils
from nose.tools import assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_almost_equal
def symnormal(p=10):
    """Return a p x p symmetric matrix of standard-normal entries.

    Symmetrized as (M + M.T) / sqrt(2), which keeps unit variance per entry.
    """
    half = np.random.standard_normal((p, p))
    sym = half + half.T
    return sym / np.sqrt(2)
def randorth(p=10):
    """
    A random orthogonal matrix.

    Eigenvectors of a random symmetric matrix form an orthogonal basis.
    """
    _, eigvecs = npl.eig(symnormal(p))
    return eigvecs
def box(shape, edges):
    """Build an integer indicator array that is 1 inside the given box.

    Parameters
    ----------
    shape : tuple of int
        Shape of the output array.
    edges : sequence of (low, high) pairs
        Half-open interval [low, high) selected along each axis.

    Returns
    -------
    ndarray of int
        Array of `shape` with ones inside the box, zeros elsewhere.
    """
    data = np.zeros(shape)
    # numpy requires a *tuple* of slices for multi-axis indexing; the old
    # list-of-slices form was deprecated and now selects elements instead.
    sl = tuple(slice(lo, hi, 1) for lo, hi in edges)
    data[sl] = 1
    # np.int was removed from numpy; the builtin int is equivalent here.
    return data.astype(int)
def randombox(shape):
    """
    Generate a random box, returning the box and the edge lengths
    """
    # np.random.random_integers (inclusive upper bound) was removed from
    # numpy; randint's upper bound is exclusive, hence shape[j] + 1.
    edges = [np.random.randint(0, shape[j] + 1, size=(2,))
             for j in range(len(shape))]

    for j in range(len(shape)):
        edges[j].sort()
        if edges[j][0] == edges[j][1]:
            # Degenerate (empty) interval: fall back to a default box.
            # Floor division keeps the slice bound an int under Python 3.
            edges[j][0] = 0
            edges[j][1] = shape[j] // 2 + 1
    return edges, box(shape, edges)
def elsym(edgelen, order=1):
    """
    Elementary symmetric polynomial of a given order

    Sum over all `order`-subsets of `edgelen` of the product of each
    subset's entries; order 0 is 1 by convention.
    """
    if order == 0:
        return 1
    # itertools.combinations replaces the deprecated utils.combinations
    # helper (same recipe); np.prod replaces the removed np.product alias.
    from itertools import combinations
    r = 0
    for v in combinations(range(len(edgelen)), order):
        r += np.prod([edgelen[vv] for vv in v])
    return r
def nonintersecting_boxes(shape):
    """
    The Lips's are supposed to be additive, so disjoint things
    should be additive. But, if they ALMOST intersect, different
    things get added to the triangulation.

    >>> b1 = np.zeros(40, np.int)
    >>> b1[:11] = 1
    >>> b2 = np.zeros(40, np.int)
    >>> b2[11:] = 1
    >>> (b1*b2).sum()
    0
    >>> c = np.indices((40,)).astype(np.float)
    >>> intvol.Lips1d(c, b1)
    array([  1.,  10.])
    >>> intvol.Lips1d(c, b2)
    array([  1.,  28.])
    >>> intvol.Lips1d(c, b1+b2)
    array([  1.,  39.])

    The function creates two boxes such that the 'dilated' box1 does not
    intersect with box2. Additivity works in this case.
    """
    # Rejection-sample box pairs until box1, dilated by one voxel on every
    # side (clipped to the array bounds), is disjoint from box2.
    while True:
        edge1, box1 = randombox(shape)
        edge2, box2 = randombox(shape)
        diledge1 = [[max(ed[0]-1, 0), min(ed[1]+1, sh)]
                    for ed, sh in zip(edge1, box1.shape)]
        dilbox1 = box(box1.shape, diledge1)
        # Sum has only values {0, 1} exactly when the boxes do not overlap.
        if set(np.unique(dilbox1 + box2)).issubset([0,1]):
            break
    return box1, box2, edge1, edge2
def pts2dots(d, a, b, c):
    """Convert point coordinates to pairwise dot products.

    Returns the 10 dot products of the four points in the fixed order
    (D00, D01, D02, D03, D11, D12, D13, D22, D23, D33), i.e. all pairs
    (i, j) with i <= j over (d, a, b, c).
    """
    pts = (d, a, b, c)
    dots = [np.dot(pts[i], pts[j]) for i in range(4) for j in range(i, 4)]
    return tuple(dots)
def pts2mu3_tet(d, a, b, c):
    """ Accept point coordinates for calling mu3_tet

    Thin adapter: mu3_tet takes the 10 pairwise dot products, not points.
    """
    return intvol.mu3_tet(*pts2dots(d, a, b, c))
def wiki_tet_vol(d, a, b, c):
    """Volume of the tetrahedron with vertices d, a, b, c (Wikipedia formula)."""
    # PY3 FIX: `for e in d, a, b, c` inside a comprehension is Python-2-only
    # syntax; the tuple must be parenthesized.
    d, a, b, c = [np.array(e) for e in (d, a, b, c)]
    cp = np.cross((b-d), (c-d))
    v2t6 = np.dot((a-d), cp)
    # NOTE(review): Wikipedia's formula is V = |triple product| / 6; taking
    # sqrt only agrees when the product is 0 or 1 (as in the tests here) and
    # yields nan for negative orientation — confirm intended behavior.
    return np.sqrt(v2t6) / 6.
def test_mu3tet():
    # mu3 (volume) of the unit tetrahedron is 1/6; fully degenerate input is 0.
    assert_equal(intvol.mu3_tet(0,0,0,0,1,0,0,1,0,1), 1./6)
    assert_equal(intvol.mu3_tet(0,0,0,0,0,0,0,0,0,0), 0)
    # Same tetrahedron translated to (2,2,2): volume must be unchanged,
    # and must agree with the independent Wikipedia-formula helper.
    d = [2,2,2]
    a = [3,2,2]
    b = [2,3,2]
    c = [2,2,3]
    assert_equal(pts2mu3_tet(d, a, b, c), 1./6)
    assert_equal(wiki_tet_vol(d, a, b, c), 1./6)
    # This used to generate nan values
    assert_equal(intvol.mu3_tet(0,0,0,0,1,0,0,-1,0,1), 0)
def test_mu2tri():
    # mu2 (area) of the right triangle with unit legs is 1/2.
    assert_equal(intvol.mu2_tri(0,0,0,1,0,1), 1./2)
def test_mu1tri():
    # mu1 (half perimeter) of the unit right triangle: (1 + 1 + sqrt(2)) / 2.
    assert_equal(intvol.mu1_tri(0,0,0,1,0,1), 1+np.sqrt(2)/2)
def test_mu2tet():
    # mu2 (half surface area) of the unit tetrahedron:
    # three unit right triangles (3/2) plus the sqrt(3)/2 face, halved.
    assert_equal(intvol.mu2_tet(0,0,0,0,1,0,0,1,0,1), (3./2 + np.sqrt(3./4))/2)
def pts2mu1_tet(d, a, b, c):
    """ Accept point coordinates for calling mu1_tet

    Thin adapter: mu1_tet takes the 10 pairwise dot products, not points.
    """
    return intvol.mu1_tet(*pts2dots(d, a, b, c))
def test_mu1_tet():
    # mu1 is translation invariant: same tetrahedron shifted by (2,2,2).
    res1 = pts2mu1_tet([2,2,2],[3,2,2],[2,3,2],[2,2,3])
    res2 = pts2mu1_tet([0,0,0],[1,0,0],[0,1,0],[0,0,1])
    assert_equal(res1, res2)
    assert_equal(intvol.mu1_tet(0,0,0,0,0,0,0,0,0,0), 0)
    # This used to generate nan values
    assert_equal(intvol.mu1_tet(0,0,0,0,1,0,0,-1,0,1), 0)
def test__mu1_tetface():
    # Test for out of range acos value sequences. I'm ashamed to say I found
    # these sequences accidentally in a failing test with random numbers
    _mu1_tetface = intvol._mu1_tetface
    assert_almost_equal(_mu1_tetface(1, 0, 0, 10, 10, 0, 0, 20, 20, 40), 0)
    assert_almost_equal(_mu1_tetface(36, 0, 0, 18, 48, 0, 0, 1, 30, 63), 3)
def test_ec():
    # A single filled box in 1/2/3 dimensions has Euler characteristic 1.
    # Nose-style generator test: each yield is one test case.
    for i in range(1, 4):
        _, box1 = randombox((40,)*i)
        f = {3:intvol.EC3d,
             2:intvol.EC2d,
             1:intvol.EC1d}[i]
        yield assert_almost_equal, f(box1), 1
def test_ec_disjoint():
    # Euler characteristic is additive over disjoint (non-touching) regions.
    for i in range(1, 4):
        e = {3:intvol.EC3d,
             2:intvol.EC2d,
             1:intvol.EC1d}[i]
        box1, box2, _, _ = nonintersecting_boxes((40,)*i)
        assert_almost_equal(e(box1 + box2), e(box1) + e(box2))
def test_lips_wrapping():
    # Test that shapes touching the edge do not combine by wrapping
    b1 = np.zeros(40, np.int)
    b1[:11] = 1
    b2 = np.zeros(40, np.int)
    b2[11:] = 1
    # lines are disjoint
    assert_equal((b1*b2).sum(), 0)
    c = np.indices(b1.shape).astype(np.float)
    assert_array_equal(intvol.Lips1d(c, b1), (1, 10))
    assert_array_equal(intvol.Lips1d(c, b2), (1, 28))
    # Union of the two edge-touching segments: lengths add (10 + 28 + the
    # shared gap voxel), not wrap into a single looped line.
    assert_array_equal(intvol.Lips1d(c, b1+b2), (1, 39.0))
    # 2D
    b1 = b1[:,None]
    b2 = b2[:,None]
    # boxes are disjoint
    assert_equal((b1*b2).sum(), 0)
    c = np.indices(b1.shape).astype(np.float)
    assert_array_equal(intvol.Lips2d(c, b1), (1, 10, 0))
    assert_array_equal(intvol.Lips2d(c, b2), (1, 28, 0))
    assert_array_equal(intvol.Lips2d(c, b1+b2), (1, 39.0, 0))
    # 3D
    b1 = b1[:,:,None]
    b2 = b2[:,:,None]
    assert_equal(b1.shape, (40,1,1))
    # boxes are disjoint
    assert_equal((b1*b2).sum(), 0)
    c = np.indices(b1.shape).astype(np.float)
    assert_array_equal(intvol.Lips3d(c, b1), (1, 10, 0, 0))
    assert_array_equal(intvol.Lips3d(c, b2), (1, 28, 0, 0))
    assert_array_equal(intvol.Lips3d(c, b1+b2), (1, 39.0, 0, 0))
    # Shapes which are squeezable should still return sensible answers
    # Test simple ones line / box / volume
    funcer = {1: (intvol.Lips1d, intvol.EC1d),
              2: (intvol.Lips2d, intvol.EC2d),
              3: (intvol.Lips3d, intvol.EC3d)}
    for box_shape, exp_ivs in [[(10,),(1,9)],
                               [(10,1),(1,9,0)],
                               [(1,10),(1,9,0)],
                               [(10,1,1), (1,9,0,0)],
                               [(1, 10, 1), (1,9,0,0)],
                               [(1, 1, 10), (1,9,0,0)]]:
        nd = len(box_shape)
        lips_func, ec_func = funcer[nd]
        c = np.indices(box_shape).astype(np.float)
        b = np.ones(box_shape, dtype=np.int)
        assert_array_equal(lips_func(c, b), exp_ivs)
        # EC equals the zeroth intrinsic volume.
        assert_equal(ec_func(b), exp_ivs[0])
def test_lips1_disjoint():
    # 1D intrinsic volumes: additivity over disjoint boxes, invariance under
    # embedding in higher-dimensional coordinates and under rotation, and
    # agreement with the closed-form elementary-symmetric expression.
    phi = intvol.Lips1d
    box1, box2, edge1, edge2 = nonintersecting_boxes((30,))
    c = np.indices((30,)).astype(np.float)
    # Test N dimensional coordinates (N=10)
    d = np.random.standard_normal((10,)+(30,))
    # Test rotation causes no change in volumes
    U = randorth(p=6)[:1]
    e = np.dot(U.T, c.reshape((c.shape[0], np.product(c.shape[1:]))))
    e.shape = (e.shape[0],) + c.shape[1:]
    assert_almost_equal(phi(c, box1 + box2), phi(c, box1) + phi(c, box2))
    assert_almost_equal(phi(d, box1 + box2), phi(d, box1) + phi(d, box2))
    assert_almost_equal(phi(e, box1 + box2), phi(e, box1) + phi(e, box2))
    assert_almost_equal(phi(e, box1 + box2), phi(c, box1 + box2))
    # Closed form: intrinsic volumes of a box are elementary symmetric
    # polynomials of its edge lengths.
    assert_almost_equal(phi(e, box1 + box2),
                        (np.array(
                            [elsym([e[1]-e[0]-1
                                    for e in edge1], i) for i in range(2)]) +
                         np.array(
                             [elsym([e[1]-e[0]-1
                                     for e in edge2], i) for i in range(2)])))
    # Coordinate array with trailing singleton axis must be rejected.
    assert_raises(ValueError, phi, c[...,None], box1)
def test_lips2_disjoint():
    # 2D analogue of test_lips1_disjoint: additivity, rotation invariance,
    # and the elementary-symmetric closed form for box intrinsic volumes.
    phi = intvol.Lips2d
    box1, box2, edge1, edge2 = nonintersecting_boxes((40,40))
    c = np.indices((40,40)).astype(np.float)
    # Test N dimensional coordinates (N=10)
    d = np.random.standard_normal((10,40,40))
    # Test rotation causes no change in volumes
    U = randorth(p=6)[0:2]
    e = np.dot(U.T, c.reshape((c.shape[0], np.product(c.shape[1:]))))
    e.shape = (e.shape[0],) + c.shape[1:]
    assert_almost_equal(phi(c, box1 + box2),
                        phi(c, box1) + phi(c, box2))
    assert_almost_equal(phi(d, box1 + box2),
                        phi(d, box1) + phi(d, box2))
    assert_almost_equal(phi(e, box1 + box2),
                        phi(e, box1) + phi(e, box2))
    assert_almost_equal(phi(e, box1 + box2), phi(c, box1 + box2))
    assert_almost_equal(phi(e, box1 + box2),
                        np.array([elsym([e[1]-e[0]-1 for e in edge1], i)
                                  for i in range(3)]) +
                        np.array([elsym([e[1]-e[0]-1 for e in edge2], i)
                                  for i in range(3)])
                        )
    # Malformed coordinate arrays must raise.
    assert_raises(ValueError, phi, c[...,None], box1)
    assert_raises(ValueError, phi, c[:,:,1], box1)
def test_lips3_disjoint():
    # 3D analogue of test_lips1_disjoint: additivity, rotation invariance,
    # and the elementary-symmetric closed form for box intrinsic volumes.
    phi = intvol.Lips3d
    box1, box2, edge1, edge2 = nonintersecting_boxes((40,)*3)
    c = np.indices((40,)*3).astype(np.float)
    # Test N dimensional coordinates (N=10)
    d = np.random.standard_normal((10,40,40,40))
    # Test rotation causes no change in volumes
    U = randorth(p=6)[0:3]
    e = np.dot(U.T, c.reshape((c.shape[0], np.product(c.shape[1:]))))
    e.shape = (e.shape[0],) + c.shape[1:]
    assert_almost_equal(phi(c, box1 + box2), phi(c, box1) + phi(c, box2))
    assert_almost_equal(phi(d, box1 + box2), phi(d, box1) + phi(d, box2))
    assert_almost_equal(phi(e, box1 + box2), phi(e, box1) + phi(e, box2))
    assert_almost_equal(phi(e, box1 + box2), phi(c, box1 + box2))
    assert_almost_equal(
        phi(e, box1 + box2),
        (np.array([elsym([e[1]-e[0]-1 for e in edge1], i) for i in range(4)]) +
         np.array([elsym([e[1]-e[0]-1 for e in edge2], i) for i in range(4)])))
    # Malformed coordinate arrays must raise.
    assert_raises(ValueError, phi, c[...,None], box1)
    assert_raises(ValueError, phi, c[:,:,:,1], box1)
def test_lips3_nans():
    # These boxes caused nans in the Lips3 disjoint box tests
    phi = intvol.Lips3d
    box1 = np.zeros((40,40,40), dtype=np.int)
    box2 = box1.copy()
    box1[23:30,22:32,9:13] = 1
    box2[7:22,0,8:17] = 1
    c = np.indices(box1.shape).astype(np.float)
    # Regression check: no nan anywhere in the returned intrinsic volumes.
    assert_array_equal(np.isnan(phi(c, box2)), False)
    # Same check with rotated coordinates.
    U = randorth(p=6)[0:3]
    e = np.dot(U.T, c.reshape((c.shape[0], np.product(c.shape[1:]))))
    e.shape = (e.shape[0],) + c.shape[1:]
    assert_array_equal(np.isnan(phi(e, box1 + box2)), False)
def test_slices():
    # Slices have EC 1...
    # Nose-style generator test: point / line segment / flat rectangle
    # embedded in a 3D volume all have EC 1 and the expected lower-order
    # intrinsic volumes with zero volume term.
    e = intvol.EC3d
    p = intvol.Lips3d
    m = np.zeros((40,)*3, np.int)
    D = np.indices(m.shape).astype(np.float)
    m[10,10,10] = 1
    yield assert_almost_equal, e(m), 1
    yield assert_almost_equal, p(D,m), [1,0,0,0]
    m = np.zeros((40,)*3, np.int)
    m[10,10:14,10] = 1
    yield assert_almost_equal, e(m), 1
    yield assert_almost_equal, p(D,m), [1,3,0,0]
    m = np.zeros((40,)*3, np.int)
    m[10,10:14,9:15] = 1
    yield assert_almost_equal, e(m), 1
    yield assert_almost_equal, p(D,m), [1,8,15,0]
def test_ec_wrapping():
    # Test wrapping for EC1 calculation
    # A segment touching both ends of the array must not wrap into a loop:
    # splitting it into two pieces raises the EC from 1 to 2.
    assert_equal(intvol.EC1d(np.ones((6,), dtype=np.int)), 1)
    box1 = np.array([1, 1, 0, 1, 1, 1], dtype=np.int)
    assert_equal(intvol.EC1d(box1), 2)
    # 2D
    box1 = np.zeros((3,6), dtype=np.int)
    box1[1] = 1
    assert_equal(intvol.EC2d(box1), 1)
    box1[1, 3] = 0
    assert_equal(intvol.EC2d(box1), 2)
    # 3D
    box1 = np.zeros((3,6,3), dtype=np.int)
    box1[1, :, 1] = 1
    assert_equal(intvol.EC3d(box1), 1)
    box1[1, 3, 1] = 0
    assert_equal(intvol.EC3d(box1), 2)
| 32.965054 | 79 | 0.567479 |
e14b747829bb6661dceff0a8c9e3134c6f87e6cd | 6,033 | py | Python | google/cloud/dialogflow_v2beta1/services/knowledge_bases/pagers.py | reichenbch/python-dialogflow | 74a54c6fd9d6e03741206ff1e95939123362cab9 | [
"Apache-2.0"
] | 80 | 2020-05-19T20:54:47.000Z | 2022-03-27T01:35:30.000Z | google/cloud/dialogflow_v2beta1/services/knowledge_bases/pagers.py | reichenbch/python-dialogflow | 74a54c6fd9d6e03741206ff1e95939123362cab9 | [
"Apache-2.0"
] | 135 | 2020-12-10T00:33:13.000Z | 2022-03-27T16:14:25.000Z | google/cloud/dialogflow_v2beta1/services/knowledge_bases/pagers.py | reichenbch/python-dialogflow | 74a54c6fd9d6e03741206ff1e95939123362cab9 | [
"Apache-2.0"
] | 37 | 2020-12-12T15:09:15.000Z | 2022-03-08T02:25:28.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Sequence,
Tuple,
Optional,
Iterator,
)
from google.cloud.dialogflow_v2beta1.types import knowledge_base
class ListKnowledgeBasesPager:
    """A pager for iterating through ``list_knowledge_bases`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.dialogflow_v2beta1.types.ListKnowledgeBasesResponse` object, and
    provides an ``__iter__`` method to iterate through its
    ``knowledge_bases`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``ListKnowledgeBases`` requests and continue to iterate
    through the ``knowledge_bases`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.dialogflow_v2beta1.types.ListKnowledgeBasesResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(
        self,
        method: Callable[..., knowledge_base.ListKnowledgeBasesResponse],
        request: knowledge_base.ListKnowledgeBasesRequest,
        response: knowledge_base.ListKnowledgeBasesResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.dialogflow_v2beta1.types.ListKnowledgeBasesRequest):
                The initial request object.
            response (google.cloud.dialogflow_v2beta1.types.ListKnowledgeBasesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutation does not affect the caller.
        self._request = knowledge_base.ListKnowledgeBasesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[knowledge_base.ListKnowledgeBasesResponse]:
        # Yield the initial response, then fetch follow-up pages lazily
        # until the service returns an empty next_page_token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[knowledge_base.KnowledgeBase]:
        # Flatten pages into individual KnowledgeBase items.
        for page in self.pages:
            yield from page.knowledge_bases

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListKnowledgeBasesAsyncPager:
    """A pager for iterating through ``list_knowledge_bases`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.dialogflow_v2beta1.types.ListKnowledgeBasesResponse` object, and
    provides an ``__aiter__`` method to iterate through its
    ``knowledge_bases`` field.

    If there are more pages, the ``__aiter__`` method will make additional
    ``ListKnowledgeBases`` requests and continue to iterate
    through the ``knowledge_bases`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.dialogflow_v2beta1.types.ListKnowledgeBasesResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(
        self,
        method: Callable[..., Awaitable[knowledge_base.ListKnowledgeBasesResponse]],
        request: knowledge_base.ListKnowledgeBasesRequest,
        response: knowledge_base.ListKnowledgeBasesResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.dialogflow_v2beta1.types.ListKnowledgeBasesRequest):
                The initial request object.
            response (google.cloud.dialogflow_v2beta1.types.ListKnowledgeBasesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutation does not affect the caller.
        self._request = knowledge_base.ListKnowledgeBasesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the most recent response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[knowledge_base.ListKnowledgeBasesResponse]:
        # Async analogue of the sync pager: fetch follow-up pages lazily,
        # awaiting each RPC, until next_page_token is empty.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterator[knowledge_base.KnowledgeBase]:
        # Wrap in an inner async generator so __aiter__ itself stays sync.
        async def async_generator():
            async for page in self.pages:
                for response in page.knowledge_bases:
                    yield response

        return async_generator()

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
4b8c70fbeccec816347413be381b20b99570af6b | 168 | py | Python | text/fn.py | cy486/DomRead | 852289588b902dedba5fd3f2a57b39d2a2b027ba | [
"Apache-2.0"
] | null | null | null | text/fn.py | cy486/DomRead | 852289588b902dedba5fd3f2a57b39d2a2b027ba | [
"Apache-2.0"
] | null | null | null | text/fn.py | cy486/DomRead | 852289588b902dedba5fd3f2a57b39d2a2b027ba | [
"Apache-2.0"
] | null | null | null | def fn(a,b):
s=0
num=str(a)
for i in range(1,b+1):
s+=int(num*i)
return str(s)
# Read "a b" from one line of stdin, compute the repeated-digit series sum,
# and print the result followed by a blank line.
a,b=input().split()
s=fn(int(a),int(b))
print(s)
print()
adf16c21b37d2fc6c3344d2c1121e851454ede1e | 79,087 | py | Python | pyVmomi/PbmObjects.py | timgates42/pyvmomi | a2d5df0b57d7e359b220493c9c77d1635705ef12 | [
"Apache-2.0"
] | 1 | 2021-01-13T06:54:10.000Z | 2021-01-13T06:54:10.000Z | pyVmomi/PbmObjects.py | timgates42/pyvmomi | a2d5df0b57d7e359b220493c9c77d1635705ef12 | [
"Apache-2.0"
] | null | null | null | pyVmomi/PbmObjects.py | timgates42/pyvmomi | a2d5df0b57d7e359b220493c9c77d1635705ef12 | [
"Apache-2.0"
] | null | null | null | # ******* WARNING - AUTO GENERATED CODE - DO NOT EDIT *******
from .VmomiSupport import CreateDataType, CreateManagedType
from .VmomiSupport import CreateEnumType
from .VmomiSupport import AddVersion, AddVersionParent
from .VmomiSupport import AddBreakingChangesInfo
from .VmomiSupport import F_LINK, F_LINKABLE
from .VmomiSupport import F_OPTIONAL, F_SECRET
from .VmomiSupport import newestVersions, stableVersions
from .VmomiSupport import publicVersions, dottedVersions
from .VmomiSupport import oldestVersions
AddVersion("pbm.version.version11", "pbm", "6.5", 0, "pbm")
AddVersion("vmodl.query.version.version4", "", "", 0, "vim25")
AddVersion("vmodl.query.version.version3", "", "", 0, "vim25")
AddVersion("vmodl.query.version.version2", "", "", 0, "vim25")
AddVersion("vmodl.query.version.version1", "", "", 0, "vim25")
AddVersion("vim.version.version8", "vim25", "5.1", 0, "vim25")
AddVersion("vim.version.version9", "vim25", "5.5", 0, "vim25")
AddVersion("vim.version.version6", "vim25", "4.1", 0, "vim25")
AddVersion("vim.version.version7", "vim25", "5.0", 0, "vim25")
AddVersion("pbm.version.v7_0_0_1", "pbm", "7.0.0.1", 0, "pbm")
AddVersion("pbm.version.v7_0_1_0", "pbm", "7.0.1.0", 0, "pbm")
AddVersion("vim.version.v7_0_0_2", "vim25", "7.0.0.2", 0, "vim25")
AddVersion("vim.version.version1", "vim2", "2.0", 0, "vim25")
AddVersion("vim.version.version4", "vim25", "2.5u2server", 0, "vim25")
AddVersion("pbm.version.version14", "pbm", "6.7.2", 0, "pbm")
AddVersion("vim.version.version5", "vim25", "4.0", 0, "vim25")
AddVersion("pbm.version.version12", "pbm", "6.7", 0, "pbm")
AddVersion("vim.version.version2", "vim25", "2.5", 0, "vim25")
AddVersion("vim.version.version3", "vim25", "2.5u2", 0, "vim25")
AddVersion("pbm.version.version13", "pbm", "6.7.1", 0, "pbm")
AddVersion("pbm.version.v7_0", "pbm", "7.0.0.0", 0, "pbm")
AddVersion("vim.version.version13", "vim25", "6.7.1", 0, "vim25")
AddVersion("vim.version.version14", "vim25", "6.7.2", 0, "vim25")
AddVersion("vim.version.version15", "vim25", "6.7.3", 0, "vim25")
AddVersion("pbm.version.version1", "pbm", "1.0", 0, "pbm")
AddVersion("vim.version.v7_0_1_0", "vim25", "7.0.1.0", 0, "vim25")
AddVersion("pbm.version.version2", "pbm", "2.0", 0, "pbm")
AddVersion("vmodl.version.version0", "", "", 0, "vim25")
AddVersion("vmodl.version.version1", "", "", 0, "vim25")
AddVersion("vmodl.version.version2", "", "", 0, "vim25")
AddVersion("vim.version.v6_9_1", "vim25", "6.9.1", 0, "vim25")
AddVersion("vim.version.v6_8_7", "vim25", "6.8.7", 0, "vim25")
AddVersion("vim.version.v7_0", "vim25", "7.0.0.0", 0, "vim25")
AddVersion("vmodl.reflect.version.version1", "reflect", "1.0", 0, "reflect")
AddVersion("vim.version.version10", "vim25", "6.0", 0, "vim25")
AddVersion("vmodl.reflect.version.version2", "reflect", "2.0", 0, "reflect")
AddVersion("vim.version.version11", "vim25", "6.5", 0, "vim25")
AddVersion("vim.version.version12", "vim25", "6.7", 0, "vim25")
AddVersionParent("pbm.version.version11", "pbm.version.version11")
AddVersionParent("pbm.version.version11", "vmodl.query.version.version4")
AddVersionParent("pbm.version.version11", "vmodl.query.version.version3")
AddVersionParent("pbm.version.version11", "vmodl.query.version.version2")
AddVersionParent("pbm.version.version11", "vmodl.query.version.version1")
AddVersionParent("pbm.version.version11", "vim.version.version8")
AddVersionParent("pbm.version.version11", "vim.version.version9")
AddVersionParent("pbm.version.version11", "vim.version.version6")
AddVersionParent("pbm.version.version11", "vim.version.version7")
AddVersionParent("pbm.version.version11", "vim.version.version1")
AddVersionParent("pbm.version.version11", "vim.version.version4")
AddVersionParent("pbm.version.version11", "vim.version.version5")
AddVersionParent("pbm.version.version11", "vim.version.version2")
AddVersionParent("pbm.version.version11", "vim.version.version3")
AddVersionParent("pbm.version.version11", "pbm.version.version1")
AddVersionParent("pbm.version.version11", "pbm.version.version2")
AddVersionParent("pbm.version.version11", "vmodl.version.version0")
AddVersionParent("pbm.version.version11", "vmodl.version.version1")
AddVersionParent("pbm.version.version11", "vmodl.version.version2")
AddVersionParent("pbm.version.version11", "vmodl.reflect.version.version1")
AddVersionParent("pbm.version.version11", "vim.version.version10")
AddVersionParent("pbm.version.version11", "vmodl.reflect.version.version2")
AddVersionParent("pbm.version.version11", "vim.version.version11")
AddVersionParent("vmodl.query.version.version4", "vmodl.query.version.version4")
AddVersionParent("vmodl.query.version.version4", "vmodl.query.version.version3")
AddVersionParent("vmodl.query.version.version4", "vmodl.query.version.version2")
AddVersionParent("vmodl.query.version.version4", "vmodl.query.version.version1")
AddVersionParent("vmodl.query.version.version4", "vmodl.version.version0")
AddVersionParent("vmodl.query.version.version4", "vmodl.version.version1")
AddVersionParent("vmodl.query.version.version4", "vmodl.version.version2")
AddVersionParent("vmodl.query.version.version3", "vmodl.query.version.version3")
AddVersionParent("vmodl.query.version.version3", "vmodl.query.version.version2")
AddVersionParent("vmodl.query.version.version3", "vmodl.query.version.version1")
AddVersionParent("vmodl.query.version.version3", "vmodl.version.version0")
AddVersionParent("vmodl.query.version.version3", "vmodl.version.version1")
AddVersionParent("vmodl.query.version.version2", "vmodl.query.version.version2")
AddVersionParent("vmodl.query.version.version2", "vmodl.query.version.version1")
AddVersionParent("vmodl.query.version.version2", "vmodl.version.version0")
AddVersionParent("vmodl.query.version.version2", "vmodl.version.version1")
AddVersionParent("vmodl.query.version.version1", "vmodl.query.version.version1")
AddVersionParent("vmodl.query.version.version1", "vmodl.version.version0")
AddVersionParent("vim.version.version8", "vmodl.query.version.version4")
AddVersionParent("vim.version.version8", "vmodl.query.version.version3")
AddVersionParent("vim.version.version8", "vmodl.query.version.version2")
AddVersionParent("vim.version.version8", "vmodl.query.version.version1")
AddVersionParent("vim.version.version8", "vim.version.version8")
AddVersionParent("vim.version.version8", "vim.version.version6")
AddVersionParent("vim.version.version8", "vim.version.version7")
AddVersionParent("vim.version.version8", "vim.version.version1")
AddVersionParent("vim.version.version8", "vim.version.version4")
AddVersionParent("vim.version.version8", "vim.version.version5")
AddVersionParent("vim.version.version8", "vim.version.version2")
AddVersionParent("vim.version.version8", "vim.version.version3")
AddVersionParent("vim.version.version8", "vmodl.version.version0")
AddVersionParent("vim.version.version8", "vmodl.version.version1")
AddVersionParent("vim.version.version8", "vmodl.version.version2")
AddVersionParent("vim.version.version8", "vmodl.reflect.version.version1")
AddVersionParent("vim.version.version9", "vmodl.query.version.version4")
AddVersionParent("vim.version.version9", "vmodl.query.version.version3")
AddVersionParent("vim.version.version9", "vmodl.query.version.version2")
AddVersionParent("vim.version.version9", "vmodl.query.version.version1")
AddVersionParent("vim.version.version9", "vim.version.version8")
AddVersionParent("vim.version.version9", "vim.version.version9")
AddVersionParent("vim.version.version9", "vim.version.version6")
AddVersionParent("vim.version.version9", "vim.version.version7")
AddVersionParent("vim.version.version9", "vim.version.version1")
AddVersionParent("vim.version.version9", "vim.version.version4")
AddVersionParent("vim.version.version9", "vim.version.version5")
AddVersionParent("vim.version.version9", "vim.version.version2")
AddVersionParent("vim.version.version9", "vim.version.version3")
AddVersionParent("vim.version.version9", "vmodl.version.version0")
AddVersionParent("vim.version.version9", "vmodl.version.version1")
AddVersionParent("vim.version.version9", "vmodl.version.version2")
AddVersionParent("vim.version.version9", "vmodl.reflect.version.version1")
AddVersionParent("vim.version.version6", "vmodl.query.version.version3")
AddVersionParent("vim.version.version6", "vmodl.query.version.version2")
AddVersionParent("vim.version.version6", "vmodl.query.version.version1")
AddVersionParent("vim.version.version6", "vim.version.version6")
AddVersionParent("vim.version.version6", "vim.version.version1")
AddVersionParent("vim.version.version6", "vim.version.version4")
AddVersionParent("vim.version.version6", "vim.version.version5")
AddVersionParent("vim.version.version6", "vim.version.version2")
AddVersionParent("vim.version.version6", "vim.version.version3")
AddVersionParent("vim.version.version6", "vmodl.version.version0")
AddVersionParent("vim.version.version6", "vmodl.version.version1")
AddVersionParent("vim.version.version7", "vmodl.query.version.version4")
AddVersionParent("vim.version.version7", "vmodl.query.version.version3")
AddVersionParent("vim.version.version7", "vmodl.query.version.version2")
AddVersionParent("vim.version.version7", "vmodl.query.version.version1")
AddVersionParent("vim.version.version7", "vim.version.version6")
AddVersionParent("vim.version.version7", "vim.version.version7")
AddVersionParent("vim.version.version7", "vim.version.version1")
AddVersionParent("vim.version.version7", "vim.version.version4")
AddVersionParent("vim.version.version7", "vim.version.version5")
AddVersionParent("vim.version.version7", "vim.version.version2")
AddVersionParent("vim.version.version7", "vim.version.version3")
AddVersionParent("vim.version.version7", "vmodl.version.version0")
AddVersionParent("vim.version.version7", "vmodl.version.version1")
AddVersionParent("vim.version.version7", "vmodl.version.version2")
AddVersionParent("vim.version.version7", "vmodl.reflect.version.version1")
AddVersionParent("pbm.version.v7_0_0_1", "pbm.version.version11")
AddVersionParent("pbm.version.v7_0_0_1", "vmodl.query.version.version4")
AddVersionParent("pbm.version.v7_0_0_1", "vmodl.query.version.version3")
AddVersionParent("pbm.version.v7_0_0_1", "vmodl.query.version.version2")
AddVersionParent("pbm.version.v7_0_0_1", "vmodl.query.version.version1")
AddVersionParent("pbm.version.v7_0_0_1", "vim.version.version8")
AddVersionParent("pbm.version.v7_0_0_1", "vim.version.version9")
AddVersionParent("pbm.version.v7_0_0_1", "vim.version.version6")
AddVersionParent("pbm.version.v7_0_0_1", "vim.version.version7")
AddVersionParent("pbm.version.v7_0_0_1", "pbm.version.v7_0_0_1")
AddVersionParent("pbm.version.v7_0_0_1", "vim.version.version1")
AddVersionParent("pbm.version.v7_0_0_1", "vim.version.version4")
AddVersionParent("pbm.version.v7_0_0_1", "pbm.version.version14")
AddVersionParent("pbm.version.v7_0_0_1", "vim.version.version5")
AddVersionParent("pbm.version.v7_0_0_1", "pbm.version.version12")
AddVersionParent("pbm.version.v7_0_0_1", "vim.version.version2")
AddVersionParent("pbm.version.v7_0_0_1", "vim.version.version3")
AddVersionParent("pbm.version.v7_0_0_1", "pbm.version.version13")
AddVersionParent("pbm.version.v7_0_0_1", "pbm.version.v7_0")
AddVersionParent("pbm.version.v7_0_0_1", "vim.version.version13")
AddVersionParent("pbm.version.v7_0_0_1", "vim.version.version14")
AddVersionParent("pbm.version.v7_0_0_1", "vim.version.version15")
AddVersionParent("pbm.version.v7_0_0_1", "pbm.version.version1")
AddVersionParent("pbm.version.v7_0_0_1", "pbm.version.version2")
AddVersionParent("pbm.version.v7_0_0_1", "vmodl.version.version0")
AddVersionParent("pbm.version.v7_0_0_1", "vmodl.version.version1")
AddVersionParent("pbm.version.v7_0_0_1", "vmodl.version.version2")
AddVersionParent("pbm.version.v7_0_0_1", "vim.version.v6_9_1")
AddVersionParent("pbm.version.v7_0_0_1", "vim.version.v6_8_7")
AddVersionParent("pbm.version.v7_0_0_1", "vim.version.v7_0")
AddVersionParent("pbm.version.v7_0_0_1", "vmodl.reflect.version.version1")
AddVersionParent("pbm.version.v7_0_0_1", "vim.version.version10")
AddVersionParent("pbm.version.v7_0_0_1", "vmodl.reflect.version.version2")
AddVersionParent("pbm.version.v7_0_0_1", "vim.version.version11")
AddVersionParent("pbm.version.v7_0_0_1", "vim.version.version12")
AddVersionParent("pbm.version.v7_0_1_0", "pbm.version.version11")
AddVersionParent("pbm.version.v7_0_1_0", "vmodl.query.version.version4")
AddVersionParent("pbm.version.v7_0_1_0", "vmodl.query.version.version3")
AddVersionParent("pbm.version.v7_0_1_0", "vmodl.query.version.version2")
AddVersionParent("pbm.version.v7_0_1_0", "vmodl.query.version.version1")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.version8")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.version9")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.version6")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.version7")
AddVersionParent("pbm.version.v7_0_1_0", "pbm.version.v7_0_0_1")
AddVersionParent("pbm.version.v7_0_1_0", "pbm.version.v7_0_1_0")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.v7_0_0_2")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.version1")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.version4")
AddVersionParent("pbm.version.v7_0_1_0", "pbm.version.version14")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.version5")
AddVersionParent("pbm.version.v7_0_1_0", "pbm.version.version12")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.version2")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.version3")
AddVersionParent("pbm.version.v7_0_1_0", "pbm.version.version13")
AddVersionParent("pbm.version.v7_0_1_0", "pbm.version.v7_0")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.version13")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.version14")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.version15")
AddVersionParent("pbm.version.v7_0_1_0", "pbm.version.version1")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.v7_0_1_0")
AddVersionParent("pbm.version.v7_0_1_0", "pbm.version.version2")
AddVersionParent("pbm.version.v7_0_1_0", "vmodl.version.version0")
AddVersionParent("pbm.version.v7_0_1_0", "vmodl.version.version1")
AddVersionParent("pbm.version.v7_0_1_0", "vmodl.version.version2")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.v6_9_1")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.v6_8_7")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.v7_0")
AddVersionParent("pbm.version.v7_0_1_0", "vmodl.reflect.version.version1")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.version10")
AddVersionParent("pbm.version.v7_0_1_0", "vmodl.reflect.version.version2")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.version11")
AddVersionParent("pbm.version.v7_0_1_0", "vim.version.version12")
AddVersionParent("vim.version.v7_0_0_2", "vmodl.query.version.version4")
AddVersionParent("vim.version.v7_0_0_2", "vmodl.query.version.version3")
AddVersionParent("vim.version.v7_0_0_2", "vmodl.query.version.version2")
AddVersionParent("vim.version.v7_0_0_2", "vmodl.query.version.version1")
AddVersionParent("vim.version.v7_0_0_2", "vim.version.version8")
AddVersionParent("vim.version.v7_0_0_2", "vim.version.version9")
AddVersionParent("vim.version.v7_0_0_2", "vim.version.version6")
AddVersionParent("vim.version.v7_0_0_2", "vim.version.version7")
AddVersionParent("vim.version.v7_0_0_2", "vim.version.v7_0_0_2")
AddVersionParent("vim.version.v7_0_0_2", "vim.version.version1")
AddVersionParent("vim.version.v7_0_0_2", "vim.version.version4")
AddVersionParent("vim.version.v7_0_0_2", "vim.version.version5")
AddVersionParent("vim.version.v7_0_0_2", "vim.version.version2")
AddVersionParent("vim.version.v7_0_0_2", "vim.version.version3")
AddVersionParent("vim.version.v7_0_0_2", "vim.version.version13")
AddVersionParent("vim.version.v7_0_0_2", "vim.version.version14")
AddVersionParent("vim.version.v7_0_0_2", "vim.version.version15")
AddVersionParent("vim.version.v7_0_0_2", "vmodl.version.version0")
AddVersionParent("vim.version.v7_0_0_2", "vmodl.version.version1")
AddVersionParent("vim.version.v7_0_0_2", "vmodl.version.version2")
AddVersionParent("vim.version.v7_0_0_2", "vim.version.v6_9_1")
AddVersionParent("vim.version.v7_0_0_2", "vim.version.v6_8_7")
AddVersionParent("vim.version.v7_0_0_2", "vim.version.v7_0")
AddVersionParent("vim.version.v7_0_0_2", "vmodl.reflect.version.version1")
AddVersionParent("vim.version.v7_0_0_2", "vim.version.version10")
AddVersionParent("vim.version.v7_0_0_2", "vmodl.reflect.version.version2")
AddVersionParent("vim.version.v7_0_0_2", "vim.version.version11")
AddVersionParent("vim.version.v7_0_0_2", "vim.version.version12")
AddVersionParent("vim.version.version1", "vmodl.query.version.version1")
AddVersionParent("vim.version.version1", "vim.version.version1")
AddVersionParent("vim.version.version1", "vmodl.version.version0")
AddVersionParent("vim.version.version4", "vmodl.query.version.version1")
AddVersionParent("vim.version.version4", "vim.version.version1")
AddVersionParent("vim.version.version4", "vim.version.version4")
AddVersionParent("vim.version.version4", "vim.version.version2")
AddVersionParent("vim.version.version4", "vim.version.version3")
AddVersionParent("vim.version.version4", "vmodl.version.version0")
AddVersionParent("pbm.version.version14", "pbm.version.version11")
AddVersionParent("pbm.version.version14", "vmodl.query.version.version4")
AddVersionParent("pbm.version.version14", "vmodl.query.version.version3")
AddVersionParent("pbm.version.version14", "vmodl.query.version.version2")
AddVersionParent("pbm.version.version14", "vmodl.query.version.version1")
AddVersionParent("pbm.version.version14", "vim.version.version8")
AddVersionParent("pbm.version.version14", "vim.version.version9")
AddVersionParent("pbm.version.version14", "vim.version.version6")
AddVersionParent("pbm.version.version14", "vim.version.version7")
AddVersionParent("pbm.version.version14", "vim.version.version1")
AddVersionParent("pbm.version.version14", "vim.version.version4")
AddVersionParent("pbm.version.version14", "pbm.version.version14")
AddVersionParent("pbm.version.version14", "vim.version.version5")
AddVersionParent("pbm.version.version14", "pbm.version.version12")
AddVersionParent("pbm.version.version14", "vim.version.version2")
AddVersionParent("pbm.version.version14", "vim.version.version3")
AddVersionParent("pbm.version.version14", "pbm.version.version13")
AddVersionParent("pbm.version.version14", "vim.version.version13")
AddVersionParent("pbm.version.version14", "vim.version.version14")
AddVersionParent("pbm.version.version14", "pbm.version.version1")
AddVersionParent("pbm.version.version14", "pbm.version.version2")
AddVersionParent("pbm.version.version14", "vmodl.version.version0")
AddVersionParent("pbm.version.version14", "vmodl.version.version1")
AddVersionParent("pbm.version.version14", "vmodl.version.version2")
AddVersionParent("pbm.version.version14", "vmodl.reflect.version.version1")
AddVersionParent("pbm.version.version14", "vim.version.version10")
AddVersionParent("pbm.version.version14", "vmodl.reflect.version.version2")
AddVersionParent("pbm.version.version14", "vim.version.version11")
AddVersionParent("pbm.version.version14", "vim.version.version12")
AddVersionParent("vim.version.version5", "vmodl.query.version.version2")
AddVersionParent("vim.version.version5", "vmodl.query.version.version1")
AddVersionParent("vim.version.version5", "vim.version.version1")
AddVersionParent("vim.version.version5", "vim.version.version4")
AddVersionParent("vim.version.version5", "vim.version.version5")
AddVersionParent("vim.version.version5", "vim.version.version2")
AddVersionParent("vim.version.version5", "vim.version.version3")
AddVersionParent("vim.version.version5", "vmodl.version.version0")
AddVersionParent("vim.version.version5", "vmodl.version.version1")
AddVersionParent("pbm.version.version12", "pbm.version.version11")
AddVersionParent("pbm.version.version12", "vmodl.query.version.version4")
AddVersionParent("pbm.version.version12", "vmodl.query.version.version3")
AddVersionParent("pbm.version.version12", "vmodl.query.version.version2")
AddVersionParent("pbm.version.version12", "vmodl.query.version.version1")
AddVersionParent("pbm.version.version12", "vim.version.version8")
AddVersionParent("pbm.version.version12", "vim.version.version9")
AddVersionParent("pbm.version.version12", "vim.version.version6")
AddVersionParent("pbm.version.version12", "vim.version.version7")
AddVersionParent("pbm.version.version12", "vim.version.version1")
AddVersionParent("pbm.version.version12", "vim.version.version4")
AddVersionParent("pbm.version.version12", "vim.version.version5")
AddVersionParent("pbm.version.version12", "pbm.version.version12")
AddVersionParent("pbm.version.version12", "vim.version.version2")
AddVersionParent("pbm.version.version12", "vim.version.version3")
AddVersionParent("pbm.version.version12", "pbm.version.version1")
AddVersionParent("pbm.version.version12", "pbm.version.version2")
AddVersionParent("pbm.version.version12", "vmodl.version.version0")
AddVersionParent("pbm.version.version12", "vmodl.version.version1")
AddVersionParent("pbm.version.version12", "vmodl.version.version2")
AddVersionParent("pbm.version.version12", "vmodl.reflect.version.version1")
AddVersionParent("pbm.version.version12", "vim.version.version10")
AddVersionParent("pbm.version.version12", "vmodl.reflect.version.version2")
AddVersionParent("pbm.version.version12", "vim.version.version11")
AddVersionParent("pbm.version.version12", "vim.version.version12")
AddVersionParent("vim.version.version2", "vmodl.query.version.version1")
AddVersionParent("vim.version.version2", "vim.version.version1")
AddVersionParent("vim.version.version2", "vim.version.version2")
AddVersionParent("vim.version.version2", "vmodl.version.version0")
AddVersionParent("vim.version.version3", "vmodl.query.version.version1")
AddVersionParent("vim.version.version3", "vim.version.version1")
AddVersionParent("vim.version.version3", "vim.version.version2")
AddVersionParent("vim.version.version3", "vim.version.version3")
AddVersionParent("vim.version.version3", "vmodl.version.version0")
AddVersionParent("pbm.version.version13", "pbm.version.version11")
AddVersionParent("pbm.version.version13", "vmodl.query.version.version4")
AddVersionParent("pbm.version.version13", "vmodl.query.version.version3")
AddVersionParent("pbm.version.version13", "vmodl.query.version.version2")
AddVersionParent("pbm.version.version13", "vmodl.query.version.version1")
AddVersionParent("pbm.version.version13", "vim.version.version8")
AddVersionParent("pbm.version.version13", "vim.version.version9")
AddVersionParent("pbm.version.version13", "vim.version.version6")
AddVersionParent("pbm.version.version13", "vim.version.version7")
AddVersionParent("pbm.version.version13", "vim.version.version1")
AddVersionParent("pbm.version.version13", "vim.version.version4")
AddVersionParent("pbm.version.version13", "vim.version.version5")
AddVersionParent("pbm.version.version13", "pbm.version.version12")
AddVersionParent("pbm.version.version13", "vim.version.version2")
AddVersionParent("pbm.version.version13", "vim.version.version3")
AddVersionParent("pbm.version.version13", "pbm.version.version13")
AddVersionParent("pbm.version.version13", "vim.version.version13")
AddVersionParent("pbm.version.version13", "pbm.version.version1")
AddVersionParent("pbm.version.version13", "pbm.version.version2")
AddVersionParent("pbm.version.version13", "vmodl.version.version0")
AddVersionParent("pbm.version.version13", "vmodl.version.version1")
AddVersionParent("pbm.version.version13", "vmodl.version.version2")
AddVersionParent("pbm.version.version13", "vmodl.reflect.version.version1")
AddVersionParent("pbm.version.version13", "vim.version.version10")
AddVersionParent("pbm.version.version13", "vmodl.reflect.version.version2")
AddVersionParent("pbm.version.version13", "vim.version.version11")
AddVersionParent("pbm.version.version13", "vim.version.version12")
AddVersionParent("pbm.version.v7_0", "pbm.version.version11")
AddVersionParent("pbm.version.v7_0", "vmodl.query.version.version4")
AddVersionParent("pbm.version.v7_0", "vmodl.query.version.version3")
AddVersionParent("pbm.version.v7_0", "vmodl.query.version.version2")
AddVersionParent("pbm.version.v7_0", "vmodl.query.version.version1")
AddVersionParent("pbm.version.v7_0", "vim.version.version8")
AddVersionParent("pbm.version.v7_0", "vim.version.version9")
AddVersionParent("pbm.version.v7_0", "vim.version.version6")
AddVersionParent("pbm.version.v7_0", "vim.version.version7")
AddVersionParent("pbm.version.v7_0", "vim.version.version1")
AddVersionParent("pbm.version.v7_0", "vim.version.version4")
AddVersionParent("pbm.version.v7_0", "pbm.version.version14")
AddVersionParent("pbm.version.v7_0", "vim.version.version5")
AddVersionParent("pbm.version.v7_0", "pbm.version.version12")
AddVersionParent("pbm.version.v7_0", "vim.version.version2")
AddVersionParent("pbm.version.v7_0", "vim.version.version3")
AddVersionParent("pbm.version.v7_0", "pbm.version.version13")
AddVersionParent("pbm.version.v7_0", "pbm.version.v7_0")
AddVersionParent("pbm.version.v7_0", "vim.version.version13")
AddVersionParent("pbm.version.v7_0", "vim.version.version14")
AddVersionParent("pbm.version.v7_0", "vim.version.version15")
AddVersionParent("pbm.version.v7_0", "pbm.version.version1")
AddVersionParent("pbm.version.v7_0", "pbm.version.version2")
AddVersionParent("pbm.version.v7_0", "vmodl.version.version0")
AddVersionParent("pbm.version.v7_0", "vmodl.version.version1")
AddVersionParent("pbm.version.v7_0", "vmodl.version.version2")
AddVersionParent("pbm.version.v7_0", "vim.version.v6_9_1")
AddVersionParent("pbm.version.v7_0", "vim.version.v6_8_7")
AddVersionParent("pbm.version.v7_0", "vim.version.v7_0")
AddVersionParent("pbm.version.v7_0", "vmodl.reflect.version.version1")
AddVersionParent("pbm.version.v7_0", "vim.version.version10")
AddVersionParent("pbm.version.v7_0", "vmodl.reflect.version.version2")
AddVersionParent("pbm.version.v7_0", "vim.version.version11")
AddVersionParent("pbm.version.v7_0", "vim.version.version12")
AddVersionParent("vim.version.version13", "vmodl.query.version.version4")
AddVersionParent("vim.version.version13", "vmodl.query.version.version3")
AddVersionParent("vim.version.version13", "vmodl.query.version.version2")
AddVersionParent("vim.version.version13", "vmodl.query.version.version1")
AddVersionParent("vim.version.version13", "vim.version.version8")
AddVersionParent("vim.version.version13", "vim.version.version9")
AddVersionParent("vim.version.version13", "vim.version.version6")
AddVersionParent("vim.version.version13", "vim.version.version7")
AddVersionParent("vim.version.version13", "vim.version.version1")
AddVersionParent("vim.version.version13", "vim.version.version4")
AddVersionParent("vim.version.version13", "vim.version.version5")
AddVersionParent("vim.version.version13", "vim.version.version2")
AddVersionParent("vim.version.version13", "vim.version.version3")
AddVersionParent("vim.version.version13", "vim.version.version13")
AddVersionParent("vim.version.version13", "vmodl.version.version0")
AddVersionParent("vim.version.version13", "vmodl.version.version1")
AddVersionParent("vim.version.version13", "vmodl.version.version2")
AddVersionParent("vim.version.version13", "vmodl.reflect.version.version1")
AddVersionParent("vim.version.version13", "vim.version.version10")
AddVersionParent("vim.version.version13", "vmodl.reflect.version.version2")
AddVersionParent("vim.version.version13", "vim.version.version11")
AddVersionParent("vim.version.version13", "vim.version.version12")
AddVersionParent("vim.version.version14", "vmodl.query.version.version4")
AddVersionParent("vim.version.version14", "vmodl.query.version.version3")
AddVersionParent("vim.version.version14", "vmodl.query.version.version2")
AddVersionParent("vim.version.version14", "vmodl.query.version.version1")
AddVersionParent("vim.version.version14", "vim.version.version8")
AddVersionParent("vim.version.version14", "vim.version.version9")
AddVersionParent("vim.version.version14", "vim.version.version6")
AddVersionParent("vim.version.version14", "vim.version.version7")
AddVersionParent("vim.version.version14", "vim.version.version1")
AddVersionParent("vim.version.version14", "vim.version.version4")
AddVersionParent("vim.version.version14", "vim.version.version5")
AddVersionParent("vim.version.version14", "vim.version.version2")
AddVersionParent("vim.version.version14", "vim.version.version3")
AddVersionParent("vim.version.version14", "vim.version.version13")
AddVersionParent("vim.version.version14", "vim.version.version14")
AddVersionParent("vim.version.version14", "vmodl.version.version0")
AddVersionParent("vim.version.version14", "vmodl.version.version1")
AddVersionParent("vim.version.version14", "vmodl.version.version2")
AddVersionParent("vim.version.version14", "vmodl.reflect.version.version1")
AddVersionParent("vim.version.version14", "vim.version.version10")
AddVersionParent("vim.version.version14", "vmodl.reflect.version.version2")
AddVersionParent("vim.version.version14", "vim.version.version11")
AddVersionParent("vim.version.version14", "vim.version.version12")
AddVersionParent("vim.version.version15", "vmodl.query.version.version4")
AddVersionParent("vim.version.version15", "vmodl.query.version.version3")
AddVersionParent("vim.version.version15", "vmodl.query.version.version2")
AddVersionParent("vim.version.version15", "vmodl.query.version.version1")
AddVersionParent("vim.version.version15", "vim.version.version8")
AddVersionParent("vim.version.version15", "vim.version.version9")
AddVersionParent("vim.version.version15", "vim.version.version6")
AddVersionParent("vim.version.version15", "vim.version.version7")
AddVersionParent("vim.version.version15", "vim.version.version1")
AddVersionParent("vim.version.version15", "vim.version.version4")
AddVersionParent("vim.version.version15", "vim.version.version5")
AddVersionParent("vim.version.version15", "vim.version.version2")
AddVersionParent("vim.version.version15", "vim.version.version3")
AddVersionParent("vim.version.version15", "vim.version.version13")
AddVersionParent("vim.version.version15", "vim.version.version14")
AddVersionParent("vim.version.version15", "vim.version.version15")
AddVersionParent("vim.version.version15", "vmodl.version.version0")
AddVersionParent("vim.version.version15", "vmodl.version.version1")
AddVersionParent("vim.version.version15", "vmodl.version.version2")
AddVersionParent("vim.version.version15", "vmodl.reflect.version.version1")
AddVersionParent("vim.version.version15", "vim.version.version10")
AddVersionParent("vim.version.version15", "vmodl.reflect.version.version2")
AddVersionParent("vim.version.version15", "vim.version.version11")
AddVersionParent("vim.version.version15", "vim.version.version12")
AddVersionParent("pbm.version.version1", "pbm.version.version1")
AddVersionParent("pbm.version.version1", "vmodl.version.version0")
AddVersionParent("pbm.version.version1", "vmodl.version.version1")
AddVersionParent("pbm.version.version1", "vmodl.version.version2")
AddVersionParent("vim.version.v7_0_1_0", "vmodl.query.version.version4")
AddVersionParent("vim.version.v7_0_1_0", "vmodl.query.version.version3")
AddVersionParent("vim.version.v7_0_1_0", "vmodl.query.version.version2")
AddVersionParent("vim.version.v7_0_1_0", "vmodl.query.version.version1")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.version8")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.version9")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.version6")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.version7")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.v7_0_0_2")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.version1")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.version4")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.version5")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.version2")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.version3")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.version13")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.version14")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.version15")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.v7_0_1_0")
AddVersionParent("vim.version.v7_0_1_0", "vmodl.version.version0")
AddVersionParent("vim.version.v7_0_1_0", "vmodl.version.version1")
AddVersionParent("vim.version.v7_0_1_0", "vmodl.version.version2")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.v6_9_1")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.v6_8_7")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.v7_0")
AddVersionParent("vim.version.v7_0_1_0", "vmodl.reflect.version.version1")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.version10")
AddVersionParent("vim.version.v7_0_1_0", "vmodl.reflect.version.version2")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.version11")
AddVersionParent("vim.version.v7_0_1_0", "vim.version.version12")
AddVersionParent("pbm.version.version2", "pbm.version.version1")
AddVersionParent("pbm.version.version2", "pbm.version.version2")
AddVersionParent("pbm.version.version2", "vmodl.version.version0")
AddVersionParent("pbm.version.version2", "vmodl.version.version1")
AddVersionParent("pbm.version.version2", "vmodl.version.version2")
AddVersionParent("vmodl.version.version0", "vmodl.version.version0")
AddVersionParent("vmodl.version.version1", "vmodl.version.version0")
AddVersionParent("vmodl.version.version1", "vmodl.version.version1")
AddVersionParent("vmodl.version.version2", "vmodl.version.version0")
AddVersionParent("vmodl.version.version2", "vmodl.version.version1")
AddVersionParent("vmodl.version.version2", "vmodl.version.version2")
AddVersionParent("vim.version.v6_9_1", "vmodl.query.version.version4")
AddVersionParent("vim.version.v6_9_1", "vmodl.query.version.version3")
AddVersionParent("vim.version.v6_9_1", "vmodl.query.version.version2")
AddVersionParent("vim.version.v6_9_1", "vmodl.query.version.version1")
AddVersionParent("vim.version.v6_9_1", "vim.version.version8")
AddVersionParent("vim.version.v6_9_1", "vim.version.version9")
AddVersionParent("vim.version.v6_9_1", "vim.version.version6")
AddVersionParent("vim.version.v6_9_1", "vim.version.version7")
AddVersionParent("vim.version.v6_9_1", "vim.version.version1")
AddVersionParent("vim.version.v6_9_1", "vim.version.version4")
AddVersionParent("vim.version.v6_9_1", "vim.version.version5")
AddVersionParent("vim.version.v6_9_1", "vim.version.version2")
AddVersionParent("vim.version.v6_9_1", "vim.version.version3")
AddVersionParent("vim.version.v6_9_1", "vim.version.version13")
AddVersionParent("vim.version.v6_9_1", "vim.version.version14")
AddVersionParent("vim.version.v6_9_1", "vim.version.version15")
AddVersionParent("vim.version.v6_9_1", "vmodl.version.version0")
AddVersionParent("vim.version.v6_9_1", "vmodl.version.version1")
AddVersionParent("vim.version.v6_9_1", "vmodl.version.version2")
AddVersionParent("vim.version.v6_9_1", "vim.version.v6_9_1")
AddVersionParent("vim.version.v6_9_1", "vim.version.v6_8_7")
AddVersionParent("vim.version.v6_9_1", "vmodl.reflect.version.version1")
AddVersionParent("vim.version.v6_9_1", "vim.version.version10")
AddVersionParent("vim.version.v6_9_1", "vmodl.reflect.version.version2")
AddVersionParent("vim.version.v6_9_1", "vim.version.version11")
AddVersionParent("vim.version.v6_9_1", "vim.version.version12")
AddVersionParent("vim.version.v6_8_7", "vmodl.query.version.version4")
AddVersionParent("vim.version.v6_8_7", "vmodl.query.version.version3")
AddVersionParent("vim.version.v6_8_7", "vmodl.query.version.version2")
AddVersionParent("vim.version.v6_8_7", "vmodl.query.version.version1")
AddVersionParent("vim.version.v6_8_7", "vim.version.version8")
AddVersionParent("vim.version.v6_8_7", "vim.version.version9")
AddVersionParent("vim.version.v6_8_7", "vim.version.version6")
AddVersionParent("vim.version.v6_8_7", "vim.version.version7")
AddVersionParent("vim.version.v6_8_7", "vim.version.version1")
AddVersionParent("vim.version.v6_8_7", "vim.version.version4")
AddVersionParent("vim.version.v6_8_7", "vim.version.version5")
AddVersionParent("vim.version.v6_8_7", "vim.version.version2")
AddVersionParent("vim.version.v6_8_7", "vim.version.version3")
AddVersionParent("vim.version.v6_8_7", "vim.version.version13")
AddVersionParent("vim.version.v6_8_7", "vim.version.version14")
AddVersionParent("vim.version.v6_8_7", "vim.version.version15")
AddVersionParent("vim.version.v6_8_7", "vmodl.version.version0")
AddVersionParent("vim.version.v6_8_7", "vmodl.version.version1")
AddVersionParent("vim.version.v6_8_7", "vmodl.version.version2")
AddVersionParent("vim.version.v6_8_7", "vim.version.v6_8_7")
AddVersionParent("vim.version.v6_8_7", "vmodl.reflect.version.version1")
AddVersionParent("vim.version.v6_8_7", "vim.version.version10")
AddVersionParent("vim.version.v6_8_7", "vmodl.reflect.version.version2")
AddVersionParent("vim.version.v6_8_7", "vim.version.version11")
AddVersionParent("vim.version.v6_8_7", "vim.version.version12")
AddVersionParent("vim.version.v7_0", "vmodl.query.version.version4")
AddVersionParent("vim.version.v7_0", "vmodl.query.version.version3")
AddVersionParent("vim.version.v7_0", "vmodl.query.version.version2")
AddVersionParent("vim.version.v7_0", "vmodl.query.version.version1")
AddVersionParent("vim.version.v7_0", "vim.version.version8")
AddVersionParent("vim.version.v7_0", "vim.version.version9")
AddVersionParent("vim.version.v7_0", "vim.version.version6")
AddVersionParent("vim.version.v7_0", "vim.version.version7")
AddVersionParent("vim.version.v7_0", "vim.version.version1")
AddVersionParent("vim.version.v7_0", "vim.version.version4")
AddVersionParent("vim.version.v7_0", "vim.version.version5")
AddVersionParent("vim.version.v7_0", "vim.version.version2")
AddVersionParent("vim.version.v7_0", "vim.version.version3")
AddVersionParent("vim.version.v7_0", "vim.version.version13")
AddVersionParent("vim.version.v7_0", "vim.version.version14")
AddVersionParent("vim.version.v7_0", "vim.version.version15")
AddVersionParent("vim.version.v7_0", "vmodl.version.version0")
AddVersionParent("vim.version.v7_0", "vmodl.version.version1")
AddVersionParent("vim.version.v7_0", "vmodl.version.version2")
AddVersionParent("vim.version.v7_0", "vim.version.v6_9_1")
AddVersionParent("vim.version.v7_0", "vim.version.v6_8_7")
AddVersionParent("vim.version.v7_0", "vim.version.v7_0")
AddVersionParent("vim.version.v7_0", "vmodl.reflect.version.version1")
AddVersionParent("vim.version.v7_0", "vim.version.version10")
AddVersionParent("vim.version.v7_0", "vmodl.reflect.version.version2")
AddVersionParent("vim.version.v7_0", "vim.version.version11")
AddVersionParent("vim.version.v7_0", "vim.version.version12")
AddVersionParent("vmodl.reflect.version.version1", "vmodl.version.version0")
AddVersionParent("vmodl.reflect.version.version1", "vmodl.version.version1")
AddVersionParent("vmodl.reflect.version.version1", "vmodl.version.version2")
AddVersionParent("vmodl.reflect.version.version1", "vmodl.reflect.version.version1")
AddVersionParent("vim.version.version10", "vmodl.query.version.version4")
AddVersionParent("vim.version.version10", "vmodl.query.version.version3")
AddVersionParent("vim.version.version10", "vmodl.query.version.version2")
AddVersionParent("vim.version.version10", "vmodl.query.version.version1")
AddVersionParent("vim.version.version10", "vim.version.version8")
AddVersionParent("vim.version.version10", "vim.version.version9")
AddVersionParent("vim.version.version10", "vim.version.version6")
AddVersionParent("vim.version.version10", "vim.version.version7")
AddVersionParent("vim.version.version10", "vim.version.version1")
AddVersionParent("vim.version.version10", "vim.version.version4")
AddVersionParent("vim.version.version10", "vim.version.version5")
AddVersionParent("vim.version.version10", "vim.version.version2")
AddVersionParent("vim.version.version10", "vim.version.version3")
AddVersionParent("vim.version.version10", "vmodl.version.version0")
AddVersionParent("vim.version.version10", "vmodl.version.version1")
AddVersionParent("vim.version.version10", "vmodl.version.version2")
AddVersionParent("vim.version.version10", "vmodl.reflect.version.version1")
AddVersionParent("vim.version.version10", "vim.version.version10")
AddVersionParent("vim.version.version10", "vmodl.reflect.version.version2")
AddVersionParent("vmodl.reflect.version.version2", "vmodl.version.version0")
AddVersionParent("vmodl.reflect.version.version2", "vmodl.version.version1")
AddVersionParent("vmodl.reflect.version.version2", "vmodl.version.version2")
AddVersionParent("vmodl.reflect.version.version2", "vmodl.reflect.version.version1")
AddVersionParent("vmodl.reflect.version.version2", "vmodl.reflect.version.version2")
AddVersionParent("vim.version.version11", "vmodl.query.version.version4")
AddVersionParent("vim.version.version11", "vmodl.query.version.version3")
AddVersionParent("vim.version.version11", "vmodl.query.version.version2")
AddVersionParent("vim.version.version11", "vmodl.query.version.version1")
AddVersionParent("vim.version.version11", "vim.version.version8")
AddVersionParent("vim.version.version11", "vim.version.version9")
AddVersionParent("vim.version.version11", "vim.version.version6")
AddVersionParent("vim.version.version11", "vim.version.version7")
AddVersionParent("vim.version.version11", "vim.version.version1")
AddVersionParent("vim.version.version11", "vim.version.version4")
AddVersionParent("vim.version.version11", "vim.version.version5")
AddVersionParent("vim.version.version11", "vim.version.version2")
AddVersionParent("vim.version.version11", "vim.version.version3")
AddVersionParent("vim.version.version11", "vmodl.version.version0")
AddVersionParent("vim.version.version11", "vmodl.version.version1")
AddVersionParent("vim.version.version11", "vmodl.version.version2")
AddVersionParent("vim.version.version11", "vmodl.reflect.version.version1")
AddVersionParent("vim.version.version11", "vim.version.version10")
AddVersionParent("vim.version.version11", "vmodl.reflect.version.version2")
AddVersionParent("vim.version.version11", "vim.version.version11")
AddVersionParent("vim.version.version12", "vmodl.query.version.version4")
AddVersionParent("vim.version.version12", "vmodl.query.version.version3")
AddVersionParent("vim.version.version12", "vmodl.query.version.version2")
AddVersionParent("vim.version.version12", "vmodl.query.version.version1")
AddVersionParent("vim.version.version12", "vim.version.version8")
AddVersionParent("vim.version.version12", "vim.version.version9")
AddVersionParent("vim.version.version12", "vim.version.version6")
AddVersionParent("vim.version.version12", "vim.version.version7")
AddVersionParent("vim.version.version12", "vim.version.version1")
AddVersionParent("vim.version.version12", "vim.version.version4")
AddVersionParent("vim.version.version12", "vim.version.version5")
AddVersionParent("vim.version.version12", "vim.version.version2")
AddVersionParent("vim.version.version12", "vim.version.version3")
AddVersionParent("vim.version.version12", "vmodl.version.version0")
AddVersionParent("vim.version.version12", "vmodl.version.version1")
AddVersionParent("vim.version.version12", "vmodl.version.version2")
AddVersionParent("vim.version.version12", "vmodl.reflect.version.version1")
AddVersionParent("vim.version.version12", "vim.version.version10")
AddVersionParent("vim.version.version12", "vmodl.reflect.version.version2")
AddVersionParent("vim.version.version12", "vim.version.version11")
AddVersionParent("vim.version.version12", "vim.version.version12")
newestVersions.Add("pbm.version.v7_0_1_0")
stableVersions.Add("pbm.version.v7_0_1_0")
publicVersions.Add("pbm.version.v7_0_1_0")
dottedVersions.Add("pbm.version.v7_0_1_0")
oldestVersions.Add("pbm.version.version1")
CreateDataType("pbm.AboutInfo", "PbmAboutInfo", "vmodl.DynamicData", "pbm.version.version1", [("name", "string", "pbm.version.version1", 0), ("version", "string", "pbm.version.version1", 0), ("instanceUuid", "string", "pbm.version.version1", 0)])
CreateDataType("pbm.ExtendedElementDescription", "PbmExtendedElementDescription", "vmodl.DynamicData", "pbm.version.version1", [("label", "string", "pbm.version.version1", 0), ("summary", "string", "pbm.version.version1", 0), ("key", "string", "pbm.version.version1", 0), ("messageCatalogKeyPrefix", "string", "pbm.version.version1", 0), ("messageArg", "vmodl.KeyAnyValue[]", "pbm.version.version1", F_OPTIONAL)])
CreateDataType("pbm.ServerObjectRef", "PbmServerObjectRef", "vmodl.DynamicData", "pbm.version.version1", [("objectType", "string", "pbm.version.version1", 0), ("key", "string", "pbm.version.version1", 0), ("serverUuid", "string", "pbm.version.version1", F_OPTIONAL)])
CreateEnumType("pbm.ServerObjectRef.ObjectType", "PbmObjectType", "pbm.version.version1", ["virtualMachine", "virtualMachineAndDisks", "virtualDiskId", "virtualDiskUUID", "datastore", "fileShareId", "unknown"])
CreateEnumType("pbm.ServerObjectRef.VvolType", "PbmVvolType", "pbm.version.version1", ["Config", "Data", "Swap"])
CreateManagedType("pbm.ServiceInstance", "PbmServiceInstance", "vmodl.ManagedObject", "pbm.version.version1", [("content", "pbm.ServiceInstanceContent", "pbm.version.version1", 0, "System.Anonymous")], [("retrieveContent", "PbmRetrieveServiceContent", "pbm.version.version1", (), (0, "pbm.ServiceInstanceContent", "pbm.ServiceInstanceContent"), "System.Anonymous", None)])
CreateDataType("pbm.ServiceInstanceContent", "PbmServiceInstanceContent", "vmodl.DynamicData", "pbm.version.version1", [("aboutInfo", "pbm.AboutInfo", "pbm.version.version1", 0), ("sessionManager", "pbm.auth.SessionManager", "pbm.version.version1", 0), ("capabilityMetadataManager", "pbm.capability.CapabilityMetadataManager", "pbm.version.version1", 0), ("profileManager", "pbm.profile.ProfileManager", "pbm.version.version1", 0), ("complianceManager", "pbm.compliance.ComplianceManager", "pbm.version.version1", 0), ("placementSolver", "pbm.placement.PlacementSolver", "pbm.version.version1", 0), ("replicationManager", "pbm.replication.ReplicationManager", "pbm.version.version11", F_OPTIONAL)])
CreateManagedType("pbm.auth.SessionManager", "PbmSessionManager", "vmodl.ManagedObject", "pbm.version.version1", None, None)
CreateDataType("pbm.capability.CapabilityMetadata", "PbmCapabilityMetadata", "vmodl.DynamicData", "pbm.version.version1", [("id", "pbm.capability.CapabilityMetadata.UniqueId", "pbm.version.version1", 0), ("summary", "pbm.ExtendedElementDescription", "pbm.version.version1", 0), ("mandatory", "boolean", "pbm.version.version1", F_OPTIONAL), ("hint", "boolean", "pbm.version.version1", F_OPTIONAL), ("keyId", "string", "pbm.version.version1", F_OPTIONAL), ("allowMultipleConstraints", "boolean", "pbm.version.version1", F_OPTIONAL), ("propertyMetadata", "pbm.capability.PropertyMetadata[]", "pbm.version.version1", 0)])
CreateDataType("pbm.capability.CapabilityMetadata.UniqueId", "PbmCapabilityMetadataUniqueId", "vmodl.DynamicData", "pbm.version.version1", [("namespace", "string", "pbm.version.version1", 0), ("id", "string", "pbm.version.version1", 0)])
CreateManagedType("pbm.capability.CapabilityMetadataManager", "PbmCapabilityMetadataManager", "vmodl.ManagedObject", "pbm.version.version1", None, None)
CreateDataType("pbm.capability.ConstraintInstance", "PbmCapabilityConstraintInstance", "vmodl.DynamicData", "pbm.version.version1", [("propertyInstance", "pbm.capability.PropertyInstance[]", "pbm.version.version1", 0)])
CreateEnumType("pbm.capability.Operator", "PbmCapabilityOperator", "pbm.version.version11", ["NOT"])
CreateDataType("pbm.capability.PropertyInstance", "PbmCapabilityPropertyInstance", "vmodl.DynamicData", "pbm.version.version1", [("id", "string", "pbm.version.version1", 0), ("operator", "string", "pbm.version.version11", F_OPTIONAL), ("value", "anyType", "pbm.version.version1", 0)])
CreateDataType("pbm.capability.PropertyMetadata", "PbmCapabilityPropertyMetadata", "vmodl.DynamicData", "pbm.version.version1", [("id", "string", "pbm.version.version1", 0), ("summary", "pbm.ExtendedElementDescription", "pbm.version.version1", 0), ("mandatory", "boolean", "pbm.version.version1", 0), ("type", "pbm.capability.TypeInfo", "pbm.version.version1", F_OPTIONAL), ("defaultValue", "anyType", "pbm.version.version1", F_OPTIONAL), ("allowedValue", "anyType", "pbm.version.version1", F_OPTIONAL), ("requirementsTypeHint", "string", "pbm.version.version1", F_OPTIONAL)])
CreateDataType("pbm.capability.TypeInfo", "PbmCapabilityTypeInfo", "vmodl.DynamicData", "pbm.version.version1", [("typeName", "string", "pbm.version.version1", 0)])
CreateDataType("pbm.capability.provider.CapabilityObjectMetadataPerCategory", "PbmCapabilityMetadataPerCategory", "vmodl.DynamicData", "pbm.version.version1", [("subCategory", "string", "pbm.version.version1", 0), ("capabilityMetadata", "pbm.capability.CapabilityMetadata[]", "pbm.version.version1", 0)])
CreateDataType("pbm.capability.provider.CapabilityObjectSchema", "PbmCapabilitySchema", "vmodl.DynamicData", "pbm.version.version1", [("vendorInfo", "pbm.capability.provider.CapabilityObjectSchema.VendorInfo", "pbm.version.version1", 0), ("namespaceInfo", "pbm.capability.provider.CapabilityObjectSchema.NamespaceInfo", "pbm.version.version1", 0), ("lineOfService", "pbm.capability.provider.LineOfServiceInfo", "pbm.version.version11", F_OPTIONAL), ("capabilityMetadataPerCategory", "pbm.capability.provider.CapabilityObjectMetadataPerCategory[]", "pbm.version.version1", 0)])
CreateDataType("pbm.capability.provider.CapabilityObjectSchema.NamespaceInfo", "PbmCapabilityNamespaceInfo", "vmodl.DynamicData", "pbm.version.version1", [("version", "string", "pbm.version.version1", 0), ("namespace", "string", "pbm.version.version1", 0), ("info", "pbm.ExtendedElementDescription", "pbm.version.version11", F_OPTIONAL)])
CreateDataType("pbm.capability.provider.CapabilityObjectSchema.VendorInfo", "PbmCapabilitySchemaVendorInfo", "vmodl.DynamicData", "pbm.version.version1", [("vendorUuid", "string", "pbm.version.version1", 0), ("info", "pbm.ExtendedElementDescription", "pbm.version.version1", 0)])
CreateDataType("pbm.capability.provider.CapabilityObjectSchema.VendorNamespaceInfo", "PbmCapabilityVendorNamespaceInfo", "vmodl.DynamicData", "pbm.version.version1", [("vendorInfo", "pbm.capability.provider.CapabilityObjectSchema.VendorInfo", "pbm.version.version1", 0), ("namespaceInfo", "pbm.capability.provider.CapabilityObjectSchema.NamespaceInfo", "pbm.version.version1", 0)])
CreateDataType("pbm.capability.provider.CapabilityObjectSchema.VendorResourceTypeInfo", "PbmCapabilityVendorResourceTypeInfo", "vmodl.DynamicData", "pbm.version.version1", [("resourceType", "string", "pbm.version.version1", 0), ("vendorNamespaceInfo", "pbm.capability.provider.CapabilityObjectSchema.VendorNamespaceInfo[]", "pbm.version.version1", 0)])
CreateDataType("pbm.capability.provider.LineOfServiceInfo", "PbmLineOfServiceInfo", "vmodl.DynamicData", "pbm.version.version11", [("lineOfService", "string", "pbm.version.version11", 0), ("name", "pbm.ExtendedElementDescription", "pbm.version.version11", 0), ("description", "pbm.ExtendedElementDescription", "pbm.version.version11", F_OPTIONAL)])
CreateEnumType("pbm.capability.provider.LineOfServiceInfo.LineOfServiceEnum", "PbmLineOfServiceInfoLineOfServiceEnum", "pbm.version.version11", ["INSPECTION", "COMPRESSION", "ENCRYPTION", "REPLICATION", "CACHING", "PERSISTENCE", "DATA_PROVIDER", "DATASTORE_IO_CONTROL", "DATA_PROTECTION"])
CreateDataType("pbm.capability.provider.PersistenceBasedDataServiceInfo", "PbmPersistenceBasedDataServiceInfo", "pbm.capability.provider.LineOfServiceInfo", "pbm.version.version11", [("compatiblePersistenceSchemaNamespace", "string[]", "pbm.version.version11", F_OPTIONAL)])
CreateDataType("pbm.capability.provider.VaioDataServiceInfo", "PbmVaioDataServiceInfo", "pbm.capability.provider.LineOfServiceInfo", "pbm.version.version11", None)
CreateEnumType("pbm.capability.types.BuiltinGenericTypesEnum", "PbmBuiltinGenericType", "pbm.version.version1", ["VMW_RANGE", "VMW_SET"])
CreateEnumType("pbm.capability.types.BuiltinTypesEnum", "PbmBuiltinType", "pbm.version.version1", ["XSD_LONG", "XSD_SHORT", "XSD_INTEGER", "XSD_INT", "XSD_STRING", "XSD_BOOLEAN", "XSD_DOUBLE", "XSD_DATETIME", "VMW_TIMESPAN", "VMW_POLICY"])
CreateDataType("pbm.capability.types.DescriptiveValue", "PbmCapabilityDescription", "vmodl.DynamicData", "pbm.version.version1", [("description", "pbm.ExtendedElementDescription", "pbm.version.version1", 0), ("value", "anyType", "pbm.version.version1", 0)])
CreateDataType("pbm.capability.types.DiscreteSet", "PbmCapabilityDiscreteSet", "vmodl.DynamicData", "pbm.version.version1", [("values", "anyType[]", "pbm.version.version1", 0)])
CreateDataType("pbm.capability.types.Range", "PbmCapabilityRange", "vmodl.DynamicData", "pbm.version.version1", [("min", "anyType", "pbm.version.version1", 0), ("max", "anyType", "pbm.version.version1", 0)])
CreateDataType("pbm.capability.types.TimeSpan", "PbmCapabilityTimeSpan", "vmodl.DynamicData", "pbm.version.version1", [("value", "int", "pbm.version.version1", 0), ("unit", "string", "pbm.version.version1", 0)])
CreateEnumType("pbm.capability.types.TimeUnitEnum", "PbmCapabilityTimeUnitType", "pbm.version.version1", ["SECONDS", "MINUTES", "HOURS", "DAYS", "WEEKS", "MONTHS", "YEARS"])
CreateManagedType("pbm.compliance.ComplianceManager", "PbmComplianceManager", "vmodl.ManagedObject", "pbm.version.version1", None, [("checkCompliance", "PbmCheckCompliance", "pbm.version.version1", (("entities", "pbm.ServerObjectRef[]", "pbm.version.version1", 0, None),("profile", "pbm.profile.ProfileId", "pbm.version.version1", F_OPTIONAL, None),), (F_OPTIONAL, "pbm.compliance.ComplianceResult[]", "pbm.compliance.ComplianceResult[]"), "StorageProfile.View", ["pbm.fault.PBMFault", ]), ("checkRollupCompliance", "PbmCheckRollupCompliance", "pbm.version.version1", (("entity", "pbm.ServerObjectRef[]", "pbm.version.version1", 0, None),), (F_OPTIONAL, "pbm.compliance.RollupComplianceResult[]", "pbm.compliance.RollupComplianceResult[]"), "StorageProfile.View", ["pbm.fault.PBMFault", ]), ("fetchComplianceResult", "PbmFetchComplianceResult", "pbm.version.version1", (("entities", "pbm.ServerObjectRef[]", "pbm.version.version1", 0, None),("profile", "pbm.profile.ProfileId", "pbm.version.version1", F_OPTIONAL, None),), (F_OPTIONAL, "pbm.compliance.ComplianceResult[]", "pbm.compliance.ComplianceResult[]"), "StorageProfile.View", ["pbm.fault.PBMFault", ]), ("fetchRollupComplianceResult", "PbmFetchRollupComplianceResult", "pbm.version.version1", (("entity", "pbm.ServerObjectRef[]", "pbm.version.version1", 0, None),), (F_OPTIONAL, "pbm.compliance.RollupComplianceResult[]", "pbm.compliance.RollupComplianceResult[]"), "StorageProfile.View", ["pbm.fault.PBMFault", ]), ("queryByRollupComplianceStatus", "PbmQueryByRollupComplianceStatus", "pbm.version.version11", (("status", "string", "pbm.version.version11", 0, None),), (F_OPTIONAL, "pbm.ServerObjectRef[]", "pbm.ServerObjectRef[]"), "StorageProfile.View", ["vmodl.fault.InvalidArgument", "pbm.fault.PBMFault", ])])
CreateDataType("pbm.compliance.ComplianceResult", "PbmComplianceResult", "vmodl.DynamicData", "pbm.version.version1", [("checkTime", "vmodl.DateTime", "pbm.version.version1", 0), ("entity", "pbm.ServerObjectRef", "pbm.version.version1", 0), ("profile", "pbm.profile.ProfileId", "pbm.version.version1", F_OPTIONAL), ("complianceTaskStatus", "string", "pbm.version.version11", F_OPTIONAL), ("complianceStatus", "string", "pbm.version.version1", 0), ("mismatch", "boolean", "pbm.version.version1", 0), ("violatedPolicies", "pbm.compliance.PolicyStatus[]", "pbm.version.version1", F_OPTIONAL), ("errorCause", "vmodl.MethodFault[]", "pbm.version.version11", F_OPTIONAL), ("operationalStatus", "pbm.compliance.OperationalStatus", "pbm.version.version1", F_OPTIONAL), ("info", "pbm.ExtendedElementDescription", "pbm.version.version11", F_OPTIONAL)])
CreateEnumType("pbm.compliance.ComplianceResult.ComplianceStatus", "PbmComplianceStatus", "pbm.version.version1", ["compliant", "nonCompliant", "unknown", "notApplicable", "outOfDate"])
CreateEnumType("pbm.compliance.ComplianceResult.ComplianceTaskStatus", "PbmComplianceResultComplianceTaskStatus", "pbm.version.version11", ["inProgress", "success", "failed"])
CreateEnumType("pbm.compliance.EntityHealthStatus.HealthStatus", "PbmHealthStatusForEntity", "pbm.version.v7_0", ["red", "yellow", "green", "unknown"])
CreateDataType("pbm.compliance.OperationalStatus", "PbmComplianceOperationalStatus", "vmodl.DynamicData", "pbm.version.version1", [("healthy", "boolean", "pbm.version.version1", F_OPTIONAL), ("operationETA", "vmodl.DateTime", "pbm.version.version1", F_OPTIONAL), ("operationProgress", "long", "pbm.version.version1", F_OPTIONAL), ("transitional", "boolean", "pbm.version.version1", F_OPTIONAL)])
CreateDataType("pbm.compliance.PolicyStatus", "PbmCompliancePolicyStatus", "vmodl.DynamicData", "pbm.version.version1", [("expectedValue", "pbm.capability.CapabilityInstance", "pbm.version.version1", 0), ("currentValue", "pbm.capability.CapabilityInstance", "pbm.version.version1", F_OPTIONAL)])
CreateDataType("pbm.compliance.RollupComplianceResult", "PbmRollupComplianceResult", "vmodl.DynamicData", "pbm.version.version1", [("oldestCheckTime", "vmodl.DateTime", "pbm.version.version1", 0), ("entity", "pbm.ServerObjectRef", "pbm.version.version1", 0), ("overallComplianceStatus", "string", "pbm.version.version1", 0), ("overallComplianceTaskStatus", "string", "pbm.version.version11", F_OPTIONAL), ("result", "pbm.compliance.ComplianceResult[]", "pbm.version.version1", F_OPTIONAL), ("errorCause", "vmodl.MethodFault[]", "pbm.version.version11", F_OPTIONAL), ("profileMismatch", "boolean", "pbm.version.version1", 0)])
CreateDataType("pbm.fault.PBMFault", "PbmFault", "vmodl.MethodFault", "pbm.version.version1", None)
CreateDataType("pbm.fault.ProfileStorageFault", "PbmFaultProfileStorageFault", "pbm.fault.PBMFault", "pbm.version.version1", None)
CreateDataType("pbm.fault.ResourceInUse", "PbmResourceInUse", "pbm.fault.PBMFault", "pbm.version.version1", [("type", "vmodl.TypeName", "pbm.version.version1", F_OPTIONAL), ("name", "string", "pbm.version.version1", F_OPTIONAL)])
CreateDataType("pbm.placement.CompatibilityResult", "PbmPlacementCompatibilityResult", "vmodl.DynamicData", "pbm.version.version1", [("hub", "pbm.placement.PlacementHub", "pbm.version.version1", 0), ("matchingResources", "pbm.placement.MatchingResources[]", "pbm.version.version11", F_OPTIONAL), ("howMany", "long", "pbm.version.version11", F_OPTIONAL), ("utilization", "pbm.placement.ResourceUtilization[]", "pbm.version.version11", F_OPTIONAL), ("warning", "vmodl.MethodFault[]", "pbm.version.version1", F_OPTIONAL), ("error", "vmodl.MethodFault[]", "pbm.version.version1", F_OPTIONAL)])
CreateDataType("pbm.placement.MatchingResources", "PbmPlacementMatchingResources", "vmodl.DynamicData", "pbm.version.version11", None)
CreateDataType("pbm.placement.PlacementHub", "PbmPlacementHub", "vmodl.DynamicData", "pbm.version.version1", [("hubType", "string", "pbm.version.version1", 0), ("hubId", "string", "pbm.version.version1", 0)])
CreateManagedType("pbm.placement.PlacementSolver", "PbmPlacementSolver", "vmodl.ManagedObject", "pbm.version.version1", None, [("checkCompatibility", "PbmCheckCompatibility", "pbm.version.version1", (("hubsToSearch", "pbm.placement.PlacementHub[]", "pbm.version.version1", F_OPTIONAL, None),("profile", "pbm.profile.ProfileId", "pbm.version.version1", 0, None),), (F_OPTIONAL, "pbm.placement.CompatibilityResult[]", "pbm.placement.CompatibilityResult[]"), "StorageProfile.View", None), ("checkCompatibilityWithSpec", "PbmCheckCompatibilityWithSpec", "pbm.version.version1", (("hubsToSearch", "pbm.placement.PlacementHub[]", "pbm.version.version1", F_OPTIONAL, None),("profileSpec", "pbm.profile.CapabilityBasedProfileCreateSpec", "pbm.version.version1", 0, None),), (F_OPTIONAL, "pbm.placement.CompatibilityResult[]", "pbm.placement.CompatibilityResult[]"), "StorageProfile.View", None), ("checkRequirements", "PbmCheckRequirements", "pbm.version.version11", (("hubsToSearch", "pbm.placement.PlacementHub[]", "pbm.version.version11", F_OPTIONAL, None),("placementSubjectRef", "pbm.ServerObjectRef", "pbm.version.version11", F_OPTIONAL, None),("placementSubjectRequirement", "pbm.placement.Requirement[]", "pbm.version.version11", F_OPTIONAL, None),), (F_OPTIONAL, "pbm.placement.CompatibilityResult[]", "pbm.placement.CompatibilityResult[]"), "StorageProfile.View", ["pbm.fault.PBMFault", ]), ("queryMatchingHub", "PbmQueryMatchingHub", "pbm.version.version1", (("hubsToSearch", "pbm.placement.PlacementHub[]", "pbm.version.version1", F_OPTIONAL, None),("profile", "pbm.profile.ProfileId", "pbm.version.version1", 0, None),), (F_OPTIONAL, "pbm.placement.PlacementHub[]", "pbm.placement.PlacementHub[]"), "StorageProfile.View", ["pbm.fault.PBMFault", ]), ("queryMatchingHubWithSpec", "PbmQueryMatchingHubWithSpec", "pbm.version.version1", (("hubsToSearch", "pbm.placement.PlacementHub[]", "pbm.version.version1", F_OPTIONAL, None),("createSpec", "pbm.profile.CapabilityBasedProfileCreateSpec", 
"pbm.version.version1", 0, None),), (F_OPTIONAL, "pbm.placement.PlacementHub[]", "pbm.placement.PlacementHub[]"), "StorageProfile.View", ["pbm.fault.PBMFault", ])])
CreateDataType("pbm.placement.Requirement", "PbmPlacementRequirement", "vmodl.DynamicData", "pbm.version.version11", None)
CreateDataType("pbm.placement.ResourceUtilization", "PbmPlacementResourceUtilization", "vmodl.DynamicData", "pbm.version.version11", [("name", "pbm.ExtendedElementDescription", "pbm.version.version11", 0), ("description", "pbm.ExtendedElementDescription", "pbm.version.version11", 0), ("availableBefore", "long", "pbm.version.version11", F_OPTIONAL), ("availableAfter", "long", "pbm.version.version11", F_OPTIONAL), ("total", "long", "pbm.version.version11", F_OPTIONAL)])
CreateDataType("pbm.profile.CapabilityBasedProfileCreateSpec", "PbmCapabilityProfileCreateSpec", "vmodl.DynamicData", "pbm.version.version1", [("name", "string", "pbm.version.version1", 0), ("description", "string", "pbm.version.version1", F_OPTIONAL), ("category", "string", "pbm.version.version11", F_OPTIONAL), ("resourceType", "pbm.profile.ResourceType", "pbm.version.version1", 0), ("constraints", "pbm.profile.CapabilityConstraints", "pbm.version.version1", 0)])
CreateDataType("pbm.profile.CapabilityBasedProfileUpdateSpec", "PbmCapabilityProfileUpdateSpec", "vmodl.DynamicData", "pbm.version.version1", [("name", "string", "pbm.version.version1", F_OPTIONAL), ("description", "string", "pbm.version.version1", F_OPTIONAL), ("constraints", "pbm.profile.CapabilityConstraints", "pbm.version.version1", F_OPTIONAL)])
CreateDataType("pbm.profile.CapabilityConstraints", "PbmCapabilityConstraints", "vmodl.DynamicData", "pbm.version.version1", None)
CreateDataType("pbm.profile.DataServiceToPoliciesMap", "PbmDataServiceToPoliciesMap", "vmodl.DynamicData", "pbm.version.version11", [("dataServicePolicy", "pbm.profile.ProfileId", "pbm.version.version11", 0), ("parentStoragePolicies", "pbm.profile.ProfileId[]", "pbm.version.version11", F_OPTIONAL), ("fault", "vmodl.MethodFault", "pbm.version.version11", F_OPTIONAL)])
CreateDataType("pbm.profile.DefaultProfileInfo", "PbmDefaultProfileInfo", "vmodl.DynamicData", "pbm.version.version2", [("datastores", "pbm.placement.PlacementHub[]", "pbm.version.version2", 0), ("defaultProfile", "pbm.profile.Profile", "pbm.version.version2", F_OPTIONAL)])
CreateEnumType("pbm.profile.EntityAssociations.Operation", "PbmOperation", "pbm.version.version11", ["CREATE", "REGISTER", "RECONFIGURE", "MIGRATE", "CLONE"])
CreateEnumType("pbm.profile.IofilterInfo.FilterType", "PbmIofilterInfoFilterType", "pbm.version.version1", ["INSPECTION", "COMPRESSION", "ENCRYPTION", "REPLICATION", "CACHE", "DATAPROVIDER", "DATASTOREIOCONTROL"])
CreateDataType("pbm.profile.Profile", "PbmProfile", "vmodl.DynamicData", "pbm.version.version1", [("profileId", "pbm.profile.ProfileId", "pbm.version.version1", 0), ("name", "string", "pbm.version.version1", 0), ("description", "string", "pbm.version.version1", F_OPTIONAL), ("creationTime", "vmodl.DateTime", "pbm.version.version1", 0), ("createdBy", "string", "pbm.version.version1", 0), ("lastUpdatedTime", "vmodl.DateTime", "pbm.version.version1", 0), ("lastUpdatedBy", "string", "pbm.version.version1", 0)])
CreateDataType("pbm.profile.ProfileId", "PbmProfileId", "vmodl.DynamicData", "pbm.version.version1", [("uniqueId", "string", "pbm.version.version1", 0)])
CreateManagedType("pbm.profile.ProfileManager", "PbmProfileProfileManager", "vmodl.ManagedObject", "pbm.version.version1", None, [("assignDefaultRequirementProfile", "PbmAssignDefaultRequirementProfile", "pbm.version.version2", (("profile", "pbm.profile.ProfileId", "pbm.version.version2", 0, None),("datastores", "pbm.placement.PlacementHub[]", "pbm.version.version2", 0, None),), (0, "void", "void"), "StorageProfile.Update", ["vmodl.fault.InvalidArgument", "pbm.fault.LegacyHubsNotSupported", "pbm.fault.NonExistentHubs", "pbm.fault.PBMFault", ]), ("create", "PbmCreate", "pbm.version.version1", (("createSpec", "pbm.profile.CapabilityBasedProfileCreateSpec", "pbm.version.version1", 0, None),), (0, "pbm.profile.ProfileId", "pbm.profile.ProfileId"), "StorageProfile.Update", ["vmodl.fault.InvalidArgument", "pbm.fault.ProfileStorageFault", "pbm.fault.DuplicateName", ]), ("delete", "PbmDelete", "pbm.version.version1", (("profileId", "pbm.profile.ProfileId[]", "pbm.version.version1", 0, None),), (F_OPTIONAL, "pbm.profile.ProfileOperationOutcome[]", "pbm.profile.ProfileOperationOutcome[]"), "StorageProfile.Update", None), ("fetchCapabilityMetadata", "PbmFetchCapabilityMetadata", "pbm.version.version1", (("resourceType", "pbm.profile.ResourceType", "pbm.version.version1", F_OPTIONAL, None),("vendorUuid", "string", "pbm.version.version1", F_OPTIONAL, None),), (F_OPTIONAL, "pbm.capability.provider.CapabilityObjectMetadataPerCategory[]", "pbm.capability.provider.CapabilityObjectMetadataPerCategory[]"), "StorageProfile.View", None), ("fetchCapabilitySchema", "PbmFetchCapabilitySchema", "pbm.version.version11", (("vendorUuid", "string", "pbm.version.version11", F_OPTIONAL, None),("lineOfService", "string[]", "pbm.version.version11", F_OPTIONAL, None),), (F_OPTIONAL, "pbm.capability.provider.CapabilityObjectSchema[]", "pbm.capability.provider.CapabilityObjectSchema[]"), "StorageProfile.View", ["pbm.fault.PBMFault", ]), ("fetchResourceType", "PbmFetchResourceType", 
"pbm.version.version1", (), (F_OPTIONAL, "pbm.profile.ResourceType[]", "pbm.profile.ResourceType[]"), "StorageProfile.View", None), ("fetchVendorInfo", "PbmFetchVendorInfo", "pbm.version.version1", (("resourceType", "pbm.profile.ResourceType", "pbm.version.version1", F_OPTIONAL, None),), (F_OPTIONAL, "pbm.capability.provider.CapabilityObjectSchema.VendorResourceTypeInfo[]", "pbm.capability.provider.CapabilityObjectSchema.VendorResourceTypeInfo[]"), "StorageProfile.View", None), ("findApplicableDefaultProfile", "PbmFindApplicableDefaultProfile", "pbm.version.version2", (("datastores", "pbm.placement.PlacementHub[]", "pbm.version.version2", 0, None),), (F_OPTIONAL, "pbm.profile.Profile[]", "pbm.profile.Profile[]"), "StorageProfile.View", ["pbm.fault.LegacyHubsNotSupported", "pbm.fault.NonExistentHubs", "pbm.fault.PBMFault", "vmodl.fault.InvalidArgument", ]), ("queryAssociatedEntities", "PbmQueryAssociatedEntities", "pbm.version.version11", (("profiles", "pbm.profile.ProfileId[]", "pbm.version.version11", F_OPTIONAL, None),), (F_OPTIONAL, "pbm.profile.QueryProfileResult[]", "pbm.profile.QueryProfileResult[]"), "StorageProfile.View", ["pbm.fault.PBMFault", ]), ("queryAssociatedEntity", "PbmQueryAssociatedEntity", "pbm.version.version1", (("profile", "pbm.profile.ProfileId", "pbm.version.version1", 0, None),("entityType", "string", "pbm.version.version1", F_OPTIONAL, None),), (F_OPTIONAL, "pbm.ServerObjectRef[]", "pbm.ServerObjectRef[]"), "StorageProfile.View", ["pbm.fault.PBMFault", ]), ("queryAssociatedProfile", "PbmQueryAssociatedProfile", "pbm.version.version1", (("entity", "pbm.ServerObjectRef", "pbm.version.version1", 0, None),), (F_OPTIONAL, "pbm.profile.ProfileId[]", "pbm.profile.ProfileId[]"), "StorageProfile.View", ["pbm.fault.PBMFault", ]), ("queryAssociatedProfiles", "PbmQueryAssociatedProfiles", "pbm.version.version1", (("entities", "pbm.ServerObjectRef[]", "pbm.version.version1", 0, None),), (F_OPTIONAL, "pbm.profile.QueryProfileResult[]", 
"pbm.profile.QueryProfileResult[]"), "StorageProfile.View", ["pbm.fault.PBMFault", ]), ("queryDefaultRequirementProfile", "PbmQueryDefaultRequirementProfile", "pbm.version.version2", (("hub", "pbm.placement.PlacementHub", "pbm.version.version2", 0, None),), (F_OPTIONAL, "pbm.profile.ProfileId", "pbm.profile.ProfileId"), "StorageProfile.View", ["vmodl.fault.InvalidArgument", "pbm.fault.NonExistentHubs", "pbm.fault.PBMFault", ]), ("queryDefaultRequirementProfiles", "PbmQueryDefaultRequirementProfiles", "pbm.version.version2", (("datastores", "pbm.placement.PlacementHub[]", "pbm.version.version2", 0, None),), (0, "pbm.profile.DefaultProfileInfo[]", "pbm.profile.DefaultProfileInfo[]"), "StorageProfile.View", ["vmodl.fault.InvalidArgument", "pbm.fault.NonExistentHubs", "pbm.fault.PBMFault", ]), ("queryProfile", "PbmQueryProfile", "pbm.version.version1", (("resourceType", "pbm.profile.ResourceType", "pbm.version.version1", 0, None),("profileCategory", "string", "pbm.version.version1", F_OPTIONAL, None),), (F_OPTIONAL, "pbm.profile.ProfileId[]", "pbm.profile.ProfileId[]"), "StorageProfile.View", ["vmodl.fault.InvalidArgument", ]), ("querySpaceStatsForStorageContainer", "PbmQuerySpaceStatsForStorageContainer", "pbm.version.version2", (("datastore", "pbm.ServerObjectRef", "pbm.version.version2", 0, None),("capabilityProfileId", "pbm.profile.ProfileId[]", "pbm.version.version2", F_OPTIONAL, None),), (F_OPTIONAL, "pbm.profile.provider.DatastoreSpaceStatistics[]", "pbm.profile.provider.DatastoreSpaceStatistics[]"), "StorageProfile.View", ["vmodl.fault.InvalidArgument", "pbm.fault.PBMFault", ]), ("resetDefaultRequirementProfile", "PbmResetDefaultRequirementProfile", "pbm.version.version1", (("profile", "pbm.profile.ProfileId", "pbm.version.version1", F_OPTIONAL, None),), (0, "void", "void"), "StorageProfile.Update", None), ("resetVSanDefaultProfile", "PbmResetVSanDefaultProfile", "pbm.version.version2", (), (0, "void", "void"), "StorageProfile.Update", None), 
("retrieveContent", "PbmRetrieveContent", "pbm.version.version1", (("profileIds", "pbm.profile.ProfileId[]", "pbm.version.version1", 0, None),), (0, "pbm.profile.Profile[]", "pbm.profile.Profile[]"), "StorageProfile.View", ["vmodl.fault.InvalidArgument", ]), ("update", "PbmUpdate", "pbm.version.version1", (("profileId", "pbm.profile.ProfileId", "pbm.version.version1", 0, None),("updateSpec", "pbm.profile.CapabilityBasedProfileUpdateSpec", "pbm.version.version1", 0, None),), (0, "void", "void"), "StorageProfile.Update", ["vmodl.fault.InvalidArgument", "pbm.fault.ProfileStorageFault", ])])
CreateDataType("pbm.profile.ProfileOperationOutcome", "PbmProfileOperationOutcome", "vmodl.DynamicData", "pbm.version.version1", [("profileId", "pbm.profile.ProfileId", "pbm.version.version1", 0), ("fault", "vmodl.MethodFault", "pbm.version.version1", F_OPTIONAL)])
CreateDataType("pbm.profile.ProfileType", "PbmProfileType", "vmodl.DynamicData", "pbm.version.version1", [("uniqueId", "string", "pbm.version.version1", 0)])
CreateDataType("pbm.profile.QueryProfileResult", "PbmQueryProfileResult", "vmodl.DynamicData", "pbm.version.version1", [("object", "pbm.ServerObjectRef", "pbm.version.version1", 0), ("profileId", "pbm.profile.ProfileId[]", "pbm.version.version1", F_OPTIONAL), ("fault", "vmodl.MethodFault", "pbm.version.version1", F_OPTIONAL)])
CreateDataType("pbm.profile.ResourceType", "PbmProfileResourceType", "vmodl.DynamicData", "pbm.version.version1", [("resourceType", "string", "pbm.version.version1", 0)])
CreateEnumType("pbm.profile.ResourceTypeEnum", "PbmProfileResourceTypeEnum", "pbm.version.version1", ["STORAGE"])
CreateDataType("pbm.profile.SubProfileCapabilityConstraints", "PbmCapabilitySubProfileConstraints", "pbm.profile.CapabilityConstraints", "pbm.version.version1", [("subProfiles", "pbm.profile.SubProfileCapabilityConstraints.SubProfile[]", "pbm.version.version1", 0)])
CreateDataType("pbm.profile.SubProfileCapabilityConstraints.SubProfile", "PbmCapabilitySubProfile", "vmodl.DynamicData", "pbm.version.version1", [("name", "string", "pbm.version.version1", 0), ("capability", "pbm.capability.CapabilityInstance[]", "pbm.version.version1", 0), ("forceProvision", "boolean", "pbm.version.version1", F_OPTIONAL)])
CreateEnumType("pbm.profile.VmAssociations.Operation", "PbmVmOperation", "pbm.version.version11", ["CREATE", "RECONFIGURE", "MIGRATE", "CLONE"])
CreateDataType("pbm.profile.provider.DatastoreSpaceStatistics", "PbmDatastoreSpaceStatistics", "vmodl.DynamicData", "pbm.version.version2", [("profileId", "string", "pbm.version.version2", F_OPTIONAL), ("physicalTotalInMB", "long", "pbm.version.version2", 0), ("physicalFreeInMB", "long", "pbm.version.version2", 0), ("physicalUsedInMB", "long", "pbm.version.version2", 0), ("logicalLimitInMB", "long", "pbm.version.version2", F_OPTIONAL), ("logicalFreeInMB", "long", "pbm.version.version2", 0), ("logicalUsedInMB", "long", "pbm.version.version2", 0)])
CreateManagedType("pbm.provider.Provider", "PbmProvider", "vmodl.ManagedObject", "pbm.version.version1", None, None)
CreateDataType("pbm.replication.QueryReplicationGroupResult", "PbmQueryReplicationGroupResult", "vmodl.DynamicData", "pbm.version.version11", [("object", "pbm.ServerObjectRef", "pbm.version.version11", 0), ("replicationGroupId", "vim.vm.replication.ReplicationGroupId", "pbm.version.version11", F_OPTIONAL), ("fault", "vmodl.MethodFault", "pbm.version.version11", F_OPTIONAL)])
CreateManagedType("pbm.replication.ReplicationManager", "PbmReplicationManager", "vmodl.ManagedObject", "pbm.version.version11", None, [("queryReplicationGroups", "PbmQueryReplicationGroups", "pbm.version.version11", (("entities", "pbm.ServerObjectRef[]", "pbm.version.version11", F_OPTIONAL, None),), (F_OPTIONAL, "pbm.replication.QueryReplicationGroupResult[]", "pbm.replication.QueryReplicationGroupResult[]"), "StorageProfile.View", ["pbm.fault.PBMFault", ])])
CreateDataType("pbm.capability.CapabilityInstance", "PbmCapabilityInstance", "vmodl.DynamicData", "pbm.version.version1", [("id", "pbm.capability.CapabilityMetadata.UniqueId", "pbm.version.version1", 0), ("constraint", "pbm.capability.ConstraintInstance[]", "pbm.version.version1", 0)])
CreateDataType("pbm.capability.GenericTypeInfo", "PbmCapabilityGenericTypeInfo", "pbm.capability.TypeInfo", "pbm.version.version1", [("genericTypeName", "string", "pbm.version.version1", 0)])
CreateDataType("pbm.fault.AlreadyExists", "PbmAlreadyExists", "pbm.fault.PBMFault", "pbm.version.version1", [("name", "string", "pbm.version.version1", F_OPTIONAL)])
CreateDataType("pbm.fault.CompatibilityCheckFault", "PbmCompatibilityCheckFault", "pbm.fault.PBMFault", "pbm.version.version1", [("hub", "pbm.placement.PlacementHub", "pbm.version.version1", 0)])
CreateDataType("pbm.fault.DefaultProfileAppliesFault", "PbmDefaultProfileAppliesFault", "pbm.fault.CompatibilityCheckFault", "pbm.version.version1", None)
CreateDataType("pbm.fault.DuplicateName", "PbmDuplicateName", "pbm.fault.PBMFault", "pbm.version.version1", [("name", "string", "pbm.version.version1", 0)])
CreateDataType("pbm.fault.InvalidLogin", "PbmFaultInvalidLogin", "pbm.fault.PBMFault", "pbm.version.version2", None)
CreateDataType("pbm.fault.LegacyHubsNotSupported", "PbmLegacyHubsNotSupported", "pbm.fault.PBMFault", "pbm.version.version2", [("hubs", "pbm.placement.PlacementHub[]", "pbm.version.version2", 0)])
CreateDataType("pbm.fault.NonExistentHubs", "PbmNonExistentHubs", "pbm.fault.PBMFault", "pbm.version.version2", [("hubs", "pbm.placement.PlacementHub[]", "pbm.version.version2", 0)])
CreateDataType("pbm.fault.NotFound", "PbmFaultNotFound", "pbm.fault.PBMFault", "pbm.version.version1", None)
CreateDataType("pbm.fault.PropertyMismatchFault", "PbmPropertyMismatchFault", "pbm.fault.CompatibilityCheckFault", "pbm.version.version1", [("capabilityInstanceId", "pbm.capability.CapabilityMetadata.UniqueId", "pbm.version.version1", 0), ("requirementPropertyInstance", "pbm.capability.PropertyInstance", "pbm.version.version1", 0)])
CreateDataType("pbm.placement.CapabilityConstraintsRequirement", "PbmPlacementCapabilityConstraintsRequirement", "pbm.placement.Requirement", "pbm.version.version11", [("constraints", "pbm.profile.CapabilityConstraints", "pbm.version.version11", 0)])
CreateDataType("pbm.placement.CapabilityProfileRequirement", "PbmPlacementCapabilityProfileRequirement", "pbm.placement.Requirement", "pbm.version.version11", [("profileId", "pbm.profile.ProfileId", "pbm.version.version11", 0)])
CreateDataType("pbm.placement.MatchingReplicationResources", "PbmPlacementMatchingReplicationResources", "pbm.placement.MatchingResources", "pbm.version.version11", [("replicationGroup", "vim.vm.replication.ReplicationGroupId[]", "pbm.version.version11", F_OPTIONAL)])
CreateDataType("pbm.profile.CapabilityBasedProfile", "PbmCapabilityProfile", "pbm.profile.Profile", "pbm.version.version1", [("profileCategory", "string", "pbm.version.version1", 0), ("resourceType", "pbm.profile.ResourceType", "pbm.version.version1", 0), ("constraints", "pbm.profile.CapabilityConstraints", "pbm.version.version1", 0), ("generationId", "long", "pbm.version.version1", F_OPTIONAL), ("isDefault", "boolean", "pbm.version.version1", 0), ("systemCreatedProfileType", "string", "pbm.version.version2", F_OPTIONAL), ("lineOfService", "string", "pbm.version.version11", F_OPTIONAL)])
CreateEnumType("pbm.profile.CapabilityBasedProfile.ProfileCategoryEnum", "PbmProfileCategoryEnum", "pbm.version.version1", ["REQUIREMENT", "RESOURCE", "DATA_SERVICE_POLICY"])
CreateEnumType("pbm.profile.CapabilityBasedProfile.SystemCreatedProfileType", "PbmSystemCreatedProfileType", "pbm.version.version2", ["VsanDefaultProfile", "VVolDefaultProfile", "PmemDefaultProfile"])
CreateDataType("pbm.profile.DefaultCapabilityBasedProfile", "PbmDefaultCapabilityProfile", "pbm.profile.CapabilityBasedProfile", "pbm.version.version1", [("vvolType", "string[]", "pbm.version.version1", 0), ("containerId", "string", "pbm.version.version1", 0)])
CreateDataType("pbm.fault.CapabilityProfilePropertyMismatchFault", "PbmCapabilityProfilePropertyMismatchFault", "pbm.fault.PropertyMismatchFault", "pbm.version.version1", [("resourcePropertyInstance", "pbm.capability.PropertyInstance", "pbm.version.version1", 0)])
CreateDataType("pbm.fault.IncompatibleVendorSpecificRuleSet", "PbmIncompatibleVendorSpecificRuleSet", "pbm.fault.CapabilityProfilePropertyMismatchFault", "pbm.version.version1", None)
| 105.168883 | 6,539 | 0.779724 |
2cee3fa56152e4e3c278b13b6e20c1a6bace6a87 | 3,427 | py | Python | src/ramstk/models/programdb/opstress/record.py | TahaEntezari/ramstk | f82e5b31ef5c4e33cc02252263247b99a9abe129 | [
"BSD-3-Clause"
] | 26 | 2019-05-15T02:03:47.000Z | 2022-02-21T07:28:11.000Z | src/ramstk/models/programdb/opstress/record.py | TahaEntezari/ramstk | f82e5b31ef5c4e33cc02252263247b99a9abe129 | [
"BSD-3-Clause"
] | 815 | 2019-05-10T12:31:52.000Z | 2022-03-31T12:56:26.000Z | src/ramstk/models/programdb/opstress/record.py | TahaEntezari/ramstk | f82e5b31ef5c4e33cc02252263247b99a9abe129 | [
"BSD-3-Clause"
] | 9 | 2019-04-20T23:06:29.000Z | 2022-01-24T21:21:04.000Z | # pylint: disable=duplicate-code
# -*- coding: utf-8 -*-
#
# ramstk.models.programdb.RAMSTKOpStress.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright 2007 - 2021 Doyle Rowland doyle.rowland <AT> reliaqual <DOT> com
"""RAMSTKOpStress Table."""
# Third Party Imports
from sqlalchemy import Column, ForeignKeyConstraint, Integer, String
from sqlalchemy.orm import relationship
# RAMSTK Package Imports
from ramstk.db import RAMSTK_BASE
from ramstk.models import RAMSTKBaseRecord
class RAMSTKOpStressRecord(RAMSTK_BASE, RAMSTKBaseRecord):
    """Class to represent table ramstk_op_stress in RAMSTK Program database.
    This table shares a Many-to-One relationship with ramstk_op_load.
    """
    # Default values for the user-editable string columns; RAMSTKBaseRecord
    # uses this mapping when (re)setting attributes.
    __defaults__ = {
        "description": "",
        "load_history": "",
        "measurable_parameter": "",
        "remarks": "",
    }
    __tablename__ = "ramstk_op_stress"
    # Composite foreign key back to the owning operating-load record in
    # ramstk_op_load (Many-to-One side of the relationship).
    __table_args__ = (
        ForeignKeyConstraint(
            [
                "fld_revision_id",
                "fld_hardware_id",
                "fld_mode_id",
                "fld_mechanism_id",
                "fld_load_id",
            ],
            [
                "ramstk_op_load.fld_revision_id",
                "ramstk_op_load.fld_hardware_id",
                "ramstk_op_load.fld_mode_id",
                "ramstk_op_load.fld_mechanism_id",
                "ramstk_op_load.fld_load_id",
            ],
        ),
        {"extend_existing": True},
    )
    # Composite primary key locating this op stress under its parent load.
    revision_id = Column("fld_revision_id", Integer, primary_key=True, nullable=False)
    hardware_id = Column(
        "fld_hardware_id", Integer, primary_key=True, default=-1, nullable=False
    )
    mode_id = Column("fld_mode_id", Integer, primary_key=True, nullable=False)
    mechanism_id = Column("fld_mechanism_id", Integer, primary_key=True, nullable=False)
    load_id = Column(
        "fld_load_id", Integer, primary_key=True, nullable=False, unique=True
    )
    # Surrogate id for the stress itself; autoincremented by the database.
    stress_id = Column(
        "fld_stress_id", Integer, primary_key=True, autoincrement=True, nullable=False
    )
    # User-editable descriptive attributes.
    description = Column(
        "fld_description", String(512), default=__defaults__["description"]
    )
    load_history = Column(
        "fld_load_history", String(512), default=__defaults__["load_history"]
    )
    measurable_parameter = Column(
        "fld_measurable_parameter",
        String(512),
        default=__defaults__["measurable_parameter"],
    )
    remarks = Column("fld_remarks", String, default=__defaults__["remarks"])
    # Define the relationships to other tables in the RAMSTK Program database.
    op_load = relationship( # type: ignore
        "RAMSTKOpLoadRecord",
        back_populates="op_stress",
    )
    # Record-type flags consumed by the FMEA/PoF views to tell which kind of
    # worksheet row this object represents.
    is_mode = False
    is_mechanism = False
    is_opload = False
    is_opstress = True
    is_testmethod = False
    def get_attributes(self):
        """Retrieve the current values of the Op Stress data model attributes.
        :return: {load_id, stress_id, description, load_history,
            measurable_parameter, remarks} pairs
        :rtype: tuple
        """
        # Note: despite the docstring's ":rtype: tuple", a dict is returned.
        _attributes = {
            "load_id": self.load_id,
            "stress_id": self.stress_id,
            "description": self.description,
            "load_history": self.load_history,
            "measurable_parameter": self.measurable_parameter,
            "remarks": self.remarks,
        }
        return _attributes
| 32.028037 | 88 | 0.635833 |
6984b6744108862504fd119540fd5ca7ff580e8e | 4,326 | py | Python | plugins/callback.py | ilhamr0f11/Media-Extractor-IRBot | 85678cdb6b677f7cd436c2cbef0376a6c4066f61 | [
"MIT"
] | 1 | 2021-10-03T13:21:58.000Z | 2021-10-03T13:21:58.000Z | plugins/callback.py | ilhamr0f11/Media-Extractor-IRBot | 85678cdb6b677f7cd436c2cbef0376a6c4066f61 | [
"MIT"
] | null | null | null | plugins/callback.py | ilhamr0f11/Media-Extractor-IRBot | 85678cdb6b677f7cd436c2cbef0376a6c4066f61 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @trojanzhex
from pyrogram import filters
from pyrogram import Client as trojanz
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from config import Config
from script import Script
from helpers.progress import PRGRS
from helpers.tools import clean_up
from helpers.download import download_file, DATA
from helpers.ffmpeg import extract_audio, extract_subtitle
@trojanz.on_callback_query()
async def cb_handler(client, query):
    """Dispatch every inline-keyboard callback query by its ``query.data``.

    Routes:
      * ``start_data`` / ``help_data`` / ``about_data`` -- switch between the
        static start/help/about panels.
      * ``download_file`` -- delete the prompt and start the media download.
      * ``progress_msg`` -- answer with live progress pulled from ``PRGRS``.
      * ``close`` -- delete the message and acknowledge cancellation.
      * ``audio_*`` / ``subtitle_*`` -- extract the stream selected via the
        mapping previously stored in ``DATA``.
      * ``cancel_*`` -- clean up the partially processed file.

    Bug fix: all ``except:`` clauses narrowed to ``except Exception:``.  In an
    async handler a bare except also swallows ``asyncio.CancelledError``
    (a BaseException since Python 3.8), which breaks task cancellation.
    """
    if query.data == "start_data":
        await query.answer()
        keyboard = InlineKeyboardMarkup([
            [InlineKeyboardButton("HELP", callback_data="help_data"),
             InlineKeyboardButton("ABOUT", callback_data="about_data")],
            [InlineKeyboardButton("⭕️ JOIN OUR CHANNEL ⭕️", url="https://t.me/irbotsupdate")]
        ])
        await query.message.edit_text(
            Script.START_MSG.format(query.from_user.mention),
            reply_markup=keyboard,
            disable_web_page_preview=True
        )
        return

    elif query.data == "help_data":
        await query.answer()
        keyboard = InlineKeyboardMarkup([
            [InlineKeyboardButton("BACK", callback_data="start_data"),
             InlineKeyboardButton("ABOUT", callback_data="about_data")],
            [InlineKeyboardButton("⭕️ SUPPORT ⭕️", url="https://t.me/irbotsupdate")]
        ])
        await query.message.edit_text(
            Script.HELP_MSG,
            reply_markup=keyboard,
            disable_web_page_preview=True
        )
        return

    elif query.data == "about_data":
        await query.answer()
        keyboard = InlineKeyboardMarkup([
            [InlineKeyboardButton("BACK", callback_data="help_data"),
             InlineKeyboardButton("START", callback_data="start_data")],
            [InlineKeyboardButton("SOURCE CODE", url="https://github.com")]
        ])
        await query.message.edit_text(
            Script.ABOUT_MSG,
            reply_markup=keyboard,
            disable_web_page_preview=True
        )
        return

    elif query.data == "download_file":
        await query.answer()
        # Remove the prompt message before starting the (long) download.
        await query.message.delete()
        await download_file(client, query.message)

    elif query.data == "progress_msg":
        try:
            msg = "Progress Details...\n\nCompleted : {current}\nTotal Size : {total}\nSpeed : {speed}\nProgress : {progress:.2f}%\nETA: {eta}"
            await query.answer(
                msg.format(
                    **PRGRS[f"{query.message.chat.id}_{query.message.message_id}"]
                ),
                show_alert=True
            )
        except Exception:
            # No progress entry for this chat/message yet (transfer not
            # started or already finished) -- show a generic notice instead.
            await query.answer(
                "Processing your file...",
                show_alert=True
            )

    elif query.data == "close":
        await query.message.delete()
        await query.answer(
            "Cancelled...",
            show_alert=True
        )

    elif query.data.startswith('audio'):
        await query.answer()
        try:
            # Callback data layout: "<stream_type>_<mapping>_<keyword>".
            stream_type, mapping, keyword = query.data.split('_')
            data = DATA[keyword][int(mapping)]
            await extract_audio(client, query.message, data)
        except Exception:
            # Stale/unknown mapping key -- the cached details are gone.
            await query.message.edit_text("**Details Not Found**")

    elif query.data.startswith('subtitle'):
        await query.answer()
        try:
            stream_type, mapping, keyword = query.data.split('_')
            data = DATA[keyword][int(mapping)]
            await extract_subtitle(client, query.message, data)
        except Exception:
            await query.message.edit_text("**Details Not Found**")

    elif query.data.startswith('cancel'):
        try:
            query_type, mapping, keyword = query.data.split('_')
            data = DATA[keyword][int(mapping)]
            # Remove the partially downloaded/extracted file from disk.
            await clean_up(data['location'])
            await query.message.edit_text("**Cancelled...**")
            await query.answer(
                "Cancelled...",
                show_alert=True
            )
        except Exception:
            await query.answer()
            await query.message.edit_text("**Details Not Found**")
| 32.526316 | 144 | 0.567037 |
fc017a019ca1d48d54aab02e8250c196f0c94277 | 121,179 | py | Python | pytests/tuqquery/tuq_UDF.py | mhocouchbase/testrunner | 10faf6955a905dee9a254daf90352881d4687735 | [
"Apache-2.0"
] | null | null | null | pytests/tuqquery/tuq_UDF.py | mhocouchbase/testrunner | 10faf6955a905dee9a254daf90352881d4687735 | [
"Apache-2.0"
] | null | null | null | pytests/tuqquery/tuq_UDF.py | mhocouchbase/testrunner | 10faf6955a905dee9a254daf90352881d4687735 | [
"Apache-2.0"
] | null | null | null |
from remote.remote_util import RemoteMachineShellConnection
from .tuq import QueryTests
import time
from deepdiff import DeepDiff
class QueryUDFTests(QueryTests):
def setUp(self):
super(QueryUDFTests, self).setUp()
self.log.info("============== QueryUDFTests setup has started ==============")
self.shell = RemoteMachineShellConnection(self.master)
self.info = self.shell.extract_remote_info()
if self.info.type.lower() == 'windows':
self.curl_path = "%scurl" % self.path
else:
self.curl_path = "curl"
self.named_params = self.input.param("named_params", False)
self.no_params = self.input.param("no_params", False)
self.special_chars = self.input.param("special_chars", False)
self.namespace = self.input.param("namespace", False)
self.invalid = self.input.param("invalid", False)
self.reserved_word = self.input.param("reserved_word", False)
self.replace = self.input.param("replace", False)
if not self.analytics:
self.run_cbq_query(query="delete from system:prepareds")
users = self.input.param("users", None)
self.inp_users = []
if users:
self.inp_users = eval(eval(users))
self.users = self.get_user_list()
self.roles = self.get_user_role_list()
self.all_buckets = self.input.param("all_buckets", False)
self.scoped = self.input.param("scoped", False)
self.rebalance_in = self.input.param("rebalance_in", False)
self.log.info("============== QuerySanityTests setup has completed ==============")
self.log_config_info()
    def suite_setUp(self):
        """One-time suite setup: provision collections/indexes and sample data.

        When load_collections is set, creates scope test2 with two
        collections, indexes them (query service) or creates datasets on them
        (analytics), and seeds three hotel documents.  When load_sample is
        set, loads travel-sample and waits (up to 10 minutes) for its full
        document count.  The ``changed`` flag temporarily flips
        ``self.analytics`` off so the seed INSERTs go through the query
        service, then restores it.
        """
        super(QueryUDFTests, self).suite_setUp()
        self.log.info("============== QueryUDFTests suite_setup has started ==============")
        changed = False
        if self.load_collections:
            if not self.analytics:
                self.run_cbq_query(query='CREATE INDEX idx on default(name)')
                self.sleep(5)
                self.wait_for_all_indexes_online()
            self.collections_helper.create_scope(bucket_name="default", scope_name="test2")
            self.collections_helper.create_collection(bucket_name="default", scope_name="test2",
                                                      collection_name=self.collections[0])
            self.collections_helper.create_collection(bucket_name="default", scope_name="test2",
                                                      collection_name=self.collections[1])
            if self.analytics:
                self.run_cbq_query(query="CREATE DATASET collection3 on default.test2.test1")
                self.run_cbq_query(query="CREATE DATASET collection4 on default.test2.test2")
            if not self.analytics:
                self.run_cbq_query(
                    query="CREATE INDEX idx1 on default:default.test2.{0}(name)".format(self.collections[0]))
                self.run_cbq_query(
                    query="CREATE INDEX idx2 on default:default.test2.{0}(name)".format(self.collections[1]))
                self.sleep(5)
                self.wait_for_all_indexes_online()
            if self.analytics:
                # Temporarily route the seed INSERTs through the query service.
                self.analytics = False
                changed = True
            self.run_cbq_query(
                query=('INSERT INTO default:default.test2.{0}'.format(self.collections[
                    1]) + '(KEY, VALUE) VALUES ("key1", { "type" : "hotel", "name" : "old hotel" })'))
            self.run_cbq_query(
                query=('INSERT INTO default:default.test2.{0}'.format(self.collections[1]) + '(KEY, VALUE) VALUES ("key2", { "type" : "hotel", "name" : "new hotel" })'))
            self.run_cbq_query(
                query=('INSERT INTO default:default.test2.{0}'.format(self.collections[1]) + '(KEY, VALUE) VALUES ("key3", { "type" : "hotel", "name" : "new hotel" })'))
            self.sleep(20)
        if self.load_sample:
            self.rest.load_sample("travel-sample")
            init_time = time.time()
            # Poll until the full travel-sample doc count (31591) is reached,
            # giving up after 600 seconds.
            while True:
                next_time = time.time()
                query_response = self.run_cbq_query("SELECT COUNT(*) FROM `travel-sample`")
                if query_response['results'][0]['$1'] == 31591:
                    break
                if next_time - init_time > 600:
                    break
                time.sleep(1)
        # Restore the analytics flag that was flipped off for the INSERTs.
        if changed:
            self.analytics = True
        if self.analytics:
            self.run_cbq_query(query="CREATE DATASET travel on `travel-sample`")
            # NOTE(review): this inner restore is redundant -- ``changed`` was
            # already handled just above; looks like leftover duplication.
            if changed:
                self.analytics = True
        self.log.info("============== QueryUDFTests suite_setup has completed ==============")
    def tearDown(self):
        """Per-test teardown; delegates all cleanup to the parent class."""
        self.log.info("============== QueryUDFTests tearDown has started ==============")
        self.log.info("============== QueryUDFTests tearDown has completed ==============")
        super(QueryUDFTests, self).tearDown()
    def suite_tearDown(self):
        """One-time suite teardown: drop the analytics datasets created in
        suite_setUp (query-service indexes are cleaned up by the parent).
        """
        self.log.info("============== QueryUDFTests suite_tearDown has started ==============")
        if self.analytics:
            self.run_cbq_query(query="DROP DATASET travel")
            self.run_cbq_query(query="DROP DATASET collection1")
            self.run_cbq_query(query="DROP DATASET collection2")
            self.run_cbq_query(query="DROP DATASET collection3")
            self.run_cbq_query(query="DROP DATASET collection4")
        self.log.info("============== QueryUDFTests suite_tearDown has completed ==============")
        super(QueryUDFTests, self).suite_tearDown()
    '''Test that makes sure parameters work as expected
        -Fixed list of params (can only provide the amount expected, otherwise it will error)
        -Extra test to make sure that the list of params can contain special characters
        -Flexible list of params (can provide ANY number of params regardless of how many are used)
        -No params (cannot pass any amount of params)'''
    def test_inline_params(self):
        """Verify UDF parameter-list handling for all three variants.

        Depending on the setUp flags, ``celsius`` is created with a named
        parameter list (optionally with special characters in the name), no
        parameters, or the variadic ``...`` list; the test then calls it with
        exactly one, two, and zero arguments and asserts either the converted
        value or the expected argument-count error for each combination.
        """
        try:
            # Create the function in the variant selected by the test params.
            try:
                if self.analytics:
                    if self.named_params:
                        if self.special_chars:
                            self.run_cbq_query("CREATE OR REPLACE ANALYTICS FUNCTION celsius(deg_) { (`deg_` - 32) * 5/9}")
                        else:
                            self.run_cbq_query("CREATE OR REPLACE ANALYTICS FUNCTION celsius(degrees) { (degrees - 32) * 5/9 }")
                    elif self.no_params:
                        self.run_cbq_query("CREATE ANALYTICS FUNCTION celsius() { (10 - 32) * 5/9 }")
                    else:
                        self.run_cbq_query("CREATE ANALYTICS FUNCTION celsius(...) { (args[0] - 32) * 5/9 }")
                else:
                    if self.named_params:
                        if self.special_chars:
                            self.run_cbq_query("CREATE FUNCTION celsius(deg_) LANGUAGE INLINE AS (`deg_` - 32) * 5/9")
                        else:
                            self.run_cbq_query("CREATE FUNCTION celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9")
                    elif self.no_params:
                        self.run_cbq_query("CREATE FUNCTION celsius() LANGUAGE INLINE AS (10 - 32) * 5/9")
                    else:
                        self.run_cbq_query("CREATE FUNCTION celsius(...) LANGUAGE INLINE AS (args[0] - 32) * 5/9")
            except Exception as e:
                self.log.error(str(e))
            # Exactly one argument: must succeed unless the function takes
            # no parameters, in which case an argument-count error is expected.
            try:
                if self.analytics:
                    proper = self.run_cbq_query("SELECT RAW celsius(10)")
                else:
                    proper = self.run_cbq_query("EXECUTE FUNCTION celsius(10)")
                self.assertEqual(proper['results'], [-12.222222222222221])
            except Exception as e:
                self.log.error(str(e))
                if self.no_params:
                    if self.analytics:
                        self.assertTrue("Cannot find function with signature" in str(e), "The error message is incorrect, please check it {0}".format(str(e)))
                    else:
                        self.assertTrue("Incorrect number of arguments supplied to function 'celsius'" in str(e), "The error message is incorrect, please check it {0}".format(str(e)))
                else:
                    self.fail()
            # Two arguments: only the variadic variant accepts the extra one.
            try:
                if self.analytics:
                    too_many = self.run_cbq_query("SELECT RAW celsius(10,15)")
                else:
                    too_many = self.run_cbq_query("EXECUTE FUNCTION celsius(10,15)")
                self.assertEqual(too_many['results'], [-12.222222222222221])
            except Exception as e:
                self.log.error(str(e))
                if self.named_params or self.no_params:
                    if self.analytics:
                        self.assertTrue("Cannot find function with signature" in str(e), "The error message is incorrect, please check it {0}".format(str(e)))
                    else:
                        self.assertTrue("Incorrect number of arguments supplied to function 'celsius'" in str(e), "The error message is incorrect, please check it {0}".format(str(e)))
                else:
                    self.fail()
            # Zero arguments: only the named-parameter variant must reject it;
            # the variadic variant returns [null] (missing args[0]).
            try:
                if self.analytics:
                    not_enough = self.run_cbq_query("SELECT RAW celsius()")
                else:
                    not_enough = self.run_cbq_query("EXECUTE FUNCTION celsius()")
                if self.no_params:
                    self.assertEqual(not_enough['results'], [-12.222222222222221])
                else:
                    self.assertEqual(not_enough['results'],[None])
            except Exception as e:
                self.log.error(str(e))
                if self.named_params:
                    if self.analytics:
                        self.assertTrue("Cannot find function with signature" in str(e), "The error message is incorrect, please check it {0}".format(str(e)))
                    else:
                        self.assertTrue("Incorrect number of arguments supplied to function 'celsius'" in str(e), "The error message is incorrect, please check it {0}".format(str(e)))
                else:
                    self.fail()
        finally:
            # Cleanup: analytics DROP must repeat the exact signature used at
            # creation time; the query-service DROP takes only the name.
            try:
                if self.analytics:
                    if self.named_params:
                        if self.special_chars:
                            self.run_cbq_query(
                                "DROP ANALYTICS FUNCTION celsius(deg_)")
                        else:
                            self.run_cbq_query(
                                "DROP ANALYTICS FUNCTION celsius(degrees) ")
                    elif self.no_params:
                        self.run_cbq_query("DROP ANALYTICS FUNCTION celsius()")
                    else:
                        self.run_cbq_query("DROP ANALYTICS FUNCTION celsius(...) ")
                else:
                    self.run_cbq_query("DROP FUNCTION celsius")
            except Exception as e:
                self.log.error(str(e))
def test_inline_drop_function(self):
try:
try:
if self.analytics:
if self.special_chars:
self.run_cbq_query("CREATE OR REPLACE ANALYTICS FUNCTION `c%.-_`(param1) {(param1 - 32) * 5/9}")
else:
self.run_cbq_query("CREATE OR REPLACE ANALYTICS FUNCTION cels(param1) {(param1 - 32) * 5/9}")
else:
if self.special_chars:
self.run_cbq_query("CREATE FUNCTION `c%.-_`(...) LANGUAGE INLINE AS (args[0] - 32) * 5/9")
else:
self.run_cbq_query("CREATE FUNCTION cels(...) LANGUAGE INLINE AS (args[0] - 32) * 5/9")
except Exception as e:
self.log.error(str(e))
try:
if self.analytics:
if self.special_chars:
proper = self.run_cbq_query("SELECT RAW `c%.-_`(10)")
else:
proper = self.run_cbq_query("SELECT RAW cels(10)")
else:
if self.special_chars:
proper = self.run_cbq_query("EXECUTE FUNCTION `c%.-_`(10)")
else:
proper = self.run_cbq_query("EXECUTE FUNCTION cels(10)")
self.assertEqual(proper['results'], [-12.222222222222221])
except Exception as e:
self.log.error(str(e))
try:
if self.analytics:
if self.special_chars:
results = self.run_cbq_query("DROP ANALYTICS FUNCTION `c%.-_`(param1)")
else:
results = self.run_cbq_query("DROP ANALYTICS FUNCTION cels(param1)")
else:
if self.special_chars:
results = self.run_cbq_query("DROP FUNCTION `c%.-_`")
else:
results = self.run_cbq_query("DROP FUNCTION cels")
self.assertEqual(results['status'], 'success')
except Exception as e:
self.log.error(str(e))
self.fail()
try:
if self.analytics:
if self.special_chars:
self.run_cbq_query("SELECT RAW `c%.-_`(10)")
else:
self.run_cbq_query("SELECT RAW cels(10)")
else:
if self.special_chars:
self.run_cbq_query("EXECUTE FUNCTION `c%.-_`(10)")
else:
self.run_cbq_query("EXECUTE FUNCTION cels(10)")
self.fail("Query should have error'd, but it did not")
except Exception as e:
self.log.error(str(e))
if self.analytics:
self.assertTrue('Cannot find function with signature' in str(e), "The error message is incorrect, please check it {0}".format(str(e)))
else:
self.assertTrue('not found' in str(e), "The error message is incorrect, please check it {0}".format(str(e)))
finally:
try:
if self.analytics:
if self.special_chars:
self.run_cbq_query("DROP ANALYTICS FUNCTION `c%.-_`(param1)")
else:
self.run_cbq_query("DROP ANALYTICS FUNCTION cels(param1)")
else:
if self.special_chars:
self.run_cbq_query("DROP FUNCTION `c%.-_`")
else:
self.run_cbq_query("DROP FUNCTION cels")
except Exception as e:
self.log.info(str(e))
def test_inline_drop_missing_function(self):
try:
if self.analytics:
self.run_cbq_query(query="DROP ANALYTICS FUNCTION func_does_not_exist")
else:
self.run_cbq_query(query="DROP FUNCTION func_does_not_exist")
except Exception as e:
self.log.error(str(e))
if self.analytics:
self.assertTrue('DROP ANALYTICS FUNCTION func_does_not_exist' in str(e), "Error message is wrong {0}".format(str(e)))
else:
self.assertTrue("Function 'func_does_not_exist' not found" in str(e), "Error message is wrong {0}".format(str(e)))
    def test_inline_function_syntax(self):
        """Verify both inline-UDF creation syntaxes are accepted and executable:
        `LANGUAGE INLINE AS <expr>` with a named parameter, and the brace form
        `{ <expr> }` with a variadic (...) parameter list."""
        try:
            try:
                self.run_cbq_query("CREATE FUNCTION celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9")
                self.run_cbq_query("CREATE FUNCTION fahrenheit(...) { (args[0] * 9/5) + 32 }")
            except Exception as e:
                self.log.error(str(e))
                self.fail("Valid syntax creation error'd {0}".format(str(e)))
            try:
                # F->C conversion: (10 - 32) * 5/9
                results = self.run_cbq_query("EXECUTE FUNCTION celsius(10)")
                self.assertEqual(results['results'], [-12.222222222222221])
            except Exception as e:
                self.log.error(str(e))
                self.fail()
            try:
                # C->F conversion: (10 * 9/5) + 32
                results = self.run_cbq_query("EXECUTE FUNCTION fahrenheit(10)")
                self.assertEqual(results['results'], [50])
            except Exception as e:
                self.log.error(str(e))
                self.fail()
        finally:
            # Best-effort cleanup: drop each function independently so a failure
            # dropping one does not prevent dropping the other.
            try:
                self.run_cbq_query("DROP FUNCTION celsius")
            except Exception as e:
                self.log.error(str(e))
            try:
                self.run_cbq_query("DROP FUNCTION fahrenheit")
            except Exception as e:
                self.log.error(str(e))
def test_inline_function_syntax_scope(self):
try:
if self.analytics:
self.run_cbq_query(
"CREATE OR REPLACE ANALYTICS FUNCTION Default.celsius(degrees) {(degrees - 32) * 5/9}")
results = self.run_cbq_query("SELECT RAW Default.celsius(10)".format(self.scope))
else:
self.run_cbq_query("CREATE FUNCTION default:default.{0}.celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9".format(self.scope))
results = self.run_cbq_query("EXECUTE FUNCTION default:default.{0}.celsius(10)".format(self.scope))
self.assertEqual(results['results'], [-12.222222222222221])
if not self.analytics:
results = self.run_cbq_query("EXECUTE FUNCTION celsius(10)".format(self.scope))
self.fail()
except Exception as e:
self.log.info(str(e))
self.assertTrue("Function 'celsius' not found" in str(e))
finally:
try:
if self.analytics:
self.run_cbq_query("DROP ANALYTICS FUNCTION Default.celsius(degrees)")
else:
self.run_cbq_query("DROP FUNCTION default:default.{0}.celsius".format(self.scope))
except Exception as e:
self.log.error(str(e))
def test_inline_function_query_context(self):
try:
self.run_cbq_query("CREATE FUNCTION celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9", query_context='default:default.test')
results = self.run_cbq_query("EXECUTE FUNCTION default:default.{0}.celsius(10)".format(self.scope))
self.assertEqual(results['results'], [-12.222222222222221])
results = self.run_cbq_query("EXECUTE FUNCTION celsius(10)")
except Exception as e:
self.log.info(str(e))
self.assertTrue("Function 'celsius' not found" in str(e))
try:
results = self.run_cbq_query("EXECUTE FUNCTION celsius(10)", query_context='default:default.test')
self.assertEqual(results['results'], [-12.222222222222221])
finally:
try:
self.run_cbq_query("DROP FUNCTION celsius".format(self.scope), query_context='default:default.test')
except Exception as e:
self.log.error(str(e))
    def test_inline_join(self):
        """A UDF body may contain an INNER JOIN across two keyspaces; verify the
        joined rows both via EXECUTE FUNCTION and via a SELECT projection."""
        try:
            if self.analytics:
                self.run_cbq_query(
                    "CREATE OR REPLACE ANALYTICS FUNCTION func1(nameval) { (select * from collection1 t1 INNER JOIN collection4 t2 ON t1.name = t2.name where t1.name = nameval) }")
            else:
                # NOTE(review): the keyspace paths are hard-coded (test.test1 /
                # test2.test2). The .format() args are unused — the string has no
                # {0}/{1} placeholders, only escaped {{ }} braces that .format
                # collapses — confirm whether self.scope/collections were intended.
                self.run_cbq_query("CREATE OR REPLACE FUNCTION func1(nameval) {{ (select * from default:default.test.test1 t1 INNER JOIN default:default.test2.test2 t2 ON t1.name = t2.name where t1.name = nameval) }}".format(self.scope,self.collections[0]))
            results = self.run_cbq_query("EXECUTE FUNCTION func1('old hotel')")
            self.assertEqual(results['results'], [[{'t1': {'name': 'old hotel', 'type': 'hotel'}, 't2': {'name': 'old hotel', 'type': 'hotel'}}, {'t1': {'name': 'old hotel', 'nested': {'fields': 'fake'}}, 't2': {'name': 'old hotel', 'type': 'hotel'}}, {'t1': {'name': 'old hotel', 'numbers': [1, 2, 3, 4]}, 't2': {'name': 'old hotel', 'type': 'hotel'}}]])
            # SELECT returns the same rows wrapped under the implicit alias '$1'.
            results = self.run_cbq_query("select func1('old hotel')")
            self.assertEqual(results['results'], [{'$1': [{'t1': {'name': 'old hotel', 'type': 'hotel'}, 't2': {'name': 'old hotel', 'type': 'hotel'}}, {'t1': {'name': 'old hotel', 'nested': {'fields': 'fake'}}, 't2': {'name': 'old hotel', 'type': 'hotel'}}, {'t1': {'name': 'old hotel', 'numbers': [1, 2, 3, 4]}, 't2': {'name': 'old hotel', 'type': 'hotel'}}]}])
        except Exception as e:
            self.log.error(str(e))
            self.fail()
        finally:
            try:
                if self.analytics:
                    self.run_cbq_query("DROP ANALYTICS FUNCTION func1(nameval)")
                else:
                    self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
'''Test a query that uses a function containing a query in the from'''
    def test_inline_subquery_from(self):
        """Use a UDF as the FROM term of a SELECT, combined with LET / GROUP BY /
        LETTING / HAVING. The query-service branch additionally routes the LETTING
        value through a JS helper (func3)."""
        if not self.analytics:
            # JS helpers are only needed (and only created) for the query service.
            # NOTE(review): in concater, `if (x = b)` is an assignment, not a
            # comparison — presumably intentional for this fixture; confirm.
            string_functions = 'function concater(a,b) { var text = ""; var x; for (x in a) {if (x = b) { return x; }} return "n"; } function comparator(a, b) {if (a > b) { return "old hotel"; } else { return "new hotel" }}'
            function_names2 = ["concater","comparator"]
            created2 = self.create_library("strings",string_functions,function_names2)
        try:
            if self.analytics:
                self.run_cbq_query("CREATE OR REPLACE ANALYTICS FUNCTION func2(degrees) { (degrees - 32)} ")
                # NOTE(review): the .format args are unused here (no {0}/{1} in the
                # string); .format only collapses the escaped {{ }} braces.
                self.run_cbq_query(
                    "CREATE OR REPLACE ANALYTICS FUNCTION func4(nameval) {{ (select * from collection1 where collection1.name = nameval) }}".format(
                    self.scope, self.collections[0]))
                # func2(36) == 4, matched against the numbers array via ANY/SATISFIES.
                results = self.run_cbq_query(
                    "SELECT f.collection1.name FROM func4('old hotel') as f LET maximum_no = func2(36) WHERE ANY v in f.collection1.numbers SATISFIES v = maximum_no END GROUP BY f.collection1.name LETTING letter = 'o' HAVING f.collection1.name > letter")
                self.assertEqual(results['results'], [{'name': 'old hotel'}])
            else:
                self.run_cbq_query(query='CREATE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "comparator" AT "strings"')
                self.run_cbq_query("CREATE FUNCTION func2(degrees) LANGUAGE INLINE AS (degrees - 32) ")
                self.run_cbq_query(query='CREATE FUNCTION func3(a,b) LANGUAGE JAVASCRIPT AS "concater" AT "strings"')
                self.run_cbq_query(
                    "CREATE OR REPLACE FUNCTION func4(nameval) {{ (select * from default:default.{0}.{1} where name = nameval) }}".format(
                    self.scope, self.collections[0]))
                results = self.run_cbq_query(
                    "SELECT f.test1.name FROM func4('old hotel') as f LET maximum_no = func2(36) WHERE ANY v in f.test1.numbers SATISFIES v = maximum_no END GROUP BY f.test1.name LETTING letter = func3('old hotel', 'o') HAVING f.test1.name > letter")
                self.assertEqual(results['results'], [{'name': 'old hotel'}])
        finally:
            # Best-effort cleanup of every artifact this branch created.
            try:
                if self.analytics:
                    self.run_cbq_query("DROP ANALYTICS FUNCTION func2(degrees)")
                    self.run_cbq_query("DROP ANALYTICS FUNCTION func4(nameval)")
                else:
                    self.delete_library("strings")
                    self.run_cbq_query("DROP FUNCTION func1")
                    self.run_cbq_query("DROP FUNCTION func2")
                    self.run_cbq_query("DROP FUNCTION func3")
                    self.run_cbq_query("DROP FUNCTION func4")
            except Exception as e:
                self.log.error(str(e))
'''Test a function that contains a subquery and uses other functions'''
    def test_inline_subquery_nested(self):
        """Define a UDF (func5) whose body uses another UDF (func4) in its FROM
        clause and calls further inline/JS UDFs (func2, func3); verify it via both
        SELECT and EXECUTE FUNCTION."""
        # NOTE(review): in concater, `if (x = b)` assigns rather than compares —
        # presumably intentional for this fixture; confirm.
        string_functions = 'function concater(a,b) { var text = ""; var x; for (x in a) {if (x = b) { return x; }} return "n"; } function comparator(a, b) {if (a > b) { return "old hotel"; } else { return "new hotel" }}'
        function_names2 = ["concater","comparator"]
        created2 = self.create_library("strings",string_functions,function_names2)
        try:
            self.run_cbq_query(query='CREATE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "comparator" AT "strings"')
            self.run_cbq_query("CREATE FUNCTION func2(degrees) LANGUAGE INLINE AS (degrees - 32) ")
            self.run_cbq_query(query='CREATE FUNCTION func3(a,b) LANGUAGE JAVASCRIPT AS "concater" AT "strings"')
            self.run_cbq_query(
                "CREATE OR REPLACE FUNCTION func4(nameval) {{ (select * from default:default.{0}.{1} where name = nameval) }}".format(
                self.scope, self.collections[0]))
            # func5 nests func4 (FROM), func2 (LET) and func3 (LETTING).
            results = self.run_cbq_query(
                "CREATE OR REPLACE FUNCTION func5(nameval) {(SELECT f.test1.name FROM func4(nameval) as f LET maximum_no = func2(36) WHERE ANY v in f.test1.numbers SATISFIES v = maximum_no END GROUP BY f.test1.name LETTING letter = func3('old hotel', 'o') HAVING f.test1.name > letter)}")
            # SELECT wraps the result under '$1'; EXECUTE returns it bare.
            results = self.run_cbq_query(query="select func5('old hotel')")
            self.assertEqual(results['results'], [{'$1': [{'name': 'old hotel'}]}])
            results2 = self.run_cbq_query(query="EXECUTE FUNCTION func5('old hotel')")
            self.assertEqual(results2['results'], [[{'name': 'old hotel'}]])
        finally:
            # Best-effort cleanup of the library and all five functions.
            try:
                self.delete_library("strings")
                self.run_cbq_query("DROP FUNCTION func1")
                self.run_cbq_query("DROP FUNCTION func2")
                self.run_cbq_query("DROP FUNCTION func3")
                self.run_cbq_query("DROP FUNCTION func4")
                self.run_cbq_query("DROP FUNCTION func5")
            except Exception as e:
                self.log.error(str(e))
    def test_inline_subquery_where(self):
        """Use a subquery-bodied UDF inside a WHERE clause (IN func4(...)), and
        nest that usage inside a second UDF (func5)."""
        try:
            if self.analytics:
                self.run_cbq_query(
                    "CREATE OR REPLACE ANALYTICS FUNCTION func4(doctype) { (SELECT RAW city FROM travel WHERE `type` = doctype) }")
                # Expected row count against the travel dataset.
                results = self.run_cbq_query(
                    'SELECT t1.city FROM travel t1 WHERE t1.`type` = "landmark" AND t1.city IN func4("airport")')
                self.assertEqual(results['metrics']['resultCount'], 2776)
                results = self.run_cbq_query(
                    'CREATE OR REPLACE ANALYTICS FUNCTION func5(doctype) {(SELECT t1.city FROM travel t1 WHERE t1.`type` = "landmark" AND t1.city IN func4(doctype))}')
                # func5 returns the whole result set as a single value.
                results = self.run_cbq_query('SELECT RAW func5("airport")')
                self.assertEqual(results['metrics']['resultCount'], 1)
            else:
                self.run_cbq_query(
                    "CREATE OR REPLACE FUNCTION func4(doctype) { (SELECT RAW city FROM `travel-sample` WHERE type = doctype) }")
                results = self.run_cbq_query('SELECT t1.city FROM `travel-sample` t1 WHERE t1.type = "landmark" AND t1.city IN func4("airport")')
                self.assertEqual(results['metrics']['resultCount'], 2776)
                results = self.run_cbq_query(
                    'CREATE OR REPLACE FUNCTION func5(doctype) {(SELECT t1.city FROM `travel-sample` t1 WHERE t1.type = "landmark" AND t1.city IN func4(doctype))}')
                results = self.run_cbq_query('EXECUTE FUNCTION func5("airport")')
                self.assertEqual(results['metrics']['resultCount'], 1)
        finally:
            try:
                if self.analytics:
                    self.run_cbq_query("DROP ANALYTICS FUNCTION func4(doctype)")
                    self.run_cbq_query("DROP ANALYTICS FUNCTION func5(doctype)")
                else:
                    self.run_cbq_query("DROP FUNCTION func4")
                    self.run_cbq_query("DROP FUNCTION func5")
            except Exception as e:
                self.log.error(str(e))
def test_inline_subquery_select(self):
try:
self.run_cbq_query(
"CREATE OR REPLACE FUNCTION func4() { (SELECT RAW t1.geo.alt FROM `travel-sample`) }")
results = self.run_cbq_query('SELECT array_length(func4()) FROM `travel-sample`')
self.assertEqual(results['metrics']['resultCount'], 31591)
self.assertEqual(results['results'][0], {'$1': 31591})
results = self.run_cbq_query(
'CREATE OR REPLACE FUNCTION func5() {(SELECT array_length(func4()) FROM `travel-sample`)}')
results = self.run_cbq_query('EXECUTE FUNCTION func5()')
self.assertEqual(results['metrics']['resultCount'], 1)
self.assertEqual(results['results'][0][0], {'$1': 31591})
finally:
try:
self.run_cbq_query("DROP FUNCTION func4")
self.run_cbq_query("DROP FUNCTION func5")
except Exception as e:
self.log.error(str(e))
    def test_inline_function_naming(self):
        """Exercise UDF naming rules: special characters (backtick-quoted),
        namespace-qualified names, reserved words, and a plain name. Only a
        reserved word used without a namespace is expected to be rejected."""
        # Tracks the created name so the finally block can drop it.
        # NOTE(review): on the reserved-word (non-namespace) path this stays '',
        # so the cleanup issues a malformed "DROP FUNCTION " — harmless since
        # creation failed, but confirm whether that is intended.
        function_name = ''
        try:
            try:
                if self.special_chars:
                    function_name = '`c%.-_`'
                    results = self.run_cbq_query("CREATE FUNCTION `c%.-_`(deg_) LANGUAGE INLINE AS (`deg_` - 32) * 5/9")
                elif self.namespace:
                    if self.reserved_word:
                        # Reserved words are allowed when namespace-qualified.
                        function_name = 'default:type'
                        results = self.run_cbq_query("CREATE FUNCTION default:type(...) LANGUAGE INLINE AS (10 - 32) * 5/9")
                    else:
                        function_name = 'default:celsius'
                        results = self.run_cbq_query("CREATE FUNCTION default:celsius(...) LANGUAGE INLINE AS (10 - 32) * 5/9")
                elif self.reserved_word:
                    results = self.run_cbq_query("CREATE FUNCTION join(...) LANGUAGE INLINE AS (10 - 32) * 5/9")
                elif self.invalid:
                    function_name = '`%.-`'
                    results = self.run_cbq_query("CREATE FUNCTION `%.-`(...) LANGUAGE INLINE AS (10 - 32) * 5/9")
                else:
                    function_name = 'celsius'
                    results = self.run_cbq_query("CREATE FUNCTION celsius(...) LANGUAGE INLINE AS (args[0] - 32) * 5/9")
                self.log.info(results)
                self.assertEqual(results['status'], "success")
            except Exception as e:
                self.log.error(str(e))
                if self.reserved_word and not self.namespace:
                    # Unqualified reserved word must be a parse error.
                    self.assertTrue('syntax error - line 1, column 17' in str(e))
                else:
                    self.fail()
        finally:
            try:
                self.run_cbq_query("DROP FUNCTION {0}".format(function_name))
            except Exception as e:
                self.log.error(str(e))
def test_inline_create_or_replace(self):
try:
if self.analytics:
self.run_cbq_query("CREATE OR REPLACE ANALYTICS FUNCTION func1(degrees) {(degrees - 32) * 5/9}")
results = self.run_cbq_query("SELECT RAW func1(10)")
self.assertEqual(results['results'], [-12.222222222222221])
self.run_cbq_query("CREATE OR REPLACE ANALYTICS FUNCTION func1(degree) { (degree * 9/5) + 32 }")
results = self.run_cbq_query("SELECT RAW func1(10)")
self.assertEqual(results['results'], [50])
else:
self.run_cbq_query("CREATE OR REPLACE FUNCTION func1(...) LANGUAGE INLINE AS (args[0] - 32) * 5/9")
results = self.run_cbq_query("EXECUTE FUNCTION func1(10)")
self.assertEqual(results['results'], [-12.222222222222221])
self.run_cbq_query("CREATE OR REPLACE FUNCTION func1(...) { (args[0] * 9/5) + 32 }")
results = self.run_cbq_query("EXECUTE FUNCTION func1(10)")
self.assertEqual(results['results'], [50])
except Exception as e:
self.log.error(str(e))
self.fail()
finally:
try:
if self.analytics:
self.run_cbq_query("DROP ANALYTICS FUNCTION func1(degree)")
else:
self.run_cbq_query("DROP FUNCTION func1")
except Exception as e:
self.log.error(str(e))
def test_system_functions_create_and_replace(self):
try:
self.run_cbq_query("CREATE OR REPLACE FUNCTION func1(...) LANGUAGE INLINE AS (args[0] - 32) * 5/9")
results = self.run_cbq_query("select * from system:functions where identity.name = 'func1'")
self.assertEqual(results['results'][0]['functions']['definition']['expression'], '((((`args`[0]) - 32) * 5) / 9)')
self.run_cbq_query("CREATE OR REPLACE FUNCTION func1(...) { (args[0] * 9/5) + 32 }")
results = self.run_cbq_query("select * from system:functions where identity.name = 'func1'")
self.assertEqual(results['results'][0]['functions']['definition']['expression'], '((((`args`[0]) * 9) / 5) + 32)')
except Exception as e:
self.log.error(str(e))
self.fail()
finally:
try:
self.run_cbq_query("DROP FUNCTION func1")
except Exception as e:
self.log.error(str(e))
def test_system_functions_drop(self):
try:
self.run_cbq_query("CREATE OR REPLACE FUNCTION func1(...) { (args[0] * 9/5) + 32 }")
self.run_cbq_query("DROP FUNCTION func1")
results = self.run_cbq_query("select * from system:functions where identity.name = 'func1'")
self.assertEqual(results['results'],[])
except Exception as e:
self.log.error(str(e))
self.fail()
    def test_inline_query_function(self):
        """A UDF whose body is a full subquery returns its result set both via
        EXECUTE FUNCTION (bare) and via SELECT (wrapped under '$1')."""
        try:
            self.run_cbq_query("CREATE OR REPLACE FUNCTION func1(nameval) {{ (select * from default:default.{0}.{1} where name = nameval) }}".format(self.scope,self.collections[0]))
            results = self.run_cbq_query("EXECUTE FUNCTION func1('old hotel')")
            self.assertEqual(results['results'],[[{'test1': {'name': 'old hotel', 'type': 'hotel'}}, {'test1': {'name': 'old hotel', 'nested': {'fields': 'fake'}}}, {'test1': {'name': 'old hotel', 'numbers': [1, 2, 3, 4]}}]])
            results = self.run_cbq_query("select func1('old hotel')")
            self.assertEqual(results['results'],[{'$1': [{'test1': {'name': 'old hotel', 'type': 'hotel'}}, {'test1': {'name': 'old hotel', 'nested': {'fields': 'fake'}}}, {'test1': {'name': 'old hotel', 'numbers': [1, 2, 3, 4]}}]}])
        except Exception as e:
            self.log.error(str(e))
            self.fail()
        finally:
            try:
                self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
def test_inline_query_function_no_index(self):
try:
self.run_cbq_query(
"CREATE OR REPLACE FUNCTION func1(nameval) {{ (select * from default:default.{0}.{1} where fake = nameval) }}".format(
self.scope, self.collections[0]))
results = self.run_cbq_query("EXECUTE FUNCTION func1('old hotel')")
self.fail()
except Exception as e:
self.log.error(str(e))
self.assertTrue("No index available" in str(e), "Error message is wrong {0}".format(str(e)))
finally:
try:
self.run_cbq_query("DROP FUNCTION func1")
except Exception as e:
self.log.error(str(e))
def test_inline_query_function_syntax_error(self):
try:
self.run_cbq_query(
"CREATE OR REPLACE FUNCTION func1(nameval) {{ (selet * from default:default.{0}.{1} where fake = nameval) }}".format(
self.scope, self.collections[0]))
self.fail()
except Exception as e:
self.log.error(str(e))
self.assertTrue("syntax error" in str(e), "Error message is wrong {0}".format(str(e)))
finally:
try:
self.run_cbq_query("DROP FUNCTION func1")
except Exception as e:
self.log.error(str(e))
    def test_nested_inline_function(self):
        """An inline UDF may call another inline UDF: invert(celsius(x)) applies
        F->C then C->F, so the input value round-trips unchanged."""
        try:
            if self.analytics:
                self.run_cbq_query("CREATE OR REPLACE ANALYTICS FUNCTION celsius(degrees) {(degrees - 32) * 5/9}")
                self.run_cbq_query("CREATE OR REPLACE ANALYTICS FUNCTION invert(param1) { (celsius(param1) * 9/5) + 32 }")
                results = self.run_cbq_query("SELECT RAW invert(10)")
                self.assertEqual(results['results'], [10])
            else:
                self.run_cbq_query("CREATE FUNCTION celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9")
                self.run_cbq_query("CREATE FUNCTION invert(...) { (celsius(args[0]) * 9/5) + 32 }")
                results = self.run_cbq_query("EXECUTE FUNCTION invert(10)")
                self.assertEqual(results['results'], [10])
        except Exception as e:
            self.log.error(str(e))
            self.fail()
        finally:
            # Drop each function independently so one failure does not block the other.
            try:
                if self.analytics:
                    self.run_cbq_query("DROP ANALYTICS FUNCTION celsius(degrees)")
                else:
                    self.run_cbq_query("DROP FUNCTION celsius")
            except Exception as e:
                self.log.error(str(e))
            try:
                if self.analytics:
                    self.run_cbq_query("DROP ANALYTICS FUNCTION invert(param1)")
                else:
                    self.run_cbq_query("DROP FUNCTION invert")
            except Exception as e:
                self.log.error(str(e))
def test_nested_inline_function_negative(self):
try:
self.run_cbq_query("CREATE FUNCTION invert(...) { (celsius(args[0]) * 9/5) + 32 }")
self.fail("Query did not error and it should have!")
except Exception as e:
self.log.error(str(e))
self.assertTrue("Invalid function celsius" in str(e), "Error message is wrong {0}".format(str(e)))
    def test_inline_from(self):
        """A subquery UDF can be used directly as a FROM source with an alias."""
        try:
            self.run_cbq_query("CREATE OR REPLACE FUNCTION func1(nameval) {{ (select * from default:default.{0}.{1} where name = nameval) }}".format(self.scope,self.collections[0]))
            # f.* flattens the wrapping so rows come back unnested.
            results = self.run_cbq_query('select f.* from func1("old hotel") f')
            self.assertEqual(results['results'], [{'test1': {'name': 'old hotel', 'type': 'hotel'}}, {'test1': {'name': 'old hotel', 'nested': {'fields': 'fake'}}}, {'test1': {'name': 'old hotel', 'numbers': [1, 2, 3, 4]}}])
        except Exception as e:
            self.log.error(str(e))
            self.fail()
        finally:
            try:
                self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
    def test_inline_where(self):
        """An inline UDF can be called inside a WHERE / ANY...SATISFIES clause;
        func1(36) == 4 should match the document whose numbers array contains 4."""
        try:
            try:
                if self.analytics:
                    self.run_cbq_query("CREATE OR REPLACE ANALYTICS FUNCTION func1(degrees) {(degrees - 32)} ")
                else:
                    self.run_cbq_query("CREATE FUNCTION func1(degrees) LANGUAGE INLINE AS (degrees - 32) ")
            except Exception as e:
                self.log.error(str(e))
                self.fail("Valid syntax creation error'd {0}".format(str(e)))
            if self.analytics:
                results = self.run_cbq_query(
                    "SELECT * FROM collection1 WHERE ANY v in collection1.numbers SATISFIES v = func1(36) END")
            else:
                results = self.run_cbq_query("SELECT * FROM default:default.test.test1 WHERE ANY v in test1.numbers SATISFIES v = func1(36) END")
            if self.analytics:
                self.assertEqual(results['results'], [{'collection1': {'name': 'old hotel', 'numbers': [1, 2, 3, 4]}}])
            else:
                self.assertEqual(results['results'], [{'test1': {'name': 'old hotel', 'numbers': [1, 2, 3, 4]}}])
        finally:
            try:
                if self.analytics:
                    self.run_cbq_query("DROP ANALYTICS FUNCTION func1(degrees)")
                else:
                    self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
    def test_agg_udf(self):
        """UDFs interacting with aggregates: a UDF whose body runs SUM over its
        arguments, and COUNT applied over a subquery UDF's result."""
        try:
            self.run_cbq_query("CREATE OR REPLACE FUNCTION func1(a,b,c) { (SELECT RAW SUM((a+b+c-40))) }")
            # 10+20+30-40 == 20; EXECUTE returns the raw set, SELECT wraps in '$1'.
            results = self.run_cbq_query("EXECUTE FUNCTION func1(10,20,30)")
            self.assertEqual(results['results'], [[20]])
            results = self.run_cbq_query("SELECT func1(10,20,30)")
            self.assertEqual(results['results'], [{'$1': [20]}])
            self.run_cbq_query("CREATE OR REPLACE FUNCTION func2(nameval) {{ (select name from default:default.{0}.{1} where name = nameval) }}".format(self.scope,self.collections[0]))
            # COUNT over the single array value the UDF returns.
            results = self.run_cbq_query("select count(func2('old hotel'))")
            self.assertEqual(results['results'], [{'$1': 1}])
        except Exception as e:
            self.log.error(str(e))
            self.fail()
        finally:
            try:
                self.run_cbq_query("DROP FUNCTION func1")
                self.run_cbq_query("DROP FUNCTION func2")
            except Exception as e:
                self.log.error(str(e))
    def test_abort_udf(self):
        """Exercise abort() inside an inline UDF: a variadic function that aborts
        on wrong argument count or type, checked through both EXECUTE FUNCTION and
        SELECT, plus a function that unconditionally aborts."""
        try:
            try:
                # Accepts exactly one string argument; aborts with a descriptive
                # message otherwise.
                self.run_cbq_query('CREATE FUNCTION variadic(...) { CASE WHEN array_length(args) != 1 THEN abort("wrong args: " || to_string(array_length(args))) WHEN type(args[0]) = "string" THEN args[0] ELSE abort("wrong type " || type(args[0]) || ": " || to_string(args[0])) END }')
            except Exception as e:
                self.log.error(str(e))
                self.fail()
            try:
                # Two args -> wrong-count abort.
                results = self.run_cbq_query("EXECUTE FUNCTION variadic(1,2)")
                self.fail("Function should have failed with an error {0}".format(results))
            except Exception as e:
                self.log.error(str(e))
                self.assertTrue("wrong args: 2" in str(e))
            try:
                # One non-string arg -> wrong-type abort.
                results = self.run_cbq_query("EXECUTE FUNCTION variadic(1)")
                self.fail("Function should have failed with an error {0}".format(results))
            except Exception as e:
                self.log.error(str(e))
                self.assertTrue("wrong type number" in str(e))
            try:
                # One string arg -> value passes straight through.
                results = self.run_cbq_query("EXECUTE FUNCTION variadic('string')")
                self.assertEqual(results['results'], ['string'])
            except Exception as e:
                self.log.error(str(e))
                self.fail()
            try:
                # Same three cases via SELECT instead of EXECUTE FUNCTION.
                results = self.run_cbq_query("select variadic(1,2)")
                self.fail("Function should have failed with an error {0}".format(results))
            except Exception as e:
                self.log.error(str(e))
                self.assertTrue("wrong args: 2" in str(e))
            try:
                results = self.run_cbq_query("select variadic(1)")
                self.fail("Function should have failed with an error {0}".format(results))
            except Exception as e:
                self.log.error(str(e))
                self.assertTrue("wrong type number" in str(e))
            try:
                results = self.run_cbq_query("select variadic('string')")
                self.assertEqual(results['results'], [{'$1': 'string'}])
            except Exception as e:
                self.log.error(str(e))
                self.fail()
            try:
                # An unconditional abort surfaces its message as the query error.
                self.run_cbq_query('CREATE FUNCTION abort_msg() { abort("This function simply aborts") }')
                results = self.run_cbq_query("select abort_msg()")
            except Exception as e:
                self.log.error(str(e))
                self.assertTrue("This function simply aborts" in str(e))
        finally:
            try:
                self.run_cbq_query("DROP FUNCTION variadic")
                self.run_cbq_query("DROP FUNCTION abort_msg")
            except Exception as e:
                self.log.error(str(e))
##############################################################################################
#
# JAVASCRIPT FUNCTIONS
##############################################################################################
    def test_javascript_syntax(self):
        """Create JS UDFs against a library with named, variadic and empty
        parameter lists, and verify each call shape."""
        functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
        function_names = ["adder", "multiplier"]
        created = self.create_library("math", functions, function_names)
        try:
            self.run_cbq_query(query='CREATE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            results = self.run_cbq_query(query="EXECUTE FUNCTION func1(1,3)")
            self.assertEqual(results['results'], [4])
            self.run_cbq_query(query='CREATE FUNCTION func2(a,b) LANGUAGE JAVASCRIPT AS "multiplier" AT "math"')
            results = self.run_cbq_query(query="EXECUTE FUNCTION func2(2,3)")
            self.assertEqual(results['results'], [6])
            # Variadic declarations forward positional args to the JS function.
            self.run_cbq_query(query='CREATE FUNCTION func3(...) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            results = self.run_cbq_query(query="EXECUTE FUNCTION func3(1,3)")
            self.assertEqual(results['results'], [4])
            self.run_cbq_query(query='CREATE FUNCTION func4(...) LANGUAGE JAVASCRIPT AS "multiplier" AT "math"')
            results = self.run_cbq_query(query="EXECUTE FUNCTION func4(1,3)")
            self.assertEqual(results['results'], [3])
            # Zero-parameter declaration: multiplier gets no args and yields null.
            self.run_cbq_query(query='CREATE FUNCTION func5() LANGUAGE JAVASCRIPT AS "multiplier" AT "math"')
            results = self.run_cbq_query(query="EXECUTE FUNCTION func5()")
            self.assertEqual(results['results'], [None])
        finally:
            try:
                self.delete_library("math")
                self.run_cbq_query("DROP FUNCTION func1")
                self.run_cbq_query("DROP FUNCTION func2")
                self.run_cbq_query("DROP FUNCTION func3")
                self.run_cbq_query("DROP FUNCTION func4")
                self.run_cbq_query("DROP FUNCTION func5")
            except Exception as e:
                self.log.error(str(e))
def test_javascript_params(self):
try:
functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
function_names = ["adder", "multiplier"]
created = self.create_library("math", functions, function_names)
self.run_cbq_query(query='CREATE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
self.run_cbq_query(query='CREATE FUNCTION func2(...) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
results = self.run_cbq_query(query="EXECUTE FUNCTION func2(1,3,5)")
self.assertEqual(results['results'], [4])
try:
results = self.run_cbq_query(query="EXECUTE FUNCTION func1(1,3,5)")
except Exception as e:
self.log.error(str(e))
self.assertTrue("Incorrect number of arguments supplied to function 'func1'" in str(e), "The error message is incorrect, please check it {0}".format(str(e)))
try:
results = self.run_cbq_query(query="EXECUTE FUNCTION func1(1)")
except Exception as e:
self.log.error(str(e))
self.assertTrue("Incorrect number of arguments supplied to function 'func1'" in str(e), "The error message is incorrect, please check it {0}".format(str(e)))
finally:
try:
self.delete_library("math")
self.run_cbq_query("DROP FUNCTION func1")
self.run_cbq_query("DROP FUNCTION func2")
except Exception as e:
self.log.error(str(e))
    def test_javascript_create_or_replace(self):
        """CREATE OR REPLACE on a JS UDF must update the system:functions entry
        from 'adder' to 'multiplier'.

        NOTE(review): an identical method with this exact name is defined again
        further down the class; Python keeps only the last definition, so this
        copy is dead code — remove or rename one of them.
        """
        try:
            functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
            function_names = ["adder", "multiplier"]
            created = self.create_library("math", functions, function_names)
            self.run_cbq_query(query='CREATE OR REPLACE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            results = self.run_cbq_query("select * from system:functions where identity.name = 'func1'")
            self.assertEqual(results['results'][0]['functions']['definition']['#language'], 'javascript')
            self.assertEqual(results['results'][0]['functions']['definition']['library'], 'math')
            self.assertEqual(results['results'][0]['functions']['definition']['object'], 'adder')
            self.run_cbq_query(query='CREATE OR REPLACE FUNCTION func1(...) LANGUAGE JAVASCRIPT AS "multiplier" AT "math"')
            results = self.run_cbq_query("select * from system:functions where identity.name = 'func1'")
            self.assertEqual(results['results'][0]['functions']['definition']['#language'], 'javascript')
            self.assertEqual(results['results'][0]['functions']['definition']['library'], 'math')
            self.assertEqual(results['results'][0]['functions']['definition']['object'], 'multiplier')
        except Exception as e:
            self.log.error(str(e))
            self.fail()
        finally:
            try:
                self.delete_library("math")
                self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
    def test_javascript_create_or_replace(self):
        """CREATE OR REPLACE on a JS UDF must update the system:functions entry
        from 'adder' to 'multiplier'.

        NOTE(review): this is a verbatim duplicate of the method defined above
        with the same name; this later definition is the one Python keeps.
        Remove or rename one of the two.
        """
        try:
            functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
            function_names = ["adder", "multiplier"]
            created = self.create_library("math", functions, function_names)
            self.run_cbq_query(query='CREATE OR REPLACE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            results = self.run_cbq_query("select * from system:functions where identity.name = 'func1'")
            self.assertEqual(results['results'][0]['functions']['definition']['#language'], 'javascript')
            self.assertEqual(results['results'][0]['functions']['definition']['library'], 'math')
            self.assertEqual(results['results'][0]['functions']['definition']['object'], 'adder')
            self.run_cbq_query(query='CREATE OR REPLACE FUNCTION func1(...) LANGUAGE JAVASCRIPT AS "multiplier" AT "math"')
            results = self.run_cbq_query("select * from system:functions where identity.name = 'func1'")
            self.assertEqual(results['results'][0]['functions']['definition']['#language'], 'javascript')
            self.assertEqual(results['results'][0]['functions']['definition']['library'], 'math')
            self.assertEqual(results['results'][0]['functions']['definition']['object'], 'multiplier')
        except Exception as e:
            self.log.error(str(e))
            self.fail()
        finally:
            try:
                self.delete_library("math")
                self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
def test_javascript_if_else(self):
functions = 'function adder(a, b, c) {if (a + b > c) { return a + b - c; } else if (a * b > c) { return a*b - c; } else { return a + b + c; }}'
function_names = ["adder"]
created = self.create_library("math", functions, function_names)
try:
self.run_cbq_query(query='CREATE FUNCTION func1(a,b,c) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
results = self.run_cbq_query(query="EXECUTE FUNCTION func1(1,3,5)")
self.assertEqual(results['results'], [9])
results = self.run_cbq_query(query="EXECUTE FUNCTION func1(1,6,5)")
self.assertEqual(results['results'], [2])
results = self.run_cbq_query(query="EXECUTE FUNCTION func1(2,3,5)")
self.assertEqual(results['results'], [1])
finally:
try:
self.delete_library("math")
self.run_cbq_query("DROP FUNCTION func1")
except Exception as e:
self.log.error(str(e))
def test_javascript_for_loop(self):
functions = 'function adder(a, b, c) { for (i=0; i< b; i++){ a = a + c; } return a; }'
string_functions = 'function concater(a) { var text = ""; var x; for (x in a) {text += a[x] + " ";} return text; }'
function_names = ["adder"]
string_function_names = ["concater"]
created = self.create_library("math", functions, function_names)
created = self.create_library("strings", string_functions, string_function_names)
try:
self.run_cbq_query(query='CREATE FUNCTION func1(a,b,c) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
results = self.run_cbq_query(query="EXECUTE FUNCTION func1(1,3,5)")
self.assertEqual(results['results'], [16])
results = self.run_cbq_query(query="EXECUTE FUNCTION func1(1,2,5)")
self.assertEqual(results['results'], [11])
self.run_cbq_query(query='CREATE FUNCTION func2(a) LANGUAGE JAVASCRIPT AS "concater" AT "strings"')
results = self.run_cbq_query(query="EXECUTE FUNCTION func2({'fname':'John', 'lname':'Doe', 'age':25})")
self.assertEqual(results['results'], ['25 John Doe '])
finally:
try:
self.delete_library("math")
self.delete_library("strings")
self.run_cbq_query("DROP FUNCTION func1")
self.run_cbq_query("DROP FUNCTION func2")
except Exception as e:
self.log.error(str(e))
def test_javascript_while_loop(self):
functions = 'function adder(a,b) { var i = 0; while (i < 3) { a = a + b; i++; } return a; }'
string_functions = 'function multiplier(a,b) { do{ a = a + b; } while(a > b) return a; }'
function_names = ["adder"]
string_function_names = ["multiplier"]
created = self.create_library("math", functions, function_names)
created = self.create_library("strings", string_functions, string_function_names)
try:
self.run_cbq_query(query='CREATE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
results = self.run_cbq_query(query="EXECUTE FUNCTION func1(1,2)")
self.assertEqual(results['results'], [7])
self.run_cbq_query(query='CREATE FUNCTION func2(a,b) LANGUAGE JAVASCRIPT AS "multiplier" AT "strings"')
results = self.run_cbq_query(query="EXECUTE FUNCTION func2(-1,1)")
self.assertEqual(results['results'], [0])
finally:
try:
self.delete_library("math")
self.delete_library("strings")
self.run_cbq_query("DROP FUNCTION func1")
self.run_cbq_query("DROP FUNCTION func2")
except Exception as e:
self.log.error(str(e))
    def test_javascript_infinite_loop(self):
        """A non-terminating JS UDF must be stopped by the query timeout,
        either an explicit one or the default 120000 ms execution cap."""
        try:
            # multiplier(2,1) never terminates: a is never changed, so a > b always holds.
            string_functions = 'function multiplier(a,b) { do{ a = a; } while(a > b) return a; }'
            string_function_names = ["multiplier"]
            created = self.create_library("strings", string_functions, string_function_names)
            try:
                # Should timeout after 10 seconds
                self.run_cbq_query(query='CREATE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "multiplier" AT "strings"')
                results = self.run_cbq_query(query="EXECUTE FUNCTION func1(2,1)", query_params={'timeout':"10s"})
                self.fail("Query should have timed out")
            except Exception as e:
                self.log.error(str(e))
                self.assertTrue("Timeout 10s exceeded" in str(e), "Query should have stopped due to timeout, check error message!")
            try:
                # Should timeout after 2 minutes
                results = self.run_cbq_query(query="EXECUTE FUNCTION func1(2,1)", query_params={'timeout':"130s"})
                self.fail("Query should have timed out")
            except Exception as e:
                self.log.error(str(e))
                self.assertTrue("stopped after running beyond 120000 ms" in str(e), "Query should have stopped due to timeout, check error message!")
            try:
                # Should timeout after 2 minutes
                results = self.run_cbq_query(query="EXECUTE FUNCTION func1(2,1)")
                self.fail("Query should have timed out")
            except Exception as e:
                self.log.error(str(e))
                self.assertTrue("stopped after running beyond 120000 ms" in str(e), "Query should have stopped due to timeout, check error message!")
        finally:
            try:
                self.delete_library("strings")
                self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
    def test_javascript_function_syntax_scope(self):
        """A JS UDF can be created and executed under a fully-qualified
        scope path (default:default.<scope>.func1)."""
        try:
            functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
            function_names = ["adder", "multiplier"]
            created = self.create_library("math", functions, function_names)
            self.run_cbq_query(query='CREATE OR REPLACE FUNCTION default:default.{0}.func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"'.format(self.scope))
            results = self.run_cbq_query("EXECUTE FUNCTION default:default.{0}.func1(1,4)".format(self.scope))
            self.assertEqual(results['results'], [5])
        finally:
            # Best-effort cleanup; failures here only get logged.
            try:
                self.delete_library("math")
                self.run_cbq_query("DROP FUNCTION default:default.{0}.func1".format(self.scope))
            except Exception as e:
                self.log.error(str(e))
    def test_javascript_syntax_error(self):
        """Library creation must fail when the JS source contains a syntax
        error ('retur' instead of 'return')."""
        try:
            functions = 'function adder(a, b) { retur a + b; }'
            # NOTE(review): "multiplier" is listed but never defined in the
            # source above; the create is expected to fail regardless.
            function_names = ["adder", "multiplier"]
            created = self.create_library("math", functions, function_names)
            self.assertFalse(created, "Library should have failed to create due to a syntax error during compilation, check logs above!")
        finally:
            try:
                self.delete_library("math")
                self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
    def test_javascript_replace_lib_func(self):
        """Re-posting a library with a changed implementation of the same
        symbol changes the behavior of an already-created UDF."""
        try:
            functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
            function_names = ["adder", "multiplier"]
            created = self.create_library("math", functions, function_names)
            self.run_cbq_query(query='CREATE OR REPLACE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            # Verify the UDF metadata recorded in system:functions.
            results = self.run_cbq_query("select * from system:functions where identity.name = 'func1'")
            self.assertEqual(results['results'][0]['functions']['definition']['#language'], 'javascript')
            self.assertEqual(results['results'][0]['functions']['definition']['library'], 'math')
            self.assertEqual(results['results'][0]['functions']['definition']['object'], 'adder')
            results = self.run_cbq_query(query="EXECUTE FUNCTION func1(1,3)")
            self.assertEqual(results['results'], [4])
            # Replace the library: adder now delegates to helper, which subtracts.
            functions = 'function adder(a,b) { return helper(a,b); } function helper(a,b) { return a - b; }'
            function_names = ["adder", "helper"]
            created = self.create_library("math", functions, function_names)
            results = self.run_cbq_query("select * from system:functions where identity.name = 'func1'")
            self.assertEqual(results['results'][0]['functions']['definition']['#language'], 'javascript')
            self.assertEqual(results['results'][0]['functions']['definition']['library'], 'math')
            self.assertEqual(results['results'][0]['functions']['definition']['object'], 'adder')
            results = self.run_cbq_query(query="EXECUTE FUNCTION func1(3,1)")
            self.assertEqual(results['results'], [2])
        except Exception as e:
            self.log.error(str(e))
            self.fail()
        finally:
            try:
                self.delete_library("math")
                self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
    def test_javascript_delete_lib_func(self):
        """Removing a symbol from a library makes a UDF bound to that
        symbol fail with 'symbol is not a function'."""
        try:
            functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
            function_names = ["adder", "multiplier"]
            created = self.create_library("math", functions, function_names)
            self.run_cbq_query(query='CREATE OR REPLACE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            results = self.run_cbq_query("select * from system:functions where identity.name = 'func1'")
            self.assertEqual(results['results'][0]['functions']['definition']['#language'], 'javascript')
            self.assertEqual(results['results'][0]['functions']['definition']['library'], 'math')
            self.assertEqual(results['results'][0]['functions']['definition']['object'], 'adder')
            results = self.run_cbq_query(query="EXECUTE FUNCTION func1(1,3)")
            self.assertEqual(results['results'], [4])
            # Re-post the library without "adder"; the next EXECUTE must raise.
            functions = 'function multiplier(a, b) { return a * b; }'
            function_names = ["multiplier"]
            created = self.create_library("math", functions, function_names)
            results = self.run_cbq_query(query="EXECUTE FUNCTION func1(1,3)")
        except Exception as e:
            self.log.error(str(e))
            self.assertTrue('symbol is not a function' in str(e), "The query failed for the wrong reason {0}".format(str(e)))
        finally:
            try:
                self.delete_library("math")
                self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
def test_javascript_negative(self):
try:
try:
self.run_cbq_query(query='CREATE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
results = self.run_cbq_query(query="EXECUTE FUNCTION func1(1,3)")
except Exception as e:
self.log.error(str(e))
self.assertTrue('Library or function missing' in str(e),
"The query failed for the wrong reason {0}".format(str(e)))
try:
functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
function_names = ["adder", "multiplier"]
function_names = ["adder", "multiplier"]
created = self.create_library("math", functions, function_names)
results = self.run_cbq_query(query="EXECUTE FUNCTION func1(1,3)")
self.assertEqual(results['results'], [4])
self.run_cbq_query(query='CREATE OR REPLACE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "sub" AT "math"')
results = self.run_cbq_query(query="EXECUTE FUNCTION func1(1,3)")
except Exception as e:
self.log.error(str(e))
self.assertTrue('symbol is not a function' in str(e),
"The query failed for the wrong reason {0}".format(str(e)))
finally:
try:
self.delete_library("math")
self.run_cbq_query("DROP FUNCTION func1")
except Exception as e:
self.log.error(str(e))
##############################################################################################
#
# JS-Inline Hybrid Tests
##############################################################################################
'''The stated udf limit is 500, make sure we can create and use 500 functions'''
    def test_udf_limits(self):
        """Create and execute 500 inline UDFs (the stated limit), optionally
        rebalance in a new n1ql node and re-execute them against it."""
        try:
            function_name = ''
            # Create 500 Fahrenheit-conversion UDFs: func0 .. func499.
            for i in range(0, 500):
                function_name = "func" + str(i)
                self.run_cbq_query(query='CREATE OR REPLACE FUNCTION {0}(...) {{ (args[0] * 9/5) + 32 }}'.format(function_name))
            result = self.run_cbq_query(query="select * from system:functions")
            self.assertEqual(result['metrics']['resultCount'], 500)
            for i in range(0, 500):
                function_name = "func" + str(i)
                results = self.run_cbq_query(query='EXECUTE FUNCTION {0}(10)'.format(function_name))
                self.assertEqual(results['results'], [50])
            if self.rebalance_in:
                # Verify every UDF still executes on a freshly added query node.
                rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
                                                         self.servers[self.nodes_init: (self.nodes_init + 1)], [], services=['n1ql'])
                rebalance.result()
                for i in range(0,500):
                    function_name = "func" + str(i)
                    results = self.run_cbq_query(query='EXECUTE FUNCTION {0}(10)'.format(function_name), server=self.servers[self.nodes_init])
                    self.assertEqual(results['results'], [50])
        finally:
            # Drop all 500 UDFs; each drop failure is logged but not fatal.
            for i in range(0,500):
                function_name = "func" + str(i)
                try:
                    self.run_cbq_query(query='DROP FUNCTION {0}'.format(function_name))
                except Exception as e:
                    self.log.error(str(e))
    def test_create_or_replace_js_to_inline(self):
        """CREATE OR REPLACE can turn an existing javascript UDF into an
        inline UDF; system:functions metadata must reflect the change."""
        try:
            functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
            function_names = ["adder", "multiplier"]
            created = self.create_library("math", functions, function_names)
            self.run_cbq_query(query='CREATE OR REPLACE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            results = self.run_cbq_query("select * from system:functions where identity.name = 'func1'")
            self.assertEqual(results['results'][0]['functions']['definition']['#language'], 'javascript')
            self.assertEqual(results['results'][0]['functions']['definition']['library'], 'math')
            self.assertEqual(results['results'][0]['functions']['definition']['object'], 'adder')
            # Replace with an inline definition; metadata switches to the
            # normalized inline expression.
            self.run_cbq_query("CREATE OR REPLACE FUNCTION func1(...) { (args[0] * 9/5) + 32 }")
            results = self.run_cbq_query("select * from system:functions where identity.name = 'func1'")
            self.assertEqual(results['results'][0]['functions']['definition']['#language'], 'inline')
            self.assertEqual(results['results'][0]['functions']['definition']['expression'], '((((`args`[0]) * 9) / 5) + 32)')
        except Exception as e:
            self.log.error(str(e))
            self.fail()
        finally:
            try:
                self.delete_library("math")
                self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
    def test_create_or_replace_inline_to_js(self):
        """CREATE OR REPLACE can turn an existing inline UDF into a
        javascript UDF; system:functions metadata must reflect the change."""
        try:
            functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
            function_names = ["adder", "multiplier"]
            created = self.create_library("math", functions, function_names)
            self.run_cbq_query("CREATE OR REPLACE FUNCTION func1(...) { (args[0] * 9/5) + 32 }")
            results = self.run_cbq_query("select * from system:functions where identity.name = 'func1'")
            self.assertEqual(results['results'][0]['functions']['definition']['#language'], 'inline')
            self.assertEqual(results['results'][0]['functions']['definition']['expression'], '((((`args`[0]) * 9) / 5) + 32)')
            # Replace with a javascript definition bound to the math library.
            self.run_cbq_query(query='CREATE OR REPLACE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            results = self.run_cbq_query("select * from system:functions where identity.name = 'func1'")
            self.assertEqual(results['results'][0]['functions']['definition']['#language'], 'javascript')
            self.assertEqual(results['results'][0]['functions']['definition']['library'], 'math')
            self.assertEqual(results['results'][0]['functions']['definition']['object'], 'adder')
        except Exception as e:
            self.log.error(str(e))
            self.fail()
        finally:
            try:
                self.delete_library("math")
                self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
    def test_js_inline_where(self):
        """Both inline and javascript UDFs can be used inside a WHERE
        clause (ANY ... SATISFIES v = udf(...) END)."""
        try:
            self.run_cbq_query("CREATE FUNCTION func1(degrees) LANGUAGE INLINE AS (degrees - 32) ")
            # func1(36) == 4, which matches a value in test1.numbers.
            results = self.run_cbq_query("SELECT * FROM default:default.test.test1 WHERE ANY v in test1.numbers SATISFIES v = func1(36) END")
            self.assertEqual(results['results'], [{'test1': {'name': 'old hotel', 'numbers': [1, 2, 3, 4]}}])
            functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
            function_names = ["adder", "multiplier"]
            created = self.create_library("math", functions, function_names)
            self.run_cbq_query(
                query='CREATE OR REPLACE FUNCTION func2(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            results = self.run_cbq_query("SELECT * FROM default:default.test.test1 WHERE ANY v in test1.numbers SATISFIES v = func2(1,3) END")
            self.assertEqual(results['results'], [{'test1': {'name': 'old hotel', 'numbers': [1, 2, 3, 4]}}])
        finally:
            try:
                self.delete_library("math")
                self.run_cbq_query("DROP FUNCTION func1")
                self.run_cbq_query("DROP FUNCTION func2")
            except Exception as e:
                self.log.error(str(e))
'''This test case won't work in Chesire cat'''
    def test_udf_index(self):
        """An inline UDF can appear in an index definition's WHEN clause.

        NOTE(review): per the comment above this test, this is not expected
        to work in Cheshire Cat; no assertion is made on the SELECT result.
        """
        try:
            self.run_cbq_query("CREATE FUNCTION func1(degrees) LANGUAGE INLINE AS (degrees - 32) ")
            creation = self.run_cbq_query(query='CREATE INDEX idx on default:default.{0}.{1}(DISTINCT ARRAY v for v in numbers when v < func1(36) END)'.format(self.scope,self.collections[0]))
            results = self.run_cbq_query("SELECT * FROM default:default.test.test1 WHERE ANY v in test1.numbers SATISFIES v < func1(36) END")
        finally:
            try:
                self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
    def test_udf_insert(self):
        """An inline UDF that runs a subquery can be used to build the
        VALUE of an INSERT statement."""
        try:
            self.run_cbq_query(
                "CREATE OR REPLACE FUNCTION func1(nameval) {{ (select * from default:default.{0}.{1} where name = nameval) }}".format(
                    self.scope, self.collections[0]))
            # The inserted document's field1 holds the subquery's result set.
            results = self.run_cbq_query(query='INSERT INTO default (KEY, VALUE) VALUES ("key5", {"field1":func1("old hotel")}) RETURNING *')
            self.assertEqual(results['results'], [{'default': {'field1': [{'test1': {'name': 'old hotel', 'type': 'hotel'}}, {'test1': {'name': 'old hotel', 'nested': {'fields': 'fake'}}}, {'test1': {'name': 'old hotel', 'numbers': [1, 2, 3, 4]}}]}}])
        finally:
            try:
                self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
    def test_udf_let(self):
        """UDFs (analytics, inline, and javascript variants) can be used in
        a LET clause; self.analytics selects the analytics path."""
        try:
            if self.analytics:
                self.run_cbq_query("CREATE OR REPLACE ANALYTICS FUNCTION func1(degrees) {(degrees - 32)} ")
                results = self.run_cbq_query(
                    "SELECT * FROM collection1 LET maximum_no = func1(36) WHERE ANY v in collection1.numbers SATISFIES v = maximum_no END")
                self.assertEqual(results['results'],
                                 [{'maximum_no': 4, 'collection1': {'name': 'old hotel', 'numbers': [1, 2, 3, 4]}}])
            else:
                self.run_cbq_query("CREATE FUNCTION func1(degrees) LANGUAGE INLINE AS (degrees - 32) ")
                results = self.run_cbq_query(
                    "SELECT * FROM default:default.test.test1 LET maximum_no = func1(36) WHERE ANY v in test1.numbers SATISFIES v = maximum_no END")
                self.assertEqual(results['results'], [{'maximum_no': 4, 'test1': {'name': 'old hotel', 'numbers': [1, 2, 3, 4]}}])
                # Repeat the LET usage with a javascript UDF.
                functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
                function_names = ["adder", "multiplier"]
                created = self.create_library("math", functions, function_names)
                self.run_cbq_query(
                    query='CREATE OR REPLACE FUNCTION func2(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
                results = self.run_cbq_query(
                    "SELECT * FROM default:default.test.test1 LET maxi=func2(1,3) WHERE ANY v in test1.numbers SATISFIES v = maxi END")
                self.assertEqual(results['results'], [{'maxi': 4, 'test1': {'name': 'old hotel', 'numbers': [1, 2, 3, 4]}}])
        finally:
            try:
                if self.analytics:
                    self.run_cbq_query("DROP ANALYTICS FUNCTION func1(degrees)")
                else:
                    self.delete_library("math")
                    self.run_cbq_query("DROP FUNCTION func1")
                    self.run_cbq_query("DROP FUNCTION func2")
            except Exception as e:
                self.log.error(str(e))
    def test_udf_groupby(self):
        """UDFs can be used as GROUP BY expressions (inline + javascript),
        or in the analytics variant as a LET value feeding GROUP BY."""
        if not self.analytics:
            # comparator returns "old hotel" when a > b, else "new hotel".
            functions = 'function comparator(a, b) {if (a > b) { return "old hotel"; } else { return "new hotel" }}'
            function_names = ["comparator"]
            created = self.create_library("math", functions, function_names)
        try:
            if self.analytics:
                self.run_cbq_query("CREATE ANALYTICS FUNCTION func2(degrees) {(degrees - 32)}")
                results = self.run_cbq_query(
                    "SELECT COUNT(name), hotel_name FROM collection1 LET maximum_no = func2(36) WHERE ANY v in collection1.numbers SATISFIES v = maximum_no END GROUP BY name AS hotel_name")
                self.assertEqual(results['results'], [{'$1': 1, 'hotel_name': 'old hotel'}])
            else:
                self.run_cbq_query(query='CREATE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "comparator" AT "math"')
                self.run_cbq_query("CREATE FUNCTION func2(degrees) LANGUAGE INLINE AS (degrees - 32) ")
                results = self.run_cbq_query(
                    "SELECT COUNT(name), hotel_name FROM default:default.test.test1 LET maximum_no = func2(36) WHERE ANY v in test1.numbers SATISFIES v = maximum_no END GROUP BY func1(1,2) AS hotel_name")
                self.assertEqual(results['results'], [{'$1': 1, 'hotel_name': 'new hotel'}])
                results2 = self.run_cbq_query(
                    "SELECT COUNT(name), hotel_name FROM default:default.test.test1 LET maximum_no = func2(36) WHERE ANY v in test1.numbers SATISFIES v = maximum_no END GROUP BY func1(2,1) AS hotel_name")
                self.assertEqual(results2['results'], [{'$1': 1, 'hotel_name': 'old hotel'}])
        finally:
            try:
                if self.analytics:
                    self.run_cbq_query("DROP ANALYTICS FUNCTION func2(degrees)")
                else:
                    self.delete_library("math")
                    self.run_cbq_query("DROP FUNCTION func1")
                    self.run_cbq_query("DROP FUNCTION func2")
            except Exception as e:
                self.log.error(str(e))
    def test_udf_having(self):
        """UDFs can be used inside a HAVING clause (inline + javascript),
        or in the analytics variant with a literal HAVING comparison."""
        if not self.analytics:
            # comparator returns "old hotel" when a > b, else "new hotel".
            functions = 'function comparator(a, b) {if (a > b) { return "old hotel"; } else { return "new hotel" }}'
            function_names = ["comparator"]
            created = self.create_library("math", functions, function_names)
        try:
            if self.analytics:
                self.run_cbq_query("CREATE OR REPLACE ANALYTICS FUNCTION func2(degrees) {(degrees - 32)} ")
                results = self.run_cbq_query(
                    "SELECT name FROM collection1 LET maximum_no = func2(36) WHERE ANY v in collection1.numbers SATISFIES v = maximum_no END GROUP BY name HAVING name = 'old hotel'")
                self.assertEqual(results['results'], [{'name': 'old hotel'}])
            else:
                self.run_cbq_query(query='CREATE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "comparator" AT "math"')
                self.run_cbq_query("CREATE FUNCTION func2(degrees) LANGUAGE INLINE AS (degrees - 32) ")
                # func1(1,2) -> "new hotel": no group matches, result is empty.
                results = self.run_cbq_query(
                    "SELECT name FROM default:default.test.test1 LET maximum_no = func2(36) WHERE ANY v in test1.numbers SATISFIES v = maximum_no END GROUP BY name HAVING name = func1(1,2)")
                self.assertEqual(results['results'], [])
                results2 = self.run_cbq_query(
                    "SELECT name FROM default:default.test.test1 LET maximum_no = func2(36) WHERE ANY v in test1.numbers SATISFIES v = maximum_no END GROUP BY name HAVING name = func1(2,1)")
                self.assertEqual(results2['results'], [{'name': 'old hotel'}])
        finally:
            try:
                if self.analytics:
                    self.run_cbq_query("DROP ANALYTICS FUNCTION func2(degrees)")
                else:
                    self.delete_library("math")
                    self.run_cbq_query("DROP FUNCTION func1")
                    self.run_cbq_query("DROP FUNCTION func2")
            except Exception as e:
                self.log.error(str(e))
    def test_udf_letting(self):
        """A javascript UDF can be used in a LETTING clause whose value is
        then compared in HAVING."""
        string_functions = 'function concater(a,b) { var text = ""; var x; for (x in a) {if (x = b) { return x; }} return "n"; } function comparator(a, b) {if (a > b) { return "old hotel"; } else { return "new hotel" }}'
        function_names2 = ["concater","comparator"]
        created2 = self.create_library("strings",string_functions,function_names2)
        try:
            self.run_cbq_query(query='CREATE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "comparator" AT "strings"')
            self.run_cbq_query("CREATE FUNCTION func2(degrees) LANGUAGE INLINE AS (degrees - 32) ")
            self.run_cbq_query(query='CREATE FUNCTION func3(a,b) LANGUAGE JAVASCRIPT AS "concater" AT "strings"')
            results = self.run_cbq_query(
                "SELECT name FROM default:default.test.test1 LET maximum_no = func2(36) WHERE ANY v in test1.numbers SATISFIES v = maximum_no END GROUP BY name LETTING letter = func3('random string', 'x') HAVING name > letter")
            self.assertEqual(results['results'], [])
            results2 = self.run_cbq_query(
                "SELECT name FROM default:default.test.test1 LET maximum_no = func2(36) WHERE ANY v in test1.numbers SATISFIES v = maximum_no END GROUP BY name LETTING letter = func3('old hotel','o') HAVING name > letter")
            self.assertEqual(results2['results'], [{'name': 'old hotel'}])
        finally:
            try:
                self.delete_library("strings")
                self.run_cbq_query("DROP FUNCTION func1")
                self.run_cbq_query("DROP FUNCTION func2")
                self.run_cbq_query("DROP FUNCTION func3")
            except Exception as e:
                self.log.error(str(e))
    def test_udf_orderby(self):
        """A javascript UDF can be used as an ORDER BY expression."""
        string_functions = 'function concater(a,b) { var text = ""; var x; for (x in a) {if (x = b) { return x; }} return "n"; } function comparator(a, b) {if (a > b) { return "old hotel"; } else { return "new hotel" }} '
        function_names2 = ["concater","comparator"]
        created2 = self.create_library("strings",string_functions,function_names2)
        try:
            self.run_cbq_query(query='CREATE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "comparator" AT "strings"')
            self.run_cbq_query("CREATE FUNCTION func2(degrees) LANGUAGE INLINE AS (degrees - 32) ")
            self.run_cbq_query(query='CREATE FUNCTION func3(a,b) LANGUAGE JAVASCRIPT AS "concater" AT "strings"')
            # Single-row result either way; the UDF only affects ordering.
            results = self.run_cbq_query(
                "SELECT name FROM default:default.test.test1 LET maximum_no = func2(36) WHERE ANY v in test1.numbers SATISFIES v = maximum_no END ORDER BY func1(2,1)")
            self.assertEqual(results['results'], [{'name': 'old hotel'}])
            results2 = self.run_cbq_query(
                "SELECT name FROM default:default.test.test1 LET maximum_no = func2(36) WHERE ANY v in test1.numbers SATISFIES v = maximum_no END ORDER BY func1(1,2) ")
            self.assertEqual(results2['results'], [{'name': 'old hotel'}])
        finally:
            try:
                self.delete_library("strings")
                self.run_cbq_query("DROP FUNCTION func1")
                self.run_cbq_query("DROP FUNCTION func2")
                self.run_cbq_query("DROP FUNCTION func3")
            except Exception as e:
                self.log.error(str(e))
    def test_advise_udf(self):
        """ADVISE works on a query that uses UDFs and still recommends the
        expected array index."""
        string_functions = 'function concater(a,b) { var text = ""; var x; for (x in a) {if (x = b) { return x; }} return "n"; } function comparator(a, b) {if (a > b) { return "old hotel"; } else { return "new hotel" }}'
        function_names2 = ["concater","comparator"]
        created2 = self.create_library("strings",string_functions,function_names2)
        try:
            self.run_cbq_query(query='CREATE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "comparator" AT "strings"')
            self.run_cbq_query("CREATE FUNCTION func2(degrees) LANGUAGE INLINE AS (degrees - 32) ")
            self.run_cbq_query(query='CREATE FUNCTION func3(a,b) LANGUAGE JAVASCRIPT AS "concater" AT "strings"')
            self.run_cbq_query("CREATE OR REPLACE FUNCTION func4(nameval) {{ (select * from default:default.{0}.{1} where name = nameval) }}".format(self.scope,self.collections[0]))
            results2 = self.run_cbq_query(
                "ADVISE SELECT name FROM default:default.test.test1 LET maximum_no = func2(36) WHERE ANY v in test1.numbers SATISFIES v = maximum_no END GROUP BY name LETTING letter = func3('old hotel','o') HAVING name > letter")
            self.assertTrue('CREATE INDEX adv_DISTINCT_numbers ON `default`:`default`.`test`.`test1`(DISTINCT ARRAY `v` FOR `v` IN `numbers` END)' in str(results2['results']), "Wrong index was advised, check advise output {0}".format(results2))
        finally:
            try:
                self.delete_library("strings")
                self.run_cbq_query("DROP FUNCTION func1")
                self.run_cbq_query("DROP FUNCTION func2")
                self.run_cbq_query("DROP FUNCTION func3")
                self.run_cbq_query("DROP FUNCTION func4")
            except Exception as e:
                self.log.error(str(e))
    def test_udf_prepareds(self):
        """An inline UDF can be used inside prepared statements, both via
        EXECUTE FUNCTION and inside a SELECT projection."""
        try:
            self.run_cbq_query(
                "CREATE OR REPLACE FUNCTION func1(nameval) {{ (select * from default:default.{0}.{1} where name = nameval) }}".format(
                    self.scope, self.collections[0]))
            results = self.run_cbq_query("PREPARE p1 as EXECUTE FUNCTION func1('old hotel')")
            results = self.run_cbq_query("EXECUTE p1")
            self.assertEqual(results['results'], [[{'test1': {'name': 'old hotel', 'type': 'hotel'}},
                                                  {'test1': {'name': 'old hotel', 'nested': {'fields': 'fake'}}},
                                                  {'test1': {'name': 'old hotel', 'numbers': [1, 2, 3, 4]}}]])
            results = self.run_cbq_query("PREPARE p2 as select func1('old hotel')")
            results = self.run_cbq_query("EXECUTE p2")
            self.assertEqual(results['results'], [{'$1': [{'test1': {'name': 'old hotel', 'type': 'hotel'}}, {
                'test1': {'name': 'old hotel', 'nested': {'fields': 'fake'}}}, {'test1': {'name': 'old hotel',
                                                                                         'numbers': [1, 2, 3,
                                                                                                     4]}}]}])
        except Exception as e:
            self.log.error(str(e))
            self.fail()
        finally:
            try:
                self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
    def test_udf_prepareds_update(self):
        """Replacing a UDF after it was baked into a prepared statement
        changes the prepared statement's results on re-execution."""
        try:
            self.run_cbq_query(
                "CREATE OR REPLACE FUNCTION func1(nameval) {{ (select * from default:default.{0}.{1} where name = nameval) }}".format(
                    self.scope, self.collections[0]))
            results = self.run_cbq_query("PREPARE p1 as EXECUTE FUNCTION func1('old hotel')")
            results = self.run_cbq_query("EXECUTE p1")
            self.assertEqual(results['results'], [[{'test1': {'name': 'old hotel', 'type': 'hotel'}},
                                                  {'test1': {'name': 'old hotel', 'nested': {'fields': 'fake'}}},
                                                  {'test1': {'name': 'old hotel', 'numbers': [1, 2, 3, 4]}}]])
            # func1 now simply echoes its first argument.
            self.run_cbq_query("CREATE OR REPLACE FUNCTION func1(...) {args[0]}")
            results = self.run_cbq_query("EXECUTE p1")
            self.assertEqual(results['results'], ['old hotel'])
        except Exception as e:
            self.log.error(str(e))
            self.fail()
        finally:
            try:
                self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
    def test_udf_prepareds_update_js_inline(self):
        """Replacing an inline UDF with an equivalent javascript UDF keeps
        an existing prepared statement producing the same results."""
        try:
            self.run_cbq_query("CREATE FUNCTION func1(degrees) LANGUAGE INLINE AS (degrees - 32) ")
            results = self.run_cbq_query(
                "PREPARE p1 as SELECT * FROM default:default.test.test1 LET maximum_no = func1(36) WHERE ANY v in test1.numbers SATISFIES v = maximum_no END")
            results = self.run_cbq_query("EXECUTE p1")
            self.assertEqual(results['results'], [{'maximum_no': 4, 'test1': {'name': 'old hotel', 'numbers': [1, 2, 3, 4]}}])
            # adder(a) computes the same (a - 32) as the inline version.
            functions = 'function adder(a) { return (a - 32); } function multiplier(a, b) { return a * b; }'
            function_names = ["adder", "multiplier"]
            created = self.create_library("math", functions, function_names)
            self.run_cbq_query(
                query='CREATE OR REPLACE FUNCTION func1(a) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            results = self.run_cbq_query("EXECUTE p1")
            self.assertEqual(results['results'], [{'maximum_no': 4, 'test1': {'name': 'old hotel', 'numbers': [1, 2, 3, 4]}}])
        except Exception as e:
            self.log.error(str(e))
            self.fail()
        finally:
            try:
                self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
    def test_udf_prepared_drop(self):
        """Executing a prepared statement after its UDF has been dropped
        must fail with a function-not-found error."""
        try:
            self.run_cbq_query(
                "CREATE OR REPLACE FUNCTION func1(nameval) {{ (select * from default:default.{0}.{1} where name = nameval) }}".format(
                    self.scope, self.collections[0]))
            results = self.run_cbq_query("PREPARE p1 as EXECUTE FUNCTION func1('old hotel')")
            self.run_cbq_query("DROP FUNCTION func1")
            results = self.run_cbq_query("EXECUTE p1")
        except Exception as e:
            self.log.error(str(e))
            self.assertTrue("Function 'func1' not found" in str(e))
        finally:
            try:
                self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
##############################################################################################
#
# JAVASCRIPT Libraries
##############################################################################################
def test_create_library(self):
try:
functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
function_names = ["adder", "multiplier"]
created = self.create_library("math", functions, function_names)
self.assertTrue(created, "The library was not created! Check run logs for more details")
finally:
self.delete_library("math")
def test_update_library(self):
try:
functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
function_names = ["adder", "multiplier"]
created = self.create_library("math", functions, function_names)
self.assertTrue(created, "The library was not created! Check run logs for more details")
if self.replace:
functions = 'function sub(a, b) { return a - b; } function divider(a, b) { return a / b; }'
function_names = ["sub", "divider"]
else:
functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; } function sub(a, b) { return a - b; } function divider(a, b) { return a / b; }'
function_names = ["sub", "divider", "adder", "multiplier"]
created = self.create_library("math", functions, function_names, self.replace)
self.assertTrue(created, "The library was not updated! Check run logs for more details")
finally:
self.delete_library("math")
def test_delete_library(self):
functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
function_names = ["adder", "multiplier"]
self.create_library("math", functions, function_names)
deleted = self.delete_library("math")
self.assertTrue(deleted, "The library was not deleted! Check run logs for more details")
def test_add_function(self):
try:
functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
function_names = ["adder", "multiplier"]
self.create_library("math", functions, function_names)
functions ='function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; } function sub(a,b) { return helper(a,b); } function helper(a,b) { return a - b; }'
function_names = ["adder", "multiplier","sub","helper"]
created = self.create_library("math", functions, function_names)
self.assertTrue(created, "The new library was not created! Check run logs for more details")
finally:
self.delete_library("math")
def test_delete_function(self):
try:
functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
function_names = ["adder", "multiplier"]
self.create_library("math", functions, function_names)
functions = 'function multiplier(a, b) { return a * b; }'
function_names = ["multiplier"]
created = self.create_library("math", functions, function_names)
self.assertTrue(created, "The new library was not created! Check run logs for more details")
finally:
self.delete_library("math")
'''Create a library with functions, check to see that the library was created and the functions were created'''
def create_library(self, library_name='', functions={},function_names=[], replace= False):
created = False
url = "http://{0}:{1}/evaluator/v1/libraries/{2}".format(self.master.ip, self.n1ql_port, library_name)
data = '{0}'.format(functions)
results = self.shell.execute_command("{0} -X POST {1} -u Administrator:password -H 'content-type: application/json' -d '{2}'".format(self.curl_path, url, data))
self.log.info(results[0])
libraries = self.shell.execute_command("{0} {1} -u Administrator:password".format(self.curl_path, url))
if library_name in str(libraries[0]):
created = True
else:
self.log.error("The library {0} was not created: {1}".format(library_name, libraries))
for function in function_names:
if function in str(libraries[0]):
created = True
else:
self.log.error("The function {0} was not created! {1}".format(function, libraries))
created = False
break
return created
'''Delete a library'''
def delete_library(self, library_name=''):
deleted = False
url = "http://{0}:{1}/evaluator/v1/libraries/{2}".format(self.master.ip, self.n1ql_port, library_name)
curl_output = self.shell.execute_command("{0} -X DELETE {1} -u Administrator:password ".format(self.curl_path, url))
self.log.info(curl_output[0])
libraries = self.shell.execute_command("{0} {1} -u Administrator:password".format(self.curl_path, url))
if "No such library" in str(libraries):
deleted = True
return deleted
'''Add a function to an existing library, it is assumed the library already exists'''
def add_function(self, library_name ='', function_name ='', function =''):
added = False
url = "http://{0}:{1}/evaluator/v1/libraries/{2}/functions/{3}".format(self.master.ip, self.n1ql_port, library_name, function_name)
self.shell.execute_command("{0} -X PUT {1} -u Administrator:password -H 'content-type: application/json' -d '{2}'".format(self.curl_path, url, function))
function = self.shell.execute_command("{0} {1} -u Administrator:password".format(self.curl_path, url))
if function_name in str(function[0]):
added = True
else:
url = "http://{0}:{1}/functions/v1/libraries".format(self.master.ip, self.n1ql_port)
library = self.shell.execute_command("{0} {1} -u Administrator:password".format(self.curl_path, url))
self.log.error("Function url was not found, here is the library it should have been added to! {0}".format(library[0]))
return added
'''Delete a specific function'''
def delete_function(self, library_name ='', function_name =''):
deleted = False
url = "http://{0}:{1}/evaluator/v1/libraries/{2}/functions/{3}".format(self.master.ip, self.n1ql_port, library_name,function_name)
curl_output = self.shell.execute_command("{0} -X DELETE {1} -u Administrator:password ".format(self.curl_path, url))
libraries = self.shell.execute_command("{0} {1} -u Administrator:password".format(self.curl_path, url))
if library_name not in str(libraries):
deleted = True
return deleted
''' Test rbac when you have creation/execution on a function that attemps to access a bucket you dont have perms for'''
    def test_inline_rbac_query(self):
        """RBAC: a user who may create/execute a UDF but lacks bucket access
        must be rejected when the UDF body queries that bucket.
        """
        try:
            self.create_users()
            self.grant_role()
            # Creation succeeds; the credential check happens at execution time.
            res = self.curl_with_roles("CREATE OR REPLACE FUNCTION func1(nameval) {{ (select * from default:default.{0}.{1} where name = nameval) }}".format(self.scope, self.collections[0]))
            self.assertEqual(res['status'], 'success')
            res = self.curl_with_roles('EXECUTE FUNCTION func1("old hotel")')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('select raw func1("old hotel")')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
        finally:
            # Best-effort cleanup; failures here must not mask the test result.
            try:
                self.delete_library("math")
                self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
    def test_inline_rbac(self):
        """RBAC for CREATE OR REPLACE + execution of inline/JS UDFs.

        When ``self.scoped`` the user only has rights on the
        default.test scope, so scoped creation/execution succeeds and
        global creation fails; otherwise the inverse holds.
        """
        try:
            self.create_users()
            self.grant_role()
            functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
            function_names = ["adder", "multiplier"]
            created = self.create_library("math", functions, function_names)
            res = self.curl_with_roles('CREATE OR REPLACE FUNCTION default:default.test.celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            if self.scoped:
                self.assertEqual(res['status'], 'success')
            else:
                self.assertTrue('User does not have credentials' in res['errors'][0]['msg'], "Error message is wrong: {0}".format(str(res)))
            # default.test2 is outside the granted scope either way.
            res = self.curl_with_roles('CREATE OR REPLACE FUNCTION default:default.test2.celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.assertTrue('User does not have credentials' in res['errors'][0]['msg'], "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE OR REPLACE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS \"adder\" AT \"math\"')
            self.assertTrue('User does not have credentials' in res['errors'][0]['msg'], "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE OR REPLACE FUNCTION default:celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            if self.scoped:
                self.assertTrue('User does not have credentials' in res['errors'][0]['msg'], "Error message is wrong: {0}".format(str(res)))
            else:
                self.assertEqual(res['status'], 'success')
            res = self.curl_with_roles('CREATE OR REPLACE FUNCTION celsius1(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            if self.scoped:
                self.assertTrue('User does not have credentials' in res['errors'][0]['msg'], "Error message is wrong: {0}".format(str(res)))
            else:
                self.assertEqual(res['status'], 'success')
            # Execute whichever variant was successfully created above.
            # celsius(10) = (10 - 32) * 5/9 = -12.22...
            if self.scoped:
                res = self.curl_with_roles('SELECT RAW default:default.test.celsius(10)')
                self.assertEqual(res['results'], [-12.222222222222221])
                res = self.curl_with_roles('EXECUTE FUNCTION default:default.test.celsius(10)')
                self.assertEqual(res['results'], [-12.222222222222221])
            else:
                res = self.curl_with_roles('SELECT RAW celsius(10)')
                self.assertEqual(res['results'], [-12.222222222222221])
                res = self.curl_with_roles('SELECT RAW celsius1(10)')
                self.assertEqual(res['results'], [-12.222222222222221])
                res = self.curl_with_roles('EXECUTE FUNCTION celsius(10)')
                self.assertEqual(res['results'], [-12.222222222222221])
                res = self.curl_with_roles('EXECUTE FUNCTION celsius1(10)')
                self.assertEqual(res['results'], [-12.222222222222221])
        finally:
            # Best-effort cleanup of library and UDFs.
            try:
                self.delete_library("math")
                if self.scoped:
                    self.run_cbq_query("DROP FUNCTION default:default.test.celsius")
                else:
                    self.run_cbq_query("DROP FUNCTION celsius")
                    self.run_cbq_query("DROP FUNCTION celsius1")
            except Exception as e:
                self.log.error(str(e))
    def test_inline_rbac_creation(self):
        """RBAC for a user with creation-only rights: CREATE succeeds inside
        the granted scope but SELECT/EXECUTE of the created UDF is denied.
        """
        try:
            self.create_users()
            self.grant_role()
            functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
            function_names = ["adder", "multiplier"]
            created = self.create_library("math", functions, function_names)
            res = self.curl_with_roles('CREATE FUNCTION default:default.test.celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            if self.scoped:
                self.assertEqual(res['status'], 'success')
            else:
                self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            # default.test2 is never within the granted role.
            res = self.curl_with_roles('CREATE FUNCTION default:default.test2.celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS \"adder\" AT \"math\"')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION default:celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            if self.scoped:
                self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            else:
                self.assertEqual(res['status'], 'success')
            res = self.curl_with_roles('CREATE FUNCTION celsius1(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            if self.scoped:
                self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            else:
                self.assertEqual(res['status'], 'success')
            # Creation-only role: every execution attempt must be rejected.
            if self.scoped:
                res = self.curl_with_roles('SELECT default:default.test.celsius(10)')
                self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
                res = self.curl_with_roles('EXECUTE FUNCTION default:default.test.celsius(10)')
                self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            else:
                res = self.curl_with_roles('SELECT RAW celsius(10)')
                self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
                res = self.curl_with_roles('SELECT RAW celsius1(10)')
                self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
                res = self.curl_with_roles('EXECUTE FUNCTION celsius(10)')
                self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
                res = self.curl_with_roles('EXECUTE FUNCTION celsius1(10)')
                self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
        finally:
            # Best-effort cleanup of library and UDFs.
            try:
                self.delete_library("math")
                if self.scoped:
                    self.run_cbq_query("DROP FUNCTION default:default.test.celsius")
                else:
                    self.run_cbq_query("DROP FUNCTION celsius")
                    self.run_cbq_query("DROP FUNCTION celsius1")
            except Exception as e:
                self.log.error(str(e))
    def test_inline_rbac_execution(self):
        """RBAC for a user with execution-only rights: all CREATEs are denied;
        UDFs created by the admin are executable only within the granted scope.
        """
        try:
            self.create_users()
            self.grant_role()
            functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
            function_names = ["adder", "multiplier"]
            created = self.create_library("math", functions, function_names)
            # The restricted user may not create anything, scoped or global.
            res = self.curl_with_roles('CREATE FUNCTION default:default.test.celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION default:default.test2.celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS \"adder\" AT \"math\"')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION default:celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION celsius1(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            # Now create the UDFs as the admin (run_cbq_query), then try to
            # execute them with the restricted user's roles.
            self.run_cbq_query(query='CREATE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            self.run_cbq_query(query='CREATE FUNCTION default:celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.run_cbq_query(query='CREATE FUNCTION celsius1(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.run_cbq_query(query='CREATE FUNCTION default:default.test.celsius1(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            res = self.curl_with_roles('SELECT RAW celsius(10)')
            if self.scoped:
                self.assertTrue('User does not have credentials' in str(res),
                                "Error message is wrong: {0}".format(str(res)))
            else:
                self.assertEqual(res['results'], [-12.222222222222221])
            res = self.curl_with_roles('SELECT RAW celsius1(10)')
            if self.scoped:
                self.assertTrue('User does not have credentials' in str(res),
                                "Error message is wrong: {0}".format(str(res)))
            else:
                self.assertEqual(res['results'], [-12.222222222222221])
            res = self.curl_with_roles('EXECUTE FUNCTION celsius(10)')
            if self.scoped:
                self.assertTrue('User does not have credentials' in str(res),
                                "Error message is wrong: {0}".format(str(res)))
            else:
                self.assertEqual(res['results'], [-12.222222222222221])
            res = self.curl_with_roles('EXECUTE FUNCTION celsius1(10)')
            if self.scoped:
                self.assertTrue('User does not have credentials' in str(res),
                                "Error message is wrong: {0}".format(str(res)))
            else:
                self.assertEqual(res['results'], [-12.222222222222221])
            # JS UDF backed by the library is never executable by this user.
            res = self.curl_with_roles('SELECT RAW func1(10,20)')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('EXECUTE FUNCTION func1(10)')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('SELECT RAW default:default.test.celsius1(10)')
            if self.scoped:
                self.assertEqual(res['results'], [-12.222222222222221])
            else:
                self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('EXECUTE FUNCTION default:default.test.celsius1(10)')
            if self.scoped:
                self.assertEqual(res['results'], [-12.222222222222221])
            else:
                self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
        finally:
            # Best-effort cleanup of library and all admin-created UDFs.
            try:
                self.delete_library("math")
                self.run_cbq_query("DROP FUNCTION celsius")
                self.run_cbq_query("DROP FUNCTION celsius1")
                self.run_cbq_query("DROP FUNCTION func1")
                self.run_cbq_query("DROP FUNCTION default:default.test.celsius1")
            except Exception as e:
                self.log.error(str(e))
    def test_js_rbac(self):
        """RBAC for JavaScript UDFs: inline CREATEs are denied everywhere,
        JS-UDF creation and execution are allowed only in the granted scope
        (scoped) or only globally (unscoped).
        """
        try:
            self.create_users()
            self.grant_role()
            functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
            function_names = ["adder", "multiplier"]
            created = self.create_library("math", functions, function_names)
            # Inline UDF creation is always denied for this role.
            res = self.curl_with_roles('CREATE FUNCTION default:default.test.celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.assertTrue('User does not have credentials' in res['errors'][0]['msg'], "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION default:default.test2.celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.assertTrue('User does not have credentials' in res['errors'][0]['msg'], "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION default:celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.assertTrue('User does not have credentials' in res['errors'][0]['msg'], "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION celsius1(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.assertTrue('User does not have credentials' in res['errors'][0]['msg'], "Error message is wrong: {0}".format(str(res)))
            # JS UDF creation is allowed only where the role was granted.
            res = self.curl_with_roles('CREATE FUNCTION default:default.test.func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            if self.scoped:
                self.assertEqual(res['status'], 'success')
            else:
                self.assertTrue('User does not have credentials' in res['errors'][0]['msg'], "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION default:default.test2.func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            self.assertTrue('User does not have credentials' in res['errors'][0]['msg'], "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION default:func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            if self.scoped:
                self.assertTrue('User does not have credentials' in res['errors'][0]['msg'],
                                "Error message is wrong: {0}".format(str(res)))
            else:
                self.assertEqual(res['status'], 'success')
            # Execute whichever func1 was successfully created: adder(10,20)=30.
            if self.scoped:
                res = self.curl_with_roles('SELECT RAW default:default.test.func1(10,20)')
                self.assertEqual(res['results'], [30])
                res = self.curl_with_roles('EXECUTE FUNCTION default:default.test.func1(10,20)')
                self.assertEqual(res['results'], [30])
            else:
                res = self.curl_with_roles('SELECT RAW func1(10,20)')
                self.assertEqual(res['results'], [30])
                res = self.curl_with_roles('EXECUTE FUNCTION func1(10,20)')
                self.assertEqual(res['results'], [30])
        finally:
            # Best-effort cleanup of library and JS UDF.
            try:
                self.delete_library("math")
                if self.scoped:
                    self.run_cbq_query("DROP FUNCTION default:default.test.func1")
                else:
                    self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
    def test_js_rbac_creation(self):
        """RBAC for a creation-only role on JS UDFs: creation succeeds in the
        granted scope, but execution of the created UDF is denied.
        """
        try:
            self.create_users()
            self.grant_role()
            functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
            function_names = ["adder", "multiplier"]
            created = self.create_library("math", functions, function_names)
            # Inline UDF creation is always denied for this role.
            res = self.curl_with_roles('CREATE FUNCTION default:default.test.celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION default:default.test2.celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION default:celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION celsius1(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            # JS UDF creation is allowed only where the role was granted.
            res = self.curl_with_roles('CREATE FUNCTION default:default.test.func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            if self.scoped:
                self.assertEqual(res['status'], 'success')
            else:
                self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION default:default.test2.func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            if self.scoped:
                self.assertTrue('User does not have credentials' in str(res),
                                "Error message is wrong: {0}".format(str(res)))
            else:
                self.assertEqual(res['status'], 'success')
            # Creation-only role: execution of the created UDF must be denied.
            if self.scoped:
                res = self.curl_with_roles('SELECT RAW default:default.test.func1(10,20)')
                self.assertTrue('User does not have credentials' in str(res),
                                "Error message is wrong: {0}".format(str(res)))
                res = self.curl_with_roles('EXECUTE FUNCTION default:default.test.func1(10,20)')
                self.assertTrue('User does not have credentials' in str(res),
                                "Error message is wrong: {0}".format(str(res)))
            else:
                res = self.curl_with_roles('SELECT RAW func1(10,20)')
                self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
                res = self.curl_with_roles('EXECUTE FUNCTION func1(10,20)')
                self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
        finally:
            # Best-effort cleanup of library and JS UDF.
            try:
                self.delete_library("math")
                if self.scoped:
                    self.run_cbq_query("DROP FUNCTION default:default.test.func1")
                else:
                    self.run_cbq_query("DROP FUNCTION func1")
            except Exception as e:
                self.log.error(str(e))
    def test_js_rbac_execution(self):
        """RBAC for an execution-only role on JS UDFs: all CREATEs are denied;
        the admin-created JS UDF is executable only in the granted scope
        (scoped) or only globally (unscoped), while inline UDFs stay denied.
        """
        try:
            self.create_users()
            self.grant_role()
            functions = 'function adder(a, b) { return a + b; } function multiplier(a, b) { return a * b; }'
            function_names = ["adder", "multiplier"]
            created = self.create_library("math", functions, function_names)
            # The restricted user may not create anything.
            res = self.curl_with_roles('CREATE FUNCTION default:default.test.celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION default:default.test2.celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION default:celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            res = self.curl_with_roles('CREATE FUNCTION celsius1(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
            # Create the UDFs as the admin, then execute with restricted roles.
            self.run_cbq_query(query='CREATE FUNCTION func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            self.run_cbq_query(query='CREATE FUNCTION default:celsius(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.run_cbq_query(query='CREATE FUNCTION celsius1(degrees) LANGUAGE INLINE AS (degrees - 32) * 5/9')
            self.run_cbq_query(query='CREATE FUNCTION default:default.test.func1(a,b) LANGUAGE JAVASCRIPT AS "adder" AT "math"')
            # adder(10,20) = 30.
            if self.scoped:
                res = self.curl_with_roles('SELECT RAW default:default.test.func1(10,20)')
                self.assertEqual(res['results'], [30])
                res = self.curl_with_roles('EXECUTE FUNCTION default:default.test.func1(10,20)')
                self.assertEqual(res['results'], [30])
            else:
                res = self.curl_with_roles('SELECT RAW celsius(10)')
                self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
                res = self.curl_with_roles('SELECT RAW celsius1(10)')
                self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
                res = self.curl_with_roles('EXECUTE FUNCTION celsius(10)')
                self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
                res = self.curl_with_roles('EXECUTE FUNCTION celsius1(10)')
                self.assertTrue('User does not have credentials' in str(res), "Error message is wrong: {0}".format(str(res)))
                res = self.curl_with_roles('SELECT RAW func1(10,20)')
                self.assertEqual(res['results'], [30])
                res = self.curl_with_roles('EXECUTE FUNCTION func1(10,20)')
                self.assertEqual(res['results'], [30])
        finally:
            # Best-effort cleanup of library and all admin-created UDFs.
            try:
                self.delete_library("math")
                self.run_cbq_query("DROP FUNCTION celsius")
                self.run_cbq_query("DROP FUNCTION celsius1")
                self.run_cbq_query("DROP FUNCTION func1")
                self.run_cbq_query("DROP FUNCTION default:default.test.func1")
            except Exception as e:
                self.log.error(str(e))
def test_library_name_validation(self):
invalid_names = [
"_libname", "lib$name", "lib@name", "lib!name", "lib^name", "lib*name", "lib(name", "lib)name",
"lib+name", "lib=name", "lib\[name", "lib\]name", "lib:name", "lib,name", "lib;name", "lib<name", "lib>name",
"_libname", "-libname", "lib\?name", "lib\\name", "lib\:name", "lib\#name"
]
for name in invalid_names:
with self.subTest(f"Invalid Library name: {name}"):
url = f"http://{self.master.ip}:{self.n1ql_port}/evaluator/v1/libraries/{name}"
data = 'function add(a,b) { return a+b;}'
results = self.shell.execute_command(f"{self.curl_path} -X POST '{url}' -u Administrator:password -H 'content-type: application/json' -d '{data}'")
self.assertEqual(results[0], ['Library name can start with characters only in range A-Z, a-z, 0-9 and can contain characters only in range A-Z, a-z, 0-9, underscore and hyphen '])
valid_names = [
"libname", "LIBNAME", "LibName", "lib0name", "0libname", "libname0",
"lib_name", "libname_", "lib-name", "libname-", "999"
]
for name in valid_names:
with self.subTest(f"Valid Library name: {name}"):
url = f"http://{self.master.ip}:{self.n1ql_port}/evaluator/v1/libraries/{name}"
data = 'function add(a,b) { return a+b;}'
results = self.shell.execute_command(f"{self.curl_path} -X POST '{url}' -u Administrator:password -H 'content-type: application/json' -d '{data}'")
self.assertEqual(results[0], ['{"status": "OK"}'])
results = self.shell.execute_command(f"{self.curl_path} -X DELETE '{url}' -u Administrator:password")
self.assertEqual(results[0], ['{"status": "OK"}'])
| 59.227273 | 363 | 0.574968 |
f6be19b22ddf18ac096ad696cd25dc620006e01b | 5,426 | py | Python | ub/modules/covid.py | parv779/javes-3.0 | d510717b2756a65b39ff18d9f53d4adc46d8e23f | [
"MIT"
] | 15 | 2020-12-13T17:37:05.000Z | 2021-06-23T00:00:49.000Z | ub/modules/covid.py | parv779/javes-3.0 | d510717b2756a65b39ff18d9f53d4adc46d8e23f | [
"MIT"
] | 2 | 2021-01-11T16:39:31.000Z | 2021-01-25T22:35:28.000Z | ub/modules/covid.py | parv779/javes-3.0 | d510717b2756a65b39ff18d9f53d4adc46d8e23f | [
"MIT"
] | 78 | 2020-12-13T17:52:51.000Z | 2022-03-24T03:43:09.000Z | from datetime import datetime
from covid import Covid
covid = Covid(source="worldometers")
from ub import CMD_HELP
from telethon import events
from telethon.errors.rpcerrorlist import YouBlockedUserError
from telethon.tl.functions.account import UpdateNotifySettingsRequest
from ub.javes_main.commands import bot, rekcah05, javes05
javes = bot
@javes05(outgoing=True, pattern="^!corona$")
async def iqless(e):
    # Joke command: replace the outgoing "!corona" message with a taunt.
    await e.edit("Antivirus scan was completed \n⚠️ Warning! This donkey has Corona Virus")
@javes05(outgoing=True, pattern="^!covid (.*)")
async def corona(event):
    """Edit the outgoing "!covid <country>" message into a COVID-19 report.

    Shows the requested country's numbers followed by worldwide totals.
    NOTE: the local Covid() uses the library's default source, shadowing the
    module-level worldometers instance — kept as-is to preserve behavior.
    """
    await event.edit("`Processing...`")
    country = event.pattern_match.group(1)
    covid = Covid()
    country_data = covid.get_status_by_country_name(country)
    # (Removed an unused 'deaths' local; totals are fetched below as needed.)
    if country_data:
        output_text = f"`Confirmed` : {country_data['confirmed']}\n"
        output_text += f"`Active` : {country_data['active']}\n"
        output_text += f"`Deaths` : {country_data['deaths']}\n"
        output_text += f"`Recovered` : {country_data['recovered']}\n\n"
        output_text += f"---------TOTAL----------\n\n"
        output_text += f"`Deaths` : {covid.get_total_deaths()}\n"
        output_text += f"`Recovered` : {covid.get_total_recovered()}\n"
        output_text += f"`Confirmed` : {covid.get_total_confirmed_cases()}\n"
        output_text += f"`Active` : {covid.get_total_active_cases()}\n\n"
        output_text += ("`Update` : "f"{datetime.utcfromtimestamp(country_data['last_update'] // 1000).strftime('%H:%M')}[GMT]\n")
    else:
        output_text = "Invalid Country name"
    await event.edit(f"Corona Virus Info in {country}:\n\n{output_text}")
@javes05(outgoing=True, pattern="^!covid2 (.*)")
async def _(event):
    """Forward "!covid2 <country>" to @NovelCoronaBot and relay its reply."""
    if event.fwd_from:
        return
    input_str = event.pattern_match.group(1)
    reply_message = await event.get_reply_message()
    chat = "@NovelCoronaBot"
    await event.edit("`Processing covid Info...`")
    async with event.client.conversation(chat) as conv:
        try:
            # Register the listener before sending so the reply is not missed.
            response = conv.wait_event(events.NewMessage(incoming=True,from_users=1124136160))
            await event.client.send_message(chat, "{}".format(input_str))
            response = await response
        except YouBlockedUserError:
            await event.reply("```please unlock @NovelCoronaBot```")
            return
        if response.text.startswith("Country"):
            await event.edit("`Invalid Country name`")
        else:
            await event.client.send_message(event.chat_id, response.message)
@javes.on(rekcah05(pattern=f"corona$", allow_sudo=True))
async def iqless(e):
    # Sudo variant of the "!corona" joke command; replies instead of editing.
    await e.reply("Antivirus scan was completed \n⚠️ Warning! This donkey has Corona Virus")
@javes.on(rekcah05(pattern=f"covid (.*)", allow_sudo=True))
async def corona(event):
    """Sudo variant of the COVID-19 report: replies to the triggering message
    and edits that reply in place with the country + worldwide stats.
    """
    rk = await event.reply("`Processing...`")
    country = event.pattern_match.group(1)
    covid = Covid()
    country_data = covid.get_status_by_country_name(country)
    # (Removed an unused 'deaths' local; totals are fetched below as needed.)
    if country_data:
        output_text = f"`Confirmed` : {country_data['confirmed']}\n"
        output_text += f"`Active` : {country_data['active']}\n"
        output_text += f"`Deaths` : {country_data['deaths']}\n"
        output_text += f"`Recovered` : {country_data['recovered']}\n\n"
        output_text += f"---------TOTAL----------\n\n"
        output_text += f"`Deaths` : {covid.get_total_deaths()}\n"
        output_text += f"`Recovered` : {covid.get_total_recovered()}\n"
        output_text += f"`Confirmed` : {covid.get_total_confirmed_cases()}\n"
        output_text += f"`Active` : {covid.get_total_active_cases()}\n\n"
        output_text += ("`Update` : "f"{datetime.utcfromtimestamp(country_data['last_update'] // 1000).strftime('%H:%M')}[GMT]\n")
    else:
        output_text = "Invalid Country name"
    await rk.edit(f"Corona Virus Info in {country}:\n\n{output_text}")
@javes.on(rekcah05(pattern=f"covid2 (.*)", allow_sudo=True))
async def _(event):
    """Sudo variant: forward "covid2 <country>" to @NovelCoronaBot and relay."""
    if event.fwd_from:
        return
    input_str = event.pattern_match.group(1)
    reply_message = await event.get_reply_message()
    chat = "@NovelCoronaBot"
    rk = await event.reply("```Processing...```")
    async with event.client.conversation(chat) as conv:
        try:
            # Register the listener before sending so the reply is not missed.
            response = conv.wait_event(events.NewMessage(incoming=True,from_users=1124136160))
            await event.client.send_message(chat, "{}".format(input_str))
            response = await response
        except YouBlockedUserError:
            await rk.reply("```please unlock @NovelCoronaBot```")
            return
        if response.text.startswith("Country"):
            await rk.edit("`Invalid Country name`")
        else:
            # Remove the trigger message and post the bot's answer in its place.
            await event.delete()
            await event.client.send_message(event.chat_id, response.message)
# Register this module's commands in the userbot's global help registry.
CMD_HELP.update({
    "covid":
    "!corona\
\nUsage: slap the taged user\
\n\n!covid <country>\
\nUsage: Get an information about data covid-19 in your country..\
\n\n!covid2 <country>\
\nUsage: same like !covid \
\n\nSudo Commands ( type !help sudo for more info)\
\n.covid , .covid2 , .corona \
"
})
| 43.063492 | 138 | 0.621637 |
39018c4292fe239c33e00400ac1b5d8994f60633 | 5,938 | py | Python | Tkinter-NoteBook/app_notebook.py | PratyushBasu/Learn-and-Work | 57e4c2a06a2556b8db211bab3dbf1727b25405a9 | [
"Apache-2.0"
] | null | null | null | Tkinter-NoteBook/app_notebook.py | PratyushBasu/Learn-and-Work | 57e4c2a06a2556b8db211bab3dbf1727b25405a9 | [
"Apache-2.0"
] | null | null | null | Tkinter-NoteBook/app_notebook.py | PratyushBasu/Learn-and-Work | 57e4c2a06a2556b8db211bab3dbf1727b25405a9 | [
"Apache-2.0"
] | null | null | null |
import os
import tkinter as tk
from tkinter import ttk, filedialog, messagebox
text_contents = dict()
def check_for_changes():
    """Toggle the '*' dirty marker on the current tab's title.

    Compares the hash of the tab's current text against the hash stored at
    last save/open and appends or strips a trailing '*' accordingly.
    """
    widget = get_text_widget()
    tab_title = notebook.tab("current")["text"]
    dirty = hash(widget.get("1.0", "end-1c")) != text_contents[str(widget)]
    if dirty:
        if tab_title[-1] != "*":
            notebook.tab("current", text=tab_title + "*")
    elif tab_title[-1] == "*":
        notebook.tab("current", text=tab_title[:-1])
def get_text_widget():
    """Return the Text widget inside the currently selected notebook tab."""
    container = notebook.nametowidget(notebook.select())  # the tab's Frame
    return container.winfo_children()[0]  # first child is the Text area
def get_current_tab():
    """Return the widget name (tab ID) of the currently selected tab."""
    return notebook.select()
def close_current_tab():
    """Close the selected tab, prompting if it has unsaved changes.

    Note: notebook.forget() needs the tab ID (the container Frame), not the
    Text widget inside it — hence get_current_tab() rather than
    get_text_widget(), which returns a child of the tab.
    """
    # It seemed to me, that the problem emerges, because we are trying to close a tab from
    # the notebook while giving (with the get_text_widget( )) a subset of the tab to the
    # forget method (which needs a tab ID) -> root - notebook - container (Frame) - text_area and Scrollbar
    # Because the Container(Frame) is the actual tab, it needs to be selected to get closed.
    # current = get_text_widget()
    current = get_current_tab()
    if current_tab_unsaved() and not confirm_close():
        return
    # Keep at least one tab open so the editor never shows an empty notebook.
    if len(notebook.tabs()) == 1:
        create_file()
    notebook.forget(current)
def current_tab_unsaved():
    """Return True when the current tab's text differs from its saved hash."""
    widget = get_text_widget()
    return hash(widget.get("1.0", "end-1c")) != text_contents[str(widget)]
def confirm_close():
    """Ask the user to confirm closing despite unsaved changes.

    Returns True (yes), False (no) or None (cancel), per askyesnocancel.
    """
    answer = messagebox.askyesnocancel(
        message="You have unsaved changes. Are you sure you want to close?",
        icon="question",
        title="Unsaved Changes"
    )
    return answer
def confirm_quit():
    """Quit the editor, prompting first if any open tab has unsaved changes."""
    def _tab_dirty(tab_id):
        # The Text widget is the first child of each tab's container Frame.
        text_widget = notebook.nametowidget(tab_id).winfo_children()[0]
        content = text_widget.get("1.0", "end-1c")
        return hash(content) != text_contents[str(text_widget)]

    unsaved = any(_tab_dirty(tab) for tab in notebook.tabs())
    if unsaved and not confirm_close():
        return
    root.destroy()
def create_file(content="", title="Untitled"):
    """Open a new notebook tab containing a Text widget with *content*.

    The tab is a Frame holding the Text area plus a vertical scrollbar; the
    content hash is recorded so dirty-tracking starts from a clean state.
    """
    container = ttk.Frame(notebook)
    container.pack()
    text_area = tk.Text(container)
    text_area.insert("end", content)
    text_area.pack(side="left", fill="both", expand=True)
    notebook.add(container, text=title)
    notebook.select(container)  # make the new tab the active one
    text_contents[str(text_area)] = hash(content)  # hash of the contents
    #https://blog.tecladocode.com/tkinter-scrollable-frames/
    text_scroll = ttk.Scrollbar(container, orient="vertical", command=text_area.yview)
    text_scroll.pack(side="right", fill="y")
    text_area["yscrollcommand"] = text_scroll.set
def open_file():
    """Prompt for a file and load its contents into a new tab.

    A cancelled dialog yields an empty/None path, which surfaces as
    AttributeError or FileNotFoundError — both treated as "cancelled".
    """
    path = filedialog.askopenfilename()
    try:
        name = os.path.basename(path)
        with open(path) as fh:
            text = fh.read().strip()
    except (AttributeError, FileNotFoundError):
        print("Open operation cancelled")
        return
    create_file(text, name)
def save_file():
    """Prompt for a path, write the current tab's text there, and mark the
    tab clean (title set to the file name, saved-content hash updated).
    """
    file_path = filedialog.asksaveasfilename()
    try:
        filename = os.path.basename(file_path)
        text_widget = get_text_widget()
        # "end - 1c" drops the trailing newline Tk always appends.
        content = text_widget.get("1.0", "end - 1c")
        with open(file_path, "w") as file:
            file.write(content)
    except (AttributeError, FileNotFoundError, FileExistsError):
        # Cancelled dialog (empty path) or unwritable target.
        print("Save operation cancelled")
        return
    notebook.tab("current", text=filename)  # tab title becomes the file name
    # BUG FIX: was 'tex_contents' (NameError) — saving crashed here and the
    # tab was never marked clean. Record the saved hash for dirty-tracking.
    text_contents[str(text_widget)] = hash(content)
def show_about_info():
    """Display the About dialog."""
    messagebox.showinfo(
        title="About",
        message="Pratyush's Text Editor is a simple tab enabled text editor to learn about Tkinter",
    )
root = tk.Tk()
root.title("Pratyush's Text Editor")
root.option_add("*tearOff", False)  # disable tear-off menus (platform-dependent default)

main = ttk.Frame(root)
main.pack(fill="both", expand=True, padx=(1), pady=(4, 0))

menubar = tk.Menu(root)
root.config(menu=menubar)

file_menu = tk.Menu(menubar)
help_menu = tk.Menu(menubar)

menubar.add_cascade(menu=file_menu, label="File")
menubar.add_cascade(menu=help_menu, label="Help")

# Command / functions
file_menu.add_command(label="New", command=create_file, accelerator="Ctrl+N")
file_menu.add_command(label="Open...", command=open_file, accelerator="Ctrl+O")
file_menu.add_command(label="Save", command=save_file, accelerator="Ctrl+S")
file_menu.add_command(label="Close Tab", command=close_current_tab, accelerator="Ctrl+W")
file_menu.add_command(label="Exit", command=confirm_quit)

help_menu.add_command(label="About", command=show_about_info)

notebook = ttk.Notebook(main)
notebook.pack(fill="both", expand=True)

create_file()  # start with one empty "Untitled" tab

# IMPROVEMENT: route the window-manager close button (X) through confirm_quit
# so unsaved changes also prompt when the window itself is closed, not only
# via File -> Exit.
root.protocol("WM_DELETE_WINDOW", confirm_quit)

# Keyboard shortcuts, active while the tkinter window has focus.
# https://stackoverflow.com/questions/16082243/how-to-bind-ctrl-in-python-tkinter
root.bind("<KeyPress>", lambda event: check_for_changes())
root.bind("<Control-n>", lambda event: create_file())
root.bind("<Control-o>", lambda event: open_file())
root.bind("<Control-s>", lambda event: save_file())
root.bind("<Control-w>", lambda event: close_current_tab())

root.mainloop()
| 32.626374 | 170 | 0.656282 |
9b482baae981490240af2359aee110ccde0b3921 | 15,424 | py | Python | mne/gui/_marker_gui.py | Okamille/mne-python | 25df6b63ea3cc2eca1855fcafd8bfbcfd7199263 | [
"BSD-3-Clause"
] | 1 | 2020-11-05T21:30:15.000Z | 2020-11-05T21:30:15.000Z | mne/gui/_marker_gui.py | Okamille/mne-python | 25df6b63ea3cc2eca1855fcafd8bfbcfd7199263 | [
"BSD-3-Clause"
] | 2 | 2016-02-27T13:43:15.000Z | 2018-07-18T19:44:45.000Z | mne/gui/_marker_gui.py | Okamille/mne-python | 25df6b63ea3cc2eca1855fcafd8bfbcfd7199263 | [
"BSD-3-Clause"
] | 1 | 2017-03-05T20:44:07.000Z | 2017-03-05T20:44:07.000Z | """Mayavi/traits GUI for averaging two sets of KIT marker points."""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
import sys
import numpy as np
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import confirm, error, FileDialog, OK, YES
from traits.api import (HasTraits, HasPrivateTraits, on_trait_change,
cached_property, Instance, Property, Array, Bool,
Button, Enum, File, Float, List, Str)
from traitsui.api import View, Item, HGroup, VGroup, CheckListEditor
from traitsui.menu import Action, CancelButton
from ..transforms import apply_trans, rotation, translation
from ..coreg import fit_matched_points
from ..io.kit import read_mrk
from ..io.meas_info import _write_dig_points
from ._viewer import PointObject
# File-dialog wildcard strings differ per GUI backend/platform.
backend_is_wx = False # is there a way to determine this?
if backend_is_wx:
    # wx expects "description|pattern" pairs in a single list.
    mrk_wildcard = ['Supported Files (*.sqd, *.mrk, *.txt, *.pickled)|'
                    '*.sqd;*.mrk;*.txt;*.pickled',
                    'Sqd marker file (*.sqd;*.mrk)|*.sqd;*.mrk',
                    'Text marker file (*.txt)|*.txt',
                    'Pickled markers (*.pickled)|*.pickled']
    mrk_out_wildcard = ["Tab separated values file (*.txt)|*.txt"]
else:
    if sys.platform in ('win32', 'linux2'):
        # on Windows and Ubuntu, multiple wildcards does not seem to work
        mrk_wildcard = ["*.sqd", "*.mrk", "*.txt", "*.pickled"]
    else:
        mrk_wildcard = ["*.sqd;*.mrk;*.txt;*.pickled"]
    mrk_out_wildcard = "*.txt"
# Extension appended when saving marker files.
out_ext = '.txt'
# Check-list editors for selecting which of the 5 marker points to use
# (vertical layout for the editable view, horizontal for the basic view).
use_editor_v = CheckListEditor(cols=1, values=[(i, str(i)) for i in range(5)])
use_editor_h = CheckListEditor(cols=5, values=[(i, str(i)) for i in range(5)])
# TraitsUI view layouts for MarkerPointSource (see classes below).
mrk_view_editable = View(
    VGroup('file',
           Item('name', show_label=False, style='readonly'),
           HGroup(
               Item('use', editor=use_editor_v, enabled_when="enabled",
                    style='custom'),
               'points',
           ),
           HGroup(Item('clear', enabled_when="can_save", show_label=False),
                  Item('save_as', enabled_when="can_save",
                       show_label=False)),
           ))
mrk_view_basic = View(
    VGroup('file',
           Item('name', show_label=False, style='readonly'),
           Item('use', editor=use_editor_h, enabled_when="enabled",
                style='custom'),
           HGroup(Item('clear', enabled_when="can_save", show_label=False),
                  Item('edit', show_label=False),
                  Item('switch_left_right', label="Switch Left/Right",
                       show_label=False),
                  Item('reorder', show_label=False),
                  Item('save_as', enabled_when="can_save",
                       show_label=False)),
           ))
# Minimal view used by the manual coordinate editor dialog.
mrk_view_edit = View(VGroup('points'))
class ReorderDialog(HasPrivateTraits):
    """Modal dialog asking the user for a new order of the 5 marker points."""
    # Space-separated permutation of 0..4 typed by the user.
    order = Str("0 1 2 3 4")
    # Parsed integer list derived from ``order`` ([] when unparsable).
    index = Property(List, depends_on='order')
    # Whether ``order`` currently describes a valid permutation of 0..4.
    is_ok = Property(Bool, depends_on='order')
    view = View(
        Item('order', label='New order (five space delimited numbers)'),
        buttons=[CancelButton, Action(name='OK', enabled_when='is_ok')])
    def _get_index(self):
        """Parse the order string into a list of ints; [] if invalid."""
        tokens = self.order.split()
        try:
            return [int(token) for token in tokens]
        except ValueError:
            return []
    def _get_is_ok(self):
        """Return True when the entered order is a permutation of 0..4."""
        return sorted(self.index) == list(range(5))
class MarkerPoints(HasPrivateTraits):
    """Represent 5 marker points (a (5, 3) coordinate array)."""
    points = Array(float, (5, 3))
    can_save = Property(depends_on='points')
    save_as = Button()
    view = View(VGroup('points',
                       Item('save_as', enabled_when='can_save')))
    @cached_property
    def _get_can_save(self):
        # Saving only makes sense once at least one coordinate is non-zero.
        return np.any(self.points)
    def _save_as_fired(self):
        """Ask the user for a destination path and save the points there."""
        dlg = FileDialog(action="save as", wildcard=mrk_out_wildcard,
                         default_filename=self.name,
                         default_directory=self.dir)
        dlg.open()
        if dlg.return_code != OK:
            return
        path, ext = os.path.splitext(dlg.path)
        if not path.endswith(out_ext) and len(ext) != 0:
            # BUG FIX: the exception was previously constructed but never
            # raised, so unsupported extensions were silently accepted.
            raise ValueError("The extension '%s' is not supported." % ext)
        path = path + out_ext
        if os.path.exists(path):
            # BUG FIX: interpolate the path into the message; the original
            # displayed a literal "%r" to the user.
            answer = confirm(None, "The file %r already exists. Should it "
                             "be replaced?" % path, "Overwrite File?")
            if answer != YES:
                return
        self.save(path)
    def save(self, path):
        """Save the marker points.
        Parameters
        ----------
        path : str
            Path to the file to write. The kind of file to write is determined
            based on the extension: '.txt' for tab separated text file,
            '.pickled' for pickled file.
        """
        _write_dig_points(path, self.points)
class MarkerPointSource(MarkerPoints): # noqa: D401
    """MarkerPoints subclass for source files."""
    # Path to the marker file; loading is triggered reactively (see ``load``).
    file = File(filter=mrk_wildcard, exists=True)
    # Basename / directory of ``file`` (None while no file is set).
    name = Property(Str, depends_on='file')
    dir = Property(Str, depends_on='file')
    use = List(list(range(5)), desc="Which points to use for the interpolated "
               "marker.")
    enabled = Property(Bool, depends_on=['points', 'use'])
    clear = Button(desc="Clear the current marker data")
    edit = Button(desc="Edit the marker coordinates manually")
    switch_left_right = Button(
        desc="Switch left and right marker points; this is intended to "
        "correct for markers that were attached in the wrong order")
    reorder = Button(desc="Change the order of the marker points")
    view = mrk_view_basic
    @cached_property
    def _get_enabled(self):
        # Enabled once any coordinate is non-zero (i.e. a file was loaded).
        return np.any(self.points)
    @cached_property
    def _get_dir(self):
        if self.file:
            return os.path.dirname(self.file)
    @cached_property
    def _get_name(self):
        if self.file:
            return os.path.basename(self.file)
    @on_trait_change('file')
    def load(self, fname):
        """Load marker points whenever ``file`` changes; reset on failure."""
        if not fname:
            self.reset_traits(['points'])
            return
        try:
            pts = read_mrk(fname)
        except Exception as err:
            # Show the error dialog and fall back to zeroed points.
            error(None, str(err), "Error Reading mrk")
            self.reset_traits(['points'])
        else:
            self.points = pts
    def _clear_fired(self):
        self.reset_traits(['file', 'points', 'use'])
    def _edit_fired(self):
        # Open the manual coordinate editor.
        self.edit_traits(view=mrk_view_edit)
    def _reorder_fired(self):
        dlg = ReorderDialog()
        ui = dlg.edit_traits(kind='modal')
        if not ui.result: # user pressed cancel
            return
        self.points = self.points[dlg.index]
    def _switch_left_right_fired(self):
        # Swap points 0<->1 and 3<->4 (left/right pairs); point 2 is central.
        self.points = self.points[[1, 0, 2, 4, 3]]
class MarkerPointDest(MarkerPoints): # noqa: D401
    """MarkerPoints subclass that serves for derived points."""
    src1 = Instance(MarkerPointSource)
    src2 = Instance(MarkerPointSource)
    # Display name / directory derived from the two sources.
    name = Property(Str, depends_on='src1.name,src2.name')
    dir = Property(Str, depends_on='src1.dir,src2.dir')
    points = Property(Array(float, (5, 3)),
                      depends_on=['method', 'src1.points', 'src1.use',
                                  'src2.points', 'src2.use'])
    enabled = Property(Bool, depends_on=['points'])
    method = Enum('Transform', 'Average', desc="Transform: estimate a rotation"
                  "/translation from mrk1 to mrk2; Average: use the average "
                  "of the mrk1 and mrk2 coordinates for each point.")
    view = View(VGroup(Item('method', style='custom'),
                       Item('save_as', enabled_when='can_save',
                            show_label=False)))
    @cached_property
    def _get_dir(self):
        return self.src1.dir
    @cached_property
    def _get_name(self):
        """Derive a name: a source name, or their longest common prefix."""
        n1 = self.src1.name
        n2 = self.src2.name
        if not n1:
            if n2:
                return n2
            else:
                return ''
        elif not n2:
            return n1
        if n1 == n2:
            return n1
        i = 0
        l1 = len(n1) - 1
        # BUG FIX: the last index of n2 is len(n2) - 1; the previous
        # ``len(n1) - 2`` could return the wrong name (or raise IndexError
        # when n2 is shorter than n1).
        l2 = len(n2) - 1
        while n1[i] == n2[i]:
            if i == l1:
                return n1
            elif i == l2:
                return n2
            i += 1
        return n1[:i]
    @cached_property
    def _get_enabled(self):
        return np.any(self.points)
    @cached_property
    def _get_points(self):
        """Combine the two sources into one marker set per ``method``."""
        # in case only one or no source is enabled
        if not (self.src1 and self.src1.enabled):
            if (self.src2 and self.src2.enabled):
                return self.src2.points
            else:
                return np.zeros((5, 3))
        elif not (self.src2 and self.src2.enabled):
            return self.src1.points
        # Average method: per-point mean; points present in only one source
        # are copied from that source.
        if self.method == 'Average':
            if len(np.union1d(self.src1.use, self.src2.use)) < 5:
                error(None, "Need at least one source for each point.",
                      "Marker Average Error")
                return np.zeros((5, 3))
            pts = (self.src1.points + self.src2.points) / 2.
            for i in np.setdiff1d(self.src1.use, self.src2.use):
                pts[i] = self.src1.points[i]
            for i in np.setdiff1d(self.src2.use, self.src1.use):
                pts[i] = self.src2.points[i]
            return pts
        # Transform method: fit a rigid src1->src2 transform on the shared
        # points, then move each source halfway toward the other.
        idx = np.intersect1d(np.array(self.src1.use),
                             np.array(self.src2.use), assume_unique=True)
        if len(idx) < 3:
            error(None, "Need at least three shared points for trans"
                  "formation.", "Marker Interpolation Error")
            return np.zeros((5, 3))
        src_pts = self.src1.points[idx]
        tgt_pts = self.src2.points[idx]
        est = fit_matched_points(src_pts, tgt_pts, out='params')
        # Half of the estimated rotation/translation puts the result midway.
        rot = np.array(est[:3]) / 2.
        tra = np.array(est[3:]) / 2.
        if len(self.src1.use) == 5:
            trans = np.dot(translation(*tra), rotation(*rot))
            pts = apply_trans(trans, self.src1.points)
        elif len(self.src2.use) == 5:
            trans = np.dot(translation(* -tra), rotation(* -rot))
            pts = apply_trans(trans, self.src2.points)
        else:
            trans1 = np.dot(translation(*tra), rotation(*rot))
            pts = apply_trans(trans1, self.src1.points)
            trans2 = np.dot(translation(* -tra), rotation(* -rot))
            for i in np.setdiff1d(self.src2.use, self.src1.use):
                pts[i] = apply_trans(trans2, self.src2.points[i])
        return pts
class CombineMarkersModel(HasPrivateTraits):
    """Combine markers model: two sources and the derived marker set."""
    mrk1_file = Instance(File)
    mrk2_file = Instance(File)
    mrk1 = Instance(MarkerPointSource)
    mrk2 = Instance(MarkerPointSource)
    mrk3 = Instance(MarkerPointDest)
    clear = Button(desc="Clear the current marker data")
    # stats
    distance = Property(Str, depends_on=['mrk1.points', 'mrk2.points'])
    def _clear_fired(self):
        # Clear both sources; mrk3 recomputes reactively.
        self.mrk1.clear = True
        self.mrk2.clear = True
        self.mrk3.reset_traits(['method'])
    def _mrk1_default(self):
        return MarkerPointSource()
    def _mrk1_file_default(self):
        # Expose the source's 'file' trait so it can be set from outside.
        return self.mrk1.trait('file')
    def _mrk2_default(self):
        return MarkerPointSource()
    def _mrk2_file_default(self):
        return self.mrk2.trait('file')
    def _mrk3_default(self):
        return MarkerPointDest(src1=self.mrk1, src2=self.mrk2)
    @cached_property
    def _get_distance(self):
        """Tab-separated per-point mrk1->mrk2 distances in mm ('' if unset)."""
        if (self.mrk1 is None or self.mrk2 is None or
                (not np.any(self.mrk1.points)) or
                (not np.any(self.mrk2.points))):
            return ""
        ds = np.sqrt(np.sum((self.mrk1.points - self.mrk2.points) ** 2, 1))
        desc = '\t'.join('%.1f mm' % (d * 1000) for d in ds)
        return desc
class CombineMarkersPanel(HasTraits): # noqa: D401
    """Has two marker points sources and interpolates to a third one."""
    model = Instance(CombineMarkersModel, ())
    # model references for UI
    mrk1 = Instance(MarkerPointSource)
    mrk2 = Instance(MarkerPointSource)
    mrk3 = Instance(MarkerPointDest)
    distance = Str
    # Visualization
    scene = Instance(MlabSceneModel)
    scale = Float(5e-3)
    mrk1_obj = Instance(PointObject)
    mrk2_obj = Instance(PointObject)
    mrk3_obj = Instance(PointObject)
    # Transform applied to all points before display (e.g. head coordinates).
    trans = Array()
    view = View(VGroup(VGroup(Item('mrk1', style='custom'),
                              Item('mrk1_obj', style='custom'),
                              show_labels=False,
                              label="Source Marker 1", show_border=True),
                       VGroup(Item('mrk2', style='custom'),
                              Item('mrk2_obj', style='custom'),
                              show_labels=False,
                              label="Source Marker 2", show_border=True),
                       VGroup(Item('distance', style='readonly'),
                              label='Stats', show_border=True),
                       VGroup(Item('mrk3', style='custom'),
                              Item('mrk3_obj', style='custom'),
                              show_labels=False,
                              label="New Marker", show_border=True),
                       ))
    def _mrk1_default(self):
        return self.model.mrk1
    def _mrk2_default(self):
        return self.model.mrk2
    def _mrk3_default(self):
        return self.model.mrk3
    def __init__(self, *args, **kwargs): # noqa: D102
        super(CombineMarkersPanel, self).__init__(*args, **kwargs)
        # Mirror the model's distance string into this panel for display.
        self.model.sync_trait('distance', self, 'distance', mutual=False)
        # One colored point cloud per marker set; visibility follows the
        # corresponding source/destination 'enabled' trait.
        self.mrk1_obj = PointObject(scene=self.scene,
                                    color=(0.608, 0.216, 0.216),
                                    point_scale=self.scale)
        self.model.mrk1.sync_trait(
            'enabled', self.mrk1_obj, 'visible', mutual=False)
        self.mrk2_obj = PointObject(scene=self.scene,
                                    color=(0.216, 0.608, 0.216),
                                    point_scale=self.scale)
        self.model.mrk2.sync_trait(
            'enabled', self.mrk2_obj, 'visible', mutual=False)
        self.mrk3_obj = PointObject(scene=self.scene,
                                    color=(0.588, 0.784, 1.),
                                    point_scale=self.scale)
        self.model.mrk3.sync_trait(
            'enabled', self.mrk3_obj, 'visible', mutual=False)
    @on_trait_change('model:mrk1:points,trans')
    def _update_mrk1(self):
        # Re-render marker set 1 whenever its points or the transform change.
        if self.mrk1_obj is not None:
            self.mrk1_obj.points = apply_trans(self.trans,
                                               self.model.mrk1.points)
    @on_trait_change('model:mrk2:points,trans')
    def _update_mrk2(self):
        if self.mrk2_obj is not None:
            self.mrk2_obj.points = apply_trans(self.trans,
                                               self.model.mrk2.points)
    @on_trait_change('model:mrk3:points,trans')
    def _update_mrk3(self):
        if self.mrk3_obj is not None:
            self.mrk3_obj.points = apply_trans(self.trans,
                                               self.model.mrk3.points)
| 33.824561 | 79 | 0.565288 |
ac456eb4462138315ab3d0779f626a9d8c346023 | 1,077 | py | Python | apps/agentcontroller/jumpscripts/extended/alerts/alert_cpu_core.py | Jumpscale/jumpscale6_core | 0502ddc1abab3c37ed982c142d21ea3955d471d3 | [
"BSD-2-Clause"
] | 1 | 2015-10-26T10:38:13.000Z | 2015-10-26T10:38:13.000Z | apps/agentcontroller/jumpscripts/extended/alerts/alert_cpu_core.py | Jumpscale/jumpscale6_core | 0502ddc1abab3c37ed982c142d21ea3955d471d3 | [
"BSD-2-Clause"
] | null | null | null | apps/agentcontroller/jumpscripts/extended/alerts/alert_cpu_core.py | Jumpscale/jumpscale6_core | 0502ddc1abab3c37ed982c142d21ea3955d471d3 | [
"BSD-2-Clause"
] | null | null | null |
from JumpScale import j
# JumpScale jumpscript metadata: these module-level names are read by the
# agent controller scheduler, so they must keep exactly these identifiers.
descr = """
Check on average cpu
"""
organization = "jumpscale"
author = "deboeckj@codescalers.com"
license = "bsd"
version = "1.0"
period = 15*60 # always in sec
startatboot = True
order = 1
enable = True
# NOTE(review): 'async' became a reserved keyword in Python 3.7; this
# jumpscript appears to target Python 2 — confirm before porting.
async = True
log = False
queue ='process'
roles = ['master']
def action():
    """Alert on per-node hourly-average CPU usage from the stats store.

    Sends a watchdog event per node: CRITICAL above 95%, WARNING above
    80%, OK otherwise. Silently does nothing when the watchdog client is
    unavailable.
    """
    try:
        import JumpScale.baselib.watchdog.client
    except Exception:
        # Watchdog client not installed on this host; nothing to report to.
        return
    import JumpScale.grid.osis
    osis_client = j.core.osis.getClientByInstance('main')
    stats_client = j.core.osis.getClientForCategory(osis_client, 'system', 'stats')
    query = {'target': 'smartSummarize(n*.system.cpu.percent, "1hour", "avg")',
             'from': '-1h'}
    for node_result in stats_client.search(query):
        # Last datapoint is the most recent hourly average.
        avg_cpu, _timestamp = node_result['datapoints'][-1]
        target = node_result['target']
        # Node id is embedded in the series name: smartSummarize(n<NID>....
        node_id = int(target[len('smartSummarize(n'):].split('.')[0])
        if avg_cpu > 95:
            severity = 'CRITICAL'
        elif avg_cpu > 80:
            severity = 'WARNING'
        else:
            severity = 'OK'
        j.tools.watchdog.client.send("cpu.core", severity, avg_cpu, nid=node_id)
| 25.642857 | 107 | 0.616527 |
4d51c8d8cf2e9b9fe594088a7ac3237f69db6402 | 120 | py | Python | baekjoon/Python/10951.py | Lumia1108/TIL | fe2e233d6d05c7d04f50f688f6c168e4d6d4ce46 | [
"MIT"
] | null | null | null | baekjoon/Python/10951.py | Lumia1108/TIL | fe2e233d6d05c7d04f50f688f6c168e4d6d4ce46 | [
"MIT"
] | null | null | null | baekjoon/Python/10951.py | Lumia1108/TIL | fe2e233d6d05c7d04f50f688f6c168e4d6d4ce46 | [
"MIT"
] | null | null | null | import sys
# Baekjoon 10951: read "a b" integer pairs until EOF and print each sum.
while True:
    try:
        a, b = map(int, input().split())
        print(a + b)
    except (EOFError, ValueError):
        # EOFError: input exhausted; ValueError: blank/malformed trailing
        # line. The previous bare ``except`` also swallowed
        # KeyboardInterrupt and genuine bugs.
        break
92d21dca7b899407636831542f8e7558613eb690 | 359 | py | Python | scrapySelector/demo1.py | silianpan/seal-spider-demo | 23bf013d08f9edaf23823bc3787f579bccd0ec3a | [
"Apache-2.0"
] | null | null | null | scrapySelector/demo1.py | silianpan/seal-spider-demo | 23bf013d08f9edaf23823bc3787f579bccd0ec3a | [
"Apache-2.0"
] | 3 | 2021-09-08T01:11:16.000Z | 2022-03-02T15:14:03.000Z | scrapySelector/demo1.py | silianpan/seal-spider-demo | 23bf013d08f9edaf23823bc3787f579bccd0ec3a | [
"Apache-2.0"
] | 1 | 2019-08-04T09:57:29.000Z | 2019-08-04T09:57:29.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-06-19 22:13
# @Author : liupan
# @Site :
# @File : demo1.py
# @Software: PyCharm
# Minimal scrapy.Selector demo: extract the <title> text from an HTML string.
from scrapy import Selector
# NOTE(review): the closing tag '</ html>' is malformed; Selector's lenient
# HTML parser copes, but confirm it is intentional.
body= '<html><head><title>Hello World</title></head><body></body> </ html>'
selector = Selector(text=body)
# extract_first() returns the first matching text node, or None if no match.
title = selector.xpath('//title/text()').extract_first()
print(title)
| 25.642857 | 75 | 0.637883 |
208bef308bf8ee7de3a1455aa127f3caf7b8f6df | 1,097 | py | Python | optimization_based_action_selection_performance.py | thomasvandewiel-sai/ros2_launch_test | 97008e33ae9ec763fcbd72ce2bc09013a9e62618 | [
"MIT"
] | null | null | null | optimization_based_action_selection_performance.py | thomasvandewiel-sai/ros2_launch_test | 97008e33ae9ec763fcbd72ce2bc09013a9e62618 | [
"MIT"
] | null | null | null | optimization_based_action_selection_performance.py | thomasvandewiel-sai/ros2_launch_test | 97008e33ae9ec763fcbd72ce2bc09013a9e62618 | [
"MIT"
] | null | null | null | import sys
import pickle
import numpy as np
# Acceptance limits for the benchmark statistics. All zero, so any strictly
# positive measurement fails the check (placeholder thresholds).
MAX_ALLOWED_COMPUTATION_TIME = 0 # ms
MAX_ALLOWED_COMPUTATION_TIME_MEAN = 0 # ms
MAX_ALLOWED_COMPUTATION_TIME_STD = 0 # ms
MAX_ALLOWED_PREDICTION_ERROR = 0 # m
MAX_ALLOWED_PREDICTION_ERROR_MEAN = 0 # m
MAX_ALLOWED_PREDICTION_ERROR_STD = 0 # m
def main():
    """Check pickled benchmark results against the limits above.

    Returns 0 when every max/mean/std statistic is within bounds, 1 otherwise.
    """
    # Load the (computation time, prediction error) arrays dumped by the run.
    with open("./dummy.pkl", "rb") as f:
        optimization_time, target_error = pickle.load(f)
    # A single excursion beyond any limit fails the whole check.
    violated = (
        np.max(optimization_time) > MAX_ALLOWED_COMPUTATION_TIME
        or np.mean(optimization_time) > MAX_ALLOWED_COMPUTATION_TIME_MEAN
        or np.std(optimization_time) > MAX_ALLOWED_COMPUTATION_TIME_STD
        or np.max(target_error) > MAX_ALLOWED_PREDICTION_ERROR
        or np.mean(target_error) > MAX_ALLOWED_PREDICTION_ERROR_MEAN
        or np.std(target_error) > MAX_ALLOWED_PREDICTION_ERROR_STD
    )
    return 1 if violated else 0
if __name__ == "__main__":
    # Exit code doubles as the CI pass/fail signal (0 = within limits).
    status = main()
    print("Optimization-based action selection exiting with status: ", status)
    sys.exit(status)
| 27.425 | 78 | 0.7165 |
e151f14c31e70cc75c59587453b145d6f787bf5c | 2,331 | py | Python | conversationinsights/policies/policy.py | osswangxining/conversationinsights-dialogue | 07490b6307667b0d0ddc2c4fb8aa4f8d7b853df9 | [
"Apache-2.0"
] | 8 | 2017-10-10T02:18:09.000Z | 2019-12-16T15:14:13.000Z | conversationinsights/policies/policy.py | osswangxining/conversationinsights-dialogue | 07490b6307667b0d0ddc2c4fb8aa4f8d7b853df9 | [
"Apache-2.0"
] | null | null | null | conversationinsights/policies/policy.py | osswangxining/conversationinsights-dialogue | 07490b6307667b0d0ddc2c4fb8aa4f8d7b853df9 | [
"Apache-2.0"
] | 2 | 2018-06-26T02:03:41.000Z | 2018-08-06T10:54:46.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import numpy as np
from builtins import object
from numpy.core.records import ndarray
from typing import Any
from typing import List
from typing import Optional
from typing import Text
from conversationinsights.domain import Domain
from conversationinsights.featurizers import Featurizer
from conversationinsights.trackers import DialogueStateTracker
logger = logging.getLogger(__name__)
class Policy(object):
    """Base class for dialogue policies mapping tracker state to actions."""
    # Subclasses that implement ``continue_training`` set this to True.
    SUPPORTS_ONLINE_TRAINING = False
    # Default number of past turns used for featurization.
    MAX_HISTORY_DEFAULT = 3
    def __init__(self, featurizer=None, max_history=None):
        # type: (Optional[Featurizer], Optional[int]) -> None
        self.featurizer = featurizer
        self.max_history = max_history
    def featurize(self, tracker, domain):
        # type: (DialogueStateTracker, Domain) -> ndarray
        """Transform tracker into a vector representation.
        The tracker, consisting of multiple turns, will be transformed
        into a float vector which can be used by a ML model."""
        x = domain.feature_vector_for_tracker(self.featurizer, tracker,
                                              self.max_history)
        return np.array(x)
    def predict_action_probabilities(self, tracker, domain):
        # type: (DialogueStateTracker, Domain) -> List[float]
        """Return a probability per domain action (base class: none)."""
        return []
    def prepare(self, featurizer, max_history):
        """Attach the featurizer/history settings before training or use."""
        self.featurizer = featurizer
        self.max_history = max_history
    def train(self, X, y, domain, **kwargs):
        # type: (ndarray, List[int], Domain, **Any) -> None
        """Trains the policy on given training data."""
        raise NotImplementedError
    def continue_training(self, X, y, domain, **kwargs):
        """Continues training an already trained policy.
        This doesn't need to be supported by every policy. If it is supported,
        the policy can be used for online training and the implementation for
        the continued training should be put into this function."""
        pass
    def persist(self, path):
        # type: (Text) -> None
        """Persists the policy to storage."""
        pass
    @classmethod
    def load(cls, path, featurizer, max_history):
        """Load a persisted policy from ``path`` (subclass responsibility)."""
        raise NotImplementedError
| 30.671053 | 78 | 0.692407 |
b269c085d0da05a3cf604f3e40cebdc546b1e26e | 274 | py | Python | exercicios/Lista1/Q12.py | AlexandrePeBrito/CursoUdemyPython | 3de58cb30c9f333b32078309847179ff3f9d7e22 | [
"MIT"
] | null | null | null | exercicios/Lista1/Q12.py | AlexandrePeBrito/CursoUdemyPython | 3de58cb30c9f333b32078309847179ff3f9d7e22 | [
"MIT"
] | null | null | null | exercicios/Lista1/Q12.py | AlexandrePeBrito/CursoUdemyPython | 3de58cb30c9f333b32078309847179ff3f9d7e22 | [
"MIT"
] | null | null | null | #Leia uma distancia em milhas e apresente-a convertida em quilometros.
#A formula de conversao eh K =M*1.61 ,
# sendo K a distancia em quilometros e M em milhas.
m=float(input("Informe a distancia em milhas: "))
K=m*1.61
print(f"A distancia convertida em km eh {K}km/h") | 30.444444 | 70 | 0.729927 |
4cca06d760c62de87601467ba3acb9a574f518de | 8,821 | py | Python | tests/milvus_python_test/conftest.py | bo-huang/milvus | a2e7a91964b2a86fd5d58beefea14734987e5cdf | [
"Apache-2.0"
] | 1 | 2021-10-01T18:16:34.000Z | 2021-10-01T18:16:34.000Z | tests/milvus_python_test/conftest.py | bo-huang/milvus | a2e7a91964b2a86fd5d58beefea14734987e5cdf | [
"Apache-2.0"
] | null | null | null | tests/milvus_python_test/conftest.py | bo-huang/milvus | a2e7a91964b2a86fd5d58beefea14734987e5cdf | [
"Apache-2.0"
] | 2 | 2020-03-02T05:16:57.000Z | 2020-03-04T06:05:55.000Z | import socket
import pdb
import logging
import pytest
from utils import gen_unique_str
from milvus import Milvus, IndexType, MetricType
from utils import *
index_file_size = 10
def pytest_addoption(parser):
    """Register the Milvus connection command-line options."""
    for flag, default in (("--ip", "localhost"),
                          ("--port", 19530),
                          ("--http-port", 19121),
                          ("--handler", "GRPC")):
        parser.addoption(flag, action="store", default=default)
def check_server_connection(request):
    """Return True when the configured Milvus host looks reachable.

    Local addresses are trusted without probing; remote ones are checked
    with a TCP address resolution.
    """
    ip = request.config.getoption("--ip")
    port = request.config.getoption("--port")
    if not ip or ip in ('localhost', '127.0.0.1'):
        return True
    try:
        socket.getaddrinfo(ip, port, 0, 0, socket.IPPROTO_TCP)
    except Exception as e:
        print("Socket connnet failed: %s" % str(e))
        return False
    return True
@pytest.fixture(scope="module")
def connect(request):
    """Yield a connected Milvus client; abort the session on failure."""
    ip = request.config.getoption("--ip")
    port = request.config.getoption("--port")
    http_port = request.config.getoption("--http-port")
    handler = request.config.getoption("--handler")
    milvus = get_milvus(handler=handler)
    try:
        if handler == "HTTP":
            # The HTTP handler talks to the REST port, not the gRPC one.
            port = http_port
        status = milvus.connect(host=ip, port=port)
        logging.getLogger().info(status)
        if not status.OK():
            # try again
            logging.getLogger().info("------------------------------------")
            logging.getLogger().info("Try to connect again")
            logging.getLogger().info("------------------------------------")
            res = milvus.connect(host=ip, port=port)
    except Exception as e:
        logging.getLogger().error(str(e))
        # No server available: stop the whole test session early.
        pytest.exit("Milvus server can not connected, exit pytest ...")
    def fin():
        # Best-effort disconnect at module teardown.
        try:
            milvus.disconnect()
        except:
            pass
    request.addfinalizer(fin)
    return milvus
@pytest.fixture(scope="module")
def dis_connect(request):
    """Return a Milvus client without connecting (for 'not connected' tests)."""
    ip = request.config.getoption("--ip")  # NOTE(review): read but unused
    port = request.config.getoption("--port")  # NOTE(review): read but unused
    http_port = request.config.getoption("--http-port")  # NOTE(review): unused
    handler = request.config.getoption("--handler")
    milvus = get_milvus(handler=handler)
    return milvus
@pytest.fixture(scope="module")
def args(request):
    """Expose the command-line connection options as a dict."""
    opt = request.config.getoption
    ip = opt("--ip")
    port = opt("--port")
    http_port = opt("--http-port")
    handler = opt("--handler")
    # The HTTP handler talks to the REST port instead of the gRPC one.
    if handler == "HTTP":
        port = http_port
    return {"ip": ip, "port": port, "handler": handler}
@pytest.fixture(scope="module")
def milvus(request):
    """Return a bare Milvus client built for the configured handler."""
    handler = request.config.getoption("--handler")
    return get_milvus(handler=handler)
@pytest.fixture(scope="function")
def collection(request, connect):
    """Create a fresh L2-metric collection; drop all collections afterwards."""
    base_name = getattr(request.module, "collection_id", "test")
    collection_name = gen_unique_str(base_name)
    dim = getattr(request.module, "dim", "128")
    status = connect.create_collection({
        'collection_name': collection_name,
        'dimension': dim,
        'index_file_size': index_file_size,
        'metric_type': MetricType.L2,
    })
    if not status.OK():
        pytest.exit("collection can not be created, exit pytest ...")
    def teardown():
        _, names = connect.show_collections()
        for name in names:
            connect.drop_collection(name)
    request.addfinalizer(teardown)
    return collection_name
@pytest.fixture(scope="function")
def ip_collection(request, connect):
    """Create a fresh IP-metric collection; drop all collections afterwards."""
    base_name = getattr(request.module, "collection_id", "test")
    collection_name = gen_unique_str(base_name)
    dim = getattr(request.module, "dim", "128")
    status = connect.create_collection({
        'collection_name': collection_name,
        'dimension': dim,
        'index_file_size': index_file_size,
        'metric_type': MetricType.IP,
    })
    if not status.OK():
        pytest.exit("collection can not be created, exit pytest ...")
    def teardown():
        _, names = connect.show_collections()
        for name in names:
            connect.drop_collection(name)
    request.addfinalizer(teardown)
    return collection_name
@pytest.fixture(scope="function")
def jac_collection(request, connect):
    """Create a fresh JACCARD-metric collection; drop all afterwards."""
    base_name = getattr(request.module, "collection_id", "test")
    collection_name = gen_unique_str(base_name)
    dim = getattr(request.module, "dim", "128")
    status = connect.create_collection({
        'collection_name': collection_name,
        'dimension': dim,
        'index_file_size': index_file_size,
        'metric_type': MetricType.JACCARD,
    })
    if not status.OK():
        pytest.exit("collection can not be created, exit pytest ...")
    def teardown():
        _, names = connect.show_collections()
        for name in names:
            connect.drop_collection(name)
    request.addfinalizer(teardown)
    return collection_name
@pytest.fixture(scope="function")
def ham_collection(request, connect):
    """Create a fresh HAMMING-metric collection; drop all afterwards."""
    base_name = getattr(request.module, "collection_id", "test")
    collection_name = gen_unique_str(base_name)
    dim = getattr(request.module, "dim", "128")
    status = connect.create_collection({
        'collection_name': collection_name,
        'dimension': dim,
        'index_file_size': index_file_size,
        'metric_type': MetricType.HAMMING,
    })
    if not status.OK():
        pytest.exit("collection can not be created, exit pytest ...")
    def teardown():
        _, names = connect.show_collections()
        for name in names:
            connect.drop_collection(name)
    request.addfinalizer(teardown)
    return collection_name
@pytest.fixture(scope="function")
def tanimoto_collection(request, connect):
    """Create a fresh TANIMOTO-metric collection; drop all afterwards."""
    base_name = getattr(request.module, "collection_id", "test")
    collection_name = gen_unique_str(base_name)
    dim = getattr(request.module, "dim", "128")
    status = connect.create_collection({
        'collection_name': collection_name,
        'dimension': dim,
        'index_file_size': index_file_size,
        'metric_type': MetricType.TANIMOTO,
    })
    if not status.OK():
        pytest.exit("collection can not be created, exit pytest ...")
    def teardown():
        _, names = connect.show_collections()
        for name in names:
            connect.drop_collection(name)
    request.addfinalizer(teardown)
    return collection_name
@pytest.fixture(scope="function")
def substructure_collection(request, connect):
    """Create a fresh SUBSTRUCTURE-metric collection; drop all afterwards."""
    base_name = getattr(request.module, "collection_id", "test")
    collection_name = gen_unique_str(base_name)
    dim = getattr(request.module, "dim", "128")
    status = connect.create_collection({
        'collection_name': collection_name,
        'dimension': dim,
        'index_file_size': index_file_size,
        'metric_type': MetricType.SUBSTRUCTURE,
    })
    if not status.OK():
        pytest.exit("collection can not be created, exit pytest ...")
    def teardown():
        _, names = connect.show_collections()
        for name in names:
            connect.drop_collection(name)
    request.addfinalizer(teardown)
    return collection_name
@pytest.fixture(scope="function")
def superstructure_collection(request, connect):
    """Create a fresh SUPERSTRUCTURE-metric collection; drop all afterwards."""
    base_name = getattr(request.module, "collection_id", "test")
    collection_name = gen_unique_str(base_name)
    dim = getattr(request.module, "dim", "128")
    status = connect.create_collection({
        'collection_name': collection_name,
        'dimension': dim,
        'index_file_size': index_file_size,
        'metric_type': MetricType.SUPERSTRUCTURE,
    })
    if not status.OK():
        pytest.exit("collection can not be created, exit pytest ...")
    def teardown():
        _, names = connect.show_collections()
        for name in names:
            connect.drop_collection(name)
    request.addfinalizer(teardown)
    return collection_name
| 35.143426 | 76 | 0.668518 |
33fcefd0d5a91c79b77c614eddb5dd3ca6c61009 | 1,358 | py | Python | 05/31-32.py | LCL121/start-leanring-pytorch | f077ec892b538f3bff5825acce02872d31a1ab5d | [
"MIT"
] | null | null | null | 05/31-32.py | LCL121/start-leanring-pytorch | f077ec892b538f3bff5825acce02872d31a1ab5d | [
"MIT"
] | null | null | null | 05/31-32.py | LCL121/start-leanring-pytorch | f077ec892b538f3bff5825acce02872d31a1ab5d | [
"MIT"
] | null | null | null | import torch
# norm ==> Returns the matrix norm or vector norm of a given tensor.
# Defaults to the 2-norm.
# dim: the norm can be computed along a chosen dimension.
a = torch.full([8], 1, dtype=torch.float)
b = a.view(2, 4)
c = a.view(2, 2, 2)
print(a.shape, b.shape, c.shape)
print(a.norm(1), b.norm(1), c.norm(1))
print(a.norm(), b.norm(), c.norm())
print(b.norm(1, dim=1))
print(b.norm(2, dim=1))
# median ==> middle value over all elements
# mean ==> average over all elements
# prod ==> product over all elements
d = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.float)
print(d.median(), d.mean(), d.prod())
# max ==> largest of all elements
# min ==> smallest of all elements
# argmax ==> index of the largest element
# argmin ==> index of the smallest element
# Note: the tensor is flattened before taking the max/min;
# a dim argument selects which dimension to reduce over,
# keepdim keeps the output dimensionality equal to the input's.
print(d.argmax(), d.argmin())
print(d.argmax(dim=1), d.argmin(dim=1))
# topk ==> take the k leading values
# largest ==> defaults to True. True takes the largest; False the smallest.
e = torch.tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                  [11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
                  [21, 22, 23, 24, 25, 26, 27, 28, 29, 30],
                  [31, 32, 33, 34, 35, 36, 37, 38, 39, 40]], dtype=torch.float)
print(e.topk(3, dim=1))
print(e.topk(3, dim=1, largest=False))
# kthvalue ==> take the k-th smallest value
# Returns the k-th smallest value of the input tensor along dim
# (the last dimension when dim is not given).
# Returns a (value, indices) tuple, where indices is the position of the
# k-th smallest value along dim in the original tensor.
print(e.kthvalue(3, dim=1))
# >, >=, <, <=, !=, ==
# each element is compared against the given scalar
print(e > 5)
# eq ==> element-wise comparison of two tensors at matching positions
| 26.627451 | 79 | 0.600147 |
53b007ad1ed5a2ce093b748f21f659ffdcdcbe81 | 37,473 | py | Python | tensorflow/python/data/ops/iterator_ops.py | DwayneDuane/tensorflow | c90698124aa164e7683e3a9d03b69e9aa8461244 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/data/ops/iterator_ops.py | DwayneDuane/tensorflow | c90698124aa164e7683e3a9d03b69e9aa8461244 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/data/ops/iterator_ops.py | DwayneDuane/tensorflow | c90698124aa164e7683e3a9d03b69e9aa8461244 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for Iterators."""
import abc
import threading
import warnings
import six
from tensorflow.python.data.ops import optional_ops
from tensorflow.python.data.ops import options as options_lib
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.trackable import base as trackable
from tensorflow.python.training.saver import BaseSaverBuilder
from tensorflow.python.util import _pywrap_utils
from tensorflow.python.util import deprecation
from tensorflow.python.util import lazy_loader
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import tf_export
# NOTE(mrry): It is legitimate to call `Iterator.get_next()` multiple
# times, e.g. when you are distributing different elements to multiple
# devices in a single step. However, a common pitfall arises when
# users call `Iterator.get_next()` in each iteration of their training
# loop. `Iterator.get_next()` adds ops to the graph, and executing
# each op allocates resources (including threads); as a consequence,
# invoking it in every iteration of a training loop causes slowdown
# and eventual resource exhaustion. To guard against this outcome, we
# log a warning when the number of uses crosses a threshold of suspicion.
GET_NEXT_CALL_WARNING_THRESHOLD = 32
GET_NEXT_CALL_WARNING_MESSAGE = (
    "An unusually high number of `Iterator.get_next()` calls was detected. "
    "This often indicates that `Iterator.get_next()` is being called inside "
    "a training loop, which will cause gradual slowdown and eventual resource "
    "exhaustion. If this is the case, restructure your code to call "
    "`next_element = iterator.get_next()` once outside the loop, and use "
    "`next_element` as the input to some computation that is invoked inside "
    "the loop.")
# NOTE(jsimsa): Threshold used as a heuristic to check for infinite loop during
# tf.function tracing.
GET_NEXT_CALL_ERROR_THRESHOLD = 32
GET_NEXT_CALL_ERROR_MESSAGE = (
    "An unusually high number of `tf.data.Iterator.get_next()` calls was "
    "detected. This suggests that the `for elem in dataset: ...` idiom is used "
    "within tf.function with AutoGraph disabled. This idiom is only supported "
    "when AutoGraph is enabled.")
# Collection of all IteratorResources in the `Graph`.
GLOBAL_ITERATORS = "iterators"
# Loaded lazily so the AutoGraph context module is only imported on first use.
autograph_ctx = lazy_loader.LazyLoader(
    "autograph_ctx", globals(),
    "tensorflow.python.autograph.core.ag_ctx")
def _device_stack_is_empty():
  """Returns True if no device placement is currently in effect."""
  if context.executing_eagerly():
    # In eager mode the "stack" is empty iff no device name has been set.
    return context.context().device_name is None
  graph = ops.get_default_graph()
  stack = graph._device_functions_outer_to_inner  # pylint: disable=protected-access
  return not stack
@tf_export(v1=["data.Iterator"])
class Iterator(trackable.Trackable):
  """Represents the state of iterating through a `Dataset`."""
  def __init__(self, iterator_resource, initializer, output_types,
               output_shapes, output_classes):
    """Creates a new iterator from the given iterator resource.
    Note: Most users will not call this initializer directly, and will
    instead use `Dataset.make_initializable_iterator()` or
    `Dataset.make_one_shot_iterator()`.
    Args:
      iterator_resource: A `tf.resource` scalar `tf.Tensor` representing the
        iterator.
      initializer: A `tf.Operation` that should be run to initialize this
        iterator.
      output_types: A (nested) structure of `tf.DType` objects corresponding to
        each component of an element of this iterator.
      output_shapes: A (nested) structure of `tf.TensorShape` objects
        corresponding to each component of an element of this iterator.
      output_classes: A (nested) structure of Python `type` objects
        corresponding to each component of an element of this iterator.
    Raises:
      TypeError: If `output_types`, `output_shapes`, or `output_classes` is not
        specified.
    """
    self._iterator_resource = iterator_resource
    self._initializer = initializer
    if (output_types is None or output_shapes is None
        or output_classes is None):
      raise ValueError(
          "All of `output_types`, `output_shapes`, and `output_classes` "
          "must be specified to create an iterator. Got "
          f"`output_types` = {output_types!r}, "
          f"`output_shapes` = {output_shapes!r}, "
          f"`output_classes` = {output_classes!r}.")
    self._element_spec = structure.convert_legacy_structure(
        output_types, output_shapes, output_classes)
    self._flat_tensor_shapes = structure.get_flat_tensor_shapes(
        self._element_spec)
    self._flat_tensor_types = structure.get_flat_tensor_types(
        self._element_spec)
    self._string_handle = gen_dataset_ops.iterator_to_string_handle(
        self._iterator_resource)
    # Number of `get_next()` calls so far; used by `get_next()` to warn when
    # the count crosses GET_NEXT_CALL_WARNING_THRESHOLD.
    self._get_next_call_count = 0
    ops.add_to_collection(GLOBAL_ITERATORS, self._iterator_resource)
  @staticmethod
  def from_structure(output_types,
                     output_shapes=None,
                     shared_name=None,
                     output_classes=None):
    """Creates a new, uninitialized `Iterator` with the given structure.
    This iterator-constructing method can be used to create an iterator that
    is reusable with many different datasets.
    The returned iterator is not bound to a particular dataset, and it has
    no `initializer`. To initialize the iterator, run the operation returned by
    `Iterator.make_initializer(dataset)`.
    The following is an example
    ```python
    iterator = Iterator.from_structure(tf.int64, tf.TensorShape([]))
    dataset_range = Dataset.range(10)
    range_initializer = iterator.make_initializer(dataset_range)
    dataset_evens = dataset_range.filter(lambda x: x % 2 == 0)
    evens_initializer = iterator.make_initializer(dataset_evens)
    # Define a model based on the iterator; in this example, the model_fn
    # is expected to take scalar tf.int64 Tensors as input (see
    # the definition of 'iterator' above).
    prediction, loss = model_fn(iterator.get_next())
    # Train for `num_epochs`, where for each epoch, we first iterate over
    # dataset_range, and then iterate over dataset_evens.
    for _ in range(num_epochs):
      # Initialize the iterator to `dataset_range`
      sess.run(range_initializer)
      while True:
        try:
          pred, loss_val = sess.run([prediction, loss])
        except tf.errors.OutOfRangeError:
          break
      # Initialize the iterator to `dataset_evens`
      sess.run(evens_initializer)
      while True:
        try:
          pred, loss_val = sess.run([prediction, loss])
        except tf.errors.OutOfRangeError:
          break
    ```
    Args:
      output_types: A (nested) structure of `tf.DType` objects corresponding to
        each component of an element of this dataset.
      output_shapes: (Optional.) A (nested) structure of `tf.TensorShape`
        objects corresponding to each component of an element of this dataset.
        If omitted, each component will have an unconstrainted shape.
      shared_name: (Optional.) If non-empty, this iterator will be shared under
        the given name across multiple sessions that share the same devices
        (e.g. when using a remote server).
      output_classes: (Optional.) A (nested) structure of Python `type` objects
        corresponding to each component of an element of this iterator. If
        omitted, each component is assumed to be of type `tf.Tensor`.
    Returns:
      An `Iterator`.
    Raises:
      TypeError: If the structures of `output_shapes` and `output_types` are
        not the same.
    """
    output_types = nest.map_structure(dtypes.as_dtype, output_types)
    if output_shapes is None:
      # Default every component to a fully-unknown shape.
      output_shapes = nest.map_structure(
          lambda _: tensor_shape.TensorShape(None), output_types)
    else:
      output_shapes = nest.map_structure_up_to(output_types,
                                               tensor_shape.as_shape,
                                               output_shapes)
    if output_classes is None:
      output_classes = nest.map_structure(lambda _: ops.Tensor, output_types)
    nest.assert_same_structure(output_types, output_shapes)
    output_structure = structure.convert_legacy_structure(
        output_types, output_shapes, output_classes)
    if shared_name is None:
      shared_name = ""
    iterator_resource = gen_dataset_ops.iterator_v2(
        container="",
        shared_name=shared_name,
        output_types=structure.get_flat_tensor_types(output_structure),
        output_shapes=structure.get_flat_tensor_shapes(
            output_structure))
    # No initializer: the caller binds a dataset later via make_initializer().
    return Iterator(iterator_resource, None, output_types, output_shapes,
                    output_classes)
  @staticmethod
  def from_string_handle(string_handle,
                         output_types,
                         output_shapes=None,
                         output_classes=None):
    """Creates a new, uninitialized `Iterator` based on the given handle.
    This method allows you to define a "feedable" iterator where you can choose
    between concrete iterators by feeding a value in a `tf.Session.run` call.
    In that case, `string_handle` would be a `tf.compat.v1.placeholder`, and you
    would
    feed it with the value of `tf.data.Iterator.string_handle` in each step.
    For example, if you had two iterators that marked the current position in
    a training dataset and a test dataset, you could choose which to use in
    each step as follows:
    ```python
    train_iterator = tf.data.Dataset(...).make_one_shot_iterator()
    train_iterator_handle = sess.run(train_iterator.string_handle())
    test_iterator = tf.data.Dataset(...).make_one_shot_iterator()
    test_iterator_handle = sess.run(test_iterator.string_handle())
    handle = tf.compat.v1.placeholder(tf.string, shape=[])
    iterator = tf.data.Iterator.from_string_handle(
        handle, train_iterator.output_types)
    next_element = iterator.get_next()
    loss = f(next_element)
    train_loss = sess.run(loss, feed_dict={handle: train_iterator_handle})
    test_loss = sess.run(loss, feed_dict={handle: test_iterator_handle})
    ```
    Args:
      string_handle: A scalar `tf.Tensor` of type `tf.string` that evaluates to
        a handle produced by the `Iterator.string_handle()` method.
      output_types: A (nested) structure of `tf.DType` objects corresponding to
        each component of an element of this dataset.
      output_shapes: (Optional.) A (nested) structure of `tf.TensorShape`
        objects corresponding to each component of an element of this dataset.
        If omitted, each component will have an unconstrainted shape.
      output_classes: (Optional.) A (nested) structure of Python `type` objects
        corresponding to each component of an element of this iterator. If
        omitted, each component is assumed to be of type `tf.Tensor`.
    Returns:
      An `Iterator`.
    """
    output_types = nest.map_structure(dtypes.as_dtype, output_types)
    if output_shapes is None:
      output_shapes = nest.map_structure(
          lambda _: tensor_shape.TensorShape(None), output_types)
    else:
      output_shapes = nest.map_structure_up_to(output_types,
                                               tensor_shape.as_shape,
                                               output_shapes)
    if output_classes is None:
      output_classes = nest.map_structure(lambda _: ops.Tensor, output_types)
    nest.assert_same_structure(output_types, output_shapes)
    output_structure = structure.convert_legacy_structure(
        output_types, output_shapes, output_classes)
    string_handle = ops.convert_to_tensor(string_handle, dtype=dtypes.string)
    iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(
        string_handle,
        output_types=structure.get_flat_tensor_types(output_structure),
        output_shapes=structure.get_flat_tensor_shapes(output_structure))
    return Iterator(iterator_resource, None, output_types, output_shapes,
                    output_classes)
  @property
  def initializer(self):
    """A `tf.Operation` that should be run to initialize this iterator.
    Returns:
      A `tf.Operation` that should be run to initialize this iterator
    Raises:
      ValueError: If this iterator initializes itself automatically.
    """
    if self._initializer is not None:
      return self._initializer
    else:
      # TODO(mrry): Consider whether one-shot iterators should have
      # initializers that simply reset their state to the beginning.
      raise ValueError(
          "The iterator does not have an initializer. This means it was likely "
          "created using `tf.data.Dataset.make_one_shot_iterator()`. For an "
          "initializable iterator, use "
          "`tf.data.Dataset.make_initializable_iterator()` instead.")
  def make_initializer(self, dataset, name=None):
    """Returns a `tf.Operation` that initializes this iterator on `dataset`.
    Args:
      dataset: A `Dataset` whose `element_spec` if compatible with this
        iterator.
      name: (Optional.) A name for the created operation.
    Returns:
      A `tf.Operation` that can be run to initialize this iterator on the given
      `dataset`.
    Raises:
      TypeError: If `dataset` and this iterator do not have a compatible
        `element_spec`.
    """
    with ops.name_scope(name, "make_initializer") as name:
      # NOTE(mrry): Cannot depend on `dataset_ops.get_legacy_output*()` due
      # to that creating a circular dependency.
      # pylint: disable=protected-access
      dataset_output_types = nest.map_structure(
          lambda component_spec: component_spec._to_legacy_output_types(),
          dataset.element_spec)
      dataset_output_shapes = nest.map_structure(
          lambda component_spec: component_spec._to_legacy_output_shapes(),
          dataset.element_spec)
      dataset_output_classes = nest.map_structure(
          lambda component_spec: component_spec._to_legacy_output_classes(),
          dataset.element_spec)
      # pylint: enable=protected-access
      nest.assert_same_structure(self.output_types, dataset_output_types)
      nest.assert_same_structure(self.output_shapes, dataset_output_shapes)
      # Component-wise compatibility checks: classes must match exactly,
      # dtypes must match exactly, shapes need only be compatible.
      for iterator_class, dataset_class in zip(
          nest.flatten(self.output_classes),
          nest.flatten(dataset_output_classes)):
        if iterator_class is not dataset_class:
          raise TypeError(
              f"Expected output classes {self.output_classes!r} but got "
              f"dataset with output classes {dataset_output_classes!r}.")
      for iterator_dtype, dataset_dtype in zip(
          nest.flatten(self.output_types), nest.flatten(dataset_output_types)):
        if iterator_dtype != dataset_dtype:
          raise TypeError(
              f"Expected output types {self.output_types!r} but got dataset "
              f"with output types {dataset_output_types!r}.")
      for iterator_shape, dataset_shape in zip(
          nest.flatten(self.output_shapes), nest.flatten(
              dataset_output_shapes)):
        if not iterator_shape.is_compatible_with(dataset_shape):
          raise TypeError(
              f"Expected output shapes compatible with {self.output_shapes!r} "
              f"but got dataset with output shapes {dataset_output_shapes!r}.")
    # TODO(b/169442955): Investigate the need for this colocation constraint.
    with ops.colocate_with(self._iterator_resource):
      # pylint: disable=protected-access
      return gen_dataset_ops.make_iterator(
          dataset._variant_tensor, self._iterator_resource, name=name)
  def get_next(self, name=None):
    """Returns the next element.
    In graph mode, you should typically call this method *once* and use its
    result as the input to another computation. A typical loop will then call
    `tf.Session.run` on the result of that computation. The loop will terminate
    when the `Iterator.get_next()` operation raises
    `tf.errors.OutOfRangeError`. The following skeleton shows how to use
    this method when building a training loop:
    ```python
    dataset = ...  # A `tf.data.Dataset` object.
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()
    # Build a TensorFlow graph that does something with each element.
    loss = model_function(next_element)
    optimizer = ...  # A `tf.compat.v1.train.Optimizer` object.
    train_op = optimizer.minimize(loss)
    with tf.compat.v1.Session() as sess:
      try:
        while True:
          sess.run(train_op)
      except tf.errors.OutOfRangeError:
        pass
    ```
    NOTE: It is legitimate to call `Iterator.get_next()` multiple times, e.g.
    when you are distributing different elements to multiple devices in a single
    step. However, a common pitfall arises when users call `Iterator.get_next()`
    in each iteration of their training loop. `Iterator.get_next()` adds ops to
    the graph, and executing each op allocates resources (including threads); as
    a consequence, invoking it in every iteration of a training loop causes
    slowdown and eventual resource exhaustion. To guard against this outcome, we
    log a warning when the number of uses crosses a fixed threshold of
    suspiciousness.
    Args:
      name: (Optional.) A name for the created operation.
    Returns:
      A (nested) structure of values matching `tf.data.Iterator.element_spec`.
    """
    self._get_next_call_count += 1
    if self._get_next_call_count > GET_NEXT_CALL_WARNING_THRESHOLD:
      warnings.warn(GET_NEXT_CALL_WARNING_MESSAGE)
    # TODO(b/169442955): Investigate the need for this colocation constraint.
    with ops.colocate_with(self._iterator_resource):
      # pylint: disable=protected-access
      flat_ret = gen_dataset_ops.iterator_get_next(
          self._iterator_resource,
          output_types=self._flat_tensor_types,
          output_shapes=self._flat_tensor_shapes,
          name=name)
      return structure.from_tensor_list(self._element_spec, flat_ret)
  def get_next_as_optional(self):
    # TODO(b/169442955): Investigate the need for this colocation constraint.
    with ops.colocate_with(self._iterator_resource):
      # pylint: disable=protected-access
      return optional_ops._OptionalImpl(
          gen_dataset_ops.iterator_get_next_as_optional(
              self._iterator_resource,
              output_types=structure.get_flat_tensor_types(self.element_spec),
              output_shapes=structure.get_flat_tensor_shapes(
                  self.element_spec)), self.element_spec)
  def string_handle(self, name=None):
    """Returns a string-valued `tf.Tensor` that represents this iterator.
    Args:
      name: (Optional.) A name for the created operation.
    Returns:
      A scalar `tf.Tensor` of type `tf.string`.
    """
    if name is None:
      # Reuse the handle op created in the constructor.
      return self._string_handle
    else:
      return gen_dataset_ops.iterator_to_string_handle(
          self._iterator_resource, name=name)
  @property
  @deprecation.deprecated(
      None, "Use `tf.compat.v1.data.get_output_classes(iterator)`.")
  def output_classes(self):
    """Returns the class of each component of an element of this iterator.
    The expected values are `tf.Tensor` and `tf.sparse.SparseTensor`.
    Returns:
      A (nested) structure of Python `type` objects corresponding to each
      component of an element of this dataset.
    """
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_classes(),  # pylint: disable=protected-access
        self._element_spec)
  @property
  @deprecation.deprecated(
      None, "Use `tf.compat.v1.data.get_output_shapes(iterator)`.")
  def output_shapes(self):
    """Returns the shape of each component of an element of this iterator.
    Returns:
      A (nested) structure of `tf.TensorShape` objects corresponding to each
      component of an element of this dataset.
    """
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_shapes(),  # pylint: disable=protected-access
        self._element_spec)
  @property
  @deprecation.deprecated(
      None, "Use `tf.compat.v1.data.get_output_types(iterator)`.")
  def output_types(self):
    """Returns the type of each component of an element of this iterator.
    Returns:
      A (nested) structure of `tf.DType` objects corresponding to each component
      of an element of this dataset.
    """
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_types(),  # pylint: disable=protected-access
        self._element_spec)
  @property
  def element_spec(self):
    """The type specification of an element of this iterator.
    For more information,
    read [this guide](https://www.tensorflow.org/guide/data#dataset_structure).
    Returns:
      A (nested) structure of `tf.TypeSpec` objects matching the structure of an
      element of this iterator and specifying the type of individual components.
    """
    return self._element_spec
  def _gather_saveables_for_checkpoint(self):
    # Expose the iterator's serialized state under the "ITERATOR" key so that
    # checkpointing can save/restore the position within the dataset.
    def _saveable_factory(name):
      return _IteratorSaveable(self._iterator_resource, name)
    return {"ITERATOR": _saveable_factory}
_uid_counter = 0
_uid_lock = threading.Lock()
def _generate_shared_name(prefix):
with _uid_lock:
global _uid_counter
uid = _uid_counter
_uid_counter += 1
return "{}{}".format(prefix, uid)
@tf_export("data.Iterator", v1=[])
@six.add_metaclass(abc.ABCMeta)
class IteratorBase(collections_abc.Iterator, trackable.Trackable,
                   composite_tensor.CompositeTensor):
  """Represents an iterator of a `tf.data.Dataset`.
  `tf.data.Iterator` is the primary mechanism for enumerating elements of a
  `tf.data.Dataset`. It supports the Python Iterator protocol, which means
  it can be iterated over using a for-loop:
  >>> dataset = tf.data.Dataset.range(2)
  >>> for element in dataset:
  ...   print(element)
  tf.Tensor(0, shape=(), dtype=int64)
  tf.Tensor(1, shape=(), dtype=int64)
  or by fetching individual elements explicitly via `get_next()`:
  >>> dataset = tf.data.Dataset.range(2)
  >>> iterator = iter(dataset)
  >>> print(iterator.get_next())
  tf.Tensor(0, shape=(), dtype=int64)
  >>> print(iterator.get_next())
  tf.Tensor(1, shape=(), dtype=int64)
  In addition, non-raising iteration is supported via `get_next_as_optional()`,
  which returns the next element (if available) wrapped in a
  `tf.experimental.Optional`.
  >>> dataset = tf.data.Dataset.from_tensors(42)
  >>> iterator = iter(dataset)
  >>> optional = iterator.get_next_as_optional()
  >>> print(optional.has_value())
  tf.Tensor(True, shape=(), dtype=bool)
  >>> optional = iterator.get_next_as_optional()
  >>> print(optional.has_value())
  tf.Tensor(False, shape=(), dtype=bool)
  """
  # Abstract interface: concrete subclasses (e.g. OwnedIterator) must provide
  # element_spec, get_next(), and get_next_as_optional().
  @abc.abstractproperty
  def element_spec(self):
    """The type specification of an element of this iterator.
    >>> dataset = tf.data.Dataset.from_tensors(42)
    >>> iterator = iter(dataset)
    >>> iterator.element_spec
    tf.TensorSpec(shape=(), dtype=tf.int32, name=None)
    For more information,
    read [this guide](https://www.tensorflow.org/guide/data#dataset_structure).
    Returns:
      A (nested) structure of `tf.TypeSpec` objects matching the structure of an
      element of this iterator, specifying the type of individual components.
    """
    raise NotImplementedError("Iterator.element_spec")
  @abc.abstractmethod
  def get_next(self):
    """Returns the next element.
    >>> dataset = tf.data.Dataset.from_tensors(42)
    >>> iterator = iter(dataset)
    >>> print(iterator.get_next())
    tf.Tensor(42, shape=(), dtype=int32)
    Returns:
      A (nested) structure of values matching `tf.data.Iterator.element_spec`.
    Raises:
      `tf.errors.OutOfRangeError`: If the end of the iterator has been reached.
    """
    raise NotImplementedError("Iterator.get_next()")
  @abc.abstractmethod
  def get_next_as_optional(self):
    """Returns the next element wrapped in `tf.experimental.Optional`.
    If the iterator has reached the end of the sequence, the returned
    `tf.experimental.Optional` will have no value.
    >>> dataset = tf.data.Dataset.from_tensors(42)
    >>> iterator = iter(dataset)
    >>> optional = iterator.get_next_as_optional()
    >>> print(optional.has_value())
    tf.Tensor(True, shape=(), dtype=bool)
    >>> print(optional.get_value())
    tf.Tensor(42, shape=(), dtype=int32)
    >>> optional = iterator.get_next_as_optional()
    >>> print(optional.has_value())
    tf.Tensor(False, shape=(), dtype=bool)
    Returns:
      A `tf.experimental.Optional` object representing the next element.
    """
    raise NotImplementedError("Iterator.get_next_as_optional()")
class OwnedIterator(IteratorBase):
  """An iterator producing tf.Tensor objects from a tf.data.Dataset.
  The iterator resource created through `OwnedIterator` is owned by the Python
  object and the life time of the underlying resource is tied to the life time
  of the `OwnedIterator` object. This makes `OwnedIterator` appropriate for use
  in eager mode and inside of tf.functions.
  """
  def __init__(self, dataset=None, components=None, element_spec=None):
    """Creates a new iterator from the given dataset.
    If `dataset` is not specified, the iterator will be created from the given
    tensor components and element structure. In particular, the alternative for
    constructing the iterator is used when the iterator is reconstructed from
    it `CompositeTensor` representation.
    Args:
      dataset: A `tf.data.Dataset` object.
      components: Tensor components to construct the iterator from.
      element_spec: A (nested) structure of `TypeSpec` objects that
        represents the type specification of elements of the iterator.
    Raises:
      ValueError: If `dataset` is not provided and either `components` or
        `element_spec` is not provided. Or `dataset` is provided and either
        `components` and `element_spec` is provided.
    """
    super(OwnedIterator, self).__init__()
    if dataset is None:
      # Reconstruction path: rebuild the iterator from its CompositeTensor
      # components (a single resource tensor) and the element spec.
      if (components is None or element_spec is None):
        raise ValueError(
            "When `dataset` is not provided, both `components` and "
            "`element_spec` must be specified.")
      # pylint: disable=protected-access
      self._element_spec = element_spec
      self._flat_output_types = structure.get_flat_tensor_types(
          self._element_spec)
      self._flat_output_shapes = structure.get_flat_tensor_shapes(
          self._element_spec)
      self._iterator_resource, = components
    else:
      if (components is not None or element_spec is not None):
        raise ValueError(
            "When `dataset` is provided, `element_spec` and `components` must "
            "not be specified.")
      self._create_iterator(dataset)
    # Used to detect suspiciously many get_next() calls during tracing.
    self._get_next_call_count = 0
  def _create_iterator(self, dataset):
    # pylint: disable=protected-access
    dataset = dataset._apply_debug_options()
    # Store dataset reference to ensure that dataset is alive when this iterator
    # is being used. For example, `tf.data.Dataset.from_generator` registers
    # a few py_funcs that are needed in `self._next_internal`. If the dataset
    # is deleted, this iterator crashes on `self.__next__(...)` call.
    self._dataset = dataset
    ds_variant = dataset._variant_tensor
    self._element_spec = dataset.element_spec
    self._flat_output_types = structure.get_flat_tensor_types(
        self._element_spec)
    self._flat_output_shapes = structure.get_flat_tensor_shapes(
        self._element_spec)
    with ops.colocate_with(ds_variant):
      self._iterator_resource = (
          gen_dataset_ops.anonymous_iterator_v3(
              output_types=self._flat_output_types,
              output_shapes=self._flat_output_shapes))
      gen_dataset_ops.make_iterator(ds_variant, self._iterator_resource)
  def __iter__(self):
    return self
  def next(self):  # For Python 2 compatibility
    return self.__next__()
  def _next_internal(self):
    # Guard against the `for elem in dataset: ...` idiom being traced inside
    # tf.function with AutoGraph disabled, which would loop indefinitely.
    autograph_status = autograph_ctx.control_status_ctx().status
    autograph_disabled = autograph_status == autograph_ctx.Status.DISABLED
    if not context.executing_eagerly() and autograph_disabled:
      self._get_next_call_count += 1
      if self._get_next_call_count > GET_NEXT_CALL_ERROR_THRESHOLD:
        raise ValueError(GET_NEXT_CALL_ERROR_MESSAGE)
    if not context.executing_eagerly():
      # TODO(b/169442955): Investigate the need for this colocation constraint.
      with ops.colocate_with(self._iterator_resource):
        ret = gen_dataset_ops.iterator_get_next(
            self._iterator_resource,
            output_types=self._flat_output_types,
            output_shapes=self._flat_output_shapes)
      return structure.from_compatible_tensor_list(self._element_spec, ret)
    # TODO(b/77291417): This runs in sync mode as iterators use an error status
    # to communicate that there is no more data to iterate over.
    with context.execution_mode(context.SYNC):
      ret = gen_dataset_ops.iterator_get_next(
          self._iterator_resource,
          output_types=self._flat_output_types,
          output_shapes=self._flat_output_shapes)
      try:
        # Fast path for the case `self._structure` is not a nested structure.
        return self._element_spec._from_compatible_tensor_list(ret)  # pylint: disable=protected-access
      except AttributeError:
        return structure.from_compatible_tensor_list(self._element_spec, ret)
  @property
  def _type_spec(self):
    return IteratorSpec(self.element_spec)
  def __next__(self):
    try:
      return self._next_internal()
    except errors.OutOfRangeError:
      # Translate end-of-sequence into the Python iteration protocol.
      raise StopIteration
  @property
  @deprecation.deprecated(
      None, "Use `tf.compat.v1.data.get_output_classes(iterator)`.")
  def output_classes(self):
    """Returns the class of each component of an element of this iterator.
    The expected values are `tf.Tensor` and `tf.sparse.SparseTensor`.
    Returns:
      A (nested) structure of Python `type` objects corresponding to each
      component of an element of this dataset.
    """
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_classes(),  # pylint: disable=protected-access
        self._element_spec)
  @property
  @deprecation.deprecated(
      None, "Use `tf.compat.v1.data.get_output_shapes(iterator)`.")
  def output_shapes(self):
    """Returns the shape of each component of an element of this iterator.
    Returns:
      A (nested) structure of `tf.TensorShape` objects corresponding to each
      component of an element of this dataset.
    """
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_shapes(),  # pylint: disable=protected-access
        self._element_spec)
  @property
  @deprecation.deprecated(
      None, "Use `tf.compat.v1.data.get_output_types(iterator)`.")
  def output_types(self):
    """Returns the type of each component of an element of this iterator.
    Returns:
      A (nested) structure of `tf.DType` objects corresponding to each component
      of an element of this dataset.
    """
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_types(),  # pylint: disable=protected-access
        self._element_spec)
  @property
  def element_spec(self):
    return self._element_spec
  def get_next(self):
    return self._next_internal()
  def get_next_as_optional(self):
    # TODO(b/169442955): Investigate the need for this colocation constraint.
    with ops.colocate_with(self._iterator_resource):
      # pylint: disable=protected-access
      return optional_ops._OptionalImpl(
          gen_dataset_ops.iterator_get_next_as_optional(
              self._iterator_resource,
              output_types=structure.get_flat_tensor_types(self.element_spec),
              output_shapes=structure.get_flat_tensor_shapes(
                  self.element_spec)), self.element_spec)
  def _gather_saveables_for_checkpoint(self):
    def _saveable_factory(name):
      """Returns a SaveableObject for serialization/deserialization."""
      policy = None
      if self._dataset:
        # Honor the dataset's configured policy for external state (e.g.
        # py_funcs) when serializing iterator state.
        policy = self._dataset.options().experimental_external_state_policy
      if policy:
        return _IteratorSaveable(
            self._iterator_resource,
            name,
            external_state_policy=policy)
      else:
        return _IteratorSaveable(self._iterator_resource, name)
    return {"ITERATOR": _saveable_factory}
  def __tf_tracing_type__(self, signature_context):
    return signature_context.make_reference_type(self._type_spec,
                                                 self._iterator_resource._id)  # pylint:disable=protected-access
@tf_export("data.IteratorSpec", v1=[])
class IteratorSpec(type_spec.TypeSpec):
  """Type specification for `tf.data.Iterator`.
  For instance, `tf.data.IteratorSpec` can be used to define a tf.function that
  takes `tf.data.Iterator` as an input argument:
  >>> @tf.function(input_signature=[tf.data.IteratorSpec(
  ...   tf.TensorSpec(shape=(), dtype=tf.int32, name=None))])
  ... def square(iterator):
  ...   x = iterator.get_next()
  ...   return x * x
  >>> dataset = tf.data.Dataset.from_tensors(5)
  >>> iterator = iter(dataset)
  >>> print(square(iterator))
  tf.Tensor(25, shape=(), dtype=int32)
  Attributes:
    element_spec: A (nested) structure of `tf.TypeSpec` objects that represents
      the type specification of the iterator elements.
  """
  __slots__ = ["_element_spec"]
  def __init__(self, element_spec):
    self._element_spec = element_spec
  @property
  def value_type(self):
    return OwnedIterator
  def _serialize(self):
    return (self._element_spec,)
  @property
  def _component_specs(self):
    # An iterator is represented by a single scalar resource tensor.
    return (tensor_spec.TensorSpec([], dtypes.resource),)
  def _to_components(self, value):
    return (value._iterator_resource,)  # pylint: disable=protected-access
  def _from_components(self, components):
    return OwnedIterator(
        dataset=None,
        components=components,
        element_spec=self._element_spec)
  @staticmethod
  def from_value(value):
    """Builds an `IteratorSpec` describing the given iterator."""
    return IteratorSpec(value.element_spec)  # pylint: disable=protected-access
  def __tf_tracing_type__(self, signature_context):
    # TODO(b/202772221): Validate and enforce this assumption of uniqueness per
    # spec instance.
    return signature_context.make_reference_type(self, id(self))
# TODO(b/71645805): Expose trackable stateful objects from dataset.
class _IteratorSaveable(BaseSaverBuilder.SaveableObject):
  """SaveableObject for saving/restoring iterator state."""
  def __init__(
      self,
      iterator_resource,
      name,
      external_state_policy=options_lib.ExternalStatePolicy.FAIL):
    # Serialize the iterator's current position into a tensor; the policy
    # controls how external state (if any) is handled during serialization.
    serialized_iterator = gen_dataset_ops.serialize_iterator(
        iterator_resource, external_state_policy=external_state_policy.value)
    specs = [
        BaseSaverBuilder.SaveSpec(
            serialized_iterator,
            "",
            name + "_STATE",
            device=iterator_resource.device)
    ]
    super(_IteratorSaveable, self).__init__(iterator_resource, specs, name)
  def restore(self, restored_tensors, restored_shapes):
    # Colocate the deserialize op with the iterator resource it restores into.
    with ops.colocate_with(self.op):
      return gen_dataset_ops.deserialize_iterator(self.op, restored_tensors[0])
@deprecation.deprecated(
    None, "Use `tf.data.Iterator.get_next_as_optional()` instead.")
@tf_export("data.experimental.get_next_as_optional")
def get_next_as_optional(iterator):
  """Wraps the next element of `iterator` in a `tf.experimental.Optional`.
  The returned optional carries no value when the iterator has reached the end
  of its sequence, letting callers probe for exhaustion without catching
  `tf.errors.OutOfRangeError`.
  Args:
    iterator: A `tf.data.Iterator`.
  Returns:
    A `tf.experimental.Optional` holding the next element of the iterator when
    one exists, and no value otherwise.
  """
  # Deprecated shim: delegate directly to the method on the iterator itself.
  next_element_optional = iterator.get_next_as_optional()
  return next_element_optional
# Register `OwnedIterator` under its name in the _pywrap_utils type registry.
_pywrap_utils.RegisterType("OwnedIterator", OwnedIterator)
| 39.197699 | 112 | 0.709951 |
76a8a60088d85939709391d3943021659f46bb8c | 405 | py | Python | app/migrations/0036_auto_20201225_1958.py | ThebiggunSeeoil/app-cbre-exxon | efec395dca662132a19f882b0ff3dbb6318b3e51 | [
"MIT"
] | null | null | null | app/migrations/0036_auto_20201225_1958.py | ThebiggunSeeoil/app-cbre-exxon | efec395dca662132a19f882b0ff3dbb6318b3e51 | [
"MIT"
] | null | null | null | app/migrations/0036_auto_20201225_1958.py | ThebiggunSeeoil/app-cbre-exxon | efec395dca662132a19f882b0ff3dbb6318b3e51 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2020-12-25 12:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0035_auto_20201225_1940'),
]
operations = [
migrations.AlterField(
model_name='workfromgmail_new',
name='time_make',
field=models.DateTimeField(auto_now_add=True),
),
]
| 21.315789 | 58 | 0.617284 |
9149b7574e1efd58b4c5e450c964e33b8c6e5137 | 1,068 | py | Python | pyvisdk/do/tools_upgrade_cancelled.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/tools_upgrade_cancelled.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/tools_upgrade_cancelled.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def ToolsUpgradeCancelled(vim, *args, **kwargs):
'''Thrown when tools install or upgrade fails because the operation was canclled
by the user.'''
obj = vim.client.factory.create('{urn:vim25}ToolsUpgradeCancelled')
# do some validation checking...
if (len(args) + len(kwargs)) < 4:
raise IndexError('Expected at least 5 arguments got: %d' % len(args))
required = [ 'dynamicProperty', 'dynamicType', 'faultCause', 'faultMessage' ]
optional = [ ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
| 31.411765 | 124 | 0.61236 |
639460e05d96a544a16ada6989b9d97a717b2339 | 2,578 | py | Python | src/netius/common/setup.py | timgates42/netius | c6fa76292be0367557518462e0b2bccd852b0d3d | [
"Apache-2.0"
] | 107 | 2015-03-28T23:38:51.000Z | 2021-12-20T14:42:45.000Z | src/netius/common/setup.py | timgates42/netius | c6fa76292be0367557518462e0b2bccd852b0d3d | [
"Apache-2.0"
] | 25 | 2015-05-22T08:35:56.000Z | 2021-12-26T04:42:14.000Z | src/netius/common/setup.py | timgates42/netius | c6fa76292be0367557518462e0b2bccd852b0d3d | [
"Apache-2.0"
] | 11 | 2015-09-23T00:43:13.000Z | 2021-12-26T03:19:36.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import os
CA_URL = "https://curl.se/ca/cacert.pem"
COMMON_PATH = os.path.dirname(__file__)
BASE_PATH = os.path.join(COMMON_PATH, "..", "base")
EXTRAS_PATH = os.path.join(BASE_PATH, "extras")
SSL_CA_PATH = os.path.join(EXTRAS_PATH, "net.ca")
def ensure_setup():
ensure_ca()
def ensure_ca(path = SSL_CA_PATH):
if os.path.exists(path): return
_download_ca(path = path)
def _download_ca(path = SSL_CA_PATH, raise_e = True):
import netius.clients
ca_url = CA_URL
while True:
result = netius.clients.HTTPClient.method_s(
"GET",
ca_url,
asynchronous = False
)
if not result["code"] in (301, 302, 303): break
headers = result.get("headers", {})
location = headers.get("Location", None)
if not location: break
ca_url = location
if not result["code"] == 200:
if not raise_e: return
raise Exception("Error while downloading CA file from '%s'" % CA_URL)
response = netius.clients.HTTPClient.to_response(result)
contents = response.read()
_store_contents(contents, path)
def _store_contents(contents, path):
file = open(path, "wb")
try: file.write(contents)
finally: file.close()
return path
| 31.439024 | 78 | 0.66059 |
444f0af05777270c2a87b79e74c7a06ab7816fba | 2,100 | py | Python | tests/TestBase.py | hasii2011/PyArcadeStarTrek | 370edbb62f15f69322aa7f109d6d36ebf20cbe4a | [
"MIT"
] | 1 | 2021-06-13T00:56:24.000Z | 2021-06-13T00:56:24.000Z | tests/TestBase.py | hasii2011/PyArcadeStarTrek | 370edbb62f15f69322aa7f109d6d36ebf20cbe4a | [
"MIT"
] | 94 | 2021-04-16T20:34:10.000Z | 2022-01-13T19:58:20.000Z | tests/TestBase.py | hasii2011/PyArcadeStarTrek | 370edbb62f15f69322aa7f109d6d36ebf20cbe4a | [
"MIT"
] | null | null | null |
import json
import logging
import logging.config
from unittest import TestCase
from pkg_resources import resource_filename
from pytrek.GameState import GameState
from pytrek.engine.GameEngine import GameEngine
from pytrek.engine.Intelligence import Intelligence
from pytrek.engine.devices.Devices import Devices
from pytrek.engine.futures.EventEngine import EventEngine
from pytrek.mediators.GalaxyMediator import GalaxyMediator
from pytrek.mediators.QuadrantMediator import QuadrantMediator
from pytrek.model.Galaxy import Galaxy
from pytrek.settings.GameSettings import GameSettings
JSON_LOGGING_CONFIG_FILENAME: str = "testLoggingConfiguration.json"
TEST_DIRECTORY: str = 'tests'
class TestBase(TestCase):
RESOURCES_PACKAGE_NAME: str = 'tests.resources'
RESOURCES_TEST_CLASSES_PACKAGE_NAME: str = 'tests.resources.testclass'
"""
A base unit test class to initialize some logging stuff we need
"""
@classmethod
def setUpLogging(cls):
"""
"""
loggingConfigFilename: str = cls.findLoggingConfig()
with open(loggingConfigFilename, 'r') as loggingConfigurationFile:
configurationDictionary = json.load(loggingConfigurationFile)
logging.config.dictConfig(configurationDictionary)
logging.logProcesses = False
logging.logThreads = False
@classmethod
def findLoggingConfig(cls) -> str:
fqFileName = resource_filename(TestBase.RESOURCES_PACKAGE_NAME, JSON_LOGGING_CONFIG_FILENAME)
return fqFileName
@classmethod
def resetSingletons(cls):
"""
Force stateful singletons to re-initialize
"""
GameSettings.__instance__ = None
Intelligence.__instance__ = None
GameState.__instance__ = None
Galaxy.__instance__ = None
GameEngine.__instance__ = None
EventEngine.__instance__ = None
Devices.__instance__ = None
Galaxy.__instance__ = None
GalaxyMediator.__instance__ = None
QuadrantMediator.__instance__ = None
| 29.166667 | 101 | 0.721429 |
de8e385b95612914e1d765ee02bd85540af440f9 | 5,259 | py | Python | py/legacypipe/halos.py | legacysurvey/legacypipe | 435928b03070533535ad56a712f47659bc557ba2 | [
"BSD-3-Clause"
] | 32 | 2015-08-25T00:25:23.000Z | 2022-03-04T06:35:54.000Z | py/legacypipe/halos.py | legacysurvey/legacypipe | 435928b03070533535ad56a712f47659bc557ba2 | [
"BSD-3-Clause"
] | 644 | 2015-07-08T16:26:28.000Z | 2022-03-30T19:09:10.000Z | py/legacypipe/halos.py | legacysurvey/legacypipe | 435928b03070533535ad56a712f47659bc557ba2 | [
"BSD-3-Clause"
] | 22 | 2015-08-24T18:27:36.000Z | 2021-12-04T03:10:42.000Z | import numpy as np
import logging
logger = logging.getLogger('legacypipe.halos')
# def info(*args):
# from legacypipe.utils import log_info
# log_info(logger, args)
def debug(*args):
from legacypipe.utils import log_debug
log_debug(logger, args)
def subtract_halos(tims, refs, bands, mp, plots, ps, moffat=True,
old_calibs_ok=False):
args = [(tim, refs, moffat, old_calibs_ok) for tim in tims]
haloimgs = mp.map(subtract_one, args)
for tim,h in zip(tims, haloimgs):
tim.data -= h
def subtract_one(X):
tim, refs, moffat, old_calibs_ok = X
if tim.imobj.camera != 'decam':
print('Warning: Stellar halo subtraction is only implemented for DECam')
return 0.
col = 'decam_mag_%s' % tim.band
if not col in refs.get_columns():
print('Warning: no support for halo subtraction in band %s' % tim.band)
return 0.
return decam_halo_model(refs, tim.time.toMjd(), tim.subwcs,
tim.imobj.pixscale, tim.band, tim.imobj, moffat,
old_calibs_ok=old_calibs_ok)
def moffat(rr, alpha, beta):
return (beta-1.)/(np.pi * alpha**2)*(1. + (rr/alpha)**2)**(-beta)
def decam_halo_model(refs, mjd, wcs, pixscale, band, imobj, include_moffat,
old_calibs_ok=False):
from legacypipe.survey import radec_at_mjd
assert(np.all(refs.ref_epoch > 0))
rr,dd = radec_at_mjd(refs.ra, refs.dec, refs.ref_epoch.astype(float),
refs.pmra, refs.pmdec, refs.parallax, mjd)
col = 'decam_mag_%s' % band
if not col in refs.get_columns():
print('No halo subtraction for band', band)
return 0.
mag = refs.get(col)
fluxes = 10.**((mag - 22.5) / -2.5)
have_inner_moffat = False
if include_moffat:
psf = imobj.read_psf_model(0,0, pixPsf=True,
old_calibs_ok=old_calibs_ok)
if hasattr(psf, 'moffat'):
have_inner_moffat = True
inner_alpha, inner_beta = psf.moffat
debug('Read inner Moffat parameters', (inner_alpha, inner_beta),
'from PsfEx file')
H,W = wcs.shape
H = int(H)
W = int(W)
halo = np.zeros((H,W), np.float32)
for ref,flux,ra,dec in zip(refs, fluxes, rr, dd):
_,x,y = wcs.radec2pixelxy(ra, dec)
x -= 1.
y -= 1.
rad_arcsec = ref.radius * 3600.
# We subtract halos out to N x their masking radii.
rad_arcsec *= 4.0
# Rongpu says only apply within:
rad_arcsec = np.minimum(rad_arcsec, 400.)
pixrad = int(np.ceil(rad_arcsec / pixscale))
xlo = int(np.clip(np.floor(x - pixrad), 0, W-1))
xhi = int(np.clip(np.ceil (x + pixrad), 0, W-1))
ylo = int(np.clip(np.floor(y - pixrad), 0, H-1))
yhi = int(np.clip(np.ceil (y + pixrad), 0, H-1))
if xlo == xhi or ylo == yhi:
continue
rads = np.hypot(np.arange(ylo, yhi+1)[:,np.newaxis] - y,
np.arange(xlo, xhi+1)[np.newaxis,:] - x)
maxr = pixrad
# Outer apodization
apr = maxr*0.5
apodize = np.clip((rads - maxr) / (apr - maxr), 0., 1.)
# Inner apodization: ramp from 0 up to 1 between Rongpu's "R3"
# and "R4" radii
apr_i0 = 7. / pixscale
apr_i1 = 8. / pixscale
apodize *= np.clip((rads - apr_i0) / (apr_i1 - apr_i0), 0., 1.)
if band == 'z':
'''
For z band, the outer PSF is a weighted Moffat profile. For most
CCDs, the Moffat parameters (with radius in arcsec and SB in nmgy per
sq arcsec) and the weight are (for a 22.5 magnitude star):
alpha, beta, weight = 17.650, 1.7, 0.0145
However, a small subset of DECam CCDs (which are N20, S8,
S10, S18, S21 and S27) have a more compact outer PSF in z
band, which can still be characterized by a weigthed
Moffat with the following parameters:
alpha, beta, weight = 16, 2.3, 0.0095
'''
if imobj.ccdname.strip() in ['N20', 'S8', 'S10', 'S18', 'S21', 'S27']:
alpha, beta, weight = 16, 2.3, 0.0095
else:
alpha, beta, weight = 17.650, 1.7, 0.0145
if x < 0 or y < 0 or x > W-1 or y > H-1:
# Reduce the weight by half for z-band halos that are off the chip.
weight *= 0.5
# The 'pixscale**2' is because Rongpu's formula is in nanomaggies/arcsec^2
halo[ylo:yhi+1, xlo:xhi+1] += (flux * apodize * weight *
moffat(rads*pixscale, alpha, beta) * pixscale**2)
else:
fd = dict(g=0.00045,
r=0.00033,
i=0.00033)
f = fd[band]
halo[ylo:yhi+1, xlo:xhi+1] += (flux * apodize * f * (rads*pixscale)**-2
* pixscale**2)
if have_inner_moffat:
weight = 1.
halo[ylo:yhi+1, xlo:xhi+1] += (flux * apodize * weight *
moffat(rads*pixscale, inner_alpha, inner_beta) * pixscale**2)
return halo
| 38.955556 | 104 | 0.543639 |
e9a226c5d3fa761134e40a0c637f24273f8015ea | 100 | py | Python | tests/tests.py | slapec/bangoo | 34facf122f15943a4368d5c2f45fe178ff01edaa | [
"MIT"
] | null | null | null | tests/tests.py | slapec/bangoo | 34facf122f15943a4368d5c2f45fe178ff01edaa | [
"MIT"
] | null | null | null | tests/tests.py | slapec/bangoo | 34facf122f15943a4368d5c2f45fe178ff01edaa | [
"MIT"
] | null | null | null | from .test_admin_url import *
from .content.test_new_menu_create import *
from .core import * #NOQA | 25 | 43 | 0.79 |
d46bfee361a72a73bef555bfd3a15b96e0d82bee | 14,069 | py | Python | MV3D_TF_release/lib/rpn_msr/proposal_target_layer_tf.py | ZiningWang/Sparse_Pooling | a160ddf9a03ef53bad630b4ac186a8437bd0475c | [
"Unlicense"
] | 52 | 2018-08-28T03:44:51.000Z | 2022-03-23T16:00:14.000Z | MV3D_TF_release/lib/rpn_msr/proposal_target_layer_tf.py | weidezhang/Sparse_Pooling | a160ddf9a03ef53bad630b4ac186a8437bd0475c | [
"Unlicense"
] | 1 | 2019-06-25T01:32:35.000Z | 2019-07-01T01:34:20.000Z | MV3D_TF_release/lib/rpn_msr/proposal_target_layer_tf.py | weidezhang/Sparse_Pooling | a160ddf9a03ef53bad630b4ac186a8437bd0475c | [
"Unlicense"
] | 20 | 2018-07-31T18:17:35.000Z | 2021-07-09T08:42:06.000Z | # --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
import yaml
import numpy as np
import numpy.random as npr
from fast_rcnn.config import cfg
from fast_rcnn.bbox_transform import bbox_transform_3d, bbox_transform_cnr
from utils.cython_bbox import bbox_overlaps
from utils.transform import lidar_3d_to_corners, lidar_to_bv, lidar_cnr_to_img
import pdb
DEBUG = False
def proposal_target_layer_3d(rpn_rois_bv, rpn_rois_3d, gt_boxes_bv, gt_boxes_3d, gt_boxes_corners, calib, _num_classes):
"""
Assign object detection proposals to ground-truth targets. Produces proposal
classification labels and bounding-box regression targets.
"""
# Proposal ROIs (0, x1, y1, x2, y2) coming from RPN
# (i.e., rpn.proposal_layer.ProposalLayer), or any other source
# TODO(rbg): it's annoying that sometimes I have extra info before
# and other times after box coordinates -- normalize to one format
all_rois = rpn_rois_bv
# if DEBUG:
# print "gt_boxes_bv: ", gt_boxes_bv, gt_boxes_bv.shape
# print "gt_boxes_bv: ", gt_boxes_bv[:, :-1]
# print "gt_boxes_3d: ", gt_boxes_3d, gt_boxes_3d.shape
# print "gt_boxes_3d: ", gt_boxes_3d[:, :-1]
# Include ground-truth boxes in the set of candidate rois
zeros = np.zeros((gt_boxes_bv.shape[0], 1), dtype=gt_boxes_bv.dtype)
all_rois = np.vstack(
(all_rois, np.hstack((zeros, gt_boxes_bv[:, :-1])))
)
all_rois_3d = np.vstack(
(rpn_rois_3d, np.hstack((zeros, gt_boxes_3d[:, :-1])))
)
if DEBUG:
print ("rpn rois 3d shape: ", rpn_rois_3d.shape)
print ("all_rois bv shape: ", all_rois.shape)
print ("all_rois_3d shape: ", all_rois_3d.shape)
# Sanity check: single batch only
assert np.all(all_rois[:, 0] == 0), \
'Only single item batches are supported'
num_images = 1
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
# Sample rois with classification labels and bounding box regression
# targets
labels, rois_bv, rois_cnr, rois_3d, bbox_targets = _sample_rois_3d(
all_rois, all_rois_3d, gt_boxes_bv, gt_boxes_corners, fg_rois_per_image,
rois_per_image, _num_classes)
if DEBUG:
print ("labels shape: ", labels.shape)
print ("keep_inds: ", keep_inds)
print ("all_rois_bv shape:, ", all_rois_bv.shape)
print ("rois_3d shape:, ", rois_3d.shape)
print ("rois_cnr shape:, ", rois_cnr.shape)
rois_img = lidar_cnr_to_img(rois_cnr[:,1:25],
calib[3], calib[2], calib[0])
rois_img = np.hstack((rois_bv[:,0].reshape(-1, 1), rois_img))
if DEBUG:
print ("after sample")
print (labels.shape)
print ('num fg: {}'.format((labels > 0).sum()))
print ('num bg: {}'.format((labels == 0).sum()))
print ('rois_bv shape: ', rois_bv.shape)
print ('rois_3d shape: ', rois_3d.shape)
print ('bbox_targets shape: ', bbox_targets.shape)
rois_bv = rois_bv.reshape(-1, 5).astype(np.float32)
rois_img = rois_img.reshape(-1, 5).astype(np.float32)
rois_3d = rois_3d.reshape(-1,7).astype(np.float32)
labels = labels.reshape(-1,1).astype(np.int32)
bbox_targets = bbox_targets.reshape(-1,_num_classes*24).astype(np.float32)
return rois_bv, rois_img, labels, bbox_targets, rois_3d
def proposal_target_layer(rpn_rois, gt_boxes,_num_classes):
"""
Assign object detection proposals to ground-truth targets. Produces proposal
classification labels and bounding-box regression targets.
"""
# Proposal ROIs (0, x1, y1, x2, y2) coming from RPN
# (i.e., rpn.proposal_layer.ProposalLayer), or any other source
all_rois = rpn_rois
# TODO(rbg): it's annoying that sometimes I have extra info before
# and other times after box coordinates -- normalize to one format
# Include ground-truth boxes in the set of candidate rois
zeros = np.zeros((gt_boxes.shape[0], 1), dtype=gt_boxes.dtype)
all_rois = np.vstack(
(all_rois, np.hstack((zeros, gt_boxes[:, :-1])))
)
# Sanity check: single batch only
assert np.all(all_rois[:, 0] == 0), \
'Only single item batches are supported'
num_images = 1
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
# Sample rois with classification labels and bounding box regression
# targets
labels, rois, bbox_targets, bbox_inside_weights = _sample_rois(
all_rois, gt_boxes, fg_rois_per_image,
rois_per_image, _num_classes)
if DEBUG:
print ('num fg: {}'.format((labels > 0).sum()))
print ('num bg: {}'.format((labels == 0).sum()))
# _count += 1
# _fg_num += (labels > 0).sum()
# _bg_num += (labels == 0).sum()
# print 'num fg avg: {}'.format(_fg_num / _count)
# print 'num bg avg: {}'.format(_bg_num / _count)
# print 'ratio: {:.3f}'.format(float(_fg_num) / float(_bg_num))
rois = rois.reshape(-1,5)
labels = labels.reshape(-1,1)
bbox_targets = bbox_targets.reshape(-1,_num_classes*4)
bbox_inside_weights = bbox_inside_weights.reshape(-1,_num_classes*4)
bbox_outside_weights = np.array(bbox_inside_weights > 0).astype(np.float32)
return rois,labels,bbox_targets,bbox_inside_weights,bbox_outside_weights
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets (bbox_target_data) are stored in a
compact form N x (class, tx, ty, tw, th)
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets).
Returns:
bbox_target (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
clss = np.array(bbox_target_data[:, 0], dtype=np.uint16, copy=True)
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return bbox_targets, bbox_inside_weights
def _get_bbox_regression_labels_3d(bbox_target_data, num_classes):
"""Bounding-box regression targets (bbox_target_data) are stored in a
compact form N x (class, x0-x7, y0-y7, z0-z7)
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets).
Returns:
bbox_target (ndarray): N x 24K blob of regression targets
bbox_inside_weights (ndarray): N x 24K blob of loss weights
"""
clss = np.array(bbox_target_data[:, 0], dtype=np.uint16, copy=True)
bbox_targets = np.zeros((clss.size, 24 * num_classes), dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = 24 * cls
end = start + 24
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
return bbox_targets
def _compute_targets(ex_rois, gt_rois, labels):
"""Compute bounding-box regression targets for an image."""
assert ex_rois.shape[0] == gt_rois.shape[0]
assert ex_rois.shape[1] == 4
assert gt_rois.shape[1] == 4
targets = bbox_transform(ex_rois, gt_rois)
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
targets = ((targets - np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS))
/ np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS))
return np.hstack(
(labels[:, np.newaxis], targets)).astype(np.float32, copy=False)
def _compute_targets_cnr(ex_rois_cnr, gt_rois_cnr, labels):
"""Compute bounding-box regression targets for an image."""
assert ex_rois_cnr.shape[0] == gt_rois_cnr.shape[0]
assert ex_rois_cnr.shape[1] == 24
assert gt_rois_cnr.shape[1] == 24
assert np.any(gt_rois_cnr), "gt rois cnr should not be empty"
targets = bbox_transform_cnr(ex_rois_cnr, gt_rois_cnr)
# if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# # Optionally normalize targets by a precomputed mean and stdev
# targets = ((targets - np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS))
# / np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS))
return np.hstack(
(labels[:, np.newaxis], targets)).astype(np.float32, copy=False)
def _sample_rois_3d(all_rois_bv, all_rois_3d, gt_boxes_bv, gt_boxes_corners, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# overlaps: (rois x gt_boxes)
overlaps = bbox_overlaps(
np.ascontiguousarray(all_rois_bv[:, 1:5], dtype=np.float),
np.ascontiguousarray(gt_boxes_bv[:, :4], dtype=np.float))
gt_assignment = overlaps.argmax(axis=1)
max_overlaps = overlaps.max(axis=1)
labels = gt_boxes_bv[gt_assignment, 4]
if DEBUG:
print ("overlaps: ", overlaps.shape)
print ("gt assignment: ", gt_assignment.shape)
print ("max_overlaps: ", max_overlaps.shape)
print ("labels: ", labels.shape)
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]
if DEBUG:
print ("fg_inds: ", fg_inds.shape)
# print "fg_rois_per_image: ", fg_rois_per_image
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = int(min(fg_rois_per_image, fg_inds.size))
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image, replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &
(max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size)
# Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_this_image:] = 0
rois_bv = all_rois_bv[keep_inds]
rois_3d = all_rois_3d[keep_inds]
# convert 3d to corners
rois_cnr = lidar_3d_to_corners(rois_3d[:,1:7])
rois_cnr = np.hstack((rois_3d[:,0].reshape(-1,1), rois_cnr))
if DEBUG:
print ("labels shape: ", labels.shape)
print ("keep_inds: ", keep_inds)
print ("all_rois_bv shape:, ", all_rois_bv.shape)
print ("rois_3d shape:, ", rois_3d.shape)
print ("rois_cnr shape:, ", rois_cnr.shape)
bbox_target_data = _compute_targets_cnr(
rois_cnr[:, 1:25], gt_boxes_corners[gt_assignment[keep_inds], :24], labels)
bbox_targets = \
_get_bbox_regression_labels_3d(bbox_target_data, num_classes)
return labels, rois_bv, rois_cnr, rois_3d, bbox_targets
def _sample_rois(all_rois, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# overlaps: (rois x gt_boxes)
overlaps = bbox_overlaps(
np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),
np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))
gt_assignment = overlaps.argmax(axis=1)
max_overlaps = overlaps.max(axis=1)
labels = gt_boxes[gt_assignment, 4]
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = int(min(fg_rois_per_image, fg_inds.size))
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image, replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &
(max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size)
# Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_this_image:] = 0
rois = all_rois[keep_inds]
bbox_target_data = _compute_targets(
rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)
bbox_targets, bbox_inside_weights = \
_get_bbox_regression_labels(bbox_target_data, num_classes)
return labels, rois, bbox_targets, bbox_inside_weights
| 40.428161 | 125 | 0.680645 |
1aed481191e2e7aabc9a9beaae2c5440ebd3c1c3 | 14,922 | py | Python | cnld/abstract.py | bdshieh/cnl-dyna | 9013fa11cabb6ad51aaa385b44ef99cc43bf6a2b | [
"MIT"
] | 3 | 2020-07-08T14:42:50.000Z | 2021-11-12T06:11:15.000Z | cnld/abstract.py | bdshieh/cnl-dyna | 9013fa11cabb6ad51aaa385b44ef99cc43bf6a2b | [
"MIT"
] | null | null | null | cnld/abstract.py | bdshieh/cnl-dyna | 9013fa11cabb6ad51aaa385b44ef99cc43bf6a2b | [
"MIT"
] | null | null | null | '''
Abstract representation and manipulation of CMUT and PMUT arrays.
'''
import collections
import inspect
import json
import math
import sys
from collections import OrderedDict
from copy import copy
import numpy as np
from cnld import util
from namedlist import FACTORY, namedlist
__all__ = [
'SquareCmutMembrane', 'CircularCmutMembrane', 'Patch', 'Element', 'Array',
'move_membrane', 'translate_membrane', 'rotate_membrane', 'move_element',
'translate_element', 'rotate_element', 'element_position_from_membranes',
'focus_element', 'dump', 'dumps', 'bias_element', 'activate_element',
'deactivate_element', 'move_array', 'load', 'loads', 'translate_array',
'rotate_array', 'get_element_positions_from_array',
'get_membrane_positions_from_array', 'focus_array', 'get_element_count',
'get_patch_count', 'register_type', 'array_position_from_vertices'
]
def _is_abstract_type(obj):
'''
Method to determine if object is instance of namedlist.
'''
return hasattr(obj, '_asdict')
def _generate_dict_with_name_attr(obj):
'''
Converts abstract object into a nested dictionary with __name__ attribute.
'''
name = type(obj).__name__
# for abstract objects and dicts, add __name__ attr
# if name in names or name is 'dict':
if _is_abstract_type(obj) or isinstance(obj, collections.Mapping):
d = {}
d['__name__'] = name
for k, v in obj._asdict().items():
d[k] = _generate_dict_with_name_attr(v)
return d
elif isinstance(obj, (list, tuple)):
l = []
for i in obj:
l.append(_generate_dict_with_name_attr(i))
return l
else:
return obj
def _generate_object_from_json(js):
'''
Convert json object to abstract object.
'''
if isinstance(js, dict):
name = js.pop('__name__')
d = {}
for key, val in js.items():
d[key] = _generate_object_from_json(val)
# attempt to instantiate abstract object with fallback to dict
try:
return ObjectFactory.create(name, **d)
except KeyError:
return d
elif isinstance(js, (list, tuple)):
l = []
for i in js:
l.append(_generate_object_from_json(i))
return l
else:
return js
class ObjectFactory:
'''
Instantiates abstract types defined in module namespace.
'''
@staticmethod
def create(name, *args, **kwargs):
return globals()[name](*args, **kwargs)
def _repr(self):
return self.__str__()
def _str(self):
return pretty(self)
def _contains(self, key):
return key in self._fields
def _copy(self):
cls = type(self)
obj = loads(dumps(self))
if _is_abstract_type(obj):
return cls(**obj._asdict())
else:
return cls(**obj)
def _memoize(self):
d = self._asdict()
for key in self._memoize_excl:
if key in d:
d.pop(key)
return json.dumps(d)
def __getitem__(self, key):
return self._asdict().__getitem__(key)
def keys(self):
return self._asdict().keys()
def dump(obj, fp, indent=1, mode='w+', *args, **kwargs):
'''
Dumps abstract object to JSON.
'''
json.dump(_generate_dict_with_name_attr(obj),
open(fp, mode),
indent=indent,
*args,
**kwargs)
def dumps(obj, indent=1, *args, **kwargs):
'''
Dumps abstract object to a JSON string.
'''
return json.dumps(_generate_dict_with_name_attr(obj),
indent=indent,
*args,
**kwargs)
def load(fp, *args, **kwargs):
'''
Load JSON and convert to abstract object.
'''
return _generate_object_from_json(json.load(open(fp, 'r'), *args, **kwargs))
def loads(s, *args, **kwargs):
'''
Load JSON string and convert to abstract object.
'''
return _generate_object_from_json(json.loads(s, *args, **kwargs))
def register_type(*args, excl=None, **kwargs):
'''
Creates a new abstract type with the given properties. Properties specified in excl
will be excluded from the memoization key.
'''
if excl is None:
excl = []
cls = namedlist(*args, **kwargs)
cls.__repr__ = _repr
cls.__str__ = _str
cls.__contains__ = _contains
cls.__copy__ = _copy
cls.copy = copy
cls._memoize = _memoize
cls._memoize_excl = excl
cls.__getitem__ = __getitem__
cls.keys = keys
return cls
def pretty(obj, indent=0):
'''
Pretty print abstract objects.
'''
strings = []
# if type(obj).__name__ in names:
if _is_abstract_type(obj):
strings += [' ' * indent, type(obj).__name__, '\n']
for key, val in obj._asdict().items():
# if type(val).__name__ in names:
if _is_abstract_type(val):
strings += [' ' * (indent + 1), str(key), ': ', '\n']
strings += [pretty(val, indent + 1)]
elif isinstance(val, (list, tuple)):
strings += [' ' * (indent + 1), str(key), ': ', '\n']
strings += [pretty(val, indent + 2)]
else:
strings += [' ' * (indent + 1), str(key), ': ', str(val), '\n']
elif isinstance(obj, (list, tuple)):
if len(obj) == 0:
strings += [' ' * indent, '[]', '\n']
# elif type(obj[0]).__name__ in names:
elif _is_abstract_type(obj[0]):
for val in obj:
strings += [pretty(val, indent + 1)]
elif isinstance(obj[0], (list, tuple)):
for val in obj:
strings += [pretty(val, indent + 1)]
else:
strings += [' ' * indent, str(obj), '\n']
else:
pass
return ''.join(strings)
_SquareCmutMembrane = OrderedDict()
_SquareCmutMembrane['id'] = None
_SquareCmutMembrane['position'] = None
_SquareCmutMembrane['shape'] = 'square'
_SquareCmutMembrane['length_x'] = 40e-6
_SquareCmutMembrane['length_y'] = 40e-6
_SquareCmutMembrane['electrode_x'] = 40e-6
_SquareCmutMembrane['electrode_y'] = 40e-6
_SquareCmutMembrane['thickness'] = (2e-6, )
_SquareCmutMembrane['density'] = (2040, )
_SquareCmutMembrane['y_modulus'] = (110e9, )
_SquareCmutMembrane['p_ratio'] = (0.22, )
_SquareCmutMembrane['isolation'] = 200e-9
_SquareCmutMembrane['permittivity'] = 6.3
_SquareCmutMembrane['gap'] = 50e-9
_SquareCmutMembrane['damping_freq_a'] = 0
_SquareCmutMembrane['damping_freq_b'] = 0
_SquareCmutMembrane['damping_mode_a'] = 0
_SquareCmutMembrane['damping_mode_b'] = 4
_SquareCmutMembrane['damping_ratio_a'] = 0.0
_SquareCmutMembrane['damping_ratio_b'] = 0.0
_SquareCmutMembrane['patches'] = FACTORY(list)
SquareCmutMembrane = register_type('SquareCmutMembrane',
_SquareCmutMembrane,
excl=['id', 'position', 'patches'])
_CircularCmutMembrane = OrderedDict()
_CircularCmutMembrane['id'] = None
_CircularCmutMembrane['position'] = None
_CircularCmutMembrane['shape'] = 'circle'
_CircularCmutMembrane['radius'] = 40e-6 / 2
_CircularCmutMembrane['electrode_r'] = 40e-6 / 2
_CircularCmutMembrane['thickness'] = (2e-6, )
_CircularCmutMembrane['density'] = (2040, )
_CircularCmutMembrane['y_modulus'] = (110e9, )
_CircularCmutMembrane['p_ratio'] = (0.22, )
_CircularCmutMembrane['isolation'] = 200e-9
_CircularCmutMembrane['permittivity'] = 6.3
_CircularCmutMembrane['gap'] = 50e-9
_CircularCmutMembrane['damping_freq_a'] = 0
_CircularCmutMembrane['damping_freq_b'] = 0
_CircularCmutMembrane['damping_mode_a'] = 0
_CircularCmutMembrane['damping_mode_b'] = 4
_CircularCmutMembrane['damping_ratio_a'] = 0.0
_CircularCmutMembrane['damping_ratio_b'] = 0.0
_CircularCmutMembrane['patches'] = FACTORY(list)
CircularCmutMembrane = register_type('CircularCmutMembrane',
_CircularCmutMembrane,
excl=['id', 'position', 'patches'])
_Patch = OrderedDict()
_Patch['id'] = None
_Patch['position'] = None
_Patch['length_x'] = None
_Patch['length_y'] = None
_Patch['radius_min'] = None
_Patch['radius_max'] = None
_Patch['theta_min'] = None
_Patch['theta_max'] = None
_Patch['area'] = None
Patch = register_type('Patch', _Patch)
_Element = OrderedDict()
_Element['id'] = None
_Element['position'] = None
_Element['kind'] = None
_Element['active'] = True
_Element['apodization'] = 1
_Element['delay'] = 0
_Element['dc_bias'] = 0
_Element['membranes'] = FACTORY(list)
Element = register_type('Element', _Element)
_Array = OrderedDict()
_Array['id'] = None
_Array['position'] = None
_Array['delay'] = 0
_Array['elements'] = FACTORY(list)
Array = register_type('Array', _Array)
''' DECORATORS '''
def vectorize(f):
    """Decorator allowing *f* to be called with either a single abstract
    object or a list/tuple of them.

    When the first argument is a list or tuple, ``f`` is applied to each
    item and the per-item results are returned as a list; otherwise ``f``
    is called directly and its result returned unchanged.
    """
    # Local import keeps the fix self-contained (module imports are
    # outside this chunk's view).
    import functools

    # functools.wraps preserves the wrapped function's __name__ and
    # __doc__, which the original wrapper discarded.
    @functools.wraps(f)
    def decorator(m, *args, **kwargs):
        if isinstance(m, (list, tuple)):
            return [f(i, *args, **kwargs) for i in m]
        return f(m, *args, **kwargs)
    return decorator
''' MEMBRANE MANIPLUATIONS '''
@vectorize
def move_membrane(m, pos):
    """Place membrane *m* at the absolute position *pos*."""
    m.position = pos
@vectorize
def translate_membrane(m, vec):
    """Shift membrane *m* by the displacement vector *vec*."""
    m.position = [p + d for p, d in zip(m.position, vec)]
@vectorize
def rotate_membrane(m, origin, vec, angle):
    """Rotate membrane *m* by *angle* about the axis *vec* anchored at
    *origin*, and record the rotation on ``m.rotations``.
    """
    org = np.array(origin)
    pos = np.array(m.position)
    # determine new membrane position
    newpos = util.rotation_matrix(vec, angle).dot(pos - org) + org
    m.position = newpos.tolist()
    # Update membrane rotation history.  getattr guards the first rotation
    # of a membrane created without a `rotations` attribute ('rotations'
    # is not a declared field of the membrane types above, so the original
    # `len(m.rotations)` would raise AttributeError in that case —
    # TODO confirm against register_type's attribute semantics).
    rotations = getattr(m, 'rotations', None)
    if rotations:
        rotations.append([vec, angle])
    else:
        m.rotations = [[vec, angle]]
''' ELEMENT MANIPULATIONS '''
@vectorize
def move_element(e, pos):
    """Move element *e* (and its membranes) so that it sits at *pos*."""
    offset = [target - current for target, current in zip(pos, e.position)]
    translate_element(e, offset)
@vectorize
def translate_element(e, vec):
    """Shift element *e* and all of its membranes by *vec*."""
    e.position = [p + d for p, d in zip(e.position, vec)]
    for membrane in e.membranes:
        translate_membrane(membrane, vec)
@vectorize
def rotate_element(e, origin, vec, angle):
    """Rotate element *e* about *origin* around axis *vec* by *angle*."""
    org = np.array(origin)
    # New element position: rotate the offset from the origin, then shift back.
    rotated = util.rotation_matrix(vec, angle).dot(np.array(e.position) - org)
    e.position = (rotated + org).tolist()
    # Membranes rotate about the same origin so the geometry stays rigid.
    for membrane in e.membranes:
        rotate_membrane(membrane, origin, vec, angle)
@vectorize
def element_position_from_membranes(e):
    """Set the element position to the centroid of its membranes."""
    components = zip(*(m.position for m in e.membranes))
    e.position = [np.mean(c) for c in components]
@vectorize
def focus_element(e, pos, sound_speed, quantization=None):
    """Set the element delay so that its wavefront focuses at *pos*.

    The delay is minus the (optionally quantized) time of flight from the
    element to the focal point.
    """
    distance = float(util.distance(e.position, pos))
    tof = distance / sound_speed
    if quantization:  # None or 0 both mean "no quantization"
        tof = round(tof / quantization) * quantization
    e.delay = -tof
@vectorize
def defocus_element(e, pos):
    """Defocusing is not supported yet."""
    raise NotImplementedError
@vectorize
def bias_element(e, bias):
    """Apply a DC bias voltage to element *e*."""
    e.dc_bias = bias
@vectorize
def activate_element(e):
    """Mark element *e* as active."""
    e.active = True
@vectorize
def deactivate_element(e):
    """Mark element *e* as inactive."""
    e.active = False
''' ARRAY MANIPLUATIONS '''
@vectorize
def move_array(a, pos):
    """Move array *a* (elements and membranes included) to *pos*."""
    offset = [target - current for target, current in zip(pos, a.position)]
    translate_array(a, offset)
@vectorize
def translate_array(a, vec):
    """Shift array *a* and everything it contains by *vec*."""
    a.position = [p + d for p, d in zip(a.position, vec)]
    for element in a.elements:
        translate_element(element, vec)
@vectorize
def rotate_array(a, vec, angle, origin=None):
    """Rotate array *a* by *angle* around axis *vec*.

    Parameters
    ----------
    origin : sequence of float, optional
        Pivot point of the rotation.  Defaults to the array's own
        position.  (The original passed ``None`` straight into
        ``np.array`` arithmetic, which fails.)
    """
    if origin is None:
        origin = a.position
    org = np.array(origin)
    pos = np.array(a.position)
    # determine new array position
    newpos = util.rotation_matrix(vec, angle).dot(pos - org) + org
    a.position = newpos.tolist()
    # rotate elements about the same pivot
    for e in a.elements:
        rotate_element(e, origin, vec, angle)
@vectorize
def get_element_positions_from_array(a):
    """Return an array of element positions (one row per element)."""
    positions = [element.position for element in a.elements]
    return np.array(positions)
@vectorize
def get_membrane_positions_from_array(a):
    """Return an array of membrane positions across every element."""
    positions = [m.position for e in a.elements for m in e.membranes]
    return np.array(positions)
@vectorize
def focus_array(a, pos, sound_speed, quantization=None, kind=None):
    """Focus the (matching) elements of array *a* at *pos*.

    Parameters
    ----------
    kind : str or None
        'tx'/'transmit', 'rx'/'receive', or 'txrx'/'both'/None for all
        elements.  Raises ValueError for anything else.  (The original
        crashed with AttributeError for kind=None — ``None.lower()`` was
        evaluated before the ``kind is None`` check — and left `elements`
        unbound for unknown kinds.)
    """
    if kind is None or kind.lower() in ('txrx', 'both'):
        elements = a.elements
    elif kind.lower() in ('tx', 'transmit'):
        elements = [
            e for e in a.elements
            if e['kind'].lower() in ('tx', 'transmit', 'both', 'txrx')
        ]
    elif kind.lower() in ('rx', 'receive'):
        elements = [
            e for e in a.elements
            if e['kind'].lower() in ('rx', 'receive', 'both', 'txrx')
        ]
    else:
        raise ValueError('Unrecognized element kind: %r' % kind)
    for e in elements:
        focus_element(e, pos, sound_speed, quantization)
@vectorize
def reset_focus_array(a):
    """Clear every element delay in array *a*."""
    for element in a.elements:
        element.delay = 0
@vectorize
def get_element_count(a, kind=None):
    """Return the number of elements of the requested *kind*.

    kind=None counts every element; 'tx'/'rx'/'txrx'('both') count the
    elements whose own ``kind`` field matches.  Raises ValueError for an
    unrecognized *kind* (the original silently returned None).
    """
    if kind is None:
        return len(a.elements)
    k = kind.lower()
    if k in ('tx', 'transmit'):
        accepted = ('tx', 'transmit', 'both', 'txrx')
    elif k in ('rx', 'receive'):
        accepted = ('rx', 'receive', 'both', 'txrx')
    elif k in ('txrx', 'both'):
        accepted = ('both', 'txrx')
    else:
        raise ValueError('Unrecognized element kind: %r' % kind)
    return len([e for e in a.elements if e['kind'].lower() in accepted])
@vectorize
def get_membrane_count(a):
    """Total number of membranes across all elements of *a*."""
    return sum([len(element.membranes) for element in a.elements])
@vectorize
def get_patch_count(a):
    """Total number of patches across every membrane in array *a*."""
    return sum([len(membrane.patches)
                for element in a.elements
                for membrane in element.membranes])
@vectorize
def get_elements_from_array(array, kind='both'):
    """Return the elements of *array* filtered by transmit/receive kind.

    Raises ValueError for an unrecognized *kind* (the original fell
    through with `elements` unbound, producing UnboundLocalError).
    """
    k = kind.lower()
    if k == 'both':
        accepted = ('both', 'txrx')
    elif k in ('tx', 'transmit'):
        accepted = ('tx', 'transmit', 'both', 'txrx')
    elif k in ('rx', 'receive'):
        accepted = ('rx', 'receive', 'both', 'txrx')
    else:
        raise ValueError('Unrecognized element kind: %r' % kind)
    return [e for e in array.elements if e.kind.lower() in accepted]
@vectorize
def get_patches_from_array(array):
    """Flat list of every patch in *array*, element by element."""
    patches = []
    for element in array.elements:
        for membrane in element.membranes:
            patches.extend(membrane.patches)
    return patches
@vectorize
def get_membranes_from_array(array):
    """Flat list of every membrane in *array*.

    BUG FIX: the original comprehension had its ``for`` clauses in the
    wrong order (``[m for m in e.membranes for e in array.elements]``),
    which raises NameError because ``e`` is used before it is bound.
    """
    return [m for e in array.elements for m in e.membranes]
@vectorize
def array_position_from_vertices(a):
    """Set the array position to the centroid of its vertices."""
    vertices = np.array(a.vertices)
    a.position = vertices.mean(axis=0).tolist()
2f360cacc99413881b704e6c150accad2e5dde6c | 13,391 | py | Python | model/im_drive.py | johannah-23/motulator | 902eceaf62f0a3607a6eb38c2914fa01e9b6ebf0 | [
"MIT"
] | null | null | null | model/im_drive.py | johannah-23/motulator | 902eceaf62f0a3607a6eb38c2914fa01e9b6ebf0 | [
"MIT"
] | null | null | null | model/im_drive.py | johannah-23/motulator | 902eceaf62f0a3607a6eb38c2914fa01e9b6ebf0 | [
"MIT"
] | null | null | null | # pylint: disable=C0103
"""
This module includes a continuous-time model for an induction motor drive. The
space vector models are implemented in stator coordinates, but this is not
shown in the variable naming for simplicity.
"""
import numpy as np
from helpers import abc2complex, complex2abc
# %%
# pylint: disable=C0103
class Motor:
    """Inverse-Gamma model of an induction motor.

    Peak-valued complex space vectors in stator coordinates are used
    throughout.
    """

    def __init__(self, R_s=3.7, R_R=2.1, L_sgm=.021, L_M=.224, p=2):
        # pylint: disable=R0913
        """Defaults correspond to a 2.2-kW induction motor.

        Parameters
        ----------
        R_s : float
            Stator resistance.
        R_R : float
            Rotor resistance.
        L_sgm : float
            Leakage inductance.
        L_M : float
            Magnetizing inductance.
        p : int
            Number of pole pairs.
        """
        self.R_s = R_s
        self.R_R = R_R
        self.L_sgm = L_sgm
        self.L_M = L_M
        self.p = p
        # Initial stator and rotor flux linkages.
        self.psi_s0 = 0j
        self.psi_R0 = 0j

    def currents(self, psi_s, psi_R):
        """Return (i_s, i_R): stator and rotor currents for the given
        stator and rotor flux linkages."""
        i_s = (psi_s - psi_R)/self.L_sgm
        i_R = psi_R/self.L_M - i_s
        return i_s, i_R

    def torque(self, psi_s, i_s):
        """Return the electromagnetic torque for flux *psi_s* and stator
        current *i_s*."""
        return 1.5*self.p*np.imag(i_s*np.conj(psi_s))

    def f(self, psi_R, i_s, i_R, u_s, w_M):
        # pylint: disable=R0913
        """Return the state derivatives [dpsi_s/dt, dpsi_R/dt].

        i_s and i_R are passed in (rather than recomputed here) to avoid
        overlapping computations; w_M is the rotor speed in mechanical
        rad/s.
        """
        dpsi_s = u_s - self.R_s*i_s
        dpsi_R = -self.R_R*i_R + 1j*self.p*w_M*psi_R
        return [dpsi_s, dpsi_R]

    def meas_currents(self):
        """Return the three phase currents at the end of the sampling
        period (measurement hook; noise/offset could be added here)."""
        i_s, _ = self.currents(self.psi_s0, self.psi_R0)
        return complex2abc(i_s)

    def __str__(self):
        desc = ('Induction motor (inverse-Gamma model):\n'
                ' p={} R_s={} R_R={} L_sgm={} L_M={}')
        return desc.format(self.p, self.R_s, self.R_R, self.L_sgm, self.L_M)
# %%
class SaturationModel:
    """Saturation model based on a simple power function.

    Defaults correspond to the stator inductance of the 2.2-kW induction
    motor.
    """

    def __init__(self, L_unsat=.34, beta=.84, S=7):
        self.L_unsat = L_unsat
        self.beta = beta
        self.S = S

    def __call__(self, psi):
        """Return the instantaneous saturated inductance at flux linkage
        *psi* (magnitude is used when *psi* is complex)."""
        return self.L_unsat/(1 + (self.beta*np.abs(psi))**self.S)

    def __str__(self):
        desc = ('L_sat(psi)=L_unsat/(1+(beta*abs(psi))**S):'
                ' L_unsat={} beta={} S={}')
        return desc.format(self.L_unsat, self.beta, self.S)
# %%
class MotorSaturated(Motor):
    """Gamma model of a saturated induction motor.

    The Gamma model suits magnetic-saturation modeling better than the
    inverse-Gamma model of the base class.
    """

    def __init__(self,
                 R_s=3.7, R_R=2.5, L_sgm=.023, L_M=SaturationModel(), p=2):
        # pylint: disable=R0913
        """Defaults correspond to the 2.2-kW induction motor.

        L_M is a callable ``L_M(psi_s)`` returning the saturated stator
        inductance; the remaining parameters are as in Motor.
        """
        super().__init__(R_s=R_s, R_R=R_R, L_sgm=L_sgm, L_M=L_M, p=p)

    def currents(self, psi_s, psi_R):
        """Override: compute the currents using the saturated stator
        inductance evaluated at *psi_s*."""
        L_M = self.L_M(psi_s)
        i_R = (psi_R - psi_s)/self.L_sgm
        i_s = psi_s/L_M - i_R
        return i_s, i_R

    def __str__(self):
        desc = ('Saturated induction motor (Gamma model):\n'
                ' p={} R_s={} R_R={} L_sgm={}\n'
                ' L_M={}')
        return desc.format(self.p, self.R_s, self.R_R, self.L_sgm, self.L_M)
# %%
class Drive:
    """Interconnects the subsystems of an induction motor drive and
    provides the state-vector interface to the ODE solver.  More
    complicated systems can be simulated using a similar template.
    """

    def __init__(self, motor, mech, converter, delay, pwm, datalog):
        """Store the subsystem instances."""
        self.motor = motor
        self.mech = mech
        self.converter = converter
        self.delay = delay
        self.pwm = pwm
        self.datalog = datalog
        self.q = 0   # Switching-state space vector
        self.t0 = 0  # Initial simulation time

    def get_initial_values(self):
        """Return the initial state vector
        [psi_s0, psi_R0, theta_M0, w_M0]."""
        return [self.motor.psi_s0, self.motor.psi_R0,
                self.mech.theta_M0, self.mech.w_M0]

    def set_initial_values(self, t0, x0):
        """Load the solver state *x0* (complex ndarray) back into the
        subsystems at time *t0*."""
        self.t0 = t0
        self.motor.psi_s0 = x0[0]
        self.motor.psi_R0 = x0[1]
        # x0[2].imag and x0[3].imag are always zero
        self.mech.theta_M0 = x0[2].real
        self.mech.w_M0 = x0[3].real
        # Keep the mechanical angle inside [0, 2*pi]
        self.mech.theta_M0 = np.mod(self.mech.theta_M0, 2*np.pi)

    def f(self, t, x):
        """Return the complete state-derivative list for the solver at
        time *t* and state vector *x*."""
        psi_s, psi_R, _, w_M = x
        # Interconnections: outputs needed by the state equations
        u_s = self.converter.ac_voltage(self.q, self.converter.u_dc0)
        i_s, i_R = self.motor.currents(psi_s, psi_R)
        T_M = self.motor.torque(psi_s, i_s)
        # Concatenated subsystem derivatives
        return (self.motor.f(psi_R, i_s, i_R, u_s, w_M)
                + self.mech.f(t, w_M, T_M))
# %%
class Datalogger:
    """Default data logger.

    Quantities in stator coordinates carry an extra ``s`` suffix,
    e.g. ``i_ss`` is the stator current in stator coordinates.
    """

    def __init__(self):
        """Create empty sample buffers."""
        # pylint: disable=too-many-instance-attributes
        # Time and switching state, one entry per solver step
        self.t, self.q = [], []
        # Flux linkages (stator coordinates)
        self.psi_ss, self.psi_Rs = [], []
        # Mechanical angle and speed
        self.theta_M, self.w_M = [], []
        # Derived quantities, filled in by post_process()
        self.u_ss, self.i_ss = 0j, 0j
        self.w_m, self.theta_m = 0, 0
        self.T_M, self.T_L = 0, 0

    def save(self, mdl, sol):
        """Append one solver segment.

        *mdl* is the continuous-time model, *sol* the bunch object
        returned by the solver.
        """
        n = len(sol.t)
        self.t.extend(sol.t)
        self.q.extend(n*[mdl.q])
        self.psi_ss.extend(sol.y[0])
        self.psi_Rs.extend(sol.y[1])
        self.theta_M.extend(sol.y[2].real)
        self.w_M.extend(sol.y[3].real)

    def post_process(self, mdl):
        """Convert the buffers to ndarrays and compute derived signals."""
        self.t = np.asarray(self.t)
        self.q = np.asarray(self.q)
        self.psi_ss = np.asarray(self.psi_ss)
        self.psi_Rs = np.asarray(self.psi_Rs)
        self.theta_M = np.asarray(self.theta_M)
        self.w_M = np.asarray(self.w_M)
        # Derived electrical/mechanical quantities
        self.i_ss, _ = mdl.motor.currents(self.psi_ss, self.psi_Rs)
        self.theta_m = np.mod(mdl.motor.p*self.theta_M, 2*np.pi)
        self.w_m = mdl.motor.p*self.w_M
        self.T_M = mdl.motor.torque(self.psi_ss, self.i_ss)
        self.T_L = mdl.mech.T_L_ext(self.t) + mdl.mech.B*self.w_M
        self.u_ss = mdl.converter.ac_voltage(self.q, mdl.converter.u_dc0)
# %%
class DriveWithDiodeBridge(Drive):
    """Induction motor drive fed through a three-phase diode bridge.

    Adds the DC-link voltage and inductor current to the state vector.
    """

    def get_initial_values(self):
        """Extend the base state vector with [u_dc0, i_L0]."""
        return super().get_initial_values() + [self.converter.u_dc0,
                                               self.converter.i_L0]

    def set_initial_values(self, t0, x0):
        """Split *x0* between the base drive and the converter states."""
        super().set_initial_values(t0, x0[0:4])
        self.converter.u_dc0 = x0[4].real
        self.converter.i_L0 = x0[5].real

    def f(self, t, x):
        """Override: state derivatives including the DC-bus dynamics."""
        psi_s, psi_R, _, w_M, u_dc, i_L = x
        # Interconnections: subsystem outputs
        i_s, i_R = self.motor.currents(psi_s, psi_R)
        u_s = self.converter.ac_voltage(self.q, u_dc)
        i_dc = self.converter.dc_current(self.q, i_s)
        T_M = self.motor.torque(psi_s, i_s)
        # Concatenated state derivatives
        return (self.motor.f(psi_R, i_s, i_R, u_s, w_M) +
                self.mech.f(t, w_M, T_M) +
                self.converter.f(t, u_dc, i_L, i_dc))
# %%
class DataloggerExtended(Datalogger):
    """Data logger for the drive model with DC-bus dynamics."""

    def __init__(self):
        """Add DC-link buffers on top of the base logger."""
        # pylint: disable=too-many-instance-attributes
        super().__init__()
        self.u_dc, self.i_L = [], []
        self.i_dc, self.u_di, self.u_g, self.i_g = 0, 0, 0, 0

    def save(self, mdl, sol):
        """Extend the base class with the DC-link states."""
        super().save(mdl, sol)
        self.u_dc.extend(sol.y[4].real)
        self.i_L.extend(sol.y[5].real)

    def post_process(self, mdl):
        """Extend the base class with grid-side derived quantities."""
        super().post_process(mdl)
        # From lists to ndarrays
        self.u_dc = np.asarray(self.u_dc)
        self.i_L = np.asarray(self.i_L)
        # Derived converter quantities
        self.u_ss = mdl.converter.ac_voltage(self.q, self.u_dc)
        self.i_dc = mdl.converter.dc_current(self.q, self.i_ss)
        u_g_abc = mdl.converter.grid_voltages(self.t)
        self.u_g = abc2complex(u_g_abc)
        # Voltage at the output of the diode bridge
        self.u_di = np.amax(u_g_abc, 0) - np.amin(u_g_abc, 0)
        # Diode bridge switching states (-1, 0, 1)
        q_g_abc = ((np.amax(u_g_abc, 0) == u_g_abc).astype(int) -
                   (np.amin(u_g_abc, 0) == u_g_abc).astype(int))
        # Grid current space vector
        self.i_g = abc2complex(q_g_abc)*self.i_L
| 29.301969 | 79 | 0.532895 |
b4a238ad7c67c96e34517c1f9f1433d5e660ce66 | 27,937 | py | Python | zest/server.py | kzinglzy/zest | 98e24ff745740c206206cc6dc873a361933d374c | [
"Apache-2.0"
] | 6 | 2016-09-08T13:13:08.000Z | 2020-07-10T12:58:04.000Z | zest/server.py | Kzinglzy/zest | 98e24ff745740c206206cc6dc873a361933d374c | [
"Apache-2.0"
] | null | null | null | zest/server.py | Kzinglzy/zest | 98e24ff745740c206206cc6dc873a361933d374c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
# Copyright 2015 by kzing.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
zest.server
This moudle provides an asynchronous server base on `asyncio`.
"""
import asyncio
import sys
import collections
import os
import traceback
from urllib.parse import unquote
from zest.core import HTTPHeaders, lazy_property, is_future, is_async
from zest.httputil import HTTPError
from zest.helper.util import utc_time, get_logger
from zest.helper.consts import HTTP_STATUS, ERROR_TEMPLETE
logger = get_logger('zest.server')
# Lightweight records describing an HTTP version, a response start line,
# and a fully parsed request message.
HTTPVersion = collections.namedtuple('http_version', 'major minor protocol')
HTTPStartLine = collections.namedtuple('http_startline', 'version status')
RequestMessage = collections.namedtuple(
    'request_message', 'command path version query close_coon headers'
)

# Wire-format constants (str and bytes flavors of CRLF).
CRLF = '\r\n'
bCRLF = b'\r\n'
SPACE = ' '
HEADER_END = '\r\n\r\n'
COLON = ':'
class Timeout(asyncio.TimeoutError):
    """Raised/used when an operation exceeds the given deadline.

    Parameters:
      - seconds: non-negative timeout (0 disables the timer)
      - handler: callable invoked when the timeout fires
      - loop: event loop (defaults to the current one)
      - exc: exception raised after the handler runs; ``False`` suppresses
        re-raising, ``None`` raises this Timeout instance itself.

    Usage:
        def timeout_handler():
            print('timeout')
        with Timeout(10, timeout_handler):
            time.sleep(10)
    """

    def __init__(self, seconds, handler, *, loop=None, exc=None):
        self.seconds = seconds
        self.handler = handler
        self.loop = loop or asyncio.get_event_loop()
        self.exc = exc
        self._timeout_handler = None

    def __enter__(self):
        self._is_timeout = False
        assert self.seconds >= 0
        if self.seconds > 0:
            self._timeout_handler = self.loop.call_later(self.seconds,
                                                         self._trigger_timeout)
        return self

    def __exit__(self, exc_type, exc_value, trace):
        if self._is_timeout:
            # The timer fired: run the user handler, then re-raise unless
            # explicitly suppressed with exc=False.
            self.handler()
            if self.exc is not False:
                raise self.exc or self
            else:
                return True  # ignore this timeout exception
        elif self._timeout_handler:
            self._timeout_handler.cancel()
            self._timeout_handler = None

    def _trigger_timeout(self):
        # Mark the timeout; __exit__ performs the actual handling.
        # BUG FIX: the original ended with a bare `raise`, which always
        # fails with "RuntimeError: No active exception to re-raise"
        # inside a loop callback.  Raise TimeoutError explicitly so the
        # loop's exception handler logs something meaningful.
        self._is_timeout = True
        raise asyncio.TimeoutError
# TODO
class StreamReader(asyncio.streams.StreamReader):
    """asyncio StreamReader with a couple of extra read helpers."""

    def _to_bytes(self, s):
        """Coerce *s* to bytes (utf-8 encodes str input)."""
        if isinstance(s, bytes):
            return s
        return bytes(s, 'utf-8')

    @asyncio.coroutine
    def readuntil(self, stop, limit=0):
        """Read from the buffer until *stop* is found (inclusive).

        Sleeps (via the feed_data/feed_eof machinery) while the buffer
        contains no terminator; raises ValueError when the match would
        exceed *limit* bytes (defaults to the reader's own limit).
        """
        limit = limit or self._limit
        if self._exception is not None:
            raise self._exception
        stop = self._to_bytes(stop)
        out = bytearray()
        while True:
            idx = self._buffer.find(stop)
            if idx >= 0:
                end = idx + len(stop)
                if end > limit:
                    raise ValueError('Line is too long to %s.' % limit)
                out.extend(self._buffer[:end])
                del self._buffer[:end]
                break
            if self._eof:
                break
            yield from self._wait_for_data('readuntil')
        self._maybe_resume_transport()
        return bytes(out)

    @asyncio.coroutine
    def read_or_wait(self, wait=True):
        """Drain and return everything currently buffered.

        With wait=True an empty buffer sleeps until data or EOF arrives;
        otherwise the call returns immediately (possibly empty).
        """
        if self._exception is not None:
            raise self._exception
        if wait is True and not self._buffer and not self._eof:
            yield from self._wait_for_data('read_or_wait')
        data = bytes(self._buffer)
        del self._buffer[:]
        return data
# TODO
class RFileReader(StreamReader):
    """Provides a conforming ``wsgi.input`` object for request entities."""

    zlib_obj = None

    def __init__(self, rfile, encoding=None):
        super().__init__()
        self.rfile = rfile
        # Content-Encoding of the payload, if any.
        self.encoding = encoding

    def feed_data(self, data):
        # NOTE(review): transparent decompression is stubbed out below;
        # with an encoding set, data is currently fed through unchanged.
        if self.encoding:
            # zlib_obj = zlib.decompressobj(
            #     wbits=16 + zlib.MAX_WBITS if self.encoding == 'gzip'
            #     else -zlib.MAX_WBITS
            # )
            # data = zlib_obj.decompress(data, 100)
            # self.zlib_obj = zlib_obj
            pass
        if data:
            super().feed_data(data)

    def feed_eof(self):
        if self.encoding:
            super().feed_data(self.zlib_obj.flush())
        super().feed_eof()
def is_valid_status(code):
    """Return True when *code* is a known HTTP status code."""
    return code in HTTP_STATUS
class HTTPCoonection:
    """Implements the HTTP/1.x protocol for one connection: parsing the
    request line/headers/body and writing (possibly chunked) responses.
    Based on RFC 2616.
    """

    DEFAULT_CTYPE = "text/html; charset=UTF-8"

    # Request-line fields (filled in by parse_message)
    method = None
    path = None
    version = None
    status = None

    # Parser / writer state flags
    _parse_message = False
    _write_headers = False
    _write_chunk = False
    _write_chunk_eof = False

    def __init__(self, reader, writer, server, *, coding='utf-8',
                 max_body=65535, max_headers=65535):
        self.reader = reader
        self.writer = writer
        self.server = server
        self.coding = coding
        self.max_body = max_body
        self.max_headers = max_headers
        # Mirror the server's timeout/keep-alive policy for this connection.
        self._timeout = server._timeout
        self._timeout_handler = server.cancel_request
        self._loop = server.loop
        self._keep_alive = server._keep_alive

    @asyncio.coroutine
    def parse_message(self):
        """Parse the request line and request headers.

        The first line of the request has the form
            <command> <path> <version>

        Returns a RequestMessage namedtuple on success; raises HTTPError
        on malformed input.
        """
        self._parse_message = True
        reader = self.reader
        line = yield from reader.readline()
        if line == bCRLF:
            # RFC 2616 sec 4.1: ignore a CRLF at the beginning of a
            # message.  BUG FIX: readline() returns bytes, so the original
            # comparison against the str constant CRLF was always False
            # and the leading empty line was never skipped.
            line = yield from reader.readline()
        line = line.decode(self.coding)
        if not line:
            raise asyncio.CancelledError
        if not line.endswith(CRLF):
            raise HTTPError(400, 'HTTP requires CRLF terminators')
        command, path, version, query = self._parse_request_line(line)
        self.command, self.path, self.version = command, path, version
        with Timeout(self._timeout, self._timeout_handler, loop=self._loop):
            lines = yield from reader.readuntil(HEADER_END)
        # BUG FIX: the original evaluated `lines.decode(...)` and threw
        # the result away, handing raw bytes to the header parser.
        lines = lines.decode(self.coding)
        headers = HTTPHeaders.from_lines(lines)
        connection = headers.get('Connection', '').lower()
        if (version.minor == 1 and connection == 'close') or \
                (version.minor == 0 and connection != 'keep-alive'):
            # HTTP/1.1 defaults to keep-alive; HTTP/1.0 defaults to close.
            close_coon = True
        else:
            close_coon = False
        return RequestMessage(command, path, version, query,
                              close_coon, headers)

    def _parse_request_line(self, line):
        """Split a request line into (method, path, version, query)."""
        try:
            method, path, version = line.strip().split(None, 2)
            version = HTTPVersion(int(version[5]), int(version[7]), version)
        except (ValueError, IndexError):
            raise HTTPError(400, 'Malformed Request-Line')
        if version.major < 1:  # HTTP version should be 1.x
            raise HTTPError(505)
        query = ''
        if '?' in path:
            path, query = path.split('?', 1)
        return method, path, version, query

    @asyncio.coroutine
    def parse_body(self, headers):
        """Parse the request payload and return an RFileReader object."""
        if not self._parse_message:
            raise Exception("Should read the request message first"
                            "before read the body")
        reader = self.reader
        body = RFileReader(reader, headers.get('Content-Encoding'))
        content_length = headers.get('Content-Length', 0, int)
        if content_length > self.max_body:
            raise HTTPError(413)
        chunk_read = False
        if self.version.minor == 1:  # Transfer-Encoding is HTTP/1.1 only
            te = headers.get('Transfer-Encoding', '')
            if te:
                # BUG FIX: the RFC token is "chunked"; the original
                # compared against "chunk" and answered 501 to every
                # valid chunked request.  "chunk" is still accepted for
                # backwards compatibility with old callers.
                if te.lower() in ('chunked', 'chunk'):
                    chunk_read = True
                else:
                    raise HTTPError(501)  # only chunked is supported
        if chunk_read:
            yield from self._read_chunk_body(reader, body)
        elif content_length > 0:
            yield from self._read_fixed_body(content_length, reader, body)
        elif self.command in ('PUT', 'POST'):
            # PUT/POST without a length header: best-effort read of
            # whatever is already buffered.
            yield from self._read_until_eof(reader, body)
        body.feed_eof()
        return body

    def _read_fixed_body(self, length, input_, output):
        """Read exactly *length* payload bytes from *input_* into *output*."""
        assert length > 0, "Content length should be bigger than zero"
        while length:
            chunk = yield from input_.readexactly(length)
            length = len(chunk)
            if not length:
                logger.warn("WARNING: request body have not read enough "
                            "(maybe) because of a bad content length.")
                break
            output.feed_data(chunk)
            length -= len(chunk)

    def _read_chunk_body(self, input_, output):
        """Decode a chunked transfer-encoded payload into *output*."""
        while True:
            chunk_len = yield from input_.readuntil(b"\r\n", limit=64)
            try:
                chunk_len = int(chunk_len.strip(), 16)
            except ValueError:
                raise HTTPError(400, 'Transfer encoding error')
            if chunk_len == 0:
                break
            yield from self._read_fixed_body(chunk_len, input_, output)
            yield from input_.readexactly(2)  # Skip the trailing CRLF

    def _read_until_eof(self, input_, output):
        """Feed whatever is currently buffered into *output*."""
        output.feed_data((yield from input_.read_or_wait(False)))

    def _make_start_line(self, status, version=None):
        """Build the response status line, e.g. ``HTTP/1.1 200\\r\\n``."""
        return (version or 'HTTP/1.1') + SPACE + str(status) + CRLF

    @asyncio.coroutine
    def simple_response(self, status, msg='', headers_dict=None):
        """Send a complete small response with the given *status*."""
        assert self.server.task is not None, "Server is closed"
        if not is_valid_status(status):
            raise Exception("HTTP status is invalid: %s." % status)
        message = msg or HTTP_STATUS[status]
        response = [self._make_start_line(status)]
        if headers_dict:
            response.extend(["%s: %s\r\n" % (k, v)
                             for k, v in headers_dict.items()])
        else:  # Add default headers
            response.extend(["Content-Length: %s\r\n" % len(message),
                             "Content-Type: text/plain\r\n"])
        response.append(CRLF)
        response.append(message)
        self.writer.write(''.join(response).encode('utf-8'))
        return (yield from self.write_eof())

    def write_headers(self, status, headers, version='HTTP/1.1'):
        """Send the status line and *headers* to the client.

        *headers* may be a dict, a list of pairs, or an HTTPHeaders.
        Chunked transfer encoding is enabled automatically when no
        Content-Length is supplied (HTTP/1.1, bodied statuses only).
        """
        if not is_valid_status(status):
            raise Exception("HTTP status is invalid: %s" % status)
        try:
            if isinstance(headers, list):
                headers = HTTPHeaders.from_lists(headers)
            elif isinstance(headers, dict):
                headers = HTTPHeaders(headers)
        except Exception:
            raise ValueError("Invalid headers object")
        self._write_headers = True
        if not headers.get('Content-Length') and \
                version == 'HTTP/1.1' and status not in (204, 205, 304):
            self._write_chunk = True
            headers['Transfer-Encoding'] = 'chunked'
        if not headers.get('Connection'):
            if self.server._keep_alive and version == 'HTTP/1.1':
                coon = 'keep-alive'
            else:
                coon = 'close'
            headers.add('Connection', coon)
        if not headers.get('Content-Type'):
            headers.add('Content-Type', self.DEFAULT_CTYPE)
        if not headers.get('Date'):
            headers.add('Date', utc_time())
        if not headers.get('Server'):
            headers.add('Server', self.server.server_name)
        data = [self._make_start_line(status, version)]
        for k, v in headers.allitems():
            data.append(k + COLON + SPACE + str(v) + CRLF)
        data.append(CRLF)
        self.writer.write(''.join(data).encode('utf-8'))

    def write(self, chunk, encoding='utf-8'):
        """Send a body chunk.

        write_headers() must have been called first, and write_eof()
        must be called once the body is complete.
        """
        assert self._write_headers is True, 'Should write headers first'
        if not isinstance(chunk, bytes):
            chunk = chunk.encode(encoding)
        if self._write_chunk:
            chunk = self._format_chunk(chunk)
        return self.writer.write(chunk)

    def _format_chunk(self, chunk):
        """Wrap *chunk* in chunked-transfer framing (hex length + CRLF)."""
        return b'%x\r\n%s%s' % (len(chunk), chunk, bCRLF)

    @asyncio.coroutine
    def write_eof(self):
        """Finish writing the body and flush the writer buffer."""
        if self._write_chunk:
            self.writer.write(b'0\r\n\r\n')
        # Response complete; the next response must send headers again.
        self._write_headers = False
        return (yield from self.writer.drain())
def _exception_callback(future):
""" Catch the excption from Futures object.
When Futures and Tasks set an excption: the exception is never log
unless asks for this exception, so this func is try to catch the
exception after a an excption have seted.
"""
if future.exception():
future.result()
class HTTPServer(asyncio.streams.FlowControlMixin, asyncio.Protocol):
""" The async HTTP server protocol.
Parameters:
- loop: current event loop.
- timeout: seconds time to cancel a slow request.(default to 15)
- keep_alive: keep an request connection open.(default to True)
- keep_alive_period: seconds time to close keepalive connection. only
work when keep_alive is set to True.
- debug: bool value to show more traceback infos.
- if_ssl: bool value to use `https` or `http`.
- server_name: string name show at the response's headers.
Useage:
>> loop = asyncio.get_event_loop()
>> server = HTTPServer(*args, **kwds)
>> asyncio.async(loop.create_server(server, host, port))
>> loop.run_forever()
"""
server_version = '0.0.1'
http_version = 'HTTP/1.1'
task = None
def __init__(self, loop=None, timeout=15, keep_alive_period=75,
keep_alive=False, debug=False, is_ssl=False,
server_name=None, quiet=False):
super().__init__(loop=loop)
self.loop = loop or asyncio.get_event_loop()
self._timeout = timeout
self._keep_alive = keep_alive
self._keep_alive_period = keep_alive
self._keep_alive_handler = None
self._debug = debug
self._is_ssl = is_ssl
self._server_name = server_name
self._quiet = quiet
self.reader = StreamReader(loop=loop)
self.writer = None
def connection_made(self, transport):
""" An new HTTP connection entities is make, start to process.
"""
self.transport = transport
self.reader.set_transport(transport)
self.writer = asyncio.streams.StreamWriter(
transport, self, self.reader, self.loop)
self.task = asyncio.async(self.start(), loop=self.loop)
self.coon = HTTPCoonection(self.reader, self.writer, self)
if self._debug:
# only under debug model we will get the exception info,
# else exception will be mute unless we manual call it.
self.task.add_done_callback(_exception_callback)
def data_received(self, data):
self.reader.feed_data(data)
def eof_received(self):
self.reader.feed_eof()
def connection_lost(self, exc):
""" Request connection is closed.
"""
super().connection_lost(exc)
self.transport = self.writer = self.coon = None
if exc is None:
self.reader.feed_eof()
else:
self.reader.set_exception(exc)
if self.task is not None:
self.task.cancel()
self.task = None
if self._keep_alive_handler is not None:
self._keep_alive_handler.cancel()
self._keep_alive_handler = None
@asyncio.coroutine
def start(self):
""" Start processing of incoming requests.
"""
request = self.coon
while True:
try:
# Before we parse the request body, we have to
# stop the keep-alive timer to avoid the transport close.
self.stop_keep_alive()
message = yield from request.parse_message()
body = yield from request.parse_body(message.headers)
yield from self.handle_request(message, body)
except Timeout:
logger.warn('Parse timeout. Close this slow request.')
break
except asyncio.CancelledError:
logger.debug('Ignored premature client disconnection.')
break
except HTTPError as err:
self.log_http_error(err)
yield from self.handle_error(err.status, err.message)
except Exception as exc:
self.log_exception(exc)
yield from self.handle_error(500, exc if self._debug else '')
else:
self._keep_alive = False # raise exception
finally:
if self.task:
if self._keep_alive:
self.start_keep_alive(self._keep_alive_period)
else:
self.task = None
self.transport.close()
break
else:
break
def cancel_request(self):
""" Ignore and Close this request.
"""
if self.task and not self.task.done():
self.task.cancel()
self.task = None
if self.transport:
self.transport.close()
@lazy_property
def server_name(self):
return '%s:%s' % (self._server_name or 'zest', self.server_version)
@lazy_property
def peername(self):
""" A tuple object for remote_addr and remote_port.
"""
return self.transport.get_extra_info('peername')
def start_keep_alive(self, sendos):
""" Keep a request alive for sendos.
"""
self._keep_alive_handler = self.loop.call_later(
sendos, self.transport.close)
def stop_keep_alive(self):
""" Cancel the keep_alive task for request entities
"""
if self._keep_alive_handler:
self._keep_alive_handler.cancel()
self._keep_alive_handler = None
    def set_timeout(self, timeout):
        """Set the timeout used while parsing a request."""
        self._timeout = timeout
    def set_debug(self, debug):
        """Toggle debug mode (controls traceback logging and 500 detail)."""
        self._debug = debug
    def shut_down_server(self):
        """Stop the event loop if it is currently running."""
        if self.loop.is_running():
            self.loop.stop()
@asyncio.coroutine
def handle_error(self, status, msg=''):
""" Handle exception and send a specify status response.
"""
try:
status = int(status)
reason = HTTP_STATUS.get(status)
if not reason:
reason = HTTP_STATUS[404]
status = 400
message = ERROR_TEMPLETE.format(status=status,
reason=reason, message=msg)
headers = {'Content-Type': 'text/html; charset=utf-8'}
return (yield from self.coon.simple_response(status,
message, headers))
except:
logger.error(traceback.format_exc())
finally:
self._keep_alive = False
    @asyncio.coroutine
    def handle_request(self, message, body):
        """ Handle a single HTTP request then default to response 404.
        Override this method to make your response.
        """
        # Default implementation ignores the incoming `body` parameter and
        # rebinds the name to the response payload.
        body = '404 Not Found'
        headers = HTTPHeaders({
            'Content-Type': 'text/plain',
            'Content-Length': len(body),
            'connection': 'keep-alive'
        })
        self.coon.write_headers(404, headers)
        self.coon.write(body)
        self.log_request(404, message)
        return (yield from self.coon.write_eof())
def log_request(self, status, msg):
if self._quiet:
return
status = status if is_valid_status(status) else 'UNKNOW'
logger.info('%s %s' % (
status,
'%s %s' % (msg.command, unquote(msg.path))
))
def log_http_error(self, err):
status = err.status if is_valid_status(err.status) else 'UNKNOW'
return logger.info('%s %s' % (status, err.request_line or
'Internal Server Error'))
def log_exception(self, exc):
return logger.error(traceback.format_exc()) if self._debug \
else logger.error(exc)
    def __call__(self):
        """ Process this class as protocol_factory for event loop.
        Since protocol_factory is called once for each new incoming
        connection, it should return a **new** Protocol object each time
        it is called.
        """
        # NOTE(review): this reads `self.is_ssl` while WsgiServer's environ
        # code reads `self._is_ssl` — confirm both attributes exist.
        return HTTPServer(self.loop, self._timeout, self._keep_alive_period,
                          self._keep_alive, self._debug, self.is_ssl,
                          self.server_name)
def make_http_server(host, port, **kwds):
    """ Start a HTTP server.
    Override the `handle_request` and `handle_error` to get your own response.
    """
    loop = asyncio.get_event_loop()
    # NOTE(review): **kwds is accepted but never used, and the bare class
    # (not an instance) is passed as protocol_factory — confirm intent.
    server = HTTPServer
    srv = loop.create_server(server, host, port)
    try:
        # `asyncio.async()` became a SyntaxError when `async` turned into a
        # keyword (Python 3.7); ensure_future() is the replacement.
        asyncio.ensure_future(srv)
        loop.run_forever()
    finally:
        loop.close()
class WsgiServer(HTTPServer):
    """ Implement PEP-0333.
    """
    def __init__(self, app, *args, **kwds):
        super(WsgiServer, self).__init__(*args, **kwds)
        # WSGI application callable dispatched for every request.
        self.app = app
        # Kept so __call__ can build a fresh protocol per connection.
        self._args = args
        self._kwds = kwds
    def setup_environ(self, message, body):
        """Build the WSGI environ dict for one request (PEP 3333)."""
        env = {
            'wsgi.input': body,
            'wsgi.errors': sys.stderr,
            'wsgi.version': (1, 0),
            'wsgi.multithread': False,
            'wsgi.multiprocess': False,
            'wsgi.run_once': False,
            'wsgi.url_scheme': 'https' if self._is_ssl else 'http',
            'REQUEST_METHOD': message.command,
            'PATH_INFO': message.path,
            'QUERY_STRING': message.query,
            'REMOTE_ADDR': self.peername[0],
            'REMOTE_PORT': self.peername[1],
            'SERVER_NAME': self.server_name,
            'SERVER_PROTOCOL': self.http_version,
            'SERVER_PORT': 80 if not self._is_ssl else 443,
            'SCRIPT_NAME': os.environ.get('SCRIPT_NAME', ''),
            # Non-standard extras so applications can reach server and loop.
            'SERVER_CLASS': self,
            'EVENT_LOOP': self.loop,
        }
        # HTTP headers become HTTP_* keys, except the two content headers
        # which PEP 3333 names CONTENT_LENGTH / CONTENT_TYPE.
        for k, v in message.headers.items():
            if k == 'Content-Length':
                env['CONTENT_LENGTH'] = v
            elif k == 'Content-Type':
                env['CONTENT_TYPE'] = v
            else:
                env["HTTP_" + k.upper().replace("-", "_")] = v
        return env
    @asyncio.coroutine
    def handle_request(self, message, body):
        """ Call a WSGI application and make responses.
        """
        yield from self.async_run(message, body)
        # keep_alive = getattr(result, 'keep_alive', False)
        # if keep_alive is not None:
        #     self._keep_alive = keep_alive
    @asyncio.coroutine
    def async_run(self, message, body):
        """ Implement WSGI interface.
        """
        env = self.setup_environ(message, body)
        headers_set = []
        headers_sent = False
        def write(data):
            # WSGI write callable: flushes headers lazily on first write.
            nonlocal headers_set, headers_sent
            if not headers_set:
                raise AssertionError("write() before start_response()")
            elif not headers_sent:
                status, headers = headers_set
                status = int(status[:3])
                self.coon.write_headers(status, headers, self.http_version)
                headers_sent = True
            # NOTE(review): on a second call `status` is never assigned
            # (headers already sent), so log_request below would raise
            # NameError — confirm and fix upstream.
            self.coon.write(data)
            if self._quiet is False:
                self.log_request(status, message)
        def start_response(status, headers, exc_info=None):
            nonlocal headers_set, headers_sent
            if exc_info:
                try:
                    # NOTE(review): PEP 3333 says re-raise only when headers
                    # were already *sent*; `self._send_headers` is not set
                    # anywhere visible — looks like it should be
                    # `headers_sent`. Confirm.
                    if self._send_headers:
                        for exc in exc_info:
                            raise exc
                finally:
                    exc_info = None
            elif headers_set:
                raise AssertionError("Headers already set!")
            headers_set[:] = [status, headers]
            return write
        response = self.app(env, start_response)
        try:
            if is_async(response):
                response = yield from response
            for data in response:
                if data:
                    if is_future(data):
                        data = yield from data
                    write(data)
            if not headers_sent:  # Send headers now if body was empty
                write('')
            yield from self.coon.write_eof()
        finally:
            # PEP 3333: always call close() on the app's iterable if present.
            if hasattr(response, 'close'):
                response.close()
        return response
    def __call__(self):
        """ Process this class as protocol_factory.
        """
        return WsgiServer(self.app, *self._args, **self._kwds)
def make_wsgi_server(app, host='127.0.0.1', port=7676, debug=False, **kwds):
    """ Start a WSGI server on specify host and port.
    Parameters:
    - host: server host.
    - port: server port, should be a positive integer.
    - app: a WSGI application to dispatch request and make response
    - debug: If set to True, will show more traceback info when error.
    - options kwds: another option for server. See `HTTPServer` for detail.
    """
    loop = asyncio.get_event_loop()
    server = WsgiServer(app, debug=debug, **kwds)
    srv = loop.create_server(server, host, port)
    try:
        # `asyncio.async()` became a SyntaxError when `async` turned into a
        # keyword (Python 3.7); ensure_future() is the replacement.
        asyncio.ensure_future(srv)
        loop.run_forever()
    finally:
        loop.close()
| 33.904126 | 79 | 0.580664 |
1e88dad312dbb9c5f04ea48c722bc6f2c5814c5e | 3,610 | py | Python | examples/model_compression/pp-minilm/data.py | JeremyZhao1998/PaddleNLP | 5a34684a7f0c8a186043fed386be4b62cb85fb15 | [
"Apache-2.0"
] | 1 | 2022-01-28T06:32:26.000Z | 2022-01-28T06:32:26.000Z | examples/model_compression/pp-minilm/data.py | JeremyZhao1998/PaddleNLP | 5a34684a7f0c8a186043fed386be4b62cb85fb15 | [
"Apache-2.0"
] | null | null | null | examples/model_compression/pp-minilm/data.py | JeremyZhao1998/PaddleNLP | 5a34684a7f0c8a186043fed386be4b62cb85fb15 | [
"Apache-2.0"
] | 1 | 2022-03-30T03:05:52.000Z | 2022-03-30T03:05:52.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle.metric import Metric, Accuracy
from paddlenlp.transformers import PPMiniLMForSequenceClassification, PPMiniLMTokenizer
from paddlenlp.transformers import BertForSequenceClassification, BertTokenizer
# Map of model architecture name -> (sequence-classification model class,
# matching tokenizer class).
MODEL_CLASSES = {
    "ppminilm": (PPMiniLMForSequenceClassification, PPMiniLMTokenizer),
    "bert": (BertForSequenceClassification, BertTokenizer)
}
# Evaluation metric per CLUE task; every task listed here uses Accuracy.
METRIC_CLASSES = {
    "afqmc": Accuracy,
    "tnews": Accuracy,
    "iflytek": Accuracy,
    "ocnli": Accuracy,
    "cmnli": Accuracy,
    "cluewsc2020": Accuracy,
    "csl": Accuracy,
}
def convert_example(example,
                    label_list,
                    tokenizer=None,
                    is_test=False,
                    max_seq_length=512,
                    **kwargs):
    """Convert one raw CLUE example into model-ready features.

    Parameters:
    - example: dict holding the raw fields of a single sample.
    - label_list: list of possible labels (kept for API compatibility;
      not consulted by this converter).
    - tokenizer: callable turning text into ids; when None the (possibly
      rewritten) raw example dict is returned unchanged.
    - is_test: when True, no label is attached to the output.
    - max_seq_length: truncation length passed to the tokenizer.

    Returns (input_ids, token_type_ids, label) for training data,
    (input_ids, token_type_ids) when is_test, or the raw example dict
    when no tokenizer is given.
    """
    if not is_test:
        # Get the label (removed an unused `label_dtype` computation here).
        example['label'] = np.array(example["label"], dtype="int64")
        label = example['label']
    # Convert raw text to feature
    if 'keyword' in example:  # CSL: keywords joined into the first sentence
        sentence1 = " ".join(example['keyword'])
        example = {
            'sentence1': sentence1,
            'sentence2': example['abst'],
            'label': example['label']
        }
    elif 'target' in example:  # wsc: mark query with "_" and pronoun with "[]"
        text, query, pronoun, query_idx, pronoun_idx = example['text'], example[
            'target']['span1_text'], example['target']['span2_text'], example[
                'target']['span1_index'], example['target']['span2_index']
        text_list = list(text)
        assert text[pronoun_idx:(pronoun_idx + len(pronoun)
                                 )] == pronoun, "pronoun: {}".format(pronoun)
        assert text[query_idx:(query_idx + len(query)
                               )] == query, "query: {}".format(query)
        # Markers are inserted for the earlier span first, then the later
        # span with offsets (+2) compensating the two characters added.
        if pronoun_idx > query_idx:
            text_list.insert(query_idx, "_")
            text_list.insert(query_idx + len(query) + 1, "_")
            text_list.insert(pronoun_idx + 2, "[")
            text_list.insert(pronoun_idx + len(pronoun) + 2 + 1, "]")
        else:
            text_list.insert(pronoun_idx, "[")
            text_list.insert(pronoun_idx + len(pronoun) + 1, "]")
            text_list.insert(query_idx + 2, "_")
            text_list.insert(query_idx + len(query) + 2 + 1, "_")
        text = "".join(text_list)
        example['sentence'] = text
    if tokenizer is None:
        return example
    if 'sentence' in example:
        example = tokenizer(example['sentence'], max_seq_len=max_seq_length)
    elif 'sentence1' in example:
        example = tokenizer(
            example['sentence1'],
            text_pair=example['sentence2'],
            max_seq_len=max_seq_length)
    if not is_test:
        return example['input_ids'], example['token_type_ids'], label
    else:
        return example['input_ids'], example['token_type_ids']
| 39.23913 | 87 | 0.620499 |
77011e057b1d163b03027dc294fbc9e1531c7baf | 771 | py | Python | cifar_dataloader.py | KyleSuchenkai/ATTA | 107fd6dc2549fb13e4f7720dfeeec8d968cc2733 | [
"MIT"
] | 27 | 2020-06-15T19:58:57.000Z | 2022-02-13T02:41:02.000Z | cifar_dataloader.py | KyleSuchenkai/ATTA | 107fd6dc2549fb13e4f7720dfeeec8d968cc2733 | [
"MIT"
] | 1 | 2021-06-21T05:02:40.000Z | 2021-06-23T06:04:38.000Z | cifar_dataloader.py | KyleSuchenkai/ATTA | 107fd6dc2549fb13e4f7720dfeeec8d968cc2733 | [
"MIT"
] | 4 | 2021-02-18T08:12:11.000Z | 2022-03-22T19:47:26.000Z | """
This file loads the padded data of CIFAR10.
"""
import torchvision
from torchvision import datasets, transforms
import torch
def load_pading_training_data(device):
    """Load the full padded CIFAR10 training set as one tensor batch.

    Each image is padded by 4 pixels per side (Pad default fill is 0),
    converted to a tensor and moved to *device*; the padded border is then
    overwritten with the constant 0.5.

    Returns (images, labels) for all training samples.
    """
    transform_padding = transforms.Compose([
        transforms.Pad(padding=4),
        transforms.ToTensor(),
    ])
    trainset = torchvision.datasets.CIFAR10(root='../data', train=True, download=True, transform=transform_padding)
    # batch_size 50000 equals the CIFAR10 train-set size, so the loader
    # yields a single batch containing every sample.
    train_loader = torch.utils.data.DataLoader(trainset, batch_size=50000, shuffle=True)
    for batch_idx, (data, target) in enumerate(train_loader):
        cifar_images, cifar_labels = data.to(device), target.to(device)
        # Fill the 4-pixel border (top, bottom, left, right) with 0.5.
        cifar_images[:,:,:4,:] = 0.5
        cifar_images[:,:,-4:,:] = 0.5
        cifar_images[:,:,:,:4] = 0.5
        cifar_images[:,:,:,-4:] = 0.5
        # Return inside the loop: only the first (and only) batch is used.
        return cifar_images, cifar_labels
b7025c768cc58d61f5a440d4a9fe5c7cac9233ca | 23,565 | py | Python | tests/test_variants.py | ltalirz/conda-build | 6d469fd5fadf20cc4829088b3a4ccd58a71b85c9 | [
"BSD-3-Clause"
] | 1 | 2021-09-13T20:18:39.000Z | 2021-09-13T20:18:39.000Z | tests/test_variants.py | ltalirz/conda-build | 6d469fd5fadf20cc4829088b3a4ccd58a71b85c9 | [
"BSD-3-Clause"
] | 4 | 2019-12-09T20:05:06.000Z | 2020-01-07T13:44:41.000Z | tests/test_variants.py | ltalirz/conda-build | 6d469fd5fadf20cc4829088b3a4ccd58a71b85c9 | [
"BSD-3-Clause"
] | 1 | 2020-03-26T18:09:17.000Z | 2020-03-26T18:09:17.000Z | from collections import OrderedDict
import os
import json
import re
import sys
import pytest
import yaml
from conda_build import api, exceptions, variants
from conda_build.utils import package_has_file, FileNotFoundError
thisdir = os.path.dirname(__file__)
recipe_dir = os.path.join(thisdir, 'test-recipes', 'variants')
def test_later_spec_priority(single_version, no_numpy_version):
    """Later entries in the ordered spec mapping override earlier ones."""
    # override a single key
    specs = OrderedDict()
    specs['no_numpy'] = no_numpy_version
    specs['single_ver'] = single_version
    combined_spec = variants.combine_specs(specs)
    assert len(combined_spec) == 2
    assert combined_spec["python"] == ["2.7.*"]
    # keep keys that are not overwritten
    specs = OrderedDict()
    specs['single_ver'] = single_version
    specs['no_numpy'] = no_numpy_version
    combined_spec = variants.combine_specs(specs)
    assert len(combined_spec) == 2
    assert len(combined_spec["python"]) == 2
def test_get_package_variants_from_file(testing_workdir, testing_config, no_numpy_version):
with open('variant_example.yaml', 'w') as f:
yaml.dump(no_numpy_version, f, default_flow_style=False)
testing_config.variant_config_files = [os.path.join(testing_workdir, 'variant_example.yaml')]
testing_config.ignore_system_config = True
metadata = api.render(os.path.join(thisdir, "variant_recipe"),
no_download_source=False, config=testing_config)
# one for each Python version. Numpy is not strictly pinned and should present only 1 dimension
assert len(metadata) == 2
assert sum('python >=2.7,<2.8' in req for (m, _, _) in metadata
for req in m.meta['requirements']['run']) == 1
assert sum('python >=3.5,<3.6' in req for (m, _, _) in metadata
for req in m.meta['requirements']['run']) == 1
def test_use_selectors_in_variants(testing_workdir, testing_config):
testing_config.variant_config_files = [os.path.join(recipe_dir,
'selector_conda_build_config.yaml')]
variants.get_package_variants(testing_workdir, testing_config)
def test_get_package_variants_from_dictionary_of_lists(testing_config, no_numpy_version):
testing_config.ignore_system_config = True
metadata = api.render(os.path.join(thisdir, "variant_recipe"),
no_download_source=False, config=testing_config,
variants=no_numpy_version)
# one for each Python version. Numpy is not strictly pinned and should present only 1 dimension
assert len(metadata) == 2, metadata
assert sum('python >=2.7,<2.8' in req for (m, _, _) in metadata
for req in m.meta['requirements']['run']) == 1
assert sum('python >=3.5,<3.6' in req for (m, _, _) in metadata
for req in m.meta['requirements']['run']) == 1
@pytest.mark.xfail(reason="Strange failure 7/19/2017. Can't reproduce locally. Test runs fine "
"with parallelism and everything. Test fails reproducibly on CI, but logging "
"into appveyor after failed run, test passes. =(")
def test_variant_with_ignore_numpy_version_reduces_matrix(numpy_version_ignored):
# variants are defined in yaml file in this folder
# there are two python versions and two numpy versions. However, because numpy is not pinned,
# the numpy dimensions should get collapsed.
recipe = os.path.join(recipe_dir, '03_numpy_matrix')
metadata = api.render(recipe, variants=numpy_version_ignored, finalize=False)
assert len(metadata) == 2, metadata
def test_variant_with_numpy_pinned_has_matrix():
recipe = os.path.join(recipe_dir, '04_numpy_matrix_pinned')
metadata = api.render(recipe, finalize=False)
assert len(metadata) == 4
def test_pinning_in_build_requirements():
recipe = os.path.join(recipe_dir, '05_compatible')
metadata = api.render(recipe)[0][0]
build_requirements = metadata.meta['requirements']['build']
# make sure that everything in the build deps is exactly pinned
assert all(len(req.split(' ')) == 3 for req in build_requirements)
@pytest.mark.sanity
def test_no_satisfiable_variants_raises_error():
recipe = os.path.join(recipe_dir, '01_basic_templating')
with pytest.raises(exceptions.DependencyNeedsBuildingError):
api.render(recipe, permit_unsatisfiable_variants=False)
# the packages are not installable anyway, so this should show a warning that recipe can't
# be finalized
api.render(recipe, permit_unsatisfiable_variants=True)
# out, err = capsys.readouterr()
# print(out)
# print(err)
# print(caplog.text)
# assert "Returning non-final recipe; one or more dependencies was unsatisfiable" in err
def test_zip_fields():
    """Zipping keys together allows people to tie different versions as sets of combinations."""
    # Two zipped keys of length 2 expand to 2 variants, not 4.
    v = {'python': ['2.7', '3.5'], 'vc': ['9', '14'], 'zip_keys': [('python', 'vc')]}
    ld = variants.dict_of_lists_to_list_of_dicts(v)
    assert len(ld) == 2
    assert ld[0]['python'] == '2.7'
    assert ld[0]['vc'] == '9'
    assert ld[1]['python'] == '3.5'
    assert ld[1]['vc'] == '14'
    # allow duplication of values, but lengths of lists must always match
    v = {'python': ['2.7', '2.7'], 'vc': ['9', '14'], 'zip_keys': [('python', 'vc')]}
    ld = variants.dict_of_lists_to_list_of_dicts(v)
    assert len(ld) == 2
    assert ld[0]['python'] == '2.7'
    assert ld[0]['vc'] == '9'
    assert ld[1]['python'] == '2.7'
    assert ld[1]['vc'] == '14'
def test_validate_spec():
"""
Basic spec validation checking for bad characters, bad zip_keys, missing keys,
duplicate keys, and zip_key fields length mismatch.
"""
spec = {
# normal expansions
"foo": [2.7, 3.7, 3.8],
# zip_keys are the values that need to be expanded as a set
"zip_keys": [["bar", "baz"], ["qux", "quux", "quuz"]],
"bar": [1, 2, 3],
"baz": [2, 4, 6],
"qux": [4, 5],
"quux": [8, 10],
"quuz": [12, 15],
# extend_keys are those values which we do not expand
"extend_keys": ["corge"],
"corge": 42,
}
# valid spec
variants.validate_spec("spec", spec)
spec2 = dict(spec)
spec2["bad-char"] = "bad-char"
# invalid characters
with pytest.raises(ValueError):
variants.validate_spec("spec[bad_char]", spec2)
spec3 = dict(spec, zip_keys="bad_zip_keys")
# bad zip_keys
with pytest.raises(ValueError):
variants.validate_spec("spec[bad_zip_keys]", spec3)
spec4 = dict(spec, zip_keys=[["bar", "baz"], ["qux", "quux"], ["quuz", "missing"]])
# zip_keys' zip_group has key missing from spec
with pytest.raises(ValueError):
variants.validate_spec("spec[missing_key]", spec4)
spec5 = dict(spec, zip_keys=[["bar", "baz"], ["qux", "quux", "quuz"], ["quuz"]])
# zip_keys' zip_group has duplicate key
with pytest.raises(ValueError):
variants.validate_spec("spec[duplicate_key]", spec5)
spec6 = dict(spec, baz=[4, 6])
# zip_keys' zip_group key fields have same length
with pytest.raises(ValueError):
variants.validate_spec("spec[duplicate_key]", spec6)
def test_cross_compilers():
recipe = os.path.join(recipe_dir, '09_cross')
ms = api.render(recipe, permit_unsatisfiable_variants=True, finalize=False, bypass_env_check=True)
assert len(ms) == 3
def test_variants_in_output_names():
recipe = os.path.join(recipe_dir, '11_variant_output_names')
outputs = api.get_output_file_paths(recipe)
assert len(outputs) == 4
def test_variants_in_versions_with_setup_py_data(testing_workdir):
recipe = os.path.join(recipe_dir, '12_variant_versions')
try:
outputs = api.get_output_file_paths(recipe)
assert len(outputs) == 2
assert any(os.path.basename(pkg).startswith('my_package-470.470') for pkg in outputs)
assert any(os.path.basename(pkg).startswith('my_package-480.480') for pkg in outputs)
except FileNotFoundError:
# problem with python 3.x with Travis CI somehow. Just ignore it.
print("Ignoring test on setup.py data - problem with download")
def test_git_variables_with_variants(testing_workdir, testing_config):
recipe = os.path.join(recipe_dir, '13_git_vars')
m = api.render(recipe, config=testing_config, finalize=False, bypass_env_check=True)[0][0]
assert m.version() == "1.20.2"
assert m.build_number() == 0
def test_variant_input_with_zip_keys_keeps_zip_keys_list():
spec = {
'scipy': ['0.17', '0.19'],
'sqlite': ['3'],
'zlib': ['1.2'],
'xz': ['5'],
'zip_keys': ['sqlite', 'zlib', 'xz'],
'pin_run_as_build': {'python': {'min_pin': 'x.x', 'max_pin': 'x.x'}}
}
vrnts = variants.dict_of_lists_to_list_of_dicts(spec)
assert len(vrnts) == 2
assert vrnts[0].get("zip_keys") == spec["zip_keys"]
@pytest.mark.serial
@pytest.mark.xfail(sys.platform=='win32', reason="console readout issues on appveyor")
def test_ensure_valid_spec_on_run_and_test(testing_workdir, testing_config, caplog):
testing_config.debug = True
testing_config.verbose = True
recipe = os.path.join(recipe_dir, '14_variant_in_run_and_test')
api.render(recipe, config=testing_config)
text = caplog.text
assert "Adding .* to spec 'pytest 3.2'" in text
assert "Adding .* to spec 'click 6'" in text
assert "Adding .* to spec 'pytest-cov 2.3'" not in text
assert "Adding .* to spec 'pytest-mock 1.6'" not in text
def test_serial_builds_have_independent_configs(testing_config):
recipe = os.path.join(recipe_dir, '17_multiple_recipes_independent_config')
recipes = [os.path.join(recipe, dirname) for dirname in ('a', 'b')]
outputs = api.build(recipes, config=testing_config)
index_json = json.loads(package_has_file(outputs[0], 'info/index.json'))
assert 'bzip2 >=1,<1.0.7.0a0' in index_json['depends']
index_json = json.loads(package_has_file(outputs[1], 'info/index.json'))
assert 'bzip2 >=1.0.6,<2.0a0' in index_json['depends']
def test_subspace_selection(testing_config):
recipe = os.path.join(recipe_dir, '18_subspace_selection')
testing_config.variant = {'a': 'coffee'}
ms = api.render(recipe, config=testing_config, finalize=False, bypass_env_check=True)
# there are two entries with a==coffee, so we should end up with 2 variants
assert len(ms) == 2
# ensure that the zipped keys still agree
assert sum(m.config.variant['b'] == '123' for m, _, _ in ms) == 1
assert sum(m.config.variant['b'] == 'abc' for m, _, _ in ms) == 1
assert sum(m.config.variant['b'] == 'concrete' for m, _, _ in ms) == 0
assert sum(m.config.variant['c'] == 'mooo' for m, _, _ in ms) == 1
assert sum(m.config.variant['c'] == 'baaa' for m, _, _ in ms) == 1
assert sum(m.config.variant['c'] == 'woof' for m, _, _ in ms) == 0
# test compound selection
testing_config.variant = {'a': 'coffee', 'b': '123'}
ms = api.render(recipe, config=testing_config, finalize=False, bypass_env_check=True)
# there are two entries with a==coffee, but one with both 'coffee' for a, and '123' for b,
# so we should end up with 1 variants
assert len(ms) == 1
# ensure that the zipped keys still agree
assert sum(m.config.variant['b'] == '123' for m, _, _ in ms) == 1
assert sum(m.config.variant['b'] == 'abc' for m, _, _ in ms) == 0
assert sum(m.config.variant['b'] == 'concrete' for m, _, _ in ms) == 0
assert sum(m.config.variant['c'] == 'mooo' for m, _, _ in ms) == 1
assert sum(m.config.variant['c'] == 'baaa' for m, _, _ in ms) == 0
assert sum(m.config.variant['c'] == 'woof' for m, _, _ in ms) == 0
# test when configuration leads to no valid combinations - only c provided, and its value
# doesn't match any other existing values of c, so it's then ambiguous which zipped
# values to choose
testing_config.variant = {'c': 'not an animal'}
with pytest.raises(ValueError):
ms = api.render(recipe, config=testing_config, finalize=False, bypass_env_check=True)
# all zipped keys provided by the new variant. It should clobber the old one.
testing_config.variant = {'a': 'some', 'b': 'new', 'c': 'animal'}
ms = api.render(recipe, config=testing_config, finalize=False, bypass_env_check=True)
assert len(ms) == 1
assert ms[0][0].config.variant['a'] == 'some'
assert ms[0][0].config.variant['b'] == 'new'
assert ms[0][0].config.variant['c'] == 'animal'
def test_get_used_loop_vars(testing_config):
m = api.render(os.path.join(recipe_dir, '19_used_variables'), finalize=False, bypass_env_check=True)[0][0]
# conda_build_config.yaml has 4 loop variables defined, but only 3 are used.
# python and zlib are both implicitly used (depend on name matching), while
# some_package is explicitly used as a jinja2 variable
assert m.get_used_loop_vars() == {'python', 'some_package'}
# these are all used vars - including those with only one value (and thus not loop vars)
assert m.get_used_vars() == {'python', 'some_package', 'zlib', 'pthread_stubs', 'target_platform'}
def test_reprovisioning_source(testing_config):
    """Smoke test: rendering a recipe that re-provisions its source succeeds."""
    # Removed the unused `ms = ...` assignment; only the call matters here.
    api.render(os.path.join(recipe_dir, '20_reprovision_source'))
def test_reduced_hashing_behavior(testing_config):
# recipes using any compiler jinja2 function need a hash
m = api.render(os.path.join(recipe_dir, '26_reduced_hashing', 'hash_yes_compiler'),
finalize=False, bypass_env_check=True)[0][0]
assert 'c_compiler' in m.get_hash_contents(), "hash contents should contain c_compiler"
assert re.search('h[0-9a-f]{%d}' % testing_config.hash_length, m.build_id()), \
"hash should be present when compiler jinja2 function is used"
# recipes that use some variable in conda_build_config.yaml to control what
# versions are present at build time also must have a hash (except
# python, r_base, and the other stuff covered by legacy build string
# behavior)
m = api.render(os.path.join(recipe_dir, '26_reduced_hashing', 'hash_yes_pinned'),
finalize=False, bypass_env_check=True)[0][0]
assert 'zlib' in m.get_hash_contents()
assert re.search('h[0-9a-f]{%d}' % testing_config.hash_length, m.build_id())
# anything else does not get a hash
m = api.render(os.path.join(recipe_dir, '26_reduced_hashing', 'hash_no_python'),
finalize=False, bypass_env_check=True)[0][0]
assert not m.get_hash_contents()
assert not re.search('h[0-9a-f]{%d}' % testing_config.hash_length, m.build_id())
def test_variants_used_in_jinja2_conditionals(testing_config):
ms = api.render(os.path.join(recipe_dir, '21_conditional_sections'),
finalize=False, bypass_env_check=True)
assert len(ms) == 2
assert sum(m.config.variant['blas_impl'] == 'mkl' for m, _, _ in ms) == 1
assert sum(m.config.variant['blas_impl'] == 'openblas' for m, _, _ in ms) == 1
def test_build_run_exports_act_on_host(testing_config, caplog):
"""Regression test for https://github.com/conda/conda-build/issues/2559"""
api.render(os.path.join(recipe_dir, '22_run_exports_rerendered_for_other_variants'),
platform='win', arch='64')
assert "failed to get install actions, retrying" not in caplog.text
def test_detect_variables_in_build_and_output_scripts(testing_config):
ms = api.render(os.path.join(recipe_dir, '24_test_used_vars_in_scripts'),
platform='linux', arch='64')
for m, _, _ in ms:
if m.name() == 'test_find_used_variables_in_scripts':
used_vars = m.get_used_vars()
assert used_vars
assert 'SELECTOR_VAR' in used_vars
assert 'OUTPUT_SELECTOR_VAR' not in used_vars
assert 'BASH_VAR1' in used_vars
assert 'BASH_VAR2' in used_vars
assert 'BAT_VAR' not in used_vars
assert 'OUTPUT_VAR' not in used_vars
else:
used_vars = m.get_used_vars()
assert used_vars
assert 'SELECTOR_VAR' not in used_vars
assert 'OUTPUT_SELECTOR_VAR' in used_vars
assert 'BASH_VAR1' not in used_vars
assert 'BASH_VAR2' not in used_vars
assert 'BAT_VAR' not in used_vars
assert 'OUTPUT_VAR' in used_vars
# on windows, we find variables in bat scripts as well as shell scripts
ms = api.render(os.path.join(recipe_dir, '24_test_used_vars_in_scripts'),
platform='win', arch='64')
for m, _, _ in ms:
if m.name() == 'test_find_used_variables_in_scripts':
used_vars = m.get_used_vars()
assert used_vars
assert 'SELECTOR_VAR' in used_vars
assert 'OUTPUT_SELECTOR_VAR' not in used_vars
assert 'BASH_VAR1' in used_vars
assert 'BASH_VAR2' in used_vars
# bat is in addition to bash, not instead of
assert 'BAT_VAR' in used_vars
assert 'OUTPUT_VAR' not in used_vars
else:
used_vars = m.get_used_vars()
assert used_vars
assert 'SELECTOR_VAR' not in used_vars
assert 'OUTPUT_SELECTOR_VAR' in used_vars
assert 'BASH_VAR1' not in used_vars
assert 'BASH_VAR2' not in used_vars
assert 'BAT_VAR' not in used_vars
assert 'OUTPUT_VAR' in used_vars
def test_target_platform_looping(testing_config):
outputs = api.get_output_file_paths(os.path.join(recipe_dir, '25_target_platform_looping'),
platform='win', arch='64')
assert len(outputs) == 2
def test_numpy_used_variable_looping(testing_config):
outputs = api.get_output_file_paths(os.path.join(recipe_dir, 'numpy_used'))
assert len(outputs) == 4
def test_exclusive_config_files(testing_workdir):
with open('conda_build_config.yaml', 'w') as f:
yaml.dump({'abc': ['someval'], 'cwd': ['someval']}, f, default_flow_style=False)
os.makedirs('config_dir')
with open(os.path.join('config_dir', 'config-0.yaml'), 'w') as f:
yaml.dump({'abc': ['super_0'], 'exclusive_0': ['0'], 'exclusive_both': ['0']},
f, default_flow_style=False)
with open(os.path.join('config_dir', 'config-1.yaml'), 'w') as f:
yaml.dump({'abc': ['super_1'], 'exclusive_1': ['1'], 'exclusive_both': ['1']},
f, default_flow_style=False)
exclusive_config_files = (
os.path.join('config_dir', 'config-0.yaml'),
os.path.join('config_dir', 'config-1.yaml'),
)
output = api.render(os.path.join(recipe_dir, 'exclusive_config_file'),
exclusive_config_files=exclusive_config_files)[0][0]
variant = output.config.variant
# is cwd ignored?
assert 'cwd' not in variant
# did we load the exclusive configs?
assert variant['exclusive_0'] == '0'
assert variant['exclusive_1'] == '1'
# does later exclusive config override initial one?
assert variant['exclusive_both'] == '1'
# does recipe config override exclusive?
assert 'unique_to_recipe' in variant
assert variant['abc'] == '123'
def test_exclusive_config_file(testing_workdir):
with open('conda_build_config.yaml', 'w') as f:
yaml.dump({'abc': ['someval'], 'cwd': ['someval']}, f, default_flow_style=False)
os.makedirs('config_dir')
with open(os.path.join('config_dir', 'config.yaml'), 'w') as f:
yaml.dump({'abc': ['super'], 'exclusive': ['someval']}, f, default_flow_style=False)
output = api.render(os.path.join(recipe_dir, 'exclusive_config_file'),
exclusive_config_file=os.path.join('config_dir', 'config.yaml'))[0][0]
variant = output.config.variant
# is cwd ignored?
assert 'cwd' not in variant
# did we load the exclusive config
assert 'exclusive' in variant
# does recipe config override exclusive?
assert 'unique_to_recipe' in variant
assert variant['abc'] == '123'
def test_inner_python_loop_with_output(testing_config):
outputs = api.get_output_file_paths(os.path.join(recipe_dir, 'test_python_as_subpackage_loop'),
config=testing_config)
outputs = [os.path.basename(out) for out in outputs]
assert len(outputs) == 5
assert len([out for out in outputs if out.startswith('tbb-2018')]) == 1
assert len([out for out in outputs if out.startswith('tbb-devel-2018')]) == 1
assert len([out for out in outputs if out.startswith('tbb4py-2018')]) == 3
testing_config.variant_config_files = [os.path.join(recipe_dir, 'test_python_as_subpackage_loop', 'config_with_zip.yaml')]
outputs = api.get_output_file_paths(os.path.join(recipe_dir, 'test_python_as_subpackage_loop'),
config=testing_config)
outputs = [os.path.basename(out) for out in outputs]
assert len(outputs) == 5
assert len([out for out in outputs if out.startswith('tbb-2018')]) == 1
assert len([out for out in outputs if out.startswith('tbb-devel-2018')]) == 1
assert len([out for out in outputs if out.startswith('tbb4py-2018')]) == 3
testing_config.variant_config_files = [os.path.join(recipe_dir, 'test_python_as_subpackage_loop', 'config_with_zip.yaml')]
outputs = api.get_output_file_paths(os.path.join(recipe_dir, 'test_python_as_subpackage_loop'),
config=testing_config, platform='win', arch=64)
outputs = [os.path.basename(out) for out in outputs]
assert len(outputs) == 5
assert len([out for out in outputs if out.startswith('tbb-2018')]) == 1
assert len([out for out in outputs if out.startswith('tbb-devel-2018')]) == 1
assert len([out for out in outputs if out.startswith('tbb4py-2018')]) == 3
def test_variant_as_dependency_name(testing_config):
outputs = api.render(os.path.join(recipe_dir, '27_requirements_host'),
config=testing_config)
assert len(outputs) == 2
def test_custom_compiler():
recipe = os.path.join(recipe_dir, '28_custom_compiler')
ms = api.render(recipe, permit_unsatisfiable_variants=True, finalize=False, bypass_env_check=True)
assert len(ms) == 3
def test_different_git_vars():
recipe = os.path.join(recipe_dir, '29_different_git_vars')
ms = api.render(recipe)
versions = [m[0].version() for m in ms]
assert "1.20.0" in versions
assert "1.21.11" in versions
@pytest.mark.skipif(sys.platform != "linux", reason="recipe uses a unix specific script")
def test_top_level_finalized(testing_config):
# see https://github.com/conda/conda-build/issues/3618
recipe = os.path.join(recipe_dir, '30_top_level_finalized')
outputs = api.build(recipe, config=testing_config)
xzcat_output = package_has_file(outputs[0], 'xzcat_output')
assert '5.2.3' in xzcat_output
def test_variant_subkeys_retained(testing_config):
m = api.render(os.path.join(recipe_dir, '31_variant_subkeys'), finalize=False, bypass_env_check=True)[0][0]
found_replacements = False
from conda_build.build import get_all_replacements
for variant in m.config.variants:
found_replacements = get_all_replacements(variant)
assert len(found_replacements), "Did not find replacements"
m.final = False
outputs = m.get_output_metadata_set(permit_unsatisfiable_variants=False)
get_all_replacements(outputs[0][1].config.variant)
| 45.404624 | 126 | 0.669467 |
62088758be42f0601243fa73d8f67b4af7332d82 | 2,983 | py | Python | artwork_redirect/config.py | metabrainz/coverart_redirect | 90ff5c7b57df4c37a6c03fd01dd9f04e76ed5bb8 | [
"MIT"
] | 10 | 2018-02-09T10:47:50.000Z | 2020-11-03T23:23:46.000Z | artwork_redirect/config.py | metabrainz/coverart_redirect | 90ff5c7b57df4c37a6c03fd01dd9f04e76ed5bb8 | [
"MIT"
] | 16 | 2015-10-08T20:31:40.000Z | 2019-10-15T09:45:05.000Z | artwork_redirect/config.py | metabrainz/coverart_redirect | 90ff5c7b57df4c37a6c03fd01dd9f04e76ed5bb8 | [
"MIT"
] | 10 | 2015-03-13T21:56:10.000Z | 2018-08-01T19:51:06.000Z | # Copyright (C) 2011 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
import configparser
from os import path
from sqlalchemy.engine.url import URL
class S3Config(object):
    """S3-related settings; currently only the key prefix."""

    def __init__(self):
        # Filled in by read(); None means "not configured yet".
        self.prefix = None

    def read(self, parser, section):
        """Read the ``prefix`` option from `section` of `parser`."""
        value = parser.get(section, 'prefix')
        self.prefix = value
class SentryConfig(object):
    """Sentry error-reporting settings (the DSN string)."""

    def __init__(self):
        # Unset until read() is called.
        self.dsn = None

    def read(self, parser, section):
        """Read the ``dsn`` option from `section` of `parser`."""
        dsn = parser.get(section, 'dsn')
        self.dsn = dsn
class ListenConfig(object):
    """Listen address/port settings; both values are kept as raw strings."""

    def __init__(self):
        self.addr = None
        self.port = None

    def read(self, parser, section):
        """Read the ``address`` and ``port`` options from `section` of `parser`."""
        for attr, option in (('addr', 'address'), ('port', 'port')):
            setattr(self, attr, parser.get(section, option))
class DatabaseConfig(object):
    """PostgreSQL connection settings loaded from the config file."""

    def __init__(self):
        self.user = None
        self.superuser = 'postgres'  # account used when create_url(superuser=True)
        self.name = None
        self.host = None
        self.port = None
        self.password = None
        self.static_path = None

    def create_url(self, superuser=False):
        """Build an SQLAlchemy URL for this database.

        When `superuser` is true the configured superuser account is used
        instead of the regular user.
        """
        kwargs = {
            'username': self.superuser if superuser else self.user,
            'database': self.name,
        }
        # Only forward optional settings that were actually configured.
        for key, value in (('host', self.host),
                           ('port', self.port),
                           ('password', self.password)):
            if value is not None:
                kwargs[key] = value
        return URL('postgresql', **kwargs)

    def read(self, parser, section):
        """Populate this object from `section`; host/port/password are optional."""
        self.user = parser.get(section, 'user')
        self.name = parser.get(section, 'name')
        for option, getter in (('host', parser.get),
                               ('port', parser.getint),
                               ('password', parser.get)):
            if parser.has_option(section, option):
                setattr(self, option, getter(section, option))
class Config(object):
    """Aggregated application configuration parsed from an INI file.

    Reads the [database]/[testdatabase], [listen], [s3] and [sentry]
    sections into their dedicated config objects.
    """

    def __init__(self, path, static_path, test=False):
        # Directory with static assets; stored as-is, not parsed from the file.
        self.static_path = static_path
        parser = configparser.RawConfigParser()
        parser.read(path)
        self.database = DatabaseConfig()
        if test:
            # Test runs read their connection details from [testdatabase].
            self.database.read(parser, 'testdatabase')
        else:
            self.database.read(parser, 'database')
        self.listen = ListenConfig()
        self.listen.read(parser, 'listen')
        self.s3 = S3Config()
        self.s3.read(parser, 's3')
        self.sentry = SentryConfig()
        self.sentry.read(parser, 'sentry')
def load_config(test=False):
    """Load configuration from config.ini.

    If test=True will take the database configuration from the
    [testdatabase] section instead of the [database] section.
    """
    package_dir = path.dirname(path.abspath(__file__))
    parent_dir = path.join(package_dir, '..')
    return Config(path.join(parent_dir, 'config.ini'),
                  path.join(parent_dir, 'static'),
                  test)
| 29.245098 | 85 | 0.614817 |
9254fc52b6f4e7e6cbe02947a3d7a7c34977fcab | 6,280 | py | Python | function/python/brightics/function/regression/mlp_regression.py | sharon1321/studio | c5ce7f6db5503f5020b2aa0c6f2e6acfc61c90c5 | [
"Apache-2.0"
] | 1 | 2020-02-08T10:56:29.000Z | 2020-02-08T10:56:29.000Z | function/python/brightics/function/regression/mlp_regression.py | sharon1321/studio | c5ce7f6db5503f5020b2aa0c6f2e6acfc61c90c5 | [
"Apache-2.0"
] | null | null | null | function/python/brightics/function/regression/mlp_regression.py | sharon1321/studio | c5ce7f6db5503f5020b2aa0c6f2e6acfc61c90c5 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from brightics.common.repr import BrtcReprBuilder
from brightics.common.repr import strip_margin
from brightics.common.repr import plt2MD
from brightics.common.repr import pandasDF2MD
from brightics.common.repr import dict2MD
from brightics.function.utils import _model_dict
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.common.utils.table_converters import simple_tables2df_list
from brightics.function.utils import _model_dict
from brightics.common.validation import validate
from brightics.common.validation import greater_than, require_param
from brightics.common.validation import greater_than_or_equal_to
from brightics.common.utils import get_default_from_parameters_if_required
def mlp_regression_train(table, group_by=None, **params):
    """Public entry point for MLP regression training.

    Validates the incoming parameters, then dispatches either to a grouped
    run (one model per group value) or to a single `_mlp_regression_train`.
    """
    check_required_parameters(_mlp_regression_train, params, ['table'])
    params = get_default_from_parameters_if_required(params, _mlp_regression_train)
    if (params['batch_size_auto']):
        # Automatic batch size: only learning rate and tolerance need range checks.
        param_validation_check = [greater_than(params, 0.0, 'learning_rate_init'),
                                  greater_than(params, 0.0, 'tol')]
    else:
        # Manual batch size: fail fast if it is missing or not an int,
        # before running the positive-range checks below.
        if not params['batch_size'] or not isinstance(params['batch_size'], int):
            param_validation_check = [require_param('batch_size')]
            validate(*param_validation_check)
        param_validation_check = [greater_than(params, 0, 'batch_size'),
                                  greater_than(params, 0.0, 'learning_rate_init'),
                                  greater_than(params, 0.0, 'tol')]
    validate(*param_validation_check)
    if group_by is not None:
        grouped_model = _function_by_group(_mlp_regression_train, table, group_by=group_by, **params)
        return grouped_model
    else:
        return _mlp_regression_train(table, **params)
def _mlp_regression_train(table, feature_cols, label_col, hidden_layer_sizes=(100, ), activation='relu', solver='adam', alpha=0.0001, batch_size_auto=True, batch_size='auto', learning_rate='constant', learning_rate_init=0.001, max_iter=200, random_state=None, tol=0.0001):
    """Fit an sklearn MLPRegressor on `table` and package the model for Brightics.

    Args:
        table: Input pandas DataFrame.
        feature_cols: Column names used as features.
        label_col: Column name of the regression target.
        hidden_layer_sizes..tol: Forwarded unchanged to sklearn's MLPRegressor.
        batch_size_auto: Not used here; `batch_size` validation is done by
            the public wrapper `mlp_regression_train`.

    Returns:
        dict with a single 'model' entry (Brightics model dict, including the
        fitted estimator under 'mlp_model' and a markdown report).
    """
    features = table[feature_cols]
    label = table[label_col]

    mlp_model = MLPRegressor(hidden_layer_sizes=hidden_layer_sizes, activation=activation,
                             solver=solver, alpha=alpha, batch_size=batch_size,
                             learning_rate=learning_rate, learning_rate_init=learning_rate_init,
                             max_iter=max_iter, shuffle=True, random_state=random_state, tol=tol)
    mlp_model.fit(features, label)

    predict = mlp_model.predict(features)
    intercepts = mlp_model.intercepts_
    coefficients = mlp_model.coefs_
    loss = mlp_model.loss_

    # Goodness-of-fit metrics on the training set itself.
    _mean_absolute_error = mean_absolute_error(label, predict)
    _mean_squared_error = mean_squared_error(label, predict)
    _r2_score = r2_score(label, predict)

    # pd.DataFrame.from_items was deprecated in pandas 0.23 and removed in 1.0;
    # a dict literal preserves column order on Python 3.7+.
    result_table = pd.DataFrame({
        'Metric': ['Mean Absolute Error', 'Mean Squared Error', 'R2 Score'],
        'Score': [_mean_absolute_error, _mean_squared_error, _r2_score],
    })

    # Human-readable labels for the parameter report below.
    label_name = {
        'hidden_layer_sizes': 'Hidden Layer Sizes',
        'activation': 'Activation Function',
        'solver': 'Solver',
        'alpha': 'Alpha',
        'batch_size': 'Batch Size',
        'learning_rate': 'Learning Rate',
        'learning_rate_init': 'Learning Rate Initial',
        'max_iter': 'Max Iteration',
        'random_state': 'Seed',
        'tol': 'Tolerance'}
    get_param = mlp_model.get_params()
    param_table = pd.DataFrame({
        'Parameter': list(label_name.values()),
        'Value': [get_param[x] for x in label_name],
    })

    rb = BrtcReprBuilder()
    # Report title fixed: this is the regression trainer, not classification.
    rb.addMD(strip_margin("""
    | ### MLP Regression Result
    | {result}
    | ### Parameters
    | {list_parameters}
    """.format(result=pandasDF2MD(result_table), list_parameters=pandasDF2MD(param_table)
    )))

    model = _model_dict('mlp_regression_model')
    model['features'] = feature_cols
    model['label'] = label_col
    model['intercepts'] = intercepts
    model['coefficients'] = coefficients
    model['loss'] = loss
    model['mean_absolute_error'] = _mean_absolute_error
    model['mean_squared_error'] = _mean_squared_error
    model['r2_score'] = _r2_score
    model['activation'] = activation
    model['solver'] = solver
    model['alpha'] = alpha
    model['batch_size'] = batch_size
    model['learning_rate'] = learning_rate
    model['learning_rate_init'] = learning_rate_init
    model['max_iter'] = max_iter
    model['random_state'] = random_state
    model['tol'] = tol
    model['mlp_model'] = mlp_model
    model['_repr_brtc_'] = rb.get()

    return {'model' : model}
def mlp_regression_predict(table, model, **params):
    """Dispatch prediction to the grouped or the plain implementation."""
    check_required_parameters(_mlp_regression_predict, params, ['table', 'model'])
    is_grouped = '_grouped_data' in model
    if is_grouped:
        return _function_by_group(_mlp_regression_predict, table, model, **params)
    return _mlp_regression_predict(table, model, **params)
def _mlp_regression_predict(table, model, prediction_col='prediction'):
    """Append model predictions over `model['features']` as a new column."""
    out_table = table.copy()
    estimator = model['mlp_model']
    feature_frame = out_table[model['features']]
    out_table[prediction_col] = estimator.predict(feature_frame)
    return {'out_table' : out_table}
| 40.25641 | 278 | 0.715764 |
f1db8a19cbe26867ed904333a8b5998a11261b24 | 8,128 | py | Python | src/panorama/panorama_ui.py | vmariiechko/python-image-processing | 5613440dc04140845600b8c37a2b28786d504815 | [
"MIT"
] | null | null | null | src/panorama/panorama_ui.py | vmariiechko/python-image-processing | 5613440dc04140845600b8c37a2b28786d504815 | [
"MIT"
] | null | null | null | src/panorama/panorama_ui.py | vmariiechko/python-image-processing | 5613440dc04140845600b8c37a2b28786d504815 | [
"MIT"
] | null | null | null | from PyQt5.QtWidgets import (QLabel, QPushButton, QComboBox, QLineEdit, QRadioButton, QListWidget,
QDialogButtonBox, QGridLayout, QVBoxLayout, QHBoxLayout, QSizePolicy)
from PyQt5.QtCore import Qt, QMetaObject
from PyQt5.QtGui import QIcon, QPixmap
from src.operations.form_ui import FormUI
class ImagePanoramaUI(FormUI):
    """Build UI for :class:`panorama.ImagePanorama`."""

    def init_ui(self, panorama):
        """
        Create user interface for :class:`panorama.ImagePanorama`.

        The method creates the widget objects in the proper containers
        and assigns the object names to them.

        :param panorama: The image panorama dialog
        :type panorama: :class:`panorama.ImagePanorama`
        """
        panorama.setObjectName("panorama")
        self.form_ui(panorama)

        # Dialog window icon.
        icon = QIcon()
        icon.addPixmap(QPixmap("icons/panorama.png"), QIcon.Normal, QIcon.Off)
        panorama.setWindowIcon(icon)

        # "Description" button row at the top of the dialog.
        self.btn_description = QPushButton("Description")
        self.btn_description.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.btn_description.setFocusPolicy(Qt.NoFocus)
        self.btn_description.setObjectName("btn_description")

        self.layout_btn = QHBoxLayout()
        self.layout_btn.addWidget(self.btn_description)

        # Form row: stitching mode selector.
        self.label_mode = QLabel()
        self.label_mode.setObjectName("label_mode")

        self.cb_mode = QComboBox(panorama)
        self.cb_mode.addItems(["Default", "Manual", "Manual Details"])
        self.cb_mode.setObjectName("cb_mode")

        # Form row: name for the resulting panorama image.
        self.label_pano_name = QLabel()
        self.label_pano_name.setObjectName("label_pano_name")

        self.edit_pano_name = QLineEdit()
        self.edit_pano_name.setMaxLength(50)
        self.edit_pano_name.setObjectName("edit_pano_name")

        # Form row: optional cropping of the region of interest.
        self.rbtn_crop = QRadioButton("Crop ROI")
        self.rbtn_crop.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))
        self.rbtn_crop.setObjectName("rbtn_crop")

        self.layout_form.addRow(self.label_mode, self.cb_mode)
        self.layout_form.addRow(self.label_pano_name, self.edit_pano_name)
        self.layout_form.addRow(None, self.rbtn_crop)

        # Centered red label used to display validation/stitching errors.
        self.label_errors = QLabel()
        self.label_errors.setStyleSheet("color: red")
        self.label_errors.setAlignment(Qt.AlignCenter)
        self.label_errors.setObjectName("label_errors")

        # Grid holding the two image lists and the movement buttons between them.
        self.grid_layout = QGridLayout()
        self.grid_layout.setObjectName("grid_layout")

        # Left list: available images.
        self.label_left_list = QLabel()
        self.label_left_list.setObjectName("label_left_list")

        self.left_list = QListWidget(panorama)
        self.left_list.setObjectName("left_list")

        self.grid_layout.addWidget(self.label_left_list, 1, 0, 1, 4, alignment=Qt.AlignCenter)
        self.grid_layout.addWidget(self.left_list, 2, 0, 6, 4)

        # Movement buttons placed in the middle column of the grid.
        self.buttons = dict()
        self.buttons['>>'] = QPushButton('>>')
        self.buttons['>'] = QPushButton('>')
        self.buttons['<'] = QPushButton('<')
        self.buttons['<<'] = QPushButton('<<')
        self.buttons['Up'] = QPushButton('Up')
        self.buttons['Down'] = QPushButton('Down')

        for b in self.buttons:
            self.buttons[b].setSizePolicy(QSizePolicy.Preferred, QSizePolicy.MinimumExpanding)
            self.buttons[b].setFocusPolicy(Qt.NoFocus)

        self.grid_layout.setRowStretch(7, 1)
        self.grid_layout.addWidget(self.buttons['>>'], 2, 4, 1, 1, alignment=Qt.AlignTop)
        self.grid_layout.addWidget(self.buttons['>'], 3, 4, 1, 1, alignment=Qt.AlignTop)
        self.grid_layout.addWidget(self.buttons['<'], 4, 4, 1, 1, alignment=Qt.AlignTop)
        self.grid_layout.addWidget(self.buttons['<<'], 5, 4, 1, 1, alignment=Qt.AlignTop)
        self.grid_layout.addWidget(self.buttons['Up'], 6, 4, 1, 1, alignment=Qt.AlignTop)
        self.grid_layout.addWidget(self.buttons['Down'], 7, 4, 1, 1, alignment=Qt.AlignTop)

        # Right list: images selected for stitching (order matters).
        self.label_right_list = QLabel()
        self.label_right_list.setObjectName("label_right_list")

        self.right_list = QListWidget(panorama)
        self.right_list.setObjectName("right_list")

        self.grid_layout.addWidget(self.label_right_list, 1, 5, 1, 4, alignment=Qt.AlignCenter)
        self.grid_layout.addWidget(self.right_list, 2, 5, 6, 4)

        # OK/Cancel buttons; OK triggers the stitching on the dialog.
        self.button_box = QDialogButtonBox(panorama)
        self.button_box.setOrientation(Qt.Horizontal)
        self.button_box.setStandardButtons(QDialogButtonBox.Cancel | QDialogButtonBox.Ok)
        self.button_box.setCenterButtons(True)
        self.button_box.rejected.connect(panorama.reject)
        self.button_box.button(QDialogButtonBox.Ok).clicked.connect(panorama.stitch_images)
        self.button_box.setObjectName("button_box")

        # Assemble the dialog's main vertical layout.
        self.layout = QVBoxLayout(panorama)
        self.layout.setObjectName("layout")
        self.layout.addLayout(self.layout_btn)
        self.layout.addWidget(self.form)
        self.layout.addWidget(self.label_errors)
        self.layout.addLayout(self.grid_layout)
        self.layout.addWidget(self.button_box)

        panorama.setLayout(self.layout)
        panorama.setWindowFlags(panorama.windowFlags() & ~Qt.WindowContextHelpButtonHint)
        QMetaObject.connectSlotsByName(panorama)

    def set_widget_connections(self):
        """Connect widgets to methods."""
        self.left_list.itemSelectionChanged.connect(self.update_button_status)
        self.right_list.itemSelectionChanged.connect(self.update_button_status)
        self.buttons['>'].clicked.connect(self.button_add_clicked)
        self.buttons['<'].clicked.connect(self.button_remove_clicked)
        self.buttons['>>'].clicked.connect(self.button_add_all_clicked)
        self.buttons['<<'].clicked.connect(self.button_remove_all_clicked)
        self.buttons['Up'].clicked.connect(self.button_up_clicked)
        self.buttons['Down'].clicked.connect(self.button_down_clicked)

    def button_add_clicked(self):
        """Move a selected item from the left list to the right."""
        row = self.left_list.currentRow()
        # takeItem removes the item from the source list and returns it.
        row_item = self.left_list.takeItem(row)
        self.right_list.addItem(row_item)

    def button_remove_clicked(self):
        """Move a selected item from the right list to the left."""
        row = self.right_list.currentRow()
        row_item = self.right_list.takeItem(row)
        self.left_list.addItem(row_item)

    def button_add_all_clicked(self):
        """Move all items from the left list to the right."""
        # Always take index 0: each takeItem shifts the remaining rows up.
        for i in range(self.left_list.count()):
            self.right_list.addItem(self.left_list.takeItem(0))

    def button_remove_all_clicked(self):
        """Move all items from the right list to the left."""
        for i in range(self.right_list.count()):
            self.left_list.addItem(self.right_list.takeItem(0))

    def button_up_clicked(self):
        """Move a selected item from the right list to the top."""
        row_index = self.right_list.currentRow()
        current_item = self.right_list.takeItem(row_index)
        self.right_list.insertItem(row_index - 1, current_item)
        # Keep the moved item selected.
        self.right_list.setCurrentRow(row_index - 1)

    def button_down_clicked(self):
        """Move a selected item from the right list to the bottom."""
        row_index = self.right_list.currentRow()
        current_item = self.right_list.takeItem(row_index)
        self.right_list.insertItem(row_index + 1, current_item)
        self.right_list.setCurrentRow(row_index + 1)

    def update_button_status(self):
        """Update buttons access whenever move items."""
        # Up/Down are disabled at the list edges or with no selection.
        self.buttons['Up'].setDisabled(not bool(self.right_list.selectedItems()) or self.right_list.currentRow() == 0)
        self.buttons['Down'].setDisabled(not bool(self.right_list.selectedItems())
                                         or self.right_list.currentRow() == self.right_list.count() - 1)
        # Single-move buttons require a selection in their source list.
        self.buttons['>'].setDisabled(not bool(self.left_list.selectedItems()) or self.left_list.count() == 0)
        self.buttons['<'].setDisabled(not bool(self.right_list.selectedItems()) or self.right_list.count() == 0)
eb02a7cb809bc453dae916529b81dcfb0ba88861 | 5,607 | py | Python | influxdb_client/domain/check_base_links.py | wasted925/influxdb-client-python | afee531fd1dc244b3d9d270e262b0a1865a7c89d | [
"MIT"
] | 380 | 2019-09-19T20:20:10.000Z | 2022-03-31T12:59:33.000Z | influxdb_client/domain/check_base_links.py | mikeldiezs/influxdb-client-python | 0c1d1d9ff92dd2b3b4a9b6aa1e8f5b1c02fd48ab | [
"MIT"
] | 362 | 2019-09-16T11:53:29.000Z | 2022-03-29T03:11:59.000Z | influxdb_client/domain/check_base_links.py | mikeldiezs/influxdb-client-python | 0c1d1d9ff92dd2b3b4a9b6aa1e8f5b1c02fd48ab | [
"MIT"
] | 130 | 2019-09-20T08:02:35.000Z | 2022-03-30T16:44:45.000Z | # coding: utf-8
"""
Influx OSS API Service.
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class CheckBaseLinks(object):
    """NOTE: This class is auto generated by OpenAPI Generator.

    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # All link fields are URI strings; `_self` maps to the JSON key "self"
    # (renamed because `self` is the conventional instance argument).
    openapi_types = {
        '_self': 'str',
        'labels': 'str',
        'members': 'str',
        'owners': 'str',
        'query': 'str'
    }

    attribute_map = {
        '_self': 'self',
        'labels': 'labels',
        'members': 'members',
        'owners': 'owners',
        'query': 'query'
    }

    def __init__(self, _self=None, labels=None, members=None, owners=None, query=None):  # noqa: E501,D401,D403
        """CheckBaseLinks - a model defined in OpenAPI."""  # noqa: E501
        # `self.__self` is name-mangled to `_CheckBaseLinks__self`, keeping the
        # stored value distinct from the `_self` property defined below.
        self.__self = None
        self._labels = None
        self._members = None
        self._owners = None
        self._query = None
        self.discriminator = None

        if _self is not None:
            self._self = _self
        if labels is not None:
            self.labels = labels
        if members is not None:
            self.members = members
        if owners is not None:
            self.owners = owners
        if query is not None:
            self.query = query

    @property
    def _self(self):
        """Get the _self of this CheckBaseLinks.

        URI of resource.

        :return: The _self of this CheckBaseLinks.
        :rtype: str
        """  # noqa: E501
        return self.__self

    @_self.setter
    def _self(self, _self):
        """Set the _self of this CheckBaseLinks.

        URI of resource.

        :param _self: The _self of this CheckBaseLinks.
        :type: str
        """  # noqa: E501
        self.__self = _self

    @property
    def labels(self):
        """Get the labels of this CheckBaseLinks.

        URI of resource.

        :return: The labels of this CheckBaseLinks.
        :rtype: str
        """  # noqa: E501
        return self._labels

    @labels.setter
    def labels(self, labels):
        """Set the labels of this CheckBaseLinks.

        URI of resource.

        :param labels: The labels of this CheckBaseLinks.
        :type: str
        """  # noqa: E501
        self._labels = labels

    @property
    def members(self):
        """Get the members of this CheckBaseLinks.

        URI of resource.

        :return: The members of this CheckBaseLinks.
        :rtype: str
        """  # noqa: E501
        return self._members

    @members.setter
    def members(self, members):
        """Set the members of this CheckBaseLinks.

        URI of resource.

        :param members: The members of this CheckBaseLinks.
        :type: str
        """  # noqa: E501
        self._members = members

    @property
    def owners(self):
        """Get the owners of this CheckBaseLinks.

        URI of resource.

        :return: The owners of this CheckBaseLinks.
        :rtype: str
        """  # noqa: E501
        return self._owners

    @owners.setter
    def owners(self, owners):
        """Set the owners of this CheckBaseLinks.

        URI of resource.

        :param owners: The owners of this CheckBaseLinks.
        :type: str
        """  # noqa: E501
        self._owners = owners

    @property
    def query(self):
        """Get the query of this CheckBaseLinks.

        URI of resource.

        :return: The query of this CheckBaseLinks.
        :rtype: str
        """  # noqa: E501
        return self._query

    @query.setter
    def query(self, query):
        """Set the query of this CheckBaseLinks.

        URI of resource.

        :param query: The query of this CheckBaseLinks.
        :type: str
        """  # noqa: E501
        self._query = query

    def to_dict(self):
        """Return the model properties as a dict."""
        result = {}

        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return true if both objects are equal."""
        if not isinstance(other, CheckBaseLinks):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return true if both objects are not equal."""
        return not self == other
7aa4cfe6812e4bfbb2513a1d6cd8a08747d4b026 | 2,177 | py | Python | python/src/nnabla/backward_function/patch_correlation.py | Mattlk13/nnabla | 09b7dfd03bd88366d1d1f6cc61492b42175e35e7 | [
"Apache-2.0"
] | null | null | null | python/src/nnabla/backward_function/patch_correlation.py | Mattlk13/nnabla | 09b7dfd03bd88366d1d1f6cc61492b42175e35e7 | [
"Apache-2.0"
] | 1 | 2020-11-09T07:33:29.000Z | 2020-11-09T07:33:29.000Z | python/src/nnabla/backward_function/patch_correlation.py | Mattlk13/nnabla | 09b7dfd03bd88366d1d1f6cc61492b42175e35e7 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nnabla as nn
import nnabla.functions as F
from .backward_function import BackwardFunction
class PatchCorrelationBackward(BackwardFunction):
    """Backward function stub for PatchCorrelation.

    Both methods below still contain the generated template and raise until
    the real backward computation is implemented.
    """

    @property
    def name(self):
        """Identifier of this backward function."""
        return 'PatchCorrelationBackward'

    def _create_forward_inputs_and_outputs(self, inputs, outputs):
        # Template guard: raises unconditionally, so the wiring code after it
        # is currently unreachable until this method is implemented.
        msg = ("Implement this function correctly \n"
               "if the backward function takes the output(s) of the forward function.\n"
               "See the SigmoidBackward in that case.\n"
               "Delete this error message after implementing.")
        raise Exception(msg)

        # Inputs on the forward graph
        inputs_fwd = []
        for i in range(self._num_inputs_fwd):
            need_grad = self.forward_func.inputs[i].need_grad
            v = nn.Variable(inputs[i].shape, need_grad=need_grad)
            v.data = inputs[i].data
            v.grad = outputs[i].data
            inputs_fwd += [v]
        # Outputs on the forward graph
        outputs_fwd = []
        for i in range(self._num_outputs_fwd):
            inp = inputs[self._num_inputs_fwd + i]
            v = nn.Variable(inp.shape)
            v.grad = inp.data
            outputs_fwd += [v]
        return inputs_fwd, outputs_fwd

    def backward_impl(self, inputs, outputs, prop_down, accum):
        # inputs: [inputs_fwd_graph] + [inputs_bwd_graph] or
        # [inputs_fwd_graph] + [outputs_fwd_graph] + [inputs_bwd_graph]
        raise NotImplementedError(
            "The backward method of PatchCorrelationBackward class is not implemented.")
| 38.192982 | 88 | 0.672944 |
6b09abe15eacb69fdf3dceb732af7faf21156ea8 | 3,467 | py | Python | core/management/commands/load_workers.py | Egorka96/med-org-portal | 3f55fb59daea03684b3bc6b5c394cd06cfd6e2df | [
"MIT"
] | 2 | 2020-03-02T17:29:14.000Z | 2020-05-28T13:19:49.000Z | core/management/commands/load_workers.py | Egorka96/med-org-portal | 3f55fb59daea03684b3bc6b5c394cd06cfd6e2df | [
"MIT"
] | 88 | 2020-02-17T09:46:57.000Z | 2022-03-12T00:24:32.000Z | core/management/commands/load_workers.py | Egorka96/med-org-portal | 3f55fb59daea03684b3bc6b5c394cd06cfd6e2df | [
"MIT"
] | 1 | 2020-04-17T15:56:51.000Z | 2020-04-17T15:56:51.000Z | import datetime
from time import sleep
from django.core.management import BaseCommand
from djutils.management.commands import add_date_from_to_arguments, process_date_from_to_options
from django.utils.timezone import now
from swutils.date import iso_to_datetime
import mis.worker
from core import models
class Command(BaseCommand):
    # Management command that syncs workers from the MIS service into local models.
    args = ''
    help = 'Выгрузка сотрудников из мис'

    def add_arguments(self, parser):
        """Register the --infinitely flag plus the shared date-from/to options."""
        parser.add_argument(
            '--infinitely',
            '-i',
            dest='infinitely',
            action='store_true',
            default=False,
            help='Бесконечно проверять есть ли что отправить',
        )
        add_date_from_to_arguments(parser)

    def handle(self, *args, **options):
        """Run the sync loop.

        Without explicit dates, the window starts at the persisted
        WORKER_LOAD_TIME status (or 2010-01-01 on first run) and ends now;
        the status is advanced after each successful pass. With --infinitely
        the loop repeats every 5 minutes, sliding the window forward.
        """
        origin_dt_from, origin_dt_to = process_date_from_to_options(options, to_datetime=True)
        dt_from = origin_dt_from
        dt_to = origin_dt_to

        while True:
            if not dt_to:
                dt_to = now()
            if not dt_from:
                # Resume from the last persisted load time (ISO string, trimmed to seconds).
                dt_from_iso = models.Status.get_value(
                    name=models.Status.WORKER_LOAD_TIME,
                    default=datetime.datetime(2010, 1, 1, 0, 0).isoformat(sep=' ')[:19],
                )
                dt_from = iso_to_datetime(dt_from_iso)

            self.load_workers(options, dt_from, dt_to)

            # Only persist progress when the window was derived automatically,
            # not when explicit dates were given on the command line.
            if not origin_dt_from and not origin_dt_to:
                models.Status.set_value(
                    name=models.Status.WORKER_LOAD_TIME,
                    value=dt_to.isoformat(sep=' ')[:19]
                )

            # One-off run — stop after a single pass.
            if not options.get('infinitely'):
                break

            if options.get('verbosity'):
                print('Sleep 5 minutes')
            sleep(60 * 5)

            # Slide the window: next pass covers [previous end, new now()].
            dt_from = dt_to
            dt_to = now()

    def load_workers(self, options, dt_from, dt_to):
        """Fetch workers modified in [dt_from, dt_to] from MIS, page by page,
        and upsert local Worker / WorkerOrganization records."""
        params = {
            'dm_to': dt_to,
            'dm_from': dt_from
        }
        page = 1
        while True:
            params['page'] = page
            response = mis.worker.Worker.filter_with_response(params)
            for mis_worker in response['results']:
                if options.get('verbosity'):
                    print(mis_worker)
                # Workers are matched by full name + birth date.
                worker, created = models.Worker.objects.get_or_create(
                    last_name=mis_worker.last_name,
                    first_name=mis_worker.first_name,
                    birth=mis_worker.birth,
                    middle_name=mis_worker.middle_name,
                    defaults={
                        'gender': mis_worker.gender
                    }
                )
                if not created:
                    # Keep gender in sync for pre-existing workers.
                    worker.gender = mis_worker.gender
                    worker.save()

                worker_org, created = models.WorkerOrganization.objects.get_or_create(
                    worker=worker,
                    org_id=mis_worker.org.id,
                    mis_id=mis_worker.id,
                    defaults={
                        'post': mis_worker.post,
                        'shop': mis_worker.shop
                    }
                )
                if not created:
                    worker_org.post = mis_worker.post
                    worker_org.shop = mis_worker.shop
                    worker_org.save()
                # NOTE(review): this save duplicates the one just above for the
                # not-created branch — looks redundant; confirm before removing.
                worker_org.save()

            # MIS pagination: 'next' is absent/falsy on the last page.
            if not response.get('next'):
                break
            page += 1
0e4f4a8834be550b6c4adada0b1e92a55d63a98b | 5,590 | py | Python | biobb_vs/fpocket/common.py | bioexcel/biobb_vs | 6f492d317b3783e6fd8468ca62883d1db7fa4bc6 | [
"Apache-2.0"
] | 1 | 2020-05-05T13:59:02.000Z | 2020-05-05T13:59:02.000Z | biobb_vs/fpocket/common.py | bioexcel/biobb_vs | 6f492d317b3783e6fd8468ca62883d1db7fa4bc6 | [
"Apache-2.0"
] | 4 | 2021-01-19T15:46:07.000Z | 2021-12-09T10:22:31.000Z | biobb_vs/fpocket/common.py | bioexcel/biobb_vs | 6f492d317b3783e6fd8468ca62883d1db7fa4bc6 | [
"Apache-2.0"
] | null | null | null | """ Common functions for package biobb_vs.fpocket """
from pathlib import Path, PurePath
import shutil
import json
import re
from biobb_common.tools import file_utils as fu
# CHECK PARAMETERS
def check_input_path(path, argument, out_log, classname):
    """Validate that an input file exists and carries a supported extension."""
    if not Path(path).exists():
        fu.log(classname + ': Unexisting %s file, exiting' % argument, out_log)
        raise SystemExit(classname + ': Unexisting %s file' % argument)
    # Suffix includes the leading dot; strip it before checking the format.
    ext = PurePath(path).suffix[1:]
    if not is_valid_file(ext, argument):
        fu.log(classname + ': Format %s in %s file is not compatible' % (ext, argument), out_log)
        raise SystemExit(classname + ': Format %s in %s file is not compatible' % (ext, argument))
    return path
def check_output_path(path, argument, optional, out_log, classname):
    """Validate an output path: parent folder must exist, extension must be supported."""
    if optional and not path:
        return None
    parent = PurePath(path).parent
    if parent and not Path(parent).exists():
        fu.log(classname + ': Unexisting %s folder, exiting' % argument, out_log)
        raise SystemExit(classname + ': Unexisting %s folder' % argument)
    ext = PurePath(path).suffix[1:]
    if not is_valid_file(ext, argument):
        fu.log(classname + ': Format %s in %s file is not compatible' % (ext, argument), out_log)
        raise SystemExit(classname + ': Format %s in %s file is not compatible' % (ext, argument))
    return path
def is_valid_file(ext, argument):
    """Return True when `ext` is an accepted format for the given argument name."""
    pdb_formats = ['pdb']
    zip_formats = ['zip']
    json_formats = ['json']
    accepted = {
        'input_pdb_path': pdb_formats,
        'output_pockets_zip': zip_formats,
        'output_summary': json_formats,
        'input_pockets_zip': zip_formats,
        'input_summary': json_formats,
        'output_filter_pockets_zip': zip_formats,
        'output_pocket_pdb': pdb_formats,
        'output_pocket_pqr': ['pqr'],
    }
    return ext in accepted[argument]
# CHECK PROPERTIES
def check_range(name, property, values, out_log, classname):
    """Validate a two-element numeric range for fpocket_filter.

    Args:
        name: Property name used in error messages.
        property: Candidate range, expected as a [min, max] list of numbers.
        values: Allowed [lower, upper] bounds for the range.
        out_log: Logger handed through to fu.log on failure.
        classname: Caller class name used as the error-message prefix.

    Returns:
        The validated `property` list.

    Raises:
        SystemExit: If the range is malformed or falls outside `values`.
    """
    # isinstance (instead of exact type comparison) also accepts list subclasses.
    if not isinstance(property, list) or len(property) != 2 or not all(isinstance(n, (int, float)) for n in property):
        fu.log(classname + ': Incorrect format for %s property, exiting' % name, out_log)
        raise SystemExit(classname + ': Incorrect format for %s property, exiting' % name)
    if property[0] < values[0] or property[1] > values[1]:
        # Hoisted: the same bounds string is used in both the log and the exception.
        bounds = ', '.join(str(v) for v in values)
        fu.log(classname + ': %s is out of [%s] range, exiting' % (name, bounds), out_log)
        raise SystemExit(classname + ': %s is out of [%s] range, exiting' % (name, bounds))
    return property
# PROCESS OUTPUTS
def process_output_fpocket(tmp_folder, output_pockets_zip, output_summary, sort_by, remove_tmp, out_log, classname):
    """ Creates the output_pockets_zip and generates the output_summary

    Parses fpocket's `input_out/input_info.txt` into a dict keyed by
    'pocketN', sorts it by `sort_by` (descending), zips the pocket files
    and writes the summary as JSON. Raises SystemExit if fpocket produced
    no output folder.
    """
    path = str(PurePath(tmp_folder).joinpath('input_out'))

    if not Path(path).is_dir():
        # fpocket did not produce its output folder: clean up (if asked) and abort.
        if remove_tmp:
            # remove temporary folder
            fu.rm(tmp_folder)
            fu.log('Removing temporary folder: %s' % tmp_folder, out_log)
        fu.log(classname + ': Error executing fpocket, please check your properties', out_log)
        raise SystemExit(classname + ': Error executing fpocket, please check your properties')

    # summary
    # read input_info.txt file
    info = PurePath(path).joinpath('input_info.txt')
    with open(info, 'r') as info_text:
        lines = info_text.readlines()
        lines = [x for x in lines if x != '\n']

    data = {}

    # parse input_info.txt file to python object
    pocket = ''
    for line in lines:
        if not line.startswith('\t'):
            # first level: pocket
            # Header lines look like "Pocket N :" — grab the number.
            num = re.findall('\d+', line)[0]
            pocket = 'pocket' + num
            data[pocket] = {}
        else:
            # second level: pocket properties
            # Property lines are "<name> : <value>"; normalize the name to
            # snake_case (drop dashes/dots, collapse whitespace to '_').
            groups = re.findall('(.*)(?:\ *\:\ *)(.*)', line)[0]
            key = groups[0].lower().strip()
            key = re.sub(r'\-|\.', '', key)
            key = re.sub(r'\s+', '_', key)
            # Values with a '.' are parsed as float, the rest as int.
            value = float(groups[1]) if '.' in groups[1] else int(groups[1])
            data[pocket][key] = value

    # get number of pockets
    fu.log('%d pockets found' % (len(data)), out_log)

    # sort data by sort_by property
    fu.log('Sorting output data by %s' % (sort_by), out_log)
    data = dict(sorted(data.items(), key=lambda item: float(item[1][sort_by]), reverse = True))

    # compress pockets
    pockets = PurePath(path).joinpath('pockets')
    files_list = [str(i) for i in Path(pockets).iterdir()]
    fu.zip_list(zip_file = output_pockets_zip, file_list = files_list, out_log = out_log)

    # save summary
    fu.log('Saving summary to %s file' % (output_summary), out_log)
    with open(output_summary, 'w') as outfile:
        json.dump(data, outfile, indent=4)

    if remove_tmp:
        # remove temporary folder
        fu.rm(tmp_folder)
        fu.log('Removed temporary folder: %s' % tmp_folder, out_log)
def process_output_fpocket_filter(search_list, tmp_folder, input_pockets_zip, output_filter_pockets_zip, remove_tmp, out_log):
    """ Creates the output_filter_pockets_zip """
    # extract every pocket file from the input zip into the temporary folder
    fu.unzip_list(zip_file = input_pockets_zip, dest_dir = tmp_folder, out_log = out_log)
    # enumerate everything that was extracted
    extracted = [str(item) for item in Path(tmp_folder).iterdir()]
    # keep only the files whose name contains one of the requested pocket ids
    selected = [f for f in extracted for term in search_list if term + '_' in f ]
    fu.log('Creating %s output file' % output_filter_pockets_zip, out_log)
    # compress the selection into the output zip
    fu.zip_list(zip_file = output_filter_pockets_zip, file_list = selected, out_log = out_log)
    if remove_tmp:
        # clean up the temporary folder
        fu.rm(tmp_folder)
        fu.log('Removed temporary folder: %s' % tmp_folder, out_log)
| 37.516779 | 126 | 0.705903 |
631ff91e70a79d13fdcfedffd99caaec9d8e6825 | 566 | py | Python | python_pce.py | watal/python-pce | a633633f8bab3c0df77715c7fa8a151c9b8c54eb | [
"MIT"
] | null | null | null | python_pce.py | watal/python-pce | a633633f8bab3c0df77715c7fa8a151c9b8c54eb | [
"MIT"
] | null | null | null | python_pce.py | watal/python-pce | a633633f8bab3c0df77715c7fa8a151c9b8c54eb | [
"MIT"
] | 1 | 2020-08-20T12:14:18.000Z | 2020-08-20T12:14:18.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import threading
import linkstate_socksrv
import segmentlist_socksrv
import argparse
def main():
    '''simple PCE for FRRouting'''
    # One worker thread per socket server: the link-state receiver and the
    # segment-list receiver, started in that order.
    workers = [
        threading.Thread(target=linkstate_socksrv.lsocket),    # Receive linkstate
        threading.Thread(target=segmentlist_socksrv.ssocket),  # Receive segment list
    ]
    for worker in workers:
        worker.start()
    return 0
if __name__ == '__main__':
    # No CLI options are defined; argparse still provides -h/--help.
    parser = argparse.ArgumentParser(description='Simple SR PCE')
    args = parser.parse_args()
    # Return value of main() is intentionally ignored here.
    main()
| 18.866667 | 68 | 0.708481 |
4c6f5b5c81844fc16e1e242f5fa6037cfb8330e6 | 120 | py | Python | 03Aula09-09/ex03.py | danicon/Curso-IPE | 3b9e2a9d187492d6561a512363bd06156286df6a | [
"MIT"
] | 2 | 2020-09-09T12:50:57.000Z | 2020-09-09T12:56:02.000Z | 03Aula09-09/ex03.py | danicon/Curso-IPE | 3b9e2a9d187492d6561a512363bd06156286df6a | [
"MIT"
] | null | null | null | 03Aula09-09/ex03.py | danicon/Curso-IPE | 3b9e2a9d187492d6561a512363bd06156286df6a | [
"MIT"
] | null | null | null | tabu = int(input('Escolha o número da tabuada: '))
# Print the multiplication table of `tabu` for factors 0 through 10.
for c in range(11):
    print(f" {tabu} x {c} = {tabu*c}")
| 17.142857 | 50 | 0.533333 |
22e19e32effd8ea52f3480015115f4d03135a0ef | 34,251 | py | Python | util/generate_build_files.py | chinshou/boringssl | a12a2497ffc95a4f75e50d6d7e861e7bee7b8a5e | [
"MIT"
] | 11 | 2020-06-24T06:20:13.000Z | 2021-11-17T12:07:48.000Z | util/generate_build_files.py | scw00/boringssl | 0c30649ba676e73a4bbb449ee684ccf2d452f7f6 | [
"MIT"
] | 7 | 2022-03-15T13:25:39.000Z | 2022-03-15T13:25:44.000Z | util/generate_build_files.py | scw00/boringssl | 0c30649ba676e73a4bbb449ee684ccf2d452f7f6 | [
"MIT"
] | 4 | 2021-02-19T09:48:47.000Z | 2022-03-13T17:29:35.000Z | # coding=utf8
# Copyright (c) 2015, Google Inc.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Enumerates source files for consumption by various build systems."""
import optparse
import os
import subprocess
import sys
import json
# OS_ARCH_COMBOS maps from OS and platform to the OpenSSL assembly "style" for
# that platform and the extension used by asm files.
# Tuple layout: (os, arch, perlasm_style, extra perlasm args, asm extension).
OS_ARCH_COMBOS = [
    ('ios', 'arm', 'ios32', [], 'S'),
    ('ios', 'aarch64', 'ios64', [], 'S'),
    ('linux', 'arm', 'linux32', [], 'S'),
    ('linux', 'aarch64', 'linux64', [], 'S'),
    ('linux', 'ppc64le', 'linux64le', [], 'S'),
    ('linux', 'x86', 'elf', ['-fPIC', '-DOPENSSL_IA32_SSE2'], 'S'),
    ('linux', 'x86_64', 'elf', [], 'S'),
    ('mac', 'x86', 'macosx', ['-fPIC', '-DOPENSSL_IA32_SSE2'], 'S'),
    ('mac', 'x86_64', 'macosx', [], 'S'),
    ('win', 'x86', 'win32n', ['-DOPENSSL_IA32_SSE2'], 'asm'),
    ('win', 'x86_64', 'nasm', [], 'asm'),
]

# NON_PERL_FILES enumerates assembly files that are not processed by the
# perlasm system.
NON_PERL_FILES = {
    ('linux', 'arm'): [
        'src/crypto/curve25519/asm/x25519-asm-arm.S',
        'src/crypto/poly1305/poly1305_arm_asm.S',
    ],
    ('linux', 'x86_64'): [
        'src/crypto/hrss/asm/poly_rq_mul.S',
    ],
}

# Path prefix prepended to every emitted file path (set from --prefix in
# __main__; consumed by PathOf()).
PREFIX = None
# Whether test data files are embedded into crypto_test_data.cc (set from
# --embed_test_data in __main__).
EMBED_TEST_DATA = True
def PathOf(x):
  """Returns x prefixed with the global PREFIX, when one was supplied."""
  if PREFIX:
    return os.path.join(PREFIX, x)
  return x
class Android(object):
  """Emits Android build files: sources.bp (Soong) and sources.mk (legacy)."""

  def __init__(self):
    self.header = \
"""# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is created by generate_build_files.py. Do not edit manually.
"""

  def PrintVariableSection(self, out, name, files):
    # Makefile-style "name := \" continuation list, sorted for stable output.
    out.write('%s := \\\n' % name)
    for f in sorted(files):
      out.write('  %s\\\n' % f)
    out.write('\n')

  def WriteFiles(self, files, asm_outputs):
    # New Android.bp format
    with open('sources.bp', 'w+') as blueprint:
      # Soong uses //-style comments, so rewrite the '#' header.
      blueprint.write(self.header.replace('#', '//'))
      # Separate out BCM files to allow different compilation rules (specific to Android FIPS)
      bcm_c_files = files['bcm_crypto']
      non_bcm_c_files = [file for file in files['crypto'] if file not in bcm_c_files]
      non_bcm_asm = self.FilterBcmAsm(asm_outputs, False)
      bcm_asm = self.FilterBcmAsm(asm_outputs, True)
      self.PrintDefaults(blueprint, 'libcrypto_sources', non_bcm_c_files, non_bcm_asm)
      self.PrintDefaults(blueprint, 'libcrypto_bcm_sources', bcm_c_files, bcm_asm)
      self.PrintDefaults(blueprint, 'libssl_sources', files['ssl'])
      self.PrintDefaults(blueprint, 'bssl_sources', files['tool'])
      self.PrintDefaults(blueprint, 'boringssl_test_support_sources', files['test_support'])
      self.PrintDefaults(blueprint, 'boringssl_crypto_test_sources', files['crypto_test'])
      self.PrintDefaults(blueprint, 'boringssl_ssl_test_sources', files['ssl_test'])
    # Legacy Android.mk format, only used by Trusty in new branches
    with open('sources.mk', 'w+') as makefile:
      makefile.write(self.header)
      makefile.write('\n')
      self.PrintVariableSection(makefile, 'crypto_sources', files['crypto'])
      for ((osname, arch), asm_files) in asm_outputs:
        if osname != 'linux':
          continue
        self.PrintVariableSection(
            makefile, '%s_%s_sources' % (osname, arch), asm_files)

  # NOTE(review): asm_outputs={} is a mutable default argument. It is only
  # read here, never mutated, so it is currently safe — but fragile.
  def PrintDefaults(self, blueprint, name, files, asm_outputs={}):
    """Print a cc_defaults section from a list of C files and optionally assembly outputs"""
    blueprint.write('\n')
    blueprint.write('cc_defaults {\n')
    blueprint.write('    name: "%s",\n' % name)
    blueprint.write('    srcs: [\n')
    for f in sorted(files):
      blueprint.write('        "%s",\n' % f)
    blueprint.write('    ],\n')
    if asm_outputs:
      blueprint.write('    target: {\n')
      for ((osname, arch), asm_files) in asm_outputs:
        # Soong has no ppc64le target; aarch64 is spelled arm64 there.
        if osname != 'linux' or arch == 'ppc64le':
          continue
        if arch == 'aarch64':
          arch = 'arm64'
        blueprint.write('        linux_%s: {\n' % arch)
        blueprint.write('            srcs: [\n')
        for f in sorted(asm_files):
          blueprint.write('                "%s",\n' % f)
        blueprint.write('            ],\n')
        blueprint.write('        },\n')
      blueprint.write('    },\n')
    blueprint.write('}\n')

  def FilterBcmAsm(self, asm, want_bcm):
    """Filter a list of assembly outputs based on whether they belong in BCM
    Args:
      asm: Assembly file lists to filter
      want_bcm: If true then include BCM files, otherwise do not
    Returns:
      A copy of |asm| with files filtered according to |want_bcm|
    """
    # NOTE(review): under Python 3, filter() would return a lazy iterator
    # here; this file targets Python 2 (see .iteritems() elsewhere), where
    # filter() returns a list.
    return [(archinfo, filter(lambda p: ("/crypto/fipsmodule/" in p) == want_bcm, files))
            for (archinfo, files) in asm]
class AndroidCMake(object):
  """Emits android-sources.cmake for the Android emulator's CMake build."""

  def __init__(self):
    self.header = \
"""# Copyright (C) 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is created by generate_build_files.py. Do not edit manually.
# To specify a custom path prefix, set BORINGSSL_ROOT before including this
# file, or use list(TRANSFORM ... PREPEND) from CMake 3.12.
"""

  def PrintVariableSection(self, out, name, files):
    out.write('set(%s\n' % name)
    for f in sorted(files):
      # Ideally adding the prefix would be the caller's job, but
      # list(TRANSFORM ... PREPEND) is only available starting CMake 3.12. When
      # sources.cmake is the source of truth, we can ask Android to either write
      # a CMake function or update to 3.12.
      out.write('  ${BORINGSSL_ROOT}%s\n' % f)
    out.write(')\n')

  def WriteFiles(self, files, asm_outputs):
    # The Android emulator uses a custom CMake buildsystem.
    #
    # TODO(davidben): Move our various source lists into sources.cmake and have
    # Android consume that directly.
    with open('android-sources.cmake', 'w+') as out:
      out.write(self.header)
      self.PrintVariableSection(out, 'crypto_sources', files['crypto'])
      self.PrintVariableSection(out, 'ssl_sources', files['ssl'])
      self.PrintVariableSection(out, 'tool_sources', files['tool'])
      self.PrintVariableSection(out, 'test_support_sources',
                                files['test_support'])
      self.PrintVariableSection(out, 'crypto_test_sources',
                                files['crypto_test'])
      self.PrintVariableSection(out, 'ssl_test_sources', files['ssl_test'])
      for ((osname, arch), asm_files) in asm_outputs:
        self.PrintVariableSection(
            out, 'crypto_sources_%s_%s' % (osname, arch), asm_files)
class Bazel(object):
  """Bazel outputs files suitable for including in Bazel files."""

  def __init__(self):
    # Tracks whether a blank separator line is needed before the next section.
    self.firstSection = True
    self.header = \
"""# This file is created by generate_build_files.py. Do not edit manually.
"""

  def PrintVariableSection(self, out, name, files):
    if not self.firstSection:
      out.write('\n')
    self.firstSection = False
    out.write('%s = [\n' % name)
    for f in sorted(files):
      # PathOf applies the optional --prefix to every entry.
      out.write('    "%s",\n' % PathOf(f))
    out.write(']\n')

  def WriteFiles(self, files, asm_outputs):
    with open('BUILD.generated.bzl', 'w+') as out:
      out.write(self.header)
      self.PrintVariableSection(out, 'ssl_headers', files['ssl_headers'])
      self.PrintVariableSection(out, 'fips_fragments', files['fips_fragments'])
      self.PrintVariableSection(
          out, 'ssl_internal_headers', files['ssl_internal_headers'])
      self.PrintVariableSection(out, 'ssl_sources', files['ssl'])
      self.PrintVariableSection(out, 'crypto_headers', files['crypto_headers'])
      self.PrintVariableSection(
          out, 'crypto_internal_headers', files['crypto_internal_headers'])
      self.PrintVariableSection(out, 'crypto_sources', files['crypto'])
      self.PrintVariableSection(out, 'tool_sources', files['tool'])
      self.PrintVariableSection(out, 'tool_headers', files['tool_headers'])
      for ((osname, arch), asm_files) in asm_outputs:
        self.PrintVariableSection(
            out, 'crypto_sources_%s_%s' % (osname, arch), asm_files)
    with open('BUILD.generated_tests.bzl', 'w+') as out:
      out.write(self.header)
      out.write('test_support_sources = [\n')
      for filename in sorted(files['test_support'] +
                             files['test_support_headers'] +
                             files['crypto_internal_headers'] +
                             files['ssl_internal_headers']):
        # malloc.cc is deliberately excluded from the Bazel build.
        if os.path.basename(filename) == 'malloc.cc':
          continue
        out.write('    "%s",\n' % PathOf(filename))
      out.write(']\n')
      self.PrintVariableSection(out, 'crypto_test_sources',
                                files['crypto_test'])
      self.PrintVariableSection(out, 'ssl_test_sources', files['ssl_test'])
      self.PrintVariableSection(out, 'crypto_test_data',
                                files['crypto_test_data'])
      self.PrintVariableSection(out, 'urandom_test_sources',
                                files['urandom_test'])
class Eureka(object):
  """Emits eureka.mk, a legacy Android.mk-format source list."""

  def __init__(self):
    self.header = \
"""# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is created by generate_build_files.py. Do not edit manually.
"""

  def PrintVariableSection(self, out, name, files):
    # Makefile-style continuation list, sorted for stable output.
    out.write('%s := \\\n' % name)
    for f in sorted(files):
      out.write('  %s\\\n' % f)
    out.write('\n')

  def WriteFiles(self, files, asm_outputs):
    # Legacy Android.mk format
    with open('eureka.mk', 'w+') as makefile:
      makefile.write(self.header)
      self.PrintVariableSection(makefile, 'crypto_sources', files['crypto'])
      self.PrintVariableSection(makefile, 'ssl_sources', files['ssl'])
      self.PrintVariableSection(makefile, 'tool_sources', files['tool'])
      # Only Linux assembly is relevant for this target.
      for ((osname, arch), asm_files) in asm_outputs:
        if osname != 'linux':
          continue
        self.PrintVariableSection(
            makefile, '%s_%s_sources' % (osname, arch), asm_files)
class GN(object):
  """Emits BUILD.generated.gni / BUILD.generated_tests.gni for Chromium's GN."""

  def __init__(self):
    # Tracks whether a blank separator line is needed before the next section.
    self.firstSection = True
    self.header = \
"""# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is created by generate_build_files.py. Do not edit manually.
"""

  def PrintVariableSection(self, out, name, files):
    if not self.firstSection:
      out.write('\n')
    self.firstSection = False
    out.write('%s = [\n' % name)
    for f in sorted(files):
      out.write('  "%s",\n' % f)
    out.write(']\n')

  def WriteFiles(self, files, asm_outputs):
    with open('BUILD.generated.gni', 'w+') as out:
      out.write(self.header)
      self.PrintVariableSection(out, 'crypto_sources',
                                files['crypto'] +
                                files['crypto_internal_headers'])
      self.PrintVariableSection(out, 'crypto_headers',
                                files['crypto_headers'])
      self.PrintVariableSection(out, 'ssl_sources',
                                files['ssl'] + files['ssl_internal_headers'])
      self.PrintVariableSection(out, 'ssl_headers', files['ssl_headers'])
      for ((osname, arch), asm_files) in asm_outputs:
        self.PrintVariableSection(
            out, 'crypto_sources_%s_%s' % (osname, arch), asm_files)
      # Fuzzer names are the fuzz/ source basenames without extension.
      fuzzers = [os.path.splitext(os.path.basename(fuzzer))[0]
                 for fuzzer in files['fuzz']]
      self.PrintVariableSection(out, 'fuzzers', fuzzers)
    with open('BUILD.generated_tests.gni', 'w+') as out:
      # Reset so the first section of the second file gets no leading blank.
      self.firstSection = True
      out.write(self.header)
      self.PrintVariableSection(out, 'test_support_sources',
                                files['test_support'] +
                                files['test_support_headers'])
      self.PrintVariableSection(out, 'crypto_test_sources',
                                files['crypto_test'])
      self.PrintVariableSection(out, 'crypto_test_data',
                                files['crypto_test_data'])
      self.PrintVariableSection(out, 'ssl_test_sources', files['ssl_test'])
class GYP(object):
  """Emits boringssl.gypi, a GYP include with source list variables."""

  def __init__(self):
    self.header = \
"""# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is created by generate_build_files.py. Do not edit manually.
"""

  def PrintVariableSection(self, out, name, files):
    # Entries are nested two levels deep inside the 'variables' dict.
    out.write('    \'%s\': [\n' % name)
    for f in sorted(files):
      out.write('      \'%s\',\n' % f)
    out.write('    ],\n')

  def WriteFiles(self, files, asm_outputs):
    with open('boringssl.gypi', 'w+') as gypi:
      gypi.write(self.header + '{\n  \'variables\': {\n')
      self.PrintVariableSection(gypi, 'boringssl_ssl_sources',
                                files['ssl'] + files['ssl_headers'] +
                                files['ssl_internal_headers'])
      self.PrintVariableSection(gypi, 'boringssl_crypto_sources',
                                files['crypto'] + files['crypto_headers'] +
                                files['crypto_internal_headers'])
      for ((osname, arch), asm_files) in asm_outputs:
        self.PrintVariableSection(gypi, 'boringssl_%s_%s_sources' %
                                  (osname, arch), asm_files)
      gypi.write('  }\n}\n')
class CMake(object):
  """Emits a standalone CMakeLists.txt for building outside the source tree."""

  def __init__(self):
    # Raw string: the CMake template contains backslash-free ${...} syntax
    # but is kept raw to avoid any accidental escape interpretation.
    self.header = \
R'''# Copyright (c) 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is created by generate_build_files.py. Do not edit manually.
cmake_minimum_required(VERSION 3.0)
project(BoringSSL LANGUAGES C CXX)
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
  set(CLANG 1)
endif()
if(CMAKE_COMPILER_IS_GNUCXX OR CLANG)
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -fvisibility=hidden -fno-common -fno-exceptions -fno-rtti")
  if(APPLE)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
  endif()
  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden -fno-common")
  if((CMAKE_C_COMPILER_VERSION VERSION_GREATER "4.8.99") OR CLANG)
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11")
  else()
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99")
  endif()
endif()
# pthread_rwlock_t requires a feature flag.
if(NOT WIN32)
  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_XOPEN_SOURCE=700")
endif()
if(WIN32)
  add_definitions(-D_HAS_EXCEPTIONS=0)
  add_definitions(-DWIN32_LEAN_AND_MEAN)
  add_definitions(-DNOMINMAX)
  # Allow use of fopen.
  add_definitions(-D_CRT_SECURE_NO_WARNINGS)
  # VS 2017 and higher supports STL-only warning suppressions.
  # A bug in CMake < 3.13.0 may cause the space in this value to
  # cause issues when building with NASM. In that case, update CMake.
  add_definitions("-D_STL_EXTRA_DISABLED_WARNINGS=4774 4987")
endif()
add_definitions(-DBORINGSSL_IMPLEMENTATION)
# CMake's iOS support uses Apple's multiple-architecture toolchain. It takes an
# architecture list from CMAKE_OSX_ARCHITECTURES, leaves CMAKE_SYSTEM_PROCESSOR
# alone, and expects all architecture-specific logic to be conditioned within
# the source files rather than the build. This does not work for our assembly
# files, so we fix CMAKE_SYSTEM_PROCESSOR and only support single-architecture
# builds.
if(NOT OPENSSL_NO_ASM AND CMAKE_OSX_ARCHITECTURES)
  list(LENGTH CMAKE_OSX_ARCHITECTURES NUM_ARCHES)
  if(NOT ${NUM_ARCHES} EQUAL 1)
    message(FATAL_ERROR "Universal binaries not supported.")
  endif()
  list(GET CMAKE_OSX_ARCHITECTURES 0 CMAKE_SYSTEM_PROCESSOR)
endif()
if(OPENSSL_NO_ASM)
  add_definitions(-DOPENSSL_NO_ASM)
  set(ARCH "generic")
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "x86_64")
  set(ARCH "x86_64")
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "amd64")
  set(ARCH "x86_64")
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "AMD64")
  # cmake reports AMD64 on Windows, but we might be building for 32-bit.
  if(CMAKE_CL_64)
    set(ARCH "x86_64")
  else()
    set(ARCH "x86")
  endif()
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "x86")
  set(ARCH "x86")
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "i386")
  set(ARCH "x86")
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "i686")
  set(ARCH "x86")
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64")
  set(ARCH "aarch64")
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "arm64")
  set(ARCH "aarch64")
# Apple A12 Bionic chipset which is added in iPhone XS/XS Max/XR uses arm64e architecture.
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "arm64e")
  set(ARCH "aarch64")
elseif(${CMAKE_SYSTEM_PROCESSOR} MATCHES "^arm*")
  set(ARCH "arm")
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "mips")
  # Just to avoid the “unknown processor” error.
  set(ARCH "generic")
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "ppc64le")
  set(ARCH "ppc64le")
else()
  message(FATAL_ERROR "Unknown processor:" ${CMAKE_SYSTEM_PROCESSOR})
endif()
if(NOT OPENSSL_NO_ASM)
  if(UNIX)
    enable_language(ASM)
    # Clang's integerated assembler does not support debug symbols.
    if(NOT CMAKE_ASM_COMPILER_ID MATCHES "Clang")
      set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -Wa,-g")
    endif()
    # CMake does not add -isysroot and -arch flags to assembly.
    if(APPLE)
      if(CMAKE_OSX_SYSROOT)
        set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -isysroot \"${CMAKE_OSX_SYSROOT}\"")
      endif()
      foreach(arch ${CMAKE_OSX_ARCHITECTURES})
        set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -arch ${arch}")
      endforeach()
    endif()
  else()
    set(CMAKE_ASM_NASM_FLAGS "${CMAKE_ASM_NASM_FLAGS} -gcv8")
    enable_language(ASM_NASM)
  endif()
endif()
if(BUILD_SHARED_LIBS)
  add_definitions(-DBORINGSSL_SHARED_LIBRARY)
  # Enable position-independent code globally. This is needed because
  # some library targets are OBJECT libraries.
  set(CMAKE_POSITION_INDEPENDENT_CODE TRUE)
endif()
include_directories(src/include)
'''

  def PrintLibrary(self, out, name, files):
    out.write('add_library(\n')
    out.write('  %s\n\n' % name)
    for f in sorted(files):
      out.write('  %s\n' % PathOf(f))
    out.write(')\n\n')

  def PrintExe(self, out, name, files, libs):
    out.write('add_executable(\n')
    out.write('  %s\n\n' % name)
    for f in sorted(files):
      out.write('  %s\n' % PathOf(f))
    out.write(')\n\n')
    out.write('target_link_libraries(%s %s)\n\n' % (name, ' '.join(libs)))

  def PrintSection(self, out, name, files):
    out.write('set(\n')
    out.write('  %s\n\n' % name)
    for f in sorted(files):
      out.write('  %s\n' % PathOf(f))
    out.write(')\n\n')

  def WriteFiles(self, files, asm_outputs):
    with open('CMakeLists.txt', 'w+') as cmake:
      cmake.write(self.header)
      # One CRYPTO_<os>_<arch>_SOURCES variable per supported platform.
      for ((osname, arch), asm_files) in asm_outputs:
        self.PrintSection(cmake, 'CRYPTO_%s_%s_SOURCES' % (osname, arch),
            asm_files)
      # Select the matching variable at configure time based on ARCH.
      cmake.write(
R'''if(APPLE AND ${ARCH} STREQUAL "aarch64")
  set(CRYPTO_ARCH_SOURCES ${CRYPTO_ios_aarch64_SOURCES})
elseif(APPLE AND ${ARCH} STREQUAL "arm")
  set(CRYPTO_ARCH_SOURCES ${CRYPTO_ios_arm_SOURCES})
elseif(APPLE)
  set(CRYPTO_ARCH_SOURCES ${CRYPTO_mac_${ARCH}_SOURCES})
elseif(UNIX)
  set(CRYPTO_ARCH_SOURCES ${CRYPTO_linux_${ARCH}_SOURCES})
elseif(WIN32)
  set(CRYPTO_ARCH_SOURCES ${CRYPTO_win_${ARCH}_SOURCES})
endif()
''')
      self.PrintLibrary(cmake, 'crypto',
          files['crypto'] + ['${CRYPTO_ARCH_SOURCES}'])
      self.PrintLibrary(cmake, 'ssl', files['ssl'])
      self.PrintExe(cmake, 'bssl', files['tool'], ['ssl', 'crypto'])
      cmake.write(
R'''if(NOT MSVC AND NOT ANDROID)
  target_link_libraries(crypto pthread)
endif()
''')
def FindCMakeFiles(directory):
  """Returns list of all CMakeLists.txt files recursively in directory."""
  found = []
  for (path, _, filenames) in os.walk(directory):
    found.extend(os.path.join(path, name)
                 for name in filenames if name == 'CMakeLists.txt')
  return found
def OnlyFIPSFragments(path, dent, is_dir):
  """Accepts all directories, plus non-test files under src/crypto/fipsmodule/."""
  if is_dir:
    return True
  fips_dir = os.path.join('src', 'crypto', 'fipsmodule', '')
  return path.startswith(fips_dir) and NoTests(path, dent, is_dir)
def NoTestsNorFIPSFragments(path, dent, is_dir):
  """Rejects test sources and, for files, FIPS module fragments."""
  if not NoTests(path, dent, is_dir):
    return False
  return is_dir or not OnlyFIPSFragments(path, dent, is_dir)
def NoTests(path, dent, is_dir):
  """Filter function that can be passed to FindCFiles in order to remove test
  sources."""
  # Directories named 'test' are pruned entirely; files are rejected when
  # their name contains the 'test.' infix (foo_test.c, test.cc, ...).
  return dent != 'test' if is_dir else 'test.' not in dent
def OnlyTests(path, dent, is_dir):
  """Filter function that can be passed to FindCFiles in order to remove
  non-test sources."""
  # Directories named 'test' are still pruned (test sources live alongside
  # the code they test); files must carry the '_test.' infix.
  return dent != 'test' if is_dir else '_test.' in dent
def AllFiles(path, dent, is_dir):
  """Filter function that can be passed to FindCFiles in order to include all
  sources."""
  # Unconditional accept; arguments exist only to satisfy the filter contract.
  return True
def NoTestRunnerFiles(path, dent, is_dir):
  """Filter function that can be passed to FindCFiles or FindHeaderFiles in
  order to exclude test runner files."""
  # NOTE(martinkr): This prevents .h/.cc files in src/ssl/test/runner, which
  # are in their own subpackage, from being included in boringssl/BUILD files.
  if is_dir:
    return dent != 'runner'
  return True
def NotGTestSupport(path, dent, is_dir):
  """Rejects GTest support and ABI-test helper sources."""
  return not ('gtest' in dent or 'abi_test' in dent)
def SSLHeaderFiles(path, dent, is_dir):
  """Accepts exactly the public SSL headers."""
  return dent in ('ssl.h', 'tls1.h', 'ssl23.h', 'ssl3.h', 'dtls1.h', 'srtp.h')
def FindCFiles(directory, filter_func):
  """Recurses through directory and returns a list of paths to all the C source
  files that pass filter_func.

  filter_func(path, dent, is_dir) is consulted for every file and directory;
  rejected directories are pruned from the walk entirely.
  """
  cfiles = []

  for (path, dirnames, filenames) in os.walk(directory):
    for filename in filenames:
      if not filename.endswith('.c') and not filename.endswith('.cc'):
        continue
      if not filter_func(path, filename, False):
        continue
      cfiles.append(os.path.join(path, filename))

    # Prune rejected directories in place so os.walk skips them. The previous
    # `del dirnames[i]` inside enumerate() shifted indices and skipped the
    # element following each deletion, so consecutive rejected directories
    # could still be traversed.
    dirnames[:] = [d for d in dirnames if filter_func(path, d, True)]

  return cfiles
def FindHeaderFiles(directory, filter_func):
  """Recurses through directory and returns a list of paths to all the header files that pass filter_func.

  filter_func(path, dent, is_dir) is consulted for every file and directory;
  rejected directories are pruned from the walk entirely.
  """
  hfiles = []
  for (path, dirnames, filenames) in os.walk(directory):
    for filename in filenames:
      if not filename.endswith('.h'):
        continue
      if not filter_func(path, filename, False):
        continue
      hfiles.append(os.path.join(path, filename))

    # Prune rejected directories in place so os.walk skips them. The previous
    # `del dirnames[i]` inside enumerate() skipped the element following each
    # deletion, so consecutive rejected directories could still be traversed.
    dirnames[:] = [d for d in dirnames if filter_func(path, d, True)]

  return hfiles
def ExtractPerlAsmFromCMakeFile(cmakefile):
  """Parses the contents of the CMakeLists.txt file passed as an argument and
  returns a list of all the perlasm() directives found in the file."""
  directives = []
  base = os.path.dirname(cmakefile)
  with open(cmakefile) as f:
    for raw_line in f:
      stripped = raw_line.strip()
      if not stripped.startswith('perlasm('):
        continue
      if not stripped.endswith(')'):
        raise ValueError('Bad perlasm line in %s' % cmakefile)
      # Drop the 'perlasm(' prefix and the closing ')' and split on
      # whitespace: [output, input, extra args...].
      params = stripped[len('perlasm('):-1].split()
      if len(params) < 2:
        raise ValueError('Bad perlasm line in %s' % cmakefile)
      directives.append({
          'extra_args': params[2:],
          'input': os.path.join(base, params[1]),
          'output': os.path.join(base, params[0]),
      })
  return directives
def ReadPerlAsmOperations():
  """Returns a list of all perlasm() directives found in CMake config files in
  src/."""
  ops = []
  for cmakefile in FindCMakeFiles('src'):
    ops.extend(ExtractPerlAsmFromCMakeFile(cmakefile))
  return ops
def PerlAsm(output_filename, input_filename, perlasm_style, extra_args):
  """Runs a perlasm script and puts the output into output_filename."""
  # perlasm does not create directories itself, so make the output dir first.
  out_dir = os.path.dirname(output_filename)
  if not os.path.isdir(out_dir):
    os.makedirs(out_dir)
  cmd = ['perl', input_filename, perlasm_style]
  cmd.extend(extra_args)
  cmd.append(output_filename)
  subprocess.check_call(cmd)
def ArchForAsmFilename(filename):
  """Returns the architectures that a given asm file should be compiled for
  based on substrings in the filename."""
  # Test order matters: 'x86_64' contains 'x86', and 'armx'/'armv8' contain
  # 'arm', so the more specific checks come first.
  if 'x86_64' in filename or 'avx2' in filename:
    return ['x86_64']
  if ('x86' in filename and 'x86_64' not in filename) or '586' in filename:
    return ['x86']
  if 'armx' in filename:
    return ['arm', 'aarch64']
  if 'armv8' in filename:
    return ['aarch64']
  if 'arm' in filename:
    return ['arm']
  if 'ppc' in filename:
    return ['ppc64le']
  raise ValueError('Unknown arch for asm filename: ' + filename)
def WriteAsmFiles(perlasms):
  """Generates asm files from perlasm directives for each supported OS x
  platform combination.

  Returns a dict mapping (osname, arch) to the list of generated asm paths,
  with the hand-written NON_PERL_FILES appended.
  """
  asmfiles = {}

  for osarch in OS_ARCH_COMBOS:
    (osname, arch, perlasm_style, extra_args, asm_ext) = osarch
    key = (osname, arch)
    # Generated files land under an '<os>-<arch>/' directory.
    outDir = '%s-%s' % key

    for perlasm in perlasms:
      filename = os.path.basename(perlasm['input'])
      output = perlasm['output']
      if not output.startswith('src'):
        raise ValueError('output missing src: %s' % output)
      # Re-root the output from 'src/...' into the per-platform directory.
      output = os.path.join(outDir, output[4:])
      if output.endswith('-armx.${ASM_EXT}'):
        # '-armx' perlasm scripts serve both 32- and 64-bit ARM; disambiguate.
        output = output.replace('-armx',
                                '-armx64' if arch == 'aarch64' else '-armx32')
      output = output.replace('${ASM_EXT}', asm_ext)

      if arch in ArchForAsmFilename(filename):
        PerlAsm(output, perlasm['input'], perlasm_style,
                perlasm['extra_args'] + extra_args)
        asmfiles.setdefault(key, []).append(output)

  # NOTE(review): .iteritems() is Python 2-only; this script targets Python 2.
  for (key, non_perl_asm_files) in NON_PERL_FILES.iteritems():
    asmfiles.setdefault(key, []).extend(non_perl_asm_files)

  return asmfiles
def ExtractVariablesFromCMakeFile(cmakefile):
  """Parses the contents of the CMakeLists.txt file passed as an argument and
  returns a dictionary of exported source lists."""
  variables = {}
  in_set_command = False
  set_command = []
  with open(cmakefile) as f:
    for line in f:
      # Drop any trailing comment before interpreting the line.
      comment_start = line.find('#')
      if comment_start != -1:
        line = line[:comment_start]
      line = line.strip()

      if not in_set_command:
        if line.startswith('set('):
          # Start of a set(...) block; collect tokens until the closing ')'.
          in_set_command = True
          set_command = []
      elif line == ')':
        # End of the block: first token is the variable name, the rest its
        # value.
        in_set_command = False
        if not set_command:
          raise ValueError('Empty set command')
        variables[set_command[0]] = set_command[1:]
      else:
        set_command.extend(token for token in line.split(' ') if token)
  if in_set_command:
    raise ValueError('Unfinished set command')
  return variables
def main(platforms):
  """Collects all source lists, generates err_data.c (and optionally
  crypto_test_data.cc), produces per-platform assembly, and asks every
  platform object in *platforms* to write its build files.

  Requires a Go toolchain and perl on PATH and must run from the directory
  containing 'src'. Always returns 0.
  """
  cmake = ExtractVariablesFromCMakeFile(os.path.join('src', 'sources.cmake'))
  crypto_c_files = (FindCFiles(os.path.join('src', 'crypto'), NoTestsNorFIPSFragments) +
                    FindCFiles(os.path.join('src', 'third_party', 'fiat'), NoTestsNorFIPSFragments))
  fips_fragments = FindCFiles(os.path.join('src', 'crypto', 'fipsmodule'), OnlyFIPSFragments)
  ssl_source_files = FindCFiles(os.path.join('src', 'ssl'), NoTests)
  tool_c_files = FindCFiles(os.path.join('src', 'tool'), NoTests)
  tool_h_files = FindHeaderFiles(os.path.join('src', 'tool'), AllFiles)

  # third_party/fiat/p256.c lives in third_party/fiat, but it is a FIPS
  # fragment, not a normal source file.
  p256 = os.path.join('src', 'third_party', 'fiat', 'p256.c')
  fips_fragments.append(p256)
  crypto_c_files.remove(p256)

  # BCM shared library C files
  bcm_crypto_c_files = [
      os.path.join('src', 'crypto', 'fipsmodule', 'bcm.c')
  ]

  # Generate err_data.c
  with open('err_data.c', 'w+') as err_data:
    subprocess.check_call(['go', 'run', 'err_data_generate.go'],
                          cwd=os.path.join('src', 'crypto', 'err'),
                          stdout=err_data)
  crypto_c_files.append('err_data.c')

  test_support_c_files = FindCFiles(os.path.join('src', 'crypto', 'test'),
                                    NotGTestSupport)
  test_support_h_files = (
      FindHeaderFiles(os.path.join('src', 'crypto', 'test'), AllFiles) +
      FindHeaderFiles(os.path.join('src', 'ssl', 'test'), NoTestRunnerFiles))

  crypto_test_files = []
  if EMBED_TEST_DATA:
    # Generate crypto_test_data.cc
    with open('crypto_test_data.cc', 'w+') as out:
      subprocess.check_call(
          ['go', 'run', 'util/embed_test_data.go'] + cmake['CRYPTO_TEST_DATA'],
          cwd='src',
          stdout=out)
    crypto_test_files += ['crypto_test_data.cc']

  crypto_test_files += FindCFiles(os.path.join('src', 'crypto'), OnlyTests)
  crypto_test_files += [
      'src/crypto/test/abi_test.cc',
      'src/crypto/test/file_test_gtest.cc',
      'src/crypto/test/gtest_main.cc',
  ]
  # urandom_test.cc is in a separate binary so that it can be test PRNG
  # initialisation.
  crypto_test_files = [
      file for file in crypto_test_files
      if not file.endswith('/urandom_test.cc')
  ]

  ssl_test_files = FindCFiles(os.path.join('src', 'ssl'), OnlyTests)
  ssl_test_files += [
      'src/crypto/test/abi_test.cc',
      'src/crypto/test/gtest_main.cc',
  ]

  urandom_test_files = [
      'src/crypto/fipsmodule/rand/urandom_test.cc',
  ]

  fuzz_c_files = FindCFiles(os.path.join('src', 'fuzz'), NoTests)

  ssl_h_files = (
      FindHeaderFiles(
          os.path.join('src', 'include', 'openssl'),
          SSLHeaderFiles))

  # Local helper: the public crypto headers are everything in include/openssl
  # that is not an SSL header.
  def NotSSLHeaderFiles(path, filename, is_dir):
    return not SSLHeaderFiles(path, filename, is_dir)
  crypto_h_files = (
      FindHeaderFiles(
          os.path.join('src', 'include', 'openssl'),
          NotSSLHeaderFiles))

  ssl_internal_h_files = FindHeaderFiles(os.path.join('src', 'ssl'), NoTests)
  crypto_internal_h_files = (
      FindHeaderFiles(os.path.join('src', 'crypto'), NoTests) +
      FindHeaderFiles(os.path.join('src', 'third_party', 'fiat'), NoTests))

  files = {
      'bcm_crypto': bcm_crypto_c_files,
      'crypto': crypto_c_files,
      'crypto_headers': crypto_h_files,
      'crypto_internal_headers': crypto_internal_h_files,
      'crypto_test': sorted(crypto_test_files),
      'crypto_test_data': sorted('src/' + x for x in cmake['CRYPTO_TEST_DATA']),
      'fips_fragments': fips_fragments,
      'fuzz': fuzz_c_files,
      'ssl': ssl_source_files,
      'ssl_headers': ssl_h_files,
      'ssl_internal_headers': ssl_internal_h_files,
      'ssl_test': sorted(ssl_test_files),
      'tool': tool_c_files,
      'tool_headers': tool_h_files,
      'test_support': test_support_c_files,
      'test_support_headers': test_support_h_files,
      'urandom_test': sorted(urandom_test_files),
  }

  # NOTE(review): .iteritems() is Python 2-only; this script targets Python 2.
  asm_outputs = sorted(WriteAsmFiles(ReadPerlAsmOperations()).iteritems())

  for platform in platforms:
    platform.WriteFiles(files, asm_outputs)

  return 0
if __name__ == '__main__':
    parser = optparse.OptionParser(usage='Usage: %prog [--prefix=<path>]'
                                   ' [android|android-cmake|bazel|eureka|gn|gyp]')
    parser.add_option('--prefix', dest='prefix',
                      help='For Bazel, prepend argument to all source files')
    parser.add_option(
        '--embed_test_data', type='choice', dest='embed_test_data',
        action='store', default="true", choices=["true", "false"],
        help='For Bazel or GN, don\'t embed data files in crypto_test_data.cc')
    options, args = parser.parse_args(sys.argv[1:])

    # Module-level knobs consumed by the generators.
    PREFIX = options.prefix
    EMBED_TEST_DATA = (options.embed_test_data == "true")

    if not args:
        parser.print_help()
        sys.exit(1)

    # Map each recognised target name to its platform-writer class.
    _PLATFORM_FACTORIES = {
        'android': Android,
        'android-cmake': AndroidCMake,
        'bazel': Bazel,
        'eureka': Eureka,
        'gn': GN,
        'gyp': GYP,
        'cmake': CMake,
    }

    platforms = []
    for target in args:
        factory = _PLATFORM_FACTORIES.get(target)
        if factory is None:
            # Unknown target: behave exactly like the original if/elif chain.
            parser.print_help()
            sys.exit(1)
        platforms.append(factory())

    sys.exit(main(platforms))
| 34.562059 | 112 | 0.663105 |
ad42a49d0502181c322ef13600a53a3160718798 | 2,613 | py | Python | service/views/error.py | thetestgame/LinkerIO | ef9af2b8f4552dd3b55270271cc1ea2144d9c410 | [
"MIT"
] | 1 | 2020-11-03T11:38:38.000Z | 2020-11-03T11:38:38.000Z | service/views/error.py | thetestgame/LinkerIO | ef9af2b8f4552dd3b55270271cc1ea2144d9c410 | [
"MIT"
] | null | null | null | service/views/error.py | thetestgame/LinkerIO | ef9af2b8f4552dd3b55270271cc1ea2144d9c410 | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2019 Jordan Maxwell
Written 10/20/2019
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from flask import current_app
from flask import render_template
from flask.views import View
from flask import jsonify
from service.decorators import view
import traceback
@current_app.errorhandler(404)
def handle_404_exception(e):
    """Render the shared HTTP-error page for 404 (Page Not Found)."""
    return render_template('error/http.html',
                           errorcode='404',
                           errormsg='Page Not Found')
@current_app.errorhandler(403)
def handle_403_exception(e):
    """Render the shared HTTP-error page for 403 (Access Denied)."""
    return render_template('error/http.html',
                           errorcode='403',
                           errormsg='Access Denied')
@current_app.errorhandler(410)
def handle_410_exception(e):
    """Render the shared HTTP-error page for 410 (Resource Removed)."""
    return render_template('error/http.html',
                           errorcode='410',
                           errormsg='Resource Removed')
@current_app.errorhandler(500)
def handle_500_exception(e):
    """Render the shared HTTP-error page for 500 (Internal Server Error)."""
    return render_template('error/http.html',
                           errorcode='500',
                           errormsg='Internal Server Error')
@current_app.errorhandler(Exception)
def handle_default_exception(e):
    """Catch-all handler: show a traceback page in debug mode, a plain 500 otherwise."""
    if not current_app.config.get('DEBUG', False):
        # Production: never leak internals; fall back to the generic 500 page.
        return handle_500_exception(e)
    return render_template(
        'error/internal.html',
        errorname=type(e).__name__,
        errormsg=str(e),
        traceback='<br/>'.join(traceback.format_exc().splitlines()))
| 27.797872 | 78 | 0.716035 |
3188841a165b92452505051d65114c30083daefa | 3,237 | py | Python | proteingraph/resi_atoms.py | ericmjl/protein-interaction-network | eba38bc2054b1017500525f78ed0564cce145285 | [
"MIT"
] | 35 | 2016-03-11T17:07:07.000Z | 2022-02-10T06:53:52.000Z | proteingraph/resi_atoms.py | TopBang/protein-interaction-network | e4920351fced4acec18867f61b8f53b5ef5fb65a | [
"MIT"
] | 179 | 2018-09-26T04:58:52.000Z | 2022-03-14T15:45:29.000Z | proteingraph/resi_atoms.py | TopBang/protein-interaction-network | e4920351fced4acec18867f61b8f53b5ef5fb65a | [
"MIT"
] | 19 | 2017-10-22T01:56:44.000Z | 2021-11-01T05:19:46.000Z | """
Author: Eric J. Ma
Purpose: This is a set of utility variables and functions that can be used
across the PIN project.
"""
import numpy as np
from sklearn.preprocessing import StandardScaler
# Atom names that form the protein backbone.
BACKBONE_ATOMS = ["N", "CA", "C", "O"]

# One-letter amino-acid codes, including ambiguity codes (B, J, X, Z).
AMINO_ACIDS = [
    "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L",
    "M", "N", "P", "Q", "R", "S", "T", "V", "W", "X", "Y", "Z",
]

# Edge/interaction categories used when building the interaction network.
BOND_TYPES = [
    "hydrophobic", "disulfide", "hbond", "ionic", "aromatic",
    "aromatic_sulphur", "cation_pi", "backbone", "delaunay",
]

# Three-letter residue codes, including ambiguity (ASX, GLX) and unknown (UNK).
RESI_NAMES = [
    "ALA", "ASX", "CYS", "ASP", "GLU", "PHE", "GLY", "HIS", "ILE",
    "LYS", "LEU", "MET", "ASN", "PRO", "GLN", "ARG", "SER", "THR",
    "VAL", "TRP", "TYR", "GLX", "UNK",
]

# Residues participating in hydrophobic interactions.
HYDROPHOBIC_RESIS = [
    "ALA", "VAL", "LEU", "ILE", "MET", "PHE", "TRP", "PRO", "TYR",
]

# Disulfide bridges form between cysteine SG atoms.
DISULFIDE_RESIS = ["CYS"]
DISULFIDE_ATOMS = ["SG"]

# Charged residues for ionic interactions, split by sign below.
IONIC_RESIS = ["ARG", "LYS", "HIS", "ASP", "GLU"]
POS_AA = ["HIS", "LYS", "ARG"]
NEG_AA = ["GLU", "ASP"]

# Ring-atom name prefixes per aromatic residue, used for aromatic contacts.
AA_RING_ATOMS = dict()
AA_RING_ATOMS["PHE"] = ["CG", "CD", "CE", "CZ"]
AA_RING_ATOMS["TRP"] = ["CD", "CE", "CH", "CZ"]
AA_RING_ATOMS["HIS"] = ["CG", "CD", "CE", "ND", "NE"]
AA_RING_ATOMS["TYR"] = ["CG", "CD", "CE", "CZ"]

AROMATIC_RESIS = ["PHE", "TRP", "HIS", "TYR"]

# Cation-pi interactions: cations (LYS, ARG) vs. pi systems (PHE, TYR, TRP).
CATION_PI_RESIS = ["LYS", "ARG", "PHE", "TYR", "TRP"]
CATION_RESIS = ["LYS", "ARG"]
PI_RESIS = ["PHE", "TYR", "TRP"]

# Sulphur-bearing residues for aromatic-sulphur interactions.
SULPHUR_RESIS = ["MET", "CYS"]

# Isoelectric point (pI) per residue; ambiguity codes use averages.
ISOELECTRIC_POINTS = {
    "ALA": 6.11,
    "ARG": 10.76,
    "ASN": 10.76,
    "ASP": 2.98,
    "CYS": 5.02,
    "GLU": 3.08,
    "GLN": 5.65,
    "GLY": 6.06,
    "HIS": 7.64,
    "ILE": 6.04,
    "LEU": 6.04,
    "LYS": 9.74,
    "MET": 5.74,
    "PHE": 5.91,
    "PRO": 6.30,
    "SER": 5.68,
    "THR": 5.60,
    "TRP": 5.88,
    "TYR": 5.63,
    "VAL": 6.02,
    "UNK": 7.00,  # unknown so assign neutral
    "ASX": 6.87,  # the average of D and N
    "GLX": 4.35,  # the average of E and Q
}
# Standardise the isoelectric points (zero mean, unit variance) so they can
# be used directly as node features.
scaler = StandardScaler()
scaler.fit(np.array(list(ISOELECTRIC_POINTS.values())).reshape(-1, 1))
ISOELECTRIC_POINTS_STD = {
    resi: scaler.transform(np.array([pi]).reshape(-1, 1))
    for resi, pi in ISOELECTRIC_POINTS.items()
}
# Molecular weight (Da) per residue; ambiguity codes use averages and the
# unknown residue uses the mean of all known weights.
MOLECULAR_WEIGHTS = {
    "ALA": 89.0935,
    "ARG": 174.2017,
    "ASN": 132.1184,
    "ASP": 133.1032,
    "CYS": 121.1590,
    "GLU": 147.1299,
    "GLN": 146.1451,
    "GLY": 75.0669,
    "HIS": 155.1552,
    "ILE": 131.1736,
    "LEU": 131.1736,
    "LYS": 146.1882,
    "MET": 149.2124,
    "PHE": 165.1900,
    "PRO": 115.1310,
    "SER": 105.0930,
    "THR": 119.1197,
    "TRP": 204.2262,
    "TYR": 181.1894,
    "VAL": 117.1469,
    "UNK": 137.1484,  # unknown, therefore assign average of knowns
    "ASX": 132.6108,  # the average of D and N
    "GLX": 146.6375,  # the average of E and Q
}
# Standardise the molecular weights with the same scaler instance.
# NOTE: the original code initialised MOLECULAR_WEIGHTS_STD twice (once
# before and once after refitting the scaler); the redundant duplicate
# assignment has been removed.
scaler.fit(np.array(list(MOLECULAR_WEIGHTS.values())).reshape(-1, 1))
MOLECULAR_WEIGHTS_STD = dict()
for k, v in MOLECULAR_WEIGHTS.items():
    # Each value is a 1x1 array of the standardised weight.
    MOLECULAR_WEIGHTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))
af0deae8038271d3708ad75137e5cb9f2f706a3b | 2,560 | py | Python | contrib/opencensus-ext-azure/opencensus/ext/azure/metrics_exporter/statsbeat_metrics/__init__.py | mpomarole/opencensus-python | 5aba7bcc720b63d6349b10bc4696702da0aed3f6 | [
"Apache-2.0"
] | null | null | null | contrib/opencensus-ext-azure/opencensus/ext/azure/metrics_exporter/statsbeat_metrics/__init__.py | mpomarole/opencensus-python | 5aba7bcc720b63d6349b10bc4696702da0aed3f6 | [
"Apache-2.0"
] | null | null | null | contrib/opencensus-ext-azure/opencensus/ext/azure/metrics_exporter/statsbeat_metrics/__init__.py | mpomarole/opencensus-python | 5aba7bcc720b63d6349b10bc4696702da0aed3f6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from opencensus.ext.azure.metrics_exporter import MetricsExporter
from opencensus.ext.azure.metrics_exporter.statsbeat_metrics.statsbeat import (
_STATS_CONNECTION_STRING,
_STATS_SHORT_EXPORT_INTERVAL,
_StatsbeatMetrics,
)
from opencensus.metrics import transport
from opencensus.metrics.export.metric_producer import MetricProducer
_STATSBEAT_METRICS = None
_STATSBEAT_LOCK = threading.Lock()
def collect_statsbeat_metrics(ikey):
    """Start exporting statsbeat metrics for *ikey*.

    Thread-safe and idempotent: only the first call creates the exporter
    and background thread; subsequent calls return immediately.
    """
    global _STATSBEAT_METRICS  # pylint: disable=global-statement
    with _STATSBEAT_LOCK:
        if _STATSBEAT_METRICS is not None:
            # Statsbeat collection was already started by an earlier call.
            return
        exporter = MetricsExporter(
            is_stats=True,
            connection_string=_STATS_CONNECTION_STRING,
            enable_standard_metrics=False,
            export_interval=_STATS_SHORT_EXPORT_INTERVAL,  # 15m by default
        )
        # The user's ikey is the one being tracked.
        producer = _AzureStatsbeatMetricsProducer(instrumentation_key=ikey)
        _STATSBEAT_METRICS = producer
        # Export some initial stats immediately on program start.
        exporter.export_metrics(producer.get_initial_metrics())
        exporter.exporter_thread = transport.get_exporter_thread(
            [producer],
            exporter,
            exporter.options.export_interval)
class _AzureStatsbeatMetricsProducer(MetricProducer):
    """Producer of Azure statsbeat metrics (e.g. attach rate), backed by gauges."""

    def __init__(self, instrumentation_key):
        # Delegate all metric bookkeeping to the shared statsbeat helper.
        self._statsbeat = _StatsbeatMetrics(instrumentation_key)

    def get_metrics(self):
        """Return the current set of statsbeat metrics."""
        return self._statsbeat.get_metrics()

    def get_initial_metrics(self):
        """Return the metrics exported once at program start."""
        return self._statsbeat.get_initial_metrics()
f006258b0b1d18c02a349232d2267f4c12d25d63 | 1,168 | py | Python | BackEnd/api/endpoints/database/evaluation.py | camilleAmaury/X5GON_project | 8d5b61eb45a357fe1881c0523389d463724c6448 | [
"Unlicense"
] | 1 | 2021-05-02T14:24:38.000Z | 2021-05-02T14:24:38.000Z | BackEnd/api/endpoints/database/evaluation.py | camilleAmaury/X5GON_project | 8d5b61eb45a357fe1881c0523389d463724c6448 | [
"Unlicense"
] | 1 | 2022-02-10T00:48:47.000Z | 2022-02-10T00:48:47.000Z | BackEnd/api/endpoints/database/evaluation.py | camilleAmaury/X5GON_project | 8d5b61eb45a357fe1881c0523389d463724c6448 | [
"Unlicense"
] | null | null | null | from flask import request
from flask_restplus import Namespace, Resource, fields
from api.utils import validator
from api.service.evaluation import add_evaluation
# REST namespace and request/response schema for evaluation records.
api = Namespace('evaluations', description='Evaluation operations')

evaluation_schema = api.model('Evaluation', {
    'user_id': fields.Integer(required=True, description='ID of the user'),
    'graph_ref': fields.String(required=True, description='ID of the document in the graph'),
    'comprehension_rating': fields.Integer(required=True, description='Comprehension rating of this document'),
    'quality_rating': fields.Integer(required=True, description='Quality rating of this document'),
})
@api.route("/")
class EvaluationRoute(Resource):
    """Create evaluations for documents in the graph."""

    @api.expect(evaluation_schema, envelope='json')
    @api.doc(responses={
        201: 'Evaluation successfully created',
        409: 'Conflict, user not exists / document not exists / this user already evaluate this document',
        422: 'Validation Error'
    })
    @api.marshal_with(evaluation_schema)
    def post(self):
        """Validate the JSON payload and persist a new evaluation."""
        payload = request.json
        validator.validate_payload(payload, evaluation_schema)
        return add_evaluation(data=payload), 201
e27dba09695962f0f2d29c30f01c4f536b91effc | 2,632 | py | Python | models.py | Silve1ra/casting-agency-api | 075ac2d465972b84a95fbd3e0eb0823ec33a867c | [
"MIT"
] | null | null | null | models.py | Silve1ra/casting-agency-api | 075ac2d465972b84a95fbd3e0eb0823ec33a867c | [
"MIT"
] | null | null | null | models.py | Silve1ra/casting-agency-api | 075ac2d465972b84a95fbd3e0eb0823ec33a867c | [
"MIT"
] | null | null | null | import os
import sys
from sqlalchemy import Column, String, Integer
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
# Db config
# ----------------------------------------------------------------
database_path = os.environ['DATABASE_URL_HEROKU']
db = SQLAlchemy()
# Setup db
# ----------------------------------------------------------------
def setup_db(app, database_path=database_path):
    """Bind the shared SQLAlchemy instance to *app* and seed the database.

    Args:
        app: the Flask application to configure.
        database_path: SQLAlchemy connection URI (defaults to the value
            read from the DATABASE_URL_HEROKU environment variable).
    """
    app.config["SQLALCHEMY_DATABASE_URI"] = database_path
    app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
    # Migrate registers itself against the app/db on construction; the
    # instance itself was never used, so don't keep an unused binding.
    Migrate(app, db)
    db.app = app
    db.init_app(app)
    db_drop_and_create_all()
# Drop and create all db
# ----------------------------------------------------------------
def db_drop_and_create_all():
    '''
    Please, remove the comment here in order to perform the tests
    Heroku crashes if it is activated.
    '''
    # db.drop_all()
    # db.create_all()

    # Seed one demo actor row so the API has data to serve.
    Actor(
        name='Christian Bale',
        age='47',
        gender='male'
    ).insert()

    # Seed one demo movie row.
    Movie(
        title='The Dark Knight Rises',
        release_date='2012-07-27',
    ).insert()
# ORM Methods
# ----------------------------------------------------------------
class ORMMethods(db.Model):
    """Abstract base model providing shared insert/delete/update helpers."""
    __abstract__ = True

    def insert(self):
        """Add this row to the session and commit."""
        db.session.add(self)
        db.session.commit()

    def delete(self):
        """Remove this row from the session and commit."""
        db.session.delete(self)
        db.session.commit()

    def update(self):
        """Commit pending attribute changes on this row."""
        db.session.commit()
# Actor
# ----------------------------------------------------------------
class Actor(ORMMethods):
    """An actor available for casting."""
    __tablename__ = 'actors'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255))
    age = db.Column(db.String(120))
    gender = db.Column(db.String(120))

    def __init__(self, name, age, gender):
        self.name = name
        self.age = age
        self.gender = gender

    def serialize(self):
        """Return a JSON-serialisable dict of this actor's columns."""
        return {
            'id': self.id,
            'name': self.name,
            'age': self.age,
            'gender': self.gender,
        }
# Movie
# ----------------------------------------------------------------
class Movie(ORMMethods):
    """A movie with a release date; uses SQLAlchemy's default constructor."""
    __tablename__ = 'movies'

    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(120), nullable=False)
    release_date = db.Column(db.DateTime(), nullable=False)

    def serialize(self):
        """Return a JSON-serialisable dict of this movie's columns."""
        return {
            'id': self.id,
            'title': self.title,
            'release_date': self.release_date,
        }
c56d854bbfb382d4d61acf8bdd87f7c4ef4941d1 | 12,411 | py | Python | tools/c7n_mailer/tests/test_email.py | edonkor1/cloud-custodian | 6f54735acd071b6fc6a0cca851d36e1a1fa46aa0 | [
"Apache-2.0"
] | 1 | 2019-08-25T12:05:20.000Z | 2019-08-25T12:05:20.000Z | tools/c7n_mailer/tests/test_email.py | edonkor1/cloud-custodian | 6f54735acd071b6fc6a0cca851d36e1a1fa46aa0 | [
"Apache-2.0"
] | null | null | null | tools/c7n_mailer/tests/test_email.py | edonkor1/cloud-custodian | 6f54735acd071b6fc6a0cca851d36e1a1fa46aa0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto3
import copy
import os
import unittest
import six
from c7n_mailer.email_delivery import EmailDelivery
from common import logger, get_ldap_lookup
from common import MAILER_CONFIG, RESOURCE_1, SQS_MESSAGE_1, SQS_MESSAGE_4
from mock import patch, call
from c7n_mailer.utils_email import is_email
# note principalId is very org/domain specific for federated?, it would be good to get
# confirmation from capone on this event / test.
CLOUDTRAIL_EVENT = {
    'detail': {
        'userIdentity': {
            "type": "IAMUser",
            "principalId": "AIDAJ45Q7YFFAREXAMPLE",
            "arn": "arn:aws:iam::123456789012:user/michael_bolton",
            "accountId": "123456789012",
            "accessKeyId": "AKIAIOSFODNN7EXAMPLE",
            "userName": "michael_bolton"
        }
    }
}
class MockEmailDelivery(EmailDelivery):
    """EmailDelivery variant whose LDAP lookup is a redis-backed test stub."""

    def get_ldap_connection(self):
        return get_ldap_lookup(cache_engine='redis')
class EmailTest(unittest.TestCase):
    """Unit tests for email address resolution and SMTP delivery."""

    def setUp(self):
        self.aws_session = boto3.Session()
        self.email_delivery = MockEmailDelivery(MAILER_CONFIG, self.aws_session, logger)
        self.email_delivery.ldap_lookup.uid_regex = ''
        template_abs_filename = os.path.join(
            os.path.abspath(os.path.dirname(__file__)), 'example.jinja')
        # Jinja paths must always be forward slashes regardless of operating system
        template_abs_filename = template_abs_filename.replace('\\', '/')
        SQS_MESSAGE_1['action']['template'] = template_abs_filename
        SQS_MESSAGE_4['action']['template'] = template_abs_filename

    def test_valid_email(self):
        self.assertFalse(is_email('foobar'))
        self.assertFalse(is_email('foo@bar'))
        self.assertFalse(is_email('slack://foo@bar.com'))
        self.assertTrue(is_email('foo@bar.com'))

    def test_smtp_creds(self):
        conf = dict(MAILER_CONFIG)
        conf['smtp_username'] = 'alice'
        conf['smtp_password'] = 'bob'
        msg = dict(SQS_MESSAGE_1)
        deliver = MockEmailDelivery(conf, self.aws_session, logger)
        messages_map = deliver.get_to_addrs_email_messages_map(msg)
        with patch("smtplib.SMTP") as mock_smtp:
            with patch('c7n_mailer.email_delivery.kms_decrypt') as mock_decrypt:
                mock_decrypt.return_value = 'xyz'
                for email_addrs, mimetext_msg in messages_map.items():
                    deliver.send_c7n_email(msg, list(email_addrs), mimetext_msg)
                # The encrypted password is decrypted once and used to log in.
                mock_decrypt.assert_called_once()
                mock_smtp.assert_has_calls([call().login('alice', 'xyz')])

    def test_priority_header_is_valid(self):
        # Only string values '1' through '5' are valid priorities.
        self.assertFalse(self.email_delivery.priority_header_is_valid('0'))
        self.assertFalse(self.email_delivery.priority_header_is_valid('-1'))
        self.assertFalse(self.email_delivery.priority_header_is_valid('6'))
        self.assertFalse(self.email_delivery.priority_header_is_valid('sd'))
        self.assertTrue(self.email_delivery.priority_header_is_valid('1'))
        self.assertTrue(self.email_delivery.priority_header_is_valid('5'))

    def test_get_valid_emails_from_list(self):
        candidates = [
            'michael_bolton@initech.com',
            'lsdk',
            'resource-owner',
            'event-owner',
            'bill@initech.com'
        ]
        valid_emails = self.email_delivery.get_valid_emails_from_list(candidates)
        self.assertEqual(valid_emails, ['michael_bolton@initech.com', 'bill@initech.com'])

    def test_event_owner_ldap_flow(self):
        targets = ['event-owner']
        michael_bolton_email = self.email_delivery.get_event_owner_email(
            targets, CLOUDTRAIL_EVENT)
        self.assertEqual(michael_bolton_email, ['michael_bolton@initech.com'])

    def test_get_ldap_emails_from_resource(self):
        # Without the manager flag only the owner's email is resolved.
        SQS_MESSAGE_1['action']['email_ldap_username_manager'] = False
        ldap_emails = self.email_delivery.get_ldap_emails_from_resource(
            SQS_MESSAGE_1,
            RESOURCE_1
        )
        self.assertEqual(ldap_emails, ['peter@initech.com'])
        # With the flag the manager's email is appended as well.
        SQS_MESSAGE_1['action']['email_ldap_username_manager'] = True
        ldap_emails = self.email_delivery.get_ldap_emails_from_resource(
            SQS_MESSAGE_1,
            RESOURCE_1
        )
        self.assertEqual(ldap_emails, ['peter@initech.com', 'bill_lumberg@initech.com'])

    def test_email_to_resources_map_with_ldap_manager(self):
        emails_to_resources_map = self.email_delivery.get_email_to_addrs_to_resources_map(
            SQS_MESSAGE_1
        )
        # make sure only 1 email is queued to go out
        self.assertEqual(len(emails_to_resources_map.items()), 1)
        to_emails = ('bill_lumberg@initech.com', 'milton@initech.com', 'peter@initech.com')
        self.assertEqual(emails_to_resources_map, {to_emails: [RESOURCE_1]})

    def test_email_to_email_message_map_without_ldap_manager(self):
        SQS_MESSAGE = copy.deepcopy(SQS_MESSAGE_1)
        SQS_MESSAGE['policy']['actions'][1].pop('email_ldap_username_manager', None)
        email_addrs_to_email_message_map = self.email_delivery.get_to_addrs_email_messages_map(
            SQS_MESSAGE
        )
        to_emails = ('bill_lumberg@initech.com', 'milton@initech.com', 'peter@initech.com')
        items = list(email_addrs_to_email_message_map.items())
        self.assertEqual(items[0][0], to_emails)
        self.assertEqual(items[0][1]['to'], ', '.join(to_emails))

    def test_smtp_called_once(self):
        SQS_MESSAGE = copy.deepcopy(SQS_MESSAGE_1)
        to_addrs_to_email_messages_map = self.email_delivery.get_to_addrs_email_messages_map(
            SQS_MESSAGE
        )
        with patch("smtplib.SMTP") as mock_smtp:
            for email_addrs, mimetext_msg in six.iteritems(to_addrs_to_email_messages_map):
                self.email_delivery.send_c7n_email(
                    SQS_MESSAGE, list(email_addrs), mimetext_msg)
                self.assertEqual(mimetext_msg['X-Priority'], '1 (Highest)')
            # Instance of the mocked SMTP object.
            smtp_instance = mock_smtp.return_value
            # Mock must have been called, and exactly once.
            self.assertTrue(smtp_instance.sendmail.called)
            self.assertEqual(smtp_instance.sendmail.call_count, 1)
            # The calls must match a specific list, in a specific order.
            to_addrs = ['bill_lumberg@initech.com', 'milton@initech.com', 'peter@initech.com']
            self.assertEqual(
                smtp_instance.sendmail.mock_calls,
                [call(MAILER_CONFIG['from_address'], to_addrs, mimetext_msg.as_string())]
            )

    def test_smtp_called_multiple_times(self):
        SQS_MESSAGE = copy.deepcopy(SQS_MESSAGE_1)
        SQS_MESSAGE['action'].pop('priority_header', None)
        RESOURCE_2 = {
            'AvailabilityZone': 'us-east-1a',
            'Attachments': [],
            'Tags': [
                {
                    'Value': 'samir@initech.com',
                    'Key': 'SupportEmail'
                }
            ],
            'VolumeId': 'vol-01a0e6ea6b8lsdkj93'
        }
        SQS_MESSAGE['resources'].append(RESOURCE_2)
        to_addrs_to_email_messages_map = self.email_delivery.get_to_addrs_email_messages_map(
            SQS_MESSAGE
        )
        with patch("smtplib.SMTP") as mock_smtp:
            for email_addrs, mimetext_msg in six.iteritems(to_addrs_to_email_messages_map):
                self.email_delivery.send_c7n_email(
                    SQS_MESSAGE, list(email_addrs), mimetext_msg)
                # No priority header was configured, so none is set.
                self.assertEqual(mimetext_msg.get('X-Priority'), None)
            # Instance of the mocked SMTP object.
            smtp_instance = mock_smtp.return_value
            # Two distinct recipient groups mean two sendmail calls.
            self.assertTrue(smtp_instance.sendmail.called)
            self.assertEqual(smtp_instance.sendmail.call_count, 2)

    def test_emails_resource_mapping_multiples(self):
        SQS_MESSAGE = copy.deepcopy(SQS_MESSAGE_1)
        SQS_MESSAGE['action'].pop('priority_header', None)
        RESOURCE_2 = {
            'AvailabilityZone': 'us-east-1a',
            'Attachments': [],
            'Tags': [
                {
                    'Value': 'samir@initech.com',
                    'Key': 'SupportEmail'
                }
            ],
            'VolumeId': 'vol-01a0e6ea6b8lsdkj93'
        }
        SQS_MESSAGE['resources'].append(RESOURCE_2)
        emails_to_resources_map = self.email_delivery.get_email_to_addrs_to_resources_map(
            SQS_MESSAGE
        )
        email_1_to_addrs = ('bill_lumberg@initech.com', 'milton@initech.com', 'peter@initech.com')
        email_2_to_addrs = ('samir@initech.com',)
        self.assertEqual(emails_to_resources_map[email_1_to_addrs], [RESOURCE_1])
        self.assertEqual(emails_to_resources_map[email_2_to_addrs], [RESOURCE_2])

    def test_emails_resource_mapping_no_owner(self):
        SQS_MESSAGE = copy.deepcopy(SQS_MESSAGE_1)
        SQS_MESSAGE['action'].pop('priority_header', None)
        SQS_MESSAGE['action']['owner_absent_contact'] = ['foo@example.com']
        RESOURCE_2 = {
            'AvailabilityZone': 'us-east-1a',
            'Attachments': [],
            'Tags': [
                {
                    'Value': 'peter',
                    'Key': 'CreatorName'
                }
            ],
            'VolumeId': 'vol-01a0e6ea6b89f0099'
        }
        SQS_MESSAGE['resources'] = [RESOURCE_2]
        emails_to_resources_map = self.email_delivery.get_email_to_addrs_to_resources_map(
            SQS_MESSAGE
        )
        email_1_to_addrs = (
            'bill_lumberg@initech.com', 'foo@example.com', 'peter@initech.com'
        )
        self.assertEqual(
            emails_to_resources_map[email_1_to_addrs], [RESOURCE_2]
        )

    def test_no_mapping_if_no_valid_emails(self):
        SQS_MESSAGE = copy.deepcopy(SQS_MESSAGE_1)
        SQS_MESSAGE['action']['to'].remove('ldap_uid_tags')
        SQS_MESSAGE['resources'][0].pop('Tags', None)
        emails_to_resources_map = self.email_delivery.get_email_to_addrs_to_resources_map(
            SQS_MESSAGE
        )
        self.assertEqual(emails_to_resources_map, {})

    def test_flattened_list_get_resource_owner_emails_from_resource(self):
        RESOURCE_2 = {
            'AvailabilityZone': 'us-east-1a',
            'Attachments': [],
            'Tags': [
                {
                    'Value': '123456',
                    'Key': 'OwnerEmail'
                }
            ],
            'VolumeId': 'vol-01a0e6ea6b8lsdkj93'
        }
        RESOURCE_3 = {
            'AvailabilityZone': 'us-east-1a',
            'Attachments': [],
            'Tags': [
                {
                    'Value': 'milton@initech.com',
                    'Key': 'OwnerEmail'
                }
            ],
            'VolumeId': 'vol-01a0e6ea6b8lsdkj93'
        }
        # A non-email owner value is resolved through LDAP...
        ldap_emails = self.email_delivery.get_resource_owner_emails_from_resource(
            SQS_MESSAGE_1,
            RESOURCE_2
        )
        self.assertEqual(ldap_emails, ['milton@initech.com'])
        # ...while a literal email address is returned as-is.
        ldap_emails = self.email_delivery.get_resource_owner_emails_from_resource(
            SQS_MESSAGE_1,
            RESOURCE_3
        )
        self.assertEqual(ldap_emails, ['milton@initech.com'])

    def test_cc_email_functionality(self):
        email = self.email_delivery.get_mimetext_message(
            SQS_MESSAGE_4, SQS_MESSAGE_4['resources'], ['hello@example.com'])
        self.assertEqual(email['Cc'], 'hello@example.com, cc@example.com')
| 41.508361 | 99 | 0.639755 |
f13ca0537ba7ca6ef576bbb61ba4d6e5e79d4823 | 2,533 | py | Python | tests/test_packages/test_protocols/test_gym.py | lrahmani/agents-aea | 9bd1d51530fc21bf41b5adea031cda19a94b048b | [
"Apache-2.0"
] | null | null | null | tests/test_packages/test_protocols/test_gym.py | lrahmani/agents-aea | 9bd1d51530fc21bf41b5adea031cda19a94b048b | [
"Apache-2.0"
] | null | null | null | tests/test_packages/test_protocols/test_gym.py | lrahmani/agents-aea | 9bd1d51530fc21bf41b5adea031cda19a94b048b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the messages module."""
from packages.fetchai.protocols.gym.message import GymMessage
from packages.fetchai.protocols.gym.serialization import GymSerializer
def test_gym_message_instantiation():
    """Each gym performative can be instantiated with its expected fields."""
    act = GymMessage(
        performative=GymMessage.Performative.ACT,
        action=GymMessage.AnyObject("any_action"),
        step_id=1,
    )
    assert act
    percept = GymMessage(
        performative=GymMessage.Performative.PERCEPT,
        observation=GymMessage.AnyObject("any_observation"),
        reward=0.0,
        info=GymMessage.AnyObject({"some_key": "some_value"}),
        done=True,
        step_id=1,
    )
    assert percept
    assert GymMessage(performative=GymMessage.Performative.RESET)
    assert GymMessage(performative=GymMessage.Performative.CLOSE)
    assert str(GymMessage.Performative.CLOSE) == "close"
def test_gym_serialization():
    """ACT and PERCEPT messages survive an encode/decode round trip."""

    def roundtrip(message):
        # Encode to bytes and decode back with a fresh serializer.
        return GymSerializer().decode(GymSerializer().encode(message))

    act_msg = GymMessage(
        performative=GymMessage.Performative.ACT,
        action=GymMessage.AnyObject("any_action"),
        step_id=1,
    )
    assert roundtrip(act_msg) == act_msg

    percept_msg = GymMessage(
        performative=GymMessage.Performative.PERCEPT,
        observation=GymMessage.AnyObject("any_observation"),
        reward=0.0,
        info=GymMessage.AnyObject({"some_key": "some_value"}),
        done=True,
        step_id=1,
    )
    assert roundtrip(percept_msg) == percept_msg
| 36.185714 | 92 | 0.659692 |
e28f64b3c251ab8ac427dee074d380addb2f630b | 8,983 | py | Python | efficientdet/object_detection/argmax_matcher.py | Rednickle/automl | 2aa620b06cc2645a94d2eb1ea37154926980c856 | [
"Apache-2.0"
] | 7 | 2020-04-07T14:24:49.000Z | 2020-09-27T08:48:15.000Z | efficientdet/object_detection/argmax_matcher.py | aaryanMontana/automl | 3614751749a21ca2fcb299b60238c6651ff51125 | [
"Apache-2.0"
] | null | null | null | efficientdet/object_detection/argmax_matcher.py | aaryanMontana/automl | 3614751749a21ca2fcb299b60238c6651ff51125 | [
"Apache-2.0"
] | 1 | 2020-10-30T10:08:40.000Z | 2020-10-30T10:08:40.000Z | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Argmax matcher implementation.
This class takes a similarity matrix and matches columns to rows based on the
maximum value per column. One can specify matched_thresholds and
to prevent columns from matching to rows (generally resulting in a negative
training example) and unmatched_theshold to ignore the match (generally
resulting in neither a positive or negative training example).
This matcher is used in Fast(er)-RCNN.
Note: matchers are used in TargetAssigners. There is a create_target_assigner
factory function for popular implementations.
"""
import tensorflow.compat.v1 as tf
from object_detection import matcher
from object_detection import shape_utils
class ArgMaxMatcher(matcher.Matcher):
  """Matcher based on highest value.
  This class computes matches from a similarity matrix. Each column is matched
  to a single row.
  To support object detection target assignment this class enables setting both
  matched_threshold (upper threshold) and unmatched_threshold (lower thresholds)
  defining three categories of similarity which define whether examples are
  positive, negative, or ignored:
  (1) similarity >= matched_threshold: Highest similarity. Matched/Positive!
  (2) matched_threshold > similarity >= unmatched_threshold: Medium similarity.
        Depending on negatives_lower_than_unmatched, this is either
        Unmatched/Negative OR Ignore.
  (3) unmatched_threshold > similarity: Lowest similarity. Depending on flag
      negatives_lower_than_unmatched, either Unmatched/Negative OR Ignore.
  For ignored matches this class sets the values in the Match object to -2.
  """
  def __init__(self,
               matched_threshold,
               unmatched_threshold=None,
               negatives_lower_than_unmatched=True,
               force_match_for_each_row=False):
    """Construct ArgMaxMatcher.
    Args:
      matched_threshold: Threshold for positive matches. Positive if
        sim >= matched_threshold, where sim is the maximum value of the
        similarity matrix for a given column. Set to None for no threshold.
      unmatched_threshold: Threshold for negative matches. Negative if
        sim < unmatched_threshold. Defaults to matched_threshold
        when set to None.
      negatives_lower_than_unmatched: Boolean which defaults to True. If True
        then negative matches are the ones below the unmatched_threshold,
        whereas ignored matches are in between the matched and unmatched
        threshold. If False, then negative matches are in between the matched
        and unmatched threshold, and everything lower than unmatched is ignored.
      force_match_for_each_row: If True, ensures that each row is matched to
        at least one column (which is not guaranteed otherwise if the
        matched_threshold is high). Defaults to False. See
        argmax_matcher_test.testMatcherForceMatch() for an example.
    Raises:
      ValueError: if unmatched_threshold is set but matched_threshold is not set
        or if unmatched_threshold > matched_threshold.
    """
    if (matched_threshold is None) and (unmatched_threshold is not None):
      # Bug fix: the two adjacent literals previously concatenated without a
      # separating space, yielding "whenunmatched_threshold" in the message.
      raise ValueError('Need to also define matched_threshold when '
                       'unmatched_threshold is defined')
    self._matched_threshold = matched_threshold
    if unmatched_threshold is None:
      self._unmatched_threshold = matched_threshold
    else:
      if unmatched_threshold > matched_threshold:
        # Bug fix: same missing-space concatenation ("equalto").
        raise ValueError('unmatched_threshold needs to be smaller or equal '
                         'to matched_threshold')
      self._unmatched_threshold = unmatched_threshold
    if not negatives_lower_than_unmatched:
      if self._unmatched_threshold == self._matched_threshold:
        # Bug fix: the thresholds were previously passed as extra positional
        # ValueError arguments (logging-style), so the %s placeholders were
        # never interpolated. Format the message explicitly instead.
        raise ValueError('When negatives are in between matched and '
                         'unmatched thresholds, these cannot be of equal '
                         'value. matched: %s, unmatched: %s' %
                         (self._matched_threshold, self._unmatched_threshold))
    self._force_match_for_each_row = force_match_for_each_row
    self._negatives_lower_than_unmatched = negatives_lower_than_unmatched
  def _match(self, similarity_matrix):
    """Tries to match each column of the similarity matrix to a row.
    Args:
      similarity_matrix: tensor of shape [N, M] representing any similarity
        metric.
    Returns:
      Match object with corresponding matches for each of M columns.
    """
    def _match_when_rows_are_empty():
      """Performs matching when the rows of similarity matrix are empty.
      When the rows are empty, all detections are false positives. So we return
      a tensor of -1's to indicate that the columns do not match to any rows.
      Returns:
        matches: int32 tensor indicating the row each column matches to.
      """
      similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
          similarity_matrix)
      return -1 * tf.ones([similarity_matrix_shape[1]], dtype=tf.int32)
    def _match_when_rows_are_non_empty():
      """Performs matching when the rows of similarity matrix are non empty.
      Returns:
        matches: int32 tensor indicating the row each column matches to.
      """
      # Matches for each column
      matches = tf.argmax(similarity_matrix, 0, output_type=tf.int32)
      # Deal with matched and unmatched threshold
      if self._matched_threshold is not None:
        # Get logical indices of ignored and unmatched columns as tf.int64
        matched_vals = tf.reduce_max(similarity_matrix, 0)
        below_unmatched_threshold = tf.greater(self._unmatched_threshold,
                                               matched_vals)
        between_thresholds = tf.logical_and(
            tf.greater_equal(matched_vals, self._unmatched_threshold),
            tf.greater(self._matched_threshold, matched_vals))
        if self._negatives_lower_than_unmatched:
          matches = self._set_values_using_indicator(matches,
                                                     below_unmatched_threshold,
                                                     -1)
          matches = self._set_values_using_indicator(matches,
                                                     between_thresholds,
                                                     -2)
        else:
          matches = self._set_values_using_indicator(matches,
                                                     below_unmatched_threshold,
                                                     -2)
          matches = self._set_values_using_indicator(matches,
                                                     between_thresholds,
                                                     -1)
      if self._force_match_for_each_row:
        similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
            similarity_matrix)
        force_match_column_ids = tf.argmax(similarity_matrix, 1,
                                           output_type=tf.int32)
        force_match_column_indicators = tf.one_hot(
            force_match_column_ids, depth=similarity_matrix_shape[1])
        # For each column that is some row's best match, prefer that forced
        # row over the thresholded argmax result.
        force_match_row_ids = tf.argmax(force_match_column_indicators, 0,
                                        output_type=tf.int32)
        force_match_column_mask = tf.cast(
            tf.reduce_max(force_match_column_indicators, 0), tf.bool)
        final_matches = tf.where(force_match_column_mask,
                                 force_match_row_ids, matches)
        return final_matches
      else:
        return matches
    if similarity_matrix.shape.is_fully_defined():
      # NOTE(review): `.value` assumes TF1-style Dimension objects; under a
      # native TF2 runtime shape[0] is already an int -- confirm against the
      # project's shape_utils helpers if upgrading.
      if similarity_matrix.shape[0].value == 0:
        return _match_when_rows_are_empty()
      else:
        return _match_when_rows_are_non_empty()
    else:
      return tf.cond(
          tf.greater(tf.shape(similarity_matrix)[0], 0),
          _match_when_rows_are_non_empty, _match_when_rows_are_empty)
  def _set_values_using_indicator(self, x, indicator, val):
    """Set the indicated fields of x to val.
    Args:
      x: tensor.
      indicator: boolean with same shape as x.
      val: scalar with value to set.
    Returns:
      modified tensor.
    """
    indicator = tf.cast(indicator, x.dtype)
    return tf.add(tf.multiply(x, 1 - indicator), val * indicator)
| 45.140704 | 80 | 0.672159 |
1a0d57b295046f9d80af12b63b22aad221f8a7b5 | 2,445 | py | Python | web/views/usuarios.py | ESEGroup/Paraguai | 42de0a15b7a31d470803e5f88d15e83f93c0a467 | [
"Apache-2.0"
] | 6 | 2016-11-08T16:08:48.000Z | 2017-03-05T20:48:41.000Z | web/views/usuarios.py | bamorim/eel873-final-assignment | 42de0a15b7a31d470803e5f88d15e83f93c0a467 | [
"Apache-2.0"
] | 39 | 2016-11-13T00:36:49.000Z | 2018-05-20T16:40:15.000Z | web/views/usuarios.py | bamorim/eel873-final-assignment | 42de0a15b7a31d470803e5f88d15e83f93c0a467 | [
"Apache-2.0"
] | 3 | 2016-11-08T22:33:54.000Z | 2017-07-23T21:07:14.000Z | from flask import Blueprint, render_template, current_app, request, url_for, redirect
from domain.usuario import Usuario, DTOUsuario
from domain.usuario.nivel_acesso import *
from web.autenticacao import requer_usuario
from web.autorizacao import requer_acesso
view_usuarios = Blueprint('usuarios', __name__)
# (value, label) pairs offered by the user create/edit forms; the integer is
# the access-level code posted back in the "nivelAcesso" form field.
# NOTE(review): only levels 0 and 2 are offered here -- confirm against the
# domain nivel_acesso definitions whether intermediate levels are intentional
# omissions.
niveisAcesso = [
    (0, "Usuário Comum"),
    (2, "Administrador")
]
def nivelAcessoToStr(nivel):
    """Return the display label for an access level, or None if unknown."""
    labels = (
        (Administrador(), "Administrador"),
        (UsuarioComum(), "Usuário Comum"),
    )
    for acesso, rotulo in labels:
        if nivel == acesso:
            return rotulo
    return None
@view_usuarios.route("/")
@requer_usuario
@requer_acesso(Administrador())
def index():
    """List every user except the maintenance-system account."""
    visiveis = [u for u in current_app.crud_usuario.listar()
                if u.nivelAcesso != SistemaManutencao()]
    linhas = []
    for u in visiveis:
        linhas.append({
            'id': u.id,
            'nome': u.nome,
            'email': u.email,
            'nivel': nivelAcessoToStr(u.nivelAcesso),
        })
    return render_template("usuarios/index.html", usuarios=linhas)
@view_usuarios.route("/novo")
@requer_usuario
@requer_acesso(Administrador())
def novo():
    """Render the blank form for creating a new user."""
    dto = DTOUsuario()
    return render_template(
        "usuarios/novo.html", dto_usuario=dto, niveisAcesso=niveisAcesso)
@view_usuarios.route("/<id_usuario>/editar")
@requer_usuario
@requer_acesso(Administrador())
def editar(id_usuario):
    """Render the edit form pre-filled with an existing user's data."""
    alvo = current_app.crud_usuario.obter(int(id_usuario))
    # Password and access level are intentionally left blank in the form DTO.
    dto = DTOUsuario(alvo.nome, alvo.email, None, None)
    return render_template(
        "usuarios/editar.html", id_usuario=id_usuario, dto_usuario=dto,
        niveisAcesso=niveisAcesso)
@view_usuarios.route("/<id_usuario>", methods=["POST"])
@requer_usuario
@requer_acesso(Administrador())
def alterar(id_usuario):
    """Apply the submitted form data to an existing user, then list users."""
    form = request.form
    dto = DTOUsuario(form["nome"], form["email"], form["senha"],
                     int(form["nivelAcesso"]))
    current_app.crud_usuario.alterar(int(id_usuario), dto)
    return redirect(url_for('usuarios.index'))
@view_usuarios.route("/", methods=["POST"])
@requer_usuario
@requer_acesso(Administrador())
def criar():
    """Create a new user from the submitted form data, then list users."""
    form = request.form
    dto = DTOUsuario(form["nome"], form["email"], form["senha"],
                     int(form["nivelAcesso"]))
    current_app.crud_usuario.criar(dto)
    return redirect(url_for('usuarios.index'))
@view_usuarios.route("/<id_usuario>/remover")
@requer_usuario
@requer_acesso(Administrador())
def remover(id_usuario):
    """Remove the given user and redirect back to the user list.

    Fix: the CRUD call's return value was bound to an unused local
    (``usuario``); the binding is dropped.
    NOTE(review): removal is exposed via GET, so a plain link (or prefetch)
    triggers it -- consider switching to POST.
    """
    current_app.crud_usuario.remover(int(id_usuario))
    return redirect(url_for('usuarios.index'))
| 33.493151 | 122 | 0.733742 |
7941355a76edd9e4bff4988571bb3a5d0068a242 | 33,242 | py | Python | rally_openstack/task/scenarios/neutron/utils.py | jogeo/rally-openstack | 83437e7c5925d5d647cd28f1821b6d51687b0123 | [
"Apache-2.0"
] | null | null | null | rally_openstack/task/scenarios/neutron/utils.py | jogeo/rally-openstack | 83437e7c5925d5d647cd28f1821b6d51687b0123 | [
"Apache-2.0"
] | null | null | null | rally_openstack/task/scenarios/neutron/utils.py | jogeo/rally-openstack | 83437e7c5925d5d647cd28f1821b6d51687b0123 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014: Intel Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally.common import cfg
from rally.common import logging
from rally import exceptions
from rally.task import atomic
from rally.task import utils
from rally_openstack.common.services.network import neutron
from rally_openstack.task import scenario
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class NeutronBaseScenario(scenario.OpenStackScenario):
    """Base class for Neutron scenarios with basic atomic actions.

    Exposes ``self.neutron`` -- a NeutronService helper bound to this
    scenario's clients, name generator and atomic-action registry.
    """

    def __init__(self, *args, **kwargs):
        super(NeutronBaseScenario, self).__init__(*args, **kwargs)
        # ``_clients`` only exists once the scenario is instantiated with a
        # real context; skip the helper otherwise (e.g. during discovery).
        if hasattr(self, "_clients"):
            self.neutron = neutron.NeutronService(
                clients=self._clients,
                name_generator=self.generate_random_name,
                atomic_inst=self.atomic_actions()
            )
class NeutronScenario(NeutronBaseScenario):
"""Base class for Neutron scenarios with basic atomic actions."""
# TODO(rkiran): modify in case LBaaS-v2 requires
LB_METHOD = "ROUND_ROBIN"
LB_PROTOCOL = "HTTP"
LB_PROTOCOL_PORT = 80
HM_TYPE = "PING"
HM_MAX_RETRIES = 3
HM_DELAY = 20
HM_TIMEOUT = 10
def _get_network_id(self, network, **kwargs):
"""Get Neutron network ID for the network name.
:param network: str, network name/id
:param kwargs: dict, network options
:returns: str, Neutron network-id
"""
try:
return self.neutron.find_network(network)["id"]
except exceptions.GetResourceFailure:
raise exceptions.NotFoundException(
message="Network %s not found." % network)
@property
def _ext_gw_mode_enabled(self):
"""Determine if the ext-gw-mode extension is enabled.
Without this extension, we can't pass the enable_snat parameter.
"""
return self.neutron.supports_extension("ext-gw-mode", silent=True)
def _create_network(self, network_create_args):
"""Create neutron network.
:param network_create_args: dict, POST /v2.0/networks request options
:returns: neutron network dict
"""
network_create_args.pop("name", None)
return {"network": self.neutron.create_network(**network_create_args)}
def _list_networks(self, **kwargs):
"""Return user networks list.
:param kwargs: network list options
"""
return self.neutron.list_networks(**kwargs)
def _list_agents(self, **kwargs):
"""Fetches agents.
:param kwargs: neutron agent list options
:returns: user agents list
"""
return self.neutron.list_agents(**kwargs)
def _update_network(self, network, network_update_args):
"""Update the network.
This atomic function updates the network with network_update_args.
:param network: Network object
:param network_update_args: dict, POST /v2.0/networks update options
:returns: updated neutron network dict
"""
network_update_args["name"] = self.generate_random_name()
return {"network": self.neutron.update_network(
network["network"]["id"], **network_update_args)}
def _show_network(self, network, **kwargs):
"""show network details.
:param network: Network object
:param kwargs: dict, POST /v2.0/networks show options
:returns: details of the network
"""
network = self.neutron.get_network(network["network"]["id"], **kwargs)
return {"network": network}
def _delete_network(self, network):
"""Delete neutron network.
:param network: Network object
"""
self.neutron.delete_network(network["id"])
def _create_subnet(self, network, subnet_create_args, start_cidr=None):
"""Create neutron subnet.
:param network: neutron network dict
:param subnet_create_args: POST /v2.0/subnets request options
:returns: neutron subnet dict
"""
subnet_create_args.pop("name", None)
subnet_create_args["network_id"] = network["network"]["id"]
subnet_create_args["start_cidr"] = start_cidr
return {"subnet": self.neutron.create_subnet(**subnet_create_args)}
def _list_subnets(self):
"""Returns user subnetworks list."""
return self.neutron.list_subnets()
def _show_subnet(self, subnet, **kwargs):
"""show subnet details.
:param subnet: Subnet object
:param kwargs: Optional additional arguments for subnet show
:returns: details of the subnet
"""
return {"subnet": self.neutron.get_subnet(subnet["subnet"]["id"])}
def _update_subnet(self, subnet, subnet_update_args):
"""Update the neutron subnet.
This atomic function updates the subnet with subnet_update_args.
:param subnet: Subnet object
:param subnet_update_args: dict, PUT /v2.0/subnets update options
:returns: updated neutron subnet dict
"""
subnet_update_args["name"] = self.generate_random_name()
return {"subnet": self.neutron.update_subnet(
subnet["subnet"]["id"], **subnet_update_args)}
def _delete_subnet(self, subnet):
"""Delete neutron subnet
:param subnet: Subnet object
"""
self.neutron.delete_subnet(subnet["subnet"]["id"])
def _create_router(self, router_create_args, external_gw=False):
"""Create neutron router.
:param router_create_args: POST /v2.0/routers request options
:returns: neutron router dict
"""
router_create_args.pop("name", None)
if ("tenant_id" in router_create_args
and "project_id" not in router_create_args):
router_create_args["project_id"] = router_create_args.pop(
"tenant_id")
return {"router": self.neutron.create_router(
discover_external_gw=external_gw, **router_create_args)}
def _list_routers(self):
"""Returns user routers list."""
return self.neutron.list_routers()
def _show_router(self, router, **kwargs):
"""Show information of a given router.
:param router: ID or name of router to look up
:kwargs: dict, POST /v2.0/routers show options
:return: details of the router
"""
return {"router": self.neutron.get_router(
router["router"]["id"], **kwargs)}
def _delete_router(self, router):
"""Delete neutron router
:param router: Router object
"""
self.neutron.delete_router(router["router"]["id"])
def _update_router(self, router, router_update_args):
"""Update the neutron router.
This atomic function updates the router with router_update_args.
:param router: dict, neutron router
:param router_update_args: dict, PUT /v2.0/routers update options
:returns: updated neutron router dict
"""
router_update_args["name"] = self.generate_random_name()
return {"router": self.neutron.update_router(
router["router"]["id"], **router_update_args)}
def _create_port(self, network, port_create_args):
"""Create neutron port.
:param network: neutron network dict
:param port_create_args: POST /v2.0/ports request options
:returns: neutron port dict
"""
return {"port": self.neutron.create_port(
network_id=network["network"]["id"], **port_create_args)}
def _list_ports(self):
"""Return user ports list."""
return self.neutron.list_ports()
def _show_port(self, port, **params):
"""Return user port details.
:param port: dict, neutron port
:param params: neutron port show options
:returns: neutron port dict
"""
return {"port": self.neutron.get_port(port["port"]["id"], **params)}
def _update_port(self, port, port_update_args):
"""Update the neutron port.
This atomic function updates port with port_update_args.
:param port: dict, neutron port
:param port_update_args: dict, PUT /v2.0/ports update options
:returns: updated neutron port dict
"""
port_update_args["name"] = self.generate_random_name()
return {"port": self.neutron.update_port(port["port"]["id"],
**port_update_args)}
def _delete_port(self, port):
"""Delete neutron port.
:param port: Port object
"""
self.neutron.delete_port(port["port"]["id"])
@logging.log_deprecated_args(
"network_create_args is deprecated; use the network context instead",
"0.1.0", "network_create_args")
def _get_or_create_network(self, network_create_args=None):
"""Get a network from context, or create a new one.
This lets users either create networks with the 'network'
context, provide existing networks with the 'existing_network'
context, or let the scenario create a default network for
them. Running this without one of the network contexts is
deprecated.
:param network_create_args: Deprecated way to provide network
creation args; use the network
context instead.
:returns: Network dict
"""
if "networks" in self.context["tenant"]:
return {"network":
random.choice(self.context["tenant"]["networks"])}
else:
LOG.warning("Running this scenario without either the 'network' "
"or 'existing_network' context is deprecated")
return self._create_network(network_create_args or {})
def _create_subnets(self, network,
subnet_create_args=None,
subnet_cidr_start=None,
subnets_per_network=1):
"""Create <count> new subnets in the given network.
:param network: network to create subnets in
:param subnet_create_args: dict, POST /v2.0/subnets request options
:param subnet_cidr_start: str, start value for subnets CIDR
:param subnets_per_network: int, number of subnets for one network
:returns: List of subnet dicts
"""
return [self._create_subnet(network, subnet_create_args or {},
subnet_cidr_start)
for i in range(subnets_per_network)]
def _create_network_and_subnets(self,
network_create_args=None,
subnet_create_args=None,
subnets_per_network=1,
subnet_cidr_start="1.0.0.0/24"):
"""Create network and subnets.
:parm network_create_args: dict, POST /v2.0/networks request options
:parm subnet_create_args: dict, POST /v2.0/subnets request options
:parm subnets_per_network: int, number of subnets for one network
:parm subnet_cidr_start: str, start value for subnets CIDR
:returns: tuple of result network and subnets list
"""
subnet_create_args = dict(subnet_create_args or {})
subnet_create_args["start_cidr"] = subnet_cidr_start
net_topo = self.neutron.create_network_topology(
network_create_args=(network_create_args or {}),
subnet_create_args=subnet_create_args,
subnets_count=subnets_per_network
)
subnets = [{"subnet": s} for s in net_topo["subnets"]]
return {"network": net_topo["network"]}, subnets
def _create_network_structure(self, network_create_args=None,
subnet_create_args=None,
subnet_cidr_start=None,
subnets_per_network=None,
router_create_args=None):
"""Create a network and a given number of subnets and routers.
:param network_create_args: dict, POST /v2.0/networks request options
:param subnet_create_args: dict, POST /v2.0/subnets request options
:param subnet_cidr_start: str, start value for subnets CIDR
:param subnets_per_network: int, number of subnets for one network
:param router_create_args: dict, POST /v2.0/routers request options
:returns: tuple of (network, subnets, routers)
"""
subnet_create_args = dict(subnet_create_args or {})
subnet_create_args["start_cidr"] = subnet_cidr_start
net_topo = self.neutron.create_network_topology(
network_create_args=(network_create_args or {}),
router_create_args=(router_create_args or {}),
router_per_subnet=True,
subnet_create_args=subnet_create_args,
subnets_count=subnets_per_network
)
return ({"network": net_topo["network"]},
[{"subnet": s} for s in net_topo["subnets"]],
[{"router": r} for r in net_topo["routers"]])
def _add_interface_router(self, subnet, router):
"""Connect subnet to router.
:param subnet: dict, neutron subnet
:param router: dict, neutron router
"""
self.neutron.add_interface_to_router(router_id=router["id"],
subnet_id=subnet["id"])
def _remove_interface_router(self, subnet, router):
"""Remove subnet from router
:param subnet: dict, neutron subnet
:param router: dict, neutron router
"""
self.neutron.remove_interface_from_router(
router_id=router["id"], subnet_id=subnet["id"])
def _add_gateway_router(self, router, ext_net, enable_snat=None):
"""Set the external network gateway for a router.
:param router: dict, neutron router
:param ext_net: external network for the gateway
:param enable_snat: True if enable snat, None to avoid update
"""
self.neutron.add_gateway_to_router(
router_id=router["router"]["id"],
network_id=ext_net["network"]["id"],
enable_snat=enable_snat
)
def _remove_gateway_router(self, router):
"""Removes an external network gateway from the specified router.
:param router: dict, neutron router
"""
self.neutron.remove_gateway_from_router(router["router"]["id"])
@atomic.action_timer("neutron.create_pool")
def _create_lb_pool(self, subnet_id, **pool_create_args):
"""Create LB pool(v1)
:param subnet_id: str, neutron subnet-id
:param pool_create_args: dict, POST /lb/pools request options
:returns: dict, neutron lb pool
"""
args = {"lb_method": self.LB_METHOD,
"protocol": self.LB_PROTOCOL,
"name": self.generate_random_name(),
"subnet_id": subnet_id}
args.update(pool_create_args)
return self.clients("neutron").create_pool({"pool": args})
def _create_v1_pools(self, networks, **pool_create_args):
"""Create LB pools(v1)
:param networks: list, neutron networks
:param pool_create_args: dict, POST /lb/pools request options
:returns: list, neutron lb pools
"""
subnets = []
pools = []
for net in networks:
subnets.extend(net.get("subnets", []))
for subnet_id in subnets:
pools.append(self._create_lb_pool(
subnet_id, **pool_create_args))
return pools
@atomic.action_timer("neutron.list_pools")
def _list_v1_pools(self, **kwargs):
"""Return user lb pool list(v1)."""
return self.clients("neutron").list_pools(**kwargs)
@atomic.action_timer("neutron.delete_pool")
def _delete_v1_pool(self, pool):
"""Delete neutron pool.
:param pool: Pool object
"""
self.clients("neutron").delete_pool(pool["id"])
    @atomic.action_timer("neutron.update_pool")
    def _update_v1_pool(self, pool, **pool_update_args):
        """Update pool.

        This atomic function updates the pool with pool_update_args.
        The pool is always renamed with a freshly generated name.

        :param pool: Pool object (dict with a top-level "pool" key, as
            returned by pool creation)
        :param pool_update_args: dict, PUT /lb/pools update options
        :returns: updated neutron pool dict
        """
        pool_update_args["name"] = self.generate_random_name()
        body = {"pool": pool_update_args}
        return self.clients("neutron").update_pool(pool["pool"]["id"], body)
def _create_v1_vip(self, pool, **vip_create_args):
"""Create VIP(v1)
:parm pool: dict, neutron lb-pool
:parm vip_create_args: dict, POST /lb/vips request options
:returns: dict, neutron lb vip
"""
args = {"protocol": self.LB_PROTOCOL,
"protocol_port": self.LB_PROTOCOL_PORT,
"name": self.generate_random_name(),
"pool_id": pool["pool"]["id"],
"subnet_id": pool["pool"]["subnet_id"]}
args.update(vip_create_args)
return self.clients("neutron").create_vip({"vip": args})
@atomic.action_timer("neutron.list_vips")
def _list_v1_vips(self, **kwargs):
"""Return user lb vip list(v1)."""
return self.clients("neutron").list_vips(**kwargs)
@atomic.action_timer("neutron.delete_vip")
def _delete_v1_vip(self, vip):
"""Delete neutron vip.
:param vip: neutron Virtual IP object
"""
self.clients("neutron").delete_vip(vip["id"])
@atomic.action_timer("neutron.update_vip")
def _update_v1_vip(self, vip, **vip_update_args):
"""Updates vip.
This atomic function updates vip name and admin state
:param vip: Vip object
:param vip_update_args: dict, POST /lb/vips update options
:returns: updated neutron vip dict
"""
vip_update_args["name"] = self.generate_random_name()
body = {"vip": vip_update_args}
return self.clients("neutron").update_vip(vip["vip"]["id"], body)
def _create_floatingip(self, floating_network, **floating_ip_args):
"""Create floating IP with floating_network.
:param floating_network: str, external network to create floating IP
:param floating_ip_args: dict, POST /floatingips create options
:returns: dict, neutron floating IP
"""
return {"floatingip": self.neutron.create_floatingip(
floating_network=floating_network, **floating_ip_args)}
def _list_floating_ips(self, **kwargs):
"""Return floating IPs list."""
return {"floatingips": self.neutron.list_floatingips(**kwargs)}
def _delete_floating_ip(self, floating_ip):
"""Delete floating IP.
:param dict, floating IP object
"""
return self.neutron.delete_floatingip(floating_ip["id"])
def _associate_floating_ip(self, floatingip, port):
"""Associate floating IP with port.
:param floatingip: floating IP dict
:param port: port dict
:returns: updated floating IP dict
"""
return self.neutron.associate_floatingip(
port_id=port["id"],
floatingip_id=floatingip["id"])
def _dissociate_floating_ip(self, floatingip):
"""Dissociate floating IP from ports.
:param floatingip: floating IP dict
:returns: updated floating IP dict
"""
return self.neutron.dissociate_floatingip(
floatingip_id=floatingip["id"])
@atomic.action_timer("neutron.create_healthmonitor")
def _create_v1_healthmonitor(self, **healthmonitor_create_args):
"""Create LB healthmonitor.
This atomic function creates healthmonitor with the provided
healthmonitor_create_args.
:param healthmonitor_create_args: dict, POST /lb/healthmonitors
:returns: neutron healthmonitor dict
"""
args = {"type": self.HM_TYPE,
"delay": self.HM_DELAY,
"max_retries": self.HM_MAX_RETRIES,
"timeout": self.HM_TIMEOUT}
args.update(healthmonitor_create_args)
return self.clients("neutron").create_health_monitor(
{"health_monitor": args})
@atomic.action_timer("neutron.list_healthmonitors")
def _list_v1_healthmonitors(self, **kwargs):
"""List LB healthmonitors.
This atomic function lists all helthmonitors.
:param kwargs: optional parameters
:returns: neutron lb healthmonitor list
"""
return self.clients("neutron").list_health_monitors(**kwargs)
@atomic.action_timer("neutron.delete_healthmonitor")
def _delete_v1_healthmonitor(self, healthmonitor):
"""Delete neutron healthmonitor.
:param healthmonitor: neutron healthmonitor dict
"""
self.clients("neutron").delete_health_monitor(healthmonitor["id"])
@atomic.action_timer("neutron.update_healthmonitor")
def _update_v1_healthmonitor(self, healthmonitor,
**healthmonitor_update_args):
"""Update neutron healthmonitor.
:param healthmonitor: neutron lb healthmonitor dict
:param healthmonitor_update_args: POST /lb/healthmonitors
update options
:returns: updated neutron lb healthmonitor dict
"""
body = {"health_monitor": healthmonitor_update_args}
return self.clients("neutron").update_health_monitor(
healthmonitor["health_monitor"]["id"], body)
def _create_security_group(self, **security_group_create_args):
"""Create Neutron security-group.
:param security_group_create_args: dict, POST /v2.0/security-groups
request options
:returns: dict, neutron security-group
"""
security_group_create_args["name"] = self.generate_random_name()
return {"security_group": self.neutron.create_security_group(
**security_group_create_args)}
def _delete_security_group(self, security_group):
"""Delete Neutron security group.
:param security_group: dict, neutron security_group
"""
return self.neutron.delete_security_group(
security_group["security_group"]["id"])
def _list_security_groups(self, **kwargs):
"""Return list of Neutron security groups."""
return {"security_groups": self.neutron.list_security_groups(**kwargs)}
def _show_security_group(self, security_group, **kwargs):
"""Show security group details.
:param security_group: dict, neutron security_group
:param kwargs: Optional additional arguments for security_group show
:returns: security_group details
"""
return {"security_group": self.neutron.get_security_group(
security_group["security_group"]["id"], **kwargs)}
def _update_security_group(self, security_group,
**security_group_update_args):
"""Update Neutron security-group.
:param security_group: dict, neutron security_group
:param security_group_update_args: dict, POST /v2.0/security-groups
update options
:returns: dict, updated neutron security-group
"""
security_group_update_args["name"] = self.generate_random_name()
return {"security_group": self.neutron.update_security_group(
security_group["security_group"]["id"],
**security_group_update_args)}
def update_loadbalancer_resource(self, lb):
try:
new_lb = self.clients("neutron").show_loadbalancer(lb["id"])
except Exception as e:
if getattr(e, "status_code", 400) == 404:
raise exceptions.GetResourceNotFound(resource=lb)
raise exceptions.GetResourceFailure(resource=lb, err=e)
return new_lb["loadbalancer"]
    @atomic.action_timer("neutron.create_lbaasv2_loadbalancer")
    def _create_lbaasv2_loadbalancer(self, subnet_id, **lb_create_args):
        """Create LB loadbalancer(v2)

        Creates the load balancer and then blocks until its
        provisioning_status reaches ACTIVE; timeout and poll interval come
        from the openstack configuration section.

        :param subnet_id: str, neutron subnet-id (used as vip_subnet_id)
        :param lb_create_args: dict, POST /lbaas/loadbalancers request options
        :returns: dict, neutron lb (the ACTIVE "loadbalancer" payload)
        """
        args = {"name": self.generate_random_name(),
                "vip_subnet_id": subnet_id}
        args.update(lb_create_args)
        neutronclient = self.clients("neutron")
        lb = neutronclient.create_loadbalancer({"loadbalancer": args})
        lb = lb["loadbalancer"]
        # Poll via update_loadbalancer_resource until ACTIVE (or timeout).
        lb = utils.wait_for_status(
            lb,
            ready_statuses=["ACTIVE"],
            status_attr="provisioning_status",
            update_resource=self.update_loadbalancer_resource,
            timeout=CONF.openstack.neutron_create_loadbalancer_timeout,
            check_interval=(
                CONF.openstack.neutron_create_loadbalancer_poll_interval)
        )
        return lb
@atomic.action_timer("neutron.list_lbaasv2_loadbalancers")
def _list_lbaasv2_loadbalancers(self, retrieve_all=True, **lb_list_args):
"""List LB loadbalancers(v2)
:param lb_list_args: dict, POST /lbaas/loadbalancers request options
:returns: dict, neutron lb loadbalancers(v2)
"""
return self.clients("neutron").list_loadbalancers(retrieve_all,
**lb_list_args)
@atomic.action_timer("neutron.create_bgpvpn")
def _create_bgpvpn(self, **kwargs):
"""Create Bgpvpn resource (POST /bgpvpn/bgpvpn)
:param kwargs: optional parameters to create BGP VPN
:returns dict, bgpvpn resource details
"""
kwargs["name"] = self.generate_random_name()
return self.admin_clients("neutron").create_bgpvpn({"bgpvpn": kwargs})
@atomic.action_timer("neutron.delete_bgpvpn")
def _delete_bgpvpn(self, bgpvpn):
"""Delete Bgpvpn resource.(DELETE /bgpvpn/bgpvpns/{id})
:param bgpvpn: dict, bgpvpn
:return dict, bgpvpn
"""
return self.admin_clients("neutron").delete_bgpvpn(
bgpvpn["bgpvpn"]["id"])
@atomic.action_timer("neutron.list_bgpvpns")
def _list_bgpvpns(self, **kwargs):
"""Return bgpvpns list.
:param kwargs: dict, GET /bgpvpn/bgpvpns request options
:returns: bgpvpns list
"""
return self.admin_clients("neutron").list_bgpvpns(
True, **kwargs)["bgpvpns"]
@atomic.action_timer("neutron.update_bgpvpn")
def _update_bgpvpn(self, bgpvpn, update_name=False, **kwargs):
"""Update a bgpvpn.
:param bgpvpn: dict, bgpvpn
:param update_name: update_name: bool, whether or not to modify
BGP VPN name
:param **kwargs: dict, PUT /bgpvpn/bgpvpns update options
:return dict, updated bgpvpn
"""
if update_name or "name" in kwargs:
kwargs["name"] = self.generate_random_name()
return self.admin_clients("neutron").update_bgpvpn(
bgpvpn["bgpvpn"]["id"], {"bgpvpn": kwargs})
@atomic.action_timer("neutron.create_bgpvpn_network_assoc")
def _create_bgpvpn_network_assoc(self, bgpvpn, network):
"""Creates a new BGP VPN network association.
:param bgpvpn: dict, bgpvpn
:param network: dict, network
:return dict: network_association
"""
netassoc = {"network_id": network["id"]}
return self.clients("neutron").create_bgpvpn_network_assoc(
bgpvpn["bgpvpn"]["id"], {"network_association": netassoc})
@atomic.action_timer("neutron.delete_bgpvpn_network_assoc")
def _delete_bgpvpn_network_assoc(self, bgpvpn, net_assoc):
"""Delete the specified BGP VPN network association
:param bgpvpn: dict, bgpvpn
:param net_assoc: dict, network
:return dict: network_association
"""
return self.clients("neutron").delete_bgpvpn_network_assoc(
bgpvpn["bgpvpn"]["id"], net_assoc["network_association"]["id"])
@atomic.action_timer("neutron.create_bgpvpn_router_assoc")
def _create_bgpvpn_router_assoc(self, bgpvpn, router):
    """Create a new BGP VPN router association.

    :param bgpvpn: dict, bgpvpn
    :param router: dict, router
    :return dict: router_association
    """
    body = {"router_association": {"router_id": router["id"]}}
    return self.clients("neutron").create_bgpvpn_router_assoc(
        bgpvpn["bgpvpn"]["id"], body)
@atomic.action_timer("neutron.delete_bgpvpn_router_assoc")
def _delete_bgpvpn_router_assoc(self, bgpvpn, router_assoc):
    """Delete the specified BGP VPN router association.

    :param bgpvpn: dict, bgpvpn
    :param router_assoc: dict, router association
    :return dict: router_association
    """
    bgpvpn_id = bgpvpn["bgpvpn"]["id"]
    assoc_id = router_assoc["router_association"]["id"]
    return self.clients("neutron").delete_bgpvpn_router_assoc(
        bgpvpn_id, assoc_id)
@atomic.action_timer("neutron.list_bgpvpn_network_assocs")
def _list_bgpvpn_network_assocs(self, bgpvpn, **kwargs):
    """List the network associations of a BGP VPN.

    :param bgpvpn: dict, bgpvpn
    :param **kwargs: dict, optional parameters
    :return dict: network_association
    """
    bgpvpn_id = bgpvpn["bgpvpn"]["id"]
    return self.clients("neutron").list_bgpvpn_network_assocs(
        bgpvpn_id, **kwargs)
@atomic.action_timer("neutron.list_bgpvpn_router_assocs")
def _list_bgpvpn_router_assocs(self, bgpvpn, **kwargs):
    """List the router associations of a BGP VPN.

    :param bgpvpn: dict, bgpvpn
    :param **kwargs: dict, optional parameters
    :return dict: router_association
    """
    bgpvpn_id = bgpvpn["bgpvpn"]["id"]
    return self.clients("neutron").list_bgpvpn_router_assocs(
        bgpvpn_id, **kwargs)
def _create_security_group_rule(self, security_group_id,
**security_group_rule_args):
"""Create Neutron security-group-rule.
:param security_group_id: id of neutron security_group
:param security_group_rule_args: dict, POST
/v2.0/security-group-rules request options
:returns: dict, neutron security-group-rule
"""
return {"security_group_rule": self.neutron.create_security_group_rule(
security_group_id, **security_group_rule_args
)}
def _list_security_group_rules(self, **kwargs):
"""List all security group rules.
:param kwargs: Optional additional arguments for roles list
:return: list of security group rules
"""
result = self.neutron.list_security_group_rules(**kwargs)
return {"security_group_rules": result}
def _show_security_group_rule(self, security_group_rule, **kwargs):
"""Show information of a given security group rule.
:param security_group_rule: id of security group rule
:param kwargs: Optional additional arguments for roles list
:return: details of security group rule
"""
return {"security_group_rule": self.neutron.get_security_group_rule(
security_group_rule, **kwargs)}
def _delete_security_group_rule(self, security_group_rule):
"""Delete a given security group rule.
:param security_group_rule: id of security group rule
"""
self.neutron.delete_security_group_rule(security_group_rule)
@atomic.action_timer("neutron.delete_trunk")
def _delete_trunk(self, trunk_port):
    """Delete the trunk identified by the given trunk-port mapping."""
    trunk_id = trunk_port["port_id"]
    self.clients("neutron").delete_trunk(trunk_id)
@atomic.action_timer("neutron.create_trunk")
def _create_trunk(self, trunk_payload):
    """Create a trunk, forcing a generated name into the payload."""
    trunk_payload["name"] = self.generate_random_name()
    body = {"trunk": trunk_payload}
    return self.clients("neutron").create_trunk(body)
@atomic.action_timer("neutron.list_trunks")
def _list_trunks(self, **kwargs):
    """Return the list of existing trunks."""
    response = self.clients("neutron").list_trunks(**kwargs)
    return response["trunks"]
@atomic.action_timer("neutron.list_subports_by_trunk")
def _list_subports_by_trunk(self, trunk_id):
    """Return the subports attached to the given trunk."""
    neutron = self.clients("neutron")
    return neutron.trunk_get_subports(trunk_id)
@atomic.action_timer("neutron._add_subports_to_trunk")
def _add_subports_to_trunk(self, trunk_id, subports):
    """Add the given subports to an existing trunk."""
    body = {"sub_ports": subports}
    return self.clients("neutron").trunk_add_subports(trunk_id, body)
def _list_ports_by_device_id(self, device_id):
return self.neutron.list_ports(device_id=device_id)
| 38.430058 | 79 | 0.641357 |
c9a3ce6cfae9d5790684d4aa0ab25d246678a3fd | 7,079 | py | Python | Classes/Unit.py | Superomeg4/pyleecan | 2b695b5f39e77475a07aa0ea89489fb0a9659337 | [
"Apache-2.0"
] | null | null | null | Classes/Unit.py | Superomeg4/pyleecan | 2b695b5f39e77475a07aa0ea89489fb0a9659337 | [
"Apache-2.0"
] | null | null | null | Classes/Unit.py | Superomeg4/pyleecan | 2b695b5f39e77475a07aa0ea89489fb0a9659337 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Warning : this file has been generated, you shouldn't edit it"""
from os import linesep
from pyleecan.Classes.check import check_init_dict, check_var, raise_
from pyleecan.Functions.save import save
from pyleecan.Classes.frozen import FrozenClass
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from pyleecan.Methods.GUI_Option.Unit.get_m import get_m
except ImportError as error:
get_m = error
try:
from pyleecan.Methods.GUI_Option.Unit.get_m2 import get_m2
except ImportError as error:
get_m2 = error
try:
from pyleecan.Methods.GUI_Option.Unit.get_m_name import get_m_name
except ImportError as error:
get_m_name = error
try:
from pyleecan.Methods.GUI_Option.Unit.get_m2_name import get_m2_name
except ImportError as error:
get_m2_name = error
try:
from pyleecan.Methods.GUI_Option.Unit.set_m import set_m
except ImportError as error:
set_m = error
try:
from pyleecan.Methods.GUI_Option.Unit.set_m2 import set_m2
except ImportError as error:
set_m2 = error
from pyleecan.Classes.check import InitUnKnowClassError
class Unit(FrozenClass):
    """Machine-generated GUI option holder for the display units.

    Stores three binary unit switches (m/mm, rad/deg, m^2/mm^2).  The
    get_*/set_* methods are bound from pyleecan.Methods.GUI_Option.Unit;
    when one of those imports failed, the attribute is replaced by a
    property whose getter raises the original ImportError on access.

    NOTE(review): this file states it is generated — behavioral changes
    should be made in the generator, not here.
    """

    VERSION = 1

    # Check ImportError to remove unnecessary dependencies in unused method
    # cf Methods.GUI_Option.Unit.get_m
    if isinstance(get_m, ImportError):
        # Import failed: accessing Unit.get_m raises a descriptive error.
        get_m = property(
            fget=lambda x: raise_(
                ImportError("Can't use Unit method get_m: " + str(get_m))
            )
        )
    else:
        get_m = get_m
    # cf Methods.GUI_Option.Unit.get_m2
    if isinstance(get_m2, ImportError):
        get_m2 = property(
            fget=lambda x: raise_(
                ImportError("Can't use Unit method get_m2: " + str(get_m2))
            )
        )
    else:
        get_m2 = get_m2
    # cf Methods.GUI_Option.Unit.get_m_name
    if isinstance(get_m_name, ImportError):
        get_m_name = property(
            fget=lambda x: raise_(
                ImportError("Can't use Unit method get_m_name: " + str(get_m_name))
            )
        )
    else:
        get_m_name = get_m_name
    # cf Methods.GUI_Option.Unit.get_m2_name
    if isinstance(get_m2_name, ImportError):
        get_m2_name = property(
            fget=lambda x: raise_(
                ImportError("Can't use Unit method get_m2_name: " + str(get_m2_name))
            )
        )
    else:
        get_m2_name = get_m2_name
    # cf Methods.GUI_Option.Unit.set_m
    if isinstance(set_m, ImportError):
        set_m = property(
            fget=lambda x: raise_(
                ImportError("Can't use Unit method set_m: " + str(set_m))
            )
        )
    else:
        set_m = set_m
    # cf Methods.GUI_Option.Unit.set_m2
    if isinstance(set_m2, ImportError):
        set_m2 = property(
            fget=lambda x: raise_(
                ImportError("Can't use Unit method set_m2: " + str(set_m2))
            )
        )
    else:
        set_m2 = set_m2
    # save method is available in all object
    save = save

    def __init__(self, unit_m=0, unit_rad=0, unit_m2=0, init_dict=None):
        """Constructor of the class. Can be use in two ways :
        - __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
            for Matrix, None will initialise the property with an empty Matrix
            for pyleecan type, None will call the default constructor
        - __init__ (init_dict = d) d must be a dictionnary wiht every properties as keys

        ndarray or list can be given for Vector and Matrix
        object or dict can be given for pyleecan Object"""

        if init_dict is not None:  # Initialisation by dict
            check_init_dict(init_dict, ["unit_m", "unit_rad", "unit_m2"])
            # Overwrite default value with init_dict content
            if "unit_m" in list(init_dict.keys()):
                unit_m = init_dict["unit_m"]
            if "unit_rad" in list(init_dict.keys()):
                unit_rad = init_dict["unit_rad"]
            if "unit_m2" in list(init_dict.keys()):
                unit_m2 = init_dict["unit_m2"]
        # Initialisation by argument
        self.parent = None
        self.unit_m = unit_m
        self.unit_rad = unit_rad
        self.unit_m2 = unit_m2

        # The class is frozen, for now it's impossible to add new properties
        self._freeze()

    def __str__(self):
        """Convert this objet in a readeable string (for print)"""
        Unit_str = ""
        if self.parent is None:
            Unit_str += "parent = None " + linesep
        else:
            Unit_str += "parent = " + str(type(self.parent)) + " object" + linesep
        Unit_str += "unit_m = " + str(self.unit_m) + linesep
        Unit_str += "unit_rad = " + str(self.unit_rad) + linesep
        Unit_str += "unit_m2 = " + str(self.unit_m2)
        return Unit_str

    def __eq__(self, other):
        """Compare two objects (skip parent)"""
        if type(other) != type(self):
            return False
        if other.unit_m != self.unit_m:
            return False
        if other.unit_rad != self.unit_rad:
            return False
        if other.unit_m2 != self.unit_m2:
            return False
        return True

    def as_dict(self):
        """Convert this objet in a json seriable dict (can be use in __init__)"""
        Unit_dict = dict()
        Unit_dict["unit_m"] = self.unit_m
        Unit_dict["unit_rad"] = self.unit_rad
        Unit_dict["unit_m2"] = self.unit_m2
        # The class name is added to the dict fordeserialisation purpose
        Unit_dict["__class__"] = "Unit"
        return Unit_dict

    def _set_None(self):
        """Set all the properties to None (except pyleecan object)"""
        self.unit_m = None
        self.unit_rad = None
        self.unit_m2 = None

    def _get_unit_m(self):
        """getter of unit_m"""
        return self._unit_m

    def _set_unit_m(self, value):
        """setter of unit_m (validated as int in [0, 1])"""
        check_var("unit_m", value, "int", Vmin=0, Vmax=1)
        self._unit_m = value

    # 0: use m, 1: use mm
    # Type : int, min = 0, max = 1
    unit_m = property(
        fget=_get_unit_m, fset=_set_unit_m, doc=u"""0: use m, 1: use mm"""
    )

    def _get_unit_rad(self):
        """getter of unit_rad"""
        return self._unit_rad

    def _set_unit_rad(self, value):
        """setter of unit_rad (validated as int in [0, 1])"""
        check_var("unit_rad", value, "int", Vmin=0, Vmax=1)
        self._unit_rad = value

    # 0: use rad, 1: use deg
    # Type : int, min = 0, max = 1
    unit_rad = property(
        fget=_get_unit_rad, fset=_set_unit_rad, doc=u"""0: use rad, 1: use deg"""
    )

    def _get_unit_m2(self):
        """getter of unit_m2"""
        return self._unit_m2

    def _set_unit_m2(self, value):
        """setter of unit_m2 (validated as int in [0, 1])"""
        check_var("unit_m2", value, "int", Vmin=0, Vmax=1)
        self._unit_m2 = value

    # 0: use m^2, 1: use mm^2
    # Type : int, min = 0, max = 1
    unit_m2 = property(
        fget=_get_unit_m2, fset=_set_unit_m2, doc=u"""0: use m^2, 1: use mm^2"""
    )
| 31.602679 | 88 | 0.608137 |
79cfba96f08b36e913e2388c099fe992cd6f1d96 | 7,097 | py | Python | ptf_nn/ptf_nn_test/test.py | linarnan/ptf | 83444198f295a6fc8486b095379f67d8f7c25ad6 | [
"Apache-2.0"
] | 1 | 2021-09-23T05:52:43.000Z | 2021-09-23T05:52:43.000Z | ptf_nn/ptf_nn_test/test.py | linarnan/ptf | 83444198f295a6fc8486b095379f67d8f7c25ad6 | [
"Apache-2.0"
] | 1 | 2020-03-07T18:01:02.000Z | 2020-03-07T18:01:02.000Z | ptf_nn/ptf_nn_test/test.py | linarnan/ptf | 83444198f295a6fc8486b095379f67d8f7c25ad6 | [
"Apache-2.0"
] | 1 | 2020-10-26T18:56:26.000Z | 2020-10-26T18:56:26.000Z | import ptf
from ptf.base_tests import BaseTest
from ptf import config
import ptf.testutils as testutils
class DataplaneBaseTest(BaseTest):
    """Base class for dataplane tests.

    Wires up the shared ptf dataplane in setUp and, when a log directory
    is configured, records the test's traffic to a per-test pcap file.
    """

    def __init__(self):
        BaseTest.__init__(self)

    def setUp(self):
        self.dataplane = ptf.dataplane_instance
        self.dataplane.flush()
        if config["log_dir"] is not None:
            # Bug fix: 'os' was used here without ever being imported, so
            # enabling pcap logging crashed with a NameError. Import locally
            # to keep this block self-contained.
            import os
            filename = os.path.join(config["log_dir"], str(self)) + ".pcap"
            self.dataplane.start_pcap(filename)

    def tearDown(self):
        if config["log_dir"] is not None:
            self.dataplane.stop_pcap()
class OneTest(DataplaneBaseTest):
    """Send one raw packet into port (0, 1) and expect it on (1, 1)."""

    def __init__(self):
        DataplaneBaseTest.__init__(self)

    def runTest(self):
        payload = ("ab" * 20).encode()
        testutils.send_packet(self, (0, 1), payload)
        print("packet sent")
        testutils.verify_packet(self, payload, (1, 1))
class GetMacTest(DataplaneBaseTest):
    """Check that the dataplane reports a well-formed MAC for the ports
    involved in a simple forwarding exchange."""

    def __init__(self):
        DataplaneBaseTest.__init__(self)

    def runTest(self):
        def assert_valid_mac(device, port):
            mac = self.dataplane.get_mac(device, port)
            self.assertIsNotNone(mac)
            # A textual MAC address has exactly five ':' separators.
            self.assertEqual(mac.decode().count(":"), 5)

        assert_valid_mac(0, 1)
        payload = ("ab" * 20).encode()
        testutils.send_packet(self, (0, 1), payload)
        print("packet sent")
        testutils.verify_packet(self, payload, (1, 1))
        assert_valid_mac(1, 1)
class GetCountersTest(DataplaneBaseTest):
    """Verify that rx/tx counters increase after a forwarding exchange."""

    def __init__(self):
        DataplaneBaseTest.__init__(self)

    def runTest(self):
        def read_counters(device, port):
            counters = self.dataplane.get_nn_counters(device, port)
            self.assertIsNotNone(counters)
            self.assertTrue(type(counters) is tuple)
            self.assertEqual(len(counters), 2)
            return counters

        before_01 = read_counters(0, 1)
        before_11 = read_counters(1, 1)
        print("Counters:")
        print(" (0, 1) %d:%d" % before_01)
        print(" (1, 1) %d:%d" % before_11)

        payload = ("ab" * 20).encode()
        testutils.send_packet(self, (0, 1), payload)
        print("packet sent")
        testutils.verify_packet(self, payload, (1, 1))

        after_01 = read_counters(0, 1)
        after_11 = read_counters(1, 1)
        print("Counters:")
        print(" (0, 1) %d:%d" % after_01)
        print(" (1, 1) %d:%d" % after_11)
        # The ingress port's tx counter and the egress port's rx counter
        # must both have grown.
        self.assertTrue(after_01[1] > before_01[1])
        self.assertTrue(after_11[0] > before_11[0])
class VerifyAnyPacketAnyPort(DataplaneBaseTest):
    """Exercise verify_any_packet_any_port, including its failure mode."""

    def __init__(self):
        DataplaneBaseTest.__init__(self)

    def runTest(self):
        payload = ("ab" * 20).encode()
        testutils.send_packet(self, (0, 1), payload)
        print("packet sent")
        testutils.verify_any_packet_any_port(
            self, pkts=[payload], ports=[3, 1], device_number=1)

        # negative test: if the packet is indeed received, but not on one of the
        # expected ports, the test should fail
        with self.assertRaises(AssertionError):
            testutils.send_packet(self, (0, 1), payload)
            print("packet sent")
            testutils.verify_any_packet_any_port(
                self, pkts=[payload], ports=[0, 2, 3], device_number=1)
class RemovePort(DataplaneBaseTest):
    """Check that traffic stops when a port is removed and resumes once
    the port is re-added."""

    def __init__(self):
        DataplaneBaseTest.__init__(self)

    def runTest(self):
        payload = ("ab" * 20).encode()
        testutils.send_packet(self, (0, 1), payload)
        print("packet sent")
        testutils.verify_packet(self, payload, (1, 1))

        # We remove a port to test port_remove, but in order to execute
        # subsequent tests, we need to make sure we re-add the port
        # afterwards. In order to re-add the port, we need the interface name,
        # which is what this lookup is for. This is a little hacky but fine
        # for testing. In practice, you would not be removing ports which are
        # part of the original ptf config.
        def find_ifname(device_number, port_number):
            for port_id, ifname in config["port_map"].items():
                if (device_number, port_number) == port_id:
                    return ifname

        ifname = find_ifname(1, 1)
        self.assertTrue(self.dataplane.port_remove(1, 1))
        testutils.send_packet(self, (0, 1), payload)
        print("packet sent")
        testutils.verify_no_other_packets(self, device_number=1)

        self.dataplane.port_add(ifname, 1, 1)
        testutils.send_packet(self, (0, 1), payload)
        print("packet sent")
        testutils.verify_packet(self, payload, (1, 1))
        testutils.verify_no_other_packets(self, 1)
class SimpleTcpPacketTest(DataplaneBaseTest):
    """Forward a generated TCP packet and check its length and delivery."""

    def __init__(self):
        DataplaneBaseTest.__init__(self)

    def runTest(self):
        expected_len = 400
        pkt = testutils.simple_tcp_packet(pktlen=expected_len)
        self.assertEqual(len(pkt), expected_len)
        testutils.send_packet(self, (0, 1), pkt)
        print("packet sent")
        testutils.verify_packet(self, pkt, (1, 1))
        testutils.verify_no_other_packets(self, 1)
class SimpleIpv4PacketTest(DataplaneBaseTest):
    """Forward a generated IPv4-in-IP packet and check length and delivery."""

    def __init__(self):
        DataplaneBaseTest.__init__(self)

    def runTest(self):
        expected_len = 70
        pkt = testutils.simple_ipv4ip_packet(pktlen=expected_len)
        self.assertEqual(len(pkt), expected_len)
        testutils.send_packet(self, (0, 1), pkt)
        print("packet sent")
        testutils.verify_packet(self, pkt, (1, 1))
        testutils.verify_no_other_packets(self, 1)
class SimpleIpv6PacketTest(DataplaneBaseTest):
    """Forward a generated IPv6-in-IP packet and check length and delivery."""

    def __init__(self):
        DataplaneBaseTest.__init__(self)

    def runTest(self):
        expected_len = 400
        pkt = testutils.simple_ipv6ip_packet(pktlen=expected_len)
        self.assertEqual(len(pkt), expected_len)
        testutils.send_packet(self, (0, 1), pkt)
        print("packet sent")
        testutils.verify_packet(self, pkt, (1, 1))
        testutils.verify_no_other_packets(self, 1)
class Ipv4InIpv4PacketTest(DataplaneBaseTest):
    """Forward an IPv4-in-IPv4 encapsulated packet."""

    def __init__(self):
        DataplaneBaseTest.__init__(self)

    def runTest(self):
        pktlen = 70
        inner = testutils.simple_ipv4ip_packet(pktlen=pktlen)
        outer = testutils.simple_ipv4ip_packet(pktlen=pktlen,
                                               inner_frame=inner["IP"])
        testutils.send_packet(self, (0, 1), outer)
        print("packet sent")
        testutils.verify_packet(self, outer, (1, 1))
        testutils.verify_no_other_packets(self, 1)
class Ipv6InGREPacketTest(DataplaneBaseTest):
    """Forward an IPv6 packet (carrying UDP) encapsulated in GRE."""

    def __init__(self):
        DataplaneBaseTest.__init__(self)

    def runTest(self):
        pktlen = 1000
        udp = testutils.simple_udp_packet()
        ipv6 = testutils.simple_ipv6ip_packet(inner_frame=udp['UDP'])
        gre = testutils.simple_grev6_packet(pktlen=pktlen,
                                            inner_frame=ipv6["IPv6"])
        # 0x86DD is the IPv6 Ethertype, expected in the GRE proto field.
        self.assertEqual(gre['GRE'].proto, 0x86DD)
        testutils.send_packet(self, (0, 1), gre)
        print("packet sent")
        testutils.verify_packet(self, gre, (1, 1))
        testutils.verify_no_other_packets(self, 1)
| 34.120192 | 84 | 0.629421 |
9ca6537a821949e2a0d5540a56eba2e4ab31762d | 339 | py | Python | pages/migrations/0002_auto_20190502_1726.py | deejungx/goalza | c9ec93aad13228bccd9f185cfac6ff9e1fc1994a | [
"MIT"
] | 1 | 2019-05-04T11:26:14.000Z | 2019-05-04T11:26:14.000Z | pages/migrations/0002_auto_20190502_1726.py | deejungx/goalza | c9ec93aad13228bccd9f185cfac6ff9e1fc1994a | [
"MIT"
] | null | null | null | pages/migrations/0002_auto_20190502_1726.py | deejungx/goalza | c9ec93aad13228bccd9f185cfac6ff9e1fc1994a | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2019-05-02 17:26
from django.db import migrations
class Migration(migrations.Migration):
    """Set a default ordering (by ground_number) on the Ground model."""

    # Must run after the initial pages schema migration.
    dependencies = [
        ('pages', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='ground',
            options={'ordering': ['ground_number']},
        ),
    ]
| 18.833333 | 52 | 0.584071 |
b746217733170e4488665c4fe20c360b07e9d5a1 | 28,650 | py | Python | dev/models.py | mariakesa/UdacityMachineLearningEngineerNanoDegree | 9909515186a7233c46163971b579a862f8151244 | [
"Apache-2.0"
] | null | null | null | dev/models.py | mariakesa/UdacityMachineLearningEngineerNanoDegree | 9909515186a7233c46163971b579a862f8151244 | [
"Apache-2.0"
] | null | null | null | dev/models.py | mariakesa/UdacityMachineLearningEngineerNanoDegree | 9909515186a7233c46163971b579a862f8151244 | [
"Apache-2.0"
] | null | null | null | """
This work modifies code from the pl_bolts library.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018-2021 William Falcon
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Deep Q Network
"""
import argparse
from collections import OrderedDict
from typing import Dict, List, Optional, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint
from torch import optim as optim
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from pl_bolts.datamodules.experience_source import Experience, ExperienceSourceDataset
from pl_bolts.losses.rl import dqn_loss
from pl_bolts.models.rl.common.gym_wrappers import make_environment
from pl_bolts.models.rl.common.memory import MultiStepBuffer
from pl_bolts.models.rl.common.networks import CNN
from gym import Env
from abc import ABC
from typing import List
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from functools import partial
import collections
import torchfunc
class Agent(ABC):
    """Minimal agent interface; this base implementation always picks action 0."""

    def __init__(self, net: nn.Module):
        # Network used by subclasses to derive a policy.
        self.net = net

    def __call__(self, state: torch.Tensor, device: str, *args, **kwargs) -> List[int]:
        """
        Using the given network, decide what action to carry

        Args:
            state: current state of the environment
            device: device used for current batch

        Returns:
            action
        """
        default_action = 0
        return [default_action]
class ValueAgent(Agent):
    """Value based agent that returns an action based on the Q values from the network.

    Exploration is epsilon-greedy: with probability ``epsilon`` a random
    action is chosen, otherwise the argmax of the network's Q values.
    ``epsilon`` decays linearly from ``eps_start`` to ``eps_end`` over
    ``eps_frames`` steps (see ``update_epsilon``).
    """

    def __init__(
        self,
        net: nn.Module,
        action_space: int,
        eps_start: float = 1.0,
        eps_end: float = 0.2,
        eps_frames: float = 1000,
    ):
        super().__init__(net)
        self.action_space = action_space
        self.eps_start = eps_start
        self.epsilon = eps_start
        self.eps_end = eps_end
        self.eps_frames = eps_frames
        # Registers forward-pre hooks on every module of the network so
        # activations can be inspected later via self.recorder.
        self.recorder = torchfunc.hooks.recorders.ForwardPre()
        self.recorder.modules(self.net)

    @torch.no_grad()
    def __call__(self, state: torch.Tensor, device: str) -> List[int]:
        """
        Takes in the current state and returns the action based on the agents policy

        Args:
            state: current state of the environment
            device: the device used for the current batch

        Returns:
            action defined by policy
        """
        if not isinstance(state, list):
            state = [state]

        # Epsilon-greedy branch: explore with probability epsilon.
        if np.random.random() < self.epsilon:
            action = self.get_random_action(state)
        else:
            action = self.get_action(state, device)

        return action

    def get_random_action(self, state: torch.Tensor) -> List[int]:
        """Return one uniformly random action per state in the batch."""
        # Idiom fix: a comprehension replaces the index loop whose loop
        # variable was never used; also corrects the return annotation
        # (the method has always returned a list, not a single int).
        return [np.random.randint(0, self.action_space) for _ in state]

    def get_action(self, state: torch.Tensor, device: torch.device):
        """
        Returns the best action based on the Q values of the network

        Args:
            state: current state of the environment
            device: the device used for the current batch

        Returns:
            action defined by Q values
        """
        if not isinstance(state, torch.Tensor):
            state = torch.tensor(state, device=device)

        q_values = self.net(state)
        _, actions = torch.max(q_values, dim=1)
        return actions.detach().cpu().numpy()

    def update_epsilon(self, step: int) -> None:
        """
        Updates the epsilon value based on the current step

        Args:
            step: current global step
        """
        # Linear decay, clamped at eps_end.
        self.epsilon = max(self.eps_end, self.eps_start - (step + 1) / self.eps_frames)
class DQN(pl.LightningModule):
    """
    Basic DQN Model

    PyTorch Lightning implementation of `DQN <https://arxiv.org/abs/1312.5602>`_

    Paper authors: Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Alex Graves,
    Ioannis Antonoglou, Daan Wierstra, Martin Riedmiller.

    Model implemented by:

        - `Donal Byrne <https://github.com/djbyrne>`

    Example:
        >>> from pl_bolts.models.rl.dqn_model import DQN
        ...
        >>> model = DQN("PongNoFrameskip-v4")

    Train::

        trainer = Trainer()
        trainer.fit(model)

    Note:
        This example is based on:
        https://github.com/PacktPublishing/Deep-Reinforcement-Learning-Hands-On-Second-Edition/blob/master/Chapter06/02_dqn_pong.py

    Note:
        Currently only supports CPU and single GPU training with `distributed_backend=dp`
    """

    def __init__(
        self,
        env: str,
        eps_start: float = 1.0,
        eps_end: float = 0.02,
        eps_last_frame: int = 150000,
        sync_rate: int = 1000,
        gamma: float = 0.99,
        learning_rate: float = 1e-4,
        batch_size: int = 32,
        replay_size: int = 100000,
        warm_start_size: int = 10000,
        avg_reward_len: int = 100,
        min_episode_reward: int = -21,
        seed: int = 123,
        batches_per_epoch: int = 1000,
        n_steps: int = 1,
        **kwargs,
    ):
        """
        Args:
            env: gym environment tag
            eps_start: starting value of epsilon for the epsilon-greedy exploration
            eps_end: final value of epsilon for the epsilon-greedy exploration
            eps_last_frame: the final frame in for the decrease of epsilon. At this frame epsilon = eps_end
            sync_rate: the number of iterations between syncing up the target network with the train network
            gamma: discount factor
            learning_rate: learning rate
            batch_size: size of minibatch pulled from the DataLoader
            replay_size: total capacity of the replay buffer
            warm_start_size: how many random steps through the environment to be carried out at the start of
                training to fill the buffer with a starting point
            avg_reward_len: how many episodes to take into account when calculating the avg reward
            min_episode_reward: the minimum score that can be achieved in an episode. Used for filling the avg buffer
                before training begins
            seed: seed value for all RNG used
            batches_per_epoch: number of batches per epoch
            n_steps: size of n step look ahead
        """
        super().__init__()

        # Environment: a seeded training env and an unseeded evaluation env.
        self.exp = None
        self.env = self.make_environment(env, seed)
        self.test_env = self.make_environment(env)

        self.obs_shape = self.env.observation_space.shape
        self.n_actions = self.env.action_space.n

        # Model Attributes (replay buffer / dataset are created lazily in _dataloader)
        self.buffer = None
        self.dataset = None

        self.net = None
        self.target_net = None
        self.build_networks()

        self.agent = ValueAgent(
            self.net,
            self.n_actions,
            eps_start=eps_start,
            eps_end=eps_end,
            eps_frames=eps_last_frame,
        )

        # Hyperparameters
        self.sync_rate = sync_rate
        self.gamma = gamma
        self.lr = learning_rate
        self.batch_size = batch_size
        self.replay_size = replay_size
        self.warm_start_size = warm_start_size
        self.batches_per_epoch = batches_per_epoch
        self.n_steps = n_steps

        self.save_hyperparameters()

        # Metrics
        self.total_episode_steps = [0]
        self.total_rewards = [0]
        self.done_episodes = 0
        self.total_steps = 0

        # Average Rewards
        self.avg_reward_len = avg_reward_len

        # Seed the reward history with the minimum score so the running
        # average is well defined before any real episode finishes.
        for _ in range(avg_reward_len):
            self.total_rewards.append(torch.tensor(min_episode_reward, device=self.device))

        self.avg_rewards = float(np.mean(self.total_rewards[-self.avg_reward_len:]))

        self.state = self.env.reset()

    def run_n_episodes(self, env, n_epsiodes: int = 1, epsilon: float = 1.0) -> List[int]:
        """
        Carries out N episodes of the environment with the current agent

        Args:
            env: environment to use, either train environment or test environment
            n_epsiodes: number of episodes to run
            epsilon: epsilon value for DQN agent

        Returns:
            list of total rewards, one entry per episode
        """
        total_rewards = []
        # Per-step diagnostics gathered across all episodes: the flattened,
        # frame-averaged observation and the action taken at each step.
        self.im_arr = []
        self.actions_record = []
        for _ in range(n_epsiodes):
            episode_state = env.reset()
            done = False
            episode_reward = 0

            while not done:
                self.agent.epsilon = epsilon
                action = self.agent(episode_state, self.device)
                # NOTE: the unused `import matplotlib.pyplot` that used to
                # live inside this loop (plus commented-out plotting) was
                # removed; it was dead weight on every step.
                self.im_arr.append(np.mean(episode_state, axis=0).flatten())
                self.actions_record.append(action[0])
                next_state, reward, done, _ = env.step(action[0])
                episode_state = next_state
                episode_reward += reward

            total_rewards.append(episode_reward)

        # The agent may optionally expose a `recorder` (e.g. an activation
        # hook).  Guard the access so evaluation does not crash with an
        # AttributeError when no recorder was attached.
        recorder = getattr(self.agent, 'recorder', None)
        self.activations = recorder.data if recorder is not None else None
        self.im_arr = np.array(self.im_arr)
        return total_rewards

    def populate(self, warm_start: int) -> None:
        """Populates the buffer with initial experience from a fully random policy."""
        if warm_start > 0:
            self.state = self.env.reset()

            for _ in range(warm_start):
                # epsilon = 1.0 => purely random exploration during warm start.
                self.agent.epsilon = 1.0
                action = self.agent(self.state, self.device)
                next_state, reward, done, _ = self.env.step(action[0])
                exp = Experience(state=self.state, action=action[0], reward=reward, done=done, new_state=next_state)
                self.buffer.append(exp)
                self.state = next_state

                if done:
                    self.state = self.env.reset()

    def build_networks(self) -> None:
        """Initializes the DQN train and target networks"""
        self.net = CNN(self.obs_shape, self.n_actions)
        self.target_net = CNN(self.obs_shape, self.n_actions)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Passes in a state x through the network and gets the q_values of each action as an output

        Args:
            x: environment state

        Returns:
            q values
        """
        return self.net(x)

    def train_batch(self, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Contains the logic for generating a new batch of data to be passed to the DataLoader

        Returns:
            yields a Experience tuple containing the state, action, reward, done and next_state.
        """
        episode_reward = 0
        episode_steps = 0

        while True:
            self.total_steps += 1
            # Step the environment once with the current policy and store the
            # transition in the replay buffer.
            action = self.agent(self.state, self.device)

            next_state, r, is_done, _ = self.env.step(action[0])

            episode_reward += r
            episode_steps += 1

            exp = Experience(state=self.state, action=action[0], reward=r, done=is_done, new_state=next_state)

            self.agent.update_epsilon(self.global_step)
            self.buffer.append(exp)
            self.state = next_state

            if is_done:
                self.done_episodes += 1
                self.total_rewards.append(episode_reward)
                self.total_episode_steps.append(episode_steps)
                self.avg_rewards = float(np.mean(self.total_rewards[-self.avg_reward_len:]))
                self.state = self.env.reset()
                episode_steps = 0
                episode_reward = 0

            # Yield a minibatch sampled from the buffer, one transition at a time.
            states, actions, rewards, dones, new_states = self.buffer.sample(self.batch_size)

            for idx, _ in enumerate(dones):
                yield states[idx], actions[idx], rewards[idx], dones[idx], new_states[idx]

            # Simulates epochs
            if self.total_steps % self.batches_per_epoch == 0:
                break

    def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor], _) -> OrderedDict:
        """
        Carries out a single step through the environment to update the replay buffer.
        Then calculates loss based on the minibatch received

        Args:
            batch: current mini batch of replay data
            _: batch number, not used

        Returns:
            Training loss and log metrics
        """
        # calculates training loss
        loss = dqn_loss(batch, self.net, self.target_net)

        if self.trainer.use_dp or self.trainer.use_ddp2:
            loss = loss.unsqueeze(0)

        # Periodic hard update of the target network with the train network.
        if self.global_step % self.sync_rate == 0:
            self.target_net.load_state_dict(self.net.state_dict())

        self.log_dict({
            "total_reward": self.total_rewards[-1],
            "avg_reward": self.avg_rewards,
            "train_loss": loss,
            "episodes": self.done_episodes,
            "episode_steps": self.total_episode_steps[-1]
        })

        return OrderedDict({
            "loss": loss,
            "avg_reward": self.avg_rewards,
        })

    def test_step(self, *args, **kwargs) -> Dict[str, torch.Tensor]:
        """Evaluate the agent for 1 episode with a fully greedy policy (epsilon=0)."""
        test_reward = self.run_n_episodes(self.test_env, 1, 0)
        avg_reward = sum(test_reward) / len(test_reward)
        return {"test_reward": avg_reward}

    def test_epoch_end(self, outputs) -> Dict[str, torch.Tensor]:
        """Log the avg of the test results"""
        rewards = [x["test_reward"] for x in outputs]
        avg_reward = sum(rewards) / len(rewards)
        self.log("avg_test_reward", avg_reward)
        return {"avg_test_reward": avg_reward}

    def configure_optimizers(self) -> List[Optimizer]:
        """ Initialize Adam optimizer"""
        optimizer = optim.Adam(self.net.parameters(), lr=self.lr)
        return [optimizer]

    def _dataloader(self) -> DataLoader:
        """Initialize the Replay Buffer dataset used for retrieving experiences"""
        self.buffer = MultiStepBuffer(self.replay_size, self.n_steps)
        self.populate(self.warm_start_size)

        self.dataset = ExperienceSourceDataset(self.train_batch)
        return DataLoader(dataset=self.dataset, batch_size=self.batch_size)

    def train_dataloader(self) -> DataLoader:
        """Get train loader"""
        return self._dataloader()

    def test_dataloader(self) -> DataLoader:
        """Get test loader"""
        return self._dataloader()

    @staticmethod
    def make_environment(env_name: str, seed: Optional[int] = None) -> Env:
        """
        Initialise gym environment

        Args:
            env_name: environment name or tag
            seed: value to seed the environment RNG for reproducibility

        Returns:
            gym environment
        """
        env = make_environment(env_name)

        # BUG FIX: `if seed:` skipped seeding for the valid seed value 0.
        if seed is not None:
            env.seed(seed)

        return env

    @staticmethod
    def add_model_specific_args(arg_parser: argparse.ArgumentParser, ) -> argparse.ArgumentParser:
        """
        Adds arguments for DQN model

        Note:
            These params are fine tuned for Pong env.

        Args:
            arg_parser: parent parser
        """
        arg_parser.add_argument(
            "--sync_rate",
            type=int,
            default=1000,
            help="how many frames do we update the target network",
        )
        arg_parser.add_argument(
            "--replay_size",
            type=int,
            default=100000,
            help="capacity of the replay buffer",
        )
        arg_parser.add_argument(
            "--warm_start_size",
            type=int,
            default=10000,
            help="how many samples do we use to fill our buffer at the start of training",
        )
        arg_parser.add_argument(
            "--eps_last_frame",
            type=int,
            default=150000,
            help="what frame should epsilon stop decaying",
        )
        arg_parser.add_argument("--eps_start", type=float, default=1.0, help="starting value of epsilon")
        arg_parser.add_argument("--eps_end", type=float, default=0.02, help="final value of epsilon")
        arg_parser.add_argument("--batches_per_epoch", type=int, default=10000, help="number of batches in an epoch")
        arg_parser.add_argument("--batch_size", type=int, default=32, help="size of the batches")
        arg_parser.add_argument("--lr", type=float, default=1e-4, help="learning rate")
        arg_parser.add_argument("--env", type=str, required=True, help="gym environment tag")
        arg_parser.add_argument("--gamma", type=float, default=0.99, help="discount factor")
        arg_parser.add_argument(
            "--avg_reward_len",
            type=int,
            default=100,
            help="how many episodes to include in avg reward",
        )
        arg_parser.add_argument(
            "--n_steps",
            type=int,
            default=1,
            help="how many frames do we update the target network",
        )

        return arg_parser
| 39.736477 | 131 | 0.657417 |
ddfd4b6f561e22d172971db77605315cb11b4875 | 3,217 | py | Python | angle_dep_analysis_bin.py | acere/NCD-for-quantum-correlations | dfe7c661ff07d52bd7760d8676281d8c4e7d9722 | [
"MIT"
] | null | null | null | angle_dep_analysis_bin.py | acere/NCD-for-quantum-correlations | dfe7c661ff07d52bd7760d8676281d8c4e7d9722 | [
"MIT"
] | null | null | null | angle_dep_analysis_bin.py | acere/NCD-for-quantum-correlations | dfe7c661ff07d52bd7760d8676281d8c4e7d9722 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Script to evaluate the NCDs for the violation versus
separation angle measurement
January 21, 2015
Author: Alessandro Cere, Centre for Quantum Technologies,
National University of Singapore
"""
import glob
from bit_compression_analysis import *
# Separation angles probed by the experiment: a regular 0.02 grid, with a few
# entries overwritten to match the angles actually measured.
angles = [x / 100. for x in range(0, 28, 2)]
angles[0] = 0  # int 0 so the data-file prefix is '0', not '0.0'
angles[8] = 0.15
angles[9] = 0.16
angles[10] = 0.18
angles[11] = 0.2
angles[12] = 0.22
angles[13] = 0.24

ncd_vec = []
compressor = 'lzma'

with open('result_' + compressor + '.dat', 'a') as f, \
        open('result_' + compressor + '_ncd.dat', 'a') as g:
    for angle in angles:

        def _data_file(setting, party):
            """First file matching '<angle>_<setting>_<party>.dat.bin' (IndexError if absent)."""
            return glob.glob('{0}_{1}_{2}.dat.bin'.format(angle, setting, party))[0]

        # Resolve the measurement files for each of the four basis settings.
        a0b0_alice = _data_file('a0b0', 'alice')
        a0b1_alice = _data_file('a0b1', 'alice')
        a1b0_alice = _data_file('a1b0', 'alice')
        a1b1_alice = _data_file('a1b1', 'alice')
        a0b0_bob = _data_file('a0b0', 'bob')
        a0b1_bob = _data_file('a0b1', 'bob')
        a1b0_bob = _data_file('a1b0', 'bob')
        a1b1_bob = _data_file('a1b1', 'bob')
        a0b0_alice_bob = _data_file('a0b0', 'alice_bob')
        a0b1_alice_bob = _data_file('a0b1', 'alice_bob')
        a1b0_alice_bob = _data_file('a1b0', 'alice_bob')
        a1b1_alice_bob = _data_file('a1b1', 'alice_bob')

        # NCD-based distances for each basis setting (c0 is the a0b1 term of
        # the CHSH-like combination below).
        c0, len_a0, na0, len_b0, nb0, len_ab0, nab0 = ncd_dist(
            a0b1_alice, a0b1_bob, a0b1_alice_bob, 1, compressor)
        c1, len_a1, na1, len_b1, nb1, len_ab1, nab1 = ncd_dist(
            a0b0_alice, a0b0_bob, a0b0_alice_bob, 1, compressor)
        c2, len_a2, na2, len_b2, nb2, len_ab2, nab2 = ncd_dist(
            a1b0_alice, a1b0_bob, a1b0_alice_bob, 1, compressor)
        c3, len_a3, na3, len_b3, nb3, len_ab3, nab3 = ncd_dist(
            a1b1_alice, a1b1_bob, a1b1_alice_bob, 1, compressor)

        ncd_vec.append(c0 - (c1 + c2 + c3))

        # BUG FIX: the original format string was ('{0} ' * 25), which repeats
        # positional field 0 (the angle) 25 times instead of writing the 25
        # distinct values.  Auto-numbered '{}' fields write each value once.
        f.write(('{} ' * 25 + '\n').format(angle, na0, nb0, nab0,
                                           na1, nb1, nab1,
                                           na2, nb2, nab2,
                                           na3, nb3, nab3,
                                           len_a0, len_b0, len_ab0,
                                           len_a1, len_b1, len_ab1,
                                           len_a2, len_b2, len_ab2,
                                           len_a3, len_b3, len_ab3))
        g.write('{0} {1}\n'.format(angle, c0 - (c1 + c2 + c3)))
| 38.759036 | 70 | 0.56761 |
f8273dca42debb5e8ab0b05251a5b9b1a1fbe63a | 135 | py | Python | plugins/robothor_plugin/robothor_constants.py | mattdeitke/allenact-1 | 70f106b32a38424e862399a76d84f607838063be | [
"MIT"
] | 1 | 2020-09-10T13:09:14.000Z | 2020-09-10T13:09:14.000Z | plugins/robothor_plugin/robothor_constants.py | mattdeitke/allenact-1 | 70f106b32a38424e862399a76d84f607838063be | [
"MIT"
] | null | null | null | plugins/robothor_plugin/robothor_constants.py | mattdeitke/allenact-1 | 70f106b32a38424e862399a76d84f607838063be | [
"MIT"
] | null | null | null | MOVE_AHEAD = "MoveAhead"
ROTATE_LEFT = "RotateLeft"
ROTATE_RIGHT = "RotateRight"
LOOK_DOWN = "LookDown"
LOOK_UP = "LookUp"
END = "End"
| 19.285714 | 28 | 0.733333 |
3f11057addcd621881ddd7596a5005c9c5bbf81c | 768 | py | Python | Misc/DTE2509/Oblig3/Classes/MyDb.py | Unicron2k/dte_bsc | 1703c6e57e9ceec2dcce3ec6b1b363bf7147536d | [
"MIT"
] | null | null | null | Misc/DTE2509/Oblig3/Classes/MyDb.py | Unicron2k/dte_bsc | 1703c6e57e9ceec2dcce3ec6b1b363bf7147536d | [
"MIT"
] | null | null | null | Misc/DTE2509/Oblig3/Classes/MyDb.py | Unicron2k/dte_bsc | 1703c6e57e9ceec2dcce3ec6b1b363bf7147536d | [
"MIT"
] | 1 | 2021-12-01T17:19:06.000Z | 2021-12-01T17:19:06.000Z | import mysql.connector
class MyDb:
    """Context-manager wrapper around a MySQL connection.

    Usage::

        with MyDb() as db:
            rows = db.query("SELECT * FROM t WHERE id = %s", (42,))
    """

    def __init__(self) -> None:
        # SECURITY: credentials are hard-coded here and committed with the
        # source; they should be loaded from environment variables or a
        # config file kept out of version control.
        dbconfig = {'host': 'kark.uit.no',
                    'user': 'stud_v19_eriksen',
                    'password': 'ThisismyKarkPassword1!$',
                    'database': 'stud_v19_eriksen',}
        self.configuration = dbconfig

    def __enter__(self) -> 'MyDb':
        """Open the connection and a prepared-statement cursor."""
        self.conn = mysql.connector.connect(**self.configuration)
        self.cursor = self.conn.cursor(prepared=True)
        return self

    def __exit__(self, exc_type, exc_val, exc_trace) -> None:
        """Commit on success, roll back on error, always release resources.

        The original implementation committed unconditionally, which would
        persist partial work even when the ``with`` body raised.
        """
        try:
            if exc_type is None:
                self.conn.commit()
            else:
                self.conn.rollback()
        finally:
            self.cursor.close()
            self.conn.close()

    def query(self, sql, data):
        """Execute a parameterised statement and return all fetched rows."""
        self.cursor.execute(sql, data)
        return self.cursor.fetchall()
b9ad97a2db5efae921ed5767334fbc638fd17b29 | 5,053 | py | Python | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/running_config/get.py | wilbeacham85/genielibs | 519da71e3956b86d4211d8649667c0d931dd2715 | [
"Apache-2.0"
] | null | null | null | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/running_config/get.py | wilbeacham85/genielibs | 519da71e3956b86d4211d8649667c0d931dd2715 | [
"Apache-2.0"
] | 1 | 2020-08-01T00:59:29.000Z | 2020-08-01T00:59:32.000Z | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/running_config/get.py | jeaubin/genielibs | b4c9c4c0e7b85a7bef94ab77cc477a1c8592a0cb | [
"Apache-2.0"
] | null | null | null | """Utility type functions that do not fit into another category"""
# Python
import logging
import re
# unicon
from unicon.core.errors import SubCommandFailure
# Running-Config
from genie.libs.sdk.apis.utils import get_config_dict
# Module-level logger shared by all helpers in this module.
log = logging.getLogger(__name__)
def search_running_config(device, option):
    """ Search for a keyword in the show running-config output

        Args:
            device (`obj`): Device object
            option (`str`): key word to search

        Returns:
            config (`str` or None): value following the keyword, if found
    """
    output = device.execute("show running-config | include {}".format(option))
    match = re.search(r"{} +(?P<cfg>[\S]+)".format(option), output)
    if match is None:
        return None
    return match.group("cfg")
def get_running_config_dict(device, option=None):
    """ Get show running-config output

        Args:
            device (`obj`): Device object
            option (`str`): option command

        Returns:
            config_dict (`dict`): dict of show run output

        Raises:
            SubCommandFailure: if the show command fails on the device
    """
    if option:
        cmd = "show running-config {}".format(option)
    else:
        cmd = "show running-config"

    try:
        out = device.execute(cmd)
    except SubCommandFailure as e:
        # Chain the original failure so the root cause is not lost.
        raise SubCommandFailure(
            "Could not get running-config information "
            "on device {device}".format(device=device.name)
        ) from e

    return get_config_dict(out)
def get_running_config_hostname(device, iteration=5):
    """ Get device hostname from show run

        Args:
            device (`obj`): Device object
            iteration (`int`): maximum number of attempts

        Returns:
            hostname (`str`): Device hostname, or '' if it could not be parsed
    """
    log.info("Get hostname from {}".format(device.name))
    for _ in range(iteration):
        try:
            out = device.execute("show running-config | include hostname")
            # Return as soon as a hostname is parsed.  The original code
            # never broke out of the loop, so it re-ran the command for
            # every remaining iteration even after a success.
            return re.match(r"hostname +(?P<name>\S+)", out).groupdict()["name"]
        except Exception as e:
            # A missing match raises AttributeError (None.groupdict());
            # device/transport errors are retried the same way.
            log.error("Failed to get hostname:{}".format(e))
            continue
    return ""
def get_running_config_section_dict(
    device, section=None, options=None
):
    """ Get section information from show run

        Args:
            device ('str'): Device str
            section ('str'): Section str
            options ('str'): extra options appended to the show run command

        Returns:
            Configuration dict, or None if the show command fails
    """
    # Assemble the show command from whichever arguments were supplied.
    cmd = "show run"
    if options:
        cmd += " {options}".format(options=options)
    if section:
        cmd += " | section {section}".format(section=section)

    try:
        output = device.execute(cmd)
    except SubCommandFailure:
        return None

    return get_config_dict(output)
def get_running_config(device, keyword=None):
    """ Return the running configuration as a list of lines

        Args:
            device ('obj') : Device object to extract configuration
            keyword ('str') : Only keep lines starting with this keyword

        Returns:
            List containing configuration lines
    """
    command = "show running-config"
    if keyword:
        command = "show running-config | i ^{keyword}".format(keyword=keyword)
    return device.execute(command).splitlines()
def get_running_config_section(device, keyword):
    """ Return configuration section(s) starting with the passed keyword

        Args:
            device ('obj') : Device object to extract configuration
            keyword ('str') : Section keyword to filter on

        Returns:
            List of configuration lines in the matching section(s)
    """
    command = "show running-config | sec ^{keyword}".format(keyword=keyword)
    return device.execute(command).splitlines()
def get_config_commands_from_running_config(
    device, option
):
    """ Builds configuration command list from running config

        Args:
            device ('obj'): device to run on
            option ('str'): running config sub option

        Returns:
            list of config commands
    """
    log.info(
        "Building configuration command from show running-config {}".format(
            option
        )
    )
    config_commands = []
    config_start = False
    out = device.execute("show running-config {}".format(option))
    for line in out.splitlines():
        line = line.strip()
        if not line:
            # Skip blank lines.  BUG FIX: the original used `line in "end"`,
            # a substring test, so any blank line (and stray lines such as
            # 'e' or 'nd') terminated the parse early.
            continue
        if not config_start and option.lower() in line.lower():
            config_start = True
        if config_start:
            if line == "end":
                break
            config_commands.append(line)
    return config_commands
a641df2deac1c19f7e427d315d80979c571eba2b | 1,555 | py | Python | getting_personality_profile_tweet.py | samshad/Stack-Twitter-Big5 | 0716503dc2992ed53dca3730bcea0adef5e15dd2 | [
"MIT"
] | null | null | null | getting_personality_profile_tweet.py | samshad/Stack-Twitter-Big5 | 0716503dc2992ed53dca3730bcea0adef5e15dd2 | [
"MIT"
] | null | null | null | getting_personality_profile_tweet.py | samshad/Stack-Twitter-Big5 | 0716503dc2992ed53dca3730bcea0adef5e15dd2 | [
"MIT"
] | null | null | null | from ibm_watson import PersonalityInsightsV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
import json
import pandas as pd
# SECURITY: the service API key and instance URL are hard-coded below and
# committed with the source; they should be loaded from environment variables
# or a secrets store instead.
authenticator = IAMAuthenticator('rKKOOaC5nd8haGRmAAFFeCXt196bYI5qhWogSapG_zlJ')
personality_insights = PersonalityInsightsV3(
    version='2017-10-13',
    authenticator=authenticator
)
personality_insights.set_service_url('https://api.us-south.personality-insights.watson.cloud.ibm.com/instances/66b10b97-021d-47ae-b5c2-fec4f691fc69')

er_cnt = 0      # number of users whose profile request failed
cnt = 0         # number of users processed so far
er_users = []   # users whose profile request failed

df = pd.read_csv('Data/twitter_data.csv')

for index, row in df.iterrows():
    cnt += 1
    user = row['users']
    tweet = row['tweets']
    print(cnt, " => ", user)
    try:
        profile = personality_insights.profile(tweet, content_type='text/plain', accept_language='en',
                                               accept='application/json').get_result()
        with open('Data/Personality_Tweets/' + user + '_personality.json', 'w') as json_file:
            json.dump(profile, json_file, indent=4)
    # BUG FIX: the original bare `except:` also swallowed KeyboardInterrupt
    # and SystemExit, making the batch impossible to abort cleanly.  Catch
    # Exception and record the failed user; the loop is deliberately
    # best-effort and continues with the next user.
    except Exception as exc:
        print('########## Error: ', user, exc)
        er_users.append(user)
        er_cnt += 1

print('Done...', er_cnt)
print(er_users)
| 34.555556 | 193 | 0.720257 |
9a2127d144d6bc331540fa83d1d21202e2f14163 | 7,392 | py | Python | tests/python/pants_test/backend/python/tasks/test_interpreter_selection_integration.py | SergeKireev/pants | cd92c65aeb3dfdcee3e0946f2b68a301ef2f4541 | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/backend/python/tasks/test_interpreter_selection_integration.py | SergeKireev/pants | cd92c65aeb3dfdcee3e0946f2b68a301ef2f4541 | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/backend/python/tasks/test_interpreter_selection_integration.py | SergeKireev/pants | cd92c65aeb3dfdcee3e0946f2b68a301ef2f4541 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import glob
import os
import subprocess
from pex.executor import Executor
from pex.interpreter import PythonInterpreter
from pants.testutil.interpreter_selection_utils import (
PY_3,
PY_27,
skip_unless_python3_present,
skip_unless_python27_and_python3_present,
skip_unless_python27_present,
)
from pants.testutil.pants_run_integration_test import PantsRunIntegrationTest
from pants.util.contextutil import temporary_dir
class InterpreterSelectionIntegrationTest(PantsRunIntegrationTest):
  """Integration tests for pants' python interpreter selection, driven by
  interpreter constraints supplied via CLI flags and pants.ini config."""
  # Directory containing the example python targets exercised below.
  testproject = 'testprojects/src/python/interpreter_selection'
  @classmethod
  def hermetic(cls):
    # We must set as true to ignore `PANTS_PYTHON_SETUP_INTERPRETER_CONSTRAINTS`
    # preconfiguring the interpreter_constraint. For example, in `ci.py` we set
    # this environment variable to Python 3, which overrides any config defined
    # in the below tests.
    return True
  def _build_pex(self, binary_target, config=None, args=None, version=PY_27):
    """Run `pants binary` on `binary_target`, defaulting the interpreter
    constraint to the requested major version unless explicit args are given."""
    # By default, Avoid some known-to-choke-on interpreters.
    constraint = '["CPython>=3.6,<4"]' if version == PY_3 else '["CPython>=2.7,<3"]'
    args = list(args) if args is not None else [
      '--python-setup-interpreter-constraints={}'.format(constraint)
    ]
    command = ['binary', binary_target] + args
    return self.run_pants(command=command, config=config)
  def _echo_version(self, version):
    """Build the echo_interpreter_version_<version> pex and return its stdout."""
    with temporary_dir() as distdir:
      config = {
        'GLOBAL': {
          'pants_distdir': distdir
        }
      }
      binary_name = 'echo_interpreter_version_{}'.format(version)
      binary_target = '{}:{}'.format(self.testproject, binary_name)
      pants_run = self._build_pex(binary_target, config, version=version)
      self.assert_success(pants_run, 'Failed to build {binary}.'.format(binary=binary_target))
      # Run the built pex.
      exe = os.path.join(distdir, binary_name + '.pex')
      return self._popen_stdout(exe)
  def _popen_stdout(self, exe):
    """Execute `exe` as a subprocess and return its decoded stdout."""
    proc = subprocess.Popen([exe], stdout=subprocess.PIPE)
    (stdout_data, _) = proc.communicate()
    return stdout_data.decode()
  def _test_version(self, version):
    """Assert the pex built for `version` reports that interpreter version."""
    self._assert_version_matches(self._echo_version(version), version)
  def _assert_version_matches(self, actual, expected):
    """Check the leading components of `actual` match `expected` (e.g. '2.7')."""
    v = actual.strip().split('.') # E.g., 2.7.13.
    self.assertTrue(len(v) > 2, 'Not a valid version string: {}'.format(v))
    expected_components = expected.split('.')
    self.assertEqual(expected_components, v[:len(expected_components)])
  def test_cli_option_wins_compatibility_conflict(self):
    # Tests that targets with compatibility conflicts collide.
    binary_target = '{}:deliberately_conflicting_compatibility'.format(self.testproject)
    pants_run = self._build_pex(binary_target)
    self.assert_success(pants_run, 'Failed to build {binary}.'.format(binary=binary_target))
  def test_conflict_via_config(self):
    # Tests that targets with compatibility conflict with targets with default compatibility.
    # NB: Passes empty `args` to avoid having the default CLI args override the config.
    config = {
      'python-setup': {
        'interpreter_constraints': ['CPython<2.7'],
      }
    }
    binary_target = '{}:echo_interpreter_version'.format(self.testproject)
    pants_run = self._build_pex(binary_target, config=config, args=[])
    self.assert_failure(
      pants_run,
      'Unexpected successful build of {binary}.'.format(binary=binary_target)
    )
    self.assertIn(
      "Unable to detect a suitable interpreter for compatibilities",
      pants_run.stdout_data
    )
    self.assertIn(
      "CPython<2.7",
      pants_run.stdout_data,
      "Did not output requested compatibiility."
    )
    self.assertIn("Conflicting targets: {}".format(binary_target), pants_run.stdout_data)
    # NB: we expect the error message to print *all* interpreters resolved by Pants. However,
    # to simplify the tests and for hermicity, here we only test that the current interpreter
    # gets printed as a proxy for the overall behavior.
    self.assertIn(
      PythonInterpreter.get().version_string,
      pants_run.stdout_data,
      "Did not output interpreters discoved by Pants."
    )
  @skip_unless_python27_and_python3_present
  def test_binary_uses_own_compatibility(self):
    """Tests that a binary target uses its own compatiblity, rather than including that of its
    transitive dependencies.
    """
    # This target depends on a 2.7 minimum library, but does not declare its own compatibility.
    # By specifying a version on the CLI, we ensure that the binary target will use that, and then
    # test that it ends up with the version we request (and not the lower version specified on its
    # dependency).
    with temporary_dir() as distdir:
      config = {
        'GLOBAL': {
          'pants_distdir': distdir
        }
      }
      args = [
        '--python-setup-interpreter-constraints=["CPython>=3.6,<4"]',
      ]
      binary_name = 'echo_interpreter_version'
      binary_target = '{}:{}'.format(self.testproject, binary_name)
      pants_run = self._build_pex(binary_target, config=config, args=args)
      self.assert_success(pants_run, 'Failed to build {binary}.'.format(binary=binary_target))
      actual = self._popen_stdout(os.path.join(distdir, binary_name + '.pex'))
      self._assert_version_matches(actual, '3')
  @skip_unless_python3_present
  def test_select_3(self):
    # End-to-end check that a python 3 interpreter can be selected.
    self._test_version(PY_3)
  @skip_unless_python27_present
  def test_select_27(self):
    # End-to-end check that a python 2.7 interpreter can be selected.
    self._test_version(PY_27)
  def test_stale_interpreter_purge_integration(self):
    """A corrupted interpreter.info cache entry should be purged and
    repopulated on the next pants run."""
    target = '{}:{}'.format(self.testproject, 'echo_interpreter_version')
    config = {
      'python-setup': {
        'interpreter_constraints': ['CPython>=2.7,<4'],
      }
    }
    with self.temporary_workdir() as workdir:
      pants_run = self.run_pants_with_workdir(
        ["run", target],
        workdir=workdir,
        config=config
      )
      self.assert_success(pants_run)
      def _prepend_bad_interpreter_to_interpreter_path_file(path):
        # Replace the first line (the interpreter binary path) with a path
        # that does not exist, simulating a stale cache entry.
        with open(path, 'r') as fp:
          file_data = fp.readlines()
        file_data[0] = '/my/bogus/interpreter/python2.7'
        with open(path, 'w') as fp:
          fp.writelines(file_data)
      def _validate_good_interpreter_path_file(path):
        # True when the first line of interpreter.info resolves to a real
        # python interpreter binary.
        with open(path, 'r') as fp:
          lines = fp.readlines()
          binary = lines[0].strip()
          try:
            interpreter = PythonInterpreter.from_binary(binary)
            return True if interpreter else False
          except Executor.ExecutableNotFound:
            return False
      # Mangle interpreter.info.
      for path in glob.glob(os.path.join(workdir, 'pyprep/interpreter/*/interpreter.info')):
        _prepend_bad_interpreter_to_interpreter_path_file(path)
      pants_run = self.run_pants_with_workdir(
        ["run", target],
        workdir=workdir,
        config=config
      )
      self.assert_success(pants_run)
      for path in glob.glob(os.path.join(workdir, 'pyprep/interpreter/*/interpreter.info')):
        self.assertTrue(
          _validate_good_interpreter_path_file(path),
          'interpreter.info was not purged and repopulated properly: {}'.format(path)
        )
| 38.300518 | 98 | 0.696293 |
af22e1059de4204cf3f1ae2c777dba9417a01543 | 735 | py | Python | aws_elb_policies/aws_application_load_balancer_web_acl.py | designing-penguin/panther-analysis | 26034cea4504f43227f8d3789225f6ca7b35dfe0 | [
"Apache-2.0"
] | 1 | 2020-10-21T08:14:49.000Z | 2020-10-21T08:14:49.000Z | aws_elb_policies/aws_application_load_balancer_web_acl.py | designing-penguin/panther-analysis | 26034cea4504f43227f8d3789225f6ca7b35dfe0 | [
"Apache-2.0"
] | null | null | null | aws_elb_policies/aws_application_load_balancer_web_acl.py | designing-penguin/panther-analysis | 26034cea4504f43227f8d3789225f6ca7b35dfe0 | [
"Apache-2.0"
] | null | null | null | # MAPPINGS is a dictionary where the Key is an application load balancer ARN, and the
# Value is a WAF web ACL ID. For each Load Balancer ARN present in MAPPINGS,
# this rule verifies that the load balancer has the associated Web ACL
MAPPINGS = {
"TEST_LOAD_BALANCER_ARN": "TEST_WAF_WEB_ACL_ID",
}
def policy(resource):
# Check if a Web ACL is required for this load balancer
if resource['LoadBalancerArn'] not in MAPPINGS:
return True
# Check if a Web ACL exists for this load balancer
if resource['WebAcl'] is None:
return False
# Check that the correct Web ACL is assigned for this load balancer
return resource['WebAcl']['WebACLId'] == MAPPINGS[
resource['LoadBalancerArn']]
| 35 | 85 | 0.714286 |
53012b7f8657db5f38f1856c47074f066cb2620d | 14,188 | py | Python | moto/kms/responses.py | harveywi/moto | 3a5d857a60c3a2d140ed2c8adfe8dcaf71a4cac8 | [
"Apache-2.0"
] | null | null | null | moto/kms/responses.py | harveywi/moto | 3a5d857a60c3a2d140ed2c8adfe8dcaf71a4cac8 | [
"Apache-2.0"
] | null | null | null | moto/kms/responses.py | harveywi/moto | 3a5d857a60c3a2d140ed2c8adfe8dcaf71a4cac8 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
import base64
import json
import re
import six
from moto.core.responses import BaseResponse
from .models import kms_backends
from .exceptions import NotFoundException, ValidationException, AlreadyExistsException, NotAuthorizedException
# KMS aliases that AWS reserves for service default keys.  Customers cannot
# create or delete these; the response handlers below treat them specially.
reserved_aliases = [
    'alias/aws/ebs',
    'alias/aws/s3',
    'alias/aws/redshift',
    'alias/aws/rds',
]
class KmsResponse(BaseResponse):
    @property
    def parameters(self):
        # Request parameters, decoded from the JSON request body.
        return json.loads(self.body)
    @property
    def kms_backend(self):
        # The region-scoped KMS backend serving this request.
        return kms_backends[self.region]
def create_key(self):
policy = self.parameters.get('Policy')
key_usage = self.parameters.get('KeyUsage')
description = self.parameters.get('Description')
tags = self.parameters.get('Tags')
key = self.kms_backend.create_key(
policy, key_usage, description, tags, self.region)
return json.dumps(key.to_dict())
def update_key_description(self):
key_id = self.parameters.get('KeyId')
description = self.parameters.get('Description')
self.kms_backend.update_key_description(key_id, description)
return json.dumps(None)
def tag_resource(self):
key_id = self.parameters.get('KeyId')
tags = self.parameters.get('Tags')
self.kms_backend.tag_resource(key_id, tags)
return json.dumps({})
def list_resource_tags(self):
key_id = self.parameters.get('KeyId')
tags = self.kms_backend.list_resource_tags(key_id)
return json.dumps({
"Tags": tags,
"NextMarker": None,
"Truncated": False,
})
def describe_key(self):
key_id = self.parameters.get('KeyId')
try:
key = self.kms_backend.describe_key(
self.kms_backend.get_key_id(key_id))
except KeyError:
headers = dict(self.headers)
headers['status'] = 404
return "{}", headers
return json.dumps(key.to_dict())
def list_keys(self):
keys = self.kms_backend.list_keys()
return json.dumps({
"Keys": [
{
"KeyArn": key.arn,
"KeyId": key.id,
} for key in keys
],
"NextMarker": None,
"Truncated": False,
})
def create_alias(self):
alias_name = self.parameters['AliasName']
target_key_id = self.parameters['TargetKeyId']
if not alias_name.startswith('alias/'):
raise ValidationException('Invalid identifier')
if alias_name in reserved_aliases:
raise NotAuthorizedException()
if ':' in alias_name:
raise ValidationException('{alias_name} contains invalid characters for an alias'.format(alias_name=alias_name))
if not re.match(r'^[a-zA-Z0-9:/_-]+$', alias_name):
raise ValidationException("1 validation error detected: Value '{alias_name}' at 'aliasName' "
"failed to satisfy constraint: Member must satisfy regular "
"expression pattern: ^[a-zA-Z0-9:/_-]+$"
.format(alias_name=alias_name))
if self.kms_backend.alias_exists(target_key_id):
raise ValidationException('Aliases must refer to keys. Not aliases')
if self.kms_backend.alias_exists(alias_name):
raise AlreadyExistsException('An alias with the name arn:aws:kms:{region}:012345678912:{alias_name} '
'already exists'.format(region=self.region, alias_name=alias_name))
self.kms_backend.add_alias(target_key_id, alias_name)
return json.dumps(None)
def delete_alias(self):
alias_name = self.parameters['AliasName']
if not alias_name.startswith('alias/'):
raise ValidationException('Invalid identifier')
if not self.kms_backend.alias_exists(alias_name):
raise NotFoundException('Alias arn:aws:kms:{region}:012345678912:'
'{alias_name} is not found.'.format(region=self.region, alias_name=alias_name))
self.kms_backend.delete_alias(alias_name)
return json.dumps(None)
def list_aliases(self):
region = self.region
response_aliases = [
{
'AliasArn': u'arn:aws:kms:{region}:012345678912:{reserved_alias}'.format(region=region,
reserved_alias=reserved_alias),
'AliasName': reserved_alias
} for reserved_alias in reserved_aliases
]
backend_aliases = self.kms_backend.get_all_aliases()
for target_key_id, aliases in backend_aliases.items():
for alias_name in aliases:
response_aliases.append({
'AliasArn': u'arn:aws:kms:{region}:012345678912:{alias_name}'.format(region=region,
alias_name=alias_name),
'AliasName': alias_name,
'TargetKeyId': target_key_id,
})
return json.dumps({
'Truncated': False,
'Aliases': response_aliases,
})
def enable_key_rotation(self):
key_id = self.parameters.get('KeyId')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
self.kms_backend.enable_key_rotation(key_id)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps(None)
def disable_key_rotation(self):
key_id = self.parameters.get('KeyId')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
self.kms_backend.disable_key_rotation(key_id)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps(None)
def get_key_rotation_status(self):
key_id = self.parameters.get('KeyId')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
rotation_enabled = self.kms_backend.get_key_rotation_status(key_id)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps({'KeyRotationEnabled': rotation_enabled})
def put_key_policy(self):
key_id = self.parameters.get('KeyId')
policy_name = self.parameters.get('PolicyName')
policy = self.parameters.get('Policy')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
_assert_default_policy(policy_name)
try:
self.kms_backend.put_key_policy(key_id, policy)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps(None)
def get_key_policy(self):
key_id = self.parameters.get('KeyId')
policy_name = self.parameters.get('PolicyName')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
_assert_default_policy(policy_name)
try:
return json.dumps({'Policy': self.kms_backend.get_key_policy(key_id)})
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
def list_key_policies(self):
key_id = self.parameters.get('KeyId')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
self.kms_backend.describe_key(key_id)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps({'Truncated': False, 'PolicyNames': ['default']})
def encrypt(self):
"""
We perform no encryption, we just encode the value as base64 and then
decode it in decrypt().
"""
value = self.parameters.get("Plaintext")
if isinstance(value, six.text_type):
value = value.encode('utf-8')
return json.dumps({"CiphertextBlob": base64.b64encode(value).decode("utf-8"), 'KeyId': 'key_id'})
def decrypt(self):
# TODO refuse decode if EncryptionContext is not the same as when it was encrypted / generated
value = self.parameters.get("CiphertextBlob")
try:
return json.dumps({"Plaintext": base64.b64decode(value).decode("utf-8"), 'KeyId': 'key_id'})
except UnicodeDecodeError:
# Generate data key will produce random bytes which when decrypted is still returned as base64
return json.dumps({"Plaintext": value})
def disable_key(self):
key_id = self.parameters.get('KeyId')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
self.kms_backend.disable_key(key_id)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps(None)
def enable_key(self):
key_id = self.parameters.get('KeyId')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
self.kms_backend.enable_key(key_id)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps(None)
def cancel_key_deletion(self):
key_id = self.parameters.get('KeyId')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
self.kms_backend.cancel_key_deletion(key_id)
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
return json.dumps({'KeyId': key_id})
def schedule_key_deletion(self):
key_id = self.parameters.get('KeyId')
if self.parameters.get('PendingWindowInDays') is None:
pending_window_in_days = 30
else:
pending_window_in_days = self.parameters.get('PendingWindowInDays')
_assert_valid_key_id(self.kms_backend.get_key_id(key_id))
try:
return json.dumps({
'KeyId': key_id,
'DeletionDate': self.kms_backend.schedule_key_deletion(key_id, pending_window_in_days)
})
except KeyError:
raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
"{key_id}' does not exist".format(region=self.region, key_id=key_id))
def generate_data_key(self):
key_id = self.parameters.get('KeyId')
encryption_context = self.parameters.get('EncryptionContext')
number_of_bytes = self.parameters.get('NumberOfBytes')
key_spec = self.parameters.get('KeySpec')
grant_tokens = self.parameters.get('GrantTokens')
# Param validation
if key_id.startswith('alias'):
if self.kms_backend.get_key_id_from_alias(key_id) is None:
raise NotFoundException('Alias arn:aws:kms:{region}:012345678912:{alias_name} is not found.'.format(
region=self.region, alias_name=key_id))
else:
if self.kms_backend.get_key_id(key_id) not in self.kms_backend.keys:
raise NotFoundException('Invalid keyId')
if number_of_bytes and (number_of_bytes > 1024 or number_of_bytes < 0):
raise ValidationException("1 validation error detected: Value '2048' at 'numberOfBytes' failed "
"to satisfy constraint: Member must have value less than or "
"equal to 1024")
if key_spec and key_spec not in ('AES_256', 'AES_128'):
raise ValidationException("1 validation error detected: Value 'AES_257' at 'keySpec' failed "
"to satisfy constraint: Member must satisfy enum value set: "
"[AES_256, AES_128]")
if not key_spec and not number_of_bytes:
raise ValidationException("Please specify either number of bytes or key spec.")
if key_spec and number_of_bytes:
raise ValidationException("Please specify either number of bytes or key spec.")
plaintext, key_arn = self.kms_backend.generate_data_key(key_id, encryption_context,
number_of_bytes, key_spec, grant_tokens)
plaintext = base64.b64encode(plaintext).decode()
return json.dumps({
'CiphertextBlob': plaintext,
'Plaintext': plaintext,
'KeyId': key_arn # not alias
})
def generate_data_key_without_plaintext(self):
result = json.loads(self.generate_data_key())
del result['Plaintext']
return json.dumps(result)
def _assert_valid_key_id(key_id):
if not re.match(r'^[A-F0-9]{8}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{12}$', key_id, re.IGNORECASE):
raise NotFoundException('Invalid keyId')
def _assert_default_policy(policy_name):
if policy_name != 'default':
raise NotFoundException("No such policy exists")
| 40.770115 | 124 | 0.607697 |
b819a9a05188ac717b0ad0158248386f61b10ce0 | 33,283 | py | Python | app/create_db.py | akram/haproxy-wi | 5f27f86df07e9392036805c2a79134e9a140f948 | [
"Apache-2.0"
] | 862 | 2018-04-10T03:53:03.000Z | 2021-05-01T02:05:44.000Z | app/create_db.py | akram/haproxy-wi | 5f27f86df07e9392036805c2a79134e9a140f948 | [
"Apache-2.0"
] | 250 | 2018-04-19T02:59:34.000Z | 2021-04-30T19:08:15.000Z | app/create_db.py | akram/haproxy-wi | 5f27f86df07e9392036805c2a79134e9a140f948 | [
"Apache-2.0"
] | 184 | 2018-02-10T09:33:38.000Z | 2021-04-29T14:38:47.000Z | #!/usr/bin/env python3
import distro
from sql import out_error
from db_model import *
from funct import check_ver
def default_values():
    """Populate the database with its initial settings, users, roles and group.

    Every insert uses ``on_conflict_ignore``, so running this repeatedly is
    safe: rows that already exist are never overwritten.
    """
    apache_dir = 'apache2' if distro.id() == 'ubuntu' else 'httpd'

    # (param, value, section, description) — every default belongs to group '1'.
    default_settings = [
        ('time_zone', 'UTC', 'main', 'Time Zone'),
        ('proxy', '', 'main', 'IP address and port of the proxy server. Use proto://ip:port'),
        ('session_ttl', '5', 'main', 'TTL for a user session (in days)'),
        ('token_ttl', '5', 'main', 'TTL for a user token (in days)'),
        ('tmp_config_path', '/tmp/', 'main',
         'Path to the temporary directory. A valid path should be specified as the value of this parameter. The directory must be owned by the user specified in SSH settings'),
        ('cert_path', '/etc/ssl/certs/', 'main',
         'Path to SSL dir. Folder owner must be a user which set in the SSH settings. Path must exist'),
        ('ssl_local_path', 'certs', 'main',
         'Path to the directory with the saved local SSL certificates. The value of this parameter is specified as a relative path beginning with $HOME_ROXY_WI/app/'),
        ('lists_path', 'lists', 'main',
         'Path to the black and the wild list. The value of this paramer should be specified as a relative path beginning with $HOME_ROXY-WI'),
        ('local_path_logs', '/var/log/haproxy.log', 'logs', 'The default local path for saving logs'),
        ('syslog_server_enable', '0', 'logs', 'Enable getting logs from a syslog server; (0 - no, 1 - yes)'),
        ('syslog_server', '', 'logs', 'IP address of the syslog_server'),
        ('log_time_storage', '14', 'logs', 'Retention period for user activity logs (in days)'),
        ('stats_user', 'admin', 'haproxy', 'Username for accessing HAProxy stats page'),
        ('stats_password', 'password', 'haproxy', 'Password for accessing HAProxy stats page'),
        ('stats_port', '8085', 'haproxy', 'Port for HAProxy stats page'),
        ('stats_page', 'stats', 'haproxy', 'URI for HAProxy stats page'),
        ('haproxy_dir', '/etc/haproxy', 'haproxy', 'Path to the HAProxy directory'),
        ('haproxy_config_path', '/etc/haproxy/haproxy.cfg', 'haproxy', 'Path to the HAProxy configuration file'),
        ('server_state_file', '/etc/haproxy/haproxy.state', 'haproxy', 'Path to the HAProxy state file'),
        ('haproxy_sock', '/var/run/haproxy.sock', 'haproxy', 'Socket port for HAProxy'),
        ('haproxy_sock_port', '1999', 'haproxy', 'HAProxy sock port'),
        ('apache_log_path', '/var/log/' + apache_dir + '/', 'logs', 'Path to Apache logs'),
        ('nginx_path_error_logs', '/var/log/nginx/error.log', 'nginx', 'Nginx error log'),
        ('nginx_stats_user', 'admin', 'nginx', 'Username for accessing Nginx stats page'),
        ('nginx_stats_password', 'password', 'nginx', 'Password for Stats web page Ngin'),
        ('nginx_stats_port', '8086', 'nginx', 'Stats port for web page Nginx'),
        ('nginx_stats_page', 'stats', 'nginx', 'URI Stats for web page Nginx'),
        ('nginx_dir', '/etc/nginx/conf.d/', 'nginx', 'Path to the Nginx directory'),
        ('nginx_config_path', '/etc/nginx/conf.d/default.conf', 'nginx', 'Path to the Nginx configuration file'),
        ('ldap_enable', '0', 'ldap', 'Enable LDAP (1 - yes, 0 - no)'),
        ('ldap_server', '', 'ldap', 'IP address of the LDAP server'),
        ('ldap_port', '389', 'ldap', 'LDAP port (port 389 or 636 is used by default)'),
        ('ldap_user', '', 'ldap', 'LDAP username. Format: user@domain.com'),
        ('ldap_password', '', 'ldap', 'LDAP password'),
        ('ldap_base', '', 'ldap', 'Base domain. Example: dc=domain, dc=com'),
        ('ldap_domain', '', 'ldap', 'LDAP domain for logging in'),
        ('ldap_class_search', 'user', 'ldap', 'Class for searching the user'),
        ('ldap_user_attribute', 'sAMAccountName', 'ldap', 'Attribute to search users by'),
        ('ldap_search_field', 'mail', 'ldap', 'User\'s email address'),
        ('ldap_type', '0', 'ldap', 'Use LDAPS (1 - yes, 0 - no)'),
    ]
    data_source = [
        {'param': param, 'value': value, 'section': section, 'desc': desc, 'group': '1'}
        for param, value, section, desc in default_settings
    ]
    try:
        Setting.insert_many(data_source).on_conflict_ignore().execute()
    except Exception as e:
        out_error(e)

    # Built-in accounts; passwords are stored as MD5 hashes.
    data_source = [
        {'username': 'admin', 'email': 'admin@localhost', 'password': '21232f297a57a5a743894a0e4a801fc3', 'role': 'superAdmin', 'groups': '1'},
        {'username': 'editor', 'email': 'editor@localhost', 'password': '5aee9dbd2a188839105073571bee1b1f', 'role': 'admin', 'groups': '1'},
        {'username': 'guest', 'email': 'guest@localhost', 'password': '084e0343a0486ff05530df6c705c8bb4', 'role': 'guest', 'groups': '1'}
    ]
    try:
        User.insert_many(data_source).on_conflict_ignore().execute()
    except Exception as e:
        out_error(e)

    # Built-in roles, ordered from most to least privileged.
    data_source = [
        {'name': 'superAdmin', 'description': 'Has the highest level of administrative permissions and controls the actions of all other users'},
        {'name': 'admin', 'description': 'Has access everywhere except the Admin area'},
        {'name': 'editor', 'description': 'Has the same rights as the admin but has no access to the Servers page'},
        {'name': 'guest', 'description': 'Read-only access'}
    ]
    try:
        Role.insert_many(data_source).on_conflict_ignore().execute()
    except Exception as e:
        out_error(e)

    try:
        Groups.insert(name='All', description='All servers are included in this group by default').on_conflict_ignore().execute()
    except Exception as e:
        out_error(e)
def update_db_v_3_4_5_22():
    """Record the baseline tracked version (3.4.5.2) in the version table."""
    try:
        Version.insert(version='3.4.5.2').execute()
    except Exception as err:
        # Non-fatal: most likely the row already exists.
        print('Cannot insert version %s' % err)
def update_db_v_41(**kwargs):
    """Add the ``nginx`` flag column to the servers table (v4.0.0 step).

    Keyword Args:
        silent (int): when 1, suppress output on the error path.
    """
    ddl = """
    ALTER TABLE `servers` ADD COLUMN nginx INTEGER NOT NULL DEFAULT 0;
    """
    cursor = conn.cursor()
    try:
        cursor.execute(ddl)
    except Exception as e:
        if kwargs.get('silent') != 1:
            # SQLite and MySQL phrase an already-applied migration differently.
            already_applied = (e.args[0] == 'duplicate column name: nginx'
                               or str(e) == '(1060, "Duplicate column name \'nginx\'")')
            if already_applied:
                print('Updating... one more for version 4.0.0')
            else:
                print("An error occurred:", e)
    else:
        print("Updating... one more for version 4.0.0")
def update_db_v_42(**kwargs):
    """Add the ``haproxy`` flag column to the servers table (v4.2.3 step).

    Keyword Args:
        silent (int): when 1, suppress output on the error path.
    """
    ddl = """
    ALTER TABLE `servers` ADD COLUMN haproxy INTEGER NOT NULL DEFAULT 0;
    """
    cursor = conn.cursor()
    try:
        cursor.execute(ddl)
    except Exception as e:
        if kwargs.get('silent') != 1:
            # SQLite and MySQL phrase an already-applied migration differently.
            already_applied = (e.args[0] == 'duplicate column name: haproxy'
                               or str(e) == '(1060, "Duplicate column name \'haproxy\'")')
            if already_applied:
                print('Updating... go to version 4.2.3')
            else:
                print("An error occurred:", e)
    else:
        print("Updating... go to version 4.2.3")
def update_db_v_4_3_0(**kwargs):
    """Migrate user→group membership into the user_groups join table.

    Copies (user_id, groups) pairs from the legacy ``user.groups`` column
    into ``UserGroups``; ``on_conflict_ignore`` keeps reruns harmless.

    Keyword Args:
        silent (int): when 1, suppress error output.
    """
    try:
        UserGroups.insert_from(
            User.select(User.user_id, User.groups),
            fields=[UserGroups.user_id, UserGroups.user_group_id]
        ).on_conflict_ignore().execute()
    except Exception as e:
        if kwargs.get('silent') != 1:
            # BUGFIX: the old handler matched a "duplicate column name: haproxy"
            # message copy-pasted from update_db_v_42(); an INSERT...SELECT can
            # never raise that, so it mislabeled real failures. Just report them.
            print("An error occurred:", e)
def update_db_v_4_3_1(**kwargs):
    """Add the ``pos`` ordering column to the servers table (v4.3.1 step).

    Keyword Args:
        silent (int): when 1, suppress output on the error path.
    """
    ddl = """
    ALTER TABLE `servers` ADD COLUMN pos INTEGER NOT NULL DEFAULT 0;
    """
    cursor = conn.cursor()
    try:
        cursor.execute(ddl)
    except Exception as e:
        if kwargs.get('silent') != 1:
            # SQLite and MySQL phrase an already-applied migration differently.
            already_applied = (e.args[0] == 'duplicate column name: pos'
                               or str(e) == '(1060, "Duplicate column name \'pos\'")')
            if already_applied:
                print('Updating... go to version 4.3.2')
            else:
                print("An error occurred:", e)
    else:
        print("DB has been updated to 4.3.1")
def update_db_v_4_4_2_1(**kwargs):
    """Add the per-group ``group`` column to the settings table (v4.4.2 step).

    Keyword Args:
        silent (int): when 1, suppress output on the error path.
    """
    ddl = """ALTER TABLE `settings` ADD COLUMN `group` INTEGER NOT NULL DEFAULT 1;"""
    cursor = conn.cursor()
    try:
        cursor.execute(ddl)
    except Exception as e:
        if kwargs.get('silent') != 1:
            # SQLite and MySQL phrase an already-applied migration differently.
            already_applied = (e.args[0] == 'duplicate column name: group'
                               or str(e) == '(1060, "Duplicate column name \'group\'")')
            if already_applied:
                print('Updating... go to version 4.4.2')
            else:
                print("An error occurred:", e)
    else:
        print("DB has been updated to 4.4.2")
def update_db_v_4_5_1(**kwargs):
    # Rename and renumber the built-in roles to the 4.5.0 scheme and remap
    # existing users, but only when the new 'superAdmin' role is not yet
    # present (i.e. the pre-4.5.0 layout is still in place).
    cursor = conn.cursor()
    sql = """ select name from role where name = 'superAdmin';"""
    try:
        cursor.execute(sql)
    except Exception as e:
        out_error(e)
    else:
        role = cursor.fetchall()
        if not role:
            # Statement order matters: free up id 3 before inserting the new
            # 'editor' role, and rename roles before remapping users.
            sql = list()
            sql.append("update role set name = 'superAdmin' where id = '1';")
            sql.append("update role set name = 'admin', `description` = 'Has access everywhere except the Admin area' where id = '2';")
            sql.append("update role set id = '4' where id = '3';")
            sql.append("INSERT INTO role (id, name, `description`) values('3', 'editor', 'Has the same as the admin except the Servers page');")
            sql.append("update user set role = 'superAdmin' where role = 'admin';")
            sql.append("update user set role = 'admin' where role = 'editor';")
            for i in sql:
                try:
                    cursor.execute(i)
                except:
                    # Best-effort: individual statements may legitimately fail
                    # on partially migrated databases.
                    pass
                else:
                    if kwargs.get('silent') != 1:
                        print('DB has been updated to 4.5.0')
def update_db_v_4_5_4(**kwargs):
    """Add nginx_active/firewall_enable server columns and drop the old
    firewall_enable setting (v4.5.7 step).

    Keyword Args:
        silent (int): when 1, suppress progress output.
    """
    statements = [
        "ALTER TABLE `servers` ADD COLUMN `nginx_active` INTEGER NOT NULL DEFAULT 0;",
        "ALTER TABLE `servers` ADD COLUMN `firewall_enable` INTEGER NOT NULL DEFAULT 0;",
        "delete from settings where param = 'firewall_enable';",
    ]
    cursor = conn.cursor()
    for statement in statements:
        try:
            cursor.execute(statement)
        except Exception:
            # Best-effort: columns may already exist on a rerun.
            pass
        else:
            if kwargs.get('silent') != 1:
                print('Updating... go to version 4.5.7')
def update_db_v_4_5_7(**kwargs):
    """Add the ``nginx_alert`` column to the servers table (v4.5.7 step).

    Keyword Args:
        silent (int): when 1, suppress output on the error path.
    """
    cursor = conn.cursor()
    sql = """
    ALTER TABLE `servers` ADD COLUMN nginx_alert INTEGER NOT NULL DEFAULT 0;
    """
    try:
        cursor.execute(sql)
    except Exception as e:
        if kwargs.get('silent') != 1:
            # SQLite and MySQL phrase an already-applied migration differently.
            if e.args[0] == 'duplicate column name: nginx_alert' or str(e) == '(1060, "Duplicate column name \'nginx_alert\'")':
                print('Updating... go to version 4.5.8')
            else:
                print("An error occurred:", e)
    else:
        # BUGFIX: the success message used to say "4.3.1" (copy-pasted from
        # update_db_v_4_3_1); this migration belongs to 4.5.7.
        print("DB has been updated to 4.5.7")
def update_db_v_4_5_8_1(**kwargs):
    """Seed the GeoIP lookup table with ISO 3166-1 alpha-2 country codes.

    ``on_conflict_ignore`` makes reruns harmless.

    Keyword Args:
        silent (int): when 1, suppress the success message.
    """
    countries = [
        ('RW', 'Rwanda'), ('SO', 'Somalia'), ('YE', 'Yemen'), ('IQ', 'Iraq'),
        ('SA', 'Saudi Arabia'), ('IR', 'Iran'), ('CY', 'Cyprus'), ('TZ', 'Tanzania'),
        ('SY', 'Syria'), ('AM', 'Armenia'), ('KE', 'Kenya'), ('CD', 'DR Congo'),
        ('DJ', 'Djibouti'), ('UG', 'Uganda'), ('CF', 'Central African Republic'),
        ('SC', 'Seychelles'), ('JO', 'Hashemite Kingdom of Jordan'), ('LB', 'Lebanon'),
        ('KW', 'Kuwait'), ('OM', 'Oman'), ('QA', 'Qatar'), ('BH', 'Bahrain'),
        ('AE', 'United Arab Emirates'), ('IL', 'Israel'), ('TR', 'Turkey'),
        ('ET', 'Ethiopia'), ('ER', 'Eritrea'), ('EG', 'Egypt'), ('SD', 'Sudan'),
        ('GR', 'Greece'), ('BI', 'Burundi'), ('EE', 'Estonia'), ('LV', 'Latvia'),
        ('AZ', 'Azerbaijan'), ('LT', 'Republic of Lithuania'), ('SJ', 'Svalbard and Jan Mayen'),
        ('GE', 'Georgia'), ('MD', 'Republic of Moldova'), ('BY', 'Belarus'),
        ('FI', 'Finland'), ('AX', 'Åland'), ('UA', 'Ukraine'), ('MK', 'North Macedonia'),
        ('HU', 'Hungary'), ('BG', 'Bulgaria'), ('AL', 'Albania'), ('PL', 'Poland'),
        ('RO', 'Romania'), ('XK', 'Kosovo'), ('ZW', 'Zimbabwe'), ('ZM', 'Zambia'),
        ('KM', 'Comoros'), ('MW', 'Malawi'), ('LS', 'Lesotho'), ('BW', 'Botswana'),
        ('MU', 'Mauritius'), ('SZ', 'Eswatini'), ('RE', 'Réunion'), ('ZA', 'South Africa'),
        ('YT', 'Mayotte'), ('MZ', 'Mozambique'), ('MG', 'Madagascar'), ('AF', 'Afghanistan'),
        ('PK', 'Pakistan'), ('BD', 'Bangladesh'), ('TM', 'Turkmenistan'), ('TJ', 'Tajikistan'),
        ('LK', 'Sri Lanka'), ('BT', 'Bhutan'), ('IN', 'India'), ('MV', 'Maldives'),
        ('IO', 'British Indian Ocean Territory'), ('NP', 'Nepal'), ('MM', 'Myanmar'),
        ('UZ', 'Uzbekistan'), ('KZ', 'Kazakhstan'), ('KG', 'Kyrgyzstan'),
        ('TF', 'French Southern Territories'), ('HM', 'Heard Island and McDonald Islands'),
        ('CC', 'Cocos [Keeling] Islands'), ('PW', 'Palau'), ('VN', 'Vietnam'),
        ('TH', 'Thailand'), ('ID', 'Indonesia'), ('LA', 'Laos'), ('TW', 'Taiwan'),
        ('PH', 'Philippines'), ('MY', 'Malaysia'), ('CN', 'China'), ('HK', 'Hong Kong'),
        ('BN', 'Brunei'), ('MO', 'Macao'), ('KH', 'Cambodia'), ('KR', 'South Korea'),
        ('JP', 'Japan'), ('KP', 'North Korea'), ('SG', 'Singapore'), ('CK', 'Cook Islands'),
        ('TL', 'East Timor'), ('RU', 'Russia'), ('MN', 'Mongolia'), ('AU', 'Australia'),
        ('CX', 'Christmas Island'), ('MH', 'Marshall Islands'),
        ('FM', 'Federated States of Micronesia'), ('PG', 'Papua New Guinea'),
        ('SB', 'Solomon Islands'), ('TV', 'Tuvalu'), ('NR', 'Nauru'), ('VU', 'Vanuatu'),
        ('NC', 'New Caledonia'), ('NF', 'Norfolk Island'), ('NZ', 'New Zealand'),
        ('FJ', 'Fiji'), ('LY', 'Libya'), ('CM', 'Cameroon'), ('SN', 'Senegal'),
        ('CG', 'Congo Republic'), ('PT', 'Portugal'), ('LR', 'Liberia'),
        ('CI', 'Ivory Coast'), ('GH', 'Ghana'), ('GQ', 'Equatorial Guinea'),
        ('NG', 'Nigeria'), ('BF', 'Burkina Faso'), ('TG', 'Togo'), ('GW', 'Guinea-Bissau'),
        ('MR', 'Mauritania'), ('BJ', 'Benin'), ('GA', 'Gabon'), ('SL', 'Sierra Leone'),
        ('ST', 'São Tomé and Príncipe'), ('GI', 'Gibraltar'), ('GM', 'Gambia'),
        ('GN', 'Guinea'), ('TD', 'Chad'), ('NE', 'Niger'), ('ML', 'Mali'),
        ('EH', 'Western Sahara'), ('TN', 'Tunisia'), ('ES', 'Spain'), ('MA', 'Morocco'),
        ('MT', 'Malta'), ('DZ', 'Algeria'), ('FO', 'Faroe Islands'), ('DK', 'Denmark'),
        ('IS', 'Iceland'), ('GB', 'United Kingdom'), ('CH', 'Switzerland'),
        ('SE', 'Sweden'), ('NL', 'Netherlands'), ('AT', 'Austria'), ('BE', 'Belgium'),
        ('DE', 'Germany'), ('LU', 'Luxembourg'), ('IE', 'Ireland'), ('MC', 'Monaco'),
        ('FR', 'France'), ('AD', 'Andorra'), ('LI', 'Liechtenstein'), ('JE', 'Jersey'),
        ('IM', 'Isle of Man'), ('GG', 'Guernsey'), ('SK', 'Slovakia'), ('CZ', 'Czechia'),
        ('NO', 'Norway'), ('VA', 'Vatican City'), ('SM', 'San Marino'), ('IT', 'Italy'),
        ('SI', 'Slovenia'), ('ME', 'Montenegro'), ('HR', 'Croatia'),
        ('BA', 'Bosnia and Herzegovina'), ('AO', 'Angola'), ('NA', 'Namibia'),
        ('SH', 'Saint Helena'), ('BV', 'Bouvet Island'), ('BB', 'Barbados'),
        ('CV', 'Cabo Verde'), ('GY', 'Guyana'), ('GF', 'French Guiana'),
        ('SR', 'Suriname'), ('PM', 'Saint Pierre and Miquelon'), ('GL', 'Greenland'),
        ('PY', 'Paraguay'), ('UY', 'Uruguay'), ('BR', 'Brazil'), ('FK', 'Falkland Islands'),
        ('GS', 'South Georgia and the South Sandwich Islands'), ('JM', 'Jamaica'),
        ('DO', 'Dominican Republic'), ('CU', 'Cuba'), ('MQ', 'Martinique'),
        ('BS', 'Bahamas'), ('BM', 'Bermuda'), ('AI', 'Anguilla'),
        ('TT', 'Trinidad and Tobago'), ('KN', 'St Kitts and Nevis'), ('DM', 'Dominica'),
        ('AG', 'Antigua and Barbuda'), ('LC', 'Saint Lucia'),
        ('TC', 'Turks and Caicos Islands'), ('AW', 'Aruba'),
        ('VG', 'British Virgin Islands'), ('VC', 'Saint Vincent and the Grenadines'),
        ('MS', 'Montserrat'), ('MF', 'Saint Martin'), ('BL', 'Saint Barthélemy'),
        ('GP', 'Guadeloupe'), ('GD', 'Grenada'), ('KY', 'Cayman Islands'),
        ('BZ', 'Belize'), ('SV', 'El Salvador'), ('GT', 'Guatemala'), ('HN', 'Honduras'),
        ('NI', 'Nicaragua'), ('CR', 'Costa Rica'), ('VE', 'Venezuela'), ('EC', 'Ecuador'),
        ('CO', 'Colombia'), ('PA', 'Panama'), ('HT', 'Haiti'), ('AR', 'Argentina'),
        ('CL', 'Chile'), ('BO', 'Bolivia'), ('PE', 'Peru'), ('MX', 'Mexico'),
        ('PF', 'French Polynesia'), ('PN', 'Pitcairn Islands'), ('KI', 'Kiribati'),
        ('TK', 'Tokelau'), ('TO', 'Tonga'), ('WF', 'Wallis and Futuna'), ('WS', 'Samoa'),
        ('NU', 'Niue'), ('MP', 'Northern Mariana Islands'), ('GU', 'Guam'),
        ('PR', 'Puerto Rico'), ('VI', 'U.S. Virgin Islands'),
        ('UM', 'U.S. Minor Outlying Islands'), ('AS', 'American Samoa'),
        ('CA', 'Canada'), ('US', 'United States'), ('PS', 'Palestine'), ('RS', 'Serbia'),
        ('AQ', 'Antarctica'), ('SX', 'Sint Maarten'), ('CW', 'Curaçao'),
        ('BQ', 'Bonaire'), ('SS', 'South Sudan'),
    ]
    data_source = [{'code': code, 'name': name} for code, name in countries]
    try:
        GeoipCodes.insert_many(data_source).on_conflict_ignore().execute()
    except Exception as e:
        print(str(e))
    else:
        if kwargs.get('silent') != 1:
            print('Updating... go to version 4.5.6')
def update_db_v_4_5_8_2(**kwargs):
    # Add a per-group 'maxmind_key' setting for every existing group.
    groups = ''
    query = Groups.select()
    try:
        query_res = query.execute()
    except Exception as e:
        out_error(e)
    else:
        groups = query_res
    # If the select failed, 'groups' stays '' and the loop below is a no-op.
    for g in groups:
        try:
            Setting.insert(param='maxmind_key',
                           value='',
                           section='haproxy',
                           desc='License key for downloading to GeoLite2 DB. You can create it on maxmind.com',
                           group=g.group_id).execute()
        except Exception as e:
            if kwargs.get('silent') != 1:
                # Duplicate-row errors are phrased differently by SQLite and
                # MySQL; all three variants mean "already migrated" and are
                # silently ignored.
                if (
                        str(e) == 'columns param, group are not unique' or
                        str(e) == '(1062, "Duplicate entry \'maxmind_key-1\' for key \'param\'")' or
                        str(e) == 'UNIQUE constraint failed: settings.param, settings.group'
                ):
                    pass
                else:
                    print("An error occurred:", e)
        else:
            print("Updating... groups")
def update_db_v_4_5_9(**kwargs):
    """Add the monitoring check-interval settings (SMON/Checker/Port scanner).

    Keyword Args:
        silent (int): when 1, suppress all output.
    """
    new_settings = [
        {'param': 'smon_check_interval', 'value': '1', 'section': 'monitoring', 'desc': 'Check interval for SMON (in minutes)',
         'group': '1'},
        {'param': 'checker_check_interval', 'value': '1', 'section': 'monitoring',
         'desc': 'Check interval for Checker (in minutes)', 'group': '1'},
        {'param': 'port_scan_interval', 'value': '5', 'section': 'monitoring',
         'desc': 'Check interval for Port scanner (in minutes)', 'group': '1'},
    ]
    verbose = kwargs.get('silent') != 1
    try:
        Setting.insert_many(new_settings).on_conflict_ignore().execute()
    except Exception as e:
        # A "not unique" error just means the settings already exist.
        if verbose and str(e) != 'columns param, group are not unique':
            print("An error occurred:", e)
    else:
        if verbose:
            print('Updating... DB has been updated to version 4.5.9')
def update_db_v_5_0_1(**kwargs):
    """Add provisioning columns to `provisioned_servers` (migration step 5.0.1).

    Each ALTER TABLE is attempted independently; failures (typically
    "duplicate column" on re-runs) are ignored so the migration stays
    idempotent.
    """
    cursor = conn.cursor()
    sql = list()
    sql.append("alter table provisioned_servers add column project VARCHAR ( 64 )")
    sql.append("alter table provisioned_servers add column network_name VARCHAR ( 64 )")
    sql.append("alter table provisioned_servers add column volume_type VARCHAR ( 64 )")
    sql.append("alter table provisioned_servers add column name_template VARCHAR ( 64 )")
    for i in sql:
        try:
            cursor.execute(i)
        except Exception:
            # Bug fix: a bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; only ordinary errors should be ignored here.
            pass
    else:
        # for/else with no break: always runs after the loop completes.
        if kwargs.get('silent') != 1:
            print('Updating... DB has been updated to version 5.0.1')
def update_db_v_5_1_2(**kwargs):
    """Seed SMON/Checker history-retention settings (migration step 5.1.2)."""
    retention_settings = [
        {'param': 'smon_keep_history_range', 'value': '14', 'section': 'monitoring',
         'desc': 'Retention period for SMON history', 'group': '1'},
        {'param': 'checker_keep_history_range', 'value': '14', 'section': 'monitoring',
         'desc': 'Retention period for Checker history', 'group': '1'}
    ]
    try:
        Setting.insert_many(retention_settings).on_conflict_ignore().execute()
    except Exception as e:
        # A duplicate-key error simply means the settings already exist.
        if kwargs.get('silent') != 1 and str(e) != 'columns param, group are not unique':
            print("An error occurred:", e)
    else:
        if kwargs.get('silent') != 1:
            print('Updating... DB has been updated to version 5.1.2')
def update_db_v_5_1_3(**kwargs):
    """Add the `servers.protected` column (migration step 5.1.3)."""
    duplicate_errors = (
        'duplicate column name: protected',
        '(1060, "Duplicate column name \'protected\'")',
    )
    cursor = conn.cursor()
    try:
        cursor.execute("""ALTER TABLE `servers` ADD COLUMN protected INTEGER NOT NULL DEFAULT 0;""")
    except Exception as e:
        if kwargs.get('silent') != 1:
            if str(e) in duplicate_errors:
                # Column already exists: the migration was applied earlier.
                print('Updating... DB has been updated to version 5.1.3')
            else:
                print("An error occurred:", e)
    else:
        print("DB has been updated to version 5.1.3")
def update_db_v_5_2_0(**kwargs):
    """Seed the Port scanner history-retention setting (migration step 5.2.0)."""
    duplicate_errors = (
        'columns param, group are not unique',
        '(1062, "Duplicate entry \'portscanner_keep_history_range-1\' for key \'param\'")',
        'UNIQUE constraint failed: settings.param, settings.group',
    )
    try:
        Setting.insert(param='portscanner_keep_history_range', value=14, section='monitoring',
                       desc='Retention period for Port scanner history').execute()
    except Exception as e:
        # Duplicate-key errors mean the setting already exists; ignore them.
        if kwargs.get('silent') != 1 and str(e) not in duplicate_errors:
            print("An error occurred:", e)
    else:
        if kwargs.get('silent') != 1:
            print('Updating... DB has been updated to version 5.2.0')
def update_db_v_5_2_4(**kwargs):
    """Add the `user.user_services` column (migration step 5.2.4)."""
    duplicate_errors = (
        'duplicate column name: user_services',
        '(1060, "Duplicate column name \'user_services\'")',
    )
    cursor = conn.cursor()
    try:
        cursor.execute("""ALTER TABLE `user` ADD COLUMN user_services varchar(20) DEFAULT '1 2 3';""")
    except Exception as e:
        if kwargs.get('silent') != 1:
            if str(e) in duplicate_errors:
                # Column already exists: the migration was applied earlier.
                print('Updating... DB has been updated to version 5.2.4')
            else:
                print("An error occurred:", e)
    else:
        print("Updating... DB has been updated to version 5.2.4")
def update_db_v_5_2_4_1(**kwargs):
    """Add the `servers.nginx_metrics` column (migration step 5.2.4-1)."""
    duplicate_errors = (
        'duplicate column name: nginx_metrics',
        '(1060, "Duplicate column name \'nginx_metrics\'")',
    )
    cursor = conn.cursor()
    try:
        cursor.execute("""ALTER TABLE `servers` ADD COLUMN nginx_metrics integer DEFAULT 0;""")
    except Exception as e:
        if kwargs.get('silent') != 1:
            if str(e) in duplicate_errors:
                # Column already exists: the migration was applied earlier.
                print('Updating... DB has been updated to version 5.2.4-1')
            else:
                print("An error occurred:", e)
    else:
        print("Updating... DB has been updated to version 5.2.4-1")
def update_db_v_5_2_5(**kwargs):
    """Rename the 'editor' role to 'user' in the Role table (migration 5.2.5).

    Bug fix: the duplicate-name branch previously reported version
    "5.2.5-1" (copy/paste from the next migration); it now reports 5.2.5.
    """
    query = Role.update(name='user').where(Role.name == 'editor')
    try:
        query.execute()
    except Exception as e:
        if kwargs.get('silent') != 1:
            if str(e) == 'column name is not unique' or str(e) == '(1060, "column name is not unique")':
                # The rename was already applied on a previous run.
                print('Updating... DB has been updated to version 5.2.5')
            else:
                print("An error occurred:", e)
    else:
        if kwargs.get('silent') != 1:
            print("Updating... DB has been updated to version 5.2.5")
def update_db_v_5_2_5_1(**kwargs):
    """Rename the 'editor' role to 'user' on user accounts (migration 5.2.5-1)."""
    try:
        User.update(role='user').where(User.role == 'editor').execute()
    except Exception as e:
        print("An error occurred:", e)
    else:
        if kwargs.get('silent') != 1:
            print("Updating... DB has been updated to version 5.2.5-1")
def update_db_v_5_2_5_2(**kwargs):
    """Delete the now-unused 'editor' role (migration step 5.2.5-2)."""
    try:
        Role.delete().where(Role.name == 'editor').execute()
    except Exception as e:
        print("An error occurred:", e)
    else:
        if kwargs.get('silent') != 1:
            print("Updating... DB has been updated to version 5.2.5-2")
def update_db_v_5_2_5_3(**kwargs):
    """Add last-login tracking columns to `user` (migration step 5.2.5-3).

    Failures (typically "duplicate column" on re-runs) are ignored so the
    migration stays idempotent.
    """
    cursor = conn.cursor()
    sql = list()
    sql.append("alter table user add column last_login_date timestamp default '0000-00-00 00:00:00'")
    sql.append("alter table user add column last_login_ip VARCHAR ( 64 )")
    for i in sql:
        try:
            cursor.execute(i)
        except Exception:
            # Bug fix: a bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; only ordinary errors should be ignored here.
            pass
    else:
        # for/else with no break: always runs after the loop completes.
        if kwargs.get('silent') != 1:
            print('Updating... DB has been updated to version 5.2.5-3')
def update_db_v_5_2_6(**kwargs):
    """Remove the obsolete 'haproxy_enterprise' setting (migration step 5.2.6)."""
    try:
        Setting.delete().where(Setting.param == 'haproxy_enterprise').execute()
    except Exception as e:
        print("An error occurred:", e)
    else:
        if kwargs.get('silent') != 1:
            print("Updating... DB has been updated to version 5.2.6")
def update_db_v_5_3_0(**kwargs):
    """Seed per-group Docker container-name settings (migration step 5.3.0)."""
    groups = ''
    query = Groups.select()
    try:
        query_res = query.execute()
    except Exception as e:
        out_error(e)
    else:
        groups = query_res
    # If the query failed, `groups` is still '' and the loop body never runs.
    for g in groups:
        try:
            data_source = [
                {'param': 'nginx_container_name', 'value': 'nginx', 'section': 'nginx',
                 'desc': 'Docker container name for Nginx service',
                 'group': g.group_id},
                {'param': 'haproxy_container_name', 'value': 'haproxy', 'section': 'haproxy',
                 'desc': 'Docker container name for HAProxy service',
                 'group': g.group_id},
            ]
            try:
                Setting.insert_many(data_source).on_conflict_ignore().execute()
            except Exception as e:
                if kwargs.get('silent') != 1:
                    if str(e) == 'columns param, group are not unique':
                        pass
                    else:
                        print("An error occurred:", e)
        # NOTE(review): this outer handler looks redundant — the inner
        # try/except above already catches insert failures, so this branch
        # can only fire for errors raised while building `data_source`.
        except Exception as e:
            if kwargs.get('silent') != 1:
                if (
                        str(e) == 'columns param, group are not unique' or
                        str(e) == '(1062, "Duplicate entry \'nginx_container_name\' for key \'param\'")' or
                        str(e) == 'UNIQUE constraint failed: settings.param, settings.group'
                ):
                    pass
                else:
                    print("An error occurred:", e)
def update_db_v_5_3_1(**kwargs):
    """Add the `servers.keepalived_active` column (migration step 5.3.1)."""
    cursor = conn.cursor()
    sql = """
    ALTER TABLE `servers` ADD COLUMN keepalived_active INTEGER NOT NULL DEFAULT 0;
    """
    try:
        cursor.execute(sql)
    except Exception as e:
        if kwargs.get('silent') != 1:
            already_added = (
                e.args[0] == 'duplicate column name: keepalived_active'
                or str(e) == '(1060, "Duplicate column name \'keepalived_active\'")'
            )
            if already_added:
                print('Updating... DB has been updated to version 5.3.1')
            else:
                print("An error occurred:", e)
    else:
        print("Updating... DB has been updated to version 5.3.1")
def update_db_v_5_3_2(**kwargs):
    """Seed the 'checker_maxconn_threshold' setting (migration step 5.3.2)."""
    duplicate_errors = (
        'columns param, group are not unique',
        '(1062, "Duplicate entry \'checker_maxconn_threshold-1\' for key \'param\'")',
        'UNIQUE constraint failed: settings.param, settings.group',
    )
    try:
        Setting.insert(param='checker_maxconn_threshold', value=90, section='monitoring',
                       desc='Threshold value for alerting, in %').execute()
    except Exception as e:
        # Duplicate-key errors mean the setting already exists; ignore them.
        if kwargs.get('silent') != 1 and str(e) not in duplicate_errors:
            print("An error occurred:", e)
    else:
        if kwargs.get('silent') != 1:
            print('Updating... DB has been updated to version 5.3.2')
def update_db_v_5_3_2_2(**kwargs):
    """Add the `servers.keepalived_alert` column (migration step 5.3.2-2).

    Bug fix: both status messages previously reported "version 5.3.2"
    (copy/paste from the 5.3.2 migration); they now report 5.3.2-2.
    """
    cursor = conn.cursor()
    sql = """
    ALTER TABLE `servers` ADD COLUMN keepalived_alert INTEGER NOT NULL DEFAULT 0;
    """
    try:
        cursor.execute(sql)
    except Exception as e:
        if kwargs.get('silent') != 1:
            if e.args[0] == 'duplicate column name: keepalived_alert' or str(e) == '(1060, "Duplicate column name \'keepalived_alert\'")':
                # Column already exists: the migration was applied earlier.
                print('Updating... DB has been updated to version 5.3.2-2')
            else:
                print("An error occurred:", e)
    else:
        print("Updating... DB has been updated to version 5.3.2-2")
def update_ver():
    """Record the current schema version ('5.3.5.0') in the Version table."""
    query = Version.update(version='5.3.5.0')
    try:
        query.execute()
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt.
        print('Cannot update version')
def update_all():
    """Apply every schema migration in order (verbose), then bump the version."""
    # Legacy bootstrap step runs only when no version row exists yet.
    if check_ver() is None:
        update_db_v_3_4_5_22()
    migrations = (
        update_db_v_41, update_db_v_42, update_db_v_4_3_0, update_db_v_4_3_1,
        update_db_v_4_4_2_1, update_db_v_4_5_1, update_db_v_4_5_4,
        update_db_v_4_5_7, update_db_v_4_5_8_1, update_db_v_4_5_8_2,
        update_db_v_4_5_9, update_db_v_5_0_1, update_db_v_5_1_2,
        update_db_v_5_1_3, update_db_v_5_2_0, update_db_v_5_2_4,
        update_db_v_5_2_4_1, update_db_v_5_2_5, update_db_v_5_2_5_1,
        update_db_v_5_2_5_2, update_db_v_5_2_5_3, update_db_v_5_2_6,
        update_db_v_5_3_0, update_db_v_5_3_1, update_db_v_5_3_2,
        update_db_v_5_3_2_2,
    )
    for migration in migrations:
        migration()
    update_ver()
def update_all_silent():
    """Apply every schema migration in order with output suppressed."""
    # NOTE(review): this bootstrap call is made without silent=1, matching
    # the original behavior — confirm whether it should also be silenced.
    if check_ver() is None:
        update_db_v_3_4_5_22()
    migrations = (
        update_db_v_41, update_db_v_42, update_db_v_4_3_0, update_db_v_4_3_1,
        update_db_v_4_4_2_1, update_db_v_4_5_1, update_db_v_4_5_4,
        update_db_v_4_5_7, update_db_v_4_5_8_1, update_db_v_4_5_8_2,
        update_db_v_4_5_9, update_db_v_5_0_1, update_db_v_5_1_2,
        update_db_v_5_1_3, update_db_v_5_2_0, update_db_v_5_2_4,
        update_db_v_5_2_4_1, update_db_v_5_2_5, update_db_v_5_2_5_1,
        update_db_v_5_2_5_2, update_db_v_5_2_5_3, update_db_v_5_2_6,
        update_db_v_5_3_0, update_db_v_5_3_1, update_db_v_5_3_2,
        update_db_v_5_3_2_2,
    )
    for migration in migrations:
        migration(silent=1)
    update_ver()
if __name__ == "__main__":
    # Direct invocation: create the schema, seed defaults, then apply
    # every pending migration with verbose output.
    create_tables()
    default_values()
    update_all()
| 35.520811 | 177 | 0.605354 |
201059ee54ea78bfa1af3322015f70ba066fb9bc | 1,872 | py | Python | cycle_2020/management/commands/full_load_from_rss_2020.py | RobBickel/nyt-fec | 802df867c3b31fff8e922be00bab6f40a5db2d00 | [
"Apache-2.0"
] | 17 | 2018-03-27T15:09:58.000Z | 2020-05-13T11:32:43.000Z | cycle_2020/management/commands/full_load_from_rss_2020.py | RobBickel/nyt-fec | 802df867c3b31fff8e922be00bab6f40a5db2d00 | [
"Apache-2.0"
] | 59 | 2018-03-21T17:08:15.000Z | 2021-12-13T19:47:37.000Z | cycle_2020/management/commands/full_load_from_rss_2020.py | RobBickel/nyt-fec | 802df867c3b31fff8e922be00bab6f40a5db2d00 | [
"Apache-2.0"
] | 11 | 2018-09-11T23:18:32.000Z | 2021-12-15T08:43:58.000Z | import pytz
import datetime
import os
import requests
import csv
import process_filing
import time
import traceback
import sys
from cycle_2020.models import *
from cycle_2020.utils import loader
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--repeat-interval',
dest='repeat-interval',
help='Number of minutes before rerunning the command. If not specified, just run once. This is to make it easy to daemonize this command locally if needed')
parser.add_argument('--filing_dir',
dest='filing_dir',
help='where to save and read filings from')
#default is to do just today and just committees we have in the DB
def handle(self, *args, **options):
fec_time=pytz.timezone('US/Eastern') #fec time is eastern
if options['repeat-interval']:
repeat_interval = int(options['repeat-interval'])
else:
repeat_interval = None
if options['filing_dir']:
filing_dir = options['filing_dir']
else:
filing_dir = 'filings/'
while True:
print("Pulling filings from RSS feed")
#keep looping if an interval is provided, this is mostly for testing
filings = loader.filing_list_from_rss()
if not filings:
print("failed to find any new filings in the RSS feed")
else:
loader.download_filings(filings, filing_dir)
loader.load_filings(filing_dir)
if repeat_interval:
time.sleep(repeat_interval)
else:
break
if repeat_interval:
time.sleep(repeat_interval)
else:
break
| 28.8 | 168 | 0.612179 |
761b7a56d5d2c7051eb7a49757ed1180133073d9 | 562 | py | Python | Ch06_Code/URL.py | arifmudi/Python-GUI-Programming-Cookbook-Third-Edition | 942c151a62ef422bb6fdd15b4b141a07c699cb9a | [
"MIT"
] | 68 | 2019-08-23T10:54:38.000Z | 2022-03-09T20:21:39.000Z | Ch06_Code/URL.py | arifmudi/Python-GUI-Programming-Cookbook-Third-Edition | 942c151a62ef422bb6fdd15b4b141a07c699cb9a | [
"MIT"
] | null | null | null | Ch06_Code/URL.py | arifmudi/Python-GUI-Programming-Cookbook-Third-Edition | 942c151a62ef422bb6fdd15b4b141a07c699cb9a | [
"MIT"
] | 46 | 2019-09-03T18:04:29.000Z | 2022-03-30T01:06:52.000Z | '''
Created on May 28, 2019
Ch06
@author: Burkhard A. Meier
'''
from urllib.request import urlopen
link = 'http://python.org/'
def get_html():
    """Fetch the page at module-level ``link``; return decoded HTML or None on failure."""
    try:
        response = urlopen(link)
        print(response)
        raw = response.read()
        print(raw)
        decoded = raw.decode()
        print(decoded)
    except Exception as ex:
        print('*** Failed to get Html! ***\n\n' + str(ex))
    else:
        # Only reached when every step above succeeded.
        return decoded
#-------------------------------
if __name__ == '__main__':
    # Fetch and print the page when the module is run directly.
    get_html()
2ba8576809efac0455f734f4562586fe290c9708 | 1,616 | py | Python | tgbot/handlers/utils/info.py | rustamwho/tgbot-django-rentcars | ce834b20487b6a222e3791ab9633ffd0f1bb41bf | [
"Apache-2.0"
] | null | null | null | tgbot/handlers/utils/info.py | rustamwho/tgbot-django-rentcars | ce834b20487b6a222e3791ab9633ffd0f1bb41bf | [
"Apache-2.0"
] | null | null | null | tgbot/handlers/utils/info.py | rustamwho/tgbot-django-rentcars | ce834b20487b6a222e3791ab9633ffd0f1bb41bf | [
"Apache-2.0"
] | null | null | null | from functools import wraps
from typing import Dict, Callable
import telegram
from telegram import Update
def send_typing_action(func: Callable):
    """Decorator: show the 'typing…' chat action while the handler runs."""
    @wraps(func)
    def wrapper(update, context, *args, **kwargs):
        # Tell Telegram we are busy, then run the wrapped handler.
        context.bot.send_chat_action(
            chat_id=update.effective_message.chat_id,
            action=telegram.ChatAction.TYPING)
        return func(update, context, *args, **kwargs)

    return wrapper
def extract_user_data_from_update(update: Update) -> Dict:
    """ python-telegram-bot's Update instance --> User info """
    # Check the possible carriers of user info in priority order.
    if update.message is not None:
        user = update.message.from_user.to_dict()
    elif update.inline_query is not None:
        user = update.inline_query.from_user.to_dict()
    elif update.chosen_inline_result is not None:
        user = update.chosen_inline_result.from_user.to_dict()
    elif (update.callback_query is not None
          and update.callback_query.from_user is not None):
        user = update.callback_query.from_user.to_dict()
    elif (update.callback_query is not None
          and update.callback_query.message is not None):
        user = update.callback_query.message.chat.to_dict()
    else:
        raise Exception(f"Can't extract user data from update: {update}")

    info = dict(user_id=user["id"], is_blocked_bot=False)
    # Copy over optional profile fields that are present and non-null.
    for key in ("username", "first_name", "last_name", "language_code"):
        if user.get(key) is not None:
            info[key] = user[key]
    return info
819045e58f8b28cadcabcf57c4bb14a1be88d8e9 | 4,996 | py | Python | src/persontitles/academic_german.py | 0LL13/persontitles | fcba08e6a4921813972bc3997caf0e805b0268cd | [
"MIT"
] | null | null | null | src/persontitles/academic_german.py | 0LL13/persontitles | fcba08e6a4921813972bc3997caf0e805b0268cd | [
"MIT"
] | null | null | null | src/persontitles/academic_german.py | 0LL13/persontitles | fcba08e6a4921813972bc3997caf0e805b0268cd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# academic_german.py
"""Collection of German academic degrees combining wiki and drtitel."""
import os
import sys
import re
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(
os.path.realpath(os.path.join(os.getcwd(),
os.path.expanduser(__file__))),
) # isort:skip # noqa # pylint: disable=wrong-import-position
sys.path.append(
os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)),
) # isort: skip # noqa # pylint: disable=wrong-import-position
from persontitles.academic_german_wiki import degrees_ger_wiki # noqa
from persontitles.academic_german_drtitel import degrees_ger_drtitel # noqa
def degrees_ger() -> list:
    """Return the merged list of German academic degrees.

    Reads the cached text file when present; otherwise merges the Wikipedia
    and drtitel scrapes, writes the union to the cache file, and returns it.
    """
    try:
        with open('./src/persontitles/data/academic_german.txt', mode='r', encoding='utf-8') as fin:  # noqa
            DEGREES = fin.read().split('\n')
    except FileNotFoundError:
        # Cache miss: build the union of both scraped sources.
        DEGREES_WIKI = set(degrees_ger_wiki())
        DEGREES_DRTITEL = set(degrees_ger_drtitel())
        DEGREES = [dgr for dgr in DEGREES_WIKI | DEGREES_DRTITEL]
        # NOTE(review): mode='a' appends — harmless on first creation, but it
        # would duplicate entries if the file ever existed partially written.
        with open('./src/persontitles/data/academic_german.txt', mode='a', encoding='utf-8') as fout:  # noqa
            fout.write('\n'.join(item for item in DEGREES))

    return DEGREES
def no_empty_space_in_degree(degrees):
    """
    Because there is no coherent writing of German degrees and some are
    written with empty spaces and some without, every degree is replicated
    with the space after each abbreviation dot removed (e.g. "Dr. med."
    also becomes "Dr.med.").
    """
    return [re.sub(r'\. ', '.', degree) for degree in degrees]
def add_empty_space(degrees) -> list:
    """
    A degree like "Dipl.agr.biol." has no empty spaces between the grade of
    the degree (Dipl.) and its specification (agr. biol.). To make sure that
    both ways of writing are covered, the degree will be replicated as "Dipl.
    agr. biol.".
    """
    degrees_w_empty_space = []
    for degree in degrees:
        degree_w_space = ''
        dot_counter = 1
        max = len(degree) - 1  # NOTE(review): shadows the builtin `max`
        for i, ch in enumerate(degree):
            if ch == '.' and i == max:
                # Trailing dot: emit the spaced copy if one was built,
                # otherwise the original degree.
                if degree_w_space != '':
                    degrees_w_empty_space.append(degree_w_space.strip())
                else:
                    degrees_w_empty_space.append(degree.strip())
            elif ch == '.' and i < max:
                # Interior dot: insert a space after it, unless the next
                # character is '-' or ')' (part of a compound form).
                if degree[i + 1] not in ['-', ')']:
                    if degree_w_space == '':
                        degree_w_space = degree[:i + 1] + ' ' + degree[i + 1:]
                        dot_counter += 1
                    else:
                        # dot_counter offsets the original index by the
                        # spaces already inserted into the working copy.
                        degree_w_space = degree_w_space[:i+dot_counter] + ' ' + degree_w_space[i+dot_counter:]  # noqa
                        dot_counter += 1
            elif i == max:
                # Degree does not end with a dot: emit at the final character.
                if degree_w_space != '':
                    degrees_w_empty_space.append(degree_w_space.strip())
                else:
                    degrees_w_empty_space.append(degree.strip())
    return degrees_w_empty_space
def german_abbrevs(DEGREES) -> list:
    """
    Because the aim of this module is to find the compounds of a person's name
    or degree or even peer title, collect both the solitary specifications
    like "rer.", "nat.", "oec.", and "med." (there are more), and also the
    degrees like "Dr." or "Dipl.".
    """
    collected = []
    # Normalize to the spaced form, then gather unique tokens in order.
    for spaced_degree in add_empty_space(DEGREES):
        for token in spaced_degree.split(' '):
            if token not in collected:
                collected.append(token)
    # Drop tokens that are empty or whitespace-only.
    return [token for token in collected if token.strip()]
if __name__ == '__main__':
DEGREES_WIKI = set(degrees_ger_wiki())
DEGREES_DRTITEL = set(degrees_ger_drtitel())
DEGREES = [dgr for dgr in DEGREES_WIKI | DEGREES_DRTITEL]
# for i, degree in enumerate(sorted(DEGREES)):
# print(i, degree)
print()
print('Count degrees from wiki:', len(DEGREES_WIKI))
print('Count degrees from drtitel:', len(DEGREES_DRTITEL))
print('Common degrees from both sets:', len(DEGREES_WIKI & DEGREES_DRTITEL)) # noqa
print('Degrees only from wiki:', len(DEGREES_WIKI - DEGREES_DRTITEL))
print('Degrees only from drtitel:', len(DEGREES_DRTITEL - DEGREES_WIKI))
print('Sum of degrees of both sets:', len(DEGREES))
degrees_wo = no_empty_space_in_degree(DEGREES)
degrees_w = add_empty_space(degrees_wo)
abbrevs = german_abbrevs(DEGREES)
print('Number of abbreviations:', len(abbrevs))
for degree in sorted(DEGREES):
print(degree)
| 36.202899 | 118 | 0.638511 |
06dbf748b1ab46020f113b6b75de56300165fe13 | 10,779 | py | Python | optimus/engines/base/set.py | ironmussa/Optimus | fbaea6e0957f0bc016280a85ff021904faac20c5 | [
"Apache-2.0"
] | 1,045 | 2017-07-17T17:59:46.000Z | 2021-06-15T07:06:48.000Z | optimus/engines/base/set.py | ironmussa/Optimus | fbaea6e0957f0bc016280a85ff021904faac20c5 | [
"Apache-2.0"
] | 955 | 2017-07-14T15:47:58.000Z | 2021-05-27T14:16:24.000Z | optimus/engines/base/set.py | ironmussa/Optimus | fbaea6e0957f0bc016280a85ff021904faac20c5 | [
"Apache-2.0"
] | 226 | 2017-08-04T20:41:33.000Z | 2021-05-21T08:28:33.000Z | from typing import Callable
from optimus.helpers.types import *
class BaseSet():
    """Base class for all set implementations.

    Every public method builds a boolean mask via the matching method on
    ``root.mask`` and delegates to :meth:`__call__`, which assigns
    ``true_value`` where the mask holds and ``default_value`` elsewhere.
    NOTE(review): the ``args=[]`` mutable defaults are shared across calls;
    they appear to be passed through unmodified, but confirm before relying
    on that.
    """

    def __init__(self, root: 'DataFrameType'):
        self.root = root

    def __call__(self, cols=None, value=None, default=None, args=[], where=None):
        # Delegates the actual assignment to the dataframe's column setter.
        return self.root.cols.set(cols=cols, value_func=value, args=args, where=where, default=default)

    def all(self, cols, value=None) -> 'DataFrameType':
        # Unconditionally set `value` on `cols` (no mask).
        return self.__call__(cols, value)

    def _mask(self, cols, true_value=None, default_value=None, args=[], func: Callable = None, **kwargs) -> 'DataFrameType':
        """Build a mask with ``func`` over ``cols`` and apply the conditional set."""
        df = self.root
        if cols in df.cols.names():
            # Existing column: mask is computed over it directly.
            input = df[cols]  # NOTE(review): shadows the builtin `input`
            input_cols = cols
        elif isinstance(true_value, (self.root.__class__, )):
            # `true_value` is itself a dataframe: mask over its first column.
            input = true_value
            input_cols = input.cols.names()[0]
        else:
            # New column: seed it before masking.
            # NOTE(review): `or` falls back to `true_value` whenever
            # `default_value` is falsy (0, '', False), not only None — confirm
            # this is intended.
            df[cols] = default_value or true_value
            input = df[cols]
            input_cols = cols
        # uses name instead of the function to use it correctly with 'input' instead of with the whole dataframe
        mask = getattr(input.mask, func.__name__)(input_cols, **kwargs)
        return self.__call__(cols, value=true_value, default=default_value, args=args, where=mask)

    # Types

    def str(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.str)

    def int(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.int)

    def float(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.float)

    def numeric(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.numeric)

    def email(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.email)

    def ip(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.ip)

    def url(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.url)

    def gender(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.gender)

    def boolean(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.boolean)

    def zip_code(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.zip_code)

    def credit_card_number(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.credit_card_number)

    def datetime(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.datetime)

    def object(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.object)

    def array(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.array)

    def phone_number(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.phone_number)

    def social_security_number(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.social_security_number)

    def http_code(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.http_code)

    # Other

    def null(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.null)

    def none(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.none)

    def nan(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.nan)

    def empty(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.empty)

    def greater_than(self, cols, true_value=None, default_value=None, args=[], value=None) -> 'DataFrameType':
        return self._mask(cols, true_value=true_value, default_value=default_value, args=args, func=self.root.mask.greater_than, value=value)

    def greater_than_equal(self, cols, true_value=None, default_value=None, args=[], value=None) -> 'DataFrameType':
        return self._mask(cols, true_value=true_value, default_value=default_value, args=args, func=self.root.mask.greater_than_equal, value=value)

    def less_than(self, cols, true_value=None, default_value=None, args=[], value=None) -> 'DataFrameType':
        return self._mask(cols, true_value=true_value, default_value=default_value, args=args, func=self.root.mask.less_than, value=value)

    def less_than_equal(self, cols, true_value=None, default_value=None, args=[], value=None) -> 'DataFrameType':
        return self._mask(cols, true_value=true_value, default_value=default_value, args=args, func=self.root.mask.less_than_equal, value=value)

    def between(self, cols, true_value=None, default_value=None, args=[], lower_bound=None, upper_bound=None, equal=True, bounds=None) -> 'DataFrameType':
        return self._mask(cols, true_value=true_value, default_value=default_value, args=args, func=self.root.mask.between, lower_bound=lower_bound, upper_bound=upper_bound, equal=equal, bounds=bounds)

    def equal(self, cols, true_value=None, default_value=None, args=[], value=None) -> 'DataFrameType':
        return self._mask(cols, true_value=true_value, default_value=default_value, args=args, func=self.root.mask.equal, value=value)

    def not_equal(self, cols, true_value=None, default_value=None, args=[], value=None) -> 'DataFrameType':
        return self._mask(cols, true_value=true_value, default_value=default_value, args=args, func=self.root.mask.not_equal, value=value)

    def missing(self, cols, value=None, default_value=None, args=[]) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.missing)

    def mismatch(self, cols, value=None, default_value=None, args=[], data_type=None) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.mismatch, data_type=data_type)

    def duplicated(self, cols, value=None, default_value=None, args=[], keep="first") -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.duplicated, keep=keep)

    def unique(self, cols, value=None, default_value=None, args=[], keep="first") -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.unique, keep=keep)

    # NOTE(review): `match` delegates to mask.match_data_type while also
    # forwarding `regex` — possibly intended to call mask.match; confirm.
    def match(self, cols, value=None, default_value=None, args=[], regex=None, data_type=None) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.match_data_type, regex=regex, data_type=data_type)

    def match_regex(self, cols, value=None, default_value=None, args=[], regex=None) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.match_regex, regex=regex)

    def match_data_type(self, cols, value=None, default_value=None, args=[], data_type=None) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.match_data_type, data_type=data_type)

    def match_pattern(self, cols, value=None, default_value=None, args=[], pattern=None) -> 'DataFrameType':
        return self._mask(cols, true_value=value, default_value=default_value, args=args, func=self.root.mask.match_pattern, pattern=pattern)

    def value_in(self, cols, true_value=None, default_value=None, args=[], values=None) -> 'DataFrameType':
        return self._mask(cols, true_value=true_value, default_value=default_value, args=args, func=self.root.mask.value_in, values=values)

    def starts_with(self, cols, true_value=None, default_value=None, args=[], value=None) -> 'DataFrameType':
        return self._mask(cols, true_value=true_value, default_value=default_value, args=args, func=self.root.mask.starts_with, value=value)

    def ends_with(self, cols, true_value=None, default_value=None, args=[], value=None) -> 'DataFrameType':
        return self._mask(cols, true_value=true_value, default_value=default_value, args=args, func=self.root.mask.ends_with, value=value)

    def contains(self, cols, true_value=None, default_value=None, args=[], value=None) -> 'DataFrameType':
        return self._mask(cols, true_value=true_value, default_value=default_value, args=args, func=self.root.mask.contains, value=value)

    def expression(self, cols, true_value=None, default_value=None, args=[], where=None) -> 'DataFrameType':
        return self._mask(cols, true_value=true_value, default_value=default_value, args=args, func=self.root.mask.expression, where=where)
dd54d8201044ee5c498d44b86f60a93dee1e1542 | 2,235 | py | Python | Python_Arcade/template-master/source_code/main.py | npinak/Python-Projects | 6e6463f4fde175fde60c9cca045e3c114b854505 | [
"MIT"
] | 1 | 2021-10-16T16:22:14.000Z | 2021-10-16T16:22:14.000Z | Python_Arcade/template-master/source_code/main.py | npinak/Python-Projects | 6e6463f4fde175fde60c9cca045e3c114b854505 | [
"MIT"
] | null | null | null | Python_Arcade/template-master/source_code/main.py | npinak/Python-Projects | 6e6463f4fde175fde60c9cca045e3c114b854505 | [
"MIT"
] | null | null | null | """
Starting Template
"""
import arcade
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Starting Template"
class MyGame(arcade.Window):
    """
    Main application class (arcade starting template).

    NOTE: Go ahead and delete the methods you don't need.
    If you do need a method, delete the 'pass' and replace it
    with your own code. Don't leave 'pass' in this program.
    """

    def __init__(self, width, height, title):
        super().__init__(width, height, title)

        arcade.set_background_color(arcade.color.AMAZON)

        # If you have sprite lists, you should create them here,
        # and set them to None

    def setup(self):
        # Create your sprites and sprite lists here
        pass

    def on_draw(self):
        """
        Render the screen.
        """

        # This command should happen before we start drawing. It will clear
        # the screen to the background color, and erase what we drew last frame.
        arcade.start_render()

        # Call draw() on all your sprite lists below

    def on_update(self, delta_time):
        """
        All the logic to move, and the game logic goes here.
        Normally, you'll call update() on the sprite lists that
        need it.
        """
        pass

    def on_key_press(self, key, key_modifiers):
        """
        Called whenever a key on the keyboard is pressed.

        For a full list of keys, see:
        http://arcade.academy/arcade.key.html
        """
        pass

    def on_key_release(self, key, key_modifiers):
        """
        Called whenever the user lets off a previously pressed key.
        """
        pass

    def on_mouse_motion(self, x, y, delta_x, delta_y):
        """
        Called whenever the mouse moves.
        """
        pass

    def on_mouse_press(self, x, y, button, key_modifiers):
        """
        Called when the user presses a mouse button.
        """
        pass

    def on_mouse_release(self, x, y, button, key_modifiers):
        """
        Called when a user releases a mouse button.
        """
        pass
def main():
    """Create the game window, set it up and enter the arcade event loop."""
    window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
    window.setup()
    arcade.run()


if __name__ == "__main__":
    main()
| 23.526316 | 80 | 0.597315 |
e5aeebcf62d2a734004737d19313a04ffe4423fa | 4,260 | py | Python | Main_Production/Create_Music.py | EricCacciavillani/LyreBird | 858657faef39d1adcba19ff0213210ba490b4afa | [
"MIT"
] | 1 | 2019-05-04T02:34:20.000Z | 2019-05-04T02:34:20.000Z | Main_Production/Create_Music.py | EricCacciavillani/LyreBird | 858657faef39d1adcba19ff0213210ba490b4afa | [
"MIT"
] | null | null | null | Main_Production/Create_Music.py | EricCacciavillani/LyreBird | 858657faef39d1adcba19ff0213210ba490b4afa | [
"MIT"
] | 1 | 2019-04-04T19:14:09.000Z | 2019-04-04T19:14:09.000Z | import sys
sys.path.append('..')
from Shared_Files.Constants import *
from Shared_Files.Global_Util import *
import pickle
import numpy
from music21 import instrument, note, stream, chord
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.layers import Activation
# Main function to generate the music
def generate():
# Open and read notes from file
with open(ABS_PATHS.SAVED_NOTES_PATH + "Classical_Notes", 'rb') as filepath:
notes = pickle.load(filepath)
pitchnames = sorted(set(item for item in notes))
n_vocab = len(set(notes))
# ----
network_input, normalized_input = prepare_sequences(notes, pitchnames, n_vocab)
model = create_network(normalized_input, n_vocab)
prediction_output = generate_notes(model, network_input, pitchnames, n_vocab)
create_midi(prediction_output)
def prepare_sequences(notes,
                      pitchnames,
                      n_vocab):
    """Build 100-token sliding-window input sequences for sampling.

    Returns a pair: the raw integer-encoded windows (list of lists), and the
    same data reshaped to (samples, timesteps, 1) and scaled into [0, 1)
    for the LSTM.
    """
    # Map each distinct pitch token to a stable integer id.
    lookup = {token: idx for idx, token in enumerate(pitchnames)}

    window = 100
    network_input = []
    targets = []
    for start in range(len(notes) - window):
        frame = notes[start:start + window]
        network_input.append([lookup[token] for token in frame])
        targets.append(lookup[notes[start + window]])

    # Shape for the LSTM and normalise ids into [0, 1).
    shaped = numpy.reshape(network_input, (len(network_input), window, 1))
    shaped = shaped / float(n_vocab)

    return (network_input, shaped)
# Rebuild the architecture used during training and restore its weights.
def create_network(network_input, n_vocab):
    """Recreate the LSTM network from training and load its saved weights."""
    model = Sequential()
    # Three stacked 512-unit LSTM layers; the first declares the input shape.
    model.add(LSTM(512,
                   input_shape=(network_input.shape[1], network_input.shape[2]),
                   return_sequences=True))
    model.add(Dropout(0.3))
    model.add(LSTM(512, return_sequences=True))
    model.add(Dropout(0.3))
    model.add(LSTM(512))
    # Dense head producing one softmax probability per vocabulary entry.
    model.add(Dense(256))
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Weights were saved during training; restore them before sampling.
    model.load_weights(ABS_PATHS.SAVED_WEIGHTS_PATH + 'Classic_Music_Weights.hdf5')

    return model
# Sample a sequence of note tokens from the trained network.
def generate_notes(model,
                   network_input,
                   pitchnames,
                   n_vocab):
    """Generate 200 note tokens, seeding from a random training window."""
    # Pick a random seed pattern from the training inputs.
    seed_index = numpy.random.randint(0, len(network_input) - 1)
    decode = dict(enumerate(pitchnames))

    pattern = network_input[seed_index]
    generated = []

    for _ in range(200):
        model_input = numpy.reshape(pattern, (1, len(pattern), 1))
        model_input = model_input / float(n_vocab)

        prediction = model.predict(model_input, verbose=0)
        best = numpy.argmax(prediction)
        generated.append(decode[best])

        # Slide the window: drop the oldest token, append the prediction.
        pattern.append(best)
        pattern = pattern[1:len(pattern)]

    return generated
# Convert the generated token sequence into a MIDI file.
def create_midi(prediction_output):
    """Turn predicted tokens into music21 notes/chords and write a MIDI file."""
    offset = 0
    output_notes = []

    for token in prediction_output:
        if ('.' in token) or token.isdigit():
            # Chord tokens look like "4.7.11": dot-separated pitch numbers.
            chord_members = []
            for pitch in token.split('.'):
                member = note.Note(int(pitch))
                member.storedInstrument = instrument.Piano()
                chord_members.append(member)
            new_chord = chord.Chord(chord_members)
            new_chord.offset = offset
            output_notes.append(new_chord)
        else:
            # Anything else is a single named note, e.g. "C#5".
            single = note.Note(token)
            single.offset = offset
            single.storedInstrument = instrument.Piano()
            output_notes.append(single)

        # Advance half a beat so notes do not stack on one another.
        offset += 0.5

    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp=ABS_PATHS.AUDIO_CACHE_PATH + 'Output.mid')


if __name__ == '__main__':
    generate()
| 30 | 85 | 0.667371 |
30c4460f9d59f0f705b4a4d2b4052679a5f42291 | 857 | py | Python | tests/e2e/process_model/main.py | actris-cloudnet/cloudnet-processing | 1d1a67df3bad0beb7f8ec455e441ea06e8a32e55 | [
"MIT"
] | null | null | null | tests/e2e/process_model/main.py | actris-cloudnet/cloudnet-processing | 1d1a67df3bad0beb7f8ec455e441ea06e8a32e55 | [
"MIT"
] | null | null | null | tests/e2e/process_model/main.py | actris-cloudnet/cloudnet-processing | 1d1a67df3bad0beb7f8ec455e441ea06e8a32e55 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
from os import path
from tempfile import NamedTemporaryFile
import test_utils.utils as utils
# Directory containing this test script (used to locate fixtures).
SCRIPT_PATH = path.dirname(path.realpath(__file__))
# Scratch file the processing run writes into; removed when closed.
temp_file = NamedTemporaryFile()

# Test fixture parameters: which site/date/instrument the e2e run processes.
site = "bucharest"
date = "2020-10-22"
instrument = "model"
# (uuid, filename) pairs for the raw model files served to the test.
source_data = [
    ("eb176ca3-374e-471c-9c82-fc9a45578883", "20201022_bucharest_ecmwf.nc"),
    ("80c2fab5-2dc5-4692-bafe-a7274071770e", "20201022_bucharest_gdas1.nc"),
]
def main():
    """End-to-end test driver: start mock servers, register data, process."""
    utils.start_test_servers(instrument, SCRIPT_PATH)
    session = utils.register_storage_urls(
        temp_file, source_data, site, date, instrument, True, products=["ecmwf", "gdas1"]
    )
    cli_args = [f"-s={site}", f"--date={date}", "-p=radar", "model"]
    utils.process(session, cli_args, temp_file, SCRIPT_PATH, processing_mode="model")


if __name__ == "__main__":
    main()
| 26.78125 | 89 | 0.71762 |
a95646fa3083150c724bea5154efed7af61fd654 | 1,053 | py | Python | results/pretty_graphs.py | columbia/fairtest | 8696051c9276f127ab8b2f437850f845ff0ca786 | [
"Apache-2.0"
] | 42 | 2017-01-12T13:59:23.000Z | 2022-03-01T01:44:12.000Z | results/pretty_graphs.py | columbia/fairtest | 8696051c9276f127ab8b2f437850f845ff0ca786 | [
"Apache-2.0"
] | 3 | 2019-05-24T21:02:51.000Z | 2019-11-15T15:36:17.000Z | results/pretty_graphs.py | columbia/fairtest | 8696051c9276f127ab8b2f437850f845ff0ca786 | [
"Apache-2.0"
] | 20 | 2017-01-12T23:07:10.000Z | 2021-08-11T09:13:50.000Z | import matplotlib.pyplot as plt
from matplotlib import rcParams
import pickle
rcParams.update({'figure.autolayout': True})
plt.rcParams.update({'font.size': 22})

"""
#
# Medical
#
PLOT_LABEL = 'a'
for context in [0, 26, 4, 1]:
    filename = 'medical_reg_plots/context_{}'.format(context)
    fig = pickle.load(file(filename + '.pkl'))

    #plt.text(0.07, 0.9, '(' + PLOT_LABEL + ')', horizontalalignment='center',
    #         verticalalignment='center', transform=plt.gca().transAxes)
    #PLOT_LABEL = chr(ord(PLOT_LABEL) + 1)

    plt.savefig(filename + '.png')
"""

#
# Recommender (Movie Rating)
#
# Per-plot-group y-axis tick positions.
ticks = {0: [3.6, 3.8, 4.0, 4.2, 4.4, 4.6],
         1: [0, 0.5, 1.0, 1.5, 2.0],
         2: [10, 20, 30, 40, 50, 60, 70]}

for key in ticks:
    # BUG FIX: `dir` shadowed the builtin; renamed to `plot_dir`.
    plot_dir = 'movies_{}_plots'.format(key)

    for sens in ['Age', 'Gender']:
        filename = plot_dir + '/' + sens + '/context_0'

        # BUG FIX: the original called the Python 2 `file()` builtin, which
        # does not exist in Python 3, and never closed the handle. Use a
        # context-managed open() in binary mode instead.
        with open(filename + '.pkl', 'rb') as handle:
            fig = pickle.load(handle)

        # Re-apply consistent tick positions and limits, then save.
        plt.yticks(ticks[key])
        plt.ylim(ticks[key][0], ticks[key][-1])

        plt.savefig(filename + '.png')
| 25.071429 | 78 | 0.587844 |
330127fe81c80f7efa3125eee653c2279ffa7208 | 901 | py | Python | setup.py | InfraPixels/powerlibs-shentry | c5cf03cf62154db2647ce1ad26bf8355e5371bae | [
"0BSD"
] | null | null | null | setup.py | InfraPixels/powerlibs-shentry | c5cf03cf62154db2647ce1ad26bf8355e5371bae | [
"0BSD"
] | null | null | null | setup.py | InfraPixels/powerlibs-shentry | c5cf03cf62154db2647ce1ad26bf8355e5371bae | [
"0BSD"
] | 1 | 2021-05-26T00:16:04.000Z | 2021-05-26T00:16:04.000Z | #!/usr/bin/env python
from setuptools import setup
#: Trove classifiers describing supported platforms and Python versions.
CLASSIFIERS = [
    "Development Status :: 3 - Alpha",
    "Environment :: Console",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
    "Operating System :: POSIX",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: ISC License (ISCL)",
]

# Package metadata for the `shentry` console wrapper.
setup(
    name="shentry",
    version="0.1.0",
    description="Wrap a program in Sentry",
    author="Cléber Zavadniak",
    author_email="contato@cleber.solutions",
    url="https://github.com/cleber-solutions/powerlibs-shentry",
    license="ISC",
    packages=['shentry'],
    entry_points='''
        [console_scripts]
        shentry=shentry:main
    ''',
    keywords=["logging"],
    python_requires='>=3.6',
    classifiers=CLASSIFIERS,
)
| 27.30303 | 64 | 0.597114 |
f610aeebfcb5bc4353385ef665d97c2fd7ce26bc | 7,377 | py | Python | vyper/semantics/validation/annotation.py | upgradvisor/vyper | 642884ea938a25793c1b2fac866e8458e63a7b49 | [
"Apache-2.0"
] | 1,347 | 2019-11-22T06:49:38.000Z | 2022-03-31T19:49:32.000Z | vyper/semantics/validation/annotation.py | upgradvisor/vyper | 642884ea938a25793c1b2fac866e8458e63a7b49 | [
"Apache-2.0"
] | 915 | 2019-11-21T05:48:16.000Z | 2022-03-31T23:51:03.000Z | vyper/semantics/validation/annotation.py | upgradvisor/vyper | 642884ea938a25793c1b2fac866e8458e63a7b49 | [
"Apache-2.0"
] | 262 | 2019-11-28T01:44:04.000Z | 2022-03-31T21:33:43.000Z | from vyper import ast as vy_ast
from vyper.exceptions import StructureException
from vyper.semantics.types.bases import BaseTypeDefinition
from vyper.semantics.types.function import ContractFunction
from vyper.semantics.types.user.event import Event
from vyper.semantics.types.user.struct import StructPrimitive
from vyper.semantics.validation.utils import (
get_common_types,
get_exact_type_from_node,
get_possible_types_from_node,
)
class _AnnotationVisitorBase:
    """
    Shared dispatch logic for annotation visitors.

    Annotation visitors attach metadata (such as type information) to vyper
    AST nodes. Immediately after a statement-level node is type checked it
    is handed to `StatementAnnotationVisitor`, which forwards some expression
    nodes to `ExpressionAnnotationVisitor` for further annotation.
    """

    def visit(self, node, *args):
        # Nodes the concrete visitor declares as ignorable are skipped outright.
        if isinstance(node, self.ignored_types):
            return

        # Walk the node's MRO so a single `visit_<Type>` handler can cover
        # several AST node types that share a common ancestor.
        for klass in type(node).mro():
            handler = getattr(self, f"visit_{klass.__name__}", None)
            if handler is not None:
                handler(node, *args)
                return

        raise StructureException(f"Cannot annotate: {node.ast_type}", node)
class StatementAnnotationVisitor(_AnnotationVisitorBase):
    """Apply annotations to statement-level nodes in a function body.

    Expression nodes contained within a statement are forwarded to an
    `ExpressionAnnotationVisitor`, usually together with the expected type
    derived from the assignment target or function signature.
    """

    # Statements with nothing to annotate are skipped by the base visitor.
    ignored_types = (
        vy_ast.Break,
        vy_ast.Continue,
        vy_ast.Pass,
        vy_ast.Raise,
    )

    def __init__(self, fn_node: vy_ast.FunctionDef, namespace: dict) -> None:
        # Function type metadata attached during earlier type checking.
        self.func = fn_node._metadata["type"]
        self.namespace = namespace
        self.expr_visitor = ExpressionAnnotationVisitor()

    def visit(self, node):
        super().visit(node)

    def visit_Attribute(self, node):
        # NOTE: required for msg.data node, removing borks and results in
        # vyper.exceptions.StructureException: Cannot annotate: Attribute
        pass

    def visit_AnnAssign(self, node):
        # Annotate both target and value with the declared type.
        type_ = get_exact_type_from_node(node.target)
        self.expr_visitor.visit(node.target, type_)
        self.expr_visitor.visit(node.value, type_)

    def visit_Assert(self, node):
        self.expr_visitor.visit(node.test)

    def visit_Assign(self, node):
        # The target's exact type drives annotation of both sides.
        type_ = get_exact_type_from_node(node.target)
        self.expr_visitor.visit(node.target, type_)
        self.expr_visitor.visit(node.value, type_)

    def visit_AugAssign(self, node):
        type_ = get_exact_type_from_node(node.target)
        self.expr_visitor.visit(node.target, type_)
        self.expr_visitor.visit(node.value, type_)

    def visit_Expr(self, node):
        self.expr_visitor.visit(node.value)

    def visit_If(self, node):
        self.expr_visitor.visit(node.test)

    def visit_Log(self, node):
        # Resolve the logged event by name and record it on the Log node.
        node._metadata["type"] = self.namespace[node.value.func.id]
        self.expr_visitor.visit(node.value)

    def visit_Return(self, node):
        if node.value is not None:
            # Returned expression is annotated against the declared return type.
            self.expr_visitor.visit(node.value, self.func.return_type)

    def visit_For(self, node):
        # Only name/attribute iterators carry annotatable expressions here.
        if isinstance(node.iter, (vy_ast.Name, vy_ast.Attribute)):
            self.expr_visitor.visit(node.iter)
class ExpressionAnnotationVisitor(_AnnotationVisitorBase):
    """Attach type metadata to expression nodes, recursing into subexpressions.

    Each handler receives an optional expected type (`type_`) supplied by the
    caller; when it is None, the handler infers a type from the node itself
    where one is needed.
    """

    # Expressions are never skipped; every node type needs a handler.
    ignored_types = ()

    def visit(self, node, type_=None):
        # the statement visitor sometimes passes type information about expressions
        super().visit(node, type_)

    def visit_Attribute(self, node, type_):
        # Resolve `value.attr` as a member lookup on the type of `value`.
        base_type = get_exact_type_from_node(node.value)
        node._metadata["type"] = base_type.get_member(node.attr, None)
        self.visit(node.value, None)

    def visit_BinOp(self, node, type_):
        # Without an expected type, use one common to both operands
        # (only when it is unambiguous).
        if type_ is None:
            type_ = get_common_types(node.left, node.right)
            if len(type_) == 1:
                type_ = type_.pop()
        self.visit(node.left, type_)
        self.visit(node.right, type_)

    def visit_BoolOp(self, node, type_):
        # Operands of and/or are visited without an expected type.
        for value in node.values:
            self.visit(value)

    def visit_Call(self, node, type_):
        call_type = get_exact_type_from_node(node.func)
        # Prefer a caller-supplied type; otherwise derive the call's return type.
        node._metadata["type"] = type_ or call_type.fetch_call_return(node)
        self.visit(node.func)

        if isinstance(call_type, (Event, ContractFunction)):
            # events and internal function calls
            for arg, arg_type in zip(node.args, list(call_type.arguments.values())):
                self.visit(arg, arg_type)
        elif isinstance(call_type, StructPrimitive):
            # literal structs
            for value, arg_type in zip(node.args[0].values, list(call_type.members.values())):
                self.visit(value, arg_type)
        elif node.func.id not in ("empty", "range"):
            # builtin functions
            for arg in node.args:
                self.visit(arg, None)
            for kwarg in node.keywords:
                self.visit(kwarg.value, None)

    def visit_Compare(self, node, type_):
        if isinstance(node.op, (vy_ast.In, vy_ast.NotIn)):
            # Membership test: the left side must match the element type
            # of the right-hand container.
            if isinstance(node.right, vy_ast.List):
                type_ = get_common_types(node.left, *node.right.elements).pop()
                self.visit(node.left, type_)
                for element in node.right.elements:
                    self.visit(element, type_)
            else:
                type_ = get_exact_type_from_node(node.right)
                self.visit(node.right, type_)
                self.visit(node.left, type_.value_type)
        else:
            # Ordinary comparison: both sides share one common type.
            type_ = get_common_types(node.left, node.right).pop()
            self.visit(node.left, type_)
            self.visit(node.right, type_)

    def visit_Constant(self, node, type_):
        node._metadata["type"] = type_

    def visit_Dict(self, node, type_):
        node._metadata["type"] = type_

    def visit_Index(self, node, type_):
        self.visit(node.value, type_)

    def visit_List(self, node, type_):
        if not node.elements:
            return
        # Infer the list type when not supplied (only if unambiguous).
        if type_ is None:
            type_ = get_possible_types_from_node(node)
            if len(type_) == 1:
                type_ = type_.pop()
        node._metadata["type"] = type_
        for element in node.elements:
            self.visit(element, type_.value_type)

    def visit_Name(self, node, type_):
        node._metadata["type"] = get_exact_type_from_node(node)

    def visit_Subscript(self, node, type_):
        base_type = get_exact_type_from_node(node.value)
        if isinstance(base_type, BaseTypeDefinition):
            # in the vast majority of cases `base_type` is a type definition,
            # however there are some edge cases with args to builtin functions
            self.visit(node.slice, base_type.get_index_type(node.slice.value))
        self.visit(node.value, base_type)

    def visit_Tuple(self, node, type_):
        node._metadata["type"] = type_
        # Each element is annotated with its positional member type.
        for element, subtype in zip(node.elements, type_.value_type):
            self.visit(element, subtype)

    def visit_UnaryOp(self, node, type_):
        if type_ is None:
            type_ = get_possible_types_from_node(node.operand)
            if len(type_) == 1:
                type_ = type_.pop()
        self.visit(node.operand, type_)
| 36.339901 | 94 | 0.645655 |
e870d8a17e67984e7f8af7d84b78175d3c8189fd | 875 | py | Python | secondproject/urls.py | Jeonghun-Ban/likelionmyongji_lotto | 1ef6e09eb4eec63a2204ed43ff76d2f378555c9c | [
"MIT"
] | 4 | 2019-03-27T01:52:12.000Z | 2019-05-10T17:10:14.000Z | secondproject/urls.py | likelionmyongji/likelionmyongji_lotto | 1ef6e09eb4eec63a2204ed43ff76d2f378555c9c | [
"MIT"
] | 6 | 2020-06-05T20:35:49.000Z | 2022-02-10T08:14:21.000Z | secondproject/urls.py | likelionmyongji/likelionmyongji_lotto | 1ef6e09eb4eec63a2204ed43ff76d2f378555c9c | [
"MIT"
] | null | null | null | """secondproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import lotto.views
# URL routes: the Django admin plus the lotto app's home and result pages.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', lotto.views.home, name='home'),
    path('result/', lotto.views.result, name='result'),
]
| 35 | 77 | 0.702857 |
d2abc8179010b59ad405a174e3ec7191e695a91c | 2,807 | py | Python | icbd/compiler/code_emitter.py | kmod/icbd | 9636564eb3993afa07c6220d589bbd1991923d74 | [
"MIT"
] | 7 | 2015-04-06T15:17:13.000Z | 2020-10-21T04:57:00.000Z | icbd/compiler/code_emitter.py | kmod/icbd | 9636564eb3993afa07c6220d589bbd1991923d74 | [
"MIT"
] | null | null | null | icbd/compiler/code_emitter.py | kmod/icbd | 9636564eb3993afa07c6220d589bbd1991923d74 | [
"MIT"
] | 4 | 2016-05-16T17:53:08.000Z | 2020-11-28T17:18:50.000Z | from cStringIO import StringIO
class CodeEmitter(object):
    """Accumulates generated LLVM IR and C source text.

    Emitters can be nested: constructing a CodeEmitter from another emitter
    shares the parent's name generator, string table and placeholder
    machinery (so names stay unique file-wide), while a root emitter is
    built from a 4-tuple of (llvm_head, llvm_tail, c_head, c_tail) streams.

    NOTE: this is Python 2 source (`print >>f` statements below).
    """

    def __init__(self, em):
        assert em
        # Per-emitter buffers for LLVM IR and C output lines.
        self._lio = StringIO()
        self._cio = StringIO()
        self._indent = 0
        self._ntemps = {} # (prefix, suffix) -> num
        self._nplaceholders = 0
        self._replacements = {} # subfunctions can register replacements to be dealt with later (since the name of a given variable might not be known in advance, for certain loop constructs like list comprehensions)

        if isinstance(em, CodeEmitter):
            # Nested emitter: delegate naming/placeholder methods to the
            # parent so identifiers are unique across the whole file.
            # self.mkname = lambda prefix='_v', suffix="": em.mkname(prefix="_sub"+prefix, suffix=suffix)
            self.mkname = em.mkname
            self.root_mkname = em.root_mkname
            self.llvm_head, self.llvm_tail, self.c_head, self.c_tail = em.llvm_head, em.llvm_tail, em.c_head, em.c_tail
            self._str_table = em._str_table
            self.register_replacement = em.register_replacement
            self.get_placeholder = em.get_placeholder
        else:
            # Root emitter: `em` is the 4-tuple of output streams.
            self.root_mkname = self.mkname
            self.llvm_head, self.llvm_tail, self.c_head, self.c_tail = em
            self._str_table = {}

    def get_str(self, s):
        """Intern string `s` as an LLVM global constant; return its name."""
        l = len(s)
        # Hex-escape every byte so the constant is safe inside LLVM IR.
        s = ''.join(['\\%02X' % ord(c) for c in s])
        # s = s.replace('\n', '\\0A')
        if s in self._str_table:
            return self._str_table[s]
        n = self.root_mkname("str_")
        # Length is l + 1 to account for the trailing NUL terminator.
        self.llvm_tail.write('''@%s = internal constant [%d x i8] c"%s\\00"\n''' % (n, l + 1, s))
        self._str_table[s] = n
        return n

    def get_str_ptr(em, s):
        """Return an LLVM getelementptr expression yielding an i8* to `s`."""
        n = em.get_str(s)
        return "getelementptr inbounds ([%d x i8]* @%s, i32 0, i32 0)" % (len(s) + 1, n)

    def get_placeholder(self):
        """Return a fresh '#!N!#' token whose text is substituted later."""
        self._nplaceholders += 1
        return "#!%d!#" % self._nplaceholders

    def register_replacement(self, token, s):
        """Record the final text `s` for a placeholder token issued earlier."""
        # Strip the '#!' / '!#' delimiters to recover the numeric id.
        id = int(token[2:-2])
        assert 1 <= id <= self._nplaceholders
        assert id not in self._replacements
        self._replacements[id] = s

    def mkname(self, prefix="_v", suffix=""):
        """Return a unique name of the form <prefix><counter><suffix>."""
        # Digits at the prefix/counter or counter/suffix boundary would let
        # distinct (prefix, counter) pairs produce the same name; forbid them.
        assert not (prefix and prefix[-1].isdigit())
        assert not (suffix and suffix[0].isdigit())
        n = self._ntemps.get((prefix, suffix), 0)
        rtn = "%s%s%s" % (prefix, n, suffix)
        self._ntemps[(prefix, suffix)] = n+1
        return rtn

    def indent(self, x):
        # Adjust the current indentation level by `x` (may be negative).
        self._indent += x

    def get_llvm(self):
        """Return the accumulated LLVM IR text."""
        return self._lio.getvalue().strip()

    def get_c(self):
        """Return the accumulated C source text."""
        return self._cio.getvalue().strip()

    def pl(self, s=''):
        """Emit a line of LLVM IR at the current indent."""
        self.__p(self._lio, s)

    def pc(self, s=''):
        """Emit a line of C source at the current indent."""
        self.__p(self._cio, s)

    def __p(self, f, s):
        # None means "emit nothing" (unlike '', which emits a blank line).
        if s is None:
            return
        if self._indent:
            print >>f, ' '*(self._indent-1), s
        else:
            print >>f, s
| 33.023529 | 216 | 0.568579 |
4a7f7a920d9a6628da869cd1212662c631464b79 | 2,864 | py | Python | docs/conf.py | viniciusdc/qhub-cloud | be7256f26d140eb8edb3b5f19dc222458f5284b7 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | viniciusdc/qhub-cloud | be7256f26d140eb8edb3b5f19dc222458f5284b7 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | viniciusdc/qhub-cloud | be7256f26d140eb8edb3b5f19dc222458f5284b7 | [
"BSD-3-Clause"
] | null | null | null | # import sphinx.writers.html5
# sphinx.writers.html5.HTML5Translator.visit_pending_xref = lambda *x:...
# sphinx.writers.html5.HTML5Translator.depart_pending_xref = lambda *x:...
BLOG_TITLE = title = html_title = "QHub Code as Infrastructure."
BLOG_AUTHOR = author = "Quansight"
html_theme = "pydata_sphinx_theme"
master_doc = "index"
source_suffix = ".md .rst .ipynb .py".split()
extensions = (
"myst_parser nbsphinx sphinx.ext.autodoc sphinx.ext.napoleon".split()
) # autoapi.extension
exclude_patterns = ["_build", "*checkpoint*", "output", "outputs"]
autoapi_type = "python"
autoapi_dirs = []
["qhapi"]
html_theme = "sphinx_material"
THEME = "material-theme"
DEFAULT_LANG = "en"
NAVIGATION_LINKS = {
DEFAULT_LANG: tuple(),
}
THEME_COLOR = "652c90" # "#7B699F"
POSTS = (
("posts/*.md", "posts", "post.tmpl"),
("posts/*.rst", "posts", "post.tmpl"),
("posts/*.txt", "posts", "post.tmpl"),
("posts/*.html", "posts", "post.tmpl"),
("posts/*.ipynb", "posts", "post.tmpl"),
("posts/*.md.ipynb", "posts", "post.tmpl"),
)
templates_path = ['_templates']
# Material theme options (see theme.conf for more information)
html_theme_options = {
# Set the name of the project to appear in the navigation.
"nav_title": "QHub",
# Set you GA account ID to enable tracking
# 'google_analytics_account': 'UA-XXXXX',
# Specify a base_url used to generate sitemap.xml. If not
# specified, then no sitemap will be built.
"base_url": "https://qhub.dev/",
# Set the color and the accent color
"theme_color": THEME_COLOR,
"color_primary": THEME_COLOR,
"color_accent": "light-yellow",
# Set the repo location to get a badge with stats
"repo_url": "https://github.com/Quansight/qhub",
"repo_name": "QHub",
# Visible levels of the global TOC; -1 means unlimited
"globaltoc_depth": 1,
# If False, expand all TOC entries
"globaltoc_collapse": True,
# If True, show hidden TOC entries
"globaltoc_includehidden": False,
"nav_links": [
{
"href": "index",
"title": "QHub Home",
"internal": True,
},
{
"href": "https://pypi.org/project/qhub/",
"title": "Pypi",
"internal": False,
},
{
"href": "docs/faqs",
"title": "FAQ",
"internal": True,
},
],
}
html_sidebars = {
"**": ["logo-text.html", "globaltoc.html", "localtoc.html", "searchbox.html"]
}
# Exclude build directory and Jupyter backup files:
exclude_patterns = ["_build", "*checkpoint*", "site", "jupyter_execute"]
latex_documents = [
(
master_doc,
"qhub.tex",
"Infrastructure as Code",
"QHub",
"manual",
)
]
jupyter_execute_notebooks = "off"
# SITE_URL = "https://quansight.github.io/qhub-home/"
| 28.356436 | 81 | 0.61662 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.