hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7fb24ae8c976a4d6787651bae16cdf3c623fa6d | 4,782 | py | Python | pytorch-r2d2_mini/r2d2.py | wotmd5731/J_LAB | 6951dbc898f063fbbe2853a36b7caeeca10ed173 | [
"Apache-2.0"
] | null | null | null | pytorch-r2d2_mini/r2d2.py | wotmd5731/J_LAB | 6951dbc898f063fbbe2853a36b7caeeca10ed173 | [
"Apache-2.0"
] | null | null | null | pytorch-r2d2_mini/r2d2.py | wotmd5731/J_LAB | 6951dbc898f063fbbe2853a36b7caeeca10ed173 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.utils.data
import torch.optim as optim
import torch.nn as nn
import numpy as np
from collections import deque
import random
import gym
import os
from copy import deepcopy
from time import time, sleep
import torch.multiprocessing as mp
#mp.set_start_method('spawn', force=True)
from models import ActorNet, CriticNet
import queue
import visdom
vis = visdom.Visdom()
os.system('cls')
from actor import Actor, actor_process
from learner import Learner, learner_process
from models import ActorNet, CriticNet
if __name__ == '__main__':
vis.close()
config = {
'game_name':'CartPole-v0',
'obs_space':(1,4),
'reward_space':(1,1),
'gamma_space':(1,1),
'action_space':(1,2),
'num_envs':1,
'use_cnn':False,
# 'action_argmax':True,
'get_img_from_render':False,
# 'obs_space':(1,3,84,84),
# 'reward_space':(1,1),
# 'gamma_space':(1,1),
# 'action_space':(1,2),
# 'num_envs':1,
# 'use_cnn':True,
# 'action_argmax':True,
# 'get_img_from_render':True,
#
'action_argmax':False,
# 'game_name':'Pendulum-v0',
# 'action_space':1,
# 'obs_space':(1,3),
'burn_in_length':0,
'learning_length':1,
'n_step':1,
'memory_sequence_size':1000000,
# 'actor_parameter_update_interval':2000,
'learner_parameter_update_interval':50,
'actor_lr':1e-4,
'critic_lr':1e-4,
'gamma':0.997,
'actor_max_frame':1000000,
'learner_max_frame':100000,
'batch_size':64,
'num_processes':1,
'learner_actor_rate':20,
'target_update_interval':50,
'max_shared_q_size':5,
}
num_processes = config['num_processes']
use_cuda = torch.cuda.is_available()
dev_cpu = torch.device('cpu')
dev_gpu = torch.device('cuda' if use_cuda else 'cpu')
# manager = mp.Manager()
# shared_state = manager.dict()
# shared_queue = manager.Queue()
shared_queue = mp.Queue()
# shared_queue = queue.Queue()
shared_state = dict()
shared_state["actor"] = ActorNet(dev_cpu,config).share_memory()
shared_state["critic"] = CriticNet(dev_cpu,config).share_memory()
shared_state["target_actor"] = ActorNet(dev_cpu,config).share_memory()
shared_state["target_critic"] = CriticNet(dev_cpu,config).share_memory()
# shared_state["frame"] = mp.Array('i', [0 for i in range(num_processes)])
# shared_state["sleep"] = mp.Array('i', [0 for i in range(num_processes)])
shared_state["update"] = mp.Array('i', [0 for i in range(num_processes)])
# shared_state["actor"] = ActorNet(config['obs_space'], config['action_space'],dev_cpu)
# shared_state["critic"] = CriticNet(config['obs_space'], config['action_space'],dev_cpu)
# shared_state["target_actor"] = ActorNet(config['obs_space'], config['action_space'],dev_cpu)
# shared_state["target_critic"] = CriticNet(config['obs_space'], config['action_space'],dev_cpu)
# shared_state["frame"] = [0 for i in range(num_processes)]
# shared_state["sleep"] = [0 for i in range(num_processes)]
# shared_state["update"]=False
# for i in range(10):
# actor_process(0,config,dev_cpu,shared_state,shared_queue,0.3)
# actor_process(1,config,dev_cpu,shared_state,shared_queue,0.3)
# actor_process(2,config,dev_cpu,shared_state,shared_queue,0.3)
# learner_process(1,config,dev_cpu,shared_state,shared_queue)
#
proc_list = []
proc_list.append(mp.Process(target=learner_process, args=(num_processes, config,dev_cpu,shared_state,shared_queue)))
eps = [0.10,0.6,0.4,0.3,0.2,0.6,0.4,0.6,0.2,0.4]
for i in range(num_processes):
proc_list.append( mp.Process(target=actor_process, args=(i,config,dev_cpu,shared_state,shared_queue,eps[i])) )
for proc in proc_list:
proc.start()
try:
for proc in proc_list:
proc.join()
except:
print('qclose')
shared_queue.close()
# print('shared_state close')
# shared_state["update"].close()
# for key in shared_state.keys():
# shared_state[key].close()
print('process close')
for proc in proc_list:
proc.terminate()
shared_queue.join_thread()
# shared_state["update"].join_thread()
# for key in shared_state.keys():
# shared_state[key].join_thread()
# shared_state.close()
# shared_queue.close()
| 31.668874 | 120 | 0.607486 | import torch
import torch.utils.data
import torch.optim as optim
import torch.nn as nn
import numpy as np
from collections import deque
import random
import gym
import os
from copy import deepcopy
from time import time, sleep
import torch.multiprocessing as mp
from models import ActorNet, CriticNet
import queue
import visdom
vis = visdom.Visdom()
os.system('cls')
from actor import Actor, actor_process
from learner import Learner, learner_process
from models import ActorNet, CriticNet
if __name__ == '__main__':
vis.close()
config = {
'game_name':'CartPole-v0',
'obs_space':(1,4),
'reward_space':(1,1),
'gamma_space':(1,1),
'action_space':(1,2),
'num_envs':1,
'use_cnn':False,
'get_img_from_render':False,
'action_argmax':False,
'burn_in_length':0,
'learning_length':1,
'n_step':1,
'memory_sequence_size':1000000,
'learner_parameter_update_interval':50,
'actor_lr':1e-4,
'critic_lr':1e-4,
'gamma':0.997,
'actor_max_frame':1000000,
'learner_max_frame':100000,
'batch_size':64,
'num_processes':1,
'learner_actor_rate':20,
'target_update_interval':50,
'max_shared_q_size':5,
}
num_processes = config['num_processes']
use_cuda = torch.cuda.is_available()
dev_cpu = torch.device('cpu')
dev_gpu = torch.device('cuda' if use_cuda else 'cpu')
shared_queue = mp.Queue()
shared_state = dict()
shared_state["actor"] = ActorNet(dev_cpu,config).share_memory()
shared_state["critic"] = CriticNet(dev_cpu,config).share_memory()
shared_state["target_actor"] = ActorNet(dev_cpu,config).share_memory()
shared_state["target_critic"] = CriticNet(dev_cpu,config).share_memory()
shared_state["update"] = mp.Array('i', [0 for i in range(num_processes)])
proc_list = []
proc_list.append(mp.Process(target=learner_process, args=(num_processes, config,dev_cpu,shared_state,shared_queue)))
eps = [0.10,0.6,0.4,0.3,0.2,0.6,0.4,0.6,0.2,0.4]
for i in range(num_processes):
proc_list.append( mp.Process(target=actor_process, args=(i,config,dev_cpu,shared_state,shared_queue,eps[i])) )
for proc in proc_list:
proc.start()
try:
for proc in proc_list:
proc.join()
except:
print('qclose')
shared_queue.close()
print('process close')
for proc in proc_list:
proc.terminate()
shared_queue.join_thread()
| true | true |
f7fb252558f8525c2eaa53faa368501d1c61e260 | 8,359 | py | Python | sympy/polys/tests/test_multivariate_resultants.py | msgoff/sympy | 1e7daef7514902f5e89718fa957b7b36c6669a10 | [
"BSD-3-Clause"
] | null | null | null | sympy/polys/tests/test_multivariate_resultants.py | msgoff/sympy | 1e7daef7514902f5e89718fa957b7b36c6669a10 | [
"BSD-3-Clause"
] | null | null | null | sympy/polys/tests/test_multivariate_resultants.py | msgoff/sympy | 1e7daef7514902f5e89718fa957b7b36c6669a10 | [
"BSD-3-Clause"
] | null | null | null | """Tests for Dixon's and Macaulay's classes. """
from sympy import Matrix, factor
from sympy.core import symbols
from sympy.tensor.indexed import IndexedBase
from sympy.polys.multivariate_resultants import DixonResultant, MacaulayResultant
# Shared fixtures: two generic bilinear polynomials in x and y with symbolic
# coefficients, plus the resultant objects exercised by the tests below.
# NOTE(review): the coefficient symbols are *named* "a" and "b" but bound to
# the Python names c and d — presumably to avoid clashing with other locals.
c, d = symbols("a, b")
x, y = symbols("x, y")
p = c * x + y
q = x + d * y
# Module-level instances shared by every test that doesn't build its own.
dixon = DixonResultant(polynomials=[p, q], variables=[x, y])
macaulay = MacaulayResultant(polynomials=[p, q], variables=[x, y])
def test_dixon_resultant_init():
    """Check that DixonResultant records its inputs and derived attributes."""
    alpha = IndexedBase("alpha")

    assert dixon.polynomials == [p, q]
    assert dixon.variables == [x, y]
    # Two polynomials (n) in two variables (m).
    assert dixon.n == 2
    assert dixon.m == 2
    # One dummy variable alpha[i] is created per variable.
    assert dixon.dummy_variables == [alpha[0], alpha[1]]
def test_get_dixon_polynomial_numerical():
    """Test Dixon's polynomial for a numerical example."""
    # a[0], a[1] are the dummy variables DixonResultant substitutes for x, y.
    a = IndexedBase("alpha")

    p = x + y
    q = x ** 2 + y ** 3
    h = x ** 2 + y

    dixon = DixonResultant([p, q, h], [x, y])
    # Expected Dixon polynomial, written term by term; compared in fully
    # expanded form below so the grouping of terms does not matter.
    polynomial = (
        -x * y ** 2 * a[0]
        - x * y ** 2 * a[1]
        - x * y * a[0] * a[1]
        - x * y * a[1] ** 2
        - x * a[0] * a[1] ** 2
        + x * a[0]
        - y ** 2 * a[0] * a[1]
        + y ** 2 * a[1]
        - y * a[0] * a[1] ** 2
        + y * a[1] ** 2
    )

    assert dixon.get_dixon_polynomial().as_expr().expand() == polynomial
def test_get_max_degrees():
    """Check the per-variable maximum degrees of the Dixon polynomial."""
    f = x + y
    g = x ** 2 + y ** 3
    k = x ** 2 + y

    resultant = DixonResultant(polynomials=[f, g, k], variables=[x, y])
    poly = resultant.get_dixon_polynomial()

    # In the Dixon polynomial, x appears with max degree 1 and y with 2.
    assert resultant.get_max_degrees(poly) == [1, 2]
def test_get_dixon_matrix():
    """Check Dixon's resultant for a numerical example."""
    x, y = symbols("x, y")

    polys = [x + y, x ** 2 + y ** 3, x ** 2 + y]
    resultant = DixonResultant(polys, [x, y])
    dixon_poly = resultant.get_dixon_polynomial()

    # The system has a common zero, so the Dixon matrix is singular.
    assert resultant.get_dixon_matrix(dixon_poly).det() == 0
def test_get_dixon_matrix_example_two():
    """Test Dixon's matrix for example from [Palancz08]_."""
    x, y, z = symbols("x, y, z")

    # The `z * 0` and `y * 0` terms make each polynomial formally involve
    # all three symbols, which keeps the construction uniform.
    f = x ** 2 + y ** 2 - 1 + z * 0
    g = x ** 2 + z ** 2 - 1 + y * 0
    h = y ** 2 + z ** 2 - 1

    # Eliminate y and z; the determinant is then a polynomial in x alone.
    example_two = DixonResultant([f, g, h], [y, z])
    poly = example_two.get_dixon_polynomial()
    matrix = example_two.get_dixon_matrix(poly)

    expr = 1 - 8 * x ** 2 + 24 * x ** 4 - 32 * x ** 6 + 16 * x ** 8
    assert (matrix.det() - expr).expand() == 0
def test_KSY_precondition():
    """Check the precondition test used by the KSY Dixon resultant."""
    A, B, C = symbols("A, B, C")

    # (matrix, expected precondition result) pairs.
    cases = [
        (Matrix([[1, 2, 3], [4, 5, 12], [6, 7, 18]]), False),
        (Matrix([[0, C ** 2], [-2 * C, -(C ** 2)]]), True),
        (Matrix([[1, 0], [0, 1]]), True),
        (Matrix([[A ** 2, 0, 1], [A, 1, 1 / A]]), False),
        (Matrix([[5, 1], [2, B], [0, 1], [0, 0]]), True),
    ]

    for matrix, expected in cases:
        assert dixon.KSY_precondition(matrix) == expected
def test_delete_zero_rows_and_columns():
    """Check removal of all-zero rows and columns from a matrix."""
    A, B, C = symbols("A, B, C")

    # (input matrix, expected matrix after pruning zero rows/columns).
    cases = [
        (Matrix([[0, 0], [0, 0], [1, 2]]), Matrix([[1, 2]])),
        (
            Matrix([[0, 1, 2], [0, 3, 4], [0, 5, 6]]),
            Matrix([[1, 2], [3, 4], [5, 6]]),
        ),
        (
            Matrix([[0, 0, 0, 0], [0, 1, 2, 0], [0, 3, 4, 0], [0, 0, 0, 0]]),
            Matrix([[1, 2], [3, 4]]),
        ),
        (Matrix([[1, 0, 2], [0, 0, 0], [3, 0, 4]]), Matrix([[1, 2], [3, 4]])),
        (
            Matrix([[0, 0, 0, 1], [0, 0, 0, 2], [0, 0, 0, 3], [0, 0, 0, 4]]),
            Matrix([[1], [2], [3], [4]]),
        ),
        # A zero *entry* is kept as long as its row/column has a nonzero.
        (
            Matrix([[0, 0, A], [B, 0, 0], [0, 0, C]]),
            Matrix([[0, A], [B, 0], [0, C]]),
        ),
    ]

    for matrix, expected in cases:
        assert dixon.delete_zero_rows_and_columns(matrix) == expected
def test_product_leading_entries():
    """Check the product of each row's first nonzero entry."""
    A, B = symbols("A, B")

    # (matrix, expected product of leading entries). All-zero rows
    # contribute a factor of 1.
    cases = [
        (Matrix([[1, 2, 3], [0, 4, 5], [0, 0, 6]]), 24),
        (Matrix([[0, 0, 1], [2, 0, 3]]), 2),
        (Matrix([[0, 0, 0], [1, 2, 3], [0, 0, 0]]), 1),
        (Matrix([[0, 0, A], [1, 2, 3], [B, 0, 0]]), A * B),
    ]

    for matrix, expected in cases:
        assert dixon.product_leading_entries(matrix) == expected
def test_get_KSY_Dixon_resultant_example_one():
    """Check the KSY Dixon resultant on the first worked example."""
    x, y, z = symbols("x, y, z")

    polys = [x * y * z, x ** 2 - z ** 2, x + y + z]
    resultant = DixonResultant(polys, [x, y])
    matrix = resultant.get_dixon_matrix(resultant.get_dixon_polynomial())

    # Eliminating x and y leaves a condition on z alone.
    assert resultant.get_KSY_Dixon_resultant(matrix) == -(z ** 3)
def test_get_KSY_Dixon_resultant_example_two():
    """Check the KSY Dixon resultant on the second worked example."""
    x, y, A = symbols("x, y, A")

    f = x * y + x * A + x - A ** 2 - A + y ** 2 + y
    g = x ** 2 + x * A - x + x * y + y * A - y
    h = x ** 2 + x * y + 2 * x - x * A - y * A - 2 * A

    resultant = DixonResultant([f, g, h], [x, y])
    matrix = resultant.get_dixon_matrix(resultant.get_dixon_polynomial())

    # Factor so the comparison is against the canonical factored form.
    D = factor(resultant.get_KSY_Dixon_resultant(matrix))
    assert D == -8 * A * (A - 1) * (A + 2) * (2 * A - 1) ** 2
def test_macaulay_resultant_init():
    """Check that MacaulayResultant records its inputs and derived sizes."""
    assert macaulay.polynomials == [p, q]
    assert macaulay.variables == [x, y]
    assert macaulay.n == 2
    # Two linear polynomials: degrees [1, 1], so degree_m == 1 and there
    # are exactly two degree-1 monomials (x and y).
    assert macaulay.degrees == [1, 1]
    assert macaulay.degree_m == 1
    assert macaulay.monomials_size == 2
def test_get_degree_m():
    # degree_m = 1 + sum(d_i - 1) = 1 + 0 + 0 for the two linear fixtures.
    assert macaulay._get_degree_m() == 1
def test_get_size():
    # Number of monomials of degree degree_m (= 1) in two variables: x, y.
    assert macaulay.get_size() == 2
def test_macaulay_example_one():
    """Tests the Macaulay for example from [Bruce97]_"""
    x, y, z = symbols("x, y, z")
    a_1_1, a_1_2, a_1_3 = symbols("a_1_1, a_1_2, a_1_3")
    a_2_2, a_2_3, a_3_3 = symbols("a_2_2, a_2_3, a_3_3")
    b_1_1, b_1_2, b_1_3 = symbols("b_1_1, b_1_2, b_1_3")
    b_2_2, b_2_3, b_3_3 = symbols("b_2_2, b_2_3, b_3_3")
    c_1, c_2, c_3 = symbols("c_1, c_2, c_3")

    # Two generic quadratic forms and one generic linear form in x, y, z.
    f_1 = (
        a_1_1 * x ** 2
        + a_1_2 * x * y
        + a_1_3 * x * z
        + a_2_2 * y ** 2
        + a_2_3 * y * z
        + a_3_3 * z ** 2
    )
    f_2 = (
        b_1_1 * x ** 2
        + b_1_2 * x * y
        + b_1_3 * x * z
        + b_2_2 * y ** 2
        + b_2_3 * y * z
        + b_3_3 * z ** 2
    )
    f_3 = c_1 * x + c_2 * y + c_3 * z

    mac = MacaulayResultant([f_1, f_2, f_3], [x, y, z])

    assert mac.degrees == [2, 2, 1]
    # degree_m = 1 + (2-1) + (2-1) + (1-1) = 3
    assert mac.degree_m == 3
    # All 10 monomials of total degree 3 in x, y, z, in the expected order.
    assert mac.monomial_set == [
        x ** 3,
        x ** 2 * y,
        x ** 2 * z,
        x * y ** 2,
        x * y * z,
        x * z ** 2,
        y ** 3,
        y ** 2 * z,
        y * z ** 2,
        z ** 3,
    ]
    assert mac.monomials_size == 10
    assert mac.get_row_coefficients() == [
        [x, y, z],
        [x, y, z],
        [x * y, x * z, y * z, z ** 2],
    ]

    matrix = mac.get_matrix()
    # The Macaulay matrix is square, one row/column per degree-3 monomial.
    assert matrix.shape == (mac.monomials_size, mac.monomials_size)
    assert mac.get_submatrix(matrix) == Matrix([[a_1_1, a_2_2], [b_1_1, b_2_2]])
def test_macaulay_example_two():
    """Tests the Macaulay formulation for example from [Stiller96]_."""
    x, y, z = symbols("x, y, z")
    a_0, a_1, a_2 = symbols("a_0, a_1, a_2")
    b_0, b_1, b_2 = symbols("b_0, b_1, b_2")
    c_0, c_1, c_2, c_3, c_4 = symbols("c_0, c_1, c_2, c_3, c_4")

    # A linear, a quadratic and a cubic polynomial in x, y, z.
    f = a_0 * y - a_1 * x + a_2 * z
    g = b_1 * x ** 2 + b_0 * y ** 2 - b_2 * z ** 2
    h = c_0 * y - c_1 * x ** 3 + c_2 * x ** 2 * z - c_3 * x * z ** 2 + c_4 * z ** 3

    mac = MacaulayResultant([f, g, h], [x, y, z])

    assert mac.degrees == [1, 2, 3]
    # degree_m = 1 + (1-1) + (2-1) + (3-1) = 4
    assert mac.degree_m == 4
    # 15 monomials of total degree 4 in three variables.
    assert mac.monomials_size == 15
    assert len(mac.get_row_coefficients()) == mac.n

    matrix = mac.get_matrix()
    assert matrix.shape == (mac.monomials_size, mac.monomials_size)
    assert mac.get_submatrix(matrix) == Matrix(
        [[-a_1, a_0, a_2, 0], [0, -a_1, 0, 0], [0, 0, -a_1, 0], [0, 0, 0, -a_1]]
    )
| 28.050336 | 85 | 0.539658 |
from sympy import Matrix, factor
from sympy.core import symbols
from sympy.tensor.indexed import IndexedBase
from sympy.polys.multivariate_resultants import DixonResultant, MacaulayResultant
c, d = symbols("a, b")
x, y = symbols("x, y")
p = c * x + y
q = x + d * y
dixon = DixonResultant(polynomials=[p, q], variables=[x, y])
macaulay = MacaulayResultant(polynomials=[p, q], variables=[x, y])
def test_dixon_resultant_init():
a = IndexedBase("alpha")
assert dixon.polynomials == [p, q]
assert dixon.variables == [x, y]
assert dixon.n == 2
assert dixon.m == 2
assert dixon.dummy_variables == [a[0], a[1]]
def test_get_dixon_polynomial_numerical():
a = IndexedBase("alpha")
p = x + y
q = x ** 2 + y ** 3
h = x ** 2 + y
dixon = DixonResultant([p, q, h], [x, y])
polynomial = (
-x * y ** 2 * a[0]
- x * y ** 2 * a[1]
- x * y * a[0] * a[1]
- x * y * a[1] ** 2
- x * a[0] * a[1] ** 2
+ x * a[0]
- y ** 2 * a[0] * a[1]
+ y ** 2 * a[1]
- y * a[0] * a[1] ** 2
+ y * a[1] ** 2
)
assert dixon.get_dixon_polynomial().as_expr().expand() == polynomial
def test_get_max_degrees():
p = x + y
q = x ** 2 + y ** 3
h = x ** 2 + y
dixon = DixonResultant(polynomials=[p, q, h], variables=[x, y])
dixon_polynomial = dixon.get_dixon_polynomial()
assert dixon.get_max_degrees(dixon_polynomial) == [1, 2]
def test_get_dixon_matrix():
x, y = symbols("x, y")
p = x + y
q = x ** 2 + y ** 3
h = x ** 2 + y
dixon = DixonResultant([p, q, h], [x, y])
polynomial = dixon.get_dixon_polynomial()
assert dixon.get_dixon_matrix(polynomial).det() == 0
def test_get_dixon_matrix_example_two():
x, y, z = symbols("x, y, z")
f = x ** 2 + y ** 2 - 1 + z * 0
g = x ** 2 + z ** 2 - 1 + y * 0
h = y ** 2 + z ** 2 - 1
example_two = DixonResultant([f, g, h], [y, z])
poly = example_two.get_dixon_polynomial()
matrix = example_two.get_dixon_matrix(poly)
expr = 1 - 8 * x ** 2 + 24 * x ** 4 - 32 * x ** 6 + 16 * x ** 8
assert (matrix.det() - expr).expand() == 0
def test_KSY_precondition():
A, B, C = symbols("A, B, C")
m1 = Matrix([[1, 2, 3], [4, 5, 12], [6, 7, 18]])
m2 = Matrix([[0, C ** 2], [-2 * C, -(C ** 2)]])
m3 = Matrix([[1, 0], [0, 1]])
m4 = Matrix([[A ** 2, 0, 1], [A, 1, 1 / A]])
m5 = Matrix([[5, 1], [2, B], [0, 1], [0, 0]])
assert dixon.KSY_precondition(m1) == False
assert dixon.KSY_precondition(m2) == True
assert dixon.KSY_precondition(m3) == True
assert dixon.KSY_precondition(m4) == False
assert dixon.KSY_precondition(m5) == True
def test_delete_zero_rows_and_columns():
A, B, C = symbols("A, B, C")
m1 = Matrix([[0, 0], [0, 0], [1, 2]])
m2 = Matrix([[0, 1, 2], [0, 3, 4], [0, 5, 6]])
m3 = Matrix([[0, 0, 0, 0], [0, 1, 2, 0], [0, 3, 4, 0], [0, 0, 0, 0]])
m4 = Matrix([[1, 0, 2], [0, 0, 0], [3, 0, 4]])
m5 = Matrix([[0, 0, 0, 1], [0, 0, 0, 2], [0, 0, 0, 3], [0, 0, 0, 4]])
m6 = Matrix([[0, 0, A], [B, 0, 0], [0, 0, C]])
assert dixon.delete_zero_rows_and_columns(m1) == Matrix([[1, 2]])
assert dixon.delete_zero_rows_and_columns(m2) == Matrix([[1, 2], [3, 4], [5, 6]])
assert dixon.delete_zero_rows_and_columns(m3) == Matrix([[1, 2], [3, 4]])
assert dixon.delete_zero_rows_and_columns(m4) == Matrix([[1, 2], [3, 4]])
assert dixon.delete_zero_rows_and_columns(m5) == Matrix([[1], [2], [3], [4]])
assert dixon.delete_zero_rows_and_columns(m6) == Matrix([[0, A], [B, 0], [0, C]])
def test_product_leading_entries():
A, B = symbols("A, B")
m1 = Matrix([[1, 2, 3], [0, 4, 5], [0, 0, 6]])
m2 = Matrix([[0, 0, 1], [2, 0, 3]])
m3 = Matrix([[0, 0, 0], [1, 2, 3], [0, 0, 0]])
m4 = Matrix([[0, 0, A], [1, 2, 3], [B, 0, 0]])
assert dixon.product_leading_entries(m1) == 24
assert dixon.product_leading_entries(m2) == 2
assert dixon.product_leading_entries(m3) == 1
assert dixon.product_leading_entries(m4) == A * B
def test_get_KSY_Dixon_resultant_example_one():
x, y, z = symbols("x, y, z")
p = x * y * z
q = x ** 2 - z ** 2
h = x + y + z
dixon = DixonResultant([p, q, h], [x, y])
dixon_poly = dixon.get_dixon_polynomial()
dixon_matrix = dixon.get_dixon_matrix(dixon_poly)
D = dixon.get_KSY_Dixon_resultant(dixon_matrix)
assert D == -(z ** 3)
def test_get_KSY_Dixon_resultant_example_two():
x, y, A = symbols("x, y, A")
p = x * y + x * A + x - A ** 2 - A + y ** 2 + y
q = x ** 2 + x * A - x + x * y + y * A - y
h = x ** 2 + x * y + 2 * x - x * A - y * A - 2 * A
dixon = DixonResultant([p, q, h], [x, y])
dixon_poly = dixon.get_dixon_polynomial()
dixon_matrix = dixon.get_dixon_matrix(dixon_poly)
D = factor(dixon.get_KSY_Dixon_resultant(dixon_matrix))
assert D == -8 * A * (A - 1) * (A + 2) * (2 * A - 1) ** 2
def test_macaulay_resultant_init():
assert macaulay.polynomials == [p, q]
assert macaulay.variables == [x, y]
assert macaulay.n == 2
assert macaulay.degrees == [1, 1]
assert macaulay.degree_m == 1
assert macaulay.monomials_size == 2
def test_get_degree_m():
assert macaulay._get_degree_m() == 1
def test_get_size():
assert macaulay.get_size() == 2
def test_macaulay_example_one():
x, y, z = symbols("x, y, z")
a_1_1, a_1_2, a_1_3 = symbols("a_1_1, a_1_2, a_1_3")
a_2_2, a_2_3, a_3_3 = symbols("a_2_2, a_2_3, a_3_3")
b_1_1, b_1_2, b_1_3 = symbols("b_1_1, b_1_2, b_1_3")
b_2_2, b_2_3, b_3_3 = symbols("b_2_2, b_2_3, b_3_3")
c_1, c_2, c_3 = symbols("c_1, c_2, c_3")
f_1 = (
a_1_1 * x ** 2
+ a_1_2 * x * y
+ a_1_3 * x * z
+ a_2_2 * y ** 2
+ a_2_3 * y * z
+ a_3_3 * z ** 2
)
f_2 = (
b_1_1 * x ** 2
+ b_1_2 * x * y
+ b_1_3 * x * z
+ b_2_2 * y ** 2
+ b_2_3 * y * z
+ b_3_3 * z ** 2
)
f_3 = c_1 * x + c_2 * y + c_3 * z
mac = MacaulayResultant([f_1, f_2, f_3], [x, y, z])
assert mac.degrees == [2, 2, 1]
assert mac.degree_m == 3
assert mac.monomial_set == [
x ** 3,
x ** 2 * y,
x ** 2 * z,
x * y ** 2,
x * y * z,
x * z ** 2,
y ** 3,
y ** 2 * z,
y * z ** 2,
z ** 3,
]
assert mac.monomials_size == 10
assert mac.get_row_coefficients() == [
[x, y, z],
[x, y, z],
[x * y, x * z, y * z, z ** 2],
]
matrix = mac.get_matrix()
assert matrix.shape == (mac.monomials_size, mac.monomials_size)
assert mac.get_submatrix(matrix) == Matrix([[a_1_1, a_2_2], [b_1_1, b_2_2]])
def test_macaulay_example_two():
x, y, z = symbols("x, y, z")
a_0, a_1, a_2 = symbols("a_0, a_1, a_2")
b_0, b_1, b_2 = symbols("b_0, b_1, b_2")
c_0, c_1, c_2, c_3, c_4 = symbols("c_0, c_1, c_2, c_3, c_4")
f = a_0 * y - a_1 * x + a_2 * z
g = b_1 * x ** 2 + b_0 * y ** 2 - b_2 * z ** 2
h = c_0 * y - c_1 * x ** 3 + c_2 * x ** 2 * z - c_3 * x * z ** 2 + c_4 * z ** 3
mac = MacaulayResultant([f, g, h], [x, y, z])
assert mac.degrees == [1, 2, 3]
assert mac.degree_m == 4
assert mac.monomials_size == 15
assert len(mac.get_row_coefficients()) == mac.n
matrix = mac.get_matrix()
assert matrix.shape == (mac.monomials_size, mac.monomials_size)
assert mac.get_submatrix(matrix) == Matrix(
[[-a_1, a_0, a_2, 0], [0, -a_1, 0, 0], [0, 0, -a_1, 0], [0, 0, 0, -a_1]]
)
| true | true |
f7fb27196bc7774b2fd914d3d983e69fcef6b7d4 | 18,360 | py | Python | lib/streamlit/elements/data_frame_proto.py | Camilo-Mendoza/streamlit-ML | be8aafdf9f334b92a6e056e6c4f994da82587f80 | [
"Apache-2.0"
] | null | null | null | lib/streamlit/elements/data_frame_proto.py | Camilo-Mendoza/streamlit-ML | be8aafdf9f334b92a6e056e6c4f994da82587f80 | [
"Apache-2.0"
] | 9 | 2021-03-01T20:47:52.000Z | 2022-02-12T20:49:50.000Z | lib/streamlit/elements/data_frame_proto.py | Camilo-Mendoza/streamlit-ML | be8aafdf9f334b92a6e056e6c4f994da82587f80 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018-2019 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to marshall a pandas.DataFrame into a proto.Dataframe."""
import re
import tzlocal
from collections import namedtuple
from streamlit import util
from streamlit.logger import get_logger
LOGGER = get_logger(__name__)
CSSStyle = namedtuple('CSSStyle', ['property', 'value'])
def marshall_data_frame(data, proto_df):
    """Convert a pandas.DataFrame into a proto.DataFrame.

    Parameters
    ----------
    data : pandas.DataFrame, numpy.ndarray, Iterable, dict, DataFrame, Styler, or None
        Something that is or can be converted to a dataframe.

    proto_df : proto.DataFrame
        Output. The protobuf for a Streamlit DataFrame proto.

    """
    df = convert_anything_to_df(data)

    # Convert df into an iterable of columns (each of type Series).
    df_data = (df.iloc[:, col] for col in range(len(df.columns)))

    _marshall_table(df_data, proto_df.data)
    _marshall_index(df.columns, proto_df.columns)
    _marshall_index(df.index, proto_df.index)

    # Only a pandas Styler carries style data; for anything else the style
    # marshaller emits empty per-cell styles.
    styler = data if _is_pandas_styler(data) else None
    _marshall_styles(proto_df.style, df, styler)
def convert_anything_to_df(df):
    """Try to convert different formats to a Pandas Dataframe.

    Parameters
    ----------
    df : ndarray, Iterable, dict, DataFrame, Styler, None, or any

    Returns
    -------
    pandas.DataFrame

    """
    # Already a DataFrame: pass it through unchanged.
    if util.is_type(df, 'pandas.core.frame.DataFrame'):
        return df

    # A Styler wraps the DataFrame it styles.
    if _is_pandas_styler(df):
        return df.data

    import pandas as pd

    # A zero-dimensional ndarray is not iterable, so map it to an empty frame.
    if util.is_type(df, 'numpy.ndarray') and len(df.shape) == 0:
        return pd.DataFrame([])

    # Fall back to the pandas constructor, which raises if `df` is not
    # something it can handle.
    return pd.DataFrame(df)
def _is_pandas_styler(obj):
    """Return True if `obj` is a pandas.io.formats.style.Styler."""
    return util.is_type(obj, 'pandas.io.formats.style.Styler')
def _marshall_styles(proto_table_style, df, styler=None):
    """Adds pandas.Styler styling data to a proto.DataFrame

    Parameters
    ----------
    proto_table_style : proto.TableStyle
    df : pandas.DataFrame
    styler : pandas.Styler holding styling data for the data frame, or
        None if there's no style data to marshall
    """
    # NB: we're using protected members of Styler to get this data,
    # which is non-ideal and could break if Styler's interface changes.
    if styler is not None:
        # _compute() applies the styling functions; _translate() renders the
        # result into a plain dict we can parse.
        styler._compute()
        translated_style = styler._translate()
        css_styles = _get_css_styles(translated_style)
        display_values = _get_custom_display_values(df, translated_style)
    else:
        # If we have no Styler, we just make an empty CellStyle for each cell
        css_styles = {}
        display_values = {}
    # Emit one proto column per DataFrame column, one CellStyle per cell,
    # in column-major order to match the proto's cols/styles layout.
    nrows, ncols = df.shape
    for col in range(ncols):
        proto_col = proto_table_style.cols.add()
        for row in range(nrows):
            proto_cell_style = proto_col.styles.add()
            # Copy any CSS declarations recorded for this (row, col).
            for css in css_styles.get((row, col), []):
                proto_css = proto_cell_style.css.add()
                proto_css.property = css.property
                proto_css.value = css.value
            # Only cells with a customized display format get a value here.
            display_value = display_values.get((row, col), None)
            if display_value is not None:
                proto_cell_style.display_value = display_value
                proto_cell_style.has_display_value = True
def _get_css_styles(translated_style):
    """Parses pandas.Styler style dictionary into a
    {(row, col): [CSSStyle]} dictionary
    """
    # translated_style['cellstyle'] has the shape:
    # [
    #   {
    #     'props': [['color', ' black'], ['background-color', 'orange'], ['', '']],
    #     'selector': 'row0_col0'
    #   }
    #   ...
    # ]
    selector_pattern = re.compile(r'row(\d+)_col(\d+)')

    css_styles = {}
    for cell_style in translated_style['cellstyle']:
        # Selector strings have the form 'row0_col0'.
        cell_selector = cell_style['selector']
        match = selector_pattern.match(cell_selector)
        if match is None:
            raise RuntimeError('Failed to parse cellstyle selector "%s"' %
                               cell_selector)

        declarations = []
        for prop in cell_style['props']:
            if not isinstance(prop, list) or len(prop) != 2:
                raise RuntimeError('Unexpected cellstyle props "%s"' % prop)
            prop_name = str(prop[0]).strip()
            prop_value = str(prop[1]).strip()
            # Skip empty placeholder entries such as ['', ''].
            if prop_name and prop_value:
                declarations.append(
                    CSSStyle(property=prop_name, value=prop_value))

        cell = (int(match.group(1)), int(match.group(2)))
        css_styles[cell] = declarations

    return css_styles
def _get_custom_display_values(df, translated_style):
    """Parses pandas.Styler style dictionary into a
    {(row, col): display_value} dictionary for cells whose display format
    has been customized.

    Parameters
    ----------
    df : pandas.DataFrame
        The frame the style data was computed for. Only used to look up
        the Styler's default formatter.
    translated_style : dict
        The output of pandas.Styler._translate().
    """
    # Create {(row, col): display_value} from translated_style['body']
    # translated_style['body'] has the shape:
    # [
    #   [ // row
    #     { // cell or header
    #       'id': 'level0_row0' (for row header) | 'row0_col0' (for cells)
    #       'value': 1.329212
    #       'display_value': '132.92%'
    #       ...
    #     }
    #   ]
    # ]
    default_formatter = df.style._display_funcs[(0, 0)]

    def has_custom_display_value(cell):
        # True if this cell's display string differs from what the default
        # formatter would produce for its raw value.
        value = str(cell['value'])
        display_value = str(cell['display_value'])
        if value == display_value:
            return False
        # Pandas applies a default style to all float values, regardless
        # of whether they have a user-specified display format. We test
        # for that here.
        return default_formatter(value) != display_value
    cell_selector_regex = re.compile(r'row(\d+)_col(\d+)')
    header_selector_regex = re.compile(r'level(\d+)_row(\d+)')
    display_values = {}
    for row in translated_style['body']:
        # row is a List[Dict], containing format data for each cell in the row,
        # plus an extra first entry for the row header, which we skip
        found_row_header = False
        for cell in row:
            cell_id = cell['id']  # a string in the form 'row0_col0'
            if header_selector_regex.match(cell_id):
                if not found_row_header:
                    # We don't care about processing row headers, but as
                    # a sanity check, ensure we only see one per row
                    found_row_header = True
                    continue
                else:
                    raise RuntimeError('Found unexpected row header "%s"' %
                                       cell)
            match = cell_selector_regex.match(cell_id)
            if not match:
                raise RuntimeError('Failed to parse cell selector "%s"' %
                                   cell_id)
            # Only store display values that differ from the cell's default
            # NOTE(review): the assignment below rebinds the outer loop
            # variable `row`; harmless because the for-loop reassigns it on
            # the next iteration, but worth renaming if this code is touched.
            if has_custom_display_value(cell):
                row = int(match.group(1))
                col = int(match.group(2))
                display_values[(row, col)] = str(cell['display_value'])
    return display_values
def _marshall_index(pandas_index, proto_index):
    """Convert an pandas.Index into a proto.Index.

    Dispatches on the exact Index subclass (hence `type(...) ==` rather
    than isinstance, so subclasses like DatetimeIndex are not caught by
    the plain-Index branch).

    pandas_index - Panda.Index or related (input)
    proto_index - proto.Index (output)
    """
    import pandas as pd
    import numpy as np
    if type(pandas_index) == pd.Index:
        # Generic index: marshall its values as a plain array.
        _marshall_any_array(np.array(pandas_index),
                            proto_index.plain_index.data)
    elif type(pandas_index) == pd.RangeIndex:
        min = pandas_index.min()
        max = pandas_index.max()
        # An empty RangeIndex has NaN min/max; encode it as [0, 0).
        if pd.isna(min) or pd.isna(max):
            proto_index.range_index.start = 0
            proto_index.range_index.stop = 0
        else:
            # proto range is half-open, hence the +1 on stop.
            proto_index.range_index.start = min
            proto_index.range_index.stop = max + 1
    elif type(pandas_index) == pd.MultiIndex:
        # Marshall each level, then the per-level label codes.
        for level in pandas_index.levels:
            _marshall_index(level, proto_index.multi_index.levels.add())
        if hasattr(pandas_index, 'codes'):
            index_codes = pandas_index.codes
        else:
            # `labels` was renamed to `codes` in pandas 0.24; keep the old
            # path for older pandas but don't bother covering it.
            index_codes = pandas_index.labels  # pragma: no cover
        for label in index_codes:
            proto_index.multi_index.labels.add().data.extend(label)
    elif type(pandas_index) == pd.DatetimeIndex:
        # Naive datetimes are localized to the machine's timezone before
        # being serialized as int64 nanosecond timestamps.
        if pandas_index.tz is None:
            current_zone = tzlocal.get_localzone()
            pandas_index = pandas_index.tz_localize(current_zone)
        proto_index.datetime_index.data.data.extend(
            pandas_index.astype(np.int64))
    elif type(pandas_index) == pd.TimedeltaIndex:
        # Serialized as int64 nanosecond durations.
        proto_index.timedelta_index.data.data.extend(
            pandas_index.astype(np.int64))
    elif type(pandas_index) == pd.Int64Index:
        # NOTE(review): Int64Index/Float64Index are removed in pandas 2.x;
        # these branches only run on older pandas — confirm supported range.
        proto_index.int_64_index.data.data.extend(pandas_index)
    elif type(pandas_index) == pd.Float64Index:
        proto_index.float_64_index.data.data.extend(pandas_index)
    else:
        raise NotImplementedError("Can't handle %s yet." % type(pandas_index))
def _marshall_table(pandas_table, proto_table):
    """Convert a sequence of 1D arrays into proto.Table.

    pandas_table - Sequence of 1D arrays which are AnyArray compatible (input).
    proto_table - proto.Table (output)
    """
    # One proto column is appended per input array, in order.
    for column in pandas_table:
        _marshall_any_array(column, proto_table.cols.add())
def _marshall_any_array(pandas_array, proto_array):
"""Convert a 1D numpy.Array into a proto.AnyArray.
pandas_array - 1D arrays which is AnyArray compatible (input).
proto_array - proto.AnyArray (output)
"""
import numpy as np
# Convert to np.array as necessary.
if not hasattr(pandas_array, 'dtype'):
pandas_array = np.array(pandas_array)
# Only works on 1D arrays.
if len(pandas_array.shape) != 1:
raise ValueError('Array must be 1D.')
# Perform type-conversion based on the array dtype.
if issubclass(pandas_array.dtype.type, np.floating):
proto_array.doubles.data.extend(pandas_array)
elif issubclass(pandas_array.dtype.type, np.timedelta64):
proto_array.timedeltas.data.extend(pandas_array.astype(np.int64))
elif issubclass(pandas_array.dtype.type, np.integer):
proto_array.int64s.data.extend(pandas_array)
elif pandas_array.dtype == np.bool:
proto_array.int64s.data.extend(pandas_array)
elif pandas_array.dtype == np.object:
proto_array.strings.data.extend(map(str, pandas_array))
# Setting a timezone changes (dtype, dtype.type) from
# 'datetime64[ns]', <class 'numpy.datetime64'>
# to
# datetime64[ns, UTC], <class 'pandas._libs.tslibs.timestamps.Timestamp'>
elif pandas_array.dtype.name.startswith('datetime64'):
# TODO(armando): Convert eveything to UTC not local timezone.
if pandas_array.dt.tz is None:
current_zone = tzlocal.get_localzone()
pandas_array = pandas_array.dt.tz_localize(current_zone)
proto_array.datetimes.data.extend(pandas_array.astype(np.int64))
else:
raise NotImplementedError('Dtype %s not understood.' %
pandas_array.dtype)
def add_rows(delta1, delta2, name=None):
    """Append the DataFrame rows held in delta2 onto the one in delta1.

    Parameters
    ----------
    delta1 : Delta
    delta2 : Delta
    name : str or None
    """
    target = _get_data_frame(delta1, name)
    source = _get_data_frame(delta2, name)

    # An empty target simply adopts the source wholesale (nothing to do when
    # the source is empty too).
    if len(target.data.cols) == 0:
        if len(source.data.cols) != 0:
            target.CopyFrom(source)
        return

    if len(target.data.cols) != len(source.data.cols):
        raise ValueError('Dataframes have incompatible shapes')

    # Append the data, column by column.
    for target_col, source_col in zip(target.data.cols, source.data.cols):
        _concat_any_array(target_col, source_col)

    # Append the row index.
    _concat_index(target.index, source.index)

    # Don't concat columns! add_rows should leave the dataframe with the same
    # number of columns as it had before.
    # DON'T DO: _concat_index(target.columns, source.columns)

    # Append the per-cell styles.
    for target_style, source_style in zip(target.style.cols, source.style.cols):
        _concat_cell_style_array(target_style, source_style)
def _concat_index(index1, index2):
    """Concat index2 onto the end of index1, in place.

    Raises ValueError when the two indices have different proto types, and
    NotImplementedError for index types that cannot be concatenated yet.
    """
    # Special case if index1 is empty: adopt index2 wholesale.
    if _index_len(index1) == 0:
        index1.Clear()
        index1.CopyFrom(index2)
        return

    # Otherwise, dispatch based on type.
    type1 = index1.WhichOneof('type')
    type2 = index2.WhichOneof('type')

    # This branch is covered with tests but pytest doesnt seem to realize it.
    if type1 != type2:  # pragma: no cover
        raise ValueError('Cannot concatenate %(type1)s with %(type2)s.' % {
            'type1': type1,
            'type2': type2
        })

    if type1 == 'plain_index':
        _concat_any_array(index1.plain_index.data, index2.plain_index.data)
    elif type1 == 'range_index':
        index1.range_index.stop += \
            (index2.range_index.stop - index2.range_index.start)
    elif type1 == 'multi_index':
        raise NotImplementedError('Cannot yet concatenate MultiIndices.')
    elif type1 == 'int_64_index':
        index1.int_64_index.data.data.extend(index2.int_64_index.data.data)
    elif type1 == 'float_64_index':
        # _marshall_index can emit float_64_index, so add_rows must be able to
        # extend one; previously this fell through to NotImplementedError.
        index1.float_64_index.data.data.extend(index2.float_64_index.data.data)
    elif type1 == 'datetime_index':
        index1.datetime_index.data.data.extend(index2.datetime_index.data.data)
    elif type1 == 'timedelta_index':
        index1.timedelta_index.data.data.extend(
            index2.timedelta_index.data.data)
    else:
        raise NotImplementedError('Cannot concatenate "%s" indices.' % type1)
def _concat_any_array(any_array_1, any_array_2):
    """Append the elements of any_array_2 onto any_array_1, in place."""
    # An empty destination just adopts the source.
    if _any_array_len(any_array_1) == 0:
        any_array_1.CopyFrom(any_array_2)
        return

    kind1 = any_array_1.WhichOneof('type')
    kind2 = any_array_2.WhichOneof('type')
    if kind1 != kind2:
        raise ValueError('Cannot concatenate %(type1)s with %(type2)s.' % {
            'type1': kind1,
            'type2': kind2
        })
    source_data = getattr(any_array_2, kind2).data
    getattr(any_array_1, kind1).data.extend(source_data)
def _concat_cell_style_array(style_array1, style_array2):
"""Concat elements from any_array_2 into any_array_1."""
# Special case if array1 is empty
if len(style_array1.styles) == 0:
style_array1.CopyFrom(style_array2)
return
style_array1.styles.extend(style_array2.styles)
def _get_data_frame(delta, name=None):
"""Extract the dataframe from a delta."""
delta_type = delta.WhichOneof('type')
if delta_type == 'new_element':
element_type = delta.new_element.WhichOneof('type')
# Some element types don't support named datasets.
if name and element_type in ('data_frame', 'table', 'chart'):
raise ValueError(
'Dataset names not supported for st.%s' % element_type)
if element_type in 'data_frame':
return delta.new_element.data_frame
elif element_type in 'table':
return delta.new_element.table
elif element_type == 'chart':
return delta.new_element.chart.data
elif element_type == 'vega_lite_chart':
chart_proto = delta.new_element.vega_lite_chart
if name:
return _get_or_create_dataset(chart_proto.datasets, name)
elif len(chart_proto.datasets) == 1:
# Support the case where the dataset name was randomly given by
# the charting library (e.g. Altair) and the user has no
# knowledge of it.
return chart_proto.datasets[0].data
else:
return chart_proto.data
# TODO: Support DeckGL. Need to figure out how to handle layer indices
# first.
elif delta_type == 'add_rows':
if delta.add_rows.has_name and name != delta.add_rows.name:
raise ValueError('No dataset found with name "%s".' % name)
return delta.add_rows.data
else:
raise ValueError('Cannot extract DataFrame from %s.' % delta_type)
def _get_or_create_dataset(datasets_proto, name):
for dataset in datasets_proto:
if dataset.has_name and dataset.name == name:
return dataset.data
dataset = datasets_proto.add()
dataset.name = name
dataset.has_name = True
return dataset.data
def _index_len(index):
"""Return the number of elements in an index."""
index_type = index.WhichOneof('type')
if index_type == 'plain_index':
return _any_array_len(index.plain_index.data)
elif index_type == 'range_index':
return index.range_index.stop - index.range_index.start
elif index_type == 'multi_index':
if len(index.multi_index.labels) == 0:
return 0
else:
return len(index.multi_index.labels[0].data)
elif index_type == 'int_64_index':
return len(index.int_64_index.data.data)
elif index_type == 'float_64_index':
return len(index.float_64_index.data.data)
elif index_type == 'datetime_index':
return len(index.datetime_index.data.data)
elif index_type == 'timedelta_index':
return len(index.timedelta_index.data.data)
def _any_array_len(any_array):
"""Return the length of an any_array."""
array_type = any_array.WhichOneof('type')
the_array = getattr(any_array, array_type).data
return len(the_array)
| 35.859375 | 86 | 0.64439 |
import re
import tzlocal
from collections import namedtuple
from streamlit import util
from streamlit.logger import get_logger
LOGGER = get_logger(__name__)
CSSStyle = namedtuple('CSSStyle', ['property', 'value'])
def marshall_data_frame(data, proto_df):
df = convert_anything_to_df(data)
df_data = (df.iloc[:, col] for col in range(len(df.columns)))
import numpy as np
import pandas as pd
_marshall_table(df_data, proto_df.data)
_marshall_index(df.columns, proto_df.columns)
_marshall_index(df.index, proto_df.index)
styler = data if _is_pandas_styler(data) else None
_marshall_styles(proto_df.style, df, styler)
def convert_anything_to_df(df):
if util.is_type(df, 'pandas.core.frame.DataFrame'):
return df
if _is_pandas_styler(df):
return df.data
import pandas as pd
if util.is_type(df, 'numpy.ndarray') and len(df.shape) == 0:
return pd.DataFrame([])
return pd.DataFrame(df)
def _is_pandas_styler(obj):
return util.is_type(obj, 'pandas.io.formats.style.Styler')
def _marshall_styles(proto_table_style, df, styler=None):
# which is non-ideal and could break if Styler's interface changes.
if styler is not None:
styler._compute()
translated_style = styler._translate()
css_styles = _get_css_styles(translated_style)
display_values = _get_custom_display_values(df, translated_style)
else:
css_styles = {}
display_values = {}
nrows, ncols = df.shape
for col in range(ncols):
proto_col = proto_table_style.cols.add()
for row in range(nrows):
proto_cell_style = proto_col.styles.add()
for css in css_styles.get((row, col), []):
proto_css = proto_cell_style.css.add()
proto_css.property = css.property
proto_css.value = css.value
display_value = display_values.get((row, col), None)
if display_value is not None:
proto_cell_style.display_value = display_value
proto_cell_style.has_display_value = True
def _get_css_styles(translated_style):
cell_selector_regex = re.compile(r'row(\d+)_col(\d+)')
css_styles = {}
for cell_style in translated_style['cellstyle']:
cell_selector = cell_style[
'selector']
match = cell_selector_regex.match(cell_selector)
if not match:
raise RuntimeError('Failed to parse cellstyle selector "%s"' %
cell_selector)
row = int(match.group(1))
col = int(match.group(2))
css_declarations = []
props = cell_style['props']
for prop in props:
if not isinstance(prop, list) or len(prop) != 2:
raise RuntimeError('Unexpected cellstyle props "%s"' % prop)
name = str(prop[0]).strip()
value = str(prop[1]).strip()
if name and value:
css_declarations.append(CSSStyle(property=name, value=value))
css_styles[(row, col)] = css_declarations
return css_styles
def _get_custom_display_values(df, translated_style):
default_formatter = df.style._display_funcs[(0, 0)]
def has_custom_display_value(cell):
value = str(cell['value'])
display_value = str(cell['display_value'])
if value == display_value:
return False
return default_formatter(value) != display_value
cell_selector_regex = re.compile(r'row(\d+)_col(\d+)')
header_selector_regex = re.compile(r'level(\d+)_row(\d+)')
display_values = {}
for row in translated_style['body']:
found_row_header = False
for cell in row:
cell_id = cell['id']
if header_selector_regex.match(cell_id):
if not found_row_header:
# a sanity check, ensure we only see one per row
found_row_header = True
continue
else:
raise RuntimeError('Found unexpected row header "%s"' %
cell)
match = cell_selector_regex.match(cell_id)
if not match:
raise RuntimeError('Failed to parse cell selector "%s"' %
cell_id)
# Only store display values that differ from the cell's default
if has_custom_display_value(cell):
row = int(match.group(1))
col = int(match.group(2))
display_values[(row, col)] = str(cell['display_value'])
return display_values
def _marshall_index(pandas_index, proto_index):
import pandas as pd
import numpy as np
if type(pandas_index) == pd.Index:
_marshall_any_array(np.array(pandas_index),
proto_index.plain_index.data)
elif type(pandas_index) == pd.RangeIndex:
min = pandas_index.min()
max = pandas_index.max()
if pd.isna(min) or pd.isna(max):
proto_index.range_index.start = 0
proto_index.range_index.stop = 0
else:
proto_index.range_index.start = min
proto_index.range_index.stop = max + 1
elif type(pandas_index) == pd.MultiIndex:
for level in pandas_index.levels:
_marshall_index(level, proto_index.multi_index.levels.add())
if hasattr(pandas_index, 'codes'):
index_codes = pandas_index.codes
else:
index_codes = pandas_index.labels # pragma: no cover
for label in index_codes:
proto_index.multi_index.labels.add().data.extend(label)
elif type(pandas_index) == pd.DatetimeIndex:
if pandas_index.tz is None:
current_zone = tzlocal.get_localzone()
pandas_index = pandas_index.tz_localize(current_zone)
proto_index.datetime_index.data.data.extend(
pandas_index.astype(np.int64))
elif type(pandas_index) == pd.TimedeltaIndex:
proto_index.timedelta_index.data.data.extend(
pandas_index.astype(np.int64))
elif type(pandas_index) == pd.Int64Index:
proto_index.int_64_index.data.data.extend(pandas_index)
elif type(pandas_index) == pd.Float64Index:
proto_index.float_64_index.data.data.extend(pandas_index)
else:
raise NotImplementedError("Can't handle %s yet." % type(pandas_index))
def _marshall_table(pandas_table, proto_table):
for pandas_array in pandas_table:
_marshall_any_array(pandas_array, proto_table.cols.add())
def _marshall_any_array(pandas_array, proto_array):
import numpy as np
if not hasattr(pandas_array, 'dtype'):
pandas_array = np.array(pandas_array)
if len(pandas_array.shape) != 1:
raise ValueError('Array must be 1D.')
if issubclass(pandas_array.dtype.type, np.floating):
proto_array.doubles.data.extend(pandas_array)
elif issubclass(pandas_array.dtype.type, np.timedelta64):
proto_array.timedeltas.data.extend(pandas_array.astype(np.int64))
elif issubclass(pandas_array.dtype.type, np.integer):
proto_array.int64s.data.extend(pandas_array)
elif pandas_array.dtype == np.bool:
proto_array.int64s.data.extend(pandas_array)
elif pandas_array.dtype == np.object:
proto_array.strings.data.extend(map(str, pandas_array))
elif pandas_array.dtype.name.startswith('datetime64'):
if pandas_array.dt.tz is None:
current_zone = tzlocal.get_localzone()
pandas_array = pandas_array.dt.tz_localize(current_zone)
proto_array.datetimes.data.extend(pandas_array.astype(np.int64))
else:
raise NotImplementedError('Dtype %s not understood.' %
pandas_array.dtype)
def add_rows(delta1, delta2, name=None):
df1 = _get_data_frame(delta1, name)
df2 = _get_data_frame(delta2, name)
if len(df1.data.cols) == 0:
if len(df2.data.cols) == 0:
return
df1.CopyFrom(df2)
return
if len(df1.data.cols) != len(df2.data.cols):
raise ValueError('Dataframes have incompatible shapes')
for (col1, col2) in zip(df1.data.cols, df2.data.cols):
_concat_any_array(col1, col2)
_concat_index(df1.index, df2.index)
# number of columns as it had before.
# DON'T DO: _concat_index(df1.columns, df2.columns)
for (style_col1, style_col2) in zip(df1.style.cols, df2.style.cols):
_concat_cell_style_array(style_col1, style_col2)
def _concat_index(index1, index2):
if _index_len(index1) == 0:
index1.Clear()
index1.CopyFrom(index2)
return
type1 = index1.WhichOneof('type')
type2 = index2.WhichOneof('type')
if type1 != type2:
raise ValueError('Cannot concatenate %(type1)s with %(type2)s.' % {
'type1': type1,
'type2': type2
})
if type1 == 'plain_index':
_concat_any_array(index1.plain_index.data, index2.plain_index.data)
elif type1 == 'range_index':
index1.range_index.stop += \
(index2.range_index.stop - index2.range_index.start)
elif type1 == 'multi_index':
raise NotImplementedError('Cannot yet concatenate MultiIndices.')
elif type1 == 'int_64_index':
index1.int_64_index.data.data.extend(index2.int_64_index.data.data)
elif type1 == 'datetime_index':
index1.datetime_index.data.data.extend(index2.datetime_index.data.data)
elif type1 == 'timedelta_index':
index1.timedelta_index.data.data.extend(
index2.timedelta_index.data.data)
else:
raise NotImplementedError('Cannot concatenate "%s" indices.' % type1)
def _concat_any_array(any_array_1, any_array_2):
if _any_array_len(any_array_1) == 0:
any_array_1.CopyFrom(any_array_2)
return
type1 = any_array_1.WhichOneof('type')
type2 = any_array_2.WhichOneof('type')
if type1 != type2:
raise ValueError('Cannot concatenate %(type1)s with %(type2)s.' % {
'type1': type1,
'type2': type2
})
getattr(any_array_1, type1).data.extend(getattr(any_array_2, type2).data)
def _concat_cell_style_array(style_array1, style_array2):
if len(style_array1.styles) == 0:
style_array1.CopyFrom(style_array2)
return
style_array1.styles.extend(style_array2.styles)
def _get_data_frame(delta, name=None):
delta_type = delta.WhichOneof('type')
if delta_type == 'new_element':
element_type = delta.new_element.WhichOneof('type')
if name and element_type in ('data_frame', 'table', 'chart'):
raise ValueError(
'Dataset names not supported for st.%s' % element_type)
if element_type in 'data_frame':
return delta.new_element.data_frame
elif element_type in 'table':
return delta.new_element.table
elif element_type == 'chart':
return delta.new_element.chart.data
elif element_type == 'vega_lite_chart':
chart_proto = delta.new_element.vega_lite_chart
if name:
return _get_or_create_dataset(chart_proto.datasets, name)
elif len(chart_proto.datasets) == 1:
# Support the case where the dataset name was randomly given by
# the charting library (e.g. Altair) and the user has no
# knowledge of it.
return chart_proto.datasets[0].data
else:
return chart_proto.data
# TODO: Support DeckGL. Need to figure out how to handle layer indices
# first.
elif delta_type == 'add_rows':
if delta.add_rows.has_name and name != delta.add_rows.name:
raise ValueError('No dataset found with name "%s".' % name)
return delta.add_rows.data
else:
raise ValueError('Cannot extract DataFrame from %s.' % delta_type)
def _get_or_create_dataset(datasets_proto, name):
for dataset in datasets_proto:
if dataset.has_name and dataset.name == name:
return dataset.data
dataset = datasets_proto.add()
dataset.name = name
dataset.has_name = True
return dataset.data
def _index_len(index):
index_type = index.WhichOneof('type')
if index_type == 'plain_index':
return _any_array_len(index.plain_index.data)
elif index_type == 'range_index':
return index.range_index.stop - index.range_index.start
elif index_type == 'multi_index':
if len(index.multi_index.labels) == 0:
return 0
else:
return len(index.multi_index.labels[0].data)
elif index_type == 'int_64_index':
return len(index.int_64_index.data.data)
elif index_type == 'float_64_index':
return len(index.float_64_index.data.data)
elif index_type == 'datetime_index':
return len(index.datetime_index.data.data)
elif index_type == 'timedelta_index':
return len(index.timedelta_index.data.data)
def _any_array_len(any_array):
array_type = any_array.WhichOneof('type')
the_array = getattr(any_array, array_type).data
return len(the_array)
| true | true |
f7fb296fe05adccbf13151574889867e1605688c | 1,160 | py | Python | tests/test_managers.py | kishorehariram/django-logic | 955f18211443b30ce39a845495e136d7590183a6 | [
"MIT"
] | 47 | 2019-11-23T11:51:04.000Z | 2022-03-16T15:37:24.000Z | tests/test_managers.py | kishorehariram/django-logic | 955f18211443b30ce39a845495e136d7590183a6 | [
"MIT"
] | 29 | 2019-11-25T12:16:25.000Z | 2021-05-10T13:17:46.000Z | tests/test_managers.py | kishorehariram/django-logic | 955f18211443b30ce39a845495e136d7590183a6 | [
"MIT"
] | 6 | 2019-12-25T11:17:05.000Z | 2021-11-23T07:33:33.000Z | from django.db import models
from django.test import TestCase
from django_logic.process import ProcessManager, Process
from django_logic.transition import Transition
class FirstProcess(Process):
    """Minimal Process fixture; bound to the `first_state` field in the test."""

    process_name = 'first_process'
    queryset = models.Manager()  # fake it to pass init
    # Single transition: state1 -> state3 via the 'transition1' action.
    transitions = [
        Transition(action_name='transition1', sources=['state1'], target='state3')
    ]
class SecondProcess(Process):
    """Minimal Process fixture; bound to the `second_state` field in the test."""

    process_name = 'second_process'
    queryset = models.Manager()  # fake it to pass init
    # Single transition: state2 -> state3 via the 'transition2' action.
    transitions = [
        Transition(action_name='transition2', sources=['state2'], target='state3')
    ]
class ProcessManagerTestCase(TestCase):
    """Tests for ProcessManager.bind_state_fields."""

    def test_processes_bound_correctly(self):
        # Binding a state field should expose an attribute named after the
        # process_name of the bound Process class, holding an instance of it.
        bind_class = ProcessManager.bind_state_fields(first_state=FirstProcess, second_state=SecondProcess)
        bind_class_obj = bind_class()
        self.assertTrue(hasattr(bind_class_obj, 'first_process'))
        self.assertTrue(isinstance(bind_class_obj.first_process, FirstProcess))
        self.assertTrue(hasattr(bind_class_obj, 'second_process'))
        self.assertTrue(isinstance(bind_class_obj.second_process, SecondProcess))
| 36.25 | 107 | 0.744828 | from django.db import models
from django.test import TestCase
from django_logic.process import ProcessManager, Process
from django_logic.transition import Transition
class FirstProcess(Process):
process_name = 'first_process'
queryset = models.Manager()
transitions = [
Transition(action_name='transition1', sources=['state1'], target='state3')
]
class SecondProcess(Process):
process_name = 'second_process'
queryset = models.Manager()
transitions = [
Transition(action_name='transition2', sources=['state2'], target='state3')
]
class ProcessManagerTestCase(TestCase):
def test_processes_bound_correctly(self):
bind_class = ProcessManager.bind_state_fields(first_state=FirstProcess, second_state=SecondProcess)
bind_class_obj = bind_class()
self.assertTrue(hasattr(bind_class_obj, 'first_process'))
self.assertTrue(isinstance(bind_class_obj.first_process, FirstProcess))
self.assertTrue(hasattr(bind_class_obj, 'second_process'))
self.assertTrue(isinstance(bind_class_obj.second_process, SecondProcess))
| true | true |
f7fb2b1fa89e507daca1330af17ae8ddefab5c72 | 1,158 | py | Python | pythonBase/advancePyton/chapter11/process_test.py | cangchengkun/pythonbase | 4e01331b1c7c13d86f32f697dd812cb267abe7ef | [
"CNRI-Python"
] | null | null | null | pythonBase/advancePyton/chapter11/process_test.py | cangchengkun/pythonbase | 4e01331b1c7c13d86f32f697dd812cb267abe7ef | [
"CNRI-Python"
] | null | null | null | pythonBase/advancePyton/chapter11/process_test.py | cangchengkun/pythonbase | 4e01331b1c7c13d86f32f697dd812cb267abe7ef | [
"CNRI-Python"
] | null | null | null | # 多线程编程
# The GIL lets only one thread execute Python bytecode at a time, so threads are inefficient for CPU work
# Multiple processes can run on multiple CPUs in parallel
# Both threads and processes give concurrency — so why not use processes for I/O-bound work too?
# Because switching between processes is comparatively expensive
# 1. For CPU-bound work, multiprocessing beats multithreading
# 2. For I/O-bound work, multithreading beats multiprocessing
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import os
import time
# pid = os.fork() # after fork, two processes exist; the child receives a copy of the parent's code and data
#
# print("boby")
#
# if pid == 0:
#     print("child {} , parent {}.".format(os.getpid(), os.getppid()))
# else:
#     print("parent {}.".format(pid))
# Multiprocessing
import multiprocessing
def get_html(n):
    """Pretend to fetch a page: block for n seconds, then echo n back."""
    seconds = n
    time.sleep(seconds)
    return seconds
class MyProcess(multiprocessing.Process):
    """Subclass-style process: override run() with the child's work."""

    def run(self):
        # Placeholder body; the child process exits immediately when started.
        pass
if __name__ == '__main__':
    # Spawn a single worker process that runs get_html(2).
    process = multiprocessing.Process(target=get_html, args=(2,))
    print(process.pid)  # pid is None before start()
    process.start()
    process.join()  # block until the child finishes
    print(process.pid)
    print("multiprocessing is end")
    # Process pool sized to the number of CPUs.
    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    # result = pool.apply_async(get_html, (2,))
    # pool.close()
    # print(result.get())
    # imap yields results in submission order (1, 5, 3)...
    for result in pool.imap(get_html, [1, 5, 3]):
        print("{} sleep success".format(result))
    # ...imap_unordered yields them as the workers finish.
    for result in pool.imap_unordered(get_html, [1, 5, 3]):
        print("{} sleep success".format(result))
| 20.315789 | 70 | 0.661485 |
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import os
import time
ng
def get_html(n):
time.sleep(n)
return n
class MyProcess(multiprocessing.Process):
def run(self):
pass
if __name__ == '__main__':
process = multiprocessing.Process(target=get_html, args=(2,))
print(process.pid)
process.start()
process.join()
print(process.pid)
print("multiprocessing is end")
pool = multiprocessing.Pool(multiprocessing.cpu_count())
for result in pool.imap(get_html, [1, 5, 3]):
print("{} sleep success".format(result))
for result in pool.imap_unordered(get_html, [1, 5, 3]):
print("{} sleep success".format(result))
| true | true |
f7fb2bf57560ff065bd448cc06360a865d2d6f01 | 254 | py | Python | test_driven_development_with_python/superlists/manage.py | TecKnow/learning | 71d1ddf9d580027ecc62a067581da378a9e85f6d | [
"BSD-3-Clause"
] | null | null | null | test_driven_development_with_python/superlists/manage.py | TecKnow/learning | 71d1ddf9d580027ecc62a067581da378a9e85f6d | [
"BSD-3-Clause"
] | null | null | null | test_driven_development_with_python/superlists/manage.py | TecKnow/learning | 71d1ddf9d580027ecc62a067581da378a9e85f6d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings before importing management code.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "superlists.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| 23.090909 | 74 | 0.775591 |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "superlists.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| true | true |
f7fb2c06a0c94a1f778393f64fd2d5ce8f893f62 | 330 | py | Python | typeidea/comment/adminx.py | persue/typeidea | 1510efaf9c260c802390ab6490e726182e8037ab | [
"Apache-2.0"
] | null | null | null | typeidea/comment/adminx.py | persue/typeidea | 1510efaf9c260c802390ab6490e726182e8037ab | [
"Apache-2.0"
] | 5 | 2021-03-19T09:17:12.000Z | 2022-03-12T00:08:19.000Z | typeidea/comment/adminx.py | persue/typeidea | 1510efaf9c260c802390ab6490e726182e8037ab | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import xadmin
#from django.contrib import admin
from .models import Comment
#from typeidea.custom_site import custom_site
#from typeidea.base_admin import BaseOwnerAdmin
@xadmin.sites.register(Comment)
class CommentAdmin:
    """xadmin config for Comment: columns shown on the changelist page."""

    list_display = ('target', 'nickname', 'content', 'website', 'created_time')
| 25.384615 | 79 | 0.757576 |
import xadmin
from .models import Comment
@xadmin.sites.register(Comment)
class CommentAdmin:
list_display = ('target', 'nickname', 'content', 'website', 'created_time')
| true | true |
f7fb2c651134c68cfd8b101c68def41076630084 | 5,299 | py | Python | axl_add_Sip_Trunk.py | alikhalid72/axl-python-zeep-samples | d0da03f066eaa2ddd55a18997f44402e50338c46 | [
"MIT"
] | null | null | null | axl_add_Sip_Trunk.py | alikhalid72/axl-python-zeep-samples | d0da03f066eaa2ddd55a18997f44402e50338c46 | [
"MIT"
] | null | null | null | axl_add_Sip_Trunk.py | alikhalid72/axl-python-zeep-samples | d0da03f066eaa2ddd55a18997f44402e50338c46 | [
"MIT"
] | null | null | null | """AXL <addSipTrunk> sample script, using the Zeep SOAP library
Install Python 3.7
On Windows, choose the option to add to PATH environment variable
If this is a fresh installation, update pip (you may need to use `pip3` on Linux or Mac)
$ python -m pip install --upgrade pip
Script Dependencies:
lxml
requests
zeep
Dependency Installation:
$ pip install zeep
This will install automatically all of zeep dependencies, including lxml, requests
Copyright (c) 2018 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from lxml import etree
from requests import Session
from requests.auth import HTTPBasicAuth
from zeep import Client, Settings, Plugin
from zeep.transports import Transport
from zeep.exceptions import Fault
# Configure CUCM location and AXL credentials in creds.py
import creds
# Change to true to enable output of request/response headers and XML
DEBUG = False
# The WSDL is a local file in the working directory, see README
WSDL_FILE = 'schema/AXLAPI.wsdl'
# This class lets you view the incoming and outgoing http headers and XML
class MyLoggingPlugin( Plugin ):
    """Zeep plugin that prints outgoing/incoming SOAP headers and body XML."""

    def egress( self, envelope, http_headers, operation, binding_options ):
        # Format the request body as pretty printed XML
        xml = etree.tostring( envelope, pretty_print = True, encoding = 'unicode')
        print( f'\nRequest\n-------\nHeaders:\n{http_headers}\n\nBody:\n{xml}' )

    def ingress( self, envelope, http_headers, operation ):
        # Format the response body as pretty printed XML
        xml = etree.tostring( envelope, pretty_print = True, encoding = 'unicode')
        print( f'\nResponse\n-------\nHeaders:\n{http_headers}\n\nBody:\n{xml}' )
# The first step is to create a SOAP client session
session = Session()

# We avoid certificate verification by default
# NOTE: insecure; suitable for lab use only
session.verify = False

# To enable SSL cert checking (recommended for production)
# place the CUCM Tomcat cert .pem file in the root of the project
# and uncomment the two lines below
# CERT = 'changeme.pem'
# session.verify = CERT

session.auth = HTTPBasicAuth( creds.USERNAME, creds.PASSWORD )

transport = Transport( session = session, timeout = 10 )

# strict=False is not always necessary, but it allows zeep to parse imperfect XML
settings = Settings( strict = False, xml_huge_tree = True )

# If debug output is requested, add the MyLoggingPlugin callback
plugin = [ MyLoggingPlugin() ] if DEBUG else [ ]

# Create the Zeep client with the specified settings
client = Client( WSDL_FILE, settings = settings, transport = transport,
        plugins = plugin )

# Create the Zeep service binding to AXL at the specified CUCM
service = client.create_service( '{http://www.cisco.com/AXLAPIService/}AXLAPIBinding',
                                'https://{cucm}:8443/axl/'.format( cucm = creds.CUCM_ADDRESS ))

# Create an object with the new SIP trunk fields and data
sip_trunk_data = {
    'name': 'testSipTrunk',
    'description': 'testDescription',
    'product': 'SIP Trunk',
    'class': 'Trunk',
    'protocol': 'SIP',
    'protocolSide': 'Network',
    'devicePoolName': 'Default',
    'locationName': 'Hub_None',
    'securityProfileName': 'Non Secure SIP Trunk Profile',
    'sipProfileName': 'Standard SIP Profile',
    'presenceGroupName': 'Standard Presence group',
    'callingAndCalledPartyInfoFormat': 'Deliver DN only in connected party',
    'destinations': [ ],
    }

# Create and add a Destination object to the Destinations array
sip_trunk_data['destinations'].append(
    { 'destination': {
        'addressIpv4': '1.1.1.1', 'port': '5060', 'sortOrder': 1 }
    }
)

# Execute the addSipTrunk request
try:
    resp = service.addSipTrunk( sip_trunk_data )
except Fault as err:
    print('Zeep error: addSipTrunk: {err}'.format( err = err ) )
else:
    print( 'addSipTrunk response:' )
    print( resp )

input( 'Press Enter to continue...' )

# Cleanup the SIP Trunk we just created
try:
    resp = service.removeSipTrunk( name = 'testSipTrunk' )
except Fault as err:
    print( 'Zeep error: removeSipTrunk: {err}'.format( err = err ) )
else:
    print( 'removeSipTrunk response:' )
    print( resp )
| 35.326667 | 96 | 0.707115 |
from lxml import etree
from requests import Session
from requests.auth import HTTPBasicAuth
from zeep import Client, Settings, Plugin
from zeep.transports import Transport
from zeep.exceptions import Fault
import creds
DEBUG = False
WSDL_FILE = 'schema/AXLAPI.wsdl'
class MyLoggingPlugin( Plugin ):
def egress( self, envelope, http_headers, operation, binding_options ):
xml = etree.tostring( envelope, pretty_print = True, encoding = 'unicode')
print( f'\nRequest\n-------\nHeaders:\n{http_headers}\n\nBody:\n{xml}' )
def ingress( self, envelope, http_headers, operation ):
xml = etree.tostring( envelope, pretty_print = True, encoding = 'unicode')
print( f'\nResponse\n-------\nHeaders:\n{http_headers}\n\nBody:\n{xml}' )
session = Session()
session.verify = False
session.auth = HTTPBasicAuth( creds.USERNAME, creds.PASSWORD )
transport = Transport( session = session, timeout = 10 )
settings = Settings( strict = False, xml_huge_tree = True )
plugin = [ MyLoggingPlugin() ] if DEBUG else [ ]
client = Client( WSDL_FILE, settings = settings, transport = transport,
plugins = plugin )
service = client.create_service( '{http://www.cisco.com/AXLAPIService/}AXLAPIBinding',
'https://{cucm}:8443/axl/'.format( cucm = creds.CUCM_ADDRESS ))
sip_trunk_data = {
'name': 'testSipTrunk',
'description': 'testDescription',
'product': 'SIP Trunk',
'class': 'Trunk',
'protocol': 'SIP',
'protocolSide': 'Network',
'devicePoolName': 'Default',
'locationName': 'Hub_None',
'securityProfileName': 'Non Secure SIP Trunk Profile',
'sipProfileName': 'Standard SIP Profile',
'presenceGroupName': 'Standard Presence group',
'callingAndCalledPartyInfoFormat': 'Deliver DN only in connected party',
'destinations': [ ],
}
sip_trunk_data['destinations'].append(
{ 'destination': {
'addressIpv4': '1.1.1.1', 'port': '5060', 'sortOrder': 1 }
}
)
try:
resp = service.addSipTrunk( sip_trunk_data )
except Fault as err:
print('Zeep error: addSipTrunk: {err}'.format( err = err ) )
else:
print( 'addSipTrunk response:' )
print( resp )
input( 'Press Enter to continue...' )
try:
resp = service.removeSipTrunk( name = 'testSipTrunk' )
except Fault as err:
print( 'Zeep error: removeSipTrunk: {err}'.format( err = err ) )
else:
print( 'removeSipTrunk response:' )
print( resp )
| true | true |
f7fb2d5c320bd37feebc0783b11324565991b7f4 | 8,599 | py | Python | actualizar_costos.py | osim-microgrid-tool/osim_islanded_microgrids_sizing | 63738ddf418e4023f3b64dc1f0aba6f079710bc8 | [
"MIT"
] | 2 | 2021-07-27T13:56:18.000Z | 2021-07-27T13:56:23.000Z | actualizar_costos.py | wropero-enersinc/osim_microrredes | 407ce32683043dbc0356fa0b2edaf537b0098b71 | [
"MIT"
] | null | null | null | actualizar_costos.py | wropero-enersinc/osim_microrredes | 407ce32683043dbc0356fa0b2edaf537b0098b71 | [
"MIT"
] | 1 | 2021-07-25T23:03:27.000Z | 2021-07-25T23:03:27.000Z | import pandas as pd
import numpy as np
import math
import sqlite3
import os
def conexion_bd(sql=None, update=True):
    """Execute *sql* against the local SQLite database ``result_op.db``.

    Parameters
    ----------
    sql : str
        Statement to execute.
    update : bool
        When True, run ``sql`` as a write statement and commit.
        When False, run it as a query via pandas.

    Returns
    -------
    pandas.DataFrame or None
        The query result when ``update`` is False and the query succeeds;
        otherwise None.  On failure the offending SQL is printed (the
        original best-effort behaviour is preserved).
    """
    # sqlite3 does not understand numpy integers out of the box.
    sqlite3.register_adapter(np.int64, lambda val: int(val))
    sqlite3.register_adapter(np.int32, lambda val: int(val))
    con = sqlite3.connect(os.getcwd() + os.sep + "result_op.db")
    df = None
    try:
        if update:
            cur = con.cursor()
            cur.execute(sql)
            con.commit()
        else:
            df = pd.read_sql_query(sql, con=con)
    except Exception:
        # Was a bare ``except:``; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed.  Keep the diagnostic print.
        print(sql)
        df = None
    finally:
        # Always release the connection, even if something unexpected raises.
        con.close()
    return df
def actualizar_costos(parametros=None, resultados=None, i=None):
    """Compute the financial indicators (ASC, LCOE, energy costs) for the
    simulation in row *i* of ``parametros`` and persist them back into the
    ``parametros`` table with an UPDATE statement.

    parametros : DataFrame with one row per simulation (sizing results).
    resultados : DataFrame with the dispatch results for all simulations.
    i          : positional index of the simulation row to update.
    """
    trm = 3736.91  # COP/USD exchange rate used to express costs in USD
    id_simulacion = parametros.loc[i,'id_simulacion']
    n_pv = parametros.loc[i,'n_pv']
    n_dg = parametros.loc[i,'n_dg']
    p_bat = parametros.loc[i,'p_bat']
    p_dg = parametros.loc[i,'p_dg']
    ens = parametros.loc[i,'ens_result']
    c_pv = parametros.loc[i,'cost_pv']
    c_bat = parametros.loc[i,'cost_bat']
    c_dg = parametros.loc[i,'cost_dg']
    lpsp = parametros.loc[i,'lpsp_result']
    # Unit cost of energy-not-served, banded by the loss-of-power-supply
    # probability (LPSP, in percent).
    if lpsp <=1.5:
        c_ens = 1532.53
    elif lpsp > 1.5 and lpsp <= 5:
        c_ens = 2778.13
    elif lpsp > 5 and lpsp < 90:
        c_ens = 4872.19
    else:
        c_ens = 0
    # Dispatch rows belonging to this simulation only.
    resultado_id = resultados[resultados['id_simulacion']==id_simulacion].reset_index(drop=True)
    resultado_id['total_dg'] = resultado_id['energia_Dg'] + resultado_id['p_bat_dg']
    resultado_id['total_pv'] = resultado_id['energia_PV'] + resultado_id['p_bat_pv']
    # NOTE(review): ei and et are currently the same quantity (diesel
    # energy); confirm whether et was meant to include PV/battery output.
    ei = round(resultado_id['total_dg'].sum(),2)
    et = round(resultado_id['total_dg'].sum(),2)
    et_pv = round(resultado_id['total_pv'].sum(),2)
    load = round(resultado_id['load'].sum(),2)
    et_bat = round(resultado_id['energia_descarga_bateria'].sum(),2)
    # Per-technology energy costs, converted from COP to USD via trm.
    cost_e_dg = (et*c_dg)/trm
    cost_e_pv = (et_pv*c_pv)/trm
    cost_e_bat =( et_bat*c_bat)/trm
    cost_e_ens =( ens*c_ens)/trm
    # Financial assumptions (installed costs in COP/kWh-or-kW, lifetimes in
    # years, de-rate factors, fuel/lubricant prices).
    financiero = {'R':20, 'ir':0.0808, 'cpv_ins' : 5605.365 ,'cbat_ins' : 539983.495,'cdg_ins' : 7627407.001, 'npv' : n_pv ,'ndg' : n_dg ,'ppv_stc': 300,'ebcell_nom': p_bat,
                  'pdg_rate': p_dg, 'li_bat':10,'li_dg':10,'ybat': 0.7,'ydg': 0.7, 'ipp_o': 74.37,'ipp_actual' : 129.23, 'cec': 0.0974,'ei': ei,'et': et,'pami': 8789 ,
                  'cel':0.0005,'plim':79900, 'p_load':load,'ens':ens, 'factor_pv':0.01,'factor_bat':0.02}
    R = financiero['R'] # project lifetime in years
    ir = financiero['ir'] # real interest rate: (i_n - i_f) / (1 + i_f), taken from another study
    crf = round(ir*((1+ir)**R) /((1+ir)**R - 1),2) # capital recovery factor
    cpv_ins = financiero['cpv_ins'] # installed PV cost per kWh
    cbat_ins = financiero['cbat_ins'] # installed battery cost per kWh
    cdg_ins = financiero['cdg_ins'] # installed diesel cost per kWh
    npv = financiero['npv'] # number of PV panels
    ndg = financiero['ndg'] # number of diesel gensets
    ppv_stc = financiero['ppv_stc']# rated panel capacity
    ebcell_nom = financiero['ebcell_nom'] # battery capacity
    pdg_rate = financiero['pdg_rate'] # rated diesel capacity
    # Initial capital costs per technology.
    ccpv = round(cpv_ins*npv*ppv_stc,4)
    ccbat = round(cbat_ins*ebcell_nom,4)
    ccdg = round(cdg_ins*ndg*pdg_rate,4)
    def calcular_ki(R, li, ir):
        """Single-payment present-worth factor: discounted sum over every
        replacement of a component with lifetime *li* within *R* years."""
        yi_replacements = math.floor(R/li)
        values_to_sum = []
        for i in range(1,yi_replacements+1):
            x = (1)/((1+ir)**(i*li))
            values_to_sum.append(x)
        return sum(values_to_sum)
    kbat = round(calcular_ki(R=financiero['R'], li=financiero['li_bat'], ir=financiero['ir']),4) # single payment present worth, battery
    kdg = round(calcular_ki(R=financiero['R'], li=financiero['li_dg'], ir=financiero['ir']),4) # single payment present worth, diesel
    ybat = financiero['ybat'] # de-rate factor of the initial capital cost invested
    ydg = financiero['ydg'] # de-rate factor of the initial capital cost invested
    # Replacement costs over the project lifetime.
    rc_bat = round(ybat*ccbat*kbat,4)
    rc_dg = round(ydg*ccdg*kdg,4)
    factor_pv = financiero['factor_pv'] # O&M as a fraction of the initial investment
    factor_bat = financiero['factor_bat'] # O&M as a fraction of the initial investment
    oym_pv = factor_pv*ccpv
    oym_bat = factor_bat*ccbat
    ipp_o = financiero['ipp_o']
    ipp_actual = financiero['ipp_actual']
    cec = financiero['cec'] # specific fuel consumption, 0.0974 gal/kWh (capacity <= 100 kW)
    ei = financiero['ei'] # energy delivered to the distribution system by generator i
    et = financiero['et'] # total energy delivered to the distribution system
    pami = financiero['pami'] # average fuel price at the supply plant closest to generator i in month m ($/gal)
    tmi = pami*0.1
    calm = 82.14*(ipp_actual/ipp_o) # fuel storage cost in month m ($/gal), PPI-indexed
    pci = pami + tmi + calm # on-site price per gallon for generator i
    if et>0:
        cc = (1/et)*(cec*pci*ei) # fuel cost (CC)
    else:
        cc=0
    cel = financiero['cel'] # specific lubricant consumption, 0.00050 gal/kWh for plants <= 2,000 kW
    plim = financiero['plim'] # on-site lubricant price per gallon for generator i in month m ($/gal), market average
    if et>0:
        cl = (1/et)*(cel*(plim+tmi)*ei)
    else:
        cl=0
    cam = 0.1*(cc+cl)
    oym_dg = (cam + cc + cl)*ei
    incentivo = 0.9038  # tax-incentive factor applied to PV + battery capital
    # Annualized system cost (USD) with and without the renewables incentive.
    asc = (((ccpv+ccbat+ccdg)+(rc_bat+rc_dg))*crf + (oym_dg + oym_pv + oym_bat))/trm
    asc_incentivo = ((((ccpv+ccbat)*incentivo+ccdg)+(rc_bat+rc_dg))*crf + (oym_dg + oym_pv + oym_bat))/trm
    p_load = financiero['p_load']
    ens = financiero['ens']
    # Levelized cost of energy over the energy actually served.
    lcoe = (asc/(p_load - ens))
    lcoe_incentivo = (asc_incentivo/(p_load - ens))
    # Persist every indicator back into the parametros table.
    sql_actualizar= """UPDATE parametros
                       SET vida_proyecto = %s,
                       ir = %s,
                       crf= %s,
                       cpv_ins= %s,
                       cbat_ins= %s,
                       cdg_ins= %s,
                       capital_cpv= %s,
                       capital_cbat= %s,
                       capital_cdg= %s,
                       kbat= %s,
                       kdg= %s,
                       ybat= %s,
                       ydg= %s,
                       rc_bat= %s,
                       rc_dg= %s,
                       factor_bat= %s,
                       factor_pv= %s,
                       oym_pv= %s,
                       oym_bat= %s,
                       ipp_actual= %s,
                       trm= %s,
                       pami = %s,
                       plim = %s,
                       oym_dg = %s,
                       asc = %s,
                       lcoe= %s,
                       asc_incentivo=%s,
                       lcoe_incentivo=%s,
                       cost_e_dg=%s,
                       cost_e_pv=%s,
                       cost_e_bat=%s,
                       cost_e_ens=%s
                       WHERE id_simulacion =%s"""%(R,
                       ir,
                       crf,
                       cpv_ins,
                       cbat_ins,
                       cdg_ins,
                       ccpv ,
                       ccbat,
                       ccdg ,
                       kbat,
                       kdg,
                       ybat,
                       ydg,
                       rc_bat,
                       rc_dg,
                       factor_bat,
                       factor_pv,
                       round(oym_pv,2),
                       round(oym_bat,2),
                       round(ipp_actual,2),
                       round(trm,2),
                       round(pami,2),
                       round(plim,2),
                       round(oym_dg,2),
                       round(asc,2),
                       round(lcoe,2),
                       round(asc_incentivo,2),
                       round(lcoe_incentivo,2),
                       round(cost_e_dg,2),
                       round(cost_e_pv,2) ,
                       round(cost_e_bat,2),
                       round(cost_e_ens,2),
                       round(id_simulacion,2))
    conexion_bd(sql=sql_actualizar)
| 36.591489 | 200 | 0.515641 | import pandas as pd
import numpy as np
import math
import sqlite3
import os
def conexion_bd(sql=None, update=True):
sqlite3.register_adapter(np.int64, lambda val: int(val))
sqlite3.register_adapter(np.int32, lambda val: int(val))
con = sqlite3.connect(os.getcwd() + os.sep + "result_op.db")
try:
if update == True:
cur = con.cursor()
cur.execute(sql)
con.commit()
df=None
else:
df= pd.read_sql_query(sql, con=con)
except:
print(sql)
df=None
con.close()
return df
def actualizar_costos(parametros=None, resultados=None, i=None):
trm = 3736.91
id_simulacion = parametros.loc[i,'id_simulacion']
n_pv = parametros.loc[i,'n_pv']
n_dg = parametros.loc[i,'n_dg']
p_bat = parametros.loc[i,'p_bat']
p_dg = parametros.loc[i,'p_dg']
ens = parametros.loc[i,'ens_result']
c_pv = parametros.loc[i,'cost_pv']
c_bat = parametros.loc[i,'cost_bat']
c_dg = parametros.loc[i,'cost_dg']
lpsp = parametros.loc[i,'lpsp_result']
if lpsp <=1.5:
c_ens = 1532.53
elif lpsp > 1.5 and lpsp <= 5:
c_ens = 2778.13
elif lpsp > 5 and lpsp < 90:
c_ens = 4872.19
else:
c_ens = 0
resultado_id = resultados[resultados['id_simulacion']==id_simulacion].reset_index(drop=True)
resultado_id['total_dg'] = resultado_id['energia_Dg'] + resultado_id['p_bat_dg']
resultado_id['total_pv'] = resultado_id['energia_PV'] + resultado_id['p_bat_pv']
ei = round(resultado_id['total_dg'].sum(),2)
et = round(resultado_id['total_dg'].sum(),2)
et_pv = round(resultado_id['total_pv'].sum(),2)
load = round(resultado_id['load'].sum(),2)
et_bat = round(resultado_id['energia_descarga_bateria'].sum(),2)
cost_e_dg = (et*c_dg)/trm
cost_e_pv = (et_pv*c_pv)/trm
cost_e_bat =( et_bat*c_bat)/trm
cost_e_ens =( ens*c_ens)/trm
financiero = {'R':20, 'ir':0.0808, 'cpv_ins' : 5605.365 ,'cbat_ins' : 539983.495,'cdg_ins' : 7627407.001, 'npv' : n_pv ,'ndg' : n_dg ,'ppv_stc': 300,'ebcell_nom': p_bat,
'pdg_rate': p_dg, 'li_bat':10,'li_dg':10,'ybat': 0.7,'ydg': 0.7, 'ipp_o': 74.37,'ipp_actual' : 129.23, 'cec': 0.0974,'ei': ei,'et': et,'pami': 8789 ,
'cel':0.0005,'plim':79900, 'p_load':load,'ens':ens, 'factor_pv':0.01,'factor_bat':0.02}
R = financiero['R']
ir = financiero['ir'] +ir)**R) /((1+ir)**R - 1),2)
cpv_ins = financiero['cpv_ins']
cbat_ins = financiero['cbat_ins']
cdg_ins = financiero['cdg_ins']
npv = financiero['npv']
ndg = financiero['ndg']
ppv_stc = financiero['ppv_stc']
ebcell_nom = financiero['ebcell_nom']
pdg_rate = financiero['pdg_rate']
ccpv = round(cpv_ins*npv*ppv_stc,4)
ccbat = round(cbat_ins*ebcell_nom,4)
ccdg = round(cdg_ins*ndg*pdg_rate,4)
def calcular_ki(R, li, ir):
yi_replacements = math.floor(R/li)
values_to_sum = []
for i in range(1,yi_replacements+1):
x = (1)/((1+ir)**(i*li))
values_to_sum.append(x)
return sum(values_to_sum)
kbat = round(calcular_ki(R=financiero['R'], li=financiero['li_bat'], ir=financiero['ir']),4)
kdg = round(calcular_ki(R=financiero['R'], li=financiero['li_dg'], ir=financiero['ir']),4)
ybat = financiero['ybat']
ydg = financiero['ydg']
rc_bat = round(ybat*ccbat*kbat,4)
rc_dg = round(ydg*ccdg*kdg,4)
factor_pv = financiero['factor_pv']
factor_bat = financiero['factor_bat']
oym_pv = factor_pv*ccpv
oym_bat = factor_bat*ccbat
ipp_o = financiero['ipp_o']
ipp_actual = financiero['ipp_actual']
cec = financiero['cec']
ei = financiero['ei']
et = financiero['et']
pami = financiero['pami']
tmi = pami*0.1
calm = 82.14*(ipp_actual/ipp_o)
pci = pami + tmi + calm
if et>0:
cc = (1/et)*(cec*pci*ei)
else:
cc=0
cel = financiero['cel']
plim = financiero['plim']
if et>0:
cl = (1/et)*(cel*(plim+tmi)*ei)
else:
cl=0
cam = 0.1*(cc+cl)
oym_dg = (cam + cc + cl)*ei
incentivo = 0.9038
asc = (((ccpv+ccbat+ccdg)+(rc_bat+rc_dg))*crf + (oym_dg + oym_pv + oym_bat))/trm
asc_incentivo = ((((ccpv+ccbat)*incentivo+ccdg)+(rc_bat+rc_dg))*crf + (oym_dg + oym_pv + oym_bat))/trm
p_load = financiero['p_load']
ens = financiero['ens']
lcoe = (asc/(p_load - ens))
lcoe_incentivo = (asc_incentivo/(p_load - ens))
sql_actualizar= """UPDATE parametros
SET vida_proyecto = %s,
ir = %s,
crf= %s,
cpv_ins= %s,
cbat_ins= %s,
cdg_ins= %s,
capital_cpv= %s,
capital_cbat= %s,
capital_cdg= %s,
kbat= %s,
kdg= %s,
ybat= %s,
ydg= %s,
rc_bat= %s,
rc_dg= %s,
factor_bat= %s,
factor_pv= %s,
oym_pv= %s,
oym_bat= %s,
ipp_actual= %s,
trm= %s,
pami = %s,
plim = %s,
oym_dg = %s,
asc = %s,
lcoe= %s,
asc_incentivo=%s,
lcoe_incentivo=%s,
cost_e_dg=%s,
cost_e_pv=%s,
cost_e_bat=%s,
cost_e_ens=%s
WHERE id_simulacion =%s"""%(R,
ir,
crf,
cpv_ins,
cbat_ins,
cdg_ins,
ccpv ,
ccbat,
ccdg ,
kbat,
kdg,
ybat,
ydg,
rc_bat,
rc_dg,
factor_bat,
factor_pv,
round(oym_pv,2),
round(oym_bat,2),
round(ipp_actual,2),
round(trm,2),
round(pami,2),
round(plim,2),
round(oym_dg,2),
round(asc,2),
round(lcoe,2),
round(asc_incentivo,2),
round(lcoe_incentivo,2),
round(cost_e_dg,2),
round(cost_e_pv,2) ,
round(cost_e_bat,2),
round(cost_e_ens,2),
round(id_simulacion,2))
conexion_bd(sql=sql_actualizar)
| true | true |
f7fb2e4acbf2aa94f74bb2be3cd6973f04ab40c2 | 550 | py | Python | hackerrank prob/10.nested list.py | kingRovo/PythonCodingChalenge | b62938592df10ccafec9930b69c14c778e19ad37 | [
"bzip2-1.0.6"
] | 1 | 2021-08-02T16:52:55.000Z | 2021-08-02T16:52:55.000Z | hackerrank prob/10.nested list.py | kingRovo/PythonCodingChalenge | b62938592df10ccafec9930b69c14c778e19ad37 | [
"bzip2-1.0.6"
] | null | null | null | hackerrank prob/10.nested list.py | kingRovo/PythonCodingChalenge | b62938592df10ccafec9930b69c14c778e19ad37 | [
"bzip2-1.0.6"
] | null | null | null | scores = {}
top2 = []
def logScore(score):
global top2
xs = [x for x in top2 if x < score] + [score] + [x for x in top2 if x > score]
if (len(xs) > 2):
scores.pop(xs[2], None)
xs.pop()
top2 = xs
return score <= xs[-1]
for _ in range(int(input())):
name = input()
score = float(input())
if (logScore(score)):
scores[score] = scores.get(score,[]) + [name]
for name in sorted(scores[top2[1]]):
print(name)
# Input format:
#5
#Harry
#37.21
#Berry
#37.21
#Tina
#37.2
#Akriti
#41
#Harsh
#39
| 15.714286 | 82 | 0.554545 | scores = {}
top2 = []
def logScore(score):
global top2
xs = [x for x in top2 if x < score] + [score] + [x for x in top2 if x > score]
if (len(xs) > 2):
scores.pop(xs[2], None)
xs.pop()
top2 = xs
return score <= xs[-1]
for _ in range(int(input())):
name = input()
score = float(input())
if (logScore(score)):
scores[score] = scores.get(score,[]) + [name]
for name in sorted(scores[top2[1]]):
print(name)
| true | true |
f7fb2e522871c2a020f3f991e187ebff8901bb38 | 563 | py | Python | user.py | VoltJimathy/BooksDiscordpy | 6874b2d6184b329f84892e4f18454dc366bc5846 | [
"BSD-2-Clause"
] | null | null | null | user.py | VoltJimathy/BooksDiscordpy | 6874b2d6184b329f84892e4f18454dc366bc5846 | [
"BSD-2-Clause"
] | 1 | 2021-08-29T19:34:09.000Z | 2021-09-03T16:06:35.000Z | user.py | VoltJimathy/BooksDiscordpy | 6874b2d6184b329f84892e4f18454dc366bc5846 | [
"BSD-2-Clause"
] | null | null | null | import typing as t
class User:
    """Thin wrapper around a raw Discord user payload."""

    # (payload key, default) pairs, in the exact order attributes are set.
    _FIELDS = (
        ("verified", False),
        ("username", None),
        ("mfa_enabled", False),
        ("id", None),
        ("flags", None),
        ("email", None),
        ("discriminator", None),
        ("bot", False),
        ("avatar", None),
    )

    def __init__(self, json: dict, bot):
        """Copy known fields out of *json*; *bot* is the owning client."""
        for key, default in self._FIELDS:
            setattr(self, key, json.get(key, default))
        self._bot = bot
| 29.631579 | 60 | 0.586146 | import typing as t
class User:
def __init__(self, json: dict, bot):
self.verified = json.get("verified", False)
self.username = json.get("username", None)
self.mfa_enabled = json.get("mfa_enabled", False)
self.id = json.get("id")
self.flags = json.get("flags", None)
self.email = json.get("email", None)
self.discriminator = json.get("discriminator", None)
self.bot = json.get("bot", False)
self.avatar = json.get("avatar", None)
self._bot = bot
| true | true |
f7fb2f90496a29c2de8d6b94019d96286ba0bfbb | 8,154 | py | Python | mitmproxy/console/options.py | jvillacorta/mitmproxy | 3aa2d59f627e0fc95167fb76ffbe84330e3a5cc5 | [
"MIT"
] | 1 | 2018-03-31T17:16:07.000Z | 2018-03-31T17:16:07.000Z | mitmproxy/console/options.py | jvillacorta/mitmproxy | 3aa2d59f627e0fc95167fb76ffbe84330e3a5cc5 | [
"MIT"
] | null | null | null | mitmproxy/console/options.py | jvillacorta/mitmproxy | 3aa2d59f627e0fc95167fb76ffbe84330e3a5cc5 | [
"MIT"
] | 4 | 2018-04-18T13:17:01.000Z | 2021-02-21T17:08:33.000Z | from __future__ import absolute_import, print_function, division
import urwid
from mitmproxy import contentviews
from mitmproxy.console import common
from mitmproxy.console import grideditor
from mitmproxy.console import palettes
from mitmproxy.console import select
from mitmproxy.console import signals
# Footer hint line shown while this screen is active.
footer = [
    ('heading_key', "enter/space"), ":toggle ",
    ('heading_key', "C"), ":clear all ",
]


def _mkhelp():
    """Build the key-binding help text for the options screen."""
    key_bindings = [
        ("enter/space", "activate option"),
        ("C", "clear all options"),
    ]
    lines = []
    lines.extend(common.format_keyvals(key_bindings, key="key", val="text", indent=4))
    return lines


help_context = _mkhelp()
class Options(urwid.WidgetWrap):
    """Full-screen options view: a selectable list of toggles and editors
    for mitmproxy settings, grouped by category."""

    def __init__(self, master):
        """Build the option list widget and wire it to option-change signals."""
        self.master = master
        # Each select.Option pairs a label/hotkey with a "is it active?"
        # callable and an activation callback.
        self.lb = select.Select(
            [
                select.Heading("Traffic Manipulation"),
                select.Option(
                    "Header Set Patterns",
                    "H",
                    lambda: len(master.options.setheaders),
                    self.setheaders
                ),
                select.Option(
                    "Ignore Patterns",
                    "I",
                    lambda: master.options.ignore_hosts,
                    self.ignore_hosts
                ),
                select.Option(
                    "Replacement Patterns",
                    "R",
                    lambda: len(master.options.replacements),
                    self.replacepatterns
                ),
                select.Option(
                    "Scripts",
                    "S",
                    lambda: master.options.scripts,
                    self.scripts
                ),

                select.Heading("Interface"),
                select.Option(
                    "Default Display Mode",
                    "M",
                    self.has_default_displaymode,
                    self.default_displaymode
                ),
                select.Option(
                    "Palette",
                    "P",
                    lambda: self.master.palette != palettes.DEFAULT,
                    self.palette
                ),
                select.Option(
                    "Show Host",
                    "w",
                    lambda: master.options.showhost,
                    master.options.toggler("showhost")
                ),

                select.Heading("Network"),
                select.Option(
                    "No Upstream Certs",
                    "U",
                    lambda: master.options.no_upstream_cert,
                    master.options.toggler("no_upstream_cert")
                ),
                select.Option(
                    "TCP Proxying",
                    "T",
                    lambda: master.options.tcp_hosts,
                    self.tcp_hosts
                ),
                select.Option(
                    "Don't Verify SSL/TLS Certificates",
                    "V",
                    lambda: master.options.ssl_insecure,
                    master.options.toggler("ssl_insecure")
                ),

                select.Heading("Utility"),
                select.Option(
                    "Anti-Cache",
                    "a",
                    lambda: master.options.anticache,
                    master.options.toggler("anticache")
                ),
                select.Option(
                    "Anti-Compression",
                    "o",
                    lambda: master.options.anticomp,
                    master.options.toggler("anticomp")
                ),
                select.Option(
                    "Kill Extra",
                    "x",
                    lambda: master.options.kill,
                    master.options.toggler("kill")
                ),
                select.Option(
                    "No Refresh",
                    "f",
                    lambda: not master.options.refresh_server_playback,
                    master.options.toggler("refresh_server_playback")
                ),
                select.Option(
                    "Sticky Auth",
                    "A",
                    lambda: master.options.stickyauth,
                    self.sticky_auth
                ),
                select.Option(
                    "Sticky Cookies",
                    "t",
                    lambda: master.options.stickycookie,
                    self.sticky_cookie
                ),
            ]
        )
        title = urwid.Text("Options")
        title = urwid.Padding(title, align="left", width=("relative", 100))
        title = urwid.AttrWrap(title, "heading")
        w = urwid.Frame(
            self.lb,
            header = title
        )
        super(Options, self).__init__(w)

        self.master.loop.widget.footer.update("")
        # Repaint whenever settings change, from either signal source.
        signals.update_settings.connect(self.sig_update_settings)
        master.options.changed.connect(self.sig_update_settings)

    def sig_update_settings(self, sender, updated=None):
        """Mark the option list dirty so active-state markers refresh."""
        self.lb.walker._modified()

    def keypress(self, size, key):
        """Handle the screen-level 'C' (clear all) shortcut; defer the rest."""
        if key == "C":
            self.clearall()
            return None
        # NOTE(review): super(self.__class__, ...) misbehaves under further
        # subclassing; kept as-is to avoid a behaviour change here.
        return super(self.__class__, self).keypress(size, key)

    def clearall(self):
        """Reset every option on this screen to its default value."""
        self.master.options.update(
            anticache = False,
            anticomp = False,
            ignore_hosts = (),
            tcp_hosts = (),
            kill = False,
            no_upstream_cert = False,
            refresh_server_playback = True,
            replacements = [],
            scripts = [],
            setheaders = [],
            showhost = False,
            stickyauth = None,
            stickycookie = None,
        )

        self.master.state.default_body_view = contentviews.get("Auto")

        signals.update_settings.send(self)
        signals.status_message.send(
            message = "All select.Options cleared",
            expire = 1
        )

    def setheaders(self):
        """Open the grid editor for header-set patterns."""
        self.master.view_grideditor(
            grideditor.SetHeadersEditor(
                self.master,
                self.master.options.setheaders,
                self.master.options.setter("setheaders")
            )
        )

    def tcp_hosts(self):
        """Open the grid editor for TCP proxying host patterns."""
        self.master.view_grideditor(
            grideditor.HostPatternEditor(
                self.master,
                self.master.options.tcp_hosts,
                self.master.options.setter("tcp_hosts")
            )
        )

    def ignore_hosts(self):
        """Open the grid editor for ignore-host patterns."""
        self.master.view_grideditor(
            grideditor.HostPatternEditor(
                self.master,
                self.master.options.ignore_hosts,
                self.master.options.setter("ignore_hosts")
            )
        )

    def replacepatterns(self):
        """Open the grid editor for replacement patterns."""
        self.master.view_grideditor(
            grideditor.ReplaceEditor(
                self.master,
                self.master.options.replacements,
                self.master.options.setter("replacements")
            )
        )

    def scripts(self):
        """Open the grid editor for the loaded script list."""
        self.master.view_grideditor(
            grideditor.ScriptEditor(
                self.master,
                [[i] for i in self.master.options.scripts],
                self.master.edit_scripts
            )
        )

    def default_displaymode(self):
        """Prompt for the global default body display mode."""
        signals.status_prompt_onekey.send(
            prompt = "Global default display mode",
            keys = contentviews.view_prompts,
            callback = self.master.change_default_display_mode
        )

    def has_default_displaymode(self):
        """Return True when the default body view differs from 'Auto'."""
        return self.master.state.default_body_view.name != "Auto"

    def sticky_auth(self):
        """Prompt for the sticky-auth filter expression."""
        signals.status_prompt.send(
            prompt = "Sticky auth filter",
            text = self.master.options.stickyauth,
            callback = self.master.options.setter("stickyauth")
        )

    def sticky_cookie(self):
        """Prompt for the sticky-cookie filter expression."""
        signals.status_prompt.send(
            prompt = "Sticky cookie filter",
            text = self.master.options.stickycookie,
            callback = self.master.options.setter("stickycookie")
        )

    def palette(self):
        """Open the colour palette picker."""
        self.master.view_palette_picker()
| 31.604651 | 77 | 0.484425 | from __future__ import absolute_import, print_function, division
import urwid
from mitmproxy import contentviews
from mitmproxy.console import common
from mitmproxy.console import grideditor
from mitmproxy.console import palettes
from mitmproxy.console import select
from mitmproxy.console import signals
footer = [
('heading_key', "enter/space"), ":toggle ",
('heading_key', "C"), ":clear all ",
]
def _mkhelp():
text = []
keys = [
("enter/space", "activate option"),
("C", "clear all options"),
]
text.extend(common.format_keyvals(keys, key="key", val="text", indent=4))
return text
help_context = _mkhelp()
class Options(urwid.WidgetWrap):
def __init__(self, master):
self.master = master
self.lb = select.Select(
[
select.Heading("Traffic Manipulation"),
select.Option(
"Header Set Patterns",
"H",
lambda: len(master.options.setheaders),
self.setheaders
),
select.Option(
"Ignore Patterns",
"I",
lambda: master.options.ignore_hosts,
self.ignore_hosts
),
select.Option(
"Replacement Patterns",
"R",
lambda: len(master.options.replacements),
self.replacepatterns
),
select.Option(
"Scripts",
"S",
lambda: master.options.scripts,
self.scripts
),
select.Heading("Interface"),
select.Option(
"Default Display Mode",
"M",
self.has_default_displaymode,
self.default_displaymode
),
select.Option(
"Palette",
"P",
lambda: self.master.palette != palettes.DEFAULT,
self.palette
),
select.Option(
"Show Host",
"w",
lambda: master.options.showhost,
master.options.toggler("showhost")
),
select.Heading("Network"),
select.Option(
"No Upstream Certs",
"U",
lambda: master.options.no_upstream_cert,
master.options.toggler("no_upstream_cert")
),
select.Option(
"TCP Proxying",
"T",
lambda: master.options.tcp_hosts,
self.tcp_hosts
),
select.Option(
"Don't Verify SSL/TLS Certificates",
"V",
lambda: master.options.ssl_insecure,
master.options.toggler("ssl_insecure")
),
select.Heading("Utility"),
select.Option(
"Anti-Cache",
"a",
lambda: master.options.anticache,
master.options.toggler("anticache")
),
select.Option(
"Anti-Compression",
"o",
lambda: master.options.anticomp,
master.options.toggler("anticomp")
),
select.Option(
"Kill Extra",
"x",
lambda: master.options.kill,
master.options.toggler("kill")
),
select.Option(
"No Refresh",
"f",
lambda: not master.options.refresh_server_playback,
master.options.toggler("refresh_server_playback")
),
select.Option(
"Sticky Auth",
"A",
lambda: master.options.stickyauth,
self.sticky_auth
),
select.Option(
"Sticky Cookies",
"t",
lambda: master.options.stickycookie,
self.sticky_cookie
),
]
)
title = urwid.Text("Options")
title = urwid.Padding(title, align="left", width=("relative", 100))
title = urwid.AttrWrap(title, "heading")
w = urwid.Frame(
self.lb,
header = title
)
super(Options, self).__init__(w)
self.master.loop.widget.footer.update("")
signals.update_settings.connect(self.sig_update_settings)
master.options.changed.connect(self.sig_update_settings)
def sig_update_settings(self, sender, updated=None):
self.lb.walker._modified()
def keypress(self, size, key):
if key == "C":
self.clearall()
return None
return super(self.__class__, self).keypress(size, key)
def clearall(self):
self.master.options.update(
anticache = False,
anticomp = False,
ignore_hosts = (),
tcp_hosts = (),
kill = False,
no_upstream_cert = False,
refresh_server_playback = True,
replacements = [],
scripts = [],
setheaders = [],
showhost = False,
stickyauth = None,
stickycookie = None,
)
self.master.state.default_body_view = contentviews.get("Auto")
signals.update_settings.send(self)
signals.status_message.send(
message = "All select.Options cleared",
expire = 1
)
def setheaders(self):
self.master.view_grideditor(
grideditor.SetHeadersEditor(
self.master,
self.master.options.setheaders,
self.master.options.setter("setheaders")
)
)
def tcp_hosts(self):
self.master.view_grideditor(
grideditor.HostPatternEditor(
self.master,
self.master.options.tcp_hosts,
self.master.options.setter("tcp_hosts")
)
)
def ignore_hosts(self):
self.master.view_grideditor(
grideditor.HostPatternEditor(
self.master,
self.master.options.ignore_hosts,
self.master.options.setter("ignore_hosts")
)
)
def replacepatterns(self):
self.master.view_grideditor(
grideditor.ReplaceEditor(
self.master,
self.master.options.replacements,
self.master.options.setter("replacements")
)
)
def scripts(self):
self.master.view_grideditor(
grideditor.ScriptEditor(
self.master,
[[i] for i in self.master.options.scripts],
self.master.edit_scripts
)
)
def default_displaymode(self):
signals.status_prompt_onekey.send(
prompt = "Global default display mode",
keys = contentviews.view_prompts,
callback = self.master.change_default_display_mode
)
def has_default_displaymode(self):
return self.master.state.default_body_view.name != "Auto"
def sticky_auth(self):
signals.status_prompt.send(
prompt = "Sticky auth filter",
text = self.master.options.stickyauth,
callback = self.master.options.setter("stickyauth")
)
def sticky_cookie(self):
signals.status_prompt.send(
prompt = "Sticky cookie filter",
text = self.master.options.stickycookie,
callback = self.master.options.setter("stickycookie")
)
def palette(self):
self.master.view_palette_picker()
| true | true |
f7fb300272c8f9cbf76ac8ba0c823da11c1f6e34 | 808 | py | Python | 02_task/02_subtask.py | SKantar/SignalProcessing | c8e5e9a45c92e1d337086b60bf7eed131756dcaf | [
"MIT"
] | null | null | null | 02_task/02_subtask.py | SKantar/SignalProcessing | c8e5e9a45c92e1d337086b60bf7eed131756dcaf | [
"MIT"
] | null | null | null | 02_task/02_subtask.py | SKantar/SignalProcessing | c8e5e9a45c92e1d337086b60bf7eed131756dcaf | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import fft
Fs = 200 # Sample frequence
N = 150 # Number of sample points
T = 1.0 / Fs # Sample spacing
t = np.linspace(T, N * T, N)
A = 2.3
f = 20
x_clear = A * np.sin(f * 2.0 * np.pi * t)
powers, colors = [0, 1, 3, 6], ['r', 'g', 'b', 'y']
for i, power in enumerate(powers):
e = np.random.normal(0, 1, N) * np.sqrt(power)
x = x_clear + e
xf = fft(x)
yf = np.linspace(0.0, 1.0 / (2.0 * T), N // 2)
# Display FFT
plt.figure(i + 1)
plt.stem(
yf,
2.0 / N * np.abs(xf[0:N // 2]),
colors[i],
markerfmt='{}o'.format(colors[i])
)
plt.title('FFT Spectrum AWGN Power {}'.format(powers[i]))
plt.xlabel('Frequency [Hz]')
plt.grid()
plt.show()
| 22.444444 | 61 | 0.532178 | import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import fft
Fs = 200
N = 150
T = 1.0 / Fs
t = np.linspace(T, N * T, N)
A = 2.3
f = 20
x_clear = A * np.sin(f * 2.0 * np.pi * t)
powers, colors = [0, 1, 3, 6], ['r', 'g', 'b', 'y']
for i, power in enumerate(powers):
e = np.random.normal(0, 1, N) * np.sqrt(power)
x = x_clear + e
xf = fft(x)
yf = np.linspace(0.0, 1.0 / (2.0 * T), N // 2)
plt.figure(i + 1)
plt.stem(
yf,
2.0 / N * np.abs(xf[0:N // 2]),
colors[i],
markerfmt='{}o'.format(colors[i])
)
plt.title('FFT Spectrum AWGN Power {}'.format(powers[i]))
plt.xlabel('Frequency [Hz]')
plt.grid()
plt.show()
| true | true |
f7fb30647bd52e10d1e69b97e1f25ed17da4891c | 5,556 | py | Python | fc.py | HamzaFarhan/dreamai_2 | 58b28cb964f026882d289b2fdbea301005bad394 | [
"MIT"
] | null | null | null | fc.py | HamzaFarhan/dreamai_2 | 58b28cb964f026882d289b2fdbea301005bad394 | [
"MIT"
] | 8 | 2020-03-31T11:17:01.000Z | 2022-03-12T00:17:36.000Z | fc.py | HamzaFarhan/dreamai_2 | 58b28cb964f026882d289b2fdbea301005bad394 | [
"MIT"
] | null | null | null | from dreamai.dai_imports import*
from dreamai.utils import *
from dreamai.model import *
class FC(Network):
def __init__(self,
num_inputs=10,
num_outputs=10,
layers=[],
lr=0.003,
class_names=[],
optimizer_name='AdaDelta',
dropout_p=0.2,
hidden_non_linearity='relu',
output_non_linearity=None,
criterion=nn.CrossEntropyLoss(),
model_name='FC',
model_type ='classifier',
best_accuracy=0.,
best_validation_loss=None,
best_model_file = 'best_model_file.pth',
device=None):
super().__init__(device=device)
self.hidden_non_linearity = hidden_non_linearity
self.model = nn.Sequential()
if len(layers) > 0:
self.model.add_module('fc1',nn.Linear(num_inputs,layers[0]))
self.model.add_module(hidden_non_linearity+'1',nn.ReLU())
self.model.add_module('dropout1',nn.Dropout(p=dropout_p,inplace=True))
for i in range(1,len(layers)):
self.model.add_module('fc'+str(i+1),nn.Linear(layers[i-1],layers[i]))
self.model.add_module(hidden_non_linearity+str(i+1),nn.ReLU())
self.model.add_module('dropout'+str(i+1),nn.Dropout(p=dropout_p,
inplace=True))
self.model.add_module('out',nn.Linear(layers[-1],num_outputs))
else:
self.model.add_module('out',nn.Linear(num_inputs,num_outputs))
if output_non_linearity:
self.model.add_module(output_non_linearity,output_non_linearity)
# if (model_type.lower() == 'regressor' or model_type.lower() == 'recommender') and output_non_linearity is not None:
# print('Output non linearity = {}'.format(output_non_linearity))
# if output_non_linearity.lower() == 'sigmoid':
# self.model.add_module(output_non_linearity,nn.Sigmoid())
# self.output_non_linearity = output_non_linearity
# self.to(self.device)
self.model = self.model.to(self.device)
self.set_model_params(criterion = criterion,
optimizer_name = optimizer_name,
lr = lr,
dropout_p = dropout_p,
model_name = model_name,
model_type = model_type,
best_accuracy = best_accuracy,
best_validation_loss = best_validation_loss,
best_model_file = best_model_file,
num_inputs = num_inputs,
num_outputs = num_outputs,
layers = layers,
class_names = class_names)
def forward(self,x):
return self.model(flatten_tensor(x))
def _get_dropout(self):
for layer in self.model:
if type(layer) == torch.nn.modules.dropout.Dropout:
return layer.p
def _set_dropout(self,p=0.2):
for layer in self.model:
if type(layer) == torch.nn.modules.dropout.Dropout:
print('FC: setting dropout prob to {:.3f}'.format(p))
layer.p=p
def set_model_params(self,
criterion = nn.CrossEntropyLoss(),
optimizer_name = 'Adadelta',
lr = 0.1,
dropout_p = 0.45,
model_name = 'FC',
model_type = 'classifier',
best_accuracy = 0.,
best_validation_loss = None,
best_model_file = 'best_model_file.pth',
num_inputs = 10,
num_outputs = 10,
layers =[],
class_names = []):
super(FC, self).set_model_params(
criterion = criterion,
optimizer_name = optimizer_name,
lr = lr,
dropout_p = dropout_p,
model_name = model_name,
model_type = model_type,
best_accuracy = best_accuracy,
best_validation_loss = best_validation_loss,
best_model_file = best_model_file
)
self.class_names = class_names
self.num_classes = num_outputs
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.layer_dims = layers
if len(self.class_names)==0:
self.class_names = {k:str(v) for k,v in enumerate(list(range(self.num_outputs)))}
else:
self.num_classes = len(self.class_names)
def get_model_params(self):
params = super(FC, self).get_model_params()
params['num_inputs'] = self.num_inputs
params['num_outputs'] = self.num_outputs
params['layers'] = self.layer_dims
params['class_names'] = self.class_names
params['num_classes'] = self.num_classes
return params
| 42.738462 | 125 | 0.49946 | from dreamai.dai_imports import*
from dreamai.utils import *
from dreamai.model import *
class FC(Network):
def __init__(self,
num_inputs=10,
num_outputs=10,
layers=[],
lr=0.003,
class_names=[],
optimizer_name='AdaDelta',
dropout_p=0.2,
hidden_non_linearity='relu',
output_non_linearity=None,
criterion=nn.CrossEntropyLoss(),
model_name='FC',
model_type ='classifier',
best_accuracy=0.,
best_validation_loss=None,
best_model_file = 'best_model_file.pth',
device=None):
super().__init__(device=device)
self.hidden_non_linearity = hidden_non_linearity
self.model = nn.Sequential()
if len(layers) > 0:
self.model.add_module('fc1',nn.Linear(num_inputs,layers[0]))
self.model.add_module(hidden_non_linearity+'1',nn.ReLU())
self.model.add_module('dropout1',nn.Dropout(p=dropout_p,inplace=True))
for i in range(1,len(layers)):
self.model.add_module('fc'+str(i+1),nn.Linear(layers[i-1],layers[i]))
self.model.add_module(hidden_non_linearity+str(i+1),nn.ReLU())
self.model.add_module('dropout'+str(i+1),nn.Dropout(p=dropout_p,
inplace=True))
self.model.add_module('out',nn.Linear(layers[-1],num_outputs))
else:
self.model.add_module('out',nn.Linear(num_inputs,num_outputs))
if output_non_linearity:
self.model.add_module(output_non_linearity,output_non_linearity)
self.model = self.model.to(self.device)
self.set_model_params(criterion = criterion,
optimizer_name = optimizer_name,
lr = lr,
dropout_p = dropout_p,
model_name = model_name,
model_type = model_type,
best_accuracy = best_accuracy,
best_validation_loss = best_validation_loss,
best_model_file = best_model_file,
num_inputs = num_inputs,
num_outputs = num_outputs,
layers = layers,
class_names = class_names)
def forward(self,x):
return self.model(flatten_tensor(x))
def _get_dropout(self):
for layer in self.model:
if type(layer) == torch.nn.modules.dropout.Dropout:
return layer.p
def _set_dropout(self,p=0.2):
for layer in self.model:
if type(layer) == torch.nn.modules.dropout.Dropout:
print('FC: setting dropout prob to {:.3f}'.format(p))
layer.p=p
def set_model_params(self,
criterion = nn.CrossEntropyLoss(),
optimizer_name = 'Adadelta',
lr = 0.1,
dropout_p = 0.45,
model_name = 'FC',
model_type = 'classifier',
best_accuracy = 0.,
best_validation_loss = None,
best_model_file = 'best_model_file.pth',
num_inputs = 10,
num_outputs = 10,
layers =[],
class_names = []):
super(FC, self).set_model_params(
criterion = criterion,
optimizer_name = optimizer_name,
lr = lr,
dropout_p = dropout_p,
model_name = model_name,
model_type = model_type,
best_accuracy = best_accuracy,
best_validation_loss = best_validation_loss,
best_model_file = best_model_file
)
self.class_names = class_names
self.num_classes = num_outputs
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.layer_dims = layers
if len(self.class_names)==0:
self.class_names = {k:str(v) for k,v in enumerate(list(range(self.num_outputs)))}
else:
self.num_classes = len(self.class_names)
def get_model_params(self):
params = super(FC, self).get_model_params()
params['num_inputs'] = self.num_inputs
params['num_outputs'] = self.num_outputs
params['layers'] = self.layer_dims
params['class_names'] = self.class_names
params['num_classes'] = self.num_classes
return params
| true | true |
f7fb306aa7ec4b5abf0af2dfa7f682c9969f4efb | 116 | py | Python | detectrino/basics.py | lgvaz/detectrino | dc3b15f44be8f143f03d67a7d05cbad8f6cca78f | [
"Apache-2.0"
] | 1 | 2021-06-23T16:41:03.000Z | 2021-06-23T16:41:03.000Z | detectrino/basics.py | lgvaz/detectrino | dc3b15f44be8f143f03d67a7d05cbad8f6cca78f | [
"Apache-2.0"
] | 5 | 2020-04-27T19:35:44.000Z | 2022-02-26T07:29:35.000Z | detectrino/basics.py | lgvaz/detectrino | dc3b15f44be8f143f03d67a7d05cbad8f6cca78f | [
"Apache-2.0"
] | null | null | null | from ._imports import *
from .core import *
from .data.all import *
from .model.all import *
from .learner import *
| 19.333333 | 24 | 0.724138 | from ._imports import *
from .core import *
from .data.all import *
from .model.all import *
from .learner import *
| true | true |
f7fb3232075947da504efe0223786d7f0c962b46 | 3,335 | py | Python | digsby/ext/buildexts.py | ifwe/digsby | f5fe00244744aa131e07f09348d10563f3d8fa99 | [
"Python-2.0"
] | 35 | 2015-08-15T14:32:38.000Z | 2021-12-09T16:21:26.000Z | digsby/ext/buildexts.py | niterain/digsby | 16a62c7df1018a49eaa8151c0f8b881c7e252949 | [
"Python-2.0"
] | 4 | 2015-09-12T10:42:57.000Z | 2017-02-27T04:05:51.000Z | digsby/ext/buildexts.py | niterain/digsby | 16a62c7df1018a49eaa8151c0f8b881c7e252949 | [
"Python-2.0"
] | 15 | 2015-07-10T23:58:07.000Z | 2022-01-23T22:16:33.000Z | #__LICENSE_GOES_HERE__
#
# builds "cgui", Digsby's native code extension
#
# use "python buildexts.py build_ext" from the command line
# to build cgui.pyd or cgui.so
#
from wx.build.config import *
import wx
import cguisetup
import sys, os
from os import environ as env
from os.path import join as pj
from pprint import pprint
def build():
global swig_args, includes, defines, cflags, lflags
if os.name == "nt":
WXDIR = env['WXDIR']
WXPY_SRC = env['WXPYDIR']
def die(msg):
print msg
sys.exit(-1)
def sanity_checks():
from path import path
if not path(WXPY_SRC).files('_core.py'):
die(WXPY_SRC + ' does not have _core.py -- is it a valid wxPython?')
swig_args += ['-v']
print 'swig_args:'
pprint(swig_args)
includes += cguisetup.include_dirs
if os.name == 'nt':
includes += [
'%s\\include' % WXPY_SRC,
'%s\\..\\include' % WXPY_SRC,
pj(WXPY_SRC, 'lib', 'vc_dll', 'mswuh'),
]
sources = cguisetup.sources
# WIN32
if os.name == 'nt':
# Unlike Win, on Unix/Mac the wxPy developer package is not separate, so we do
# not need this sanity check there; import wx above should fail on Unix/Mac
# if we've got an invalid wxPython.svn diff
sanity_checks()
sources.append('src/debugapp.cpp')
# add some include dirs for SWIG
swig_args += ['-I' + pj(*([WXPY_SRC] + paths)) for paths in (
['src'],
# ['..', 'include'],
# ['..', 'include', 'wx', 'msw'],
# ['include', 'wx', 'wxPython', 'i_files'],
)]
cflags += ['/Zi', # generates PDBs (debugging symbols files)
'/D_UNICODE'] # use unicode Win32 functions
lflags = lflags or []
lflags += ['/DEBUG',
'/LTCG']
for include in cguisetup.include_dirs:
swig_args += ['-I' + include]
exts = [('cgui', sources + ["src/cgui_wrap.cpp"])]
# common args to distuils.Extension
extopts = dict(include_dirs = includes,
define_macros = defines,
library_dirs = libdirs,
libraries = libs,
extra_compile_args = cflags,
extra_link_args = lflags,
swig_opts = swig_args,
language = 'c++',)
ext_modules = []
for extension_name, sources in exts:
swig_sources = run_swig(files = ['./src/%s.i' % extension_name],
dir = '',
gendir = '.',
package = '.',
USE_SWIG = True,
force = True,
swig_args = swig_args,
swig_deps = swig_deps)
print
print 'building extension %r' % extension_name
print
print 'sources:'
pprint(sources)
print
ext = Extension('_' + extension_name, sources, **extopts)
ext_modules.append(ext)
setup(ext_modules = ext_modules, scripts=['src/cgui.py'])
if __name__ == '__main__':
build()
| 28.504274 | 87 | 0.505847 |
#
# use "python buildexts.py build_ext" from the command line
# to build cgui.pyd or cgui.so
#
from wx.build.config import *
import wx
import cguisetup
import sys, os
from os import environ as env
from os.path import join as pj
from pprint import pprint
def build():
global swig_args, includes, defines, cflags, lflags
if os.name == "nt":
WXDIR = env['WXDIR']
WXPY_SRC = env['WXPYDIR']
def die(msg):
print msg
sys.exit(-1)
def sanity_checks():
from path import path
if not path(WXPY_SRC).files('_core.py'):
die(WXPY_SRC + ' does not have _core.py -- is it a valid wxPython?')
swig_args += ['-v']
print 'swig_args:'
pprint(swig_args)
includes += cguisetup.include_dirs
if os.name == 'nt':
includes += [
'%s\\include' % WXPY_SRC,
'%s\\..\\include' % WXPY_SRC,
pj(WXPY_SRC, 'lib', 'vc_dll', 'mswuh'),
]
sources = cguisetup.sources
# WIN32
if os.name == 'nt':
# Unlike Win, on Unix/Mac the wxPy developer package is not separate, so we do
# not need this sanity check there; import wx above should fail on Unix/Mac
# if we've got an invalid wxPython.svn diff
sanity_checks()
sources.append('src/debugapp.cpp')
swig_args += ['-I' + pj(*([WXPY_SRC] + paths)) for paths in (
['src'],
)]
cflags += ['/Zi',
'/D_UNICODE']
lflags = lflags or []
lflags += ['/DEBUG',
'/LTCG']
for include in cguisetup.include_dirs:
swig_args += ['-I' + include]
exts = [('cgui', sources + ["src/cgui_wrap.cpp"])]
extopts = dict(include_dirs = includes,
define_macros = defines,
library_dirs = libdirs,
libraries = libs,
extra_compile_args = cflags,
extra_link_args = lflags,
swig_opts = swig_args,
language = 'c++',)
ext_modules = []
for extension_name, sources in exts:
swig_sources = run_swig(files = ['./src/%s.i' % extension_name],
dir = '',
gendir = '.',
package = '.',
USE_SWIG = True,
force = True,
swig_args = swig_args,
swig_deps = swig_deps)
print
print 'building extension %r' % extension_name
print
print 'sources:'
pprint(sources)
print
ext = Extension('_' + extension_name, sources, **extopts)
ext_modules.append(ext)
setup(ext_modules = ext_modules, scripts=['src/cgui.py'])
if __name__ == '__main__':
build()
| false | true |
f7fb32e353b204583e9bb19758cc4828ed9e6c2e | 16,079 | py | Python | log_mito/model_790.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_mito/model_790.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_mito/model_790.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd'])
Monomer('C3pro', ['Apop'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 197500.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None) + BidU(C8A=None) | C8A(BidU=1) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1) % BidU(C8A=1) >> C8A(BidU=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None), C8pro_0)
Initial(C3pro(Apop=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
| 87.863388 | 710 | 0.80347 |
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd'])
Monomer('C3pro', ['Apop'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 197500.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None) + BidU(C8A=None) | C8A(BidU=1) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1) % BidU(C8A=1) >> C8A(BidU=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None), C8pro_0)
Initial(C3pro(Apop=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
| true | true |
f7fb330a08ca9e96555a571dac03cc6aae74bba4 | 417 | py | Python | config/gunicorn.py | NDevox/website | 76004e667f2295eddd79d500ba21f02a0480412f | [
"Apache-2.0"
] | null | null | null | config/gunicorn.py | NDevox/website | 76004e667f2295eddd79d500ba21f02a0480412f | [
"Apache-2.0"
] | null | null | null | config/gunicorn.py | NDevox/website | 76004e667f2295eddd79d500ba21f02a0480412f | [
"Apache-2.0"
] | null | null | null | import multiprocessing
import os
# Gunicorn server settings (module-level names read by gunicorn), each
# overridable through environment variables.
accesslog = '-'  # '-' sends the access log to stdout (gunicorn convention)
# Listen address assembled from GUNICORN_HOST/GUNICORN_PORT env vars
bind = f'{os.getenv("GUNICORN_HOST", "0.0.0.0")}:{os.getenv("GUNICORN_PORT", "8000")}' # noqa
capture_output = True  # redirect worker stdout/stderr into the error log
# Any of true/1/yes/on (case-insensitive) enables syslog output
syslog = os.getenv('LOG_SYSLOG', 'false').lower() in ['true', '1', 'yes', 'on']
# Default thread count: 2*CPUs + 1 (common gunicorn sizing rule)
threads = int(os.getenv('GUNICORN_THREADS', multiprocessing.cpu_count() * 2 + 1)) # noqa
workers = int(os.getenv('GUNICORN_WORKERS', 1))
worker_class = 'gthread'  # threaded worker type, pairs with the `threads` setting
| 23.166667 | 94 | 0.669065 | import multiprocessing
import os
accesslog = '-'
bind = f'{os.getenv("GUNICORN_HOST", "0.0.0.0")}:{os.getenv("GUNICORN_PORT", "8000")}'
capture_output = True
syslog = os.getenv('LOG_SYSLOG', 'false').lower() in ['true', '1', 'yes', 'on']
threads = int(os.getenv('GUNICORN_THREADS', multiprocessing.cpu_count() * 2 + 1))
workers = int(os.getenv('GUNICORN_WORKERS', 1))
worker_class = 'gthread'
| true | true |
f7fb3500e8b05c2631571dc3f2a87ba31bfc73e9 | 290 | py | Python | Chapter2_Python/ListSlicing.py | gbbDonkiKong/UdemyAI_Template | 9d17edc43f0342675d194f29bf45fde77e4f5f0e | [
"MIT"
] | null | null | null | Chapter2_Python/ListSlicing.py | gbbDonkiKong/UdemyAI_Template | 9d17edc43f0342675d194f29bf45fde77e4f5f0e | [
"MIT"
] | null | null | null | Chapter2_Python/ListSlicing.py | gbbDonkiKong/UdemyAI_Template | 9d17edc43f0342675d194f29bf45fde77e4f5f0e | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
# Demo: scatter-plot four points and show list slicing.
# x[0] holds the x1 coordinates, x[1] the x2 coordinates of four points.
x = [[1, 4, 3, 9],
     [3, 1, 5, 2]]
# One color label per point
y = ['red', 'blue', 'blue', 'red']
# P1 (x1 = 1, x2 = 3), class = 'red'
x1 = x[0]
x2 = x[1]
plt.scatter(x1, x2, color=y)
# plt.show()
w = [1, 3, 6, 9, 7, 4]
# Slice indices 1..4 (end-exclusive 5) and square each value -> [9, 36, 81, 49]
w_squared = [val**2 for val in w[1:5]]
print(w_squared) | 15.263158 | 38 | 0.510345 | import matplotlib.pyplot as plt
x = [[1, 4, 3, 9],
[3, 1, 5, 2]]
y = ['red', 'blue', 'blue', 'red']
x1 = x[0]
x2 = x[1]
plt.scatter(x1, x2, color=y)
w = [1, 3, 6, 9, 7, 4]
w_squared = [val**2 for val in w[1:5]]
print(w_squared) | true | true |
f7fb35162fbb35d57df67445293c9781bc9f7f09 | 19,127 | py | Python | api/jobs/api.py | jkoren/jobhopper | 1c579871be1fe8fe5c1c0f7dbd4309f84479ed3b | [
"MIT"
] | null | null | null | api/jobs/api.py | jkoren/jobhopper | 1c579871be1fe8fe5c1c0f7dbd4309f84479ed3b | [
"MIT"
] | null | null | null | api/jobs/api.py | jkoren/jobhopper | 1c579871be1fe8fe5c1c0f7dbd4309f84479ed3b | [
"MIT"
] | null | null | null | from .models import Socs, BlsOes, StateAbbPairs, OccupationTransitions, SocDescription
from rest_framework import viewsets, permissions, generics
from rest_framework.throttling import AnonRateThrottle
from rest_framework.response import Response
from django.forms.models import model_to_dict
from collections import namedtuple
import django_filters
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework.pagination import LimitOffsetPagination
import requests
from requests.auth import HTTPBasicAuth
from typing import Dict, Any
from decouple import config
from rapidfuzz import fuzz
from .serializers import (
BlsOesSerializer,
StateNamesSerializer,
SocListSerializer,
OccupationTransitionsSerializer,
BlsTransitionsSerializer,
)
import logging
log = logging.getLogger()
# Documentation for Django generally refers to these views as views.py rather than api.py
class BlsOesFilter(django_filters.FilterSet):
    """
    Filter for the BlsOes model (wage/employment rows). When multiple options are chosen in
    these filters, there must be no space between comma-separated values.
    """
    # Comma-separated SOC codes, e.g. ?socs=11-1011,13-2011 (no spaces)
    socs = django_filters.BaseInFilter(field_name='soc_code', lookup_expr='in')
    # Comma-separated area titles, e.g. ?areas=U.S.,Massachusetts (no spaces)
    areas = django_filters.BaseInFilter(field_name='area_title', lookup_expr='in')
    class Meta:
        model = BlsOes
        fields = ['socs', 'areas']
class BlsOesViewSet(viewsets.ReadOnlyModelViewSet):
    """
    Read-only endpoint for wage/employment data by location, SOC code, and year,
    filterable via BlsOesFilter and paginated with limit/offset.
    """
    queryset = BlsOes.objects.all()
    serializer_class = BlsOesSerializer
    permission_classes = [permissions.AllowAny]
    throttle_classes = [AnonRateThrottle]
    pagination_class = LimitOffsetPagination
    filter_class = BlsOesFilter

    def list(self, request, *args, **kwargs):
        """Return the filtered queryset, paginated when pagination is active."""
        filtered = self.filter_queryset(self.get_queryset())
        page = self.paginate_queryset(filtered)
        if page is None:
            # Pagination disabled for this request: serialize everything.
            return Response(self.get_serializer(filtered, many=True).data)
        return self.get_paginated_response(self.get_serializer(page, many=True).data)
class SocListFilter(django_filters.FilterSet):
    """
    Filter for the SocDescription model. When multiple options are chosen in these filters, there
    must be no space between comma-separated values. min_transition_observations refers to the minimum number of
    observations used to derive transition probabilities for a given source SOC. According to Schubert, Stansbury,
    and Taska (2020), this need not be an integer since it can be reweighted by age.
    """
    # Comma-separated SOC codes, e.g. ?socs=11-1011,13-2011 (no spaces)
    socs = django_filters.BaseInFilter(field_name="soc_code", lookup_expr="in")
    # Keep rows whose (weighted) transition-observation count is >= the given value
    min_transition_observations = django_filters.NumberFilter(field_name="total_transition_obs", lookup_expr="gte")
    class Meta:
        model = SocDescription
        fields = ["socs", "min_transition_observations"]
class SocListSimpleViewSet(viewsets.ReadOnlyModelViewSet):
    """
    ViewSet for all unique SOC codes and descriptions with wage/employment data available.
    Filterable by SOC code and minimum transition observations (see SocListFilter).
    """
    queryset = SocDescription.objects.all()
    permission_classes = [permissions.AllowAny]  # public, rate-limited endpoint
    serializer_class = SocListSerializer
    throttle_classes = [AnonRateThrottle]
    filter_class = SocListFilter
class SocListSmartViewSet(viewsets.ReadOnlyModelViewSet):
    """
    ViewSet for finding SOC codes matching a user's requested keyword.

    Combines keyword hits from the O*NET search API with the SOC codes that
    actually have transitions data available, plus near-exact fuzzy matches
    on SOC title/code (see FUZZ_LIMIT).
    """
    serializer_class = SocListSerializer
    permission_classes = [permissions.AllowAny]
    throttle_classes = [AnonRateThrottle]

    # Default API parameters for O*NET results and (weighted) minimum number of transitions observed
    DEFAULT_ONET_LIMIT = 10
    MAX_ONET_LIMIT = 50
    DEFAULT_OBS_LIMIT = 1000
    # fuzz.partial_ratio score threshold for the tiered near-exact match on SOC title/code vs. keyword
    FUZZ_LIMIT = 90

    # Manual query parameters surfaced in the Swagger docs (see swagger_auto_schema on list())
    KEYWORD_PARAMETER = openapi.Parameter("keyword_search",
                                          openapi.IN_QUERY,
                                          description="Keyword search via O*NET",
                                          type=openapi.TYPE_STRING)
    # Plain string (was an f-string with no placeholders)
    ONET_LIMIT_PARAMETER = openapi.Parameter("onet_limit",
                                             openapi.IN_QUERY,
                                             description="Limit to O*NET search results",
                                             type=openapi.TYPE_INTEGER)
    OBS_LIMIT_PARAM = openapi.Parameter("min_weighted_obs",
                                        openapi.IN_QUERY,
                                        description="Minimum (weighted) observed transitions from source SOC",
                                        type=openapi.TYPE_NUMBER)

    def _set_params(self, request) -> None:
        """
        Set request-scoped parameters from the query string.

        onet_limit falls back to DEFAULT_ONET_LIMIT when missing, non-numeric,
        or larger than MAX_ONET_LIMIT (the previous implementation raised an
        unhandled ValueError on a non-numeric onet_limit, causing a 500).
        obs_limit falls back to DEFAULT_OBS_LIMIT when missing.

        :param request: DRF request carrying the user-input query parameters
        """
        self.keyword_search = request.query_params.get("keyword_search")
        self.onet_limit = request.query_params.get("onet_limit")
        self.obs_limit = request.query_params.get("min_weighted_obs")
        try:
            self.onet_limit = int(self.onet_limit)
        except (TypeError, ValueError):
            # Missing (None) or non-numeric value -> default
            self.onet_limit = self.DEFAULT_ONET_LIMIT
        if self.onet_limit > self.MAX_ONET_LIMIT:
            self.onet_limit = self.DEFAULT_ONET_LIMIT
        if not self.obs_limit:
            self.obs_limit = self.DEFAULT_OBS_LIMIT

    def get_queryset(self):
        """
        Custom queryset used that is a combination of querysets from a couple models. Overwriting to prevent
        schema generation warning.
        """
        pass

    @staticmethod
    def search_onet_keyword(keyword: str,
                            limit: int = 20) -> Dict[str, Any]:
        """
        Search for a keyword that will be matched to SOC codes via the O*NET API.
        Best effort: returns None on any request/parsing failure.

        :param keyword: Keyword that's requested (user search)
        :param limit: Limit to number of results returned by O*NET
        :return: JSON response, e.g. {'keyword': 'doctor', ...
                                      'career': [{'href': '',
                                                  'code': '29-1216.00',
                                                  'title': 'General Internal Medicine Physicians',
                                                  'tags': {'bright_outlook': ...},
                                                  ...]}
        """
        headers = {"Accept": "application/json"}
        username = config("ONET_USERNAME")
        password = config("ONET_PASSWORD")
        try:
            response = requests.get(f"https://services.onetcenter.org/ws/mnm/search?keyword={keyword}",
                                    headers=headers,
                                    params={'end': limit},
                                    auth=HTTPBasicAuth(username, password))
            return response.json()
        except Exception as e:
            # Broad catch is deliberate: a failed O*NET call must degrade to
            # returning all available SOCs rather than erroring the endpoint.
            log.warning(e)
            return None

    @swagger_auto_schema(manual_parameters=[KEYWORD_PARAMETER, ONET_LIMIT_PARAMETER, OBS_LIMIT_PARAM])
    def list(self, request):
        """
        Query parameters:
        ------------------------
        * keyword_search: User-input keyword search for related professions
        * onet_limit: Limit to the number of results pulled back from O*NET; capped by MAX_ONET_LIMIT. Responses will
          only include smart-search SOCs with transitions data available. If no response is found from O*NET, all
          available SOC codes are returned.
        * min_weighted_obs: Minimum number of observed transitions (weighted) for a response to be included
        """
        self._set_params(request=request)
        # All SOCs with at least obs_limit (weighted) observed transitions,
        # serialized to plain dicts for the response / set operations below.
        available_socs = [model_to_dict(item)
                          for item in SocDescription.objects.filter(total_transition_obs__gte=self.obs_limit)]
        available_soc_codes = {soc.get("soc_code") for soc in available_socs}
        # Query O*NET for SOC codes matching the keyword (best effort)
        onet_soc_codes = None
        if self.keyword_search:
            try:
                onet_socs = self.search_onet_keyword(keyword=self.keyword_search,
                                                     limit=self.onet_limit)
                log.info(f"Smart search results: {onet_socs}")
                # O*NET codes carry a decimal suffix (e.g. 29-1216.00); strip it
                # so they line up with the transition-data SOC codes.
                onet_soc_codes = {soc.get("code", "").split(".")[0]
                                  for soc in onet_socs.get("career")}
                log.info(f"Smart search SOC codes: {onet_soc_codes}")
            except Exception as e:
                log.info(f"Unable to find search results from O*NET for keyword {self.keyword_search} | {e}")
        # No keyword or no O*NET hits: fall back to everything available
        if not onet_soc_codes:
            return Response(available_socs)
        # Tiered matching: SOCs whose title/code nearly matches the keyword are
        # kept even if O*NET missed them (O*NET omits some older SOC codes).
        fuzz_soc_codes = {soc.get("soc_code") for soc in available_socs
                          if fuzz.partial_ratio(self.keyword_search.lower(),
                                                soc.get("soc_title").lower() + soc.get("soc_code")) >= self.FUZZ_LIMIT}
        log.info(f"SOC codes/titles that are a close exact match to the keyword search {fuzz_soc_codes}")
        # (O*NET ∩ available) ∪ near-exact title/code matches
        smart_soc_codes = onet_soc_codes.intersection(available_soc_codes).union(fuzz_soc_codes)
        smart_socs = [soc for soc in available_socs
                      if soc.get("soc_code") in smart_soc_codes]
        return Response(smart_socs)
class StateViewSet(viewsets.ReadOnlyModelViewSet):
    """
    Read-only ViewSet listing state name/abbreviation pairs (StateAbbPairs model).
    """
    queryset = StateAbbPairs.objects.all()
    permission_classes = [permissions.AllowAny]  # public, rate-limited endpoint
    serializer_class = StateNamesSerializer
    throttle_classes = [AnonRateThrottle]
class OccupationTransitionsFilter(django_filters.FilterSet):
    """
    Filter for the OccupationTransitions model, used by OccupationTransitionsViewSet.
    """
    # field_name instead of name, and lookup_expr instead of lookup_type is used for the NumberFilter for Django 2.0+
    # Keep rows whose transition probability (pi) is >= the given value
    min_transition_probability = django_filters.NumberFilter(field_name="pi", lookup_expr="gte")
    class Meta:
        model = OccupationTransitions
        fields = ["min_transition_probability", "soc1"]  # soc1: source SOC code
class OccupationTransitionsViewSet(viewsets.ReadOnlyModelViewSet):
    """
    Read-only ViewSet for occupation transitions (Burning Glass) data,
    filterable by source SOC and minimum transition probability
    (see OccupationTransitionsFilter) and paginated with limit/offset.
    """
    queryset = OccupationTransitions.objects.all()
    permission_classes = [permissions.AllowAny]
    serializer_class = OccupationTransitionsSerializer
    throttle_classes = [AnonRateThrottle]
    pagination_class = LimitOffsetPagination
    filter_class = OccupationTransitionsFilter
class BlsTransitionsViewSet(viewsets.ReadOnlyModelViewSet):
    """
    A custom ViewSet for BLS OES wage/employment data and occupation transitions/burning glass data
    See Swagger docs for more details on the GET endpoint /transitions-extended/.
    /transitions-extended/{id}/ is not supported.
    Sample endpoint query:
    ------------------------
    /?area_title=Massachusetts&soc=35-3031&min_transition_probability=0.01
    """
    serializer_class = BlsTransitionsSerializer
    permission_classes = [permissions.AllowAny]
    throttle_classes = [AnonRateThrottle]
    # swagger_schema = None # Exclude from swagger schema.
    # Use a named tuple to pass data from multiple models to the response
    BLS_TRANSITIONS = namedtuple("BlsTransitions", ("bls", "transitions"))
    # Defaults applied when the corresponding query parameter is absent
    DEFAULT_AREA = "U.S."
    DEFAULT_SOC = "35-3031" # 35-3031 is waiters and waitresses
    DEFAULT_TRANSITION_PROBABILITY = 0.01
    # Manual query parameters surfaced in the Swagger docs (see swagger_auto_schema on list())
    SOC_SWAGGER_PARAM = openapi.Parameter("soc",
                                          openapi.IN_QUERY,
                                          description="Source SOC code",
                                          type=openapi.TYPE_STRING)
    AREA_SWAGGER_PARAM = openapi.Parameter("area_title",
                                           openapi.IN_QUERY,
                                           description="Location",
                                           type=openapi.TYPE_STRING)
    PI_SWAGGER_PARAM = openapi.Parameter("min_transition_probability",
                                         openapi.IN_QUERY,
                                         description="Minimum transition probability",
                                         type=openapi.TYPE_NUMBER
                                         )
    def get_queryset(self):
        """
        Custom queryset used that is a combination of querysets from a couple models. Overwriting to prevent
        schema generation warning.
        """
        pass
    def _set_params(self, request) -> None:
        """
        Set parameters based on the request. Custom parameters are identified by their openapi.Parameter name
        Missing parameters fall back to the class-level DEFAULT_* constants.
        :param request: User-input parameters
        :return: None; sets self.area_title_filter, self.source_soc, and
            self.min_transition_probability
        """
        area_title = request.query_params.get("area_title")
        source_soc = request.query_params.get("soc")
        min_transition_probability = request.query_params.get("min_transition_probability")
        if not area_title:
            self.area_title_filter = self.DEFAULT_AREA
        else:
            self.area_title_filter = area_title
        if not source_soc:
            self.source_soc = self.DEFAULT_SOC
        else:
            self.source_soc = source_soc
        if not min_transition_probability:
            self.min_transition_probability = self.DEFAULT_TRANSITION_PROBABILITY
        else:
            self.min_transition_probability = min_transition_probability
    @swagger_auto_schema(manual_parameters=[SOC_SWAGGER_PARAM, PI_SWAGGER_PARAM, AREA_SWAGGER_PARAM])
    def list(self, request):
        """
        Query parameters:
        ------------------------
        * area_title: Specify an area_title to return wages/employment for that location only
            States should be fully spelled out, consistent with the area_title field in the
            BlsOes model. The default is specified by DEFAULT_AREA
        * soc: Specify a source SOC code to return transitions data for people moving from this
            occupation to other occupations. The default is specified by DEFAULT_SOC
        * min_transition_probability: Specify the minimum transitions probability. Do not return any
            transitions records that have a probability of moving from SOC1 to SOC2 that is lower
            than this value.
        Multiple selections are not supported for this endpoint. The default response is displayed.
        Sample endpoint query:
        ------------------------
        * /?area_title=Massachusetts&soc=13-2011&min_transition_probability=0.01
        Response format:
        ------------------------
        {"source_soc": {
            "source_soc_id": 242047,
            "source_soc_area_title": "U.S.",
            "source_soc_soc_code": "13-2011",
            "source_soc_soc_title": "Accountants and Auditors",
            "source_soc_hourly_mean_wage": 38.23,
            "source_soc_annual_mean_wage": 79520,
            "source_soc_total_employment": 1280700,
            "source_soc_soc_decimal_code": "13-2011.00",
            "source_soc_file_year": 2019
            },
        "transition_rows": [
            {
                "id": 1,
                "soc1": "13-2011",
                "soc2": "11-3031",
                "pi": 0.1782961,
                "total_transition_obs": 390865.6,
                "soc2_id": 241905,
                "soc2_area_title": "U.S.",
                "soc2_soc_code": "11-3031",
                "soc2_soc_title": "Financial Managers",
                "soc2_hourly_mean_wage": 70.93,
                "soc2_annual_mean_wage": 147530,
                "soc2_total_employment": 654790,
                "soc2_soc_decimal_code": "11-3031.00",
                "soc2_file_year": 2019
            },
        """
        self._set_params(request)
        # Query both models: wage/employment rows for the requested location,
        # and transition rows leaving the source SOC above the probability cutoff.
        bls_transitions = self.BLS_TRANSITIONS(
            bls=(BlsOes.objects
                 .filter(area_title=self.area_title_filter)),
            transitions=(OccupationTransitions.objects
                         .filter(soc1=self.source_soc)
                         .filter(pi__gte=self.min_transition_probability)
                         ),
        )
        # Convert bls_transitions QuerySet to dicts & join the results
        # List of dicts, each containing metadata on SOCs and transitions
        bls = [model_to_dict(item)
               for item in bls_transitions[0]]
        transitions = [model_to_dict(item, exclude=["occleaveshare", "total_soc"])
                       for item in bls_transitions[1]]
        # Wage/employment metadata for the source SOC, with keys prefixed "source_soc_"
        source_soc_info = [item
                           for item in bls
                           if item.get("soc_code") == self.source_soc]
        source_soc_info = [{f"source_soc_{key}": val
                            for key, val in record.items()}
                           for record in source_soc_info]
        assert len(source_soc_info) <= 1, "Duplicate SOC wage/employment data found in BlsOes model for this location!"
        # NOTE(review): when no wage data exists for the source SOC, this stays
        # an empty list rather than a dict — confirm clients handle both shapes.
        if source_soc_info:
            source_soc_info = source_soc_info[0]
        # Build a destination-SOC-code -> wage/employment-metadata map for the join
        destination_socs = [item.get("soc2") for item in transitions]
        destination_soc_map = {}
        for item in bls:
            if item.get("soc_code") in destination_socs:
                destination_soc_map.update({item.get("soc_code"): item})
        # Merge destination-SOC metadata (keys prefixed "soc2_") into each transition row
        for transition in transitions:
            destination_soc = transition.get("soc2")
            destination_metadata = destination_soc_map.get(destination_soc)
            if destination_metadata:
                destination_metadata = {f"soc2_{key}": val
                                        for key, val in destination_metadata.items()}
                transition.update(destination_metadata)
        # Alternative for simple response that just lists the results from each model:
        # serializer = BlsTransitionsSerializer(bls_transitions)
        # return Response(serializer.data)
        return Response({
            "source_soc": source_soc_info,
            "transition_rows": transitions,
        })
| 44.17321 | 119 | 0.631725 | from .models import Socs, BlsOes, StateAbbPairs, OccupationTransitions, SocDescription
from rest_framework import viewsets, permissions, generics
from rest_framework.throttling import AnonRateThrottle
from rest_framework.response import Response
from django.forms.models import model_to_dict
from collections import namedtuple
import django_filters
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework.pagination import LimitOffsetPagination
import requests
from requests.auth import HTTPBasicAuth
from typing import Dict, Any
from decouple import config
from rapidfuzz import fuzz
from .serializers import (
BlsOesSerializer,
StateNamesSerializer,
SocListSerializer,
OccupationTransitionsSerializer,
BlsTransitionsSerializer,
)
import logging
log = logging.getLogger()
class BlsOesFilter(django_filters.FilterSet):
socs = django_filters.BaseInFilter(field_name='soc_code', lookup_expr='in')
areas = django_filters.BaseInFilter(field_name='area_title', lookup_expr='in')
class Meta:
model = BlsOes
fields = ['socs', 'areas']
class BlsOesViewSet(viewsets.ReadOnlyModelViewSet):
queryset = BlsOes.objects.all()
permission_classes = [permissions.AllowAny]
serializer_class = BlsOesSerializer
throttle_classes = [AnonRateThrottle]
pagination_class = LimitOffsetPagination
filter_class = BlsOesFilter
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
class SocListFilter(django_filters.FilterSet):
socs = django_filters.BaseInFilter(field_name="soc_code", lookup_expr="in")
min_transition_observations = django_filters.NumberFilter(field_name="total_transition_obs", lookup_expr="gte")
class Meta:
model = SocDescription
fields = ["socs", "min_transition_observations"]
class SocListSimpleViewSet(viewsets.ReadOnlyModelViewSet):
queryset = SocDescription.objects.all()
permission_classes = [permissions.AllowAny]
serializer_class = SocListSerializer
throttle_classes = [AnonRateThrottle]
filter_class = SocListFilter
class SocListSmartViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = SocListSerializer
permission_classes = [permissions.AllowAny]
throttle_classes = [AnonRateThrottle]
DEFAULT_ONET_LIMIT = 10
MAX_ONET_LIMIT = 50
DEFAULT_OBS_LIMIT = 1000
FUZZ_LIMIT = 90
KEYWORD_PARAMETER = openapi.Parameter("keyword_search",
openapi.IN_QUERY,
description="Keyword search via O*NET",
type=openapi.TYPE_STRING)
ONET_LIMIT_PARAMETER = openapi.Parameter("onet_limit",
openapi.IN_QUERY,
description=f"Limit to O*NET search results",
type=openapi.TYPE_INTEGER)
OBS_LIMIT_PARAM = openapi.Parameter("min_weighted_obs",
openapi.IN_QUERY,
description="Minimum (weighted) observed transitions from source SOC",
type=openapi.TYPE_NUMBER)
def _set_params(self, request):
self.keyword_search = request.query_params.get("keyword_search")
self.onet_limit = request.query_params.get("onet_limit")
self.obs_limit = request.query_params.get("min_weighted_obs")
if not self.onet_limit or int(self.onet_limit) > self.MAX_ONET_LIMIT:
self.onet_limit = self.DEFAULT_ONET_LIMIT
if not self.obs_limit:
self.obs_limit = self.DEFAULT_OBS_LIMIT
def get_queryset(self):
pass
@staticmethod
def search_onet_keyword(keyword: str,
limit: int = 20) -> Dict[str, Any]:
headers = {"Accept": "application/json"}
username = config("ONET_USERNAME")
password = config("ONET_PASSWORD")
try:
response = requests.get(f"https://services.onetcenter.org/ws/mnm/search?keyword={keyword}",
headers=headers,
params={'end': limit},
auth=HTTPBasicAuth(username, password))
return response.json()
except Exception as e:
log.warning(e)
return None
@swagger_auto_schema(manual_parameters=[KEYWORD_PARAMETER, ONET_LIMIT_PARAMETER, OBS_LIMIT_PARAM])
def list(self, request):
self._set_params(request=request)
available_socs = (SocDescription
.objects
.filter(total_transition_obs__gte=self.obs_limit))
available_socs = [model_to_dict(item)
for item in available_socs]
available_soc_codes = [soc.get("soc_code") for soc in available_socs]
available_soc_codes = set(available_soc_codes)
onet_soc_codes = None
if self.keyword_search:
try:
onet_socs = self.search_onet_keyword(keyword=self.keyword_search,
limit=self.onet_limit)
log.info(f"Smart search results: {onet_socs}")
onet_soc_codes = onet_socs.get("career")
onet_soc_codes = [soc.get("code", "") for soc in onet_soc_codes]
onet_soc_codes = set([soc.split(".")[0] for soc in onet_soc_codes])
log.info(f"Smart search SOC codes: {onet_soc_codes}")
except Exception as e:
log.info(f"Unable to find search results from O*NET for keyword {self.keyword_search} | {e}")
if not onet_soc_codes:
return Response(available_socs)
fuzz_soc_codes = [soc.get("soc_code") for soc in available_socs
if fuzz.partial_ratio(self.keyword_search.lower(),
soc.get("soc_title").lower() + soc.get("soc_code")) >= self.FUZZ_LIMIT]
fuzz_soc_codes = set(fuzz_soc_codes)
log.info(f"SOC codes/titles that are a close exact match to the keyword search {fuzz_soc_codes}")
smart_soc_codes = onet_soc_codes.intersection(available_soc_codes)
smart_soc_codes = smart_soc_codes.union(fuzz_soc_codes)
smart_socs = [soc for soc in available_socs
if soc.get("soc_code") in smart_soc_codes]
return Response(smart_socs)
class StateViewSet(viewsets.ReadOnlyModelViewSet):
queryset = StateAbbPairs.objects.all()
permission_classes = [permissions.AllowAny]
serializer_class = StateNamesSerializer
throttle_classes = [AnonRateThrottle]
class OccupationTransitionsFilter(django_filters.FilterSet):
min_transition_probability = django_filters.NumberFilter(field_name="pi", lookup_expr="gte")
class Meta:
model = OccupationTransitions
fields = ["min_transition_probability", "soc1"]
class OccupationTransitionsViewSet(viewsets.ReadOnlyModelViewSet):
queryset = OccupationTransitions.objects.all()
permission_classes = [permissions.AllowAny]
serializer_class = OccupationTransitionsSerializer
throttle_classes = [AnonRateThrottle]
pagination_class = LimitOffsetPagination
filter_class = OccupationTransitionsFilter
class BlsTransitionsViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoint joining BLS wage/employment data with occupation
    transition probabilities.

    ``list`` returns the BLS record for the requested source SOC plus one row
    per destination SOC whose transition probability meets the cutoff, each
    enriched with the destination's BLS record.
    """

    serializer_class = BlsTransitionsSerializer
    permission_classes = [permissions.AllowAny]
    throttle_classes = [AnonRateThrottle]

    # Lightweight container pairing the two querysets assembled in list().
    # BUGFIX: the original line read `amedtuple("BlsTransitions", ...)`, which
    # built the tuple type but never bound it, so `self.BLS_TRANSITIONS`
    # raised AttributeError.  Imported here to keep the fix self-contained.
    from collections import namedtuple
    BLS_TRANSITIONS = namedtuple("BlsTransitions", ("bls", "transitions"))

    # Fallbacks used when the client omits the corresponding query parameter.
    DEFAULT_AREA = "U.S."
    DEFAULT_SOC = "35-3031"
    DEFAULT_TRANSITION_PROBABILITY = 0.01

    # Swagger/OpenAPI descriptions of the supported query parameters.
    SOC_SWAGGER_PARAM = openapi.Parameter("soc",
                                          openapi.IN_QUERY,
                                          description="Source SOC code",
                                          type=openapi.TYPE_STRING)
    AREA_SWAGGER_PARAM = openapi.Parameter("area_title",
                                           openapi.IN_QUERY,
                                           description="Location",
                                           type=openapi.TYPE_STRING)
    PI_SWAGGER_PARAM = openapi.Parameter("min_transition_probability",
                                         openapi.IN_QUERY,
                                         description="Minimum transition probability",
                                         type=openapi.TYPE_NUMBER)

    def get_queryset(self):
        # Unused: list() assembles its own querysets from the query params.
        pass

    def _set_params(self, request):
        """Read the query parameters, falling back to the class defaults.

        Note: values arrive as strings; the Django ORM coerces them in the
        filters below.
        """
        self.area_title_filter = (request.query_params.get("area_title")
                                  or self.DEFAULT_AREA)
        self.source_soc = (request.query_params.get("soc")
                           or self.DEFAULT_SOC)
        self.min_transition_probability = (
            request.query_params.get("min_transition_probability")
            or self.DEFAULT_TRANSITION_PROBABILITY)

    @swagger_auto_schema(manual_parameters=[SOC_SWAGGER_PARAM, PI_SWAGGER_PARAM, AREA_SWAGGER_PARAM])
    def list(self, request):
        """Return the source SOC's BLS record and its enriched transitions."""
        self._set_params(request)
        bls_transitions = self.BLS_TRANSITIONS(
            bls=(BlsOes.objects
                 .filter(area_title=self.area_title_filter)),
            transitions=(OccupationTransitions.objects
                         .filter(soc1=self.source_soc)
                         .filter(pi__gte=self.min_transition_probability)
                         ),
        )
        bls = [model_to_dict(item)
               for item in bls_transitions.bls]
        transitions = [model_to_dict(item, exclude=["occleaveshare", "total_soc"])
                       for item in bls_transitions.transitions]
        # Pull out (and prefix) the wage/employment record of the source SOC.
        source_soc_info = [item
                           for item in bls
                           if item.get("soc_code") == self.source_soc]
        source_soc_info = [{f"source_soc_{key}": val
                            for key, val in record.items()}
                           for record in source_soc_info]
        assert len(source_soc_info) <= 1, "Duplicate SOC wage/employment data found in BlsOes model for this location!"
        if source_soc_info:
            source_soc_info = source_soc_info[0]
        # Index destination BLS records by SOC code; the set makes the
        # membership test O(1) instead of scanning the transitions list.
        destination_socs = {item.get("soc2") for item in transitions}
        destination_soc_map = {item.get("soc_code"): item
                               for item in bls
                               if item.get("soc_code") in destination_socs}
        # Enrich each transition row with its destination's BLS fields.
        for transition in transitions:
            destination_metadata = destination_soc_map.get(transition.get("soc2"))
            if destination_metadata:
                transition.update({f"soc2_{key}": val
                                   for key, val in destination_metadata.items()})
        return Response({
            "source_soc": source_soc_info,
            "transition_rows": transitions,
        })
| true | true |
f7fb37300051dd803972d2cce6c9bea72b3d9f19 | 416 | py | Python | pymnn/pip_package/MNNTools/MNN_FB/BinaryOpOperation.py | z415073783/MNN | 62c5ca47964407508a5fa802582e648fc75eb0d9 | [
"Apache-2.0"
] | null | null | null | pymnn/pip_package/MNNTools/MNN_FB/BinaryOpOperation.py | z415073783/MNN | 62c5ca47964407508a5fa802582e648fc75eb0d9 | [
"Apache-2.0"
] | 1 | 2021-09-07T09:13:03.000Z | 2021-09-07T09:13:03.000Z | pymnn/pip_package/MNNTools/MNN_FB/BinaryOpOperation.py | z415073783/MNN | 62c5ca47964407508a5fa802582e648fc75eb0d9 | [
"Apache-2.0"
] | 1 | 2020-03-10T02:17:47.000Z | 2020-03-10T02:17:47.000Z | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: MNN
class BinaryOpOperation(object):
    """Operation codes for MNN's binary (two-operand) element-wise operator.

    Auto-generated from the MNN FlatBuffers schema (see the generator header
    above); the integer values are part of the serialized model format and
    must not be changed.
    """
    ADD = 0
    SUB = 1
    MUL = 2
    DIV = 3
    MAX_TEMP = 4
    MIN_TEMP = 5
    POW = 6
    REALDIV = 7
    MINIMUM = 8
    MAXIMUM = 9
    GREATER = 10
    GREATER_EQUAL = 11
    LESS = 12
    FLOORDIV = 13
    SquaredDifference = 14
    EQUAL = 15
    LESS_EQUAL = 16
    FLOORMOD = 17
| 16.64 | 68 | 0.59375 |
class BinaryOpOperation(object):
    """Operation codes for MNN's binary (two-operand) element-wise operator.

    FlatBuffers-generated enum; the integer values are part of the
    serialized model format and must not be changed.
    """
    ADD = 0
    SUB = 1
    MUL = 2
    DIV = 3
    MAX_TEMP = 4
    MIN_TEMP = 5
    POW = 6
    REALDIV = 7
    MINIMUM = 8
    MAXIMUM = 9
    GREATER = 10
    GREATER_EQUAL = 11
    LESS = 12
    FLOORDIV = 13
    SquaredDifference = 14
    EQUAL = 15
    LESS_EQUAL = 16
    FLOORMOD = 17
| true | true |
f7fb379da517b97c0670e2852a594b1da659580a | 7,609 | py | Python | tests/components/mazda/test_sensor.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 3 | 2021-11-22T22:37:43.000Z | 2022-03-17T00:55:28.000Z | tests/components/mazda/test_sensor.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 25 | 2021-10-02T10:01:14.000Z | 2022-03-31T06:11:49.000Z | tests/components/mazda/test_sensor.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 3 | 2022-01-02T18:49:54.000Z | 2022-01-25T02:03:54.000Z | """The sensor tests for the Mazda Connected Services integration."""
from homeassistant.components.sensor import (
ATTR_STATE_CLASS,
SensorDeviceClass,
SensorStateClass,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_KILOMETERS,
LENGTH_MILES,
PERCENTAGE,
PRESSURE_PSI,
)
from homeassistant.helpers import entity_registry as er
from homeassistant.util.unit_system import IMPERIAL_SYSTEM
from . import init_integration
async def test_sensors(hass):
    """Test creation of the sensors."""
    await init_integration(hass)

    entity_registry = er.async_get(hass)

    def check_sensor(object_id, friendly_name, icon, unit, state_class, value,
                     unique_id_suffix):
        """Assert one sensor's state, attributes and registry entry.

        Extracted because every sensor in this test is verified with the
        same nine assertions; only the expected values differ.
        """
        entity_id = f"sensor.{object_id}"
        state = hass.states.get(entity_id)
        assert state
        assert state.attributes.get(ATTR_FRIENDLY_NAME) == friendly_name
        assert state.attributes.get(ATTR_ICON) == icon
        assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == unit
        assert state.attributes.get(ATTR_STATE_CLASS) == state_class
        assert state.state == value
        entry = entity_registry.async_get(entity_id)
        assert entry
        assert entry.unique_id == f"JM000000000000000_{unique_id_suffix}"

    check_sensor("my_mazda3_fuel_remaining_percentage",
                 "My Mazda3 Fuel Remaining Percentage", "mdi:gas-station",
                 PERCENTAGE, SensorStateClass.MEASUREMENT, "87.0",
                 "fuel_remaining_percentage")
    check_sensor("my_mazda3_fuel_distance_remaining",
                 "My Mazda3 Fuel Distance Remaining", "mdi:gas-station",
                 LENGTH_KILOMETERS, SensorStateClass.MEASUREMENT, "381",
                 "fuel_distance_remaining")
    # The odometer only ever grows, hence TOTAL_INCREASING.
    check_sensor("my_mazda3_odometer", "My Mazda3 Odometer", "mdi:speedometer",
                 LENGTH_KILOMETERS, SensorStateClass.TOTAL_INCREASING, "2796",
                 "odometer")
    check_sensor("my_mazda3_front_left_tire_pressure",
                 "My Mazda3 Front Left Tire Pressure", "mdi:car-tire-alert",
                 PRESSURE_PSI, SensorStateClass.MEASUREMENT, "35",
                 "front_left_tire_pressure")
    check_sensor("my_mazda3_front_right_tire_pressure",
                 "My Mazda3 Front Right Tire Pressure", "mdi:car-tire-alert",
                 PRESSURE_PSI, SensorStateClass.MEASUREMENT, "35",
                 "front_right_tire_pressure")
    check_sensor("my_mazda3_rear_left_tire_pressure",
                 "My Mazda3 Rear Left Tire Pressure", "mdi:car-tire-alert",
                 PRESSURE_PSI, SensorStateClass.MEASUREMENT, "33",
                 "rear_left_tire_pressure")
    check_sensor("my_mazda3_rear_right_tire_pressure",
                 "My Mazda3 Rear Right Tire Pressure", "mdi:car-tire-alert",
                 PRESSURE_PSI, SensorStateClass.MEASUREMENT, "33",
                 "rear_right_tire_pressure")
async def test_sensors_imperial_units(hass):
    """Test that the sensors work properly with imperial units."""
    hass.config.units = IMPERIAL_SYSTEM

    await init_integration(hass)

    # Distance-based sensors must report miles instead of kilometers.
    expected = {
        "sensor.my_mazda3_fuel_distance_remaining": "237",
        "sensor.my_mazda3_odometer": "1737",
    }
    for entity_id, expected_state in expected.items():
        state = hass.states.get(entity_id)
        assert state
        assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == LENGTH_MILES
        assert state.state == expected_state
async def test_electric_vehicle_sensors(hass):
    """Test sensors which are specific to electric vehicles."""
    await init_integration(hass, electric_vehicle=True)

    registry = er.async_get(hass)

    # Fuel-based sensors must not be created for an electric vehicle.
    for fuel_entity_id in (
        "sensor.my_mazda3_fuel_remaining_percentage",
        "sensor.my_mazda3_fuel_distance_remaining",
    ):
        assert registry.async_get(fuel_entity_id) is None

    # Charge Level
    charge_state = hass.states.get("sensor.my_mazda3_charge_level")
    assert charge_state
    assert charge_state.attributes.get(ATTR_FRIENDLY_NAME) == "My Mazda3 Charge Level"
    assert charge_state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.BATTERY
    assert charge_state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
    assert charge_state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
    assert charge_state.state == "80"
    charge_entry = registry.async_get("sensor.my_mazda3_charge_level")
    assert charge_entry
    assert charge_entry.unique_id == "JM000000000000000_ev_charge_level"

    # Remaining Range
    range_state = hass.states.get("sensor.my_mazda3_remaining_range")
    assert range_state
    assert range_state.attributes.get(ATTR_FRIENDLY_NAME) == "My Mazda3 Remaining Range"
    assert range_state.attributes.get(ATTR_ICON) == "mdi:ev-station"
    assert range_state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == LENGTH_KILOMETERS
    assert range_state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
    assert range_state.state == "218"
    range_entry = registry.async_get("sensor.my_mazda3_remaining_range")
    assert range_entry
    assert range_entry.unique_id == "JM000000000000000_ev_remaining_range"
| 40.908602 | 88 | 0.75529 |
from homeassistant.components.sensor import (
ATTR_STATE_CLASS,
SensorDeviceClass,
SensorStateClass,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_KILOMETERS,
LENGTH_MILES,
PERCENTAGE,
PRESSURE_PSI,
)
from homeassistant.helpers import entity_registry as er
from homeassistant.util.unit_system import IMPERIAL_SYSTEM
from . import init_integration
async def test_sensors(hass):
    """Test creation of the sensors."""
    await init_integration(hass)
    entity_registry = er.async_get(hass)
    # Fuel Remaining Percentage
    state = hass.states.get("sensor.my_mazda3_fuel_remaining_percentage")
    assert state
    assert (
        state.attributes.get(ATTR_FRIENDLY_NAME)
        == "My Mazda3 Fuel Remaining Percentage"
    )
    assert state.attributes.get(ATTR_ICON) == "mdi:gas-station"
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
    assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
    assert state.state == "87.0"
    entry = entity_registry.async_get("sensor.my_mazda3_fuel_remaining_percentage")
    assert entry
    assert entry.unique_id == "JM000000000000000_fuel_remaining_percentage"
    # Fuel Distance Remaining
    state = hass.states.get("sensor.my_mazda3_fuel_distance_remaining")
    assert state
    assert (
        state.attributes.get(ATTR_FRIENDLY_NAME) == "My Mazda3 Fuel Distance Remaining"
    )
    assert state.attributes.get(ATTR_ICON) == "mdi:gas-station"
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == LENGTH_KILOMETERS
    assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
    assert state.state == "381"
    entry = entity_registry.async_get("sensor.my_mazda3_fuel_distance_remaining")
    assert entry
    assert entry.unique_id == "JM000000000000000_fuel_distance_remaining"
    # Odometer (TOTAL_INCREASING: the value only ever grows)
    state = hass.states.get("sensor.my_mazda3_odometer")
    assert state
    assert state.attributes.get(ATTR_FRIENDLY_NAME) == "My Mazda3 Odometer"
    assert state.attributes.get(ATTR_ICON) == "mdi:speedometer"
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == LENGTH_KILOMETERS
    assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.TOTAL_INCREASING
    assert state.state == "2796"
    entry = entity_registry.async_get("sensor.my_mazda3_odometer")
    assert entry
    assert entry.unique_id == "JM000000000000000_odometer"
    # Front Left Tire Pressure
    state = hass.states.get("sensor.my_mazda3_front_left_tire_pressure")
    assert state
    assert (
        state.attributes.get(ATTR_FRIENDLY_NAME) == "My Mazda3 Front Left Tire Pressure"
    )
    assert state.attributes.get(ATTR_ICON) == "mdi:car-tire-alert"
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PRESSURE_PSI
    assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
    assert state.state == "35"
    entry = entity_registry.async_get("sensor.my_mazda3_front_left_tire_pressure")
    assert entry
    assert entry.unique_id == "JM000000000000000_front_left_tire_pressure"
    # Front Right Tire Pressure
    state = hass.states.get("sensor.my_mazda3_front_right_tire_pressure")
    assert state
    assert (
        state.attributes.get(ATTR_FRIENDLY_NAME)
        == "My Mazda3 Front Right Tire Pressure"
    )
    assert state.attributes.get(ATTR_ICON) == "mdi:car-tire-alert"
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PRESSURE_PSI
    assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
    assert state.state == "35"
    entry = entity_registry.async_get("sensor.my_mazda3_front_right_tire_pressure")
    assert entry
    assert entry.unique_id == "JM000000000000000_front_right_tire_pressure"
    # Rear Left Tire Pressure
    state = hass.states.get("sensor.my_mazda3_rear_left_tire_pressure")
    assert state
    assert (
        state.attributes.get(ATTR_FRIENDLY_NAME) == "My Mazda3 Rear Left Tire Pressure"
    )
    assert state.attributes.get(ATTR_ICON) == "mdi:car-tire-alert"
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PRESSURE_PSI
    assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
    assert state.state == "33"
    entry = entity_registry.async_get("sensor.my_mazda3_rear_left_tire_pressure")
    assert entry
    assert entry.unique_id == "JM000000000000000_rear_left_tire_pressure"
    # Rear Right Tire Pressure
    state = hass.states.get("sensor.my_mazda3_rear_right_tire_pressure")
    assert state
    assert (
        state.attributes.get(ATTR_FRIENDLY_NAME) == "My Mazda3 Rear Right Tire Pressure"
    )
    assert state.attributes.get(ATTR_ICON) == "mdi:car-tire-alert"
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PRESSURE_PSI
    assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
    assert state.state == "33"
    entry = entity_registry.async_get("sensor.my_mazda3_rear_right_tire_pressure")
    assert entry
    assert entry.unique_id == "JM000000000000000_rear_right_tire_pressure"
async def test_sensors_imperial_units(hass):
    """Test that the sensors work properly with imperial units."""
    hass.config.units = IMPERIAL_SYSTEM
    await init_integration(hass)
    # Fuel Distance Remaining must be reported in miles.
    state = hass.states.get("sensor.my_mazda3_fuel_distance_remaining")
    assert state
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == LENGTH_MILES
    assert state.state == "237"
    # Odometer must be reported in miles.
    state = hass.states.get("sensor.my_mazda3_odometer")
    assert state
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == LENGTH_MILES
    assert state.state == "1737"
async def test_electric_vehicle_sensors(hass):
    """Test sensors which are specific to electric vehicles."""
    await init_integration(hass, electric_vehicle=True)
    entity_registry = er.async_get(hass)
    # Fuel Remaining Percentage should not exist for an electric vehicle
    entry = entity_registry.async_get("sensor.my_mazda3_fuel_remaining_percentage")
    assert entry is None
    # Fuel Distance Remaining should not exist for an electric vehicle
    entry = entity_registry.async_get("sensor.my_mazda3_fuel_distance_remaining")
    assert entry is None
    # Charge Level
    state = hass.states.get("sensor.my_mazda3_charge_level")
    assert state
    assert state.attributes.get(ATTR_FRIENDLY_NAME) == "My Mazda3 Charge Level"
    assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.BATTERY
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
    assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
    assert state.state == "80"
    entry = entity_registry.async_get("sensor.my_mazda3_charge_level")
    assert entry
    assert entry.unique_id == "JM000000000000000_ev_charge_level"
    # Remaining Range
    state = hass.states.get("sensor.my_mazda3_remaining_range")
    assert state
    assert state.attributes.get(ATTR_FRIENDLY_NAME) == "My Mazda3 Remaining Range"
    assert state.attributes.get(ATTR_ICON) == "mdi:ev-station"
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == LENGTH_KILOMETERS
    assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
    assert state.state == "218"
    entry = entity_registry.async_get("sensor.my_mazda3_remaining_range")
    assert entry
    assert entry.unique_id == "JM000000000000000_ev_remaining_range"
| true | true |
f7fb37d40db56db91cff73cdbcdc5e682fb5b394 | 1,227 | py | Python | scripts/get_subset_of_impact_vectors.py | natcap/opal | 7b960d51344483bae30d14ccfa6004bd550f3737 | [
"BSD-3-Clause"
] | 1 | 2020-04-15T23:23:27.000Z | 2020-04-15T23:23:27.000Z | scripts/get_subset_of_impact_vectors.py | natcap/opal | 7b960d51344483bae30d14ccfa6004bd550f3737 | [
"BSD-3-Clause"
] | null | null | null | scripts/get_subset_of_impact_vectors.py | natcap/opal | 7b960d51344483bae30d14ccfa6004bd550f3737 | [
"BSD-3-Clause"
] | null | null | null | import os
import glob
import shutil
if __name__ == '__main__':
    # CSV listing the simulations with a positive SDR result; the first two
    # columns are the watershed index and the impact index.
    source_file_uri = os.path.join(os.path.dirname(__file__), '..',
                                   'positive_sdr_simulations.csv')
    scenario = 'bare'

    # Recreate the destination directory from scratch.
    dest_dir = '%s_positive_simulations' % scenario
    if os.path.exists(dest_dir):
        shutil.rmtree(dest_dir)
    os.makedirs(dest_dir)

    # Glob pattern matching every file belonging to one impact simulation.
    filepath_pattern = os.path.join('/colossus', 'colombia_sdr',
                                    scenario,
                                    'simulations',
                                    'watershed_%s',
                                    'random_impact_%s',
                                    'impact_%s.*'
                                    )

    # Use a context manager so the CSV is always closed (the original left
    # the handle open), and skip the header row up front instead of keeping
    # a manual line counter.
    with open(source_file_uri) as source_file:
        next(source_file, None)  # header row
        for line in source_file:
            ws_index, impact_index = line.split(',')[:2]
            ws_index = int(ws_index)
            impact_index = int(impact_index)
            # The on-disk watershed folder index is ws_index - 1.
            complete_pattern = filepath_pattern % (ws_index - 1, impact_index,
                                                   impact_index)
            for file_uri in glob.glob(complete_pattern):
                # Keep the extension; rename to ws<ws>_impact<impact><ext>.
                ext = os.path.splitext(os.path.basename(file_uri))[1]
                new_filename = 'ws%s_impact%s%s' % (ws_index, impact_index, ext)
                new_uri = os.path.join(dest_dir, new_filename)
                shutil.copyfile(file_uri, new_uri)
| 29.214286 | 76 | 0.616952 | import os
import glob
import shutil
if __name__ == '__main__':
    # CSV listing the simulations with a positive SDR result; the first two
    # columns are the watershed index and the impact index.
    source_file_uri = os.path.join(os.path.dirname(__file__), '..',
                                   'positive_sdr_simulations.csv')
    scenario = 'bare'
    # Recreate the destination directory from scratch.
    dest_dir = '%s_positive_simulations' % scenario
    if os.path.exists(dest_dir):
        shutil.rmtree(dest_dir)
    os.makedirs(dest_dir)
    # Glob pattern matching every file belonging to one impact simulation.
    filepath_pattern = os.path.join('/colossus', 'colombia_sdr',
                                    scenario,
                                    'simulations',
                                    'watershed_%s',
                                    'random_impact_%s',
                                    'impact_%s.*'
                                    )
    # NOTE(review): the file handle is never closed; a `with open(...)`
    # block would be safer.
    source_file = open(source_file_uri)
    line_index = 0
    for line in source_file:
        # Skip the header row.
        if line_index == 0:
            line_index += 1
            continue
        ws_index, impact_index = line.split(',')[:2]
        ws_index = int(ws_index)
        impact_index = int(impact_index)
        # The on-disk watershed folder index is ws_index - 1.
        complete_pattern = filepath_pattern % (ws_index - 1, impact_index,
                                               impact_index)
        for file_uri in glob.glob(complete_pattern):
            # Keep the extension; rename to ws<ws>_impact<impact><ext>.
            base, ext = os.path.splitext(os.path.basename(file_uri))
            new_filename = 'ws%s_impact%s%s' % (ws_index, impact_index, ext)
            new_uri = os.path.join(dest_dir, new_filename)
            shutil.copyfile(file_uri, new_uri)
f7fb3856fdfa0c65137c0e8b3df3e839dd6bd9bc | 1,077 | py | Python | python/medium/1834-single-threaded-cpu.py | jurayev/data-structures-algorithms-solutions | 7103294bafb60117fc77efe4913edcffbeb1ac7a | [
"MIT"
] | null | null | null | python/medium/1834-single-threaded-cpu.py | jurayev/data-structures-algorithms-solutions | 7103294bafb60117fc77efe4913edcffbeb1ac7a | [
"MIT"
] | null | null | null | python/medium/1834-single-threaded-cpu.py | jurayev/data-structures-algorithms-solutions | 7103294bafb60117fc77efe4913edcffbeb1ac7a | [
"MIT"
] | null | null | null | from heapq import heappush, heappop
class Solution:
    def getOrder(self, tasks: list[list[int]]) -> list[int]:
        """Simulate a single-threaded CPU (LeetCode 1834).

        Each task is ``[enqueueTime, processingTime]``.  The CPU always runs
        the available task with the smallest processing time (ties broken by
        the smaller original index) and jumps forward in time when idle.

        Returns the task indices in processing order.
        O(n log n) time, O(n) space.
        """
        if not tasks:
            return []
        order = []          # task indices in processing order
        available = []      # min-heap of (processing_time, index) ready to run
        to_process = []     # min-heap of (enqueue_time, processing_time, index)
        for index, (enqueue_time, processing_time) in enumerate(tasks):
            heappush(to_process, (enqueue_time, processing_time, index))
        # Start the clock when the first task is enqueued.
        ts = to_process[0][0]
        while to_process or available:
            # Release every task that has been enqueued by the current time.
            while to_process and to_process[0][0] <= ts:
                _, processing_time, index = heappop(to_process)
                heappush(available, (processing_time, index))
            if available:
                processing_time, index = heappop(available)
                ts += processing_time
                order.append(index)
            else:
                # CPU idle: jump straight to the next enqueue time.
                ts = to_process[0][0]
        return order
class Solution:
    def getOrder(self, tasks: List[List[int]]) -> List[int]:
        """Return task indices in the order a single-threaded CPU runs them.

        Each task is ``[enqueueTime, processingTime]``; the CPU always runs
        the available task with the shortest processing time (ties broken by
        the lower index) and jumps forward in time when idle.
        """
        if not tasks:
            return []
        order = []          # task indices in processing order
        available = []      # min-heap of (processing_time, index) ready to run
        to_process = []     # min-heap of (enqueue_time, processing_time, index)
        for i, task in enumerate(tasks):
            heappush(to_process, (task[0], task[1], i))
        # Start the clock when the first task is enqueued.
        ts = to_process[0][0]
        while to_process or available:
            # Release every task that has been enqueued by the current time.
            while to_process and to_process[0][0] <= ts:
                start_ts, time, index = heappop(to_process)
                heappush(available, (time, index))
            if available:
                time, index = heappop(available)
                ts += time
                order.append(index)
            else:
                # CPU idle: jump straight to the next enqueue time.
                ts = to_process[0][0]
return order | true | true |
f7fb38af7a2c7fa2919914d8f1b929b1c7b357f6 | 1,429 | py | Python | demo/crime_vgg16_bilstm_hi_dim_train.py | dalo2903/keras-video-classifier | 6dcf0a1e87342bdb057df0176af6489ff926f2d8 | [
"MIT"
] | 1 | 2018-12-27T09:00:02.000Z | 2018-12-27T09:00:02.000Z | demo/crime_vgg16_bilstm_hi_dim_train.py | dalo2903/keras-video-classifier | 6dcf0a1e87342bdb057df0176af6489ff926f2d8 | [
"MIT"
] | null | null | null | demo/crime_vgg16_bilstm_hi_dim_train.py | dalo2903/keras-video-classifier | 6dcf0a1e87342bdb057df0176af6489ff926f2d8 | [
"MIT"
] | null | null | null | import numpy as np
from keras import backend as K
import sys
import os
def main():
    """Train the VGG16 + bidirectional LSTM crime classifier (hi-dim features)
    and save a plot of the training history."""
    K.set_image_dim_ordering('tf')

    # Make the package in the repository root importable before the
    # keras_video_classifier imports below.
    demo_dir = os.path.dirname(__file__)
    sys.path.append(os.path.join(demo_dir, '..'))

    from keras_video_classifier.library.recurrent_networks import VGG16BidirectionalLSTMVideoClassifier
    from keras_video_classifier.library.utility.plot_utils import plot_and_save_history
    from keras_video_classifier.library.utility.crime.UCF_Crime_loader import load_ucf

    dataset_name = 'UCF-Anomaly-Detection-Dataset'
    data_dir = os.path.join(demo_dir, 'very_large_data')
    model_dir = os.path.join(demo_dir, 'models', dataset_name)
    report_dir = os.path.join(demo_dir, 'reports', dataset_name)

    np.random.seed(42)

    # Downloads the dataset videos if they are not already present locally.
    load_ucf(data_dir)

    classifier = VGG16BidirectionalLSTMVideoClassifier()
    history = classifier.fit(data_dir_path=data_dir,
                             model_dir_path=model_dir,
                             vgg16_include_top=False,
                             data_set_name=dataset_name)

    model_name = VGG16BidirectionalLSTMVideoClassifier.model_name
    plot_and_save_history(history, model_name,
                          report_dir + '/' + model_name + '-hi-dim-history.png')


if __name__ == '__main__':
    main()
| 39.694444 | 123 | 0.757173 | import numpy as np
from keras import backend as K
import sys
import os
def main():
    """Train the VGG16 + bidirectional LSTM crime classifier (hi-dim features)
    and save a plot of the training history."""
    K.set_image_dim_ordering('tf')
    # Make the package in the repository root importable before the
    # keras_video_classifier imports below.
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
    from keras_video_classifier.library.recurrent_networks import VGG16BidirectionalLSTMVideoClassifier
    from keras_video_classifier.library.utility.plot_utils import plot_and_save_history
    from keras_video_classifier.library.utility.crime.UCF_Crime_loader import load_ucf
    data_set_name = 'UCF-Anomaly-Detection-Dataset'
    input_dir_path = os.path.join(os.path.dirname(__file__), 'very_large_data')
    output_dir_path = os.path.join(os.path.dirname(__file__), 'models', data_set_name)
    report_dir_path = os.path.join(os.path.dirname(__file__), 'reports', data_set_name)
    np.random.seed(42)
    # Downloads the dataset videos if they are not already present locally.
    load_ucf(input_dir_path)
    classifier = VGG16BidirectionalLSTMVideoClassifier()
    history = classifier.fit(data_dir_path=input_dir_path, model_dir_path=output_dir_path, vgg16_include_top=False,
                             data_set_name=data_set_name)
    plot_and_save_history(history, VGG16BidirectionalLSTMVideoClassifier.model_name,
                          report_dir_path + '/' + VGG16BidirectionalLSTMVideoClassifier.model_name + '-hi-dim-history.png')
if __name__ == '__main__':
    main()
f7fb3949f6b5e489477ca4a6a801107f3836b3ce | 5,740 | py | Python | docs/source/examples/isaacgym_sequential_shared_memory_eval.py | Toni-SM/skrl | 15b429d89e3b8a1828b207d88463bf7090288d18 | [
"MIT"
] | 43 | 2021-12-19T07:47:43.000Z | 2022-03-31T05:24:42.000Z | docs/source/examples/isaacgym_sequential_shared_memory_eval.py | Toni-SM/skrl | 15b429d89e3b8a1828b207d88463bf7090288d18 | [
"MIT"
] | 5 | 2022-01-05T07:54:13.000Z | 2022-03-08T21:00:39.000Z | docs/source/examples/isaacgym_sequential_shared_memory_eval.py | Toni-SM/skrl | 15b429d89e3b8a1828b207d88463bf7090288d18 | [
"MIT"
] | 1 | 2022-01-31T17:53:52.000Z | 2022-01-31T17:53:52.000Z | import isaacgym
import torch
import torch.nn as nn
import torch.nn.functional as F
# Import the skrl components to build the RL system
from skrl.models.torch import GaussianModel, DeterministicModel
from skrl.agents.torch.ddpg import DDPG, DDPG_DEFAULT_CONFIG
from skrl.agents.torch.td3 import TD3, TD3_DEFAULT_CONFIG
from skrl.agents.torch.sac import SAC, SAC_DEFAULT_CONFIG
from skrl.trainers.torch import SequentialTrainer
from skrl.envs.torch import wrap_env
from skrl.envs.torch import load_isaacgym_env_preview2, load_isaacgym_env_preview3
# Define only the policies for evaluation
class StochasticActor(GaussianModel):
    """Gaussian policy: two ELU hidden layers (32 units each), a tanh-squashed
    mean head, and a state-independent learned log-std parameter."""

    def __init__(self, observation_space, action_space, device, clip_actions=False,
                 clip_log_std=True, min_log_std=-20, max_log_std=2):
        super().__init__(observation_space, action_space, device, clip_actions,
                         clip_log_std, min_log_std, max_log_std)
        # NOTE: the attribute names are the checkpoint state_dict keys and
        # must stay unchanged so saved policies keep loading.
        self.linear_layer_1 = nn.Linear(self.num_observations, 32)
        self.linear_layer_2 = nn.Linear(32, 32)
        self.mean_action_layer = nn.Linear(32, self.num_actions)
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

    def compute(self, states, taken_actions):
        """Return (mean actions in [-1, 1], log-std) for the given states."""
        hidden = F.elu(self.linear_layer_2(F.elu(self.linear_layer_1(states))))
        mean_actions = torch.tanh(self.mean_action_layer(hidden))
        return mean_actions, self.log_std_parameter
class DeterministicActor(DeterministicModel):
    """Deterministic policy: two ELU hidden layers (32 units each) followed by
    a tanh-squashed action head."""

    def __init__(self, observation_space, action_space, device, clip_actions=False):
        super().__init__(observation_space, action_space, device, clip_actions)
        # NOTE: the attribute names are the checkpoint state_dict keys and
        # must stay unchanged so saved policies keep loading.
        self.linear_layer_1 = nn.Linear(self.num_observations, 32)
        self.linear_layer_2 = nn.Linear(32, 32)
        self.action_layer = nn.Linear(32, self.num_actions)

    def compute(self, states, taken_actions):
        """Return actions in [-1, 1] for the given states."""
        hidden = F.elu(self.linear_layer_2(F.elu(self.linear_layer_1(states))))
        return torch.tanh(self.action_layer(hidden))
# Load and wrap the Isaac Gym environment.
# The following lines are intended to support both versions (preview 2 and 3):
# preview 3 is tried first; on failure it falls back to preview 2.
try:
    env = load_isaacgym_env_preview3(task_name="Cartpole")
except Exception as e:
    print("Isaac Gym (preview 3) failed: {}\nTrying preview 2...".format(e))
    env = load_isaacgym_env_preview2("Cartpole")
env = wrap_env(env)

device = env.device


# Instantiate the agent's policies.
# Only the policy models are needed for evaluation (no critics/targets).
# DDPG models, see
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ddpg.html#spaces-and-models
models_ddpg = {"policy": DeterministicActor(env.observation_space, env.action_space, device, clip_actions=True)}
# TD3 models, see
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.td3.html#spaces-and-models
models_td3 = {"policy": DeterministicActor(env.observation_space, env.action_space, device, clip_actions=True)}
# SAC models, see
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.sac.html#spaces-and-models
models_sac = {"policy": StochasticActor(env.observation_space, env.action_space, device, clip_actions=True)}

# Load the pre-trained checkpoints for each policy.
models_ddpg["policy"].load("./runs/22-02-06_19-37-44-874837_DDPG/checkpoints/8000_policy.pt")
models_td3["policy"].load("./runs/22-02-06_19-28-48-436345_TD3/checkpoints/5000_policy.pt")
models_sac["policy"].load("./runs/22-02-06_19-28-48-441161_SAC/checkpoints/3000_policy.pt")


# Configure and instantiate the agents.
# Only some of the default configuration values are modified; see
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ddpg.html#configuration-and-hyperparameters
cfg_ddpg = DDPG_DEFAULT_CONFIG.copy()
cfg_ddpg["random_timesteps"] = 0  # no random exploration during evaluation
# logging to TensorBoard each 25 timesteps and ignore checkpoints
cfg_ddpg["experiment"]["write_interval"] = 25
cfg_ddpg["experiment"]["checkpoint_interval"] = 0
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.td3.html#configuration-and-hyperparameters
cfg_td3 = TD3_DEFAULT_CONFIG.copy()
cfg_td3["random_timesteps"] = 0  # no random exploration during evaluation
# logging to TensorBoard each 25 timesteps and ignore checkpoints
cfg_td3["experiment"]["write_interval"] = 25
cfg_td3["experiment"]["checkpoint_interval"] = 0
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.sac.html#configuration-and-hyperparameters
cfg_sac = SAC_DEFAULT_CONFIG.copy()
cfg_sac["random_timesteps"] = 0  # no random exploration during evaluation
# logging to TensorBoard each 25 timesteps and ignore checkpoints
cfg_sac["experiment"]["write_interval"] = 25
cfg_sac["experiment"]["checkpoint_interval"] = 0

agent_ddpg = DDPG(models=models_ddpg,
                  memory=None,
                  cfg=cfg_ddpg,
                  observation_space=env.observation_space,
                  action_space=env.action_space,
                  device=device)

agent_td3 = TD3(models=models_td3,
                memory=None,
                cfg=cfg_td3,
                observation_space=env.observation_space,
                action_space=env.action_space,
                device=device)

agent_sac = SAC(models=models_sac,
                memory=None,
                cfg=cfg_sac,
                observation_space=env.observation_space,
                action_space=env.action_space,
                device=device)


# Configure and instantiate the RL trainer (all three agents run sequentially
# in the same environment).
cfg = {"timesteps": 8000, "headless": True}
trainer = SequentialTrainer(cfg=cfg,
                            env=env,
                            agents=[agent_ddpg, agent_td3, agent_sac],
                            agents_scope=[])

# evaluate the agents
trainer.eval()
| 44.153846 | 112 | 0.723868 | import isaacgym
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import GaussianModel, DeterministicModel
from skrl.agents.torch.ddpg import DDPG, DDPG_DEFAULT_CONFIG
from skrl.agents.torch.td3 import TD3, TD3_DEFAULT_CONFIG
from skrl.agents.torch.sac import SAC, SAC_DEFAULT_CONFIG
from skrl.trainers.torch import SequentialTrainer
from skrl.envs.torch import wrap_env
from skrl.envs.torch import load_isaacgym_env_preview2, load_isaacgym_env_preview3
class StochasticActor(GaussianModel):
    """Gaussian policy: two ELU hidden layers (32 units each), a tanh-squashed
    mean head, and a state-independent learned log-std parameter."""
    def __init__(self, observation_space, action_space, device, clip_actions=False,
                 clip_log_std=True, min_log_std=-20, max_log_std=2):
        super().__init__(observation_space, action_space, device, clip_actions,
                         clip_log_std, min_log_std, max_log_std)
        # Attribute names are the checkpoint state_dict keys; do not rename.
        self.linear_layer_1 = nn.Linear(self.num_observations, 32)
        self.linear_layer_2 = nn.Linear(32, 32)
        self.mean_action_layer = nn.Linear(32, self.num_actions)
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
    def compute(self, states, taken_actions):
        """Return (mean actions in [-1, 1], log-std) for the given states."""
        x = F.elu(self.linear_layer_1(states))
        x = F.elu(self.linear_layer_2(x))
        return torch.tanh(self.mean_action_layer(x)), self.log_std_parameter
class DeterministicActor(DeterministicModel):
def __init__(self, observation_space, action_space, device, clip_actions = False):
super().__init__(observation_space, action_space, device, clip_actions)
self.linear_layer_1 = nn.Linear(self.num_observations, 32)
self.linear_layer_2 = nn.Linear(32, 32)
self.action_layer = nn.Linear(32, self.num_actions)
def compute(self, states, taken_actions):
x = F.elu(self.linear_layer_1(states))
x = F.elu(self.linear_layer_2(x))
return torch.tanh(self.action_layer(x))
try:
env = load_isaacgym_env_preview3(task_name="Cartpole")
except Exception as e:
print("Isaac Gym (preview 3) failed: {}\nTrying preview 2...".format(e))
env = load_isaacgym_env_preview2("Cartpole")
env = wrap_env(env)
device = env.device
# DDPG requires 4 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ddpg.html#spaces-and-models
models_ddpg = {"policy": DeterministicActor(env.observation_space, env.action_space, device, clip_actions=True)}
# TD3 requires 6 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.td3.html#spaces-and-models
models_td3 = {"policy": DeterministicActor(env.observation_space, env.action_space, device, clip_actions=True)}
# SAC requires 5 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.sac.html#spaces-and-models
models_sac = {"policy": StochasticActor(env.observation_space, env.action_space, device, clip_actions=True)}
# load checkpoints
models_ddpg["policy"].load("./runs/22-02-06_19-37-44-874837_DDPG/checkpoints/8000_policy.pt")
models_td3["policy"].load("./runs/22-02-06_19-28-48-436345_TD3/checkpoints/5000_policy.pt")
models_sac["policy"].load("./runs/22-02-06_19-28-48-441161_SAC/checkpoints/3000_policy.pt")
# Configure and instantiate the agents.
# Only modify some of the default configuration, visit its documentation to see all the options
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ddpg.html#configuration-and-hyperparameters
cfg_ddpg = DDPG_DEFAULT_CONFIG.copy()
cfg_ddpg["random_timesteps"] = 0
# logging to TensorBoard each 25 timesteps and ignore checkpoints
cfg_ddpg["experiment"]["write_interval"] = 25
cfg_ddpg["experiment"]["checkpoint_interval"] = 0
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.td3.html#configuration-and-hyperparameters
cfg_td3 = TD3_DEFAULT_CONFIG.copy()
cfg_td3["random_timesteps"] = 0
# logging to TensorBoard each 25 timesteps and ignore checkpoints
cfg_td3["experiment"]["write_interval"] = 25
cfg_td3["experiment"]["checkpoint_interval"] = 0
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.sac.html#configuration-and-hyperparameters
cfg_sac = SAC_DEFAULT_CONFIG.copy()
cfg_sac["random_timesteps"] = 0
# logging to TensorBoard each 25 timesteps and ignore checkpoints
cfg_sac["experiment"]["write_interval"] = 25
cfg_sac["experiment"]["checkpoint_interval"] = 0
agent_ddpg = DDPG(models=models_ddpg,
memory=None,
cfg=cfg_ddpg,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
agent_td3 = TD3(models=models_td3,
memory=None,
cfg=cfg_td3,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
agent_sac = SAC(models=models_sac,
memory=None,
cfg=cfg_sac,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# Configure and instantiate the RL trainer
cfg = {"timesteps": 8000, "headless": True}
trainer = SequentialTrainer(cfg=cfg,
env=env,
agents=[agent_ddpg, agent_td3, agent_sac],
agents_scope=[])
# evaluate the agents
trainer.eval()
| true | true |
f7fb3bf624dc695a6936a66718e687066ef8c8b0 | 62 | py | Python | uru_crm/modules/veggies/__init__.py | gitbenji/uru-crm | b5d29c94fe5f9e6f872b2a3b7bd1fe64ce47a841 | [
"MIT"
] | null | null | null | uru_crm/modules/veggies/__init__.py | gitbenji/uru-crm | b5d29c94fe5f9e6f872b2a3b7bd1fe64ce47a841 | [
"MIT"
] | null | null | null | uru_crm/modules/veggies/__init__.py | gitbenji/uru-crm | b5d29c94fe5f9e6f872b2a3b7bd1fe64ce47a841 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .models import Available_Veggie
| 15.5 | 36 | 0.677419 |
from .models import Available_Veggie
| true | true |
f7fb3c54115abca058513a3ce2a02040515b8bbd | 4,444 | py | Python | scripts/pyvision/util/fast_util.py | wolfram2012/ros_track_ssd | c98d54eb923e5bae5fde4abbedda2fe5ba716606 | [
"MIT"
] | null | null | null | scripts/pyvision/util/fast_util.py | wolfram2012/ros_track_ssd | c98d54eb923e5bae5fde4abbedda2fe5ba716606 | [
"MIT"
] | null | null | null | scripts/pyvision/util/fast_util.py | wolfram2012/ros_track_ssd | c98d54eb923e5bae5fde4abbedda2fe5ba716606 | [
"MIT"
] | null | null | null | import numpy as np
from scipy import weave
class LocalMaximumDetector:
def __init__(self,max_length=1000000):
self.max_length = max_length
self.maxes = np.zeros((max_length,2),dtype=np.int)
self.vals = np.zeros((max_length,),dtype=np.float)
def __call__(self, mat, threshold = None, sorted = True):
'''
All any local maximum that are greater than threshhold up to a total of
max_length.
To save time arrays that hold the maxes and vals that are created
once and reused for each call. This means that local maximum detection
is not thread safe. If using this class with threads create an instance
for each thread.
@param mat: 2d Real Matrix input.
@param threshold: Mininum value of local maxima.
@param sorted: set to False to save time and return an unorderd list.
@returns: maxes,vals
'''
r,c = mat.shape
maxes = self.maxes
vals = self.vals
max_length = self.max_length
if threshold != None:
count = weave.inline(
'''
int count = 0;
for( int i = 1; i < r-1 ; i++){
for(int j = 1; j < c-1 ; j++){
// Check if the current location meets the threshold
if (mat(i,j) > threshold &&
mat(i,j) > mat(i,j-1) &&
mat(i,j) > mat(i,j+1) &&
mat(i,j) > mat(i-1,j-1) &&
mat(i,j) > mat(i-1,j) &&
mat(i,j) > mat(i-1,j+1) &&
mat(i,j) > mat(i+1,j-1) &&
mat(i,j) > mat(i+1,j) &&
mat(i,j) > mat(i+1,j+1)){
// This is a local max
maxes(count,0) = i;
maxes(count,1) = j;
vals(count) = mat(i,j);
count += 1;
if(count == max_length){
i = r;
j = c;
}
}
}
}
return_val = count;
''',
arg_names=['mat','maxes','vals','max_length','threshold','r','c'],
type_converters=weave.converters.blitz,
)
else:
count = weave.inline(
'''
int count = 0;
for( int i = 1; i < r-1 ; i++){
for(int j = 1; j < c-1 ; j++){
// Check if the current location meets the threshold
if (mat(i,j) > mat(i,j-1) &&
mat(i,j) > mat(i,j+1) &&
mat(i,j) > mat(i-1,j-1) &&
mat(i,j) > mat(i-1,j) &&
mat(i,j) > mat(i-1,j+1) &&
mat(i,j) > mat(i+1,j-1) &&
mat(i,j) > mat(i+1,j) &&
mat(i,j) > mat(i+1,j+1)){
// This is a local max
maxes(count,0) = i;
maxes(count,1) = j;
vals(count) = mat(i,j);
count += 1;
if(count == max_length){
i = r;
j = c;
}
}
}
}
return_val = count;
''',
arg_names=['mat','maxes','vals','max_length','r','c'],
type_converters=weave.converters.blitz,
)
if sorted == False:
return maxes[:count,:].copy(),vals[:count].copy()
order = np.argsort(vals[:count])[::-1]
maxes = maxes[order]
vals = vals[order]
#print vals
#print maxes
return maxes,vals
| 36.42623 | 82 | 0.34766 | import numpy as np
from scipy import weave
class LocalMaximumDetector:
def __init__(self,max_length=1000000):
self.max_length = max_length
self.maxes = np.zeros((max_length,2),dtype=np.int)
self.vals = np.zeros((max_length,),dtype=np.float)
def __call__(self, mat, threshold = None, sorted = True):
r,c = mat.shape
maxes = self.maxes
vals = self.vals
max_length = self.max_length
if threshold != None:
count = weave.inline(
'''
int count = 0;
for( int i = 1; i < r-1 ; i++){
for(int j = 1; j < c-1 ; j++){
// Check if the current location meets the threshold
if (mat(i,j) > threshold &&
mat(i,j) > mat(i,j-1) &&
mat(i,j) > mat(i,j+1) &&
mat(i,j) > mat(i-1,j-1) &&
mat(i,j) > mat(i-1,j) &&
mat(i,j) > mat(i-1,j+1) &&
mat(i,j) > mat(i+1,j-1) &&
mat(i,j) > mat(i+1,j) &&
mat(i,j) > mat(i+1,j+1)){
// This is a local max
maxes(count,0) = i;
maxes(count,1) = j;
vals(count) = mat(i,j);
count += 1;
if(count == max_length){
i = r;
j = c;
}
}
}
}
return_val = count;
''',
arg_names=['mat','maxes','vals','max_length','threshold','r','c'],
type_converters=weave.converters.blitz,
)
else:
count = weave.inline(
'''
int count = 0;
for( int i = 1; i < r-1 ; i++){
for(int j = 1; j < c-1 ; j++){
// Check if the current location meets the threshold
if (mat(i,j) > mat(i,j-1) &&
mat(i,j) > mat(i,j+1) &&
mat(i,j) > mat(i-1,j-1) &&
mat(i,j) > mat(i-1,j) &&
mat(i,j) > mat(i-1,j+1) &&
mat(i,j) > mat(i+1,j-1) &&
mat(i,j) > mat(i+1,j) &&
mat(i,j) > mat(i+1,j+1)){
// This is a local max
maxes(count,0) = i;
maxes(count,1) = j;
vals(count) = mat(i,j);
count += 1;
if(count == max_length){
i = r;
j = c;
}
}
}
}
return_val = count;
''',
arg_names=['mat','maxes','vals','max_length','r','c'],
type_converters=weave.converters.blitz,
)
if sorted == False:
return maxes[:count,:].copy(),vals[:count].copy()
order = np.argsort(vals[:count])[::-1]
maxes = maxes[order]
vals = vals[order]
return maxes,vals
| true | true |
f7fb3d3bc8254e1bf13f8bf6237bdef7713df69f | 4,431 | py | Python | evaluation/ablation/ablation-exp-gen.py | sgpthomas/diospyros | 27d4e5e5d4e56a6dc5860d7c7d5eefb27de24a5d | [
"MIT"
] | 27 | 2020-02-16T22:26:34.000Z | 2022-02-17T04:17:19.000Z | evaluation/ablation/ablation-exp-gen.py | sgpthomas/diospyros | 27d4e5e5d4e56a6dc5860d7c7d5eefb27de24a5d | [
"MIT"
] | 77 | 2020-01-21T15:37:35.000Z | 2022-03-11T19:48:43.000Z | evaluation/ablation/ablation-exp-gen.py | sgpthomas/diospyros | 27d4e5e5d4e56a6dc5860d7c7d5eefb27de24a5d | [
"MIT"
] | 1 | 2021-09-27T20:35:15.000Z | 2021-09-27T20:35:15.000Z | import argparse
import os
import sys
import subprocess
import shutil
# Name of the directory to store the result of running example-gen.rkt
BASE = "base"
NATURE = "Nature"
def check_path(path, diagnostic):
"""
Make sure that `path` exists and exits otherwise.
"""
if not os.path.exists(path):
print(f"Could not find {path}. {diagnostic}")
sys.exit(1)
def generate_base(params, out):
"""
Generate a folder out/BASE with the required spec for the experiment.
"""
os.makedirs(out)
experiment_path = os.path.join(out, BASE)
shutil.copy("evaluation/ablation/run_all.sh", out)
print(f"Generating {out}/{BASE}")
subprocess.run(
[
"racket",
"src/example-gen.rkt",
"-b", "mat-mul",
"-p", params,
"-o", experiment_path
],
check=True,
stderr=subprocess.PIPE)
# Add all required file for the harness.
shutil.copy("evaluation/ablation/Makefile", experiment_path)
shutil.copy("evaluation/ablation/harness.c", experiment_path)
shutil.copy("evaluation/src/utils.h", experiment_path)
def generate_nature(out):
"""
Generate folder out/nature to run the nature experiment
"""
check_path(
os.path.join(out, BASE),
f"The script should automatically generate this file. Something went wrong in the last step.")
nature = os.path.join(out, NATURE)
shutil.copytree("evaluation/ablation/nature", nature)
shutil.copy("evaluation/src/utils.h", nature)
def run_experiment(timeout, out):
"""
Invoke the rewriter on the spec file and save the result of the rewrite
into out/timeout/res.rkt
"""
# My programming has the best defenses.
spec_path = os.path.join(out, BASE, "spec.rkt")
check_path(
spec_path,
f"The script should automatically generate this file. Something went wrong in the last step.")
# Make the directory with this file
exp_dir = os.path.join(out, str(timeout))
shutil.copytree(os.path.join(out, BASE), exp_dir)
result = os.path.join(exp_dir, "res.rkt")
with open(result, 'w+') as f:
print(f"Running rewriter with timeout={timeout}")
subprocess.run(
[
"cargo",
"run",
"--release",
"--manifest-path", "src/dios-egraphs/Cargo.toml",
spec_path,
"--no-ac"
],
env=dict(os.environ, TIMEOUT=str(timeout)),
stderr=subprocess.PIPE,
check=True,
stdout=f)
cxx_out = os.path.join(exp_dir, "kernel.c")
with open(cxx_out, 'w+') as f:
print(f"Compiling {result} to C++")
subprocess.run(
[
"racket",
"src/main.rkt",
"-e",
exp_dir
],
env=dict(os.environ, TIMEOUT=str(timeout)),
stderr=subprocess.PIPE,
check=True,
stdout=f)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--timeouts', nargs='+',
help='Timeouts to run the ablation study',
required=True)
parser.add_argument('-p', '--parameter',
help='Location of the parameters file', required=True)
parser.add_argument('-o', '--output-dir',
help='Location of the output directory', required=True)
args = parser.parse_args()
# Make sure that compiler is built and expected scripts exist.
paths = [
"src/dios-egraphs/Cargo.toml",
"src/example-gen.rkt",
"evaluation/ablation/harness.c",
"evaluation/ablation/Makefile",
"evaluation/ablation/run_all.sh",
]
for path in paths:
check_path(
path,
"Are you running the script from the repository root?"
)
check_path(
"evaluation/src/utils.h",
"Is the file `evaluation/src/utils.h` missing?"
)
# Don't overwrite the output directory
if os.path.exists(args.output_dir):
print(f'{args.output_dir} already exists. Refusing to overwrite it.')
sys.exit(1)
generate_base(args.parameter, args.output_dir)
generate_nature(args.output_dir)
for timeout in args.timeouts:
run_experiment(timeout, args.output_dir)
| 29.344371 | 102 | 0.589483 | import argparse
import os
import sys
import subprocess
import shutil
BASE = "base"
NATURE = "Nature"
def check_path(path, diagnostic):
if not os.path.exists(path):
print(f"Could not find {path}. {diagnostic}")
sys.exit(1)
def generate_base(params, out):
os.makedirs(out)
experiment_path = os.path.join(out, BASE)
shutil.copy("evaluation/ablation/run_all.sh", out)
print(f"Generating {out}/{BASE}")
subprocess.run(
[
"racket",
"src/example-gen.rkt",
"-b", "mat-mul",
"-p", params,
"-o", experiment_path
],
check=True,
stderr=subprocess.PIPE)
shutil.copy("evaluation/ablation/Makefile", experiment_path)
shutil.copy("evaluation/ablation/harness.c", experiment_path)
shutil.copy("evaluation/src/utils.h", experiment_path)
def generate_nature(out):
check_path(
os.path.join(out, BASE),
f"The script should automatically generate this file. Something went wrong in the last step.")
nature = os.path.join(out, NATURE)
shutil.copytree("evaluation/ablation/nature", nature)
shutil.copy("evaluation/src/utils.h", nature)
def run_experiment(timeout, out):
spec_path = os.path.join(out, BASE, "spec.rkt")
check_path(
spec_path,
f"The script should automatically generate this file. Something went wrong in the last step.")
exp_dir = os.path.join(out, str(timeout))
shutil.copytree(os.path.join(out, BASE), exp_dir)
result = os.path.join(exp_dir, "res.rkt")
with open(result, 'w+') as f:
print(f"Running rewriter with timeout={timeout}")
subprocess.run(
[
"cargo",
"run",
"--release",
"--manifest-path", "src/dios-egraphs/Cargo.toml",
spec_path,
"--no-ac"
],
env=dict(os.environ, TIMEOUT=str(timeout)),
stderr=subprocess.PIPE,
check=True,
stdout=f)
cxx_out = os.path.join(exp_dir, "kernel.c")
with open(cxx_out, 'w+') as f:
print(f"Compiling {result} to C++")
subprocess.run(
[
"racket",
"src/main.rkt",
"-e",
exp_dir
],
env=dict(os.environ, TIMEOUT=str(timeout)),
stderr=subprocess.PIPE,
check=True,
stdout=f)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--timeouts', nargs='+',
help='Timeouts to run the ablation study',
required=True)
parser.add_argument('-p', '--parameter',
help='Location of the parameters file', required=True)
parser.add_argument('-o', '--output-dir',
help='Location of the output directory', required=True)
args = parser.parse_args()
paths = [
"src/dios-egraphs/Cargo.toml",
"src/example-gen.rkt",
"evaluation/ablation/harness.c",
"evaluation/ablation/Makefile",
"evaluation/ablation/run_all.sh",
]
for path in paths:
check_path(
path,
"Are you running the script from the repository root?"
)
check_path(
"evaluation/src/utils.h",
"Is the file `evaluation/src/utils.h` missing?"
)
if os.path.exists(args.output_dir):
print(f'{args.output_dir} already exists. Refusing to overwrite it.')
sys.exit(1)
generate_base(args.parameter, args.output_dir)
generate_nature(args.output_dir)
for timeout in args.timeouts:
run_experiment(timeout, args.output_dir)
| true | true |
f7fb3d59ef04f637d8cc1e566d99d9c76171e27d | 189 | py | Python | src/oxygen/__init__.py | stude1/robotframework-oxygen | e9845ab5d392d5d75d296d8cbca47f3a66659697 | [
"MIT"
] | 13 | 2020-05-15T08:30:13.000Z | 2022-01-24T01:10:29.000Z | src/oxygen/__init__.py | stude1/robotframework-oxygen | e9845ab5d392d5d75d296d8cbca47f3a66659697 | [
"MIT"
] | 23 | 2020-05-18T09:00:16.000Z | 2022-01-20T06:32:38.000Z | src/oxygen/__init__.py | stude1/robotframework-oxygen | e9845ab5d392d5d75d296d8cbca47f3a66659697 | [
"MIT"
] | 5 | 2020-08-21T07:08:18.000Z | 2021-11-29T18:04:46.000Z | from .version import VERSION
from .base_handler import BaseHandler
from .oxygen import listener, OxygenLibrary
__all__ = ['BaseHandler', 'listener', 'OxygenLibrary']
__version__ = VERSION
| 27 | 54 | 0.798942 | from .version import VERSION
from .base_handler import BaseHandler
from .oxygen import listener, OxygenLibrary
__all__ = ['BaseHandler', 'listener', 'OxygenLibrary']
__version__ = VERSION
| true | true |
f7fb3eb2144b59bf46943a6752ca3e8a7f8da079 | 22,015 | py | Python | ibis/impala/ddl.py | ZeroCool2u/ibis | 19d152fa97f828ad70e4cd46f3cd7d2bae27eb64 | [
"Apache-2.0"
] | null | null | null | ibis/impala/ddl.py | ZeroCool2u/ibis | 19d152fa97f828ad70e4cd46f3cd7d2bae27eb64 | [
"Apache-2.0"
] | null | null | null | ibis/impala/ddl.py | ZeroCool2u/ibis | 19d152fa97f828ad70e4cd46f3cd7d2bae27eb64 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
import ibis.expr.datatypes as dt
import ibis.expr.schema as sch
from ibis.backends.base_sql import quote_identifier
from ibis.sql.compiler import DDL, DML
from .compiler import _type_to_sql_string
fully_qualified_re = re.compile(r"(.*)\.(?:`(.*)`|(.*))")
def _is_fully_qualified(x):
return bool(fully_qualified_re.search(x))
def _is_quoted(x):
regex = re.compile(r"(?:`(.*)`|(.*))")
quoted, _ = regex.match(x).groups()
return quoted is not None
class ImpalaQualifiedSQLStatement:
def _get_scoped_name(self, obj_name, database):
if database:
scoped_name = '{}.`{}`'.format(database, obj_name)
else:
if not _is_fully_qualified(obj_name):
if _is_quoted(obj_name):
return obj_name
else:
return '`{}`'.format(obj_name)
else:
return obj_name
return scoped_name
class ImpalaDDL(DDL, ImpalaQualifiedSQLStatement):
pass
class ImpalaDML(DML, ImpalaQualifiedSQLStatement):
pass
class CreateDDL(ImpalaDDL):
def _if_exists(self):
return 'IF NOT EXISTS ' if self.can_exist else ''
_format_aliases = {'TEXT': 'TEXTFILE'}
def _sanitize_format(format):
if format is None:
return
format = format.upper()
format = _format_aliases.get(format, format)
if format not in ('PARQUET', 'AVRO', 'TEXTFILE'):
raise ValueError('Invalid format: {!r}'.format(format))
return format
def _serdeproperties(props):
formatted_props = _format_properties(props)
return 'SERDEPROPERTIES {}'.format(formatted_props)
def format_tblproperties(props):
formatted_props = _format_properties(props)
return 'TBLPROPERTIES {}'.format(formatted_props)
def _format_properties(props):
tokens = []
for k, v in sorted(props.items()):
tokens.append(" '{}'='{}'".format(k, v))
return '(\n{}\n)'.format(',\n'.join(tokens))
class CreateTable(CreateDDL):
"""
Parameters
----------
partition :
"""
def __init__(
self,
table_name,
database=None,
external=False,
format='parquet',
can_exist=False,
partition=None,
path=None,
tbl_properties=None,
):
self.table_name = table_name
self.database = database
self.partition = partition
self.path = path
self.external = external
self.can_exist = can_exist
self.format = _sanitize_format(format)
self.tbl_properties = tbl_properties
@property
def _prefix(self):
if self.external:
return 'CREATE EXTERNAL TABLE'
else:
return 'CREATE TABLE'
def _create_line(self):
scoped_name = self._get_scoped_name(self.table_name, self.database)
return '{} {}{}'.format(self._prefix, self._if_exists(), scoped_name)
def _location(self):
return "LOCATION '{}'".format(self.path) if self.path else None
def _storage(self):
# By the time we're here, we have a valid format
return 'STORED AS {}'.format(self.format)
@property
def pieces(self):
yield self._create_line()
for piece in filter(None, self._pieces):
yield piece
def compile(self):
return '\n'.join(self.pieces)
class CTAS(CreateTable):
"""
Create Table As Select
"""
def __init__(
self,
table_name,
select,
database=None,
external=False,
format='parquet',
can_exist=False,
path=None,
partition=None,
):
super().__init__(
table_name,
database=database,
external=external,
format=format,
can_exist=can_exist,
path=path,
partition=partition,
)
self.select = select
@property
def _pieces(self):
yield self._partitioned_by()
yield self._storage()
yield self._location()
yield 'AS'
yield self.select.compile()
def _partitioned_by(self):
if self.partition is not None:
return 'PARTITIONED BY ({})'.format(
', '.join(
quote_identifier(expr._name) for expr in self.partition
)
)
return None
class CreateView(CTAS):
"""Create a view"""
def __init__(self, table_name, select, database=None, can_exist=False):
super().__init__(
table_name, select, database=database, can_exist=can_exist
)
@property
def _pieces(self):
yield 'AS'
yield self.select.compile()
@property
def _prefix(self):
return 'CREATE VIEW'
class CreateTableParquet(CreateTable):
def __init__(
self,
table_name,
path,
example_file=None,
example_table=None,
schema=None,
external=True,
**kwargs,
):
super().__init__(
table_name,
external=external,
format='parquet',
path=path,
**kwargs,
)
self.example_file = example_file
self.example_table = example_table
self.schema = schema
@property
def _pieces(self):
if self.example_file is not None:
yield "LIKE PARQUET '{0}'".format(self.example_file)
elif self.example_table is not None:
yield "LIKE {0}".format(self.example_table)
elif self.schema is not None:
yield format_schema(self.schema)
else:
raise NotImplementedError
yield self._storage()
yield self._location()
class CreateTableWithSchema(CreateTable):
def __init__(self, table_name, schema, table_format=None, **kwargs):
super().__init__(table_name, **kwargs)
self.schema = schema
self.table_format = table_format
@property
def _pieces(self):
if self.partition is not None:
main_schema = self.schema
part_schema = self.partition
if not isinstance(part_schema, sch.Schema):
part_schema = sch.Schema(
part_schema, [self.schema[name] for name in part_schema]
)
to_delete = []
for name in self.partition:
if name in self.schema:
to_delete.append(name)
if len(to_delete):
main_schema = main_schema.delete(to_delete)
yield format_schema(main_schema)
yield 'PARTITIONED BY {}'.format(format_schema(part_schema))
else:
yield format_schema(self.schema)
if self.table_format is not None:
yield '\n'.join(self.table_format.to_ddl())
else:
yield self._storage()
yield self._location()
class DelimitedFormat:
def __init__(
self,
path,
delimiter=None,
escapechar=None,
na_rep=None,
lineterminator=None,
):
self.path = path
self.delimiter = delimiter
self.escapechar = escapechar
self.lineterminator = lineterminator
self.na_rep = na_rep
def to_ddl(self):
yield 'ROW FORMAT DELIMITED'
if self.delimiter is not None:
yield "FIELDS TERMINATED BY '{}'".format(self.delimiter)
if self.escapechar is not None:
yield "ESCAPED BY '{}'".format(self.escapechar)
if self.lineterminator is not None:
yield "LINES TERMINATED BY '{}'".format(self.lineterminator)
yield "LOCATION '{}'".format(self.path)
if self.na_rep is not None:
props = {'serialization.null.format': self.na_rep}
yield format_tblproperties(props)
class AvroFormat:
def __init__(self, path, avro_schema):
self.path = path
self.avro_schema = avro_schema
def to_ddl(self):
yield 'STORED AS AVRO'
yield "LOCATION '{}'".format(self.path)
schema = json.dumps(self.avro_schema, indent=2, sort_keys=True)
schema = '\n'.join(x.rstrip() for x in schema.splitlines())
props = {'avro.schema.literal': schema}
yield format_tblproperties(props)
class ParquetFormat:
def __init__(self, path):
self.path = path
def to_ddl(self):
yield 'STORED AS PARQUET'
yield "LOCATION '{}'".format(self.path)
class CreateTableDelimited(CreateTableWithSchema):
def __init__(
self,
table_name,
path,
schema,
delimiter=None,
escapechar=None,
lineterminator=None,
na_rep=None,
external=True,
**kwargs,
):
table_format = DelimitedFormat(
path,
delimiter=delimiter,
escapechar=escapechar,
lineterminator=lineterminator,
na_rep=na_rep,
)
super().__init__(
table_name, schema, table_format, external=external, **kwargs
)
class CreateTableAvro(CreateTable):
def __init__(self, table_name, path, avro_schema, external=True, **kwargs):
super().__init__(table_name, external=external, **kwargs)
self.table_format = AvroFormat(path, avro_schema)
@property
def _pieces(self):
yield '\n'.join(self.table_format.to_ddl())
class InsertSelect(ImpalaDML):
def __init__(
self,
table_name,
select_expr,
database=None,
partition=None,
partition_schema=None,
overwrite=False,
):
self.table_name = table_name
self.database = database
self.select = select_expr
self.partition = partition
self.partition_schema = partition_schema
self.overwrite = overwrite
def compile(self):
if self.overwrite:
cmd = 'INSERT OVERWRITE'
else:
cmd = 'INSERT INTO'
if self.partition is not None:
part = _format_partition(self.partition, self.partition_schema)
partition = ' {} '.format(part)
else:
partition = ''
select_query = self.select.compile()
scoped_name = self._get_scoped_name(self.table_name, self.database)
return '{0} {1}{2}\n{3}'.format(
cmd, scoped_name, partition, select_query
)
def _format_partition(partition, partition_schema):
tokens = []
if isinstance(partition, dict):
for name in partition_schema:
if name in partition:
tok = _format_partition_kv(
name, partition[name], partition_schema[name]
)
else:
# dynamic partitioning
tok = name
tokens.append(tok)
else:
for name, value in zip(partition_schema, partition):
tok = _format_partition_kv(name, value, partition_schema[name])
tokens.append(tok)
return 'PARTITION ({})'.format(', '.join(tokens))
def _format_partition_kv(k, v, type):
if type == dt.string:
value_formatted = '"{}"'.format(v)
else:
value_formatted = str(v)
return '{}={}'.format(k, value_formatted)
class LoadData(ImpalaDDL):
"""
Generate DDL for LOAD DATA command. Cannot be cancelled
"""
def __init__(
self,
table_name,
path,
database=None,
partition=None,
partition_schema=None,
overwrite=False,
):
self.table_name = table_name
self.database = database
self.path = path
self.partition = partition
self.partition_schema = partition_schema
self.overwrite = overwrite
def compile(self):
overwrite = 'OVERWRITE ' if self.overwrite else ''
if self.partition is not None:
partition = '\n' + _format_partition(
self.partition, self.partition_schema
)
else:
partition = ''
scoped_name = self._get_scoped_name(self.table_name, self.database)
return "LOAD DATA INPATH '{}' {}INTO TABLE {}{}".format(
self.path, overwrite, scoped_name, partition
)
class AlterTable(ImpalaDDL):
def __init__(
self,
table,
location=None,
format=None,
tbl_properties=None,
serde_properties=None,
):
self.table = table
self.location = location
self.format = _sanitize_format(format)
self.tbl_properties = tbl_properties
self.serde_properties = serde_properties
def _wrap_command(self, cmd):
return 'ALTER TABLE {}'.format(cmd)
def _format_properties(self, prefix=''):
tokens = []
if self.location is not None:
tokens.append("LOCATION '{}'".format(self.location))
if self.format is not None:
tokens.append("FILEFORMAT {}".format(self.format))
if self.tbl_properties is not None:
tokens.append(format_tblproperties(self.tbl_properties))
if self.serde_properties is not None:
tokens.append(_serdeproperties(self.serde_properties))
if len(tokens) > 0:
return '\n{}{}'.format(prefix, '\n'.join(tokens))
else:
return ''
def compile(self):
props = self._format_properties()
action = '{} SET {}'.format(self.table, props)
return self._wrap_command(action)
class PartitionProperties(AlterTable):
def __init__(
self,
table,
partition,
partition_schema,
location=None,
format=None,
tbl_properties=None,
serde_properties=None,
):
super().__init__(
table,
location=location,
format=format,
tbl_properties=tbl_properties,
serde_properties=serde_properties,
)
self.partition = partition
self.partition_schema = partition_schema
def _compile(self, cmd, property_prefix=''):
part = _format_partition(self.partition, self.partition_schema)
if cmd:
part = '{} {}'.format(cmd, part)
props = self._format_properties(property_prefix)
action = '{} {}{}'.format(self.table, part, props)
return self._wrap_command(action)
class AddPartition(PartitionProperties):
def __init__(self, table, partition, partition_schema, location=None):
super().__init__(table, partition, partition_schema, location=location)
def compile(self):
return self._compile('ADD')
class AlterPartition(PartitionProperties):
def compile(self):
return self._compile('', 'SET ')
class DropPartition(PartitionProperties):
def __init__(self, table, partition, partition_schema):
super().__init__(table, partition, partition_schema)
def compile(self):
return self._compile('DROP')
class RenameTable(AlterTable):
def __init__(
self, old_name, new_name, old_database=None, new_database=None
):
# if either database is None, the name is assumed to be fully scoped
self.old_name = old_name
self.old_database = old_database
self.new_name = new_name
self.new_database = new_database
new_qualified_name = new_name
if new_database is not None:
new_qualified_name = self._get_scoped_name(new_name, new_database)
old_qualified_name = old_name
if old_database is not None:
old_qualified_name = self._get_scoped_name(old_name, old_database)
self.old_qualified_name = old_qualified_name
self.new_qualified_name = new_qualified_name
def compile(self):
cmd = '{} RENAME TO {}'.format(
self.old_qualified_name, self.new_qualified_name
)
return self._wrap_command(cmd)
class DropObject(ImpalaDDL):
def __init__(self, must_exist=True):
self.must_exist = must_exist
def compile(self):
if_exists = '' if self.must_exist else 'IF EXISTS '
object_name = self._object_name()
return 'DROP {} {}{}'.format(self._object_type, if_exists, object_name)
class DropTable(DropObject):
_object_type = 'TABLE'
def __init__(self, table_name, database=None, must_exist=True):
super().__init__(must_exist=must_exist)
self.table_name = table_name
self.database = database
def _object_name(self):
return self._get_scoped_name(self.table_name, self.database)
class TruncateTable(ImpalaDDL):
_object_type = 'TABLE'
def __init__(self, table_name, database=None):
self.table_name = table_name
self.database = database
def compile(self):
name = self._get_scoped_name(self.table_name, self.database)
return 'TRUNCATE TABLE {}'.format(name)
class DropView(DropTable):
_object_type = 'VIEW'
class CacheTable(ImpalaDDL):
def __init__(self, table_name, database=None, pool='default'):
self.table_name = table_name
self.database = database
self.pool = pool
def compile(self):
scoped_name = self._get_scoped_name(self.table_name, self.database)
return "ALTER TABLE {} SET CACHED IN '{}'".format(
scoped_name, self.pool
)
class CreateDatabase(CreateDDL):
def __init__(self, name, path=None, can_exist=False):
self.name = name
self.path = path
self.can_exist = can_exist
def compile(self):
name = quote_identifier(self.name)
create_decl = 'CREATE DATABASE'
create_line = '{} {}{}'.format(create_decl, self._if_exists(), name)
if self.path is not None:
create_line += "\nLOCATION '{}'".format(self.path)
return create_line
class DropDatabase(DropObject):
_object_type = 'DATABASE'
def __init__(self, name, must_exist=True):
super().__init__(must_exist=must_exist)
self.name = name
def _object_name(self):
return self.name
def format_schema(schema):
elements = [
_format_schema_element(name, t)
for name, t in zip(schema.names, schema.types)
]
return '({})'.format(',\n '.join(elements))
def _format_schema_element(name, t):
return '{} {}'.format(
quote_identifier(name, force=True), _type_to_sql_string(t),
)
class CreateFunction(ImpalaDDL):
_object_type = 'FUNCTION'
def __init__(self, func, name=None, database=None):
self.func = func
self.name = name or func.name
self.database = database
def _impala_signature(self):
scoped_name = self._get_scoped_name(self.name, self.database)
input_sig = _impala_input_signature(self.func.inputs)
output_sig = _type_to_sql_string(self.func.output)
return '{}({}) returns {}'.format(scoped_name, input_sig, output_sig)
class CreateUDF(CreateFunction):
def compile(self):
create_decl = 'CREATE FUNCTION'
impala_sig = self._impala_signature()
param_line = "location '{}' symbol='{}'".format(
self.func.lib_path, self.func.so_symbol
)
return ' '.join([create_decl, impala_sig, param_line])
class CreateUDA(CreateFunction):
def compile(self):
create_decl = 'CREATE AGGREGATE FUNCTION'
impala_sig = self._impala_signature()
tokens = ["location '{}'".format(self.func.lib_path)]
fn_names = (
'init_fn',
'update_fn',
'merge_fn',
'serialize_fn',
'finalize_fn',
)
for fn in fn_names:
value = getattr(self.func, fn)
if value is not None:
tokens.append("{}='{}'".format(fn, value))
return ' '.join([create_decl, impala_sig]) + ' ' + '\n'.join(tokens)
class DropFunction(DropObject):
def __init__(
self, name, inputs, must_exist=True, aggregate=False, database=None
):
super().__init__(must_exist=must_exist)
self.name = name
self.inputs = tuple(map(dt.dtype, inputs))
self.must_exist = must_exist
self.aggregate = aggregate
self.database = database
def _impala_signature(self):
full_name = self._get_scoped_name(self.name, self.database)
input_sig = _impala_input_signature(self.inputs)
return '{}({})'.format(full_name, input_sig)
def _object_name(self):
return self.name
def compile(self):
tokens = ['DROP']
if self.aggregate:
tokens.append('AGGREGATE')
tokens.append('FUNCTION')
if not self.must_exist:
tokens.append('IF EXISTS')
tokens.append(self._impala_signature())
return ' '.join(tokens)
class ListFunction(ImpalaDDL):
def __init__(self, database, like=None, aggregate=False):
self.database = database
self.like = like
self.aggregate = aggregate
def compile(self):
statement = 'SHOW '
if self.aggregate:
statement += 'AGGREGATE '
statement += 'FUNCTIONS IN {}'.format(self.database)
if self.like:
statement += " LIKE '{}'".format(self.like)
return statement
def _impala_input_signature(inputs):
# TODO: varargs '{}...'.format(val)
return ', '.join(map(_type_to_sql_string, inputs))
| 26.979167 | 79 | 0.607722 |
import json
import re
import ibis.expr.datatypes as dt
import ibis.expr.schema as sch
from ibis.backends.base_sql import quote_identifier
from ibis.sql.compiler import DDL, DML
from .compiler import _type_to_sql_string
fully_qualified_re = re.compile(r"(.*)\.(?:`(.*)`|(.*))")
def _is_fully_qualified(x):
return bool(fully_qualified_re.search(x))
def _is_quoted(x):
regex = re.compile(r"(?:`(.*)`|(.*))")
quoted, _ = regex.match(x).groups()
return quoted is not None
class ImpalaQualifiedSQLStatement:
def _get_scoped_name(self, obj_name, database):
if database:
scoped_name = '{}.`{}`'.format(database, obj_name)
else:
if not _is_fully_qualified(obj_name):
if _is_quoted(obj_name):
return obj_name
else:
return '`{}`'.format(obj_name)
else:
return obj_name
return scoped_name
class ImpalaDDL(DDL, ImpalaQualifiedSQLStatement):
pass
class ImpalaDML(DML, ImpalaQualifiedSQLStatement):
pass
class CreateDDL(ImpalaDDL):
def _if_exists(self):
return 'IF NOT EXISTS ' if self.can_exist else ''
_format_aliases = {'TEXT': 'TEXTFILE'}
def _sanitize_format(format):
if format is None:
return
format = format.upper()
format = _format_aliases.get(format, format)
if format not in ('PARQUET', 'AVRO', 'TEXTFILE'):
raise ValueError('Invalid format: {!r}'.format(format))
return format
def _serdeproperties(props):
formatted_props = _format_properties(props)
return 'SERDEPROPERTIES {}'.format(formatted_props)
def format_tblproperties(props):
formatted_props = _format_properties(props)
return 'TBLPROPERTIES {}'.format(formatted_props)
def _format_properties(props):
tokens = []
for k, v in sorted(props.items()):
tokens.append(" '{}'='{}'".format(k, v))
return '(\n{}\n)'.format(',\n'.join(tokens))
class CreateTable(CreateDDL):
def __init__(
self,
table_name,
database=None,
external=False,
format='parquet',
can_exist=False,
partition=None,
path=None,
tbl_properties=None,
):
self.table_name = table_name
self.database = database
self.partition = partition
self.path = path
self.external = external
self.can_exist = can_exist
self.format = _sanitize_format(format)
self.tbl_properties = tbl_properties
@property
def _prefix(self):
if self.external:
return 'CREATE EXTERNAL TABLE'
else:
return 'CREATE TABLE'
def _create_line(self):
scoped_name = self._get_scoped_name(self.table_name, self.database)
return '{} {}{}'.format(self._prefix, self._if_exists(), scoped_name)
def _location(self):
return "LOCATION '{}'".format(self.path) if self.path else None
def _storage(self):
return 'STORED AS {}'.format(self.format)
@property
def pieces(self):
yield self._create_line()
for piece in filter(None, self._pieces):
yield piece
def compile(self):
return '\n'.join(self.pieces)
class CTAS(CreateTable):
def __init__(
self,
table_name,
select,
database=None,
external=False,
format='parquet',
can_exist=False,
path=None,
partition=None,
):
super().__init__(
table_name,
database=database,
external=external,
format=format,
can_exist=can_exist,
path=path,
partition=partition,
)
self.select = select
@property
def _pieces(self):
yield self._partitioned_by()
yield self._storage()
yield self._location()
yield 'AS'
yield self.select.compile()
def _partitioned_by(self):
if self.partition is not None:
return 'PARTITIONED BY ({})'.format(
', '.join(
quote_identifier(expr._name) for expr in self.partition
)
)
return None
class CreateView(CTAS):
def __init__(self, table_name, select, database=None, can_exist=False):
super().__init__(
table_name, select, database=database, can_exist=can_exist
)
@property
def _pieces(self):
yield 'AS'
yield self.select.compile()
@property
def _prefix(self):
return 'CREATE VIEW'
class CreateTableParquet(CreateTable):
def __init__(
self,
table_name,
path,
example_file=None,
example_table=None,
schema=None,
external=True,
**kwargs,
):
super().__init__(
table_name,
external=external,
format='parquet',
path=path,
**kwargs,
)
self.example_file = example_file
self.example_table = example_table
self.schema = schema
@property
def _pieces(self):
if self.example_file is not None:
yield "LIKE PARQUET '{0}'".format(self.example_file)
elif self.example_table is not None:
yield "LIKE {0}".format(self.example_table)
elif self.schema is not None:
yield format_schema(self.schema)
else:
raise NotImplementedError
yield self._storage()
yield self._location()
class CreateTableWithSchema(CreateTable):
def __init__(self, table_name, schema, table_format=None, **kwargs):
super().__init__(table_name, **kwargs)
self.schema = schema
self.table_format = table_format
@property
def _pieces(self):
if self.partition is not None:
main_schema = self.schema
part_schema = self.partition
if not isinstance(part_schema, sch.Schema):
part_schema = sch.Schema(
part_schema, [self.schema[name] for name in part_schema]
)
to_delete = []
for name in self.partition:
if name in self.schema:
to_delete.append(name)
if len(to_delete):
main_schema = main_schema.delete(to_delete)
yield format_schema(main_schema)
yield 'PARTITIONED BY {}'.format(format_schema(part_schema))
else:
yield format_schema(self.schema)
if self.table_format is not None:
yield '\n'.join(self.table_format.to_ddl())
else:
yield self._storage()
yield self._location()
class DelimitedFormat:
def __init__(
self,
path,
delimiter=None,
escapechar=None,
na_rep=None,
lineterminator=None,
):
self.path = path
self.delimiter = delimiter
self.escapechar = escapechar
self.lineterminator = lineterminator
self.na_rep = na_rep
def to_ddl(self):
yield 'ROW FORMAT DELIMITED'
if self.delimiter is not None:
yield "FIELDS TERMINATED BY '{}'".format(self.delimiter)
if self.escapechar is not None:
yield "ESCAPED BY '{}'".format(self.escapechar)
if self.lineterminator is not None:
yield "LINES TERMINATED BY '{}'".format(self.lineterminator)
yield "LOCATION '{}'".format(self.path)
if self.na_rep is not None:
props = {'serialization.null.format': self.na_rep}
yield format_tblproperties(props)
class AvroFormat:
def __init__(self, path, avro_schema):
self.path = path
self.avro_schema = avro_schema
def to_ddl(self):
yield 'STORED AS AVRO'
yield "LOCATION '{}'".format(self.path)
schema = json.dumps(self.avro_schema, indent=2, sort_keys=True)
schema = '\n'.join(x.rstrip() for x in schema.splitlines())
props = {'avro.schema.literal': schema}
yield format_tblproperties(props)
class ParquetFormat:
def __init__(self, path):
self.path = path
def to_ddl(self):
yield 'STORED AS PARQUET'
yield "LOCATION '{}'".format(self.path)
class CreateTableDelimited(CreateTableWithSchema):
def __init__(
self,
table_name,
path,
schema,
delimiter=None,
escapechar=None,
lineterminator=None,
na_rep=None,
external=True,
**kwargs,
):
table_format = DelimitedFormat(
path,
delimiter=delimiter,
escapechar=escapechar,
lineterminator=lineterminator,
na_rep=na_rep,
)
super().__init__(
table_name, schema, table_format, external=external, **kwargs
)
class CreateTableAvro(CreateTable):
def __init__(self, table_name, path, avro_schema, external=True, **kwargs):
super().__init__(table_name, external=external, **kwargs)
self.table_format = AvroFormat(path, avro_schema)
@property
def _pieces(self):
yield '\n'.join(self.table_format.to_ddl())
class InsertSelect(ImpalaDML):
def __init__(
self,
table_name,
select_expr,
database=None,
partition=None,
partition_schema=None,
overwrite=False,
):
self.table_name = table_name
self.database = database
self.select = select_expr
self.partition = partition
self.partition_schema = partition_schema
self.overwrite = overwrite
def compile(self):
if self.overwrite:
cmd = 'INSERT OVERWRITE'
else:
cmd = 'INSERT INTO'
if self.partition is not None:
part = _format_partition(self.partition, self.partition_schema)
partition = ' {} '.format(part)
else:
partition = ''
select_query = self.select.compile()
scoped_name = self._get_scoped_name(self.table_name, self.database)
return '{0} {1}{2}\n{3}'.format(
cmd, scoped_name, partition, select_query
)
def _format_partition(partition, partition_schema):
tokens = []
if isinstance(partition, dict):
for name in partition_schema:
if name in partition:
tok = _format_partition_kv(
name, partition[name], partition_schema[name]
)
else:
# dynamic partitioning
tok = name
tokens.append(tok)
else:
for name, value in zip(partition_schema, partition):
tok = _format_partition_kv(name, value, partition_schema[name])
tokens.append(tok)
return 'PARTITION ({})'.format(', '.join(tokens))
def _format_partition_kv(k, v, type):
if type == dt.string:
value_formatted = '"{}"'.format(v)
else:
value_formatted = str(v)
return '{}={}'.format(k, value_formatted)
class LoadData(ImpalaDDL):
def __init__(
self,
table_name,
path,
database=None,
partition=None,
partition_schema=None,
overwrite=False,
):
self.table_name = table_name
self.database = database
self.path = path
self.partition = partition
self.partition_schema = partition_schema
self.overwrite = overwrite
def compile(self):
overwrite = 'OVERWRITE ' if self.overwrite else ''
if self.partition is not None:
partition = '\n' + _format_partition(
self.partition, self.partition_schema
)
else:
partition = ''
scoped_name = self._get_scoped_name(self.table_name, self.database)
return "LOAD DATA INPATH '{}' {}INTO TABLE {}{}".format(
self.path, overwrite, scoped_name, partition
)
class AlterTable(ImpalaDDL):
def __init__(
self,
table,
location=None,
format=None,
tbl_properties=None,
serde_properties=None,
):
self.table = table
self.location = location
self.format = _sanitize_format(format)
self.tbl_properties = tbl_properties
self.serde_properties = serde_properties
def _wrap_command(self, cmd):
return 'ALTER TABLE {}'.format(cmd)
def _format_properties(self, prefix=''):
tokens = []
if self.location is not None:
tokens.append("LOCATION '{}'".format(self.location))
if self.format is not None:
tokens.append("FILEFORMAT {}".format(self.format))
if self.tbl_properties is not None:
tokens.append(format_tblproperties(self.tbl_properties))
if self.serde_properties is not None:
tokens.append(_serdeproperties(self.serde_properties))
if len(tokens) > 0:
return '\n{}{}'.format(prefix, '\n'.join(tokens))
else:
return ''
def compile(self):
props = self._format_properties()
action = '{} SET {}'.format(self.table, props)
return self._wrap_command(action)
class PartitionProperties(AlterTable):
def __init__(
self,
table,
partition,
partition_schema,
location=None,
format=None,
tbl_properties=None,
serde_properties=None,
):
super().__init__(
table,
location=location,
format=format,
tbl_properties=tbl_properties,
serde_properties=serde_properties,
)
self.partition = partition
self.partition_schema = partition_schema
def _compile(self, cmd, property_prefix=''):
part = _format_partition(self.partition, self.partition_schema)
if cmd:
part = '{} {}'.format(cmd, part)
props = self._format_properties(property_prefix)
action = '{} {}{}'.format(self.table, part, props)
return self._wrap_command(action)
class AddPartition(PartitionProperties):
def __init__(self, table, partition, partition_schema, location=None):
super().__init__(table, partition, partition_schema, location=location)
def compile(self):
return self._compile('ADD')
class AlterPartition(PartitionProperties):
def compile(self):
return self._compile('', 'SET ')
class DropPartition(PartitionProperties):
def __init__(self, table, partition, partition_schema):
super().__init__(table, partition, partition_schema)
def compile(self):
return self._compile('DROP')
class RenameTable(AlterTable):
def __init__(
self, old_name, new_name, old_database=None, new_database=None
):
# if either database is None, the name is assumed to be fully scoped
self.old_name = old_name
self.old_database = old_database
self.new_name = new_name
self.new_database = new_database
new_qualified_name = new_name
if new_database is not None:
new_qualified_name = self._get_scoped_name(new_name, new_database)
old_qualified_name = old_name
if old_database is not None:
old_qualified_name = self._get_scoped_name(old_name, old_database)
self.old_qualified_name = old_qualified_name
self.new_qualified_name = new_qualified_name
def compile(self):
cmd = '{} RENAME TO {}'.format(
self.old_qualified_name, self.new_qualified_name
)
return self._wrap_command(cmd)
class DropObject(ImpalaDDL):
def __init__(self, must_exist=True):
self.must_exist = must_exist
def compile(self):
if_exists = '' if self.must_exist else 'IF EXISTS '
object_name = self._object_name()
return 'DROP {} {}{}'.format(self._object_type, if_exists, object_name)
class DropTable(DropObject):
_object_type = 'TABLE'
def __init__(self, table_name, database=None, must_exist=True):
super().__init__(must_exist=must_exist)
self.table_name = table_name
self.database = database
def _object_name(self):
return self._get_scoped_name(self.table_name, self.database)
class TruncateTable(ImpalaDDL):
_object_type = 'TABLE'
def __init__(self, table_name, database=None):
self.table_name = table_name
self.database = database
def compile(self):
name = self._get_scoped_name(self.table_name, self.database)
return 'TRUNCATE TABLE {}'.format(name)
class DropView(DropTable):
_object_type = 'VIEW'
class CacheTable(ImpalaDDL):
def __init__(self, table_name, database=None, pool='default'):
self.table_name = table_name
self.database = database
self.pool = pool
def compile(self):
scoped_name = self._get_scoped_name(self.table_name, self.database)
return "ALTER TABLE {} SET CACHED IN '{}'".format(
scoped_name, self.pool
)
class CreateDatabase(CreateDDL):
def __init__(self, name, path=None, can_exist=False):
self.name = name
self.path = path
self.can_exist = can_exist
def compile(self):
name = quote_identifier(self.name)
create_decl = 'CREATE DATABASE'
create_line = '{} {}{}'.format(create_decl, self._if_exists(), name)
if self.path is not None:
create_line += "\nLOCATION '{}'".format(self.path)
return create_line
class DropDatabase(DropObject):
_object_type = 'DATABASE'
def __init__(self, name, must_exist=True):
super().__init__(must_exist=must_exist)
self.name = name
def _object_name(self):
return self.name
def format_schema(schema):
elements = [
_format_schema_element(name, t)
for name, t in zip(schema.names, schema.types)
]
return '({})'.format(',\n '.join(elements))
def _format_schema_element(name, t):
return '{} {}'.format(
quote_identifier(name, force=True), _type_to_sql_string(t),
)
class CreateFunction(ImpalaDDL):
_object_type = 'FUNCTION'
def __init__(self, func, name=None, database=None):
self.func = func
self.name = name or func.name
self.database = database
def _impala_signature(self):
scoped_name = self._get_scoped_name(self.name, self.database)
input_sig = _impala_input_signature(self.func.inputs)
output_sig = _type_to_sql_string(self.func.output)
return '{}({}) returns {}'.format(scoped_name, input_sig, output_sig)
class CreateUDF(CreateFunction):
def compile(self):
create_decl = 'CREATE FUNCTION'
impala_sig = self._impala_signature()
param_line = "location '{}' symbol='{}'".format(
self.func.lib_path, self.func.so_symbol
)
return ' '.join([create_decl, impala_sig, param_line])
class CreateUDA(CreateFunction):
def compile(self):
create_decl = 'CREATE AGGREGATE FUNCTION'
impala_sig = self._impala_signature()
tokens = ["location '{}'".format(self.func.lib_path)]
fn_names = (
'init_fn',
'update_fn',
'merge_fn',
'serialize_fn',
'finalize_fn',
)
for fn in fn_names:
value = getattr(self.func, fn)
if value is not None:
tokens.append("{}='{}'".format(fn, value))
return ' '.join([create_decl, impala_sig]) + ' ' + '\n'.join(tokens)
class DropFunction(DropObject):
def __init__(
self, name, inputs, must_exist=True, aggregate=False, database=None
):
super().__init__(must_exist=must_exist)
self.name = name
self.inputs = tuple(map(dt.dtype, inputs))
self.must_exist = must_exist
self.aggregate = aggregate
self.database = database
def _impala_signature(self):
full_name = self._get_scoped_name(self.name, self.database)
input_sig = _impala_input_signature(self.inputs)
return '{}({})'.format(full_name, input_sig)
def _object_name(self):
return self.name
def compile(self):
tokens = ['DROP']
if self.aggregate:
tokens.append('AGGREGATE')
tokens.append('FUNCTION')
if not self.must_exist:
tokens.append('IF EXISTS')
tokens.append(self._impala_signature())
return ' '.join(tokens)
class ListFunction(ImpalaDDL):
def __init__(self, database, like=None, aggregate=False):
self.database = database
self.like = like
self.aggregate = aggregate
def compile(self):
statement = 'SHOW '
if self.aggregate:
statement += 'AGGREGATE '
statement += 'FUNCTIONS IN {}'.format(self.database)
if self.like:
statement += " LIKE '{}'".format(self.like)
return statement
def _impala_input_signature(inputs):
# TODO: varargs '{}...'.format(val)
return ', '.join(map(_type_to_sql_string, inputs))
| true | true |
f7fb40398c878dfaf2588248dc7ef17938220a50 | 100 | py | Python | tests/test_example.py | ro-56/python_package_skeleton | dd64a2d6921f9bb07335241c34e9b50a72a05f1a | [
"MIT"
] | null | null | null | tests/test_example.py | ro-56/python_package_skeleton | dd64a2d6921f9bb07335241c34e9b50a72a05f1a | [
"MIT"
] | null | null | null | tests/test_example.py | ro-56/python_package_skeleton | dd64a2d6921f9bb07335241c34e9b50a72a05f1a | [
"MIT"
] | null | null | null | import python_package_skeleton
def test_example():
assert python_package_skeleton is not None
| 16.666667 | 46 | 0.82 | import python_package_skeleton
def test_example():
assert python_package_skeleton is not None
| true | true |
f7fb40c173b3d7347b95d90e09c986f46b084ceb | 1,648 | py | Python | pagarmecoreapi/models/create_checkout_card_installment_option_request.py | pagarme/pagarme-core-api-python | c7b11ca78ab3e7e896e5b75048e6f72b511db00e | [
"MIT"
] | 6 | 2021-09-02T19:55:04.000Z | 2022-03-16T14:06:15.000Z | pagarmecoreapi/models/create_checkout_card_installment_option_request.py | pagarme/pagarme-core-api-python | c7b11ca78ab3e7e896e5b75048e6f72b511db00e | [
"MIT"
] | 2 | 2021-10-11T22:48:15.000Z | 2022-01-24T18:24:23.000Z | pagarmecoreapi/models/create_checkout_card_installment_option_request.py | pagarme/pagarme-core-api-python | c7b11ca78ab3e7e896e5b75048e6f72b511db00e | [
"MIT"
] | 2 | 2021-09-12T21:43:32.000Z | 2022-03-07T16:58:54.000Z | # -*- coding: utf-8 -*-
"""
pagarmecoreapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class CreateCheckoutCardInstallmentOptionRequest(object):
"""Implementation of the 'CreateCheckoutCardInstallmentOptionRequest' model.
Options for card installment
Attributes:
number (int): Installment quantity
total (int): Total amount
"""
# Create a mapping from Model property names to API property names
_names = {
"number":'number',
"total":'total'
}
def __init__(self,
number=None,
total=None):
"""Constructor for the CreateCheckoutCardInstallmentOptionRequest class"""
# Initialize members of the class
self.number = number
self.total = total
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
number = dictionary.get('number')
total = dictionary.get('total')
# Return an object of this model
return cls(number,
total)
| 25.75 | 84 | 0.585558 |
class CreateCheckoutCardInstallmentOptionRequest(object):
_names = {
"number":'number',
"total":'total'
}
def __init__(self,
number=None,
total=None):
self.number = number
self.total = total
@classmethod
def from_dictionary(cls,
dictionary):
if dictionary is None:
return None
number = dictionary.get('number')
total = dictionary.get('total')
return cls(number,
total)
| true | true |
f7fb410f11b30bda329800229f4b00eac1f72216 | 7,797 | py | Python | custom_components/cozylife/tcp_client.py | Daandeve/hass-cozylife | 59ca726179de3c9b22e4149c68b3bed8cca2848d | [
"MIT"
] | null | null | null | custom_components/cozylife/tcp_client.py | Daandeve/hass-cozylife | 59ca726179de3c9b22e4149c68b3bed8cca2848d | [
"MIT"
] | null | null | null | custom_components/cozylife/tcp_client.py | Daandeve/hass-cozylife | 59ca726179de3c9b22e4149c68b3bed8cca2848d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import socket
import time
from typing import Optional, Union, Any
import logging
try:
from .utils import get_pid_list, get_sn
except:
from utils import get_pid_list, get_sn
CMD_INFO = 0
CMD_QUERY = 2
CMD_SET = 3
CMD_LIST = [CMD_INFO, CMD_QUERY, CMD_SET]
_LOGGER = logging.getLogger(__name__)
class tcp_client(object):
"""
Represents a device
send:{"cmd":0,"pv":0,"sn":"1636463553873","msg":{}}
receiver:{"cmd":0,"pv":0,"sn":"1636463553873","msg":{"did":"629168597cb94c4c1d8f","dtp":"02","pid":"e2s64v",
"mac":"7cb94c4c1d8f","ip":"192.168.123.57","rssi":-33,"sv":"1.0.0","hv":"0.0.1"},"res":0}
send:{"cmd":2,"pv":0,"sn":"1636463611798","msg":{"attr":[0]}}
receiver:{"cmd":2,"pv":0,"sn":"1636463611798","msg":{"attr":[1,2,3,4,5,6],"data":{"1":0,"2":0,"3":1000,"4":1000,
"5":65535,"6":65535}},"res":0}
send:{"cmd":3,"pv":0,"sn":"1636463662455","msg":{"attr":[1],"data":{"1":0}}}
receiver:{"cmd":3,"pv":0,"sn":"1636463662455","msg":{"attr":[1],"data":{"1":0}},"res":0}
receiver:{"cmd":10,"pv":0,"sn":"1636463664000","res":0,"msg":{"attr":[1,2,3,4,5,6],"data":{"1":0,"2":0,"3":1000,
"4":1000,"5":65535,"6":65535}}}
"""
_ip = str
_port = 5555
_connect = None # socket
_device_id = 'temp_id' # str
# _device_key = str
_pid = str
_device_type_code = '01'
_icon = str
_device_model_name = 'light'
_dpid = [3, 5]
# last sn
_sn = str
def __init__(self, ip, timeout=3):
self._ip = ip
self.timeout = timeout
def disconnect(self):
if self._connect:
try:
#self._connect.shutdown(socket.SHUT_RDWR)
self._connect.close()
except:
pass
self._connect = None
def __del__(self):
self.disconnect()
def _initSocket(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(self.timeout)
s.connect((self._ip, self._port))
self._connect = s
except:
_LOGGER.info(f'_initSocketerror,ip={self._ip}')
self.disconnect()
@property
def check(self) -> bool:
"""
Determine whether the device is filtered
:return:
"""
return True
@property
def dpid(self):
return self._dpid
@property
def device_model_name(self):
return self._device_model_name
@property
def icon(self):
return self._icon
@property
def device_type_code(self) -> str:
return self._device_type_code
@property
def device_id(self):
return self._device_id
def _device_info(self) -> None:
"""
get info for device model
:return:
"""
self._only_send(CMD_INFO, {})
try:
try:
resp = self._connect.recv(1024)
except:
self.disconnect()
self._initSocket()
return None
resp_json = json.loads(resp.strip())
except:
_LOGGER.info('_device_info.recv.error')
return None
if resp_json.get('msg') is None or type(resp_json['msg']) is not dict:
_LOGGER.info('_device_info.recv.error1')
return None
if resp_json['msg'].get('did') is None:
_LOGGER.info('_device_info.recv.error2')
return None
self._device_id = resp_json['msg']['did']
if resp_json['msg'].get('pid') is None:
_LOGGER.info('_device_info.recv.error3')
return None
self._pid = resp_json['msg']['pid']
pid_list = get_pid_list()
for item in pid_list:
match = False
for item1 in item['device_model']:
if item1['device_product_id'] == self._pid:
match = True
self._icon = item1['icon']
self._device_model_name = item1['device_model_name']
self._dpid = item1['dpid']
break
if match:
self._device_type_code = item['device_type_code']
break
# _LOGGER.info(pid_list)
_LOGGER.info(self._device_id)
_LOGGER.info(self._device_type_code)
_LOGGER.info(self._pid)
_LOGGER.info(self._device_model_name)
_LOGGER.info(self._icon)
def _get_package(self, cmd: int, payload: dict) -> bytes:
"""
package message
:param cmd:int:
:param payload:
:return:
"""
self._sn = get_sn()
if CMD_SET == cmd:
message = {
'pv': 0,
'cmd': cmd,
'sn': self._sn,
'msg': {
'attr': [int(item) for item in payload.keys()],
'data': payload,
}
}
elif CMD_QUERY == cmd:
message = {
'pv': 0,
'cmd': cmd,
'sn': self._sn,
'msg': {
'attr': [0],
}
}
elif CMD_INFO == cmd:
message = {
'pv': 0,
'cmd': cmd,
'sn': self._sn,
'msg': {}
}
else:
raise Exception('CMD is not valid')
payload_str = json.dumps(message, separators=(',', ':',))
_LOGGER.info(f'_package={payload_str}')
return bytes(payload_str + "\r\n", encoding='utf8')
def _send_receiver(self, cmd: int, payload: dict) -> Union[dict, Any]:
"""
send & receiver
:param cmd:
:param payload:
:return:
"""
try:
self._connect.send(self._get_package(cmd, payload))
except:
try:
self.disconnect()
self._initSocket()
self._connect.send(self._get_package(cmd, payload))
except:
pass
try:
i = 10
while i > 0:
res = self._connect.recv(1024)
# print(f'res={res},sn={self._sn},{self._sn in str(res)}')
i -= 1
# only allow same sn
if self._sn in str(res):
payload = json.loads(res.strip())
if payload is None or len(payload) == 0:
return None
if payload.get('msg') is None or type(payload['msg']) is not dict:
return None
if payload['msg'].get('data') is None or type(payload['msg']['data']) is not dict:
return None
return payload['msg']['data']
return None
except Exception as e:
# print(f'e={e}')
_LOGGER.info(f'_only_send.recv.error:{e}')
return {}
def _only_send(self, cmd: int, payload: dict) -> None:
"""
send but not receiver
:param cmd:
:param payload:
:return:
"""
try:
self._connect.send(self._get_package(cmd, payload))
except:
self.disconnect()
try:
self._initSocket()
self._connect.send(self._get_package(cmd, payload))
except:
pass
def control(self, payload: dict) -> bool:
"""
control use dpid
:param payload:
:return:
"""
self._only_send(CMD_SET, payload)
return True
def query(self) -> dict:
"""
query device state
:return:
"""
return self._send_receiver(CMD_QUERY, {})
| 28.25 | 116 | 0.492497 |
import json
import socket
import time
from typing import Optional, Union, Any
import logging
try:
from .utils import get_pid_list, get_sn
except:
from utils import get_pid_list, get_sn
CMD_INFO = 0
CMD_QUERY = 2
CMD_SET = 3
CMD_LIST = [CMD_INFO, CMD_QUERY, CMD_SET]
_LOGGER = logging.getLogger(__name__)
class tcp_client(object):
_ip = str
_port = 5555
_connect = None
_device_id = 'temp_id'
_pid = str
_device_type_code = '01'
_icon = str
_device_model_name = 'light'
_dpid = [3, 5]
_sn = str
def __init__(self, ip, timeout=3):
self._ip = ip
self.timeout = timeout
def disconnect(self):
if self._connect:
try:
self._connect.close()
except:
pass
self._connect = None
def __del__(self):
self.disconnect()
def _initSocket(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(self.timeout)
s.connect((self._ip, self._port))
self._connect = s
except:
_LOGGER.info(f'_initSocketerror,ip={self._ip}')
self.disconnect()
@property
def check(self) -> bool:
return True
@property
def dpid(self):
return self._dpid
@property
def device_model_name(self):
return self._device_model_name
@property
def icon(self):
return self._icon
@property
def device_type_code(self) -> str:
return self._device_type_code
@property
def device_id(self):
return self._device_id
def _device_info(self) -> None:
self._only_send(CMD_INFO, {})
try:
try:
resp = self._connect.recv(1024)
except:
self.disconnect()
self._initSocket()
return None
resp_json = json.loads(resp.strip())
except:
_LOGGER.info('_device_info.recv.error')
return None
if resp_json.get('msg') is None or type(resp_json['msg']) is not dict:
_LOGGER.info('_device_info.recv.error1')
return None
if resp_json['msg'].get('did') is None:
_LOGGER.info('_device_info.recv.error2')
return None
self._device_id = resp_json['msg']['did']
if resp_json['msg'].get('pid') is None:
_LOGGER.info('_device_info.recv.error3')
return None
self._pid = resp_json['msg']['pid']
pid_list = get_pid_list()
for item in pid_list:
match = False
for item1 in item['device_model']:
if item1['device_product_id'] == self._pid:
match = True
self._icon = item1['icon']
self._device_model_name = item1['device_model_name']
self._dpid = item1['dpid']
break
if match:
self._device_type_code = item['device_type_code']
break
_LOGGER.info(self._device_id)
_LOGGER.info(self._device_type_code)
_LOGGER.info(self._pid)
_LOGGER.info(self._device_model_name)
_LOGGER.info(self._icon)
def _get_package(self, cmd: int, payload: dict) -> bytes:
self._sn = get_sn()
if CMD_SET == cmd:
message = {
'pv': 0,
'cmd': cmd,
'sn': self._sn,
'msg': {
'attr': [int(item) for item in payload.keys()],
'data': payload,
}
}
elif CMD_QUERY == cmd:
message = {
'pv': 0,
'cmd': cmd,
'sn': self._sn,
'msg': {
'attr': [0],
}
}
elif CMD_INFO == cmd:
message = {
'pv': 0,
'cmd': cmd,
'sn': self._sn,
'msg': {}
}
else:
raise Exception('CMD is not valid')
payload_str = json.dumps(message, separators=(',', ':',))
_LOGGER.info(f'_package={payload_str}')
return bytes(payload_str + "\r\n", encoding='utf8')
def _send_receiver(self, cmd: int, payload: dict) -> Union[dict, Any]:
try:
self._connect.send(self._get_package(cmd, payload))
except:
try:
self.disconnect()
self._initSocket()
self._connect.send(self._get_package(cmd, payload))
except:
pass
try:
i = 10
while i > 0:
res = self._connect.recv(1024)
i -= 1
if self._sn in str(res):
payload = json.loads(res.strip())
if payload is None or len(payload) == 0:
return None
if payload.get('msg') is None or type(payload['msg']) is not dict:
return None
if payload['msg'].get('data') is None or type(payload['msg']['data']) is not dict:
return None
return payload['msg']['data']
return None
except Exception as e:
_LOGGER.info(f'_only_send.recv.error:{e}')
return {}
def _only_send(self, cmd: int, payload: dict) -> None:
try:
self._connect.send(self._get_package(cmd, payload))
except:
self.disconnect()
try:
self._initSocket()
self._connect.send(self._get_package(cmd, payload))
except:
pass
    def control(self, payload: dict) -> bool:
        """Fire-and-forget a CMD_SET frame; always reports True (no ack)."""
        self._only_send(CMD_SET, payload)
        return True
    def query(self) -> dict:
        """Request the full state (attr [0]) and return the reply's data dict."""
        return self._send_receiver(CMD_QUERY, {})
| true | true |
f7fb41dd37ad3ace248a8f3d060c2475f9655c07 | 2,879 | py | Python | test/recon/test_recon.py | DobromirM/swim-system-python | a5b4f05457f1eb2739a920c42dfc721c83a1226a | [
"Apache-2.0"
] | 8 | 2019-11-11T19:38:59.000Z | 2022-01-06T11:13:04.000Z | test/recon/test_recon.py | swimos/swim-system-python | 727c09b6e7300b063e320364373ff724d9b8af90 | [
"Apache-2.0"
] | 40 | 2019-10-29T10:35:49.000Z | 2021-05-14T22:18:35.000Z | test/recon/test_recon.py | DobromirM/swim-system-python | a5b4f05457f1eb2739a920c42dfc721c83a1226a | [
"Apache-2.0"
] | 3 | 2020-01-31T18:28:58.000Z | 2021-08-25T08:53:13.000Z | # Copyright 2015-2021 SWIM.AI inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swimai.structures import RecordMap, Attr, Text, Slot
from swimai.recon import Recon
from swimai.recon._parsers import _ReconParser
from swimai.recon._writers import _ReconWriter
class TestRecon(unittest.TestCase):
def test_parse(self):
# Given
recon_string = '@sync(node: "foo/node", lane: "foo/lane")"Hello, World"'
# When
actual = Recon.parse(recon_string)
# Then
self.assertIsInstance(actual, RecordMap)
self.assertEqual('sync', actual._tag)
self.assertEqual(2, actual.size)
self.assertEqual('foo/node', actual.get_item(0).value.get_item(0).value.value)
self.assertEqual('foo/lane', actual.get_item(0).value.get_item(1).value.value)
self.assertEqual('Hello, World', actual.get_item(1).value)
def test_to_string(self):
# Given
value = RecordMap.create()
value.add(Attr.create_attr(Text.create_from('remove'),
RecordMap.create_record_map(
Slot.create_slot(Text.create_from('key'), Text.create_from('foo')))))
# When
actual = Recon.to_string(value)
# Then
self.assertEqual('@remove(key:foo)', actual)
def test_get_writer_once(self):
# When
actual = Recon._get_writer()
# Then
self.assertIsInstance(actual, _ReconWriter)
self.assertEqual(Recon._get_writer(), actual)
def test_get_writer_multiple(self):
# Given
expected = Recon._get_writer()
# When
actual = Recon._get_writer()
# Then
self.assertIsInstance(actual, _ReconWriter)
self.assertEqual(expected, actual)
self.assertEqual(Recon._get_writer(), actual)
def get_parser_once(self):
# When
actual = Recon._get_parser()
# Then
self.assertIsInstance(actual, _ReconParser)
self.assertEqual(Recon._get_parser(), actual)
def get_parser_multiple(self):
# Given
expected = Recon._get_parser()
# When
actual = Recon._get_parser()
# Then
self.assertIsInstance(actual, _ReconParser)
self.assertEqual(expected, actual)
self.assertEqual(Recon._get_parser(), actual)
| 35.109756 | 108 | 0.652657 |
import unittest
from swimai.structures import RecordMap, Attr, Text, Slot
from swimai.recon import Recon
from swimai.recon._parsers import _ReconParser
from swimai.recon._writers import _ReconWriter
class TestRecon(unittest.TestCase):
def test_parse(self):
recon_string = '@sync(node: "foo/node", lane: "foo/lane")"Hello, World"'
actual = Recon.parse(recon_string)
self.assertIsInstance(actual, RecordMap)
self.assertEqual('sync', actual._tag)
self.assertEqual(2, actual.size)
self.assertEqual('foo/node', actual.get_item(0).value.get_item(0).value.value)
self.assertEqual('foo/lane', actual.get_item(0).value.get_item(1).value.value)
self.assertEqual('Hello, World', actual.get_item(1).value)
def test_to_string(self):
value = RecordMap.create()
value.add(Attr.create_attr(Text.create_from('remove'),
RecordMap.create_record_map(
Slot.create_slot(Text.create_from('key'), Text.create_from('foo')))))
actual = Recon.to_string(value)
self.assertEqual('@remove(key:foo)', actual)
def test_get_writer_once(self):
actual = Recon._get_writer()
self.assertIsInstance(actual, _ReconWriter)
self.assertEqual(Recon._get_writer(), actual)
def test_get_writer_multiple(self):
expected = Recon._get_writer()
actual = Recon._get_writer()
self.assertIsInstance(actual, _ReconWriter)
self.assertEqual(expected, actual)
self.assertEqual(Recon._get_writer(), actual)
def get_parser_once(self):
actual = Recon._get_parser()
self.assertIsInstance(actual, _ReconParser)
self.assertEqual(Recon._get_parser(), actual)
def get_parser_multiple(self):
expected = Recon._get_parser()
actual = Recon._get_parser()
self.assertIsInstance(actual, _ReconParser)
self.assertEqual(expected, actual)
self.assertEqual(Recon._get_parser(), actual)
| true | true |
f7fb424e1240eace1bfbe7d85e0651d436711408 | 2,045 | py | Python | vitrage_dashboard/api/vitrage.py | mail2nsrajesh/vitrage-dashboard | 3b23bc5b3c63dfa0aeff54f09796d8e7663f681f | [
"Apache-2.0"
] | null | null | null | vitrage_dashboard/api/vitrage.py | mail2nsrajesh/vitrage-dashboard | 3b23bc5b3c63dfa0aeff54f09796d8e7663f681f | [
"Apache-2.0"
] | null | null | null | vitrage_dashboard/api/vitrage.py | mail2nsrajesh/vitrage-dashboard | 3b23bc5b3c63dfa0aeff54f09796d8e7663f681f | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 - Alcatel-Lucent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from horizon.utils.memoized import memoized # noqa
from keystoneauth1.identity.generic.token import Token
from keystoneauth1.session import Session
from openstack_dashboard.api import base
from vitrageclient import client as vitrage_client
@memoized
def vitrageclient(request, password=None):
    """Build (and memoize per request) a Vitrage API v1 client.

    Authenticates against Keystone by re-using the token already held by
    the Horizon request; *password* is accepted for interface parity but
    unused.
    """
    auth_url = base.url_for(request, 'identity')
    auth = Token(auth_url=auth_url,
                 token=request.user.token.id,
                 project_name=request.user.tenant_name)
    return vitrage_client.Client('1', Session(auth=auth, timeout=600))
def topology(request, query=None, graph_type='tree', all_tenants='false'):
    """Fetch the entity-graph topology from the Vitrage API."""
    client = vitrageclient(request)
    return client.topology.get(query=query,
                               graph_type=graph_type,
                               all_tenants=all_tenants)
def alarms(request, vitrage_id='all', all_tenants='false'):
    """List alarms, by default across every resource ('all')."""
    client = vitrageclient(request)
    return client.alarm.list(vitrage_id=vitrage_id, all_tenants=all_tenants)
def rca(request, alarm_id, all_tenants='false'):
    """Fetch the root-cause-analysis graph for a single alarm."""
    client = vitrageclient(request)
    return client.rca.get(alarm_id=alarm_id, all_tenants=all_tenants)
def templates(request, template_id='all'):
    """Show one template, or list all of them when id is 'all'."""
    template_api = vitrageclient(request).template
    if template_id == 'all':
        return template_api.list()
    return template_api.show(template_id)
| 38.584906 | 77 | 0.694377 |
from horizon.utils.memoized import memoized
from keystoneauth1.identity.generic.token import Token
from keystoneauth1.session import Session
from openstack_dashboard.api import base
from vitrageclient import client as vitrage_client
@memoized
def vitrageclient(request, password=None):
    """Build (and memoize per request) a Vitrage API v1 client.

    Authenticates against Keystone by re-using the token already held by
    the Horizon request; *password* is accepted for interface parity but
    unused.
    """
    auth_url = base.url_for(request, 'identity')
    auth = Token(auth_url=auth_url,
                 token=request.user.token.id,
                 project_name=request.user.tenant_name)
    return vitrage_client.Client('1', Session(auth=auth, timeout=600))
def topology(request, query=None, graph_type='tree', all_tenants='false'):
    """Fetch the entity-graph topology from the Vitrage API."""
    client = vitrageclient(request)
    return client.topology.get(query=query,
                               graph_type=graph_type,
                               all_tenants=all_tenants)
def alarms(request, vitrage_id='all', all_tenants='false'):
    """List alarms, by default across every resource ('all')."""
    client = vitrageclient(request)
    return client.alarm.list(vitrage_id=vitrage_id, all_tenants=all_tenants)
def rca(request, alarm_id, all_tenants='false'):
    """Fetch the root-cause-analysis graph for a single alarm."""
    client = vitrageclient(request)
    return client.rca.get(alarm_id=alarm_id, all_tenants=all_tenants)
def templates(request, template_id='all'):
    """Show one template, or list all of them when id is 'all'."""
    template_api = vitrageclient(request).template
    if template_id == 'all':
        return template_api.list()
    return template_api.show(template_id)
| true | true |
f7fb42a391f08966123d877bed890d4c15d4a18c | 12,081 | py | Python | selfdrive/car/gm/interface.py | wzpyh/openpilot | a422246dc30bce11e970514f13f7c110f4470cc3 | [
"MIT"
] | null | null | null | selfdrive/car/gm/interface.py | wzpyh/openpilot | a422246dc30bce11e970514f13f7c110f4470cc3 | [
"MIT"
] | null | null | null | selfdrive/car/gm/interface.py | wzpyh/openpilot | a422246dc30bce11e970514f13f7c110f4470cc3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from cereal import car, log
from common.realtime import sec_since_boot
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.drive_helpers import create_event, EventTypes as ET
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.car.gm.values import DBC, CAR, STOCK_CONTROL_MSGS
from selfdrive.car.gm.carstate import CarState, CruiseButtons, get_powertrain_can_parser
try:
from selfdrive.car.gm.carcontroller import CarController
except ImportError:
CarController = None
# Car chimes, beeps, blinker sounds etc
class CM:
  """Sound codes for car chimes/beeps (see comment above); values are the
  raw bytes sent with the HUD alert."""
  TOCK = 0x81
  TICK = 0x82
  LOW_BEEP = 0x84
  HIGH_BEEP = 0x85
  LOW_CHIME = 0x86
  HIGH_CHIME = 0x87
class CanBus(object):
  """Fixed CAN bus index assignments for the GM harness."""

  def __init__(self):
    # powertrain, object (radar) bus, chassis, single-wire GMLAN
    self.powertrain, self.obstacle = 0, 1
    self.chassis, self.sw_gmlan = 2, 3
class CarInterface(object):
  """openpilot car interface for GM: builds CarParams for supported models,
  translates powertrain CAN state into car.CarState, and forwards
  CarControl commands to the CarController."""

  def __init__(self, CP, sendcan=None):
    """Wire up state, vehicle model and CAN parser; the CarController is
    only created when *sendcan* is provided (i.e. not read-only)."""
    self.CP = CP
    self.frame = 0
    self.gas_pressed_prev = False
    self.brake_pressed_prev = False
    self.can_invalid_count = 0
    self.acc_active_prev = 0
    # *** init the major players ***
    canbus = CanBus()
    self.CS = CarState(CP, canbus)
    self.VM = VehicleModel(CP)
    self.pt_cp = get_powertrain_can_parser(CP, canbus)
    self.ch_cp_dbc_name = DBC[CP.carFingerprint]['chassis']
    # sending if read only is False
    if sendcan is not None:
      self.sendcan = sendcan
      self.CC = CarController(canbus, CP.carFingerprint, CP.enableCamera)
  @staticmethod
  def compute_gb(accel, speed):
    """Map a desired acceleration to the normalized gas/brake command
    (plain accel / 4, independent of speed)."""
    return float(accel) / 4.0
  @staticmethod
  def calc_accel_override(a_ego, a_target, v_ego, v_target):
    """Always 1.0: no accel-command reduction is applied on GM."""
    return 1.0
  @staticmethod
  def get_params(candidate, fingerprint):
    """Build static CarParams for *candidate* (VOLT or CADILLAC_CT6),
    scaling inertia/tire stiffness from 2016 Civic baseline values."""
    ret = car.CarParams.new_message()
    ret.carName = "gm"
    ret.carFingerprint = candidate
    ret.enableCruise = False
    # Presence of a camera on the object bus is ok.
    # Have to go passive if ASCM is online (ACC-enabled cars),
    # or camera is on powertrain bus (LKA cars without ACC).
    ret.enableCamera = not any(x for x in STOCK_CONTROL_MSGS[candidate] if x in fingerprint)
    std_cargo = 136
    if candidate == CAR.VOLT:
      # supports stop and go, but initial engage must be above 18mph (which include conservatism)
      ret.minEnableSpeed = 18 * CV.MPH_TO_MS
      # kg of standard extra cargo to count for drive, gas, etc...
      ret.mass = 1607 + std_cargo
      ret.safetyModel = car.CarParams.SafetyModels.gm
      ret.wheelbase = 2.69
      ret.steerRatio = 15.7
      ret.steerRatioRear = 0.
      ret.centerToFront = ret.wheelbase * 0.4 # wild guess
    elif candidate == CAR.CADILLAC_CT6:
      # engage speed is decided by pcm
      ret.minEnableSpeed = -1
      # kg of standard extra cargo to count for drive, gas, etc...
      ret.mass = 4016. * CV.LB_TO_KG + std_cargo
      ret.safetyModel = car.CarParams.SafetyModels.cadillac
      ret.wheelbase = 3.11
      ret.steerRatio = 14.6   # it's 16.3 without rear active steering
      ret.steerRatioRear = 0. # TODO: there is RAS on this car!
      ret.centerToFront = ret.wheelbase * 0.465
    # hardcoding honda civic 2016 touring params so they can be used to
    # scale unknown params for other cars
    mass_civic = 2923. * CV.LB_TO_KG + std_cargo
    wheelbase_civic = 2.70
    centerToFront_civic = wheelbase_civic * 0.4
    centerToRear_civic = wheelbase_civic - centerToFront_civic
    rotationalInertia_civic = 2500
    tireStiffnessFront_civic = 85400
    tireStiffnessRear_civic = 90000
    centerToRear = ret.wheelbase - ret.centerToFront
    # TODO: get actual value, for now starting with reasonable value for
    # civic and scaling by mass and wheelbase
    ret.rotationalInertia = rotationalInertia_civic * \
                            ret.mass * ret.wheelbase**2 / (mass_civic * wheelbase_civic**2)
    # TODO: start from empirically derived lateral slip stiffness for the civic and scale by
    # mass and CG position, so all cars will have approximately similar dyn behaviors
    ret.tireStiffnessFront = tireStiffnessFront_civic * \
                             ret.mass / mass_civic * \
                             (centerToRear / ret.wheelbase) / (centerToRear_civic / wheelbase_civic)
    ret.tireStiffnessRear = tireStiffnessRear_civic * \
                            ret.mass / mass_civic * \
                            (ret.centerToFront / ret.wheelbase) / (centerToFront_civic / wheelbase_civic)
    # same tuning for Volt and CT6 for now
    ret.steerKiBP, ret.steerKpBP = [[0.], [0.]]
    ret.steerKpV, ret.steerKiV = [[0.2], [0.00]]
    ret.steerKf = 0.00004   # full torque for 20 deg at 80mph means 0.00007818594
    ret.steerMaxBP = [0.]  # m/s
    ret.steerMaxV = [1.]
    ret.gasMaxBP = [0.]
    ret.gasMaxV = [.5]
    ret.brakeMaxBP = [0.]
    ret.brakeMaxV = [1.]
    ret.longPidDeadzoneBP = [0.]
    ret.longPidDeadzoneV = [0.]
    ret.longitudinalKpBP = [5., 35.]
    ret.longitudinalKpV = [2.4, 1.5]
    ret.longitudinalKiBP = [0.]
    ret.longitudinalKiV = [0.36]
    ret.steerLimitAlert = True
    ret.stoppingControl = True
    ret.startAccel = 0.8
    ret.steerActuatorDelay = 0.1  # Default delay, not measured yet
    ret.steerRateCost = 1.0
    ret.steerControlType = car.CarParams.SteerControlType.torque
    return ret
  # returns a car.CarState
  def update(self, c):
    """Parse the latest powertrain CAN data into an immutable car.CarState,
    deriving cruise-button events and safety/enable events."""
    self.pt_cp.update(int(sec_since_boot() * 1e9), False)
    self.CS.update(self.pt_cp)
    # create message
    ret = car.CarState.new_message()
    # speeds
    ret.vEgo = self.CS.v_ego
    ret.aEgo = self.CS.a_ego
    ret.vEgoRaw = self.CS.v_ego_raw
    ret.yawRate = self.VM.yaw_rate(self.CS.angle_steers * CV.DEG_TO_RAD, self.CS.v_ego)
    ret.standstill = self.CS.standstill
    ret.wheelSpeeds.fl = self.CS.v_wheel_fl
    ret.wheelSpeeds.fr = self.CS.v_wheel_fr
    ret.wheelSpeeds.rl = self.CS.v_wheel_rl
    ret.wheelSpeeds.rr = self.CS.v_wheel_rr
    # gas pedal information.
    ret.gas = self.CS.pedal_gas / 254.0
    ret.gasPressed = self.CS.user_gas_pressed
    # brake pedal
    ret.brake = self.CS.user_brake / 0xd0
    ret.brakePressed = self.CS.brake_pressed
    # steering wheel
    ret.steeringAngle = self.CS.angle_steers
    # torque and user override. Driver awareness
    # timer resets when the user uses the steering wheel.
    ret.steeringPressed = self.CS.steer_override
    ret.steeringTorque = self.CS.steer_torque_driver
    # cruise state
    ret.cruiseState.available = bool(self.CS.main_on)
    cruiseEnabled = self.CS.pcm_acc_status != 0
    ret.cruiseState.enabled = cruiseEnabled
    ret.cruiseState.standstill = False
    ret.leftBlinker = self.CS.left_blinker_on
    ret.rightBlinker = self.CS.right_blinker_on
    ret.doorOpen = not self.CS.door_all_closed
    ret.seatbeltUnlatched = not self.CS.seatbelt
    ret.gearShifter = self.CS.gear_shifter
    buttonEvents = []
    # blinkers
    if self.CS.left_blinker_on != self.CS.prev_left_blinker_on:
      be = car.CarState.ButtonEvent.new_message()
      be.type = 'leftBlinker'
      be.pressed = self.CS.left_blinker_on
      buttonEvents.append(be)
    if self.CS.right_blinker_on != self.CS.prev_right_blinker_on:
      be = car.CarState.ButtonEvent.new_message()
      be.type = 'rightBlinker'
      be.pressed = self.CS.right_blinker_on
      buttonEvents.append(be)
    if self.CS.cruise_buttons != self.CS.prev_cruise_buttons:
      be = car.CarState.ButtonEvent.new_message()
      be.type = 'unknown'
      if self.CS.cruise_buttons != CruiseButtons.UNPRESS:
        be.pressed = True
        but = self.CS.cruise_buttons
      else:
        be.pressed = False
        but = self.CS.prev_cruise_buttons
      if but == CruiseButtons.RES_ACCEL:
        if not (cruiseEnabled and self.CS.standstill):
          be.type = 'accelCruise' # Suppress resume button if we're resuming from stop so we don't adjust speed.
      elif but == CruiseButtons.DECEL_SET:
        be.type = 'decelCruise'
      elif but == CruiseButtons.CANCEL:
        be.type = 'cancel'
      elif but == CruiseButtons.MAIN:
        be.type = 'altButton3'
      buttonEvents.append(be)
    ret.buttonEvents = buttonEvents
    events = []
    if not self.CS.can_valid:
      self.can_invalid_count += 1
      if self.can_invalid_count >= 5:
        events.append(create_event('commIssue', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
    else:
      self.can_invalid_count = 0
    if self.CS.steer_error:
      events.append(create_event('steerUnavailable', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE, ET.PERMANENT]))
    if self.CS.steer_not_allowed:
      events.append(create_event('steerTempUnavailable', [ET.NO_ENTRY, ET.WARNING]))
    if ret.doorOpen:
      events.append(create_event('doorOpen', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
    if ret.seatbeltUnlatched:
      events.append(create_event('seatbeltNotLatched', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
    if self.CS.car_fingerprint == CAR.VOLT:
      if self.CS.brake_error:
        events.append(create_event('brakeUnavailable', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE, ET.PERMANENT]))
      if not self.CS.gear_shifter_valid:
        events.append(create_event('wrongGear', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
      if self.CS.esp_disabled:
        events.append(create_event('espDisabled', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
      if not self.CS.main_on:
        events.append(create_event('wrongCarMode', [ET.NO_ENTRY, ET.USER_DISABLE]))
      # raw gear value 3 corresponds to reverse (matches the event raised)
      if self.CS.gear_shifter == 3:
        events.append(create_event('reverseGear', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
      if ret.vEgo < self.CP.minEnableSpeed:
        events.append(create_event('speedTooLow', [ET.NO_ENTRY]))
      if self.CS.park_brake:
        events.append(create_event('parkBrake', [ET.NO_ENTRY, ET.USER_DISABLE]))
    # disable on pedals rising edge or when brake is pressed and speed isn't zero
    if (ret.gasPressed and not self.gas_pressed_prev) or \
       (ret.brakePressed): # and (not self.brake_pressed_prev or ret.vEgo > 0.001)):
      events.append(create_event('pedalPressed', [ET.NO_ENTRY, ET.USER_DISABLE]))
    if ret.gasPressed:
      events.append(create_event('pedalPressed', [ET.PRE_ENABLE]))
    # handle button presses
    for b in ret.buttonEvents:
      # do enable on both accel and decel buttons
      if b.type in ["accelCruise", "decelCruise"] and not b.pressed:
        events.append(create_event('buttonEnable', [ET.ENABLE]))
      # do disable on button down
      if b.type == "cancel" and b.pressed:
        events.append(create_event('buttonCancel', [ET.USER_DISABLE]))
    if self.CS.car_fingerprint == CAR.CADILLAC_CT6:
      if self.CS.acc_active and not self.acc_active_prev:
        events.append(create_event('pcmEnable', [ET.ENABLE]))
      if not self.CS.acc_active:
        events.append(create_event('pcmDisable', [ET.USER_DISABLE]))
    ret.events = events
    # update previous brake/gas pressed
    self.acc_active_prev = self.CS.acc_active
    self.gas_pressed_prev = ret.gasPressed
    self.brake_pressed_prev = ret.brakePressed
    # cast to reader so it can't be modified
    return ret.as_reader()
  # pass in a car.CarControl
  # to be called @ 100hz
  def apply(self, c, perception_state=log.Live20Data.new_message()):
    """Forward one CarControl command (actuators, HUD, chimes) to the
    CarController; increments the frame counter every call."""
    hud_v_cruise = c.hudControl.setSpeed
    if hud_v_cruise > 70:
      hud_v_cruise = 0
    chime, chime_count = {
      "none": (0, 0),
      "beepSingle": (CM.HIGH_CHIME, 1),
      "beepTriple": (CM.HIGH_CHIME, 3),
      "beepRepeated": (CM.LOW_CHIME, -1),
      "chimeSingle": (CM.LOW_CHIME, 1),
      "chimeDouble": (CM.LOW_CHIME, 2),
      "chimeRepeated": (CM.LOW_CHIME, -1),
      "chimeContinuous": (CM.LOW_CHIME, -1)}[str(c.hudControl.audibleAlert)]
    # For Openpilot, "enabled" includes pre-enable.
    # In GM, PCM faults out if ACC command overlaps user gas.
    enabled = c.enabled and not self.CS.user_gas_pressed
    self.CC.update(self.sendcan, enabled, self.CS, self.frame, \
                   c.actuators,
                   hud_v_cruise, c.hudControl.lanesVisible, \
                   c.hudControl.leadVisible, \
                   chime, chime_count)
    self.frame += 1
| 36.170659 | 112 | 0.681401 |
from cereal import car, log
from common.realtime import sec_since_boot
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.drive_helpers import create_event, EventTypes as ET
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.car.gm.values import DBC, CAR, STOCK_CONTROL_MSGS
from selfdrive.car.gm.carstate import CarState, CruiseButtons, get_powertrain_can_parser
try:
from selfdrive.car.gm.carcontroller import CarController
except ImportError:
CarController = None
class CM:
  """Sound codes for car chimes/beeps; values are the raw bytes sent
  with the HUD alert."""
  TOCK = 0x81
  TICK = 0x82
  LOW_BEEP = 0x84
  HIGH_BEEP = 0x85
  LOW_CHIME = 0x86
  HIGH_CHIME = 0x87
class CanBus(object):
  """Fixed CAN bus index assignments for the GM harness."""

  def __init__(self):
    # powertrain, object (radar) bus, chassis, single-wire GMLAN
    self.powertrain, self.obstacle = 0, 1
    self.chassis, self.sw_gmlan = 2, 3
class CarInterface(object):
  """openpilot car interface for GM: builds CarParams for supported models,
  translates powertrain CAN state into car.CarState, and forwards
  CarControl commands to the CarController."""

  def __init__(self, CP, sendcan=None):
    """Wire up state, vehicle model and CAN parser; the CarController is
    only created when *sendcan* is provided (i.e. not read-only)."""
    self.CP = CP
    self.frame = 0
    self.gas_pressed_prev = False
    self.brake_pressed_prev = False
    self.can_invalid_count = 0
    self.acc_active_prev = 0
    canbus = CanBus()
    self.CS = CarState(CP, canbus)
    self.VM = VehicleModel(CP)
    self.pt_cp = get_powertrain_can_parser(CP, canbus)
    self.ch_cp_dbc_name = DBC[CP.carFingerprint]['chassis']
    if sendcan is not None:
      self.sendcan = sendcan
      self.CC = CarController(canbus, CP.carFingerprint, CP.enableCamera)
  @staticmethod
  def compute_gb(accel, speed):
    """Map a desired acceleration to the normalized gas/brake command
    (plain accel / 4, independent of speed)."""
    return float(accel) / 4.0
  @staticmethod
  def calc_accel_override(a_ego, a_target, v_ego, v_target):
    """Always 1.0: no accel-command reduction is applied on GM."""
    return 1.0
  @staticmethod
  def get_params(candidate, fingerprint):
    """Build static CarParams for *candidate* (VOLT or CADILLAC_CT6),
    scaling inertia/tire stiffness from 2016 Civic baseline values."""
    ret = car.CarParams.new_message()
    ret.carName = "gm"
    ret.carFingerprint = candidate
    ret.enableCruise = False
    # camera is passive when any stock control message is on the bus
    ret.enableCamera = not any(x for x in STOCK_CONTROL_MSGS[candidate] if x in fingerprint)
    std_cargo = 136
    if candidate == CAR.VOLT:
      ret.minEnableSpeed = 18 * CV.MPH_TO_MS
      ret.mass = 1607 + std_cargo
      ret.safetyModel = car.CarParams.SafetyModels.gm
      ret.wheelbase = 2.69
      ret.steerRatio = 15.7
      ret.steerRatioRear = 0.
      ret.centerToFront = ret.wheelbase * 0.4
    elif candidate == CAR.CADILLAC_CT6:
      ret.minEnableSpeed = -1
      ret.mass = 4016. * CV.LB_TO_KG + std_cargo
      ret.safetyModel = car.CarParams.SafetyModels.cadillac
      ret.wheelbase = 3.11
      ret.steerRatio = 14.6
      ret.steerRatioRear = 0. # TODO: there is RAS on this car!
      ret.centerToFront = ret.wheelbase * 0.465
    # hardcoding honda civic 2016 touring params so they can be used to
    # scale unknown params for other cars
    mass_civic = 2923. * CV.LB_TO_KG + std_cargo
    wheelbase_civic = 2.70
    centerToFront_civic = wheelbase_civic * 0.4
    centerToRear_civic = wheelbase_civic - centerToFront_civic
    rotationalInertia_civic = 2500
    tireStiffnessFront_civic = 85400
    tireStiffnessRear_civic = 90000
    centerToRear = ret.wheelbase - ret.centerToFront
    # TODO: get actual value, for now starting with reasonable value for
    # civic and scaling by mass and wheelbase
    ret.rotationalInertia = rotationalInertia_civic * \
                            ret.mass * ret.wheelbase**2 / (mass_civic * wheelbase_civic**2)
    # TODO: start from empirically derived lateral slip stiffness for the civic and scale by
    # mass and CG position, so all cars will have approximately similar dyn behaviors
    ret.tireStiffnessFront = tireStiffnessFront_civic * \
                             ret.mass / mass_civic * \
                             (centerToRear / ret.wheelbase) / (centerToRear_civic / wheelbase_civic)
    ret.tireStiffnessRear = tireStiffnessRear_civic * \
                            ret.mass / mass_civic * \
                            (ret.centerToFront / ret.wheelbase) / (centerToFront_civic / wheelbase_civic)
    # same tuning for Volt and CT6 for now
    ret.steerKiBP, ret.steerKpBP = [[0.], [0.]]
    ret.steerKpV, ret.steerKiV = [[0.2], [0.00]]
    ret.steerKf = 0.00004   # full torque for 20 deg at 80mph means 0.00007818594
    ret.steerMaxBP = [0.]  # m/s
    ret.steerMaxV = [1.]
    ret.gasMaxBP = [0.]
    ret.gasMaxV = [.5]
    ret.brakeMaxBP = [0.]
    ret.brakeMaxV = [1.]
    ret.longPidDeadzoneBP = [0.]
    ret.longPidDeadzoneV = [0.]
    ret.longitudinalKpBP = [5., 35.]
    ret.longitudinalKpV = [2.4, 1.5]
    ret.longitudinalKiBP = [0.]
    ret.longitudinalKiV = [0.36]
    ret.steerLimitAlert = True
    ret.stoppingControl = True
    ret.startAccel = 0.8
    ret.steerActuatorDelay = 0.1  # Default delay, not measured yet
    ret.steerRateCost = 1.0
    ret.steerControlType = car.CarParams.SteerControlType.torque
    return ret
  # returns a car.CarState
  def update(self, c):
    """Parse the latest powertrain CAN data into an immutable car.CarState,
    deriving cruise-button events and safety/enable events."""
    self.pt_cp.update(int(sec_since_boot() * 1e9), False)
    self.CS.update(self.pt_cp)
    # create message
    ret = car.CarState.new_message()
    # speeds
    ret.vEgo = self.CS.v_ego
    ret.aEgo = self.CS.a_ego
    ret.vEgoRaw = self.CS.v_ego_raw
    ret.yawRate = self.VM.yaw_rate(self.CS.angle_steers * CV.DEG_TO_RAD, self.CS.v_ego)
    ret.standstill = self.CS.standstill
    ret.wheelSpeeds.fl = self.CS.v_wheel_fl
    ret.wheelSpeeds.fr = self.CS.v_wheel_fr
    ret.wheelSpeeds.rl = self.CS.v_wheel_rl
    ret.wheelSpeeds.rr = self.CS.v_wheel_rr
    # gas pedal information.
    ret.gas = self.CS.pedal_gas / 254.0
    ret.gasPressed = self.CS.user_gas_pressed
    # brake pedal
    ret.brake = self.CS.user_brake / 0xd0
    ret.brakePressed = self.CS.brake_pressed
    # steering wheel
    ret.steeringAngle = self.CS.angle_steers
    # torque and user override. Driver awareness
    # timer resets when the user uses the steering wheel.
    ret.steeringPressed = self.CS.steer_override
    ret.steeringTorque = self.CS.steer_torque_driver
    # cruise state
    ret.cruiseState.available = bool(self.CS.main_on)
    cruiseEnabled = self.CS.pcm_acc_status != 0
    ret.cruiseState.enabled = cruiseEnabled
    ret.cruiseState.standstill = False
    ret.leftBlinker = self.CS.left_blinker_on
    ret.rightBlinker = self.CS.right_blinker_on
    ret.doorOpen = not self.CS.door_all_closed
    ret.seatbeltUnlatched = not self.CS.seatbelt
    ret.gearShifter = self.CS.gear_shifter
    buttonEvents = []
    # blinkers
    if self.CS.left_blinker_on != self.CS.prev_left_blinker_on:
      be = car.CarState.ButtonEvent.new_message()
      be.type = 'leftBlinker'
      be.pressed = self.CS.left_blinker_on
      buttonEvents.append(be)
    if self.CS.right_blinker_on != self.CS.prev_right_blinker_on:
      be = car.CarState.ButtonEvent.new_message()
      be.type = 'rightBlinker'
      be.pressed = self.CS.right_blinker_on
      buttonEvents.append(be)
    if self.CS.cruise_buttons != self.CS.prev_cruise_buttons:
      be = car.CarState.ButtonEvent.new_message()
      be.type = 'unknown'
      if self.CS.cruise_buttons != CruiseButtons.UNPRESS:
        be.pressed = True
        but = self.CS.cruise_buttons
      else:
        be.pressed = False
        but = self.CS.prev_cruise_buttons
      if but == CruiseButtons.RES_ACCEL:
        if not (cruiseEnabled and self.CS.standstill):
          be.type = 'accelCruise' # Suppress resume button if we're resuming from stop so we don't adjust speed.
      elif but == CruiseButtons.DECEL_SET:
        be.type = 'decelCruise'
      elif but == CruiseButtons.CANCEL:
        be.type = 'cancel'
      elif but == CruiseButtons.MAIN:
        be.type = 'altButton3'
      buttonEvents.append(be)
    ret.buttonEvents = buttonEvents
    events = []
    if not self.CS.can_valid:
      self.can_invalid_count += 1
      if self.can_invalid_count >= 5:
        events.append(create_event('commIssue', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
    else:
      self.can_invalid_count = 0
    if self.CS.steer_error:
      events.append(create_event('steerUnavailable', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE, ET.PERMANENT]))
    if self.CS.steer_not_allowed:
      events.append(create_event('steerTempUnavailable', [ET.NO_ENTRY, ET.WARNING]))
    if ret.doorOpen:
      events.append(create_event('doorOpen', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
    if ret.seatbeltUnlatched:
      events.append(create_event('seatbeltNotLatched', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
    if self.CS.car_fingerprint == CAR.VOLT:
      if self.CS.brake_error:
        events.append(create_event('brakeUnavailable', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE, ET.PERMANENT]))
      if not self.CS.gear_shifter_valid:
        events.append(create_event('wrongGear', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
      if self.CS.esp_disabled:
        events.append(create_event('espDisabled', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
      if not self.CS.main_on:
        events.append(create_event('wrongCarMode', [ET.NO_ENTRY, ET.USER_DISABLE]))
      # raw gear value 3 corresponds to reverse (matches the event raised)
      if self.CS.gear_shifter == 3:
        events.append(create_event('reverseGear', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
      if ret.vEgo < self.CP.minEnableSpeed:
        events.append(create_event('speedTooLow', [ET.NO_ENTRY]))
      if self.CS.park_brake:
        events.append(create_event('parkBrake', [ET.NO_ENTRY, ET.USER_DISABLE]))
    # disable on pedals rising edge or when brake is pressed and speed isn't zero
    if (ret.gasPressed and not self.gas_pressed_prev) or \
       (ret.brakePressed):
      events.append(create_event('pedalPressed', [ET.NO_ENTRY, ET.USER_DISABLE]))
    if ret.gasPressed:
      events.append(create_event('pedalPressed', [ET.PRE_ENABLE]))
    # enable on accel/decel button release, disable on cancel press
    for b in ret.buttonEvents:
      if b.type in ["accelCruise", "decelCruise"] and not b.pressed:
        events.append(create_event('buttonEnable', [ET.ENABLE]))
      if b.type == "cancel" and b.pressed:
        events.append(create_event('buttonCancel', [ET.USER_DISABLE]))
    if self.CS.car_fingerprint == CAR.CADILLAC_CT6:
      if self.CS.acc_active and not self.acc_active_prev:
        events.append(create_event('pcmEnable', [ET.ENABLE]))
      if not self.CS.acc_active:
        events.append(create_event('pcmDisable', [ET.USER_DISABLE]))
    ret.events = events
    # remember previous pedal/ACC state for edge detection next cycle
    self.acc_active_prev = self.CS.acc_active
    self.gas_pressed_prev = ret.gasPressed
    self.brake_pressed_prev = ret.brakePressed
    # cast to reader so callers cannot mutate the state
    return ret.as_reader()
  # pass in a car.CarControl
  # to be called @ 100hz
  def apply(self, c, perception_state=log.Live20Data.new_message()):
    """Forward one CarControl command (actuators, HUD, chimes) to the
    CarController; increments the frame counter every call."""
    hud_v_cruise = c.hudControl.setSpeed
    if hud_v_cruise > 70:
      hud_v_cruise = 0
    chime, chime_count = {
      "none": (0, 0),
      "beepSingle": (CM.HIGH_CHIME, 1),
      "beepTriple": (CM.HIGH_CHIME, 3),
      "beepRepeated": (CM.LOW_CHIME, -1),
      "chimeSingle": (CM.LOW_CHIME, 1),
      "chimeDouble": (CM.LOW_CHIME, 2),
      "chimeRepeated": (CM.LOW_CHIME, -1),
      "chimeContinuous": (CM.LOW_CHIME, -1)}[str(c.hudControl.audibleAlert)]
    # For Openpilot, "enabled" includes pre-enable.
    # In GM, PCM faults out if ACC command overlaps user gas.
    enabled = c.enabled and not self.CS.user_gas_pressed
    self.CC.update(self.sendcan, enabled, self.CS, self.frame, \
                   c.actuators,
                   hud_v_cruise, c.hudControl.lanesVisible, \
                   c.hudControl.leadVisible, \
                   chime, chime_count)
    self.frame += 1
| true | true |
f7fb43c6f0c42872270e32e5c6024a860a8c3c49 | 1,037 | py | Python | bighack/urls.py | mithron/jobalance | ad14a1b5f1e6163a7db26c274985d36078e8da4a | [
"MIT"
] | 1 | 2017-11-20T11:26:57.000Z | 2017-11-20T11:26:57.000Z | bighack/urls.py | mithron/jobalance | ad14a1b5f1e6163a7db26c274985d36078e8da4a | [
"MIT"
] | null | null | null | bighack/urls.py | mithron/jobalance | ad14a1b5f1e6163a7db26c274985d36078e8da4a | [
"MIT"
] | 2 | 2019-02-17T19:45:16.000Z | 2019-08-10T07:45:29.000Z | """bighack URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url, include
from django.contrib import admin
from api.views import FrontendAppView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/v0/', include('api.urls', namespace='api', app_name='api')),
url(r'^$', FrontendAppView.as_view()),
url(r'^index.html', FrontendAppView.as_view()),
]
| 38.407407 | 79 | 0.700096 | from django.conf.urls import url, include
from django.contrib import admin
from api.views import FrontendAppView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/v0/', include('api.urls', namespace='api', app_name='api')),
url(r'^$', FrontendAppView.as_view()),
url(r'^index.html', FrontendAppView.as_view()),
]
| true | true |
f7fb4411c4f43a8dcf03cea84fecf403f7ca7050 | 1,986 | py | Python | src/tensorflow/igibson/utils/navigate_env.py | suresh-guttikonda/sim-environment | cc8faec17714d58c0e1f0227c8b7d4cf8817a136 | [
"Apache-2.0"
] | null | null | null | src/tensorflow/igibson/utils/navigate_env.py | suresh-guttikonda/sim-environment | cc8faec17714d58c0e1f0227c8b7d4cf8817a136 | [
"Apache-2.0"
] | null | null | null | src/tensorflow/igibson/utils/navigate_env.py | suresh-guttikonda/sim-environment | cc8faec17714d58c0e1f0227c8b7d4cf8817a136 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
from gibson2.envs.igibson_env import iGibsonEnv
from gibson2.utils.utils import l2_distance
from utils import datautils
import numpy as np
import gym
class NavigateGibsonEnv(iGibsonEnv):
    """iGibson navigation environment with a flattened observation vector.

    Observations concatenate the robot's proprioceptive state with a
    processed, flattened RGB image (the observation-space size below
    assumes 18 proprioceptive features and a 56x56x3 processed image —
    TODO confirm against ``datautils.process_raw_image``).  The reward is
    additionally shaped by the negative Euclidean distance between the
    robot and the task target.
    """

    def __init__(
        self,
        config_file,
        scene_id=None,
        mode='headless',
        action_timestep=1 / 10.0,
        physics_timestep=1 / 240.0,
        device_idx=0,
        render_to_tensor=False,
        automatic_reset=False,
    ):
        """Construct the underlying iGibson environment and override the
        observation space with a single flat Box.

        Parameters mirror :class:`iGibsonEnv` and are forwarded unchanged.
        """
        super(NavigateGibsonEnv, self).__init__(config_file=config_file,
                                        scene_id=scene_id,
                                        mode=mode,
                                        action_timestep=action_timestep,
                                        physics_timestep=physics_timestep,
                                        device_idx=device_idx,
                                        render_to_tensor=render_to_tensor,
                                        automatic_reset=automatic_reset)

        # 18 proprioceptive features + flattened 56x56x3 processed RGB image.
        output_size = 18 + np.prod((56, 56, 3))

        self.observation_space = gym.spaces.Box(
                low=-np.inf, high=np.inf,
                shape=(output_size, ),
                dtype=np.float32)

    def step(self, action):
        """Advance one timestep; return the flattened observation and a
        distance-shaped reward (base reward minus 2-D distance to target)."""
        state, reward, done, info = super(NavigateGibsonEnv, self).step(action)

        custom_state = self._flatten_observation(state)

        # Distance-based reward shaping: penalize the remaining planar
        # distance between the robot and the task's target position.
        reward = reward - l2_distance(
                        self.robots[0].get_position()[:2],
                        self.task.target_pos[:2]
        )

        return custom_state, reward, done, info

    def reset(self):
        """Reset the simulator and return the flattened initial observation."""
        state = super(NavigateGibsonEnv, self).reset()

        return self._flatten_observation(state)

    def _flatten_observation(self, state):
        """Build the flat observation vector shared by step() and reset().

        Concatenates the robot's proprioceptive state with the processed,
        flattened RGB frame.  (Previously duplicated in both methods.)
        """
        rgb = datautils.process_raw_image(state['rgb'])
        return np.concatenate([self.robots[0].calc_state(),
                               np.reshape(rgb, [-1])], 0)
| 30.553846 | 79 | 0.574522 |
from gibson2.envs.igibson_env import iGibsonEnv
from gibson2.utils.utils import l2_distance
from utils import datautils
import numpy as np
import gym
class NavigateGibsonEnv(iGibsonEnv):
def __init__(
self,
config_file,
scene_id=None,
mode='headless',
action_timestep=1 / 10.0,
physics_timestep=1 / 240.0,
device_idx=0,
render_to_tensor=False,
automatic_reset=False,
):
super(NavigateGibsonEnv, self).__init__(config_file=config_file,
scene_id=scene_id,
mode=mode,
action_timestep=action_timestep,
physics_timestep=physics_timestep,
device_idx=device_idx,
render_to_tensor=render_to_tensor,
automatic_reset=automatic_reset)
output_size = 18 + np.prod((56, 56, 3))
self.observation_space = gym.spaces.Box(
low=-np.inf, high=np.inf,
shape=(output_size, ),
dtype=np.float32)
def step(self, action):
state, reward, done, info = super(NavigateGibsonEnv, self).step(action)
rgb = datautils.process_raw_image(state['rgb'])
custom_state = np.concatenate([self.robots[0].calc_state(),
np.reshape(rgb, [-1])], 0)
reward = reward - l2_distance(
self.robots[0].get_position()[:2],
self.task.target_pos[:2]
)
return custom_state, reward, done, info
def reset(self):
state = super(NavigateGibsonEnv, self).reset()
rgb = datautils.process_raw_image(state['rgb'])
custom_state = np.concatenate([self.robots[0].calc_state(),
np.reshape(rgb, [-1])], 0)
return custom_state
| true | true |
f7fb447bb5566e988140086dd1124b76c763f4b7 | 13,416 | py | Python | src/framed/bioreactor/base.py | cdanielmachado/framed | 36d56437685cbf5c7c3c8ee4f6d85b8f05f4d345 | [
"Apache-2.0"
] | 25 | 2015-01-07T16:17:03.000Z | 2022-01-24T09:11:50.000Z | src/framed/bioreactor/base.py | cdanielmachado/framed | 36d56437685cbf5c7c3c8ee4f6d85b8f05f4d345 | [
"Apache-2.0"
] | 12 | 2016-02-18T12:50:09.000Z | 2020-12-18T08:56:44.000Z | src/framed/bioreactor/base.py | cdanielmachado/framed | 36d56437685cbf5c7c3c8ee4f6d85b8f05f4d345 | [
"Apache-2.0"
] | 14 | 2015-02-17T14:55:27.000Z | 2021-08-09T17:57:57.000Z | """ This module defines the base classes used for modeling and analyzing bioreactors
Author: Kai Zhuang
"""
from __future__ import print_function
from builtins import object
__author__ = 'kaizhuang'
from copy import deepcopy
from ..solvers import solver_instance
from framed.solvers.solution import Status
from scipy.integrate import ode
import numpy
import collections
import warnings
class Organism(object):
    """
    Organism describes a generic biological organism wrapped around a
    constraint-based (FBA) model.
    """

    def __init__(self, model, id=None, fba_objective=None, fba_constraints=None, model_deepcopy=True):
        """
        :param model: the mathematical model of the organism
        :param id: identifier of the organism; defaults to ``model.id``
        :param fba_objective (dict): the FBA objective function (only useful if model
            is an FBA model); defaults to maximizing the model's biomass reaction
        :param fba_constraints (dict): non-standard FBA constraints, e.g. for creating
            knockout strains.  (Fixed: previously a mutable default argument ``{}``
            that was shared across every instance created with the default.)
        :param model_deepcopy (bool): if True, a deepcopy of the model is stored inside
            the Organism instance; otherwise a reference to the model is stored
        :return: none
        """
        if model_deepcopy:
            self.model = deepcopy(model)
        else:
            self.model = model

        if id:
            self.id = id
        else:
            self.id = model.id

        if fba_objective:
            self.fba_objective = fba_objective
        else:
            self.fba_objective = {model.biomass_reaction: 1}

        # Each instance gets its own constraints dict unless one is supplied,
        # avoiding the shared-mutable-default pitfall.
        self.fba_constraints = fba_constraints if fba_constraints is not None else {}

        self.fba_solution = []
        self.environment = None  # not placed in any environment until added to one

    def update(self):
        """
        Change the internal state of the organism.
            - Called at each integration step.
            - One usage is adapting FBA uptake constraints to the current
              metabolite concentrations.

        ** abstract method: must be implemented in strain-specific subclasses **
        """
        raise NotImplementedError
class Environment(object):
    """
    A generic environment holding a collection of organisms and metabolites.
    """

    def __init__(self):
        self.organisms = []
        self.metabolites = []
        self.initial_conditions = []

    def update(self):
        """Advance the environment's internal state (called every integration step).

        ** abstract method: must be implemented for specific environments **
        """
        raise NotImplementedError("update() method must be implemented for the specific environment")

    def add_organism(self, organism):
        """Register a single organism and point it back at this environment."""
        organism.environment = self
        self.organisms.append(organism)

    def add_organisms(self, organisms):
        """Register several organisms at once."""
        for single_organism in organisms:
            self.add_organism(single_organism)

    def set_organisms(self, organisms):
        """Replace the current organisms with the given collection."""
        self.organisms = []
        self.add_organisms(organisms)

    def add_metabolite(self, metabolite):
        """Register a single metabolite identifier."""
        self.metabolites.append(metabolite)

    def add_metabolites(self, metabolites):
        """Register several metabolite identifiers at once."""
        for single_metabolite in metabolites:
            self.add_metabolite(single_metabolite)

    def set_metabolites(self, metabolites):
        """Replace the current metabolites with the given collection."""
        self.metabolites = []
        self.add_metabolites(metabolites)
class DynamicSystem(object):
    """
    Abstract base class for dynamic (ODE-driven) systems.

    Subclasses supply the right-hand side of the ODE system and may
    optionally provide an analytical integration routine.
    """

    def ode_RHS(self, y, t):
        """
        Right-hand side of the system of ODEs describing the dynamic
        multi-species system.

        :param y: state variables such as liquid volume, biomass concentrations,
            and metabolite concentrations
        :param t: time
        :return:

        ** abstract method: must be implemented for specific environments **

        NOTE(review): the numerical integrator below calls ``self._ode_RHS``
        with ``(t, y)`` argument order (as scipy's ``ode`` expects), which
        concrete subclasses such as Bioreactor define; this public
        ``ode_RHS(y, t)`` stub is never invoked by the integrators — confirm
        which name subclasses are meant to implement.
        """
        raise NotImplementedError("the RHS of the ODE must be described for the each specific environment")

    def integrate(self, t0, tf, dt, initial_conditions, solver, verbose=False):
        """
        Solve the ODE of the dynamic system using the designated solver.

        :param t0: initial time
        :param tf: final time
        :param dt: time step
        :param initial_conditions (array-like): initial conditions of the ODE system
        :param solver: 'analytical' to use the subclass's analytical routine
            (falling back to the numerical 'dopri5' solver if it is not
            implemented), otherwise the name of a scipy.integrate.ode
            integrator (e.g. 'dopri5', 'vode')
        :param verbose: if True, print the current time at every step
        :return: (t, y) arrays of time points and corresponding states
        """
        if solver == 'analytical':
            try:
                t, y = self.analytical_integrator(t0, tf, dt, initial_conditions, solver, verbose)
            except NotImplementedError:
                # Fall back to a numerical solver when no analytical routine
                # exists.  (Fixes two bugs: the warn() call used '.' instead of
                # ',' before FutureWarning — a SyntaxError — and the fallback
                # omitted the required ``verbose`` argument.)
                warnings.warn('analytical solver have no been implemented yet. will use numerical solver dopri5',
                              FutureWarning)
                t, y = self.numerical_integrator(t0, tf, dt, initial_conditions, 'dopri5', verbose)
        else:
            t, y = self.numerical_integrator(t0, tf, dt, initial_conditions, solver, verbose)

        return t, y

    def numerical_integrator(self, t0, tf, dt, initial_conditions, solver, verbose):
        """
        Integrate the ODE of the dynamic system with a scipy.integrate.ode
        numerical solver, stepping from t0 to tf in increments of dt.

        Returns (t, y) as numpy arrays, where y[k] is the state at t[k].
        """
        f = self._ode_RHS
        if initial_conditions:
            y0 = initial_conditions
        else:
            # Fall back to the instance's own initial conditions
            # (provided by subclasses such as Environment/Bioreactor).
            y0 = self.initial_conditions

        MdFBA_ode = ode(f).set_integrator(solver)
        MdFBA_ode.set_initial_value(y0, t0)

        t = [t0]
        y = [y0]
        while MdFBA_ode.successful() and MdFBA_ode.t < tf:
            MdFBA_ode.integrate(MdFBA_ode.t + dt)
            t.append(MdFBA_ode.t)
            y.append(MdFBA_ode.y)
            if verbose:
                print(MdFBA_ode.t)

        t = numpy.array(t)
        y = numpy.array(y)

        return t, y

    def analytical_integrator(self, t0, tf, dt, initial_conditions, solver, verbose):
        """
        Integrate the ODE of the dynamic system using a user-defined
        analytical method.

        ** abstract method: must be implemented for specific dynamic systems **
        """
        raise NotImplementedError
class Bioreactor(Environment, DynamicSystem):
    """
    A generic bioreactor with one influent (feed) stream and one effluent stream.

    State vector layout used by the ODE right-hand side:
        y[0]                     -> liquid volume V [L]
        y[1 : n_organisms + 1]   -> biomass concentrations X [g/L]
        y[n_organisms + 1 : ]    -> metabolite concentrations S [mmol/L]
    """

    def __init__(self, organisms=[], metabolites=[], id='Generic Bioreactor', flow_rate_in=0, flow_rate_out=0,
                 volume_max=None, Xfeed=None, Sfeed=None, deltaX=None, deltaS=None, initial_conditions=[]):
        """
        :param organisms: list of Organism (a single Organism is also accepted)
        :param metabolites: list of string (a single string is also accepted)
        :param id: name of the bioreactor
        :param flow_rate_in: influent flow rate [L/hr]
        :param flow_rate_out: effluent flow rate [L/hr]
        :param volume_max (float): liquid capacity of the bioreactor [L]
        :param Xfeed: concentration of organisms in the feed stream [g/L]
        :param Sfeed: concentration of metabolites in the feed stream [mmol/L]
        :param deltaX: custom defined additive terms to dX/dt [g/L/hr]
        :param deltaS (list of float): custom defined additive terms to dS/dt [mmol/L/hr]
        :param initial_conditions: list of float, laid out as [V0] + X0 + S0
        :return:

        NOTE(review): ``organisms``, ``metabolites`` and ``initial_conditions``
        are mutable default arguments; ``initial_conditions``'s default object is
        stored on the instance, so callers should pass their own list.  Also,
        ``collections.Iterable`` moved to ``collections.abc`` in Python 3.3 and
        was removed from ``collections`` in Python 3.10 — confirm the supported
        interpreter range before upgrading.
        """
        if organisms:
            # Accept a bare Organism as well as an iterable of them.
            if not isinstance(organisms, collections.Iterable):
                organisms = [organisms]
            self.set_organisms(organisms)
        else:
            self.set_organisms([])
        if metabolites:
            # Accept a bare metabolite id as well as an iterable of them.
            if not isinstance(metabolites, collections.Iterable):
                metabolites = [metabolites]
            self.set_metabolites(metabolites)
        else:
            self.set_metabolites([])

        self.id = id
        self.flow_rate_in = flow_rate_in
        self.flow_rate_out = flow_rate_out
        self.volume_max = volume_max
        self.initial_conditions = initial_conditions
        # The feed/delta setters size their defaults from the organism and
        # metabolite lists, so they must run after set_organisms/set_metabolites.
        self.set_Xfeed(Xfeed)
        self.set_Sfeed(Sfeed)
        self.set_deltaX(deltaX)
        self.set_deltaS(deltaS)

    def set_organisms(self, organisms, Xfeed=None, deltaX=None):
        """Replace the organisms and re-derive the per-organism feed/delta vectors."""
        super(Bioreactor, self).set_organisms(organisms)
        self.set_Xfeed(Xfeed)
        self.set_deltaX(deltaX)

    def set_metabolites(self, metabolites, Sfeed=None, deltaS=None):
        """Replace the metabolites and re-derive the per-metabolite feed/delta vectors."""
        super(Bioreactor, self).set_metabolites(metabolites)
        self.set_Sfeed(Sfeed)
        self.set_deltaS(deltaS)

    def set_Xfeed(self, Xfeed):
        """Set organism feed concentrations [g/L]; defaults to zeros (sterile feed)."""
        if Xfeed:
            assert len(Xfeed) == len(self.organisms), 'The length of Xfeed should equal to the number of organisms'
            self.Xfeed = Xfeed
        else:
            self.Xfeed = numpy.zeros(len(self.organisms))

    def set_Sfeed(self, Sfeed):
        """Set metabolite feed concentrations [mmol/L]; defaults to zeros."""
        if Sfeed:
            assert len(Sfeed) == len(self.metabolites), 'The length of Sfeed should equal to the number of metabolites'
            self.Sfeed = Sfeed
        else:
            self.Sfeed = numpy.zeros(len(self.metabolites))

    def set_deltaX(self, deltaX):
        """Set custom additive dX/dt terms [g/L/hr]; defaults to zeros (no effect)."""
        if deltaX:
            self.deltaX = deltaX
        else:
            self.deltaX = numpy.zeros(len(self.organisms))

    def set_deltaS(self, deltaS):
        """Set custom additive dS/dt terms [mmol/L/hr]; defaults to zeros (no effect)."""
        if deltaS:
            self.deltaS = deltaS
        else:
            self.deltaS = numpy.zeros(len(self.metabolites))

    def set_initial_conditions(self, Vinit, Xinit, Sinit):
        """Assemble the flat initial-condition vector [V0] + X0 + S0 from three lists."""
        assert type(Vinit) == type(Xinit) == type(Sinit) == list
        self.initial_conditions = Vinit + Xinit + Sinit

    def update(self, time):
        """
        Update the internal state of the bioreactor at time ``time``.

        The base implementation only enforces the liquid-capacity limit;
        subclasses override this to vary flow rates, feed concentrations, or
        the custom deltaX/deltaS terms during a simulation.

        NOTE(review): reads ``self.V``, which is only assigned inside
        ``_ode_RHS`` — calling update() outside an integration raises
        AttributeError.
        """
        if self.volume_max:
            if self.V > self.volume_max:
                raise ValueError('liquid volume of the bioreactor exceeds volume_max.')

    def _ode_RHS(self, t, y):
        """
        The RHS of the ODEs that describe the bioreactor system.

        :param y:
                y[0]: volume
                y[1] to y[number_of_organisms]: biomass of the organisms
                y[number_of_organisms + 1] to y[-1]: concentration of metabolites
        :param t: time
        :return: dy, the time derivative of y
        """
        number_of_organisms = len(self.organisms)
        number_of_metabolites = len(self.metabolites)
        assert (len(y) == 1 + number_of_organisms + number_of_metabolites)
        dy = numpy.zeros(len(y))

        # Unpack y into class variables V, X, S (and record t as self.time)
        # so that instance methods such as update() and the organisms'
        # update() hooks can read the current state.
        self.V = y[0]
        self.X = y[1:number_of_organisms + 1]
        self.S = y[number_of_organisms + 1:]
        self.time = t

        # Growth rates and metabolic exchange fluxes, computed with FBA below.
        vs = numpy.zeros([number_of_organisms, number_of_metabolites])  # fluxes through metabolites
        mu = numpy.zeros([number_of_organisms])  # growth rates of organisms

        for i, organism in enumerate(self.organisms):
            organism.update()  # updating the internal states of the organism
            # eg. updating the uptake constraints based on metabolite concentrations
            if t == 0:
                # Lazily build one solver per organism on the first RHS call
                # and reuse it (warm-started) for all subsequent steps.
                organism.solver = solver_instance(organism.model)

            organism.fba_solution = organism.solver.solve(organism.fba_objective, minimize=False,
                                                          constraints=organism.fba_constraints)

            if organism.fba_solution.status == Status.OPTIMAL:
                mu[i] = organism.fba_solution.fobj

                for j, metabolite in enumerate(self.metabolites):
                    if metabolite in organism.model.reactions.keys():
                        vs[i, j] = organism.fba_solution.values[metabolite]
            else:
                # Infeasible FBA: treat the organism as metabolically inactive.
                mu[i] = 0

                for j, metabolite in enumerate(self.metabolites):
                    if metabolite in organism.model.reactions.keys():
                        vs[i, j] = 0

        # updating the internal states of the bioreactor
        # eg. flow rates, feed concentrations, and custom defined dX/dt and dS/dt terms
        self.update(t)

        # Mass balances: rates of change of reactor volume [L], biomass [g/L]
        # and metabolites [mmol/L] (growth/consumption + dilution + custom terms).
        dy[0] = self.flow_rate_in - self.flow_rate_out  # dV/dt [L/hr]
        dy[1:number_of_organisms + 1] = mu * self.X + self.flow_rate_in / self.V * (
            self.Xfeed - self.X) + self.deltaX  # dX/dt [g/L/hr]
        dy[number_of_organisms + 1:] = numpy.dot(self.X, vs) + self.flow_rate_in / self.V * (
            self.Sfeed - self.S) + self.deltaS  # dS/dt [mmol/L/hr]

        return dy

    def calculate_yield_from_dfba(self):
        """
        Abstract method for calculating product yield from a dFBA solution.
        Useful for certain analysis methods (eg. DySScO).
        Must be implemented for specific bioreactors.
        """
        raise NotImplementedError

    def calculate_titer_from_dfba(self):
        """
        Abstract method for calculating product titer from a dFBA solution.
        Useful for certain analysis methods (eg. DySScO).
        Must be implemented for specific bioreactors.
        """
        raise NotImplementedError

    def calculate_productivity_from_dfba(self):
        """
        Abstract method for calculating productivity from a dFBA solution.
        Useful for certain analysis methods (eg. DySScO).
        Must be implemented for specific bioreactors.
        """
        raise NotImplementedError
| 36.555858 | 128 | 0.629696 | from __future__ import print_function
from builtins import object
__author__ = 'kaizhuang'
from copy import deepcopy
from ..solvers import solver_instance
from framed.solvers.solution import Status
from scipy.integrate import ode
import numpy
import collections
import warnings
class Organism(object):
def __init__(self, model, id=None, fba_objective=None, fba_constraints={}, model_deepcopy=True):
if model_deepcopy:
self.model = deepcopy(model)
else:
self.model = model
if id:
self.id = id
else:
self.id = model.id
if fba_objective:
self.fba_objective = fba_objective
else:
self.fba_objective = {model.biomass_reaction: 1}
self.fba_constraints = fba_constraints
self.fba_solution = []
self.environment = None
def update(self):
raise NotImplementedError
class Environment(object):
def __init__(self):
self.organisms = []
self.metabolites = []
self.initial_conditions = []
def update(self):
raise NotImplementedError("update() method must be implemented for the specific environment")
def set_organisms(self, organisms):
self.organisms = []
self.add_organisms(organisms)
def set_metabolites(self, metabolites):
self.metabolites = []
self.add_metabolites(metabolites)
def add_organism(self, organism):
organism.environment = self
self.organisms.append(organism)
def add_organisms(self, organisms):
for organism in organisms:
self.add_organism(organism)
def add_metabolite(self, metabolite):
self.metabolites.append(metabolite)
def add_metabolites(self, metabolites):
for metabolite in metabolites:
self.add_metabolite(metabolite)
class DynamicSystem(object):
def ode_RHS(self, y, t):
raise NotImplementedError("the RHS of the ODE must be described for the each specific environment")
def integrate(self, t0, tf, dt, initial_conditions, solver, verbose=False):
if solver == 'analytical':
try:
t, y = self.analytical_integrator(t0, tf, dt, initial_conditions, solver, verbose)
except NotImplementedError:
warnings.warn('analytical solver have no been implemented yet. will use numerical solver dopri5'. FutureWarning)
t, y = self.numerical_integrator(t0, tf, dt, initial_conditions, solver='dopri5')
else:
t, y = self.numerical_integrator(t0, tf, dt, initial_conditions, solver, verbose)
return t, y
def numerical_integrator(self, t0, tf, dt, initial_conditions, solver, verbose):
f = self._ode_RHS
if initial_conditions:
y0 = initial_conditions
else:
y0 = self.initial_conditions
MdFBA_ode = ode(f).set_integrator(solver)
MdFBA_ode.set_initial_value(y0, t0)
t = [t0]
y = [y0]
while MdFBA_ode.successful() and MdFBA_ode.t < tf:
MdFBA_ode.integrate(MdFBA_ode.t + dt)
t.append(MdFBA_ode.t)
y.append(MdFBA_ode.y)
if verbose:
print(MdFBA_ode.t)
t = numpy.array(t)
y = numpy.array(y)
return t, y
def analytical_integrator(self, t0, tf, dt, initial_conditions, solver, verbose):
raise NotImplementedError
class Bioreactor(Environment, DynamicSystem):
def __init__(self, organisms=[], metabolites=[], id='Generic Bioreactor', flow_rate_in=0, flow_rate_out=0,
volume_max=None, Xfeed=None, Sfeed=None, deltaX=None, deltaS=None, initial_conditions=[]):
if organisms:
if not isinstance(organisms, collections.Iterable):
organisms = [organisms]
self.set_organisms(organisms)
else:
self.set_organisms([])
if metabolites:
if not isinstance(metabolites, collections.Iterable):
metabolites = [metabolites]
self.set_metabolites(metabolites)
else:
self.set_metabolites([])
self.id = id
self.flow_rate_in = flow_rate_in
self.flow_rate_out = flow_rate_out
self.volume_max = volume_max
self.initial_conditions = initial_conditions
self.set_Xfeed(Xfeed)
self.set_Sfeed(Sfeed)
self.set_deltaX(deltaX)
self.set_deltaS(deltaS)
def set_organisms(self, organisms, Xfeed=None, deltaX=None):
super(Bioreactor, self).set_organisms(organisms)
self.set_Xfeed(Xfeed)
self.set_deltaX(deltaX)
def set_metabolites(self, metabolites, Sfeed=None, deltaS=None):
super(Bioreactor, self).set_metabolites(metabolites)
self.set_Sfeed(Sfeed)
self.set_deltaS(deltaS)
def set_Xfeed(self, Xfeed):
if Xfeed:
assert len(Xfeed) == len(self.organisms), 'The length of Xfeed should equal to the number of organisms'
self.Xfeed = Xfeed
else:
self.Xfeed = numpy.zeros(len(self.organisms))
def set_Sfeed(self, Sfeed):
if Sfeed:
assert len(Sfeed) == len(self.metabolites), 'The length of Sfeed should equal to the number of metabolites'
self.Sfeed = Sfeed
else:
self.Sfeed = numpy.zeros(len(self.metabolites))
def set_deltaX(self, deltaX):
if deltaX:
self.deltaX = deltaX
else:
self.deltaX = numpy.zeros(len(self.organisms))
def set_deltaS(self, deltaS):
if deltaS:
self.deltaS = deltaS
else:
self.deltaS = numpy.zeros(len(self.metabolites))
def set_initial_conditions(self, Vinit, Xinit, Sinit):
assert type(Vinit) == type(Xinit) == type(Sinit) == list
self.initial_conditions = Vinit + Xinit + Sinit
def update(self, time):
if self.volume_max:
if self.V > self.volume_max:
raise ValueError('liquid volume of the bioreactor exceeds volume_max.')
def _ode_RHS(self, t, y):
number_of_organisms = len(self.organisms)
number_of_metabolites = len(self.metabolites)
assert (len(y) == 1 + number_of_organisms + number_of_metabolites)
dy = numpy.zeros(len(y))
self.V = y[0]
self.X = y[1:number_of_organisms + 1]
self.S = y[number_of_organisms + 1:]
self.time = t
vs = numpy.zeros([number_of_organisms, number_of_metabolites])
mu = numpy.zeros([number_of_organisms])
for i, organism in enumerate(self.organisms):
organism.update()
if t == 0:
organism.solver = solver_instance(organism.model)
organism.fba_solution = organism.solver.solve(organism.fba_objective, minimize=False,
constraints=organism.fba_constraints)
if organism.fba_solution.status == Status.OPTIMAL:
mu[i] = organism.fba_solution.fobj
for j, metabolite in enumerate(self.metabolites):
if metabolite in organism.model.reactions.keys():
vs[i, j] = organism.fba_solution.values[metabolite]
else:
mu[i] = 0
for j, metabolite in enumerate(self.metabolites):
if metabolite in organism.model.reactions.keys():
vs[i, j] = 0
self.update(t)
dy[0] = self.flow_rate_in - self.flow_rate_out
dy[1:number_of_organisms + 1] = mu * self.X + self.flow_rate_in / self.V * (
self.Xfeed - self.X) + self.deltaX
dy[number_of_organisms + 1:] = numpy.dot(self.X, vs) + self.flow_rate_in / self.V * (
self.Sfeed - self.S) + self.deltaS
return dy
def calculate_yield_from_dfba(self):
raise NotImplementedError
def calculate_titer_from_dfba(self):
raise NotImplementedError
def calculate_productivity_from_dfba(self):
raise NotImplementedError
| true | true |
f7fb44b4acc10a4e04c8d07ca643fecbbf1ea48d | 568 | py | Python | yosim/servers/models.py | thoongnv/yosim | 22bcaceb2c40735363496d9404970a73c4b944bc | [
"MIT"
] | 2 | 2022-02-15T03:41:13.000Z | 2022-02-15T03:44:46.000Z | yosim/servers/models.py | thoongnv/yosim | 22bcaceb2c40735363496d9404970a73c4b944bc | [
"MIT"
] | 5 | 2021-06-08T22:26:24.000Z | 2022-03-12T00:21:35.000Z | yosim/servers/models.py | thoongnv/yosim | 22bcaceb2c40735363496d9404970a73c4b944bc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Server(models.Model):
    """A server record persisted in the pre-existing ``server`` table."""
    # Integer time of last contact — presumably a Unix timestamp in
    # seconds; TODO confirm against whatever writes this field.
    last_contact = models.IntegerField()
    # Version string reported by the server.
    version = models.CharField(max_length=32)
    # Hostname; unique, so it also serves as a natural key.
    hostname = models.CharField(unique=True, max_length=64)
    # Free-form descriptive information about the server.
    information = models.TextField()

    class Meta:
        # Keep the legacy table name instead of the Django default.
        db_table = 'server'

    def __str__(self):
        # Human-readable summary: "<last_contact> - <version> - <hostname> - <information>".
        return "{} - {} - {} - {}".\
            format(self.last_contact, self.version,
                   self.hostname, self.information)
| 28.4 | 61 | 0.663732 |
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Server(models.Model):
last_contact = models.IntegerField()
version = models.CharField(max_length=32)
hostname = models.CharField(unique=True, max_length=64)
information = models.TextField()
class Meta:
db_table = 'server'
def __str__(self):
return "{} - {} - {} - {}".\
format(self.last_contact, self.version,
self.hostname, self.information)
| true | true |
f7fb44c13c6bdd9ddea79dbd19b24d52529fc950 | 3,613 | py | Python | tests/test_parametric_reactors/test_flf_system_code_reactor.py | generein/paramak | cec738b278c285a17eaa69fc1f35ea4788204a8c | [
"MIT"
] | 25 | 2021-06-22T07:29:32.000Z | 2022-03-28T13:30:53.000Z | tests/test_parametric_reactors/test_flf_system_code_reactor.py | generein/paramak | cec738b278c285a17eaa69fc1f35ea4788204a8c | [
"MIT"
] | 181 | 2021-06-22T00:56:30.000Z | 2022-03-31T22:36:54.000Z | tests/test_parametric_reactors/test_flf_system_code_reactor.py | generein/paramak | cec738b278c285a17eaa69fc1f35ea4788204a8c | [
"MIT"
] | 8 | 2021-07-05T12:29:43.000Z | 2022-03-24T13:17:37.000Z | import math
import os
import unittest
from pathlib import Path
import pytest
import paramak
class TestFlfSystemCodeReactor(unittest.TestCase):
    """Exercises geometry creation and STEP export for paramak.FlfSystemCodeReactor."""

    def setUp(self):
        # A half (180 degree) sector reactor shared by all the test cases below.
        self.test_reactor = paramak.FlfSystemCodeReactor(
            inner_blanket_radius=100,
            blanket_thickness=60,
            blanket_height=500,
            lower_blanket_thickness=50,
            upper_blanket_thickness=40,
            blanket_vv_gap=20,
            upper_vv_thickness=10,
            vv_thickness=10,
            lower_vv_thickness=10,
            rotation_angle=180,
        )

    def test_input_variable_names(self):
        """The reactor exposes exactly 13 input variables, by dict and by name list."""
        assert len(self.test_reactor.input_variable_names) == 13
        assert len(self.test_reactor.input_variables.keys()) == 13

    def test_stp_file_creation(self):
        """Exporting to a single named STEP file produces that file on disk."""
        os.system("rm *.stp")
        self.test_reactor.export_stp(filename="cylinder.stp")
        assert Path("cylinder.stp").is_file()

    def test_multiple_stp_file_creation(self):
        """Exporting without a filename writes one STEP file per reactor component."""
        os.system("rm *.stp")
        self.test_reactor.export_stp()
        component_files = [
            "lower_vacuum_vessel.stp",
            "lower_blanket.stp",
            "blanket.stp",
            "upper_blanket.stp",
            "upper_vacuum_vessel.stp",
            "vacuum_vessel.stp",
        ]
        for component_file in component_files:
            assert Path(component_file).is_file()

    def test_order_of_names_in_reactor(self):
        """Shapes appear in the reactor in the expected order."""
        expected_order = [
            "blanket",
            "vacuum_vessel",
            "upper_blanket",
            "lower_blanket",
            "lower_vacuum_vessel",
            "upper_vacuum_vessel",
        ]
        assert self.test_reactor.name == expected_order

    def test_blanket_volume_against_analytical_volume(self):
        """The blanket volume approximates the analytical half-annular-cylinder volume."""
        reactor = self.test_reactor
        outer_radius = reactor.inner_blanket_radius + reactor.blanket_thickness
        outer_volume = math.pi * math.pow(outer_radius, 2) * reactor.blanket_height
        inner_volume = (
            math.pi
            * math.pow(reactor.inner_blanket_radius, 2)
            * reactor.blanket_height
        )
        sector_fraction = 360.0 / reactor.rotation_angle
        expected_volume = (outer_volume - inner_volume) / sector_fraction
        assert pytest.approx(reactor.volume()[0]) == expected_volume

    def test_upper_blanket_volume_against_analytical_volume(self):
        """The upper blanket volume approximates the analytical half-cylinder volume."""
        reactor = self.test_reactor
        outer_radius = (
            reactor.inner_blanket_radius
            + reactor.blanket_thickness
            + reactor.blanket_vv_gap
        )
        full_rotation_volume = (
            math.pi * math.pow(outer_radius, 2) * reactor.upper_blanket_thickness
        )
        sector_fraction = 360.0 / reactor.rotation_angle
        expected_volume = full_rotation_volume / sector_fraction
        assert pytest.approx(reactor.volume()[2]) == expected_volume
| 33.766355 | 77 | 0.631331 | import math
import os
import unittest
from pathlib import Path
import pytest
import paramak
class TestFlfSystemCodeReactor(unittest.TestCase):
def setUp(self):
self.test_reactor = paramak.FlfSystemCodeReactor(
inner_blanket_radius=100,
blanket_thickness=60,
blanket_height=500,
lower_blanket_thickness=50,
upper_blanket_thickness=40,
blanket_vv_gap=20,
upper_vv_thickness=10,
vv_thickness=10,
lower_vv_thickness=10,
rotation_angle=180,
)
def test_input_variable_names(self):
assert len(self.test_reactor.input_variables.keys()) == 13
assert len(self.test_reactor.input_variable_names) == 13
def test_stp_file_creation(self):
os.system("rm *.stp")
self.test_reactor.export_stp(filename="cylinder.stp")
assert Path("cylinder.stp").is_file()
def test_multiple_stp_file_creation(self):
os.system("rm *.stp")
self.test_reactor.export_stp()
assert Path("lower_vacuum_vessel.stp").is_file()
assert Path("lower_blanket.stp").is_file()
assert Path("blanket.stp").is_file()
assert Path("upper_blanket.stp").is_file()
assert Path("upper_vacuum_vessel.stp").is_file()
assert Path("vacuum_vessel.stp").is_file()
def test_order_of_names_in_reactor(self):
assert self.test_reactor.name == [
"blanket",
"vacuum_vessel",
"upper_blanket",
"lower_blanket",
"lower_vacuum_vessel",
"upper_vacuum_vessel",
]
def test_blanket_volume_against_analytical_volume(self):
outer_volume = (
math.pi
* math.pow(
self.test_reactor.inner_blanket_radius
+ self.test_reactor.blanket_thickness,
2,
)
* self.test_reactor.blanket_height
)
inner_volume = (
math.pi
* math.pow(self.test_reactor.inner_blanket_radius, 2)
* self.test_reactor.blanket_height
)
sector_fraction = 360.0 / self.test_reactor.rotation_angle
blanket_volume = (outer_volume - inner_volume) / sector_fraction
assert pytest.approx(self.test_reactor.volume()[0]) == blanket_volume
def test_upper_blanket_volume_against_analytical_volume(self):
full_rotation_volume = (
math.pi
* math.pow(
self.test_reactor.inner_blanket_radius
+ self.test_reactor.blanket_thickness
+ self.test_reactor.blanket_vv_gap,
2,
)
* self.test_reactor.upper_blanket_thickness
)
sector_fraction = 360.0 / self.test_reactor.rotation_angle
blanket_volume = full_rotation_volume / sector_fraction
assert pytest.approx(self.test_reactor.volume()[2]) == blanket_volume
| true | true |
f7fb450fa06058c4866cea07fa284fd3bdbb596d | 2,572 | py | Python | placementApp/permissions.py | deepnanda30/placement-portal-web | 2b7450ab48e7c7ee9f3179da7edda01e840f6542 | [
"MIT"
] | 1 | 2020-09-27T12:28:49.000Z | 2020-09-27T12:28:49.000Z | placementApp/permissions.py | deepnanda30/placement-portal-web | 2b7450ab48e7c7ee9f3179da7edda01e840f6542 | [
"MIT"
] | 25 | 2020-03-04T04:20:57.000Z | 2022-02-27T00:40:07.000Z | placementApp/permissions.py | deepnanda30/placement-portal-web | 2b7450ab48e7c7ee9f3179da7edda01e840f6542 | [
"MIT"
] | 15 | 2020-02-28T12:23:25.000Z | 2020-05-22T18:17:19.000Z | from rest_framework.permissions import BasePermission, SAFE_METHODS
# from customer.models import User
class IsStaffOrOwner(BasePermission):
    """Staff (TPO/CO) get object access; owners may act on their own record.

    Authenticated students are blocked from the ``list`` action entirely.
    """

    message = "You do not have the permission to perform this action."

    def has_permission(self, request, view):
        user = request.user
        if not user.is_authenticated:
            return False
        # Students are not allowed to list all records.
        return not (user.is_student() and view.action == "list")

    def has_object_permission(self, request, view, obj):
        user = request.user
        return user.id == obj.id or user.is_tpo() or user.is_co()
class IsTPOOrReadOnly(BasePermission):
    """Anyone may retrieve; only the TPO may create or modify objects."""

    message = "You do not have the permission to perform this action."

    def has_permission(self, request, view):
        if not request.user.is_authenticated:
            return False
        # Creation is reserved for the TPO; everything else just needs auth.
        if view.action == "create":
            return request.user.is_tpo()
        return True

    def has_object_permission(self, request, view, obj):
        # Retrieval is open; object mutation requires an authenticated TPO.
        return view.action == "retrieve" or (
            request.user.is_authenticated and request.user.is_tpo()
        )
class IsStaff(BasePermission):
    """Grants access only to staff users (Coordinator or TPO)."""

    message = "You do not have the permission to perform this action."

    def has_permission(self, request, view):
        user = request.user
        return user.is_authenticated and (user.is_co() or user.is_tpo())

    def has_object_permission(self, request, view, obj):
        # Object-level checks defer entirely to the view-level staff check.
        return True
class ApplicationPermissions(BasePermission):
    """Students create applications; anyone authenticated may retrieve them;
    only the TPO may update or delete them."""

    message = "You do not have the permission to perform this action."

    def has_permission(self, request, view):
        if not request.user.is_authenticated:
            return False
        # Only students may submit new applications.
        if view.action == "create":
            return request.user.is_student()
        return True

    def has_object_permission(self, request, view, obj):
        # All authenticated users can retrieve individual applications.
        if view.action == "retrieve":
            return request.user.is_authenticated
        # Updates and deletes are reserved for the TPO.
        return request.user.is_authenticated and request.user.is_tpo()
class IsStudentOrReadOnly(BasePermission):
    """Lets a student act only on the profile matching their own email."""

    message = "You do not have required permission to perform this action"

    def has_permission(self, request, view):
        return request.user.is_authenticated

    def has_object_permission(self, request, view, obj):
        # Ownership is established by matching the profile email to the
        # requesting student's email.
        user = request.user
        return user.is_student() and obj.email == user.email
| 34.293333 | 86 | 0.68818 | from rest_framework.permissions import BasePermission, SAFE_METHODS
class IsStaffOrOwner(BasePermission):
    """Staff (TPO/CO) get full access; plain users may only touch their own record.

    Students are additionally barred from the ``list`` action.
    """
    message = "You do not have the permission to perform this action."
    def has_permission(self, request, view):
        # Authenticated students may not list all users; everything else
        # only requires authentication.
        if request.user.is_authenticated and (
            request.user.is_student() and view.action == "list"
        ):
            return False
        return request.user.is_authenticated
    def has_object_permission(self, request, view, obj):
        # Object access: the user's own record, or any TPO/CO staff member.
        return (
            request.user.id == obj.id or request.user.is_tpo() or request.user.is_co()
        )
class IsTPOOrReadOnly(BasePermission):
message = "You do not have the permission to perform this action."
def has_permission(self, request, view):
if view.action == "create":
return request.user.is_authenticated and request.user.is_tpo()
return request.user.is_authenticated
def has_object_permission(self, request, view, obj):
if view.action == "retrieve":
return True
return request.user.is_authenticated and request.user.is_tpo()
class IsStaff(BasePermission):
message = "You do not have the permission to perform this action."
def has_permission(self, request, view):
return request.user.is_authenticated and (
request.user.is_co() or request.user.is_tpo()
)
def has_object_permission(self, request, view, obj):
return True
class ApplicationPermissions(BasePermission):
message = "You do not have the permission to perform this action."
def has_permission(self, request, view):
if view.action == "create":
return request.user.is_authenticated and request.user.is_student()
return request.user.is_authenticated
def has_object_permission(self, request, view, obj):
if view.action == "retrieve":
return request.user.is_authenticated
return request.user.is_authenticated and request.user.is_tpo()
class IsStudentOrReadOnly(BasePermission):
message = "You do not have required permission to perform this action"
def has_permission(self, request, view):
return request.user.is_authenticated
def has_object_permission(self, request, view, obj):
return request.user.is_student() and obj.email == request.user.email
| true | true |
f7fb4d6e8546b6f2f590dd05a22a5c6aaa218e9c | 147 | py | Python | cupons/urls.py | MisterLenivec/django-online-store | 56f0496ef68e9580772f91f31b94182dc81858c5 | [
"MIT"
] | 1 | 2021-07-05T18:45:58.000Z | 2021-07-05T18:45:58.000Z | cupons/urls.py | MisterLenivec/django-online-store | 56f0496ef68e9580772f91f31b94182dc81858c5 | [
"MIT"
] | 9 | 2019-12-27T21:03:29.000Z | 2022-03-12T00:10:27.000Z | cupons/urls.py | MisterLenivec/django-online-store | 56f0496ef68e9580772f91f31b94182dc81858c5 | [
"MIT"
] | 1 | 2022-03-14T10:05:17.000Z | 2022-03-14T10:05:17.000Z | from django.urls import path
from .views import cupon_apply
app_name = 'cupons'
urlpatterns = [
path('apply/', cupon_apply, name='apply')
]
| 14.7 | 45 | 0.707483 | from django.urls import path
from .views import cupon_apply
app_name = 'cupons'
urlpatterns = [
path('apply/', cupon_apply, name='apply')
]
| true | true |
f7fb4e3cd4b83e81e5c65969bdc8b00122b9a5d5 | 13,864 | py | Python | qiskit/circuit/instruction.py | michelle4654/qiskit-terra | 1b18ea1debf2e9d3c0c3cf39e8c434352d2b2707 | [
"Apache-2.0"
] | null | null | null | qiskit/circuit/instruction.py | michelle4654/qiskit-terra | 1b18ea1debf2e9d3c0c3cf39e8c434352d2b2707 | [
"Apache-2.0"
] | null | null | null | qiskit/circuit/instruction.py | michelle4654/qiskit-terra | 1b18ea1debf2e9d3c0c3cf39e8c434352d2b2707 | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
A generic quantum instruction.
Instructions can be implementable on hardware (u, cx, etc.) or in simulation
(snapshot, noise, etc.).
Instructions can be unitary (a.k.a Gate) or non-unitary.
Instructions are identified by the following:
name: A string to identify the type of instruction.
Used to request a specific instruction on the backend, or in visualizing circuits.
num_qubits, num_clbits: dimensions of the instruction.
params: List of parameters to specialize a specific instruction instance.
Instructions do not have any context about where they are in a circuit (which qubits/clbits).
The circuit itself keeps this context.
"""
import warnings
import copy
from itertools import zip_longest
import numpy
from qiskit.circuit.exceptions import CircuitError
from qiskit.circuit.quantumregister import QuantumRegister
from qiskit.circuit.classicalregister import ClassicalRegister
from qiskit.qobj.qasm_qobj import QasmQobjInstruction
from qiskit.circuit.parameter import ParameterExpression
from .tools import pi_check
_CUTOFF_PRECISION = 1E-10
class Instruction:
"""Generic quantum instruction."""
def __init__(self, name, num_qubits, num_clbits, params):
"""Create a new instruction.
Args:
name (str): instruction name
num_qubits (int): instruction's qubit width
num_clbits (int): instruction's clbit width
params (list[int|float|complex|str|ndarray|list|ParameterExpression]):
list of parameters
Raises:
CircuitError: when the register is not in the correct format.
"""
if not isinstance(num_qubits, int) or not isinstance(num_clbits, int):
raise CircuitError("num_qubits and num_clbits must be integer.")
if num_qubits < 0 or num_clbits < 0:
raise CircuitError(
"bad instruction dimensions: %d qubits, %d clbits." %
num_qubits, num_clbits)
self.name = name
self.num_qubits = num_qubits
self.num_clbits = num_clbits
self._params = [] # a list of gate params stored
# tuple (ClassicalRegister, int) when the instruction has a conditional ("if")
self.condition = None
# list of instructions (and their contexts) that this instruction is composed of
# empty definition means opaque or fundamental instruction
self._definition = None
self.params = params
    def __eq__(self, other):
        """Two instructions are the same if they have the same name,
        same dimensions, and same params.

        Args:
            other (instruction): other instruction

        Returns:
            bool: are self and other equal.
        """
        # Cheap structural checks first; definition comparison may recurse.
        if type(self) is not type(other) or \
                self.name != other.name or \
                self.num_qubits != other.num_qubits or \
                self.num_clbits != other.num_clbits or \
                self.definition != other.definition:
            return False
        # Compare params pairwise; zip_longest pads with None so differing
        # lengths cannot silently compare equal.
        for self_param, other_param in zip_longest(self.params, other.params):
            # 1) Exact equality (ValueError can come from ambiguous
            #    array truth values; fall through to numeric checks).
            try:
                if self_param == other_param:
                    continue
            except ValueError:
                pass
            # 2) Array-like params: same shape and elementwise-close.
            try:
                if numpy.shape(self_param) == numpy.shape(other_param) \
                        and numpy.allclose(self_param, other_param,
                                           atol=_CUTOFF_PRECISION):
                    continue
            except TypeError:
                pass
            # 3) Scalar params coercible to float: tolerance comparison.
            try:
                if numpy.isclose(float(self_param), float(other_param),
                                 atol=_CUTOFF_PRECISION):
                    continue
            except TypeError:
                pass
            # No comparison strategy matched this pair.
            return False
        return True
def _define(self):
"""Populates self.definition with a decomposition of this gate."""
pass
@property
def params(self):
"""return instruction params."""
return self._params
@params.setter
def params(self, parameters):
self._params = []
for single_param in parameters:
self._params.append(self.validate_parameter(single_param))
def validate_parameter(self, parameter):
"""Instruction parameters has no validation or normalization."""
return parameter
def is_parameterized(self):
"""Return True .IFF. instruction is parameterized else False"""
return any(isinstance(param, ParameterExpression)
and param.parameters
for param in self.params)
@property
def definition(self):
"""Return definition in terms of other basic gates."""
if self._definition is None:
self._define()
return self._definition
@definition.setter
def definition(self, array):
"""Set gate representation"""
self._definition = array
@property
def decompositions(self):
"""Get the decompositions of the instruction from the SessionEquivalenceLibrary."""
# pylint: disable=cyclic-import
from qiskit.circuit.equivalence_library import SessionEquivalenceLibrary as sel
return sel.get_entry(self)
@decompositions.setter
def decompositions(self, decompositions):
"""Set the decompositions of the instruction from the SessionEquivalenceLibrary."""
# pylint: disable=cyclic-import
from qiskit.circuit.equivalence_library import SessionEquivalenceLibrary as sel
sel.set_entry(self, decompositions)
def add_decomposition(self, decomposition):
"""Add a decomposition of the instruction to the SessionEquivalenceLibrary."""
# pylint: disable=cyclic-import
from qiskit.circuit.equivalence_library import SessionEquivalenceLibrary as sel
sel.add_equivalence(self, decomposition)
    def assemble(self):
        """Assemble this instruction into a QasmQobjInstruction for a Qobj payload."""
        instruction = QasmQobjInstruction(name=self.name)
        # Evaluate parameters (symbolic objects expose .evalf; plain numbers pass through)
        if self.params:
            params = [
                x.evalf(x) if hasattr(x, 'evalf') else x for x in self.params]
            instruction.params = params
        # Add placeholder for qarg and carg params
        if self.num_qubits:
            instruction.qubits = list(range(self.num_qubits))
        if self.num_clbits:
            instruction.memory = list(range(self.num_clbits))
        # Add condition parameters for assembler. This is needed to convert
        # to a qobj conditional instruction at assemble time and after
        # conversion will be deleted by the assembler.
        if self.condition:
            instruction._condition = self.condition
        return instruction
def mirror(self):
"""DEPRECATED: use instruction.reverse_ops().
Return:
qiskit.circuit.Instruction: a new instruction with sub-instructions
reversed.
"""
warnings.warn('instruction.mirror() is deprecated. Use circuit.reverse_ops()'
'to reverse the order of gates.', DeprecationWarning)
return self.reverse_ops()
    def reverse_ops(self):
        """For a composite instruction, reverse the order of sub-instructions.

        This is done by recursively reversing all sub-instructions.
        It does not invert any gate.

        Returns:
            qiskit.circuit.Instruction: a new instruction with
            sub-instructions reversed.
        """
        # Opaque/fundamental instruction: nothing to reorder, return a copy.
        if not self._definition:
            return self.copy()
        reverse_inst = self.copy(name=self.name + '_reverse')
        # Rebuild the definition with each sub-instruction itself reversed,
        # iterating the original instruction list back-to-front.
        reverse_inst.definition._data = [(inst.reverse_ops(), qargs, cargs)
                                         for inst, qargs, cargs in reversed(self._definition)]
        return reverse_inst
    def inverse(self):
        """Invert this instruction.

        If the instruction is composite (i.e. has a definition),
        then its definition will be recursively inverted.

        Special instructions inheriting from Instruction can
        implement their own inverse (e.g. T and Tdg, Barrier, etc.)

        Returns:
            qiskit.circuit.Instruction: a fresh instruction for the inverse

        Raises:
            CircuitError: if the instruction is not composite
                and an inverse has not been implemented for it.
        """
        if self.definition is None:
            raise CircuitError("inverse() not implemented for %s." % self.name)
        from qiskit.circuit import QuantumCircuit, Gate  # pylint: disable=cyclic-import
        # Instructions carrying clbits cannot be unitary Gates, so keep the
        # generic Instruction type for the inverse in that case.
        if self.num_clbits:
            inverse_gate = Instruction(name=self.name + '_dg',
                                       num_qubits=self.num_qubits,
                                       num_clbits=self.num_clbits,
                                       params=self.params.copy())
        else:
            inverse_gate = Gate(name=self.name + '_dg',
                                num_qubits=self.num_qubits,
                                params=self.params.copy())
        # Inverse definition: each sub-instruction inverted, in reverse order.
        inverse_gate.definition = QuantumCircuit(*self.definition.qregs, *self.definition.cregs)
        inverse_gate.definition._data = [(inst.inverse(), qargs, cargs)
                                         for inst, qargs, cargs in reversed(self._definition)]
        return inverse_gate
    def c_if(self, classical, val):
        """Add classical condition on register classical and value val.

        Args:
            classical (ClassicalRegister): register the condition reads.
            val (int): non-negative value the register must equal.

        Returns:
            Instruction: self, to allow call chaining.

        Raises:
            CircuitError: if ``classical`` is not a ClassicalRegister or
                ``val`` is negative.
        """
        if not isinstance(classical, ClassicalRegister):
            raise CircuitError("c_if must be used with a classical register")
        if val < 0:
            raise CircuitError("condition value should be non-negative")
        self.condition = (classical, val)
        return self
def copy(self, name=None):
"""
Copy of the instruction.
Args:
name (str): name to be given to the copied circuit,
if None then the name stays the same.
Returns:
qiskit.circuit.Instruction: a copy of the current instruction, with the name
updated if it was provided
"""
cpy = self.__deepcopy__()
if name:
cpy.name = name
return cpy
def __deepcopy__(self, _memo=None):
cpy = copy.copy(self)
cpy._params = copy.copy(self._params)
if self._definition:
cpy._definition = copy.deepcopy(self._definition, _memo)
return cpy
def _qasmif(self, string):
"""Print an if statement if needed."""
if self.condition is None:
return string
return "if(%s==%d) " % (self.condition[0].name, self.condition[1]) + string
def qasm(self):
"""Return a default OpenQASM string for the instruction.
Derived instructions may override this to print in a
different format (e.g. measure q[0] -> c[0];).
"""
name_param = self.name
if self.params:
name_param = "%s(%s)" % (name_param, ",".join(
[pi_check(i, ndigits=8, output='qasm') for i in self.params]))
return self._qasmif(name_param)
def broadcast_arguments(self, qargs, cargs):
"""
Validation of the arguments.
Args:
qargs (List): List of quantum bit arguments.
cargs (List): List of classical bit arguments.
Yields:
Tuple(List, List): A tuple with single arguments.
Raises:
CircuitError: If the input is not valid. For example, the number of
arguments does not match the gate expectation.
"""
if len(qargs) != self.num_qubits:
raise CircuitError(
'The amount of qubit arguments does not match the instruction expectation.')
# [[q[0], q[1]], [c[0], c[1]]] -> [q[0], c[0]], [q[1], c[1]]
flat_qargs = [qarg for sublist in qargs for qarg in sublist]
flat_cargs = [carg for sublist in cargs for carg in sublist]
yield flat_qargs, flat_cargs
def _return_repeat(self, exponent):
return Instruction(name="%s*%s" % (self.name, exponent), num_qubits=self.num_qubits,
num_clbits=self.num_clbits, params=self.params)
    def repeat(self, n):
        """Creates an instruction with `gate` repeated `n` amount of times.

        Args:
            n (int): Number of times to repeat the instruction

        Returns:
            qiskit.circuit.Instruction: Containing the definition.

        Raises:
            CircuitError: If n < 1.
        """
        # Accept integral floats (e.g. 3.0) but reject fractions and n < 1.
        if int(n) != n or n < 1:
            raise CircuitError("Repeat can only be called with strictly positive integer.")
        n = int(n)
        instruction = self._return_repeat(n)
        # Registers sized to this instruction; empty lists when width is zero.
        qargs = [] if self.num_qubits == 0 else QuantumRegister(self.num_qubits, 'q')
        cargs = [] if self.num_clbits == 0 else ClassicalRegister(self.num_clbits, 'c')
        if instruction.definition is None:
            # pylint: disable=cyclic-import
            from qiskit import QuantumCircuit
            qc = QuantumCircuit()
            if qargs:
                qc.add_register(qargs)
            if cargs:
                qc.add_register(cargs)
            # Definition is simply this instruction applied n times.
            qc.data = [(self, qargs[:], cargs[:])] * n
            instruction.definition = qc
        return instruction
| 36.104167 | 96 | 0.615479 |
import warnings
import copy
from itertools import zip_longest
import numpy
from qiskit.circuit.exceptions import CircuitError
from qiskit.circuit.quantumregister import QuantumRegister
from qiskit.circuit.classicalregister import ClassicalRegister
from qiskit.qobj.qasm_qobj import QasmQobjInstruction
from qiskit.circuit.parameter import ParameterExpression
from .tools import pi_check
_CUTOFF_PRECISION = 1E-10
class Instruction:
def __init__(self, name, num_qubits, num_clbits, params):
if not isinstance(num_qubits, int) or not isinstance(num_clbits, int):
raise CircuitError("num_qubits and num_clbits must be integer.")
if num_qubits < 0 or num_clbits < 0:
raise CircuitError(
"bad instruction dimensions: %d qubits, %d clbits." %
num_qubits, num_clbits)
self.name = name
self.num_qubits = num_qubits
self.num_clbits = num_clbits
self._params = []
self.condition = None
self._definition = None
self.params = params
def __eq__(self, other):
if type(self) is not type(other) or \
self.name != other.name or \
self.num_qubits != other.num_qubits or \
self.num_clbits != other.num_clbits or \
self.definition != other.definition:
return False
for self_param, other_param in zip_longest(self.params, other.params):
try:
if self_param == other_param:
continue
except ValueError:
pass
try:
if numpy.shape(self_param) == numpy.shape(other_param) \
and numpy.allclose(self_param, other_param,
atol=_CUTOFF_PRECISION):
continue
except TypeError:
pass
try:
if numpy.isclose(float(self_param), float(other_param),
atol=_CUTOFF_PRECISION):
continue
except TypeError:
pass
return False
return True
def _define(self):
pass
@property
def params(self):
return self._params
@params.setter
def params(self, parameters):
self._params = []
for single_param in parameters:
self._params.append(self.validate_parameter(single_param))
def validate_parameter(self, parameter):
return parameter
def is_parameterized(self):
return any(isinstance(param, ParameterExpression)
and param.parameters
for param in self.params)
@property
def definition(self):
if self._definition is None:
self._define()
return self._definition
@definition.setter
def definition(self, array):
self._definition = array
@property
def decompositions(self):
from qiskit.circuit.equivalence_library import SessionEquivalenceLibrary as sel
return sel.get_entry(self)
@decompositions.setter
def decompositions(self, decompositions):
from qiskit.circuit.equivalence_library import SessionEquivalenceLibrary as sel
sel.set_entry(self, decompositions)
def add_decomposition(self, decomposition):
from qiskit.circuit.equivalence_library import SessionEquivalenceLibrary as sel
sel.add_equivalence(self, decomposition)
def assemble(self):
instruction = QasmQobjInstruction(name=self.name)
if self.params:
params = [
x.evalf(x) if hasattr(x, 'evalf') else x for x in self.params]
instruction.params = params
if self.num_qubits:
instruction.qubits = list(range(self.num_qubits))
if self.num_clbits:
instruction.memory = list(range(self.num_clbits))
if self.condition:
instruction._condition = self.condition
return instruction
def mirror(self):
warnings.warn('instruction.mirror() is deprecated. Use circuit.reverse_ops()'
'to reverse the order of gates.', DeprecationWarning)
return self.reverse_ops()
def reverse_ops(self):
if not self._definition:
return self.copy()
reverse_inst = self.copy(name=self.name + '_reverse')
reverse_inst.definition._data = [(inst.reverse_ops(), qargs, cargs)
for inst, qargs, cargs in reversed(self._definition)]
return reverse_inst
def inverse(self):
if self.definition is None:
raise CircuitError("inverse() not implemented for %s." % self.name)
from qiskit.circuit import QuantumCircuit, Gate
if self.num_clbits:
inverse_gate = Instruction(name=self.name + '_dg',
num_qubits=self.num_qubits,
num_clbits=self.num_clbits,
params=self.params.copy())
else:
inverse_gate = Gate(name=self.name + '_dg',
num_qubits=self.num_qubits,
params=self.params.copy())
inverse_gate.definition = QuantumCircuit(*self.definition.qregs, *self.definition.cregs)
inverse_gate.definition._data = [(inst.inverse(), qargs, cargs)
for inst, qargs, cargs in reversed(self._definition)]
return inverse_gate
def c_if(self, classical, val):
if not isinstance(classical, ClassicalRegister):
raise CircuitError("c_if must be used with a classical register")
if val < 0:
raise CircuitError("condition value should be non-negative")
self.condition = (classical, val)
return self
def copy(self, name=None):
cpy = self.__deepcopy__()
if name:
cpy.name = name
return cpy
def __deepcopy__(self, _memo=None):
cpy = copy.copy(self)
cpy._params = copy.copy(self._params)
if self._definition:
cpy._definition = copy.deepcopy(self._definition, _memo)
return cpy
def _qasmif(self, string):
if self.condition is None:
return string
return "if(%s==%d) " % (self.condition[0].name, self.condition[1]) + string
def qasm(self):
name_param = self.name
if self.params:
name_param = "%s(%s)" % (name_param, ",".join(
[pi_check(i, ndigits=8, output='qasm') for i in self.params]))
return self._qasmif(name_param)
def broadcast_arguments(self, qargs, cargs):
if len(qargs) != self.num_qubits:
raise CircuitError(
'The amount of qubit arguments does not match the instruction expectation.')
flat_qargs = [qarg for sublist in qargs for qarg in sublist]
flat_cargs = [carg for sublist in cargs for carg in sublist]
yield flat_qargs, flat_cargs
def _return_repeat(self, exponent):
return Instruction(name="%s*%s" % (self.name, exponent), num_qubits=self.num_qubits,
num_clbits=self.num_clbits, params=self.params)
def repeat(self, n):
if int(n) != n or n < 1:
raise CircuitError("Repeat can only be called with strictly positive integer.")
n = int(n)
instruction = self._return_repeat(n)
qargs = [] if self.num_qubits == 0 else QuantumRegister(self.num_qubits, 'q')
cargs = [] if self.num_clbits == 0 else ClassicalRegister(self.num_clbits, 'c')
if instruction.definition is None:
from qiskit import QuantumCircuit
qc = QuantumCircuit()
if qargs:
qc.add_register(qargs)
if cargs:
qc.add_register(cargs)
qc.data = [(self, qargs[:], cargs[:])] * n
instruction.definition = qc
return instruction
| true | true |
f7fb4f7ae829a3e37ec8732635df733ece8b21ed | 908 | py | Python | tests/test_token.py | chie8842/tiny_tokenizer | 6599873c050f4e064c88381688d8476346b57099 | [
"MIT"
] | null | null | null | tests/test_token.py | chie8842/tiny_tokenizer | 6599873c050f4e064c88381688d8476346b57099 | [
"MIT"
] | null | null | null | tests/test_token.py | chie8842/tiny_tokenizer | 6599873c050f4e064c88381688d8476346b57099 | [
"MIT"
] | null | null | null | """Test for word tokenizers"""
import unittest
from tiny_tokenizer.tiny_tokenizer_token import Token
class TokenTest(unittest.TestCase):
    """Tests for the Token value object's feature-string construction."""
    def test_token_without_feature(self):
        """A surface-only token has an empty feature string."""
        token = Token(surface="大崎")
        self.assertEqual("大崎", token.surface)
        self.assertEqual("", token.feature)
    def test_token_with_postag(self):
        """With only a postag, the feature equals that postag."""
        token = Token(surface="大崎", postag="名詞")
        self.assertEqual("大崎", token.surface)
        self.assertEqual("名詞", token.feature)
    def test_token_with_postag2(self):
        """All morphological fields join into one comma-separated feature string."""
        token = Token(
            surface="大崎",
            postag="名詞",
            postag2="固有名詞,人名,姓",
            conj_type="*",
            conj_form="*",
            origin_form="大崎",
            yomi="オオサキ",
            pron="オーサキ")
        self.assertEqual(
            "名詞,固有名詞,人名,姓,*,*,大崎,オオサキ,オーサキ",
            token.feature)
| 26.705882 | 53 | 0.577093 | import unittest
from tiny_tokenizer.tiny_tokenizer_token import Token
class TokenTest(unittest.TestCase):
def test_token_without_feature(self):
token = Token(surface="大崎")
self.assertEqual("大崎", token.surface)
self.assertEqual("", token.feature)
def test_token_with_postag(self):
token = Token(surface="大崎", postag="名詞")
self.assertEqual("大崎", token.surface)
self.assertEqual("名詞", token.feature)
def test_token_with_postag2(self):
token = Token(
surface="大崎",
postag="名詞",
postag2="固有名詞,人名,姓",
conj_type="*",
conj_form="*",
origin_form="大崎",
yomi="オオサキ",
pron="オーサキ")
self.assertEqual(
"名詞,固有名詞,人名,姓,*,*,大崎,オオサキ,オーサキ",
token.feature)
| true | true |
f7fb4faea4862b30e0c9e995a07c42deabde0ff6 | 9,856 | py | Python | Collections-a-installer/community-general-2.4.0/plugins/modules/cloud/huawei/hwc_smn_topic.py | d-amien-b/simple-getwordpress | da90d515a0aa837b633d50db4d91d22b031c04a2 | [
"MIT"
] | 22 | 2021-07-16T08:11:22.000Z | 2022-03-31T07:15:34.000Z | Collections-a-installer/community-general-2.4.0/plugins/modules/cloud/huawei/hwc_smn_topic.py | d-amien-b/simple-getwordpress | da90d515a0aa837b633d50db4d91d22b031c04a2 | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | Collections-a-installer/community-general-2.4.0/plugins/modules/cloud/huawei/hwc_smn_topic.py | d-amien-b/simple-getwordpress | da90d515a0aa837b633d50db4d91d22b031c04a2 | [
"MIT"
] | 39 | 2021-07-05T02:31:42.000Z | 2022-03-31T02:46:03.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Huawei
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
###############################################################################
# Documentation
###############################################################################
DOCUMENTATION = '''
---
module: hwc_smn_topic
description:
- Represents a SMN notification topic resource.
short_description: Creates a resource of SMNTopic in Huaweicloud Cloud
author: Huawei Inc. (@huaweicloud)
requirements:
- requests >= 2.18.4
- keystoneauth1 >= 3.6.0
options:
state:
description:
- Whether the given object should exist in Huaweicloud Cloud.
type: str
choices: ['present', 'absent']
default: 'present'
display_name:
description:
- Topic display name, which is presented as the name of the email
sender in an email message. The topic display name contains a
maximum of 192 bytes.
type: str
required: false
name:
description:
- Name of the topic to be created. The topic name is a string of 1
to 256 characters. It must contain upper- or lower-case letters,
digits, hyphens (-), and underscores C(_), and must start with a
letter or digit.
type: str
required: true
extends_documentation_fragment:
- community.general.hwc
'''
EXAMPLES = '''
- name: Create a smn topic
community.general.hwc_smn_topic:
identity_endpoint: "{{ identity_endpoint }}"
user_name: "{{ user_name }}"
password: "{{ password }}"
domain_name: "{{ domain_name }}"
project_name: "{{ project_name }}"
region: "{{ region }}"
name: "ansible_smn_topic_test"
state: present
'''
RETURN = '''
create_time:
description:
- Time when the topic was created.
returned: success
type: str
display_name:
description:
- Topic display name, which is presented as the name of the email
sender in an email message. The topic display name contains a
maximum of 192 bytes.
returned: success
type: str
name:
description:
- Name of the topic to be created. The topic name is a string of 1
to 256 characters. It must contain upper- or lower-case letters,
digits, hyphens (-), and underscores C(_), and must start with a
letter or digit.
returned: success
type: str
push_policy:
description:
- Message pushing policy. 0 indicates that the message sending
fails and the message is cached in the queue. 1 indicates that
the failed message is discarded.
returned: success
type: int
topic_urn:
description:
- Resource identifier of a topic, which is unique.
returned: success
type: str
update_time:
description:
- Time when the topic was updated.
returned: success
type: str
'''
###############################################################################
# Imports
###############################################################################
from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException,
HwcModule, navigate_value,
are_different_dicts, is_empty_value,
build_path, get_region)
import re
###############################################################################
# Main
###############################################################################
def main():
    """Main function: reconcile the SMN topic with the desired state."""
    module = HwcModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'],
                       type='str'),
            display_name=dict(type='str'),
            name=dict(required=True, type='str')
        ),
        supports_check_mode=True,
    )
    config = Config(module, "smn")
    state = module.params['state']
    # Resolve the topic_urn by name when the caller did not supply an id.
    if not module.params.get("id"):
        module.params['id'] = get_resource_id(config)
    fetch = None
    link = self_link(module)
    # the link will include Nones if required format parameters are missed
    if not re.search('/None/|/None$', link):
        client = config.client(get_region(module), "smn", "project")
        fetch = fetch_resource(module, client, link)
    changed = False
    if fetch:
        # Resource exists remotely.
        if state == 'present':
            # Update only when the editable properties actually differ.
            expect = _get_resource_editable_properties(module)
            current_state = response_to_hash(module, fetch)
            current = {'display_name': current_state['display_name']}
            if are_different_dicts(expect, current):
                if not module.check_mode:
                    fetch = update(config)
                    fetch = response_to_hash(module, fetch)
                changed = True
            else:
                fetch = current_state
        else:
            if not module.check_mode:
                delete(config)
                fetch = {}
            changed = True
    else:
        # Resource does not exist remotely.
        if state == 'present':
            if not module.check_mode:
                fetch = create(config)
                fetch = response_to_hash(module, fetch)
            changed = True
        else:
            fetch = {}
    fetch.update({'changed': changed})
    module.exit_json(**fetch)
def create(config):
    """POST a new topic and return the full resource fetched by its topic_urn."""
    module = config.module
    client = config.client(get_region(module), "smn", "project")
    link = "notifications/topics"
    r = None
    try:
        r = client.post(link, create_resource_opts(module))
    except HwcClientException as ex:
        msg = ("module(hwc_smn_topic): error creating "
               "resource, error: %s" % str(ex))
        module.fail_json(msg=msg)
    # The create response only carries topic_urn; re-fetch the full resource.
    return get_resource(config, r)
def update(config):
    """PUT the editable properties and return the refreshed resource."""
    module = config.module
    client = config.client(get_region(module), "smn", "project")
    link = self_link(module)
    try:
        client.put(link, update_resource_opts(module))
    except HwcClientException as ex:
        msg = ("module(hwc_smn_topic): error updating "
               "resource, error: %s" % str(ex))
        module.fail_json(msg=msg)
    return fetch_resource(module, client, link)
def delete(config):
    """DELETE the topic addressed by self_link; fail the module on client error."""
    module = config.module
    client = config.client(get_region(module), "smn", "project")
    link = self_link(module)
    try:
        client.delete(link)
    except HwcClientException as ex:
        msg = ("module(hwc_smn_topic): error deleting "
               "resource, error: %s" % str(ex))
        module.fail_json(msg=msg)
def fetch_resource(module, client, link):
    """GET ``link`` and return the decoded body; fail the module on client error."""
    try:
        return client.get(link)
    except HwcClientException as ex:
        msg = ("module(hwc_smn_topic): error fetching "
               "resource, error: %s" % str(ex))
        module.fail_json(msg=msg)
def get_resource(config, result):
    """Fetch the full topic resource using the topic_urn found in ``result``."""
    module = config.module
    client = config.client(get_region(module), "smn", "project")
    v = ""
    try:
        v = navigate_value(result, ['topic_urn'])
    except Exception as ex:
        # Missing topic_urn in the response is unrecoverable.
        module.fail_json(msg=str(ex))
    d = {'topic_urn': v}
    url = build_path(module, 'notifications/topics/{topic_urn}', d)
    return fetch_resource(module, client, url)
def get_resource_id(config):
    """Resolve the topic_urn of the topic whose name matches module params.

    Pages through ``notifications/topics`` 10 at a time; fails the module
    when more than one topic carries the requested name. Returns None when
    no topic matches.
    """
    module = config.module
    client = config.client(get_region(module), "smn", "project")
    link = "notifications/topics"
    query_link = "?offset={offset}&limit=10"
    link += query_link
    p = {'offset': 0}
    v = module.params.get('name')
    ids = set()
    while True:
        r = None
        try:
            r = client.get(link.format(**p))
        except Exception:
            # NOTE(review): any client error silently ends the search and the
            # caller then treats the resource as absent — confirm intended.
            pass
        if r is None:
            break
        r = r.get('topics', [])
        if r == []:
            break
        for i in r:
            if i.get('name') == v:
                ids.add(i.get('topic_urn'))
            if len(ids) >= 2:
                module.fail_json(msg="Multiple resources are found")
        p['offset'] += 1
    return ids.pop() if ids else None
def self_link(module):
    """Return the API path of this module's topic (fills {id} from module params)."""
    return build_path(module, "notifications/topics/{id}")
def create_resource_opts(module):
    """Build the request body for topic creation from the module parameters."""
    params = {}
    for key in ('display_name', 'name'):
        value = module.params.get(key)
        if not is_empty_value(value):
            params[key] = value
    return params
def update_resource_opts(module):
    """Build the request body for topic update (only display_name is mutable)."""
    value = module.params.get('display_name')
    return {} if is_empty_value(value) else {"display_name": value}
def _get_resource_editable_properties(module):
return {
"display_name": module.params.get("display_name"),
}
def response_to_hash(module, response):
    """Remove unnecessary properties from the response.

    This is for doing comparisons with Ansible's current parameters.
    """
    # NOTE(review): if the response omits push_policy, the converter below
    # receives None — verify the API always returns this field.
    return {
        u'create_time': response.get(u'create_time'),
        u'display_name': response.get(u'display_name'),
        u'name': response.get(u'name'),
        u'push_policy': _push_policy_convert_from_response(
            response.get('push_policy')),
        u'topic_urn': response.get(u'topic_urn'),
        u'update_time': response.get(u'update_time')
    }
def _push_policy_convert_from_response(value):
return {
0: "the message sending fails and is cached in the queue",
1: "the failed message is discarded",
}.get(int(value))
if __name__ == '__main__':
main()
| 29.073746 | 118 | 0.56808 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
| true | true |
f7fb505ad76c416833293aff0d93b0ddfd7961d0 | 3,797 | py | Python | agentsbar/environments.py | laszukdawid/ai-traineree-client | f0b60ecbca0616ff40c5944a7da4ccbc1f0556e0 | [
"Apache-2.0"
] | null | null | null | agentsbar/environments.py | laszukdawid/ai-traineree-client | f0b60ecbca0616ff40c5944a7da4ccbc1f0556e0 | [
"Apache-2.0"
] | null | null | null | agentsbar/environments.py | laszukdawid/ai-traineree-client | f0b60ecbca0616ff40c5944a7da4ccbc1f0556e0 | [
"Apache-2.0"
] | null | null | null | from dataclasses import asdict
from typing import Any, Dict, List
from agentsbar.client import Client
from agentsbar.types import EnvironmentCreate
from agentsbar.utils import response_raise_error_if_any
ENV_PREFIX = "/environments"
def get_many(client: Client) -> List[Dict]:
    """Gets environments belonging to authenticated user.

    Parameters:
        client (Client): Authenticated client.

    Returns:
        List of environments.
    """
    response = client.get(f"{ENV_PREFIX}/")
    # Surface HTTP errors the same way every other call in this module does;
    # previously a failed response was decoded without any status check.
    response_raise_error_if_any(response)
    return response.json()
def get(client: Client, env_name: str) -> Dict:
    """Get in-depth information about a specific environment.

    Parameters:
        client (Client): Authenticated client.
        env_name (str): Name of environment.

    Returns:
        Details of an environment.

    """
    url = f"{ENV_PREFIX}/{env_name}"
    env_response = client.get(url)
    response_raise_error_if_any(env_response)
    return env_response.json()
def create(client: Client, env_create: EnvironmentCreate) -> Dict:
    """Creates an environment with specified configuration.

    Parameters:
        client (Client): Authenticated client.
        env_create (EnvironmentCreate): Configuration of an environment.

    Returns:
        Details of an environment.

    """
    # Dataclass -> plain dict so it can be sent as the request payload.
    payload = asdict(env_create)
    create_response = client.post(f'{ENV_PREFIX}/', data=payload)
    response_raise_error_if_any(create_response)
    return create_response.json()
def delete(client: Client, env_name: str) -> bool:
    """Deletes specified environment.

    Parameters:
        client (Client): Authenticated client.
        env_name (str): Name of the environment.

    Returns:
        Whether the environment was deleted. True if it was deleted,
        False if there was no such environment.

    """
    delete_response = client.delete(f'{ENV_PREFIX}/{env_name}')
    response_raise_error_if_any(delete_response)
    # 202 Accepted indicates the service scheduled the deletion.
    was_deleted = delete_response.status_code == 202
    return was_deleted
def reset(client: Client, env_name: str) -> List[float]:
    """Resets the environment to starting position.

    Parameters:
        client (Client): Authenticated client.
        env_name (str): Name of the environment.

    Returns:
        Environment state in the starting position.

    """
    url = f"{ENV_PREFIX}/{env_name}/reset"
    reset_response = client.post(url)
    response_raise_error_if_any(reset_response)
    return reset_response.json()
def step(client: Client, env_name: str, step) -> Dict[str, Any]:
    """Steps the environment based on provided data.

    Parameters:
        client (Client): Authenticated client.
        env_name (str): Name of the environment.
        step (EnvStep): Step information for the environment. Consists of
            action details and whether to commit.

    Returns:
        Environment state after taking provided actions. Consists of
        "observation", "reward", "done" and "info".

    """
    url = f"{ENV_PREFIX}/{env_name}/step"
    step_response = client.post(url, data=step)
    response_raise_error_if_any(step_response)
    return step_response.json()
def commit(client: Client, env_name: str) -> Dict[str, Any]:
    """Commits last provided data. Must be proceeded by environment `step`.

    Useful when environment requires many agents or when agent is allowed
    to make mistakes.

    Parameters:
        client (Client): Authenticated client.
        env_name (str): Name of the environment.

    Returns:
        Data about the state the environment has transitioned into.
        This should be the same as when using `step` with `commit=True`.

    """
    url = f"{ENV_PREFIX}/{env_name}/commit"
    commit_response = client.post(url)
    response_raise_error_if_any(commit_response)
    return commit_response.json()
def info(client: Client, env_name: str) -> Dict[str, Any]:
    """Return metadata for the environment from its ``/info`` endpoint.

    Parameters:
        client (Client): Authenticated client.
        env_name (str): Name of the environment.

    Returns:
        Environment metadata as returned by the service.

    """
    response = client.get(f"{ENV_PREFIX}/{env_name}/info")
    response_raise_error_if_any(response)
    return response.json()
| 29.207692 | 114 | 0.687912 | from dataclasses import asdict
from typing import Any, Dict, List
from agentsbar.client import Client
from agentsbar.types import EnvironmentCreate
from agentsbar.utils import response_raise_error_if_any
ENV_PREFIX = "/environments"
def get_many(client: Client) -> List[Dict]:
response = client.get(f"{ENV_PREFIX}/")
return response.json()
def get(client: Client, env_name: str) -> Dict:
response = client.get(f'{ENV_PREFIX}/{env_name}')
response_raise_error_if_any(response)
return response.json()
def create(client: Client, env_create: EnvironmentCreate) -> Dict:
env_create_dict = asdict(env_create)
response = client.post(f'{ENV_PREFIX}/', data=env_create_dict)
response_raise_error_if_any(response)
return response.json()
def delete(client: Client, env_name: str) -> bool:
response = client.delete(f'{ENV_PREFIX}/{env_name}')
response_raise_error_if_any(response)
return response.status_code == 202
def reset(client: Client, env_name: str) -> List[float]:
response = client.post(f"{ENV_PREFIX}/{env_name}/reset")
response_raise_error_if_any(response)
return response.json()
def step(client: Client, env_name: str, step) -> Dict[str, Any]:
response = client.post(f"{ENV_PREFIX}/{env_name}/step", data=step)
response_raise_error_if_any(response)
return response.json()
def commit(client: Client, env_name: str) -> Dict[str, Any]:
response = client.post(f"{ENV_PREFIX}/{env_name}/commit")
response_raise_error_if_any(response)
return response.json()
def info(client: Client, env_name: str) -> Dict[str, Any]:
response = client.get(f"{ENV_PREFIX}/{env_name}/info")
response_raise_error_if_any(response)
return response.json()
| true | true |
f7fb508ac353c588a4347cfd6dc0e5564b45f405 | 2,291 | py | Python | experiments/ashvin/icml2020/hand/pen/demo_awr3.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/ashvin/icml2020/hand/pen/demo_awr3.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/ashvin/icml2020/hand/pen/demo_awr3.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | """
AWR + SAC from demo experiment
"""
from rlkit.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
if __name__ == "__main__":
    # Base hyperparameter configuration shared by every run in the sweep.
    variant = dict(
        num_epochs=3000,
        num_eval_steps_per_epoch=5000,
        num_trains_per_train_loop=1000,
        num_expl_steps_per_train_loop=1000,
        min_num_steps_before_training=1000,
        max_path_length=1000,
        batch_size=256,
        replay_buffer_size=int(1E6),
        algorithm="SAC",
        version="normal",
        collection_mode='batch',
        layer_size=256,
        policy_kwargs=dict(
            hidden_sizes=[256, 256],
        ),
        trainer_kwargs=dict(
            discount=0.99,
            soft_target_tau=5e-3,
            target_update_period=1,
            policy_lr=3E-4,
            qf_lr=3E-4,
            reward_scale=1,
            beta=1,
            use_automatic_entropy_tuning=True,
            bc_num_pretrain_steps=10000,
            q_num_pretrain_steps=10000,
            policy_weight_decay=1e-4,
            bc_loss_type="mle",
        ),
        num_exps_per_instance=1,
        region='us-west-2',
        path_loader_class=DictToMDPPathLoader,
        path_loader_kwargs=dict(
            obs_key="state_observation",
            demo_path=["demos/icml2020/hand/pen.npy"],
            # demo_off_policy_path=[
            #     "ashvin/icml2020/hand/pen/demo-bc1/run5/video_*.p",
            #     "ashvin/icml2020/hand/door/demo-bc1/run4/video_*.p",
            #     "ashvin/icml2020/hand/door/demo-bc1/run5/video_*.p",
            # ],
        ),
        logger_variant=dict(
            tensorboard=True,
        ),
        load_demos=True,
        pretrain_policy=True,
        pretrain_rl=True,
    )
    # Grid of values to sweep; each combination overrides keys in `variant`
    # (dotted keys such as 'trainer_kwargs.beta' address nested dicts).
    search_space = {
        'env': ["pen-v0", ],
        'seedid': range(3),
        'trainer_kwargs.beta': [100, ],
    }
    sweeper = hyp.DeterministicHyperparameterSweeper(
        search_space, default_parameters=variant,
    )
    # Materialize every hyperparameter combination as its own variant dict.
    variants = []
    for variant in sweeper.iterate_hyperparameters():
        variants.append(variant)
    run_variants(experiment, variants, run_id=0)
| 27.939024 | 74 | 0.602794 |
from rlkit.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
if __name__ == "__main__":
variant = dict(
num_epochs=3000,
num_eval_steps_per_epoch=5000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=256,
replay_buffer_size=int(1E6),
algorithm="SAC",
version="normal",
collection_mode='batch',
layer_size=256,
policy_kwargs=dict(
hidden_sizes=[256, 256],
),
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=True,
bc_num_pretrain_steps=10000,
q_num_pretrain_steps=10000,
policy_weight_decay=1e-4,
bc_loss_type="mle",
),
num_exps_per_instance=1,
region='us-west-2',
path_loader_class=DictToMDPPathLoader,
path_loader_kwargs=dict(
obs_key="state_observation",
demo_path=["demos/icml2020/hand/pen.npy"],
),
logger_variant=dict(
tensorboard=True,
),
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
)
search_space = {
'env': ["pen-v0", ],
'seedid': range(3),
'trainer_kwargs.beta': [100, ],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, run_id=0)
| true | true |
f7fb50f279fdc0e3a371f6fba36b3162b2cfac7c | 549 | py | Python | iquhack/migrations/0002_auto_20201218_0849.py | mwalsh161/iquise-website | ab674d7881e418fe02b533ae477982e328e8fec7 | [
"MIT"
] | 1 | 2021-12-19T01:05:26.000Z | 2021-12-19T01:05:26.000Z | iquhack/migrations/0002_auto_20201218_0849.py | iQuISE/iquise-website | e6125fe938c549e020cd53a5aa718de101e972e9 | [
"MIT"
] | 16 | 2020-07-29T14:12:30.000Z | 2021-08-24T13:00:48.000Z | iquhack/migrations/0002_auto_20201218_0849.py | mwalsh161/iquise-website | ab674d7881e418fe02b533ae477982e328e8fec7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2020-12-18 13:49
from __future__ import unicode_literals
from django.db import migrations, models
import iquhack.models
class Migration(migrations.Migration):
    """Make ``Sponsor.logo`` optional and route uploads through a helper.

    Auto-generated by Django 1.11.15 (see header); alters the ``logo``
    FileField to allow blank values and use ``upload_sponsor_logo`` as the
    upload path callback.
    """
    dependencies = [
        ('iquhack', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='sponsor',
            name='logo',
            field=models.FileField(blank=True, help_text='SVG files strongly encouraged!', upload_to=iquhack.models.upload_sponsor_logo),
        ),
    ]
| 24.954545 | 137 | 0.653916 |
from __future__ import unicode_literals
from django.db import migrations, models
import iquhack.models
class Migration(migrations.Migration):
dependencies = [
('iquhack', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='sponsor',
name='logo',
field=models.FileField(blank=True, help_text='SVG files strongly encouraged!', upload_to=iquhack.models.upload_sponsor_logo),
),
]
| true | true |
f7fb510ceafae706ba62587d85df749b94bf2bd2 | 4,411 | py | Python | dev/circuitpython/examples/trellism4_wavefile_synth/trellism4_wavefile_synth.py | scripsi/picodeebee | 0ec77e92f09fa8711705623482e57a5e0b702696 | [
"MIT"
] | 7 | 2021-03-15T10:06:20.000Z | 2022-03-23T02:53:15.000Z | Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/trellism4_wavefile_synth/trellism4_wavefile_synth.py | IanSMoyes/SpiderPi | cc3469980ae87b92d0dc43c05dbd579f0fa8c4b1 | [
"Apache-2.0"
] | 5 | 2021-04-27T18:21:11.000Z | 2021-05-02T14:17:14.000Z | Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/trellism4_wavefile_synth/trellism4_wavefile_synth.py | IanSMoyes/SpiderPi | cc3469980ae87b92d0dc43c05dbd579f0fa8c4b1 | [
"Apache-2.0"
] | null | null | null | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
This synthesizer is loaded with wave files for 3 octaves of notes each in 4 different waveforms.
It uses Mixer to play up to 7 notes at once.
Play notes with the rainbow buttons. Change waveform types ith the white buttons in the last column.
"""
import board
from audiocore import WaveFile
from audioio import AudioOut
from audiomixer import Mixer
import adafruit_trellism4
# trellis helper object
trellis = adafruit_trellism4.TrellisM4Express()
# low brightness on the NeoPixels
trellis.pixels.brightness = 0.05
# each musical note letter
note_letters = ["C", "D", "E", "F", "G", "A", "B"]
# colors of the rainbow, one per note column
colors = [
    (255, 0, 0),
    (255, 127, 0),
    (255, 255, 0),
    (0, 255, 0),
    (0, 0, 255),
    (56, 43, 105),
    (139, 0, 255),
]
# dictionary holding note string to wave file value.
# e.g. {... "sined4": audioio.WaveFile(open("notes/sine/d4.wav")), ...}
notes = {}
# list of all waveform types
WAVE_TYPES = ["sine", "square", "sawtooth", "triangle"]
# current waveform type. Will get changed from the last column
current_wave_type = "sine"
# load the notes dictionary: one wave file per waveform type and note
notes = {}
for wave_type in WAVE_TYPES:
    for octave in range(3, 6):  # [3,4,5]
        for note_letter in note_letters:
            # note with octave e.g. a4
            cur_note = "{}{}".format(note_letter, octave)
            # add wave file to dictionary; the file stays open for playback
            key = "{}{}".format(wave_type, cur_note)
            notes[key] = WaveFile(
                open("notes/{}/{}.wav".format(wave_type, cur_note), "rb")
            )
# main audio object
audio = AudioOut(left_channel=board.A0, right_channel=board.A1)
# mixer to allow polyphonic playback
mixer = Mixer(
    voice_count=8,
    sample_rate=8000,
    channel_count=2,
    bits_per_sample=16,
    samples_signed=True,
)
audio.play(mixer)
# turn on the rainbow lights (3 octave rows per note column)
for i, color in enumerate(colors):
    trellis.pixels[i, 0] = color
    trellis.pixels[i, 1] = color
    trellis.pixels[i, 2] = color
# list of keys pressed on the previous iteration
prev_pressed = []
# voice recycling variables: mixer voices 1-7 are handed out per note
available_voices = [1, 2, 3, 4, 5, 6, 7]
# key to voice dictionary e.g. {... (1,2):4, (1,3):3, ...}
used_voices = {}
# waveform selector in the last column
# default to index 0 sine
trellis.pixels[7, 0] = (255, 255, 255)
while True:
    cur_keys = trellis.pressed_keys
    # Only react when the pressed-key set changed since the last pass.
    if cur_keys != prev_pressed:
        # loop over currently pressed keys
        for key in cur_keys:
            # if it's a note key. First 7 columns.
            if key[0] < len(note_letters):
                # start the note only if it is new and a mixer voice is free
                if key not in used_voices and available_voices:
                    # build note string, e.g. "sine" + "a4"
                    note_for_key = "{}{}".format(note_letters[key[0]], key[1] + 3)
                    note_to_play = "{}{}".format(current_wave_type, note_for_key)
                    # if the note exists in the notes dictionary
                    if note_to_play in notes:
                        # claim an available voice and play the note
                        voice_to_use = available_voices.pop()
                        used_voices[key] = voice_to_use
                        mixer.play(notes[note_to_play], voice=voice_to_use, loop=True)
            else:  # last column selects the waveform
                current_wave_type = WAVE_TYPES[key[1]]
                # turn off all last column pixels
                for y_pixel in range(0, 4):
                    trellis.pixels[7, y_pixel] = (0, 0, 0)
                # turn on selected
                trellis.pixels[7, key[1]] = (255, 255, 255)
    if mixer.playing:
        # Iterate over a snapshot of the keys: the original code popped from
        # used_voices while iterating it, which raises
        # "RuntimeError: dictionary changed size during iteration" the first
        # time a held note is released.
        for key in list(used_voices):
            # if the key is no longer down
            if key not in cur_keys:
                # stop playing and recycle the voice
                mixer.stop_voice(used_voices[key])
                available_voices.append(used_voices[key])
                used_voices.pop(key, None)
    # update variable for next iteration
    prev_pressed = cur_keys
| 35.572581 | 101 | 0.584901 |
import board
from audiocore import WaveFile
from audioio import AudioOut
from audiomixer import Mixer
import adafruit_trellism4
trellis = adafruit_trellism4.TrellisM4Express()
trellis.pixels.brightness = 0.05
note_letters = ["C", "D", "E", "F", "G", "A", "B"]
colors = [
(255, 0, 0),
(255, 127, 0),
(255, 255, 0),
(0, 255, 0),
(0, 0, 255),
(56, 43, 105),
(139, 0, 255),
]
notes = {}
WAVE_TYPES = ["sine", "square", "sawtooth", "triangle"]
current_wave_type = "sine"
for wave_type in WAVE_TYPES:
for octave in range(3, 6):
for note_letter in note_letters:
cur_note = "{}{}".format(note_letter, octave)
key = "{}{}".format(wave_type, cur_note)
notes[key] = WaveFile(
open("notes/{}/{}.wav".format(wave_type, cur_note), "rb")
)
audio = AudioOut(left_channel=board.A0, right_channel=board.A1)
mixer = Mixer(
voice_count=8,
sample_rate=8000,
channel_count=2,
bits_per_sample=16,
samples_signed=True,
)
audio.play(mixer)
for i, color in enumerate(colors):
trellis.pixels[i, 0] = color
trellis.pixels[i, 1] = color
trellis.pixels[i, 2] = color
prev_pressed = []
available_voices = [1, 2, 3, 4, 5, 6, 7]
used_voices = {}
trellis.pixels[7, 0] = (255, 255, 255)
while True:
cur_keys = trellis.pressed_keys
if cur_keys != prev_pressed:
for key in cur_keys:
if key[0] < len(note_letters):
# if we aren't already playing this note and we have available voice
if key not in used_voices.keys() and available_voices:
note_for_key = "{}{}".format(note_letters[key[0]], key[1] + 3)
note_to_play = "{}{}".format(current_wave_type, note_for_key)
if note_to_play in notes:
voice_to_use = available_voices.pop()
used_voices[key] = voice_to_use
mixer.play(notes[note_to_play], voice=voice_to_use, loop=True)
else:
current_wave_type = WAVE_TYPES[key[1]]
for y_pixel in range(0, 4):
trellis.pixels[7, y_pixel] = (0, 0, 0)
trellis.pixels[7, key[1]] = (255, 255, 255)
if mixer.playing:
for key in used_voices:
if key not in cur_keys:
mixer.stop_voice(used_voices[key])
available_voices.append(used_voices[key])
used_voices.pop(key, None)
prev_pressed = cur_keys
| true | true |
f7fb523d5b03c83be4d6ab80ed45d3c8fd9e55ab | 29,562 | py | Python | sdk/python/pulumi_aws/msk/outputs.py | chivandikwa/pulumi-aws | 19c08bf9dcb90544450ffa4eec7bf6751058fde2 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-11-10T16:33:40.000Z | 2021-11-10T16:33:40.000Z | sdk/python/pulumi_aws/msk/outputs.py | chivandikwa/pulumi-aws | 19c08bf9dcb90544450ffa4eec7bf6751058fde2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/msk/outputs.py | chivandikwa/pulumi-aws | 19c08bf9dcb90544450ffa4eec7bf6751058fde2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
# Explicit public API of this generated outputs module.
__all__ = [
    'ClusterBrokerNodeGroupInfo',
    'ClusterClientAuthentication',
    'ClusterClientAuthenticationSasl',
    'ClusterClientAuthenticationTls',
    'ClusterConfigurationInfo',
    'ClusterEncryptionInfo',
    'ClusterEncryptionInfoEncryptionInTransit',
    'ClusterLoggingInfo',
    'ClusterLoggingInfoBrokerLogs',
    'ClusterLoggingInfoBrokerLogsCloudwatchLogs',
    'ClusterLoggingInfoBrokerLogsFirehose',
    'ClusterLoggingInfoBrokerLogsS3',
    'ClusterOpenMonitoring',
    'ClusterOpenMonitoringPrometheus',
    'ClusterOpenMonitoringPrometheusJmxExporter',
    'ClusterOpenMonitoringPrometheusNodeExporter',
    'GetBrokerNodesNodeInfoListResult',
]
@pulumi.output_type
class ClusterBrokerNodeGroupInfo(dict):
    """
    Broker node group settings of an MSK cluster.

    Dict-style access with the API's camelCase keys logs a warning;
    prefer the snake_case property getters below.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase output keys to their snake_case property names so the
        # warning can point callers at the preferred getter.
        suggest = None
        if key == "clientSubnets":
            suggest = "client_subnets"
        elif key == "ebsVolumeSize":
            suggest = "ebs_volume_size"
        elif key == "instanceType":
            suggest = "instance_type"
        elif key == "securityGroups":
            suggest = "security_groups"
        elif key == "azDistribution":
            suggest = "az_distribution"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterBrokerNodeGroupInfo. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterBrokerNodeGroupInfo.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterBrokerNodeGroupInfo.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 client_subnets: Sequence[str],
                 ebs_volume_size: int,
                 instance_type: str,
                 security_groups: Sequence[str],
                 az_distribution: Optional[str] = None):
        """
        :param Sequence[str] client_subnets: A list of subnets to connect to in client VPC ([documentation](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#clusters-prop-brokernodegroupinfo-clientsubnets)).
        :param int ebs_volume_size: The size in GiB of the EBS volume for the data drive on each broker node.
        :param str instance_type: Specify the instance type to use for the kafka brokers, e.g. kafka.m5.large. ([Pricing info](https://aws.amazon.com/msk/pricing/))
        :param Sequence[str] security_groups: A list of the security groups to associate with the elastic network interfaces to control who can communicate with the cluster.
        :param str az_distribution: The distribution of broker nodes across availability zones ([documentation](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#clusters-model-brokerazdistribution)). Currently the only valid value is `DEFAULT`.
        """
        pulumi.set(__self__, "client_subnets", client_subnets)
        pulumi.set(__self__, "ebs_volume_size", ebs_volume_size)
        pulumi.set(__self__, "instance_type", instance_type)
        pulumi.set(__self__, "security_groups", security_groups)
        if az_distribution is not None:
            pulumi.set(__self__, "az_distribution", az_distribution)
    @property
    @pulumi.getter(name="clientSubnets")
    def client_subnets(self) -> Sequence[str]:
        """
        A list of subnets to connect to in client VPC ([documentation](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#clusters-prop-brokernodegroupinfo-clientsubnets)).
        """
        return pulumi.get(self, "client_subnets")
    @property
    @pulumi.getter(name="ebsVolumeSize")
    def ebs_volume_size(self) -> int:
        """
        The size in GiB of the EBS volume for the data drive on each broker node.
        """
        return pulumi.get(self, "ebs_volume_size")
    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """
        Specify the instance type to use for the kafka brokers, e.g. kafka.m5.large. ([Pricing info](https://aws.amazon.com/msk/pricing/))
        """
        return pulumi.get(self, "instance_type")
    @property
    @pulumi.getter(name="securityGroups")
    def security_groups(self) -> Sequence[str]:
        """
        A list of the security groups to associate with the elastic network interfaces to control who can communicate with the cluster.
        """
        return pulumi.get(self, "security_groups")
    @property
    @pulumi.getter(name="azDistribution")
    def az_distribution(self) -> Optional[str]:
        """
        The distribution of broker nodes across availability zones ([documentation](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#clusters-model-brokerazdistribution)). Currently the only valid value is `DEFAULT`.
        """
        return pulumi.get(self, "az_distribution")
@pulumi.output_type
class ClusterClientAuthentication(dict):
    """
    Client authentication configuration of an MSK cluster (SASL and/or TLS).
    """
    def __init__(__self__, *,
                 sasl: Optional['outputs.ClusterClientAuthenticationSasl'] = None,
                 tls: Optional['outputs.ClusterClientAuthenticationTls'] = None):
        """
        :param 'ClusterClientAuthenticationSaslArgs' sasl: Configuration block for specifying SASL client authentication. See below.
        :param 'ClusterClientAuthenticationTlsArgs' tls: Configuration block for specifying TLS client authentication. See below.
        """
        if sasl is not None:
            pulumi.set(__self__, "sasl", sasl)
        if tls is not None:
            pulumi.set(__self__, "tls", tls)
    @property
    @pulumi.getter
    def sasl(self) -> Optional['outputs.ClusterClientAuthenticationSasl']:
        """
        Configuration block for specifying SASL client authentication. See below.
        """
        return pulumi.get(self, "sasl")
    @property
    @pulumi.getter
    def tls(self) -> Optional['outputs.ClusterClientAuthenticationTls']:
        """
        Configuration block for specifying TLS client authentication. See below.
        """
        return pulumi.get(self, "tls")
@pulumi.output_type
class ClusterClientAuthenticationSasl(dict):
    """
    SASL client authentication flags (IAM and SCRAM) for an MSK cluster.
    """
    def __init__(__self__, *,
                 iam: Optional[bool] = None,
                 scram: Optional[bool] = None):
        """
        :param bool iam: Enables IAM client authentication. Defaults to `false`.
        :param bool scram: Enables SCRAM client authentication via AWS Secrets Manager. Defaults to `false`.
        """
        if iam is not None:
            pulumi.set(__self__, "iam", iam)
        if scram is not None:
            pulumi.set(__self__, "scram", scram)
    @property
    @pulumi.getter
    def iam(self) -> Optional[bool]:
        """
        Enables IAM client authentication. Defaults to `false`.
        """
        return pulumi.get(self, "iam")
    @property
    @pulumi.getter
    def scram(self) -> Optional[bool]:
        """
        Enables SCRAM client authentication via AWS Secrets Manager. Defaults to `false`.
        """
        return pulumi.get(self, "scram")
@pulumi.output_type
class ClusterClientAuthenticationTls(dict):
    """
    TLS client authentication configuration (ACM certificate authorities).

    Dict-style access with camelCase keys logs a warning; prefer the
    snake_case property getter.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map the camelCase output key to its snake_case property name.
        suggest = None
        if key == "certificateAuthorityArns":
            suggest = "certificate_authority_arns"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterClientAuthenticationTls. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterClientAuthenticationTls.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterClientAuthenticationTls.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 certificate_authority_arns: Optional[Sequence[str]] = None):
        """
        :param Sequence[str] certificate_authority_arns: List of ACM Certificate Authority Amazon Resource Names (ARNs).
        """
        if certificate_authority_arns is not None:
            pulumi.set(__self__, "certificate_authority_arns", certificate_authority_arns)
    @property
    @pulumi.getter(name="certificateAuthorityArns")
    def certificate_authority_arns(self) -> Optional[Sequence[str]]:
        """
        List of ACM Certificate Authority Amazon Resource Names (ARNs).
        """
        return pulumi.get(self, "certificate_authority_arns")
@pulumi.output_type
class ClusterConfigurationInfo(dict):
    """
    Reference (ARN + revision) to the MSK Configuration used by the cluster.
    """
    def __init__(__self__, *,
                 arn: str,
                 revision: int):
        """
        :param str arn: Amazon Resource Name (ARN) of the MSK Configuration to use in the cluster.
        :param int revision: Revision of the MSK Configuration to use in the cluster.
        """
        pulumi.set(__self__, "arn", arn)
        pulumi.set(__self__, "revision", revision)
    @property
    @pulumi.getter
    def arn(self) -> str:
        """
        Amazon Resource Name (ARN) of the MSK Configuration to use in the cluster.
        """
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter
    def revision(self) -> int:
        """
        Revision of the MSK Configuration to use in the cluster.
        """
        return pulumi.get(self, "revision")
@pulumi.output_type
class ClusterEncryptionInfo(dict):
    """
    Encryption settings of an MSK cluster (at rest and in transit).

    Dict-style access with camelCase keys logs a warning; prefer the
    snake_case property getters.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase output keys to their snake_case property names.
        suggest = None
        if key == "encryptionAtRestKmsKeyArn":
            suggest = "encryption_at_rest_kms_key_arn"
        elif key == "encryptionInTransit":
            suggest = "encryption_in_transit"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterEncryptionInfo. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterEncryptionInfo.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterEncryptionInfo.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 encryption_at_rest_kms_key_arn: Optional[str] = None,
                 encryption_in_transit: Optional['outputs.ClusterEncryptionInfoEncryptionInTransit'] = None):
        """
        :param str encryption_at_rest_kms_key_arn: You may specify a KMS key short ID or ARN (it will always output an ARN) to use for encrypting your data at rest. If no key is specified, an AWS managed KMS ('aws/msk' managed service) key will be used for encrypting the data at rest.
        :param 'ClusterEncryptionInfoEncryptionInTransitArgs' encryption_in_transit: Configuration block to specify encryption in transit. See below.
        """
        if encryption_at_rest_kms_key_arn is not None:
            pulumi.set(__self__, "encryption_at_rest_kms_key_arn", encryption_at_rest_kms_key_arn)
        if encryption_in_transit is not None:
            pulumi.set(__self__, "encryption_in_transit", encryption_in_transit)
    @property
    @pulumi.getter(name="encryptionAtRestKmsKeyArn")
    def encryption_at_rest_kms_key_arn(self) -> Optional[str]:
        """
        You may specify a KMS key short ID or ARN (it will always output an ARN) to use for encrypting your data at rest. If no key is specified, an AWS managed KMS ('aws/msk' managed service) key will be used for encrypting the data at rest.
        """
        return pulumi.get(self, "encryption_at_rest_kms_key_arn")
    @property
    @pulumi.getter(name="encryptionInTransit")
    def encryption_in_transit(self) -> Optional['outputs.ClusterEncryptionInfoEncryptionInTransit']:
        """
        Configuration block to specify encryption in transit. See below.
        """
        return pulumi.get(self, "encryption_in_transit")
@pulumi.output_type
class ClusterEncryptionInfoEncryptionInTransit(dict):
    """
    In-transit encryption settings (client-broker and intra-cluster).

    Dict-style access with camelCase keys logs a warning; prefer the
    snake_case property getters.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase output keys to their snake_case property names.
        suggest = None
        if key == "clientBroker":
            suggest = "client_broker"
        elif key == "inCluster":
            suggest = "in_cluster"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterEncryptionInfoEncryptionInTransit. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterEncryptionInfoEncryptionInTransit.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterEncryptionInfoEncryptionInTransit.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 client_broker: Optional[str] = None,
                 in_cluster: Optional[bool] = None):
        """
        :param str client_broker: Encryption setting for data in transit between clients and brokers. Valid values: `TLS`, `TLS_PLAINTEXT`, and `PLAINTEXT`. Default value is `TLS`.
        :param bool in_cluster: Whether data communication among broker nodes is encrypted. Default value: `true`.
        """
        if client_broker is not None:
            pulumi.set(__self__, "client_broker", client_broker)
        if in_cluster is not None:
            pulumi.set(__self__, "in_cluster", in_cluster)
    @property
    @pulumi.getter(name="clientBroker")
    def client_broker(self) -> Optional[str]:
        """
        Encryption setting for data in transit between clients and brokers. Valid values: `TLS`, `TLS_PLAINTEXT`, and `PLAINTEXT`. Default value is `TLS`.
        """
        return pulumi.get(self, "client_broker")
    @property
    @pulumi.getter(name="inCluster")
    def in_cluster(self) -> Optional[bool]:
        """
        Whether data communication among broker nodes is encrypted. Default value: `true`.
        """
        return pulumi.get(self, "in_cluster")
@pulumi.output_type
class ClusterLoggingInfo(dict):
    """
    Logging configuration of an MSK cluster (wraps the broker logs block).

    Dict-style access with camelCase keys logs a warning; prefer the
    snake_case property getter.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map the camelCase output key to its snake_case property name.
        suggest = None
        if key == "brokerLogs":
            suggest = "broker_logs"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterLoggingInfo. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterLoggingInfo.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterLoggingInfo.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 broker_logs: 'outputs.ClusterLoggingInfoBrokerLogs'):
        """
        :param 'ClusterLoggingInfoBrokerLogsArgs' broker_logs: Configuration block for Broker Logs settings for logging info. See below.
        """
        pulumi.set(__self__, "broker_logs", broker_logs)
    @property
    @pulumi.getter(name="brokerLogs")
    def broker_logs(self) -> 'outputs.ClusterLoggingInfoBrokerLogs':
        """
        Configuration block for Broker Logs settings for logging info. See below.
        """
        return pulumi.get(self, "broker_logs")
@pulumi.output_type
class ClusterLoggingInfoBrokerLogs(dict):
    """
    Broker log delivery targets: CloudWatch Logs, Kinesis Firehose and S3.

    Dict-style access with camelCase keys logs a warning; prefer the
    snake_case property getters.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map the camelCase output key to its snake_case property name.
        suggest = None
        if key == "cloudwatchLogs":
            suggest = "cloudwatch_logs"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterLoggingInfoBrokerLogs. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterLoggingInfoBrokerLogs.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterLoggingInfoBrokerLogs.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 cloudwatch_logs: Optional['outputs.ClusterLoggingInfoBrokerLogsCloudwatchLogs'] = None,
                 firehose: Optional['outputs.ClusterLoggingInfoBrokerLogsFirehose'] = None,
                 s3: Optional['outputs.ClusterLoggingInfoBrokerLogsS3'] = None):
        """
        :param 'ClusterLoggingInfoBrokerLogsCloudwatchLogsArgs' cloudwatch_logs: CloudWatch Logs delivery settings.
        :param 'ClusterLoggingInfoBrokerLogsFirehoseArgs' firehose: Kinesis Data Firehose delivery settings.
        :param 'ClusterLoggingInfoBrokerLogsS3Args' s3: S3 delivery settings.
        """
        if cloudwatch_logs is not None:
            pulumi.set(__self__, "cloudwatch_logs", cloudwatch_logs)
        if firehose is not None:
            pulumi.set(__self__, "firehose", firehose)
        if s3 is not None:
            pulumi.set(__self__, "s3", s3)
    @property
    @pulumi.getter(name="cloudwatchLogs")
    def cloudwatch_logs(self) -> Optional['outputs.ClusterLoggingInfoBrokerLogsCloudwatchLogs']:
        """
        CloudWatch Logs delivery settings, if configured.
        """
        return pulumi.get(self, "cloudwatch_logs")
    @property
    @pulumi.getter
    def firehose(self) -> Optional['outputs.ClusterLoggingInfoBrokerLogsFirehose']:
        """
        Kinesis Data Firehose delivery settings, if configured.
        """
        return pulumi.get(self, "firehose")
    @property
    @pulumi.getter
    def s3(self) -> Optional['outputs.ClusterLoggingInfoBrokerLogsS3']:
        """
        S3 delivery settings, if configured.
        """
        return pulumi.get(self, "s3")
@pulumi.output_type
class ClusterLoggingInfoBrokerLogsCloudwatchLogs(dict):
    """
    CloudWatch Logs delivery settings for broker logs.

    Dict-style access with camelCase keys logs a warning; prefer the
    snake_case property getters.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map the camelCase output key to its snake_case property name.
        suggest = None
        if key == "logGroup":
            suggest = "log_group"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterLoggingInfoBrokerLogsCloudwatchLogs. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterLoggingInfoBrokerLogsCloudwatchLogs.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterLoggingInfoBrokerLogsCloudwatchLogs.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 enabled: bool,
                 log_group: Optional[str] = None):
        """
        :param bool enabled: Indicates whether you want to enable or disable streaming broker logs to Cloudwatch Logs.
        :param str log_group: Name of the Cloudwatch Log Group to deliver logs to.
        """
        pulumi.set(__self__, "enabled", enabled)
        if log_group is not None:
            pulumi.set(__self__, "log_group", log_group)
    @property
    @pulumi.getter
    def enabled(self) -> bool:
        """
        Indicates whether you want to enable or disable streaming broker logs to Cloudwatch Logs.
        """
        return pulumi.get(self, "enabled")
    @property
    @pulumi.getter(name="logGroup")
    def log_group(self) -> Optional[str]:
        """
        Name of the Cloudwatch Log Group to deliver logs to.
        """
        return pulumi.get(self, "log_group")
@pulumi.output_type
class ClusterLoggingInfoBrokerLogsFirehose(dict):
    """Broker-log delivery settings for the Kinesis Data Firehose destination."""

    @staticmethod
    def __key_warning(key: str):
        """Warn when a deprecated camelCase dict key is used instead of the snake_case property getter."""
        suggest = None
        if key == "deliveryStream":
            suggest = "delivery_stream"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterLoggingInfoBrokerLogsFirehose. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ClusterLoggingInfoBrokerLogsFirehose.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ClusterLoggingInfoBrokerLogsFirehose.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 enabled: bool,
                 delivery_stream: Optional[str] = None):
        """
        :param bool enabled: Indicates whether you want to enable or disable streaming broker logs to Kinesis Data Firehose.
        :param str delivery_stream: Name of the Kinesis Data Firehose delivery stream to deliver logs to.
        """
        pulumi.set(__self__, "enabled", enabled)
        if delivery_stream is not None:
            pulumi.set(__self__, "delivery_stream", delivery_stream)

    @property
    @pulumi.getter
    def enabled(self) -> bool:
        """
        Indicates whether you want to enable or disable streaming broker logs to Kinesis Data Firehose.
        """
        return pulumi.get(self, "enabled")

    @property
    @pulumi.getter(name="deliveryStream")
    def delivery_stream(self) -> Optional[str]:
        """
        Name of the Kinesis Data Firehose delivery stream to deliver logs to.
        """
        return pulumi.get(self, "delivery_stream")
@pulumi.output_type
class ClusterLoggingInfoBrokerLogsS3(dict):
    """Broker-log delivery settings for the S3 destination."""

    def __init__(__self__, *,
                 enabled: bool,
                 bucket: Optional[str] = None,
                 prefix: Optional[str] = None):
        """
        :param bool enabled: Indicates whether you want to enable or disable streaming broker logs to S3.
        :param str bucket: Name of the S3 bucket to deliver logs to.
        :param str prefix: Prefix to append to the folder name.
        """
        pulumi.set(__self__, "enabled", enabled)
        if bucket is not None:
            pulumi.set(__self__, "bucket", bucket)
        if prefix is not None:
            pulumi.set(__self__, "prefix", prefix)

    @property
    @pulumi.getter
    def enabled(self) -> bool:
        """
        Indicates whether you want to enable or disable streaming broker logs to S3.
        """
        return pulumi.get(self, "enabled")

    @property
    @pulumi.getter
    def bucket(self) -> Optional[str]:
        """
        Name of the S3 bucket to deliver logs to.
        """
        return pulumi.get(self, "bucket")

    @property
    @pulumi.getter
    def prefix(self) -> Optional[str]:
        """
        Prefix to append to the folder name.
        """
        return pulumi.get(self, "prefix")
@pulumi.output_type
class ClusterOpenMonitoring(dict):
    """Open-monitoring configuration wrapper holding the Prometheus settings."""

    def __init__(self, *,
                 prometheus: 'outputs.ClusterOpenMonitoringPrometheus'):
        """
        :param 'ClusterOpenMonitoringPrometheusArgs' prometheus: Configuration block for Prometheus settings for open monitoring. See below.
        """
        pulumi.set(self, "prometheus", prometheus)

    @property
    @pulumi.getter
    def prometheus(self) -> 'outputs.ClusterOpenMonitoringPrometheus':
        """
        Configuration block for Prometheus settings for open monitoring. See below.
        """
        return pulumi.get(self, "prometheus")
@pulumi.output_type
class ClusterOpenMonitoringPrometheus(dict):
    """Prometheus open-monitoring settings (JMX and Node exporter toggles)."""

    @staticmethod
    def __key_warning(key: str):
        """Warn when a deprecated camelCase dict key is used instead of the snake_case property getter."""
        suggest = {"jmxExporter": "jmx_exporter",
                   "nodeExporter": "node_exporter"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterOpenMonitoringPrometheus. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        """Dict item lookup that first warns about deprecated camelCase keys."""
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        """dict.get that first warns about deprecated camelCase keys."""
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(self, *,
                 jmx_exporter: Optional['outputs.ClusterOpenMonitoringPrometheusJmxExporter'] = None,
                 node_exporter: Optional['outputs.ClusterOpenMonitoringPrometheusNodeExporter'] = None):
        """
        :param 'ClusterOpenMonitoringPrometheusJmxExporterArgs' jmx_exporter: Configuration block for JMX Exporter. See below.
        :param 'ClusterOpenMonitoringPrometheusNodeExporterArgs' node_exporter: Configuration block for Node Exporter. See below.
        """
        # Only record the exporters that were actually supplied.
        for attr, value in (("jmx_exporter", jmx_exporter),
                            ("node_exporter", node_exporter)):
            if value is not None:
                pulumi.set(self, attr, value)

    @property
    @pulumi.getter(name="jmxExporter")
    def jmx_exporter(self) -> Optional['outputs.ClusterOpenMonitoringPrometheusJmxExporter']:
        """
        Configuration block for JMX Exporter. See below.
        """
        return pulumi.get(self, "jmx_exporter")

    @property
    @pulumi.getter(name="nodeExporter")
    def node_exporter(self) -> Optional['outputs.ClusterOpenMonitoringPrometheusNodeExporter']:
        """
        Configuration block for Node Exporter. See below.
        """
        return pulumi.get(self, "node_exporter")
@pulumi.output_type
class ClusterOpenMonitoringPrometheusJmxExporter(dict):
    """JMX Exporter toggle for Prometheus open monitoring."""

    @staticmethod
    def __key_warning(key: str):
        """Warn when a deprecated camelCase dict key is used instead of the snake_case property getter."""
        suggest = {"enabledInBroker": "enabled_in_broker"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterOpenMonitoringPrometheusJmxExporter. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        """Dict item lookup that first warns about deprecated camelCase keys."""
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        """dict.get that first warns about deprecated camelCase keys."""
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(self, *,
                 enabled_in_broker: bool):
        """
        :param bool enabled_in_broker: Indicates whether you want to enable or disable the JMX Exporter.
        """
        pulumi.set(self, "enabled_in_broker", enabled_in_broker)

    @property
    @pulumi.getter(name="enabledInBroker")
    def enabled_in_broker(self) -> bool:
        """
        Indicates whether you want to enable or disable the JMX Exporter.
        """
        return pulumi.get(self, "enabled_in_broker")
@pulumi.output_type
class ClusterOpenMonitoringPrometheusNodeExporter(dict):
    """Node Exporter toggle for Prometheus open monitoring."""

    @staticmethod
    def __key_warning(key: str):
        """Warn when a deprecated camelCase dict key is used instead of the snake_case property getter."""
        suggest = None
        if key == "enabledInBroker":
            suggest = "enabled_in_broker"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterOpenMonitoringPrometheusNodeExporter. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ClusterOpenMonitoringPrometheusNodeExporter.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ClusterOpenMonitoringPrometheusNodeExporter.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 enabled_in_broker: bool):
        """
        :param bool enabled_in_broker: Indicates whether you want to enable or disable the Node Exporter.
        """
        pulumi.set(__self__, "enabled_in_broker", enabled_in_broker)

    @property
    @pulumi.getter(name="enabledInBroker")
    def enabled_in_broker(self) -> bool:
        """
        Indicates whether you want to enable or disable the Node Exporter.
        """
        return pulumi.get(self, "enabled_in_broker")
@pulumi.output_type
class GetBrokerNodesNodeInfoListResult(dict):
    """Describes a single broker node as returned by the getBrokerNodes data source."""

    def __init__(self, *,
                 attached_eni_id: str,
                 broker_id: float,
                 client_subnet: str,
                 client_vpc_ip_address: str,
                 endpoints: Sequence[str],
                 node_arn: str):
        """
        :param str attached_eni_id: The attached elastic network interface of the broker
        :param float broker_id: The ID of the broker
        :param str client_subnet: The client subnet to which this broker node belongs
        :param str client_vpc_ip_address: The client virtual private cloud (VPC) IP address
        :param Sequence[str] endpoints: Set of endpoints for accessing the broker. This does not include ports
        :param str node_arn: The Amazon Resource Name (ARN) of the node
        """
        # All fields are required; store each one on the output object.
        for attr, value in (("attached_eni_id", attached_eni_id),
                            ("broker_id", broker_id),
                            ("client_subnet", client_subnet),
                            ("client_vpc_ip_address", client_vpc_ip_address),
                            ("endpoints", endpoints),
                            ("node_arn", node_arn)):
            pulumi.set(self, attr, value)

    @property
    @pulumi.getter(name="attachedEniId")
    def attached_eni_id(self) -> str:
        """
        The attached elastic network interface of the broker
        """
        return pulumi.get(self, "attached_eni_id")

    @property
    @pulumi.getter(name="brokerId")
    def broker_id(self) -> float:
        """
        The ID of the broker
        """
        return pulumi.get(self, "broker_id")

    @property
    @pulumi.getter(name="clientSubnet")
    def client_subnet(self) -> str:
        """
        The client subnet to which this broker node belongs
        """
        return pulumi.get(self, "client_subnet")

    @property
    @pulumi.getter(name="clientVpcIpAddress")
    def client_vpc_ip_address(self) -> str:
        """
        The client virtual private cloud (VPC) IP address
        """
        return pulumi.get(self, "client_vpc_ip_address")

    @property
    @pulumi.getter
    def endpoints(self) -> Sequence[str]:
        """
        Set of endpoints for accessing the broker. This does not include ports
        """
        return pulumi.get(self, "endpoints")

    @property
    @pulumi.getter(name="nodeArn")
    def node_arn(self) -> str:
        """
        The Amazon Resource Name (ARN) of the node
        """
        return pulumi.get(self, "node_arn")
| 38.144516 | 286 | 0.65618 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'ClusterBrokerNodeGroupInfo',
'ClusterClientAuthentication',
'ClusterClientAuthenticationSasl',
'ClusterClientAuthenticationTls',
'ClusterConfigurationInfo',
'ClusterEncryptionInfo',
'ClusterEncryptionInfoEncryptionInTransit',
'ClusterLoggingInfo',
'ClusterLoggingInfoBrokerLogs',
'ClusterLoggingInfoBrokerLogsCloudwatchLogs',
'ClusterLoggingInfoBrokerLogsFirehose',
'ClusterLoggingInfoBrokerLogsS3',
'ClusterOpenMonitoring',
'ClusterOpenMonitoringPrometheus',
'ClusterOpenMonitoringPrometheusJmxExporter',
'ClusterOpenMonitoringPrometheusNodeExporter',
'GetBrokerNodesNodeInfoListResult',
]
@pulumi.output_type
class ClusterBrokerNodeGroupInfo(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "clientSubnets":
suggest = "client_subnets"
elif key == "ebsVolumeSize":
suggest = "ebs_volume_size"
elif key == "instanceType":
suggest = "instance_type"
elif key == "securityGroups":
suggest = "security_groups"
elif key == "azDistribution":
suggest = "az_distribution"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterBrokerNodeGroupInfo. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterBrokerNodeGroupInfo.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterBrokerNodeGroupInfo.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
client_subnets: Sequence[str],
ebs_volume_size: int,
instance_type: str,
security_groups: Sequence[str],
az_distribution: Optional[str] = None):
pulumi.set(__self__, "client_subnets", client_subnets)
pulumi.set(__self__, "ebs_volume_size", ebs_volume_size)
pulumi.set(__self__, "instance_type", instance_type)
pulumi.set(__self__, "security_groups", security_groups)
if az_distribution is not None:
pulumi.set(__self__, "az_distribution", az_distribution)
@property
@pulumi.getter(name="clientSubnets")
def client_subnets(self) -> Sequence[str]:
return pulumi.get(self, "client_subnets")
@property
@pulumi.getter(name="ebsVolumeSize")
def ebs_volume_size(self) -> int:
return pulumi.get(self, "ebs_volume_size")
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> str:
return pulumi.get(self, "instance_type")
@property
@pulumi.getter(name="securityGroups")
def security_groups(self) -> Sequence[str]:
return pulumi.get(self, "security_groups")
@property
@pulumi.getter(name="azDistribution")
def az_distribution(self) -> Optional[str]:
return pulumi.get(self, "az_distribution")
@pulumi.output_type
class ClusterClientAuthentication(dict):
def __init__(__self__, *,
sasl: Optional['outputs.ClusterClientAuthenticationSasl'] = None,
tls: Optional['outputs.ClusterClientAuthenticationTls'] = None):
if sasl is not None:
pulumi.set(__self__, "sasl", sasl)
if tls is not None:
pulumi.set(__self__, "tls", tls)
@property
@pulumi.getter
def sasl(self) -> Optional['outputs.ClusterClientAuthenticationSasl']:
return pulumi.get(self, "sasl")
@property
@pulumi.getter
def tls(self) -> Optional['outputs.ClusterClientAuthenticationTls']:
return pulumi.get(self, "tls")
@pulumi.output_type
class ClusterClientAuthenticationSasl(dict):
def __init__(__self__, *,
iam: Optional[bool] = None,
scram: Optional[bool] = None):
if iam is not None:
pulumi.set(__self__, "iam", iam)
if scram is not None:
pulumi.set(__self__, "scram", scram)
@property
@pulumi.getter
def iam(self) -> Optional[bool]:
return pulumi.get(self, "iam")
@property
@pulumi.getter
def scram(self) -> Optional[bool]:
return pulumi.get(self, "scram")
@pulumi.output_type
class ClusterClientAuthenticationTls(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "certificateAuthorityArns":
suggest = "certificate_authority_arns"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterClientAuthenticationTls. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterClientAuthenticationTls.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterClientAuthenticationTls.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
certificate_authority_arns: Optional[Sequence[str]] = None):
if certificate_authority_arns is not None:
pulumi.set(__self__, "certificate_authority_arns", certificate_authority_arns)
@property
@pulumi.getter(name="certificateAuthorityArns")
def certificate_authority_arns(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "certificate_authority_arns")
@pulumi.output_type
class ClusterConfigurationInfo(dict):
def __init__(__self__, *,
arn: str,
revision: int):
pulumi.set(__self__, "arn", arn)
pulumi.set(__self__, "revision", revision)
@property
@pulumi.getter
def arn(self) -> str:
return pulumi.get(self, "arn")
@property
@pulumi.getter
def revision(self) -> int:
return pulumi.get(self, "revision")
@pulumi.output_type
class ClusterEncryptionInfo(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "encryptionAtRestKmsKeyArn":
suggest = "encryption_at_rest_kms_key_arn"
elif key == "encryptionInTransit":
suggest = "encryption_in_transit"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterEncryptionInfo. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterEncryptionInfo.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterEncryptionInfo.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
encryption_at_rest_kms_key_arn: Optional[str] = None,
encryption_in_transit: Optional['outputs.ClusterEncryptionInfoEncryptionInTransit'] = None):
if encryption_at_rest_kms_key_arn is not None:
pulumi.set(__self__, "encryption_at_rest_kms_key_arn", encryption_at_rest_kms_key_arn)
if encryption_in_transit is not None:
pulumi.set(__self__, "encryption_in_transit", encryption_in_transit)
@property
@pulumi.getter(name="encryptionAtRestKmsKeyArn")
def encryption_at_rest_kms_key_arn(self) -> Optional[str]:
return pulumi.get(self, "encryption_at_rest_kms_key_arn")
@property
@pulumi.getter(name="encryptionInTransit")
def encryption_in_transit(self) -> Optional['outputs.ClusterEncryptionInfoEncryptionInTransit']:
return pulumi.get(self, "encryption_in_transit")
@pulumi.output_type
class ClusterEncryptionInfoEncryptionInTransit(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "clientBroker":
suggest = "client_broker"
elif key == "inCluster":
suggest = "in_cluster"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterEncryptionInfoEncryptionInTransit. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterEncryptionInfoEncryptionInTransit.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterEncryptionInfoEncryptionInTransit.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
client_broker: Optional[str] = None,
in_cluster: Optional[bool] = None):
if client_broker is not None:
pulumi.set(__self__, "client_broker", client_broker)
if in_cluster is not None:
pulumi.set(__self__, "in_cluster", in_cluster)
@property
@pulumi.getter(name="clientBroker")
def client_broker(self) -> Optional[str]:
return pulumi.get(self, "client_broker")
@property
@pulumi.getter(name="inCluster")
def in_cluster(self) -> Optional[bool]:
return pulumi.get(self, "in_cluster")
@pulumi.output_type
class ClusterLoggingInfo(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "brokerLogs":
suggest = "broker_logs"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterLoggingInfo. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterLoggingInfo.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterLoggingInfo.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
broker_logs: 'outputs.ClusterLoggingInfoBrokerLogs'):
pulumi.set(__self__, "broker_logs", broker_logs)
@property
@pulumi.getter(name="brokerLogs")
def broker_logs(self) -> 'outputs.ClusterLoggingInfoBrokerLogs':
return pulumi.get(self, "broker_logs")
@pulumi.output_type
class ClusterLoggingInfoBrokerLogs(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "cloudwatchLogs":
suggest = "cloudwatch_logs"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterLoggingInfoBrokerLogs. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterLoggingInfoBrokerLogs.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterLoggingInfoBrokerLogs.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
cloudwatch_logs: Optional['outputs.ClusterLoggingInfoBrokerLogsCloudwatchLogs'] = None,
firehose: Optional['outputs.ClusterLoggingInfoBrokerLogsFirehose'] = None,
s3: Optional['outputs.ClusterLoggingInfoBrokerLogsS3'] = None):
if cloudwatch_logs is not None:
pulumi.set(__self__, "cloudwatch_logs", cloudwatch_logs)
if firehose is not None:
pulumi.set(__self__, "firehose", firehose)
if s3 is not None:
pulumi.set(__self__, "s3", s3)
@property
@pulumi.getter(name="cloudwatchLogs")
def cloudwatch_logs(self) -> Optional['outputs.ClusterLoggingInfoBrokerLogsCloudwatchLogs']:
return pulumi.get(self, "cloudwatch_logs")
@property
@pulumi.getter
def firehose(self) -> Optional['outputs.ClusterLoggingInfoBrokerLogsFirehose']:
return pulumi.get(self, "firehose")
@property
@pulumi.getter
def s3(self) -> Optional['outputs.ClusterLoggingInfoBrokerLogsS3']:
return pulumi.get(self, "s3")
@pulumi.output_type
class ClusterLoggingInfoBrokerLogsCloudwatchLogs(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "logGroup":
suggest = "log_group"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterLoggingInfoBrokerLogsCloudwatchLogs. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterLoggingInfoBrokerLogsCloudwatchLogs.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterLoggingInfoBrokerLogsCloudwatchLogs.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
enabled: bool,
log_group: Optional[str] = None):
pulumi.set(__self__, "enabled", enabled)
if log_group is not None:
pulumi.set(__self__, "log_group", log_group)
@property
@pulumi.getter
def enabled(self) -> bool:
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="logGroup")
def log_group(self) -> Optional[str]:
return pulumi.get(self, "log_group")
@pulumi.output_type
class ClusterLoggingInfoBrokerLogsFirehose(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "deliveryStream":
suggest = "delivery_stream"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterLoggingInfoBrokerLogsFirehose. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterLoggingInfoBrokerLogsFirehose.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterLoggingInfoBrokerLogsFirehose.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
enabled: bool,
delivery_stream: Optional[str] = None):
pulumi.set(__self__, "enabled", enabled)
if delivery_stream is not None:
pulumi.set(__self__, "delivery_stream", delivery_stream)
@property
@pulumi.getter
def enabled(self) -> bool:
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="deliveryStream")
def delivery_stream(self) -> Optional[str]:
return pulumi.get(self, "delivery_stream")
@pulumi.output_type
class ClusterLoggingInfoBrokerLogsS3(dict):
def __init__(__self__, *,
enabled: bool,
bucket: Optional[str] = None,
prefix: Optional[str] = None):
pulumi.set(__self__, "enabled", enabled)
if bucket is not None:
pulumi.set(__self__, "bucket", bucket)
if prefix is not None:
pulumi.set(__self__, "prefix", prefix)
@property
@pulumi.getter
def enabled(self) -> bool:
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def bucket(self) -> Optional[str]:
return pulumi.get(self, "bucket")
@property
@pulumi.getter
def prefix(self) -> Optional[str]:
return pulumi.get(self, "prefix")
@pulumi.output_type
class ClusterOpenMonitoring(dict):
def __init__(__self__, *,
prometheus: 'outputs.ClusterOpenMonitoringPrometheus'):
pulumi.set(__self__, "prometheus", prometheus)
@property
@pulumi.getter
def prometheus(self) -> 'outputs.ClusterOpenMonitoringPrometheus':
return pulumi.get(self, "prometheus")
@pulumi.output_type
class ClusterOpenMonitoringPrometheus(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "jmxExporter":
suggest = "jmx_exporter"
elif key == "nodeExporter":
suggest = "node_exporter"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterOpenMonitoringPrometheus. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterOpenMonitoringPrometheus.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterOpenMonitoringPrometheus.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
jmx_exporter: Optional['outputs.ClusterOpenMonitoringPrometheusJmxExporter'] = None,
node_exporter: Optional['outputs.ClusterOpenMonitoringPrometheusNodeExporter'] = None):
if jmx_exporter is not None:
pulumi.set(__self__, "jmx_exporter", jmx_exporter)
if node_exporter is not None:
pulumi.set(__self__, "node_exporter", node_exporter)
@property
@pulumi.getter(name="jmxExporter")
def jmx_exporter(self) -> Optional['outputs.ClusterOpenMonitoringPrometheusJmxExporter']:
return pulumi.get(self, "jmx_exporter")
@property
@pulumi.getter(name="nodeExporter")
def node_exporter(self) -> Optional['outputs.ClusterOpenMonitoringPrometheusNodeExporter']:
return pulumi.get(self, "node_exporter")
@pulumi.output_type
class ClusterOpenMonitoringPrometheusJmxExporter(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "enabledInBroker":
suggest = "enabled_in_broker"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterOpenMonitoringPrometheusJmxExporter. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterOpenMonitoringPrometheusJmxExporter.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterOpenMonitoringPrometheusJmxExporter.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
enabled_in_broker: bool):
pulumi.set(__self__, "enabled_in_broker", enabled_in_broker)
@property
@pulumi.getter(name="enabledInBroker")
def enabled_in_broker(self) -> bool:
return pulumi.get(self, "enabled_in_broker")
@pulumi.output_type
class ClusterOpenMonitoringPrometheusNodeExporter(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "enabledInBroker":
suggest = "enabled_in_broker"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterOpenMonitoringPrometheusNodeExporter. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterOpenMonitoringPrometheusNodeExporter.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterOpenMonitoringPrometheusNodeExporter.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
enabled_in_broker: bool):
pulumi.set(__self__, "enabled_in_broker", enabled_in_broker)
@property
@pulumi.getter(name="enabledInBroker")
def enabled_in_broker(self) -> bool:
return pulumi.get(self, "enabled_in_broker")
@pulumi.output_type
class GetBrokerNodesNodeInfoListResult(dict):
def __init__(__self__, *,
attached_eni_id: str,
broker_id: float,
client_subnet: str,
client_vpc_ip_address: str,
endpoints: Sequence[str],
node_arn: str):
pulumi.set(__self__, "attached_eni_id", attached_eni_id)
pulumi.set(__self__, "broker_id", broker_id)
pulumi.set(__self__, "client_subnet", client_subnet)
pulumi.set(__self__, "client_vpc_ip_address", client_vpc_ip_address)
pulumi.set(__self__, "endpoints", endpoints)
pulumi.set(__self__, "node_arn", node_arn)
@property
@pulumi.getter(name="attachedEniId")
def attached_eni_id(self) -> str:
return pulumi.get(self, "attached_eni_id")
@property
@pulumi.getter(name="brokerId")
def broker_id(self) -> float:
return pulumi.get(self, "broker_id")
@property
@pulumi.getter(name="clientSubnet")
def client_subnet(self) -> str:
return pulumi.get(self, "client_subnet")
@property
@pulumi.getter(name="clientVpcIpAddress")
def client_vpc_ip_address(self) -> str:
return pulumi.get(self, "client_vpc_ip_address")
@property
@pulumi.getter
def endpoints(self) -> Sequence[str]:
return pulumi.get(self, "endpoints")
@property
@pulumi.getter(name="nodeArn")
def node_arn(self) -> str:
return pulumi.get(self, "node_arn")
| true | true |
f7fb52da2bf41b87bf5e95e2cb604acacdc7ca3b | 2,198 | py | Python | aliyun-python-sdk-ccc/aliyunsdkccc/request/v20170705/AssignUsersRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | 1 | 2019-12-23T12:36:43.000Z | 2019-12-23T12:36:43.000Z | aliyun-python-sdk-ccc/aliyunsdkccc/request/v20170705/AssignUsersRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-ccc/aliyunsdkccc/request/v20170705/AssignUsersRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | 1 | 2021-02-23T11:27:54.000Z | 2021-02-23T11:27:54.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class AssignUsersRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'CCC', '2017-07-05', 'AssignUsers','ccc')
def get_UserRamIds(self):
return self.get_query_params().get('UserRamIds')
def set_UserRamIds(self,UserRamIds):
for i in range(len(UserRamIds)):
if UserRamIds[i] is not None:
self.add_query_param('UserRamId.' + str(i + 1) , UserRamIds[i]);
def get_SkillLevels(self):
return self.get_query_params().get('SkillLevels')
def set_SkillLevels(self,SkillLevels):
for i in range(len(SkillLevels)):
if SkillLevels[i] is not None:
self.add_query_param('SkillLevel.' + str(i + 1) , SkillLevels[i]);
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_RoleIds(self):
return self.get_query_params().get('RoleIds')
def set_RoleIds(self,RoleIds):
for i in range(len(RoleIds)):
if RoleIds[i] is not None:
self.add_query_param('RoleId.' + str(i + 1) , RoleIds[i]);
def get_SkillGroupIds(self):
return self.get_query_params().get('SkillGroupIds')
def set_SkillGroupIds(self,SkillGroupIds):
for i in range(len(SkillGroupIds)):
if SkillGroupIds[i] is not None:
self.add_query_param('SkillGroupId.' + str(i + 1) , SkillGroupIds[i]); | 35.451613 | 74 | 0.729754 |
from aliyunsdkcore.request import RpcRequest
class AssignUsersRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'CCC', '2017-07-05', 'AssignUsers','ccc')
def get_UserRamIds(self):
return self.get_query_params().get('UserRamIds')
def set_UserRamIds(self,UserRamIds):
for i in range(len(UserRamIds)):
if UserRamIds[i] is not None:
self.add_query_param('UserRamId.' + str(i + 1) , UserRamIds[i]);
def get_SkillLevels(self):
return self.get_query_params().get('SkillLevels')
def set_SkillLevels(self,SkillLevels):
for i in range(len(SkillLevels)):
if SkillLevels[i] is not None:
self.add_query_param('SkillLevel.' + str(i + 1) , SkillLevels[i]);
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_RoleIds(self):
return self.get_query_params().get('RoleIds')
def set_RoleIds(self,RoleIds):
for i in range(len(RoleIds)):
if RoleIds[i] is not None:
self.add_query_param('RoleId.' + str(i + 1) , RoleIds[i]);
def get_SkillGroupIds(self):
return self.get_query_params().get('SkillGroupIds')
def set_SkillGroupIds(self,SkillGroupIds):
for i in range(len(SkillGroupIds)):
if SkillGroupIds[i] is not None:
self.add_query_param('SkillGroupId.' + str(i + 1) , SkillGroupIds[i]); | true | true |
f7fb5330e15c47936ba1adcbcb8963f8a89a8cf9 | 135 | py | Python | nanosound_oled/amp_power.py | divanikus/Nanomesher_NanoSound | e533e165b16a72b07a680cbc180854e544cbeee1 | [
"Apache-2.0"
] | 21 | 2017-10-03T12:28:03.000Z | 2022-03-07T00:16:52.000Z | nanosound_oled/amp_power.py | divanikus/Nanomesher_NanoSound | e533e165b16a72b07a680cbc180854e544cbeee1 | [
"Apache-2.0"
] | 11 | 2019-01-07T13:36:00.000Z | 2020-10-16T16:58:59.000Z | nanosound_oled/amp_power.py | divanikus/Nanomesher_NanoSound | e533e165b16a72b07a680cbc180854e544cbeee1 | [
"Apache-2.0"
] | 18 | 2017-10-24T01:56:24.000Z | 2022-01-31T19:35:06.000Z | import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(27,GPIO.OUT)
GPIO.output(27,GPIO.HIGH)
| 15 | 25 | 0.777778 | import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(27,GPIO.OUT)
GPIO.output(27,GPIO.HIGH)
| true | true |
f7fb541837fd69348b53851dc9c26d7d29e24326 | 19,355 | py | Python | uploadstreaks/uploadstreaks.py | trumoose/OB13-Cogs | 07d37bd93e03fc66f044a5083475eafe397cbd2f | [
"MIT"
] | null | null | null | uploadstreaks/uploadstreaks.py | trumoose/OB13-Cogs | 07d37bd93e03fc66f044a5083475eafe397cbd2f | [
"MIT"
] | null | null | null | uploadstreaks/uploadstreaks.py | trumoose/OB13-Cogs | 07d37bd93e03fc66f044a5083475eafe397cbd2f | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2021-present Obi-Wan3
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import typing
from datetime import datetime, timedelta, timezone
import discord
from redbot.core import commands, Config, bank
from redbot.core.utils.chat_formatting import humanize_list
class UploadStreaks(commands.Cog):
    """
    Streaks & Points for Uploads

    A leaderboard with points and streaks for uploading attachments in specific channels per interval of time.

    Per-challenge user records are stored as a tuple: (points, streak, last_upload_timestamp).
    """

    def __init__(self, bot):
        self.bot = bot
        self.config = Config.get_conf(self, identifier=14000605, force_registration=True)
        default_guild = {
            "challenges": {}
        }
        self.config.register_guild(**default_guild)

    @commands.Cog.listener("on_message")
    async def _message_listener(self, message: discord.Message):
        """Award points/streaks (and optional credits) for attachment uploads in challenge channels."""

        # Ignore these messages
        if (
                not message.guild or  # Message not in a guild
                await self.bot.cog_disabled_in_guild(self, message.guild) or  # Cog disabled in guild
                message.author.bot or  # Message author is a bot
                not message.attachments  # There are no attachments in this message
        ):
            return

        async with self.config.guild(message.guild).challenges() as settings:
            for challenge in settings.values():
                if (
                        not challenge['active'] or  # Challenge not active
                        message.channel.id not in challenge['channels'] or  # Message not in challenge channel
                        (challenge['role'] and challenge['role'] not in [r.id for r in message.author.roles]) or  # Author does not have role
                        datetime.utcfromtimestamp(challenge['interval'][1]) > datetime.utcnow()  # Challenge not started
                ):
                    continue

                orig = challenge['users'].get(str(message.author.id))
                if orig:
                    # interval tuple is (length_in_days, start_timestamp, utc_day_start_hour)
                    interval_before = (datetime.utcnow() - timedelta(days=challenge['interval'][0])).replace(microsecond=0, second=0, minute=0, hour=challenge['interval'][2], tzinfo=timezone.utc).timestamp()
                    interval_start = datetime.utcnow().replace(microsecond=0, second=0, minute=0, hour=challenge['interval'][2], tzinfo=timezone.utc).timestamp()
                    interval_end = (datetime.utcnow() + timedelta(days=challenge['interval'][0])).replace(microsecond=0, second=0, minute=0, hour=challenge['interval'][2], tzinfo=timezone.utc).timestamp()

                    # Last entry was also in this interval: only refresh the timestamp
                    if interval_start <= challenge['users'][str(message.author.id)][2] <= interval_end:
                        challenge['users'][str(message.author.id)] = (orig[0], orig[1], message.created_at.timestamp())
                        continue

                    # Streak continued
                    if interval_before <= challenge['users'][str(message.author.id)][2] <= interval_start:
                        challenge['users'][str(message.author.id)] = (orig[0]+1, orig[1]+1, message.created_at.timestamp())

                    # Streak restarted
                    else:
                        challenge['users'][str(message.author.id)] = (orig[0]+1, 1, message.created_at.timestamp())
                else:
                    challenge['users'][str(message.author.id)] = (1, 1, message.created_at.timestamp())

                if challenge['credits'] > 0:
                    await bank.deposit_credits(message.author, challenge['credits'])

    @commands.guild_only()
    @commands.group(name="uploadstreaks")
    async def _upload_streaks(self, ctx: commands.Context):
        """UploadStreaks Settings"""

    @commands.bot_has_permissions(embed_links=True)
    @_upload_streaks.command(name="list")
    async def _list(self, ctx: commands.Context):
        """List the current UploadStreaks challenges."""
        settings = await self.config.guild(ctx.guild).challenges()
        embed = discord.Embed(title=f"UploadStreaks Challenges", color=await ctx.embed_color())
        if not settings:
            embed.description = "No UploadStreaks Challenges Found"
        else:
            embed.description = ""
            for count, name in enumerate(settings.keys()):
                # FIX: newline after each entry so challenges are listed one per
                # line (previously they were all concatenated on a single line).
                embed.description += f"**{count+1}.** {name}\n"
        return await ctx.send(embed=embed)

    @commands.bot_has_permissions(embed_links=True)
    @_upload_streaks.command(name="leaderboard", aliases=['ldb'])
    async def _leaderboard(self, ctx: commands.Context, challenge: str, num=10):
        """See the current UploadStreaks leaderboard for a challenge."""
        settings = await self.config.guild(ctx.guild).challenges()
        if challenge not in settings.keys():
            return await ctx.send("No challenge was found with that name.")
        embed = discord.Embed(title=f"UploadStreaks Challenge `{challenge}`", color=await ctx.embed_color())
        if not settings[challenge]['users']:
            embed.description = "No users have participated in this challenge yet."
        else:
            embed.description = "```Streak Points User\n"
            # Sort by streak (index 1 of the (points, streak, last_ts) record), descending.
            ldb = sorted(settings[challenge]['users'].items(), key=lambda x: x[1][1], reverse=True)
            for i in range(min(num, len(ldb))):
                member = ctx.guild.get_member(int(ldb[i][0]))
                if member:
                    name = member.display_name
                else:
                    try:
                        name = (await self.bot.fetch_user(int(ldb[i][0]))).name
                    except discord.HTTPException:
                        continue
                embed.description += f"{(str(ldb[i][1][1])+settings[challenge]['streak']).center(6)} {str(ldb[i][1][0]).center(6)} {name}\n"
            embed.description += "```"
        return await ctx.send(embed=embed)

    @commands.bot_has_permissions(embed_links=True)
    @_upload_streaks.command(name="user")
    async def _user(self, ctx: commands.Context, user: discord.Member):
        """See a user's UploadStreaks points."""
        settings = await self.config.guild(ctx.guild).challenges()
        embed = discord.Embed(title=f"UploadStreaks Info for {user.display_name}", color=await ctx.embed_color())
        if not settings:
            embed.description = "No UploadStreaks Challenges Found"
        else:
            for name, challenge in settings.items():
                u = challenge['users'].get(str(user.id))
                if u:
                    embed.add_field(name=f"Challenge `{name}`", inline=False, value=f"Points: {u[0]}\nStreak: {u[1]}{challenge['streak']}")
            if not embed.fields:
                embed.description = "This user has not participated in any UploadStreaks challenges."
        return await ctx.send(embed=embed)

    @commands.admin_or_permissions(administrator=True)
    @_upload_streaks.group(name="settings")
    async def _settings(self, ctx: commands.Context):
        """UploadStreaks Settings"""

    @_settings.command(name="new")
    async def _settings_new(self, ctx: commands.Context, challenge: str, streak_name: str, interval: int, utc_day_start: int, credits: typing.Optional[int] = 0, role: typing.Optional[discord.Role] = None, *channels: discord.TextChannel):
        """
        Start a new UploadStreaks challenge. See below for parameters:

        `challenge`: the name of the challenge
        `streak_name`: the name of the streak (e.g. `d` for days)
        `interval`: a number representing the length in days for each interval (e.g. `5`)
        `utc_day_start`: a number representing the UTC hour to start the day on (e.g. `2` or `23`)
        `credits`: the amount of credits to be awarded to a user on post (optional, default 0)
        `role`: the role to automatically detect challenge entries from (leave empty for everyone)
        `channels`: the channels to listen in for entries
        """

        # Test utc_day_start
        if not(0 <= utc_day_start < 24):
            return await ctx.send(f"`{utc_day_start}` is not a valid hour (in 24-hr format)!")

        # Convert interval
        if interval <= 0:
            return await ctx.send(f"`{interval}` is not a positive integer!")
        # Start the challenge at the next occurrence of the configured UTC day-start hour.
        if datetime.utcnow().hour < utc_day_start:
            ts = datetime.utcnow().replace(microsecond=0, second=0, minute=0, hour=utc_day_start, tzinfo=timezone.utc).timestamp()
        else:
            ts = (datetime.utcnow() + timedelta(days=1)).replace(microsecond=0, second=0, minute=0, hour=utc_day_start, tzinfo=timezone.utc).timestamp()

        # Test credit amount
        if credits < 0:
            return await ctx.send("The amount of credits must be a positive integer!")

        async with self.config.guild(ctx.guild).challenges() as challenges:
            challenges[challenge] = {
                "active": True,
                "streak": streak_name,
                "interval": (interval, ts, utc_day_start),
                "credits": credits,
                "role": role.id if role else None,
                "channels": [c.id for c in channels],
                "users": {}
            }

        starts_in = datetime.utcfromtimestamp(ts) - datetime.utcnow()
        return await ctx.send(f"A new challenge `{challenge}` was successfully added! If you want to edit anything, use `{ctx.clean_prefix}uploadstreaks settings edit`. The challenge will start in {starts_in.seconds//3600} hrs {(starts_in.seconds//60)%60} mins at {datetime.utcfromtimestamp(ts)} UTC.")

    @_settings.command(name="toggle")
    async def _settings_toggle(self, ctx: commands.Context, challenge_name: str, true_or_false: bool):
        """
        Toggle whether an UploadStreaks challenge is active.

        **Warning:** this *may* break users' streaks if a challenge is toggled off for longer than the interval.
        """
        async with self.config.guild(ctx.guild).challenges() as challenges:
            if challenge_name not in challenges.keys():
                return await ctx.send("There was no UploadStreaks challenge found with that name!")
            challenges[challenge_name]["active"] = true_or_false
        return await ctx.tick()

    @_settings.command(name="reset")
    async def _settings_reset(self, ctx: commands.Context, challenge_name: str, enter_true_to_confirm: bool):
        """Reset all streaks & points of an UploadStreaks challenge."""
        if not enter_true_to_confirm:
            return await ctx.send("Please provide `true` as the parameter to confirm.")
        async with self.config.guild(ctx.guild).challenges() as challenges:
            if challenge_name not in challenges.keys():
                return await ctx.send("There was no UploadStreaks challenge found with that name!")
            challenges[challenge_name]["users"] = {}
        return await ctx.tick()

    @_settings.command(name="delete")
    async def _settings_delete(self, ctx: commands.Context, challenge_name: str, enter_true_to_confirm: bool):
        """Delete an UploadStreaks challenge."""
        if not enter_true_to_confirm:
            return await ctx.send("Please provide `true` as the parameter to confirm.")
        async with self.config.guild(ctx.guild).challenges() as challenges:
            if challenge_name not in challenges.keys():
                return await ctx.send("There was no UploadStreaks challenge found with that name!")
            del challenges[challenge_name]
        return await ctx.tick()

    @_settings.group(name="edit")
    async def _settings_edit(self, ctx: commands.Context):
        """Edit an UploadStreaks Challenge"""

    @_settings_edit.command(name="streakname")
    async def _settings_edit_streak_name(self, ctx: commands.Context, challenge_name: str, streak_name: str):
        """Edit the name of the streak for an UploadStreaks challenge."""
        async with self.config.guild(ctx.guild).challenges() as challenges:
            if challenge_name not in challenges.keys():
                return await ctx.send("There was no UploadStreaks challenge found with that name!")
            challenges[challenge_name]["streak"] = streak_name
        return await ctx.tick()

    @_settings_edit.command(name="interval")
    async def _settings_edit_interval(self, ctx: commands.Context, challenge_name: str, interval: int, utc_day_start: int):
        """Edit the interval of an UploadStreaks challenge."""

        # Convert interval
        if interval <= 0:
            return await ctx.send(f"`{interval}` is not a positive integer!")

        # Test utc_day_start
        if not (0 <= utc_day_start < 24):
            return await ctx.send(f"`{utc_day_start}` is not a valid hour (in 24-hr format)!")

        async with self.config.guild(ctx.guild).challenges() as challenges:
            if challenge_name not in challenges.keys():
                return await ctx.send("There was no UploadStreaks challenge found with that name!")
            # Keep the original start timestamp (index 1) unchanged.
            challenges[challenge_name]["interval"] = (interval, challenges[challenge_name]["interval"][1], utc_day_start)
        return await ctx.tick()

    @_settings_edit.command(name="credits")
    async def _settings_edit_credits(self, ctx: commands.Context, challenge_name: str, credits: int):
        """Edit the awarded credits of an UploadStreaks challenge."""
        if credits < 0:
            return await ctx.send("The amount of credits must be a positive integer!")
        async with self.config.guild(ctx.guild).challenges() as challenges:
            if challenge_name not in challenges.keys():
                return await ctx.send("There was no UploadStreaks challenge found with that name!")
            challenges[challenge_name]["credits"] = credits
        return await ctx.tick()

    @_settings_edit.command(name="role")
    async def _settings_edit_role(self, ctx: commands.Context, challenge_name: str, role: discord.Role = None):
        """Edit the role of an UploadStreaks challenge (leave empty for everyone)."""
        async with self.config.guild(ctx.guild).challenges() as challenges:
            if challenge_name not in challenges.keys():
                return await ctx.send("There was no UploadStreaks challenge found with that name!")
            challenges[challenge_name]["role"] = role.id if role else None
        return await ctx.tick()

    @_settings_edit.command(name="channels")
    async def _settings_edit_channels(self, ctx: commands.Context, challenge_name: str, *channels: discord.TextChannel):
        """Edit the channels of an UploadStreaks challenge."""
        async with self.config.guild(ctx.guild).challenges() as challenges:
            if challenge_name not in challenges.keys():
                return await ctx.send("There was no UploadStreaks challenge found with that name!")
            challenges[challenge_name]["channels"] = [c.id for c in channels]
        return await ctx.tick()

    @_settings.group(name="set")
    async def _settings_set(self, ctx: commands.Context):
        """Manually Set User Streaks & Points"""

    @_settings_set.command(name="points")
    async def _settings_set_points(self, ctx: commands.Context, user: discord.Member, challenge_name: str, points: int):
        """Manually set a user's points in an UploadStreaks challenge."""
        if points < 1:
            return await ctx.send("The points must be at least `1`!")
        async with self.config.guild(ctx.guild).challenges() as challenges:
            if challenge_name not in challenges.keys():
                return await ctx.send("There was no UploadStreaks challenge found with that name!")
            orig = challenges[challenge_name]['users'].get(str(user.id))
            if not orig:
                return await ctx.send("That user has not participated in the challenge yet!")
            challenges[challenge_name]['users'][str(user.id)] = (points, orig[1], orig[2])
        return await ctx.tick()

    @_settings_set.command(name="streak")
    async def _settings_set_streak(self, ctx: commands.Context, user: discord.Member, challenge_name: str, streak: int):
        """Manually set a user's streak in an UploadStreaks challenge."""
        if streak < 1:
            return await ctx.send("The streak must be at least `1`!")
        async with self.config.guild(ctx.guild).challenges() as challenges:
            if challenge_name not in challenges.keys():
                return await ctx.send("There was no UploadStreaks challenge found with that name!")
            orig = challenges[challenge_name]['users'].get(str(user.id))
            if not orig:
                return await ctx.send("That user has not participated in the challenge yet!")
            challenges[challenge_name]['users'][str(user.id)] = (orig[0], streak, orig[2])
        return await ctx.tick()

    @commands.bot_has_permissions(embed_links=True)
    @_settings.command(name="view")
    async def _settings_view(self, ctx: commands.Context):
        """View the settings of UploadStreaks challenges in this server."""
        settings = await self.config.guild(ctx.guild).challenges()
        embed = discord.Embed(title="UploadStreaks Settings", color=await ctx.embed_color())
        if not settings:
            embed.description = "No UploadStreaks Challenges Found"
        else:
            for name, challenge in settings.items():
                channels = []
                for c in challenge['channels']:
                    if ch := ctx.guild.get_channel(c):
                        channels.append(ch.mention)
                # FIX: the **Credits:** line previously displayed challenge['streak']
                # (the streak suffix) instead of the configured credit amount.
                embed.add_field(
                    name=f"Challenge `{name}`",
                    inline=False,
                    value=f"""
                    **Active:** {challenge['active']}
                    **Streak Name:** {challenge['streak']}
                    **Interval:** {challenge['interval'][0]} days (started on {datetime.utcfromtimestamp(challenge['interval'][1])})
                    **Credits:** {challenge['credits']}
                    **Role:** {ctx.guild.get_role(challenge['role']).mention if challenge['role'] and ctx.guild.get_role(challenge['role']) else None }
                    **Channels:** {humanize_list(channels)}
                    """
                )
        return await ctx.send(embed=embed)
| 47.672414 | 302 | 0.637251 |
import typing
from datetime import datetime, timedelta, timezone
import discord
from redbot.core import commands, Config, bank
from redbot.core.utils.chat_formatting import humanize_list
class UploadStreaks(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, identifier=14000605, force_registration=True)
default_guild = {
"challenges": {}
}
self.config.register_guild(**default_guild)
@commands.Cog.listener("on_message")
async def _message_listener(self, message: discord.Message):
if (
not message.guild or
await self.bot.cog_disabled_in_guild(self, message.guild) or
message.author.bot or
not message.attachments
):
return
async with self.config.guild(message.guild).challenges() as settings:
for challenge in settings.values():
if (
not challenge['active'] or
message.channel.id not in challenge['channels'] or
(challenge['role'] and challenge['role'] not in [r.id for r in message.author.roles]) or
datetime.utcfromtimestamp(challenge['interval'][1]) > datetime.utcnow()
):
continue
orig = challenge['users'].get(str(message.author.id))
if orig:
interval_before = (datetime.utcnow() - timedelta(days=challenge['interval'][0])).replace(microsecond=0, second=0, minute=0, hour=challenge['interval'][2], tzinfo=timezone.utc).timestamp()
interval_start = datetime.utcnow().replace(microsecond=0, second=0, minute=0, hour=challenge['interval'][2], tzinfo=timezone.utc).timestamp()
interval_end = (datetime.utcnow() + timedelta(days=challenge['interval'][0])).replace(microsecond=0, second=0, minute=0, hour=challenge['interval'][2], tzinfo=timezone.utc).timestamp()
if interval_start <= challenge['users'][str(message.author.id)][2] <= interval_end:
challenge['users'][str(message.author.id)] = (orig[0], orig[1], message.created_at.timestamp())
continue
if interval_before <= challenge['users'][str(message.author.id)][2] <= interval_start:
challenge['users'][str(message.author.id)] = (orig[0]+1, orig[1]+1, message.created_at.timestamp())
else:
challenge['users'][str(message.author.id)] = (orig[0]+1, 1, message.created_at.timestamp())
else:
challenge['users'][str(message.author.id)] = (1, 1, message.created_at.timestamp())
if challenge['credits'] > 0:
await bank.deposit_credits(message.author, challenge['credits'])
@commands.guild_only()
@commands.group(name="uploadstreaks")
async def _upload_streaks(self, ctx: commands.Context):
@commands.bot_has_permissions(embed_links=True)
@_upload_streaks.command(name="list")
async def _list(self, ctx: commands.Context):
settings = await self.config.guild(ctx.guild).challenges()
embed = discord.Embed(title=f"UploadStreaks Challenges", color=await ctx.embed_color())
if not settings:
embed.description = "No UploadStreaks Challenges Found"
else:
embed.description = ""
for count, name in enumerate(settings.keys()):
embed.description += f"**{count+1}.** {name}"
return await ctx.send(embed=embed)
@commands.bot_has_permissions(embed_links=True)
@_upload_streaks.command(name="leaderboard", aliases=['ldb'])
async def _leaderboard(self, ctx: commands.Context, challenge: str, num=10):
settings = await self.config.guild(ctx.guild).challenges()
if challenge not in settings.keys():
return await ctx.send("No challenge was found with that name.")
embed = discord.Embed(title=f"UploadStreaks Challenge `{challenge}`", color=await ctx.embed_color())
if not settings[challenge]['users']:
embed.description = "No users have participated in this challenge yet."
else:
embed.description = "```Streak Points User\n"
ldb = sorted(settings[challenge]['users'].items(), key=lambda x: x[1][1], reverse=True)
for i in range(min(num, len(ldb))):
member = ctx.guild.get_member(int(ldb[i][0]))
if member:
name = member.display_name
else:
try:
name = (await self.bot.fetch_user(int(ldb[i][0]))).name
except discord.HTTPException:
continue
embed.description += f"{(str(ldb[i][1][1])+settings[challenge]['streak']).center(6)} {str(ldb[i][1][0]).center(6)} {name}\n"
embed.description += "```"
return await ctx.send(embed=embed)
@commands.bot_has_permissions(embed_links=True)
@_upload_streaks.command(name="user")
async def _user(self, ctx: commands.Context, user: discord.Member):
settings = await self.config.guild(ctx.guild).challenges()
embed = discord.Embed(title=f"UploadStreaks Info for {user.display_name}", color=await ctx.embed_color())
if not settings:
embed.description = "No UploadStreaks Challenges Found"
else:
for name, challenge in settings.items():
u = challenge['users'].get(str(user.id))
if u:
embed.add_field(name=f"Challenge `{name}`", inline=False, value=f"Points: {u[0]}\nStreak: {u[1]}{challenge['streak']}")
if not embed.fields:
embed.description = "This user has not participated in any UploadStreaks challenges."
return await ctx.send(embed=embed)
@commands.admin_or_permissions(administrator=True)
@_upload_streaks.group(name="settings")
async def _settings(self, ctx: commands.Context):
@_settings.command(name="new")
async def _settings_new(self, ctx: commands.Context, challenge: str, streak_name: str, interval: int, utc_day_start: int, credits: typing.Optional[int] = 0, role: typing.Optional[discord.Role] = None, *channels: discord.TextChannel):
if not(0 <= utc_day_start < 24):
return await ctx.send(f"`{utc_day_start}` is not a valid hour (in 24-hr format)!")
if interval <= 0:
return await ctx.send(f"`{interval}` is not a positive integer!")
if datetime.utcnow().hour < utc_day_start:
ts = datetime.utcnow().replace(microsecond=0, second=0, minute=0, hour=utc_day_start, tzinfo=timezone.utc).timestamp()
else:
ts = (datetime.utcnow() + timedelta(days=1)).replace(microsecond=0, second=0, minute=0, hour=utc_day_start, tzinfo=timezone.utc).timestamp()
if credits < 0:
return await ctx.send("The amount of credits must be a positive integer!")
async with self.config.guild(ctx.guild).challenges() as challenges:
challenges[challenge] = {
"active": True,
"streak": streak_name,
"interval": (interval, ts, utc_day_start),
"credits": credits,
"role": role.id if role else None,
"channels": [c.id for c in channels],
"users": {}
}
starts_in = datetime.utcfromtimestamp(ts) - datetime.utcnow()
return await ctx.send(f"A new challenge `{challenge}` was successfully added! If you want to edit anything, use `{ctx.clean_prefix}uploadstreaks settings edit`. The challenge will start in {starts_in.seconds//3600} hrs {(starts_in.seconds//60)%60} mins at {datetime.utcfromtimestamp(ts)} UTC.")
@_settings.command(name="toggle")
async def _settings_toggle(self, ctx: commands.Context, challenge_name: str, true_or_false: bool):
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["active"] = true_or_false
return await ctx.tick()
@_settings.command(name="reset")
async def _settings_reset(self, ctx: commands.Context, challenge_name: str, enter_true_to_confirm: bool):
if not enter_true_to_confirm:
return await ctx.send("Please provide `true` as the parameter to confirm.")
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["users"] = {}
return await ctx.tick()
@_settings.command(name="delete")
async def _settings_delete(self, ctx: commands.Context, challenge_name: str, enter_true_to_confirm: bool):
if not enter_true_to_confirm:
return await ctx.send("Please provide `true` as the parameter to confirm.")
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
del challenges[challenge_name]
return await ctx.tick()
@_settings.group(name="edit")
async def _settings_edit(self, ctx: commands.Context):
@_settings_edit.command(name="streakname")
async def _settings_edit_streak_name(self, ctx: commands.Context, challenge_name: str, streak_name: str):
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["streak"] = streak_name
return await ctx.tick()
@_settings_edit.command(name="interval")
async def _settings_edit_interval(self, ctx: commands.Context, challenge_name: str, interval: int, utc_day_start: int):
if interval <= 0:
return await ctx.send(f"`{interval}` is not a positive integer!")
if not (0 <= utc_day_start < 24):
return await ctx.send(f"`{utc_day_start}` is not a valid hour (in 24-hr format)!")
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["interval"] = (interval, challenges[challenge_name]["interval"][1], utc_day_start)
return await ctx.tick()
@_settings_edit.command(name="credits")
async def _settings_edit_credits(self, ctx: commands.Context, challenge_name: str, credits: int):
if credits < 0:
return await ctx.send("The amount of credits must be a positive integer!")
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["credits"] = credits
return await ctx.tick()
@_settings_edit.command(name="role")
async def _settings_edit_role(self, ctx: commands.Context, challenge_name: str, role: discord.Role = None):
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["role"] = role.id if role else None
return await ctx.tick()
@_settings_edit.command(name="channels")
async def _settings_edit_channels(self, ctx: commands.Context, challenge_name: str, *channels: discord.TextChannel):
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
challenges[challenge_name]["channels"] = [c.id for c in channels]
return await ctx.tick()
@_settings.group(name="set")
async def _settings_set(self, ctx: commands.Context):
@_settings_set.command(name="points")
async def _settings_set_points(self, ctx: commands.Context, user: discord.Member, challenge_name: str, points: int):
if points < 1:
return await ctx.send("The points must be at least `1`!")
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
orig = challenges[challenge_name]['users'].get(str(user.id))
if not orig:
return await ctx.send("That user has not participated in the challenge yet!")
challenges[challenge_name]['users'][str(user.id)] = (points, orig[1], orig[2])
return await ctx.tick()
@_settings_set.command(name="streak")
async def _settings_set_streak(self, ctx: commands.Context, user: discord.Member, challenge_name: str, streak: int):
if streak < 1:
return await ctx.send("The streak must be at least `1`!")
async with self.config.guild(ctx.guild).challenges() as challenges:
if challenge_name not in challenges.keys():
return await ctx.send("There was no UploadStreaks challenge found with that name!")
orig = challenges[challenge_name]['users'].get(str(user.id))
if not orig:
return await ctx.send("That user has not participated in the challenge yet!")
challenges[challenge_name]['users'][str(user.id)] = (orig[0], streak, orig[2])
return await ctx.tick()
@commands.bot_has_permissions(embed_links=True)
@_settings.command(name="view")
async def _settings_view(self, ctx: commands.Context):
settings = await self.config.guild(ctx.guild).challenges()
embed = discord.Embed(title="UploadStreaks Settings", color=await ctx.embed_color())
if not settings:
embed.description = "No UploadStreaks Challenges Found"
else:
for name, challenge in settings.items():
channels = []
for c in challenge['channels']:
if ch := ctx.guild.get_channel(c):
channels.append(ch.mention)
embed.add_field(
name=f"Challenge `{name}`",
inline=False,
value=f"""
**Active:** {challenge['active']}
**Streak Name:** {challenge['streak']}
**Interval:** {challenge['interval'][0]} days (started on {datetime.utcfromtimestamp(challenge['interval'][1])})
**Credits:** {challenge['streak']}
**Role:** {ctx.guild.get_role(challenge['role']).mention if challenge['role'] and ctx.guild.get_role(challenge['role']) else None }
**Channels:** {humanize_list(channels)}
"""
)
return await ctx.send(embed=embed)
| true | true |
f7fb54897f8a43dabdc0581b6989814cc668957d | 55 | py | Python | oscar/templatetags/sorting_tags.py | endgame/django-oscar | e5d78436e20b55902537a6cc82edf4e22568f9d6 | [
"BSD-3-Clause"
] | null | null | null | oscar/templatetags/sorting_tags.py | endgame/django-oscar | e5d78436e20b55902537a6cc82edf4e22568f9d6 | [
"BSD-3-Clause"
] | null | null | null | oscar/templatetags/sorting_tags.py | endgame/django-oscar | e5d78436e20b55902537a6cc82edf4e22568f9d6 | [
"BSD-3-Clause"
] | 1 | 2019-07-10T06:32:14.000Z | 2019-07-10T06:32:14.000Z | from django_sorting.templatetags.sorting_tags import *
| 27.5 | 54 | 0.872727 | from django_sorting.templatetags.sorting_tags import *
| true | true |
f7fb555585ebe863907c108e7db58f0e2104f720 | 3,924 | py | Python | Loader.py | MingSun-Tse/PytorchWCT | 9d11cc0995c0610c129b78ff5f72a26f4d60e10a | [
"MIT"
] | null | null | null | Loader.py | MingSun-Tse/PytorchWCT | 9d11cc0995c0610c129b78ff5f72a26f4d60e10a | [
"MIT"
] | null | null | null | Loader.py | MingSun-Tse/PytorchWCT | 9d11cc0995c0610c129b78ff5f72a26f4d60e10a | [
"MIT"
] | null | null | null | from PIL import Image
import torchvision.transforms as transforms
import torchvision.utils as vutils
import torch.utils.data as data
from os import listdir
from os.path import join
import numpy as np
import torch
import os
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
def is_image_file(filename):
    """Return True if *filename* ends with a recognized image extension (.png/.jpg/.jpeg).

    Note: the match is case-sensitive, as in the original implementation.
    """
    # str.endswith accepts a tuple of suffixes — one C-level call instead of
    # any() over a generator of per-extension checks.
    return filename.endswith((".png", ".jpg", ".jpeg"))
def default_loader(path):
    """Open the image at *path* with PIL and force a 3-channel RGB result."""
    image = Image.open(path)
    return image.convert('RGB')
class Dataset(data.Dataset):
    """Dataset yielding (content, style_or_texture, output_name) triples for WCT.

    Style-transfer mode (synthesis=False): every content image is paired with
    every style image (a full cross product of the two directories).
    Synthesis mode (synthesis=True): each texture image is paired with a
    uniform-noise content tensor of matching aspect ratio.

    Parameters:
        contentPath / stylePath: directories scanned in style-transfer mode.
        texturePath: directory scanned in synthesis mode.
        fineSize: if nonzero, images are resized (square in transfer mode,
            aspect-preserving in synthesis mode).
        picked_content_mark / picked_style_mark: substring filters on file
            names ("." matches any file with an extension, i.e. no filtering).
        synthesis: selects texture-synthesis mode.
    """

    def __init__(self, contentPath, stylePath, texturePath, fineSize, picked_content_mark=".", picked_style_mark=".", synthesis=False):
        super(Dataset, self).__init__()
        self.fineSize = fineSize
        self.synthesis = synthesis
        if synthesis:
            self.texturePath = texturePath
            self.texture_image_list = [x for x in listdir(texturePath) if is_image_file(x)]
        else:
            self.contentPath = contentPath
            self.stylePath = stylePath
            content_imgs = [x for x in listdir(contentPath) if is_image_file(x) and picked_content_mark in x]
            style_imgs = [x for x in listdir(stylePath) if is_image_file(x) and picked_style_mark in x]
            # Cross product: every content image is rendered in every style.
            pairs = [[c, s] for c in content_imgs for s in style_imgs]
            # FIX: plain list comprehensions instead of np.array(pairs)[:, 0] —
            # the numpy slice raised IndexError when either directory was empty,
            # and the array round-trip was unnecessary.
            self.content_image_list = [p[0] for p in pairs]
            self.style_image_list = [p[1] for p in pairs]

    def __getitem__(self, index):
        """Return (content_tensor, style_or_texture_tensor, output_filename)."""
        if not self.synthesis:  # style transfer
            contentImgPath = os.path.join(self.contentPath, self.content_image_list[index])
            styleImgPath = os.path.join(self.stylePath, self.style_image_list[index])
            contentImg = default_loader(contentImgPath)
            styleImg = default_loader(styleImgPath)
            if self.fineSize != 0:
                # A nonzero fineSize likely means testing, so resize both
                # images to a fineSize x fineSize square (aspect not preserved).
                contentImg = contentImg.resize((self.fineSize, self.fineSize))
                styleImg = styleImg.resize((self.fineSize, self.fineSize))
            contentImg = transforms.ToTensor()(contentImg)
            styleImg = transforms.ToTensor()(styleImg)
            # Output name combines both stems, e.g. "content+style.jpg".
            return contentImg.squeeze(0), styleImg.squeeze(0), \
                self.content_image_list[index].split(".")[0] + "+" + self.style_image_list[index].split(".")[0] + ".jpg"
        else:  # texture synthesis
            textureImgPath = os.path.join(self.texturePath, self.texture_image_list[index])
            textureImg = default_loader(textureImgPath)
            if self.fineSize != 0:
                # Shrink the longer edge to fineSize, preserving aspect ratio.
                w, h = textureImg.size
                if w > h:
                    neww = self.fineSize
                    newh = int(h * neww / w)
                else:
                    newh = self.fineSize
                    neww = int(w * newh / h)
                textureImg = textureImg.resize((neww, newh))
            w, h = textureImg.size
            # Uniform noise (range [0, 1]) with the texture's aspect ratio,
            # scaled to a fixed height of 3000 pixels.
            contentImg = torch.rand([3, 3000, int(3000.0 / h * w)])
            textureImg = transforms.ToTensor()(textureImg)
            return contentImg.squeeze(0), textureImg.squeeze(0), self.texture_image_list[index].split(".")[0] + ".jpg"

    def __len__(self):
        # Number of (content, style) pairs in transfer mode, or texture images
        # in synthesis mode.
        return len(self.texture_image_list) if self.synthesis else len(self.content_image_list)
| 45.627907 | 139 | 0.63787 | from PIL import Image
import torchvision.transforms as transforms
import torchvision.utils as vutils
import torch.utils.data as data
from os import listdir
from os.path import join
import numpy as np
import torch
import os
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
def is_image_file(filename):
return any(filename.endswith(extension) for extension in [".png", ".jpg", ".jpeg"])
def default_loader(path):
return Image.open(path).convert('RGB')
class Dataset(data.Dataset):
def __init__(self, contentPath, stylePath, texturePath, fineSize, picked_content_mark=".", picked_style_mark=".", synthesis=False):
super(Dataset,self).__init__()
self.fineSize = fineSize
self.synthesis = synthesis
if synthesis:
self.texturePath = texturePath
self.texture_image_list = [x for x in listdir(texturePath) if is_image_file(x)]
else:
self.contentPath = contentPath
self.stylePath = stylePath
content_imgs = [x for x in listdir(contentPath) if is_image_file(x) and picked_content_mark in x]
style_imgs = [x for x in listdir(stylePath) if is_image_file(x) and picked_style_mark in x]
pairs = [[c, s] for c in content_imgs for s in style_imgs]
self.content_image_list = list(np.array(pairs)[:, 0])
self.style_image_list = list(np.array(pairs)[:, 1])
contentImgPath = os.path.join(self.contentPath, self.content_image_list[index])
styleImgPath = os.path.join(self.stylePath, self.style_image_list[index])
contentImg = default_loader(contentImgPath)
styleImg = default_loader(styleImgPath)
if self.fineSize != 0:
contentImg = contentImg.resize((self.fineSize, self.fineSize))
styleImg = styleImg.resize((self.fineSize, self.fineSize))
contentImg = transforms.ToTensor()(contentImg)
styleImg = transforms.ToTensor()(styleImg)
return contentImg.squeeze(0), styleImg.squeeze(0), \
self.content_image_list[index].split(".")[0] + "+" + self.style_image_list[index].split(".")[0] + ".jpg"
else:
textureImgPath = os.path.join(self.texturePath, self.texture_image_list[index])
textureImg = default_loader(textureImgPath)
if self.fineSize != 0:
w, h = textureImg.size
if w > h:
neww = self.fineSize
newh = int(h * neww / w)
else:
newh = self.fineSize
neww = int(w * newh / h)
textureImg = textureImg.resize((neww,newh))
w, h = textureImg.size
contentImg = torch.rand([3, 3000, int(3000.0/h*w)])
textureImg = transforms.ToTensor()(textureImg)
return contentImg.squeeze(0), textureImg.squeeze(0), self.texture_image_list[index].split(".")[0] + ".jpg"
def __len__(self):
return len(self.texture_image_list) if self.synthesis else len(self.content_image_list)
| true | true |
f7fb55e5d35f2646ea4c54c7cd8670add5a6be16 | 343 | py | Python | model.py | Zadigo/flask_startup_template | 29eb4c29ef57c7fe8ff0041d980c03148549b0e2 | [
"MIT"
] | null | null | null | model.py | Zadigo/flask_startup_template | 29eb4c29ef57c7fe8ff0041d980c03148549b0e2 | [
"MIT"
] | null | null | null | model.py | Zadigo/flask_startup_template | 29eb4c29ef57c7fe8ff0041d980c03148549b0e2 | [
"MIT"
] | null | null | null | # from karista import db
# import datetime
# class Emails(db.Model):
# email_id = db.Column(db.Integer, primary_key=True)
# email = db.Column(db.Email, unique=False, nullable=False)
# date_creer = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)
# def __repr__(self):
# return self.email | 34.3 | 92 | 0.685131 | true | true | |
f7fb56903b64027389bd473544f5466f0d6ac88d | 1,016 | py | Python | autowrt/__main__.py | krumelmonster/autowrt | 135228c921a2f41787e694900efb359cdd72a950 | [
"Unlicense"
] | 2 | 2021-05-14T17:37:31.000Z | 2021-05-14T18:11:51.000Z | autowrt/__main__.py | krumelmonster/autowrt | 135228c921a2f41787e694900efb359cdd72a950 | [
"Unlicense"
] | 1 | 2021-03-14T19:48:58.000Z | 2021-03-14T19:48:58.000Z | autowrt/__main__.py | krumelmonster/autowrt | 135228c921a2f41787e694900efb359cdd72a950 | [
"Unlicense"
] | null | null | null | from autowrt.router import Xiaomi
import os, sys
def main(imagedir: str = None, confpath: str = None, logdir: str = None):
    """Load the user's config.py from *confpath* and run the OpenWrt install.

    Args:
        imagedir: directory holding firmware images (defaults to CWD).
        confpath: directory containing config.py (defaults to CWD).
        logdir: directory for log output (defaults to CWD).
    """
    confpath = confpath or os.getcwd()
    imagedir = imagedir or os.getcwd()
    logdir = logdir or os.getcwd()
    confscript = os.path.join(confpath, 'config.py')
    if not os.path.isfile(confscript):
        print("Please copy config.py to {} and adjust to your needs!".format(confscript), file=sys.stderr)
    # Temporarily put confpath first on sys.path so "import config" picks up
    # the user's config.py.  Copy the list: the original code aliased
    # sys.path, so its "restore" assigned the already-mutated list back and
    # confpath leaked onto the import path permanently.
    oldpath = list(sys.path)
    sys.path.insert(0, confpath)
    try:
        import config
    finally:
        # Restore even if the import raises.
        sys.path = oldpath
    config.config.logdir = os.path.join(logdir, config.config.logdir)
    config.config.imagedir = os.path.join(imagedir, config.config.imagedir)
    xiaomi = Xiaomi(config.config)
    xiaomi.install_openwrt()
from appdirs import *
appname = "autowrt"
appauthor = "krumelmonster"
main(
imagedir=user_data_dir(appname, appauthor),
confpath=user_config_dir(appname, appauthor),
logdir=user_log_dir(appname, appauthor)
)
| 30.787879 | 106 | 0.691929 | from autowrt.router import Xiaomi
import os, sys
def main(imagedir: str = None, confpath: str = None, logdir: str = None):
confpath = confpath or os.getcwd()
imagedir = imagedir or os.getcwd()
logdir = logdir or os.getcwd()
confscript=os.path.join(confpath, 'config.py')
if not os.path.isfile(confscript):
print("Please copy config.py to {} and adjust to your needs!".format(confscript), file=sys.stderr)
oldpath=sys.path
sys.path.insert(0,confpath)
import config
sys.path=oldpath
config.config.logdir=os.path.join(logdir, config.config.logdir)
config.config.imagedir=os.path.join(imagedir, config.config.imagedir)
xiaomi = Xiaomi(config.config)
xiaomi.install_openwrt()
if __name__ == '__main__':
from appdirs import *
appname = "autowrt"
appauthor = "krumelmonster"
main(
imagedir=user_data_dir(appname, appauthor),
confpath=user_config_dir(appname, appauthor),
logdir=user_log_dir(appname, appauthor)
)
| true | true |
f7fb56adbe14a0507a6126dfda108fe9202885f7 | 22,830 | py | Python | irmark1/templates/complete.py | jram098/IRG | 5265d61c2e67cce43a3261563b3b0f3cea27d9e4 | [
"Apache-2.0"
] | 1 | 2019-11-07T22:22:53.000Z | 2019-11-07T22:22:53.000Z | irmark1/templates/complete.py | jram098/IRG | 5265d61c2e67cce43a3261563b3b0f3cea27d9e4 | [
"Apache-2.0"
] | null | null | null | irmark1/templates/complete.py | jram098/IRG | 5265d61c2e67cce43a3261563b3b0f3cea27d9e4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""
Scripts to drive an IR Mark One (2) car
Usage:
manage.py (drive) [--model=<model>] [--js] [--type=(linear|categorical|rnn|imu|behavior|3d|localizer|latent)] [--camera=(single|stereo)] [--meta=<key:value> ...]
manage.py (train) [--tub=<tub1,tub2,..tubn>] [--file=<file> ...] (--model=<model>) [--transfer=<model>] [--type=(linear|categorical|rnn|imu|behavior|3d|localizer)] [--continuous] [--aug]
Options:
-h --help Show this screen.
--js Use physical joystick.
-f --file=<file> A text file containing paths to tub files, one per line. Option may be used more than once.
--meta=<key:value> Key/Value strings describing describing a piece of meta data about this drive. Option may be used more than once.
"""
import os
import time
from docopt import docopt
import numpy as np
import irmark1 as m1
#import parts
from irmark1.parts.transform import Lambda, TriggeredCallback, DelayedTrigger
from irmark1.parts.datastore import TubHandler
from irmark1.parts.controller import LocalWebController, JoystickController
from irmark1.parts.throttle_filter import ThrottleFilter
from irmark1.parts.behavior import BehaviorPart
from irmark1.parts.file_watcher import FileWatcher
from irmark1.parts.launch import AiLaunch
from irmark1.utils import *
def drive(cfg, model_path=None, use_joystick=False, model_type=None, camera_type='single', meta=[] ):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    it's run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.

    Parameters:
        cfg: config object from m1.load_config(); every cfg.* attribute read
            below must exist on it.
        model_path: optional path to a saved autopilot model (.h5/.uff/
            .tflite/.pkl, or .json plus a matching .weights file).
        use_joystick: force a physical joystick controller instead of the
            web controller.
        model_type: model architecture key understood by
            m1.utils.get_model_by_type; derived from cfg when None.
        camera_type: 'single' or 'stereo'.
        meta: list of "key:value" strings stored with the recorded tub.
            NOTE(review): mutable default argument -- harmless as written
            because it is never mutated here, but None would be safer.
    '''
    if cfg.DONKEY_GYM:
        #the simulator will use cuda and then we usually run out of resources
        #if we also try to use cuda. so disable for donkey_gym.
        os.environ["CUDA_VISIBLE_DEVICES"]="-1"

    # Pick a default model type from the training flags in cfg.
    if model_type is None:
        if cfg.TRAIN_LOCALIZER:
            model_type = "localizer"
        elif cfg.TRAIN_BEHAVIORS:
            model_type = "behavior"
        else:
            model_type = cfg.DEFAULT_MODEL_TYPE

    #Initialize car
    V = m1.vehicle.Vehicle()

    # ----- camera setup -----
    if camera_type == "stereo":
        if cfg.CAMERA_TYPE == "WEBCAM":
            from irmark1.parts.camera import Webcam
            camA = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
            camB = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
        elif cfg.CAMERA_TYPE == "CVCAM":
            from irmark1.parts.cv import CvCam
            camA = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
            camB = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
        else:
            raise(Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))
        V.add(camA, outputs=['cam/image_array_a'], threaded=True)
        V.add(camB, outputs=['cam/image_array_b'], threaded=True)

        # Combine the two camera frames side by side into one image.
        from irmark1.parts.image import StereoPair
        V.add(StereoPair(), inputs=['cam/image_array_a', 'cam/image_array_b'],
            outputs=['cam/image_array'])
    else:
        print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)
        if cfg.DONKEY_GYM:
            from irmark1.parts.dgym import DonkeyGymEnv

        inputs = []
        threaded = True
        if cfg.DONKEY_GYM:
            # The gym env is both actuator and camera: it consumes the
            # control outputs and produces the camera image.
            from irmark1.parts.dgym import DonkeyGymEnv
            cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, env_name=cfg.DONKEY_GYM_ENV_NAME)
            threaded = True
            inputs = ['angle', 'throttle']
        elif cfg.CAMERA_TYPE == "PICAM":
            from irmark1.parts.camera import PiCamera
            cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "WEBCAM":
            from irmark1.parts.camera import Webcam
            cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CVCAM":
            from irmark1.parts.cv import CvCam
            cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CSIC":
            from irmark1.parts.camera import CSICamera
            cam = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
        elif cfg.CAMERA_TYPE == "V4L":
            from irmark1.parts.camera import V4LCamera
            cam = V4LCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE)
        elif cfg.CAMERA_TYPE == "MOCK":
            from irmark1.parts.camera import MockCamera
            cam = MockCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "D435i":
            from irmark1.parts.realsense2 import RS_D435i
            cam = RS_D435i(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE)
        else:
            # NOTE(review): "Unkown" typo lives in the runtime message; left as-is.
            raise(Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE))

        V.add(cam, inputs=inputs, outputs=['cam/image_array'], threaded=threaded)

    # ----- user controller (joystick or web) -----
    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        #modify max_throttle closer to 1.0 to have more power
        #modify steering_scale lower than 1.0 to have less responsive steering
        from irmark1.parts.controller import get_js_controller
        ctr = get_js_controller(cfg)
        if cfg.USE_NETWORKED_JS:
            from irmark1.parts.controller import JoyStickSub
            netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
            V.add(netwkJs, threaded=True)
            ctr.js = netwkJs
    else:
        #This web controller will create a web server that is capable
        #of managing steering, throttle, and modes, and more.
        ctr = LocalWebController()

    V.add(ctr,
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    #this throttle filter will allow one tap back for esc reverse
    th_filter = ThrottleFilter()
    V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])

    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    class PilotCondition:
        def run(self, mode):
            # True (run the pilot) for every mode except full manual driving.
            if mode == 'user':
                return False
            else:
                return True

    V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])

    class LedConditionLogic:
        """Translate vehicle state into an LED blink rate and color."""
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, mode, recording, recording_alert, behavior_state, model_file_changed, track_loc):
            #returns a blink rate. 0 for off. -1 for on. positive for rate.

            if track_loc is not None:
                led.set_rgb(*self.cfg.LOC_COLORS[track_loc])
                return -1

            if model_file_changed:
                led.set_rgb(self.cfg.MODEL_RELOADED_LED_R, self.cfg.MODEL_RELOADED_LED_G, self.cfg.MODEL_RELOADED_LED_B)
                return 0.1
            else:
                led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)

            if recording_alert:
                led.set_rgb(*recording_alert)
                return self.cfg.REC_COUNT_ALERT_BLINK_RATE
            else:
                led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)

            if behavior_state is not None and model_type == 'behavior':
                r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]
                led.set_rgb(r, g, b)
                return -1 #solid on

            if recording:
                return -1 #solid on
            elif mode == 'user':
                return 1
            elif mode == 'local_angle':
                return 0.5
            elif mode == 'local':
                return 0.1
            return 0

    if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM:
        from irmark1.parts.led_status import RGB_LED
        led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B, cfg.LED_INVERT)
        led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)

        V.add(LedConditionLogic(cfg), inputs=['user/mode', 'recording', "records/alert", 'behavior/state', 'modelfile/modified', "pilot/loc"],
              outputs=['led/blink_rate'])

        V.add(led, inputs=['led/blink_rate'])

    def get_record_alert_color(num_records):
        # Pick the last color whose record-count threshold has been reached.
        col = (0, 0, 0)
        for count, color in cfg.RECORD_ALERT_COLOR_ARR:
            if num_records >= count:
                col = color
        return col

    class RecordTracker:
        """Print record-count progress and emit a color alert every
        cfg.REC_COUNT_ALERT records."""
        def __init__(self):
            self.last_num_rec_print = 0
            self.dur_alert = 0
            self.force_alert = 0

        def run(self, num_records):
            if num_records is None:
                return 0

            if self.last_num_rec_print != num_records or self.force_alert:
                self.last_num_rec_print = num_records

                if num_records % 10 == 0:
                    print("recorded", num_records, "records")

                if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
                    self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
                    self.force_alert = 0

            if self.dur_alert > 0:
                self.dur_alert -= 1

            if self.dur_alert != 0:
                return get_record_alert_color(num_records)

            return 0

    rec_tracker_part = RecordTracker()
    V.add(rec_tracker_part, inputs=["tub/num_records"], outputs=['records/alert'])

    if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
        #then we are not using the circle button. hijack that to force a record count indication
        def show_record_acount_status():
            rec_tracker_part.last_num_rec_print = 0
            rec_tracker_part.force_alert = 1
        ctr.set_button_down_trigger('circle', show_record_acount_status)

    #IMU
    if cfg.HAVE_IMU:
        from irmark1.parts.imu import Mpu6050
        imu = Mpu6050()
        V.add(imu, outputs=['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
            'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'], threaded=True)

    class ImgPreProcess():
        '''
        preprocess camera image for inference.
        normalize and crop if needed.
        '''
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, img_arr):
            return normalize_and_crop(img_arr, self.cfg)

    # Coral edge-TPU models consume the raw image; all others take the
    # normalized/cropped one.
    if "coral" in model_type:
        inf_input = 'cam/image_array'
    else:
        inf_input = 'cam/normalized/cropped'

    V.add(ImgPreProcess(cfg),
        inputs=['cam/image_array'],
        outputs=[inf_input],
        run_condition='run_pilot')

    #Behavioral state
    if cfg.TRAIN_BEHAVIORS:
        bh = BehaviorPart(cfg.BEHAVIOR_LIST)
        V.add(bh, outputs=['behavior/state', 'behavior/label', "behavior/one_hot_state_array"])
        try:
            # Only joystick controllers have button triggers; web controller
            # raises here and is ignored.
            ctr.set_button_down_trigger('L1', bh.increment_state)
        except:
            pass

        inputs = [inf_input, "behavior/one_hot_state_array"]
    #IMU
    elif model_type == "imu":
        assert(cfg.HAVE_IMU)
        #Run the pilot if the mode is not user.
        inputs=[inf_input,
            'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
            'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
    else:
        inputs=[inf_input]

    def load_model(kl, model_path):
        # Load a complete model (architecture + weights) from one file.
        start = time.time()
        print('loading model', model_path)
        kl.load(model_path)
        print('finished loading in %s sec.' % (str(time.time() - start)) )

    def load_weights(kl, weights_path):
        # Load only the weights into an already-built model.
        start = time.time()
        try:
            print('loading model weights', weights_path)
            kl.model.load_weights(weights_path)
            print('finished loading in %s sec.' % (str(time.time() - start)) )
        except Exception as e:
            print(e)
            print('ERR>> problems loading weights', weights_path)

    def load_model_json(kl, json_fnm):
        # Rebuild the model architecture from a Keras JSON description.
        start = time.time()
        print('loading model json', json_fnm)
        from tensorflow.python import keras
        try:
            with open(json_fnm, 'r') as handle:
                contents = handle.read()
                kl.model = keras.models.model_from_json(contents)
            print('finished loading json in %s sec.' % (str(time.time() - start)) )
        except Exception as e:
            print(e)
            print("ERR>> problems loading model json", json_fnm)

    # ----- autopilot -----
    if model_path:
        #When we have a model, first create an appropriate Keras part
        kl = m1.utils.get_model_by_type(model_type, cfg)

        model_reload_cb = None

        if '.h5' in model_path or '.uff' in model_path or 'tflite' in model_path or '.pkl' in model_path:
            #when we have a .h5 extension
            #load everything from the model file
            load_model(kl, model_path)

            def reload_model(filename):
                load_model(kl, filename)

            model_reload_cb = reload_model

        elif '.json' in model_path:
            #when we have a .json extension
            #load the model from there and look for a matching
            #.wts file with just weights
            load_model_json(kl, model_path)
            weights_path = model_path.replace('.json', '.weights')
            load_weights(kl, weights_path)

            def reload_weights(filename):
                weights_path = filename.replace('.json', '.weights')
                load_weights(kl, weights_path)

            model_reload_cb = reload_weights

        else:
            print("ERR>> Unknown extension type on model file!!")
            return

        #this part will signal visual LED, if connected
        V.add(FileWatcher(model_path, verbose=True), outputs=['modelfile/modified'])

        #these parts will reload the model file, but only when ai is running so we don't interrupt user driving
        V.add(FileWatcher(model_path), outputs=['modelfile/dirty'], run_condition="ai_running")
        V.add(DelayedTrigger(100), inputs=['modelfile/dirty'], outputs=['modelfile/reload'], run_condition="ai_running")
        V.add(TriggeredCallback(model_path, model_reload_cb), inputs=["modelfile/reload"], run_condition="ai_running")

        outputs=['pilot/angle', 'pilot/throttle']

        if cfg.TRAIN_LOCALIZER:
            outputs.append("pilot/loc")

        V.add(kl, inputs=inputs,
            outputs=outputs,
            run_condition='run_pilot')

    #Choose what inputs should change the car.
    class DriveMode:
        def run(self, mode,
                    user_angle, user_throttle,
                    pilot_angle, pilot_throttle):
            if mode == 'user':
                return user_angle, user_throttle

            elif mode == 'local_angle':
                return pilot_angle, user_throttle

            else:
                return pilot_angle, pilot_throttle * cfg.AI_THROTTLE_MULT

    V.add(DriveMode(),
          inputs=['user/mode', 'user/angle', 'user/throttle',
                  'pilot/angle', 'pilot/throttle'],
          outputs=['angle', 'throttle'])

    #to give the car a boost when starting ai mode in a race.
    aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE, cfg.AI_LAUNCH_KEEP_ENABLED)

    V.add(aiLauncher,
          inputs=['user/mode', 'throttle'],
          outputs=['throttle'])

    if isinstance(ctr, JoystickController):
        ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON, aiLauncher.enable_ai_launch)

    class AiRunCondition:
        '''
        A bool part to let us know when ai is running.
        '''
        def run(self, mode):
            if mode == "user":
                return False
            return True

    V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])

    #Ai Recording
    class AiRecordingCondition:
        '''
        return True when ai mode, otherwize respect user mode recording flag
        '''
        def run(self, mode, recording):
            if mode == 'user':
                return recording
            return True

    if cfg.RECORD_DURING_AI:
        V.add(AiRecordingCondition(), inputs=['user/mode', 'recording'], outputs=['recording'])

    #Drive train setup
    if cfg.DONKEY_GYM:
        pass

    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_ESC":
        from irmark1.parts.actuator import PCA9685, PWMSteering, PWMThrottle

        steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
        steering = PWMSteering(controller=steering_controller,
                                        left_pulse=cfg.STEERING_LEFT_PWM,
                                        right_pulse=cfg.STEERING_RIGHT_PWM)

        throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
        throttle = PWMThrottle(controller=throttle_controller,
                                        max_pulse=cfg.THROTTLE_FORWARD_PWM,
                                        zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                                        min_pulse=cfg.THROTTLE_REVERSE_PWM)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])

    elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
        from irmark1.parts.actuator import Mini_HBridge_DC_Motor_PWM

        steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT, cfg.HBRIDGE_PIN_RIGHT)
        throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])

    elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
        from irmark1.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM

        left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD, cfg.HBRIDGE_PIN_LEFT_BWD)
        right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD, cfg.HBRIDGE_PIN_RIGHT_BWD)
        two_wheel_control = TwoWheelSteeringThrottle()

        V.add(two_wheel_control,
              inputs=['throttle', 'angle'],
              outputs=['left_motor_speed', 'right_motor_speed'])

        V.add(left_motor, inputs=['left_motor_speed'])
        V.add(right_motor, inputs=['right_motor_speed'])

    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
        from irmark1.parts.actuator import ServoBlaster, PWMSteering
        steering_controller = ServoBlaster(cfg.STEERING_CHANNEL) #really pin
        #PWM pulse values should be in the range of 100 to 200
        assert(cfg.STEERING_LEFT_PWM <= 200)
        assert(cfg.STEERING_RIGHT_PWM <= 200)
        steering = PWMSteering(controller=steering_controller,
                                        left_pulse=cfg.STEERING_LEFT_PWM,
                                        right_pulse=cfg.STEERING_RIGHT_PWM)

        from irmark1.parts.actuator import Mini_HBridge_DC_Motor_PWM
        motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'])
        V.add(motor, inputs=["throttle"])

    #add tub to save data
    inputs=['cam/image_array',
            'user/angle', 'user/throttle',
            'user/mode']

    types=['image_array',
           'float', 'float',
           'str']

    if cfg.TRAIN_BEHAVIORS:
        inputs += ['behavior/state', 'behavior/label', "behavior/one_hot_state_array"]
        types += ['int', 'str', 'vector']

    if cfg.HAVE_IMU:
        inputs += ['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
            'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']

        types +=['float', 'float', 'float',
           'float', 'float', 'float']

    if cfg.RECORD_DURING_AI:
        inputs += ['pilot/angle', 'pilot/throttle']
        types += ['float', 'float']

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
    V.add(tub, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')

    if cfg.PUB_CAMERA_IMAGES:
        from irmark1.parts.network import TCPServeValue
        from irmark1.parts.image import ImgArrToJpg
        pub = TCPServeValue("camera")
        V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
        V.add(pub, inputs=['jpg/bin'])

    if type(ctr) is LocalWebController:
        print("You can now go to <your pi ip address>:8887 to drive your car.")
    elif isinstance(ctr, JoystickController):
        print("You can now move your joystick to drive your car.")
        #tell the controller about the tub
        ctr.set_tub(tub)

        if cfg.BUTTON_PRESS_NEW_TUB:

            def new_tub_dir():
                # Swap the tub writer for a fresh one on button press.
                # NOTE(review): V.parts.pop() assumes the tub is still the
                # last part added -- confirm against Vehicle.add ordering.
                V.parts.pop()
                tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
                V.add(tub, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')
                ctr.set_tub(tub)

            ctr.set_button_down_trigger('cross', new_tub_dir)
        ctr.print_controls()

    #run the vehicle for 20 seconds
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ,
            max_loop_count=cfg.MAX_LOOPS)
if __name__ == '__main__':
    args = docopt(__doc__)
    cfg = m1.load_config()

    if args['drive']:
        # Hand the parsed CLI options straight to the vehicle builder.
        drive(cfg, model_path=args['--model'], use_joystick=args['--js'],
              model_type=args['--type'], camera_type=args['--camera'],
              meta=args['--meta'])

    if args['train']:
        from train import multi_train, preprocessFileList
        # Gather tub directories: first from any --file list files, then
        # from the comma-separated --tub argument.
        dirs = preprocessFileList(args['--file'])
        tub = args['--tub']
        if tub is not None:
            dirs.extend(os.path.expanduser(n) for n in tub.split(','))
        multi_train(cfg, dirs, args['--model'], args['--transfer'],
                    args['--type'], args['--continuous'], args['--aug'])
| 38.826531 | 190 | 0.613447 |
import os
import time
from docopt import docopt
import numpy as np
import irmark1 as m1
from irmark1.parts.transform import Lambda, TriggeredCallback, DelayedTrigger
from irmark1.parts.datastore import TubHandler
from irmark1.parts.controller import LocalWebController, JoystickController
from irmark1.parts.throttle_filter import ThrottleFilter
from irmark1.parts.behavior import BehaviorPart
from irmark1.parts.file_watcher import FileWatcher
from irmark1.parts.launch import AiLaunch
from irmark1.utils import *
def drive(cfg, model_path=None, use_joystick=False, model_type=None, camera_type='single', meta=[] ):
if cfg.DONKEY_GYM:
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
if model_type is None:
if cfg.TRAIN_LOCALIZER:
model_type = "localizer"
elif cfg.TRAIN_BEHAVIORS:
model_type = "behavior"
else:
model_type = cfg.DEFAULT_MODEL_TYPE
V = m1.vehicle.Vehicle()
if camera_type == "stereo":
if cfg.CAMERA_TYPE == "WEBCAM":
from irmark1.parts.camera import Webcam
camA = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
camB = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
elif cfg.CAMERA_TYPE == "CVCAM":
from irmark1.parts.cv import CvCam
camA = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
camB = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
else:
raise(Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))
V.add(camA, outputs=['cam/image_array_a'], threaded=True)
V.add(camB, outputs=['cam/image_array_b'], threaded=True)
from irmark1.parts.image import StereoPair
V.add(StereoPair(), inputs=['cam/image_array_a', 'cam/image_array_b'],
outputs=['cam/image_array'])
else:
print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)
if cfg.DONKEY_GYM:
from irmark1.parts.dgym import DonkeyGymEnv
inputs = []
threaded = True
if cfg.DONKEY_GYM:
from irmark1.parts.dgym import DonkeyGymEnv
cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, env_name=cfg.DONKEY_GYM_ENV_NAME)
threaded = True
inputs = ['angle', 'throttle']
elif cfg.CAMERA_TYPE == "PICAM":
from irmark1.parts.camera import PiCamera
cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "WEBCAM":
from irmark1.parts.camera import Webcam
cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CVCAM":
from irmark1.parts.cv import CvCam
cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CSIC":
from irmark1.parts.camera import CSICamera
cam = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
elif cfg.CAMERA_TYPE == "V4L":
from irmark1.parts.camera import V4LCamera
cam = V4LCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE)
elif cfg.CAMERA_TYPE == "MOCK":
from irmark1.parts.camera import MockCamera
cam = MockCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "D435i":
from irmark1.parts.realsense2 import RS_D435i
cam = RS_D435i(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE)
else:
raise(Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE))
V.add(cam, inputs=inputs, outputs=['cam/image_array'], threaded=threaded)
if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
from irmark1.parts.controller import get_js_controller
ctr = get_js_controller(cfg)
if cfg.USE_NETWORKED_JS:
from irmark1.parts.controller import JoyStickSub
netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
V.add(netwkJs, threaded=True)
ctr.js = netwkJs
else:
ctr = LocalWebController()
V.add(ctr,
inputs=['cam/image_array'],
outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
threaded=True)
th_filter = ThrottleFilter()
V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])
class PilotCondition:
def run(self, mode):
if mode == 'user':
return False
else:
return True
V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])
class LedConditionLogic:
def __init__(self, cfg):
self.cfg = cfg
def run(self, mode, recording, recording_alert, behavior_state, model_file_changed, track_loc):
if track_loc is not None:
led.set_rgb(*self.cfg.LOC_COLORS[track_loc])
return -1
if model_file_changed:
led.set_rgb(self.cfg.MODEL_RELOADED_LED_R, self.cfg.MODEL_RELOADED_LED_G, self.cfg.MODEL_RELOADED_LED_B)
return 0.1
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if recording_alert:
led.set_rgb(*recording_alert)
return self.cfg.REC_COUNT_ALERT_BLINK_RATE
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if behavior_state is not None and model_type == 'behavior':
r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]
led.set_rgb(r, g, b)
return -1
if recording:
return -1
elif mode == 'user':
return 1
elif mode == 'local_angle':
return 0.5
elif mode == 'local':
return 0.1
return 0
if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM:
from irmark1.parts.led_status import RGB_LED
led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B, cfg.LED_INVERT)
led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)
V.add(LedConditionLogic(cfg), inputs=['user/mode', 'recording', "records/alert", 'behavior/state', 'modelfile/modified', "pilot/loc"],
outputs=['led/blink_rate'])
V.add(led, inputs=['led/blink_rate'])
def get_record_alert_color(num_records):
col = (0, 0, 0)
for count, color in cfg.RECORD_ALERT_COLOR_ARR:
if num_records >= count:
col = color
return col
class RecordTracker:
def __init__(self):
self.last_num_rec_print = 0
self.dur_alert = 0
self.force_alert = 0
def run(self, num_records):
if num_records is None:
return 0
if self.last_num_rec_print != num_records or self.force_alert:
self.last_num_rec_print = num_records
if num_records % 10 == 0:
print("recorded", num_records, "records")
if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
self.force_alert = 0
if self.dur_alert > 0:
self.dur_alert -= 1
if self.dur_alert != 0:
return get_record_alert_color(num_records)
return 0
rec_tracker_part = RecordTracker()
V.add(rec_tracker_part, inputs=["tub/num_records"], outputs=['records/alert'])
if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
def show_record_acount_status():
rec_tracker_part.last_num_rec_print = 0
rec_tracker_part.force_alert = 1
ctr.set_button_down_trigger('circle', show_record_acount_status)
if cfg.HAVE_IMU:
from irmark1.parts.imu import Mpu6050
imu = Mpu6050()
V.add(imu, outputs=['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'], threaded=True)
class ImgPreProcess():
def __init__(self, cfg):
self.cfg = cfg
def run(self, img_arr):
return normalize_and_crop(img_arr, self.cfg)
if "coral" in model_type:
inf_input = 'cam/image_array'
else:
inf_input = 'cam/normalized/cropped'
V.add(ImgPreProcess(cfg),
inputs=['cam/image_array'],
outputs=[inf_input],
run_condition='run_pilot')
if cfg.TRAIN_BEHAVIORS:
bh = BehaviorPart(cfg.BEHAVIOR_LIST)
V.add(bh, outputs=['behavior/state', 'behavior/label', "behavior/one_hot_state_array"])
try:
ctr.set_button_down_trigger('L1', bh.increment_state)
except:
pass
inputs = [inf_input, "behavior/one_hot_state_array"]
elif model_type == "imu":
assert(cfg.HAVE_IMU)
inputs=[inf_input,
'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
else:
inputs=[inf_input]
def load_model(kl, model_path):
start = time.time()
print('loading model', model_path)
kl.load(model_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
def load_weights(kl, weights_path):
start = time.time()
try:
print('loading model weights', weights_path)
kl.model.load_weights(weights_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
except Exception as e:
print(e)
print('ERR>> problems loading weights', weights_path)
def load_model_json(kl, json_fnm):
start = time.time()
print('loading model json', json_fnm)
from tensorflow.python import keras
try:
with open(json_fnm, 'r') as handle:
contents = handle.read()
kl.model = keras.models.model_from_json(contents)
print('finished loading json in %s sec.' % (str(time.time() - start)) )
except Exception as e:
print(e)
print("ERR>> problems loading model json", json_fnm)
if model_path:
kl = m1.utils.get_model_by_type(model_type, cfg)
model_reload_cb = None
if '.h5' in model_path or '.uff' in model_path or 'tflite' in model_path or '.pkl' in model_path:
load_model(kl, model_path)
def reload_model(filename):
load_model(kl, filename)
model_reload_cb = reload_model
elif '.json' in model_path:
load_model_json(kl, model_path)
weights_path = model_path.replace('.json', '.weights')
load_weights(kl, weights_path)
def reload_weights(filename):
weights_path = filename.replace('.json', '.weights')
load_weights(kl, weights_path)
model_reload_cb = reload_weights
else:
print("ERR>> Unknown extension type on model file!!")
return
V.add(FileWatcher(model_path, verbose=True), outputs=['modelfile/modified'])
V.add(FileWatcher(model_path), outputs=['modelfile/dirty'], run_condition="ai_running")
V.add(DelayedTrigger(100), inputs=['modelfile/dirty'], outputs=['modelfile/reload'], run_condition="ai_running")
V.add(TriggeredCallback(model_path, model_reload_cb), inputs=["modelfile/reload"], run_condition="ai_running")
outputs=['pilot/angle', 'pilot/throttle']
if cfg.TRAIN_LOCALIZER:
outputs.append("pilot/loc")
V.add(kl, inputs=inputs,
outputs=outputs,
run_condition='run_pilot')
#Choose what inputs should change the car.
class DriveMode:
def run(self, mode,
user_angle, user_throttle,
pilot_angle, pilot_throttle):
if mode == 'user':
return user_angle, user_throttle
elif mode == 'local_angle':
return pilot_angle, user_throttle
else:
return pilot_angle, pilot_throttle * cfg.AI_THROTTLE_MULT
V.add(DriveMode(),
inputs=['user/mode', 'user/angle', 'user/throttle',
'pilot/angle', 'pilot/throttle'],
outputs=['angle', 'throttle'])
#to give the car a boost when starting ai mode in a race.
aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE, cfg.AI_LAUNCH_KEEP_ENABLED)
V.add(aiLauncher,
inputs=['user/mode', 'throttle'],
outputs=['throttle'])
if isinstance(ctr, JoystickController):
ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON, aiLauncher.enable_ai_launch)
class AiRunCondition:
def run(self, mode):
if mode == "user":
return False
return True
V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])
#Ai Recording
class AiRecordingCondition:
def run(self, mode, recording):
if mode == 'user':
return recording
return True
if cfg.RECORD_DURING_AI:
V.add(AiRecordingCondition(), inputs=['user/mode', 'recording'], outputs=['recording'])
#Drive train setup
if cfg.DONKEY_GYM:
pass
elif cfg.DRIVE_TRAIN_TYPE == "SERVO_ESC":
from irmark1.parts.actuator import PCA9685, PWMSteering, PWMThrottle
steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'])
V.add(throttle, inputs=['throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
from irmark1.parts.actuator import Mini_HBridge_DC_Motor_PWM
steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT, cfg.HBRIDGE_PIN_RIGHT)
throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'])
V.add(throttle, inputs=['throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
from irmark1.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM
left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD, cfg.HBRIDGE_PIN_LEFT_BWD)
right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD, cfg.HBRIDGE_PIN_RIGHT_BWD)
two_wheel_control = TwoWheelSteeringThrottle()
V.add(two_wheel_control,
inputs=['throttle', 'angle'],
outputs=['left_motor_speed', 'right_motor_speed'])
V.add(left_motor, inputs=['left_motor_speed'])
V.add(right_motor, inputs=['right_motor_speed'])
elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
from irmark1.parts.actuator import ServoBlaster, PWMSteering
steering_controller = ServoBlaster(cfg.STEERING_CHANNEL) #really pin
#PWM pulse values should be in the range of 100 to 200
assert(cfg.STEERING_LEFT_PWM <= 200)
assert(cfg.STEERING_RIGHT_PWM <= 200)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
from irmark1.parts.actuator import Mini_HBridge_DC_Motor_PWM
motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'])
V.add(motor, inputs=["throttle"])
#add tub to save data
inputs=['cam/image_array',
'user/angle', 'user/throttle',
'user/mode']
types=['image_array',
'float', 'float',
'str']
if cfg.TRAIN_BEHAVIORS:
inputs += ['behavior/state', 'behavior/label', "behavior/one_hot_state_array"]
types += ['int', 'str', 'vector']
if cfg.HAVE_IMU:
inputs += ['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
types +=['float', 'float', 'float',
'float', 'float', 'float']
if cfg.RECORD_DURING_AI:
inputs += ['pilot/angle', 'pilot/throttle']
types += ['float', 'float']
th = TubHandler(path=cfg.DATA_PATH)
tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
V.add(tub, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')
if cfg.PUB_CAMERA_IMAGES:
from irmark1.parts.network import TCPServeValue
from irmark1.parts.image import ImgArrToJpg
pub = TCPServeValue("camera")
V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
V.add(pub, inputs=['jpg/bin'])
if type(ctr) is LocalWebController:
print("You can now go to <your pi ip address>:8887 to drive your car.")
elif isinstance(ctr, JoystickController):
print("You can now move your joystick to drive your car.")
#tell the controller about the tub
ctr.set_tub(tub)
if cfg.BUTTON_PRESS_NEW_TUB:
def new_tub_dir():
V.parts.pop()
tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
V.add(tub, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')
ctr.set_tub(tub)
ctr.set_button_down_trigger('cross', new_tub_dir)
ctr.print_controls()
#run the vehicle for 20 seconds
V.start(rate_hz=cfg.DRIVE_LOOP_HZ,
max_loop_count=cfg.MAX_LOOPS)
if __name__ == '__main__':
args = docopt(__doc__)
cfg = m1.load_config()
if args['drive']:
model_type = args['--type']
camera_type = args['--camera']
drive(cfg, model_path=args['--model'], use_joystick=args['--js'], model_type=model_type, camera_type=camera_type,
meta=args['--meta'])
if args['train']:
from train import multi_train, preprocessFileList
tub = args['--tub']
model = args['--model']
transfer = args['--transfer']
model_type = args['--type']
continuous = args['--continuous']
aug = args['--aug']
dirs = preprocessFileList( args['--file'] )
if tub is not None:
tub_paths = [os.path.expanduser(n) for n in tub.split(',')]
dirs.extend( tub_paths )
multi_train(cfg, dirs, model, transfer, model_type, continuous, aug)
| true | true |
f7fb59808ba0a6fc372d370b26c1fc45691c8bb2 | 111 | py | Python | applicationinsights/__init__.py | Rebeccalau/ApplicationInsights-Python | cc91fede2d6d6c48acaa5687aa13ca491a17025a | [
"MIT"
] | 89 | 2015-05-06T22:02:17.000Z | 2019-04-22T14:50:33.000Z | applicationinsights/__init__.py | Rebeccalau/ApplicationInsights-Python | cc91fede2d6d6c48acaa5687aa13ca491a17025a | [
"MIT"
] | 115 | 2015-04-29T17:44:52.000Z | 2019-04-25T21:39:02.000Z | applicationinsights/__init__.py | Rebeccalau/ApplicationInsights-Python | cc91fede2d6d6c48acaa5687aa13ca491a17025a | [
"MIT"
] | 59 | 2015-04-19T13:34:52.000Z | 2019-04-25T21:04:02.000Z | from .TelemetryClient import TelemetryClient
from . import channel
from . import logging
from . import requests | 27.75 | 44 | 0.828829 | from .TelemetryClient import TelemetryClient
from . import channel
from . import logging
from . import requests | true | true |
f7fb5a20f6035fc77ef1fcd6f6277b27a12e61f3 | 13,596 | py | Python | ethereum/processblock.py | volut-staging/pyethereum | f44902a4e9ea0fee00f9ef3e58ce1fd566ffe45b | [
"MIT"
] | null | null | null | ethereum/processblock.py | volut-staging/pyethereum | f44902a4e9ea0fee00f9ef3e58ce1fd566ffe45b | [
"MIT"
] | null | null | null | ethereum/processblock.py | volut-staging/pyethereum | f44902a4e9ea0fee00f9ef3e58ce1fd566ffe45b | [
"MIT"
] | null | null | null | import sys
import rlp
from rlp.sedes import CountableList, binary
from rlp.utils import decode_hex, encode_hex, ascii_chr
from ethereum import opcodes
from ethereum import utils
from ethereum import specials
from ethereum import bloom
from ethereum import vm as vm
from ethereum.exceptions import InvalidNonce, InsufficientStartGas, UnsignedTransaction, \
BlockGasLimitReached, InsufficientBalance, VerificationFailed
from ethereum.utils import safe_ord, normalize_address, mk_contract_address, \
mk_metropolis_contract_address, big_endian_to_int
from ethereum import transactions
import ethereum.config as config
sys.setrecursionlimit(100000)
from ethereum.slogging import get_logger
log_tx = get_logger('eth.pb.tx')
log_msg = get_logger('eth.pb.msg')
log_state = get_logger('eth.pb.msg.state')
TT255 = 2 ** 255
TT256 = 2 ** 256
TT256M1 = 2 ** 256 - 1
OUT_OF_GAS = -1
# contract creating transactions send to an empty address
CREATE_CONTRACT_ADDRESS = b''
def verify(block, parent):
from ethereum import blocks
try:
block2 = rlp.decode(rlp.encode(block), blocks.Block,
env=parent.env, parent=parent)
assert block == block2
return True
except VerificationFailed:
return False
class Log(rlp.Serializable):
# TODO: original version used zpad (here replaced by int32.serialize); had
# comment "why zpad"?
fields = [
('address', utils.address),
('topics', CountableList(utils.int32)),
('data', binary)
]
def __init__(self, address, topics, data):
if len(address) == 40:
address = decode_hex(address)
assert len(address) == 20
super(Log, self).__init__(address, topics, data)
def bloomables(self):
return [self.address] + [utils.int32.serialize(x) for x in self.topics]
def to_dict(self):
return {
"bloom": encode_hex(bloom.b64(bloom.bloom_from_list(self.bloomables()))),
"address": encode_hex(self.address),
"data": b'0x' + encode_hex(self.data),
"topics": [encode_hex(utils.int32.serialize(t))
for t in self.topics]
}
def __repr__(self):
return '<Log(address=%r, topics=%r, data=%r)>' % \
(encode_hex(self.address), self.topics, self.data)
def validate_transaction(block, tx):
def rp(what, actual, target):
return '%r: %r actual:%r target:%r' % (tx, what, actual, target)
# (1) The transaction signature is valid;
if not tx.sender: # sender is set and validated on Transaction initialization
if block.number >= config.default_config["METROPOLIS_FORK_BLKNUM"]:
tx._sender = normalize_address(config.default_config["METROPOLIS_ENTRY_POINT"])
else:
raise UnsignedTransaction(tx)
if block.number >= config.default_config["HOMESTEAD_FORK_BLKNUM"]:
tx.check_low_s()
# (2) the transaction nonce is valid (equivalent to the
# sender account's current nonce);
acctnonce = block.get_nonce(tx.sender)
if acctnonce != tx.nonce:
raise InvalidNonce(rp('nonce', tx.nonce, acctnonce))
# (3) the gas limit is no smaller than the intrinsic gas,
# g0, used by the transaction;
if tx.startgas < tx.intrinsic_gas_used:
raise InsufficientStartGas(rp('startgas', tx.startgas, tx.intrinsic_gas_used))
# (4) the sender account balance contains at least the
# cost, v0, required in up-front payment.
total_cost = tx.value + tx.gasprice * tx.startgas
if block.get_balance(tx.sender) < total_cost:
raise InsufficientBalance(rp('balance', block.get_balance(tx.sender), total_cost))
# check block gas limit
if block.gas_used + tx.startgas > block.gas_limit:
raise BlockGasLimitReached(rp('gaslimit', block.gas_used + tx.startgas, block.gas_limit))
return True
class lazy_safe_encode(object):
"""Creates a lazy and logging safe representation of transaction data.
Use this in logging of transactions; instead of
>>> log.debug(data=data)
do this:
>>> log.debug(data=lazy_safe_encode(data))
"""
def __init__(self, data):
self.data = data
def __str__(self):
if not isinstance(self.data, (str, unicode)):
return repr(self.data)
else:
return encode_hex(self.data)
def __repr__(self):
return str(self)
def apply_transaction(block, tx):
validate_transaction(block, tx)
# print(block.get_nonce(tx.sender), '@@@')
def rp(what, actual, target):
return '%r: %r actual:%r target:%r' % (tx, what, actual, target)
intrinsic_gas = tx.intrinsic_gas_used
if block.number >= block.config['HOMESTEAD_FORK_BLKNUM']:
assert tx.s * 2 < transactions.secpk1n
if not tx.to or tx.to == CREATE_CONTRACT_ADDRESS:
intrinsic_gas += opcodes.CREATE[3]
if tx.startgas < intrinsic_gas:
raise InsufficientStartGas(rp('startgas', tx.startgas, intrinsic_gas))
log_tx.debug('TX NEW', tx_dict=tx.log_dict())
# start transacting #################
block.increment_nonce(tx.sender)
# buy startgas
assert block.get_balance(tx.sender) >= tx.startgas * tx.gasprice
block.delta_balance(tx.sender, -tx.startgas * tx.gasprice)
message_gas = tx.startgas - intrinsic_gas
message_data = vm.CallData([safe_ord(x) for x in tx.data], 0, len(tx.data))
message = vm.Message(tx.sender, tx.to, tx.value, message_gas, message_data, code_address=tx.to)
# MESSAGE
ext = VMExt(block, tx)
if tx.to and tx.to != CREATE_CONTRACT_ADDRESS:
result, gas_remained, data = apply_msg(ext, message)
log_tx.debug('_res_', result=result, gas_remained=gas_remained, data=lazy_safe_encode(data))
else: # CREATE
result, gas_remained, data = create_contract(ext, message)
assert utils.is_numeric(gas_remained)
log_tx.debug('_create_', result=result, gas_remained=gas_remained, data=lazy_safe_encode(data))
assert gas_remained >= 0
log_tx.debug("TX APPLIED", result=result, gas_remained=gas_remained,
data=lazy_safe_encode(data))
if not result: # 0 = OOG failure in both cases
log_tx.debug('TX FAILED', reason='out of gas',
startgas=tx.startgas, gas_remained=gas_remained)
block.gas_used += tx.startgas
block.delta_balance(block.coinbase, tx.gasprice * tx.startgas)
output = b''
success = 0
else:
log_tx.debug('TX SUCCESS', data=lazy_safe_encode(data))
gas_used = tx.startgas - gas_remained
block.refunds += len(set(block.suicides)) * opcodes.GSUICIDEREFUND
if block.refunds > 0:
log_tx.debug('Refunding', gas_refunded=min(block.refunds, gas_used // 2))
gas_remained += min(block.refunds, gas_used // 2)
gas_used -= min(block.refunds, gas_used // 2)
block.refunds = 0
# sell remaining gas
block.delta_balance(tx.sender, tx.gasprice * gas_remained)
block.delta_balance(block.coinbase, tx.gasprice * gas_used)
block.gas_used += gas_used
if tx.to:
output = b''.join(map(ascii_chr, data))
else:
output = data
success = 1
block.commit_state()
suicides = block.suicides
block.suicides = []
for s in suicides:
block.ether_delta -= block.get_balance(s)
block.set_balance(s, 0)
block.del_account(s)
block.add_transaction_to_list(tx)
block.logs = []
return success, output
# External calls that can be made from inside the VM. To use the EVM with a
# different blockchain system, database, set parameters for testing, just
# swap out the functions here
class VMExt():
def __init__(self, block, tx):
self._block = block
self.get_code = block.get_code
self.get_balance = block.get_balance
self.set_balance = block.set_balance
self.get_nonce = block.get_nonce
self.set_nonce = block.set_nonce
self.set_storage_data = block.set_storage_data
self.get_storage_data = block.get_storage_data
self.get_storage_bytes = block.get_storage_bytes
self.log_storage = lambda x: block.account_to_dict(x)['storage']
self.add_suicide = lambda x: block.suicides.append(x)
self.add_refund = lambda x: \
setattr(block, 'refunds', block.refunds + x)
self.block_hash = lambda x: block.get_ancestor_hash(block.number - x) \
if (1 <= block.number - x <= 256 and x <= block.number) else b''
self.block_coinbase = block.coinbase
self.block_timestamp = block.timestamp
self.block_number = block.number
self.block_difficulty = block.difficulty
self.block_gas_limit = block.gas_limit
self.log = lambda addr, topics, data: \
block.add_log(Log(addr, topics, data))
self.tx_origin = tx.sender
self.tx_gasprice = tx.gasprice
self.create = lambda msg: create_contract(self, msg)
self.msg = lambda msg: _apply_msg(self, msg, self.get_code(msg.code_address))
self.account_exists = block.account_exists
self.post_homestead_hardfork = lambda: block.number >= block.config['HOMESTEAD_FORK_BLKNUM']
def apply_msg(ext, msg):
return _apply_msg(ext, msg, ext.get_code(msg.code_address))
def _apply_msg(ext, msg, code):
trace_msg = log_msg.is_active('trace')
if trace_msg:
log_msg.debug("MSG APPLY", sender=encode_hex(msg.sender), to=encode_hex(msg.to),
gas=msg.gas, value=msg.value,
data=encode_hex(msg.data.extract_all()))
if log_state.is_active('trace'):
log_state.trace('MSG PRE STATE SENDER', account=encode_hex(msg.sender),
bal=ext.get_balance(msg.sender),
state=ext.log_storage(msg.sender))
log_state.trace('MSG PRE STATE RECIPIENT', account=encode_hex(msg.to),
bal=ext.get_balance(msg.to),
state=ext.log_storage(msg.to))
# log_state.trace('CODE', code=code)
# Transfer value, instaquit if not enough
snapshot = ext._block.snapshot()
if msg.transfers_value:
if not ext._block.transfer_value(msg.sender, msg.to, msg.value):
log_msg.debug('MSG TRANSFER FAILED', have=ext.get_balance(msg.to),
want=msg.value)
return 1, msg.gas, []
# Main loop
if msg.code_address in specials.specials:
res, gas, dat = specials.specials[msg.code_address](ext, msg)
else:
res, gas, dat = vm.vm_execute(ext, msg, code)
# gas = int(gas)
# assert utils.is_numeric(gas)
if trace_msg:
log_msg.debug('MSG APPLIED', gas_remained=gas,
sender=encode_hex(msg.sender), to=encode_hex(msg.to), data=dat)
if log_state.is_active('trace'):
log_state.trace('MSG POST STATE SENDER', account=encode_hex(msg.sender),
bal=ext.get_balance(msg.sender),
state=ext.log_storage(msg.sender))
log_state.trace('MSG POST STATE RECIPIENT', account=encode_hex(msg.to),
bal=ext.get_balance(msg.to),
state=ext.log_storage(msg.to))
if res == 0:
log_msg.debug('REVERTING')
ext._block.revert(snapshot)
return res, gas, dat
def create_contract(ext, msg):
log_msg.debug('CONTRACT CREATION')
#print('CREATING WITH GAS', msg.gas)
sender = decode_hex(msg.sender) if len(msg.sender) == 40 else msg.sender
code = msg.data.extract_all()
if ext._block.number >= ext._block.config['METROPOLIS_FORK_BLKNUM']:
msg.to = mk_metropolis_contract_address(msg.sender, code)
if ext.get_code(msg.to):
if ext.get_nonce(msg.to) >= 2 ** 40:
ext.set_nonce(msg.to, (ext.get_nonce(msg.to) + 1) % 2 ** 160)
msg.to = normalize_address((ext.get_nonce(msg.to) - 1) % 2 ** 160)
else:
ext.set_nonce(msg.to, (big_endian_to_int(msg.to) + 2) % 2 ** 160)
msg.to = normalize_address((ext.get_nonce(msg.to) - 1) % 2 ** 160)
else:
if ext.tx_origin != msg.sender:
ext._block.increment_nonce(msg.sender)
nonce = utils.encode_int(ext._block.get_nonce(msg.sender) - 1)
msg.to = mk_contract_address(sender, nonce)
b = ext.get_balance(msg.to)
if b > 0:
ext.set_balance(msg.to, b)
ext._block.set_nonce(msg.to, 0)
ext._block.set_code(msg.to, b'')
ext._block.reset_storage(msg.to)
msg.is_create = True
# assert not ext.get_code(msg.to)
msg.data = vm.CallData([], 0, 0)
snapshot = ext._block.snapshot()
res, gas, dat = _apply_msg(ext, msg, code)
assert utils.is_numeric(gas)
if res:
if not len(dat):
return 1, gas, msg.to
gcost = len(dat) * opcodes.GCONTRACTBYTE
if gas >= gcost:
gas -= gcost
else:
dat = []
log_msg.debug('CONTRACT CREATION OOG', have=gas, want=gcost, block_number=ext._block.number)
if ext._block.number >= ext._block.config['HOMESTEAD_FORK_BLKNUM']:
ext._block.revert(snapshot)
return 0, 0, b''
ext._block.set_code(msg.to, b''.join(map(ascii_chr, dat)))
return 1, gas, msg.to
else:
return 0, gas, b''
| 38.515581 | 104 | 0.636437 | import sys
import rlp
from rlp.sedes import CountableList, binary
from rlp.utils import decode_hex, encode_hex, ascii_chr
from ethereum import opcodes
from ethereum import utils
from ethereum import specials
from ethereum import bloom
from ethereum import vm as vm
from ethereum.exceptions import InvalidNonce, InsufficientStartGas, UnsignedTransaction, \
BlockGasLimitReached, InsufficientBalance, VerificationFailed
from ethereum.utils import safe_ord, normalize_address, mk_contract_address, \
mk_metropolis_contract_address, big_endian_to_int
from ethereum import transactions
import ethereum.config as config
sys.setrecursionlimit(100000)
from ethereum.slogging import get_logger
log_tx = get_logger('eth.pb.tx')
log_msg = get_logger('eth.pb.msg')
log_state = get_logger('eth.pb.msg.state')
TT255 = 2 ** 255
TT256 = 2 ** 256
TT256M1 = 2 ** 256 - 1
OUT_OF_GAS = -1
CREATE_CONTRACT_ADDRESS = b''
def verify(block, parent):
from ethereum import blocks
try:
block2 = rlp.decode(rlp.encode(block), blocks.Block,
env=parent.env, parent=parent)
assert block == block2
return True
except VerificationFailed:
return False
class Log(rlp.Serializable):
fields = [
('address', utils.address),
('topics', CountableList(utils.int32)),
('data', binary)
]
def __init__(self, address, topics, data):
if len(address) == 40:
address = decode_hex(address)
assert len(address) == 20
super(Log, self).__init__(address, topics, data)
def bloomables(self):
return [self.address] + [utils.int32.serialize(x) for x in self.topics]
def to_dict(self):
return {
"bloom": encode_hex(bloom.b64(bloom.bloom_from_list(self.bloomables()))),
"address": encode_hex(self.address),
"data": b'0x' + encode_hex(self.data),
"topics": [encode_hex(utils.int32.serialize(t))
for t in self.topics]
}
def __repr__(self):
return '<Log(address=%r, topics=%r, data=%r)>' % \
(encode_hex(self.address), self.topics, self.data)
def validate_transaction(block, tx):
def rp(what, actual, target):
return '%r: %r actual:%r target:%r' % (tx, what, actual, target)
if not tx.sender:
if block.number >= config.default_config["METROPOLIS_FORK_BLKNUM"]:
tx._sender = normalize_address(config.default_config["METROPOLIS_ENTRY_POINT"])
else:
raise UnsignedTransaction(tx)
if block.number >= config.default_config["HOMESTEAD_FORK_BLKNUM"]:
tx.check_low_s()
acctnonce = block.get_nonce(tx.sender)
if acctnonce != tx.nonce:
raise InvalidNonce(rp('nonce', tx.nonce, acctnonce))
# (3) the gas limit is no smaller than the intrinsic gas,
# g0, used by the transaction;
if tx.startgas < tx.intrinsic_gas_used:
raise InsufficientStartGas(rp('startgas', tx.startgas, tx.intrinsic_gas_used))
# (4) the sender account balance contains at least the
# cost, v0, required in up-front payment.
total_cost = tx.value + tx.gasprice * tx.startgas
if block.get_balance(tx.sender) < total_cost:
raise InsufficientBalance(rp('balance', block.get_balance(tx.sender), total_cost))
# check block gas limit
if block.gas_used + tx.startgas > block.gas_limit:
raise BlockGasLimitReached(rp('gaslimit', block.gas_used + tx.startgas, block.gas_limit))
return True
class lazy_safe_encode(object):
def __init__(self, data):
self.data = data
def __str__(self):
if not isinstance(self.data, (str, unicode)):
return repr(self.data)
else:
return encode_hex(self.data)
def __repr__(self):
return str(self)
def apply_transaction(block, tx):
validate_transaction(block, tx)
# print(block.get_nonce(tx.sender), '@@@')
def rp(what, actual, target):
return '%r: %r actual:%r target:%r' % (tx, what, actual, target)
intrinsic_gas = tx.intrinsic_gas_used
if block.number >= block.config['HOMESTEAD_FORK_BLKNUM']:
assert tx.s * 2 < transactions.secpk1n
if not tx.to or tx.to == CREATE_CONTRACT_ADDRESS:
intrinsic_gas += opcodes.CREATE[3]
if tx.startgas < intrinsic_gas:
raise InsufficientStartGas(rp('startgas', tx.startgas, intrinsic_gas))
log_tx.debug('TX NEW', tx_dict=tx.log_dict())
# start transacting #################
block.increment_nonce(tx.sender)
# buy startgas
assert block.get_balance(tx.sender) >= tx.startgas * tx.gasprice
block.delta_balance(tx.sender, -tx.startgas * tx.gasprice)
message_gas = tx.startgas - intrinsic_gas
message_data = vm.CallData([safe_ord(x) for x in tx.data], 0, len(tx.data))
message = vm.Message(tx.sender, tx.to, tx.value, message_gas, message_data, code_address=tx.to)
# MESSAGE
ext = VMExt(block, tx)
if tx.to and tx.to != CREATE_CONTRACT_ADDRESS:
result, gas_remained, data = apply_msg(ext, message)
log_tx.debug('_res_', result=result, gas_remained=gas_remained, data=lazy_safe_encode(data))
else: # CREATE
result, gas_remained, data = create_contract(ext, message)
assert utils.is_numeric(gas_remained)
log_tx.debug('_create_', result=result, gas_remained=gas_remained, data=lazy_safe_encode(data))
assert gas_remained >= 0
log_tx.debug("TX APPLIED", result=result, gas_remained=gas_remained,
data=lazy_safe_encode(data))
if not result: # 0 = OOG failure in both cases
log_tx.debug('TX FAILED', reason='out of gas',
startgas=tx.startgas, gas_remained=gas_remained)
block.gas_used += tx.startgas
block.delta_balance(block.coinbase, tx.gasprice * tx.startgas)
output = b''
success = 0
else:
log_tx.debug('TX SUCCESS', data=lazy_safe_encode(data))
gas_used = tx.startgas - gas_remained
block.refunds += len(set(block.suicides)) * opcodes.GSUICIDEREFUND
if block.refunds > 0:
log_tx.debug('Refunding', gas_refunded=min(block.refunds, gas_used // 2))
gas_remained += min(block.refunds, gas_used // 2)
gas_used -= min(block.refunds, gas_used // 2)
block.refunds = 0
# sell remaining gas
block.delta_balance(tx.sender, tx.gasprice * gas_remained)
block.delta_balance(block.coinbase, tx.gasprice * gas_used)
block.gas_used += gas_used
if tx.to:
output = b''.join(map(ascii_chr, data))
else:
output = data
success = 1
block.commit_state()
suicides = block.suicides
block.suicides = []
for s in suicides:
block.ether_delta -= block.get_balance(s)
block.set_balance(s, 0)
block.del_account(s)
block.add_transaction_to_list(tx)
block.logs = []
return success, output
# External calls that can be made from inside the VM. To use the EVM with a
# different blockchain system, database, set parameters for testing, just
# swap out the functions here
class VMExt():
def __init__(self, block, tx):
self._block = block
self.get_code = block.get_code
self.get_balance = block.get_balance
self.set_balance = block.set_balance
self.get_nonce = block.get_nonce
self.set_nonce = block.set_nonce
self.set_storage_data = block.set_storage_data
self.get_storage_data = block.get_storage_data
self.get_storage_bytes = block.get_storage_bytes
self.log_storage = lambda x: block.account_to_dict(x)['storage']
self.add_suicide = lambda x: block.suicides.append(x)
self.add_refund = lambda x: \
setattr(block, 'refunds', block.refunds + x)
self.block_hash = lambda x: block.get_ancestor_hash(block.number - x) \
if (1 <= block.number - x <= 256 and x <= block.number) else b''
self.block_coinbase = block.coinbase
self.block_timestamp = block.timestamp
self.block_number = block.number
self.block_difficulty = block.difficulty
self.block_gas_limit = block.gas_limit
self.log = lambda addr, topics, data: \
block.add_log(Log(addr, topics, data))
self.tx_origin = tx.sender
self.tx_gasprice = tx.gasprice
self.create = lambda msg: create_contract(self, msg)
self.msg = lambda msg: _apply_msg(self, msg, self.get_code(msg.code_address))
self.account_exists = block.account_exists
self.post_homestead_hardfork = lambda: block.number >= block.config['HOMESTEAD_FORK_BLKNUM']
def apply_msg(ext, msg):
return _apply_msg(ext, msg, ext.get_code(msg.code_address))
def _apply_msg(ext, msg, code):
trace_msg = log_msg.is_active('trace')
if trace_msg:
log_msg.debug("MSG APPLY", sender=encode_hex(msg.sender), to=encode_hex(msg.to),
gas=msg.gas, value=msg.value,
data=encode_hex(msg.data.extract_all()))
if log_state.is_active('trace'):
log_state.trace('MSG PRE STATE SENDER', account=encode_hex(msg.sender),
bal=ext.get_balance(msg.sender),
state=ext.log_storage(msg.sender))
log_state.trace('MSG PRE STATE RECIPIENT', account=encode_hex(msg.to),
bal=ext.get_balance(msg.to),
state=ext.log_storage(msg.to))
# log_state.trace('CODE', code=code)
# Transfer value, instaquit if not enough
snapshot = ext._block.snapshot()
if msg.transfers_value:
if not ext._block.transfer_value(msg.sender, msg.to, msg.value):
log_msg.debug('MSG TRANSFER FAILED', have=ext.get_balance(msg.to),
want=msg.value)
return 1, msg.gas, []
# Main loop
if msg.code_address in specials.specials:
res, gas, dat = specials.specials[msg.code_address](ext, msg)
else:
res, gas, dat = vm.vm_execute(ext, msg, code)
# gas = int(gas)
# assert utils.is_numeric(gas)
if trace_msg:
log_msg.debug('MSG APPLIED', gas_remained=gas,
sender=encode_hex(msg.sender), to=encode_hex(msg.to), data=dat)
if log_state.is_active('trace'):
log_state.trace('MSG POST STATE SENDER', account=encode_hex(msg.sender),
bal=ext.get_balance(msg.sender),
state=ext.log_storage(msg.sender))
log_state.trace('MSG POST STATE RECIPIENT', account=encode_hex(msg.to),
bal=ext.get_balance(msg.to),
state=ext.log_storage(msg.to))
if res == 0:
log_msg.debug('REVERTING')
ext._block.revert(snapshot)
return res, gas, dat
def create_contract(ext, msg):
    """Handle a contract-creation message: derive the new address, run the
    init code via ``_apply_msg`` and deposit the returned runtime code.

    Returns ``(res, gas, data)`` where on success ``data`` is the new
    contract's address and on failure it is ``b''``.
    """
    log_msg.debug('CONTRACT CREATION')
    #print('CREATING WITH GAS', msg.gas)
    sender = decode_hex(msg.sender) if len(msg.sender) == 40 else msg.sender
    code = msg.data.extract_all()
    if ext._block.number >= ext._block.config['METROPOLIS_FORK_BLKNUM']:
        # Metropolis scheme: address derived from sender + init code; nonces
        # disambiguate repeated creations that would collide.
        msg.to = mk_metropolis_contract_address(msg.sender, code)
        if ext.get_code(msg.to):
            if ext.get_nonce(msg.to) >= 2 ** 40:
                ext.set_nonce(msg.to, (ext.get_nonce(msg.to) + 1) % 2 ** 160)
                msg.to = normalize_address((ext.get_nonce(msg.to) - 1) % 2 ** 160)
            else:
                ext.set_nonce(msg.to, (big_endian_to_int(msg.to) + 2) % 2 ** 160)
                msg.to = normalize_address((ext.get_nonce(msg.to) - 1) % 2 ** 160)
    else:
        # Pre-Metropolis scheme: address = f(sender, sender nonce).
        if ext.tx_origin != msg.sender:
            ext._block.increment_nonce(msg.sender)
        nonce = utils.encode_int(ext._block.get_nonce(msg.sender) - 1)
        msg.to = mk_contract_address(sender, nonce)
    b = ext.get_balance(msg.to)
    if b > 0:
        # Target address may already hold value: keep the balance but wipe
        # any stale nonce/code/storage before (re)creating the account.
        ext.set_balance(msg.to, b)
        ext._block.set_nonce(msg.to, 0)
        ext._block.set_code(msg.to, b'')
        ext._block.reset_storage(msg.to)
    msg.is_create = True
    # assert not ext.get_code(msg.to)
    msg.data = vm.CallData([], 0, 0)
    snapshot = ext._block.snapshot()
    res, gas, dat = _apply_msg(ext, msg, code)
    assert utils.is_numeric(gas)

    if res:
        if not len(dat):
            # Init code returned no runtime code: account exists, code empty.
            return 1, gas, msg.to
        # Charge the per-byte deposit cost for storing the runtime code.
        gcost = len(dat) * opcodes.GCONTRACTBYTE
        if gas >= gcost:
            gas -= gcost
        else:
            dat = []
            log_msg.debug('CONTRACT CREATION OOG', have=gas, want=gcost, block_number=ext._block.number)
            if ext._block.number >= ext._block.config['HOMESTEAD_FORK_BLKNUM']:
                # From Homestead on, failing the code-deposit charge aborts
                # the whole creation instead of leaving an empty contract.
                ext._block.revert(snapshot)
                return 0, 0, b''
        ext._block.set_code(msg.to, b''.join(map(ascii_chr, dat)))
        return 1, gas, msg.to
    else:
        return 0, gas, b''
| true | true |
f7fb5aea429174463bb79f3d130a4f2a062fec42 | 8,652 | py | Python | train.py | vietnguyen1991/Character-level-cnn-tensorflow | f4067093ef54a92fd3cd6558823fe6a06bfc5614 | [
"MIT"
] | 12 | 2019-01-31T22:59:43.000Z | 2019-08-21T07:14:47.000Z | train.py | vietnguyen91/Character-level-cnn-tensorflow | f4067093ef54a92fd3cd6558823fe6a06bfc5614 | [
"MIT"
] | 1 | 2019-10-25T12:59:48.000Z | 2019-10-25T12:59:57.000Z | train.py | nhviet1009/Character-level-cnn-tensorflow | f4067093ef54a92fd3cd6558823fe6a06bfc5614 | [
"MIT"
] | 6 | 2019-02-10T14:12:45.000Z | 2019-08-10T17:18:20.000Z | """
@author: Thang Nguyen <nhthang1009@gmail.com>
"""
import os
import shutil
import numpy as np
import tensorflow as tf
from src.character_level_cnn import Char_level_cnn
from src.utils import get_num_classes, create_dataset
# Command-line configuration (TF1 flags).  `FLAGS` is the parsed namespace
# read throughout `train()`.
tf.flags.DEFINE_string("alphabet", """abcdefghijklmnopqrstuvwxyz0123456789,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}""",
                       "Valid characters used for model")
tf.flags.DEFINE_string("train_set", "data/train.csv", "Path to the training set")
tf.flags.DEFINE_string("test_set", "data/test.csv", "Path to the test set")
tf.flags.DEFINE_integer("test_interval", 1, "Number of epochs between testing phases")
tf.flags.DEFINE_integer("max_length", 1014, "Maximum length of input")
tf.flags.DEFINE_string("feature", "small", "large or small")
tf.flags.DEFINE_integer("batch_size", 128, "Minibatch size")
tf.flags.DEFINE_integer("num_epochs", 20, "Number of training epochs")
tf.flags.DEFINE_float("lr", 1e-2, "Learning rate")
tf.flags.DEFINE_string("optimizer", "sgd", "sgd or adam")
tf.flags.DEFINE_float("dropout", 0.5, "Dropout's probability")
tf.flags.DEFINE_string("log_path", "tensorboard/char_level_cnn", "path to tensorboard folder")
tf.flags.DEFINE_string("saved_path", "trained_models", "path to store trained model")
tf.flags.DEFINE_float("es_min_delta", 0.,
                      "Early stopping's parameter: minimum change loss to qualify as an improvement")
tf.flags.DEFINE_integer("es_patience", 3,
                        "Early stopping's parameter: number of epochs with no improvement after which training will be stopped. Set to 0 to disable this technique")
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")

FLAGS = tf.flags.FLAGS
def train():
    """Build the TF1 graph for the character-level CNN and run the
    train/evaluate loop.

    Writes TensorBoard summaries under FLAGS.log_path, logs per-epoch test
    metrics to FLAGS.saved_path/logs.txt, checkpoints the model with the
    lowest test loss, and early-stops after FLAGS.es_patience epochs
    without improvement.
    """
    num_classes = get_num_classes(FLAGS.train_set)
    model = Char_level_cnn(batch_size=FLAGS.batch_size, num_classes=num_classes, feature=FLAGS.feature)
    with tf.Graph().as_default():
        session_conf = tf.ConfigProto(
            allow_soft_placement=FLAGS.allow_soft_placement,
            log_device_placement=FLAGS.log_device_placement)
        session_conf.gpu_options.allow_growth = True
        # One initializable iterator per split; a string-handle placeholder
        # selects which split feeds the shared graph at run time.
        training_set, num_training_iters = create_dataset(FLAGS.train_set, FLAGS.alphabet, FLAGS.max_length,
                                                          FLAGS.batch_size, True)
        test_set, num_test_iters = create_dataset(FLAGS.test_set, FLAGS.alphabet, FLAGS.max_length, FLAGS.batch_size, False)
        train_iterator = training_set.make_initializable_iterator()
        test_iterator = test_set.make_initializable_iterator()
        handle = tf.placeholder(tf.string, shape=[])
        keep_prob = tf.placeholder(tf.float32, name='dropout_prob')
        iterator = tf.data.Iterator.from_string_handle(handle, training_set.output_types, training_set.output_shapes)
        texts, labels = iterator.get_next()
        logits = model.forward(texts, keep_prob)
        loss = model.loss(logits, labels)
        loss_summary = tf.summary.scalar("loss", loss)
        accuracy = model.accuracy(logits, labels)
        accuracy_summary = tf.summary.scalar("accuracy", accuracy)
        # Actual per-batch size (the last batch may be smaller); used to
        # weight test-set averages below.
        batch_size = tf.unstack(tf.shape(texts))[0]
        confusion = model.confusion_matrix(logits, labels)
        global_step = tf.Variable(0, name="global_step", trainable=False)
        if FLAGS.optimizer == "sgd":
            # Step schedule: halve the learning rate every 3 epochs
            # (boundaries are expressed in global steps).
            values = [FLAGS.lr]
            boundaries = []
            for i in range(1, 10):
                values.append(FLAGS.lr / pow(2, i))
                boundaries.append(3 * num_training_iters * i)
            learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)
            optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
        else:
            optimizer = tf.train.AdamOptimizer(FLAGS.lr)
        train_op = optimizer.minimize(loss, global_step=global_step)
        merged = tf.summary.merge([loss_summary, accuracy_summary])
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()
        # Start each run from clean log/checkpoint directories.
        if os.path.isdir(FLAGS.log_path):
            shutil.rmtree(FLAGS.log_path)
        os.makedirs(FLAGS.log_path)
        if os.path.isdir(FLAGS.saved_path):
            shutil.rmtree(FLAGS.saved_path)
        os.makedirs(FLAGS.saved_path)
        output_file = open(FLAGS.saved_path + os.sep + "logs.txt", "w")
        output_file.write("Model's parameters: {}".format(FLAGS.flag_values_dict()))
        best_loss = 1e5
        best_epoch = 0
        with tf.Session(config=session_conf) as sess:
            train_writer = tf.summary.FileWriter(FLAGS.log_path + os.sep + 'train', sess.graph)
            test_writer = tf.summary.FileWriter(FLAGS.log_path + os.sep + 'test')
            sess.run(init)
            for epoch in range(FLAGS.num_epochs):
                sess.run(train_iterator.initializer)
                sess.run(test_iterator.initializer)
                train_handle = sess.run(train_iterator.string_handle())
                test_handle = sess.run(test_iterator.string_handle())
                train_iter = 0
                # Training pass: iterate until the train iterator is drained.
                while True:
                    try:
                        _, tr_loss, tr_accuracy, summary, step = sess.run(
                            [train_op, loss, accuracy, merged, global_step],
                            feed_dict={handle: train_handle, keep_prob: FLAGS.dropout})
                        print("Epoch: {}/{}, Iteration: {}/{}, Loss: {}, Accuracy: {}".format(
                            epoch + 1,
                            FLAGS.num_epochs,
                            train_iter + 1,
                            num_training_iters,
                            tr_loss, tr_accuracy))
                        train_writer.add_summary(summary, step)
                        train_iter += 1
                    except (tf.errors.OutOfRangeError, StopIteration):
                        break
                if epoch % FLAGS.test_interval == 0:
                    # Evaluation pass.  NOTE: these rebindings shadow the
                    # graph-time summary tensors defined above (harmless,
                    # since `merged` already captured them).
                    loss_ls = []
                    loss_summary = tf.Summary()
                    accuracy_ls = []
                    accuracy_summary = tf.Summary()
                    confusion_matrix = np.zeros([num_classes, num_classes], np.int32)
                    num_samples = 0
                    while True:
                        try:
                            test_loss, test_accuracy, test_confusion, samples = sess.run(
                                [loss, accuracy, confusion, batch_size],
                                feed_dict={handle: test_handle, keep_prob: 1.0})
                            # Weight by batch size so the final means are
                            # exact sample-level averages.
                            loss_ls.append(test_loss * samples)
                            accuracy_ls.append(test_accuracy * samples)
                            confusion_matrix += test_confusion
                            num_samples += samples
                        except (tf.errors.OutOfRangeError, StopIteration):
                            break
                    mean_test_loss = sum(loss_ls) / num_samples
                    loss_summary.value.add(tag='loss', simple_value=mean_test_loss)
                    test_writer.add_summary(loss_summary, epoch)
                    mean_test_accuracy = sum(accuracy_ls) / num_samples
                    accuracy_summary.value.add(tag='accuracy', simple_value=mean_test_accuracy)
                    test_writer.add_summary(accuracy_summary, epoch)
                    output_file.write(
                        "Epoch: {}/{} \nTest loss: {} Test accuracy: {} \nTest confusion matrix: \n{}\n\n".format(
                            epoch + 1, FLAGS.num_epochs,
                            mean_test_loss,
                            mean_test_accuracy,
                            confusion_matrix))
                    print("Epoch: {}/{}, Final loss: {}, Final accuracy: {}".format(epoch + 1, FLAGS.num_epochs,
                                                                                    mean_test_loss,
                                                                                    mean_test_accuracy))
                    # Checkpoint on improvement; stop once no improvement has
                    # been seen for more than es_patience epochs (if > 0).
                    if mean_test_loss + FLAGS.es_min_delta < best_loss:
                        best_loss = mean_test_loss
                        best_epoch = epoch
                        saver.save(sess, FLAGS.saved_path + os.sep + "char_level_cnn")
                    if epoch - best_epoch > FLAGS.es_patience > 0:
                        print("Stop training at epoch {}. The lowest loss achieved is {}".format(epoch, best_loss))
                        break
        output_file.close()
# Script entry point: run training with the flag values defined above.
if __name__ == "__main__":
    train()
| 51.5 | 164 | 0.585298 | import os
import shutil
import numpy as np
import tensorflow as tf
from src.character_level_cnn import Char_level_cnn
from src.utils import get_num_classes, create_dataset
tf.flags.DEFINE_string("alphabet", """abcdefghijklmnopqrstuvwxyz0123456789,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}""",
"Valid characters used for model")
tf.flags.DEFINE_string("train_set", "data/train.csv", "Path to the training set")
tf.flags.DEFINE_string("test_set", "data/test.csv", "Path to the test set")
tf.flags.DEFINE_integer("test_interval", 1, "Number of epochs between testing phases")
tf.flags.DEFINE_integer("max_length", 1014, "Maximum length of input")
tf.flags.DEFINE_string("feature", "small", "large or small")
tf.flags.DEFINE_integer("batch_size", 128, "Minibatch size")
tf.flags.DEFINE_integer("num_epochs", 20, "Number of training epochs")
tf.flags.DEFINE_float("lr", 1e-2, "Learning rate")
tf.flags.DEFINE_string("optimizer", "sgd", "sgd or adam")
tf.flags.DEFINE_float("dropout", 0.5, "Dropout's probability")
tf.flags.DEFINE_string("log_path", "tensorboard/char_level_cnn", "path to tensorboard folder")
tf.flags.DEFINE_string("saved_path", "trained_models", "path to store trained model")
tf.flags.DEFINE_float("es_min_delta", 0.,
"Early stopping's parameter: minimum change loss to qualify as an improvement")
tf.flags.DEFINE_integer("es_patience", 3,
"Early stopping's parameter: number of epochs with no improvement after which training will be stopped. Set to 0 to disable this technique")
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
def train():
num_classes = get_num_classes(FLAGS.train_set)
model = Char_level_cnn(batch_size=FLAGS.batch_size, num_classes=num_classes, feature=FLAGS.feature)
with tf.Graph().as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
session_conf.gpu_options.allow_growth = True
training_set, num_training_iters = create_dataset(FLAGS.train_set, FLAGS.alphabet, FLAGS.max_length,
FLAGS.batch_size, True)
test_set, num_test_iters = create_dataset(FLAGS.test_set, FLAGS.alphabet, FLAGS.max_length, FLAGS.batch_size, False)
train_iterator = training_set.make_initializable_iterator()
test_iterator = test_set.make_initializable_iterator()
handle = tf.placeholder(tf.string, shape=[])
keep_prob = tf.placeholder(tf.float32, name='dropout_prob')
iterator = tf.data.Iterator.from_string_handle(handle, training_set.output_types, training_set.output_shapes)
texts, labels = iterator.get_next()
logits = model.forward(texts, keep_prob)
loss = model.loss(logits, labels)
loss_summary = tf.summary.scalar("loss", loss)
accuracy = model.accuracy(logits, labels)
accuracy_summary = tf.summary.scalar("accuracy", accuracy)
batch_size = tf.unstack(tf.shape(texts))[0]
confusion = model.confusion_matrix(logits, labels)
global_step = tf.Variable(0, name="global_step", trainable=False)
if FLAGS.optimizer == "sgd":
values = [FLAGS.lr]
boundaries = []
for i in range(1, 10):
values.append(FLAGS.lr / pow(2, i))
boundaries.append(3 * num_training_iters * i)
learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
else:
optimizer = tf.train.AdamOptimizer(FLAGS.lr)
train_op = optimizer.minimize(loss, global_step=global_step)
merged = tf.summary.merge([loss_summary, accuracy_summary])
init = tf.global_variables_initializer()
saver = tf.train.Saver()
if os.path.isdir(FLAGS.log_path):
shutil.rmtree(FLAGS.log_path)
os.makedirs(FLAGS.log_path)
if os.path.isdir(FLAGS.saved_path):
shutil.rmtree(FLAGS.saved_path)
os.makedirs(FLAGS.saved_path)
output_file = open(FLAGS.saved_path + os.sep + "logs.txt", "w")
output_file.write("Model's parameters: {}".format(FLAGS.flag_values_dict()))
best_loss = 1e5
best_epoch = 0
with tf.Session(config=session_conf) as sess:
train_writer = tf.summary.FileWriter(FLAGS.log_path + os.sep + 'train', sess.graph)
test_writer = tf.summary.FileWriter(FLAGS.log_path + os.sep + 'test')
sess.run(init)
for epoch in range(FLAGS.num_epochs):
sess.run(train_iterator.initializer)
sess.run(test_iterator.initializer)
train_handle = sess.run(train_iterator.string_handle())
test_handle = sess.run(test_iterator.string_handle())
train_iter = 0
while True:
try:
_, tr_loss, tr_accuracy, summary, step = sess.run(
[train_op, loss, accuracy, merged, global_step],
feed_dict={handle: train_handle, keep_prob: FLAGS.dropout})
print("Epoch: {}/{}, Iteration: {}/{}, Loss: {}, Accuracy: {}".format(
epoch + 1,
FLAGS.num_epochs,
train_iter + 1,
num_training_iters,
tr_loss, tr_accuracy))
train_writer.add_summary(summary, step)
train_iter += 1
except (tf.errors.OutOfRangeError, StopIteration):
break
if epoch % FLAGS.test_interval == 0:
loss_ls = []
loss_summary = tf.Summary()
accuracy_ls = []
accuracy_summary = tf.Summary()
confusion_matrix = np.zeros([num_classes, num_classes], np.int32)
num_samples = 0
while True:
try:
test_loss, test_accuracy, test_confusion, samples = sess.run(
[loss, accuracy, confusion, batch_size],
feed_dict={handle: test_handle, keep_prob: 1.0})
loss_ls.append(test_loss * samples)
accuracy_ls.append(test_accuracy * samples)
confusion_matrix += test_confusion
num_samples += samples
except (tf.errors.OutOfRangeError, StopIteration):
break
mean_test_loss = sum(loss_ls) / num_samples
loss_summary.value.add(tag='loss', simple_value=mean_test_loss)
test_writer.add_summary(loss_summary, epoch)
mean_test_accuracy = sum(accuracy_ls) / num_samples
accuracy_summary.value.add(tag='accuracy', simple_value=mean_test_accuracy)
test_writer.add_summary(accuracy_summary, epoch)
output_file.write(
"Epoch: {}/{} \nTest loss: {} Test accuracy: {} \nTest confusion matrix: \n{}\n\n".format(
epoch + 1, FLAGS.num_epochs,
mean_test_loss,
mean_test_accuracy,
confusion_matrix))
print("Epoch: {}/{}, Final loss: {}, Final accuracy: {}".format(epoch + 1, FLAGS.num_epochs,
mean_test_loss,
mean_test_accuracy))
if mean_test_loss + FLAGS.es_min_delta < best_loss:
best_loss = mean_test_loss
best_epoch = epoch
saver.save(sess, FLAGS.saved_path + os.sep + "char_level_cnn")
if epoch - best_epoch > FLAGS.es_patience > 0:
print("Stop training at epoch {}. The lowest loss achieved is {}".format(epoch, best_loss))
break
output_file.close()
if __name__ == "__main__":
train()
| true | true |
f7fb5be32144fa75c99705a27a898f52f78a27f3 | 31,247 | py | Python | venv/Lib/site-packages/pandas/tests/series/apply/test_series_apply.py | OliviaNabbosa89/Disaster_Responses | 1e66d77c303cec685dfc2ca94f4fca4cc9400570 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/tests/series/apply/test_series_apply.py | OliviaNabbosa89/Disaster_Responses | 1e66d77c303cec685dfc2ca94f4fca4cc9400570 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/tests/series/apply/test_series_apply.py | OliviaNabbosa89/Disaster_Responses | 1e66d77c303cec685dfc2ca94f4fca4cc9400570 | [
"MIT"
] | null | null | null | from collections import Counter, defaultdict
from itertools import chain
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, isna
import pandas._testing as tm
from pandas.core.base import SpecificationError
class TestSeriesApply:
    """Tests for ``Series.apply``/``Series.map``: element-wise application,
    metadata preservation on empty inputs, boxing of Timestamp/Timedelta/
    Period values, timezone handling, categoricals, and the deprecated
    nested-dict argument."""

    def test_apply(self, datetime_series):
        with np.errstate(all="ignore"):
            tm.assert_series_equal(
                datetime_series.apply(np.sqrt), np.sqrt(datetime_series)
            )

        # element-wise apply
        import math

        tm.assert_series_equal(
            datetime_series.apply(math.exp), np.exp(datetime_series)
        )

        # empty series
        s = Series(dtype=object, name="foo", index=pd.Index([], name="bar"))
        rs = s.apply(lambda x: x)
        tm.assert_series_equal(s, rs)

        # check all metadata (GH 9322)
        assert s is not rs
        assert s.index is rs.index
        assert s.dtype == rs.dtype
        assert s.name == rs.name

        # index but no data
        s = Series(index=[1, 2, 3], dtype=np.float64)
        rs = s.apply(lambda x: x)
        tm.assert_series_equal(s, rs)

    def test_apply_same_length_inference_bug(self):
        s = Series([1, 2])

        def f(x):
            return (x, x + 1)

        result = s.apply(f)
        expected = s.map(f)
        tm.assert_series_equal(result, expected)

        s = Series([1, 2, 3])
        result = s.apply(f)
        expected = s.map(f)
        tm.assert_series_equal(result, expected)

    def test_apply_dont_convert_dtype(self):
        s = Series(np.random.randn(10))

        def f(x):
            return x if x > 0 else np.nan

        result = s.apply(f, convert_dtype=False)
        assert result.dtype == object

    def test_with_string_args(self, datetime_series):
        # string func names dispatch to the corresponding Series method
        for arg in ["sum", "mean", "min", "max", "std"]:
            result = datetime_series.apply(arg)
            expected = getattr(datetime_series, arg)()
            assert result == expected

    def test_apply_args(self):
        s = Series(["foo,bar"])

        result = s.apply(str.split, args=(",",))
        assert result[0] == ["foo", "bar"]
        assert isinstance(result[0], list)

    def test_series_map_box_timestamps(self):
        # GH#2689, GH#2627
        ser = Series(pd.date_range("1/1/2000", periods=10))

        def func(x):
            return (x.hour, x.day, x.month)

        # it works!
        ser.map(func)
        ser.apply(func)

    def test_apply_box(self):
        # ufunc will not be boxed. Same test cases as the test_map_box
        vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
        s = pd.Series(vals)
        assert s.dtype == "datetime64[ns]"
        # boxed value must be Timestamp instance
        res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
        exp = pd.Series(["Timestamp_1_None", "Timestamp_2_None"])
        tm.assert_series_equal(res, exp)

        vals = [
            pd.Timestamp("2011-01-01", tz="US/Eastern"),
            pd.Timestamp("2011-01-02", tz="US/Eastern"),
        ]
        s = pd.Series(vals)
        assert s.dtype == "datetime64[ns, US/Eastern]"
        res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
        exp = pd.Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
        tm.assert_series_equal(res, exp)

        # timedelta
        vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
        s = pd.Series(vals)
        assert s.dtype == "timedelta64[ns]"
        res = s.apply(lambda x: f"{type(x).__name__}_{x.days}")
        exp = pd.Series(["Timedelta_1", "Timedelta_2"])
        tm.assert_series_equal(res, exp)

        # period
        vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
        s = pd.Series(vals)
        assert s.dtype == "Period[M]"
        res = s.apply(lambda x: f"{type(x).__name__}_{x.freqstr}")
        exp = pd.Series(["Period_M", "Period_M"])
        tm.assert_series_equal(res, exp)

    def test_apply_datetimetz(self):
        values = pd.date_range("2011-01-01", "2011-01-02", freq="H").tz_localize(
            "Asia/Tokyo"
        )
        s = pd.Series(values, name="XX")

        result = s.apply(lambda x: x + pd.offsets.Day())
        exp_values = pd.date_range("2011-01-02", "2011-01-03", freq="H").tz_localize(
            "Asia/Tokyo"
        )
        exp = pd.Series(exp_values, name="XX")
        tm.assert_series_equal(result, exp)

        # change dtype
        # GH 14506 : Returned dtype changed from int32 to int64
        result = s.apply(lambda x: x.hour)
        exp = pd.Series(list(range(24)) + [0], name="XX", dtype=np.int64)
        tm.assert_series_equal(result, exp)

        # not vectorized
        def f(x):
            if not isinstance(x, pd.Timestamp):
                raise ValueError
            return str(x.tz)

        result = s.map(f)
        exp = pd.Series(["Asia/Tokyo"] * 25, name="XX")
        tm.assert_series_equal(result, exp)

    def test_apply_dict_depr(self):
        tsdf = pd.DataFrame(
            np.random.randn(10, 3),
            columns=["A", "B", "C"],
            index=pd.date_range("1/1/2000", periods=10),
        )
        msg = "nested renamer is not supported"
        with pytest.raises(SpecificationError, match=msg):
            tsdf.A.agg({"foo": ["sum", "mean"]})

    def test_apply_categorical(self):
        values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
        ser = pd.Series(values, name="XX", index=list("abcdefg"))
        result = ser.apply(lambda x: x.lower())

        # should be categorical dtype when the number of categories are
        # the same
        values = pd.Categorical(list("abbabcd"), categories=list("dcba"), ordered=True)
        exp = pd.Series(values, name="XX", index=list("abcdefg"))
        tm.assert_series_equal(result, exp)
        tm.assert_categorical_equal(result.values, exp.values)

        result = ser.apply(lambda x: "A")
        exp = pd.Series(["A"] * 7, name="XX", index=list("abcdefg"))
        tm.assert_series_equal(result, exp)
        assert result.dtype == object

    @pytest.mark.parametrize("series", [["1-1", "1-1", np.NaN], ["1-1", "1-2", np.NaN]])
    def test_apply_categorical_with_nan_values(self, series):
        # GH 20714 bug fixed in: GH 24275
        s = pd.Series(series, dtype="category")
        result = s.apply(lambda x: x.split("-")[0])
        result = result.astype(object)
        expected = pd.Series(["1", "1", np.NaN], dtype="category")
        expected = expected.astype(object)
        tm.assert_series_equal(result, expected)

    def test_apply_empty_integer_series_with_datetime_index(self):
        # GH 21245
        s = pd.Series([], index=pd.date_range(start="2018-01-01", periods=0), dtype=int)
        result = s.apply(lambda x: x)
        tm.assert_series_equal(result, s)
class TestSeriesAggregate:
    """Tests for ``Series.agg``/``Series.transform``: ufuncs, lists and
    dicts of functions, the cython-table reductions and transforms, and
    the errors raised when aggregation and transformation are mixed."""

    def test_transform(self, string_series):
        # transforming functions

        with np.errstate(all="ignore"):

            f_sqrt = np.sqrt(string_series)
            f_abs = np.abs(string_series)

            # ufunc
            result = string_series.transform(np.sqrt)
            expected = f_sqrt.copy()
            tm.assert_series_equal(result, expected)

            result = string_series.apply(np.sqrt)
            tm.assert_series_equal(result, expected)

            # list-like
            result = string_series.transform([np.sqrt])
            expected = f_sqrt.to_frame().copy()
            expected.columns = ["sqrt"]
            tm.assert_frame_equal(result, expected)

            result = string_series.transform([np.sqrt])
            tm.assert_frame_equal(result, expected)

            result = string_series.transform(["sqrt"])
            tm.assert_frame_equal(result, expected)

            # multiple items in list
            # these are in the order as if we are applying both functions per
            # series and then concatting
            expected = pd.concat([f_sqrt, f_abs], axis=1)
            expected.columns = ["sqrt", "absolute"]
            result = string_series.apply([np.sqrt, np.abs])
            tm.assert_frame_equal(result, expected)

            result = string_series.transform(["sqrt", "abs"])
            expected.columns = ["sqrt", "abs"]
            tm.assert_frame_equal(result, expected)

            # dict, provide renaming
            expected = pd.concat([f_sqrt, f_abs], axis=1)
            expected.columns = ["foo", "bar"]
            expected = expected.unstack().rename("series")

            result = string_series.apply({"foo": np.sqrt, "bar": np.abs})
            tm.assert_series_equal(result.reindex_like(expected), expected)

    def test_transform_and_agg_error(self, string_series):
        # we are trying to transform with an aggregator
        msg = "transforms cannot produce aggregated results"
        with pytest.raises(ValueError, match=msg):
            string_series.transform(["min", "max"])

        msg = "cannot combine transform and aggregation"
        with pytest.raises(ValueError, match=msg):
            with np.errstate(all="ignore"):
                string_series.agg(["sqrt", "max"])

        with pytest.raises(ValueError, match=msg):
            with np.errstate(all="ignore"):
                string_series.transform(["sqrt", "max"])

        msg = "cannot perform both aggregation and transformation"
        with pytest.raises(ValueError, match=msg):
            with np.errstate(all="ignore"):
                string_series.agg({"foo": np.sqrt, "bar": "sum"})

    def test_demo(self):
        # demonstration tests
        s = Series(range(6), dtype="int64", name="series")

        result = s.agg(["min", "max"])
        expected = Series([0, 5], index=["min", "max"], name="series")
        tm.assert_series_equal(result, expected)

        result = s.agg({"foo": "min"})
        expected = Series([0], index=["foo"], name="series")
        tm.assert_series_equal(result, expected)

        # nested renaming
        msg = "nested renamer is not supported"
        with pytest.raises(SpecificationError, match=msg):
            s.agg({"foo": ["min", "max"]})

    def test_multiple_aggregators_with_dict_api(self):
        s = Series(range(6), dtype="int64", name="series")
        # nested renaming
        msg = "nested renamer is not supported"
        with pytest.raises(SpecificationError, match=msg):
            s.agg({"foo": ["min", "max"], "bar": ["sum", "mean"]})

    def test_agg_apply_evaluate_lambdas_the_same(self, string_series):
        # test that we are evaluating row-by-row first
        # before vectorized evaluation
        result = string_series.apply(lambda x: str(x))
        expected = string_series.agg(lambda x: str(x))
        tm.assert_series_equal(result, expected)

        result = string_series.apply(str)
        expected = string_series.agg(str)
        tm.assert_series_equal(result, expected)

    def test_with_nested_series(self, datetime_series):
        # GH 2316
        # .agg with a reducer and a transform, what to do
        result = datetime_series.apply(
            lambda x: Series([x, x ** 2], index=["x", "x^2"])
        )
        expected = DataFrame({"x": datetime_series, "x^2": datetime_series ** 2})
        tm.assert_frame_equal(result, expected)

        result = datetime_series.agg(lambda x: Series([x, x ** 2], index=["x", "x^2"]))
        tm.assert_frame_equal(result, expected)

    def test_replicate_describe(self, string_series):
        # this also tests a result set that is all scalars
        expected = string_series.describe()
        result = string_series.apply(
            {
                "count": "count",
                "mean": "mean",
                "std": "std",
                "min": "min",
                "25%": lambda x: x.quantile(0.25),
                "50%": "median",
                "75%": lambda x: x.quantile(0.75),
                "max": "max",
            }
        )
        tm.assert_series_equal(result, expected)

    def test_reduce(self, string_series):
        # reductions with named functions
        result = string_series.agg(["sum", "mean"])
        expected = Series(
            [string_series.sum(), string_series.mean()],
            ["sum", "mean"],
            name=string_series.name,
        )
        tm.assert_series_equal(result, expected)

    def test_non_callable_aggregates(self):
        # test agg using non-callable series attributes
        s = Series([1, 2, None])

        # Calling agg w/ just a string arg same as calling s.arg
        result = s.agg("size")
        expected = s.size
        assert result == expected

        # test when mixed w/ callable reducers
        result = s.agg(["size", "count", "mean"])
        expected = Series({"size": 3.0, "count": 2.0, "mean": 1.5})
        tm.assert_series_equal(result[expected.index], expected)

    @pytest.mark.parametrize(
        "series, func, expected",
        chain(
            tm.get_cython_table_params(
                Series(dtype=np.float64),
                [
                    ("sum", 0),
                    ("max", np.nan),
                    ("min", np.nan),
                    ("all", True),
                    ("any", False),
                    ("mean", np.nan),
                    ("prod", 1),
                    ("std", np.nan),
                    ("var", np.nan),
                    ("median", np.nan),
                ],
            ),
            tm.get_cython_table_params(
                Series([np.nan, 1, 2, 3]),
                [
                    ("sum", 6),
                    ("max", 3),
                    ("min", 1),
                    ("all", True),
                    ("any", True),
                    ("mean", 2),
                    ("prod", 6),
                    ("std", 1),
                    ("var", 1),
                    ("median", 2),
                ],
            ),
            tm.get_cython_table_params(
                Series("a b c".split()),
                [
                    ("sum", "abc"),
                    ("max", "c"),
                    ("min", "a"),
                    ("all", "c"),  # see GH12863
                    ("any", "a"),
                ],
            ),
        ),
    )
    def test_agg_cython_table(self, series, func, expected):
        # GH21224
        # test reducing functions in
        # pandas.core.base.SelectionMixin._cython_table
        result = series.agg(func)
        if tm.is_number(expected):
            assert np.isclose(result, expected, equal_nan=True)
        else:
            assert result == expected

    @pytest.mark.parametrize(
        "series, func, expected",
        chain(
            tm.get_cython_table_params(
                Series(dtype=np.float64),
                [
                    ("cumprod", Series([], Index([]), dtype=np.float64)),
                    ("cumsum", Series([], Index([]), dtype=np.float64)),
                ],
            ),
            tm.get_cython_table_params(
                Series([np.nan, 1, 2, 3]),
                [
                    ("cumprod", Series([np.nan, 1, 2, 6])),
                    ("cumsum", Series([np.nan, 1, 3, 6])),
                ],
            ),
            tm.get_cython_table_params(
                Series("a b c".split()), [("cumsum", Series(["a", "ab", "abc"]))]
            ),
        ),
    )
    def test_agg_cython_table_transform(self, series, func, expected):
        # GH21224
        # test transforming functions in
        # pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
        result = series.agg(func)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "series, func, expected",
        chain(
            tm.get_cython_table_params(
                Series("a b c".split()),
                [
                    ("mean", TypeError),  # mean raises TypeError
                    ("prod", TypeError),
                    ("std", TypeError),
                    ("var", TypeError),
                    ("median", TypeError),
                    ("cumprod", TypeError),
                ],
            )
        ),
    )
    def test_agg_cython_table_raises(self, series, func, expected):
        # GH21224
        with pytest.raises(expected):
            # e.g. Series('a b'.split()).cumprod() will raise
            series.agg(func)

    def test_transform_none_to_type(self):
        # GH34377
        df = pd.DataFrame({"a": [None]})

        msg = "DataFrame constructor called with incompatible data and dtype"
        with pytest.raises(TypeError, match=msg):
            df.transform({"a": int})

    def test_series_apply_no_suffix_index(self):
        # GH36189
        s = pd.Series([4] * 3)
        result = s.apply(["sum", lambda x: x.sum(), lambda x: x.sum()])
        expected = pd.Series([12, 12, 12], index=["sum", "<lambda>", "<lambda>"])

        tm.assert_series_equal(result, expected)
class TestSeriesMap:
def test_map(self, datetime_series):
index, data = tm.getMixedTypeDict()
source = Series(data["B"], index=data["C"])
target = Series(data["C"][:4], index=data["D"][:4])
merged = target.map(source)
for k, v in merged.items():
assert v == source[target[k]]
# input could be a dict
merged = target.map(source.to_dict())
for k, v in merged.items():
assert v == source[target[k]]
# function
result = datetime_series.map(lambda x: x * 2)
tm.assert_series_equal(result, datetime_series * 2)
# GH 10324
a = Series([1, 2, 3, 4])
b = Series(["even", "odd", "even", "odd"], dtype="category")
c = Series(["even", "odd", "even", "odd"])
exp = Series(["odd", "even", "odd", np.nan], dtype="category")
tm.assert_series_equal(a.map(b), exp)
exp = Series(["odd", "even", "odd", np.nan])
tm.assert_series_equal(a.map(c), exp)
a = Series(["a", "b", "c", "d"])
b = Series([1, 2, 3, 4], index=pd.CategoricalIndex(["b", "c", "d", "e"]))
c = Series([1, 2, 3, 4], index=Index(["b", "c", "d", "e"]))
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(c), exp)
a = Series(["a", "b", "c", "d"])
b = Series(
["B", "C", "D", "E"],
dtype="category",
index=pd.CategoricalIndex(["b", "c", "d", "e"]),
)
c = Series(["B", "C", "D", "E"], index=Index(["b", "c", "d", "e"]))
exp = Series(
pd.Categorical([np.nan, "B", "C", "D"], categories=["B", "C", "D", "E"])
)
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, "B", "C", "D"])
tm.assert_series_equal(a.map(c), exp)
def test_map_empty(self, index):
if isinstance(index, MultiIndex):
pytest.skip("Initializing a Series from a MultiIndex is not supported")
s = Series(index)
result = s.map({})
expected = pd.Series(np.nan, index=s.index)
tm.assert_series_equal(result, expected)
def test_map_compat(self):
# related GH 8024
s = Series([True, True, False], index=[1, 2, 3])
result = s.map({True: "foo", False: "bar"})
expected = Series(["foo", "foo", "bar"], index=[1, 2, 3])
tm.assert_series_equal(result, expected)
def test_map_int(self):
left = Series({"a": 1.0, "b": 2.0, "c": 3.0, "d": 4})
right = Series({1: 11, 2: 22, 3: 33})
assert left.dtype == np.float_
assert issubclass(right.dtype.type, np.integer)
merged = left.map(right)
assert merged.dtype == np.float_
assert isna(merged["d"])
assert not isna(merged["c"])
def test_map_type_inference(self):
s = Series(range(3))
s2 = s.map(lambda x: np.where(x == 0, 0, 1))
assert issubclass(s2.dtype.type, np.integer)
def test_map_decimal(self, string_series):
from decimal import Decimal
result = string_series.map(lambda x: Decimal(str(x)))
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_map_na_exclusion(self):
s = Series([1.5, np.nan, 3, np.nan, 5])
result = s.map(lambda x: x * 2, na_action="ignore")
exp = s * 2
tm.assert_series_equal(result, exp)
def test_map_dict_with_tuple_keys(self):
"""
Due to new MultiIndex-ing behaviour in v0.14.0,
dicts with tuple keys passed to map were being
converted to a multi-index, preventing tuple values
from being mapped properly.
"""
# GH 18496
df = pd.DataFrame({"a": [(1,), (2,), (3, 4), (5, 6)]})
label_mappings = {(1,): "A", (2,): "B", (3, 4): "A", (5, 6): "B"}
df["labels"] = df["a"].map(label_mappings)
df["expected_labels"] = pd.Series(["A", "B", "A", "B"], index=df.index)
# All labels should be filled now
tm.assert_series_equal(df["labels"], df["expected_labels"], check_names=False)
def test_map_counter(self):
s = Series(["a", "b", "c"], index=[1, 2, 3])
counter = Counter()
counter["b"] = 5
counter["c"] += 1
result = s.map(counter)
expected = Series([0, 5, 1], index=[1, 2, 3])
tm.assert_series_equal(result, expected)
def test_map_defaultdict(self):
s = Series([1, 2, 3], index=["a", "b", "c"])
default_dict = defaultdict(lambda: "blank")
default_dict[1] = "stuff"
result = s.map(default_dict)
expected = Series(["stuff", "blank", "blank"], index=["a", "b", "c"])
tm.assert_series_equal(result, expected)
def test_map_dict_na_key(self):
# https://github.com/pandas-dev/pandas/issues/17648
# Checks that np.nan key is appropriately mapped
s = Series([1, 2, np.nan])
expected = Series(["a", "b", "c"])
result = s.map({1: "a", 2: "b", np.nan: "c"})
tm.assert_series_equal(result, expected)
    def test_map_dict_subclass_with_missing(self):
        """
        Test Series.map with a dictionary subclass that defines __missing__,
        i.e. sets a default value (GH #15999).
        """
        class DictWithMissing(dict):
            def __missing__(self, key):
                # invoked by dict.__getitem__ for any absent key; map must
                # honour it instead of producing NaN
                return "missing"
        s = Series([1, 2, 3])
        dictionary = DictWithMissing({3: "three"})
        result = s.map(dictionary)
        expected = Series(["missing", "missing", "three"])
        tm.assert_series_equal(result, expected)
def test_map_dict_subclass_without_missing(self):
class DictWithoutMissing(dict):
pass
s = Series([1, 2, 3])
dictionary = DictWithoutMissing({3: "three"})
result = s.map(dictionary)
expected = Series([np.nan, np.nan, "three"])
tm.assert_series_equal(result, expected)
    def test_map_abc_mapping(self, non_dict_mapping_subclass):
        # https://github.com/pandas-dev/pandas/issues/29733
        # Check collections.abc.Mapping support as mapper for Series.map
        # `non_dict_mapping_subclass` is presumably a fixture providing a
        # Mapping implementation that does not inherit from dict — verify
        # against conftest.
        s = Series([1, 2, 3])
        not_a_dictionary = non_dict_mapping_subclass({3: "three"})
        result = s.map(not_a_dictionary)
        # absent keys behave as for a plain dict: NaN
        expected = Series([np.nan, np.nan, "three"])
        tm.assert_series_equal(result, expected)
    def test_map_abc_mapping_with_missing(self, non_dict_mapping_subclass):
        # https://github.com/pandas-dev/pandas/issues/29733
        # Check collections.abc.Mapping support as mapper for Series.map
        class NonDictMappingWithMissing(non_dict_mapping_subclass):
            def __missing__(self, key):
                return "missing"
        s = Series([1, 2, 3])
        not_a_dictionary = NonDictMappingWithMissing({3: "three"})
        result = s.map(not_a_dictionary)
        # __missing__ is a dict concept, not a Mapping concept,
        # so it should not change the result!
        expected = Series([np.nan, np.nan, "three"])
        tm.assert_series_equal(result, expected)
    def test_map_box(self):
        # apply must hand the mapper boxed pandas scalars (Timestamp,
        # Timedelta, Period) so scalar attributes like .day/.tz/.freqstr work.
        vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
        s = pd.Series(vals)
        assert s.dtype == "datetime64[ns]"
        # boxed value must be Timestamp instance
        res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
        exp = pd.Series(["Timestamp_1_None", "Timestamp_2_None"])
        tm.assert_series_equal(res, exp)
        # tz-aware datetime: the timezone must survive boxing
        vals = [
            pd.Timestamp("2011-01-01", tz="US/Eastern"),
            pd.Timestamp("2011-01-02", tz="US/Eastern"),
        ]
        s = pd.Series(vals)
        assert s.dtype == "datetime64[ns, US/Eastern]"
        res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
        exp = pd.Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
        tm.assert_series_equal(res, exp)
        # timedelta
        vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
        s = pd.Series(vals)
        assert s.dtype == "timedelta64[ns]"
        res = s.apply(lambda x: f"{type(x).__name__}_{x.days}")
        exp = pd.Series(["Timedelta_1", "Timedelta_2"])
        tm.assert_series_equal(res, exp)
        # period
        vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
        s = pd.Series(vals)
        assert s.dtype == "Period[M]"
        res = s.apply(lambda x: f"{type(x).__name__}_{x.freqstr}")
        exp = pd.Series(["Period_M", "Period_M"])
        tm.assert_series_equal(res, exp)
    def test_map_categorical(self):
        # map over a categorical: a mapper applied to the values transforms
        # the categories in place (dtype stays categorical, order preserved).
        values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
        s = pd.Series(values, name="XX", index=list("abcdefg"))
        result = s.map(lambda x: x.lower())
        exp_values = pd.Categorical(
            list("abbabcd"), categories=list("dcba"), ordered=True
        )
        exp = pd.Series(exp_values, name="XX", index=list("abcdefg"))
        tm.assert_series_equal(result, exp)
        tm.assert_categorical_equal(result.values, exp_values)
        # a constant mapper collapses the categories, so the result falls
        # back to object dtype
        result = s.map(lambda x: "A")
        exp = pd.Series(["A"] * 7, name="XX", index=list("abcdefg"))
        tm.assert_series_equal(result, exp)
        assert result.dtype == object
        # na_action is not implemented for categorical-backed Series
        with pytest.raises(NotImplementedError):
            s.map(lambda x: x, na_action="ignore")
    def test_map_datetimetz(self):
        # map over a tz-aware datetime Series: boxed Timestamps keep their
        # timezone, and offset arithmetic preserves the tz-aware dtype.
        values = pd.date_range("2011-01-01", "2011-01-02", freq="H").tz_localize(
            "Asia/Tokyo"
        )
        s = pd.Series(values, name="XX")
        # keep tz
        result = s.map(lambda x: x + pd.offsets.Day())
        exp_values = pd.date_range("2011-01-02", "2011-01-03", freq="H").tz_localize(
            "Asia/Tokyo"
        )
        exp = pd.Series(exp_values, name="XX")
        tm.assert_series_equal(result, exp)
        # change dtype
        # GH 14506 : Returned dtype changed from int32 to int64
        result = s.map(lambda x: x.hour)
        exp = pd.Series(list(range(24)) + [0], name="XX", dtype=np.int64)
        tm.assert_series_equal(result, exp)
        # na_action is not implemented for tz-aware datetime-backed Series
        with pytest.raises(NotImplementedError):
            s.map(lambda x: x, na_action="ignore")
        # not vectorized
        def f(x):
            if not isinstance(x, pd.Timestamp):
                raise ValueError
            return str(x.tz)
        result = s.map(f)
        exp = pd.Series(["Asia/Tokyo"] * 25, name="XX")
        tm.assert_series_equal(result, exp)
    @pytest.mark.parametrize(
        "vals,mapping,exp",
        [
            (list("abc"), {np.nan: "not NaN"}, [np.nan] * 3 + ["not NaN"]),
            (list("abc"), {"a": "a letter"}, ["a letter"] + [np.nan] * 3),
            (list(range(3)), {0: 42}, [42] + [np.nan] * 3),
        ],
    )
    def test_map_missing_mixed(self, vals, mapping, exp):
        # GH20495
        # A trailing NaN is appended to each input; only keys present in
        # `mapping` (including np.nan itself) get mapped, the rest become NaN.
        s = pd.Series(vals + [np.nan])
        result = s.map(mapping)
        tm.assert_series_equal(result, pd.Series(exp))
    @pytest.mark.parametrize(
        "dti,exp",
        [
            (
                Series([1, 2], index=pd.DatetimeIndex([0, 31536000000])),
                DataFrame(np.repeat([[1, 2]], 2, axis=0), dtype="int64"),
            ),
            (
                tm.makeTimeSeries(nper=30),
                DataFrame(np.repeat([[1, 2]], 30, axis=0), dtype="int64"),
            ),
        ],
    )
    def test_apply_series_on_date_time_index_aware_series(self, dti, exp):
        # GH 25959
        # Calling apply on a localized time series should not cause an error
        # A Series-returning func expands each row into DataFrame columns.
        index = dti.tz_localize("UTC").index
        result = pd.Series(index).apply(lambda x: pd.Series([1, 2]))
        tm.assert_frame_equal(result, exp)
    def test_apply_scaler_on_date_time_index_aware_series(self):
        # GH 25959
        # Calling apply on a localized time series should not cause an error
        # NOTE(review): "scaler" is a typo for "scalar"; the name is kept
        # because it identifies the collected test.
        series = tm.makeTimeSeries(nper=30).tz_localize("UTC")
        result = pd.Series(series.index).apply(lambda x: 1)
        tm.assert_series_equal(result, pd.Series(np.ones(30), dtype="int64"))
def test_map_float_to_string_precision(self):
# GH 13228
ser = pd.Series(1 / 3)
result = ser.map(lambda val: str(val)).to_dict()
expected = {0: "0.3333333333333333"}
assert result == expected
    def test_map_with_invalid_na_action_raises(self):
        # https://github.com/pandas-dev/pandas/issues/32815
        # Any na_action value other than None/"ignore" must raise ValueError.
        s = pd.Series([1, 2, 3])
        msg = "na_action must either be 'ignore' or None"
        with pytest.raises(ValueError, match=msg):
            s.map(lambda x: x, na_action="____")
    def test_apply_to_timedelta(self):
        # Smoke test: apply(pd.to_timedelta) must run without error, both on
        # all-valid strings and on inputs containing np.nan / pd.NaT.
        list_of_valid_strings = ["00:00:01", "00:00:02"]
        a = pd.to_timedelta(list_of_valid_strings)
        b = Series(list_of_valid_strings).apply(pd.to_timedelta)
        # FIXME: dont leave commented-out
        # Can't compare until apply on a Series gives the correct dtype
        # assert_series_equal(a, b)
        list_of_strings = ["00:00:01", np.nan, pd.NaT, pd.NaT]
        a = pd.to_timedelta(list_of_strings)  # noqa
        b = Series(list_of_strings).apply(pd.to_timedelta)  # noqa
        # Can't compare until apply on a Series gives the correct dtype
        # assert_series_equal(a, b)
| 37.376794 | 89 | 0.539412 | from collections import Counter, defaultdict
from itertools import chain
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, isna
import pandas._testing as tm
from pandas.core.base import SpecificationError
class TestSeriesApply:
def test_apply(self, datetime_series):
with np.errstate(all="ignore"):
tm.assert_series_equal(
datetime_series.apply(np.sqrt), np.sqrt(datetime_series)
)
import math
tm.assert_series_equal(
datetime_series.apply(math.exp), np.exp(datetime_series)
)
s = Series(dtype=object, name="foo", index=pd.Index([], name="bar"))
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
assert s is not rs
assert s.index is rs.index
assert s.dtype == rs.dtype
assert s.name == rs.name
s = Series(index=[1, 2, 3], dtype=np.float64)
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
def test_apply_same_length_inference_bug(self):
s = Series([1, 2])
def f(x):
return (x, x + 1)
result = s.apply(f)
expected = s.map(f)
tm.assert_series_equal(result, expected)
s = Series([1, 2, 3])
result = s.apply(f)
expected = s.map(f)
tm.assert_series_equal(result, expected)
def test_apply_dont_convert_dtype(self):
s = Series(np.random.randn(10))
def f(x):
return x if x > 0 else np.nan
result = s.apply(f, convert_dtype=False)
assert result.dtype == object
def test_with_string_args(self, datetime_series):
for arg in ["sum", "mean", "min", "max", "std"]:
result = datetime_series.apply(arg)
expected = getattr(datetime_series, arg)()
assert result == expected
def test_apply_args(self):
s = Series(["foo,bar"])
result = s.apply(str.split, args=(",",))
assert result[0] == ["foo", "bar"]
assert isinstance(result[0], list)
def test_series_map_box_timestamps(self):
(pd.date_range("1/1/2000", periods=10))
def func(x):
return (x.hour, x.day, x.month)
ser.map(func)
ser.apply(func)
def test_apply_box(self):
vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
s = pd.Series(vals)
assert s.dtype == "datetime64[ns]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = pd.Series(["Timestamp_1_None", "Timestamp_2_None"])
tm.assert_series_equal(res, exp)
vals = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
]
s = pd.Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = pd.Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
tm.assert_series_equal(res, exp)
vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
s = pd.Series(vals)
assert s.dtype == "timedelta64[ns]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.days}")
exp = pd.Series(["Timedelta_1", "Timedelta_2"])
tm.assert_series_equal(res, exp)
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = pd.Series(vals)
assert s.dtype == "Period[M]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.freqstr}")
exp = pd.Series(["Period_M", "Period_M"])
tm.assert_series_equal(res, exp)
def test_apply_datetimetz(self):
values = pd.date_range("2011-01-01", "2011-01-02", freq="H").tz_localize(
"Asia/Tokyo"
)
s = pd.Series(values, name="XX")
result = s.apply(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range("2011-01-02", "2011-01-03", freq="H").tz_localize(
"Asia/Tokyo"
)
exp = pd.Series(exp_values, name="XX")
tm.assert_series_equal(result, exp)
result = s.apply(lambda x: x.hour)
exp = pd.Series(list(range(24)) + [0], name="XX", dtype=np.int64)
tm.assert_series_equal(result, exp)
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = pd.Series(["Asia/Tokyo"] * 25, name="XX")
tm.assert_series_equal(result, exp)
def test_apply_dict_depr(self):
tsdf = pd.DataFrame(
np.random.randn(10, 3),
columns=["A", "B", "C"],
index=pd.date_range("1/1/2000", periods=10),
)
msg = "nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
tsdf.A.agg({"foo": ["sum", "mean"]})
def test_apply_categorical(self):
values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
ser = pd.Series(values, name="XX", index=list("abcdefg"))
result = ser.apply(lambda x: x.lower())
values = pd.Categorical(list("abbabcd"), categories=list("dcba"), ordered=True)
exp = pd.Series(values, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp.values)
result = ser.apply(lambda x: "A")
exp = pd.Series(["A"] * 7, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
assert result.dtype == object
@pytest.mark.parametrize("series", [["1-1", "1-1", np.NaN], ["1-1", "1-2", np.NaN]])
def test_apply_categorical_with_nan_values(self, series):
s = pd.Series(series, dtype="category")
result = s.apply(lambda x: x.split("-")[0])
result = result.astype(object)
expected = pd.Series(["1", "1", np.NaN], dtype="category")
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
def test_apply_empty_integer_series_with_datetime_index(self):
s = pd.Series([], index=pd.date_range(start="2018-01-01", periods=0), dtype=int)
result = s.apply(lambda x: x)
tm.assert_series_equal(result, s)
class TestSeriesAggregate:
def test_transform(self, string_series):
with np.errstate(all="ignore"):
f_sqrt = np.sqrt(string_series)
f_abs = np.abs(string_series)
result = string_series.transform(np.sqrt)
expected = f_sqrt.copy()
tm.assert_series_equal(result, expected)
result = string_series.apply(np.sqrt)
tm.assert_series_equal(result, expected)
result = string_series.transform([np.sqrt])
expected = f_sqrt.to_frame().copy()
expected.columns = ["sqrt"]
tm.assert_frame_equal(result, expected)
result = string_series.transform([np.sqrt])
tm.assert_frame_equal(result, expected)
result = string_series.transform(["sqrt"])
tm.assert_frame_equal(result, expected)
expected = pd.concat([f_sqrt, f_abs], axis=1)
expected.columns = ["sqrt", "absolute"]
result = string_series.apply([np.sqrt, np.abs])
tm.assert_frame_equal(result, expected)
result = string_series.transform(["sqrt", "abs"])
expected.columns = ["sqrt", "abs"]
tm.assert_frame_equal(result, expected)
expected = pd.concat([f_sqrt, f_abs], axis=1)
expected.columns = ["foo", "bar"]
expected = expected.unstack().rename("series")
result = string_series.apply({"foo": np.sqrt, "bar": np.abs})
tm.assert_series_equal(result.reindex_like(expected), expected)
def test_transform_and_agg_error(self, string_series):
msg = "transforms cannot produce aggregated results"
with pytest.raises(ValueError, match=msg):
string_series.transform(["min", "max"])
msg = "cannot combine transform and aggregation"
with pytest.raises(ValueError, match=msg):
with np.errstate(all="ignore"):
string_series.agg(["sqrt", "max"])
with pytest.raises(ValueError, match=msg):
with np.errstate(all="ignore"):
string_series.transform(["sqrt", "max"])
msg = "cannot perform both aggregation and transformation"
with pytest.raises(ValueError, match=msg):
with np.errstate(all="ignore"):
string_series.agg({"foo": np.sqrt, "bar": "sum"})
def test_demo(self):
s = Series(range(6), dtype="int64", name="series")
result = s.agg(["min", "max"])
expected = Series([0, 5], index=["min", "max"], name="series")
tm.assert_series_equal(result, expected)
result = s.agg({"foo": "min"})
expected = Series([0], index=["foo"], name="series")
tm.assert_series_equal(result, expected)
msg = "nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
s.agg({"foo": ["min", "max"]})
def test_multiple_aggregators_with_dict_api(self):
s = Series(range(6), dtype="int64", name="series")
msg = "nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
s.agg({"foo": ["min", "max"], "bar": ["sum", "mean"]})
def test_agg_apply_evaluate_lambdas_the_same(self, string_series):
result = string_series.apply(lambda x: str(x))
expected = string_series.agg(lambda x: str(x))
tm.assert_series_equal(result, expected)
result = string_series.apply(str)
expected = string_series.agg(str)
tm.assert_series_equal(result, expected)
def test_with_nested_series(self, datetime_series):
result = datetime_series.apply(
lambda x: Series([x, x ** 2], index=["x", "x^2"])
)
expected = DataFrame({"x": datetime_series, "x^2": datetime_series ** 2})
tm.assert_frame_equal(result, expected)
result = datetime_series.agg(lambda x: Series([x, x ** 2], index=["x", "x^2"]))
tm.assert_frame_equal(result, expected)
def test_replicate_describe(self, string_series):
expected = string_series.describe()
result = string_series.apply(
{
"count": "count",
"mean": "mean",
"std": "std",
"min": "min",
"25%": lambda x: x.quantile(0.25),
"50%": "median",
"75%": lambda x: x.quantile(0.75),
"max": "max",
}
)
tm.assert_series_equal(result, expected)
def test_reduce(self, string_series):
result = string_series.agg(["sum", "mean"])
expected = Series(
[string_series.sum(), string_series.mean()],
["sum", "mean"],
name=string_series.name,
)
tm.assert_series_equal(result, expected)
def test_non_callable_aggregates(self):
s = Series([1, 2, None])
result = s.agg("size")
expected = s.size
assert result == expected
result = s.agg(["size", "count", "mean"])
expected = Series({"size": 3.0, "count": 2.0, "mean": 1.5})
tm.assert_series_equal(result[expected.index], expected)
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series(dtype=np.float64),
[
("sum", 0),
("max", np.nan),
("min", np.nan),
("all", True),
("any", False),
("mean", np.nan),
("prod", 1),
("std", np.nan),
("var", np.nan),
("median", np.nan),
],
),
tm.get_cython_table_params(
Series([np.nan, 1, 2, 3]),
[
("sum", 6),
("max", 3),
("min", 1),
("all", True),
("any", True),
("mean", 2),
("prod", 6),
("std", 1),
("var", 1),
("median", 2),
],
),
tm.get_cython_table_params(
Series("a b c".split()),
[
("sum", "abc"),
("max", "c"),
("min", "a"),
("all", "c"),
("any", "a"),
],
),
),
)
def test_agg_cython_table(self, series, func, expected):
result = series.agg(func)
if tm.is_number(expected):
assert np.isclose(result, expected, equal_nan=True)
else:
assert result == expected
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series(dtype=np.float64),
[
("cumprod", Series([], Index([]), dtype=np.float64)),
("cumsum", Series([], Index([]), dtype=np.float64)),
],
),
tm.get_cython_table_params(
Series([np.nan, 1, 2, 3]),
[
("cumprod", Series([np.nan, 1, 2, 6])),
("cumsum", Series([np.nan, 1, 3, 6])),
],
),
tm.get_cython_table_params(
Series("a b c".split()), [("cumsum", Series(["a", "ab", "abc"]))]
),
),
)
def test_agg_cython_table_transform(self, series, func, expected):
result = series.agg(func)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series("a b c".split()),
[
("mean", TypeError),
("prod", TypeError),
("std", TypeError),
("var", TypeError),
("median", TypeError),
("cumprod", TypeError),
],
)
),
)
def test_agg_cython_table_raises(self, series, func, expected):
with pytest.raises(expected):
series.agg(func)
def test_transform_none_to_type(self):
df = pd.DataFrame({"a": [None]})
msg = "DataFrame constructor called with incompatible data and dtype"
with pytest.raises(TypeError, match=msg):
df.transform({"a": int})
def test_series_apply_no_suffix_index(self):
s = pd.Series([4] * 3)
result = s.apply(["sum", lambda x: x.sum(), lambda x: x.sum()])
expected = pd.Series([12, 12, 12], index=["sum", "<lambda>", "<lambda>"])
tm.assert_series_equal(result, expected)
class TestSeriesMap:
def test_map(self, datetime_series):
index, data = tm.getMixedTypeDict()
source = Series(data["B"], index=data["C"])
target = Series(data["C"][:4], index=data["D"][:4])
merged = target.map(source)
for k, v in merged.items():
assert v == source[target[k]]
merged = target.map(source.to_dict())
for k, v in merged.items():
assert v == source[target[k]]
result = datetime_series.map(lambda x: x * 2)
tm.assert_series_equal(result, datetime_series * 2)
a = Series([1, 2, 3, 4])
b = Series(["even", "odd", "even", "odd"], dtype="category")
c = Series(["even", "odd", "even", "odd"])
exp = Series(["odd", "even", "odd", np.nan], dtype="category")
tm.assert_series_equal(a.map(b), exp)
exp = Series(["odd", "even", "odd", np.nan])
tm.assert_series_equal(a.map(c), exp)
a = Series(["a", "b", "c", "d"])
b = Series([1, 2, 3, 4], index=pd.CategoricalIndex(["b", "c", "d", "e"]))
c = Series([1, 2, 3, 4], index=Index(["b", "c", "d", "e"]))
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(c), exp)
a = Series(["a", "b", "c", "d"])
b = Series(
["B", "C", "D", "E"],
dtype="category",
index=pd.CategoricalIndex(["b", "c", "d", "e"]),
)
c = Series(["B", "C", "D", "E"], index=Index(["b", "c", "d", "e"]))
exp = Series(
pd.Categorical([np.nan, "B", "C", "D"], categories=["B", "C", "D", "E"])
)
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, "B", "C", "D"])
tm.assert_series_equal(a.map(c), exp)
def test_map_empty(self, index):
if isinstance(index, MultiIndex):
pytest.skip("Initializing a Series from a MultiIndex is not supported")
s = Series(index)
result = s.map({})
expected = pd.Series(np.nan, index=s.index)
tm.assert_series_equal(result, expected)
def test_map_compat(self):
s = Series([True, True, False], index=[1, 2, 3])
result = s.map({True: "foo", False: "bar"})
expected = Series(["foo", "foo", "bar"], index=[1, 2, 3])
tm.assert_series_equal(result, expected)
def test_map_int(self):
left = Series({"a": 1.0, "b": 2.0, "c": 3.0, "d": 4})
right = Series({1: 11, 2: 22, 3: 33})
assert left.dtype == np.float_
assert issubclass(right.dtype.type, np.integer)
merged = left.map(right)
assert merged.dtype == np.float_
assert isna(merged["d"])
assert not isna(merged["c"])
def test_map_type_inference(self):
s = Series(range(3))
s2 = s.map(lambda x: np.where(x == 0, 0, 1))
assert issubclass(s2.dtype.type, np.integer)
def test_map_decimal(self, string_series):
from decimal import Decimal
result = string_series.map(lambda x: Decimal(str(x)))
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_map_na_exclusion(self):
s = Series([1.5, np.nan, 3, np.nan, 5])
result = s.map(lambda x: x * 2, na_action="ignore")
exp = s * 2
tm.assert_series_equal(result, exp)
def test_map_dict_with_tuple_keys(self):
df = pd.DataFrame({"a": [(1,), (2,), (3, 4), (5, 6)]})
label_mappings = {(1,): "A", (2,): "B", (3, 4): "A", (5, 6): "B"}
df["labels"] = df["a"].map(label_mappings)
df["expected_labels"] = pd.Series(["A", "B", "A", "B"], index=df.index)
tm.assert_series_equal(df["labels"], df["expected_labels"], check_names=False)
def test_map_counter(self):
s = Series(["a", "b", "c"], index=[1, 2, 3])
counter = Counter()
counter["b"] = 5
counter["c"] += 1
result = s.map(counter)
expected = Series([0, 5, 1], index=[1, 2, 3])
tm.assert_series_equal(result, expected)
def test_map_defaultdict(self):
s = Series([1, 2, 3], index=["a", "b", "c"])
default_dict = defaultdict(lambda: "blank")
default_dict[1] = "stuff"
result = s.map(default_dict)
expected = Series(["stuff", "blank", "blank"], index=["a", "b", "c"])
tm.assert_series_equal(result, expected)
def test_map_dict_na_key(self):
s = Series([1, 2, np.nan])
expected = Series(["a", "b", "c"])
result = s.map({1: "a", 2: "b", np.nan: "c"})
tm.assert_series_equal(result, expected)
def test_map_dict_subclass_with_missing(self):
class DictWithMissing(dict):
def __missing__(self, key):
return "missing"
s = Series([1, 2, 3])
dictionary = DictWithMissing({3: "three"})
result = s.map(dictionary)
expected = Series(["missing", "missing", "three"])
tm.assert_series_equal(result, expected)
def test_map_dict_subclass_without_missing(self):
class DictWithoutMissing(dict):
pass
s = Series([1, 2, 3])
dictionary = DictWithoutMissing({3: "three"})
result = s.map(dictionary)
expected = Series([np.nan, np.nan, "three"])
tm.assert_series_equal(result, expected)
def test_map_abc_mapping(self, non_dict_mapping_subclass):
s = Series([1, 2, 3])
not_a_dictionary = non_dict_mapping_subclass({3: "three"})
result = s.map(not_a_dictionary)
expected = Series([np.nan, np.nan, "three"])
tm.assert_series_equal(result, expected)
def test_map_abc_mapping_with_missing(self, non_dict_mapping_subclass):
class NonDictMappingWithMissing(non_dict_mapping_subclass):
def __missing__(self, key):
return "missing"
s = Series([1, 2, 3])
not_a_dictionary = NonDictMappingWithMissing({3: "three"})
result = s.map(not_a_dictionary)
expected = Series([np.nan, np.nan, "three"])
tm.assert_series_equal(result, expected)
def test_map_box(self):
vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
s = pd.Series(vals)
assert s.dtype == "datetime64[ns]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = pd.Series(["Timestamp_1_None", "Timestamp_2_None"])
tm.assert_series_equal(res, exp)
vals = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
]
s = pd.Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = pd.Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
tm.assert_series_equal(res, exp)
vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
s = pd.Series(vals)
assert s.dtype == "timedelta64[ns]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.days}")
exp = pd.Series(["Timedelta_1", "Timedelta_2"])
tm.assert_series_equal(res, exp)
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = pd.Series(vals)
assert s.dtype == "Period[M]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.freqstr}")
exp = pd.Series(["Period_M", "Period_M"])
tm.assert_series_equal(res, exp)
def test_map_categorical(self):
values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
s = pd.Series(values, name="XX", index=list("abcdefg"))
result = s.map(lambda x: x.lower())
exp_values = pd.Categorical(
list("abbabcd"), categories=list("dcba"), ordered=True
)
exp = pd.Series(exp_values, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp_values)
result = s.map(lambda x: "A")
exp = pd.Series(["A"] * 7, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
assert result.dtype == object
with pytest.raises(NotImplementedError):
s.map(lambda x: x, na_action="ignore")
def test_map_datetimetz(self):
values = pd.date_range("2011-01-01", "2011-01-02", freq="H").tz_localize(
"Asia/Tokyo"
)
s = pd.Series(values, name="XX")
result = s.map(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range("2011-01-02", "2011-01-03", freq="H").tz_localize(
"Asia/Tokyo"
)
exp = pd.Series(exp_values, name="XX")
tm.assert_series_equal(result, exp)
result = s.map(lambda x: x.hour)
exp = pd.Series(list(range(24)) + [0], name="XX", dtype=np.int64)
tm.assert_series_equal(result, exp)
with pytest.raises(NotImplementedError):
s.map(lambda x: x, na_action="ignore")
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = pd.Series(["Asia/Tokyo"] * 25, name="XX")
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize(
"vals,mapping,exp",
[
(list("abc"), {np.nan: "not NaN"}, [np.nan] * 3 + ["not NaN"]),
(list("abc"), {"a": "a letter"}, ["a letter"] + [np.nan] * 3),
(list(range(3)), {0: 42}, [42] + [np.nan] * 3),
],
)
def test_map_missing_mixed(self, vals, mapping, exp):
s = pd.Series(vals + [np.nan])
result = s.map(mapping)
tm.assert_series_equal(result, pd.Series(exp))
@pytest.mark.parametrize(
"dti,exp",
[
(
Series([1, 2], index=pd.DatetimeIndex([0, 31536000000])),
DataFrame(np.repeat([[1, 2]], 2, axis=0), dtype="int64"),
),
(
tm.makeTimeSeries(nper=30),
DataFrame(np.repeat([[1, 2]], 30, axis=0), dtype="int64"),
),
],
)
def test_apply_series_on_date_time_index_aware_series(self, dti, exp):
index = dti.tz_localize("UTC").index
result = pd.Series(index).apply(lambda x: pd.Series([1, 2]))
tm.assert_frame_equal(result, exp)
def test_apply_scaler_on_date_time_index_aware_series(self):
series = tm.makeTimeSeries(nper=30).tz_localize("UTC")
result = pd.Series(series.index).apply(lambda x: 1)
tm.assert_series_equal(result, pd.Series(np.ones(30), dtype="int64"))
def test_map_float_to_string_precision(self):
ser = pd.Series(1 / 3)
result = ser.map(lambda val: str(val)).to_dict()
expected = {0: "0.3333333333333333"}
assert result == expected
def test_map_with_invalid_na_action_raises(self):
s = pd.Series([1, 2, 3])
msg = "na_action must either be 'ignore' or None"
with pytest.raises(ValueError, match=msg):
s.map(lambda x: x, na_action="____")
def test_apply_to_timedelta(self):
list_of_valid_strings = ["00:00:01", "00:00:02"]
a = pd.to_timedelta(list_of_valid_strings)
b = Series(list_of_valid_strings).apply(pd.to_timedelta)
# assert_series_equal(a, b)
list_of_strings = ["00:00:01", np.nan, pd.NaT, pd.NaT]
a = pd.to_timedelta(list_of_strings) # noqa
b = Series(list_of_strings).apply(pd.to_timedelta) # noqa
# Can't compare until apply on a Series gives the correct dtype
| true | true |
f7fb5c3cc3be647cb0404baf8d7024dc58158c0d | 12,416 | py | Python | anyway/accidents_around_schools.py | shaniwein/anyway | dcd13bf7dc4a120f4d697ab0c08b906f43eea52e | [
"MIT"
] | 1 | 2022-01-19T18:23:03.000Z | 2022-01-19T18:23:03.000Z | anyway/accidents_around_schools.py | shaniwein/anyway | dcd13bf7dc4a120f4d697ab0c08b906f43eea52e | [
"MIT"
] | 2 | 2021-11-02T13:37:23.000Z | 2021-11-23T15:51:06.000Z | anyway/accidents_around_schools.py | shaniwein/anyway | dcd13bf7dc4a120f4d697ab0c08b906f43eea52e | [
"MIT"
] | null | null | null | import os
import math
import pandas as pd
import sqlalchemy as sa
from sqlalchemy import or_
from anyway.backend_constants import BE_CONST
from anyway.models import AccidentMarker, Involved, School
from anyway.app_and_db import db
# Accident sub-type passed as `acc_type` into the UI URL templates below.
SUBTYPE_ACCIDENT_WITH_PEDESTRIAN = 1
# When True, queries are restricted to precisely-located markers (see
# LOCATION_ACCURACY_PRECISE_INT used in the DB filter).
LOCATION_ACCURACY_PRECISE = True
LOCATION_ACCURACY_PRECISE_INT = 1
# Involved.injured_type code for a pedestrian.
INJURED_TYPE_PEDESTRIAN = 1
# Sentinel marking markers with no yishuv (locality) symbol; such rows are
# filtered out of the query.
YISHUV_SYMBOL_NOT_EXIST = -1
# Text encodings; cp1255 is the Windows Hebrew code page. Not referenced in
# this chunk — presumably used elsewhere in the module for file I/O.
CONTENT_ENCODING = "utf-8"
HEBREW_ENCODING = "cp1255"
# Pre-filtered anyway.co.il map URL templates; placeholders are filled with
# the school location, date range and accuracy flags.
ANYWAY_UI_FORMAT_MAP_ONLY = "https://www.anyway.co.il/?zoom=17&start_date={start_date}&end_date={end_date}&lat={latitude}&lon={longitude}&show_fatal=1&show_severe=1&show_light=1&approx={location_approx}&accurate={location_accurate}&show_markers=1&show_discussions=0&show_urban=3&show_intersection=3&show_lane=3&show_day=7&show_holiday=0&show_time=24&start_time=25&end_time=25&weather=0&road=0&separation=0&surface=0&acctype={acc_type}&controlmeasure=0&district=0&case_type=0&show_rsa=0&age_groups=1,2,3,4&map_only=true"
ANYWAY_UI_FORMAT_WITH_FILTERS = "https://www.anyway.co.il/?zoom=17&start_date={start_date}&end_date={end_date}&lat={latitude}&lon={longitude}&show_fatal=1&show_severe=1&show_light=1&approx={location_approx}&accurate={location_accurate}&show_markers=1&show_discussions=0&show_urban=3&show_intersection=3&show_lane=3&show_day=7&show_holiday=0&show_time=24&start_time=25&end_time=25&weather=0&road=0&separation=0&surface=0&acctype={acc_type}&controlmeasure=0&district=0&case_type=0&show_rsa=0&age_groups=1,2,3,4"
# strptime format for user-supplied dates vs. strftime format used in URLs.
DATE_INPUT_FORMAT = "%d-%m-%Y"
DATE_URL_FORMAT = "%Y-%m-%d"
def get_bounding_box(latitude, longitude, distance_in_km):
    """Return (lat_min, lon_min, lat_max, lon_max), in degrees, of a box
    extending ``distance_in_km`` in each direction from the given point.

    Uses a spherical-Earth approximation (radius 6371 km); the longitude
    half-width is scaled by 1/cos(latitude) so the box keeps its east-west
    extent in kilometres at that latitude.
    """
    earth_radius_km = 6371
    lat_rad = math.radians(latitude)
    lon_rad = math.radians(longitude)
    # Radius of the circle of constant latitude through the centre point.
    parallel_radius_km = earth_radius_km * math.cos(lat_rad)
    half_lat = distance_in_km / earth_radius_km
    half_lon = distance_in_km / parallel_radius_km
    return (
        math.degrees(lat_rad - half_lat),
        math.degrees(lon_rad - half_lon),
        math.degrees(lat_rad + half_lat),
        math.degrees(lon_rad + half_lon),
    )
def acc_inv_query(longitude, latitude, distance, start_date, end_date, school):
    """Query pedestrian-injury rows around one school and return a DataFrame.

    Builds a ``distance``-km bounding box around (latitude, longitude),
    selects (Involved, AccidentMarker) pairs inside it for the date range,
    and annotates every row with the school's details plus two anyway.co.il
    map URLs.

    :param longitude: school longitude, decimal degrees
    :param latitude: school latitude, decimal degrees
    :param distance: half-width of the search box, in km
    :param start_date: inclusive lower bound on ``AccidentMarker.created``
    :param end_date: exclusive upper bound on ``AccidentMarker.created``
    :param school: mapping with id / school_name / yishuv / coordinate fields
    :return: pandas.DataFrame of the matching rows, one per involved person
    """
    lat_min, lon_min, lat_max, lon_max = get_bounding_box(latitude, longitude, distance)
    base_x = lon_min
    base_y = lat_min
    distance_x = lon_max
    distance_y = lat_max
    # WKT polygon of the bounding box (closed ring, lon/lat order), used for
    # the spatial intersects() filter below.
    pol_str = "POLYGON(({0} {1},{0} {3},{2} {3},{2} {1},{0} {1}))".format(
        base_x, base_y, distance_x, distance_y
    )
    query_obj = (
        db.session.query(Involved, AccidentMarker)
        .join(AccidentMarker, AccidentMarker.provider_and_id == Involved.provider_and_id)
        .filter(AccidentMarker.geom.intersects(pol_str))
        .filter(Involved.injured_type == INJURED_TYPE_PEDESTRIAN)
        .filter(AccidentMarker.provider_and_id == Involved.provider_and_id)
        .filter(
            or_(
                (AccidentMarker.provider_code == BE_CONST.CBS_ACCIDENT_TYPE_1_CODE),
                (AccidentMarker.provider_code == BE_CONST.CBS_ACCIDENT_TYPE_3_CODE),
            )
        )
        .filter(AccidentMarker.created >= start_date)
        .filter(AccidentMarker.created < end_date)
        .filter(AccidentMarker.location_accuracy == LOCATION_ACCURACY_PRECISE_INT)
        .filter(AccidentMarker.yishuv_symbol != YISHUV_SYMBOL_NOT_EXIST)
        .filter(Involved.age_group.in_([1, 2, 3, 4]))
    ) # age groups 1-4 — per the original comment these cover ages 0-19
    # Materialise the ORM query into a DataFrame (with_labels avoids column
    # name collisions between the two joined tables).
    df = pd.read_sql_query(query_obj.with_labels().statement, query_obj.session.bind)
    # URL flags for the anyway.co.il links.
    # NOTE(review): both branches set location_accurate = 1; the non-precise
    # branch presumably intended location_accurate = 0 — confirm before relying
    # on LOCATION_ACCURACY_PRECISE = False.
    if LOCATION_ACCURACY_PRECISE:
        location_accurate = 1
        location_approx = ""
    else:
        location_accurate = 1
        location_approx = 1
    ui_url_map_only = ANYWAY_UI_FORMAT_MAP_ONLY.format(
        latitude=school["latitude"],
        longitude=school["longitude"],
        start_date=start_date.strftime(DATE_URL_FORMAT),
        end_date=end_date.strftime(DATE_URL_FORMAT),
        acc_type=SUBTYPE_ACCIDENT_WITH_PEDESTRIAN,
        location_accurate=location_accurate,
        location_approx=location_approx,
    )
    ui_url_with_filters = ANYWAY_UI_FORMAT_WITH_FILTERS.format(
        latitude=school["latitude"],
        longitude=school["longitude"],
        start_date=start_date.strftime(DATE_URL_FORMAT),
        end_date=end_date.strftime(DATE_URL_FORMAT),
        acc_type=SUBTYPE_ACCIDENT_WITH_PEDESTRIAN,
        location_accurate=location_accurate,
        location_approx=location_approx,
    )
    # Attach the school's identity and the two map links to every row so the
    # caller can concatenate results across schools and still group by school.
    df["anyway_link"] = ui_url_map_only
    df["anyway_link_with_filters"] = ui_url_with_filters
    df["school_id"] = school["id"]
    df["school_name"] = school["school_name"]
    df["school_yishuv_symbol"] = school["yishuv_symbol"]
    df["school_yishuv_name"] = school["yishuv_name"]
    df["school_longitude"] = school["longitude"]
    df["school_latitude"] = school["latitude"]
    return df
def main(start_date, end_date, distance, output_path):
    """Build per-school pedestrian-injury reports and write them as CSVs.

    Loads all schools from the DB, de-duplicates them, collects the accident
    rows around each school via acc_inv_query(), and writes several aggregate
    views of the combined result into ``output_path``.

    :param start_date: inclusive lower bound passed through to acc_inv_query
    :param end_date: exclusive upper bound passed through to acc_inv_query
    :param distance: search-box half-width in km around each school
    :param output_path: directory that receives the generated CSV files
    """
    # Load every School row into a DataFrame.
    schools_query = sa.select([School])
    df_schools = pd.read_sql_query(schools_query, db.session.bind)
    df_total = pd.DataFrame()
    # Drop schools sharing the same town + coordinates, schools without a
    # town name, and schools carrying the placeholder yishuv_symbol 0.
    df_schools = df_schools.drop_duplicates(  # pylint: disable=no-member
        ["yishuv_name", "longitude", "latitude"]
    )
    df_schools.dropna(subset=["yishuv_name"], inplace=True)
    df_schools = df_schools[df_schools.yishuv_symbol != 0]
    df_schools.to_csv(os.path.join(output_path, "df_schools.csv"), encoding=CONTENT_ENCODING)
    # Accumulate the per-school rows into one big DataFrame.
    for _, school in df_schools.iterrows():
        df_total = pd.concat(
            [
                df_total,
                acc_inv_query(
                    longitude=school["longitude"],
                    latitude=school["latitude"],
                    distance=distance,
                    start_date=start_date,
                    end_date=end_date,
                    school=school,
                ),
            ],
            axis=0,
        )
    df_total.to_csv(os.path.join(output_path, "df_total.csv"), encoding=CONTENT_ENCODING)
    # Injured count per school, all severities together.
    df_total_involved_count = (
        df_total.groupby(
            [
                "school_name",
                "school_longitude",
                "school_latitude",
                "school_yishuv_symbol",
                "school_yishuv_name",
                "anyway_link",
                "school_id",
            ]
        )
        .size()
        .reset_index(name="injured_count")
        .sort_values("injured_count", ascending=False)
    )
    df_total_involved_count.reset_index().to_csv(
        os.path.join(output_path, "df_total_involved_count.csv"),
        encoding=CONTENT_ENCODING,
        header=True,
    )
    # Injured count per school broken down by injury severity.
    df_total_involved_by_injury = (
        df_total.groupby(
            [
                "school_id",
                "school_name",
                "school_longitude",
                "school_latitude",
                "school_yishuv_symbol",
                "school_yishuv_name",
                "involved_injury_severity",
                "anyway_link",
            ]
        )
        .size()
        .reset_index(name="injured_count")
        .sort_values("injured_count", ascending=False)
    )
    df_total_involved_by_injury.reset_index().to_csv(
        os.path.join(output_path, "df_total_involved_by_injury.csv"),
        encoding=CONTENT_ENCODING,
        header=True,
    )
    # Count of the two most severe injury classes (severity codes 1 and 2).
    df_total_involved_injiry_severity_1_2 = (
        df_total[
            (df_total.involved_injury_severity == 1) | (df_total.involved_injury_severity == 2)
        ]
        .groupby(
            [
                "school_id",
                "school_name",
                "anyway_link",
                "school_longitude",
                "school_latitude",
                "school_yishuv_symbol",
                "school_yishuv_name",
            ]
        )
        .size()
        .reset_index(name="injured_count")
        .sort_values("injured_count", ascending=False)
    )
    df_total_involved_injiry_severity_1_2.reset_index().to_csv(
        os.path.join(output_path, "df_total_involved_injiry_severity_1_2.csv"),
        encoding=CONTENT_ENCODING,
        header=True,
    )
    # Distinct-accident count per school: drop duplicate involved rows of the
    # same accident (provider_and_id) before counting.
    df_total_accident_count = (
        df_total.drop_duplicates(
            [
                "school_id",
                "school_name",
                "anyway_link",
                "school_longitude",
                "school_latitude",
                "school_yishuv_symbol",
                "school_yishuv_name",
                "provider_and_id",
            ]
        )
        .groupby(["school_id", "school_name", "school_yishuv_symbol", "school_yishuv_name"])
        .size()
        .reset_index(name="accidents_count")
        .sort_values("accidents_count", ascending=False)
    )
    df_total_accident_count.reset_index().to_csv(
        os.path.join(output_path, "df_total_accident_count.csv"),
        encoding=CONTENT_ENCODING,
        header=True,
    )
    # Per-town view: injured per school and severity, later pivoted so each
    # severity becomes its own column.
    df_total_involved_count_by_yishuv = (
        df_total.groupby(
            [
                "school_yishuv_name",
                "school_id",
                "school_name",
                "anyway_link_with_filters",
                "school_longitude",
                "school_latitude",
                "involved_injury_severity",
            ]
        )
        .size()
        .reset_index(name="injured_count")
        .loc[
            :,
            [
                "school_yishuv_name",
                "school_name",
                "anyway_link_with_filters",
                "involved_injury_severity",
                "injured_count",
                "school_longitude",
                "school_latitude",
                "school_id",
            ],
        ]
    )
    # Pivot severity out of the index into columns (one injured_count column
    # per severity code).
    df_total_involved_count_by_yishuv = df_total_involved_count_by_yishuv.set_index(
        [
            "school_yishuv_name",
            "school_name",
            "anyway_link_with_filters",
            "school_longitude",
            "school_latitude",
            "school_id",
            "involved_injury_severity",
        ]
    ).unstack(-1)
    df_total_involved_count_by_yishuv.fillna(
        {"injured_count": 0, "total_injured_count": 0}, inplace=True
    )
    # Cast the pivoted counts back to int (unstack turned them into floats
    # via the NaN fill).
    df_total_involved_count_by_yishuv.loc[
        :, (slice("injured_count"), slice(None))
    ] = df_total_involved_count_by_yishuv.loc[:, (slice("injured_count"), slice(None))].apply(
        lambda x: x.apply(int)
    )
    df_total_involved_count_by_yishuv["total_injured_count"] = (
        df_total_involved_count_by_yishuv.loc[:, ["injured_count"]].sum(axis=1)
    ).apply(int)
    # Dense rank of schools inside each town by total injured count.
    groups = df_total_involved_count_by_yishuv.loc[
        :,
        [
            "school_yishuv_name",
            "school_name",
            "school_longitude",
            "school_latitude",
            "school_id",
            "total_injured_count",
        ],
    ].groupby(["school_yishuv_name"])
    rank_in_yishuv = groups["total_injured_count"].rank(method="dense", ascending=False)
    rank_in_yishuv.name = "rank"
    rank_in_yishuv = rank_in_yishuv.apply(int)
    rank_in_yishuv = rank_in_yishuv.to_frame(name="rank_in_yishuv").reset_index()
    # Join the rank back onto the pivoted counts.
    joined_df = pd.merge(
        df_total_involved_count_by_yishuv.reset_index(),
        rank_in_yishuv,
        on=["school_yishuv_name", "school_name", "school_longitude", "school_latitude"],
        how="left",
    )
    joined_df.sort_values(["school_yishuv_name", "rank_in_yishuv"], ascending=True, inplace=True)
    # Flatten the MultiIndex column labels produced by the unstack above
    # (e.g. ("injured_count", 1) -> "injured_count_1").
    joined_df.columns = [
        col if type(col) == str else "_".join(map(str, col)) for col in joined_df.columns.values
    ]
    joined_df = joined_df.loc[
        :,
        [
            "school_yishuv_name",
            "school_name",
            "rank_in_yishuv",
            "school_longitude",
            "school_latitude",
            "injured_count_1",
            "injured_count_2",
            "injured_count_3",
            "total_injured_count_",
            "anyway_link_with_filters",
            "school_id",
        ],
    ]
    # Rename to the final report schema; severity 1/2/3 presumably map to
    # killed / severe / light — TODO confirm against the CBS code table.
    joined_df.columns = [
        "school_yishuv_name",
        "school_name",
        "rank_in_yishuv",
        "school_longitude",
        "school_latitude",
        "killed_count",
        "severly_injured_count",
        "light_injured_count",
        "total_injured_killed_count",
        "anyway_link",
        "school_id",
    ]
    joined_df.to_csv(
        os.path.join(output_path, "df_total_involved_count_by_yishuv.csv"),
        encoding=CONTENT_ENCODING,
        header=True,
    )
| 35.884393 | 519 | 0.625644 | import os
import math
import pandas as pd
import sqlalchemy as sa
from sqlalchemy import or_
from anyway.backend_constants import BE_CONST
from anyway.models import AccidentMarker, Involved, School
from anyway.app_and_db import db
SUBTYPE_ACCIDENT_WITH_PEDESTRIAN = 1
LOCATION_ACCURACY_PRECISE = True
LOCATION_ACCURACY_PRECISE_INT = 1
INJURED_TYPE_PEDESTRIAN = 1
YISHUV_SYMBOL_NOT_EXIST = -1
CONTENT_ENCODING = "utf-8"
HEBREW_ENCODING = "cp1255"
ANYWAY_UI_FORMAT_MAP_ONLY = "https://www.anyway.co.il/?zoom=17&start_date={start_date}&end_date={end_date}&lat={latitude}&lon={longitude}&show_fatal=1&show_severe=1&show_light=1&approx={location_approx}&accurate={location_accurate}&show_markers=1&show_discussions=0&show_urban=3&show_intersection=3&show_lane=3&show_day=7&show_holiday=0&show_time=24&start_time=25&end_time=25&weather=0&road=0&separation=0&surface=0&acctype={acc_type}&controlmeasure=0&district=0&case_type=0&show_rsa=0&age_groups=1,2,3,4&map_only=true"
ANYWAY_UI_FORMAT_WITH_FILTERS = "https://www.anyway.co.il/?zoom=17&start_date={start_date}&end_date={end_date}&lat={latitude}&lon={longitude}&show_fatal=1&show_severe=1&show_light=1&approx={location_approx}&accurate={location_accurate}&show_markers=1&show_discussions=0&show_urban=3&show_intersection=3&show_lane=3&show_day=7&show_holiday=0&show_time=24&start_time=25&end_time=25&weather=0&road=0&separation=0&surface=0&acctype={acc_type}&controlmeasure=0&district=0&case_type=0&show_rsa=0&age_groups=1,2,3,4"
DATE_INPUT_FORMAT = "%d-%m-%Y"
DATE_URL_FORMAT = "%Y-%m-%d"
def get_bounding_box(latitude, longitude, distance_in_km):
latitude = math.radians(latitude)
longitude = math.radians(longitude)
radius = 6371
parallel_radius = radius * math.cos(latitude)
lat_min = latitude - distance_in_km / radius
lat_max = latitude + distance_in_km / radius
lon_min = longitude - distance_in_km / parallel_radius
lon_max = longitude + distance_in_km / parallel_radius
rad2deg = math.degrees
return rad2deg(lat_min), rad2deg(lon_min), rad2deg(lat_max), rad2deg(lon_max)
def acc_inv_query(longitude, latitude, distance, start_date, end_date, school):
lat_min, lon_min, lat_max, lon_max = get_bounding_box(latitude, longitude, distance)
base_x = lon_min
base_y = lat_min
distance_x = lon_max
distance_y = lat_max
pol_str = "POLYGON(({0} {1},{0} {3},{2} {3},{2} {1},{0} {1}))".format(
base_x, base_y, distance_x, distance_y
)
query_obj = (
db.session.query(Involved, AccidentMarker)
.join(AccidentMarker, AccidentMarker.provider_and_id == Involved.provider_and_id)
.filter(AccidentMarker.geom.intersects(pol_str))
.filter(Involved.injured_type == INJURED_TYPE_PEDESTRIAN)
.filter(AccidentMarker.provider_and_id == Involved.provider_and_id)
.filter(
or_(
(AccidentMarker.provider_code == BE_CONST.CBS_ACCIDENT_TYPE_1_CODE),
(AccidentMarker.provider_code == BE_CONST.CBS_ACCIDENT_TYPE_3_CODE),
)
)
.filter(AccidentMarker.created >= start_date)
.filter(AccidentMarker.created < end_date)
.filter(AccidentMarker.location_accuracy == LOCATION_ACCURACY_PRECISE_INT)
.filter(AccidentMarker.yishuv_symbol != YISHUV_SYMBOL_NOT_EXIST)
.filter(Involved.age_group.in_([1, 2, 3, 4]))
)
df = pd.read_sql_query(query_obj.with_labels().statement, query_obj.session.bind)
if LOCATION_ACCURACY_PRECISE:
location_accurate = 1
location_approx = ""
else:
location_accurate = 1
location_approx = 1
ui_url_map_only = ANYWAY_UI_FORMAT_MAP_ONLY.format(
latitude=school["latitude"],
longitude=school["longitude"],
start_date=start_date.strftime(DATE_URL_FORMAT),
end_date=end_date.strftime(DATE_URL_FORMAT),
acc_type=SUBTYPE_ACCIDENT_WITH_PEDESTRIAN,
location_accurate=location_accurate,
location_approx=location_approx,
)
ui_url_with_filters = ANYWAY_UI_FORMAT_WITH_FILTERS.format(
latitude=school["latitude"],
longitude=school["longitude"],
start_date=start_date.strftime(DATE_URL_FORMAT),
end_date=end_date.strftime(DATE_URL_FORMAT),
acc_type=SUBTYPE_ACCIDENT_WITH_PEDESTRIAN,
location_accurate=location_accurate,
location_approx=location_approx,
)
df["anyway_link"] = ui_url_map_only
df["anyway_link_with_filters"] = ui_url_with_filters
df["school_id"] = school["id"]
df["school_name"] = school["school_name"]
df["school_yishuv_symbol"] = school["yishuv_symbol"]
df["school_yishuv_name"] = school["yishuv_name"]
df["school_longitude"] = school["longitude"]
df["school_latitude"] = school["latitude"]
return df
def main(start_date, end_date, distance, output_path):
schools_query = sa.select([School])
df_schools = pd.read_sql_query(schools_query, db.session.bind)
df_total = pd.DataFrame()
df_schools = df_schools.drop_duplicates(
["yishuv_name", "longitude", "latitude"]
)
df_schools.dropna(subset=["yishuv_name"], inplace=True)
df_schools = df_schools[df_schools.yishuv_symbol != 0]
df_schools.to_csv(os.path.join(output_path, "df_schools.csv"), encoding=CONTENT_ENCODING)
for _, school in df_schools.iterrows():
df_total = pd.concat(
[
df_total,
acc_inv_query(
longitude=school["longitude"],
latitude=school["latitude"],
distance=distance,
start_date=start_date,
end_date=end_date,
school=school,
),
],
axis=0,
)
df_total.to_csv(os.path.join(output_path, "df_total.csv"), encoding=CONTENT_ENCODING)
df_total_involved_count = (
df_total.groupby(
[
"school_name",
"school_longitude",
"school_latitude",
"school_yishuv_symbol",
"school_yishuv_name",
"anyway_link",
"school_id",
]
)
.size()
.reset_index(name="injured_count")
.sort_values("injured_count", ascending=False)
)
df_total_involved_count.reset_index().to_csv(
os.path.join(output_path, "df_total_involved_count.csv"),
encoding=CONTENT_ENCODING,
header=True,
)
df_total_involved_by_injury = (
df_total.groupby(
[
"school_id",
"school_name",
"school_longitude",
"school_latitude",
"school_yishuv_symbol",
"school_yishuv_name",
"involved_injury_severity",
"anyway_link",
]
)
.size()
.reset_index(name="injured_count")
.sort_values("injured_count", ascending=False)
)
df_total_involved_by_injury.reset_index().to_csv(
os.path.join(output_path, "df_total_involved_by_injury.csv"),
encoding=CONTENT_ENCODING,
header=True,
)
df_total_involved_injiry_severity_1_2 = (
df_total[
(df_total.involved_injury_severity == 1) | (df_total.involved_injury_severity == 2)
]
.groupby(
[
"school_id",
"school_name",
"anyway_link",
"school_longitude",
"school_latitude",
"school_yishuv_symbol",
"school_yishuv_name",
]
)
.size()
.reset_index(name="injured_count")
.sort_values("injured_count", ascending=False)
)
df_total_involved_injiry_severity_1_2.reset_index().to_csv(
os.path.join(output_path, "df_total_involved_injiry_severity_1_2.csv"),
encoding=CONTENT_ENCODING,
header=True,
)
df_total_accident_count = (
df_total.drop_duplicates(
[
"school_id",
"school_name",
"anyway_link",
"school_longitude",
"school_latitude",
"school_yishuv_symbol",
"school_yishuv_name",
"provider_and_id",
]
)
.groupby(["school_id", "school_name", "school_yishuv_symbol", "school_yishuv_name"])
.size()
.reset_index(name="accidents_count")
.sort_values("accidents_count", ascending=False)
)
df_total_accident_count.reset_index().to_csv(
os.path.join(output_path, "df_total_accident_count.csv"),
encoding=CONTENT_ENCODING,
header=True,
)
df_total_involved_count_by_yishuv = (
df_total.groupby(
[
"school_yishuv_name",
"school_id",
"school_name",
"anyway_link_with_filters",
"school_longitude",
"school_latitude",
"involved_injury_severity",
]
)
.size()
.reset_index(name="injured_count")
.loc[
:,
[
"school_yishuv_name",
"school_name",
"anyway_link_with_filters",
"involved_injury_severity",
"injured_count",
"school_longitude",
"school_latitude",
"school_id",
],
]
)
df_total_involved_count_by_yishuv = df_total_involved_count_by_yishuv.set_index(
[
"school_yishuv_name",
"school_name",
"anyway_link_with_filters",
"school_longitude",
"school_latitude",
"school_id",
"involved_injury_severity",
]
).unstack(-1)
df_total_involved_count_by_yishuv.fillna(
{"injured_count": 0, "total_injured_count": 0}, inplace=True
)
df_total_involved_count_by_yishuv.loc[
:, (slice("injured_count"), slice(None))
] = df_total_involved_count_by_yishuv.loc[:, (slice("injured_count"), slice(None))].apply(
lambda x: x.apply(int)
)
df_total_involved_count_by_yishuv["total_injured_count"] = (
df_total_involved_count_by_yishuv.loc[:, ["injured_count"]].sum(axis=1)
).apply(int)
groups = df_total_involved_count_by_yishuv.loc[
:,
[
"school_yishuv_name",
"school_name",
"school_longitude",
"school_latitude",
"school_id",
"total_injured_count",
],
].groupby(["school_yishuv_name"])
rank_in_yishuv = groups["total_injured_count"].rank(method="dense", ascending=False)
rank_in_yishuv.name = "rank"
rank_in_yishuv = rank_in_yishuv.apply(int)
rank_in_yishuv = rank_in_yishuv.to_frame(name="rank_in_yishuv").reset_index()
joined_df = pd.merge(
df_total_involved_count_by_yishuv.reset_index(),
rank_in_yishuv,
on=["school_yishuv_name", "school_name", "school_longitude", "school_latitude"],
how="left",
)
joined_df.sort_values(["school_yishuv_name", "rank_in_yishuv"], ascending=True, inplace=True)
joined_df.columns = [
col if type(col) == str else "_".join(map(str, col)) for col in joined_df.columns.values
]
joined_df = joined_df.loc[
:,
[
"school_yishuv_name",
"school_name",
"rank_in_yishuv",
"school_longitude",
"school_latitude",
"injured_count_1",
"injured_count_2",
"injured_count_3",
"total_injured_count_",
"anyway_link_with_filters",
"school_id",
],
]
joined_df.columns = [
"school_yishuv_name",
"school_name",
"rank_in_yishuv",
"school_longitude",
"school_latitude",
"killed_count",
"severly_injured_count",
"light_injured_count",
"total_injured_killed_count",
"anyway_link",
"school_id",
]
joined_df.to_csv(
os.path.join(output_path, "df_total_involved_count_by_yishuv.csv"),
encoding=CONTENT_ENCODING,
header=True,
)
| true | true |
f7fb5e0f3bc10ab2ce84307dfcfa55e8c6d68bac | 4,906 | py | Python | vanilla_segmentation/train.py | khmariem/DenseFusion | e39f685e6315fc9319d47be0f859585f7dfcf288 | [
"MIT"
] | 1 | 2019-11-06T12:02:02.000Z | 2019-11-06T12:02:02.000Z | vanilla_segmentation/train.py | khmariem/DenseFusion | e39f685e6315fc9319d47be0f859585f7dfcf288 | [
"MIT"
] | null | null | null | vanilla_segmentation/train.py | khmariem/DenseFusion | e39f685e6315fc9319d47be0f859585f7dfcf288 | [
"MIT"
] | null | null | null | import os
import copy
import random
import argparse
import time
import numpy as np
from PIL import Image
import scipy.io as scio
import scipy.misc
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.autograd import Variable
import torch.optim as optim
import torch.nn as nn
from torch.backends import cudnn
from data_controller import SegDataset
from loss import Loss
from segnet import SegNet as segnet
import sys
sys.path.append("..")
from lib.utils import setup_logger
# CLI options for the SegNet training script.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_root', default='/home/nrp-telechan/Downloads/LabelFusion_Sample_Data/logs/dataset/obj', help="dataset root dir (''wrs Dataset'')")
# NOTE(review): batch_size / n_epochs / lr have no type=..., so values passed
# on the command line arrive as strings — confirm callers only rely on the
# (int/float) defaults.
parser.add_argument('--batch_size', default=3, help="batch size")
parser.add_argument('--n_epochs', default=600, help="epochs to train")
parser.add_argument('--workers', type=int, default=10, help='number of data loading workers')
parser.add_argument('--lr', default=0.0001, help="learning rate")
parser.add_argument('--logs_path', default='logs/', help="path to save logs")
parser.add_argument('--model_save_path', default='trained_models/', help="path to save models")
parser.add_argument('--log_dir', default='logs/', help="path to save logs")
parser.add_argument('--resume_model', default='', help="resume model name")
opt = parser.parse_args()


if __name__ == '__main__':
    # Seed Python and torch RNGs with a random seed for this run.
    opt.manualSeed = random.randint(1, 10000)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    # Train/test datasets and loaders (5000 / 1000 samples respectively).
    dataset = SegDataset(opt.dataset_root, '../datasets/wrs/dataset_config/train_data_list.txt', True, 5000)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size, shuffle=True, num_workers=int(opt.workers))
    test_dataset = SegDataset(opt.dataset_root, '../datasets/wrs/dataset_config/test_data_list.txt', False, 1000)
    test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=True, num_workers=int(opt.workers))

    print(len(dataset), len(test_dataset))

    model = segnet()
    model = model.cuda()

    # Optionally resume from a saved state dict.
    if opt.resume_model != '':
        checkpoint = torch.load('{0}/{1}'.format(opt.model_save_path, opt.resume_model))
        model.load_state_dict(checkpoint)
    # Clear any log files left over from a previous run.
    for log in os.listdir(opt.log_dir):
        os.remove(os.path.join(opt.log_dir, log))

    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    criterion = Loss()
    best_val_cost = np.Inf
    st_time = time.time()

    # NOTE(review): range(1, n_epochs) runs n_epochs - 1 epochs.
    for epoch in range(1, opt.n_epochs):
        # --- training pass ---
        model.train()
        train_all_cost = 0.0
        train_time = 0
        logger = setup_logger('epoch%d' % epoch, os.path.join(opt.log_dir, 'epoch_%d_log.txt' % epoch))
        logger.info('Train time {0}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) + ', ' + 'Training started'))

        for i, data in enumerate(dataloader, 0):
            rgb, target = data
            rgb, target = Variable(rgb).cuda(), Variable(target).cuda()
            semantic = model(rgb)
            optimizer.zero_grad()
            semantic_loss = criterion(semantic, target)
            train_all_cost += semantic_loss.item()
            semantic_loss.backward()
            optimizer.step()
            logger.info('Train time {0} Batch {1} CEloss {2}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)), train_time, semantic_loss.item()))
            # Periodic checkpoint every 1000 batches.
            if train_time != 0 and train_time % 1000 == 0:
                torch.save(model.state_dict(), os.path.join(opt.model_save_path, 'model_current.pth'))
            train_time += 1

        train_all_cost = train_all_cost / train_time
        logger.info('Train Finish Avg CEloss: {0}'.format(train_all_cost))

        # --- evaluation pass ---
        model.eval()
        test_all_cost = 0.0
        test_time = 0
        logger = setup_logger('epoch%d_test' % epoch, os.path.join(opt.log_dir, 'epoch_%d_test_log.txt' % epoch))
        logger.info('Test time {0}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) + ', ' + 'Testing started'))

        for j, data in enumerate(test_dataloader, 0):
            rgb, target = data
            rgb, target = Variable(rgb).cuda(), Variable(target).cuda()
            semantic = model(rgb)
            semantic_loss = criterion(semantic, target)
            test_all_cost += semantic_loss.item()
            test_time += 1
            logger.info('Test time {0} Batch {1} CEloss {2}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)), test_time, semantic_loss.item()))

        test_all_cost = test_all_cost / test_time
        logger.info('Test Finish Avg CEloss: {0}'.format(test_all_cost))

        # Keep the checkpoint with the best (lowest) validation loss so far.
        if test_all_cost <= best_val_cost:
            best_val_cost = test_all_cost
            torch.save(model.state_dict(), os.path.join(opt.model_save_path, 'model_{}_{}.pth'.format(epoch, test_all_cost)))
            print('----------->BEST SAVED<-----------')
| 45.850467 | 169 | 0.670404 | import os
import copy
import random
import argparse
import time
import numpy as np
from PIL import Image
import scipy.io as scio
import scipy.misc
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.autograd import Variable
import torch.optim as optim
import torch.nn as nn
from torch.backends import cudnn
from data_controller import SegDataset
from loss import Loss
from segnet import SegNet as segnet
import sys
sys.path.append("..")
from lib.utils import setup_logger
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_root', default='/home/nrp-telechan/Downloads/LabelFusion_Sample_Data/logs/dataset/obj', help="dataset root dir (''wrs Dataset'')")
parser.add_argument('--batch_size', default=3, help="batch size")
parser.add_argument('--n_epochs', default=600, help="epochs to train")
parser.add_argument('--workers', type=int, default=10, help='number of data loading workers')
parser.add_argument('--lr', default=0.0001, help="learning rate")
parser.add_argument('--logs_path', default='logs/', help="path to save logs")
parser.add_argument('--model_save_path', default='trained_models/', help="path to save models")
parser.add_argument('--log_dir', default='logs/', help="path to save logs")
parser.add_argument('--resume_model', default='', help="resume model name")
opt = parser.parse_args()
if __name__ == '__main__':
opt.manualSeed = random.randint(1, 10000)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
dataset = SegDataset(opt.dataset_root, '../datasets/wrs/dataset_config/train_data_list.txt', True, 5000)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size, shuffle=True, num_workers=int(opt.workers))
test_dataset = SegDataset(opt.dataset_root, '../datasets/wrs/dataset_config/test_data_list.txt', False, 1000)
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=True, num_workers=int(opt.workers))
print(len(dataset), len(test_dataset))
model = segnet()
model = model.cuda()
if opt.resume_model != '':
checkpoint = torch.load('{0}/{1}'.format(opt.model_save_path, opt.resume_model))
model.load_state_dict(checkpoint)
for log in os.listdir(opt.log_dir):
os.remove(os.path.join(opt.log_dir, log))
optimizer = optim.Adam(model.parameters(), lr=opt.lr)
criterion = Loss()
best_val_cost = np.Inf
st_time = time.time()
for epoch in range(1, opt.n_epochs):
model.train()
train_all_cost = 0.0
train_time = 0
logger = setup_logger('epoch%d' % epoch, os.path.join(opt.log_dir, 'epoch_%d_log.txt' % epoch))
logger.info('Train time {0}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) + ', ' + 'Training started'))
for i, data in enumerate(dataloader, 0):
rgb, target = data
rgb, target = Variable(rgb).cuda(), Variable(target).cuda()
semantic = model(rgb)
optimizer.zero_grad()
semantic_loss = criterion(semantic, target)
train_all_cost += semantic_loss.item()
semantic_loss.backward()
optimizer.step()
logger.info('Train time {0} Batch {1} CEloss {2}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)), train_time, semantic_loss.item()))
if train_time != 0 and train_time % 1000 == 0:
torch.save(model.state_dict(), os.path.join(opt.model_save_path, 'model_current.pth'))
train_time += 1
train_all_cost = train_all_cost / train_time
logger.info('Train Finish Avg CEloss: {0}'.format(train_all_cost))
model.eval()
test_all_cost = 0.0
test_time = 0
logger = setup_logger('epoch%d_test' % epoch, os.path.join(opt.log_dir, 'epoch_%d_test_log.txt' % epoch))
logger.info('Test time {0}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) + ', ' + 'Testing started'))
for j, data in enumerate(test_dataloader, 0):
rgb, target = data
rgb, target = Variable(rgb).cuda(), Variable(target).cuda()
semantic = model(rgb)
semantic_loss = criterion(semantic, target)
test_all_cost += semantic_loss.item()
test_time += 1
logger.info('Test time {0} Batch {1} CEloss {2}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)), test_time, semantic_loss.item()))
test_all_cost = test_all_cost / test_time
logger.info('Test Finish Avg CEloss: {0}'.format(test_all_cost))
if test_all_cost <= best_val_cost:
best_val_cost = test_all_cost
torch.save(model.state_dict(), os.path.join(opt.model_save_path, 'model_{}_{}.pth'.format(epoch, test_all_cost)))
print('----------->BEST SAVED<-----------')
| true | true |
f7fb5eef9a18ff14528216f7b9dd8caa4dd4afec | 901 | py | Python | tests/aws/s3_handler_test.py | project-hadron/discovery-connectors | 7bca00d6649eab5a21a17a98c49540ff781481fc | [
"BSD-3-Clause"
] | 1 | 2020-09-21T17:24:28.000Z | 2020-09-21T17:24:28.000Z | tests/aws/s3_handler_test.py | project-hadron/discovery-connectors | 7bca00d6649eab5a21a17a98c49540ff781481fc | [
"BSD-3-Clause"
] | null | null | null | tests/aws/s3_handler_test.py | project-hadron/discovery-connectors | 7bca00d6649eab5a21a17a98c49540ff781481fc | [
"BSD-3-Clause"
] | 1 | 2021-07-26T12:09:37.000Z | 2021-07-26T12:09:37.000Z | import unittest
import pandas as pd
import os
from pprint import pprint
from ds_connectors.handlers.s3_handlers import S3SourceHandler, S3PersistHandler
from aistac.handlers.abstract_handlers import ConnectorContract, HandlerFactory
class S3HandlerTest(unittest.TestCase):
    """Round-trip test for the S3 persist handler against a live bucket."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_aws_connector_init(self):
        # Persist a small canonical dict to S3 and read it back in both
        # supported shapes (dict and DataFrame).
        contract = ConnectorContract(uri='s3://project-hadron-cs-repo/factory/healthcare/members', module_name='', handler='')
        handler = S3PersistHandler(connector_contract=contract)
        payload = {'a': [1,2,3,4,5]}
        handler.persist_canonical(payload)
        self.assertIsInstance(handler.load_canonical(), dict)
        self.assertIsInstance(handler.load_canonical(read_params={'as_dataframe': True}), pd.DataFrame)
| 30.033333 | 162 | 0.72364 | import unittest
import pandas as pd
import os
from pprint import pprint
from ds_connectors.handlers.s3_handlers import S3SourceHandler, S3PersistHandler
from aistac.handlers.abstract_handlers import ConnectorContract, HandlerFactory
class S3HandlerTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_aws_connector_init(self):
handler = S3PersistHandler(connector_contract=ConnectorContract(uri='s3://project-hadron-cs-repo/factory/healthcare/members', module_name='', handler=''))
data = {'a': [1,2,3,4,5]}
handler.persist_canonical(data)
result = handler.load_canonical()
self.assertTrue(isinstance(result, dict))
result = handler.load_canonical(read_params={'as_dataframe': True})
self.assertTrue(isinstance(result, pd.DataFrame))
if __name__ == '__main__':
unittest.main()
| true | true |
f7fb60cd85373573cb0521c2d08a753d3e9e4f3f | 1,440 | py | Python | tests/sql_sync/fixtures/db_fixtures_helper.py | jzcruiser/doltpy | e7dfa97c66fa405e1a1ead04228084eaf3c4b0b9 | [
"Apache-2.0"
] | 31 | 2020-11-16T23:47:13.000Z | 2022-03-26T21:39:47.000Z | tests/sql_sync/fixtures/db_fixtures_helper.py | jzcruiser/doltpy | e7dfa97c66fa405e1a1ead04228084eaf3c4b0b9 | [
"Apache-2.0"
] | 61 | 2020-10-01T19:33:33.000Z | 2022-02-24T14:08:07.000Z | tests/sql_sync/fixtures/db_fixtures_helper.py | jzcruiser/doltpy | e7dfa97c66fa405e1a1ead04228084eaf3c4b0b9 | [
"Apache-2.0"
] | 8 | 2020-12-20T21:22:27.000Z | 2021-09-02T12:45:03.000Z | import pytest
import yaml
import sqlalchemy as sa
from retry import retry
@pytest.fixture(scope='session')
def docker_compose_file(tmpdir_factory, mysql_service_def, postgres_service_def, oracle_service_def):
    """Write a session-scoped docker-compose.yml with the three database
    services and return its filesystem path for pytest-docker to consume."""
    target = tmpdir_factory.mktemp('docker_files').join('docker-compose.yml')
    services = {
        'mysql': mysql_service_def,
        'postgres': postgres_service_def,
        'oracle': oracle_service_def,
    }
    with target.open('w') as stream:
        yaml.dump({'version': '2', 'services': services}, stream=stream)
    return target.strpath
def engine_helper(dialect: str, user: str, password: str, host: str, port: int, database: str):
    """Build a SQLAlchemy engine for the given connection parameters and block
    until a connection can actually be opened, retrying while the database
    container is still starting up.

    :param dialect: SQLAlchemy dialect name, e.g. 'mysql+pymysql'
    :param user: database user name
    :param password: password for ``user``
    :param host: database host
    :param port: database port
    :param database: database/schema name
    :return: a verified sqlalchemy Engine
    """
    url = '{dialect}://{user}:{password}@{host}:{port}/{database}'.format(
        dialect=dialect, user=user, password=password,
        host=host, port=port, database=database,
    )
    engine = sa.create_engine(url, echo=True)

    # The container may not be accepting connections yet: probe with retries
    # before handing the engine back.
    @retry(delay=2, tries=10, exceptions=(
        sa.exc.OperationalError,
        sa.exc.DatabaseError,
        sa.exc.InterfaceError,
    ))
    def _probe():
        connection = engine.connect()
        connection.close()
        return engine

    return _probe()
| 24 | 101 | 0.602083 | import pytest
import yaml
import sqlalchemy as sa
from retry import retry
@pytest.fixture(scope='session')
def docker_compose_file(tmpdir_factory, mysql_service_def, postgres_service_def, oracle_service_def):
compose_file = tmpdir_factory.mktemp('docker_files').join('docker-compose.yml')
compose_conf = {
'version': '2',
'services': {
'mysql': mysql_service_def,
'postgres': postgres_service_def,
'oracle': oracle_service_def
}
}
with compose_file.open('w') as f:
yaml.dump(compose_conf, stream=f)
return compose_file.strpath
def engine_helper(dialect: str, user: str, password: str, host: str, port: int, database: str):
engine = sa.create_engine(
'{dialect}://{user}:{password}@{host}:{port}/{database}'.format(
dialect=dialect,
user=user,
password=password,
host=host,
port=port,
database=database
),
echo=True
)
@retry(delay=2, tries=10, exceptions=(
sa.exc.OperationalError,
sa.exc.DatabaseError,
sa.exc.InterfaceError,
))
def verify_connection():
conn = engine.connect()
conn.close()
return engine
return verify_connection()
| true | true |
f7fb612b84fc3d55082918ad74acdaef0b71b081 | 8,122 | py | Python | network.py | KimMeen/STGCN | 5f67ccb68aae857b92a63bd9bfcbb79dd31f4c66 | [
"Apache-2.0"
] | 6 | 2020-11-09T21:19:45.000Z | 2022-03-09T08:46:58.000Z | network.py | KimMeen/STGCN | 5f67ccb68aae857b92a63bd9bfcbb79dd31f4c66 | [
"Apache-2.0"
] | 1 | 2020-11-09T21:57:27.000Z | 2020-11-24T15:54:00.000Z | network.py | KimMeen/STGCN | 5f67ccb68aae857b92a63bd9bfcbb79dd31f4c66 | [
"Apache-2.0"
] | 1 | 2021-11-25T20:55:30.000Z | 2021-11-25T20:55:30.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 20 18:30:55 2020
@author: Ming Jin
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import dgl.function as fn
from dgl import DGLGraph
from layers import TemporalConvLayer, TemporalConvLayer_Residual, SpatialConvLayer, OutputLayer, OutputLayer_Simple
class STGCN(nn.Module):
'''
STGCN network described in the paper (Figure 2)
Inputs:
c: channels, e.g. [1, 64, 16, 64, 64, 16, 64] where [1, 64] means c_in and c_out for the first temporal layer
T: window length, e.g. 12
n: num_nodes
g: fixed DGLGraph
p: dropout after each 'sandwich', i.e. 'TSTN', block
control_str: model strcture controller, e.g. 'TSTNTSTN'; T: Temporal Layer, S: Spatio Layer, N: Norm Layer
x: input feature matrix with the shape [batch, 1, T, n]
Return:
y: output with the shape [batch, 1, 1, n]
'''
def __init__(self, c, T, n, g, p, control_str):
super(STGCN, self).__init__()
self.control_str = control_str
self.num_layers = len(control_str)
self.num_nodes = n
self.layers = nn.ModuleList()
self.dropout = nn.Dropout(p)
# Temporal conv kernel size set to 3
self.Kt = 3
# c_index controls the change of channels
c_index = 0
num_temporal_layers = 0
# construct network based on 'control_str'
for i in range(self.num_layers):
layer_i = control_str[i]
# Temporal Layer
if layer_i == 'T':
self.layers.append(TemporalConvLayer_Residual(c[c_index], c[c_index + 1], kernel = self.Kt))
c_index += 1
num_temporal_layers += 1
# Spatio Layer
if layer_i == 'S':
self.layers.append(SpatialConvLayer(c[c_index], c[c_index + 1], g))
c_index += 1
# Norm Layer
if layer_i == 'N':
# TODO: The meaning of this layernorm
self.layers.append(nn.LayerNorm([n, c[c_index]]))
# c[c_index] is the last element in 'c'
# T - (self.Kt - 1) * num_temporal_layers returns the timesteps after previous temporal layer transformations cuz dialiation = 1
self.output = OutputLayer(c[c_index], T - (self.Kt - 1) * num_temporal_layers, self.num_nodes)
for layer in self.layers:
layer = layer.cuda()
def forward(self, x):
# Example:
# batch=64, input_channel=1, window_length=12, num_nodes=207, temporal_kernel = 2
# input.shape: torch.Size([64, 1, 12, 207])
# T output.shape: torch.Size([64, 64, 11, 207])
# S output.shape: torch.Size([64, 16, 11, 207])
# T output.shape: torch.Size([64, 64, 10, 207])
# N output.shape: torch.Size([64, 64, 10, 207])
# T output.shape: torch.Size([64, 64, 9, 207])
# S output.shape: torch.Size([64, 16, 9, 207])
# T output.shape: torch.Size([64, 64, 8, 207])
# OutputLayer output.shape: torch.Size([64, 1, 1, 207])
for i in range(self.num_layers):
layer_i = self.control_str[i]
if layer_i == 'N':
# x.permute(0, 2, 3, 1) leads
# [batch, channel, timesteps, nodes] to [batch, timesteps, nodes, channel]
# self.layers[i](x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) leads
# [batch, timesteps, nodes, channel] to [batch, channel, timesteps, nodes]
x = self.dropout(self.layers[i](x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2))
else:
# x.shape is [batch, channel, timesteps, nodes]
x = self.layers[i](x)
return self.output(x) # [batch, 1, 1, nodes]
class STGCN_WAVE(nn.Module):
'''
Improved variation of the above STGCN network
+ Extra temporal conv, layernorm, and sophisticated output layer design
+ temporal conv with increasing dialations like in TCN
Inputs:
c: channels, e.g. [1, 16, 32, 64, 32, 128] where [1, 16] means c_in and c_out for the first temporal layer
T: window length, e.g. 144, which should larger than the total dialations
n: num_nodes
g: fixed DGLGraph
p: dropout
control_str: model strcture controller, e.g. 'TNTSTNTSTN'; T: Temporal Layer, S: Spatio Layer, N: Norm Layer
x: input feature matrix with the shape [batch, 1, T, n]
Return:
y: output with the shape [batch, 1, 1, n]
Notice:
** Temporal layer changes c_in to c_out, but spatial layer doesn't change
in this way where c_in = c_out = c
'''
def __init__(self, c, T, n, g, p, control_str):
super(STGCN_WAVE, self).__init__()
self.control_str = control_str
self.num_layers = len(control_str)
self.layers = nn.ModuleList()
self.dropout = nn.Dropout(p)
# c_index controls the change of channels
c_index = 0
# diapower controls the change of dilations in temporal CNNs where dilation = 2^diapower
diapower = 0
# construct network based on 'control_str'
for i in range(self.num_layers):
layer_i = control_str[i]
# Temporal Layer
if layer_i == 'T':
# Notice: dialation = 2^diapower (e.g. 1, 2, 4, 8) so that
# T_out = T_in - dialation * (kernel_size - 1) - 1 + 1
# if padding = 0 and stride = 1
self.layers.append(TemporalConvLayer_Residual(c[c_index], c[c_index + 1], dia = 2**diapower))
diapower += 1
c_index += 1
# Spatio Layer
if layer_i == 'S':
self.layers.append(SpatialConvLayer(c[c_index], c[c_index], g))
# Norm Layer
if layer_i == 'N':
# TODO: The meaning of this layernorm
self.layers.append(nn.LayerNorm([n, c[c_index]]))
# c[c_index] is the last element in 'c'
# T + 1 - 2**(diapower) returns the timesteps after previous temporal layer transformations
# 'n' will be needed by LayerNorm inside of the OutputLayer
self.output = OutputLayer(c[c_index], T + 1 - 2**(diapower), n)
for layer in self.layers:
layer = layer.cuda()
def forward(self, x):
# Example:
# batch=8, input_channel=1, window_length=144, num_nodes=207, temporal_kernel = 2
# x.shape: torch.Size([8, 1, 144, 207])
# T output.shape: torch.Size([8, 16, 143, 207])
# N output.shape: torch.Size([8, 16, 143, 207])
# T output.shape: torch.Size([8, 32, 141, 207])
# S output.shape: torch.Size([8, 32, 141, 207])
# T output.shape: torch.Size([8, 64, 137, 207])
# N output.shape: torch.Size([8, 64, 137, 207])
# T output.shape: torch.Size([8, 32, 129, 207])
# S output.shape: torch.Size([8, 32, 129, 207])
# T output.shape: torch.Size([8, 128, 113, 207])
# Outputlayer output.shape: torch.Size([8, 1, 1, 207])
for i in range(self.num_layers):
layer_i = self.control_str[i]
if layer_i == 'N':
# x.permute(0, 2, 3, 1) leads
# [batch, channel, timesteps, nodes] to [batch, timesteps, nodes, channel]
# self.layers[i](x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) leads
# [batch, timesteps, nodes, channel] to [batch, channel, timesteps, nodes]
x = self.dropout(self.layers[i](x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2))
else:
# x.shape is [batch, channel, timesteps, nodes]
x = self.layers[i](x)
return self.output(x) # [batch, 1, 1, nodes] | 40.81407 | 136 | 0.548141 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import dgl.function as fn
from dgl import DGLGraph
from layers import TemporalConvLayer, TemporalConvLayer_Residual, SpatialConvLayer, OutputLayer, OutputLayer_Simple
class STGCN(nn.Module):
def __init__(self, c, T, n, g, p, control_str):
super(STGCN, self).__init__()
self.control_str = control_str
self.num_layers = len(control_str)
self.num_nodes = n
self.layers = nn.ModuleList()
self.dropout = nn.Dropout(p)
self.Kt = 3
c_index = 0
num_temporal_layers = 0
for i in range(self.num_layers):
layer_i = control_str[i]
if layer_i == 'T':
self.layers.append(TemporalConvLayer_Residual(c[c_index], c[c_index + 1], kernel = self.Kt))
c_index += 1
num_temporal_layers += 1
if layer_i == 'S':
self.layers.append(SpatialConvLayer(c[c_index], c[c_index + 1], g))
c_index += 1
if layer_i == 'N':
self.layers.append(nn.LayerNorm([n, c[c_index]]))
self.output = OutputLayer(c[c_index], T - (self.Kt - 1) * num_temporal_layers, self.num_nodes)
for layer in self.layers:
layer = layer.cuda()
def forward(self, x):
for i in range(self.num_layers):
layer_i = self.control_str[i]
if layer_i == 'N':
x = self.dropout(self.layers[i](x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2))
else:
x = self.layers[i](x)
return self.output(x)
class STGCN_WAVE(nn.Module):
def __init__(self, c, T, n, g, p, control_str):
super(STGCN_WAVE, self).__init__()
self.control_str = control_str
self.num_layers = len(control_str)
self.layers = nn.ModuleList()
self.dropout = nn.Dropout(p)
c_index = 0
diapower = 0
for i in range(self.num_layers):
layer_i = control_str[i]
if layer_i == 'T':
self.layers.append(TemporalConvLayer_Residual(c[c_index], c[c_index + 1], dia = 2**diapower))
diapower += 1
c_index += 1
if layer_i == 'S':
self.layers.append(SpatialConvLayer(c[c_index], c[c_index], g))
if layer_i == 'N':
self.layers.append(nn.LayerNorm([n, c[c_index]]))
self.output = OutputLayer(c[c_index], T + 1 - 2**(diapower), n)
for layer in self.layers:
layer = layer.cuda()
def forward(self, x):
for i in range(self.num_layers):
layer_i = self.control_str[i]
if layer_i == 'N':
x = self.dropout(self.layers[i](x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2))
else:
x = self.layers[i](x)
return self.output(x) | true | true |
f7fb6222648b99c20bc8837e865ea751c71412b9 | 10,053 | py | Python | contrib/spendfrom/spendfrom.py | metalheadani/cryptocare | b1e6d56a63b88d466668788e12d73038db4a490f | [
"MIT"
] | null | null | null | contrib/spendfrom/spendfrom.py | metalheadani/cryptocare | b1e6d56a63b88d466668788e12d73038db4a490f | [
"MIT"
] | null | null | null | contrib/spendfrom/spendfrom.py | metalheadani/cryptocare | b1e6d56a63b88d466668788e12d73038db4a490f | [
"MIT"
] | 1 | 2018-05-13T07:06:51.000Z | 2018-05-13T07:06:51.000Z | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19031 if testnet else 9031
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| 37.511194 | 111 | 0.632249 |
mport *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0)
if not 'rpcport' in config:
config['rpcport'] = 19031 if testnet else 9031
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| true | true |
f7fb6235a443779b8812d9669051db1d0cab6400 | 425 | py | Python | item_h/server.py | augustocarrlos10/Flask_TecWeb | 5e99f20d61c42a9a1b621abe40c7410f8ac9890f | [
"MIT"
] | null | null | null | item_h/server.py | augustocarrlos10/Flask_TecWeb | 5e99f20d61c42a9a1b621abe40c7410f8ac9890f | [
"MIT"
] | null | null | null | item_h/server.py | augustocarrlos10/Flask_TecWeb | 5e99f20d61c42a9a1b621abe40c7410f8ac9890f | [
"MIT"
] | null | null | null | #h. Send data to Flask template (Jinja2)
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
def student():
return render_template('student.html')
@app.route('/result', methods=['POST', 'GET'])
def result():
if request.method == 'POST':
result = request.form
return render_template("result.html", result=result)
if __name__ == '__main__':
app.run(debug=True)
| 19.318182 | 58 | 0.675294 |
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
def student():
return render_template('student.html')
@app.route('/result', methods=['POST', 'GET'])
def result():
if request.method == 'POST':
result = request.form
return render_template("result.html", result=result)
if __name__ == '__main__':
app.run(debug=True)
| true | true |
f7fb6264dd17770dc115fcbb983a2d087d65a732 | 943 | py | Python | kubernetes/test/test_v1_daemon_set_spec.py | L3T/python | b6e4ae81a2afb49f668a142eb7d1c6e2571ef478 | [
"Apache-2.0"
] | 2 | 2020-06-21T08:03:18.000Z | 2020-06-21T09:53:29.000Z | kubernetes/test/test_v1_daemon_set_spec.py | L3T/python | b6e4ae81a2afb49f668a142eb7d1c6e2571ef478 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_daemon_set_spec.py | L3T/python | b6e4ae81a2afb49f668a142eb7d1c6e2571ef478 | [
"Apache-2.0"
] | 1 | 2020-06-21T08:03:17.000Z | 2020-06-21T08:03:17.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.16
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1_daemon_set_spec import V1DaemonSetSpec # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1DaemonSetSpec(unittest.TestCase):
"""V1DaemonSetSpec unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1DaemonSetSpec(self):
"""Test V1DaemonSetSpec"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.v1_daemon_set_spec.V1DaemonSetSpec() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 23.575 | 124 | 0.718982 |
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1_daemon_set_spec import V1DaemonSetSpec
from kubernetes.client.rest import ApiException
class TestV1DaemonSetSpec(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testV1DaemonSetSpec(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
f7fb62e1a057c12ecf35183f431577c91d45b6fa | 379 | py | Python | ptranking/ltr_adversarial/pointwise/point_discriminator.py | ryo59/ptranking | f06fd768de6dd5eaa3c931f191d907f56c147d09 | [
"MIT"
] | 236 | 2020-08-31T04:20:48.000Z | 2022-03-23T07:01:46.000Z | ptranking/ltr_adversarial/pointwise/point_discriminator.py | ryo59/ptranking | f06fd768de6dd5eaa3c931f191d907f56c147d09 | [
"MIT"
] | 7 | 2020-09-06T06:08:28.000Z | 2022-02-22T01:29:30.000Z | ptranking/ltr_adversarial/pointwise/point_discriminator.py | ryo59/ptranking | f06fd768de6dd5eaa3c931f191d907f56c147d09 | [
"MIT"
] | 30 | 2020-09-01T17:07:12.000Z | 2022-03-13T17:43:41.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from ptranking.ltr_adversarial.base.ad_player import AdversarialPlayer
class Point_Discriminator(AdversarialPlayer):
'''
A pointwise discriminator
'''
def __init__(self, sf_para_dict=None, gpu=False, device=None):
super(Point_Discriminator, self).__init__(sf_para_dict=sf_para_dict, gpu=gpu, device=device)
| 31.583333 | 100 | 0.738786 |
from ptranking.ltr_adversarial.base.ad_player import AdversarialPlayer
class Point_Discriminator(AdversarialPlayer):
def __init__(self, sf_para_dict=None, gpu=False, device=None):
super(Point_Discriminator, self).__init__(sf_para_dict=sf_para_dict, gpu=gpu, device=device)
| true | true |
f7fb62e6b03b704ecd23f01a294cba3391bf54dc | 217 | py | Python | pylgrum/tui/__init__.py | jrheling/pylgrum | 1e95a56acdfdd865404da5dc94e47c7fe858095f | [
"MIT"
] | 2 | 2020-12-27T09:35:19.000Z | 2021-07-27T01:29:05.000Z | pylgrum/tui/__init__.py | jrheling/pylgrum | 1e95a56acdfdd865404da5dc94e47c7fe858095f | [
"MIT"
] | 4 | 2020-03-24T18:15:38.000Z | 2021-06-02T00:55:56.000Z | pylgrum/tui/__init__.py | jrheling/pylgrum | 1e95a56acdfdd865404da5dc94e47c7fe858095f | [
"MIT"
] | 2 | 2020-12-01T15:24:16.000Z | 2020-12-27T09:47:00.000Z | """The pylgrum.tui sub-package includes classes to drive text-mode play.
Classes all extend base classes from `pylgrum`:
TUIGame: simple console-based proof-of-concept
TUIPlayer: simple console-based PoC
""" | 31 | 72 | 0.75576 | true | true | |
f7fb635810a051d73e49bfa65dc1d3e5a0f75801 | 1,925 | py | Python | tests/xi_test.py | bbstats/xicor | 69d61d5cff5a0e3bc7a2d95660ed3e8e48ef0341 | [
"MIT"
] | 118 | 2021-02-26T22:50:52.000Z | 2022-02-22T08:05:03.000Z | tests/xi_test.py | bbstats/xicor | 69d61d5cff5a0e3bc7a2d95660ed3e8e48ef0341 | [
"MIT"
] | 8 | 2021-07-15T20:43:56.000Z | 2022-03-04T14:14:19.000Z | tests/xi_test.py | bbstats/xicor | 69d61d5cff5a0e3bc7a2d95660ed3e8e48ef0341 | [
"MIT"
] | 7 | 2021-12-26T01:56:04.000Z | 2022-03-30T00:15:35.000Z | import random
import numpy as np
import pytest
from xicor.xicor import Xi
"""
From Wikipedia:
Anscombe's quartet comprises four data sets that have nearly
identical simple descriptive statistics, yet have very different distributions
and appear very different when graphed. Each dataset consists of eleven
(x,y) points. They were constructed in 1973 by the
statistician Francis Anscombe to demonstrate both the importance of graphing
data before analyzing it and the effect of outliers and other influential
observations on statistical properties.
"""
@pytest.fixture
def anscombes_xis(anscombes_quartet):
random.seed(2020)
np.random.seed(2020)
xis = {
"xi_1": Xi(anscombes_quartet["x_1"], anscombes_quartet["y_1"]),
"xi_2": Xi(anscombes_quartet["x_2"], anscombes_quartet["y_2"]),
"xi_3": Xi(anscombes_quartet["x_3"], anscombes_quartet["y_3"]),
"xi_4": Xi(anscombes_quartet["x_4"], anscombes_quartet["y_4"]),
}
return xis
def test_xi_correlations(anscombes_xis):
random.seed(2020)
np.random.seed(2020)
assert anscombes_xis["xi_1"].correlation == 0.2749999999999999
assert anscombes_xis["xi_2"].correlation == 0.6
assert anscombes_xis["xi_3"].correlation == 0.6190476190476191
assert anscombes_xis["xi_4"].correlation == 0.1000000000000002
def test_p_val_asymptotic(anscombes_xis):
random.seed(2020)
np.random.seed(2020)
# values taken from R code
assert (
anscombes_xis["xi_1"].pval_asymptotic(ties=False, nperm=1000)
== 0.07841556446646347
)
assert (
anscombes_xis["xi_2"].pval_asymptotic(ties=False, nperm=1000)
== 0.0010040217037570187
)
assert (
anscombes_xis["xi_3"].pval_asymptotic(ties=False, nperm=1000)
== 0.04989192742513937
)
assert (
anscombes_xis["xi_4"].pval_asymptotic(ties=False, nperm=1000)
== 0.2599336349448975
)
| 31.048387 | 78 | 0.71013 | import random
import numpy as np
import pytest
from xicor.xicor import Xi
@pytest.fixture
def anscombes_xis(anscombes_quartet):
random.seed(2020)
np.random.seed(2020)
xis = {
"xi_1": Xi(anscombes_quartet["x_1"], anscombes_quartet["y_1"]),
"xi_2": Xi(anscombes_quartet["x_2"], anscombes_quartet["y_2"]),
"xi_3": Xi(anscombes_quartet["x_3"], anscombes_quartet["y_3"]),
"xi_4": Xi(anscombes_quartet["x_4"], anscombes_quartet["y_4"]),
}
return xis
def test_xi_correlations(anscombes_xis):
random.seed(2020)
np.random.seed(2020)
assert anscombes_xis["xi_1"].correlation == 0.2749999999999999
assert anscombes_xis["xi_2"].correlation == 0.6
assert anscombes_xis["xi_3"].correlation == 0.6190476190476191
assert anscombes_xis["xi_4"].correlation == 0.1000000000000002
def test_p_val_asymptotic(anscombes_xis):
random.seed(2020)
np.random.seed(2020)
assert (
anscombes_xis["xi_1"].pval_asymptotic(ties=False, nperm=1000)
== 0.07841556446646347
)
assert (
anscombes_xis["xi_2"].pval_asymptotic(ties=False, nperm=1000)
== 0.0010040217037570187
)
assert (
anscombes_xis["xi_3"].pval_asymptotic(ties=False, nperm=1000)
== 0.04989192742513937
)
assert (
anscombes_xis["xi_4"].pval_asymptotic(ties=False, nperm=1000)
== 0.2599336349448975
)
| true | true |
f7fb6391176c926b55db3084ba67b16f08ad90a4 | 1,888 | py | Python | examples/dfp/v201711/user_service/get_user_by_email_address.py | christineyi3898/googleads-python-lib | cd707dc897b93cf1bbb19355f7424e7834e7fb55 | [
"Apache-2.0"
] | 1 | 2019-10-21T04:10:22.000Z | 2019-10-21T04:10:22.000Z | examples/dfp/v201711/user_service/get_user_by_email_address.py | christineyi3898/googleads-python-lib | cd707dc897b93cf1bbb19355f7424e7834e7fb55 | [
"Apache-2.0"
] | null | null | null | examples/dfp/v201711/user_service/get_user_by_email_address.py | christineyi3898/googleads-python-lib | cd707dc897b93cf1bbb19355f7424e7834e7fb55 | [
"Apache-2.0"
] | 1 | 2019-10-21T04:10:51.000Z | 2019-10-21T04:10:51.000Z | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets users by email.
"""
# Import appropriate modules from the client library.
from googleads import dfp
EMAIL_ADDRESS = 'INSERT_EMAIL_ADDRESS_HERE'
def main(client, email_address):
# Initialize appropriate service.
user_service = client.GetService('UserService', version='v201711')
# Create a statement to select users.
statement = (dfp.StatementBuilder()
.Where('email = :email')
.WithBindVariable('email', email_address))
# Retrieve a small amount of users at a time, paging
# through until all users have been retrieved.
while True:
response = user_service.getUsersByStatement(statement.ToStatement())
if 'results' in response and len(response['results']):
for user in response['results']:
# Print out some information for each user.
print('User with ID "%d" and name "%s" was found.\n' % (user['id'],
user['name']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, EMAIL_ADDRESS)
| 35.622642 | 78 | 0.692797 |
"""This example gets users by email.
"""
from googleads import dfp
EMAIL_ADDRESS = 'INSERT_EMAIL_ADDRESS_HERE'
def main(client, email_address):
user_service = client.GetService('UserService', version='v201711')
statement = (dfp.StatementBuilder()
.Where('email = :email')
.WithBindVariable('email', email_address))
while True:
response = user_service.getUsersByStatement(statement.ToStatement())
if 'results' in response and len(response['results']):
for user in response['results']:
print('User with ID "%d" and name "%s" was found.\n' % (user['id'],
user['name']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, EMAIL_ADDRESS)
| false | true |
f7fb63bc6b75023e46abe2b059d9f5498c154b87 | 1,574 | py | Python | bin/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_create_parameters.py | zdmc23/bash-lambda-layer | e762df0189cfb894dab2d96bae1655b8857d5efb | [
"MIT"
] | null | null | null | bin/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_create_parameters.py | zdmc23/bash-lambda-layer | e762df0189cfb894dab2d96bae1655b8857d5efb | [
"MIT"
] | null | null | null | bin/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_create_parameters.py | zdmc23/bash-lambda-layer | e762df0189cfb894dab2d96bae1655b8857d5efb | [
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DataLakeAnalyticsCatalogCredentialCreateParameters(Model):
"""Data Lake Analytics catalog credential creation parameters.
:param password: the password for the credential and user with access to
the data source.
:type password: str
:param uri: the URI identifier for the data source this credential can
connect to in the format <hostname>:<port>
:type uri: str
:param user_id: the object identifier for the user associated with this
credential with access to the data source.
:type user_id: str
"""
_validation = {
'password': {'required': True},
'uri': {'required': True},
'user_id': {'required': True},
}
_attribute_map = {
'password': {'key': 'password', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'user_id': {'key': 'userId', 'type': 'str'},
}
def __init__(self, password, uri, user_id):
self.password = password
self.uri = uri
self.user_id = user_id
| 34.977778 | 77 | 0.57878 |
from msrest.serialization import Model
class DataLakeAnalyticsCatalogCredentialCreateParameters(Model):
_validation = {
'password': {'required': True},
'uri': {'required': True},
'user_id': {'required': True},
}
_attribute_map = {
'password': {'key': 'password', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'user_id': {'key': 'userId', 'type': 'str'},
}
def __init__(self, password, uri, user_id):
self.password = password
self.uri = uri
self.user_id = user_id
| true | true |
f7fb644a9ddb62c0e7f97a8b3e6205ba23ae7c60 | 4,735 | py | Python | setup.py | tgodaA/cvprac | 52a44d8a098ee25761344421b99d09eeb4d19784 | [
"BSD-3-Clause"
] | null | null | null | setup.py | tgodaA/cvprac | 52a44d8a098ee25761344421b99d09eeb4d19784 | [
"BSD-3-Clause"
] | null | null | null | setup.py | tgodaA/cvprac | 52a44d8a098ee25761344421b99d09eeb4d19784 | [
"BSD-3-Clause"
] | null | null | null | #
# Copyright (c) 2016, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" This module provides a RESTful API client for Cloudvision(R) Portal (CVP)
which can be used for building applications that work with Arista CVP.
"""
import io
from os import path, walk
from glob import glob
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from cvprac import __version__, __author__
def find_modules(pkg):
''' Return all modules from the pkg
'''
modules = [pkg]
for dirname, dirnames, _ in walk(pkg):
for subdirname in dirnames:
modules.append(path.join(dirname, subdirname))
return modules
def get_long_description():
''' Get the long description from README.rst if it exists.
Null string is returned if README.rst is non-existent
'''
long_description = ''
here = path.abspath(path.dirname(__file__))
try:
with io.open(path.join(here, 'README.md'), encoding='utf-8') as file_hdl:
long_description = file_hdl.read()
except IOError:
pass
return long_description
setup(
name='cvprac',
version=__version__,
description='Arista Cloudvision(R) Portal Rest API Client written in python',
long_description=get_long_description(),
long_description_content_type='text/markdown',
author=__author__,
author_email='eosplus-dev@arista.com',
url='https://github.com/aristanetworks/cvprac',
download_url='https://github.com/aristanetworks/cvprac/tarball/%s' % __version__,
license='BSD-3',
packages=find_modules('cvprac'),
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Networking',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# What does your project relate to?
keywords='networking CloudVision development rest api',
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['requests>=1.0.0'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev]
extras_require={
'dev': ['check-manifest', 'pep8', 'pyflakes', 'pylint', 'coverage',
'pyyaml'],
},
)
| 37.579365 | 85 | 0.700528 |
import io
from os import path, walk
from glob import glob
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from cvprac import __version__, __author__
def find_modules(pkg):
modules = [pkg]
for dirname, dirnames, _ in walk(pkg):
for subdirname in dirnames:
modules.append(path.join(dirname, subdirname))
return modules
def get_long_description():
long_description = ''
here = path.abspath(path.dirname(__file__))
try:
with io.open(path.join(here, 'README.md'), encoding='utf-8') as file_hdl:
long_description = file_hdl.read()
except IOError:
pass
return long_description
setup(
name='cvprac',
version=__version__,
description='Arista Cloudvision(R) Portal Rest API Client written in python',
long_description=get_long_description(),
long_description_content_type='text/markdown',
author=__author__,
author_email='eosplus-dev@arista.com',
url='https://github.com/aristanetworks/cvprac',
download_url='https://github.com/aristanetworks/cvprac/tarball/%s' % __version__,
license='BSD-3',
packages=find_modules('cvprac'),
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Networking',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
keywords='networking CloudVision development rest api',
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['requests>=1.0.0'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev]
extras_require={
'dev': ['check-manifest', 'pep8', 'pyflakes', 'pylint', 'coverage',
'pyyaml'],
},
)
| true | true |
f7fb65013edae6f0f2dba656cd44875d668e673a | 2,596 | py | Python | docs/sphinxext/doxybridge/project.py | Oghma/speect | f618e8d651cb9ec4c90cc244af3e7aa993599f6d | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 5 | 2016-01-29T14:39:46.000Z | 2019-04-24T14:45:55.000Z | docs/sphinxext/doxybridge/project.py | Oghma/speect | f618e8d651cb9ec4c90cc244af3e7aa993599f6d | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | docs/sphinxext/doxybridge/project.py | Oghma/speect | f618e8d651cb9ec4c90cc244af3e7aa993599f6d | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 7 | 2015-09-17T14:45:05.000Z | 2020-03-30T13:19:29.000Z | """
doxybridge.project
~~~~~~~~~~~~~~~~~~
:copyright: Copyright (c) 2011 The Department of Arts and Culture, The Government
of the Republic of South Africa.
:copyright: Copyright (c) 2009, Michael Jones
:license: MIT, see LICENSE for details.
"""
import sys
class ProjectInfo(object):
def __init__(self, name, path, reference, domain, match):
self._name = name
self._path = path
self._reference = reference
self._domain = domain
self._match = match
def name(self):
return self._name
def path(self):
return self._path
def reference(self):
return self._reference
def domain(self):
return self._domain
class ProjectInfoFactory(object):
def __init__(self, match):
self.match = match
self.projects = {}
self.default_project = None
self.project_count = 0
self.project_info_store = {}
def update(
self,
projects,
default_project,
):
self.projects = projects
self.default_project = default_project
def default_path(self):
return self.projects[self.default_project][0]
def default_domain(self):
return self.projects[self.default_project][1]
def create_project_info(self, options):
name = ""
path = self.default_path()
domain = self.default_domain()
if options.has_key("project"):
try:
path = self.projects[ options["project"] ][0]
name = options["project"]
domain = self.projects[ options["project"] ][1]
except KeyError, e:
sys.stderr.write(
"Unable to find project '%s' in doxygen_projects dictionary" \
% options["project"]
)
if options.has_key("path"):
path = options["path"]
if options.has_key("domain"):
domain = options["domain"]
try:
return self.project_info_store[path]
except KeyError:
reference = name
if not name:
name = "project%s" % self.project_count
reference = path
self.project_count += 1
project_info = ProjectInfo(
name,
path,
reference,
domain,
self.match
)
self.project_info_store[path] = project_info
return project_info
| 23.387387 | 85 | 0.534669 | """
doxybridge.project
~~~~~~~~~~~~~~~~~~
:copyright: Copyright (c) 2011 The Department of Arts and Culture, The Government
of the Republic of South Africa.
:copyright: Copyright (c) 2009, Michael Jones
:license: MIT, see LICENSE for details.
"""
import sys
class ProjectInfo(object):
def __init__(self, name, path, reference, domain, match):
self._name = name
self._path = path
self._reference = reference
self._domain = domain
self._match = match
def name(self):
return self._name
def path(self):
return self._path
def reference(self):
return self._reference
def domain(self):
return self._domain
class ProjectInfoFactory(object):
def __init__(self, match):
self.match = match
self.projects = {}
self.default_project = None
self.project_count = 0
self.project_info_store = {}
def update(
self,
projects,
default_project,
):
self.projects = projects
self.default_project = default_project
def default_path(self):
return self.projects[self.default_project][0]
def default_domain(self):
return self.projects[self.default_project][1]
def create_project_info(self, options):
name = ""
path = self.default_path()
domain = self.default_domain()
if options.has_key("project"):
try:
path = self.projects[ options["project"] ][0]
name = options["project"]
domain = self.projects[ options["project"] ][1]
except KeyError, e:
sys.stderr.write(
"Unable to find project '%s' in doxygen_projects dictionary" \
% options["project"]
)
if options.has_key("path"):
path = options["path"]
if options.has_key("domain"):
domain = options["domain"]
try:
return self.project_info_store[path]
except KeyError:
reference = name
if not name:
name = "project%s" % self.project_count
reference = path
self.project_count += 1
project_info = ProjectInfo(
name,
path,
reference,
domain,
self.match
)
self.project_info_store[path] = project_info
return project_info
| false | true |
f7fb65981a8db33eed8c20487f5bc455e7a2483a | 3,993 | py | Python | testing/test_images.py | ZouaghiHoussem/MTCNN_68_TensorFlow | b41dbda229e24d6c79d28c22d910e17fca2618c3 | [
"MIT"
] | 50 | 2017-09-18T16:11:01.000Z | 2022-03-28T15:54:04.000Z | testing/test_images.py | ZouaghiHoussem/MTCNN_68_TensorFlow | b41dbda229e24d6c79d28c22d910e17fca2618c3 | [
"MIT"
] | 7 | 2017-09-18T16:11:36.000Z | 2018-05-05T01:39:24.000Z | testing/test_images.py | ZouaghiHoussem/MTCNN_68_TensorFlow | b41dbda229e24d6c79d28c22d910e17fca2618c3 | [
"MIT"
] | 22 | 2018-03-22T06:31:14.000Z | 2020-03-10T07:20:04.000Z | #coding:utf-8
import tensorflow as tf
import numpy as np
import os
import sys
rootPath = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../"))
sys.path.insert(0, rootPath)
from training.mtcnn_model import P_Net, R_Net, O_Net
from tools.loader import TestLoader
from detection.MtcnnDetector import MtcnnDetector
from detection.detector import Detector
from detection.fcn_detector import FcnDetector
import cv2
import argparse
def test(stage, testFolder):
print("Start testing in %s"%(testFolder))
detectors = [None, None, None]
if stage in ['pnet', 'rnet', 'onet']:
modelPath = os.path.join(rootPath, 'tmp/model/pnet/')
a = [b[5:-6] for b in os.listdir(modelPath) if b.startswith('pnet-') and b.endswith('.index')]
maxEpoch = max(map(int, a)) # auto match a max epoch model
modelPath = os.path.join(modelPath, "pnet-%d"%(maxEpoch))
print("Use PNet model: %s"%(modelPath))
detectors[0] = FcnDetector(P_Net,modelPath)
if stage in ['rnet', 'onet']:
modelPath = os.path.join(rootPath, 'tmp/model/rnet/')
a = [b[5:-6] for b in os.listdir(modelPath) if b.startswith('rnet-') and b.endswith('.index')]
maxEpoch = max(map(int, a))
modelPath = os.path.join(modelPath, "rnet-%d"%(maxEpoch))
print("Use RNet model: %s"%(modelPath))
detectors[1] = Detector(R_Net, 24, 1, modelPath)
if stage in ['onet']:
modelPath = os.path.join(rootPath, 'tmp/model/onet/')
a = [b[5:-6] for b in os.listdir(modelPath) if b.startswith('onet-') and b.endswith('.index')]
maxEpoch = max(map(int, a))
modelPath = os.path.join(modelPath, "onet-%d"%(maxEpoch))
print("Use ONet model: %s"%(modelPath))
detectors[2] = Detector(O_Net, 48, 1, modelPath)
mtcnnDetector = MtcnnDetector(detectors=detectors, min_face_size = 24, threshold=[0.9, 0.6, 0.7])
testImages = []
for name in os.listdir(testFolder):
testImages.append(os.path.join(testFolder, name))
testDatas = TestLoader(testImages)
# Now to detect
allBoxes, allLandmarks = mtcnnDetector.detect_face(testDatas)
print("\n")
# Save it
for idx, imagePath in enumerate(testImages):
image = cv2.imread(imagePath)
for bbox in allBoxes[idx]:
cv2.putText(image,str(np.round(bbox[4],2)),(int(bbox[0]),int(bbox[1])),cv2.FONT_HERSHEY_TRIPLEX,1,color=(255,0,255))
cv2.rectangle(image, (int(bbox[0]),int(bbox[1])),(int(bbox[2]),int(bbox[3])),(0,0,255))
allLandmark = allLandmarks[idx]
if allLandmark is not None: # pnet and rnet will be ignore landmark
for landmark in allLandmark:
for i in range(len(landmark)/2):
cv2.circle(image, (int(landmark[2*i]),int(int(landmark[2*i+1]))), 3, (0,0,255))
savePath = os.path.join(rootPath, 'testing', 'results_%s'%(stage))
if not os.path.isdir(savePath):
os.makedirs(savePath)
cv2.imwrite(os.path.join(savePath, "result_%d.jpg" %(idx)), image)
print("Save image to %s"%(savePath))
def parse_args():
parser = argparse.ArgumentParser(description='Create hard bbox sample...',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--stage', dest='stage', help='working stage, can be pnet, rnet, onet',
default='onet', type=str)
parser.add_argument('--gpus', dest='gpus', help='specify gpu to run. eg: --gpus=0,1',
default='0', type=str)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
stage = args.stage
if stage not in ['pnet', 'rnet', 'onet']:
raise Exception("Please specify stage by --stage=pnet or rnet or onet")
# Support stage: pnet, rnet, onet
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus # set GPU
test(stage, os.path.join(rootPath, "testing", "images"))
| 46.976471 | 128 | 0.63436 |
import tensorflow as tf
import numpy as np
import os
import sys
rootPath = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../"))
sys.path.insert(0, rootPath)
from training.mtcnn_model import P_Net, R_Net, O_Net
from tools.loader import TestLoader
from detection.MtcnnDetector import MtcnnDetector
from detection.detector import Detector
from detection.fcn_detector import FcnDetector
import cv2
import argparse
def test(stage, testFolder):
print("Start testing in %s"%(testFolder))
detectors = [None, None, None]
if stage in ['pnet', 'rnet', 'onet']:
modelPath = os.path.join(rootPath, 'tmp/model/pnet/')
a = [b[5:-6] for b in os.listdir(modelPath) if b.startswith('pnet-') and b.endswith('.index')]
maxEpoch = max(map(int, a))
modelPath = os.path.join(modelPath, "pnet-%d"%(maxEpoch))
print("Use PNet model: %s"%(modelPath))
detectors[0] = FcnDetector(P_Net,modelPath)
if stage in ['rnet', 'onet']:
modelPath = os.path.join(rootPath, 'tmp/model/rnet/')
a = [b[5:-6] for b in os.listdir(modelPath) if b.startswith('rnet-') and b.endswith('.index')]
maxEpoch = max(map(int, a))
modelPath = os.path.join(modelPath, "rnet-%d"%(maxEpoch))
print("Use RNet model: %s"%(modelPath))
detectors[1] = Detector(R_Net, 24, 1, modelPath)
if stage in ['onet']:
modelPath = os.path.join(rootPath, 'tmp/model/onet/')
a = [b[5:-6] for b in os.listdir(modelPath) if b.startswith('onet-') and b.endswith('.index')]
maxEpoch = max(map(int, a))
modelPath = os.path.join(modelPath, "onet-%d"%(maxEpoch))
print("Use ONet model: %s"%(modelPath))
detectors[2] = Detector(O_Net, 48, 1, modelPath)
mtcnnDetector = MtcnnDetector(detectors=detectors, min_face_size = 24, threshold=[0.9, 0.6, 0.7])
testImages = []
for name in os.listdir(testFolder):
testImages.append(os.path.join(testFolder, name))
testDatas = TestLoader(testImages)
allBoxes, allLandmarks = mtcnnDetector.detect_face(testDatas)
print("\n")
for idx, imagePath in enumerate(testImages):
image = cv2.imread(imagePath)
for bbox in allBoxes[idx]:
cv2.putText(image,str(np.round(bbox[4],2)),(int(bbox[0]),int(bbox[1])),cv2.FONT_HERSHEY_TRIPLEX,1,color=(255,0,255))
cv2.rectangle(image, (int(bbox[0]),int(bbox[1])),(int(bbox[2]),int(bbox[3])),(0,0,255))
allLandmark = allLandmarks[idx]
if allLandmark is not None:
for landmark in allLandmark:
for i in range(len(landmark)/2):
cv2.circle(image, (int(landmark[2*i]),int(int(landmark[2*i+1]))), 3, (0,0,255))
savePath = os.path.join(rootPath, 'testing', 'results_%s'%(stage))
if not os.path.isdir(savePath):
os.makedirs(savePath)
cv2.imwrite(os.path.join(savePath, "result_%d.jpg" %(idx)), image)
print("Save image to %s"%(savePath))
def parse_args():
parser = argparse.ArgumentParser(description='Create hard bbox sample...',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--stage', dest='stage', help='working stage, can be pnet, rnet, onet',
default='onet', type=str)
parser.add_argument('--gpus', dest='gpus', help='specify gpu to run. eg: --gpus=0,1',
default='0', type=str)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
stage = args.stage
if stage not in ['pnet', 'rnet', 'onet']:
raise Exception("Please specify stage by --stage=pnet or rnet or onet")
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
test(stage, os.path.join(rootPath, "testing", "images"))
| true | true |
f7fb6666acec27c3471ebbb8630a2e964ae53c7d | 1,480 | py | Python | experiments/rpi/brickpi/LEGO-Motor_Test.py | willingc/pingo | 0890bf5ed763e9061320093fc3fb5f7543c5cc2c | [
"MIT"
] | 13 | 2016-04-12T07:38:02.000Z | 2021-12-24T16:53:53.000Z | experiments/rpi/brickpi/LEGO-Motor_Test.py | willingc/pingo | 0890bf5ed763e9061320093fc3fb5f7543c5cc2c | [
"MIT"
] | 1 | 2021-03-20T05:17:03.000Z | 2021-03-20T05:17:03.000Z | experiments/rpi/brickpi/LEGO-Motor_Test.py | willingc/pingo | 0890bf5ed763e9061320093fc3fb5f7543c5cc2c | [
"MIT"
] | 9 | 2016-04-29T02:21:37.000Z | 2022-03-17T23:35:13.000Z | # Jaikrishna
# Initial Date: June 24, 2013
# Last Updated: June 24, 2013
#
# These files have been made available online through a Creative Commons Attribution-ShareAlike 3.0 license.
# (http://creativecommons.org/licenses/by-sa/3.0/)
#
# http://www.dexterindustries.com/
# This code is for testing the BrickPi with a Lego Motor
from BrickPi import * #import BrickPi.py file to use BrickPi operations
BrickPiSetup() # setup the serial port for communication
BrickPi.MotorEnable[PORT_A] = 1 #Enable the Motor A
BrickPi.MotorEnable[PORT_B] = 1 #Enable the Motor B
BrickPiSetupSensors() #Send the properties of sensors to BrickPi
while True:
print "Running Forward"
BrickPi.MotorSpeed[PORT_A] = 200 #Set the speed of MotorA (-255 to 255)
BrickPi.MotorSpeed[PORT_B] = 200 #Set the speed of MotorB (-255 to 255)
ot = time.time()
while(time.time() - ot < 3): #running while loop for 3 seconds
BrickPiUpdateValues() # Ask BrickPi to update values for sensors/motors
time.sleep(.1) # sleep for 100 ms
print "Running Reverse"
BrickPi.MotorSpeed[PORT_A] = -200 #Set the speed of MotorA (-255 to 255)
BrickPi.MotorSpeed[PORT_B] = -200 #Set the speed of MotorB (-255 to 255)
ot = time.time()
while(time.time() - ot < 3): #running while loop for 3 seconds
BrickPiUpdateValues() # Ask BrickPi to update values for sensors/motors
time.sleep(.1) # sleep for 100 ms | 43.529412 | 109 | 0.685811 |
from BrickPi import *
BrickPiSetup()
BrickPi.MotorEnable[PORT_A] = 1
BrickPi.MotorEnable[PORT_B] = 1
BrickPiSetupSensors()
while True:
print "Running Forward"
BrickPi.MotorSpeed[PORT_A] = 200
BrickPi.MotorSpeed[PORT_B] = 200
ot = time.time()
while(time.time() - ot < 3):
BrickPiUpdateValues()
time.sleep(.1)
print "Running Reverse"
BrickPi.MotorSpeed[PORT_A] = -200
BrickPi.MotorSpeed[PORT_B] = -200
ot = time.time()
while(time.time() - ot < 3):
BrickPiUpdateValues()
time.sleep(.1) | false | true |
f7fb669f111e8b6461b2fc5edca54c4bede91841 | 39,828 | py | Python | sympy/functions/elementary/exponential.py | joha2/sympy | 5c54e5b78bc907569f56996601603b7b574dfc73 | [
"BSD-3-Clause"
] | null | null | null | sympy/functions/elementary/exponential.py | joha2/sympy | 5c54e5b78bc907569f56996601603b7b574dfc73 | [
"BSD-3-Clause"
] | 10 | 2021-07-21T20:56:57.000Z | 2021-07-31T16:35:28.000Z | sympy/functions/elementary/exponential.py | joha2/sympy | 5c54e5b78bc907569f56996601603b7b574dfc73 | [
"BSD-3-Clause"
] | null | null | null | from sympy.core import sympify
from sympy.core.add import Add
from sympy.core.cache import cacheit
from sympy.core.function import (
Function, ArgumentIndexError, _coeff_isneg,
expand_mul, FunctionClass, PoleError)
from sympy.core.logic import fuzzy_and, fuzzy_not, fuzzy_or
from sympy.core.mul import Mul
from sympy.core.numbers import Integer, Rational
from sympy.core.parameters import global_parameters
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.symbol import Wild, Dummy
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.ntheory import multiplicity, perfect_power
# NOTE IMPORTANT
# The series expansion code in this file is an important part of the gruntz
# algorithm for determining limits. _eval_nseries has to return a generalized
# power series with coefficients in C(log(x), log).
# In more detail, the result of _eval_nseries(self, x, n) must be
# c_0*x**e_0 + ... (finitely many terms)
# where e_i are numbers (not necessarily integers) and c_i involve only
# numbers, the function log, and log(x). [This also means it must not contain
# log(x(1+p)), this *has* to be expanded to log(x)+log(1+p) if x.is_positive and
# p.is_positive.]
class ExpBase(Function):
unbranched = True
_singularities = (S.ComplexInfinity,)
def inverse(self, argindex=1):
"""
Returns the inverse function of ``exp(x)``.
"""
return log
def as_numer_denom(self):
"""
Returns this with a positive exponent as a 2-tuple (a fraction).
Examples
========
>>> from sympy.functions import exp
>>> from sympy.abc import x
>>> exp(-x).as_numer_denom()
(1, exp(x))
>>> exp(x).as_numer_denom()
(exp(x), 1)
"""
# this should be the same as Pow.as_numer_denom wrt
# exponent handling
exp = self.exp
neg_exp = exp.is_negative
if not neg_exp and not (-exp).is_negative:
neg_exp = _coeff_isneg(exp)
if neg_exp:
return S.One, self.func(-exp)
return self, S.One
@property
def exp(self):
"""
Returns the exponent of the function.
"""
return self.args[0]
def as_base_exp(self):
"""
Returns the 2-tuple (base, exponent).
"""
return self.func(1), Mul(*self.args)
def _eval_adjoint(self):
return self.func(self.exp.adjoint())
def _eval_conjugate(self):
return self.func(self.exp.conjugate())
def _eval_transpose(self):
return self.func(self.exp.transpose())
def _eval_is_finite(self):
arg = self.exp
if arg.is_infinite:
if arg.is_extended_negative:
return True
if arg.is_extended_positive:
return False
if arg.is_finite:
return True
def _eval_is_rational(self):
s = self.func(*self.args)
if s.func == self.func:
z = s.exp.is_zero
if z:
return True
elif s.exp.is_rational and fuzzy_not(z):
return False
else:
return s.is_rational
def _eval_is_zero(self):
return self.exp is S.NegativeInfinity
def _eval_power(self, other):
"""exp(arg)**e -> exp(arg*e) if assumptions allow it.
"""
b, e = self.as_base_exp()
return Pow._eval_power(Pow(b, e, evaluate=False), other)
def _eval_expand_power_exp(self, **hints):
from sympy import Sum, Product
arg = self.args[0]
if arg.is_Add and arg.is_commutative:
return Mul.fromiter(self.func(x) for x in arg.args)
elif isinstance(arg, Sum) and arg.is_commutative:
return Product(self.func(arg.function), *arg.limits)
return self.func(arg)
class exp_polar(ExpBase):
    r"""
    Represent a 'polar number' (see g-function Sphinx documentation).
    Explanation
    ===========
    ``exp_polar`` represents the function
    `Exp: \mathbb{C} \rightarrow \mathcal{S}`, sending the complex number
    `z = a + bi` to the polar number `r = exp(a), \theta = b`. It is one of
    the main functions to construct polar numbers.
    Examples
    ========
    >>> from sympy import exp_polar, pi, I, exp
    The main difference is that polar numbers don't "wrap around" at `2 \pi`:
    >>> exp(2*pi*I)
    1
    >>> exp_polar(2*pi*I)
    exp_polar(2*I*pi)
    apart from that they behave mostly like classical complex numbers:
    >>> exp_polar(2)*exp_polar(3)
    exp_polar(5)
    See Also
    ========
    sympy.simplify.powsimp.powsimp
    polar_lift
    periodic_argument
    principal_branch
    """
    is_polar = True
    is_comparable = False  # cannot be evalf'd
    def _eval_Abs(self):  # Abs is never a polar number
        from sympy.functions.elementary.complexes import re
        return exp(re(self.args[0]))
    def _eval_evalf(self, prec):
        """ Careful! any evalf of polar numbers is flaky """
        from sympy import im, pi, re
        i = im(self.args[0])
        try:
            # only safe to evaluate when the angle stays within (-pi, pi]
            bad = (i <= -pi or i > pi)
        except TypeError:
            # comparison failed (symbolic imaginary part): refuse to evalf
            bad = True
        if bad:
            return self  # cannot evalf for this argument
        res = exp(self.args[0])._eval_evalf(prec)
        if i > 0 and im(res) < 0:
            # i ~ pi, but exp(I*i) evaluated to argument slightly bigger than pi
            return re(res)
        return res
    def _eval_power(self, other):
        # polar numbers never need branch-cut adjustments when powered
        return self.func(self.args[0]*other)
    def _eval_is_extended_real(self):
        if self.args[0].is_extended_real:
            return True
    def as_base_exp(self):
        # XXX exp_polar(0) is special!
        if self.args[0] == 0:
            return self, S.One
        return ExpBase.as_base_exp(self)
class ExpMeta(FunctionClass):
    """Metaclass making ``isinstance(expr, exp)`` also accept ``E**x``."""
    def __instancecheck__(cls, instance):
        # A Pow with base E is just another spelling of exp(x), so it
        # passes the check, as does anything with exp in its MRO.
        if isinstance(instance, Pow) and instance.base is S.Exp1:
            return True
        return exp in type(instance).__mro__
class exp(ExpBase, metaclass=ExpMeta):
    """
    The exponential function, :math:`e^x`.
    Examples
    ========
    >>> from sympy.functions import exp
    >>> from sympy.abc import x
    >>> from sympy import I, pi
    >>> exp(x)
    exp(x)
    >>> exp(x).diff(x)
    exp(x)
    >>> exp(I*pi)
    -1
    Parameters
    ==========
    arg : Expr
    See Also
    ========
    log
    """
    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        if argindex == 1:
            # d/dx exp(x) == exp(x)
            return self
        else:
            raise ArgumentIndexError(self, argindex)
    def _eval_refine(self, assumptions):
        """Simplify exp(coeff*I*pi) under the given assumptions."""
        from sympy.assumptions import ask, Q
        arg = self.args[0]
        if arg.is_Mul:
            Ioo = S.ImaginaryUnit*S.Infinity
            if arg in [Ioo, -Ioo]:
                return S.NaN
            coeff = arg.as_coefficient(S.Pi*S.ImaginaryUnit)
            if coeff:
                # exp(n*I*pi) cycles through 1, I, -1, -I on half-integers
                if ask(Q.integer(2*coeff)):
                    if ask(Q.even(coeff)):
                        return S.One
                    elif ask(Q.odd(coeff)):
                        return S.NegativeOne
                    elif ask(Q.even(coeff + S.Half)):
                        return -S.ImaginaryUnit
                    elif ask(Q.odd(coeff + S.Half)):
                        return S.ImaginaryUnit
    @classmethod
    def eval(cls, arg):
        """Automatic evaluation: special numbers, log arguments, multiples
        of I*pi, and splitting of Add arguments.  Returns None to remain
        unevaluated."""
        from sympy.calculus import AccumBounds
        from sympy.sets.setexpr import SetExpr
        from sympy.matrices.matrices import MatrixBase
        from sympy import im, logcombine, re
        if isinstance(arg, MatrixBase):
            return arg.exp()
        elif global_parameters.exp_is_pow:
            return Pow(S.Exp1, arg)
        elif arg.is_Number:
            if arg is S.NaN:
                return S.NaN
            elif arg.is_zero:
                return S.One
            elif arg is S.One:
                return S.Exp1
            elif arg is S.Infinity:
                return S.Infinity
            elif arg is S.NegativeInfinity:
                return S.Zero
            elif arg is S.ComplexInfinity:
                return S.NaN
        elif isinstance(arg, log):
            # exp(log(x)) -> x
            return arg.args[0]
        elif isinstance(arg, AccumBounds):
            # exp is monotonic, so it maps interval endpoints to endpoints
            return AccumBounds(exp(arg.min), exp(arg.max))
        elif isinstance(arg, SetExpr):
            return arg._eval_func(cls)
        elif arg.is_Mul:
            coeff = arg.as_coefficient(S.Pi*S.ImaginaryUnit)
            if coeff:
                if (2*coeff).is_integer:
                    if coeff.is_even:
                        return S.One
                    elif coeff.is_odd:
                        return S.NegativeOne
                    elif (coeff + S.Half).is_even:
                        return -S.ImaginaryUnit
                    elif (coeff + S.Half).is_odd:
                        return S.ImaginaryUnit
                elif coeff.is_Rational:
                    ncoeff = coeff % 2  # restrict to [0, 2pi)
                    if ncoeff > 1:  # restrict to (-pi, pi]
                        ncoeff -= 2
                    if ncoeff != coeff:
                        return cls(ncoeff*S.Pi*S.ImaginaryUnit)
            # Warning: code in risch.py will be very sensitive to changes
            # in this (see DifferentialExtension).
            # look for a single log factor
            coeff, terms = arg.as_coeff_Mul()
            # but it can't be multiplied by oo
            if coeff in [S.NegativeInfinity, S.Infinity]:
                if terms.is_number:
                    if coeff is S.NegativeInfinity:
                        terms = -terms
                    if re(terms).is_zero and terms is not S.Zero:
                        return S.NaN
                    if re(terms).is_positive and im(terms) is not S.Zero:
                        return S.ComplexInfinity
                    if re(terms).is_negative:
                        return S.Zero
                return None
            coeffs, log_term = [coeff], None
            for term in Mul.make_args(terms):
                term_ = logcombine(term)
                if isinstance(term_, log):
                    if log_term is None:
                        log_term = term_.args[0]
                    else:
                        # more than one log factor: leave unevaluated
                        return None
                elif term.is_comparable:
                    coeffs.append(term)
                else:
                    return None
            # exp(c*log(b)) -> b**c when exactly one log factor was found
            return log_term**Mul(*coeffs) if log_term else None
        elif arg.is_Add:
            # split evaluable summands out as factors: exp(a+b) -> exp(a)*exp(b)
            out = []
            add = []
            argchanged = False
            for a in arg.args:
                if a is S.One:
                    add.append(a)
                    continue
                newa = cls(a)
                if isinstance(newa, cls):
                    if newa.args[0] != a:
                        add.append(newa.args[0])
                        argchanged = True
                    else:
                        add.append(a)
                else:
                    out.append(newa)
            if out or argchanged:
                return Mul(*out)*cls(Add(*add), evaluate=False)
        if arg.is_zero:
            return S.One
    @property
    def base(self):
        """
        Returns the base of the exponential function.
        """
        return S.Exp1
    @staticmethod
    @cacheit
    def taylor_term(n, x, *previous_terms):
        """
        Calculates the next term in the Taylor series expansion.
        """
        if n < 0:
            return S.Zero
        if n == 0:
            return S.One
        x = sympify(x)
        if previous_terms:
            p = previous_terms[-1]
            if p is not None:
                # x**n/n! == (x**(n-1)/(n-1)!) * x/n
                return p * x / n
        return x**n/factorial(n)
    def as_real_imag(self, deep=True, **hints):
        """
        Returns this function as a 2-tuple representing a complex number.
        Examples
        ========
        >>> from sympy import I
        >>> from sympy.abc import x
        >>> from sympy.functions import exp
        >>> exp(x).as_real_imag()
        (exp(re(x))*cos(im(x)), exp(re(x))*sin(im(x)))
        >>> exp(1).as_real_imag()
        (E, 0)
        >>> exp(I).as_real_imag()
        (cos(1), sin(1))
        >>> exp(1+I).as_real_imag()
        (E*cos(1), E*sin(1))
        See Also
        ========
        sympy.functions.elementary.complexes.re
        sympy.functions.elementary.complexes.im
        """
        from sympy.functions.elementary.trigonometric import cos, sin
        re, im = self.args[0].as_real_imag()
        if deep:
            re = re.expand(deep, **hints)
            im = im.expand(deep, **hints)
        cos, sin = cos(im), sin(im)
        return (exp(re)*cos, exp(re)*sin)
    def _eval_subs(self, old, new):
        # keep processing of power-like args centralized in Pow
        if old.is_Pow:  # handle (exp(3*log(x))).subs(x**2, z) -> z**(3/2)
            old = exp(old.exp*log(old.base))
        elif old is S.Exp1 and new.is_Function:
            old = exp
        if isinstance(old, exp) or old is S.Exp1:
            f = lambda a: Pow(*a.as_base_exp(), evaluate=False) if (
                a.is_Pow or isinstance(a, exp)) else a
            return Pow._eval_subs(f(self), f(old), new)
        if old is exp and not new.is_Function:
            return new**self.exp._subs(old, new)
        return Function._eval_subs(self, old, new)
    def _eval_is_extended_real(self):
        # real argument -> real value; purely imaginary x gives a real
        # value exactly when 2x/(I*pi) is an even integer
        if self.args[0].is_extended_real:
            return True
        elif self.args[0].is_imaginary:
            arg2 = -S(2) * S.ImaginaryUnit * self.args[0] / S.Pi
            return arg2.is_even
    def _eval_is_complex(self):
        # complex unless the argument is +oo-like in the real direction;
        # exp of an extended-negative argument tends to 0 (complex)
        def complex_extended_negative(arg):
            yield arg.is_complex
            yield arg.is_extended_negative
        return fuzzy_or(complex_extended_negative(self.args[0]))
    def _eval_is_algebraic(self):
        # exp(rational*I*pi) is a root of unity (algebraic); otherwise exp
        # of a nonzero algebraic (or nonzero rational multiple of pi) is not
        if (self.exp / S.Pi / S.ImaginaryUnit).is_rational:
            return True
        if fuzzy_not(self.exp.is_zero):
            if self.exp.is_algebraic:
                return False
            elif (self.exp / S.Pi).is_rational:
                return False
    def _eval_is_extended_positive(self):
        if self.exp.is_extended_real:
            # exp of a real is positive except in the -oo limit (value 0)
            return not self.args[0] is S.NegativeInfinity
        elif self.exp.is_imaginary:
            arg2 = -S.ImaginaryUnit * self.args[0] / S.Pi
            return arg2.is_even
    def _eval_nseries(self, x, n, logx, cdir=0):
        # NOTE Please see the comment at the beginning of this file, labelled
        # IMPORTANT.
        from sympy import ceiling, limit, Order, powsimp, Wild, expand_complex
        arg = self.exp
        arg_series = arg._eval_nseries(x, n=n, logx=logx)
        if arg_series.is_Order:
            return 1 + arg_series
        arg0 = limit(arg_series.removeO(), x, 0)
        if arg0 is S.NegativeInfinity:
            return Order(x**n, x)
        if arg0 is S.Infinity:
            return self
        t = Dummy("t")
        nterms = n
        try:
            cf = Order(arg.as_leading_term(x, logx=logx), x).getn()
        except (NotImplementedError, PoleError):
            cf = 0
        if cf and cf > 0:
            nterms = ceiling(n/cf)
        exp_series = exp(t)._taylor(t, nterms)
        r = exp(arg0)*exp_series.subs(t, arg_series - arg0)
        if cf and cf > 1:
            r += Order((arg_series - arg0)**n, x)/x**((cf-1)*n)
        else:
            r += Order((arg_series - arg0)**n, x)
        r = r.expand()
        r = powsimp(r, deep=True, combine='exp')
        # powsimp may introduce unexpanded (-1)**Rational; see PR #17201
        simplerat = lambda x: x.is_Rational and x.q in [3, 4, 6]
        w = Wild('w', properties=[simplerat])
        r = r.replace((-1)**w, expand_complex((-1)**w))
        return r
    def _taylor(self, x, n):
        """Sum of the first ``n`` Maclaurin terms of this exp in ``x``."""
        l = []
        g = None
        for i in range(n):
            g = self.taylor_term(i, self.args[0], g)
            g = g.nseries(x, n=n)
            l.append(g.removeO())
        return Add(*l)
    def _eval_as_leading_term(self, x, logx=None, cdir=0):
        """Leading term is exp of the limit of the argument, if finite."""
        arg = self.args[0].cancel().as_leading_term(x, logx=logx)
        arg0 = arg.subs(x, 0)
        if arg0 is S.NaN:
            arg0 = arg.limit(x, 0)
        if arg0.is_infinite is False:
            return exp(arg0)
        raise PoleError("Cannot expand %s around 0" % (self))
    def _eval_rewrite_as_sin(self, arg, **kwargs):
        # Euler's formula expressed with sin only (cos(y) == sin(y + pi/2))
        from sympy import sin
        I = S.ImaginaryUnit
        return sin(I*arg + S.Pi/2) - I*sin(I*arg)
    def _eval_rewrite_as_cos(self, arg, **kwargs):
        from sympy import cos
        I = S.ImaginaryUnit
        return cos(I*arg) + I*cos(I*arg + S.Pi/2)
    def _eval_rewrite_as_tanh(self, arg, **kwargs):
        # exp(x) == (1 + tanh(x/2))/(1 - tanh(x/2))
        from sympy import tanh
        return (1 + tanh(arg/2))/(1 - tanh(arg/2))
    def _eval_rewrite_as_sqrt(self, arg, **kwargs):
        # exp(I*pi*coeff) in radicals when cos/sin evaluate to closed forms
        from sympy.functions.elementary.trigonometric import sin, cos
        if arg.is_Mul:
            coeff = arg.coeff(S.Pi*S.ImaginaryUnit)
            if coeff and coeff.is_number:
                cosine, sine = cos(S.Pi*coeff), sin(S.Pi*coeff)
                if not isinstance(cosine, cos) and not isinstance (sine, sin):
                    return cosine + S.ImaginaryUnit*sine
    def _eval_rewrite_as_Pow(self, arg, **kwargs):
        # exp(c*log(b)*rest) -> b**(c*rest) using the first single-arg log
        if arg.is_Mul:
            logs = [a for a in arg.args if isinstance(a, log) and len(a.args) == 1]
            if logs:
                return Pow(logs[0].args[0], arg.coeff(logs[0]))
def match_real_imag(expr):
    """
    Try to match expr with a + b*I for real a and b.
    ``match_real_imag`` returns a tuple containing the real and imaginary
    parts of expr or (None, None) if direct matching is not possible. Contrary
    to ``re()``, ``im()``, ``as_real_imag()``, this helper won't force things
    by returning expressions themselves containing ``re()`` or ``im()`` and it
    doesn't expand its argument either.
    """
    # Split off the additive part that does not contain I, without expanding.
    real_part, imag_part = expr.as_independent(S.ImaginaryUnit, as_Add=True)
    if imag_part == 0 and real_part.is_real:
        return (real_part, imag_part)
    # Strip one explicit factor of I; yields None if I appears non-linearly.
    imag_part = imag_part.as_coefficient(S.ImaginaryUnit)
    if imag_part and imag_part.is_real and real_part.is_real:
        return (real_part, imag_part)
    return (None, None)  # simpler to check for than None
class log(Function):
    r"""
    The natural logarithm function `\ln(x)` or `\log(x)`.
    Explanation
    ===========
    Logarithms are taken with the natural base, `e`. To get
    a logarithm of a different base ``b``, use ``log(x, b)``,
    which is essentially short-hand for ``log(x)/log(b)``.
    ``log`` represents the principal branch of the natural
    logarithm. As such it has a branch cut along the negative
    real axis and returns values having a complex argument in
    `(-\pi, \pi]`.
    Examples
    ========
    >>> from sympy import log, sqrt, S, I
    >>> log(8, 2)
    3
    >>> log(S(8)/3, 2)
    -log(3)/log(2) + 3
    >>> log(-1 + I*sqrt(3))
    log(2) + 2*I*pi/3
    See Also
    ========
    exp
    """
    _singularities = (S.Zero, S.ComplexInfinity)
    def fdiff(self, argindex=1):
        """
        Returns the first derivative of the function.
        """
        if argindex == 1:
            # d/dx log(x) == 1/x
            return 1/self.args[0]
        else:
            raise ArgumentIndexError(self, argindex)
    def inverse(self, argindex=1):
        r"""
        Returns `e^x`, the inverse function of `\log(x)`.
        """
        return exp
    @classmethod
    def eval(cls, arg, base=None):
        """Automatic evaluation on the principal branch: handles an
        explicit base, special numbers, exp/exp_polar arguments, imaginary
        arguments, and algebraic arguments with known polar angles.
        Returns None to remain unevaluated."""
        from sympy import unpolarify
        from sympy.calculus import AccumBounds
        from sympy.sets.setexpr import SetExpr
        from sympy.functions.elementary.complexes import Abs
        arg = sympify(arg)
        if base is not None:
            base = sympify(base)
            if base == 1:
                if arg == 1:
                    return S.NaN
                else:
                    return S.ComplexInfinity
            try:
                # handle extraction of powers of the base now
                # or else expand_log in Mul would have to handle this
                n = multiplicity(base, arg)
                if n:
                    return n + log(arg / base**n) / log(base)
                else:
                    return log(arg)/log(base)
            except ValueError:
                pass
            if base is not S.Exp1:
                return cls(arg)/cls(base)
            else:
                return cls(arg)
        if arg.is_Number:
            if arg.is_zero:
                return S.ComplexInfinity
            elif arg is S.One:
                return S.Zero
            elif arg is S.Infinity:
                return S.Infinity
            elif arg is S.NegativeInfinity:
                return S.Infinity
            elif arg is S.NaN:
                return S.NaN
            elif arg.is_Rational and arg.p == 1:
                # log(1/q) -> -log(q)
                return -cls(arg.q)
        if arg.is_Pow and arg.base is S.Exp1 and arg.exp.is_extended_real:
            # log(E**x) -> x for real x
            return arg.exp
        I = S.ImaginaryUnit
        if isinstance(arg, exp) and arg.exp.is_extended_real:
            return arg.exp
        elif isinstance(arg, exp) and arg.exp.is_number:
            r_, i_ = match_real_imag(arg.exp)
            if i_ and i_.is_comparable:
                # reduce the angle into the principal range (-pi, pi]
                i_ %= 2*S.Pi
                if i_ > S.Pi:
                    i_ -= 2*S.Pi
                return r_ + expand_mul(i_ * I, deep=False)
        elif isinstance(arg, exp_polar):
            return unpolarify(arg.exp)
        elif isinstance(arg, AccumBounds):
            if arg.min.is_positive:
                return AccumBounds(log(arg.min), log(arg.max))
            else:
                return
        elif isinstance(arg, SetExpr):
            return arg._eval_func(cls)
        if arg.is_number:
            if arg.is_negative:
                # branch cut: log(-x) == log(x) + I*pi for x > 0
                return S.Pi * I + cls(-arg)
            elif arg is S.ComplexInfinity:
                return S.ComplexInfinity
            elif arg is S.Exp1:
                return S.One
        if arg.is_zero:
            return S.ComplexInfinity
        # don't autoexpand Pow or Mul (see the issue 3351):
        if not arg.is_Add:
            coeff = arg.as_coefficient(I)
            if coeff is not None:
                if coeff is S.Infinity:
                    return S.Infinity
                elif coeff is S.NegativeInfinity:
                    return S.Infinity
                elif coeff.is_Rational:
                    if coeff.is_nonnegative:
                        return S.Pi * I * S.Half + cls(coeff)
                    else:
                        return -S.Pi * I * S.Half + cls(-coeff)
        if arg.is_number and arg.is_algebraic:
            # Match arg = coeff*(r_ + i_*I) with coeff>0, r_ and i_ real.
            coeff, arg_ = arg.as_independent(I, as_Add=False)
            if coeff.is_negative:
                coeff *= -1
                arg_ *= -1
            arg_ = expand_mul(arg_, deep=False)
            r_, i_ = arg_.as_independent(I, as_Add=True)
            i_ = i_.as_coefficient(I)
            if coeff.is_real and i_ and i_.is_real and r_.is_real:
                if r_.is_zero:
                    if i_.is_positive:
                        return S.Pi * I * S.Half + cls(coeff * i_)
                    elif i_.is_negative:
                        return -S.Pi * I * S.Half + cls(coeff * -i_)
                else:
                    from sympy.simplify import ratsimp
                    # Check for arguments involving rational multiples of pi
                    t = (i_/r_).cancel()
                    t1 = (-t).cancel()
                    # tan(angle) -> angle lookup for recognizable radicals
                    atan_table = {
                        # first quadrant only
                        sqrt(3): S.Pi/3,
                        1: S.Pi/4,
                        sqrt(5 - 2*sqrt(5)): S.Pi/5,
                        sqrt(2)*sqrt(5 - sqrt(5))/(1 + sqrt(5)): S.Pi/5,
                        sqrt(5 + 2*sqrt(5)): S.Pi*Rational(2, 5),
                        sqrt(2)*sqrt(sqrt(5) + 5)/(-1 + sqrt(5)): S.Pi*Rational(2, 5),
                        sqrt(3)/3: S.Pi/6,
                        sqrt(2) - 1: S.Pi/8,
                        sqrt(2 - sqrt(2))/sqrt(sqrt(2) + 2): S.Pi/8,
                        sqrt(2) + 1: S.Pi*Rational(3, 8),
                        sqrt(sqrt(2) + 2)/sqrt(2 - sqrt(2)): S.Pi*Rational(3, 8),
                        sqrt(1 - 2*sqrt(5)/5): S.Pi/10,
                        (-sqrt(2) + sqrt(10))/(2*sqrt(sqrt(5) + 5)): S.Pi/10,
                        sqrt(1 + 2*sqrt(5)/5): S.Pi*Rational(3, 10),
                        (sqrt(2) + sqrt(10))/(2*sqrt(5 - sqrt(5))): S.Pi*Rational(3, 10),
                        2 - sqrt(3): S.Pi/12,
                        (-1 + sqrt(3))/(1 + sqrt(3)): S.Pi/12,
                        2 + sqrt(3): S.Pi*Rational(5, 12),
                        (1 + sqrt(3))/(-1 + sqrt(3)): S.Pi*Rational(5, 12)
                    }
                    if t in atan_table:
                        modulus = ratsimp(coeff * Abs(arg_))
                        if r_.is_positive:
                            return cls(modulus) + I * atan_table[t]
                        else:
                            return cls(modulus) + I * (atan_table[t] - S.Pi)
                    elif t1 in atan_table:
                        modulus = ratsimp(coeff * Abs(arg_))
                        if r_.is_positive:
                            return cls(modulus) + I * (-atan_table[t1])
                        else:
                            return cls(modulus) + I * (S.Pi - atan_table[t1])
    def as_base_exp(self):
        """
        Returns this function in the form (base, exponent).
        """
        return self, S.One
    @staticmethod
    @cacheit
    def taylor_term(n, x, *previous_terms):  # of log(1+x)
        r"""
        Returns the next term in the Taylor series expansion of `\log(1+x)`.
        """
        from sympy import powsimp
        if n < 0:
            return S.Zero
        x = sympify(x)
        if n == 0:
            return x
        if previous_terms:
            p = previous_terms[-1]
            if p is not None:
                return powsimp((-n) * p * x / (n + 1), deep=True, combine='exp')
        return (1 - 2*(n % 2)) * x**(n + 1)/(n + 1)
    def _eval_expand_log(self, deep=True, **hints):
        """Expand logs of products, powers, rationals and (with
        ``factor=True``) integer prime factorizations, subject to the
        positivity conditions unless ``force=True``."""
        from sympy import unpolarify, expand_log, factorint
        from sympy.concrete import Sum, Product
        force = hints.get('force', False)
        factor = hints.get('factor', False)
        if (len(self.args) == 2):
            # two-arg (base) form: delegate to expand_log
            return expand_log(self.func(*self.args), deep=deep, force=force)
        arg = self.args[0]
        if arg.is_Integer:
            # remove perfect powers
            p = perfect_power(arg)
            logarg = None
            coeff = 1
            if p is not False:
                arg, coeff = p
                logarg = self.func(arg)
            # expand as product of its prime factors if factor=True
            if factor:
                p = factorint(arg)
                if arg not in p.keys():
                    logarg = sum(n*log(val) for val, n in p.items())
            if logarg is not None:
                return coeff*logarg
        elif arg.is_Rational:
            return log(arg.p) - log(arg.q)
        elif arg.is_Mul:
            expr = []
            nonpos = []
            for x in arg.args:
                if force or x.is_positive or x.is_polar:
                    a = self.func(x)
                    if isinstance(a, log):
                        expr.append(self.func(x)._eval_expand_log(**hints))
                    else:
                        expr.append(a)
                elif x.is_negative:
                    # pull out log(-x); collect the sign into the residue
                    a = self.func(-x)
                    expr.append(a)
                    nonpos.append(S.NegativeOne)
                else:
                    nonpos.append(x)
            return Add(*expr) + log(Mul(*nonpos))
        elif arg.is_Pow or isinstance(arg, exp):
            if force or (arg.exp.is_extended_real and (arg.base.is_positive or ((arg.exp+1)
                .is_positive and (arg.exp-1).is_nonpositive))) or arg.base.is_polar:
                b = arg.base
                e = arg.exp
                a = self.func(b)
                if isinstance(a, log):
                    return unpolarify(e) * a._eval_expand_log(**hints)
                else:
                    return unpolarify(e) * a
        elif isinstance(arg, Product):
            if force or arg.function.is_positive:
                # log of a product of positives becomes a sum of logs
                return Sum(log(arg.function), *arg.limits)
        return self.func(arg)
    def _eval_simplify(self, **kwargs):
        """Simplify the argument, optionally combine with inverses, expand,
        and keep whichever candidate measures smallest."""
        from sympy.simplify.simplify import expand_log, simplify, inversecombine
        if len(self.args) == 2:  # it's unevaluated
            return simplify(self.func(*self.args), **kwargs)
        expr = self.func(simplify(self.args[0], **kwargs))
        if kwargs['inverse']:
            expr = inversecombine(expr)
        expr = expand_log(expr, deep=True)
        return min([expr, self], key=kwargs['measure'])
    def as_real_imag(self, deep=True, **hints):
        """
        Returns this function as a complex coordinate.
        Examples
        ========
        >>> from sympy import I
        >>> from sympy.abc import x
        >>> from sympy.functions import log
        >>> log(x).as_real_imag()
        (log(Abs(x)), arg(x))
        >>> log(I).as_real_imag()
        (0, pi/2)
        >>> log(1 + I).as_real_imag()
        (log(sqrt(2)), pi/4)
        >>> log(I*x).as_real_imag()
        (log(Abs(x)), arg(I*x))
        """
        from sympy import Abs, arg
        sarg = self.args[0]
        if deep:
            sarg = self.args[0].expand(deep, **hints)
        abs = Abs(sarg)
        if abs == sarg:
            return self, S.Zero
        arg = arg(sarg)
        if hints.get('log', False):  # Expand the log
            hints['complex'] = False
            return (log(abs).expand(deep, **hints), arg)
        else:
            return log(abs), arg
    def _eval_is_rational(self):
        s = self.func(*self.args)
        if s.func == self.func:
            # log(1) == 0 is rational; log of any other rational is not
            if (self.args[0] - 1).is_zero:
                return True
            if s.args[0].is_rational and fuzzy_not((self.args[0] - 1).is_zero):
                return False
        else:
            return s.is_rational
    def _eval_is_algebraic(self):
        s = self.func(*self.args)
        if s.func == self.func:
            # log of a nonunit algebraic number is transcendental
            if (self.args[0] - 1).is_zero:
                return True
            elif fuzzy_not((self.args[0] - 1).is_zero):
                if self.args[0].is_algebraic:
                    return False
        else:
            return s.is_algebraic
    def _eval_is_extended_real(self):
        return self.args[0].is_extended_positive
    def _eval_is_complex(self):
        z = self.args[0]
        # log(0) is zoo, so the argument must be complex and nonzero
        return fuzzy_and([z.is_complex, fuzzy_not(z.is_zero)])
    def _eval_is_finite(self):
        arg = self.args[0]
        if arg.is_zero:
            return False
        return arg.is_finite
    def _eval_is_extended_positive(self):
        return (self.args[0] - 1).is_extended_positive
    def _eval_is_zero(self):
        return (self.args[0] - 1).is_zero
    def _eval_is_extended_nonnegative(self):
        return (self.args[0] - 1).is_extended_nonnegative
    def _eval_nseries(self, x, n, logx, cdir=0):
        # NOTE Please see the comment at the beginning of this file, labelled
        # IMPORTANT.
        from sympy import im, cancel, I, Order, logcombine
        from itertools import product
        if not logx:
            logx = log(x)
        if self.args[0] == x:
            return logx
        arg = self.args[0]
        k, l = Wild("k"), Wild("l")
        r = arg.match(k*x**l)
        if r is not None:
            k, l = r[k], r[l]
            if l != 0 and not l.has(x) and not k.has(x):
                r = log(k) + l*logx  # XXX true regardless of assumptions?
                return r
        def coeff_exp(term, x):
            # split term into (coefficient free of x, exponent of x)
            coeff, exp = S.One, S.Zero
            for factor in Mul.make_args(term):
                if factor.has(x):
                    base, exp = factor.as_base_exp()
                    if base != x:
                        try:
                            return term.leadterm(x)
                        except ValueError:
                            return term, S.Zero
                else:
                    coeff *= factor
            return coeff, exp
        # TODO new and probably slow
        try:
            a, b = arg.leadterm(x)
            s = arg.nseries(x, n=n+b, logx=logx)
        except (ValueError, NotImplementedError, PoleError):
            s = arg.nseries(x, n=n, logx=logx)
            while s.is_Order:
                n += 1
                s = arg.nseries(x, n=n, logx=logx)
            a, b = s.removeO().leadterm(x)
        # write arg = a*x**b * (1 + p) and expand log(1 + p)
        p = cancel(s/(a*x**b) - 1).expand().powsimp()
        if p.has(exp):
            p = logcombine(p)
        if isinstance(p, Order):
            n = p.getn()
        _, d = coeff_exp(p, x)
        if not d.is_positive:
            return log(a) + b*logx + Order(x**n, x)
        def mul(d1, d2):
            # multiply two {exponent: coefficient} polynomials, truncated at n
            res = {}
            for e1, e2 in product(d1, d2):
                ex = e1 + e2
                if ex < n:
                    res[ex] = res.get(ex, S.Zero) + d1[e1]*d2[e2]
            return res
        pterms = {}
        for term in Add.make_args(p):
            co1, e1 = coeff_exp(term, x)
            pterms[e1] = pterms.get(e1, S.Zero) + co1.removeO()
        k = S.One
        terms = {}
        pk = pterms
        # accumulate the Mercator series log(1+p) = sum (-1)**(k+1) p**k / k
        while k*d < n:
            coeff = -(-1)**k/k
            for ex in pk:
                terms[ex] = terms.get(ex, S.Zero) + coeff*pk[ex]
            pk = mul(pk, pterms)
            k += S.One
        res = log(a) + b*logx
        for ex in terms:
            res += terms[ex]*x**(ex)
        if cdir != 0:
            cdir = self.args[0].dir(x, cdir)
        if a.is_real and a.is_negative and im(cdir) < 0:
            # approaching the branch cut from below: subtract 2*pi*I
            res -= 2*I*S.Pi
        return res + Order(x**n, x)
    def _eval_as_leading_term(self, x, logx=None, cdir=0):
        """Leading term of log around x=0, with branch-cut correction when
        the argument approaches the negative real axis from below."""
        from sympy import I, im
        arg0 = self.args[0].together()
        arg = arg0.as_leading_term(x, cdir=cdir)
        x0 = arg0.subs(x, 0)
        if (x0 is S.NaN and logx is None):
            x0 = arg.limit(x, 0, dir='-' if cdir < 0 else '+')
        if x0 in (S.NegativeInfinity, S.Infinity):
            raise PoleError("Cannot expand %s around 0" % (self))
        if x0 == 1:
            # log(1 + eps) ~ eps: the leading term comes from arg - 1
            return (arg0 - S.One).as_leading_term(x)
        if cdir != 0:
            cdir = arg0.dir(x, cdir)
        if x0.is_real and x0.is_negative and im(cdir) < 0:
            return self.func(x0) - 2*I*S.Pi
        return self.func(arg)
class LambertW(Function):
r"""
The Lambert W function `W(z)` is defined as the inverse
function of `w \exp(w)` [1]_.
Explanation
===========
In other words, the value of `W(z)` is such that `z = W(z) \exp(W(z))`
for any complex number `z`. The Lambert W function is a multivalued
function with infinitely many branches `W_k(z)`, indexed by
`k \in \mathbb{Z}`. Each branch gives a different solution `w`
of the equation `z = w \exp(w)`.
The Lambert W function has two partially real branches: the
principal branch (`k = 0`) is real for real `z > -1/e`, and the
`k = -1` branch is real for `-1/e < z < 0`. All branches except
`k = 0` have a logarithmic singularity at `z = 0`.
Examples
========
>>> from sympy import LambertW
>>> LambertW(1.2)
0.635564016364870
>>> LambertW(1.2, -1).n()
-1.34747534407696 - 4.41624341514535*I
>>> LambertW(-1).is_real
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Lambert_W_function
"""
_singularities = (-Pow(S.Exp1, -1, evaluate=False), S.ComplexInfinity)
    @classmethod
    def eval(cls, x, k=None):
        """Evaluate W_k(x) at the known special points.

        ``k`` selects the branch; ``k=None`` or ``0`` means the principal
        branch.  Returns None (stays unevaluated) when no closed form is
        known.
        """
        if k == S.Zero:
            # canonicalize an explicit principal branch to the 1-arg form
            return cls(x)
        elif k is None:
            k = S.Zero
        if k.is_zero:
            # special values of the principal branch, each verified by
            # x == W(x)*exp(W(x))
            if x.is_zero:
                return S.Zero
            if x is S.Exp1:
                return S.One
            if x == -1/S.Exp1:
                return S.NegativeOne
            if x == -log(2)/2:
                return -log(2)
            if x == 2*log(2):
                return log(2)
            if x == -S.Pi/2:
                return S.ImaginaryUnit*S.Pi/2
            if x == exp(1 + S.Exp1):
                return S.Exp1
            if x is S.Infinity:
                return S.Infinity
            if x.is_zero:
                return S.Zero
        if fuzzy_not(k.is_zero):
            if x.is_zero:
                # every branch except k=0 has a log singularity at 0
                return S.NegativeInfinity
        if k is S.NegativeOne:
            if x == -S.Pi/2:
                return -S.ImaginaryUnit*S.Pi/2
            elif x == -1/S.Exp1:
                return S.NegativeOne
            elif x == -2*exp(-2):
                return -Integer(2)
def fdiff(self, argindex=1):
"""
Return the first derivative of this function.
"""
x = self.args[0]
if len(self.args) == 1:
if argindex == 1:
return LambertW(x)/(x*(1 + LambertW(x)))
else:
k = self.args[1]
if argindex == 1:
return LambertW(x, k)/(x*(1 + LambertW(x, k)))
raise ArgumentIndexError(self, argindex)
    def _eval_is_extended_real(self):
        """Realness of W_k(x): only branches k=0 and k=-1 can be real.

        The principal branch is real for x > -1/e; the k=-1 branch is real
        for -1/e < x < 0; every other branch is complex for real x.
        """
        x = self.args[0]
        if len(self.args) == 1:
            k = S.Zero
        else:
            k = self.args[1]
        if k.is_zero:
            if (x + 1/S.Exp1).is_positive:
                return True
            elif (x + 1/S.Exp1).is_nonpositive:
                return False
        elif (k + 1).is_zero:
            if x.is_negative and (x + 1/S.Exp1).is_positive:
                return True
            elif x.is_nonpositive or (x + 1/S.Exp1).is_nonnegative:
                return False
        elif fuzzy_not(k.is_zero) and fuzzy_not((k + 1).is_zero):
            if x.is_extended_real:
                return False
def _eval_is_finite(self):
return self.args[0].is_finite
def _eval_is_algebraic(self):
s = self.func(*self.args)
if s.func == self.func:
if fuzzy_not(self.args[0].is_zero) and self.args[0].is_algebraic:
return False
else:
return s.is_algebraic
    def _eval_as_leading_term(self, x, logx=None, cdir=0):
        """Leading term of the principal branch around x=0.

        NOTE(review): for the two-argument (non-principal-branch) form this
        implicitly returns None — presumably callers fall back elsewhere;
        confirm before relying on it.
        """
        if len(self.args) == 1:
            arg = self.args[0]
            arg0 = arg.subs(x, 0).cancel()
            if not arg0.is_zero:
                return self.func(arg0)
            # W(z) ~ z near 0, so the leading term is that of the argument
            return arg.as_leading_term(x)
    def _eval_nseries(self, x, n, logx, cdir=0):
        """Series of the principal branch using the Maclaurin series
        W(z) = sum_{k>=1} (-k)**(k-1)/k! * z**k (the coefficient below is
        the equivalent (-1)**(k-1)*k**(k-2)/(k-1)!)."""
        if len(self.args) == 1:
            from sympy import Order, ceiling, expand_multinomial
            arg = self.args[0].nseries(x, n=n, logx=logx)
            lt = arg.compute_leading_term(x, logx=logx)
            # lte: exponent of the leading term, used to scale how many
            # powers of arg are needed to reach order n
            lte = 1
            if lt.is_Pow:
                lte = lt.exp
            if ceiling(n/lte) >= 1:
                s = Add(*[(-S.One)**(k - 1)*Integer(k)**(k - 2)/
                          factorial(k - 1)*arg**k for k in range(1, ceiling(n/lte))])
                s = expand_multinomial(s)
            else:
                s = S.Zero
            return s + Order(x**n, x)
        return super()._eval_nseries(x, n, logx)
def _eval_is_zero(self):
x = self.args[0]
if len(self.args) == 1:
k = S.Zero
else:
k = self.args[1]
if x.is_zero and k.is_zero:
return True
| 33.052282 | 91 | 0.512353 | from sympy.core import sympify
from sympy.core.add import Add
from sympy.core.cache import cacheit
from sympy.core.function import (
Function, ArgumentIndexError, _coeff_isneg,
expand_mul, FunctionClass, PoleError)
from sympy.core.logic import fuzzy_and, fuzzy_not, fuzzy_or
from sympy.core.mul import Mul
from sympy.core.numbers import Integer, Rational
from sympy.core.parameters import global_parameters
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.symbol import Wild, Dummy
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.ntheory import multiplicity, perfect_power
class ExpBase(Function):
unbranched = True
_singularities = (S.ComplexInfinity,)
def inverse(self, argindex=1):
return log
def as_numer_denom(self):
exp = self.exp
neg_exp = exp.is_negative
if not neg_exp and not (-exp).is_negative:
neg_exp = _coeff_isneg(exp)
if neg_exp:
return S.One, self.func(-exp)
return self, S.One
@property
def exp(self):
return self.args[0]
def as_base_exp(self):
return self.func(1), Mul(*self.args)
def _eval_adjoint(self):
return self.func(self.exp.adjoint())
def _eval_conjugate(self):
return self.func(self.exp.conjugate())
def _eval_transpose(self):
return self.func(self.exp.transpose())
def _eval_is_finite(self):
arg = self.exp
if arg.is_infinite:
if arg.is_extended_negative:
return True
if arg.is_extended_positive:
return False
if arg.is_finite:
return True
def _eval_is_rational(self):
s = self.func(*self.args)
if s.func == self.func:
z = s.exp.is_zero
if z:
return True
elif s.exp.is_rational and fuzzy_not(z):
return False
else:
return s.is_rational
def _eval_is_zero(self):
return self.exp is S.NegativeInfinity
def _eval_power(self, other):
b, e = self.as_base_exp()
return Pow._eval_power(Pow(b, e, evaluate=False), other)
def _eval_expand_power_exp(self, **hints):
from sympy import Sum, Product
arg = self.args[0]
if arg.is_Add and arg.is_commutative:
return Mul.fromiter(self.func(x) for x in arg.args)
elif isinstance(arg, Sum) and arg.is_commutative:
return Product(self.func(arg.function), *arg.limits)
return self.func(arg)
class exp_polar(ExpBase):
is_polar = True
is_comparable = False
def _eval_Abs(self): # Abs is never a polar number
from sympy.functions.elementary.complexes import re
return exp(re(self.args[0]))
def _eval_evalf(self, prec):
from sympy import im, pi, re
i = im(self.args[0])
try:
bad = (i <= -pi or i > pi)
except TypeError:
bad = True
if bad:
return self # cannot evalf for this argument
res = exp(self.args[0])._eval_evalf(prec)
if i > 0 and im(res) < 0:
# i ~ pi, but exp(I*i) evaluated to argument slightly bigger than pi
return re(res)
return res
def _eval_power(self, other):
return self.func(self.args[0]*other)
def _eval_is_extended_real(self):
if self.args[0].is_extended_real:
return True
def as_base_exp(self):
# XXX exp_polar(0) is special!
if self.args[0] == 0:
return self, S.One
return ExpBase.as_base_exp(self)
class ExpMeta(FunctionClass):
def __instancecheck__(cls, instance):
if exp in instance.__class__.__mro__:
return True
return isinstance(instance, Pow) and instance.base is S.Exp1
class exp(ExpBase, metaclass=ExpMeta):
def fdiff(self, argindex=1):
if argindex == 1:
return self
else:
raise ArgumentIndexError(self, argindex)
def _eval_refine(self, assumptions):
from sympy.assumptions import ask, Q
arg = self.args[0]
if arg.is_Mul:
Ioo = S.ImaginaryUnit*S.Infinity
if arg in [Ioo, -Ioo]:
return S.NaN
coeff = arg.as_coefficient(S.Pi*S.ImaginaryUnit)
if coeff:
if ask(Q.integer(2*coeff)):
if ask(Q.even(coeff)):
return S.One
elif ask(Q.odd(coeff)):
return S.NegativeOne
elif ask(Q.even(coeff + S.Half)):
return -S.ImaginaryUnit
elif ask(Q.odd(coeff + S.Half)):
return S.ImaginaryUnit
@classmethod
def eval(cls, arg):
from sympy.calculus import AccumBounds
from sympy.sets.setexpr import SetExpr
from sympy.matrices.matrices import MatrixBase
from sympy import im, logcombine, re
if isinstance(arg, MatrixBase):
return arg.exp()
elif global_parameters.exp_is_pow:
return Pow(S.Exp1, arg)
elif arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg.is_zero:
return S.One
elif arg is S.One:
return S.Exp1
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.Zero
elif arg is S.ComplexInfinity:
return S.NaN
elif isinstance(arg, log):
return arg.args[0]
elif isinstance(arg, AccumBounds):
return AccumBounds(exp(arg.min), exp(arg.max))
elif isinstance(arg, SetExpr):
return arg._eval_func(cls)
elif arg.is_Mul:
coeff = arg.as_coefficient(S.Pi*S.ImaginaryUnit)
if coeff:
if (2*coeff).is_integer:
if coeff.is_even:
return S.One
elif coeff.is_odd:
return S.NegativeOne
elif (coeff + S.Half).is_even:
return -S.ImaginaryUnit
elif (coeff + S.Half).is_odd:
return S.ImaginaryUnit
elif coeff.is_Rational:
ncoeff = coeff % 2 # restrict to [0, 2pi)
if ncoeff > 1: # restrict to (-pi, pi]
ncoeff -= 2
if ncoeff != coeff:
return cls(ncoeff*S.Pi*S.ImaginaryUnit)
# Warning: code in risch.py will be very sensitive to changes
# in this (see DifferentialExtension).
# look for a single log factor
coeff, terms = arg.as_coeff_Mul()
# but it can't be multiplied by oo
if coeff in [S.NegativeInfinity, S.Infinity]:
if terms.is_number:
if coeff is S.NegativeInfinity:
terms = -terms
if re(terms).is_zero and terms is not S.Zero:
return S.NaN
if re(terms).is_positive and im(terms) is not S.Zero:
return S.ComplexInfinity
if re(terms).is_negative:
return S.Zero
return None
coeffs, log_term = [coeff], None
for term in Mul.make_args(terms):
term_ = logcombine(term)
if isinstance(term_, log):
if log_term is None:
log_term = term_.args[0]
else:
return None
elif term.is_comparable:
coeffs.append(term)
else:
return None
return log_term**Mul(*coeffs) if log_term else None
elif arg.is_Add:
out = []
add = []
argchanged = False
for a in arg.args:
if a is S.One:
add.append(a)
continue
newa = cls(a)
if isinstance(newa, cls):
if newa.args[0] != a:
add.append(newa.args[0])
argchanged = True
else:
add.append(a)
else:
out.append(newa)
if out or argchanged:
return Mul(*out)*cls(Add(*add), evaluate=False)
if arg.is_zero:
return S.One
@property
def base(self):
return S.Exp1
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0:
return S.Zero
if n == 0:
return S.One
x = sympify(x)
if previous_terms:
p = previous_terms[-1]
if p is not None:
return p * x / n
return x**n/factorial(n)
def as_real_imag(self, deep=True, **hints):
from sympy.functions.elementary.trigonometric import cos, sin
re, im = self.args[0].as_real_imag()
if deep:
re = re.expand(deep, **hints)
im = im.expand(deep, **hints)
cos, sin = cos(im), sin(im)
return (exp(re)*cos, exp(re)*sin)
    def _eval_subs(self, old, new):
        """Substitution that keeps exp and Pow semantics consistent.

        Both ``self`` and ``old`` are normalized to base/exponent form so
        substitutions like ``exp(3*x).subs(exp(x), y)`` behave the same as
        the equivalent ``Pow`` substitution.
        """
        if old.is_Pow:  # canonicalize b**e as exp(e*log(b)) so forms match
            old = exp(old.exp*log(old.base))
        elif old is S.Exp1 and new.is_Function:
            old = exp
        if isinstance(old, exp) or old is S.Exp1:
            # Normalize both sides to unevaluated Pow before delegating.
            f = lambda a: Pow(*a.as_base_exp(), evaluate=False) if (
                a.is_Pow or isinstance(a, exp)) else a
            return Pow._eval_subs(f(self), f(old), new)
        if old is exp and not new.is_Function:
            return new**self.exp._subs(old, new)
        return Function._eval_subs(self, old, new)
    def _eval_is_extended_real(self):
        """exp of a real argument is real; exp(I*t) is real iff t = n*pi."""
        if self.args[0].is_extended_real:
            return True
        elif self.args[0].is_imaginary:
            # exp(I*t) is real exactly when -2*I*(I*t)/pi is an even integer.
            arg2 = -S(2) * S.ImaginaryUnit * self.args[0] / S.Pi
            return arg2.is_even
def _eval_is_complex(self):
def complex_extended_negative(arg):
yield arg.is_complex
yield arg.is_extended_negative
return fuzzy_or(complex_extended_negative(self.args[0]))
    def _eval_is_algebraic(self):
        """Decide algebraicity of exp(z) via Lindemann-Weierstrass."""
        if (self.exp / S.Pi / S.ImaginaryUnit).is_rational:
            # exp(I*pi*q) with rational q is a root of unity -> algebraic.
            return True
        if fuzzy_not(self.exp.is_zero):
            if self.exp.is_algebraic:
                # exp of a nonzero algebraic number is transcendental.
                return False
            elif (self.exp / S.Pi).is_rational:
                # exp(q*pi) with rational nonzero q is transcendental.
                return False
def _eval_is_extended_positive(self):
if self.exp.is_extended_real:
return not self.args[0] is S.NegativeInfinity
elif self.exp.is_imaginary:
arg2 = -S.ImaginaryUnit * self.args[0] / S.Pi
return arg2.is_even
    def _eval_nseries(self, x, n, logx, cdir=0):
        """Series expansion of exp(arg) about x = 0 up to O(x**n).

        Expands the exponent first and substitutes it into the Taylor
        series of exp, splitting off exp(arg0) so the remaining series
        starts at a finite constant.
        """
        from sympy import ceiling, limit, Order, powsimp, Wild, expand_complex
        arg = self.exp
        arg_series = arg._eval_nseries(x, n=n, logx=logx)
        if arg_series.is_Order:
            return 1 + arg_series
        arg0 = limit(arg_series.removeO(), x, 0)
        if arg0 is S.NegativeInfinity:
            # exp(-oo) -> 0; everything is absorbed into the order term.
            return Order(x**n, x)
        if arg0 is S.Infinity:
            return self
        t = Dummy("t")
        nterms = n
        try:
            cf = Order(arg.as_leading_term(x, logx=logx), x).getn()
        except (NotImplementedError, PoleError):
            cf = 0
        if cf and cf > 0:
            # Leading power cf > 0: fewer Taylor terms suffice for order n.
            nterms = ceiling(n/cf)
        exp_series = exp(t)._taylor(t, nterms)
        r = exp(arg0)*exp_series.subs(t, arg_series - arg0)
        if cf and cf > 1:
            r += Order((arg_series - arg0)**n, x)/x**((cf-1)*n)
        else:
            r += Order((arg_series - arg0)**n, x)
        r = r.expand()
        r = powsimp(r, deep=True, combine='exp')
        # powsimp may leave (-1)**Rational factors with small denominators;
        # expand those back into explicit complex form.
        simplerat = lambda x: x.is_Rational and x.q in [3, 4, 6]
        w = Wild('w', properties=[simplerat])
        r = r.replace((-1)**w, expand_complex((-1)**w))
        return r
def _taylor(self, x, n):
l = []
g = None
for i in range(n):
g = self.taylor_term(i, self.args[0], g)
g = g.nseries(x, n=n)
l.append(g.removeO())
return Add(*l)
    def _eval_as_leading_term(self, x, logx=None, cdir=0):
        """Leading term of exp(arg) as x -> 0: exp(arg0) when arg0 is finite."""
        arg = self.args[0].cancel().as_leading_term(x, logx=logx)
        arg0 = arg.subs(x, 0)
        if arg0 is S.NaN:
            # Direct substitution failed; fall back to taking the limit.
            arg0 = arg.limit(x, 0)
        if arg0.is_infinite is False:
            return exp(arg0)
        raise PoleError("Cannot expand %s around 0" % (self))
def _eval_rewrite_as_sin(self, arg, **kwargs):
from sympy import sin
I = S.ImaginaryUnit
return sin(I*arg + S.Pi/2) - I*sin(I*arg)
def _eval_rewrite_as_cos(self, arg, **kwargs):
from sympy import cos
I = S.ImaginaryUnit
return cos(I*arg) + I*cos(I*arg + S.Pi/2)
def _eval_rewrite_as_tanh(self, arg, **kwargs):
from sympy import tanh
return (1 + tanh(arg/2))/(1 - tanh(arg/2))
    def _eval_rewrite_as_sqrt(self, arg, **kwargs):
        """Rewrite exp(I*pi*q), q a rational number, in radical form.

        Uses cos(pi*q) + I*sin(pi*q); only rewrites when both the cosine
        and the sine actually evaluated to closed (radical) form.
        """
        from sympy.functions.elementary.trigonometric import sin, cos
        if arg.is_Mul:
            coeff = arg.coeff(S.Pi*S.ImaginaryUnit)
            if coeff and coeff.is_number:
                cosine, sine = cos(S.Pi*coeff), sin(S.Pi*coeff)
                if not isinstance(cosine, cos) and not isinstance (sine, sin):
                    return cosine + S.ImaginaryUnit*sine
    def _eval_rewrite_as_Pow(self, arg, **kwargs):
        """Rewrite exp(c*log(b)) as the power b**c."""
        if arg.is_Mul:
            logs = [a for a in arg.args if isinstance(a, log) and len(a.args) == 1]
            if logs:
                return Pow(logs[0].args[0], arg.coeff(logs[0]))
def match_real_imag(expr):
    """Try to split *expr* as ``re + I*im`` with both parts real.

    Returns the pair ``(re, im)`` on success and ``(None, None)`` when
    the decomposition cannot be established.
    """
    real_part, imag_term = expr.as_independent(S.ImaginaryUnit, as_Add=True)
    if imag_term == 0 and real_part.is_real:
        return (real_part, imag_term)
    imag_part = imag_term.as_coefficient(S.ImaginaryUnit)
    if imag_part and imag_part.is_real and real_part.is_real:
        return (real_part, imag_part)
    return (None, None)
class log(Function):
    r"""
    The natural logarithm function, the inverse of ``exp``.
    """
    # log is singular at 0 and at complex infinity.
    _singularities = (S.Zero, S.ComplexInfinity)
    def fdiff(self, argindex=1):
        """Return the first derivative of log(x), i.e. ``1/x``."""
        if argindex == 1:
            return 1/self.args[0]
        else:
            raise ArgumentIndexError(self, argindex)
    def inverse(self, argindex=1):
        """Return ``exp``, the inverse function of ``log``."""
        return exp
    @classmethod
    def eval(cls, arg, base=None):
        """Automatic evaluation of ``log(arg, base)``.

        Handles an explicit base by reduction to natural logs, special
        numeric values, branch-cut bookkeeping for negative/imaginary
        arguments, and exact arguments of the form r*exp(I*pi*q).
        """
        from sympy import unpolarify
        from sympy.calculus import AccumBounds
        from sympy.sets.setexpr import SetExpr
        from sympy.functions.elementary.complexes import Abs
        arg = sympify(arg)
        if base is not None:
            base = sympify(base)
            if base == 1:
                if arg == 1:
                    return S.NaN
                else:
                    return S.ComplexInfinity
            try:
                # Extract exact powers of the base when possible.
                n = multiplicity(base, arg)
                if n:
                    return n + log(arg / base**n) / log(base)
                else:
                    return log(arg)/log(base)
            except ValueError:
                pass
            if base is not S.Exp1:
                return cls(arg)/cls(base)
            else:
                return cls(arg)
        if arg.is_Number:
            if arg.is_zero:
                return S.ComplexInfinity
            elif arg is S.One:
                return S.Zero
            elif arg is S.Infinity:
                return S.Infinity
            elif arg is S.NegativeInfinity:
                return S.Infinity
            elif arg is S.NaN:
                return S.NaN
            elif arg.is_Rational and arg.p == 1:
                # log(1/q) == -log(q)
                return -cls(arg.q)
        if arg.is_Pow and arg.base is S.Exp1 and arg.exp.is_extended_real:
            return arg.exp
        I = S.ImaginaryUnit
        if isinstance(arg, exp) and arg.exp.is_extended_real:
            return arg.exp
        elif isinstance(arg, exp) and arg.exp.is_number:
            r_, i_ = match_real_imag(arg.exp)
            if i_ and i_.is_comparable:
                # Reduce the imaginary part to the principal branch (-pi, pi].
                i_ %= 2*S.Pi
                if i_ > S.Pi:
                    i_ -= 2*S.Pi
                return r_ + expand_mul(i_ * I, deep=False)
        elif isinstance(arg, exp_polar):
            return unpolarify(arg.exp)
        elif isinstance(arg, AccumBounds):
            if arg.min.is_positive:
                return AccumBounds(log(arg.min), log(arg.max))
            else:
                return
        elif isinstance(arg, SetExpr):
            return arg._eval_func(cls)
        if arg.is_number:
            if arg.is_negative:
                # Principal branch: log(-x) = I*pi + log(x) for x > 0.
                return S.Pi * I + cls(-arg)
            elif arg is S.ComplexInfinity:
                return S.ComplexInfinity
            elif arg is S.Exp1:
                return S.One
        if arg.is_zero:
            return S.ComplexInfinity
        if not arg.is_Add:
            coeff = arg.as_coefficient(I)
            if coeff is not None:
                if coeff is S.Infinity:
                    return S.Infinity
                elif coeff is S.NegativeInfinity:
                    return S.Infinity
                elif coeff.is_Rational:
                    # log(I*q) = log(q) +/- I*pi/2 depending on sign of q.
                    if coeff.is_nonnegative:
                        return S.Pi * I * S.Half + cls(coeff)
                    else:
                        return -S.Pi * I * S.Half + cls(-coeff)
        if arg.is_number and arg.is_algebraic:
            # Match arg = coeff*(r_ + i_*I) with coeff>0, r_ and i_ real.
            coeff, arg_ = arg.as_independent(I, as_Add=False)
            if coeff.is_negative:
                coeff *= -1
                arg_ *= -1
            arg_ = expand_mul(arg_, deep=False)
            r_, i_ = arg_.as_independent(I, as_Add=True)
            i_ = i_.as_coefficient(I)
            if coeff.is_real and i_ and i_.is_real and r_.is_real:
                if r_.is_zero:
                    if i_.is_positive:
                        return S.Pi * I * S.Half + cls(coeff * i_)
                    elif i_.is_negative:
                        return -S.Pi * I * S.Half + cls(coeff * -i_)
                else:
                    from sympy.simplify import ratsimp
                    # Check for arguments involving rational multiples of pi
                    t = (i_/r_).cancel()
                    t1 = (-t).cancel()
                    # Known exact tangent values; maps tan(theta) -> theta.
                    atan_table = {
                        # first quadrant only
                        sqrt(3): S.Pi/3,
                        1: S.Pi/4,
                        sqrt(5 - 2*sqrt(5)): S.Pi/5,
                        sqrt(2)*sqrt(5 - sqrt(5))/(1 + sqrt(5)): S.Pi/5,
                        sqrt(5 + 2*sqrt(5)): S.Pi*Rational(2, 5),
                        sqrt(2)*sqrt(sqrt(5) + 5)/(-1 + sqrt(5)): S.Pi*Rational(2, 5),
                        sqrt(3)/3: S.Pi/6,
                        sqrt(2) - 1: S.Pi/8,
                        sqrt(2 - sqrt(2))/sqrt(sqrt(2) + 2): S.Pi/8,
                        sqrt(2) + 1: S.Pi*Rational(3, 8),
                        sqrt(sqrt(2) + 2)/sqrt(2 - sqrt(2)): S.Pi*Rational(3, 8),
                        sqrt(1 - 2*sqrt(5)/5): S.Pi/10,
                        (-sqrt(2) + sqrt(10))/(2*sqrt(sqrt(5) + 5)): S.Pi/10,
                        sqrt(1 + 2*sqrt(5)/5): S.Pi*Rational(3, 10),
                        (sqrt(2) + sqrt(10))/(2*sqrt(5 - sqrt(5))): S.Pi*Rational(3, 10),
                        2 - sqrt(3): S.Pi/12,
                        (-1 + sqrt(3))/(1 + sqrt(3)): S.Pi/12,
                        2 + sqrt(3): S.Pi*Rational(5, 12),
                        (1 + sqrt(3))/(-1 + sqrt(3)): S.Pi*Rational(5, 12)
                    }
                    if t in atan_table:
                        modulus = ratsimp(coeff * Abs(arg_))
                        if r_.is_positive:
                            return cls(modulus) + I * atan_table[t]
                        else:
                            return cls(modulus) + I * (atan_table[t] - S.Pi)
                    elif t1 in atan_table:
                        modulus = ratsimp(coeff * Abs(arg_))
                        if r_.is_positive:
                            return cls(modulus) + I * (-atan_table[t1])
                        else:
                            return cls(modulus) + I * (S.Pi - atan_table[t1])
    def as_base_exp(self):
        """Return (self, 1); log is not a power, so it is its own base."""
        return self, S.One
    @staticmethod
    @cacheit
    def taylor_term(n, x, *previous_terms):  # of log(1+x)
        """Return the n-th Maclaurin term of log(1 + x)."""
        from sympy import powsimp
        if n < 0:
            return S.Zero
        x = sympify(x)
        if n == 0:
            return x
        if previous_terms:
            p = previous_terms[-1]
            if p is not None:
                # Derive the next term from the previous one.
                return powsimp((-n) * p * x / (n + 1), deep=True, combine='exp')
        return (1 - 2*(n % 2)) * x**(n + 1)/(n + 1)
    def _eval_expand_log(self, deep=True, **hints):
        """Expand logs of products, powers, rationals and integers.

        Honors the ``force`` hint (assume positivity where needed) and the
        ``factor`` hint (expand an integer argument into its prime
        factorization).
        """
        from sympy import unpolarify, expand_log, factorint
        from sympy.concrete import Sum, Product
        force = hints.get('force', False)
        factor = hints.get('factor', False)
        if (len(self.args) == 2):
            return expand_log(self.func(*self.args), deep=deep, force=force)
        arg = self.args[0]
        if arg.is_Integer:
            # remove perfect powers
            p = perfect_power(arg)
            logarg = None
            coeff = 1
            if p is not False:
                arg, coeff = p
                logarg = self.func(arg)
            # expand as product of its prime factors if factor=True
            if factor:
                p = factorint(arg)
                if arg not in p.keys():
                    logarg = sum(n*log(val) for val, n in p.items())
            if logarg is not None:
                return coeff*logarg
        elif arg.is_Rational:
            return log(arg.p) - log(arg.q)
        elif arg.is_Mul:
            expr = []
            nonpos = []
            for x in arg.args:
                if force or x.is_positive or x.is_polar:
                    a = self.func(x)
                    if isinstance(a, log):
                        expr.append(self.func(x)._eval_expand_log(**hints))
                    else:
                        expr.append(a)
                elif x.is_negative:
                    # Pull out -1 so the remaining factor is positive.
                    a = self.func(-x)
                    expr.append(a)
                    nonpos.append(S.NegativeOne)
                else:
                    nonpos.append(x)
            return Add(*expr) + log(Mul(*nonpos))
        elif arg.is_Pow or isinstance(arg, exp):
            # Safe to pull the exponent out when the base is positive or the
            # exponent lies in [-1, 1] (or positivity is forced/polar).
            if force or (arg.exp.is_extended_real and (arg.base.is_positive or ((arg.exp+1)
               .is_positive and (arg.exp-1).is_nonpositive))) or arg.base.is_polar:
                b = arg.base
                e = arg.exp
                a = self.func(b)
                if isinstance(a, log):
                    return unpolarify(e) * a._eval_expand_log(**hints)
                else:
                    return unpolarify(e) * a
        elif isinstance(arg, Product):
            if force or arg.function.is_positive:
                # log of a product becomes a sum of logs.
                return Sum(log(arg.function), *arg.limits)
        return self.func(arg)
    def _eval_simplify(self, **kwargs):
        """Simplify the argument, optionally combining inverse functions,
        and keep whichever of the candidates measures smaller."""
        from sympy.simplify.simplify import expand_log, simplify, inversecombine
        if len(self.args) == 2:  # it's unevaluated
            return simplify(self.func(*self.args), **kwargs)
        expr = self.func(simplify(self.args[0], **kwargs))
        if kwargs['inverse']:
            expr = inversecombine(expr)
        expr = expand_log(expr, deep=True)
        return min([expr, self], key=kwargs['measure'])
def as_real_imag(self, deep=True, **hints):
from sympy import Abs, arg
sarg = self.args[0]
if deep:
sarg = self.args[0].expand(deep, **hints)
abs = Abs(sarg)
if abs == sarg:
return self, S.Zero
arg = arg(sarg)
if hints.get('log', False):
hints['complex'] = False
return (log(abs).expand(deep, **hints), arg)
else:
return log(abs), arg
    def _eval_is_rational(self):
        """log(x) is rational only for x == 1 (value 0); for other rational
        arguments the value is irrational."""
        s = self.func(*self.args)
        if s.func == self.func:
            if (self.args[0] - 1).is_zero:
                return True
            if s.args[0].is_rational and fuzzy_not((self.args[0] - 1).is_zero):
                return False
        else:
            # eval() produced something else; defer to its assumptions.
            return s.is_rational
    def _eval_is_algebraic(self):
        """log of an algebraic number != 1 is transcendental
        (Lindemann-Weierstrass)."""
        s = self.func(*self.args)
        if s.func == self.func:
            if (self.args[0] - 1).is_zero:
                return True
            elif fuzzy_not((self.args[0] - 1).is_zero):
                if self.args[0].is_algebraic:
                    return False
        else:
            return s.is_algebraic
    def _eval_is_extended_real(self):
        # log is real exactly for positive extended-real arguments.
        return self.args[0].is_extended_positive
    def _eval_is_complex(self):
        # log(z) is a (finite) complex number for complex nonzero z.
        z = self.args[0]
        return fuzzy_and([z.is_complex, fuzzy_not(z.is_zero)])
def _eval_is_finite(self):
arg = self.args[0]
if arg.is_zero:
return False
return arg.is_finite
    def _eval_is_extended_positive(self):
        # log(x) > 0 iff x > 1
        return (self.args[0] - 1).is_extended_positive
    def _eval_is_zero(self):
        # log(x) == 0 iff x == 1
        return (self.args[0] - 1).is_zero
    def _eval_is_extended_nonnegative(self):
        # log(x) >= 0 iff x >= 1
        return (self.args[0] - 1).is_extended_nonnegative
def _eval_nseries(self, x, n, logx, cdir=0):
from sympy import im, cancel, I, Order, logcombine
from itertools import product
if not logx:
logx = log(x)
if self.args[0] == x:
return logx
arg = self.args[0]
k, l = Wild("k"), Wild("l")
r = arg.match(k*x**l)
if r is not None:
k, l = r[k], r[l]
if l != 0 and not l.has(x) and not k.has(x):
r = log(k) + l*logx
return r
def coeff_exp(term, x):
coeff, exp = S.One, S.Zero
for factor in Mul.make_args(term):
if factor.has(x):
base, exp = factor.as_base_exp()
if base != x:
try:
return term.leadterm(x)
except ValueError:
return term, S.Zero
else:
coeff *= factor
return coeff, exp
try:
a, b = arg.leadterm(x)
s = arg.nseries(x, n=n+b, logx=logx)
except (ValueError, NotImplementedError, PoleError):
s = arg.nseries(x, n=n, logx=logx)
while s.is_Order:
n += 1
s = arg.nseries(x, n=n, logx=logx)
a, b = s.removeO().leadterm(x)
p = cancel(s/(a*x**b) - 1).expand().powsimp()
if p.has(exp):
p = logcombine(p)
if isinstance(p, Order):
n = p.getn()
_, d = coeff_exp(p, x)
if not d.is_positive:
return log(a) + b*logx + Order(x**n, x)
def mul(d1, d2):
res = {}
for e1, e2 in product(d1, d2):
ex = e1 + e2
if ex < n:
res[ex] = res.get(ex, S.Zero) + d1[e1]*d2[e2]
return res
pterms = {}
for term in Add.make_args(p):
co1, e1 = coeff_exp(term, x)
pterms[e1] = pterms.get(e1, S.Zero) + co1.removeO()
k = S.One
terms = {}
pk = pterms
while k*d < n:
coeff = -(-1)**k/k
for ex in pk:
terms[ex] = terms.get(ex, S.Zero) + coeff*pk[ex]
pk = mul(pk, pterms)
k += S.One
res = log(a) + b*logx
for ex in terms:
res += terms[ex]*x**(ex)
if cdir != 0:
cdir = self.args[0].dir(x, cdir)
if a.is_real and a.is_negative and im(cdir) < 0:
res -= 2*I*S.Pi
return res + Order(x**n, x)
    def _eval_as_leading_term(self, x, logx=None, cdir=0):
        """Leading term of log(arg) as x -> 0, branch-cut aware."""
        from sympy import I, im
        arg0 = self.args[0].together()
        arg = arg0.as_leading_term(x, cdir=cdir)
        x0 = arg0.subs(x, 0)
        if (x0 is S.NaN and logx is None):
            # Substitution failed; take a directed limit instead.
            x0 = arg.limit(x, 0, dir='-' if cdir < 0 else '+')
        if x0 in (S.NegativeInfinity, S.Infinity):
            raise PoleError("Cannot expand %s around 0" % (self))
        if x0 == 1:
            # log(1 + eps) ~ eps: expand about the zero of log instead.
            return (arg0 - S.One).as_leading_term(x)
        if cdir != 0:
            cdir = arg0.dir(x, cdir)
        if x0.is_real and x0.is_negative and im(cdir) < 0:
            # Approaching the branch cut from below: subtract the 2*pi*I jump.
            return self.func(x0) - 2*I*S.Pi
        return self.func(arg)
class LambertW(Function):
    r"""
    The Lambert W function ``W(x)``, the inverse of ``x*exp(x)``.  An
    optional second argument selects the branch ``k``.
    """
    # W has branch points at -1/e and at complex infinity.
    _singularities = (-Pow(S.Exp1, -1, evaluate=False), S.ComplexInfinity)
    @classmethod
    def eval(cls, x, k=None):
        """Evaluate known special values of W(x) on branch k."""
        if k == S.Zero:
            # Canonicalize W(x, 0) to the single-argument form.
            return cls(x)
        elif k is None:
            k = S.Zero
        if k.is_zero:
            # Principal-branch special values.
            if x.is_zero:
                return S.Zero
            if x is S.Exp1:
                return S.One
            if x == -1/S.Exp1:
                return S.NegativeOne
            if x == -log(2)/2:
                return -log(2)
            if x == 2*log(2):
                return log(2)
            if x == -S.Pi/2:
                return S.ImaginaryUnit*S.Pi/2
            if x == exp(1 + S.Exp1):
                return S.Exp1
            if x is S.Infinity:
                return S.Infinity
            if x.is_zero:  # NOTE(review): redundant with the check above
                return S.Zero
        if fuzzy_not(k.is_zero):
            # On every non-principal branch, W_k(0) = -oo.
            if x.is_zero:
                return S.NegativeInfinity
        if k is S.NegativeOne:
            # Special values on the k = -1 branch.
            if x == -S.Pi/2:
                return -S.ImaginaryUnit*S.Pi/2
            elif x == -1/S.Exp1:
                return S.NegativeOne
            elif x == -2*exp(-2):
                return -Integer(2)
    def fdiff(self, argindex=1):
        """Return the first derivative: W(x)/(x*(1 + W(x))) on any branch."""
        x = self.args[0]
        if len(self.args) == 1:
            if argindex == 1:
                return LambertW(x)/(x*(1 + LambertW(x)))
        else:
            k = self.args[1]
            if argindex == 1:
                return LambertW(x, k)/(x*(1 + LambertW(x, k)))
        raise ArgumentIndexError(self, argindex)
    def _eval_is_extended_real(self):
        """Realness of W_k(x): real on k = 0 for x > -1/e, on k = -1 for
        -1/e < x < 0, and never real (for real x) on other branches."""
        x = self.args[0]
        if len(self.args) == 1:
            k = S.Zero
        else:
            k = self.args[1]
        if k.is_zero:
            if (x + 1/S.Exp1).is_positive:
                return True
            elif (x + 1/S.Exp1).is_nonpositive:
                return False
        elif (k + 1).is_zero:
            if x.is_negative and (x + 1/S.Exp1).is_positive:
                return True
            elif x.is_nonpositive or (x + 1/S.Exp1).is_nonnegative:
                return False
        elif fuzzy_not(k.is_zero) and fuzzy_not((k + 1).is_zero):
            if x.is_extended_real:
                return False
    def _eval_is_finite(self):
        # W(x) is finite whenever its argument is.
        return self.args[0].is_finite
    def _eval_is_algebraic(self):
        """W of a nonzero algebraic number is transcendental; otherwise
        defer to whatever the evaluated form reports."""
        s = self.func(*self.args)
        if s.func == self.func:
            if fuzzy_not(self.args[0].is_zero) and self.args[0].is_algebraic:
                return False
        else:
            return s.is_algebraic
    def _eval_as_leading_term(self, x, logx=None, cdir=0):
        """Leading term on the principal branch: W(arg0) when arg0 != 0,
        otherwise the argument's own leading term (W(z) ~ z near 0)."""
        if len(self.args) == 1:
            arg = self.args[0]
            arg0 = arg.subs(x, 0).cancel()
            if not arg0.is_zero:
                return self.func(arg0)
            return arg.as_leading_term(x)
    def _eval_nseries(self, x, n, logx, cdir=0):
        """Series of the principal branch via the Maclaurin expansion
        W(z) = Sum_{k>=1} (-1)**(k-1) * k**(k-2)/(k-1)! * z**k."""
        if len(self.args) == 1:
            from sympy import Order, ceiling, expand_multinomial
            arg = self.args[0].nseries(x, n=n, logx=logx)
            lt = arg.compute_leading_term(x, logx=logx)
            lte = 1
            if lt.is_Pow:
                lte = lt.exp
            if ceiling(n/lte) >= 1:
                s = Add(*[(-S.One)**(k - 1)*Integer(k)**(k - 2)/
                          factorial(k - 1)*arg**k for k in range(1, ceiling(n/lte))])
                s = expand_multinomial(s)
            else:
                s = S.Zero
            return s + Order(x**n, x)
        # Non-principal branches fall back to the generic implementation.
        return super()._eval_nseries(x, n, logx)
def _eval_is_zero(self):
x = self.args[0]
if len(self.args) == 1:
k = S.Zero
else:
k = self.args[1]
if x.is_zero and k.is_zero:
return True
| true | true |
f7fb675b845ff6b54bbecfb3a0f01d376831a594 | 8,820 | py | Python | lib/markdown/extensions/toc.py | ParadiseDS/sublimetext-markdown-slideshow | b2bb2600e373341ccb9f8a0ba9ad2b799ab5b380 | [
"MIT"
] | 50 | 2015-01-13T04:57:49.000Z | 2021-04-10T21:51:52.000Z | lib/markdown/extensions/toc.py | ParadiseDS/sublimetext-markdown-slideshow | b2bb2600e373341ccb9f8a0ba9ad2b799ab5b380 | [
"MIT"
] | 6 | 2015-01-03T04:00:39.000Z | 2018-03-03T08:40:05.000Z | lib/markdown/extensions/toc.py | ParadiseDS/sublimetext-markdown-slideshow | b2bb2600e373341ccb9f8a0ba9ad2b799ab5b380 | [
"MIT"
] | 11 | 2015-01-17T17:58:36.000Z | 2019-12-15T17:34:08.000Z | """
Table of Contents Extension for Python-Markdown
===============================================
See <https://pythonhosted.org/Markdown/extensions/toc.html>
for documentation.
Original code Copyright 2008 [Jack Miller](http://codezen.org)
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..treeprocessors import Treeprocessor
from ..util import etree, parseBoolValue, AMP_SUBSTITUTE
from .headerid import slugify, unique, itertext, stashedHTML2text
import re
def order_toc_list(toc_list):
    """Nest a flat list of heading entries by their ``'level'`` key.

    [{'level': 1}, {'level': 2}]
    =>
    [{'level': 1, 'children': [{'level': 2, 'children': []}]}]

    Skipped and out-of-order levels are tolerated:

    [{'level': 2}, {'level': 1}]
    =>
    [{'level': 2, 'children': []}, {'level': 1, 'children': []}]

    The input list is consumed (emptied) in the process.
    """
    if not toc_list:
        return []
    # Seed the result with the first entry.
    root = toc_list.pop(0)
    root['children'] = []
    level_stack = [root['level']]
    parent_stack = []
    result = [root]
    previous = root
    # Nest the remaining entries relative to the running level stack.
    while toc_list:
        entry = toc_list.pop(0)
        level = entry['level']
        entry['children'] = []
        if level < level_stack[-1]:
            # Climbing back up: discard levels/parents we are at or above.
            level_stack.pop()
            drop = 0
            for ancestor in reversed(parent_stack):
                if level <= ancestor['level']:
                    drop += 1
                else:
                    break
            if drop:
                level_stack = level_stack[:-drop]
                parent_stack = parent_stack[:-drop]
            level_stack.append(level)
        if level == level_stack[-1]:
            # Sibling of the current parent's children (or a new root).
            target = parent_stack[-1]['children'] if parent_stack else result
            target.append(entry)
        else:
            # Deeper level: previous entry becomes the parent.
            previous['children'].append(entry)
            parent_stack.append(previous)
            level_stack.append(level)
        previous = entry
    return result
class TocTreeprocessor(Treeprocessor):
    """Tree processor that assigns heading ids and builds the TOC <div>."""
    # Iterator wrapper to get parent and child all at once
    def iterparent(self, root):
        for parent in root.getiterator():
            for child in parent:
                yield parent, child
    def add_anchor(self, c, elem_id): #@ReservedAssignment
        """Wrap the heading's content in a self-referencing <a> link."""
        anchor = etree.Element("a")
        anchor.text = c.text
        anchor.attrib["href"] = "#" + elem_id
        anchor.attrib["class"] = "toclink"
        c.text = ""
        for elem in c.getchildren():
            anchor.append(elem)
            c.remove(elem)
        c.append(anchor)
    def add_permalink(self, c, elem_id):
        """Append a Sphinx-style permalink <a> to the heading element."""
        permalink = etree.Element("a")
        # Use the pilcrow entity when permalink is just True, else the
        # configured link text.
        permalink.text = ("%spara;" % AMP_SUBSTITUTE
                          if self.use_permalinks is True else self.use_permalinks)
        permalink.attrib["href"] = "#" + elem_id
        permalink.attrib["class"] = "headerlink"
        permalink.attrib["title"] = "Permanent link"
        c.append(permalink)
    def build_toc_etree(self, div, toc_list):
        """Render the nested toc_list as <ul>/<li> elements inside div."""
        # Add title to the div
        if self.config["title"]:
            header = etree.SubElement(div, "span")
            header.attrib["class"] = "toctitle"
            header.text = self.config["title"]
        def build_etree_ul(toc_list, parent):
            ul = etree.SubElement(parent, "ul")
            for item in toc_list:
                # List item link, to be inserted into the toc div
                li = etree.SubElement(ul, "li")
                link = etree.SubElement(li, "a")
                link.text = item.get('name', '')
                link.attrib["href"] = '#' + item.get('id', '')
                if item['children']:
                    build_etree_ul(item['children'], li)
            return ul
        return build_etree_ul(toc_list, div)
    def run(self, doc):
        """Assign ids to headings, replace the marker, and build the TOC."""
        div = etree.Element("div")
        div.attrib["class"] = "toc"
        header_rgx = re.compile("[Hh][123456]")
        self.use_anchors = parseBoolValue(self.config["anchorlink"])
        self.use_permalinks = parseBoolValue(self.config["permalink"], False)
        if self.use_permalinks is None:
            # Non-boolean value: treat it as the permalink's link text.
            self.use_permalinks = self.config["permalink"]
        # Get a list of id attributes
        used_ids = set()
        for c in doc.getiterator():
            if "id" in c.attrib:
                used_ids.add(c.attrib["id"])
        toc_list = []
        marker_found = False
        for (p, c) in self.iterparent(doc):
            text = ''.join(itertext(c)).strip()
            if not text:
                continue
            # To keep the output from screwing up the
            # validation by putting a <div> inside of a <p>
            # we actually replace the <p> in its entirety.
            # We do not allow the marker inside a header as that
            # would cause an endless loop of placing a new TOC
            # inside previously generated TOC.
            if c.text and c.text.strip() == self.config["marker"] and \
               not header_rgx.match(c.tag) and c.tag not in ['pre', 'code']:
                for i in range(len(p)):
                    if p[i] == c:
                        p[i] = div
                        break
                marker_found = True
            if header_rgx.match(c.tag):
                # Do not override pre-existing ids
                if not "id" in c.attrib:
                    elem_id = stashedHTML2text(text, self.markdown)
                    elem_id = unique(self.config["slugify"](elem_id, '-'), used_ids)
                    c.attrib["id"] = elem_id
                else:
                    elem_id = c.attrib["id"]
                tag_level = int(c.tag[-1])
                toc_list.append({'level': tag_level,
                                 'id': elem_id,
                                 'name': text})
                if self.use_anchors:
                    self.add_anchor(c, elem_id)
                if self.use_permalinks:
                    self.add_permalink(c, elem_id)
        toc_list_nested = order_toc_list(toc_list)
        self.build_toc_etree(div, toc_list_nested)
        prettify = self.markdown.treeprocessors.get('prettify')
        if prettify: prettify.run(div)
        if not marker_found:
            # serialize and attach to markdown instance.
            toc = self.markdown.serializer(div)
            for pp in self.markdown.postprocessors.values():
                toc = pp.run(toc)
            self.markdown.toc = toc
class TocExtension(Extension):
    """Markdown extension registering the TOC tree processor."""
    TreeProcessorClass = TocTreeprocessor
    def __init__(self, *args, **kwargs):
        # Default configuration: each entry is [default value, help text].
        self.config = {
            "marker" : ["[TOC]",
                "Text to find and replace with Table of Contents - "
                "Defaults to \"[TOC]\""],
            "slugify" : [slugify,
                "Function to generate anchors based on header text - "
                "Defaults to the headerid ext's slugify function."],
            "title" : ["",
                "Title to insert into TOC <div> - "
                "Defaults to an empty string"],
            "anchorlink" : [0,
                "1 if header should be a self link - "
                "Defaults to 0"],
            "permalink" : [0,
                "1 or link text if a Sphinx-style permalink should be added - "
                "Defaults to 0"]
        }
        super(TocExtension, self).__init__(*args, **kwargs)
    def extendMarkdown(self, md, md_globals):
        """Register the tree processor at the end of the pipeline."""
        tocext = self.TreeProcessorClass(md)
        tocext.config = self.getConfigs()
        # Headerid ext is set to '>prettify'. With this set to '_end',
        # it should always come after headerid ext (and honor ids assigned
        # by the header id extension) if both are used. Same goes for
        # attr_list extension. This must come last because we don't want
        # to redefine ids after toc is created. But we do want toc prettified.
        md.treeprocessors.add("toc", tocext, "_end")
def makeExtension(*args, **kwargs):
    """Entry point used by Markdown to instantiate this extension."""
    return TocExtension(*args, **kwargs)
| 36.147541 | 84 | 0.538435 |
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..treeprocessors import Treeprocessor
from ..util import etree, parseBoolValue, AMP_SUBSTITUTE
from .headerid import slugify, unique, itertext, stashedHTML2text
import re
def order_toc_list(toc_list):
    """Given an unsorted list with errors and skips, return a nested one.

    The input list is consumed; each entry gains a 'children' list.
    """
    ordered_list = []
    if len(toc_list):
        # Initialize everything by processing the first entry.
        last = toc_list.pop(0)
        last['children'] = []
        levels = [last['level']]
        ordered_list.append(last)
        parents = []
        # Walk the rest, nesting the entries properly.
        while toc_list:
            t = toc_list.pop(0)
            current_level = t['level']
            t['children'] = []
            # Reduce depth if current level < last item's level.
            if current_level < levels[-1]:
                # Pop last level since we know we are less than it
                levels.pop()
                # Pop parents and levels we are less than or equal to
                to_pop = 0
                for p in reversed(parents):
                    if current_level <= p['level']:
                        to_pop += 1
                    else:
                        break
                if to_pop:
                    levels = levels[:-to_pop]
                    parents = parents[:-to_pop]
                # Note current level as last
                levels.append(current_level)
            # Level is the same, so append to the current parent (if available)
            if current_level == levels[-1]:
                (parents[-1]['children'] if parents else ordered_list).append(t)
            # Current level is > last item's level,
            # so make the last item a parent and append current as child.
            else:
                last['children'].append(t)
                parents.append(last)
                levels.append(current_level)
            last = t
    return ordered_list
class TocTreeprocessor(Treeprocessor):
    """Tree processor that assigns heading ids and builds the TOC <div>."""
    def iterparent(self, root):
        # Yield (parent, child) pairs for the whole tree.
        for parent in root.getiterator():
            for child in parent:
                yield parent, child
    def add_anchor(self, c, elem_id):
        """Wrap the heading's content in a self-referencing <a> link."""
        anchor = etree.Element("a")
        anchor.text = c.text
        anchor.attrib["href"] = "#" + elem_id
        anchor.attrib["class"] = "toclink"
        c.text = ""
        for elem in c.getchildren():
            anchor.append(elem)
            c.remove(elem)
        c.append(anchor)
    def add_permalink(self, c, elem_id):
        """Append a Sphinx-style permalink <a> to the heading element."""
        permalink = etree.Element("a")
        # Pilcrow entity when permalink is just True, else the link text.
        permalink.text = ("%spara;" % AMP_SUBSTITUTE
            if self.use_permalinks is True else self.use_permalinks)
        permalink.attrib["href"] = "#" + elem_id
        permalink.attrib["class"] = "headerlink"
        permalink.attrib["title"] = "Permanent link"
        c.append(permalink)
    def build_toc_etree(self, div, toc_list):
        """Render the nested toc_list as <ul>/<li> elements inside div."""
        if self.config["title"]:
            header = etree.SubElement(div, "span")
            header.attrib["class"] = "toctitle"
            header.text = self.config["title"]
        def build_etree_ul(toc_list, parent):
            ul = etree.SubElement(parent, "ul")
            for item in toc_list:
                li = etree.SubElement(ul, "li")
                link = etree.SubElement(li, "a")
                link.text = item.get('name', '')
                link.attrib["href"] = '#' + item.get('id', '')
                if item['children']:
                    build_etree_ul(item['children'], li)
            return ul
        return build_etree_ul(toc_list, div)
    def run(self, doc):
        """Assign ids to headings, replace the marker, and build the TOC."""
        div = etree.Element("div")
        div.attrib["class"] = "toc"
        header_rgx = re.compile("[Hh][123456]")
        self.use_anchors = parseBoolValue(self.config["anchorlink"])
        self.use_permalinks = parseBoolValue(self.config["permalink"], False)
        if self.use_permalinks is None:
            # Non-boolean value: treat it as the permalink's link text.
            self.use_permalinks = self.config["permalink"]
        # Collect ids already present so new ones stay unique.
        used_ids = set()
        for c in doc.getiterator():
            if "id" in c.attrib:
                used_ids.add(c.attrib["id"])
        toc_list = []
        marker_found = False
        for (p, c) in self.iterparent(doc):
            text = ''.join(itertext(c)).strip()
            if not text:
                continue
            # Replace the marker element entirely; markers inside headers
            # or code blocks are ignored.
            if c.text and c.text.strip() == self.config["marker"] and \
               not header_rgx.match(c.tag) and c.tag not in ['pre', 'code']:
                for i in range(len(p)):
                    if p[i] == c:
                        p[i] = div
                        break
                marker_found = True
            if header_rgx.match(c.tag):
                # Do not override pre-existing ids.
                if not "id" in c.attrib:
                    elem_id = stashedHTML2text(text, self.markdown)
                    elem_id = unique(self.config["slugify"](elem_id, '-'), used_ids)
                    c.attrib["id"] = elem_id
                else:
                    elem_id = c.attrib["id"]
                tag_level = int(c.tag[-1])
                toc_list.append({'level': tag_level,
                    'id': elem_id,
                    'name': text})
                if self.use_anchors:
                    self.add_anchor(c, elem_id)
                if self.use_permalinks:
                    self.add_permalink(c, elem_id)
        toc_list_nested = order_toc_list(toc_list)
        self.build_toc_etree(div, toc_list_nested)
        prettify = self.markdown.treeprocessors.get('prettify')
        if prettify: prettify.run(div)
        if not marker_found:
            # No marker in the document: expose the serialized TOC instead.
            toc = self.markdown.serializer(div)
            for pp in self.markdown.postprocessors.values():
                toc = pp.run(toc)
            self.markdown.toc = toc
class TocExtension(Extension):
    """Markdown extension registering the TOC tree processor."""
    TreeProcessorClass = TocTreeprocessor
    def __init__(self, *args, **kwargs):
        # Default configuration: each entry is [default value, help text].
        self.config = {
            "marker" : ["[TOC]",
                "Text to find and replace with Table of Contents - "
                "Defaults to \"[TOC]\""],
            "slugify" : [slugify,
                "Function to generate anchors based on header text - "
                "Defaults to the headerid ext's slugify function."],
            "title" : ["",
                "Title to insert into TOC <div> - "
                "Defaults to an empty string"],
            "anchorlink" : [0,
                "1 if header should be a self link - "
                "Defaults to 0"],
            "permalink" : [0,
                "1 or link text if a Sphinx-style permalink should be added - "
                "Defaults to 0"]
        }
        super(TocExtension, self).__init__(*args, **kwargs)
    def extendMarkdown(self, md, md_globals):
        """Register the tree processor at the end of the pipeline."""
        tocext = self.TreeProcessorClass(md)
        tocext.config = self.getConfigs()
        # Headerid ext is set to '>prettify'. With this set to '_end',
        # it should always come after headerid ext (and honor ids assigned
        # by the header id extension) if both are used. Same goes for
        # attr_list extension. This must come last because we don't want
        # ids redefined after the toc is created.
        md.treeprocessors.add("toc", tocext, "_end")
def makeExtension(*args, **kwargs):
    """Entry point used by Markdown to instantiate this extension."""
    return TocExtension(*args, **kwargs)
| true | true |
f7fb686890c8970bf5120e2e7584066e2b14baa8 | 40,835 | py | Python | sdk/python/pulumi_aws/elasticloadbalancingv2/listener.py | chivandikwa/pulumi-aws | 19c08bf9dcb90544450ffa4eec7bf6751058fde2 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-11-10T16:33:40.000Z | 2021-11-10T16:33:40.000Z | sdk/python/pulumi_aws/elasticloadbalancingv2/listener.py | chivandikwa/pulumi-aws | 19c08bf9dcb90544450ffa4eec7bf6751058fde2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/elasticloadbalancingv2/listener.py | chivandikwa/pulumi-aws | 19c08bf9dcb90544450ffa4eec7bf6751058fde2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ListenerArgs', 'Listener']
@pulumi.input_type
class ListenerArgs:
def __init__(__self__, *,
default_actions: pulumi.Input[Sequence[pulumi.Input['ListenerDefaultActionArgs']]],
load_balancer_arn: pulumi.Input[str],
alpn_policy: Optional[pulumi.Input[str]] = None,
certificate_arn: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[str]] = None,
ssl_policy: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Listener resource.
:param pulumi.Input[Sequence[pulumi.Input['ListenerDefaultActionArgs']]] default_actions: Configuration block for default actions. Detailed below.
:param pulumi.Input[str] load_balancer_arn: ARN of the load balancer.
:param pulumi.Input[str] alpn_policy: Name of the Application-Layer Protocol Negotiation (ALPN) policy. Can be set if `protocol` is `TLS`. Valid values are `HTTP1Only`, `HTTP2Only`, `HTTP2Optional`, `HTTP2Preferred`, and `None`.
:param pulumi.Input[str] certificate_arn: ARN of the default SSL server certificate. Exactly one certificate is required if the protocol is HTTPS. For adding additional SSL certificates, see the `lb.ListenerCertificate` resource.
:param pulumi.Input[int] port: Port. Specify a value from `1` to `65535` or `#{port}`. Defaults to `#{port}`.
:param pulumi.Input[str] protocol: Protocol. Valid values are `HTTP`, `HTTPS`, or `#{protocol}`. Defaults to `#{protocol}`.
:param pulumi.Input[str] ssl_policy: Name of the SSL Policy for the listener. Required if `protocol` is `HTTPS` or `TLS`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
pulumi.set(__self__, "default_actions", default_actions)
pulumi.set(__self__, "load_balancer_arn", load_balancer_arn)
if alpn_policy is not None:
pulumi.set(__self__, "alpn_policy", alpn_policy)
if certificate_arn is not None:
pulumi.set(__self__, "certificate_arn", certificate_arn)
if port is not None:
pulumi.set(__self__, "port", port)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if ssl_policy is not None:
pulumi.set(__self__, "ssl_policy", ssl_policy)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="defaultActions")
def default_actions(self) -> pulumi.Input[Sequence[pulumi.Input['ListenerDefaultActionArgs']]]:
"""
Configuration block for default actions. Detailed below.
"""
return pulumi.get(self, "default_actions")
@default_actions.setter
def default_actions(self, value: pulumi.Input[Sequence[pulumi.Input['ListenerDefaultActionArgs']]]):
pulumi.set(self, "default_actions", value)
@property
@pulumi.getter(name="loadBalancerArn")
def load_balancer_arn(self) -> pulumi.Input[str]:
"""
ARN of the load balancer.
"""
return pulumi.get(self, "load_balancer_arn")
@load_balancer_arn.setter
def load_balancer_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "load_balancer_arn", value)
@property
@pulumi.getter(name="alpnPolicy")
def alpn_policy(self) -> Optional[pulumi.Input[str]]:
"""
Name of the Application-Layer Protocol Negotiation (ALPN) policy. Can be set if `protocol` is `TLS`. Valid values are `HTTP1Only`, `HTTP2Only`, `HTTP2Optional`, `HTTP2Preferred`, and `None`.
"""
return pulumi.get(self, "alpn_policy")
@alpn_policy.setter
def alpn_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "alpn_policy", value)
@property
@pulumi.getter(name="certificateArn")
def certificate_arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the default SSL server certificate. Exactly one certificate is required if the protocol is HTTPS. For adding additional SSL certificates, see the `lb.ListenerCertificate` resource.
"""
return pulumi.get(self, "certificate_arn")
@certificate_arn.setter
def certificate_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "certificate_arn", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
"""
Port. Specify a value from `1` to `65535` or `#{port}`. Defaults to `#{port}`.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def protocol(self) -> Optional[pulumi.Input[str]]:
"""
Protocol. Valid values are `HTTP`, `HTTPS`, or `#{protocol}`. Defaults to `#{protocol}`.
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter(name="sslPolicy")
def ssl_policy(self) -> Optional[pulumi.Input[str]]:
"""
Name of the SSL Policy for the listener. Required if `protocol` is `HTTPS` or `TLS`.
"""
return pulumi.get(self, "ssl_policy")
@ssl_policy.setter
def ssl_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ssl_policy", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _ListenerState:
def __init__(__self__, *,
alpn_policy: Optional[pulumi.Input[str]] = None,
arn: Optional[pulumi.Input[str]] = None,
certificate_arn: Optional[pulumi.Input[str]] = None,
default_actions: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerDefaultActionArgs']]]] = None,
load_balancer_arn: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[str]] = None,
ssl_policy: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering Listener resources.
:param pulumi.Input[str] alpn_policy: Name of the Application-Layer Protocol Negotiation (ALPN) policy. Can be set if `protocol` is `TLS`. Valid values are `HTTP1Only`, `HTTP2Only`, `HTTP2Optional`, `HTTP2Preferred`, and `None`.
:param pulumi.Input[str] arn: ARN of the target group.
:param pulumi.Input[str] certificate_arn: ARN of the default SSL server certificate. Exactly one certificate is required if the protocol is HTTPS. For adding additional SSL certificates, see the `lb.ListenerCertificate` resource.
:param pulumi.Input[Sequence[pulumi.Input['ListenerDefaultActionArgs']]] default_actions: Configuration block for default actions. Detailed below.
:param pulumi.Input[str] load_balancer_arn: ARN of the load balancer.
:param pulumi.Input[int] port: Port. Specify a value from `1` to `65535` or `#{port}`. Defaults to `#{port}`.
:param pulumi.Input[str] protocol: Protocol. Valid values are `HTTP`, `HTTPS`, or `#{protocol}`. Defaults to `#{protocol}`.
:param pulumi.Input[str] ssl_policy: Name of the SSL Policy for the listener. Required if `protocol` is `HTTPS` or `TLS`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
if alpn_policy is not None:
pulumi.set(__self__, "alpn_policy", alpn_policy)
if arn is not None:
pulumi.set(__self__, "arn", arn)
if certificate_arn is not None:
pulumi.set(__self__, "certificate_arn", certificate_arn)
if default_actions is not None:
pulumi.set(__self__, "default_actions", default_actions)
if load_balancer_arn is not None:
pulumi.set(__self__, "load_balancer_arn", load_balancer_arn)
if port is not None:
pulumi.set(__self__, "port", port)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if ssl_policy is not None:
pulumi.set(__self__, "ssl_policy", ssl_policy)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
@property
@pulumi.getter(name="alpnPolicy")
def alpn_policy(self) -> Optional[pulumi.Input[str]]:
"""
Name of the Application-Layer Protocol Negotiation (ALPN) policy. Can be set if `protocol` is `TLS`. Valid values are `HTTP1Only`, `HTTP2Only`, `HTTP2Optional`, `HTTP2Preferred`, and `None`.
"""
return pulumi.get(self, "alpn_policy")
@alpn_policy.setter
def alpn_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "alpn_policy", value)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the target group.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="certificateArn")
def certificate_arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the default SSL server certificate. Exactly one certificate is required if the protocol is HTTPS. For adding additional SSL certificates, see the `lb.ListenerCertificate` resource.
"""
return pulumi.get(self, "certificate_arn")
@certificate_arn.setter
def certificate_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "certificate_arn", value)
@property
@pulumi.getter(name="defaultActions")
def default_actions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ListenerDefaultActionArgs']]]]:
"""
Configuration block for default actions. Detailed below.
"""
return pulumi.get(self, "default_actions")
@default_actions.setter
def default_actions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerDefaultActionArgs']]]]):
pulumi.set(self, "default_actions", value)
@property
@pulumi.getter(name="loadBalancerArn")
def load_balancer_arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the load balancer.
"""
return pulumi.get(self, "load_balancer_arn")
@load_balancer_arn.setter
def load_balancer_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "load_balancer_arn", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
"""
Port. Specify a value from `1` to `65535` or `#{port}`. Defaults to `#{port}`.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def protocol(self) -> Optional[pulumi.Input[str]]:
"""
Protocol. Valid values are `HTTP`, `HTTPS`, or `#{protocol}`. Defaults to `#{protocol}`.
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter(name="sslPolicy")
def ssl_policy(self) -> Optional[pulumi.Input[str]]:
"""
Name of the SSL Policy for the listener. Required if `protocol` is `HTTPS` or `TLS`.
"""
return pulumi.get(self, "ssl_policy")
@ssl_policy.setter
def ssl_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ssl_policy", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
warnings.warn("""aws.elasticloadbalancingv2.Listener has been deprecated in favor of aws.lb.Listener""", DeprecationWarning)
class Listener(pulumi.CustomResource):
warnings.warn("""aws.elasticloadbalancingv2.Listener has been deprecated in favor of aws.lb.Listener""", DeprecationWarning)
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alpn_policy: Optional[pulumi.Input[str]] = None,
certificate_arn: Optional[pulumi.Input[str]] = None,
default_actions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerDefaultActionArgs']]]]] = None,
load_balancer_arn: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[str]] = None,
ssl_policy: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Provides a Load Balancer Listener resource.
> **Note:** `alb.Listener` is known as `lb.Listener`. The functionality is identical.
## Example Usage
### Forward Action
```python
import pulumi
import pulumi_aws as aws
front_end_load_balancer = aws.lb.LoadBalancer("frontEndLoadBalancer")
# ...
front_end_target_group = aws.lb.TargetGroup("frontEndTargetGroup")
# ...
front_end_listener = aws.lb.Listener("frontEndListener",
load_balancer_arn=front_end_load_balancer.arn,
port=443,
protocol="HTTPS",
ssl_policy="ELBSecurityPolicy-2016-08",
certificate_arn="arn:aws:iam::187416307283:server-certificate/test_cert_rab3wuqwgja25ct3n4jdj2tzu4",
default_actions=[aws.lb.ListenerDefaultActionArgs(
type="forward",
target_group_arn=front_end_target_group.arn,
)])
```
To a NLB:
```python
import pulumi
import pulumi_aws as aws
front_end = aws.lb.Listener("frontEnd",
load_balancer_arn=aws_lb["front_end"]["arn"],
port=443,
protocol="TLS",
certificate_arn="arn:aws:iam::187416307283:server-certificate/test_cert_rab3wuqwgja25ct3n4jdj2tzu4",
alpn_policy="HTTP2Preferred",
default_actions=[aws.lb.ListenerDefaultActionArgs(
type="forward",
target_group_arn=aws_lb_target_group["front_end"]["arn"],
)])
```
### Redirect Action
```python
import pulumi
import pulumi_aws as aws
front_end_load_balancer = aws.lb.LoadBalancer("frontEndLoadBalancer")
# ...
front_end_listener = aws.lb.Listener("frontEndListener",
load_balancer_arn=front_end_load_balancer.arn,
port=80,
protocol="HTTP",
default_actions=[aws.lb.ListenerDefaultActionArgs(
type="redirect",
redirect=aws.lb.ListenerDefaultActionRedirectArgs(
port="443",
protocol="HTTPS",
status_code="HTTP_301",
),
)])
```
### Fixed-response Action
```python
import pulumi
import pulumi_aws as aws
front_end_load_balancer = aws.lb.LoadBalancer("frontEndLoadBalancer")
# ...
front_end_listener = aws.lb.Listener("frontEndListener",
load_balancer_arn=front_end_load_balancer.arn,
port=80,
protocol="HTTP",
default_actions=[aws.lb.ListenerDefaultActionArgs(
type="fixed-response",
fixed_response=aws.lb.ListenerDefaultActionFixedResponseArgs(
content_type="text/plain",
message_body="Fixed response content",
status_code="200",
),
)])
```
### Authenticate-cognito Action
```python
import pulumi
import pulumi_aws as aws
front_end_load_balancer = aws.lb.LoadBalancer("frontEndLoadBalancer")
# ...
front_end_target_group = aws.lb.TargetGroup("frontEndTargetGroup")
# ...
pool = aws.cognito.UserPool("pool")
# ...
client = aws.cognito.UserPoolClient("client")
# ...
domain = aws.cognito.UserPoolDomain("domain")
# ...
front_end_listener = aws.lb.Listener("frontEndListener",
load_balancer_arn=front_end_load_balancer.arn,
port=80,
protocol="HTTP",
default_actions=[
aws.lb.ListenerDefaultActionArgs(
type="authenticate-cognito",
authenticate_cognito=aws.lb.ListenerDefaultActionAuthenticateCognitoArgs(
user_pool_arn=pool.arn,
user_pool_client_id=client.id,
user_pool_domain=domain.domain,
),
),
aws.lb.ListenerDefaultActionArgs(
type="forward",
target_group_arn=front_end_target_group.arn,
),
])
```
### Authenticate-OIDC Action
```python
import pulumi
import pulumi_aws as aws
front_end_load_balancer = aws.lb.LoadBalancer("frontEndLoadBalancer")
# ...
front_end_target_group = aws.lb.TargetGroup("frontEndTargetGroup")
# ...
front_end_listener = aws.lb.Listener("frontEndListener",
load_balancer_arn=front_end_load_balancer.arn,
port=80,
protocol="HTTP",
default_actions=[
aws.lb.ListenerDefaultActionArgs(
type="authenticate-oidc",
authenticate_oidc=aws.lb.ListenerDefaultActionAuthenticateOidcArgs(
authorization_endpoint="https://example.com/authorization_endpoint",
client_id="client_id",
client_secret="client_secret",
issuer="https://example.com",
token_endpoint="https://example.com/token_endpoint",
user_info_endpoint="https://example.com/user_info_endpoint",
),
),
aws.lb.ListenerDefaultActionArgs(
type="forward",
target_group_arn=front_end_target_group.arn,
),
])
```
### Gateway Load Balancer Listener
```python
import pulumi
import pulumi_aws as aws
example_load_balancer = aws.lb.LoadBalancer("exampleLoadBalancer",
load_balancer_type="gateway",
subnet_mappings=[aws.lb.LoadBalancerSubnetMappingArgs(
subnet_id=aws_subnet["example"]["id"],
)])
example_target_group = aws.lb.TargetGroup("exampleTargetGroup",
port=6081,
protocol="GENEVE",
vpc_id=aws_vpc["example"]["id"],
health_check=aws.lb.TargetGroupHealthCheckArgs(
port="80",
protocol="HTTP",
))
example_listener = aws.lb.Listener("exampleListener",
load_balancer_arn=example_load_balancer.id,
default_actions=[aws.lb.ListenerDefaultActionArgs(
target_group_arn=example_target_group.id,
type="forward",
)])
```
## Import
Listeners can be imported using their ARN, e.g.,
```sh
$ pulumi import aws:elasticloadbalancingv2/listener:Listener front_end arn:aws:elasticloadbalancing:us-west-2:187416307283:listener/app/front-end-alb/8e4497da625e2d8a/9ab28ade35828f96
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] alpn_policy: Name of the Application-Layer Protocol Negotiation (ALPN) policy. Can be set if `protocol` is `TLS`. Valid values are `HTTP1Only`, `HTTP2Only`, `HTTP2Optional`, `HTTP2Preferred`, and `None`.
:param pulumi.Input[str] certificate_arn: ARN of the default SSL server certificate. Exactly one certificate is required if the protocol is HTTPS. For adding additional SSL certificates, see the `lb.ListenerCertificate` resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerDefaultActionArgs']]]] default_actions: Configuration block for default actions. Detailed below.
:param pulumi.Input[str] load_balancer_arn: ARN of the load balancer.
:param pulumi.Input[int] port: Port. Specify a value from `1` to `65535` or `#{port}`. Defaults to `#{port}`.
:param pulumi.Input[str] protocol: Protocol. Valid values are `HTTP`, `HTTPS`, or `#{protocol}`. Defaults to `#{protocol}`.
:param pulumi.Input[str] ssl_policy: Name of the SSL Policy for the listener. Required if `protocol` is `HTTPS` or `TLS`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ListenerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Load Balancer Listener resource.
> **Note:** `alb.Listener` is known as `lb.Listener`. The functionality is identical.
## Example Usage
### Forward Action
```python
import pulumi
import pulumi_aws as aws
front_end_load_balancer = aws.lb.LoadBalancer("frontEndLoadBalancer")
# ...
front_end_target_group = aws.lb.TargetGroup("frontEndTargetGroup")
# ...
front_end_listener = aws.lb.Listener("frontEndListener",
load_balancer_arn=front_end_load_balancer.arn,
port=443,
protocol="HTTPS",
ssl_policy="ELBSecurityPolicy-2016-08",
certificate_arn="arn:aws:iam::187416307283:server-certificate/test_cert_rab3wuqwgja25ct3n4jdj2tzu4",
default_actions=[aws.lb.ListenerDefaultActionArgs(
type="forward",
target_group_arn=front_end_target_group.arn,
)])
```
To a NLB:
```python
import pulumi
import pulumi_aws as aws
front_end = aws.lb.Listener("frontEnd",
load_balancer_arn=aws_lb["front_end"]["arn"],
port=443,
protocol="TLS",
certificate_arn="arn:aws:iam::187416307283:server-certificate/test_cert_rab3wuqwgja25ct3n4jdj2tzu4",
alpn_policy="HTTP2Preferred",
default_actions=[aws.lb.ListenerDefaultActionArgs(
type="forward",
target_group_arn=aws_lb_target_group["front_end"]["arn"],
)])
```
### Redirect Action
```python
import pulumi
import pulumi_aws as aws
front_end_load_balancer = aws.lb.LoadBalancer("frontEndLoadBalancer")
# ...
front_end_listener = aws.lb.Listener("frontEndListener",
load_balancer_arn=front_end_load_balancer.arn,
port=80,
protocol="HTTP",
default_actions=[aws.lb.ListenerDefaultActionArgs(
type="redirect",
redirect=aws.lb.ListenerDefaultActionRedirectArgs(
port="443",
protocol="HTTPS",
status_code="HTTP_301",
),
)])
```
### Fixed-response Action
```python
import pulumi
import pulumi_aws as aws
front_end_load_balancer = aws.lb.LoadBalancer("frontEndLoadBalancer")
# ...
front_end_listener = aws.lb.Listener("frontEndListener",
load_balancer_arn=front_end_load_balancer.arn,
port=80,
protocol="HTTP",
default_actions=[aws.lb.ListenerDefaultActionArgs(
type="fixed-response",
fixed_response=aws.lb.ListenerDefaultActionFixedResponseArgs(
content_type="text/plain",
message_body="Fixed response content",
status_code="200",
),
)])
```
### Authenticate-cognito Action
```python
import pulumi
import pulumi_aws as aws
front_end_load_balancer = aws.lb.LoadBalancer("frontEndLoadBalancer")
# ...
front_end_target_group = aws.lb.TargetGroup("frontEndTargetGroup")
# ...
pool = aws.cognito.UserPool("pool")
# ...
client = aws.cognito.UserPoolClient("client")
# ...
domain = aws.cognito.UserPoolDomain("domain")
# ...
front_end_listener = aws.lb.Listener("frontEndListener",
load_balancer_arn=front_end_load_balancer.arn,
port=80,
protocol="HTTP",
default_actions=[
aws.lb.ListenerDefaultActionArgs(
type="authenticate-cognito",
authenticate_cognito=aws.lb.ListenerDefaultActionAuthenticateCognitoArgs(
user_pool_arn=pool.arn,
user_pool_client_id=client.id,
user_pool_domain=domain.domain,
),
),
aws.lb.ListenerDefaultActionArgs(
type="forward",
target_group_arn=front_end_target_group.arn,
),
])
```
### Authenticate-OIDC Action
```python
import pulumi
import pulumi_aws as aws
front_end_load_balancer = aws.lb.LoadBalancer("frontEndLoadBalancer")
# ...
front_end_target_group = aws.lb.TargetGroup("frontEndTargetGroup")
# ...
front_end_listener = aws.lb.Listener("frontEndListener",
load_balancer_arn=front_end_load_balancer.arn,
port=80,
protocol="HTTP",
default_actions=[
aws.lb.ListenerDefaultActionArgs(
type="authenticate-oidc",
authenticate_oidc=aws.lb.ListenerDefaultActionAuthenticateOidcArgs(
authorization_endpoint="https://example.com/authorization_endpoint",
client_id="client_id",
client_secret="client_secret",
issuer="https://example.com",
token_endpoint="https://example.com/token_endpoint",
user_info_endpoint="https://example.com/user_info_endpoint",
),
),
aws.lb.ListenerDefaultActionArgs(
type="forward",
target_group_arn=front_end_target_group.arn,
),
])
```
### Gateway Load Balancer Listener
```python
import pulumi
import pulumi_aws as aws
example_load_balancer = aws.lb.LoadBalancer("exampleLoadBalancer",
load_balancer_type="gateway",
subnet_mappings=[aws.lb.LoadBalancerSubnetMappingArgs(
subnet_id=aws_subnet["example"]["id"],
)])
example_target_group = aws.lb.TargetGroup("exampleTargetGroup",
port=6081,
protocol="GENEVE",
vpc_id=aws_vpc["example"]["id"],
health_check=aws.lb.TargetGroupHealthCheckArgs(
port="80",
protocol="HTTP",
))
example_listener = aws.lb.Listener("exampleListener",
load_balancer_arn=example_load_balancer.id,
default_actions=[aws.lb.ListenerDefaultActionArgs(
target_group_arn=example_target_group.id,
type="forward",
)])
```
## Import
Listeners can be imported using their ARN, e.g.,
```sh
$ pulumi import aws:elasticloadbalancingv2/listener:Listener front_end arn:aws:elasticloadbalancing:us-west-2:187416307283:listener/app/front-end-alb/8e4497da625e2d8a/9ab28ade35828f96
```
:param str resource_name: The name of the resource.
:param ListenerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ListenerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alpn_policy: Optional[pulumi.Input[str]] = None,
certificate_arn: Optional[pulumi.Input[str]] = None,
default_actions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerDefaultActionArgs']]]]] = None,
load_balancer_arn: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[str]] = None,
ssl_policy: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
pulumi.log.warn("""Listener is deprecated: aws.elasticloadbalancingv2.Listener has been deprecated in favor of aws.lb.Listener""")
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ListenerArgs.__new__(ListenerArgs)
__props__.__dict__["alpn_policy"] = alpn_policy
__props__.__dict__["certificate_arn"] = certificate_arn
if default_actions is None and not opts.urn:
raise TypeError("Missing required property 'default_actions'")
__props__.__dict__["default_actions"] = default_actions
if load_balancer_arn is None and not opts.urn:
raise TypeError("Missing required property 'load_balancer_arn'")
__props__.__dict__["load_balancer_arn"] = load_balancer_arn
__props__.__dict__["port"] = port
__props__.__dict__["protocol"] = protocol
__props__.__dict__["ssl_policy"] = ssl_policy
__props__.__dict__["tags"] = tags
__props__.__dict__["arn"] = None
__props__.__dict__["tags_all"] = None
super(Listener, __self__).__init__(
'aws:elasticloadbalancingv2/listener:Listener',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
alpn_policy: Optional[pulumi.Input[str]] = None,
arn: Optional[pulumi.Input[str]] = None,
certificate_arn: Optional[pulumi.Input[str]] = None,
default_actions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerDefaultActionArgs']]]]] = None,
load_balancer_arn: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[str]] = None,
ssl_policy: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Listener':
"""
Get an existing Listener resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] alpn_policy: Name of the Application-Layer Protocol Negotiation (ALPN) policy. Can be set if `protocol` is `TLS`. Valid values are `HTTP1Only`, `HTTP2Only`, `HTTP2Optional`, `HTTP2Preferred`, and `None`.
:param pulumi.Input[str] arn: ARN of the target group.
:param pulumi.Input[str] certificate_arn: ARN of the default SSL server certificate. Exactly one certificate is required if the protocol is HTTPS. For adding additional SSL certificates, see the `lb.ListenerCertificate` resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerDefaultActionArgs']]]] default_actions: Configuration block for default actions. Detailed below.
:param pulumi.Input[str] load_balancer_arn: ARN of the load balancer.
:param pulumi.Input[int] port: Port. Specify a value from `1` to `65535` or `#{port}`. Defaults to `#{port}`.
:param pulumi.Input[str] protocol: Protocol. Valid values are `HTTP`, `HTTPS`, or `#{protocol}`. Defaults to `#{protocol}`.
:param pulumi.Input[str] ssl_policy: Name of the SSL Policy for the listener. Required if `protocol` is `HTTPS` or `TLS`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ListenerState.__new__(_ListenerState)
__props__.__dict__["alpn_policy"] = alpn_policy
__props__.__dict__["arn"] = arn
__props__.__dict__["certificate_arn"] = certificate_arn
__props__.__dict__["default_actions"] = default_actions
__props__.__dict__["load_balancer_arn"] = load_balancer_arn
__props__.__dict__["port"] = port
__props__.__dict__["protocol"] = protocol
__props__.__dict__["ssl_policy"] = ssl_policy
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
return Listener(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="alpnPolicy")
def alpn_policy(self) -> pulumi.Output[Optional[str]]:
"""
Name of the Application-Layer Protocol Negotiation (ALPN) policy. Can be set if `protocol` is `TLS`. Valid values are `HTTP1Only`, `HTTP2Only`, `HTTP2Optional`, `HTTP2Preferred`, and `None`.
"""
return pulumi.get(self, "alpn_policy")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
ARN of the target group.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="certificateArn")
def certificate_arn(self) -> pulumi.Output[Optional[str]]:
"""
ARN of the default SSL server certificate. Exactly one certificate is required if the protocol is HTTPS. For adding additional SSL certificates, see the `lb.ListenerCertificate` resource.
"""
return pulumi.get(self, "certificate_arn")
@property
@pulumi.getter(name="defaultActions")
def default_actions(self) -> pulumi.Output[Sequence['outputs.ListenerDefaultAction']]:
"""
Configuration block for default actions. Detailed below.
"""
return pulumi.get(self, "default_actions")
@property
@pulumi.getter(name="loadBalancerArn")
def load_balancer_arn(self) -> pulumi.Output[str]:
"""
ARN of the load balancer.
"""
return pulumi.get(self, "load_balancer_arn")
@property
@pulumi.getter
def port(self) -> pulumi.Output[Optional[int]]:
"""
Port. Specify a value from `1` to `65535` or `#{port}`. Defaults to `#{port}`.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
"""
Protocol. Valid values are `HTTP`, `HTTPS`, or `#{protocol}`. Defaults to `#{protocol}`.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="sslPolicy")
def ssl_policy(self) -> pulumi.Output[str]:
"""
Name of the SSL Policy for the listener. Required if `protocol` is `HTTPS` or `TLS`.
"""
return pulumi.get(self, "ssl_policy")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
"""
A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
return pulumi.get(self, "tags_all")
| 44.628415 | 258 | 0.625101 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ListenerArgs', 'Listener']
@pulumi.input_type
class ListenerArgs:
    """The set of arguments for constructing a Listener resource."""
    def __init__(__self__, *,
                 default_actions: pulumi.Input[Sequence[pulumi.Input['ListenerDefaultActionArgs']]],
                 load_balancer_arn: pulumi.Input[str],
                 alpn_policy: Optional[pulumi.Input[str]] = None,
                 certificate_arn: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 ssl_policy: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a Listener resource.

        `default_actions` and `load_balancer_arn` are required; all other
        inputs are optional and only stored when a value is provided.
        """
        pulumi.set(__self__, "default_actions", default_actions)
        pulumi.set(__self__, "load_balancer_arn", load_balancer_arn)
        # Optional inputs: only set when supplied, so unset values do not
        # appear in the resource's input state.
        if alpn_policy is not None:
            pulumi.set(__self__, "alpn_policy", alpn_policy)
        if certificate_arn is not None:
            pulumi.set(__self__, "certificate_arn", certificate_arn)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if protocol is not None:
            pulumi.set(__self__, "protocol", protocol)
        if ssl_policy is not None:
            pulumi.set(__self__, "ssl_policy", ssl_policy)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter(name="defaultActions")
    def default_actions(self) -> pulumi.Input[Sequence[pulumi.Input['ListenerDefaultActionArgs']]]:
        """The default actions of the listener (required)."""
        return pulumi.get(self, "default_actions")

    @default_actions.setter
    def default_actions(self, value: pulumi.Input[Sequence[pulumi.Input['ListenerDefaultActionArgs']]]):
        pulumi.set(self, "default_actions", value)

    @property
    @pulumi.getter(name="loadBalancerArn")
    def load_balancer_arn(self) -> pulumi.Input[str]:
        """ARN of the load balancer this listener belongs to (required)."""
        return pulumi.get(self, "load_balancer_arn")

    @load_balancer_arn.setter
    def load_balancer_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "load_balancer_arn", value)

    @property
    @pulumi.getter(name="alpnPolicy")
    def alpn_policy(self) -> Optional[pulumi.Input[str]]:
        """Optional ALPN policy name."""
        return pulumi.get(self, "alpn_policy")

    @alpn_policy.setter
    def alpn_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "alpn_policy", value)

    @property
    @pulumi.getter(name="certificateArn")
    def certificate_arn(self) -> Optional[pulumi.Input[str]]:
        """Optional certificate ARN."""
        return pulumi.get(self, "certificate_arn")

    @certificate_arn.setter
    def certificate_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "certificate_arn", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """Optional port the listener accepts traffic on."""
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """Optional protocol for the listener."""
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter(name="sslPolicy")
    def ssl_policy(self) -> Optional[pulumi.Input[str]]:
        """Optional SSL policy name."""
        return pulumi.get(self, "ssl_policy")

    @ssl_policy.setter
    def ssl_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ssl_policy", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Optional map of tags to assign to the resource."""
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class _ListenerState:
    """Input properties used for looking up and filtering Listener resources."""
    def __init__(__self__, *,
                 alpn_policy: Optional[pulumi.Input[str]] = None,
                 arn: Optional[pulumi.Input[str]] = None,
                 certificate_arn: Optional[pulumi.Input[str]] = None,
                 default_actions: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerDefaultActionArgs']]]] = None,
                 load_balancer_arn: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 ssl_policy: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        State properties of an existing Listener. Every field is optional:
        only supplied values are recorded.
        """
        if alpn_policy is not None:
            pulumi.set(__self__, "alpn_policy", alpn_policy)
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if certificate_arn is not None:
            pulumi.set(__self__, "certificate_arn", certificate_arn)
        if default_actions is not None:
            pulumi.set(__self__, "default_actions", default_actions)
        if load_balancer_arn is not None:
            pulumi.set(__self__, "load_balancer_arn", load_balancer_arn)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if protocol is not None:
            pulumi.set(__self__, "protocol", protocol)
        if ssl_policy is not None:
            pulumi.set(__self__, "ssl_policy", ssl_policy)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tags_all is not None:
            pulumi.set(__self__, "tags_all", tags_all)

    @property
    @pulumi.getter(name="alpnPolicy")
    def alpn_policy(self) -> Optional[pulumi.Input[str]]:
        """ALPN policy name, if any."""
        return pulumi.get(self, "alpn_policy")

    @alpn_policy.setter
    def alpn_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "alpn_policy", value)

    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """ARN of the listener."""
        return pulumi.get(self, "arn")

    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)

    @property
    @pulumi.getter(name="certificateArn")
    def certificate_arn(self) -> Optional[pulumi.Input[str]]:
        """Certificate ARN, if any."""
        return pulumi.get(self, "certificate_arn")

    @certificate_arn.setter
    def certificate_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "certificate_arn", value)

    @property
    @pulumi.getter(name="defaultActions")
    def default_actions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ListenerDefaultActionArgs']]]]:
        """Default actions of the listener."""
        return pulumi.get(self, "default_actions")

    @default_actions.setter
    def default_actions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerDefaultActionArgs']]]]):
        pulumi.set(self, "default_actions", value)

    @property
    @pulumi.getter(name="loadBalancerArn")
    def load_balancer_arn(self) -> Optional[pulumi.Input[str]]:
        """ARN of the owning load balancer."""
        return pulumi.get(self, "load_balancer_arn")

    @load_balancer_arn.setter
    def load_balancer_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "load_balancer_arn", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """Listener port."""
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """Listener protocol."""
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter(name="sslPolicy")
    def ssl_policy(self) -> Optional[pulumi.Input[str]]:
        """SSL policy name, if any."""
        return pulumi.get(self, "ssl_policy")

    @ssl_policy.setter
    def ssl_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ssl_policy", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Tags explicitly assigned to the resource."""
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """All tags on the resource, including provider-level defaults."""
        return pulumi.get(self, "tags_all")

    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags_all", value)
warnings.warn("""aws.elasticloadbalancingv2.Listener has been deprecated in favor of aws.lb.Listener""", DeprecationWarning)
class Listener(pulumi.CustomResource):
    """Deprecated alias resource; aws.lb.Listener is the preferred type."""
    warnings.warn("""aws.elasticloadbalancingv2.Listener has been deprecated in favor of aws.lb.Listener""", DeprecationWarning)
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 alpn_policy: Optional[pulumi.Input[str]] = None,
                 certificate_arn: Optional[pulumi.Input[str]] = None,
                 default_actions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerDefaultActionArgs']]]]] = None,
                 load_balancer_arn: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 ssl_policy: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """Create a Listener resource from keyword arguments (overload signature only)."""
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ListenerArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """Create a Listener resource from a ListenerArgs object (overload signature only)."""
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a ListenerArgs
        # object or bare keyword arguments were supplied.
        resource_args, opts = _utilities.get_resource_args_opts(ListenerArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 alpn_policy: Optional[pulumi.Input[str]] = None,
                 certificate_arn: Optional[pulumi.Input[str]] = None,
                 default_actions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerDefaultActionArgs']]]]] = None,
                 load_balancer_arn: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 ssl_policy: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """Validate options and register the resource with the Pulumi engine."""
        pulumi.log.warn("""Listener is deprecated: aws.elasticloadbalancingv2.Listener has been deprecated in favor of aws.lb.Listener""")
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the input property bag.
            # (When opts.id is set we are importing/looking up an existing
            # resource and __props__ is provided by the caller instead.)
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ListenerArgs.__new__(ListenerArgs)

            __props__.__dict__["alpn_policy"] = alpn_policy
            __props__.__dict__["certificate_arn"] = certificate_arn
            if default_actions is None and not opts.urn:
                raise TypeError("Missing required property 'default_actions'")
            __props__.__dict__["default_actions"] = default_actions
            if load_balancer_arn is None and not opts.urn:
                raise TypeError("Missing required property 'load_balancer_arn'")
            __props__.__dict__["load_balancer_arn"] = load_balancer_arn
            __props__.__dict__["port"] = port
            __props__.__dict__["protocol"] = protocol
            __props__.__dict__["ssl_policy"] = ssl_policy
            __props__.__dict__["tags"] = tags
            # Output-only properties start as None and are filled in by the engine.
            __props__.__dict__["arn"] = None
            __props__.__dict__["tags_all"] = None
        super(Listener, __self__).__init__(
            'aws:elasticloadbalancingv2/listener:Listener',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            alpn_policy: Optional[pulumi.Input[str]] = None,
            arn: Optional[pulumi.Input[str]] = None,
            certificate_arn: Optional[pulumi.Input[str]] = None,
            default_actions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerDefaultActionArgs']]]]] = None,
            load_balancer_arn: Optional[pulumi.Input[str]] = None,
            port: Optional[pulumi.Input[int]] = None,
            protocol: Optional[pulumi.Input[str]] = None,
            ssl_policy: Optional[pulumi.Input[str]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Listener':
        """
        Get an existing Listener resource's state by id, with optional extra
        state properties used to match/filter the lookup.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _ListenerState.__new__(_ListenerState)

        __props__.__dict__["alpn_policy"] = alpn_policy
        __props__.__dict__["arn"] = arn
        __props__.__dict__["certificate_arn"] = certificate_arn
        __props__.__dict__["default_actions"] = default_actions
        __props__.__dict__["load_balancer_arn"] = load_balancer_arn
        __props__.__dict__["port"] = port
        __props__.__dict__["protocol"] = protocol
        __props__.__dict__["ssl_policy"] = ssl_policy
        __props__.__dict__["tags"] = tags
        __props__.__dict__["tags_all"] = tags_all
        return Listener(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="alpnPolicy")
    def alpn_policy(self) -> pulumi.Output[Optional[str]]:
        """ALPN policy name, if configured."""
        return pulumi.get(self, "alpn_policy")

    @property
    @pulumi.getter
    def arn(self) -> pulumi.Output[str]:
        """ARN of the listener."""
        return pulumi.get(self, "arn")

    @property
    @pulumi.getter(name="certificateArn")
    def certificate_arn(self) -> pulumi.Output[Optional[str]]:
        """Certificate ARN, if configured."""
        return pulumi.get(self, "certificate_arn")

    @property
    @pulumi.getter(name="defaultActions")
    def default_actions(self) -> pulumi.Output[Sequence['outputs.ListenerDefaultAction']]:
        """Default actions of the listener."""
        return pulumi.get(self, "default_actions")

    @property
    @pulumi.getter(name="loadBalancerArn")
    def load_balancer_arn(self) -> pulumi.Output[str]:
        """ARN of the owning load balancer."""
        return pulumi.get(self, "load_balancer_arn")

    @property
    @pulumi.getter
    def port(self) -> pulumi.Output[Optional[int]]:
        """Listener port."""
        return pulumi.get(self, "port")

    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Output[str]:
        """Listener protocol."""
        return pulumi.get(self, "protocol")

    @property
    @pulumi.getter(name="sslPolicy")
    def ssl_policy(self) -> pulumi.Output[str]:
        """SSL policy name."""
        return pulumi.get(self, "ssl_policy")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """Tags explicitly assigned to the resource."""
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
        """All tags on the resource, including provider-level defaults."""
        return pulumi.get(self, "tags_all")
| true | true |
f7fb6881d3cac1c6a4a8dcce6c1e88ce32a634d8 | 4,446 | py | Python | graph2vec/node2vec.py | elifriedman/node2vec | 0fade3002f84e19cfe7564b5cb9d232dfd63d1ea | [
"MIT"
] | null | null | null | graph2vec/node2vec.py | elifriedman/node2vec | 0fade3002f84e19cfe7564b5cb9d232dfd63d1ea | [
"MIT"
] | null | null | null | graph2vec/node2vec.py | elifriedman/node2vec | 0fade3002f84e19cfe7564b5cb9d232dfd63d1ea | [
"MIT"
] | null | null | null | import numpy as np
import networkx as nx
import random
class Graph():
    """Biased second-order random-walk generator for node2vec.

    Wraps a networkx graph and pre-computes alias tables (via
    `preprocess_transition_probs`) so each walk step samples in O(1).
    `p` is the return parameter and `q` the in-out parameter of node2vec.
    """

    def __init__(self, nx_G, is_directed, p, q):
        self.G = nx_G.to_directed() if is_directed else nx_G.to_undirected()
        self.is_directed = is_directed
        self.p = p
        self.q = q

    def node2vec_walk(self, walk_length, start_node):
        '''
        Simulate a random walk of up to `walk_length` nodes from `start_node`.
        Requires `preprocess_transition_probs` to have been called first.
        '''
        G = self.G
        alias_nodes = self.alias_nodes
        alias_edges = self.alias_edges

        walk = [start_node]
        while len(walk) < walk_length:
            cur = walk[-1]
            cur_nbrs = sorted(G.neighbors(cur))
            if len(cur_nbrs) > 0:
                if len(walk) == 1:
                    # First hop has no previous node: use first-order probabilities.
                    walk.append(cur_nbrs[alias_draw(alias_nodes[cur][0], alias_nodes[cur][1])])
                else:
                    prev = walk[-2]
                    # `nxt` instead of `next` to avoid shadowing the builtin.
                    nxt = cur_nbrs[alias_draw(alias_edges[(prev, cur)][0],
                                              alias_edges[(prev, cur)][1])]
                    walk.append(nxt)
            else:
                # Dead end (no out-neighbors): terminate the walk early.
                break

        return walk

    def simulate_walks(self, num_walks, walk_length):
        '''
        Repeatedly simulate `num_walks` random walks starting from every node.
        '''
        G = self.G
        walks = []
        nodes = list(G.nodes())
        for _ in range(num_walks):
            # Shuffle start nodes each round so walks are interleaved.
            random.shuffle(nodes)
            for node in nodes:
                walks.append(self.node2vec_walk(walk_length=walk_length, start_node=node))

        return walks

    def get_alias_edge(self, src, dst):
        '''
        Build the alias table for transitions out of `dst`, given the walk
        arrived there from `src`.
        '''
        G = self.G
        p = self.p
        q = self.q

        unnormalized_probs = []
        for dst_nbr in sorted(G.neighbors(dst)):
            if dst_nbr == src:
                # Returning to the previous node: weight scaled by 1/p.
                unnormalized_probs.append(G[dst][dst_nbr].get('weight', 1)/p)
            elif G.has_edge(dst_nbr, src):
                # Neighbor also adjacent to src (distance 1): unscaled weight.
                unnormalized_probs.append(G[dst][dst_nbr].get('weight', 1))
            else:
                # Neighbor at distance 2 from src: weight scaled by 1/q.
                unnormalized_probs.append(G[dst][dst_nbr].get('weight', 1)/q)
        norm_const = sum(unnormalized_probs)
        normalized_probs = [float(u_prob)/norm_const for u_prob in unnormalized_probs]

        return alias_setup(normalized_probs)

    def preprocess_transition_probs(self):
        '''
        Preprocessing of transition probabilities for guiding the random walks.
        Populates `self.alias_nodes` (first-order) and `self.alias_edges`
        (second-order, keyed by (prev, cur)).
        '''
        G = self.G
        is_directed = self.is_directed

        alias_nodes = {}
        for node in G.nodes():
            unnormalized_probs = [G[node][nbr].get('weight', 1) for nbr in sorted(G.neighbors(node))]
            norm_const = sum(unnormalized_probs)
            normalized_probs = [float(u_prob)/norm_const for u_prob in unnormalized_probs]
            alias_nodes[node] = alias_setup(normalized_probs)

        alias_edges = {}

        if is_directed:
            for edge in G.edges():
                alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])
        else:
            # Undirected: every edge is walkable in both directions.
            for edge in G.edges():
                alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])
                alias_edges[(edge[1], edge[0])] = self.get_alias_edge(edge[1], edge[0])

        self.alias_nodes = alias_nodes
        self.alias_edges = alias_edges

        return
def alias_setup(probs):
    '''
    Compute utility lists for non-uniform sampling from discrete distributions.

    Builds the Walker alias table (J, q) for `probs` so a sample can later be
    drawn in O(1) by `alias_draw`.
    Refer to https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
    for details

    :param probs: sequence of probabilities (should sum to 1); may be empty.
    :return: (J, q) — alias indices and acceptance thresholds, both length K.
    '''
    K = len(probs)
    q = np.zeros(K)
    # `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # `int` is the documented replacement and keeps the same platform dtype.
    J = np.zeros(K, dtype=int)

    smaller = []
    larger = []
    for kk, prob in enumerate(probs):
        q[kk] = K*prob
        if q[kk] < 1.0:
            smaller.append(kk)
        else:
            larger.append(kk)

    while len(smaller) > 0 and len(larger) > 0:
        small = smaller.pop()
        large = larger.pop()

        J[small] = large
        # Move the probability mass that `small` lacks from `large`.
        q[large] = q[large] + q[small] - 1.0
        if q[large] < 1.0:
            smaller.append(large)
        else:
            larger.append(large)

    return J, q
def alias_draw(J, q):
    '''
    Draw one sample from a non-uniform discrete distribution using the alias
    tables (J, q) produced by `alias_setup`.
    '''
    num_outcomes = len(J)

    # Pick a column uniformly at random, then either accept it (with
    # probability q[column]) or fall back to its alias J[column].
    column = int(np.floor(np.random.rand() * num_outcomes))
    if np.random.rand() < q[column]:
        return column
    return J[column]
| 30.040541 | 123 | 0.55758 | import numpy as np
import networkx as nx
import random
class Graph():
    """Biased second-order random-walk generator for node2vec over a networkx graph."""
    def __init__(self, nx_G, is_directed, p, q):
        # p: return parameter, q: in-out parameter of the node2vec walk bias.
        self.G = nx_G.to_directed() if is_directed else nx_G.to_undirected()
        self.is_directed = is_directed
        self.p = p
        self.q = q
    def node2vec_walk(self, walk_length, start_node):
        """Simulate one random walk of up to `walk_length` nodes from `start_node`."""
        G = self.G
        alias_nodes = self.alias_nodes
        alias_edges = self.alias_edges
        walk = [start_node]
        while len(walk) < walk_length:
            cur = walk[-1]
            cur_nbrs = sorted(G.neighbors(cur))
            if len(cur_nbrs) > 0:
                if len(walk) == 1:
                    # First hop has no previous node: first-order probabilities.
                    walk.append(cur_nbrs[alias_draw(alias_nodes[cur][0], alias_nodes[cur][1])])
                else:
                    prev = walk[-2]
                    next = cur_nbrs[alias_draw(alias_edges[(prev, cur)][0],
                                               alias_edges[(prev, cur)][1])]
                    walk.append(next)
            else:
                # Dead end: terminate the walk early.
                break
        return walk
    def simulate_walks(self, num_walks, walk_length):
        """Simulate `num_walks` walks starting from every node (shuffled each round)."""
        G = self.G
        walks = []
        nodes = list(G.nodes())
        for walk_iter in range(num_walks):
            random.shuffle(nodes)
            for node in nodes:
                walks.append(self.node2vec_walk(walk_length=walk_length, start_node=node))
        return walks
    def get_alias_edge(self, src, dst):
        """Build the alias table for transitions out of `dst` given arrival from `src`."""
        G = self.G
        p = self.p
        q = self.q
        unnormalized_probs = []
        for dst_nbr in sorted(G.neighbors(dst)):
            if dst_nbr == src:
                # Return to previous node: weight scaled by 1/p.
                unnormalized_probs.append(G[dst][dst_nbr].get('weight', 1)/p)
            elif G.has_edge(dst_nbr, src):
                # Common neighbor of src (distance 1): unscaled weight.
                unnormalized_probs.append(G[dst][dst_nbr].get('weight', 1))
            else:
                # Distance-2 neighbor: weight scaled by 1/q.
                unnormalized_probs.append(G[dst][dst_nbr].get('weight', 1)/q)
        norm_const = sum(unnormalized_probs)
        normalized_probs = [float(u_prob)/norm_const for u_prob in unnormalized_probs]
        return alias_setup(normalized_probs)
    def preprocess_transition_probs(self):
        """Pre-compute first-order (node) and second-order (edge) alias tables."""
        G = self.G
        is_directed = self.is_directed
        alias_nodes = {}
        for node in G.nodes():
            unnormalized_probs = [G[node][nbr].get('weight', 1) for nbr in sorted(G.neighbors(node))]
            norm_const = sum(unnormalized_probs)
            normalized_probs = [float(u_prob)/norm_const for u_prob in unnormalized_probs]
            alias_nodes[node] = alias_setup(normalized_probs)
        alias_edges = {}
        triads = {}
        if is_directed:
            for edge in G.edges():
                alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])
        else:
            # Undirected: each edge is walkable in both directions.
            for edge in G.edges():
                alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])
                alias_edges[(edge[1], edge[0])] = self.get_alias_edge(edge[1], edge[0])
        self.alias_nodes = alias_nodes
        self.alias_edges = alias_edges
        return
def alias_setup(probs):
    """Build the Walker alias table (J, q) for O(1) sampling from `probs`.

    :param probs: sequence of probabilities (should sum to 1); may be empty.
    :return: (J, q) — alias indices and acceptance thresholds, both length K.
    """
    K = len(probs)
    q = np.zeros(K)
    # `np.int` was deprecated in NumPy 1.20 and removed in 1.24; use the
    # builtin `int` instead.
    J = np.zeros(K, dtype=int)
    smaller = []
    larger = []
    for kk, prob in enumerate(probs):
        q[kk] = K*prob
        if q[kk] < 1.0:
            smaller.append(kk)
        else:
            larger.append(kk)
    while len(smaller) > 0 and len(larger) > 0:
        small = smaller.pop()
        large = larger.pop()
        J[small] = large
        # Transfer the probability mass `small` lacks from `large`.
        q[large] = q[large] + q[small] - 1.0
        if q[large] < 1.0:
            smaller.append(large)
        else:
            larger.append(large)
    return J, q
def alias_draw(J, q):
    # Draw one sample via the alias method: pick a column uniformly, accept it
    # with probability q[kk], otherwise return its alias J[kk].
    K = len(J)
    kk = int(np.floor(np.random.rand()*K))
    if np.random.rand() < q[kk]:
        return kk
    else:
        return J[kk]
| true | true |
f7fb68b586c7a9eb8ea8388ba8aa8e54f6c9bbb4 | 4,099 | py | Python | nipyapi/nifi/models/search_result_group_dto.py | Jimvin/nipyapi | 826beac376d4321bd2d69491f09086474c7e7bfb | [
"Apache-2.0"
] | 199 | 2017-08-24T12:19:41.000Z | 2022-03-20T14:50:17.000Z | nipyapi/nifi/models/search_result_group_dto.py | Jimvin/nipyapi | 826beac376d4321bd2d69491f09086474c7e7bfb | [
"Apache-2.0"
] | 275 | 2017-08-28T21:21:49.000Z | 2022-03-29T17:57:26.000Z | nipyapi/nifi/models/search_result_group_dto.py | Jimvin/nipyapi | 826beac376d4321bd2d69491f09086474c7e7bfb | [
"Apache-2.0"
] | 73 | 2017-09-07T10:13:56.000Z | 2022-02-28T10:37:21.000Z | # coding: utf-8
"""
NiFi Rest API
The Rest API provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.15.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class SearchResultGroupDTO(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    # swagger_types maps attribute name -> attribute type;
    # attribute_map maps attribute name -> JSON key in the API definition.
    swagger_types = {
        'id': 'str',
        'name': 'str'
    }

    attribute_map = {
        'id': 'id',
        'name': 'name'
    }

    def __init__(self, id=None, name=None):
        """SearchResultGroupDTO - a model defined in Swagger."""
        self._id = None
        self._name = None
        # `id` is required: assigning through the property validates non-None.
        self.id = id
        if name is not None:
            self.name = name

    @property
    def id(self):
        """The id of the group.

        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Set the id of the group; must not be None.

        :type id: str
        """
        if id is None:
            raise ValueError("Invalid value for `id`, must not be `None`")
        self._id = id

    @property
    def name(self):
        """The name of the group.

        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Set the name of the group.

        :type name: str
        """
        self._name = name

    def to_dict(self):
        """Return the model properties as a dict (one level of nested models converted)."""
        out = {}
        for attr_name in self.swagger_types:
            value = getattr(self, attr_name)
            if isinstance(value, list):
                out[attr_name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                out[attr_name] = value.to_dict()
            elif isinstance(value, dict):
                out[attr_name] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                out[attr_name] = value
        return out

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, SearchResultGroupDTO):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| 26.445161 | 478 | 0.529641 |
from pprint import pformat
from six import iteritems
import re
class SearchResultGroupDTO(object):
    """Swagger-generated model for a search-result group (do not edit manually)."""
    # Attribute name -> attribute type / JSON key in the API definition.
    swagger_types = {
        'id': 'str',
        'name': 'str'
    }
    attribute_map = {
        'id': 'id',
        'name': 'name'
    }
    def __init__(self, id=None, name=None):
        """SearchResultGroupDTO - a model defined in Swagger."""
        self._id = None
        self._name = None
        # `id` is required: assigning through the property validates non-None.
        self.id = id
        if name is not None:
            self.name = name
    @property
    def id(self):
        """The id of the group (str)."""
        return self._id
    @id.setter
    def id(self, id):
        """Set the id of the group; must not be None."""
        if id is None:
            raise ValueError("Invalid value for `id`, must not be `None`")
        self._id = id
    @property
    def name(self):
        """The name of the group (str)."""
        return self._name
    @name.setter
    def name(self, name):
        """Set the name of the group."""
        self._name = name
    def to_dict(self):
        """Return the model properties as a dict (nested models converted one level)."""
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, SearchResultGroupDTO):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| true | true |
f7fb6a245df1eeb24b8b473047d19a2233470b06 | 56 | py | Python | cognite/client/_version.py | sakshi87/cognite-sdk-python | eb3d569fd058dfd8e3c0c29dee2a635deabad1ac | [
"Apache-2.0"
] | null | null | null | cognite/client/_version.py | sakshi87/cognite-sdk-python | eb3d569fd058dfd8e3c0c29dee2a635deabad1ac | [
"Apache-2.0"
] | null | null | null | cognite/client/_version.py | sakshi87/cognite-sdk-python | eb3d569fd058dfd8e3c0c29dee2a635deabad1ac | [
"Apache-2.0"
] | null | null | null | __version__ = "2.20.0"
__api_subversion__ = "V20210423"
| 18.666667 | 32 | 0.75 | __version__ = "2.20.0"
__api_subversion__ = "V20210423"
| true | true |
f7fb6b2adc973bed9154169011f89e3b9dc2cd72 | 685 | py | Python | app/core/migrations/0003_ingredient.py | sunnyrpandya/recipe-app-api | 92fbefb9bd80e967cd1111ddc25c3c8da5980c39 | [
"MIT"
] | null | null | null | app/core/migrations/0003_ingredient.py | sunnyrpandya/recipe-app-api | 92fbefb9bd80e967cd1111ddc25c3c8da5980c39 | [
"MIT"
] | null | null | null | app/core/migrations/0003_ingredient.py | sunnyrpandya/recipe-app-api | 92fbefb9bd80e967cd1111ddc25c3c8da5980c39 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.3 on 2020-02-18 05:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_tag'),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 28.541667 | 118 | 0.617518 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_tag'),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
f7fb6bf2ecefa59a5d8472409b6048444bee173f | 22,395 | py | Python | cogdl/models/nn/pyg_supergat.py | zhangdan0602/cogdl | 35a338f29066e4b1a5d7f46217f09ebceaf13106 | [
"MIT"
] | null | null | null | cogdl/models/nn/pyg_supergat.py | zhangdan0602/cogdl | 35a338f29066e4b1a5d7f46217f09ebceaf13106 | [
"MIT"
] | null | null | null | cogdl/models/nn/pyg_supergat.py | zhangdan0602/cogdl | 35a338f29066e4b1a5d7f46217f09ebceaf13106 | [
"MIT"
] | null | null | null | import random
import numpy as np
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.nn.functional as F
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.utils import (
remove_self_loops,
add_self_loops,
softmax,
dropout_adj,
is_undirected,
accuracy,
negative_sampling,
batched_negative_sampling,
to_undirected,
)
import torch_geometric.nn.inits as tgi
from cogdl.trainers.supergat_trainer import SuperGATTrainer
from .. import BaseModel, register_model
from typing import List
# borrowed from https://github.com/dongkwan-kim/SuperGAT
def np_sigmoid(x):
    """Numerically robust logistic function 1 / (1 + exp(-x)).

    The naive form overflows `np.exp(-x)` (RuntimeWarning, intermediate inf)
    for large negative `x`; `np.logaddexp(0, -x)` computes log(1 + exp(-x))
    without overflow, so exp(-that) is the sigmoid for any finite input.
    """
    return np.exp(-np.logaddexp(0.0, -x))
class SuperGATLayer(MessagePassing):
    def __init__(
        self,
        in_channels,
        out_channels,
        heads=1,
        concat=True,
        negative_slope=0.2,
        dropout=0,
        bias=True,
        is_super_gat=True,
        attention_type="basic",
        super_gat_criterion=None,
        neg_sample_ratio=0.0,
        edge_sample_ratio=1.0,
        pretraining_noise_ratio=0.0,
        use_pretraining=False,
        to_undirected_at_neg=False,
        scaling_factor=None,
        cache_label=False,
        cache_attention=False,
        **kwargs,
    ):
        """SuperGAT attention layer.

        :param in_channels: size of each input node feature.
        :param out_channels: size of each output feature per head.
        :param heads: number of attention heads.
        :param concat: if True, concatenate head outputs; else average them.
        :param is_super_gat: enable self-supervised attention (negative sampling).
        :param attention_type: one of "basic"/"gat_originated" (GO),
            "dot_product" (DP), "scaled_dot_product" (SD), or "*mask*" (MX)
            variants — selects which parameters are created and how attention
            is computed in `_get_attention`.
        :param neg_sample_ratio: negative edges sampled per positive edge.
        :param edge_sample_ratio: fraction of positive edges used for supervision.
        :param scaling_factor: divisor for SD attention; defaults to sqrt(out_channels).
        """
        super(SuperGATLayer, self).__init__(aggr="add", node_dim=0, **kwargs)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.heads = heads
        self.concat = concat
        self.negative_slope = negative_slope
        self.dropout = dropout
        self.is_super_gat = is_super_gat
        self.attention_type = attention_type
        self.super_gat_criterion = super_gat_criterion
        self.neg_sample_ratio = neg_sample_ratio
        self.edge_sample_ratio = edge_sample_ratio
        self.pretraining_noise_ratio = pretraining_noise_ratio
        # None means pretraining disabled; True means enabled (flag, not a mode).
        self.pretraining = None if not use_pretraining else True
        self.to_undirected_at_neg = to_undirected_at_neg
        self.cache_label = cache_label
        self.cache_attention = cache_attention

        self.weight = Parameter(torch.Tensor(in_channels, heads * out_channels))

        # Only the attention variants that need extra parameters create them.
        if self.is_super_gat:
            if self.attention_type == "gat_originated":  # GO
                self.att_mh_1 = Parameter(torch.Tensor(1, heads, 2 * out_channels))
            elif self.attention_type == "dot_product":  # DP
                pass
            elif self.attention_type == "scaled_dot_product":  # SD
                self.scaling_factor = scaling_factor or np.sqrt(self.out_channels)
            elif self.attention_type.endswith("mask_only"):  # MX
                self.att_mh_1 = Parameter(torch.Tensor(1, heads, 2 * out_channels))
            else:
                raise ValueError
        else:
            if self.attention_type.endswith("gat_originated") or self.attention_type == "basic":
                self.att_mh_1 = Parameter(torch.Tensor(1, heads, 2 * out_channels))
            elif self.attention_type.endswith("dot_product"):
                pass
            else:
                raise ValueError

        self.cache = {
            "num_updated": 0,
            "att": None,  # Use only when self.cache_attention == True for task_type == "Attention_Dist"
            "att_with_negatives": None,  # Use as X for supervision.
            "att_label": None,  # Use as Y for supervision.
        }

        if bias and concat:
            self.bias = Parameter(torch.Tensor(heads * out_channels))
        elif bias and not concat:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter("bias", None)

        self.reset_parameters()
def reset_parameters(self):
tgi.glorot(self.weight)
tgi.zeros(self.bias)
for name, param in self.named_parameters():
if name.startswith("att_scaling"):
tgi.ones(param)
elif name.startswith("att_bias"):
tgi.zeros(param)
elif name.startswith("att_mh"):
tgi.glorot(param)
    def forward(self, x, edge_index, size=None, batch=None, neg_edge_index=None, attention_edge_index=None):
        """
        :param x: [N, F]
        :param edge_index: [2, E]
        :param size:
        :param batch: None or [B]
        :param neg_edge_index: When using explicitly given negative edges.
        :param attention_edge_index: [2, E'], Use for link prediction
        :return: propagated node features (see `update` for the output shape)
        """
        if isinstance(edge_index, tuple):
            edge_index = torch.stack(edge_index)
        # Optionally drop edges as pretraining noise.
        if self.pretraining and self.pretraining_noise_ratio > 0.0:
            edge_index, _ = dropout_adj(
                edge_index,
                p=self.pretraining_noise_ratio,
                force_undirected=is_undirected(edge_index),
                num_nodes=x.size(0),
                training=self.training,
            )

        if size is None and torch.is_tensor(x):
            edge_index, _ = remove_self_loops(edge_index)
            edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))

        # [N, F0] * [F0, heads * F] = [N, heads * F]
        x = torch.matmul(x, self.weight)
        x = x.view(-1, self.heads, self.out_channels)

        propagated = self.propagate(edge_index, size=size, x=x)

        # Self-supervised branch: compute attention over positive + negative
        # edges and cache it (and its labels) for the auxiliary loss.
        if (self.is_super_gat and self.training) or (attention_edge_index is not None) or (neg_edge_index is not None):
            device = next(self.parameters()).device
            num_pos_samples = int(self.edge_sample_ratio * edge_index.size(1))
            num_neg_samples = int(self.neg_sample_ratio * self.edge_sample_ratio * edge_index.size(1))

            if attention_edge_index is not None:
                # Explicit evaluation edges given: no negative sampling.
                neg_edge_index = None
            elif neg_edge_index is not None:
                # Caller supplied negatives: use them as-is.
                pass
            elif batch is None:
                if self.to_undirected_at_neg:
                    edge_index_for_ns = to_undirected(edge_index, num_nodes=x.size(0))
                else:
                    edge_index_for_ns = edge_index
                neg_edge_index = negative_sampling(
                    edge_index=edge_index_for_ns,
                    num_nodes=x.size(0),
                    num_neg_samples=num_neg_samples,
                )
            else:
                # Graph-batched input: sample negatives within each graph.
                neg_edge_index = batched_negative_sampling(
                    edge_index=edge_index,
                    batch=batch,
                    num_neg_samples=num_neg_samples,
                )

            # Subsample positive edges when edge_sample_ratio < 1.
            if self.edge_sample_ratio < 1.0:
                pos_indices = random.sample(range(edge_index.size(1)), num_pos_samples)
                pos_indices = torch.tensor(pos_indices).long().to(device)
                pos_edge_index = edge_index[:, pos_indices]
            else:
                pos_edge_index = edge_index

            att_with_negatives = self._get_attention_with_negatives(
                x=x,
                edge_index=pos_edge_index,
                neg_edge_index=neg_edge_index,
                total_edge_index=attention_edge_index,
            )  # [E + neg_E, heads]

            # Labels: 1 for positive edges (listed first), 0 for negatives.
            if self.training and (self.cache["att_label"] is None or not self.cache_label):
                att_label = torch.zeros(att_with_negatives.size(0)).float().to(device)
                att_label[: pos_edge_index.size(1)] = 1.0
            elif self.training and self.cache["att_label"] is not None:
                # cache_label enabled and labels already computed: reuse them.
                att_label = self.cache["att_label"]
            else:
                att_label = None
            self._update_cache("att_label", att_label)
            self._update_cache("att_with_negatives", att_with_negatives)

        return propagated
    def message(self, edge_index_i, x_i, x_j, size_i):
        """
        Per-edge message: source features weighted by (dropout-sampled) attention.

        :param edge_index_i: [E] — per-edge node index used to group the
            attention softmax (see `_get_attention`).
        :param x_i: [E, heads * F] (may be None)
        :param x_j: [E, heads * F]
        :param size_i: N — number of nodes for softmax normalization
        :return: [E, heads, F]
        """
        x_j = x_j.view(-1, self.heads, self.out_channels)  # [E, heads, F]
        if x_i is not None:
            x_i = x_i.view(-1, self.heads, self.out_channels)  # [E, heads, F]

        # Compute attention coefficients. [E, heads]
        alpha = self._get_attention(edge_index_i, x_i, x_j, size_i)
        if self.cache_attention:
            self._update_cache("att", alpha)

        # Sample attention coefficients stochastically.
        alpha = F.dropout(alpha, p=self.dropout, training=self.training)

        # [E, heads, F] * [E, heads, 1] = [E, heads, F]
        return x_j * alpha.view(-1, self.heads, 1)
def update(self, aggr_out):
"""
:param aggr_out: [N, heads, F]
:return: [N, heads * F]
"""
if self.concat is True:
aggr_out = aggr_out.view(-1, self.heads * self.out_channels)
else:
aggr_out = aggr_out.mean(dim=1)
if self.bias is not None:
aggr_out = aggr_out + self.bias
return aggr_out
    def _get_attention(
        self, edge_index_i, x_i, x_j, size_i, normalize=True, with_negatives=False, **kwargs
    ) -> torch.Tensor:
        """
        Compute per-edge, per-head attention coefficients.

        :param edge_index_i: [E] target-node index of each edge (softmax groups)
        :param x_i: [E, heads, F] target-node features
        :param x_j: [E, heads, F] source-node features
        :param size_i: N, number of target nodes
        :param normalize: if True, apply leaky-ReLU then a per-target softmax
        :param with_negatives: for the "mask" variants only, return the raw
            dot-product logits immediately (used by the self-supervised loss
            over positive + negative edges)
        :return: [E, heads]
        """
        # Compute attention coefficients.
        if self.attention_type == "basic" or self.attention_type.endswith("gat_originated"):
            # GAT-style scoring of the concatenated endpoint features:
            # [E, heads, 2F] * [1, heads, 2F] -> [E, heads]
            alpha = torch.einsum("ehf,xhf->eh", torch.cat([x_i, x_j], dim=-1), self.att_mh_1)
        elif self.attention_type == "scaled_dot_product":
            # Transformer-style scaled dot product per head.
            alpha = torch.einsum("ehf,ehf->eh", x_i, x_j) / self.scaling_factor
        elif self.attention_type == "dot_product":
            # [E, heads, F] * [E, heads, F] -> [E, heads]
            alpha = torch.einsum("ehf,ehf->eh", x_i, x_j)
        elif "mask" in self.attention_type:
            # Dot-product logits gate a GAT-style score through a sigmoid.
            # [E, heads, F] * [E, heads, F] -> [E, heads]
            logits = torch.einsum("ehf,ehf->eh", x_i, x_j)
            if self.attention_type.endswith("scaling"):
                # NOTE(review): self.att_scaling is not set in the visible
                # __init__ branches; presumably defined for the "...scaling"
                # variants elsewhere -- confirm.
                logits = logits / self.att_scaling
            if with_negatives:
                # Raw logits, before masking and normalization.
                return logits
            # [E, heads, 2F] * [1, heads, 2F] -> [E, heads]
            alpha = torch.einsum("ehf,xhf->eh", torch.cat([x_i, x_j], dim=-1), self.att_mh_1)
            alpha = torch.einsum("eh,eh->eh", alpha, torch.sigmoid(logits))
        else:
            raise ValueError
        if normalize:
            # Standard GAT normalization: leaky-ReLU, then softmax over the
            # incoming edges of each target node.
            alpha = F.leaky_relu(alpha, self.negative_slope)
            alpha = softmax(alpha, edge_index_i, num_nodes=size_i)
        return alpha
def _get_attention_with_negatives(self, x, edge_index, neg_edge_index, total_edge_index=None):
"""
:param x: [N, heads * F]
:param edge_index: [2, E]
:param neg_edge_index: [2, neg_E]
:param total_edge_index: [2, E + neg_E], if total_edge_index is given, use it.
:return: [E + neg_E, heads]
"""
if neg_edge_index is not None and neg_edge_index.size(1) <= 0:
neg_edge_index = torch.zeros((2, 0, self.heads))
if total_edge_index is None:
total_edge_index = torch.cat([edge_index, neg_edge_index], dim=-1) # [2, E + neg_E]
total_edge_index_j, total_edge_index_i = total_edge_index # [E + neg_E]
x_i = torch.index_select(x, 0, total_edge_index_i) # [E + neg_E, heads * F]
x_j = torch.index_select(x, 0, total_edge_index_j) # [E + neg_E, heads * F]
size_i = x.size(0) # N
x_j = x_j.view(-1, self.heads, self.out_channels) # [E + neg_E, heads, F]
if x_i is not None:
x_i = x_i.view(-1, self.heads, self.out_channels) # [E + neg_E, heads, F]
alpha = self._get_attention(total_edge_index_i, x_i, x_j, size_i, normalize=False, with_negatives=True)
return alpha
def __repr__(self):
return "{}({}, {}, heads={}, concat={}, att_type={}, nsr={}, pnr={})".format(
self.__class__.__name__,
self.in_channels,
self.out_channels,
self.heads,
self.concat,
self.attention_type,
self.neg_sample_ratio,
self.pretraining_noise_ratio,
)
def _update_cache(self, key, val):
self.cache[key] = val
self.cache["num_updated"] += 1
def get_attention_dist(self, edge_index: torch.Tensor, num_nodes: int):
"""
:param edge_index: tensor the shape of which is [2, E]
:param num_nodes: number of nodes
:return: Tensor list L the length of which is N.
L[i] = a_ji for e_{ji} in {E}
- a_ji = normalized attention coefficient of e_{ji} (shape: [heads, #neighbors])
"""
edge_index, _ = remove_self_loops(edge_index)
edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes) # [2, E]
att = self.cache["att"] # [E, heads]
att_dist_list = []
for node_idx in range(num_nodes):
att_neighbors = att[edge_index[1] == node_idx, :].t() # [heads, #neighbors]
att_dist_list.append(att_neighbors)
return att_dist_list
@register_model("supergat")
class SuperGAT(BaseModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--num-features', type=int)
parser.add_argument("--num-classes", type=int)
parser.add_argument("--patience", type=int, default=100)
parser.add_argument('--hidden-size', type=int, default=16)
parser.add_argument("--heads", default=8, type=int)
parser.add_argument("--out-heads", default=None, type=int)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument("--attention-type", type=str, default="basic")
parser.add_argument("--super-gat-criterion", type=str, default=None)
parser.add_argument("--neg-sample-ratio", type=float, default=0.5)
parser.add_argument("--edge-sampling-ratio", type=float, default=0.8)
parser.add_argument("--scaling-factor", type=float, default=None)
parser.add_argument("--to-undirected-at-neg", action="store_true")
parser.add_argument("--to-undirected", action="store_true")
parser.add_argument("--pretraining-noise-ratio", type=float, default=0.0)
parser.add_argument("--val-interval", type=int, default=1)
parser.add_argument("--att-lambda", default=0., type=float)
parser.add_argument("--total-pretraining-epoch", default=0, type=int)
# fmt: on
@classmethod
def build_model_from_args(cls, args):
return cls(args)
def __init__(self, args):
super().__init__()
self.args = args
self.conv1 = SuperGATLayer(
args.num_features,
args.hidden_size,
heads=args.heads,
dropout=args.dropout,
concat=True,
is_super_gat=True,
attention_type=args.attention_type,
super_gat_criterion=args.super_gat_criterion,
neg_sample_ratio=args.neg_sample_ratio,
edge_sample_ratio=args.edge_sampling_ratio,
pretraining_noise_ratio=args.pretraining_noise_ratio,
use_pretraining=False,
to_undirected_at_neg=args.to_undirected_at_neg,
scaling_factor=args.scaling_factor,
)
self.conv2 = SuperGATLayer(
args.hidden_size * args.heads,
args.num_classes,
heads=(args.out_heads or args.heads),
dropout=args.dropout,
concat=False,
is_super_gat=True,
attention_type=args.attention_type,
super_gat_criterion=args.super_gat_criterion,
neg_sample_ratio=args.neg_sample_ratio,
edge_sample_ratio=args.edge_sampling_ratio,
pretraining_noise_ratio=args.pretraining_noise_ratio,
use_pretraining=False,
to_undirected_at_neg=args.to_undirected_at_neg,
scaling_factor=args.scaling_factor,
)
def forward_for_all_layers(self, x, edge_index, batch=None, **kwargs):
x1 = F.dropout(x, p=self.args.dropout, training=self.training)
x1 = self.conv1(x1, edge_index, batch=batch, **kwargs)
x2 = F.elu(x1)
x2 = F.dropout(x2, p=self.args.dropout, training=self.training)
x2 = self.conv2(x2, edge_index, batch=batch, **kwargs)
return x1, x2
def forward(self, x, edge_index, batch=None, **kwargs) -> torch.Tensor:
x = F.dropout(x, p=self.args.dropout, training=self.training)
x = self.conv1(x, edge_index, batch=batch, **kwargs)
x = F.elu(x)
x = F.dropout(x, p=self.args.dropout, training=self.training)
x = self.conv2(x, edge_index, batch=batch, **kwargs)
return x
def set_layer_attrs(self, name, value):
setattr(self.conv1, name, value)
setattr(self.conv2, name, value)
def get_attention_dist_by_layer(self, edge_index, num_nodes) -> List[List[torch.Tensor]]:
"""
:param edge_index: tensor the shape of which is [2, E]
:param num_nodes: number of nodes
:return List[List[torch.Tensor]]: [L, N, [#neighbors, heads]]
"""
return [
self.conv1.get_attention_dist(edge_index, num_nodes),
self.conv2.get_attention_dist(edge_index, num_nodes),
]
def modules(self) -> List[SuperGATLayer]:
return [self.conv1, self.conv2]
@staticmethod
def get_trainer(args):
return SuperGATTrainer
@register_model("supergat-large")
class LargeSuperGAT(BaseModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--num-features', type=int)
parser.add_argument("--num-classes", type=int)
parser.add_argument("--patience", type=int, default=100)
parser.add_argument("--num-layers", type=int, default=2)
parser.add_argument('--hidden-size', type=int, default=8)
parser.add_argument("--heads", default=8, type=int)
parser.add_argument("--out-heads", default=None, type=int)
parser.add_argument('--dropout', type=float, default=0.6)
parser.add_argument("--attention-type", type=str, default="basic")
parser.add_argument("--super-gat-criterion", type=str, default=None)
parser.add_argument("--neg-sample-ratio", type=float, default=0.5)
parser.add_argument("--edge-sampling-ratio", type=float, default=0.8)
parser.add_argument("--scaling-factor", type=float, default=None)
parser.add_argument("--to-undirected-at-neg", action="store_true")
parser.add_argument("--to-undirected", action="store_true")
parser.add_argument("--use-bn", action="store_true")
parser.add_argument("--pretraining-noise-ratio", type=float, default=0.0)
parser.add_argument("--val-interval", type=int, default=1)
# fmt: on
@classmethod
def build_model_from_args(cls, args):
return cls(args)
def __init__(self, args):
super().__init__()
self.args = args
self.num_layers = self.args.num_layers
conv_common_kwargs = dict(
dropout=args.dropout,
is_super_gat=True,
attention_type=args.attention_type,
super_gat_criterion=args.super_gat_criterion,
neg_sample_ratio=args.neg_sample_ratio,
edge_sample_ratio=args.edge_sampling_ratio,
pretraining_noise_ratio=args.pretraining_noise_ratio,
use_pretraining=args.use_pretraining,
to_undirected_at_neg=args.to_undirected_at_neg,
scaling_factor=args.scaling_factor,
)
self.conv_list = []
self.bn_list = []
for conv_id in range(1, self.num_layers + 1):
if conv_id == 1: # first layer
in_channels, out_channels = args.num_features, args.hidden_size
heads, concat = args.heads, True
elif conv_id == self.num_layers: # last layer
in_channels, out_channels = args.hidden_size * args.heads, args.num_classes
heads, concat = args.out_heads or args.heads, False
else:
in_channels, out_channels = args.hidden_size * args.heads, args.hidden_size
heads, concat = args.heads, True
# conv
conv = SuperGATLayer(in_channels, out_channels, heads=heads, concat=concat, **conv_common_kwargs)
conv_name = "conv{}".format(conv_id)
self.conv_list.append(conv)
setattr(self, conv_name, conv)
self.add_module(conv_name, conv)
# bn
if args.use_bn and conv_id != self.num_layers: # not last layer
bn = nn.BatchNorm1d(out_channels * heads)
bn_name = "bn{}".format(conv_id)
self.bn_list.append(bn)
setattr(self, bn_name, bn)
self.add_module(bn_name, bn)
print(next(self.modules()))
def forward(self, x, edge_index, batch=None, **kwargs) -> torch.Tensor:
for conv_idx, conv in enumerate(self.conv_list):
x = F.dropout(x, p=self.args.dropout, training=self.training)
x = conv(x, edge_index, **kwargs)
if conv_idx != self.num_layers - 1:
if self.args.use_bn:
x = self.bn_list[conv_idx](x)
x = F.elu(x)
return x
def set_layer_attrs(self, name, value):
for conv in self.conv_list:
setattr(conv, name, value)
def get_attention_dist_by_layer(self, edge_index, num_nodes) -> List[List[torch.Tensor]]:
"""
:param edge_index: tensor the shape of which is [2, E]
:param num_nodes: number of nodes
:return List[List[torch.Tensor]]: [L, N, [#neighbors, heads]]
"""
attention_dist_by_layer = []
for conv in self.conv_list:
attention_dist_by_layer.append(conv.get_attention_dist(edge_index, num_nodes))
return attention_dist_by_layer
def modules(self) -> List[SuperGATLayer]:
return self.conv_list
@staticmethod
def get_trainer(args):
return SuperGATTrainer
| 38.347603 | 119 | 0.602858 | import random
import numpy as np
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.nn.functional as F
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.utils import (
remove_self_loops,
add_self_loops,
softmax,
dropout_adj,
is_undirected,
accuracy,
negative_sampling,
batched_negative_sampling,
to_undirected,
)
import torch_geometric.nn.inits as tgi
from cogdl.trainers.supergat_trainer import SuperGATTrainer
from .. import BaseModel, register_model
from typing import List
def np_sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
class SuperGATLayer(MessagePassing):
def __init__(
self,
in_channels,
out_channels,
heads=1,
concat=True,
negative_slope=0.2,
dropout=0,
bias=True,
is_super_gat=True,
attention_type="basic",
super_gat_criterion=None,
neg_sample_ratio=0.0,
edge_sample_ratio=1.0,
pretraining_noise_ratio=0.0,
use_pretraining=False,
to_undirected_at_neg=False,
scaling_factor=None,
cache_label=False,
cache_attention=False,
**kwargs,
):
super(SuperGATLayer, self).__init__(aggr="add", node_dim=0, **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.heads = heads
self.concat = concat
self.negative_slope = negative_slope
self.dropout = dropout
self.is_super_gat = is_super_gat
self.attention_type = attention_type
self.super_gat_criterion = super_gat_criterion
self.neg_sample_ratio = neg_sample_ratio
self.edge_sample_ratio = edge_sample_ratio
self.pretraining_noise_ratio = pretraining_noise_ratio
self.pretraining = None if not use_pretraining else True
self.to_undirected_at_neg = to_undirected_at_neg
self.cache_label = cache_label
self.cache_attention = cache_attention
self.weight = Parameter(torch.Tensor(in_channels, heads * out_channels))
if self.is_super_gat:
if self.attention_type == "gat_originated":
self.att_mh_1 = Parameter(torch.Tensor(1, heads, 2 * out_channels))
elif self.attention_type == "dot_product":
pass
elif self.attention_type == "scaled_dot_product":
self.scaling_factor = scaling_factor or np.sqrt(self.out_channels)
elif self.attention_type.endswith("mask_only"):
self.att_mh_1 = Parameter(torch.Tensor(1, heads, 2 * out_channels))
else:
raise ValueError
else:
if self.attention_type.endswith("gat_originated") or self.attention_type == "basic":
self.att_mh_1 = Parameter(torch.Tensor(1, heads, 2 * out_channels))
elif self.attention_type.endswith("dot_product"):
pass
else:
raise ValueError
self.cache = {
"num_updated": 0,
"att": None,
"att_with_negatives": None,
"att_label": None,
}
if bias and concat:
self.bias = Parameter(torch.Tensor(heads * out_channels))
elif bias and not concat:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter("bias", None)
self.reset_parameters()
def reset_parameters(self):
tgi.glorot(self.weight)
tgi.zeros(self.bias)
for name, param in self.named_parameters():
if name.startswith("att_scaling"):
tgi.ones(param)
elif name.startswith("att_bias"):
tgi.zeros(param)
elif name.startswith("att_mh"):
tgi.glorot(param)
def forward(self, x, edge_index, size=None, batch=None, neg_edge_index=None, attention_edge_index=None):
if isinstance(edge_index, tuple):
edge_index = torch.stack(edge_index)
if self.pretraining and self.pretraining_noise_ratio > 0.0:
edge_index, _ = dropout_adj(
edge_index,
p=self.pretraining_noise_ratio,
force_undirected=is_undirected(edge_index),
num_nodes=x.size(0),
training=self.training,
)
if size is None and torch.is_tensor(x):
edge_index, _ = remove_self_loops(edge_index)
edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
x = torch.matmul(x, self.weight)
x = x.view(-1, self.heads, self.out_channels)
propagated = self.propagate(edge_index, size=size, x=x)
if (self.is_super_gat and self.training) or (attention_edge_index is not None) or (neg_edge_index is not None):
device = next(self.parameters()).device
num_pos_samples = int(self.edge_sample_ratio * edge_index.size(1))
num_neg_samples = int(self.neg_sample_ratio * self.edge_sample_ratio * edge_index.size(1))
if attention_edge_index is not None:
neg_edge_index = None
elif neg_edge_index is not None:
pass
elif batch is None:
if self.to_undirected_at_neg:
edge_index_for_ns = to_undirected(edge_index, num_nodes=x.size(0))
else:
edge_index_for_ns = edge_index
neg_edge_index = negative_sampling(
edge_index=edge_index_for_ns,
num_nodes=x.size(0),
num_neg_samples=num_neg_samples,
)
else:
neg_edge_index = batched_negative_sampling(
edge_index=edge_index,
batch=batch,
num_neg_samples=num_neg_samples,
)
if self.edge_sample_ratio < 1.0:
pos_indices = random.sample(range(edge_index.size(1)), num_pos_samples)
pos_indices = torch.tensor(pos_indices).long().to(device)
pos_edge_index = edge_index[:, pos_indices]
else:
pos_edge_index = edge_index
att_with_negatives = self._get_attention_with_negatives(
x=x,
edge_index=pos_edge_index,
neg_edge_index=neg_edge_index,
total_edge_index=attention_edge_index,
)
if self.training and (self.cache["att_label"] is None or not self.cache_label):
att_label = torch.zeros(att_with_negatives.size(0)).float().to(device)
att_label[: pos_edge_index.size(1)] = 1.0
elif self.training and self.cache["att_label"] is not None:
att_label = self.cache["att_label"]
else:
att_label = None
self._update_cache("att_label", att_label)
self._update_cache("att_with_negatives", att_with_negatives)
return propagated
def message(self, edge_index_i, x_i, x_j, size_i):
x_j = x_j.view(-1, self.heads, self.out_channels)
if x_i is not None:
x_i = x_i.view(-1, self.heads, self.out_channels)
alpha = self._get_attention(edge_index_i, x_i, x_j, size_i)
if self.cache_attention:
self._update_cache("att", alpha)
alpha = F.dropout(alpha, p=self.dropout, training=self.training)
return x_j * alpha.view(-1, self.heads, 1)
def update(self, aggr_out):
if self.concat is True:
aggr_out = aggr_out.view(-1, self.heads * self.out_channels)
else:
aggr_out = aggr_out.mean(dim=1)
if self.bias is not None:
aggr_out = aggr_out + self.bias
return aggr_out
def _get_attention(
self, edge_index_i, x_i, x_j, size_i, normalize=True, with_negatives=False, **kwargs
) -> torch.Tensor:
if self.attention_type == "basic" or self.attention_type.endswith("gat_originated"):
alpha = torch.einsum("ehf,xhf->eh", torch.cat([x_i, x_j], dim=-1), self.att_mh_1)
elif self.attention_type == "scaled_dot_product":
alpha = torch.einsum("ehf,ehf->eh", x_i, x_j) / self.scaling_factor
elif self.attention_type == "dot_product":
alpha = torch.einsum("ehf,ehf->eh", x_i, x_j)
elif "mask" in self.attention_type:
logits = torch.einsum("ehf,ehf->eh", x_i, x_j)
if self.attention_type.endswith("scaling"):
logits = logits / self.att_scaling
if with_negatives:
return logits
alpha = torch.einsum("ehf,xhf->eh", torch.cat([x_i, x_j], dim=-1), self.att_mh_1)
alpha = torch.einsum("eh,eh->eh", alpha, torch.sigmoid(logits))
else:
raise ValueError
if normalize:
alpha = F.leaky_relu(alpha, self.negative_slope)
alpha = softmax(alpha, edge_index_i, num_nodes=size_i)
return alpha
def _get_attention_with_negatives(self, x, edge_index, neg_edge_index, total_edge_index=None):
if neg_edge_index is not None and neg_edge_index.size(1) <= 0:
neg_edge_index = torch.zeros((2, 0, self.heads))
if total_edge_index is None:
total_edge_index = torch.cat([edge_index, neg_edge_index], dim=-1)
total_edge_index_j, total_edge_index_i = total_edge_index
x_i = torch.index_select(x, 0, total_edge_index_i)
x_j = torch.index_select(x, 0, total_edge_index_j)
size_i = x.size(0)
x_j = x_j.view(-1, self.heads, self.out_channels)
if x_i is not None:
x_i = x_i.view(-1, self.heads, self.out_channels)
alpha = self._get_attention(total_edge_index_i, x_i, x_j, size_i, normalize=False, with_negatives=True)
return alpha
def __repr__(self):
return "{}({}, {}, heads={}, concat={}, att_type={}, nsr={}, pnr={})".format(
self.__class__.__name__,
self.in_channels,
self.out_channels,
self.heads,
self.concat,
self.attention_type,
self.neg_sample_ratio,
self.pretraining_noise_ratio,
)
def _update_cache(self, key, val):
self.cache[key] = val
self.cache["num_updated"] += 1
def get_attention_dist(self, edge_index: torch.Tensor, num_nodes: int):
edge_index, _ = remove_self_loops(edge_index)
edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes)
att = self.cache["att"]
att_dist_list = []
for node_idx in range(num_nodes):
att_neighbors = att[edge_index[1] == node_idx, :].t() att_dist_list.append(att_neighbors)
return att_dist_list
@register_model("supergat")
class SuperGAT(BaseModel):
@staticmethod
def add_args(parser):
parser.add_argument('--num-features', type=int)
parser.add_argument("--num-classes", type=int)
parser.add_argument("--patience", type=int, default=100)
parser.add_argument('--hidden-size', type=int, default=16)
parser.add_argument("--heads", default=8, type=int)
parser.add_argument("--out-heads", default=None, type=int)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument("--attention-type", type=str, default="basic")
parser.add_argument("--super-gat-criterion", type=str, default=None)
parser.add_argument("--neg-sample-ratio", type=float, default=0.5)
parser.add_argument("--edge-sampling-ratio", type=float, default=0.8)
parser.add_argument("--scaling-factor", type=float, default=None)
parser.add_argument("--to-undirected-at-neg", action="store_true")
parser.add_argument("--to-undirected", action="store_true")
parser.add_argument("--pretraining-noise-ratio", type=float, default=0.0)
parser.add_argument("--val-interval", type=int, default=1)
parser.add_argument("--att-lambda", default=0., type=float)
parser.add_argument("--total-pretraining-epoch", default=0, type=int)
@classmethod
def build_model_from_args(cls, args):
return cls(args)
def __init__(self, args):
super().__init__()
self.args = args
self.conv1 = SuperGATLayer(
args.num_features,
args.hidden_size,
heads=args.heads,
dropout=args.dropout,
concat=True,
is_super_gat=True,
attention_type=args.attention_type,
super_gat_criterion=args.super_gat_criterion,
neg_sample_ratio=args.neg_sample_ratio,
edge_sample_ratio=args.edge_sampling_ratio,
pretraining_noise_ratio=args.pretraining_noise_ratio,
use_pretraining=False,
to_undirected_at_neg=args.to_undirected_at_neg,
scaling_factor=args.scaling_factor,
)
self.conv2 = SuperGATLayer(
args.hidden_size * args.heads,
args.num_classes,
heads=(args.out_heads or args.heads),
dropout=args.dropout,
concat=False,
is_super_gat=True,
attention_type=args.attention_type,
super_gat_criterion=args.super_gat_criterion,
neg_sample_ratio=args.neg_sample_ratio,
edge_sample_ratio=args.edge_sampling_ratio,
pretraining_noise_ratio=args.pretraining_noise_ratio,
use_pretraining=False,
to_undirected_at_neg=args.to_undirected_at_neg,
scaling_factor=args.scaling_factor,
)
def forward_for_all_layers(self, x, edge_index, batch=None, **kwargs):
x1 = F.dropout(x, p=self.args.dropout, training=self.training)
x1 = self.conv1(x1, edge_index, batch=batch, **kwargs)
x2 = F.elu(x1)
x2 = F.dropout(x2, p=self.args.dropout, training=self.training)
x2 = self.conv2(x2, edge_index, batch=batch, **kwargs)
return x1, x2
def forward(self, x, edge_index, batch=None, **kwargs) -> torch.Tensor:
x = F.dropout(x, p=self.args.dropout, training=self.training)
x = self.conv1(x, edge_index, batch=batch, **kwargs)
x = F.elu(x)
x = F.dropout(x, p=self.args.dropout, training=self.training)
x = self.conv2(x, edge_index, batch=batch, **kwargs)
return x
def set_layer_attrs(self, name, value):
setattr(self.conv1, name, value)
setattr(self.conv2, name, value)
def get_attention_dist_by_layer(self, edge_index, num_nodes) -> List[List[torch.Tensor]]:
return [
self.conv1.get_attention_dist(edge_index, num_nodes),
self.conv2.get_attention_dist(edge_index, num_nodes),
]
def modules(self) -> List[SuperGATLayer]:
return [self.conv1, self.conv2]
@staticmethod
def get_trainer(args):
return SuperGATTrainer
@register_model("supergat-large")
class LargeSuperGAT(BaseModel):
@staticmethod
def add_args(parser):
parser.add_argument('--num-features', type=int)
parser.add_argument("--num-classes", type=int)
parser.add_argument("--patience", type=int, default=100)
parser.add_argument("--num-layers", type=int, default=2)
parser.add_argument('--hidden-size', type=int, default=8)
parser.add_argument("--heads", default=8, type=int)
parser.add_argument("--out-heads", default=None, type=int)
parser.add_argument('--dropout', type=float, default=0.6)
parser.add_argument("--attention-type", type=str, default="basic")
parser.add_argument("--super-gat-criterion", type=str, default=None)
parser.add_argument("--neg-sample-ratio", type=float, default=0.5)
parser.add_argument("--edge-sampling-ratio", type=float, default=0.8)
parser.add_argument("--scaling-factor", type=float, default=None)
parser.add_argument("--to-undirected-at-neg", action="store_true")
parser.add_argument("--to-undirected", action="store_true")
parser.add_argument("--use-bn", action="store_true")
parser.add_argument("--pretraining-noise-ratio", type=float, default=0.0)
parser.add_argument("--val-interval", type=int, default=1)
@classmethod
def build_model_from_args(cls, args):
return cls(args)
def __init__(self, args):
super().__init__()
self.args = args
self.num_layers = self.args.num_layers
conv_common_kwargs = dict(
dropout=args.dropout,
is_super_gat=True,
attention_type=args.attention_type,
super_gat_criterion=args.super_gat_criterion,
neg_sample_ratio=args.neg_sample_ratio,
edge_sample_ratio=args.edge_sampling_ratio,
pretraining_noise_ratio=args.pretraining_noise_ratio,
use_pretraining=args.use_pretraining,
to_undirected_at_neg=args.to_undirected_at_neg,
scaling_factor=args.scaling_factor,
)
self.conv_list = []
self.bn_list = []
for conv_id in range(1, self.num_layers + 1):
if conv_id == 1:
in_channels, out_channels = args.num_features, args.hidden_size
heads, concat = args.heads, True
elif conv_id == self.num_layers:
in_channels, out_channels = args.hidden_size * args.heads, args.num_classes
heads, concat = args.out_heads or args.heads, False
else:
in_channels, out_channels = args.hidden_size * args.heads, args.hidden_size
heads, concat = args.heads, True
conv = SuperGATLayer(in_channels, out_channels, heads=heads, concat=concat, **conv_common_kwargs)
conv_name = "conv{}".format(conv_id)
self.conv_list.append(conv)
setattr(self, conv_name, conv)
self.add_module(conv_name, conv)
if args.use_bn and conv_id != self.num_layers:
bn = nn.BatchNorm1d(out_channels * heads)
bn_name = "bn{}".format(conv_id)
self.bn_list.append(bn)
setattr(self, bn_name, bn)
self.add_module(bn_name, bn)
print(next(self.modules()))
def forward(self, x, edge_index, batch=None, **kwargs) -> torch.Tensor:
for conv_idx, conv in enumerate(self.conv_list):
x = F.dropout(x, p=self.args.dropout, training=self.training)
x = conv(x, edge_index, **kwargs)
if conv_idx != self.num_layers - 1:
if self.args.use_bn:
x = self.bn_list[conv_idx](x)
x = F.elu(x)
return x
def set_layer_attrs(self, name, value):
for conv in self.conv_list:
setattr(conv, name, value)
def get_attention_dist_by_layer(self, edge_index, num_nodes) -> List[List[torch.Tensor]]:
attention_dist_by_layer = []
for conv in self.conv_list:
attention_dist_by_layer.append(conv.get_attention_dist(edge_index, num_nodes))
return attention_dist_by_layer
def modules(self) -> List[SuperGATLayer]:
return self.conv_list
@staticmethod
def get_trainer(args):
return SuperGATTrainer
| true | true |
f7fb6d12d3d76297ceb77432612fedf234741b15 | 2,581 | py | Python | setup.py | mrdavidlaing/impyla | dfe9d00ca7eb9f4297075e1251397cdeb61e8298 | [
"Apache-2.0"
] | null | null | null | setup.py | mrdavidlaing/impyla | dfe9d00ca7eb9f4297075e1251397cdeb61e8298 | [
"Apache-2.0"
] | null | null | null | setup.py | mrdavidlaing/impyla | dfe9d00ca7eb9f4297075e1251397cdeb61e8298 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
def readme():
    """Return the contents of README.md (used as the long description)."""
    with open('README.md', 'r') as fh:
        return fh.read()
import versioneer # noqa
# Package metadata; the version is derived from git tags by versioneer.
setup(
    name='impyla',
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description='Python client for the Impala distributed query engine',
    long_description_content_type='text/markdown',
    long_description=readme(),
    maintainer='Wes McKinney',
    maintainer_email='wes.mckinney@twosigma.com',
    author='Uri Laserson',
    author_email='laserson@cloudera.com',
    url='https://github.com/cloudera/impyla',
    packages=find_packages(),
    # NOTE(review): setuptools' option is `include_package_data`;
    # `install_package_data` is not a recognized keyword -- confirm intent.
    install_package_data=True,
    package_data={'impala.thrift': ['*.thrift']},
    install_requires=['six', 'bitarray'],
    extras_require={
        # Thrift implementation differs by Python major version.
        # TODO: Python 3 could also use Thrift 0.11.0
        ":python_version<'3.0'": ["thrift==0.11.0"],
        ":python_version>='3.0'": ["thriftpy2>=0.4.0,<0.5.0",
                                   ],
        "kerberos": ["thrift_sasl==0.4.3a1",
                     "kerberos>=1.3.0",
                     ],
    },
    keywords=('cloudera impala python hadoop sql hdfs mpp spark pydata '
              'pandas distributed db api pep 249 hive hiveserver2 hs2'),
    license='Apache License, Version 2.0',
    classifiers=[
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9'
    ],
    # Expose the SQLAlchemy dialect via the standard entry-point group.
    entry_points={
        'sqlalchemy.dialects': ['impala = impala.sqlalchemy:ImpalaDialect']},
    zip_safe=False)
| 35.356164 | 77 | 0.647036 |
from __future__ import absolute_import
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
def readme():
with open('README.md', 'r') as ip:
return ip.read()
import versioneer
setup(
name='impyla',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='Python client for the Impala distributed query engine',
long_description_content_type='text/markdown',
long_description=readme(),
maintainer='Wes McKinney',
maintainer_email='wes.mckinney@twosigma.com',
author='Uri Laserson',
author_email='laserson@cloudera.com',
url='https://github.com/cloudera/impyla',
packages=find_packages(),
install_package_data=True,
package_data={'impala.thrift': ['*.thrift']},
install_requires=['six', 'bitarray'],
extras_require={
":python_version<'3.0'": ["thrift==0.11.0"],
":python_version>='3.0'": ["thriftpy2>=0.4.0,<0.5.0",
],
"kerberos": ["thrift_sasl==0.4.3a1",
"kerberos>=1.3.0",
],
},
keywords=('cloudera impala python hadoop sql hdfs mpp spark pydata '
'pandas distributed db api pep 249 hive hiveserver2 hs2'),
license='Apache License, Version 2.0',
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9'
],
entry_points={
'sqlalchemy.dialects': ['impala = impala.sqlalchemy:ImpalaDialect']},
zip_safe=False)
| true | true |
f7fb6f3210080025fcb3a466bb138b1fc06178ac | 2,966 | py | Python | audio/receiver.py | qaute/zeitgeist | 6d62294571e32acc12fbb67f98adf923d5a1533b | [
"CC0-1.0"
] | null | null | null | audio/receiver.py | qaute/zeitgeist | 6d62294571e32acc12fbb67f98adf923d5a1533b | [
"CC0-1.0"
] | null | null | null | audio/receiver.py | qaute/zeitgeist | 6d62294571e32acc12fbb67f98adf923d5a1533b | [
"CC0-1.0"
] | 1 | 2018-10-30T16:51:41.000Z | 2018-10-30T16:51:41.000Z | #!/usr/bin/python3
"""
receiver.py
This file tracks an acoustic FSK signal by the phase difference between two microphones.
"""
import time
import numpy as np
import matplotlib.pyplot as plt
import sounddevice as sd
import scipy.signal as sp
# define waveform parameters
fs = 44100 # (Hz) sample rate
fm = 256 # (samples/cycle) (~172 Hz) modulation frequency
f1 = 55 # (cycles/128 samples) (18949.2 Hz) first carrier frequency
f2 = 57 # (cycles/128 samples) (19638.3 Hz) second carrier frequency
# generate sample waveform
# One modulation period of time stamps (endpoint excluded).
times = np.linspace(0, fm/fs, fm, False)
carrier1 = np.sin(2*np.pi*fs*2/fm*f1*times)
carrier2 = np.sin(2*np.pi*fs*2/fm*f2*times)
blank = times*0
# FSK templates: carrier during the first half of the period, silence after.
mask1 = np.reshape(np.concatenate((carrier1[:int(fm/2)], blank[int(fm/2):])), (fm))
mask2 = np.reshape(np.concatenate((carrier2[:int(fm/2)], blank[int(fm/2):])), (fm))
# NOTE(review): carrier1/2 and mask1/2 are not referenced by the capture loop
# below -- presumably matched-filter templates for a planned step; confirm.
# define helper functions
def corr2(a, b):
    """Cyclic (circular) correlation of the absolute products of a and b.

    a is an NxM numpy array --- data for M channels.
    b is an Nx1 numpy array (broadcast against each channel of a).
    Element [i, m] is sum over rows of |a[:, m] * roll(b, i)|.
    Returns an array shaped like a.
    """
    num_shifts = a.shape[0]
    result = np.zeros(a.shape)
    for shift in range(num_shifts):
        shifted = np.roll(b, shift, axis=0)
        result[shift] = np.abs(a * shifted).sum(axis=0)
    return result
def corr(a, b):
    """Cyclic (circular) correlation of two equal-length 1-D arrays.

    Element i of the result is sum(|a * roll(b, i)|).
    Raises AssertionError if the inputs differ in length.
    """
    # Fix: the original called plt.plot()/plt.show() on every invocation,
    # opening blocking GUI windows from a pure computation routine (debug
    # leftovers).  The plotting is removed; the computation is unchanged.
    assert(len(a)==len(b))
    output = np.zeros((len(a)))
    for i in range(len(a)):
        output[i] = np.sum(np.abs(a*np.roll(b, i)))
    return output
def avg(a, n):
    """Cyclic centered moving average of a with a (2n+1)-point window.

    a is an NxM numpy array --- data for M channels.
    Row i of the result is the mean of rows i-n .. i+n of a, with
    indices wrapping around (cyclic boundary).
    Returns an NxM array.
    """
    length = a.shape[0]
    window = 2 * n + 1
    result = np.zeros(a.shape)
    for row in range(length):
        # Wrapped index window centered on `row`.
        idx = np.arange(row - n, row + n + 1) % length
        result[row] = a[idx].sum(axis=0) / window
    return result
# NOTE(review): `average` and `count` are initialized but never used below
# in this view — possibly leftovers from an earlier smoothing scheme.
average = np.zeros((50))
count = 0
# Main capture loop: record a stereo buffer, isolate the carrier energy
# envelope on each channel, then estimate the inter-channel lag by cyclic
# cross-correlation.  Each iteration blocks on plt.show().
while True:
    # Record 10 modulation periods of 2-channel audio at fs.
    data = sd.rec(fm*10, samplerate=fs, channels=2)
    plt.plot(data, label='original')
    # High-pass to keep only the (near-Nyquist) carrier band.
    b, a = sp.butter(3, 0.5, btype='high')
    data2 = sp.filtfilt(b, a, data, padlen=50, axis=0)
    plt.plot(data2, label='filter')
    # Rectify to get the envelope.
    data3 = np.abs(data2)
    plt.plot(data3, label='abs')
    # Cyclic (2n+1)-point running average (inlined copy of avg() above).
    n = 5
    data4 = np.zeros(data3.shape)
    for i in range(data3.shape[0]):
        temp = np.roll(data3, -i, axis=0)
        data4[i] = (temp[0]+np.sum(temp[1:n+1], axis=0)+np.sum(temp[data3.shape[0]-n:], axis=0))/(2*n+1)
    plt.plot(data4, label='avg')
    # Low-pass smooth the envelope and scale for display.
    b, a = sp.butter(3, 0.01, btype='low')
    data5 = sp.filtfilt(b, a, data4, padlen=50, axis=0)*10
    plt.plot(data5, label='filter2')
    # Cyclic cross-correlation between the two channel envelopes.
    data6 = np.zeros(data5.shape[0])
    for i in range(data5.shape[0]):
        data6[i] = np.sum(data5[:,0]*np.roll(data5[:,1], i))/1000
    plt.plot(data6[256:512], label='output')
    # Peak index within one modulation period = sample lag; fold lags in
    # the upper half of the period to negative offsets.
    diff = data6[:256].argmax()
    dist = diff
    if diff > 256/2:
        dist = diff-256
    plt.title('{}'.format(dist))
    print(dist)
    plt.legend()
    plt.show()
| 27.719626 | 107 | 0.610249 |
import time
import numpy as np
import matplotlib.pyplot as plt
import sounddevice as sd
import scipy.signal as sp
fs = 44100
fm = 256
f1 = 55
f2 = 57
times = np.linspace(0, fm/fs, fm, False)
carrier1 = np.sin(2*np.pi*fs*2/fm*f1*times)
carrier2 = np.sin(2*np.pi*fs*2/fm*f2*times)
blank = times*0
mask1 = np.reshape(np.concatenate((carrier1[:int(fm/2)], blank[int(fm/2):])), (fm))
mask2 = np.reshape(np.concatenate((carrier2[:int(fm/2)], blank[int(fm/2):])), (fm))
def corr2(a, b):
output = np.zeros(a.shape)
for i in range(a.shape[0]):
output[i] = np.sum(np.abs(a*np.roll(b, i, axis=0)), axis=0)
return output
def corr(a, b):
assert(len(a)==len(b))
output = np.zeros((len(a)))
plt.plot(a); plt.show()
for i in range(len(a)):
output[i] = np.sum(np.abs(a*np.roll(b, i)))
plt.plot(output); plt.show()
return output
def avg(a, n):
output = np.zeros(a.shape)
for i in range(a.shape[0]):
temp = np.roll(a, -i, axis=0)
output[i] = (temp[0,:]+np.sum(temp[1:n+1,:], axis=0)+np.sum(temp[a.shape[0]-n:,:], axis=0))/(2*n+1)
return output
average = np.zeros((50))
count = 0
while True:
data = sd.rec(fm*10, samplerate=fs, channels=2)
plt.plot(data, label='original')
b, a = sp.butter(3, 0.5, btype='high')
data2 = sp.filtfilt(b, a, data, padlen=50, axis=0)
plt.plot(data2, label='filter')
data3 = np.abs(data2)
plt.plot(data3, label='abs')
n = 5
data4 = np.zeros(data3.shape)
for i in range(data3.shape[0]):
temp = np.roll(data3, -i, axis=0)
data4[i] = (temp[0]+np.sum(temp[1:n+1], axis=0)+np.sum(temp[data3.shape[0]-n:], axis=0))/(2*n+1)
plt.plot(data4, label='avg')
b, a = sp.butter(3, 0.01, btype='low')
data5 = sp.filtfilt(b, a, data4, padlen=50, axis=0)*10
plt.plot(data5, label='filter2')
data6 = np.zeros(data5.shape[0])
for i in range(data5.shape[0]):
data6[i] = np.sum(data5[:,0]*np.roll(data5[:,1], i))/1000
plt.plot(data6[256:512], label='output')
diff = data6[:256].argmax()
dist = diff
if diff > 256/2:
dist = diff-256
plt.title('{}'.format(dist))
print(dist)
plt.legend()
plt.show()
| true | true |
f7fb6f40d200a252e6e99bd61725342ed175df11 | 769 | py | Python | examples/Graph_Neural_Networks/PyTorch/TAGCN.py | TobiasSchmidtDE/GraphGallery | e627e4f454e0ce3813171305a524f5190a6e6f45 | [
"MIT"
] | null | null | null | examples/Graph_Neural_Networks/PyTorch/TAGCN.py | TobiasSchmidtDE/GraphGallery | e627e4f454e0ce3813171305a524f5190a6e6f45 | [
"MIT"
] | null | null | null | examples/Graph_Neural_Networks/PyTorch/TAGCN.py | TobiasSchmidtDE/GraphGallery | e627e4f454e0ce3813171305a524f5190a6e6f45 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# Example: train a TAGCN node classifier with GraphGallery (PyTorch backend).
import torch
import graphgallery
print("GraphGallery version: ", graphgallery.__version__)
print("Torch version: ", torch.__version__)
'''
Load Datasets
- cora/citeseer/pubmed
'''
from graphgallery.datasets import Planetoid
data = Planetoid('cora', root="~/GraphData/datasets/", verbose=False)
graph = data.graph
# Random train/val/test node split provided by the dataset helper.
splits = data.split_nodes()
graphgallery.set_backend("pytorch")
from graphgallery.gallery.nodeclas import TAGCN
# NOTE(review): device="gpu" requires CUDA to be available — confirm fallback.
trainer = TAGCN(device="gpu", seed=123).setup_graph(graph, attr_transform="normalize_attr").build()
his = trainer.fit(splits.train_nodes, splits.val_nodes, verbose=1, epochs=100)
results = trainer.evaluate(splits.test_nodes)
print(f'Test loss {results.loss:.5}, Test accuracy {results.accuracy:.2%}')
| 29.576923 | 99 | 0.768531 |
import torch
import graphgallery
print("GraphGallery version: ", graphgallery.__version__)
print("Torch version: ", torch.__version__)
from graphgallery.datasets import Planetoid
data = Planetoid('cora', root="~/GraphData/datasets/", verbose=False)
graph = data.graph
splits = data.split_nodes()
graphgallery.set_backend("pytorch")
from graphgallery.gallery.nodeclas import TAGCN
trainer = TAGCN(device="gpu", seed=123).setup_graph(graph, attr_transform="normalize_attr").build()
his = trainer.fit(splits.train_nodes, splits.val_nodes, verbose=1, epochs=100)
results = trainer.evaluate(splits.test_nodes)
print(f'Test loss {results.loss:.5}, Test accuracy {results.accuracy:.2%}')
| true | true |
f7fb702baffcabc23fabacf84e23ee9f3bb01304 | 2,650 | py | Python | sarpy/io/general/nitf_elements/tres/unclass/RSMPCA.py | pressler-vsc/sarpy | fa6c951c42b9a7d9df2edfa53c771494cb0246fb | [
"MIT"
] | 1 | 2021-02-04T08:44:18.000Z | 2021-02-04T08:44:18.000Z | sarpy/io/general/nitf_elements/tres/unclass/RSMPCA.py | pressler-vsc/sarpy | fa6c951c42b9a7d9df2edfa53c771494cb0246fb | [
"MIT"
] | null | null | null | sarpy/io/general/nitf_elements/tres/unclass/RSMPCA.py | pressler-vsc/sarpy | fa6c951c42b9a7d9df2edfa53c771494cb0246fb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
class RNT(TREElement):
    """Row numerator term: one 21-character polynomial coefficient (RNPCF)."""
    def __init__(self, value):
        super(RNT, self).__init__()
        self.add_field('RNPCF', 's', 21, value)
class RDT(TREElement):
    """Row denominator term: one 21-character polynomial coefficient (RDPCF)."""
    def __init__(self, value):
        super(RDT, self).__init__()
        self.add_field('RDPCF', 's', 21, value)
class CNT(TREElement):
    """Column numerator term: one 21-character polynomial coefficient (CNPCF)."""
    def __init__(self, value):
        super(CNT, self).__init__()
        self.add_field('CNPCF', 's', 21, value)
class CDT(TREElement):
    """Column denominator term: one 21-character polynomial coefficient (CDPCF)."""
    def __init__(self, value):
        super(CDT, self).__init__()
        self.add_field('CDPCF', 's', 21, value)
class RSMPCAType(TREElement):
    """Field layout for the RSMPCA TRE (RSM Polynomial Coefficients).

    The add_field/add_loop calls below define the byte layout of the TRE,
    so their order and sizes must not change.
    """
    def __init__(self, value):
        super(RSMPCAType, self).__init__()
        # Identification and edition.
        self.add_field('IID', 's', 80, value)
        self.add_field('EDITION', 's', 40, value)
        # Row/column section numbers and fitting error estimates.
        self.add_field('RSN', 's', 3, value)
        self.add_field('CSN', 's', 3, value)
        self.add_field('RFEP', 's', 21, value)
        self.add_field('CFEP', 's', 21, value)
        # Normalization offsets (row, column, X, Y, Z).
        self.add_field('RNRMO', 's', 21, value)
        self.add_field('CNRMO', 's', 21, value)
        self.add_field('XNRMO', 's', 21, value)
        self.add_field('YNRMO', 's', 21, value)
        self.add_field('ZNRMO', 's', 21, value)
        # Normalization scale factors (row, column, X, Y, Z).
        self.add_field('RNRMSF', 's', 21, value)
        self.add_field('CNRMSF', 's', 21, value)
        self.add_field('XNRMSF', 's', 21, value)
        self.add_field('YNRMSF', 's', 21, value)
        self.add_field('ZNRMSF', 's', 21, value)
        # Row numerator polynomial: max powers, term count, coefficients.
        self.add_field('RNPWRX', 's', 1, value)
        self.add_field('RNPWRY', 's', 1, value)
        self.add_field('RNPWRZ', 's', 1, value)
        self.add_field('RNTRMS', 'd', 3, value)
        self.add_loop('RNTs', self.RNTRMS, RNT, value)
        # Row denominator polynomial.
        self.add_field('RDPWRX', 's', 1, value)
        self.add_field('RDPWRY', 's', 1, value)
        self.add_field('RDPWRZ', 's', 1, value)
        self.add_field('RDTRMS', 'd', 3, value)
        self.add_loop('RDTs', self.RDTRMS, RDT, value)
        # Column numerator polynomial.
        self.add_field('CNPWRX', 's', 1, value)
        self.add_field('CNPWRY', 's', 1, value)
        self.add_field('CNPWRZ', 's', 1, value)
        self.add_field('CNTRMS', 'd', 3, value)
        self.add_loop('CNTs', self.CNTRMS, CNT, value)
        # Column denominator polynomial.
        self.add_field('CDPWRX', 's', 1, value)
        self.add_field('CDPWRY', 's', 1, value)
        self.add_field('CDPWRZ', 's', 1, value)
        self.add_field('CDTRMS', 'd', 3, value)
        self.add_loop('CDTs', self.CDTRMS, CDT, value)
class RSMPCA(TREExtension):
    """TRE extension wrapper binding tag 'RSMPCA' to the RSMPCAType layout."""
    _tag_value = 'RSMPCA'
    _data_type = RSMPCAType
| 34.415584 | 54 | 0.581887 |
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
class RNT(TREElement):
def __init__(self, value):
super(RNT, self).__init__()
self.add_field('RNPCF', 's', 21, value)
class RDT(TREElement):
def __init__(self, value):
super(RDT, self).__init__()
self.add_field('RDPCF', 's', 21, value)
class CNT(TREElement):
def __init__(self, value):
super(CNT, self).__init__()
self.add_field('CNPCF', 's', 21, value)
class CDT(TREElement):
def __init__(self, value):
super(CDT, self).__init__()
self.add_field('CDPCF', 's', 21, value)
class RSMPCAType(TREElement):
def __init__(self, value):
super(RSMPCAType, self).__init__()
self.add_field('IID', 's', 80, value)
self.add_field('EDITION', 's', 40, value)
self.add_field('RSN', 's', 3, value)
self.add_field('CSN', 's', 3, value)
self.add_field('RFEP', 's', 21, value)
self.add_field('CFEP', 's', 21, value)
self.add_field('RNRMO', 's', 21, value)
self.add_field('CNRMO', 's', 21, value)
self.add_field('XNRMO', 's', 21, value)
self.add_field('YNRMO', 's', 21, value)
self.add_field('ZNRMO', 's', 21, value)
self.add_field('RNRMSF', 's', 21, value)
self.add_field('CNRMSF', 's', 21, value)
self.add_field('XNRMSF', 's', 21, value)
self.add_field('YNRMSF', 's', 21, value)
self.add_field('ZNRMSF', 's', 21, value)
self.add_field('RNPWRX', 's', 1, value)
self.add_field('RNPWRY', 's', 1, value)
self.add_field('RNPWRZ', 's', 1, value)
self.add_field('RNTRMS', 'd', 3, value)
self.add_loop('RNTs', self.RNTRMS, RNT, value)
self.add_field('RDPWRX', 's', 1, value)
self.add_field('RDPWRY', 's', 1, value)
self.add_field('RDPWRZ', 's', 1, value)
self.add_field('RDTRMS', 'd', 3, value)
self.add_loop('RDTs', self.RDTRMS, RDT, value)
self.add_field('CNPWRX', 's', 1, value)
self.add_field('CNPWRY', 's', 1, value)
self.add_field('CNPWRZ', 's', 1, value)
self.add_field('CNTRMS', 'd', 3, value)
self.add_loop('CNTs', self.CNTRMS, CNT, value)
self.add_field('CDPWRX', 's', 1, value)
self.add_field('CDPWRY', 's', 1, value)
self.add_field('CDPWRZ', 's', 1, value)
self.add_field('CDTRMS', 'd', 3, value)
self.add_loop('CDTs', self.CDTRMS, CDT, value)
class RSMPCA(TREExtension):
_tag_value = 'RSMPCA'
_data_type = RSMPCAType
| true | true |
f7fb7080753277206483bd820d16b9e0e9a0b5fb | 24,218 | py | Python | _v5__sub_browser.py | konsan1101/pyRiKi1 | 687061ce09889ec91c1c3c11df62f4cfcb3d9613 | [
"MIT"
] | null | null | null | _v5__sub_browser.py | konsan1101/pyRiKi1 | 687061ce09889ec91c1c3c11df62f4cfcb3d9613 | [
"MIT"
] | 1 | 2020-11-06T04:36:33.000Z | 2020-11-06T04:36:33.000Z | _v5__sub_browser.py | konsan1101/pyRiKi1 | 687061ce09889ec91c1c3c11df62f4cfcb3d9613 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# COPYRIGHT (C) 2014-2020 Mitsuo KONDOU.
# This software is released under the MIT License.
# https://github.com/konsan1101
# Thank you for keeping the rules.
import sys
import os
import time
import datetime
import codecs
import glob
import queue
import threading
import subprocess
from selenium.webdriver import Firefox, FirefoxOptions
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
#print(os.path.dirname(__file__))
#print(os.path.basename(__file__))
#print(sys.version_info)
# Interface: control file polled by this process for commands
# ('_start_', '_stop_', '_end_', URLs, or free-text search terms).
qCtrl_control_browser = 'temp/control_browser.txt'
qCtrl_control_self = qCtrl_control_browser
# Common framework singletons (shared state, utilities, logging).
import _v5__qRiKi
qRiKi = _v5__qRiKi.qRiKi_class()
import _v5__qFunc
qFunc = _v5__qFunc.qFunc_class()
import _v5__qLog
qLog = _v5__qLog.qLog_class()
# Platform / identity values resolved by the framework.
qPLATFORM = qRiKi.getValue('qPLATFORM' )
qRUNATTR = qRiKi.getValue('qRUNATTR' )
qHOSTNAME = qRiKi.getValue('qHOSTNAME' )
qUSERNAME = qRiKi.getValue('qUSERNAME' )
# Working/data directory paths (s_* = speech, v_* = vision, d_* = desktop).
qPath_pictures = qRiKi.getValue('qPath_pictures' )
qPath_videos = qRiKi.getValue('qPath_videos' )
qPath_cache = qRiKi.getValue('qPath_cache' )
qPath_sounds = qRiKi.getValue('qPath_sounds' )
qPath_icons = qRiKi.getValue('qPath_icons' )
qPath_fonts = qRiKi.getValue('qPath_fonts' )
qPath_log = qRiKi.getValue('qPath_log' )
qPath_work = qRiKi.getValue('qPath_work' )
qPath_rec = qRiKi.getValue('qPath_rec' )
qPath_s_ctrl = qRiKi.getValue('qPath_s_ctrl' )
qPath_s_inp = qRiKi.getValue('qPath_s_inp' )
qPath_s_wav = qRiKi.getValue('qPath_s_wav' )
qPath_s_jul = qRiKi.getValue('qPath_s_jul' )
qPath_s_STT = qRiKi.getValue('qPath_s_STT' )
qPath_s_TTS = qRiKi.getValue('qPath_s_TTS' )
qPath_s_TRA = qRiKi.getValue('qPath_s_TRA' )
qPath_s_play = qRiKi.getValue('qPath_s_play' )
qPath_v_ctrl = qRiKi.getValue('qPath_v_ctrl' )
qPath_v_inp = qRiKi.getValue('qPath_v_inp' )
qPath_v_jpg = qRiKi.getValue('qPath_v_jpg' )
qPath_v_detect = qRiKi.getValue('qPath_v_detect' )
qPath_v_cv = qRiKi.getValue('qPath_v_cv' )
qPath_v_photo = qRiKi.getValue('qPath_v_photo' )
qPath_v_msg = qRiKi.getValue('qPath_v_msg' )
qPath_d_ctrl = qRiKi.getValue('qPath_d_ctrl' )
qPath_d_play = qRiKi.getValue('qPath_d_play' )
qPath_d_prtscn = qRiKi.getValue('qPath_d_prtscn' )
qPath_d_movie = qRiKi.getValue('qPath_d_movie' )
qPath_d_upload = qRiKi.getValue('qPath_d_upload' )
# Busy-flag file markers used to coordinate shared devices and subsystems.
qBusy_dev_cpu = qRiKi.getValue('qBusy_dev_cpu' )
qBusy_dev_com = qRiKi.getValue('qBusy_dev_com' )
qBusy_dev_mic = qRiKi.getValue('qBusy_dev_mic' )
qBusy_dev_spk = qRiKi.getValue('qBusy_dev_spk' )
qBusy_dev_cam = qRiKi.getValue('qBusy_dev_cam' )
qBusy_dev_dsp = qRiKi.getValue('qBusy_dev_dsp' )
qBusy_dev_scn = qRiKi.getValue('qBusy_dev_scn' )
qBusy_s_ctrl = qRiKi.getValue('qBusy_s_ctrl' )
qBusy_s_inp = qRiKi.getValue('qBusy_s_inp' )
qBusy_s_wav = qRiKi.getValue('qBusy_s_wav' )
qBusy_s_STT = qRiKi.getValue('qBusy_s_STT' )
qBusy_s_TTS = qRiKi.getValue('qBusy_s_TTS' )
qBusy_s_TRA = qRiKi.getValue('qBusy_s_TRA' )
qBusy_s_play = qRiKi.getValue('qBusy_s_play' )
qBusy_v_ctrl = qRiKi.getValue('qBusy_v_ctrl' )
qBusy_v_inp = qRiKi.getValue('qBusy_v_inp' )
qBusy_v_QR = qRiKi.getValue('qBusy_v_QR' )
qBusy_v_jpg = qRiKi.getValue('qBusy_v_jpg' )
qBusy_v_CV = qRiKi.getValue('qBusy_v_CV' )
qBusy_d_ctrl = qRiKi.getValue('qBusy_d_ctrl' )
qBusy_d_inp = qRiKi.getValue('qBusy_d_inp' )
qBusy_d_QR = qRiKi.getValue('qBusy_d_QR' )
qBusy_d_rec = qRiKi.getValue('qBusy_d_rec' )
qBusy_d_telework = qRiKi.getValue('qBusy_d_telework' )
qBusy_d_play = qRiKi.getValue('qBusy_d_play' )
qBusy_d_browser = qRiKi.getValue('qBusy_d_browser' )
qBusy_d_upload = qRiKi.getValue('qBusy_d_upload' )
# Ready-flag file markers.
qRdy__s_force = qRiKi.getValue('qRdy__s_force' )
qRdy__s_fproc = qRiKi.getValue('qRdy__s_fproc' )
qRdy__s_sendkey = qRiKi.getValue('qRdy__s_sendkey' )
qRdy__v_mirror = qRiKi.getValue('qRdy__v_mirror' )
qRdy__v_reader = qRiKi.getValue('qRdy__v_reader' )
qRdy__v_sendkey = qRiKi.getValue('qRdy__v_sendkey' )
qRdy__d_reader = qRiKi.getValue('qRdy__d_reader' )
qRdy__d_sendkey = qRiKi.getValue('qRdy__d_sendkey' )
# Load (or create with defaults) the encrypted browser configuration:
# browser engine, home/search URLs, and "Narou" novel-site settings.
import _v5__qRiKi_key
config_file = '_v5__sub_browser_key.json'
qRiKi_key = _v5__qRiKi_key.qRiKi_key_class()
res, dic = qRiKi_key.getCryptJson(config_file=config_file, auto_crypt=False, )
if (res == False):
    # Config missing/unreadable: write a default configuration file.
    dic['_crypt_'] = 'none'
    dic['engine'] = 'firefox'
    dic['url_home'] = 'https://google.co.jp'
    dic['url_search'] = 'https://www.google.com/search?q='
    dic['narou_home'] = 'https://syosetu.com/'
    dic['narou_base'] = 'https://ncode.syosetu.com/'
    dic['narou_speech'] = 'yes'
    res = qRiKi_key.putCryptJson(config_file=config_file, put_dic=dic, )
runMode = 'debug'
def clear_tts(proc_id, ):
    """Delete this process's leftover TTS request and playback files.

    Removes every file matching '*.<proc_id>.*' in both the TTS queue
    folder (qPath_s_TTS) and the playback folder (qPath_s_play).
    """
    # Fix: the original duplicated the same glob/remove loop for the two
    # folders and guarded an already-safe for-loop with a len() check;
    # both folders are now purged by one shared loop.
    for path in (qPath_s_TTS, qPath_s_play):
        for f in sorted(glob.glob(path + '*.' + proc_id + '.*')):
            proc_file = f.replace('\\', '/')
            print(proc_file)
            qFunc.remove(proc_file)
def html_narou_to_tts(abortQ=None, proc_id=None, base_url='', page_url='', html=None, autoPaging='yes', ):
    """Read a "Shousetsuka ni Narou" novel page aloud via the TTS queue.

    abortQ: optional queue; any queued item aborts the read-aloud.
    proc_id: TTS requester id passed to qRiKi.tts().
    base_url: site base (e.g. 'https://ncode.syosetu.com/').
    page_url: '<novel-id>/<page-number>/' part after base_url.
    html: full page source to parse; autoPaging='yes' queues the next page
    into the control file once speech finishes.
    Returns True when the page was processed, False otherwise.
    """
    # Drain any stale abort request before starting.
    if (not abortQ is None):
        if (abortQ.qsize() > 0):
            q_get = abortQ.get()
            abortQ.task_done()
    # No HTML to read.
    if (html == None):
        return False
    # Split the page path into novel id and page number.
    page_sep = page_url.split('/')
    page_id = ''
    page_seq = ''
    if (len(page_sep) >= 1):
        page_id = page_sep[0]
    if (len(page_sep) >= 2):
        page_seq = page_sep[1]
    print(page_seq)
    # Reject pages without a numeric page number (e.g. the index page).
    if (page_seq == ''):
        return False
    if (not page_seq.isnumeric()):
        return False
    # Speak the chapter/sub titles (best effort: any parse failure skips them).
    try:
        soup = BeautifulSoup(html, 'html.parser')
        capter_title = ''
        try:
            capter_title = soup.find('p', class_='chapter_title')
        except:
            capter_title = soup.find('p', class_='margin_r20')
        print(capter_title.text)
        sub_title = soup.find('p', class_='novel_subtitle')
        print(sub_title.text)
        txt = 'ja,' + u'タイトル'
        qRiKi.tts(id=proc_id, text=txt, idolSec=0, maxWait=0, )
        time.sleep(1.2)
        txt = 'ja,' + capter_title.text + ' ' + sub_title.text
        qRiKi.tts(id=proc_id, text=txt, idolSec=0, maxWait=0, )
        time.sleep(1.2)
    except:
        pass
    # Speak the body: paragraphs carry ids 'L1', 'L2', ... on this site.
    # NOTE(review): if BeautifulSoup() itself failed above, `soup` is
    # undefined here and the first iteration's except swallows a NameError.
    for i in range(1, 9999):
        # Abort requested: stop mid-body.
        if (not abortQ is None):
            if (abortQ.qsize() > 0):
                q_get = abortQ.get()
                abortQ.task_done()
                return False
        try:
            p_list = soup.find_all('p', id='L' + str(i))
            if (len(p_list) == 0):
                break
            if (i == 1):
                txt = 'ja,' + u'本文'
                qRiKi.tts(id=proc_id, text=txt, idolSec=0, maxWait=0, )
                time.sleep(1.2)
            for p in p_list:
                txt = p.text
                print(txt)
                # Strip quote brackets / ellipses that read poorly aloud.
                txt = txt.replace(u'「', '')
                txt = txt.replace(u'」', '')
                txt = txt.replace(u'…', ' ')
                txt = 'ja,' + txt
                qRiKi.tts(id=proc_id, text=txt, idolSec=0, maxWait=0, )
                time.sleep(1.2)
        except:
            pass
    # Auto-paging disabled: done after reading this page.
    if (autoPaging != 'yes'):
        return True
    # Wait for speech to drain: require 5 consecutive idle polls.
    check = 5
    while (check > 0):
        if (qRiKi.statusWait_speech() == False): # busy
            check -= 1
        else:
            check = 5
        # Abort requested while waiting.
        if (not abortQ is None):
            if (abortQ.qsize() > 0):
                q_get = abortQ.get()
                abortQ.task_done()
                return True
    # Queue the next page number into the control file for the main loop.
    next_page = base_url + page_id + '/' + str(int(page_seq) + 1) + '/'
    qFunc.txtsWrite(filename=qCtrl_control_self, txts=[next_page], exclusive=True, )
    return True
class main_browser:
    """Worker that drives a Selenium Firefox browser from control-file commands.

    begin() spawns a daemon thread (main_proc) that polls the control file
    and request queue; commands open/close the browser, navigate, and
    trigger read-aloud of "Narou" novel pages.
    """
    def __init__(self, name='thread', id='0', runMode='debug', ):
        self.runMode = runMode
        self.breakFlag = threading.Event()
        self.breakFlag.clear()
        self.name = name
        self.id = id
        # proc_id: fixed-width "name_id" tag used in logs and temp file names.
        self.proc_id = '{0:10s}'.format(name).replace(' ', '_')
        self.proc_id = self.proc_id[:-2] + '_' + str(id)
        if (runMode == 'debug'):
            self.logDisp = True
        else:
            self.logDisp = False
        qLog.log('info', self.proc_id, 'init', display=self.logDisp, )
        # Worker-thread plumbing (queues, thread handle, heartbeat, state).
        self.proc_s = None
        self.proc_r = None
        self.proc_main = None
        self.proc_beat = None
        self.proc_last = None
        self.proc_step = '0'
        self.proc_seq = 0
        # Browser/session state.
        self.browser_id = None
        self.browser_start = time.time()
        self.browser_url = ''
        self.browser_html = None
        self.last_url = None
        # Background read-aloud thread and its abort queue.
        self.batch_thread = None
        self.batch_abortQ = queue.Queue()
        # Configuration (falls back to hard-coded defaults if file missing).
        json_file = '_v5__sub_browser_key.json'
        self.engine = 'firefox'
        self.url_home = 'https://google.co.jp'
        self.url_search = 'https://www.google.com/search?q='
        self.narou_home = 'https://syosetu.com/'
        self.narou_base = 'https://ncode.syosetu.com/'
        self.narou_speech = 'yes'
        res, json_dic = qRiKi_key.getCryptJson(config_file=json_file, auto_crypt=False, )
        if (res == True):
            self.engine = json_dic['engine']
            self.url_home = json_dic['url_home']
            self.url_search = json_dic['url_search']
            self.narou_home = json_dic['narou_home']
            self.narou_base = json_dic['narou_base']
            self.narou_speech = json_dic['narou_speech']
    def __del__(self, ):
        qLog.log('info', self.proc_id, 'bye!', display=self.logDisp, )
    def begin(self, ):
        """Create status files and queues, then start the worker thread."""
        #qLog.log('info', self.proc_id, 'start')
        # Per-process status marker files: running / ready / busy.
        self.fileRun = qPath_work + self.proc_id + '.run'
        self.fileRdy = qPath_work + self.proc_id + '.rdy'
        self.fileBsy = qPath_work + self.proc_id + '.bsy'
        qFunc.statusSet(self.fileRun, False)
        qFunc.statusSet(self.fileRdy, False)
        qFunc.statusSet(self.fileBsy, False)
        self.proc_s = queue.Queue()
        self.proc_r = queue.Queue()
        self.proc_main = threading.Thread(target=self.main_proc, args=(self.proc_s, self.proc_r, ))
        self.proc_beat = time.time()
        self.proc_last = time.time()
        self.proc_step = '0'
        self.proc_seq = 0
        self.proc_main.setDaemon(True)
        self.proc_main.start()
    def abort(self, waitMax=5, ):
        """Signal the worker to stop and wait up to waitMax seconds twice
        (heartbeat cleared, then run-file removed)."""
        qLog.log('info', self.proc_id, 'stop', display=self.logDisp, )
        self.breakFlag.set()
        chktime = time.time()
        while (not self.proc_beat is None) and ((time.time() - chktime) < waitMax):
            time.sleep(0.25)
        chktime = time.time()
        while (os.path.exists(self.fileRun)) and ((time.time() - chktime) < waitMax):
            time.sleep(0.25)
    def put(self, data, ):
        """Queue a [name, value] request for the worker. Always True."""
        self.proc_s.put(data)
        return True
    def checkGet(self, waitMax=5, ):
        """Wait up to waitMax seconds for a response, then return it
        (['', ''] on timeout)."""
        chktime = time.time()
        while (self.proc_r.qsize() == 0) and ((time.time() - chktime) < waitMax):
            time.sleep(0.10)
        data = self.get()
        return data
    def get(self, ):
        """Non-blocking pop of one [name, value] response; ['', ''] if empty."""
        if (self.proc_r.qsize() == 0):
            return ['', '']
        data = self.proc_r.get()
        self.proc_r.task_done()
        return data
    def main_proc(self, cn_r, cn_s, ):
        """Worker-thread body: poll control file and queues until '_end_'.

        cn_r: request queue ([name, value] pairs) from the owner.
        cn_s: response queue back to the owner.
        """
        # Log start and mark the process as running.
        qLog.log('info', self.proc_id, 'start', display=self.logDisp, )
        qFunc.statusSet(self.fileRun, True)
        self.proc_beat = time.time()
        # Initial setup: discard a stale '_end_' left in the control file.
        self.proc_step = '1'
        txts, txt = qFunc.txtsRead(qCtrl_control_self)
        if (txts != False):
            if (txt == '_end_'):
                qFunc.remove(qCtrl_control_self)
        # Main wait loop.
        self.proc_step = '5'
        onece = True
        last_alive = time.time()
        while (self.proc_step == '5'):
            self.proc_beat = time.time()
            # Check the control file; '_end_' exits, anything else is a command.
            control = ''
            txts, txt = qFunc.txtsRead(qCtrl_control_self)
            if (txts != False):
                qLog.log('info', self.proc_id, '' + str(txt))
                if (txt == '_end_'):
                    break
                else:
                    qFunc.remove(qCtrl_control_self)
                    control = txt
            # Owner requested stop via breakFlag.
            if (self.breakFlag.is_set()):
                self.breakFlag.clear()
                self.proc_step = '9'
                break
            # Heartbeat log every 30s.
            if ((time.time() - last_alive) > 30):
                qLog.log('debug', self.proc_id, 'alive', display=True, )
                last_alive = time.time()
            # Pop one request from the owner queue, if any.
            if (cn_r.qsize() > 0):
                cn_r_get = cn_r.get()
                inp_name = cn_r_get[0]
                inp_value = cn_r_get[1]
                cn_r.task_done()
            else:
                inp_name = ''
                inp_value = ''
            if (cn_r.qsize() > 1) or (cn_s.qsize() > 20):
                qLog.log('warning', self.proc_id, 'queue overflow warning!, ' + str(cn_r.qsize()) + ', ' + str(cn_s.qsize()))
            # Mark ready once the loop is serving.
            if (qFunc.statusCheck(self.fileRdy) == False):
                qFunc.statusSet(self.fileRdy, True)
            # Status request -> reply '_ready_'.
            if (inp_name.lower() == '_status_'):
                out_name = inp_name
                out_value = '_ready_'
                cn_s.put([out_name, out_value])
            # Execute the control-file command, if one was read.
            if (control != ''):
                self.sub_proc(control, )
            # Detect URL changes / trigger read-aloud.
            self.sub_check_url()
            # Idle: back off when CPU or microphone is busy.
            slow = False
            if (qFunc.statusCheck(qBusy_dev_cpu) == True):
                slow = True
            if (qFunc.statusCheck(qBusy_dev_mic) == True):
                slow = True
            if (slow == True):
                time.sleep(1.00)
            else:
                if (cn_r.qsize() == 0):
                    time.sleep(0.50)
                else:
                    time.sleep(0.25)
        # Shutdown.
        if (True):
            # Clear ready flag.
            qFunc.statusSet(self.fileRdy, False)
            # Close the browser if still open.
            if (not self.browser_id is None):
                self.sub_proc('_stop_', )
            # Clear busy flags.
            qFunc.statusSet(self.fileBsy, False)
            if (str(self.id) == '0'):
                qFunc.statusSet(qBusy_d_browser, False)
            # Drain both queues.
            while (cn_r.qsize() > 0):
                cn_r_get = cn_r.get()
                cn_r.task_done()
            while (cn_s.qsize() > 0):
                cn_s_get = cn_s.get()
                cn_s.task_done()
        # Log end and clear the run marker / heartbeat.
        qLog.log('info', self.proc_id, 'end', display=self.logDisp, )
        qFunc.statusSet(self.fileRun, False)
        self.proc_beat = None
    # Command dispatch: map control-file text to start/stop actions.
    # Recognizes '_start_'/'_stop_' plus Japanese phrases containing
    # "WEB/ウェブ/ブラウザ" with "開始" (start) or "停止/終了" (stop);
    # any other text is treated as a URL/search term for an open browser.
    def sub_proc(self, proc_text, ):
        if (proc_text.find(u'リセット') >=0):
            # "Reset": close the browser if open.
            if (not self.browser_id is None):
                #self.sub_stop(proc_text, )
                self.sub_stop('_stop_', )
        elif (proc_text.lower() == '_stop_') \
        or (proc_text.find(u'WEB') >=0) and (proc_text.find(u'停止') >=0) \
        or (proc_text.find(u'WEB') >=0) and (proc_text.find(u'終了') >=0) \
        or (proc_text.find(u'ウェブ') >=0) and (proc_text.find(u'停止') >=0) \
        or (proc_text.find(u'ウェブ') >=0) and (proc_text.find(u'終了') >=0) \
        or (proc_text.find(u'ブラウザ') >=0) and (proc_text.find(u'停止') >=0) \
        or (proc_text.find(u'ブラウザ') >=0) and (proc_text.find(u'終了') >=0):
            # Stop request.
            if (not self.browser_id is None):
                #self.sub_stop(proc_text, )
                self.sub_stop('_stop_', )
        elif (proc_text.lower() == '_start_') \
        or (proc_text.find(u'WEB') >=0) and (proc_text.find(u'開始') >=0) \
        or (proc_text.find(u'ウェブ') >=0) and (proc_text.find(u'開始') >=0) \
        or (proc_text.find(u'ブラウザ') >=0) and (proc_text.find(u'開始') >=0):
            # Start request.
            self.sub_start('_start_', )
        else:
            # Free text: navigate only if a browser is already open.
            if (not self.browser_id is None):
                self.sub_start(proc_text, )
    # Start/navigate: open Firefox if needed, then resolve proc_text to a URL.
    def sub_start(self, proc_text, ):
        # Log the request.
        qLog.log('info', self.proc_id, 'open ' + proc_text, display=True,)
        # Open the browser if not already running.
        if (self.browser_id is None):
            # Set busy flags.
            if (qFunc.statusCheck(self.fileBsy) == False):
                qFunc.statusSet(self.fileBsy, True)
                if (str(self.id) == '0'):
                    qFunc.statusSet(qBusy_d_browser, True)
            # Headless mode option (disabled).
            options = FirefoxOptions()
            #options.add_argument('-headless')
            # Create the Firefox WebDriver.
            self.browser_id = Firefox(options=options)
            # Window size / zoom setup (disabled).
            #driver.set_window_size(1920, 9999)
            #driver.execute_script("document.body.style.zoom='100%'")
        # Resolve the target URL: home page, explicit URL, Narou home,
        # or (fallback) a web search for the given text.
        url = ''
        if (proc_text == '_start_'):
            url = self.url_home #'https://google.co.jp'
            #self.browser_id.get(url)
        elif (proc_text[:4] == 'http'):
            url = proc_text
            #self.browser_id.get(url)
        elif (proc_text == u'なろう') or (proc_text == u'本好き'):
            url = self.narou_home #'https://syosetu.com/'
            #self.browser_id.get(url)
        if (url == ''):
            url = self.url_search + proc_text #'https://www.google.com/search?q='
            #self.browser_id.get(url)
        # Navigate; on any failure tear the browser down.
        try:
            self.browser_id.get(url)
        except Exception as e:
            self.sub_stop('_stop_', )
    # Stop: cancel read-aloud, quit the driver, kill the engine process.
    def sub_stop(self, proc_text, ):
        if (not self.browser_id is None):
            # Cancel any in-flight read-aloud thread and purge TTS files.
            if (not self.batch_thread is None):
                self.batch_abortQ.put('_abort_')
                time.sleep(2.00)
                self.batch_thread = None
                clear_tts(self.proc_id, )
            # Quit the WebDriver.
            self.browser_id.quit()
            self.browser_id = None
            # Hard reset: kill any remaining browser-engine processes.
            #qFunc.kill('firefox', )
            qFunc.kill(self.engine, )
        # Clear busy flags.
        qFunc.statusSet(self.fileBsy, False)
        if (str(self.id) == '0'):
            qFunc.statusSet(qBusy_d_browser, False)
    # URL-change watcher: snapshot page source and kick off read-aloud.
    def sub_check_url(self, ):
        # Browser not open: clear cached state.
        if (self.browser_id is None):
            self.browser_url = None
            self.browser_html = None
            self.last_url = None
            return False
        # No navigation since last check.
        self.browser_url = self.browser_id.current_url
        if (self.browser_url == self.last_url):
            return False
        #visibility_of_all_elements_located
        # Wait until all elements are present in the DOM with nonzero size.
        # NOTE(review): EC.visibility_of_all_elements_located normally takes
        # a locator; passing the class itself relies on WebDriverWait calling
        # it with the driver — confirm this behaves as intended.
        self.browser_wait = WebDriverWait(self.browser_id, 10)
        element = self.browser_wait.until(EC.visibility_of_all_elements_located)
        # Snapshot the page source and remember the URL.
        self.browser_html = self.browser_id.page_source
        self.last_url = self.browser_url
        print(self.browser_url)
        #self.browser_id.save_screenshot(file_name)
        # Cancel any read-aloud of the previous page.
        if (not self.batch_thread is None):
            self.batch_abortQ.put('_abort_')
            time.sleep(2.00)
            self.batch_thread = None
            clear_tts(self.proc_id, )
        # Narou novel page: read it aloud in a background thread.
        if (self.narou_speech == 'yes'):
            base_url = self.narou_base #'https://ncode.syosetu.com/'
            if (self.browser_url[:len(base_url)] == base_url):
                page_url = self.browser_url[len(base_url):]
                # Background thread so the main loop keeps polling.
                self.batch_thread = threading.Thread(target=html_narou_to_tts, args=(
                    self.batch_abortQ, self.proc_id,
                    base_url, page_url, self.browser_html, 'yes',
                ))
                self.batch_thread.setDaemon(True)
                self.batch_thread.start()
        return True
# Signal handling: SIGINT/SIGTERM are ignored; shutdown is driven by
# writing '_end_' to the control file instead.
import signal
def signal_handler(signal_number, stack_frame):
    # Currently unused (registration below is commented out).
    print(os.path.basename(__file__), 'accept signal =', signal_number)
#signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
if __name__ == '__main__':
    main_name = 'browser'
    main_id = '{0:10s}'.format(main_name).replace(' ', '_')
    # Initialize shared framework singletons.
    qRiKi.init()
    qFunc.init()
    # Set up file logging.
    nowTime = datetime.datetime.now()
    filename = qPath_log + nowTime.strftime('%Y%m%d.%H%M%S') + '.' + os.path.basename(__file__) + '.log'
    qLog.init(mode='logger', filename=filename, )
    qLog.log('info', main_id, 'init')
    qLog.log('info', main_id, 'exsample.py runMode, ')
    # Parameters: optional argv[1] overrides runMode ('debug' by default).
    if (True):
        if (len(sys.argv) >= 2):
            runMode = str(sys.argv[1]).lower()
        qLog.log('info', main_id, 'runMode =' + str(runMode ))
    # Initial setup: discard a stale '_end_' in the control file.
    if (True):
        txts, txt = qFunc.txtsRead(qCtrl_control_self)
        if (txts != False):
            if (txt == '_end_'):
                qFunc.remove(qCtrl_control_self)
    # Start the browser worker.
    if (True):
        qLog.log('info', main_id, 'start')
        main_core = main_browser(main_name, '0', runMode=runMode, )
        main_core.begin()
        main_start = time.time()
        onece = True
    # Main wait loop.
    while (True):
        # Exit when '_end_' appears in the control file.
        txts, txt = qFunc.txtsRead(qCtrl_control_self)
        if (txts != False):
            if (txt == '_end_'):
                break
        # Debug self-test: drive a scripted command sequence, then stop.
        if (runMode == 'debug'):
            # Scripted start + navigations (runs once).
            if ((time.time() - main_start) > 1):
                if (onece == True):
                    onece = False
                    qFunc.txtsWrite(qCtrl_control_self ,txts=['_start_'], encoding='utf-8', exclusive=True, mode='w', )
                    time.sleep(5.00)
                    qFunc.txtsWrite(qCtrl_control_self ,txts=['http://yahoo.co.jp'], encoding='utf-8', exclusive=True, mode='w', )
                    time.sleep(5.00)
                    qFunc.txtsWrite(qCtrl_control_self ,txts=[u'姫路城'], encoding='utf-8', exclusive=True, mode='w', )
                    time.sleep(5.00)
                    qFunc.txtsWrite(qCtrl_control_self ,txts=[u'本好き'], encoding='utf-8', exclusive=True, mode='w', )
            # Scripted stop + end after 40 seconds.
            if ((time.time() - main_start) > 40):
                qFunc.txtsWrite(qCtrl_control_self ,txts=['_stop_'], encoding='utf-8', exclusive=True, mode='w', )
                time.sleep(5.00)
                qFunc.txtsWrite(qCtrl_control_self ,txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
        # Idle: back off when CPU or microphone is busy.
        slow = False
        if (qFunc.statusCheck(qBusy_dev_cpu) == True):
            slow = True
        elif (qFunc.statusCheck(qBusy_dev_mic) == True):
            slow = True
        if (slow == True):
            time.sleep(1.00)
        else:
            time.sleep(0.50)
    # Shutdown: stop the worker and exit.
    if (True):
        qLog.log('info', main_id, 'terminate')
        main_core.abort()
        del main_core
        qLog.log('info', main_id, 'bye!')
    sys.exit(0)
| 31.249032 | 130 | 0.547857 |
import sys
import os
import time
import datetime
import codecs
import glob
import queue
import threading
import subprocess
from selenium.webdriver import Firefox, FirefoxOptions
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
qCtrl_control_browser = 'temp/control_browser.txt'
qCtrl_control_self = qCtrl_control_browser
import _v5__qRiKi
qRiKi = _v5__qRiKi.qRiKi_class()
import _v5__qFunc
qFunc = _v5__qFunc.qFunc_class()
import _v5__qLog
qLog = _v5__qLog.qLog_class()
qPLATFORM = qRiKi.getValue('qPLATFORM' )
qRUNATTR = qRiKi.getValue('qRUNATTR' )
qHOSTNAME = qRiKi.getValue('qHOSTNAME' )
qUSERNAME = qRiKi.getValue('qUSERNAME' )
qPath_pictures = qRiKi.getValue('qPath_pictures' )
qPath_videos = qRiKi.getValue('qPath_videos' )
qPath_cache = qRiKi.getValue('qPath_cache' )
qPath_sounds = qRiKi.getValue('qPath_sounds' )
qPath_icons = qRiKi.getValue('qPath_icons' )
qPath_fonts = qRiKi.getValue('qPath_fonts' )
qPath_log = qRiKi.getValue('qPath_log' )
qPath_work = qRiKi.getValue('qPath_work' )
qPath_rec = qRiKi.getValue('qPath_rec' )
qPath_s_ctrl = qRiKi.getValue('qPath_s_ctrl' )
qPath_s_inp = qRiKi.getValue('qPath_s_inp' )
qPath_s_wav = qRiKi.getValue('qPath_s_wav' )
qPath_s_jul = qRiKi.getValue('qPath_s_jul' )
qPath_s_STT = qRiKi.getValue('qPath_s_STT' )
qPath_s_TTS = qRiKi.getValue('qPath_s_TTS' )
qPath_s_TRA = qRiKi.getValue('qPath_s_TRA' )
qPath_s_play = qRiKi.getValue('qPath_s_play' )
qPath_v_ctrl = qRiKi.getValue('qPath_v_ctrl' )
qPath_v_inp = qRiKi.getValue('qPath_v_inp' )
qPath_v_jpg = qRiKi.getValue('qPath_v_jpg' )
qPath_v_detect = qRiKi.getValue('qPath_v_detect' )
qPath_v_cv = qRiKi.getValue('qPath_v_cv' )
# --- Shared configuration -------------------------------------------------
# Work-directory paths and device/service status-flag file names, all
# resolved from the framework-wide qRiKi settings store.
qPath_v_photo = qRiKi.getValue('qPath_v_photo' )
qPath_v_msg = qRiKi.getValue('qPath_v_msg' )
qPath_d_ctrl = qRiKi.getValue('qPath_d_ctrl' )
qPath_d_play = qRiKi.getValue('qPath_d_play' )
qPath_d_prtscn = qRiKi.getValue('qPath_d_prtscn' )
qPath_d_movie = qRiKi.getValue('qPath_d_movie' )
qPath_d_upload = qRiKi.getValue('qPath_d_upload' )
# Busy flags: set while the named device or service is in use.
qBusy_dev_cpu = qRiKi.getValue('qBusy_dev_cpu' )
qBusy_dev_com = qRiKi.getValue('qBusy_dev_com' )
qBusy_dev_mic = qRiKi.getValue('qBusy_dev_mic' )
qBusy_dev_spk = qRiKi.getValue('qBusy_dev_spk' )
qBusy_dev_cam = qRiKi.getValue('qBusy_dev_cam' )
qBusy_dev_dsp = qRiKi.getValue('qBusy_dev_dsp' )
qBusy_dev_scn = qRiKi.getValue('qBusy_dev_scn' )
qBusy_s_ctrl = qRiKi.getValue('qBusy_s_ctrl' )
qBusy_s_inp = qRiKi.getValue('qBusy_s_inp' )
qBusy_s_wav = qRiKi.getValue('qBusy_s_wav' )
qBusy_s_STT = qRiKi.getValue('qBusy_s_STT' )
qBusy_s_TTS = qRiKi.getValue('qBusy_s_TTS' )
qBusy_s_TRA = qRiKi.getValue('qBusy_s_TRA' )
qBusy_s_play = qRiKi.getValue('qBusy_s_play' )
qBusy_v_ctrl = qRiKi.getValue('qBusy_v_ctrl' )
qBusy_v_inp = qRiKi.getValue('qBusy_v_inp' )
qBusy_v_QR = qRiKi.getValue('qBusy_v_QR' )
qBusy_v_jpg = qRiKi.getValue('qBusy_v_jpg' )
qBusy_v_CV = qRiKi.getValue('qBusy_v_CV' )
qBusy_d_ctrl = qRiKi.getValue('qBusy_d_ctrl' )
qBusy_d_inp = qRiKi.getValue('qBusy_d_inp' )
qBusy_d_QR = qRiKi.getValue('qBusy_d_QR' )
qBusy_d_rec = qRiKi.getValue('qBusy_d_rec' )
qBusy_d_telework = qRiKi.getValue('qBusy_d_telework' )
qBusy_d_play = qRiKi.getValue('qBusy_d_play' )
qBusy_d_browser = qRiKi.getValue('qBusy_d_browser' )
qBusy_d_upload = qRiKi.getValue('qBusy_d_upload' )
# Ready flags: set while the named auxiliary feature is available.
qRdy__s_force = qRiKi.getValue('qRdy__s_force' )
qRdy__s_fproc = qRiKi.getValue('qRdy__s_fproc' )
qRdy__s_sendkey = qRiKi.getValue('qRdy__s_sendkey' )
qRdy__v_mirror = qRiKi.getValue('qRdy__v_mirror' )
qRdy__v_reader = qRiKi.getValue('qRdy__v_reader' )
qRdy__v_sendkey = qRiKi.getValue('qRdy__v_sendkey' )
qRdy__d_reader = qRiKi.getValue('qRdy__d_reader' )
qRdy__d_sendkey = qRiKi.getValue('qRdy__d_sendkey' )
# Browser settings: load the crypt-json config, creating it with defaults
# on first run.
import _v5__qRiKi_key
config_file = '_v5__sub_browser_key.json'
qRiKi_key = _v5__qRiKi_key.qRiKi_key_class()
res, dic = qRiKi_key.getCryptJson(config_file=config_file, auto_crypt=False, )
if (res == False):
    # No (readable) config yet: write one with the default engine and URLs.
    dic['_crypt_'] = 'none'
    dic['engine'] = 'firefox'
    dic['url_home'] = 'https://google.co.jp'
    dic['url_search'] = 'https://www.google.com/search?q='
    dic['narou_home'] = 'https://syosetu.com/'
    dic['narou_base'] = 'https://ncode.syosetu.com/'
    dic['narou_speech'] = 'yes'
    res = qRiKi_key.putCryptJson(config_file=config_file, put_dic=dic, )
# Default run mode; may be overridden by sys.argv in the __main__ block.
runMode = 'debug'
def clear_tts(proc_id, ):
    """Delete all queued TTS output and playback files tagged with *proc_id*.

    Both the TTS directory (qPath_s_TTS) and the playback directory
    (qPath_s_play) name their files '<base>.<proc_id>.<ext>', so the same
    glob pattern applies to each; the original duplicated this loop
    verbatim for the two directories.
    """
    for path in (qPath_s_TTS, qPath_s_play):
        path_files = glob.glob(path + '*.' + proc_id + '.*')
        path_files.sort()
        for f in path_files:
            # Normalise Windows separators before handing off to qFunc.remove.
            proc_file = f.replace('\\', '/')
            print(proc_file)
            qFunc.remove(proc_file)
def html_narou_to_tts(abortQ=None, proc_id=None, base_url='', page_url='', html=None, autoPaging='yes', ):
    """Read one 'Narou' novel episode page aloud through the TTS queue.

    Parses *html* (an episode page living under *base_url*), queues the
    chapter title, subtitle and every body paragraph for speech, and -
    when *autoPaging* is 'yes' - waits for playback to go idle and then
    schedules the next episode by writing its URL to the self-control file.

    abortQ   : optional queue; any message on it aborts the reading
    proc_id  : id used to tag the generated TTS files
    page_url : path below *base_url*, expected as '<ncode>/<episode>/'
    Returns True on success, False when aborted or the input is unusable.
    """
    # Drain a pending abort request (if any) before starting.
    if (not abortQ is None):
        if (abortQ.qsize() > 0):
            q_get = abortQ.get()
            abortQ.task_done()
    if (html == None):
        return False
    # Split '<ncode>/<episode-number>/' into its parts.
    page_sep = page_url.split('/')
    page_id = ''
    page_seq = ''
    if (len(page_sep) >= 1):
        page_id = page_sep[0]
    if (len(page_sep) >= 2):
        page_seq = page_sep[1]
    print(page_seq)
    # Only individual episode pages (numeric sequence part) are read aloud.
    if (page_seq == ''):
        return False
    if (not page_seq.isnumeric()):
        return False
    try:
        soup = BeautifulSoup(html, 'html.parser')
        # The chapter-heading class name differs between page layouts.
        capter_title = ''
        try:
            capter_title = soup.find('p', class_='chapter_title')
        except:
            capter_title = soup.find('p', class_='margin_r20')
        print(capter_title.text)
        sub_title = soup.find('p', class_='novel_subtitle')
        print(sub_title.text)
        # Announce the titles; the 'ja,' prefix selects the Japanese voice.
        txt = 'ja,' + u'タイトル'
        qRiKi.tts(id=proc_id, text=txt, idolSec=0, maxWait=0, )
        time.sleep(1.2)
        txt = 'ja,' + capter_title.text + ' ' + sub_title.text
        qRiKi.tts(id=proc_id, text=txt, idolSec=0, maxWait=0, )
        time.sleep(1.2)
    except:
        pass
    # Body paragraphs carry ids 'L1', 'L2', ...; stop at the first gap.
    for i in range(1, 9999):
        if (not abortQ is None):
            if (abortQ.qsize() > 0):
                q_get = abortQ.get()
                abortQ.task_done()
                return False
        try:
            p_list = soup.find_all('p', id='L' + str(i))
            if (len(p_list) == 0):
                break
            if (i == 1):
                txt = 'ja,' + u'本文'
                qRiKi.tts(id=proc_id, text=txt, idolSec=0, maxWait=0, )
                time.sleep(1.2)
            for p in p_list:
                txt = p.text
                print(txt)
                # Strip quote brackets and ellipses that read poorly aloud.
                txt = txt.replace(u'「', '')
                txt = txt.replace(u'」', '')
                txt = txt.replace(u'…', ' ')
                txt = 'ja,' + txt
                qRiKi.tts(id=proc_id, text=txt, idolSec=0, maxWait=0, )
                time.sleep(1.2)
        except:
            pass
    if (autoPaging != 'yes'):
        return True
    # Wait until speech has been reported idle five polls in a row
    # (presumably statusWait_speech blocks between polls - TODO confirm).
    check = 5
    while (check > 0):
        if (qRiKi.statusWait_speech() == False):
            check -= 1
        else:
            check = 5
        if (not abortQ is None):
            if (abortQ.qsize() > 0):
                q_get = abortQ.get()
                abortQ.task_done()
                return True
    # Queue the next episode URL for the main browser loop to open.
    next_page = base_url + page_id + '/' + str(int(page_seq) + 1) + '/'
    qFunc.txtsWrite(filename=qCtrl_control_self, txts=[next_page], exclusive=True, )
    return True
class main_browser:
    """Worker that drives a Selenium-controlled browser from a control file.

    Commands (URLs, search words, '_start_'/'_stop_' and Japanese voice
    phrases) arrive via the qCtrl_control_self file or the input queue;
    page changes on the 'Narou' novel site are handed to
    html_narou_to_tts on a background thread for reading aloud.
    """
    def __init__(self, name='thread', id='0', runMode='debug', ):
        self.runMode = runMode
        # Event used to request shutdown of the worker thread.
        self.breakFlag = threading.Event()
        self.breakFlag.clear()
        self.name = name
        self.id = id
        # Fixed-width process id (e.g. 'browser__0') used for logs/files.
        self.proc_id = '{0:10s}'.format(name).replace(' ', '_')
        self.proc_id = self.proc_id[:-2] + '_' + str(id)
        if (runMode == 'debug'):
            self.logDisp = True
        else:
            self.logDisp = False
        qLog.log('info', self.proc_id, 'init', display=self.logDisp, )
        # Worker-thread plumbing; populated in begin().
        self.proc_s = None
        self.proc_r = None
        self.proc_main = None
        self.proc_beat = None
        self.proc_last = None
        self.proc_step = '0'
        self.proc_seq = 0
        # Selenium driver handle and page-tracking state.
        self.browser_id = None
        self.browser_start = time.time()
        self.browser_url = ''
        self.browser_html = None
        self.last_url = None
        # Background page-to-speech thread plus its abort queue.
        self.batch_thread = None
        self.batch_abortQ = queue.Queue()
        # Defaults, overridden by the crypt-json settings file when present.
        json_file = '_v5__sub_browser_key.json'
        self.engine = 'firefox'
        self.url_home = 'https://google.co.jp'
        self.url_search = 'https://www.google.com/search?q='
        self.narou_home = 'https://syosetu.com/'
        self.narou_base = 'https://ncode.syosetu.com/'
        self.narou_speech = 'yes'
        res, json_dic = qRiKi_key.getCryptJson(config_file=json_file, auto_crypt=False, )
        if (res == True):
            self.engine = json_dic['engine']
            self.url_home = json_dic['url_home']
            self.url_search = json_dic['url_search']
            self.narou_home = json_dic['narou_home']
            self.narou_base = json_dic['narou_base']
            self.narou_speech = json_dic['narou_speech']
    def __del__(self, ):
        qLog.log('info', self.proc_id, 'bye!', display=self.logDisp, )
    def begin(self, ):
        # Create the run/ready/busy status files and start the worker thread.
        self.fileRun = qPath_work + self.proc_id + '.run'
        self.fileRdy = qPath_work + self.proc_id + '.rdy'
        self.fileBsy = qPath_work + self.proc_id + '.bsy'
        qFunc.statusSet(self.fileRun, False)
        qFunc.statusSet(self.fileRdy, False)
        qFunc.statusSet(self.fileBsy, False)
        self.proc_s = queue.Queue()
        self.proc_r = queue.Queue()
        self.proc_main = threading.Thread(target=self.main_proc, args=(self.proc_s, self.proc_r, ))
        self.proc_beat = time.time()
        self.proc_last = time.time()
        self.proc_step = '0'
        self.proc_seq = 0
        self.proc_main.setDaemon(True)
        self.proc_main.start()
    def abort(self, waitMax=5, ):
        # Ask the worker to stop, then wait (up to waitMax twice) for the
        # heartbeat to clear and the .run status file to disappear.
        qLog.log('info', self.proc_id, 'stop', display=self.logDisp, )
        self.breakFlag.set()
        chktime = time.time()
        while (not self.proc_beat is None) and ((time.time() - chktime) < waitMax):
            time.sleep(0.25)
        chktime = time.time()
        while (os.path.exists(self.fileRun)) and ((time.time() - chktime) < waitMax):
            time.sleep(0.25)
    def put(self, data, ):
        # Enqueue one [name, value] request for the worker.
        self.proc_s.put(data)
        return True
    def checkGet(self, waitMax=5, ):
        # Poll for a reply for up to waitMax seconds, then fetch it.
        chktime = time.time()
        while (self.proc_r.qsize() == 0) and ((time.time() - chktime) < waitMax):
            time.sleep(0.10)
        data = self.get()
        return data
    def get(self, ):
        # Non-blocking fetch of one reply; ['', ''] when none is waiting.
        if (self.proc_r.qsize() == 0):
            return ['', '']
        data = self.proc_r.get()
        self.proc_r.task_done()
        return data
    def main_proc(self, cn_r, cn_s, ):
        # Worker main loop: watch the control file and request queue,
        # dispatch commands, and track browser page changes.
        qLog.log('info', self.proc_id, 'start', display=self.logDisp, )
        qFunc.statusSet(self.fileRun, True)
        self.proc_beat = time.time()
        self.proc_step = '1'
        # Drop a stale '_end_' request left over from a previous run.
        txts, txt = qFunc.txtsRead(qCtrl_control_self)
        if (txts != False):
            if (txt == '_end_'):
                qFunc.remove(qCtrl_control_self)
        self.proc_step = '5'
        onece = True
        last_alive = time.time()
        while (self.proc_step == '5'):
            self.proc_beat = time.time()
            # 1) pick up a command from the self-control file.
            control = ''
            txts, txt = qFunc.txtsRead(qCtrl_control_self)
            if (txts != False):
                qLog.log('info', self.proc_id, '' + str(txt))
                if (txt == '_end_'):
                    break
                else:
                    qFunc.remove(qCtrl_control_self)
                    control = txt
            if (self.breakFlag.is_set()):
                self.breakFlag.clear()
                self.proc_step = '9'
                break
            # Heartbeat log roughly every 30 seconds.
            if ((time.time() - last_alive) > 30):
                qLog.log('debug', self.proc_id, 'alive', display=True, )
                last_alive = time.time()
            # 2) pick up one [name, value] request from the input queue.
            if (cn_r.qsize() > 0):
                cn_r_get = cn_r.get()
                inp_name = cn_r_get[0]
                inp_value = cn_r_get[1]
                cn_r.task_done()
            else:
                inp_name = ''
                inp_value = ''
            if (cn_r.qsize() > 1) or (cn_s.qsize() > 20):
                qLog.log('warning', self.proc_id, 'queue overflow warning!, ' + str(cn_r.qsize()) + ', ' + str(cn_s.qsize()))
            if (qFunc.statusCheck(self.fileRdy) == False):
                qFunc.statusSet(self.fileRdy, True)
            if (inp_name.lower() == '_status_'):
                out_name = inp_name
                out_value = '_ready_'
                cn_s.put([out_name, out_value])
            # 3) execute the command and watch the browser for page changes.
            if (control != ''):
                self.sub_proc(control, )
            self.sub_check_url()
            # Back off while the CPU or microphone is busy elsewhere.
            slow = False
            if (qFunc.statusCheck(qBusy_dev_cpu) == True):
                slow = True
            if (qFunc.statusCheck(qBusy_dev_mic) == True):
                slow = True
            if (slow == True):
                time.sleep(1.00)
            else:
                if (cn_r.qsize() == 0):
                    time.sleep(0.50)
                else:
                    time.sleep(0.25)
        # Shutdown: close the browser, clear status flags, drain both queues.
        if (True):
            qFunc.statusSet(self.fileRdy, False)
            if (not self.browser_id is None):
                self.sub_proc('_stop_', )
            qFunc.statusSet(self.fileBsy, False)
            if (str(self.id) == '0'):
                qFunc.statusSet(qBusy_d_browser, False)
        while (cn_r.qsize() > 0):
            cn_r_get = cn_r.get()
            cn_r.task_done()
        while (cn_s.qsize() > 0):
            cn_s_get = cn_s.get()
            cn_s.task_done()
        qLog.log('info', self.proc_id, 'end', display=self.logDisp, )
        qFunc.statusSet(self.fileRun, False)
        self.proc_beat = None
    def sub_proc(self, proc_text, ):
        # Map a free-text command (including Japanese voice phrases for
        # 'reset' / 'stop' / 'start') onto start/stop/navigate actions.
        if (proc_text.find(u'リセット') >=0):
            if (not self.browser_id is None):
                self.sub_stop('_stop_', )
        elif (proc_text.lower() == '_stop_') \
        or (proc_text.find(u'WEB') >=0) and (proc_text.find(u'停止') >=0) \
        or (proc_text.find(u'WEB') >=0) and (proc_text.find(u'終了') >=0) \
        or (proc_text.find(u'ウェブ') >=0) and (proc_text.find(u'停止') >=0) \
        or (proc_text.find(u'ウェブ') >=0) and (proc_text.find(u'終了') >=0) \
        or (proc_text.find(u'ブラウザ') >=0) and (proc_text.find(u'停止') >=0) \
        or (proc_text.find(u'ブラウザ') >=0) and (proc_text.find(u'終了') >=0):
            if (not self.browser_id is None):
                self.sub_stop('_stop_', )
        elif (proc_text.lower() == '_start_') \
        or (proc_text.find(u'WEB') >=0) and (proc_text.find(u'開始') >=0) \
        or (proc_text.find(u'ウェブ') >=0) and (proc_text.find(u'開始') >=0) \
        or (proc_text.find(u'ブラウザ') >=0) and (proc_text.find(u'開始') >=0):
            self.sub_start('_start_', )
        else:
            # Anything else is a navigation request; only honoured while
            # a browser is already open.
            if (not self.browser_id is None):
                self.sub_start(proc_text, )
    def sub_start(self, proc_text, ):
        # Open the browser if needed, then navigate: '_start_' -> home page,
        # 'http...' -> that URL, special phrases -> Narou home, anything
        # else -> the configured search URL.
        qLog.log('info', self.proc_id, 'open ' + proc_text, display=True,)
        if (self.browser_id is None):
            if (qFunc.statusCheck(self.fileBsy) == False):
                qFunc.statusSet(self.fileBsy, True)
                if (str(self.id) == '0'):
                    qFunc.statusSet(qBusy_d_browser, True)
            options = FirefoxOptions()
            self.browser_id = Firefox(options=options)
        url = ''
        if (proc_text == '_start_'):
            url = self.url_home
        elif (proc_text[:4] == 'http'):
            url = proc_text
        elif (proc_text == u'なろう') or (proc_text == u'本好き'):
            url = self.narou_home
        if (url == ''):
            url = self.url_search + proc_text
        try:
            self.browser_id.get(url)
        except Exception as e:
            # Navigation failed (e.g. browser window was closed): tear down.
            self.sub_stop('_stop_', )
    def sub_stop(self, proc_text, ):
        # Abort any speech batch, delete queued TTS files, quit the driver
        # and kill leftover engine processes, then clear the busy flags.
        if (not self.browser_id is None):
            if (not self.batch_thread is None):
                self.batch_abortQ.put('_abort_')
                time.sleep(2.00)
                self.batch_thread = None
            clear_tts(self.proc_id, )
            self.browser_id.quit()
            self.browser_id = None
            qFunc.kill(self.engine, )
        qFunc.statusSet(self.fileBsy, False)
        if (str(self.id) == '0'):
            qFunc.statusSet(qBusy_d_browser, False)
    def sub_check_url(self, ):
        # Detect a page change; on a new Narou episode page, restart the
        # background text-to-speech reader. Returns True on a page change.
        if (self.browser_id is None):
            self.browser_url = None
            self.browser_html = None
            self.last_url = None
            return False
        self.browser_url = self.browser_id.current_url
        if (self.browser_url == self.last_url):
            return False
        # Wait (max 10s) for the new page's elements, then snapshot it.
        self.browser_wait = WebDriverWait(self.browser_id, 10)
        element = self.browser_wait.until(EC.visibility_of_all_elements_located)
        self.browser_html = self.browser_id.page_source
        self.last_url = self.browser_url
        print(self.browser_url)
        # Stop the previous reader (if any) before starting a new one.
        if (not self.batch_thread is None):
            self.batch_abortQ.put('_abort_')
            time.sleep(2.00)
            self.batch_thread = None
            clear_tts(self.proc_id, )
        if (self.narou_speech == 'yes'):
            base_url = self.narou_base
            if (self.browser_url[:len(base_url)] == base_url):
                page_url = self.browser_url[len(base_url):]
                self.batch_thread = threading.Thread(target=html_narou_to_tts, args=(
                    self.batch_abortQ, self.proc_id,
                    base_url, page_url, self.browser_html, 'yes',
                ))
                self.batch_thread.setDaemon(True)
                self.batch_thread.start()
        return True
import signal
def signal_handler(signal_number, stack_frame):
    """Log the received signal, then ignore further SIGINT/SIGTERM."""
    script = os.path.basename(__file__)
    print(script, 'accept signal =', signal_number)
    for ignored in (signal.SIGINT, signal.SIGTERM):
        signal.signal(ignored, signal.SIG_IGN)
if __name__ == '__main__':
    main_name = 'browser'
    main_id = '{0:10s}'.format(main_name).replace(' ', '_')
    qRiKi.init()
    qFunc.init()
    # Time-stamped log file for this run.
    nowTime = datetime.datetime.now()
    filename = qPath_log + nowTime.strftime('%Y%m%d.%H%M%S') + '.' + os.path.basename(__file__) + '.log'
    qLog.init(mode='logger', filename=filename, )
    qLog.log('info', main_id, 'init')
    qLog.log('info', main_id, 'exsample.py runMode, ')
    # Optional first argument overrides the module-level runMode.
    if (True):
        if (len(sys.argv) >= 2):
            runMode = str(sys.argv[1]).lower()
        qLog.log('info', main_id, 'runMode =' + str(runMode ))
    # Drop a stale '_end_' request left over from a previous run.
    if (True):
        txts, txt = qFunc.txtsRead(qCtrl_control_self)
        if (txts != False):
            if (txt == '_end_'):
                qFunc.remove(qCtrl_control_self)
    # Run the worker until an '_end_' command arrives via the control file.
    if (True):
        qLog.log('info', main_id, 'start')
        main_core = main_browser(main_name, '0', runMode=runMode, )
        main_core.begin()
        main_start = time.time()
        onece = True
        while (True):
            txts, txt = qFunc.txtsRead(qCtrl_control_self)
            if (txts != False):
                if (txt == '_end_'):
                    break
            # Debug mode: self-drive a demo script (open, navigate, search,
            # read a novel) and shut everything down after ~40 seconds.
            if (runMode == 'debug'):
                if ((time.time() - main_start) > 1):
                    if (onece == True):
                        onece = False
                        qFunc.txtsWrite(qCtrl_control_self ,txts=['_start_'], encoding='utf-8', exclusive=True, mode='w', )
                        time.sleep(5.00)
                        qFunc.txtsWrite(qCtrl_control_self ,txts=['http://yahoo.co.jp'], encoding='utf-8', exclusive=True, mode='w', )
                        time.sleep(5.00)
                        qFunc.txtsWrite(qCtrl_control_self ,txts=[u'姫路城'], encoding='utf-8', exclusive=True, mode='w', )
                        time.sleep(5.00)
                        qFunc.txtsWrite(qCtrl_control_self ,txts=[u'本好き'], encoding='utf-8', exclusive=True, mode='w', )
                if ((time.time() - main_start) > 40):
                    qFunc.txtsWrite(qCtrl_control_self ,txts=['_stop_'], encoding='utf-8', exclusive=True, mode='w', )
                    time.sleep(5.00)
                    qFunc.txtsWrite(qCtrl_control_self ,txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
            # Poll more slowly while the CPU or microphone is busy.
            slow = False
            if (qFunc.statusCheck(qBusy_dev_cpu) == True):
                slow = True
            elif (qFunc.statusCheck(qBusy_dev_mic) == True):
                slow = True
            if (slow == True):
                time.sleep(1.00)
            else:
                time.sleep(0.50)
    if (True):
        qLog.log('info', main_id, 'terminate')
        main_core.abort()
        del main_core
    qLog.log('info', main_id, 'bye!')
    sys.exit(0)
| true | true |
f7fb722fcc1162385978ef79710fcda0248259a6 | 190,658 | py | Python | env/lib/python3.9/site-packages/multiprocess/tests/__init__.py | wphoong/flappy_doge | c778f0e4820c1ed46e50a56f989d57df4f386736 | [
"MIT"
] | null | null | null | env/lib/python3.9/site-packages/multiprocess/tests/__init__.py | wphoong/flappy_doge | c778f0e4820c1ed46e50a56f989d57df4f386736 | [
"MIT"
] | null | null | null | env/lib/python3.9/site-packages/multiprocess/tests/__init__.py | wphoong/flappy_doge | c778f0e4820c1ed46e50a56f989d57df4f386736 | [
"MIT"
] | null | null | null | #
# Unit tests for the multiprocessing package
#
import unittest
import unittest.mock
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import subprocess
import struct
import operator
import pickle #XXX: use dill?
import weakref
import warnings
import test.support
import test.support.script_helper
from test import support
from test.support import hashlib_helper
from test.support import socket_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocess.synchronize')
import threading
import multiprocess as multiprocessing
import multiprocess.connection
import multiprocess.dummy
import multiprocess.heap
import multiprocess.managers
import multiprocess.pool
import multiprocess.queues
from multiprocess import util
try:
from multiprocess import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocess.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
from multiprocess import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
try:
import msvcrt
except ImportError:
msvcrt = None
def latin(s):
    """Encode *s* with the Latin-1 codec (helper for byte fixtures)."""
    return bytes(s, 'latin')
def close_queue(queue):
    """Close *queue* and join its feeder thread, if it is a real mp Queue."""
    if not isinstance(queue, multiprocessing.queues.Queue):
        return
    queue.close()
    queue.join_thread()
def join_process(process):
    # Wait for *process* to finish via the shared test-support helper.
    # Since multiprocessing.Process has the same API as threading.Thread
    # (join() and is_alive()), the support function can be reused.
    support.join_thread(process)
# POSIX-only helper: ask the resource tracker to run the registered
# cleanup routine for a named resource of the given type.
if os.name == "posix":
    from multiprocess import resource_tracker
    def _resource_unlink(name, rtype):
        resource_tracker._CLEANUP_FUNCS[rtype](name)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1          # generic short delay (seconds) used throughout the tests
CHECK_TIMINGS = False     # making true makes tests take a lot longer
                          # and can sometimes cause some non-serious
                          # failures because some calls block a bit
                          # longer than expected
if CHECK_TIMINGS:
    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
# False when the platform flags sem_getvalue() as broken.
HAVE_GETVALUE = not getattr(_multiprocessing,
                            'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocess.connection import wait
def wait_for_handle(handle, timeout):
    """Wait for *handle* to become ready; a negative timeout blocks forever."""
    effective = None if (timeout is not None and timeout < 0.0) else timeout
    return wait([handle], effective)
# Highest usable file descriptor number; fall back to 256 where the
# sysconf value is unavailable.  The original used a bare `except:`,
# which would also swallow KeyboardInterrupt/SystemExit at import time;
# os.sysconf raises AttributeError (not present), ValueError (unknown
# name) or OSError (query failed), so catch exactly those.
try:
    MAXFD = os.sysconf("SC_OPEN_MAX")
except (AttributeError, ValueError, OSError):
    MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
    from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
    # Keep the module importable on builds without ctypes; dependent
    # tests check these names and skip themselves.
    Structure = object
    c_int = c_double = c_longlong = None
def check_enough_semaphores():
    """Check that the system supports enough semaphores to run the test."""
    # POSIX guarantees at least this many semaphores.
    nsems_min = 256
    try:
        nsems = os.sysconf("SC_SEM_NSEMS_MAX")
    except (AttributeError, ValueError):
        # sysconf not available, or the setting is not supported: assume ok.
        return
    # -1 means "no limit"; anything at or above the minimum is fine too.
    if nsems != -1 and nsems < nsems_min:
        raise unittest.SkipTest("The OS doesn't support enough semaphores "
                                "to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
    """Callable proxy that records how long the wrapped call took.

    After each call the duration (in seconds) is stored in ``elapsed``.
    """

    def __init__(self, func):
        self.func = func
        self.elapsed = None

    def __call__(self, *args, **kwds):
        # getattr keeps the historical fallback to time.time on builds
        # without time.monotonic.
        clock = getattr(time, 'monotonic', time.time)
        start = clock()
        try:
            return self.func(*args, **kwds)
        finally:
            self.elapsed = clock() - start
#
# Base class for test cases
#
class BaseTestCase(object):
    """Mixin shared by the multiprocessing test-case variants."""

    ALLOWED_TYPES = ('processes', 'manager', 'threads')

    def assertTimingAlmostEqual(self, a, b):
        # Timing comparisons are only meaningful with CHECK_TIMINGS on.
        if CHECK_TIMINGS:
            self.assertAlmostEqual(a, b, 1)

    def assertReturnsIfImplemented(self, value, func, *args):
        # Tolerate features a given variant does not implement.
        try:
            res = func(*args)
        except NotImplementedError:
            return
        return self.assertEqual(value, res)

    # For the sanity of Windows users, rather than crashing or freezing in
    # multiple ways.
    def __reduce__(self, *args):
        raise NotImplementedError("shouldn't try to pickle a test case")

    __reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
    """Best-effort read of a semaphore-like object's counter.

    Tries, in order: the object's own get_value() method, then the
    private value attributes used by the threading and multiprocessing
    implementations.  Raises NotImplementedError when none is available.
    """
    try:
        return self.get_value()
    except AttributeError:
        pass
    for attr in ('_Semaphore__value', '_value'):
        try:
            return getattr(self, attr)
        except AttributeError:
            continue
    raise NotImplementedError
#
# Testcases
#
class DummyCallable:
    """Callable target used to check that Process releases its reference."""

    def __call__(self, q, c):
        # The callable is always handed itself as the second argument.
        if not isinstance(c, DummyCallable):
            raise AssertionError
        q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_parent_process_attributes(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
self.assertIsNone(self.parent_process())
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(target=self._test_send_parent_process, args=(wconn,))
p.start()
p.join()
parent_pid, parent_name = rconn.recv()
self.assertEqual(parent_pid, self.current_process().pid)
self.assertEqual(parent_pid, os.getpid())
self.assertEqual(parent_name, self.current_process().name)
@classmethod
def _test_send_parent_process(cls, wconn):
from multiprocess.process import parent_process
wconn.send([parent_process().pid, parent_process().name])
def _test_parent_process(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# Launch a child process. Make it launch a grandchild process. Kill the
# child process and make sure that the grandchild notices the death of
# its parent (a.k.a the child process).
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(
target=self._test_create_grandchild_process, args=(wconn, ))
p.start()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "alive")
p.terminate()
p.join()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "not alive")
@classmethod
def _test_create_grandchild_process(cls, wconn):
p = cls.Process(target=cls._test_report_parent_status, args=(wconn, ))
p.start()
time.sleep(300)
@classmethod
def _test_report_parent_status(cls, wconn):
from multiprocess.process import parent_process
wconn.send("alive" if parent_process().is_alive() else "not alive")
parent_process().join(timeout=support.SHORT_TIMEOUT)
wconn.send("alive" if parent_process().is_alive() else "not alive")
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id")
def test_process_mainthread_native_id(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current_mainthread_native_id = threading.main_thread().native_id
q = self.Queue(1)
p = self.Process(target=self._test_process_mainthread_native_id, args=(q,))
p.start()
child_mainthread_native_id = q.get()
p.join()
close_queue(q)
self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id)
@classmethod
def _test_process_mainthread_native_id(cls, q):
mainthread_native_id = threading.main_thread().native_id
q.put(mainthread_native_id)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
@unittest.skipIf(True, "fails with is_dill(obj, child=True)")
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(test.support.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocess.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give time to the fork server to die and time to proc to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
    # Catchable signal
    self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
    # Uncatchable signal
    # SIGKILL does not exist on Windows, hence the platform guard.
    if os.name != 'nt':
        self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
    """Worker process that upper-cases strings received over a pipe.

    Protocol: the parent calls submit() with a str and receives the
    upper-cased reply; stop() sends a None sentinel that makes the
    child's recv-loop terminate.
    """

    def __init__(self):
        multiprocessing.Process.__init__(self)
        self.child_conn, self.parent_conn = multiprocessing.Pipe()

    def run(self):
        # Child side: close the inherited parent end, then echo each
        # string upper-cased until the None sentinel arrives.
        self.parent_conn.close()
        for s in iter(self.child_conn.recv, None):
            self.child_conn.send(s.upper())
        self.child_conn.close()

    def submit(self, s):
        # Parent side: send one string, block for the reply.
        assert type(s) is str
        self.parent_conn.send(s)
        return self.parent_conn.recv()

    def stop(self):
        # Send the termination sentinel and release both pipe ends.
        self.parent_conn.send(None)
        self.parent_conn.close()
        self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
    """Tests for subclassing Process and for stdio/exit-code behavior of
    child processes (stderr flushing at shutdown, sys.exit codes)."""

    ALLOWED_TYPES = ('processes',)

    def test_subclassing(self):
        uppercaser = _UpperCaser()
        uppercaser.daemon = True
        uppercaser.start()
        self.assertEqual(uppercaser.submit('hello'), 'HELLO')
        self.assertEqual(uppercaser.submit('world'), 'WORLD')
        uppercaser.stop()
        uppercaser.join()

    def test_stderr_flush(self):
        # sys.stderr is flushed at process shutdown (issue #13812)
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        testfn = test.support.TESTFN
        self.addCleanup(test.support.unlink, testfn)
        proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
        proc.start()
        proc.join()
        with open(testfn, 'r') as f:
            err = f.read()
            # The whole traceback was printed
            self.assertIn("ZeroDivisionError", err)
            self.assertIn("__init__.py", err)
            self.assertIn("1/0 # MARKER", err)

    @classmethod
    def _test_stderr_flush(cls, testfn):
        # Redirect stderr into testfn, then raise; the traceback (including
        # the "1/0 # MARKER" source line) must end up in the file.
        fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
        sys.stderr = open(fd, 'w', closefd=False)
        1/0 # MARKER

    @classmethod
    def _test_sys_exit(cls, reason, testfn):
        # Redirect stderr into testfn and exit with a non-int reason, which
        # Python prints to stderr and converts to exit code 1.
        fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
        sys.stderr = open(fd, 'w', closefd=False)
        sys.exit(reason)

    def test_sys_exit(self):
        # See Issue 13854
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        testfn = test.support.TESTFN
        self.addCleanup(test.support.unlink, testfn)

        # Non-integer reasons: printed to stderr, exit code forced to 1.
        for reason in (
            [1, 2, 3],
            'ignore this',
        ):
            p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
            p.daemon = True
            p.start()
            join_process(p)
            self.assertEqual(p.exitcode, 1)

            with open(testfn, 'r') as f:
                content = f.read()
            self.assertEqual(content.rstrip(), str(reason))

            os.unlink(testfn)

        # Int/bool/None arguments map onto exit codes directly.
        cases = [
            ((True,), 1),
            ((False,), 0),
            ((8,), 8),
            ((None,), 0),
            ((), 0),
        ]

        for args, expected in cases:
            with self.subTest(args=args):
                p = self.Process(target=sys.exit, args=args)
                p.daemon = True
                p.start()
                join_process(p)
                self.assertEqual(p.exitcode, expected)
#
#
#
def queue_empty(q):
    """Return True if *q* currently holds no items.

    Prefers the queue's own empty() method; proxy queues that lack one
    are checked via qsize() == 0 instead.
    """
    try:
        probe = q.empty
    except AttributeError:
        return q.qsize() == 0
    return probe()
def queue_full(q, maxsize):
    """Return True if *q* currently holds *maxsize* items.

    Prefers the queue's own full() method; proxy queues that lack one
    are checked via qsize() == maxsize instead.
    """
    try:
        probe = q.full
    except AttributeError:
        return q.qsize() == maxsize
    return probe()
class _TestQueue(BaseTestCase):
    """Queue/JoinableQueue behavior: put/get blocking and timeouts,
    feeder-thread creation after fork, task_done/join, feeder error
    handling, and errors on closed queues."""

    @classmethod
    def _test_put(cls, queue, child_can_start, parent_can_continue):
        # Child side of test_put: once released, drain the six items the
        # parent enqueued, then let the parent continue.
        child_can_start.wait()
        for i in range(6):
            queue.get()
        parent_can_continue.set()

    def test_put(self):
        MAXSIZE = 6
        queue = self.Queue(maxsize=MAXSIZE)
        child_can_start = self.Event()
        parent_can_continue = self.Event()

        proc = self.Process(
            target=self._test_put,
            args=(queue, child_can_start, parent_can_continue)
            )
        proc.daemon = True
        proc.start()

        self.assertEqual(queue_empty(queue), True)
        self.assertEqual(queue_full(queue, MAXSIZE), False)

        # Exercise every positional/keyword form of put on a non-full queue.
        queue.put(1)
        queue.put(2, True)
        queue.put(3, True, None)
        queue.put(4, False)
        queue.put(5, False, None)
        queue.put_nowait(6)

        # the values may be in buffer but not yet in pipe so sleep a bit
        time.sleep(DELTA)

        self.assertEqual(queue_empty(queue), False)
        self.assertEqual(queue_full(queue, MAXSIZE), True)

        put = TimingWrapper(queue.put)
        put_nowait = TimingWrapper(queue.put_nowait)

        # Non-blocking puts on a full queue must fail immediately ...
        self.assertRaises(pyqueue.Full, put, 7, False)
        self.assertTimingAlmostEqual(put.elapsed, 0)

        self.assertRaises(pyqueue.Full, put, 7, False, None)
        self.assertTimingAlmostEqual(put.elapsed, 0)

        self.assertRaises(pyqueue.Full, put_nowait, 7)
        self.assertTimingAlmostEqual(put_nowait.elapsed, 0)

        # ... while blocking puts must honor their timeout.
        self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)

        self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
        self.assertTimingAlmostEqual(put.elapsed, 0)

        self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)

        # Release the child to drain the queue.
        child_can_start.set()
        parent_can_continue.wait()

        self.assertEqual(queue_empty(queue), True)
        self.assertEqual(queue_full(queue, MAXSIZE), False)

        proc.join()
        close_queue(queue)

    @classmethod
    def _test_get(cls, queue, child_can_start, parent_can_continue):
        # Child side of test_get: enqueue values for the parent to fetch.
        child_can_start.wait()
        #queue.put(1)
        queue.put(2)
        queue.put(3)
        queue.put(4)
        queue.put(5)
        parent_can_continue.set()

    def test_get(self):
        queue = self.Queue()
        child_can_start = self.Event()
        parent_can_continue = self.Event()

        proc = self.Process(
            target=self._test_get,
            args=(queue, child_can_start, parent_can_continue)
            )
        proc.daemon = True
        proc.start()

        self.assertEqual(queue_empty(queue), True)

        child_can_start.set()
        parent_can_continue.wait()

        time.sleep(DELTA)
        self.assertEqual(queue_empty(queue), False)

        # Hangs unexpectedly, remove for now
        #self.assertEqual(queue.get(), 1)
        # Exercise every positional/keyword form of get.
        self.assertEqual(queue.get(True, None), 2)
        self.assertEqual(queue.get(True), 3)
        self.assertEqual(queue.get(timeout=1), 4)
        self.assertEqual(queue.get_nowait(), 5)

        self.assertEqual(queue_empty(queue), True)

        get = TimingWrapper(queue.get)
        get_nowait = TimingWrapper(queue.get_nowait)

        # Non-blocking gets on an empty queue must fail immediately ...
        self.assertRaises(pyqueue.Empty, get, False)
        self.assertTimingAlmostEqual(get.elapsed, 0)

        self.assertRaises(pyqueue.Empty, get, False, None)
        self.assertTimingAlmostEqual(get.elapsed, 0)

        self.assertRaises(pyqueue.Empty, get_nowait)
        self.assertTimingAlmostEqual(get_nowait.elapsed, 0)

        # ... while blocking gets must honor their timeout.
        self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)

        self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
        self.assertTimingAlmostEqual(get.elapsed, 0)

        self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)

        proc.join()
        close_queue(queue)

    @classmethod
    def _test_fork(cls, queue):
        for i in range(10, 20):
            queue.put(i)
        # note that at this point the items may only be buffered, so the
        # process cannot shutdown until the feeder thread has finished
        # pushing items onto the pipe.

    def test_fork(self):
        # Old versions of Queue would fail to create a new feeder
        # thread for a forked process if the original process had its
        # own feeder thread.  This test checks that this no longer
        # happens.

        queue = self.Queue()

        # put items on queue so that main process starts a feeder thread
        for i in range(10):
            queue.put(i)

        # wait to make sure thread starts before we fork a new process
        time.sleep(DELTA)

        # fork process
        p = self.Process(target=self._test_fork, args=(queue,))
        p.daemon = True
        p.start()

        # check that all expected items are in the queue
        for i in range(20):
            self.assertEqual(queue.get(), i)
        self.assertRaises(pyqueue.Empty, queue.get, False)

        p.join()
        close_queue(queue)

    def test_qsize(self):
        q = self.Queue()
        try:
            # qsize() may raise NotImplementedError (e.g. where
            # sem_getvalue is unavailable); skip in that case.
            self.assertEqual(q.qsize(), 0)
        except NotImplementedError:
            self.skipTest('qsize method not implemented')
        q.put(1)
        self.assertEqual(q.qsize(), 1)
        q.put(5)
        self.assertEqual(q.qsize(), 2)
        q.get()
        self.assertEqual(q.qsize(), 1)
        q.get()
        self.assertEqual(q.qsize(), 0)
        close_queue(q)

    @classmethod
    def _test_task_done(cls, q):
        # Worker: consume until the None sentinel, acknowledging each item.
        for obj in iter(q.get, None):
            time.sleep(DELTA)
            q.task_done()

    def test_task_done(self):
        queue = self.JoinableQueue()

        workers = [self.Process(target=self._test_task_done, args=(queue,))
                   for i in range(4)]

        for p in workers:
            p.daemon = True
            p.start()

        for i in range(10):
            queue.put(i)

        # join() must only return once every item has been task_done()'d.
        queue.join()

        # One sentinel per worker to shut them down.
        for p in workers:
            queue.put(None)

        for p in workers:
            p.join()
        close_queue(queue)

    def test_no_import_lock_contention(self):
        with test.support.temp_cwd():
            module_name = 'imported_by_an_imported_module'
            with open(module_name + '.py', 'w') as f:
                f.write("""if 1:
                    import multiprocess as multiprocessing

                    q = multiprocessing.Queue()
                    q.put('knock knock')
                    q.get(timeout=3)
                    q.close()
                    del q
                """)

            with test.support.DirsOnSysPath(os.getcwd()):
                try:
                    # Importing the module runs the queue round-trip at
                    # import time; a deadlock on the import lock would
                    # surface as pyqueue.Empty after the 3 s timeout.
                    __import__(module_name)
                except pyqueue.Empty:
                    self.fail("Probable regression on import lock contention;"
                              " see Issue #22853")

    def test_timeout(self):
        q = multiprocessing.Queue()
        start = getattr(time,'monotonic',time.time)()
        self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
        delta = getattr(time,'monotonic',time.time)() - start
        # bpo-30317: Tolerate a delta of 100 ms because of the bad clock
        # resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
        # failed because the delta was only 135.8 ms.
        self.assertGreaterEqual(delta, 0.100)
        close_queue(q)

    def test_queue_feeder_donot_stop_onexc(self):
        # bpo-30414: verify feeder handles exceptions correctly
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        class NotSerializable(object):
            def __reduce__(self):
                raise AttributeError
        with test.support.captured_stderr():
            q = self.Queue()
            q.put(NotSerializable())
            q.put(True)
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
            close_queue(q)

        with test.support.captured_stderr():
            # bpo-33078: verify that the queue size is correctly handled
            # on errors.
            q = self.Queue(maxsize=1)
            q.put(NotSerializable())
            q.put(True)
            try:
                self.assertEqual(q.qsize(), 1)
            except NotImplementedError:
                # qsize is not available on all platform as it
                # relies on sem_getvalue
                pass
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
            # Check that the size of the queue is correct
            self.assertTrue(q.empty())
            close_queue(q)

    def test_queue_feeder_on_queue_feeder_error(self):
        # bpo-30006: verify feeder handles exceptions using the
        # _on_queue_feeder_error hook.
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        class NotSerializable(object):
            """Mock unserializable object"""
            def __init__(self):
                self.reduce_was_called = False
                self.on_queue_feeder_error_was_called = False

            def __reduce__(self):
                self.reduce_was_called = True
                raise AttributeError

        class SafeQueue(multiprocessing.queues.Queue):
            """Queue with overloaded _on_queue_feeder_error hook"""
            @staticmethod
            def _on_queue_feeder_error(e, obj):
                if (isinstance(e, AttributeError) and
                        isinstance(obj, NotSerializable)):
                    obj.on_queue_feeder_error_was_called = True

        not_serializable_obj = NotSerializable()
        # The captured_stderr reduces the noise in the test report
        with test.support.captured_stderr():
            q = SafeQueue(ctx=multiprocessing.get_context())
            q.put(not_serializable_obj)

            # Verify that q is still functioning correctly
            q.put(True)
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))

        # Assert that the serialization and the hook have been called correctly
        self.assertTrue(not_serializable_obj.reduce_was_called)
        self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)

    def test_closed_queue_put_get_exceptions(self):
        # put()/get() on a closed queue must raise ValueError('... is closed').
        for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
            q.close()
            with self.assertRaisesRegex(ValueError, 'is closed'):
                q.put('foo')
            with self.assertRaisesRegex(ValueError, 'is closed'):
                q.get()
#
#
#
class _TestLock(BaseTestCase):
    """Basic Lock/RLock semantics across all synchronization backends."""

    def test_lock(self):
        lock = self.Lock()
        self.assertEqual(lock.acquire(), True)
        # A non-recursive lock cannot be acquired twice.
        self.assertEqual(lock.acquire(False), False)
        self.assertEqual(lock.release(), None)
        # Releasing an unlocked lock raises (exception type varies by backend).
        self.assertRaises((ValueError, threading.ThreadError), lock.release)

    def test_rlock(self):
        lock = self.RLock()
        # A recursive lock may be acquired repeatedly by the same owner ...
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        # ... and must be released the same number of times.
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertRaises((AssertionError, RuntimeError), lock.release)

    def test_lock_context(self):
        # Lock must be usable as a context manager.
        with self.Lock():
            pass
class _TestSemaphore(BaseTestCase):
    """Semaphore/BoundedSemaphore counting and acquire-timeout behavior."""

    def _test_semaphore(self, sem):
        # Walk the counter down to 0 and back up, checking the value where
        # the platform implements get_value().
        self.assertReturnsIfImplemented(2, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(0, get_value, sem)
        # Non-blocking acquire at 0 must fail.
        self.assertEqual(sem.acquire(False), False)
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(2, get_value, sem)

    def test_semaphore(self):
        sem = self.Semaphore(2)
        self._test_semaphore(sem)
        # A plain Semaphore may be released above its initial value.
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(3, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(4, get_value, sem)

    def test_bounded_semaphore(self):
        sem = self.BoundedSemaphore(2)
        self._test_semaphore(sem)
        # Currently fails on OS/X
        #if HAVE_GETVALUE:
        #    self.assertRaises(ValueError, sem.release)
        #    self.assertReturnsIfImplemented(2, get_value, sem)

    def test_timeout(self):
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        sem = self.Semaphore(0)
        acquire = TimingWrapper(sem.acquire)

        # Non-blocking forms return immediately ...
        self.assertEqual(acquire(False), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)

        self.assertEqual(acquire(False, None), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)

        self.assertEqual(acquire(False, TIMEOUT1), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0)

        # ... while blocking forms honor the requested timeout.
        self.assertEqual(acquire(True, TIMEOUT2), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)

        self.assertEqual(acquire(timeout=TIMEOUT3), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
    """Condition-variable semantics: notify, notify_all, notify(n),
    wait timeouts, wait_for, and wait() return values — exercised with a
    mix of child processes and threads sleeping on the same condition."""

    @classmethod
    def f(cls, cond, sleeping, woken, timeout=None):
        # Sleeper: signal that it is about to wait, wait on the condition
        # (optionally with a timeout), then signal that it has woken.
        cond.acquire()
        sleeping.release()
        cond.wait(timeout)
        woken.release()
        cond.release()

    def assertReachesEventually(self, func, value):
        # Poll up to ~10*DELTA for func() to reach value, then assert it.
        for i in range(10):
            try:
                if func() == value:
                    break
            except NotImplementedError:
                break
            time.sleep(DELTA)
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(value, func)

    def check_invariant(self, cond):
        # this is only supposed to succeed when there are no sleepers
        if self.TYPE == 'processes':
            try:
                sleepers = (cond._sleeping_count.get_value() -
                            cond._woken_count.get_value())
                self.assertEqual(sleepers, 0)
                self.assertEqual(cond._wait_semaphore.get_value(), 0)
            except NotImplementedError:
                pass

    def test_notify(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)

        # One process and one thread sleep on the same condition.
        p = self.Process(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()
        self.addCleanup(p.join)

        p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()
        self.addCleanup(p.join)

        # wait for both children to start sleeping
        sleeping.acquire()
        sleeping.acquire()

        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)

        # wake up one process/thread
        cond.acquire()
        cond.notify()
        cond.release()

        # check one process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(1, get_value, woken)

        # wake up another
        cond.acquire()
        cond.notify()
        cond.release()

        # check other has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(2, get_value, woken)

        # check state is not mucked up
        self.check_invariant(cond)
        p.join()

    def test_notify_all(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)

        # start some threads/processes which will timeout
        for i in range(3):
            p = self.Process(target=self.f,
                             args=(cond, sleeping, woken, TIMEOUT1))
            p.daemon = True
            p.start()
            self.addCleanup(p.join)

            t = threading.Thread(target=self.f,
                                 args=(cond, sleeping, woken, TIMEOUT1))
            t.daemon = True
            t.start()
            self.addCleanup(t.join)

        # wait for them all to sleep
        for i in range(6):
            sleeping.acquire()

        # check they have all timed out
        for i in range(6):
            woken.acquire()
        self.assertReturnsIfImplemented(0, get_value, woken)

        # check state is not mucked up
        self.check_invariant(cond)

        # start some more threads/processes
        for i in range(3):
            p = self.Process(target=self.f, args=(cond, sleeping, woken))
            p.daemon = True
            p.start()
            self.addCleanup(p.join)

            t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
            t.daemon = True
            t.start()
            self.addCleanup(t.join)

        # wait for them to all sleep
        for i in range(6):
            sleeping.acquire()

        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)

        # wake them all up
        cond.acquire()
        cond.notify_all()
        cond.release()

        # check they have all woken
        self.assertReachesEventually(lambda: get_value(woken), 6)

        # check state is not mucked up
        self.check_invariant(cond)

    def test_notify_n(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)

        # start some threads/processes
        for i in range(3):
            p = self.Process(target=self.f, args=(cond, sleeping, woken))
            p.daemon = True
            p.start()
            self.addCleanup(p.join)

            t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
            t.daemon = True
            t.start()
            self.addCleanup(t.join)

        # wait for them to all sleep
        for i in range(6):
            sleeping.acquire()

        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)

        # wake some of them up
        cond.acquire()
        cond.notify(n=2)
        cond.release()

        # check 2 have woken
        self.assertReachesEventually(lambda: get_value(woken), 2)

        # wake the rest of them
        cond.acquire()
        cond.notify(n=4)
        cond.release()

        self.assertReachesEventually(lambda: get_value(woken), 6)

        # doesn't do anything more
        cond.acquire()
        cond.notify(n=3)
        cond.release()

        self.assertReturnsIfImplemented(6, get_value, woken)

        # check state is not mucked up
        self.check_invariant(cond)

    def test_timeout(self):
        # wait(timeout) with no notifier must return False after ~timeout.
        cond = self.Condition()
        wait = TimingWrapper(cond.wait)
        cond.acquire()
        res = wait(TIMEOUT1)
        cond.release()
        self.assertEqual(res, False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)

    @classmethod
    def _test_waitfor_f(cls, cond, state):
        # Child: hand control back to the parent (state -> 0), then wait
        # until the parent has incremented state to 4; exit 1 on failure.
        with cond:
            state.value = 0
            cond.notify()
            result = cond.wait_for(lambda : state.value==4)
            if not result or state.value != 4:
                sys.exit(1)

    @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
    def test_waitfor(self):
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', -1)

        p = self.Process(target=self._test_waitfor_f, args=(cond, state))
        p.daemon = True
        p.start()

        with cond:
            result = cond.wait_for(lambda : state.value==0)
            self.assertTrue(result)
            self.assertEqual(state.value, 0)

        for i in range(4):
            time.sleep(0.01)
            with cond:
                state.value += 1
                cond.notify()

        join_process(p)
        self.assertEqual(p.exitcode, 0)

    @classmethod
    def _test_waitfor_timeout_f(cls, cond, state, success, sem):
        # Child: wait_for a predicate that never becomes true and check the
        # call times out in roughly the expected interval.
        sem.release()
        with cond:
            expected = 0.1
            dt = getattr(time,'monotonic',time.time)()
            result = cond.wait_for(lambda : state.value==4, timeout=expected)
            dt = getattr(time,'monotonic',time.time)() - dt
            # borrow logic in assertTimeout() from test/lock_tests.py
            if not result and expected * 0.6 < dt < expected * 10.0:
                success.value = True

    @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
    def test_waitfor_timeout(self):
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', 0)
        success = self.Value('i', False)
        sem = self.Semaphore(0)

        p = self.Process(target=self._test_waitfor_timeout_f,
                         args=(cond, state, success, sem))
        p.daemon = True
        p.start()
        self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT))

        # Only increment 3 times, so state == 4 is never reached.
        for i in range(3):
            time.sleep(0.01)
            with cond:
                state.value += 1
                cond.notify()

        join_process(p)
        self.assertTrue(success.value)

    @classmethod
    def _test_wait_result(cls, c, pid):
        # Child: notify the waiter, then (on POSIX) interrupt the parent's
        # second wait with SIGINT.
        with c:
            c.notify()
        time.sleep(1)
        if pid is not None:
            os.kill(pid, signal.SIGINT)

    def test_wait_result(self):
        if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
            pid = os.getpid()
        else:
            pid = None

        c = self.Condition()
        with c:
            # No notifier yet: both waits time out and return False.
            self.assertFalse(c.wait(0))
            self.assertFalse(c.wait(0.1))

            p = self.Process(target=self._test_wait_result, args=(c, pid))
            p.start()

            # Notified wait returns True well before the 60 s cap.
            self.assertTrue(c.wait(60))
            if pid is not None:
                self.assertRaises(KeyboardInterrupt, c.wait, 60)

            p.join()
class _TestEvent(BaseTestCase):
    """Event set/clear/wait semantics, including wait() return values and
    waiting on an event set by a child process."""

    @classmethod
    def _test_event(cls, event):
        # Child: set the event after a delay so the parent's wait() blocks.
        time.sleep(TIMEOUT2)
        event.set()

    def test_event(self):
        event = self.Event()
        wait = TimingWrapper(event.wait)

        # Removed temporarily, due to API shear, this does not
        # work with threading._Event objects. is_set == isSet
        self.assertEqual(event.is_set(), False)

        # Removed, threading.Event.wait() will return the value of the __flag
        # instead of None. API Shear with the semaphore backed mp.Event
        self.assertEqual(wait(0.0), False)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)

        event.set()

        # See note above on the API differences
        self.assertEqual(event.is_set(), True)
        # Once set, wait() returns True immediately regardless of timeout.
        self.assertEqual(wait(), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        # self.assertEqual(event.is_set(), True)

        event.clear()

        #self.assertEqual(event.is_set(), False)

        # A child process sets the event; the parent's wait() must return True.
        p = self.Process(target=self._test_event, args=(event,))
        p.daemon = True
        p.start()
        self.assertEqual(wait(), True)
        p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
    """Process-shared atomic counter with a list-like facade.

    Only append() and len() are supported: append() increments a counter
    held in a shared-memory buffer under a lock, and __len__ reads it.
    __getstate__/__setstate__ make the object picklable so it can be
    passed to child processes sharing the same buffer.
    """

    def __init__(self):
        # One C int in a shared heap buffer, guarded by a process lock.
        wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
        lock = multiprocessing.Lock()
        self.__setstate__((wrapper, lock))
        self._lengthbuf[0] = 0

    def __setstate__(self, state):
        (self._wrapper, self._lock) = state
        # View the shared buffer as a single-element int array.
        self._lengthbuf = self._wrapper.create_memoryview().cast('i')

    def __getstate__(self):
        return (self._wrapper, self._lock)

    def append(self, _):
        # The appended value is ignored; only the count matters.
        with self._lock:
            self._lengthbuf[0] += 1

    def __len__(self):
        with self._lock:
            return self._lengthbuf[0]
def _wait():
    # A crude wait/yield function not relying on synchronization primitives.
    # Used by the Bunch.wait_for_* polling loops below.
    time.sleep(0.01)
class Bunch(object):
    """
    A bunch of threads.

    Despite the name, the workers are namespace.Process instances; the
    'started'/'finished' counters are namespace.DummyList objects so they
    work across processes.  A weakref finalizer joins all workers when
    the Bunch is garbage-collected or close() is called.
    """
    def __init__(self, namespace, f, args, n, wait_before_exit=False):
        """
        Construct a bunch of `n` threads running the same function `f`.
        If `wait_before_exit` is True, the threads won't terminate until
        do_finish() is called.
        """
        self.f = f
        self.args = args
        self.n = n
        self.started = namespace.DummyList()
        self.finished = namespace.DummyList()
        self._can_exit = namespace.Event()
        if not wait_before_exit:
            self._can_exit.set()

        threads = []
        for i in range(n):
            p = namespace.Process(target=self.task)
            p.daemon = True
            p.start()
            threads.append(p)

        def finalize(threads):
            for p in threads:
                p.join()

        self._finalizer = weakref.finalize(self, finalize, threads)

    def task(self):
        # Worker body: record start, run f, record finish, then block until
        # released (no-op unless wait_before_exit was requested).
        pid = os.getpid()
        self.started.append(pid)
        try:
            self.f(*self.args)
        finally:
            self.finished.append(pid)
            self._can_exit.wait(30)
            assert self._can_exit.is_set()

    def wait_for_started(self):
        # Busy-poll until all n workers have recorded a start.
        while len(self.started) < self.n:
            _wait()

    def wait_for_finished(self):
        # Busy-poll until all n workers have recorded a finish.
        while len(self.finished) < self.n:
            _wait()

    def do_finish(self):
        # Release workers held back by wait_before_exit=True.
        self._can_exit.set()

    def close(self):
        # Join all workers now rather than at garbage collection.
        self._finalizer()
class AppendTrue(object):
    """Picklable callable that appends True to the wrapped container.

    Used as a Barrier 'action' so that invocations can be counted via
    len() of the container (see _TestBarrier.test_action).
    """
    def __init__(self, obj):
        self.obj = obj
    def __call__(self):
        self.obj.append(True)
class _TestBarrier(BaseTestCase):
    """
    Tests for Barrier objects.
    """
    N = 5
    defaultTimeout = 30.0  # XXX Slow Windows buildbots need generous timeout

    def setUp(self):
        self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)

    def tearDown(self):
        self.barrier.abort()
        self.barrier = None

    def DummyList(self):
        # Return the appropriate cross-worker atomic counter for this
        # backend: a plain list for threads, a manager list for managers,
        # and the shared-memory _DummyList for processes.
        if self.TYPE == 'threads':
            return []
        elif self.TYPE == 'manager':
            return self.manager.list()
        else:
            return _DummyList()

    def run_threads(self, f, args):
        # Run f(*args) in N-1 workers plus the current process/thread.
        b = Bunch(self, f, args, self.N-1)
        try:
            f(*args)
            b.wait_for_finished()
        finally:
            b.close()

    @classmethod
    def multipass(cls, barrier, results, n):
        # Cross n barrier passes in lockstep, checking counters at each pass.
        m = barrier.parties
        assert m == cls.N
        for i in range(n):
            results[0].append(True)
            assert len(results[1]) == i * m
            barrier.wait()
            results[1].append(True)
            assert len(results[0]) == (i + 1) * m
            barrier.wait()
        try:
            assert barrier.n_waiting == 0
        except NotImplementedError:
            pass
        assert not barrier.broken

    def test_barrier(self, passes=1):
        """
        Test that a barrier is passed in lockstep
        """
        results = [self.DummyList(), self.DummyList()]
        self.run_threads(self.multipass, (self.barrier, results, passes))

    def test_barrier_10(self):
        """
        Test that a barrier works for 10 consecutive runs
        """
        return self.test_barrier(10)

    @classmethod
    def _test_wait_return_f(cls, barrier, queue):
        res = barrier.wait()
        queue.put(res)

    def test_wait_return(self):
        """
        test the return value from barrier.wait
        """
        queue = self.Queue()
        self.run_threads(self._test_wait_return_f, (self.barrier, queue))
        results = [queue.get() for i in range(self.N)]
        # wait() returns each index 0..N-1 exactly once; check index 0.
        self.assertEqual(results.count(0), 1)
        close_queue(queue)

    @classmethod
    def _test_action_f(cls, barrier, results):
        barrier.wait()
        # The action must have run exactly once before any waiter returns.
        if len(results) != 1:
            raise RuntimeError

    def test_action(self):
        """
        Test the 'action' callback
        """
        results = self.DummyList()
        barrier = self.Barrier(self.N, action=AppendTrue(results))
        self.run_threads(self._test_action_f, (barrier, results))
        self.assertEqual(len(results), 1)

    @classmethod
    def _test_abort_f(cls, barrier, results1, results2):
        try:
            i = barrier.wait()
            if i == cls.N//2:
                # One designated waiter raises to trigger the abort path.
                raise RuntimeError
            barrier.wait()
            results1.append(True)
        except threading.BrokenBarrierError:
            results2.append(True)
        except RuntimeError:
            barrier.abort()

    def test_abort(self):
        """
        Test that an abort will put the barrier in a broken state
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        self.run_threads(self._test_abort_f,
                         (self.barrier, results1, results2))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertTrue(self.barrier.broken)

    @classmethod
    def _test_reset_f(cls, barrier, results1, results2, results3):
        i = barrier.wait()
        if i == cls.N//2:
            # Wait until the other threads are all in the barrier.
            while barrier.n_waiting < cls.N-1:
                time.sleep(0.001)
            barrier.reset()
        else:
            try:
                barrier.wait()
                results1.append(True)
            except threading.BrokenBarrierError:
                results2.append(True)
        # Now, pass the barrier again
        barrier.wait()
        results3.append(True)

    def test_reset(self):
        """
        Test that a 'reset' on a barrier frees the waiting threads
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        results3 = self.DummyList()
        self.run_threads(self._test_reset_f,
                         (self.barrier, results1, results2, results3))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertEqual(len(results3), self.N)

    @classmethod
    def _test_abort_and_reset_f(cls, barrier, barrier2,
                                results1, results2, results3):
        try:
            i = barrier.wait()
            if i == cls.N//2:
                raise RuntimeError
            barrier.wait()
            results1.append(True)
        except threading.BrokenBarrierError:
            results2.append(True)
        except RuntimeError:
            barrier.abort()
        # Synchronize and reset the barrier.  Must synchronize first so
        # that everyone has left it when we reset, and after so that no
        # one enters it before the reset.
        if barrier2.wait() == cls.N//2:
            barrier.reset()
        barrier2.wait()
        barrier.wait()
        results3.append(True)

    def test_abort_and_reset(self):
        """
        Test that a barrier can be reset after being broken.
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        results3 = self.DummyList()
        barrier2 = self.Barrier(self.N)
        self.run_threads(self._test_abort_and_reset_f,
                         (self.barrier, barrier2, results1, results2, results3))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertEqual(len(results3), self.N)

    @classmethod
    def _test_timeout_f(cls, barrier, results):
        i = barrier.wait()
        if i == cls.N//2:
            # One thread is late!
            time.sleep(1.0)
        try:
            barrier.wait(0.5)
        except threading.BrokenBarrierError:
            results.append(True)

    def test_timeout(self):
        """
        Test wait(timeout)
        """
        results = self.DummyList()
        self.run_threads(self._test_timeout_f, (self.barrier, results))
        self.assertEqual(len(results), self.barrier.parties)

    @classmethod
    def _test_default_timeout_f(cls, barrier, results):
        i = barrier.wait(cls.defaultTimeout)
        if i == cls.N//2:
            # One thread is later than the default timeout
            time.sleep(1.0)
        try:
            barrier.wait()
        except threading.BrokenBarrierError:
            results.append(True)

    def test_default_timeout(self):
        """
        Test the barrier's default timeout
        """
        barrier = self.Barrier(self.N, timeout=0.5)
        results = self.DummyList()
        self.run_threads(self._test_default_timeout_f, (barrier, results))
        self.assertEqual(len(results), barrier.parties)

    def test_single_thread(self):
        # A one-party barrier never blocks.
        b = self.Barrier(1)
        b.wait()
        b.wait()

    @classmethod
    def _test_thousand_f(cls, barrier, passes, conn, lock):
        for i in range(passes):
            barrier.wait()
            with lock:
                conn.send(i)

    def test_thousand(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        passes = 1000
        lock = self.Lock()
        conn, child_conn = self.Pipe(False)
        for j in range(self.N):
            p = self.Process(target=self._test_thousand_f,
                             args=(self.barrier, passes, child_conn, lock))
            p.start()
            self.addCleanup(p.join)

        # Lockstep guarantee: all N messages for pass i arrive before any
        # message for pass i+1.
        for i in range(passes):
            for j in range(self.N):
                self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
    """Shared ctypes Value/RawValue: typed read/write across processes and
    the lock-related keyword variants."""

    ALLOWED_TYPES = ('processes',)

    # (typecode, initial value, value written by the child process)
    codes_values = [
        ('i', 4343, 24234),
        ('d', 3.625, -4.25),
        ('h', -232, 234),
        ('q', 2 ** 33, 2 ** 34),
        ('c', latin('x'), latin('y'))
        ]

    def setUp(self):
        if not HAS_SHAREDCTYPES:
            self.skipTest("requires multiprocess.sharedctypes")

    @classmethod
    def _test(cls, values):
        # Child: overwrite each shared value with the third tuple element.
        for sv, cv in zip(values, cls.codes_values):
            sv.value = cv[2]

    def test_value(self, raw=False):
        if raw:
            values = [self.RawValue(code, value)
                      for code, value, _ in self.codes_values]
        else:
            values = [self.Value(code, value)
                      for code, value, _ in self.codes_values]

        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[1])

        # Writes made in the child must be visible here afterwards.
        proc = self.Process(target=self._test, args=(values,))
        proc.daemon = True
        proc.start()
        proc.join()

        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[2])

    def test_rawvalue(self):
        self.test_value(raw=True)

    def test_getobj_getlock(self):
        val1 = self.Value('i', 5)
        lock1 = val1.get_lock()
        obj1 = val1.get_obj()

        # lock=None behaves like the default (a lock is created).
        val2 = self.Value('i', 5, lock=None)
        lock2 = val2.get_lock()
        obj2 = val2.get_obj()

        # An explicit lock object is used as-is.
        lock = self.Lock()
        val3 = self.Value('i', 5, lock=lock)
        lock3 = val3.get_lock()
        obj3 = val3.get_obj()
        self.assertEqual(lock, lock3)

        # lock=False yields a raw wrapper without lock accessors.
        arr4 = self.Value('i', 5, lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))

        # A non-lock, non-bool lock argument is rejected.
        self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')

        arr5 = self.RawValue('i', 5)
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
    """Shared ctypes Array/RawArray: slicing, cross-process mutation,
    zero-initialization, and the lock-related keyword variants."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def f(cls, seq):
        # In-place prefix sum; run on both a plain list and a shared array
        # so results can be compared.
        for i in range(1, len(seq)):
            seq[i] += seq[i-1]

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array(self, raw=False):
        seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
        if raw:
            arr = self.RawArray('i', seq)
        else:
            arr = self.Array('i', seq)

        self.assertEqual(len(arr), len(seq))
        self.assertEqual(arr[3], seq[3])
        self.assertEqual(list(arr[2:7]), list(seq[2:7]))

        arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])

        self.assertEqual(list(arr[:]), seq)

        # Apply the same transform locally and in a child; both must agree.
        self.f(seq)

        p = self.Process(target=self.f, args=(arr,))
        p.daemon = True
        p.start()
        p.join()

        self.assertEqual(list(arr[:]), seq)

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array_from_size(self):
        size = 10
        # Test for zeroing (see issue #11675).
        # The repetition below strengthens the test by increasing the chances
        # of previously allocated non-zero memory being used for the new array
        # on the 2nd and 3rd loops.
        for _ in range(3):
            arr = self.Array('i', size)
            self.assertEqual(len(arr), size)
            self.assertEqual(list(arr), [0] * size)
            arr[:] = range(10)
            self.assertEqual(list(arr), list(range(10)))
            del arr

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_rawarray(self):
        self.test_array(raw=True)

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_getobj_getlock_obj(self):
        arr1 = self.Array('i', list(range(10)))
        lock1 = arr1.get_lock()
        obj1 = arr1.get_obj()

        # lock=None behaves like the default (a lock is created).
        arr2 = self.Array('i', list(range(10)), lock=None)
        lock2 = arr2.get_lock()
        obj2 = arr2.get_obj()

        # An explicit lock object is used as-is.
        lock = self.Lock()
        arr3 = self.Array('i', list(range(10)), lock=lock)
        lock3 = arr3.get_lock()
        obj3 = arr3.get_obj()
        self.assertEqual(lock, lock3)

        # lock=False yields a raw wrapper without lock accessors.
        arr4 = self.Array('i', range(10), lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))

        # A non-lock, non-bool lock argument is rejected.
        self.assertRaises(AttributeError,
                          self.Array, 'i', range(10), lock='notalock')

        arr5 = self.RawArray('i', range(10))
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
    """Tests for manager-proxied containers (list/dict proxies, Namespace)."""

    ALLOWED_TYPES = ('manager',)

    def test_list(self):
        a = self.list(list(range(10)))
        self.assertEqual(a[:], list(range(10)))
        b = self.list()
        self.assertEqual(b[:], [])
        b.extend(list(range(5)))
        self.assertEqual(b[:], list(range(5)))
        self.assertEqual(b[2], 2)
        self.assertEqual(b[2:10], [2,3,4])
        b *= 2
        self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
        self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
        self.assertEqual(a[:], list(range(10)))
        # Proxies stored inside another proxied list stay live proxies.
        d = [a, b]
        e = self.list(d)
        self.assertEqual(
            [element[:] for element in e],
            [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
            )
        f = self.list([a])
        a.append('hello')
        self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])

    def test_list_iter(self):
        a = self.list(list(range(10)))
        it = iter(a)
        self.assertEqual(list(it), list(range(10)))
        self.assertEqual(list(it), [])  # exhausted
        # list modified during iteration
        it = iter(a)
        a[0] = 100
        self.assertEqual(next(it), 100)

    def test_list_proxy_in_list(self):
        a = self.list([self.list(range(3)) for _i in range(3)])
        self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
        # Mutating one nested proxy must not affect its siblings.
        a[0][-1] = 55
        self.assertEqual(a[0][:], [0, 1, 55])
        for i in range(1, 3):
            self.assertEqual(a[i][:], [0, 1, 2])
        self.assertEqual(a[1].pop(), 2)
        self.assertEqual(len(a[1]), 2)
        for i in range(0, 3, 2):
            self.assertEqual(len(a[i]), 3)
        del a
        # A proxied list containing itself must not break deletion.
        b = self.list()
        b.append(b)
        del b

    def test_dict(self):
        d = self.dict()
        indices = list(range(65, 70))
        for i in indices:
            d[i] = chr(i)
        self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
        self.assertEqual(sorted(d.keys()), indices)
        self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
        self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])

    def test_dict_iter(self):
        d = self.dict()
        indices = list(range(65, 70))
        for i in indices:
            d[i] = chr(i)
        it = iter(d)
        self.assertEqual(list(it), indices)
        self.assertEqual(list(it), [])  # exhausted
        # dictionary changed size during iteration
        it = iter(d)
        d.clear()
        self.assertRaises(RuntimeError, next, it)

    def test_dict_proxy_nested(self):
        pets = self.dict(ferrets=2, hamsters=4)
        supplies = self.dict(water=10, feed=3)
        d = self.dict(pets=pets, supplies=supplies)
        self.assertEqual(supplies['water'], 10)
        self.assertEqual(d['supplies']['water'], 10)
        # Writes through either the nested proxy or the outer dict must
        # be visible through both.
        d['supplies']['blankets'] = 5
        self.assertEqual(supplies['blankets'], 5)
        self.assertEqual(d['supplies']['blankets'], 5)
        d['supplies']['water'] = 7
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(d['supplies']['water'], 7)
        # Dropping local references must not invalidate the nested values.
        del pets
        del supplies
        self.assertEqual(d['pets']['ferrets'], 2)
        d['supplies']['blankets'] = 11
        self.assertEqual(d['supplies']['blankets'], 11)
        pets = d['pets']
        supplies = d['supplies']
        supplies['water'] = 7
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(d['supplies']['water'], 7)
        # Clearing the outer dict leaves re-fetched proxies functional.
        d.clear()
        self.assertEqual(len(d), 0)
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(pets['hamsters'], 4)
        l = self.list([pets, supplies])
        l[0]['marmots'] = 1
        self.assertEqual(pets['marmots'], 1)
        self.assertEqual(l[0]['marmots'], 1)
        del pets
        del supplies
        self.assertEqual(l[0]['marmots'], 1)
        outer = self.list([[88, 99], l])
        self.assertIsInstance(outer[0], list)  # Not a ListProxy
        self.assertEqual(outer[-1][-1]['feed'], 3)

    def test_namespace(self):
        n = self.Namespace()
        n.name = 'Bob'
        n.job = 'Builder'
        n._hidden = 'hidden'
        self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
        del n.job
        # Underscore attributes are excluded from the repr.
        self.assertEqual(str(n), "Namespace(name='Bob')")
        self.assertTrue(hasattr(n, 'name'))
        self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
    """Square *x*, sleeping *wait* seconds first (for timing-based tests)."""
    time.sleep(wait)
    return x ** 2
def mul(x, y):
    """Return the product of *x* and *y* (worker for the starmap tests)."""
    product = x * y
    return product
def raise_large_valuerror(wait):
    """Sleep *wait* seconds, then raise a ValueError with a ~1 MiB message.

    The oversized message is used by the pool fail-fast tests to fill
    the result pipe.
    """
    time.sleep(wait)
    message = "x" * 1024**2
    raise ValueError(message)
def identity(x):
    # Pass-through worker: lets the pool tests track argument/result
    # object lifetimes without any transformation getting in the way.
    return x
class CountedObject(object):
    """Object that tracks its live-instance count in ``n_instances``.

    The counter is bumped in ``__new__`` and dropped in ``__del__`` so the
    tests can assert that no instances are kept alive by the pool.
    """

    n_instances = 0

    def __new__(cls):
        cls.n_instances += 1
        return super().__new__(cls)

    def __del__(self):
        type(self).n_instances -= 1
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
    """Yield 0 .. total-1, raising SayWhenError instead of yielding *when*.

    A *when* of -1 raises immediately, before anything is yielded.
    """
    if when == -1:
        raise SayWhenError("Somebody said when")
    for value in range(total):
        if value == when:
            raise SayWhenError("Somebody said when")
        yield value
class _TestPool(BaseTestCase):
    """Tests for Pool: apply/map/imap variants, error paths and lifecycle.

    A single 4-worker pool is shared by all tests via setUpClass.
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.pool = cls.Pool(4)

    @classmethod
    def tearDownClass(cls):
        cls.pool.terminate()
        cls.pool.join()
        cls.pool = None
        super().tearDownClass()

    def test_apply(self):
        papply = self.pool.apply
        self.assertEqual(papply(sqr, (5,)), sqr(5))
        self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))

    def test_map(self):
        pmap = self.pool.map
        self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
        self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
                         list(map(sqr, list(range(100)))))

    def test_starmap(self):
        psmap = self.pool.starmap
        tuples = list(zip(range(10), range(9,-1, -1)))
        self.assertEqual(psmap(mul, tuples),
                         list(itertools.starmap(mul, tuples)))
        tuples = list(zip(range(100), range(99,-1, -1)))
        self.assertEqual(psmap(mul, tuples, chunksize=20),
                         list(itertools.starmap(mul, tuples)))

    def test_starmap_async(self):
        tuples = list(zip(range(100), range(99,-1, -1)))
        self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
                         list(itertools.starmap(mul, tuples)))

    def test_map_async(self):
        self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
                         list(map(sqr, list(range(10)))))

    def test_map_async_callbacks(self):
        call_args = self.manager.list() if self.TYPE == 'manager' else []
        # Success path: callback receives the result list.
        self.pool.map_async(int, ['1'],
                            callback=call_args.append,
                            error_callback=call_args.append).wait()
        self.assertEqual(1, len(call_args))
        self.assertEqual([1], call_args[0])
        # Failure path: error_callback receives the exception instead.
        self.pool.map_async(int, ['a'],
                            callback=call_args.append,
                            error_callback=call_args.append).wait()
        self.assertEqual(2, len(call_args))
        self.assertIsInstance(call_args[1], ValueError)

    def test_map_unplicklable(self):
        # Issue #19425 -- failure to pickle should not cause a hang
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        class A(object):
            def __reduce__(self):
                raise RuntimeError('cannot pickle')
        with self.assertRaises(RuntimeError):
            self.pool.map(sqr, [A()]*10)

    def test_map_chunksize(self):
        try:
            self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
        except multiprocessing.TimeoutError:
            self.fail("pool.map_async with chunksize stalled on null list")

    def test_map_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very first of the iterable
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
        # again, make sure it's reentrant
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
        class SpecialIterable:
            # Iterable that fails on the first __next__ but claims len() 1.
            def __iter__(self):
                return self
            def __next__(self):
                raise SayWhenError
            def __len__(self):
                return 1
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, SpecialIterable(), 1)
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, SpecialIterable(), 1)

    def test_async(self):
        res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
        get = TimingWrapper(res.get)
        self.assertEqual(get(), 49)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)

    def test_async_timeout(self):
        # The task outlives the timeout, so get() must raise TimeoutError
        # after roughly TIMEOUT2 seconds.
        res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
        get = TimingWrapper(res.get)
        self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)

    def test_imap(self):
        it = self.pool.imap(sqr, list(range(10)))
        self.assertEqual(list(it), list(map(sqr, list(range(10)))))
        it = self.pool.imap(sqr, list(range(10)))
        for i in range(10):
            self.assertEqual(next(it), i*i)
        self.assertRaises(StopIteration, it.__next__)
        it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
        for i in range(1000):
            self.assertEqual(next(it), i*i)
        self.assertRaises(StopIteration, it.__next__)

    def test_imap_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very first of the iterable
        it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
        self.assertRaises(SayWhenError, it.__next__)
        # again, make sure it's reentrant
        it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
        self.assertRaises(SayWhenError, it.__next__)
        it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
        for i in range(3):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)
        # SayWhenError seen at start of problematic chunk's results
        it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
        for i in range(6):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)
        it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
        for i in range(4):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)

    def test_imap_unordered(self):
        # Results may arrive in any order, so compare after sorting.
        it = self.pool.imap_unordered(sqr, list(range(10)))
        self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
        it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
        self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))

    def test_imap_unordered_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very first of the iterable
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(1, -1),
                                      1)
        self.assertRaises(SayWhenError, it.__next__)
        # again, make sure it's reentrant
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(1, -1),
                                      1)
        self.assertRaises(SayWhenError, it.__next__)
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(10, 3),
                                      1)
        expected_values = list(map(sqr, list(range(10))))
        with self.assertRaises(SayWhenError):
            # imap_unordered makes it difficult to anticipate the SayWhenError
            for i in range(10):
                value = next(it)
                self.assertIn(value, expected_values)
                expected_values.remove(value)
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(20, 7),
                                      2)
        expected_values = list(map(sqr, list(range(20))))
        with self.assertRaises(SayWhenError):
            for i in range(20):
                value = next(it)
                self.assertIn(value, expected_values)
                expected_values.remove(value)

    def test_make_pool(self):
        # Non-positive worker counts are rejected (wrapped in RemoteError
        # when going through a manager).
        expected_error = (RemoteError if self.TYPE == 'manager'
                          else ValueError)
        self.assertRaises(expected_error, self.Pool, -1)
        self.assertRaises(expected_error, self.Pool, 0)
        if self.TYPE != 'manager':
            p = self.Pool(3)
            try:
                self.assertEqual(3, len(p._pool))
            finally:
                p.close()
                p.join()

    def test_terminate(self):
        result = self.pool.map_async(
            time.sleep, [0.1 for i in range(10000)], chunksize=1
            )
        self.pool.terminate()
        join = TimingWrapper(self.pool.join)
        join()
        # Sanity check the pool didn't wait for all tasks to finish
        self.assertLess(join.elapsed, 2.0)

    def test_empty_iterable(self):
        # See Issue 12157
        p = self.Pool(1)
        self.assertEqual(p.map(sqr, []), [])
        self.assertEqual(list(p.imap(sqr, [])), [])
        self.assertEqual(list(p.imap_unordered(sqr, [])), [])
        self.assertEqual(p.map_async(sqr, []).get(), [])
        p.close()
        p.join()

    def test_context(self):
        # Exiting the with-block terminates the pool; afterwards new work
        # must be rejected.
        if self.TYPE == 'processes':
            L = list(range(10))
            expected = [sqr(i) for i in L]
            with self.Pool(2) as p:
                r = p.map_async(sqr, L)
                self.assertEqual(r.get(), expected)
            p.join()
            self.assertRaises(ValueError, p.map_async, sqr, L)

    @classmethod
    def _test_traceback(cls):
        # NB: the trailing "# some comment" is matched against the remote
        # traceback text in test_traceback -- keep this line exactly as-is.
        raise RuntimeError(123) # some comment

    @unittest.skipIf(True, "fails with is_dill(obj, child=True)")
    def test_traceback(self):
        # We want ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
        if self.TYPE == 'processes':
            with self.Pool(1) as p:
                try:
                    p.apply(self._test_traceback)
                except Exception as e:
                    exc = e
                else:
                    self.fail('expected RuntimeError')
            p.join()
            self.assertIs(type(exc), RuntimeError)
            self.assertEqual(exc.args, (123,))
            cause = exc.__cause__
            self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
            self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
            with test.support.captured_stderr() as f1:
                try:
                    raise exc
                except RuntimeError:
                    sys.excepthook(*sys.exc_info())
            self.assertIn('raise RuntimeError(123) # some comment',
                          f1.getvalue())
            # _helper_reraises_exception should not make the error
            # a remote exception
            with self.Pool(1) as p:
                try:
                    p.map(sqr, exception_throwing_generator(1, -1), 1)
                except Exception as e:
                    exc = e
                else:
                    self.fail('expected SayWhenError')
                self.assertIs(type(exc), SayWhenError)
                self.assertIs(exc.__cause__, None)
            p.join()

    @classmethod
    def _test_wrapped_exception(cls):
        raise RuntimeError('foo')

    @unittest.skipIf(True, "fails with is_dill(obj, child=True)")
    def test_wrapped_exception(self):
        # Issue #20980: Should not wrap exception when using thread pool
        with self.Pool(1) as p:
            with self.assertRaises(RuntimeError):
                p.apply(self._test_wrapped_exception)
        p.join()

    def test_map_no_failfast(self):
        # Issue #23992: the fail-fast behaviour when an exception is raised
        # during map() would make Pool.join() deadlock, because a worker
        # process would fill the result queue (after the result handler thread
        # terminated, hence not draining it anymore).
        t_start = getattr(time,'monotonic',time.time)()
        with self.assertRaises(ValueError):
            with self.Pool(2) as p:
                try:
                    p.map(raise_large_valuerror, [0, 1])
                finally:
                    time.sleep(0.5)
                    p.close()
                    p.join()
        # check that we indeed waited for all jobs
        self.assertGreater(getattr(time,'monotonic',time.time)() - t_start, 0.9)

    def test_release_task_refs(self):
        # Issue #29861: task arguments and results should not be kept
        # alive after we are done with them.
        objs = [CountedObject() for i in range(10)]
        refs = [weakref.ref(o) for o in objs]
        self.pool.map(identity, objs)
        del objs
        time.sleep(DELTA)  # let threaded cleanup code run
        self.assertEqual(set(wr() for wr in refs), {None})
        # With a process pool, copies of the objects are returned, check
        # they were released too.
        self.assertEqual(CountedObject.n_instances, 0)

    def test_enter(self):
        if self.TYPE == 'manager':
            self.skipTest("test not applicable to manager")
        pool = self.Pool(1)
        with pool:
            pass
            # call pool.terminate()
        # pool is no longer running
        with self.assertRaises(ValueError):
            # bpo-35477: pool.__enter__() fails if the pool is not running
            with pool:
                pass
        pool.join()

    def test_resource_warning(self):
        if self.TYPE == 'manager':
            self.skipTest("test not applicable to manager")
        pool = self.Pool(1)
        pool.terminate()
        pool.join()
        # force state to RUN to emit ResourceWarning in __del__()
        pool._state = multiprocessing.pool.RUN
        with support.check_warnings(('unclosed running multiprocessing pool',
                                     ResourceWarning)):
            pool = None
            support.gc_collect()
def raising():
    # Worker that always fails; used to test error_callback delivery.
    raise KeyError("key")
def unpickleable_result():
    # Returns a lambda, which the stock pickle module cannot serialize,
    # forcing a result-encoding error in the pool.  NOTE(review): the test
    # using this is disabled in this fork (leading underscore), presumably
    # because its serializer can handle lambdas -- confirm.
    return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
    """Error propagation from pool workers back to the parent process."""

    ALLOWED_TYPES = ('processes', )

    def test_async_error_callback(self):
        p = multiprocessing.Pool(2)
        scratchpad = [None]
        def errback(exc):
            scratchpad[0] = exc
        res = p.apply_async(raising, error_callback=errback)
        # get() re-raises the worker's exception AND the error callback
        # must have received the same exception object type.
        self.assertRaises(KeyError, res.get)
        self.assertTrue(scratchpad[0])
        self.assertIsInstance(scratchpad[0], KeyError)
        p.close()
        p.join()

    # NOTE(review): leading underscore disables this test in this fork --
    # likely because the serializer used here can encode lambdas; confirm.
    def _test_unpickleable_result(self):
        from multiprocess.pool import MaybeEncodingError
        p = multiprocessing.Pool(2)
        # Make sure we don't lose pool processes because of encoding errors.
        for iteration in range(20):
            scratchpad = [None]
            def errback(exc):
                scratchpad[0] = exc
            res = p.apply_async(unpickleable_result, error_callback=errback)
            self.assertRaises(MaybeEncodingError, res.get)
            wrapped = scratchpad[0]
            self.assertTrue(wrapped)
            self.assertIsInstance(scratchpad[0], MaybeEncodingError)
            self.assertIsNotNone(wrapped.exc)
            self.assertIsNotNone(wrapped.value)
        p.close()
        p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
    """Tests for maxtasksperchild worker recycling and shutdown ordering."""

    ALLOWED_TYPES = ('processes', )

    def test_pool_worker_lifetime(self):
        p = multiprocessing.Pool(3, maxtasksperchild=10)
        self.assertEqual(3, len(p._pool))
        origworkerpids = [w.pid for w in p._pool]
        # Run many tasks so each worker gets replaced (hopefully)
        results = []
        for i in range(100):
            results.append(p.apply_async(sqr, (i, )))
        # Fetch the results and verify we got the right answers,
        # also ensuring all the tasks have completed.
        for (j, res) in enumerate(results):
            self.assertEqual(res.get(), sqr(j))
        # Refill the pool
        p._repopulate_pool()
        # Wait until all workers are alive
        # (countdown * DELTA = 5 seconds max startup process time)
        countdown = 50
        while countdown and not all(w.is_alive() for w in p._pool):
            countdown -= 1
            time.sleep(DELTA)
        finalworkerpids = [w.pid for w in p._pool]
        # All pids should be assigned.  See issue #7805.
        self.assertNotIn(None, origworkerpids)
        self.assertNotIn(None, finalworkerpids)
        # Finally, check that the worker pids have changed
        self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
        p.close()
        p.join()

    def test_pool_worker_lifetime_early_close(self):
        # Issue #10332: closing a pool whose workers have limited lifetimes
        # before all the tasks completed would make join() hang.
        p = multiprocessing.Pool(3, maxtasksperchild=1)
        results = []
        for i in range(6):
            results.append(p.apply_async(sqr, (i, 0.3)))
        p.close()
        p.join()
        # check the results
        for (j, res) in enumerate(results):
            self.assertEqual(res.get(), sqr(j))

    def test_worker_finalization_via_atexit_handler_of_multiprocessing(self):
        # tests cases against bpo-38744 and bpo-39360
        cmd = '''if 1:
            from multiprocessing import Pool
            problem = None
            class A:
                def __init__(self):
                    self.pool = Pool(processes=1)
            def test():
                global problem
                problem = A()
                problem.pool.map(float, tuple(range(10)))
            if __name__ == "__main__":
                test()
        '''
        rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
        self.assertEqual(rc, 0)
#
# Test of creating a customized manager class
#
from multiprocess.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
    """Target object for the customized-manager tests.

    Provides one plain public method, one method that raises, and one
    underscore-private method (used to check method exposure rules).
    """

    def f(self):
        return 'f()'

    def g(self):
        raise ValueError

    def _h(self):
        return '_h()'
def baz():
    """Generate the first ten square numbers (served via IteratorProxy)."""
    yield from (n * n for n in range(10))
class IteratorProxy(BaseProxy):
    """Proxy exposing only __next__, making the remote object iterable
    from the client side."""
    _exposed_ = ('__next__',)
    def __iter__(self):
        return self
    def __next__(self):
        # Forward the iteration step to the managed object.
        return self._callmethod('__next__')
class MyManager(BaseManager):
    """Custom manager; its proxy types are attached below via register()."""
    pass
# 'Foo' exposes all public methods of FooBar; 'Bar' explicitly exposes
# f and the private _h; 'baz' wraps the generator in IteratorProxy.
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
    """Exercises the customized MyManager registrations defined above."""

    ALLOWED_TYPES = ('manager',)

    def test_mymanager(self):
        manager = MyManager()
        manager.start()
        self.common(manager)
        manager.shutdown()
        # bpo-30356: BaseManager._finalize_manager() sends SIGTERM
        # to the manager process if it takes longer than 1 second to stop,
        # which happens on slow buildbots.
        self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))

    def test_mymanager_context(self):
        with MyManager() as manager:
            self.common(manager)
        # bpo-30356: BaseManager._finalize_manager() sends SIGTERM
        # to the manager process if it takes longer than 1 second to stop,
        # which happens on slow buildbots.
        self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))

    def test_mymanager_context_prestarted(self):
        manager = MyManager()
        manager.start()
        with manager:
            self.common(manager)
        self.assertEqual(manager._process.exitcode, 0)

    def common(self, manager):
        """Shared assertions for every start/stop flavour above."""
        foo = manager.Foo()
        bar = manager.Bar()
        baz = manager.baz()
        # 'Foo' exposes public methods only; 'Bar' exposes exactly f and _h.
        foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
        bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
        self.assertEqual(foo_methods, ['f', 'g'])
        self.assertEqual(bar_methods, ['f', '_h'])
        self.assertEqual(foo.f(), 'f()')
        self.assertRaises(ValueError, foo.g)
        self.assertEqual(foo._callmethod('f'), 'f()')
        # _h is not exposed on Foo, so calling it remotely is an error.
        self.assertRaises(RemoteError, foo._callmethod, '_h')
        self.assertEqual(bar.f(), 'f()')
        self.assertEqual(bar._h(), '_h()')
        self.assertEqual(bar._callmethod('f'), 'f()')
        self.assertEqual(bar._callmethod('_h'), '_h()')
        self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
# Queue held by the server process; get_queue is the registered accessor.
_queue = pyqueue.Queue()
def get_queue():
    return _queue
class QueueManager(BaseManager):
    '''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
    '''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')

# Use xmlrpclib instead of pickle so serialization failures can be tested.
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
    """Connecting to a remote manager server using xmlrpclib serialization."""

    ALLOWED_TYPES = ('manager',)
    # Sample payloads covering text, bytes and non-ASCII data.
    values = ['hello world', None, True, 2.25,
              'hall\xe5 v\xe4rlden',
              '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
              b'hall\xe5 v\xe4rlden',
             ]
    result = values[:]

    @classmethod
    def _putter(cls, address, authkey):
        # Child process: connect with the *other* manager class and push
        # the sample values through the shared queue.
        manager = QueueManager2(
            address=address, authkey=authkey, serializer=SERIALIZER
            )
        manager.connect()
        queue = manager.get_queue()
        # Note that xmlrpclib will deserialize object as a list not a tuple
        queue.put(tuple(cls.values))

    def test_remote(self):
        authkey = os.urandom(32)
        manager = QueueManager(
            address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER
            )
        manager.start()
        self.addCleanup(manager.shutdown)
        p = self.Process(target=self._putter, args=(manager.address, authkey))
        p.daemon = True
        p.start()
        manager2 = QueueManager2(
            address=manager.address, authkey=authkey, serializer=SERIALIZER
            )
        manager2.connect()
        queue = manager2.get_queue()
        self.assertEqual(queue.get(), self.result)
        # Because we are using xmlrpclib for serialization instead of
        # pickle this will cause a serialization error.
        self.assertRaises(Exception, queue.put, time.sleep)
        # Make queue finalizer run before the server is stopped
        del queue
@hashlib_helper.requires_hashdigest('md5')
class _TestManagerRestart(BaseTestCase):
    """Restarting a manager on the same address shortly after shutdown."""

    @classmethod
    def _putter(cls, address, authkey):
        manager = QueueManager(
            address=address, authkey=authkey, serializer=SERIALIZER)
        manager.connect()
        queue = manager.get_queue()
        queue.put('hello world')

    def test_rapid_restart(self):
        authkey = os.urandom(32)
        manager = QueueManager(
            address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER)
        try:
            srvr = manager.get_server()
            addr = srvr.address
            # Close the connection.Listener socket which gets opened as a part
            # of manager.get_server(). It's not needed for the test.
            srvr.listener.close()
            manager.start()
            p = self.Process(target=self._putter, args=(manager.address, authkey))
            p.start()
            p.join()
            queue = manager.get_queue()
            self.assertEqual(queue.get(), 'hello world')
            del queue
        finally:
            if hasattr(manager, "shutdown"):
                manager.shutdown()
        # Immediately rebind to the same address; EADDRINUSE may occur if
        # the old socket is still lingering.
        manager = QueueManager(
            address=addr, authkey=authkey, serializer=SERIALIZER)
        try:
            manager.start()
            self.addCleanup(manager.shutdown)
        except OSError as e:
            if e.errno != errno.EADDRINUSE:
                raise
            # Retry after some time, in case the old socket was lingering
            # (sporadic failure on buildbots)
            time.sleep(1.0)
            manager = QueueManager(
                address=addr, authkey=authkey, serializer=SERIALIZER)
            if hasattr(manager, "shutdown"):
                self.addCleanup(manager.shutdown)
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
    """Tests for Connection objects created by Pipe()."""

    ALLOWED_TYPES = ('processes', 'threads')

    @classmethod
    def _echo(cls, conn):
        # Child: echo every byte-message back until the SENTINEL arrives.
        for msg in iter(conn.recv_bytes, SENTINEL):
            conn.send_bytes(msg)
        conn.close()

    def test_connection(self):
        conn, child_conn = self.Pipe()
        p = self.Process(target=self._echo, args=(child_conn,))
        p.daemon = True
        p.start()
        seq = [1, 2.25, None]
        msg = latin('hello world')
        longmsg = msg * 10
        arr = array.array('i', list(range(4)))
        if self.TYPE == 'processes':
            self.assertEqual(type(conn.fileno()), int)
        # send()/send_bytes() return None; the echo child sends data back.
        self.assertEqual(conn.send(seq), None)
        self.assertEqual(conn.recv(), seq)
        self.assertEqual(conn.send_bytes(msg), None)
        self.assertEqual(conn.recv_bytes(), msg)
        if self.TYPE == 'processes':
            # recv_bytes_into: plain, with offset, and buffer-too-short.
            buffer = array.array('i', [0]*10)
            expected = list(arr) + [0] * (10 - len(arr))
            self.assertEqual(conn.send_bytes(arr), None)
            self.assertEqual(conn.recv_bytes_into(buffer),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)
            buffer = array.array('i', [0]*10)
            expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
            self.assertEqual(conn.send_bytes(arr), None)
            self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)
            buffer = bytearray(latin(' ' * 40))
            self.assertEqual(conn.send_bytes(longmsg), None)
            try:
                res = conn.recv_bytes_into(buffer)
            except multiprocessing.BufferTooShort as e:
                self.assertEqual(e.args, (longmsg,))
            else:
                self.fail('expected BufferTooShort, got %s' % res)
        # poll(): immediate, negative timeout, full timeout, and data ready.
        poll = TimingWrapper(conn.poll)
        self.assertEqual(poll(), False)
        self.assertTimingAlmostEqual(poll.elapsed, 0)
        self.assertEqual(poll(-1), False)
        self.assertTimingAlmostEqual(poll.elapsed, 0)
        self.assertEqual(poll(TIMEOUT1), False)
        self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
        conn.send(None)
        time.sleep(.1)
        self.assertEqual(poll(TIMEOUT1), True)
        self.assertTimingAlmostEqual(poll.elapsed, 0)
        self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb
        conn.send_bytes(really_big_msg)
        self.assertEqual(conn.recv_bytes(), really_big_msg)
        conn.send_bytes(SENTINEL) # tell child to quit
        child_conn.close()
        if self.TYPE == 'processes':
            self.assertEqual(conn.readable, True)
            self.assertEqual(conn.writable, True)
            self.assertRaises(EOFError, conn.recv)
            self.assertRaises(EOFError, conn.recv_bytes)
        p.join()

    def test_duplex_false(self):
        # A simplex pipe enforces direction on each end.
        reader, writer = self.Pipe(duplex=False)
        self.assertEqual(writer.send(1), None)
        self.assertEqual(reader.recv(), 1)
        if self.TYPE == 'processes':
            self.assertEqual(reader.readable, True)
            self.assertEqual(reader.writable, False)
            self.assertEqual(writer.readable, False)
            self.assertEqual(writer.writable, True)
            self.assertRaises(OSError, reader.send, 2)
            self.assertRaises(OSError, writer.recv)
            self.assertRaises(OSError, writer.poll)

    def test_spawn_close(self):
        # We test that a pipe connection can be closed by parent
        # process immediately after child is spawned.  On Windows this
        # would have sometimes failed on old versions because
        # child_conn would be closed before the child got a chance to
        # duplicate it.
        conn, child_conn = self.Pipe()
        p = self.Process(target=self._echo, args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()    # this might complete before child initializes
        msg = latin('hello')
        conn.send_bytes(msg)
        self.assertEqual(conn.recv_bytes(), msg)
        conn.send_bytes(SENTINEL)
        conn.close()
        p.join()

    def test_sendbytes(self):
        # send_bytes(buf, offset, size) slicing semantics and bounds checks.
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        msg = latin('abcdefghijklmnopqrstuvwxyz')
        a, b = self.Pipe()
        a.send_bytes(msg)
        self.assertEqual(b.recv_bytes(), msg)
        a.send_bytes(msg, 5)
        self.assertEqual(b.recv_bytes(), msg[5:])
        a.send_bytes(msg, 7, 8)
        self.assertEqual(b.recv_bytes(), msg[7:7+8])
        a.send_bytes(msg, 26)
        self.assertEqual(b.recv_bytes(), latin(''))
        a.send_bytes(msg, 26, 0)
        self.assertEqual(b.recv_bytes(), latin(''))
        self.assertRaises(ValueError, a.send_bytes, msg, 27)
        self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
        self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
        self.assertRaises(ValueError, a.send_bytes, msg, -1)
        self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)

    @classmethod
    def _is_fd_assigned(cls, fd):
        # True if *fd* refers to an open file descriptor.
        try:
            os.fstat(fd)
        except OSError as e:
            if e.errno == errno.EBADF:
                return False
            raise
        else:
            return True

    @classmethod
    def _writefd(cls, conn, data, create_dummy_fds=False):
        # Child: receive a file descriptor over *conn* and write *data* to it.
        # create_dummy_fds fills every fd < 256 so the received fd is large
        # (exercises issue #11657).
        if create_dummy_fds:
            for i in range(0, 256):
                if not cls._is_fd_assigned(i):
                    os.dup2(conn.fileno(), i)
        fd = reduction.recv_handle(conn)
        if msvcrt:
            fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
        os.write(fd, data)
        os.close(fd)

    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    def test_fd_transfer(self):
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)
        p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
        p.daemon = True
        p.start()
        self.addCleanup(test.support.unlink, test.support.TESTFN)
        with open(test.support.TESTFN, "wb") as f:
            fd = f.fileno()
            if msvcrt:
                fd = msvcrt.get_osfhandle(fd)
            reduction.send_handle(conn, fd, p.pid)
        p.join()
        with open(test.support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"foo")

    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    @unittest.skipIf(sys.platform == "win32",
                     "test semantics don't make sense on Windows")
    @unittest.skipIf(MAXFD <= 256,
                     "largest assignable fd number is too small")
    @unittest.skipUnless(hasattr(os, "dup2"),
                         "test needs os.dup2()")
    def test_large_fd_transfer(self):
        # With fd > 256 (issue #11657)
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)
        p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
        p.daemon = True
        p.start()
        self.addCleanup(test.support.unlink, test.support.TESTFN)
        with open(test.support.TESTFN, "wb") as f:
            fd = f.fileno()
            for newfd in range(256, MAXFD):
                if not self._is_fd_assigned(newfd):
                    break
            else:
                self.fail("could not find an unassigned large file descriptor")
            os.dup2(fd, newfd)
            try:
                reduction.send_handle(conn, newfd, p.pid)
            finally:
                os.close(newfd)
        p.join()
        with open(test.support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"bar")

    @classmethod
    def _send_data_without_fd(self, conn):
        # Child: write a byte with no accompanying fd in ancillary data.
        os.write(conn.fileno(), b"\0")

    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    @unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
    def test_missing_fd_transfer(self):
        # Check that exception is raised when received data is not
        # accompanied by a file descriptor in ancillary data.
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)
        p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
        p.daemon = True
        p.start()
        self.assertRaises(RuntimeError, reduction.recv_handle, conn)
        p.join()

    def test_context(self):
        # Connections are context managers; exiting closes both ends.
        a, b = self.Pipe()
        with a, b:
            a.send(1729)
            self.assertEqual(b.recv(), 1729)
            if self.TYPE == 'processes':
                self.assertFalse(a.closed)
                self.assertFalse(b.closed)
        if self.TYPE == 'processes':
            self.assertTrue(a.closed)
            self.assertTrue(b.closed)
            self.assertRaises(OSError, a.recv)
            self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
    """Tests for connection.Listener binding and context-manager behaviour."""

    ALLOWED_TYPES = ('processes',)

    def test_multiple_bind(self):
        # Binding a second listener to an already-bound address must fail.
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            self.addCleanup(l.close)
            self.assertRaises(OSError, self.connection.Listener,
                              l.address, family)

    def test_context(self):
        with self.connection.Listener() as l:
            with self.connection.Client(l.address) as c:
                with l.accept() as d:
                    c.send(1729)
                    self.assertEqual(d.recv(), 1729)
        # After the with-block the listener is closed; accept() must fail.
        if self.TYPE == 'processes':
            self.assertRaises(OSError, l.accept)

    @unittest.skipUnless(util.abstract_sockets_supported,
                         "test needs abstract socket support")
    def test_abstract_socket(self):
        # Linux abstract-namespace addresses start with a NUL byte.
        with self.connection.Listener("\0something") as listener:
            with self.connection.Client(listener.address) as client:
                with listener.accept() as d:
                    client.send(1729)
                    self.assertEqual(d.recv(), 1729)
        if self.TYPE == 'processes':
            self.assertRaises(OSError, listener.accept)
class _TestListenerClient(BaseTestCase):
    """Listener/Client round trips across every supported address family."""

    ALLOWED_TYPES = ('processes', 'threads')

    @classmethod
    def _test(cls, address):
        # Child: connect, send one message, disconnect.
        conn = cls.connection.Client(address)
        conn.send('hello')
        conn.close()

    def test_listener_client(self):
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            p = self.Process(target=self._test, args=(l.address,))
            p.daemon = True
            p.start()
            conn = l.accept()
            self.assertEqual(conn.recv(), 'hello')
            p.join()
            l.close()

    def test_issue14725(self):
        l = self.connection.Listener()
        p = self.Process(target=self._test, args=(l.address,))
        p.daemon = True
        p.start()
        time.sleep(1)
        # On Windows the client process should by now have connected,
        # written data and closed the pipe handle by now.  This causes
        # ConnectNamdedPipe() to fail with ERROR_NO_DATA.  See Issue
        # 14725.
        conn = l.accept()
        self.assertEqual(conn.recv(), 'hello')
        conn.close()
        p.join()
        l.close()

    def test_issue16955(self):
        # poll() on the client end must see data sent from the accepted end.
        for fam in self.connection.families:
            l = self.connection.Listener(family=fam)
            c = self.connection.Client(l.address)
            a = l.accept()
            a.send_bytes(b"hello")
            self.assertTrue(c.poll(1))
            a.close()
            c.close()
            l.close()
class _TestPoll(BaseTestCase):
    """Tests for Connection.poll(): empty messages, message boundaries and
    the guarantee that distinct messages are never merged."""

    ALLOWED_TYPES = ('processes', 'threads')

    def test_empty_string(self):
        # An empty bytes message must still make poll() report readability.
        a, b = self.Pipe()
        self.assertEqual(a.poll(), False)
        b.send_bytes(b'')
        self.assertEqual(a.poll(), True)
        self.assertEqual(a.poll(), True)

    @classmethod
    def _child_strings(cls, conn, strings):
        # Child body: send each message with a small delay between them.
        for s in strings:
            time.sleep(0.1)
            conn.send_bytes(s)
        conn.close()

    def test_strings(self):
        strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
        a, b = self.Pipe()
        p = self.Process(target=self._child_strings, args=(b, strings))
        p.start()

        for s in strings:
            # Poll in short steps (up to ~2s total) until the message lands.
            for i in range(200):
                if a.poll(0.01):
                    break
            x = a.recv_bytes()
            self.assertEqual(s, x)

        p.join()

    @classmethod
    def _child_boundaries(cls, r):
        # Polling may "pull" a message in to the child process, but we
        # don't want it to pull only part of a message, as that would
        # corrupt the pipe for any other processes which might later
        # read from it.
        r.poll(5)

    def test_boundaries(self):
        # After the child has polled (and possibly consumed one message),
        # the parent must still be able to read one intact message.
        r, w = self.Pipe(False)
        p = self.Process(target=self._child_boundaries, args=(r,))
        p.start()
        time.sleep(2)
        L = [b"first", b"second"]
        for obj in L:
            w.send_bytes(obj)
        w.close()
        p.join()
        self.assertIn(r.recv_bytes(), L)

    @classmethod
    def _child_dont_merge(cls, b):
        b.send_bytes(b'a')
        b.send_bytes(b'b')
        b.send_bytes(b'cd')

    def test_dont_merge(self):
        # Each send_bytes() must be received as a separate message even
        # when several are queued; poll() must not consume them.
        a, b = self.Pipe()
        self.assertEqual(a.poll(0.0), False)
        self.assertEqual(a.poll(0.1), False)

        p = self.Process(target=self._child_dont_merge, args=(b,))
        p.start()

        self.assertEqual(a.recv_bytes(), b'a')
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.recv_bytes(), b'b')
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(0.0), True)
        self.assertEqual(a.recv_bytes(), b'cd')

        p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@hashlib_helper.requires_hashdigest('md5')
class _TestPicklingConnections(BaseTestCase):
    """Test sending Connection and socket objects between processes.

    A 'listener' child accepts connections and pickles the accepted
    connection back to the parent; a 'remote' child connects to the
    addresses the parent relays to it and echoes messages upper-cased.
    """

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def tearDownClass(cls):
        # Shut down the resource sharer that served the pickled handles.
        from multiprocess import resource_sharer
        resource_sharer.stop(timeout=support.LONG_TIMEOUT)

    @classmethod
    def _listener(cls, conn, families):
        # For each family: listen, report the address, accept one client
        # and send the accepted Connection object back over `conn`.
        for fam in families:
            l = cls.connection.Listener(family=fam)
            conn.send(l.address)
            new_conn = l.accept()
            conn.send(new_conn)
            new_conn.close()
            l.close()

        # Same dance with a raw socket instead of a Listener.
        l = socket.create_server((socket_helper.HOST, 0))
        conn.send(l.getsockname())
        new_conn, addr = l.accept()
        conn.send(new_conn)
        new_conn.close()
        l.close()

        conn.recv()

    @classmethod
    def _remote(cls, conn):
        # Receive (address, msg) pairs until a None sentinel, connecting to
        # each address and sending the message upper-cased.
        for (address, msg) in iter(conn.recv, None):
            client = cls.connection.Client(address)
            client.send(msg.upper())
            client.close()

        # Final leg uses a raw socket.
        address, msg = conn.recv()
        client = socket.socket()
        client.connect(address)
        client.sendall(msg.upper())
        client.close()

        conn.close()

    def test_pickling(self):
        families = self.connection.families

        lconn, lconn0 = self.Pipe()
        lp = self.Process(target=self._listener, args=(lconn0, families))
        lp.daemon = True
        lp.start()
        lconn0.close()

        rconn, rconn0 = self.Pipe()
        rp = self.Process(target=self._remote, args=(rconn0,))
        rp.daemon = True
        rp.start()
        rconn0.close()

        for fam in families:
            msg = ('This connection uses family %s' % fam).encode('ascii')
            address = lconn.recv()
            rconn.send((address, msg))
            # The accepted Connection is pickled across process boundaries.
            new_conn = lconn.recv()
            self.assertEqual(new_conn.recv(), msg.upper())

        rconn.send(None)

        msg = latin('This connection uses a normal socket')
        address = lconn.recv()
        rconn.send((address, msg))
        new_conn = lconn.recv()
        buf = []
        while True:
            s = new_conn.recv(100)
            if not s:
                break
            buf.append(s)
        buf = b''.join(buf)
        self.assertEqual(buf, msg.upper())
        new_conn.close()

        lconn.send(None)

        rconn.close()
        lconn.close()

        lp.join()
        rp.join()

    @classmethod
    def child_access(cls, conn):
        # Receives a writable end, greets through it, then receives a
        # readable end and echoes its message doubled.
        w = conn.recv()
        w.send('all is well')
        w.close()

        r = conn.recv()
        msg = r.recv()
        conn.send(msg*2)

        conn.close()

    def test_access(self):
        # On Windows, if we do not specify a destination pid when
        # using DupHandle then we need to be careful to use the
        # correct access flags for DuplicateHandle(), or else
        # DupHandle.detach() will raise PermissionError.  For example,
        # for a read only pipe handle we should use
        # access=FILE_GENERIC_READ.  (Unfortunately
        # DUPLICATE_SAME_ACCESS does not work.)
        conn, child_conn = self.Pipe()
        p = self.Process(target=self.child_access, args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()

        r, w = self.Pipe(duplex=False)
        conn.send(w)
        w.close()
        self.assertEqual(r.recv(), 'all is well')
        r.close()

        r, w = self.Pipe(duplex=False)
        conn.send(r)
        r.close()
        w.send('foobar')
        w.close()
        self.assertEqual(conn.recv(), 'foobar'*2)

        p.join()
#
#
#
class _TestHeap(BaseTestCase):
    """White-box tests of multiprocessing.heap: block accounting invariants
    and deadlock-free freeing from the garbage collector."""

    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        super().setUp()
        # Make pristine heap for these tests
        self.old_heap = multiprocessing.heap.BufferWrapper._heap
        multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()

    def tearDown(self):
        multiprocessing.heap.BufferWrapper._heap = self.old_heap
        super().tearDown()

    def test_heap(self):
        iterations = 5000
        maxblocks = 50
        blocks = []

        # get the heap object
        heap = multiprocessing.heap.BufferWrapper._heap
        # Disable arena trimming so free space stays visible to the checks.
        heap._DISCARD_FREE_SPACE_LARGER_THAN = 0

        # create and destroy lots of blocks of different sizes
        for i in range(iterations):
            size = int(random.lognormvariate(0, 1) * 1000)
            b = multiprocessing.heap.BufferWrapper(size)
            blocks.append(b)
            if len(blocks) > maxblocks:
                i = random.randrange(maxblocks)
                del blocks[i]
        del b

        # verify the state of the heap
        with heap._lock:
            all = []
            free = 0
            occupied = 0
            for L in list(heap._len_to_seq.values()):
                # count all free blocks in arenas
                for arena, start, stop in L:
                    all.append((heap._arenas.index(arena), start, stop,
                                stop-start, 'free'))
                    free += (stop-start)
            for arena, arena_blocks in heap._allocated_blocks.items():
                # count all allocated blocks in arenas
                for start, stop in arena_blocks:
                    all.append((heap._arenas.index(arena), start, stop,
                                stop-start, 'occupied'))
                    occupied += (stop-start)

            # Free + occupied space must account for every byte of every arena.
            self.assertEqual(free + occupied,
                             sum(arena.size for arena in heap._arenas))

            all.sort()

            # Consecutive blocks must tile each arena with no gaps/overlaps.
            for i in range(len(all)-1):
                (arena, start, stop) = all[i][:3]
                (narena, nstart, nstop) = all[i+1][:3]
                if arena != narena:
                    # Two different arenas
                    self.assertEqual(stop, heap._arenas[arena].size)  # last block
                    self.assertEqual(nstart, 0)         # first block
                else:
                    # Same arena: two adjacent blocks
                    self.assertEqual(stop, nstart)

        # test free'ing all blocks
        random.shuffle(blocks)
        while blocks:
            blocks.pop()

        # After everything is freed the heap must be completely empty.
        self.assertEqual(heap._n_frees, heap._n_mallocs)
        self.assertEqual(len(heap._pending_free_blocks), 0)
        self.assertEqual(len(heap._arenas), 0)
        self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
        self.assertEqual(len(heap._len_to_seq), 0)

    def test_free_from_gc(self):
        # Check that freeing of blocks by the garbage collector doesn't deadlock
        # (issue #12352).
        # Make sure the GC is enabled, and set lower collection thresholds to
        # make collections more frequent (and increase the probability of
        # deadlock).
        if not gc.isenabled():
            gc.enable()
            self.addCleanup(gc.disable)
        thresholds = gc.get_threshold()
        self.addCleanup(gc.set_threshold, *thresholds)
        gc.set_threshold(10)

        # perform numerous block allocations, with cyclic references to make
        # sure objects are collected asynchronously by the gc
        for i in range(5000):
            a = multiprocessing.heap.BufferWrapper(1)
            b = multiprocessing.heap.BufferWrapper(1)
            # circular references
            a.buddy = b
            b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
class _TestSharedCTypes(BaseTestCase):
    """Tests for multiprocess.sharedctypes: Value/Array sharing across a
    child process, synchronized (locked) variants, and copy()."""

    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        if not HAS_SHAREDCTYPES:
            self.skipTest("requires multiprocess.sharedctypes")

    @classmethod
    def _double(cls, x, y, z, foo, arr, string):
        # Child body: double every shared value in place.
        x.value *= 2
        y.value *= 2
        z.value *= 2
        foo.x *= 2
        foo.y *= 2
        string.value *= 2
        for i in range(len(arr)):
            arr[i] *= 2

    def test_sharedctypes(self, lock=False):
        # Mutations made by the child must be visible in the parent,
        # both with and without (lock=True via test_synchronize) locking.
        x = Value('i', 7, lock=lock)
        y = Value(c_double, 1.0/3.0, lock=lock)
        z = Value(c_longlong, 2 ** 33, lock=lock)
        foo = Value(_Foo, 3, 2, lock=lock)
        arr = self.Array('d', list(range(10)), lock=lock)
        string = self.Array('c', 20, lock=lock)
        string.value = latin('hello')

        p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
        p.daemon = True
        p.start()
        p.join()

        self.assertEqual(x.value, 14)
        self.assertAlmostEqual(y.value, 2.0/3.0)
        self.assertEqual(z.value, 2 ** 34)
        self.assertEqual(foo.x, 6)
        self.assertAlmostEqual(foo.y, 4.0)
        for i in range(10):
            self.assertAlmostEqual(arr[i], i*2)
        self.assertEqual(string.value, latin('hellohello'))

    def test_synchronize(self):
        self.test_sharedctypes(lock=True)

    def test_copy(self):
        # copy() must produce an independent snapshot of the structure.
        foo = _Foo(2, 5.0, 2 ** 33)
        bar = copy(foo)
        foo.x = 0
        foo.y = 0
        foo.z = 0
        self.assertEqual(bar.x, 2)
        self.assertAlmostEqual(bar.y, 5.0)
        self.assertEqual(bar.z, 2 ** 33)
@unittest.skipUnless(HAS_SHMEM, "requires multiprocess.shared_memory")
@hashlib_helper.requires_hashdigest('md5')
class _TestSharedMemory(BaseTestCase):
    """Tests for shared_memory.SharedMemory / ShareableList and the
    SharedMemoryManager: creation, attachment, cross-process visibility,
    pickling and resource-tracker cleanup."""

    ALLOWED_TYPES = ('processes',)

    @staticmethod
    def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data):
        # Child body: attach by name (str) or use an unpickled SharedMemory
        # object directly, then write `binary_data` at the start of the block.
        if isinstance(shmem_name_or_obj, str):
            local_sms = shared_memory.SharedMemory(shmem_name_or_obj)
        else:
            local_sms = shmem_name_or_obj
        local_sms.buf[:len(binary_data)] = binary_data
        local_sms.close()

    def test_shared_memory_basics(self):
        sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512)
        self.addCleanup(sms.unlink)

        # Verify attributes are readable.
        self.assertEqual(sms.name, 'test01_tsmb')
        self.assertGreaterEqual(sms.size, 512)
        self.assertGreaterEqual(len(sms.buf), sms.size)

        # Modify contents of shared memory segment through memoryview.
        sms.buf[0] = 42
        self.assertEqual(sms.buf[0], 42)

        # Attach to existing shared memory segment.
        also_sms = shared_memory.SharedMemory('test01_tsmb')
        self.assertEqual(also_sms.buf[0], 42)
        also_sms.close()

        # Attach to existing shared memory segment but specify a new size.
        same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size)
        self.assertLess(same_sms.size, 20*sms.size)  # Size was ignored.
        same_sms.close()

        # Creating Shared Memory Segment with -ve size
        with self.assertRaises(ValueError):
            shared_memory.SharedMemory(create=True, size=-2)

        # Attaching Shared Memory Segment without a name
        with self.assertRaises(ValueError):
            shared_memory.SharedMemory(create=False)

        # Test if shared memory segment is created properly,
        # when _make_filename returns an existing shared memory segment name
        # NOTE(review): the patch target says 'multiprocessing.…' while this
        # file otherwise imports from 'multiprocess.…' — verify the target
        # matches the module actually exercised here.
        with unittest.mock.patch(
            'multiprocessing.shared_memory._make_filename') as mock_make_filename:

            NAME_PREFIX = shared_memory._SHM_NAME_PREFIX
            names = ['test01_fn', 'test02_fn']
            # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary
            # because some POSIX compliant systems require name to start with /
            names = [NAME_PREFIX + name for name in names]

            mock_make_filename.side_effect = names
            shm1 = shared_memory.SharedMemory(create=True, size=1)
            self.addCleanup(shm1.unlink)
            self.assertEqual(shm1._name, names[0])

            mock_make_filename.side_effect = names
            shm2 = shared_memory.SharedMemory(create=True, size=1)
            self.addCleanup(shm2.unlink)
            self.assertEqual(shm2._name, names[1])

        if shared_memory._USE_POSIX:
            # Posix Shared Memory can only be unlinked once.  Here we
            # test an implementation detail that is not observed across
            # all supported platforms (since WindowsNamedSharedMemory
            # manages unlinking on its own and unlink() does nothing).
            # True release of shared memory segment does not necessarily
            # happen until process exits, depending on the OS platform.
            with self.assertRaises(FileNotFoundError):
                sms_uno = shared_memory.SharedMemory(
                    'test01_dblunlink',
                    create=True,
                    size=5000
                )

                try:
                    self.assertGreaterEqual(sms_uno.size, 5000)

                    sms_duo = shared_memory.SharedMemory('test01_dblunlink')
                    sms_duo.unlink()  # First shm_unlink() call.
                    sms_duo.close()
                    sms_uno.close()

                finally:
                    sms_uno.unlink()  # A second shm_unlink() call is bad.

        with self.assertRaises(FileExistsError):
            # Attempting to create a new shared memory segment with a
            # name that is already in use triggers an exception.
            there_can_only_be_one_sms = shared_memory.SharedMemory(
                'test01_tsmb',
                create=True,
                size=512
            )

        if shared_memory._USE_POSIX:
            # Requesting creation of a shared memory segment with the option
            # to attach to an existing segment, if that name is currently in
            # use, should not trigger an exception.
            # Note:  Using a smaller size could possibly cause truncation of
            # the existing segment but is OS platform dependent.  In the
            # case of MacOS/darwin, requesting a smaller size is disallowed.
            class OptionalAttachSharedMemory(shared_memory.SharedMemory):
                _flags = os.O_CREAT | os.O_RDWR
            ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb')
            self.assertEqual(ok_if_exists_sms.size, sms.size)
            ok_if_exists_sms.close()

        # Attempting to attach to an existing shared memory segment when
        # no segment exists with the supplied name triggers an exception.
        with self.assertRaises(FileNotFoundError):
            nonexisting_sms = shared_memory.SharedMemory('test01_notthere')
            nonexisting_sms.unlink()  # Error should occur on prior line.

        sms.close()

        # Test creating a shared memory segment with negative size
        with self.assertRaises(ValueError):
            sms_invalid = shared_memory.SharedMemory(create=True, size=-1)

        # Test creating a shared memory segment with size 0
        with self.assertRaises(ValueError):
            sms_invalid = shared_memory.SharedMemory(create=True, size=0)

        # Test creating a shared memory segment without size argument
        with self.assertRaises(ValueError):
            sms_invalid = shared_memory.SharedMemory(create=True)

    def test_shared_memory_across_processes(self):
        # bpo-40135: don't define shared memory block's name in case of
        # the failure when we run multiprocessing tests in parallel.
        sms = shared_memory.SharedMemory(create=True, size=512)
        self.addCleanup(sms.unlink)

        # Verify remote attachment to existing block by name is working.
        p = self.Process(
            target=self._attach_existing_shmem_then_write,
            args=(sms.name, b'howdy')
        )
        p.daemon = True
        p.start()
        p.join()
        self.assertEqual(bytes(sms.buf[:5]), b'howdy')

        # Verify pickling of SharedMemory instance also works.
        p = self.Process(
            target=self._attach_existing_shmem_then_write,
            args=(sms, b'HELLO')
        )
        p.daemon = True
        p.start()
        p.join()
        self.assertEqual(bytes(sms.buf[:5]), b'HELLO')

        sms.close()

    @unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms")
    def test_shared_memory_SharedMemoryServer_ignores_sigint(self):
        # bpo-36368: protect SharedMemoryManager server process from
        # KeyboardInterrupt signals.
        smm = multiprocessing.managers.SharedMemoryManager()
        smm.start()

        # make sure the manager works properly at the beginning
        sl = smm.ShareableList(range(10))

        # the manager's server should ignore KeyboardInterrupt signals, and
        # maintain its connection with the current process, and success when
        # asked to deliver memory segments.
        os.kill(smm._process.pid, signal.SIGINT)

        sl2 = smm.ShareableList(range(10))

        # test that the custom signal handler registered in the Manager does
        # not affect signal handling in the parent process.
        with self.assertRaises(KeyboardInterrupt):
            os.kill(os.getpid(), signal.SIGINT)

        smm.shutdown()

    @unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
    def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self):
        # bpo-36867: test that a SharedMemoryManager uses the
        # same resource_tracker process as its parent.
        cmd = '''if 1:
            from multiprocessing.managers import SharedMemoryManager


            smm = SharedMemoryManager()
            smm.start()
            sl = smm.ShareableList(range(10))
            smm.shutdown()
        '''
        rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)

        # Before bpo-36867 was fixed, a SharedMemoryManager not using the same
        # resource_tracker process as its parent would make the parent's
        # tracker complain about sl being leaked even though smm.shutdown()
        # properly released sl.
        self.assertFalse(err)

    def test_shared_memory_SharedMemoryManager_basics(self):
        smm1 = multiprocessing.managers.SharedMemoryManager()
        with self.assertRaises(ValueError):
            smm1.SharedMemory(size=9)  # Fails if SharedMemoryServer not started
        smm1.start()
        lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ]
        lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ]
        doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name)
        self.assertEqual(len(doppleganger_list0), 5)
        doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name)
        self.assertGreaterEqual(len(doppleganger_shm0.buf), 32)
        held_name = lom[0].name
        smm1.shutdown()
        if sys.platform != "win32":
            # Calls to unlink() have no effect on Windows platform; shared
            # memory will only be released once final process exits.
            with self.assertRaises(FileNotFoundError):
                # No longer there to be attached to again.
                absent_shm = shared_memory.SharedMemory(name=held_name)

        with multiprocessing.managers.SharedMemoryManager() as smm2:
            sl = smm2.ShareableList("howdy")
            shm = smm2.SharedMemory(size=128)
            held_name = sl.shm.name
        if sys.platform != "win32":
            with self.assertRaises(FileNotFoundError):
                # No longer there to be attached to again.
                absent_sl = shared_memory.ShareableList(name=held_name)

    def test_shared_memory_ShareableList_basics(self):
        sl = shared_memory.ShareableList(
            ['howdy', b'HoWdY', -273.154, 100, None, True, 42]
        )
        self.addCleanup(sl.shm.unlink)

        # Verify attributes are readable.
        self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q')

        # Exercise len().
        self.assertEqual(len(sl), 7)

        # Exercise index().
        with warnings.catch_warnings():
            # Suppress BytesWarning when comparing against b'HoWdY'.
            warnings.simplefilter('ignore')
            with self.assertRaises(ValueError):
                sl.index('100')
            self.assertEqual(sl.index(100), 3)

        # Exercise retrieving individual values.
        self.assertEqual(sl[0], 'howdy')
        self.assertEqual(sl[-2], True)

        # Exercise iterability.
        self.assertEqual(
            tuple(sl),
            ('howdy', b'HoWdY', -273.154, 100, None, True, 42)
        )

        # Exercise modifying individual values.
        sl[3] = 42
        self.assertEqual(sl[3], 42)
        sl[4] = 'some'  # Change type at a given position.
        self.assertEqual(sl[4], 'some')
        self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q')
        with self.assertRaisesRegex(ValueError,
                                    "exceeds available storage"):
            sl[4] = 'far too many'
        self.assertEqual(sl[4], 'some')
        sl[0] = 'encodés'  # Exactly 8 bytes of UTF-8 data
        self.assertEqual(sl[0], 'encodés')
        self.assertEqual(sl[1], b'HoWdY')  # no spillage
        with self.assertRaisesRegex(ValueError,
                                    "exceeds available storage"):
            sl[0] = 'encodées'  # Exactly 9 bytes of UTF-8 data
        self.assertEqual(sl[1], b'HoWdY')
        with self.assertRaisesRegex(ValueError,
                                    "exceeds available storage"):
            sl[1] = b'123456789'
        self.assertEqual(sl[1], b'HoWdY')

        # Exercise count().
        with warnings.catch_warnings():
            # Suppress BytesWarning when comparing against b'HoWdY'.
            warnings.simplefilter('ignore')
            self.assertEqual(sl.count(42), 2)
            self.assertEqual(sl.count(b'HoWdY'), 1)
            self.assertEqual(sl.count(b'adios'), 0)

        # Exercise creating a duplicate.
        sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate')
        try:
            self.assertNotEqual(sl.shm.name, sl_copy.shm.name)
            self.assertEqual('test03_duplicate', sl_copy.shm.name)
            self.assertEqual(list(sl), list(sl_copy))
            self.assertEqual(sl.format, sl_copy.format)
            sl_copy[-1] = 77
            self.assertEqual(sl_copy[-1], 77)
            self.assertNotEqual(sl[-1], 77)
            sl_copy.shm.close()
        finally:
            sl_copy.shm.unlink()

        # Obtain a second handle on the same ShareableList.
        sl_tethered = shared_memory.ShareableList(name=sl.shm.name)
        self.assertEqual(sl.shm.name, sl_tethered.shm.name)
        sl_tethered[-1] = 880
        self.assertEqual(sl[-1], 880)
        sl_tethered.shm.close()
        sl.shm.close()

        # Exercise creating an empty ShareableList.
        empty_sl = shared_memory.ShareableList()
        try:
            self.assertEqual(len(empty_sl), 0)
            self.assertEqual(empty_sl.format, '')
            self.assertEqual(empty_sl.count('any'), 0)
            with self.assertRaises(ValueError):
                empty_sl.index(None)
            empty_sl.shm.close()
        finally:
            empty_sl.shm.unlink()

    def test_shared_memory_ShareableList_pickling(self):
        sl = shared_memory.ShareableList(range(10))
        self.addCleanup(sl.shm.unlink)

        serialized_sl = pickle.dumps(sl)
        deserialized_sl = pickle.loads(serialized_sl)
        self.assertTrue(
            isinstance(deserialized_sl, shared_memory.ShareableList)
        )
        # NOTE(review): assertTrue with two arguments always passes (the
        # second is treated as a failure message); this was almost
        # certainly meant to be assertEqual(deserialized_sl[-1], 9).
        self.assertTrue(deserialized_sl[-1], 9)
        self.assertFalse(sl is deserialized_sl)
        deserialized_sl[4] = "changed"
        # Both handles refer to the same underlying shared memory block.
        self.assertEqual(sl[4], "changed")

        # Verify data is not being put into the pickled representation.
        name = 'a' * len(sl.shm.name)
        larger_sl = shared_memory.ShareableList(range(400))
        self.addCleanup(larger_sl.shm.unlink)
        serialized_larger_sl = pickle.dumps(larger_sl)
        self.assertTrue(len(serialized_sl) == len(serialized_larger_sl))
        larger_sl.shm.close()
        deserialized_sl.shm.close()
        sl.shm.close()

    def test_shared_memory_cleaned_after_process_termination(self):
        cmd = '''if 1:
            import os, time, sys
            from multiprocessing import shared_memory

            # Create a shared_memory segment, and send the segment name
            sm = shared_memory.SharedMemory(create=True, size=10)
            sys.stdout.write(sm.name + '\\n')
            sys.stdout.flush()
            time.sleep(100)
        '''
        with subprocess.Popen([sys.executable, '-E', '-c', cmd],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as p:
            name = p.stdout.readline().strip().decode()

            # killing abruptly processes holding reference to a shared memory
            # segment should not leak the given memory segment.
            p.terminate()
            p.wait()

            # Poll with exponential backoff until the resource tracker has
            # cleaned up the orphaned segment (or the deadline passes).
            deadline = getattr(time,'monotonic',time.time)() + support.LONG_TIMEOUT
            t = 0.1
            while getattr(time,'monotonic',time.time)() < deadline:
                time.sleep(t)
                t = min(t*2, 5)
                try:
                    smm = shared_memory.SharedMemory(name, create=False)
                except FileNotFoundError:
                    break
            else:
                raise AssertionError("A SharedMemory segment was leaked after"
                                     " a process was abruptly terminated.")

            if os.name == 'posix':
                # A warning was emitted by the subprocess' own
                # resource_tracker (on Windows, shared memory segments
                # are released automatically by the OS).
                err = p.stderr.read().decode()
                self.assertIn(
                    "resource_tracker: There appear to be 1 leaked "
                    "shared_memory objects to clean up at shutdown", err)
#
#
#
class _TestFinalize(BaseTestCase):
    """Tests for util.Finalize: callback ordering by exitpriority, and
    thread-safety of _run_finalizers() (bpo-24484)."""

    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        # Start from an empty finalizer registry so only this test's
        # finalizers are observed; restore the original in tearDown.
        self.registry_backup = util._finalizer_registry.copy()
        util._finalizer_registry.clear()

    def tearDown(self):
        self.assertFalse(util._finalizer_registry)
        util._finalizer_registry.update(self.registry_backup)

    @classmethod
    def _test_finalize(cls, conn):
        class Foo(object):
            pass

        a = Foo()
        util.Finalize(a, conn.send, args=('a',))
        del a           # triggers callback for a

        b = Foo()
        close_b = util.Finalize(b, conn.send, args=('b',))
        close_b()       # triggers callback for b
        close_b()       # does nothing because callback has already been called
        del b           # does nothing because callback has already been called

        c = Foo()
        util.Finalize(c, conn.send, args=('c',))

        d10 = Foo()
        util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)

        d01 = Foo()
        util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
        d02 = Foo()
        util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
        d03 = Foo()
        util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)

        util.Finalize(None, conn.send, args=('e',), exitpriority=-10)

        util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)

        # call multiprocessing's cleanup function then exit process without
        # garbage collecting locals
        util._exit_function()
        conn.close()
        os._exit(0)

    def test_finalize(self):
        conn, child_conn = self.Pipe()

        p = self.Process(target=self._test_finalize, args=(child_conn,))
        p.daemon = True
        p.start()
        p.join()

        result = [obj for obj in iter(conn.recv, 'STOP')]
        # 'c' has no exitpriority so it is NOT run by _exit_function();
        # higher exitpriority runs first, same-priority in LIFO order.
        self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])

    def test_thread_safety(self):
        # bpo-24484: _run_finalizers() should be thread-safe
        def cb():
            pass

        class Foo(object):
            def __init__(self):
                self.ref = self  # create reference cycle
                # insert finalizer at random key
                util.Finalize(self, cb, exitpriority=random.randint(1, 100))

        finish = False
        exc = None

        def run_finalizers():
            nonlocal exc
            while not finish:
                time.sleep(random.random() * 1e-1)
                try:
                    # A GC run will eventually happen during this,
                    # collecting stale Foo's and mutating the registry
                    util._run_finalizers()
                except Exception as e:
                    exc = e

        def make_finalizers():
            nonlocal exc
            d = {}
            while not finish:
                try:
                    # Old Foo's get gradually replaced and later
                    # collected by the GC (because of the cyclic ref)
                    d[random.getrandbits(5)] = {Foo() for i in range(10)}
                except Exception as e:
                    exc = e
                    d.clear()

        old_interval = sys.getswitchinterval()
        old_threshold = gc.get_threshold()
        try:
            # Aggressive switching + low GC thresholds maximize the chance
            # of hitting the original race.
            sys.setswitchinterval(1e-6)
            gc.set_threshold(5, 5, 5)
            threads = [threading.Thread(target=run_finalizers),
                       threading.Thread(target=make_finalizers)]
            with test.support.start_threads(threads):
                time.sleep(4.0)  # Wait a bit to trigger race condition
                finish = True
            if exc is not None:
                raise exc
        finally:
            sys.setswitchinterval(old_interval)
            gc.set_threshold(*old_threshold)
            gc.collect()  # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
    """Check that every multiprocess submodule defines __all__ and that
    each name in __all__ actually exists (so `from m import *` works)."""

    def get_module_names(self):
        # Enumerate importable submodule names from the package's *.py files.
        import glob
        folder = os.path.dirname(multiprocessing.__file__)
        pattern = os.path.join(glob.escape(folder), '*.py')
        files = glob.glob(pattern)
        modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
        modules = ['multiprocess.' + m for m in modules]
        modules.remove('multiprocess.__init__')
        modules.append('multiprocess')
        return modules

    def test_import(self):
        modules = self.get_module_names()
        # Drop modules that cannot be imported on this platform/build.
        if sys.platform == 'win32':
            modules.remove('multiprocess.popen_fork')
            modules.remove('multiprocess.popen_forkserver')
            modules.remove('multiprocess.popen_spawn_posix')
        else:
            modules.remove('multiprocess.popen_spawn_win32')
            if not HAS_REDUCTION:
                modules.remove('multiprocess.popen_forkserver')

        if c_int is None:
            # This module requires _ctypes
            modules.remove('multiprocess.sharedctypes')

        for name in modules:
            __import__(name)
            mod = sys.modules[name]
            self.assertTrue(hasattr(mod, '__all__'), name)

            for attr in mod.__all__:
                self.assertTrue(
                    hasattr(mod, attr),
                    '%r does not have attribute %r' % (mod, attr)
                )
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
    """Quick test that logging works -- does not test logging output."""

    ALLOWED_TYPES = ('processes',)

    def test_enable_logging(self):
        logger = multiprocessing.get_logger()
        logger.setLevel(util.SUBWARNING)
        self.assertTrue(logger is not None)
        logger.debug('this will not be printed')
        logger.info('nor will this')
        # Restore the module-wide level for subsequent tests.
        logger.setLevel(LOG_LEVEL)

    @classmethod
    def _test_level(cls, conn):
        # Child body: report the effective level seen in the child process.
        logger = multiprocessing.get_logger()
        conn.send(logger.getEffectiveLevel())

    def test_level(self):
        LEVEL1 = 32
        LEVEL2 = 37

        logger = multiprocessing.get_logger()
        root_logger = logging.getLogger()
        root_level = root_logger.level

        reader, writer = multiprocessing.Pipe(duplex=False)

        # An explicit level on the multiprocessing logger is inherited
        # by child processes.
        logger.setLevel(LEVEL1)
        p = self.Process(target=self._test_level, args=(writer,))
        p.start()
        self.assertEqual(LEVEL1, reader.recv())
        p.join()
        p.close()

        # With NOTSET the effective level falls through to the root logger.
        logger.setLevel(logging.NOTSET)
        root_logger.setLevel(LEVEL2)
        p = self.Process(target=self._test_level, args=(writer,))
        p.start()
        self.assertEqual(LEVEL2, reader.recv())
        p.join()
        p.close()

        root_logger.setLevel(root_level)
        logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
    """Check that Process.join() retries if os.waitpid() fails with EINTR."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def _killer(cls, pid):
        # Child body: interrupt the parent's blocking join with SIGUSR1.
        time.sleep(0.1)
        os.kill(pid, signal.SIGUSR1)

    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_poll_eintr(self):
        got_signal = [False]
        def record(*args):
            got_signal[0] = True
        pid = os.getpid()
        oldhandler = signal.signal(signal.SIGUSR1, record)
        try:
            killer = self.Process(target=self._killer, args=(pid,))
            killer.start()
            try:
                p = self.Process(target=time.sleep, args=(2,))
                p.start()
                p.join()
            finally:
                killer.join()
            # The signal must have fired AND the join must have completed
            # normally despite the interruption.
            self.assertTrue(got_signal[0])
            self.assertEqual(p.exitcode, 0)
        finally:
            signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
    """Test to verify handle verification, see issue 3321."""

    @unittest.skipIf(WIN32, "skipped on Windows")
    def test_invalid_handles(self):
        # Construct a Connection around a bogus fd number.
        conn = multiprocessing.connection.Connection(44977608)
        # check that poll() doesn't crash
        try:
            conn.poll()
        except (ValueError, OSError):
            pass
        finally:
            # Hack private attribute _handle to avoid printing an error
            # in conn.__del__
            conn._handle = None
        # A negative fd must be rejected at construction time.
        self.assertRaises((ValueError, OSError),
                          multiprocessing.connection.Connection, -1)
@hashlib_helper.requires_hashdigest('md5')
class OtherTest(unittest.TestCase):
    """Tests for the connection authentication handshake failure paths."""
    # TODO: add more tests for deliver/answer challenge.

    def test_deliver_challenge_auth_failure(self):
        # A bogus digest from the peer must fail authentication on the
        # challenge-delivering side.
        class _FakeConnection(object):
            def recv_bytes(self, size):
                return b'something bogus'
            def send_bytes(self, data):
                pass
        self.assertRaises(multiprocessing.AuthenticationError,
                          multiprocessing.connection.deliver_challenge,
                          _FakeConnection(), b'abc')

    def test_answer_challenge_auth_failure(self):
        # A bogus welcome message after the challenge must fail
        # authentication on the answering side.
        class _FakeConnection(object):
            def __init__(self):
                self.count = 0
            def recv_bytes(self, size):
                self.count += 1
                if self.count == 1:
                    return multiprocessing.connection.CHALLENGE
                elif self.count == 2:
                    return b'something bogus'
                return b''
            def send_bytes(self, data):
                pass
        self.assertRaises(multiprocessing.AuthenticationError,
                          multiprocessing.connection.answer_challenge,
                          _FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
    """Manager/Pool initializer used by TestInitializers (issue 5585).

    Increments the shared namespace's ``test`` counter so the parent can
    verify the initializer actually ran.
    """
    ns.test = ns.test + 1
@hashlib_helper.requires_hashdigest('md5')
class TestInitializers(unittest.TestCase):
    """Test Manager.start()/Pool.__init__() initializer feature - issue 5585."""

    def setUp(self):
        # Shared namespace through which the initializer reports back.
        self.mgr = multiprocessing.Manager()
        self.ns = self.mgr.Namespace()
        self.ns.test = 0

    def tearDown(self):
        self.mgr.shutdown()
        self.mgr.join()

    def test_manager_initializer(self):
        m = multiprocessing.managers.SyncManager()
        # A non-callable initializer must be rejected.
        self.assertRaises(TypeError, m.start, 1)
        m.start(initializer, (self.ns,))
        self.assertEqual(self.ns.test, 1)
        m.shutdown()
        m.join()

    def test_pool_initializer(self):
        self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
        p = multiprocessing.Pool(1, initializer, (self.ns,))
        p.close()
        p.join()
        self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
    """Spawn a daemon child that polls an empty Queue, then wait for it.

    Exercised by TestStdinBadfiledescriptor to verify that queues work
    when used from inside an already-spawned process.
    """
    q = multiprocessing.Queue()
    worker = multiprocessing.Process(target=_this_sub_process, args=(q,))
    worker.daemon = True
    worker.start()
    worker.join()
def _afunc(x):
return x*x
def pool_in_process():
    """Create a 4-worker Pool and map over it, then shut it down cleanly.

    Run as a child-process target by TestStdinBadfiledescriptor to check
    that pools can be created from within another process.
    """
    workers = multiprocessing.Pool(processes=4)
    results = workers.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
    workers.close()
    workers.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
    """Issues 5155, 5313, 5331: processes created inside other processes,
    and os.close(sys.stdin.fileno()) vs. sys.stdin.close() behavior."""
    def test_queue_in_process(self):
        # Child itself spawns a grandchild that uses a Queue.
        proc = multiprocessing.Process(target=_test_process)
        proc.start()
        proc.join()
    def test_pool_in_process(self):
        # Child creates and runs a Pool.
        p = multiprocessing.Process(target=pool_in_process)
        p.start()
        p.join()
    def test_flushing(self):
        sio = io.StringIO()
        flike = _file_like(sio)
        flike.write('foo')
        # NOTE(review): the Process below is created but never started or
        # joined -- only the parent-side flush is checked.  Confirm whether
        # a start()/join() was intended here.
        proc = multiprocessing.Process(target=lambda: flike.flush())
        flike.flush()
        assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
    """Tests for multiprocess.connection.wait() over pipes, sockets and
    process sentinels, including timeout and negative-timeout behavior."""

    @classmethod
    def _child_test_wait(cls, w, slow):
        # Child: send ten (i, pid) tuples, optionally with random delays
        # to exercise interleaved readiness.
        for i in range(10):
            if slow:
                time.sleep(random.random()*0.1)
            w.send((i, os.getpid()))
        w.close()

    def test_wait(self, slow=False):
        from multiprocess.connection import wait
        readers = []
        procs = []
        messages = []
        for i in range(4):
            r, w = multiprocessing.Pipe(duplex=False)
            p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
            p.daemon = True
            p.start()
            # Close the parent's copy of the write end so EOF is seen
            # once the child closes its copy.
            w.close()
            readers.append(r)
            procs.append(p)
            self.addCleanup(p.join)
        while readers:
            for r in wait(readers):
                try:
                    msg = r.recv()
                except EOFError:
                    readers.remove(r)
                    r.close()
                else:
                    messages.append(msg)
        messages.sort()
        expected = sorted((i, p.pid) for i in range(10) for p in procs)
        self.assertEqual(messages, expected)

    @classmethod
    def _child_test_wait_socket(cls, address, slow):
        s = socket.socket()
        s.connect(address)
        for i in range(10):
            if slow:
                time.sleep(random.random()*0.1)
            s.sendall(('%s\n' % i).encode('ascii'))
        s.close()

    def test_wait_socket(self, slow=False):
        from multiprocess.connection import wait
        l = socket.create_server((socket_helper.HOST, 0))
        addr = l.getsockname()
        readers = []
        procs = []
        dic = {}
        for i in range(4):
            p = multiprocessing.Process(target=self._child_test_wait_socket,
                                        args=(addr, slow))
            p.daemon = True
            p.start()
            procs.append(p)
            self.addCleanup(p.join)
        for i in range(4):
            r, _ = l.accept()
            readers.append(r)
            dic[r] = []
        l.close()
        while readers:
            for r in wait(readers):
                msg = r.recv(32)
                if not msg:
                    # Empty recv == peer closed: drop the socket.
                    readers.remove(r)
                    r.close()
                else:
                    dic[r].append(msg)
        expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
        for v in dic.values():
            self.assertEqual(b''.join(v), expected)

    def test_wait_slow(self):
        self.test_wait(True)

    def test_wait_socket_slow(self):
        self.test_wait_socket(True)

    def test_wait_timeout(self):
        from multiprocess.connection import wait
        expected = 5
        a, b = multiprocessing.Pipe()
        start = getattr(time,'monotonic',time.time)()
        res = wait([a, b], expected)
        delta = getattr(time,'monotonic',time.time)() - start
        self.assertEqual(res, [])
        # Generous bounds: timers on slow/loaded machines are imprecise.
        self.assertLess(delta, expected * 2)
        self.assertGreater(delta, expected * 0.5)
        b.send(None)
        start = getattr(time,'monotonic',time.time)()
        res = wait([a, b], 20)
        delta = getattr(time,'monotonic',time.time)() - start
        self.assertEqual(res, [a])
        self.assertLess(delta, 0.4)

    @classmethod
    def signal_and_sleep(cls, sem, period):
        # Child: announce readiness then stay alive for *period* seconds.
        sem.release()
        time.sleep(period)

    def test_wait_integer(self):
        # wait() must also accept a raw process sentinel (an int fd/handle).
        from multiprocess.connection import wait
        expected = 3
        sorted_ = lambda l: sorted(l, key=lambda x: id(x))
        sem = multiprocessing.Semaphore(0)
        a, b = multiprocessing.Pipe()
        p = multiprocessing.Process(target=self.signal_and_sleep,
                                    args=(sem, expected))
        p.start()
        self.assertIsInstance(p.sentinel, int)
        self.assertTrue(sem.acquire(timeout=20))
        start = getattr(time,'monotonic',time.time)()
        res = wait([a, p.sentinel, b], expected + 20)
        delta = getattr(time,'monotonic',time.time)() - start
        self.assertEqual(res, [p.sentinel])
        self.assertLess(delta, expected + 2)
        self.assertGreater(delta, expected - 2)
        a.send(None)
        start = getattr(time,'monotonic',time.time)()
        res = wait([a, p.sentinel, b], 20)
        delta = getattr(time,'monotonic',time.time)() - start
        self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
        self.assertLess(delta, 0.4)
        b.send(None)
        start = getattr(time,'monotonic',time.time)()
        res = wait([a, p.sentinel, b], 20)
        delta = getattr(time,'monotonic',time.time)() - start
        self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
        self.assertLess(delta, 0.4)
        p.terminate()
        p.join()

    def test_neg_timeout(self):
        # A negative timeout behaves like zero: return immediately.
        from multiprocess.connection import wait
        a, b = multiprocessing.Pipe()
        t = getattr(time,'monotonic',time.time)()
        res = wait([a], timeout=-1)
        t = getattr(time,'monotonic',time.time)() - t
        self.assertEqual(res, [])
        self.assertLess(t, 1)
        a.close()
        b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
    """Issue 14151: Listener must reject address families that are
    invalid for the current platform."""
    @unittest.skipIf(WIN32, "skipped on Windows")
    def test_invalid_family(self):
        # A Windows named-pipe path is not valid off Windows.
        with self.assertRaises(ValueError):
            multiprocessing.connection.Listener(r'\\.\test')
    @unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
    def test_invalid_family_win32(self):
        # A Unix-socket path is not valid on Windows.
        with self.assertRaises(ValueError):
            multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
    """Issue 12098: sys.flags of a spawned child must match the parent's,
    verified across two generations of processes."""
    @classmethod
    def run_in_grandchild(cls, conn):
        conn.send(tuple(sys.flags))
    @classmethod
    def run_in_child(cls):
        import json
        r, w = multiprocessing.Pipe(duplex=False)
        p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
        p.start()
        grandchild_flags = r.recv()
        p.join()
        r.close()
        w.close()
        flags = (tuple(sys.flags), grandchild_flags)
        # Report both flag tuples to the grandparent via stdout.
        print(json.dumps(flags))
    # NOTE(review): leading underscore means this test is never collected
    # by unittest -- presumably disabled deliberately in this fork; confirm.
    def _test_flags(self):
        import json
        # start child process using unusual flags
        prog = ('from multiprocess.tests import TestFlags; ' +
                'TestFlags.run_in_child()')
        data = subprocess.check_output(
            [sys.executable, '-E', '-S', '-O', '-c', prog])
        child_flags, grandchild_flags = json.loads(data.decode('ascii'))
        self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
    """Issue #6056: multiprocessing connections must work even when a
    global socket default timeout is set."""
    @classmethod
    def _test_timeout(cls, child, address):
        # Child: answer on the pipe after a delay, then connect back to
        # the parent's listener and send a second value.
        time.sleep(1)
        child.send(123)
        child.close()
        conn = multiprocessing.connection.Client(address)
        conn.send(456)
        conn.close()
    def test_timeout(self):
        old_timeout = socket.getdefaulttimeout()
        try:
            # Short global timeout; the 1 s sleep in the child would break
            # things if the timeout leaked into the connection machinery.
            socket.setdefaulttimeout(0.1)
            parent, child = multiprocessing.Pipe(duplex=True)
            l = multiprocessing.connection.Listener(family='AF_INET')
            p = multiprocessing.Process(target=self._test_timeout,
                                        args=(child, l.address))
            p.start()
            child.close()
            self.assertEqual(parent.recv(), 123)
            parent.close()
            conn = l.accept()
            self.assertEqual(conn.recv(), 456)
            conn.close()
            l.close()
            join_process(p)
        finally:
            socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
    """A script without an ``if __name__ == '__main__'`` guard must not
    fork-bomb: spawn/forkserver raise RuntimeError, fork just works."""
    def test_noforkbomb(self):
        sm = multiprocessing.get_start_method()
        name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
        if sm != 'fork':
            # spawn/forkserver re-import the main module, which must fail
            # cleanly instead of recursing.
            rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
            self.assertEqual(out, b'')
            self.assertIn(b'RuntimeError', err)
        else:
            rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
            self.assertEqual(out.rstrip(), b'123')
            self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
    # We recursively start processes. Issue #17555 meant that the
    # after fork registry would get duplicate entries for the same
    # lock. The size of the registry at generation n was ~2**n.
    @classmethod
    def child(cls, n, conn):
        # Recurse n generations deep, then report the registry size
        # from the innermost process.
        if n > 1:
            p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
            p.start()
            conn.close()
            join_process(p)
        else:
            conn.send(len(util._afterfork_registry))
            conn.close()
    def test_lock(self):
        r, w = multiprocessing.Pipe(False)
        # Creating the lock registers it once; the registry must not
        # grow across the 5 nested forks below.
        l = util.ForkAwareThreadLock()
        old_size = len(util._afterfork_registry)
        p = multiprocessing.Process(target=self.child, args=(5, w))
        p.start()
        w.close()
        new_size = r.recv()
        join_process(p)
        self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
    """Non-forked child processes must not inherit unneeded fds/handles."""
    def get_high_socket_fd(self):
        if WIN32:
            # The child process will not have any socket handles, so
            # calling socket.fromfd() should produce WSAENOTSOCK even
            # if there is a handle of the same number.
            return socket.socket().detach()
        else:
            # We want to produce a socket with an fd high enough that a
            # freshly created child process will not have any fds as high.
            fd = socket.socket().detach()
            to_close = []
            while fd < 50:
                to_close.append(fd)
                fd = os.dup(fd)
            for x in to_close:
                os.close(x)
            return fd
    def close(self, fd):
        if WIN32:
            # Re-wrap the handle so closing releases the socket properly.
            socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
        else:
            os.close(fd)
    @classmethod
    def _test_closefds(cls, conn, fd):
        # Child: report the exception raised (or None) when adopting fd.
        try:
            s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        except Exception as e:
            conn.send(e)
        else:
            s.close()
            conn.send(None)
    def test_closefd(self):
        if not HAS_REDUCTION:
            raise unittest.SkipTest('requires fd pickling')
        reader, writer = multiprocessing.Pipe()
        fd = self.get_high_socket_fd()
        try:
            p = multiprocessing.Process(target=self._test_closefds,
                                        args=(writer, fd))
            p.start()
            writer.close()
            e = reader.recv()
            join_process(p)
        finally:
            self.close(fd)
            writer.close()
            reader.close()
        if multiprocessing.get_start_method() == 'fork':
            # fork children inherit everything, so adoption succeeds.
            self.assertIs(e, None)
        else:
            WSAENOTSOCK = 10038
            self.assertIsInstance(e, OSError)
            self.assertTrue(e.errno == errno.EBADF or
                            e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
    """Issue #17097: EINTR should be ignored by recv(), send(),
    accept() etc. — a signal must not break a blocking pipe operation."""

    # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
    CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)

    @classmethod
    def _test_ignore(cls, conn):
        def handler(signum, frame):
            # Do nothing: the syscall must be transparently restarted.
            pass
        signal.signal(signal.SIGUSR1, handler)
        conn.send('ready')
        x = conn.recv()
        conn.send(x)
        # Large enough to block in send, so the signal lands mid-write.
        conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_ignore(self):
        conn, child_conn = multiprocessing.Pipe()
        try:
            p = multiprocessing.Process(target=self._test_ignore,
                                        args=(child_conn,))
            p.daemon = True
            p.start()
            child_conn.close()
            self.assertEqual(conn.recv(), 'ready')
            time.sleep(0.1)
            # Interrupt the child while it blocks in recv().
            os.kill(p.pid, signal.SIGUSR1)
            time.sleep(0.1)
            conn.send(1234)
            self.assertEqual(conn.recv(), 1234)
            time.sleep(0.1)
            # Interrupt the child while it blocks in send_bytes().
            os.kill(p.pid, signal.SIGUSR1)
            self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
            time.sleep(0.1)
            p.join()
        finally:
            conn.close()
    @classmethod
    def _test_ignore_listener(cls, conn):
        def handler(signum, frame):
            pass
        signal.signal(signal.SIGUSR1, handler)
        with multiprocessing.connection.Listener() as l:
            conn.send(l.address)
            a = l.accept()
            a.send('welcome')
    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_ignore_listener(self):
        conn, child_conn = multiprocessing.Pipe()
        try:
            p = multiprocessing.Process(target=self._test_ignore_listener,
                                        args=(child_conn,))
            p.daemon = True
            p.start()
            child_conn.close()
            address = conn.recv()
            time.sleep(0.1)
            # Interrupt the child while it blocks in accept().
            os.kill(p.pid, signal.SIGUSR1)
            time.sleep(0.1)
            client = multiprocessing.connection.Client(address)
            self.assertEqual(client.recv(), 'welcome')
            p.join()
        finally:
            conn.close()
class TestStartMethod(unittest.TestCase):
    """Tests for get/set_start_method(), get_context() and the
    forkserver preload mechanism."""
    @classmethod
    def _check_context(cls, conn):
        conn.send(multiprocessing.get_start_method())
    def check_context(self, ctx):
        # A child spawned from *ctx* must report ctx's start method.
        r, w = ctx.Pipe(duplex=False)
        p = ctx.Process(target=self._check_context, args=(w,))
        p.start()
        w.close()
        child_method = r.recv()
        r.close()
        p.join()
        self.assertEqual(child_method, ctx.get_start_method())
    def test_context(self):
        for method in ('fork', 'spawn', 'forkserver'):
            try:
                ctx = multiprocessing.get_context(method)
            except ValueError:
                # Method unsupported on this platform.
                continue
            self.assertEqual(ctx.get_start_method(), method)
            self.assertIs(ctx.get_context(), ctx)
            # A concrete context's method is fixed and cannot be changed.
            self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
            self.assertRaises(ValueError, ctx.set_start_method, None)
            self.check_context(ctx)
    def test_set_get(self):
        multiprocessing.set_forkserver_preload(PRELOAD)
        count = 0
        old_method = multiprocessing.get_start_method()
        try:
            for method in ('fork', 'spawn', 'forkserver'):
                try:
                    multiprocessing.set_start_method(method, force=True)
                except ValueError:
                    continue
                self.assertEqual(multiprocessing.get_start_method(), method)
                ctx = multiprocessing.get_context()
                self.assertEqual(ctx.get_start_method(), method)
                self.assertTrue(type(ctx).__name__.lower().startswith(method))
                self.assertTrue(
                    ctx.Process.__name__.lower().startswith(method))
                self.check_context(multiprocessing)
                count += 1
        finally:
            # Always restore the original start method for later tests.
            multiprocessing.set_start_method(old_method, force=True)
        self.assertGreaterEqual(count, 1)
    def test_get_all(self):
        methods = multiprocessing.get_all_start_methods()
        if sys.platform == 'win32':
            self.assertEqual(methods, ['spawn'])
        else:
            # Order is unspecified; forkserver may be absent.
            self.assertTrue(methods == ['fork', 'spawn'] or
                            methods == ['spawn', 'fork'] or
                            methods == ['fork', 'spawn', 'forkserver'] or
                            methods == ['spawn', 'fork', 'forkserver'])
    def test_preload_resources(self):
        if multiprocessing.get_start_method() != 'forkserver':
            self.skipTest("test only relevant for 'forkserver' method")
        name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
        rc, out, err = test.support.script_helper.assert_python_ok(name)
        out = out.decode()
        err = err.decode()
        if out.rstrip() != 'ok' or err != '':
            print(out)
            print(err)
            self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
                 "test semantics don't make sense on Windows")
class TestResourceTracker(unittest.TestCase):
    """Tests for the resource_tracker process: leaked semaphores/shared
    memory are cleaned up, and a killed tracker restarts on demand."""

    # NOTE(review): leading underscore disables this test in this fork;
    # confirm whether that is intentional.
    def _test_resource_tracker(self):
        #
        # Check that killing process does not leak named semaphores
        #
        cmd = '''if 1:
            import time, os, tempfile
            import multiprocess as mp
            from multiprocess import resource_tracker
            from multiprocess.shared_memory import SharedMemory
            mp.set_start_method("spawn")
            rand = tempfile._RandomNameSequence()
            def create_and_register_resource(rtype):
                if rtype == "semaphore":
                    lock = mp.Lock()
                    return lock, lock._semlock.name
                elif rtype == "shared_memory":
                    sm = SharedMemory(create=True, size=10)
                    return sm, sm._name
                else:
                    raise ValueError(
                        "Resource type {{}} not understood".format(rtype))
            resource1, rname1 = create_and_register_resource("{rtype}")
            resource2, rname2 = create_and_register_resource("{rtype}")
            os.write({w}, rname1.encode("ascii") + b"\\n")
            os.write({w}, rname2.encode("ascii") + b"\\n")
            time.sleep(10)
        '''
        for rtype in resource_tracker._CLEANUP_FUNCS:
            with self.subTest(rtype=rtype):
                if rtype == "noop":
                    # Artefact resource type used by the resource_tracker
                    continue
                r, w = os.pipe()
                p = subprocess.Popen([sys.executable,
                                      '-E', '-c', cmd.format(w=w, rtype=rtype)],
                                     pass_fds=[w],
                                     stderr=subprocess.PIPE)
                os.close(w)
                with open(r, 'rb', closefd=True) as f:
                    name1 = f.readline().rstrip().decode('ascii')
                    name2 = f.readline().rstrip().decode('ascii')
                # name1 is unlinked by us; name2 must be unlinked by the
                # tracker after the abrupt termination below.
                _resource_unlink(name1, rtype)
                p.terminate()
                p.wait()
                deadline = getattr(time,'monotonic',time.time)() + support.LONG_TIMEOUT
                while getattr(time,'monotonic',time.time)() < deadline:
                    time.sleep(.5)
                    try:
                        _resource_unlink(name2, rtype)
                    except OSError as e:
                        # docs say it should be ENOENT, but OSX seems to give
                        # EINVAL
                        self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL))
                        break
                else:
                    raise AssertionError(
                        f"A {rtype} resource was leaked after a process was "
                        f"abruptly terminated.")
                err = p.stderr.read().decode('utf-8')
                p.stderr.close()
                expected = ('resource_tracker: There appear to be 2 leaked {} '
                            'objects'.format(
                            rtype))
                self.assertRegex(err, expected)
                self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1)
    def check_resource_tracker_death(self, signum, should_die):
        # bpo-31310: if the semaphore tracker process has died, it should
        # be restarted implicitly.
        from multiprocess.resource_tracker import _resource_tracker
        pid = _resource_tracker._pid
        if pid is not None:
            # Start from a known state: kill any existing tracker.
            os.kill(pid, signal.SIGKILL)
            support.wait_process(pid, exitcode=-signal.SIGKILL)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            _resource_tracker.ensure_running()
        pid = _resource_tracker._pid
        os.kill(pid, signum)
        time.sleep(1.0)  # give it time to die
        ctx = multiprocessing.get_context("spawn")
        with warnings.catch_warnings(record=True) as all_warn:
            warnings.simplefilter("always")
            sem = ctx.Semaphore()
            sem.acquire()
            sem.release()
            wr = weakref.ref(sem)
            # ensure `sem` gets collected, which triggers communication with
            # the semaphore tracker
            del sem
            gc.collect()
            self.assertIsNone(wr())
            if should_die:
                self.assertEqual(len(all_warn), 1)
                the_warn = all_warn[0]
                self.assertTrue(issubclass(the_warn.category, UserWarning))
                self.assertTrue("resource_tracker: process died"
                                in str(the_warn.message))
            else:
                self.assertEqual(len(all_warn), 0)
    def test_resource_tracker_sigint(self):
        # Catchable signal (ignored by semaphore tracker)
        self.check_resource_tracker_death(signal.SIGINT, False)
    def test_resource_tracker_sigterm(self):
        # Catchable signal (ignored by semaphore tracker)
        self.check_resource_tracker_death(signal.SIGTERM, False)
    def test_resource_tracker_sigkill(self):
        # Uncatchable signal.
        self.check_resource_tracker_death(signal.SIGKILL, True)
    @staticmethod
    def _is_resource_tracker_reused(conn, pid):
        from multiprocess.resource_tracker import _resource_tracker
        _resource_tracker.ensure_running()
        # The pid should be None in the child process, expect for the fork
        # context. It should not be a new value.
        reused = _resource_tracker._pid in (None, pid)
        reused &= _resource_tracker._check_alive()
        conn.send(reused)
    def test_resource_tracker_reused(self):
        from multiprocess.resource_tracker import _resource_tracker
        _resource_tracker.ensure_running()
        pid = _resource_tracker._pid
        r, w = multiprocessing.Pipe(duplex=False)
        p = multiprocessing.Process(target=self._is_resource_tracker_reused,
                                    args=(w, pid))
        p.start()
        is_resource_tracker_reused = r.recv()
        # Clean up
        p.join()
        w.close()
        r.close()
        self.assertTrue(is_resource_tracker_reused)
class TestSimpleQueue(unittest.TestCase):
    """Tests for SimpleQueue.empty() across processes and for close()."""
    @classmethod
    def _test_empty(cls, queue, child_can_start, parent_can_continue):
        child_can_start.wait()
        # issue 30301, could fail under spawn and forkserver
        try:
            # Child's view: one True (empty before put) then one False.
            queue.put(queue.empty())
            queue.put(queue.empty())
        finally:
            parent_can_continue.set()
    def test_empty(self):
        queue = multiprocessing.SimpleQueue()
        child_can_start = multiprocessing.Event()
        parent_can_continue = multiprocessing.Event()
        proc = multiprocessing.Process(
            target=self._test_empty,
            args=(queue, child_can_start, parent_can_continue)
        )
        proc.daemon = True
        proc.start()
        self.assertTrue(queue.empty())
        child_can_start.set()
        parent_can_continue.wait()
        self.assertFalse(queue.empty())
        self.assertEqual(queue.get(), True)
        self.assertEqual(queue.get(), False)
        self.assertTrue(queue.empty())
        proc.join()
    def test_close(self):
        queue = multiprocessing.SimpleQueue()
        queue.close()
        # closing a queue twice should not fail
        queue.close()
    # Test specific to CPython since it tests private attributes
    @test.support.cpython_only
    def test_closed(self):
        queue = multiprocessing.SimpleQueue()
        queue.close()
        self.assertTrue(queue._reader.closed)
        self.assertTrue(queue._writer.closed)
class TestPoolNotLeakOnFailure(unittest.TestCase):
    def test_release_unused_processes(self):
        # Issue #19675: During pool creation, if we can't create a process,
        # don't leak already created ones.
        will_fail_in = 3
        forked_processes = []
        class FailingForkProcess:
            # Fake Process whose start() raises after three successes,
            # recording every instance so leaks can be detected below.
            def __init__(self, **kwargs):
                self.name = 'Fake Process'
                self.exitcode = None
                self.state = None
                forked_processes.append(self)
            def start(self):
                nonlocal will_fail_in
                if will_fail_in <= 0:
                    raise OSError("Manually induced OSError")
                will_fail_in -= 1
                self.state = 'started'
            def terminate(self):
                self.state = 'stopping'
            def join(self):
                if self.state == 'stopping':
                    self.state = 'stopped'
            def is_alive(self):
                return self.state == 'started' or self.state == 'stopping'
        with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
            p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
                Process=FailingForkProcess))
            p.close()
            p.join()
        # Every fake process must have been terminated and joined.
        self.assertFalse(
            any(process.is_alive() for process in forked_processes))
@hashlib_helper.requires_hashdigest('md5')
class TestSyncManagerTypes(unittest.TestCase):
    """Test all the types which can be shared between a parent and a
    child process by using a manager which acts as an intermediary
    between them.
    In the following unit-tests the base type is created in the parent
    process, the @classmethod represents the worker process and the
    shared object is readable and editable between the two.
    # The child.
    @classmethod
    def _test_list(cls, obj):
        assert obj[0] == 5
        assert obj.append(6)
    # The parent.
    def test_list(self):
        o = self.manager.list()
        o.append(5)
        self.run_worker(self._test_list, o)
        assert o[1] == 6
    """
    manager_class = multiprocessing.managers.SyncManager

    def setUp(self):
        self.manager = self.manager_class()
        self.manager.start()
        self.proc = None

    def tearDown(self):
        if self.proc is not None and self.proc.is_alive():
            self.proc.terminate()
            self.proc.join()
        self.manager.shutdown()
        self.manager = None
        self.proc = None

    @classmethod
    def setUpClass(cls):
        support.reap_children()

    tearDownClass = setUpClass

    def wait_proc_exit(self):
        # Only the manager process should be returned by active_children()
        # but this can take a bit on slow machines, so wait a few seconds
        # if there are other children too (see #17395).
        join_process(self.proc)
        start_time = getattr(time,'monotonic',time.time)()
        t = 0.01
        while len(multiprocessing.active_children()) > 1:
            time.sleep(t)
            t *= 2
            dt = getattr(time,'monotonic',time.time)() - start_time
            if dt >= 5.0:
                test.support.environment_altered = True
                support.print_warning(f"multiprocess.Manager still has "
                                      f"{multiprocessing.active_children()} "
                                      f"active children after {dt} seconds")
                break

    def run_worker(self, worker, obj):
        # Run *worker* in a child process on the shared proxy *obj* and
        # require a clean exit (asserts in the child become exit code != 0).
        self.proc = multiprocessing.Process(target=worker, args=(obj, ))
        self.proc.daemon = True
        self.proc.start()
        self.wait_proc_exit()
        self.assertEqual(self.proc.exitcode, 0)

    @classmethod
    def _test_event(cls, obj):
        assert obj.is_set()
        obj.wait()
        obj.clear()
        obj.wait(0.001)

    def test_event(self):
        o = self.manager.Event()
        o.set()
        self.run_worker(self._test_event, o)
        assert not o.is_set()
        o.wait(0.001)

    @classmethod
    def _test_lock(cls, obj):
        obj.acquire()

    def test_lock(self, lname="Lock"):
        o = getattr(self.manager, lname)()
        self.run_worker(self._test_lock, o)
        o.release()
        self.assertRaises(RuntimeError, o.release)  # already released

    @classmethod
    def _test_rlock(cls, obj):
        obj.acquire()
        obj.release()

    # Bug fix: the default was "Lock", so the manager's RLock proxy was
    # never actually exercised.  Upstream CPython uses lname="RLock".
    def test_rlock(self, lname="RLock"):
        o = getattr(self.manager, lname)()
        self.run_worker(self._test_rlock, o)

    @classmethod
    def _test_semaphore(cls, obj):
        obj.acquire()

    def test_semaphore(self, sname="Semaphore"):
        o = getattr(self.manager, sname)()
        self.run_worker(self._test_semaphore, o)
        o.release()

    def test_bounded_semaphore(self):
        self.test_semaphore(sname="BoundedSemaphore")

    @classmethod
    def _test_condition(cls, obj):
        obj.acquire()
        obj.release()

    def test_condition(self):
        o = self.manager.Condition()
        self.run_worker(self._test_condition, o)

    @classmethod
    def _test_barrier(cls, obj):
        assert obj.parties == 5
        obj.reset()

    def test_barrier(self):
        o = self.manager.Barrier(5)
        self.run_worker(self._test_barrier, o)

    @classmethod
    def _test_pool(cls, obj):
        # TODO: fix https://bugs.python.org/issue35919
        with obj:
            pass

    def test_pool(self):
        o = self.manager.Pool(processes=4)
        self.run_worker(self._test_pool, o)

    @classmethod
    def _test_queue(cls, obj):
        assert obj.qsize() == 2
        assert obj.full()
        assert not obj.empty()
        assert obj.get() == 5
        assert not obj.empty()
        assert obj.get() == 6
        assert obj.empty()

    def test_queue(self, qname="Queue"):
        o = getattr(self.manager, qname)(2)
        o.put(5)
        o.put(6)
        self.run_worker(self._test_queue, o)
        assert o.empty()
        assert not o.full()

    def test_joinable_queue(self):
        self.test_queue("JoinableQueue")

    @classmethod
    def _test_list(cls, obj):
        assert obj[0] == 5
        assert obj.count(5) == 1
        assert obj.index(5) == 0
        obj.sort()
        obj.reverse()
        for x in obj:
            pass
        assert len(obj) == 1
        assert obj.pop(0) == 5

    def test_list(self):
        o = self.manager.list()
        o.append(5)
        self.run_worker(self._test_list, o)
        assert not o
        self.assertEqual(len(o), 0)

    @classmethod
    def _test_dict(cls, obj):
        assert len(obj) == 1
        assert obj['foo'] == 5
        assert obj.get('foo') == 5
        assert list(obj.items()) == [('foo', 5)]
        assert list(obj.keys()) == ['foo']
        assert list(obj.values()) == [5]
        assert obj.copy() == {'foo': 5}
        assert obj.popitem() == ('foo', 5)

    def test_dict(self):
        o = self.manager.dict()
        o['foo'] = 5
        self.run_worker(self._test_dict, o)
        assert not o
        self.assertEqual(len(o), 0)

    @classmethod
    def _test_value(cls, obj):
        assert obj.value == 1
        assert obj.get() == 1
        obj.set(2)

    def test_value(self):
        o = self.manager.Value('i', 1)
        self.run_worker(self._test_value, o)
        self.assertEqual(o.value, 2)
        self.assertEqual(o.get(), 2)

    @classmethod
    def _test_array(cls, obj):
        assert obj[0] == 0
        assert obj[1] == 1
        assert len(obj) == 2
        assert list(obj) == [0, 1]

    def test_array(self):
        o = self.manager.Array('i', [0, 1])
        self.run_worker(self._test_array, o)

    @classmethod
    def _test_namespace(cls, obj):
        assert obj.x == 0
        assert obj.y == 1

    def test_namespace(self):
        o = self.manager.Namespace()
        o.x = 0
        o.y = 1
        self.run_worker(self._test_namespace, o)
class MiscTestCase(unittest.TestCase):
    def test__all__(self):
        # Just make sure names in blacklist are excluded
        support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
                             blacklist=['SUBDEBUG', 'SUBWARNING'])
#
# Mixins
#
class BaseMixin(object):
    """Common setup/teardown for the Processes/Manager/Threads mixins:
    snapshot dangling processes/threads before the class runs and warn
    about any new ones left behind afterwards."""
    @classmethod
    def setUpClass(cls):
        cls.dangling = (multiprocessing.process._dangling.copy(),
                        threading._dangling.copy())
    @classmethod
    def tearDownClass(cls):
        # bpo-26762: Some multiprocessing objects like Pool create reference
        # cycles. Trigger a garbage collection to break these cycles.
        test.support.gc_collect()
        processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
        if processes:
            test.support.environment_altered = True
            support.print_warning(f'Dangling processes: {processes}')
        processes = None
        threads = set(threading._dangling) - set(cls.dangling[1])
        if threads:
            test.support.environment_altered = True
            support.print_warning(f'Dangling threads: {threads}')
        threads = None
class ProcessesMixin(BaseMixin):
    """Mixin mapping the test API onto the plain process-based
    multiprocessing primitives."""
    TYPE = 'processes'
    Process = multiprocessing.Process
    connection = multiprocessing.connection
    current_process = staticmethod(multiprocessing.current_process)
    parent_process = staticmethod(multiprocessing.parent_process)
    active_children = staticmethod(multiprocessing.active_children)
    Pool = staticmethod(multiprocessing.Pool)
    Pipe = staticmethod(multiprocessing.Pipe)
    Queue = staticmethod(multiprocessing.Queue)
    JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
    Lock = staticmethod(multiprocessing.Lock)
    RLock = staticmethod(multiprocessing.RLock)
    Semaphore = staticmethod(multiprocessing.Semaphore)
    BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
    Condition = staticmethod(multiprocessing.Condition)
    Event = staticmethod(multiprocessing.Event)
    Barrier = staticmethod(multiprocessing.Barrier)
    Value = staticmethod(multiprocessing.Value)
    Array = staticmethod(multiprocessing.Array)
    RawValue = staticmethod(multiprocessing.RawValue)
    RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
    """Mixin mapping the test API onto manager proxies.  Properties are
    used because ``cls.manager`` only exists after setUpClass runs."""
    TYPE = 'manager'
    Process = multiprocessing.Process
    Queue = property(operator.attrgetter('manager.Queue'))
    JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
    Lock = property(operator.attrgetter('manager.Lock'))
    RLock = property(operator.attrgetter('manager.RLock'))
    Semaphore = property(operator.attrgetter('manager.Semaphore'))
    BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
    Condition = property(operator.attrgetter('manager.Condition'))
    Event = property(operator.attrgetter('manager.Event'))
    Barrier = property(operator.attrgetter('manager.Barrier'))
    Value = property(operator.attrgetter('manager.Value'))
    Array = property(operator.attrgetter('manager.Array'))
    list = property(operator.attrgetter('manager.list'))
    dict = property(operator.attrgetter('manager.dict'))
    Namespace = property(operator.attrgetter('manager.Namespace'))
    @classmethod
    def Pool(cls, *args, **kwds):
        return cls.manager.Pool(*args, **kwds)
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.manager = multiprocessing.Manager()
    @classmethod
    def tearDownClass(cls):
        # only the manager process should be returned by active_children()
        # but this can take a bit on slow machines, so wait a few seconds
        # if there are other children too (see #17395)
        start_time = getattr(time,'monotonic',time.time)()
        t = 0.01
        while len(multiprocessing.active_children()) > 1:
            time.sleep(t)
            t *= 2
            dt = getattr(time,'monotonic',time.time)() - start_time
            if dt >= 5.0:
                test.support.environment_altered = True
                support.print_warning(f"multiprocess.Manager still has "
                                      f"{multiprocessing.active_children()} "
                                      f"active children after {dt} seconds")
                break
        gc.collect()                       # do garbage collection
        if cls.manager._number_of_objects() != 0:
            # This is not really an error since some tests do not
            # ensure that all processes which hold a reference to a
            # managed object have been joined.
            test.support.environment_altered = True
            support.print_warning('Shared objects which still exist '
                                  'at manager shutdown:')
            support.print_warning(cls.manager._debug_info())
        cls.manager.shutdown()
        cls.manager.join()
        cls.manager = None
        super().tearDownClass()
class ThreadsMixin(BaseMixin):
    """Mixin mapping the test API onto multiprocessing.dummy, the
    thread-backed emulation of the process API."""
    TYPE = 'threads'
    Process = multiprocessing.dummy.Process
    connection = multiprocessing.dummy.connection
    current_process = staticmethod(multiprocessing.dummy.current_process)
    active_children = staticmethod(multiprocessing.dummy.active_children)
    Pool = staticmethod(multiprocessing.dummy.Pool)
    Pipe = staticmethod(multiprocessing.dummy.Pipe)
    Queue = staticmethod(multiprocessing.dummy.Queue)
    JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
    Lock = staticmethod(multiprocessing.dummy.Lock)
    RLock = staticmethod(multiprocessing.dummy.RLock)
    Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
    BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
    Condition = staticmethod(multiprocessing.dummy.Condition)
    Event = staticmethod(multiprocessing.dummy.Event)
    Barrier = staticmethod(multiprocessing.dummy.Barrier)
    Value = staticmethod(multiprocessing.dummy.Value)
    Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
    """Clone this module's test cases into *remote_globs* for one start method.

    For every BaseTestCase subclass, one concrete TestCase per allowed
    type ('processes', 'threads', 'manager') is generated by mixing in
    the matching *Mixin class; plain TestCase subclasses are copied as-is.
    Also installs setUpModule/tearDownModule that force *start_method*
    and warn about dangling processes/threads.
    """
    __module__ = remote_globs['__name__']
    local_globs = globals()
    ALL_TYPES = {'processes', 'threads', 'manager'}
    for name, base in local_globs.items():
        if not isinstance(base, type):
            continue
        if issubclass(base, BaseTestCase):
            if base is BaseTestCase:
                continue
            assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
            for type_ in base.ALLOWED_TYPES:
                # e.g. _TestFoo + 'processes' -> WithProcessesTestFoo
                newname = 'With' + type_.capitalize() + name[1:]
                Mixin = local_globs[type_.capitalize() + 'Mixin']
                class Temp(base, Mixin, unittest.TestCase):
                    pass
                if type_ == 'manager':
                    Temp = hashlib_helper.requires_hashdigest('md5')(Temp)
                Temp.__name__ = Temp.__qualname__ = newname
                Temp.__module__ = __module__
                remote_globs[newname] = Temp
        elif issubclass(base, unittest.TestCase):
            class Temp(base, object):
                pass
            Temp.__name__ = Temp.__qualname__ = name
            Temp.__module__ = __module__
            remote_globs[name] = Temp
    dangling = [None, None]
    old_start_method = [None]
    def setUpModule():
        multiprocessing.set_forkserver_preload(PRELOAD)
        multiprocessing.process._cleanup()
        dangling[0] = multiprocessing.process._dangling.copy()
        dangling[1] = threading._dangling.copy()
        old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
        try:
            multiprocessing.set_start_method(start_method, force=True)
        except ValueError:
            raise unittest.SkipTest(start_method +
                                    ' start method not supported')
        if sys.platform.startswith("linux"):
            try:
                lock = multiprocessing.RLock()
            except OSError:
                raise unittest.SkipTest("OSError raises on RLock creation, "
                                        "see issue 3111!")
        check_enough_semaphores()
        util.get_temp_dir()     # creates temp directory
        multiprocessing.get_logger().setLevel(LOG_LEVEL)
    def tearDownModule():
        need_sleep = False
        # bpo-26762: Some multiprocessing objects like Pool create reference
        # cycles. Trigger a garbage collection to break these cycles.
        test.support.gc_collect()
        multiprocessing.set_start_method(old_start_method[0], force=True)
        # pause a bit so we don't get warning about dangling threads/processes
        processes = set(multiprocessing.process._dangling) - set(dangling[0])
        if processes:
            need_sleep = True
            test.support.environment_altered = True
            support.print_warning(f'Dangling processes: {processes}')
        processes = None
        threads = set(threading._dangling) - set(dangling[1])
        if threads:
            need_sleep = True
            test.support.environment_altered = True
            support.print_warning(f'Dangling threads: {threads}')
        threads = None
        # Sleep 500 ms to give time to child processes to complete.
        if need_sleep:
            time.sleep(0.5)
        multiprocessing.util._cleanup_tests()
    remote_globs['setUpModule'] = setUpModule
    remote_globs['tearDownModule'] = tearDownModule
| 32.91179 | 87 | 0.58682 |
import unittest
import unittest.mock
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import subprocess
import struct
import operator
import pickle
import weakref
import warnings
import test.support
import test.support.script_helper
from test import support
from test.support import hashlib_helper
from test.support import socket_helper
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocess.synchronize')
import threading
import multiprocess as multiprocessing
import multiprocess.connection
import multiprocess.dummy
import multiprocess.heap
import multiprocess.managers
import multiprocess.pool
import multiprocess.queues
from multiprocess import util
try:
from multiprocess import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocess.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
from multiprocess import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
try:
import msvcrt
except ImportError:
msvcrt = None
def latin(s):
    """Encode the text *s* with the Latin-1 codec and return the bytes."""
    return bytes(s, 'latin')
def close_queue(queue):
    """Close *queue* and join its feeder thread.

    A no-op for queue-like substitutes (e.g. manager proxies) that are
    not real ``multiprocessing.queues.Queue`` instances.
    """
    if not isinstance(queue, multiprocessing.queues.Queue):
        return
    queue.close()
    queue.join_thread()
def join_process(process):
    # Since multiprocessing.Process has the same API as threading.Thread
    # (join() and is_alive()), the test.support thread helper can be
    # reused to join the process with the global test timeout.
    support.join_thread(process)
# POSIX only: helper to unlink a leaked named resource through the
# resource tracker's cleanup table.
if os.name == "posix":
    from multiprocess import resource_tracker

    def _resource_unlink(name, rtype):
        # rtype must be a key of _CLEANUP_FUNCS (presumably 'semaphore' /
        # 'shared_memory' — verify against resource_tracker).
        resource_tracker._CLEANUP_FUNCS[rtype](name)
#
# Constants
#

# Log level applied to multiprocessing's own logger in setUpModule.
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG

# Generic short pause to let background threads/processes make progress.
DELTA = 0.1
CHECK_TIMINGS = False     # making true makes tests take a lot longer
                          # and can sometimes cause some non-serious
                          # failures because some calls block a bit
                          # longer than expected
if CHECK_TIMINGS:
    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1

# False when the platform's sem_getvalue() is broken/unavailable.
HAVE_GETVALUE = not getattr(_multiprocessing,
                            'HAVE_BROKEN_SEM_GETVALUE', False)

WIN32 = (sys.platform == "win32")
from multiprocess.connection import wait
def wait_for_handle(handle, timeout):
    """Wait for *handle* to become ready; a negative timeout blocks forever."""
    block_forever = timeout is not None and timeout < 0.0
    effective_timeout = None if block_forever else timeout
    return wait([handle], effective_timeout)
# Highest file descriptor number the OS allows (fallback when unknown).
try:
    MAXFD = os.sysconf("SC_OPEN_MAX")
except (AttributeError, ValueError, OSError):
    # os.sysconf may be missing (e.g. Windows), reject the name, or fail
    # at the OS level.  The previous bare "except:" also swallowed
    # SystemExit and KeyboardInterrupt, which must propagate.
    MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
    from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
    # Fallback placeholders keep this module importable when ctypes is
    # unavailable; tests that need the real types are skipped elsewhere.
    Structure = object
    c_int = c_double = c_longlong = None
def check_enough_semaphores():
    """Skip the running test if the platform advertises too few semaphores."""
    # Minimum number of semaphores available according to POSIX.
    required = 256
    try:
        available = os.sysconf("SC_SEM_NSEMS_MAX")
    except (AttributeError, ValueError):
        # sysconf is unavailable, or it does not know this setting.
        return
    # -1 means "no limit reported"; anything >= the POSIX minimum is fine.
    if available != -1 and available < required:
        raise unittest.SkipTest("The OS doesn't support enough semaphores "
                                "to run the test (required: %d)." % required)
class TimingWrapper(object):
    """Callable proxy that records how long each invocation took.

    The duration of the most recent call is kept in ``elapsed`` (None
    until the first call), even when the wrapped callable raises.
    """

    def __init__(self, func):
        self.func = func
        self.elapsed = None

    def __call__(self, *args, **kwds):
        # time.monotonic where available; fall back to time.time otherwise.
        clock = getattr(time, 'monotonic', time.time)
        started = clock()
        try:
            return self.func(*args, **kwds)
        finally:
            self.elapsed = clock() - started
class BaseTestCase(object):
    """Mixin providing helpers shared by every multiprocessing test case."""

    ALLOWED_TYPES = ('processes', 'manager', 'threads')

    def assertTimingAlmostEqual(self, a, b):
        # Timing comparisons only run when CHECK_TIMINGS is enabled.
        if CHECK_TIMINGS:
            self.assertAlmostEqual(a, b, 1)

    def assertReturnsIfImplemented(self, value, func, *args):
        # Backends that don't implement the queried API are accepted silently.
        try:
            res = func(*args)
        except NotImplementedError:
            return None
        return self.assertEqual(value, res)

    def __reduce__(self, *args):
        # Test cases must never cross a process boundary via pickling.
        raise NotImplementedError("shouldn't try to pickle a test case")

    __reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
    """Return the current value of a semaphore-like object.

    Tries the public accessor first, then the private attributes used by
    the various backends; raises NotImplementedError when none exists.
    """
    try:
        return self.get_value()
    except AttributeError:
        pass
    try:
        return self._Semaphore__value
    except AttributeError:
        pass
    try:
        return self._value
    except AttributeError:
        raise NotImplementedError
#
# Testcases
#
class DummyCallable:
    """Picklable callable target used to check that Process drops its
    reference to the target after the child has run (test_lose_target_ref).
    """

    def __call__(self, q, c):
        # The extra argument must round-trip as a DummyCallable instance.
        assert isinstance(c, DummyCallable)
        q.put(5)
class _TestProcess(BaseTestCase):
    """Tests of the Process API: start/join/terminate/kill/close, parent
    process links, sentinels, fd usage and forkserver resilience.
    """

    ALLOWED_TYPES = ('processes', 'threads')

    def test_current(self):
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        current = self.current_process()
        authkey = current.authkey
        self.assertTrue(current.is_alive())
        self.assertTrue(not current.daemon)
        self.assertIsInstance(authkey, bytes)
        self.assertTrue(len(authkey) > 0)
        self.assertEqual(current.ident, os.getpid())
        self.assertEqual(current.exitcode, None)

    def test_daemon_argument(self):
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # By default a Process inherits the current process's daemon flag.
        proc0 = self.Process(target=self._test)
        self.assertEqual(proc0.daemon, self.current_process().daemon)
        proc1 = self.Process(target=self._test, daemon=True)
        self.assertTrue(proc1.daemon)
        proc2 = self.Process(target=self._test, daemon=False)
        self.assertFalse(proc2.daemon)

    @classmethod
    def _test(cls, q, *args, **kwds):
        # Child body: echo back args/kwargs plus identity information.
        current = cls.current_process()
        q.put(args)
        q.put(kwds)
        q.put(current.name)
        if cls.TYPE != 'threads':
            q.put(bytes(current.authkey))
            q.put(current.pid)

    def test_parent_process_attributes(self):
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        self.assertIsNone(self.parent_process())
        rconn, wconn = self.Pipe(duplex=False)
        p = self.Process(target=self._test_send_parent_process, args=(wconn,))
        p.start()
        p.join()
        parent_pid, parent_name = rconn.recv()
        self.assertEqual(parent_pid, self.current_process().pid)
        self.assertEqual(parent_pid, os.getpid())
        self.assertEqual(parent_name, self.current_process().name)

    @classmethod
    def _test_send_parent_process(cls, wconn):
        from multiprocess.process import parent_process
        wconn.send([parent_process().pid, parent_process().name])

    def _test_parent_process(self):
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # Launch a child which launches a grandchild; kill the child and
        # check the grandchild notices its parent's death.
        rconn, wconn = self.Pipe(duplex=False)
        p = self.Process(
            target=self._test_create_grandchild_process, args=(wconn, ))
        p.start()
        if not rconn.poll(timeout=support.LONG_TIMEOUT):
            raise AssertionError("Could not communicate with child process")
        parent_process_status = rconn.recv()
        self.assertEqual(parent_process_status, "alive")
        p.terminate()
        p.join()
        if not rconn.poll(timeout=support.LONG_TIMEOUT):
            raise AssertionError("Could not communicate with child process")
        parent_process_status = rconn.recv()
        self.assertEqual(parent_process_status, "not alive")

    @classmethod
    def _test_create_grandchild_process(cls, wconn):
        p = cls.Process(target=cls._test_report_parent_status, args=(wconn, ))
        p.start()
        time.sleep(300)

    @classmethod
    def _test_report_parent_status(cls, wconn):
        from multiprocess.process import parent_process
        wconn.send("alive" if parent_process().is_alive() else "not alive")
        parent_process().join(timeout=support.SHORT_TIMEOUT)
        wconn.send("alive" if parent_process().is_alive() else "not alive")

    def test_process(self):
        q = self.Queue(1)
        e = self.Event()
        args = (q, 1, 2)
        kwargs = {'hello':23, 'bye':2.54}
        name = 'SomeProcess'
        p = self.Process(
            target=self._test, args=args, kwargs=kwargs, name=name
            )
        p.daemon = True
        current = self.current_process()
        if self.TYPE != 'threads':
            self.assertEqual(p.authkey, current.authkey)
        self.assertEqual(p.is_alive(), False)
        self.assertEqual(p.daemon, True)
        self.assertNotIn(p, self.active_children())
        self.assertTrue(type(self.active_children()) is list)
        self.assertEqual(p.exitcode, None)
        p.start()
        self.assertEqual(p.exitcode, None)
        self.assertEqual(p.is_alive(), True)
        self.assertIn(p, self.active_children())
        # The child echoes everything except the queue argument itself.
        self.assertEqual(q.get(), args[1:])
        self.assertEqual(q.get(), kwargs)
        self.assertEqual(q.get(), p.name)
        if self.TYPE != 'threads':
            self.assertEqual(q.get(), current.authkey)
            self.assertEqual(q.get(), p.pid)
        p.join()
        self.assertEqual(p.exitcode, 0)
        self.assertEqual(p.is_alive(), False)
        self.assertNotIn(p, self.active_children())
        close_queue(q)

    @unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id")
    def test_process_mainthread_native_id(self):
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        current_mainthread_native_id = threading.main_thread().native_id
        q = self.Queue(1)
        p = self.Process(target=self._test_process_mainthread_native_id, args=(q,))
        p.start()
        child_mainthread_native_id = q.get()
        p.join()
        close_queue(q)
        self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id)

    @classmethod
    def _test_process_mainthread_native_id(cls, q):
        mainthread_native_id = threading.main_thread().native_id
        q.put(mainthread_native_id)

    @classmethod
    def _sleep_some(cls):
        time.sleep(100)

    @classmethod
    def _test_sleep(cls, delay):
        time.sleep(delay)

    def _kill_process(self, meth):
        # Common body for test_terminate/test_kill: *meth* is the
        # unbound Process.terminate or Process.kill.
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        p = self.Process(target=self._sleep_some)
        p.daemon = True
        p.start()
        self.assertEqual(p.is_alive(), True)
        self.assertIn(p, self.active_children())
        self.assertEqual(p.exitcode, None)
        join = TimingWrapper(p.join)
        self.assertEqual(join(0), None)
        self.assertTimingAlmostEqual(join.elapsed, 0.0)
        self.assertEqual(p.is_alive(), True)
        self.assertEqual(join(-1), None)
        self.assertTimingAlmostEqual(join.elapsed, 0.0)
        self.assertEqual(p.is_alive(), True)
        # give the child a chance to start before signalling it
        time.sleep(1)
        meth(p)
        if hasattr(signal, 'alarm'):
            # On the Gentoo buildbot, joining the process can hang; use an
            # alarm as a watchdog so the failure is a clean RuntimeError.
            def handler(*args):
                raise RuntimeError('join took too long: %s' % p)
            old_handler = signal.signal(signal.SIGALRM, handler)
            try:
                signal.alarm(10)
                self.assertEqual(join(), None)
            finally:
                signal.alarm(0)
                signal.signal(signal.SIGALRM, old_handler)
        else:
            self.assertEqual(join(), None)
        self.assertTimingAlmostEqual(join.elapsed, 0.0)
        self.assertEqual(p.is_alive(), False)
        self.assertNotIn(p, self.active_children())
        p.join()
        return p.exitcode

    def test_terminate(self):
        exitcode = self._kill_process(multiprocessing.Process.terminate)
        if os.name != 'nt':
            self.assertEqual(exitcode, -signal.SIGTERM)

    def test_kill(self):
        exitcode = self._kill_process(multiprocessing.Process.kill)
        if os.name != 'nt':
            self.assertEqual(exitcode, -signal.SIGKILL)

    def test_cpu_count(self):
        try:
            cpus = multiprocessing.cpu_count()
        except NotImplementedError:
            cpus = 1
        self.assertTrue(type(cpus) is int)
        self.assertTrue(cpus >= 1)

    def test_active_children(self):
        self.assertEqual(type(self.active_children()), list)
        p = self.Process(target=time.sleep, args=(DELTA,))
        self.assertNotIn(p, self.active_children())
        p.daemon = True
        p.start()
        self.assertIn(p, self.active_children())
        p.join()
        self.assertNotIn(p, self.active_children())

    @classmethod
    def _test_recursion(cls, wconn, id):
        wconn.send(id)
        if len(id) < 2:
            for i in range(2):
                p = cls.Process(
                    target=cls._test_recursion, args=(wconn, id+[i])
                    )
                p.start()
                p.join()

    @unittest.skipIf(True, "fails with is_dill(obj, child=True)")
    def test_recursion(self):
        rconn, wconn = self.Pipe(duplex=False)
        self._test_recursion(wconn, [])
        time.sleep(DELTA)
        result = []
        while rconn.poll():
            result.append(rconn.recv())
        # depth-first order of the binary tree of child ids
        expected = [
            [],
            [0],
            [0, 0],
            [0, 1],
            [1],
            [1, 0],
            [1, 1]
            ]
        self.assertEqual(result, expected)

    @classmethod
    def _test_sentinel(cls, event):
        event.wait(10.0)

    def test_sentinel(self):
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        event = self.Event()
        p = self.Process(target=self._test_sentinel, args=(event,))
        # sentinel is only available once the process has been started
        with self.assertRaises(ValueError):
            p.sentinel
        p.start()
        self.addCleanup(p.join)
        sentinel = p.sentinel
        self.assertIsInstance(sentinel, int)
        self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
        event.set()
        p.join()
        self.assertTrue(wait_for_handle(sentinel, timeout=1))

    @classmethod
    def _test_close(cls, rc=0, q=None):
        if q is not None:
            q.get()
        sys.exit(rc)

    def test_close(self):
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        q = self.Queue()
        p = self.Process(target=self._test_close, kwargs={'q': q})
        p.daemon = True
        p.start()
        self.assertEqual(p.is_alive(), True)
        # Cannot close a process while it is still alive.
        with self.assertRaises(ValueError):
            p.close()
        q.put(None)
        p.join()
        self.assertEqual(p.is_alive(), False)
        self.assertEqual(p.exitcode, 0)
        p.close()
        # After close() every query on the process must raise.
        with self.assertRaises(ValueError):
            p.is_alive()
        with self.assertRaises(ValueError):
            p.join()
        with self.assertRaises(ValueError):
            p.terminate()
        # close() is idempotent.
        p.close()
        wr = weakref.ref(p)
        del p
        gc.collect()
        self.assertIs(wr(), None)
        close_queue(q)

    def test_many_processes(self):
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        sm = multiprocessing.get_start_method()
        # spawn is much slower to start processes, so use fewer of them
        N = 5 if sm == 'spawn' else 100
        procs = [self.Process(target=self._test_sleep, args=(0.01,))
                 for i in range(N)]
        for p in procs:
            p.start()
        for p in procs:
            join_process(p)
        for p in procs:
            self.assertEqual(p.exitcode, 0)
        procs = [self.Process(target=self._sleep_some)
                 for i in range(N)]
        for p in procs:
            p.start()
        time.sleep(0.001)
        for p in procs:
            p.terminate()
        for p in procs:
            join_process(p)
        if os.name != 'nt':
            exitcodes = [-signal.SIGTERM]
            if sys.platform == 'darwin':
                # macOS sometimes reports SIGKILL for a freshly started
                # process killed with SIGTERM.
                exitcodes.append(-signal.SIGKILL)
            for p in procs:
                self.assertIn(p.exitcode, exitcodes)

    def test_lose_target_ref(self):
        c = DummyCallable()
        wr = weakref.ref(c)
        q = self.Queue()
        p = self.Process(target=c, args=(q, c))
        del c
        p.start()
        p.join()
        # The Process object must not keep the target alive after the run.
        self.assertIs(wr(), None)
        self.assertEqual(q.get(), 5)
        close_queue(q)

    @classmethod
    def _test_child_fd_inflation(self, evt, q):
        q.put(test.support.fd_count())
        evt.wait()

    def test_child_fd_inflation(self):
        # The number of fds in a child should not grow with the number of
        # concurrently running children.
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        sm = multiprocessing.get_start_method()
        if sm == 'fork':
            # fork inherits all of the parent's fds by design
            self.skipTest('test not appropriate for {}'.format(sm))
        N = 5
        evt = self.Event()
        q = self.Queue()
        procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
                 for i in range(N)]
        for p in procs:
            p.start()
        try:
            fd_counts = [q.get() for i in range(N)]
            self.assertEqual(len(set(fd_counts)), 1, fd_counts)
        finally:
            evt.set()
            for p in procs:
                p.join()
            close_queue(q)

    @classmethod
    def _test_wait_for_threads(self, evt):
        def func1():
            time.sleep(0.5)
            evt.set()
        def func2():
            time.sleep(20)
            evt.clear()
        threading.Thread(target=func1).start()
        threading.Thread(target=func2, daemon=True).start()

    def test_wait_for_threads(self):
        # A child process waits for its non-daemonic threads before exiting,
        # so only func1 (non-daemon) gets to run to completion.
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        evt = self.Event()
        proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
        proc.start()
        proc.join()
        self.assertTrue(evt.is_set())

    @classmethod
    def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
        # NOTE: the mutable default is safe here — it is only iterated,
        # never mutated.
        for stream_name, action in break_std_streams.items():
            if action == 'close':
                stream = io.StringIO()
                stream.close()
            else:
                assert action == 'remove'
                stream = None
            # NOTE(review): `stream` is built above but None is installed
            # regardless of the action — looks like this should be
            # `setattr(sys, stream_name, stream)`; confirm against upstream.
            setattr(sys, stream_name, None)
        evt.set()

    def test_error_on_stdio_flush_1(self):
        # Process must exit cleanly even when the parent's std streams are
        # closed or missing.
        streams = [io.StringIO(), None]
        streams[0].close()
        for stream_name in ('stdout', 'stderr'):
            for stream in streams:
                old_stream = getattr(sys, stream_name)
                setattr(sys, stream_name, stream)
                try:
                    evt = self.Event()
                    proc = self.Process(target=self._test_error_on_stdio_flush,
                                        args=(evt,))
                    proc.start()
                    proc.join()
                    self.assertTrue(evt.is_set())
                    self.assertEqual(proc.exitcode, 0)
                finally:
                    setattr(sys, stream_name, old_stream)

    def test_error_on_stdio_flush_2(self):
        # Same as above, but the streams are broken by the child itself.
        for stream_name in ('stdout', 'stderr'):
            for action in ('close', 'remove'):
                old_stream = getattr(sys, stream_name)
                try:
                    evt = self.Event()
                    proc = self.Process(target=self._test_error_on_stdio_flush,
                                        args=(evt, {stream_name: action}))
                    proc.start()
                    proc.join()
                    self.assertTrue(evt.is_set())
                    self.assertEqual(proc.exitcode, 0)
                finally:
                    setattr(sys, stream_name, old_stream)

    @classmethod
    def _sleep_and_set_event(self, evt, delay=0.0):
        time.sleep(delay)
        evt.set()

    def check_forkserver_death(self, signum):
        # Even after the forkserver process dies, creating and running new
        # Process instances must keep working.
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        sm = multiprocessing.get_start_method()
        if sm != 'forkserver':
            self.skipTest('test not appropriate for {}'.format(sm))
        from multiprocess.forkserver import _forkserver
        _forkserver.ensure_running()
        delay = 0.5
        evt = self.Event()
        proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
        proc.start()
        pid = _forkserver._forkserver_pid
        os.kill(pid, signum)
        # give the forkserver time to die and proc time to complete
        time.sleep(delay * 2.0)
        evt2 = self.Event()
        proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
        proc2.start()
        proc2.join()
        self.assertTrue(evt2.is_set())
        self.assertEqual(proc2.exitcode, 0)
        proc.join()
        self.assertTrue(evt.is_set())
        self.assertIn(proc.exitcode, (0, 255))

    def test_forkserver_sigint(self):
        # Catchable signal
        self.check_forkserver_death(signal.SIGINT)

    def test_forkserver_sigkill(self):
        # Uncatchable signal
        if os.name != 'nt':
            self.check_forkserver_death(signal.SIGKILL)
class _UpperCaser(multiprocessing.Process):
    """Worker process that upper-cases strings received over a pipe.

    Used by _TestSubclassingProcess to check Process subclassing.
    """

    def __init__(self):
        multiprocessing.Process.__init__(self)
        self.child_conn, self.parent_conn = multiprocessing.Pipe()

    def run(self):
        # Child side: close the unused end, then serve upper-cased replies
        # until the None sentinel arrives.
        self.parent_conn.close()
        for s in iter(self.child_conn.recv, None):
            self.child_conn.send(s.upper())
        self.child_conn.close()

    def submit(self, s):
        # Parent side: send one string and wait for the reply.
        assert type(s) is str
        self.parent_conn.send(s)
        return self.parent_conn.recv()

    def stop(self):
        # The None sentinel terminates the child's recv loop.
        self.parent_conn.send(None)
        self.parent_conn.close()
        self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
    """Tests for Process subclassing and child stderr/exit-code handling."""

    ALLOWED_TYPES = ('processes',)

    def test_subclassing(self):
        uppercaser = _UpperCaser()
        uppercaser.daemon = True
        uppercaser.start()
        self.assertEqual(uppercaser.submit('hello'), 'HELLO')
        self.assertEqual(uppercaser.submit('world'), 'WORLD')
        uppercaser.stop()
        uppercaser.join()

    def test_stderr_flush(self):
        # sys.stderr is flushed at process shutdown (issue #13812)
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        testfn = test.support.TESTFN
        self.addCleanup(test.support.unlink, testfn)
        proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
        proc.start()
        proc.join()
        with open(testfn, 'r') as f:
            err = f.read()
            # the whole traceback was printed
            self.assertIn("ZeroDivisionError", err)
            self.assertIn("__init__.py", err)
            self.assertIn("1/0 # MARKER", err)

    @classmethod
    def _test_stderr_flush(cls, testfn):
        fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
        sys.stderr = open(fd, 'w', closefd=False)
        1/0 # MARKER

    @classmethod
    def _test_sys_exit(cls, reason, testfn):
        fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
        sys.stderr = open(fd, 'w', closefd=False)
        sys.exit(reason)

    def test_sys_exit(self):
        # See Issue 13854
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        testfn = test.support.TESTFN
        self.addCleanup(test.support.unlink, testfn)
        # Non-int exit reasons are printed to stderr and map to exitcode 1.
        for reason in (
            [1, 2, 3],
            'ignore this',
        ):
            p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
            p.daemon = True
            p.start()
            join_process(p)
            self.assertEqual(p.exitcode, 1)
            with open(testfn, 'r') as f:
                content = f.read()
            self.assertEqual(content.rstrip(), str(reason))
            os.unlink(testfn)
        # bool/int/None reasons map directly to an exit status.
        cases = [
            ((True,), 1),
            ((False,), 0),
            ((8,), 8),
            ((None,), 0),
            ((), 0),
            ]
        for args, expected in cases:
            with self.subTest(args=args):
                p = self.Process(target=sys.exit, args=args)
                p.daemon = True
                p.start()
                join_process(p)
                self.assertEqual(p.exitcode, expected)
def queue_empty(q):
    """Return True if *q* holds no items.

    Falls back to qsize() for queue types without an empty() method.
    """
    probe = getattr(q, 'empty', None)
    if probe is not None:
        return probe()
    return q.qsize() == 0
def queue_full(q, maxsize):
    """Return True if *q* holds *maxsize* items.

    Falls back to qsize() for queue types without a full() method.
    """
    probe = getattr(q, 'full', None)
    if probe is not None:
        return probe()
    return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
    """Tests for Queue/JoinableQueue: put/get semantics, timeouts, fork
    behavior, task_done/join and feeder-thread error handling.
    """

    @classmethod
    def _test_put(cls, queue, child_can_start, parent_can_continue):
        child_can_start.wait()
        # drain the six items the parent put on the full queue
        for i in range(6):
            queue.get()
        parent_can_continue.set()

    def test_put(self):
        MAXSIZE = 6
        queue = self.Queue(maxsize=MAXSIZE)
        child_can_start = self.Event()
        parent_can_continue = self.Event()
        proc = self.Process(
            target=self._test_put,
            args=(queue, child_can_start, parent_can_continue)
            )
        proc.daemon = True
        proc.start()
        self.assertEqual(queue_empty(queue), True)
        self.assertEqual(queue_full(queue, MAXSIZE), False)
        # exercise every put() signature: default, block, block+timeout,
        # non-blocking, and put_nowait
        queue.put(1)
        queue.put(2, True)
        queue.put(3, True, None)
        queue.put(4, False)
        queue.put(5, False, None)
        queue.put_nowait(6)
        # values may still be in the feeder buffer rather than the pipe
        time.sleep(DELTA)
        self.assertEqual(queue_empty(queue), False)
        self.assertEqual(queue_full(queue, MAXSIZE), True)
        put = TimingWrapper(queue.put)
        put_nowait = TimingWrapper(queue.put_nowait)
        self.assertRaises(pyqueue.Full, put, 7, False)
        self.assertTimingAlmostEqual(put.elapsed, 0)
        self.assertRaises(pyqueue.Full, put, 7, False, None)
        self.assertTimingAlmostEqual(put.elapsed, 0)
        self.assertRaises(pyqueue.Full, put_nowait, 7)
        self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
        self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
        self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
        self.assertTimingAlmostEqual(put.elapsed, 0)
        self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
        child_can_start.set()
        parent_can_continue.wait()
        self.assertEqual(queue_empty(queue), True)
        self.assertEqual(queue_full(queue, MAXSIZE), False)
        proc.join()
        close_queue(queue)

    @classmethod
    def _test_get(cls, queue, child_can_start, parent_can_continue):
        child_can_start.wait()
        queue.put(2)
        queue.put(3)
        queue.put(4)
        queue.put(5)
        parent_can_continue.set()

    def test_get(self):
        queue = self.Queue()
        child_can_start = self.Event()
        parent_can_continue = self.Event()
        proc = self.Process(
            target=self._test_get,
            args=(queue, child_can_start, parent_can_continue)
            )
        proc.daemon = True
        proc.start()
        self.assertEqual(queue_empty(queue), True)
        child_can_start.set()
        parent_can_continue.wait()
        time.sleep(DELTA)
        self.assertEqual(queue_empty(queue), False)
        # exercise every get() signature
        self.assertEqual(queue.get(True, None), 2)
        self.assertEqual(queue.get(True), 3)
        self.assertEqual(queue.get(timeout=1), 4)
        self.assertEqual(queue.get_nowait(), 5)
        self.assertEqual(queue_empty(queue), True)
        get = TimingWrapper(queue.get)
        get_nowait = TimingWrapper(queue.get_nowait)
        self.assertRaises(pyqueue.Empty, get, False)
        self.assertTimingAlmostEqual(get.elapsed, 0)
        self.assertRaises(pyqueue.Empty, get, False, None)
        self.assertTimingAlmostEqual(get.elapsed, 0)
        self.assertRaises(pyqueue.Empty, get_nowait)
        self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
        self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
        self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
        self.assertTimingAlmostEqual(get.elapsed, 0)
        self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
        proc.join()
        close_queue(queue)

    @classmethod
    def _test_fork(cls, queue):
        for i in range(10, 20):
            queue.put(i)
        # note that at this point the items may only be buffered, so the
        # process cannot shutdown until the feeder thread has finished
        # pushing items onto the pipe.

    def test_fork(self):
        # Old versions of Queue would fail to create a new feeder
        # thread for a forked process if the original process had its
        # own feeder thread.  This test checks that this no longer
        # happens.
        queue = self.Queue()
        # put items on queue so that main process starts a feeder thread
        for i in range(10):
            queue.put(i)
        # wait to make sure thread starts before we fork a new process
        time.sleep(DELTA)
        # fork process
        p = self.Process(target=self._test_fork, args=(queue,))
        p.daemon = True
        p.start()
        # check that all expected items are in the queue
        for i in range(20):
            self.assertEqual(queue.get(), i)
        self.assertRaises(pyqueue.Empty, queue.get, False)
        p.join()
        close_queue(queue)

    def test_qsize(self):
        q = self.Queue()
        try:
            self.assertEqual(q.qsize(), 0)
        except NotImplementedError:
            self.skipTest('qsize method not implemented')
        q.put(1)
        self.assertEqual(q.qsize(), 1)
        q.put(5)
        self.assertEqual(q.qsize(), 2)
        q.get()
        self.assertEqual(q.qsize(), 1)
        q.get()
        self.assertEqual(q.qsize(), 0)
        close_queue(q)

    @classmethod
    def _test_task_done(cls, q):
        for obj in iter(q.get, None):
            time.sleep(DELTA)
            q.task_done()

    def test_task_done(self):
        queue = self.JoinableQueue()
        workers = [self.Process(target=self._test_task_done, args=(queue,))
                   for i in range(4)]
        for p in workers:
            p.daemon = True
            p.start()
        for i in range(10):
            queue.put(i)
        # join() returns only once every put item is marked task_done()
        queue.join()
        # one None sentinel per worker terminates its get loop
        for p in workers:
            queue.put(None)
        for p in workers:
            p.join()
        close_queue(queue)

    def test_no_import_lock_contention(self):
        with test.support.temp_cwd():
            module_name = 'imported_by_an_imported_module'
            with open(module_name + '.py', 'w') as f:
                f.write("""if 1:
                    import multiprocess as multiprocessing
                    q = multiprocessing.Queue()
                    q.put('knock knock')
                    q.get(timeout=3)
                    q.close()
                    del q
                """)
            with test.support.DirsOnSysPath(os.getcwd()):
                try:
                    # importing the module must not deadlock on the
                    # import lock while the queue round-trips an item
                    __import__(module_name)
                except pyqueue.Empty:
                    self.fail("Probable regression on import lock contention;"
                              " see Issue #22853")

    def test_timeout(self):
        q = multiprocessing.Queue()
        start = getattr(time,'monotonic',time.time)()
        self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
        delta = getattr(time,'monotonic',time.time)() - start
        # Tolerate a delta of 100 ms because of clock resolution issues
        # (notably on Windows).
        self.assertGreaterEqual(delta, 0.100)
        close_queue(q)

    def test_queue_feeder_donot_stop_onexc(self):
        # The feeder thread must survive an exception while serializing
        # an object and keep serving subsequent items.
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        class NotSerializable(object):
            def __reduce__(self):
                raise AttributeError
        with test.support.captured_stderr():
            q = self.Queue()
            q.put(NotSerializable())
            q.put(True)
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
            close_queue(q)
        with test.support.captured_stderr():
            # the queue size must be correctly adjusted after the error
            q = self.Queue(maxsize=1)
            q.put(NotSerializable())
            q.put(True)
            try:
                self.assertEqual(q.qsize(), 1)
            except NotImplementedError:
                # qsize is not available everywhere (relies on sem_getvalue)
                pass
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
            # the failed item must not linger in the queue
            self.assertTrue(q.empty())
            close_queue(q)

    def test_queue_feeder_on_queue_feeder_error(self):
        # Serialization failures in the feeder thread are routed to the
        # _on_queue_feeder_error hook.
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        class NotSerializable(object):
            def __init__(self):
                self.reduce_was_called = False
                self.on_queue_feeder_error_was_called = False
            def __reduce__(self):
                self.reduce_was_called = True
                raise AttributeError
        class SafeQueue(multiprocessing.queues.Queue):
            """Queue with an overloaded _on_queue_feeder_error hook."""
            @staticmethod
            def _on_queue_feeder_error(e, obj):
                if (isinstance(e, AttributeError) and
                        isinstance(obj, NotSerializable)):
                    obj.on_queue_feeder_error_was_called = True
        not_serializable_obj = NotSerializable()
        # captured_stderr reduces the noise in the test report
        with test.support.captured_stderr():
            q = SafeQueue(ctx=multiprocessing.get_context())
            q.put(not_serializable_obj)
            # the queue must still be functional after the failure
            q.put(True)
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
        self.assertTrue(not_serializable_obj.reduce_was_called)
        self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)

    def test_closed_queue_put_get_exceptions(self):
        for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
            q.close()
            with self.assertRaisesRegex(ValueError, 'is closed'):
                q.put('foo')
            with self.assertRaisesRegex(ValueError, 'is closed'):
                q.get()
class _TestLock(BaseTestCase):
    """Sanity checks for Lock and RLock acquire/release semantics."""

    def test_lock(self):
        lock = self.Lock()
        self.assertEqual(lock.acquire(), True)
        # a non-blocking second acquire on a held lock must fail
        self.assertEqual(lock.acquire(False), False)
        self.assertEqual(lock.release(), None)
        # releasing an unheld lock raises (exact type varies by backend)
        self.assertRaises((ValueError, threading.ThreadError), lock.release)

    def test_rlock(self):
        lock = self.RLock()
        # the owner may re-acquire recursively; each acquire needs a release
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertRaises((AssertionError, RuntimeError), lock.release)

    def test_lock_context(self):
        # a Lock must be usable as a context manager
        with self.Lock():
            pass
class _TestSemaphore(BaseTestCase):
    """Counting and timeout behavior of Semaphore and BoundedSemaphore."""

    def _test_semaphore(self, sem):
        # sem starts at 2: two acquires succeed, a third non-blocking one
        # fails, and releases restore the count.
        self.assertReturnsIfImplemented(2, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(sem.acquire(False), False)
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(2, get_value, sem)

    def test_semaphore(self):
        sem = self.Semaphore(2)
        self._test_semaphore(sem)
        # a plain Semaphore may be released beyond its initial value
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(3, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(4, get_value, sem)

    def test_bounded_semaphore(self):
        sem = self.BoundedSemaphore(2)
        self._test_semaphore(sem)

    def test_timeout(self):
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        sem = self.Semaphore(0)
        acquire = TimingWrapper(sem.acquire)
        # non-blocking acquires on an exhausted semaphore return at once;
        # blocking ones wait for the given timeout
        self.assertEqual(acquire(False), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
        self.assertEqual(acquire(False, None), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
        self.assertEqual(acquire(False, TIMEOUT1), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0)
        self.assertEqual(acquire(True, TIMEOUT2), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
        self.assertEqual(acquire(timeout=TIMEOUT3), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
sleeping.acquire()
sleeping.acquire()
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
cond.acquire()
cond.notify()
cond.release()
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
cond.acquire()
cond.notify()
cond.release()
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
for i in range(6):
sleeping.acquire()
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
self.check_invariant(cond)
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
for i in range(6):
sleeping.acquire()
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
cond.acquire()
cond.notify_all()
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
for i in range(6):
sleeping.acquire()
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
cond.acquire()
cond.notify(n=2)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 2)
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
    # wait() with a timeout and no notifier must return False after
    # roughly the requested interval.
    cond = self.Condition()
    wait = TimingWrapper(cond.wait)
    cond.acquire()
    res = wait(TIMEOUT1)
    cond.release()
    self.assertEqual(res, False)
    self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = getattr(time,'monotonic',time.time)()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = getattr(time,'monotonic',time.time)() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
    # Tests for Event objects across the process/thread/manager backends.

    @classmethod
    def _test_event(cls, event):
        # Child: wait a while, then set the event so the parent's
        # blocking wait() call can return True.
        time.sleep(TIMEOUT2)
        event.set()

    def test_event(self):
        event = self.Event()
        wait = TimingWrapper(event.wait)

        # Removed temporarily, due to API shear, this does not
        # work with threading._Event objects. is_set == isSet
        self.assertEqual(event.is_set(), False)

        # Removed, threading.Event.wait() will return the value of the __flag
        # instead of None. API Shear with the semaphore backed mp.Event
        self.assertEqual(wait(0.0), False)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)

        event.set()

        # See note above on the API differences
        self.assertEqual(event.is_set(), True)
        self.assertEqual(wait(), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        # self.assertEqual(event.is_set(), True)

        event.clear()

        #self.assertEqual(event.is_set(), False)

        # A child sets the event after TIMEOUT2; the untimed wait() must
        # block until then and still return True.
        p = self.Process(target=self._test_event, args=(event,))
        p.daemon = True
        p.start()
        self.assertEqual(wait(), True)
        p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
    # A picklable, process-shared counter disguised as a list: append()
    # increments a counter kept in shared memory and len() reads it.
    # Used by the barrier tests as an atomic counter (see comment above).

    def __init__(self):
        # One C int in shared memory plus a lock to guard updates.
        wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
        lock = multiprocessing.Lock()
        self.__setstate__((wrapper, lock))
        self._lengthbuf[0] = 0

    def __setstate__(self, state):
        (self._wrapper, self._lock) = state
        # View the shared buffer as an array of C ints.
        self._lengthbuf = self._wrapper.create_memoryview().cast('i')

    def __getstate__(self):
        # Only the wrapper and lock travel across processes; the
        # memoryview is rebuilt in __setstate__.
        return (self._wrapper, self._lock)

    def append(self, _):
        # The appended value is ignored; only the count matters.
        with self._lock:
            self._lengthbuf[0] += 1

    def __len__(self):
        with self._lock:
            return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
def __init__(self, namespace, f, args, n, wait_before_exit=False):
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
class AppendTrue(object):
    """Picklable callable that appends ``True`` to *obj* on every call."""

    def __init__(self, obj):
        self.obj = obj

    def __call__(self):
        self.obj.append(True)
class _TestBarrier(BaseTestCase):
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocess.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
    """Return ``x * x``, optionally sleeping *wait* seconds first."""
    time.sleep(wait)
    return x * x
def mul(x, y):
    """Return the product of *x* and *y*."""
    return x * y
def raise_large_valuerror(wait):
    """Sleep *wait* seconds, then raise a ValueError whose message is a
    megabyte long (stresses error transfer through the pool)."""
    time.sleep(wait)
    raise ValueError("x" * 1024**2)
def identity(x):
    """Return *x* unchanged."""
    return x
class CountedObject(object):
    """Object whose class tracks how many live instances currently exist."""

    n_instances = 0  # class-wide live-instance counter

    def __new__(cls):
        cls.n_instances += 1
        return super().__new__(cls)

    def __del__(self):
        type(self).n_instances -= 1
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
    """Yield 0..total-1, raising SayWhenError just before yielding *when*.

    A *when* of -1 raises before anything is yielded at all.
    """
    if when == -1:
        raise SayWhenError("Somebody said when")
    i = 0
    while i < total:
        if i == when:
            raise SayWhenError("Somebody said when")
        yield i
        i += 1
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
def test_map_unplicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
p.join()
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123)
@unittest.skipIf(True, "fails with is_dill(obj, child=True)")
def test_traceback(self):
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
p.join()
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
p.join()
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
@unittest.skipIf(True, "fails with is_dill(obj, child=True)")
def test_wrapped_exception(self):
    # A RuntimeError raised inside a worker must be re-raised in the
    # parent as a plain RuntimeError, not a pool wrapper type.
    # NOTE(review): the body had been truncated to a stray
    # "ssertRaises(RuntimeError):" fragment; restored to match the
    # upstream CPython test.
    with self.Pool(1) as p:
        with self.assertRaises(RuntimeError):
            p.apply(self._test_wrapped_exception)
        p.join()
def test_map_no_failfast(self):
    # Issue #23992: the fail-fast behaviour when an exception is raised
    # during map() would make Pool.join() deadlock due to a worker
    # process not exiting.
    # NOTE(review): the first statement had been truncated to
    # ",'monotonic',time.time)()"; restored the t_start assignment used
    # by the final assertGreater below.
    t_start = getattr(time, 'monotonic', time.time)()
    with self.assertRaises(ValueError):
        with self.Pool(2) as p:
            try:
                p.map(raise_large_valuerror, [0, 1])
            finally:
                time.sleep(0.5)
                p.close()
                p.join()
    # check that we indeed waited for all jobs
    self.assertGreater(getattr(time, 'monotonic', time.time)() - t_start, 0.9)
def test_release_task_refs(self):
    # Issue #29861: task arguments and results should not be kept
    # alive after we are done with them.
    # NOTE(review): the first statement had been truncated to "nge(10)]";
    # restored the list of CountedObject instances that the weakrefs and
    # the final n_instances assertion depend on.
    objs = [CountedObject() for i in range(10)]
    refs = [weakref.ref(o) for o in objs]
    self.pool.map(identity, objs)

    del objs
    time.sleep(DELTA)  # let threaded cleanup code run
    self.assertEqual(set(wr() for wr in refs), {None})
    # With a process pool, copies of the objects are returned, check
    # they were released too.
    self.assertEqual(CountedObject.n_instances, 0)
def test_enter(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
with pool:
pass
with self.assertRaises(ValueError):
with pool:
pass
pool.join()
def test_resource_warning(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
pool.terminate()
pool.join()
pool._state = multiprocessing.pool.RUN
with support.check_warnings(('unclosed running multiprocessing pool',
ResourceWarning)):
pool = None
support.gc_collect()
def raising():
    """Unconditionally raise ``KeyError('key')``."""
    raise KeyError("key")
def unpickleable_result():
    """Return a lambda, which the pool's result pickling cannot handle."""
    return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def _test_unpickleable_result(self):
from multiprocess.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
def test_worker_finalization_via_atexit_handler_of_multiprocessing(self):
# tests cases against bpo-38744 and bpo-39360
cmd = '''if 1:
from multiprocessing import Pool
problem = None
class A:
def __init__(self):
self.pool = Pool(processes=1)
def test():
global problem
problem = A()
problem.pool.map(float, tuple(range(10)))
if __name__ == "__main__":
test()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
self.assertEqual(rc, 0)
#
# Test of creating a customized manager class
#
from multiprocess.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
    """Plain object registered under custom managers in the tests below."""

    def f(self):
        return 'f()'

    def g(self):
        raise ValueError

    def _h(self):
        return '_h()'
def baz():
    """Yield the squares of the integers 0 through 9."""
    yield from (i * i for i in range(10))
class IteratorProxy(BaseProxy):
    # Proxy that forwards iteration to the remote referent: only
    # __next__ is exposed, and __iter__ returns the proxy itself.
    _exposed_ = ('__next__',)

    def __iter__(self):
        return self

    def __next__(self):
        # Delegate to the referent's __next__ through the manager.
        return self._callmethod('__next__')
class MyManager(BaseManager):
    # Customized manager; test types are registered on it below.
    pass

MyManager.register('Foo', callable=FooBar)
# 'Bar' exposes only f and the normally-private _h.
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
# 'baz' returns a generator; clients see it through IteratorProxy.
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
    """Tests for the customized BaseManager subclass defined above."""

    ALLOWED_TYPES = ('manager',)

    def test_mymanager(self):
        manager = MyManager()
        manager.start()
        self.common(manager)
        manager.shutdown()

        # bpo-30356: BaseManager._finalize_manager() sends SIGTERM
        # to the manager process if it takes longer than 1 second to stop,
        # which happens on slow buildbots.
        self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))

    def test_mymanager_context(self):
        with MyManager() as manager:
            self.common(manager)
        # bpo-30356: BaseManager._finalize_manager() sends SIGTERM
        # to the manager process if it takes longer than 1 second to stop,
        # which happens on slow buildbots.
        self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))

    def test_mymanager_context_prestarted(self):
        # Entering a context on an already-started manager must not restart it.
        manager = MyManager()
        manager.start()
        with manager:
            self.common(manager)
        self.assertEqual(manager._process.exitcode, 0)

    def common(self, manager):
        """Shared assertions exercising the registered proxy types."""
        foo = manager.Foo()
        bar = manager.Bar()
        baz = manager.baz()

        # 'Foo' exposes public methods only; 'Bar' exposes exactly ('f', '_h').
        foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
        bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]

        self.assertEqual(foo_methods, ['f', 'g'])
        self.assertEqual(bar_methods, ['f', '_h'])

        self.assertEqual(foo.f(), 'f()')
        self.assertRaises(ValueError, foo.g)
        self.assertEqual(foo._callmethod('f'), 'f()')
        # '_h' is not exposed on Foo, so calling it remotely fails.
        self.assertRaises(RemoteError, foo._callmethod, '_h')

        self.assertEqual(bar.f(), 'f()')
        self.assertEqual(bar._h(), '_h()')
        self.assertEqual(bar._callmethod('f'), 'f()')
        self.assertEqual(bar._callmethod('_h'), '_h()')

        # The iterator proxy must yield the same squares as baz() itself.
        self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
# Module-level queue served to remote clients via the managers below.
_queue = pyqueue.Queue()

def get_queue():
    # Callable registered with QueueManager; returns the shared queue.
    return _queue

class QueueManager(BaseManager):
    '''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)

class QueueManager2(BaseManager):
    '''manager class used by remote clients'''
QueueManager2.register('get_queue')

# Serializer used by the remote-manager tests instead of pickle.
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
    """Connect to a remote manager server using xmlrpclib serialization."""

    ALLOWED_TYPES = ('manager',)
    # Values round-tripped through the manager queue; include non-ASCII
    # text and bytes to exercise xmlrpclib's serialization.
    values = ['hello world', None, True, 2.25,
              'hall\xe5 v\xe4rlden',
              '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
              b'hall\xe5 v\xe4rlden',
             ]
    result = values[:]

    @classmethod
    def _putter(cls, address, authkey):
        # Child process: connect to the server and enqueue the test values.
        manager = QueueManager2(
            address=address, authkey=authkey, serializer=SERIALIZER
            )
        manager.connect()
        queue = manager.get_queue()
        # Note that xmlrpclib will deserialize object as a list not a tuple
        queue.put(tuple(cls.values))

    def test_remote(self):
        authkey = os.urandom(32)

        manager = QueueManager(
            address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER
            )
        manager.start()
        self.addCleanup(manager.shutdown)

        p = self.Process(target=self._putter, args=(manager.address, authkey))
        p.daemon = True
        p.start()

        manager2 = QueueManager2(
            address=manager.address, authkey=authkey, serializer=SERIALIZER
            )
        manager2.connect()
        queue = manager2.get_queue()

        self.assertEqual(queue.get(), self.result)

        # Because we are using xmlrpclib for serialization instead of
        # pickle this will cause a serialization error.
        self.assertRaises(Exception, queue.put, time.sleep)

        # Make queue finalizer run before the server is stopped
        del queue
@hashlib_helper.requires_hashdigest('md5')
class _TestManagerRestart(BaseTestCase):
    """Restart a manager server quickly on the same address."""

    @classmethod
    def _putter(cls, address, authkey):
        # Child process: connect and enqueue a single greeting.
        manager = QueueManager(
            address=address, authkey=authkey, serializer=SERIALIZER)
        manager.connect()
        queue = manager.get_queue()
        queue.put('hello world')

    def test_rapid_restart(self):
        authkey = os.urandom(32)
        manager = QueueManager(
            address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER)
        try:
            srvr = manager.get_server()
            addr = srvr.address
            # Close the connection.Listener socket which gets opened as a part
            # of manager.get_server(). It's not needed for the test.
            srvr.listener.close()
            manager.start()

            p = self.Process(target=self._putter, args=(manager.address, authkey))
            p.start()
            p.join()
            queue = manager.get_queue()
            self.assertEqual(queue.get(), 'hello world')
            del queue
        finally:
            if hasattr(manager, "shutdown"):
                manager.shutdown()

        # Immediately restart a manager on the same address.
        manager = QueueManager(
            address=addr, authkey=authkey, serializer=SERIALIZER)
        try:
            manager.start()
            self.addCleanup(manager.shutdown)
        except OSError as e:
            if e.errno != errno.EADDRINUSE:
                raise
            # Retry after some time, in case the old socket was lingering
            # from the previous server.
            time.sleep(1.0)
            manager = QueueManager(
                address=addr, authkey=authkey, serializer=SERIALIZER)
            if hasattr(manager, "shutdown"):
                self.addCleanup(manager.shutdown)
SENTINEL = latin('')  # empty bytes message used to tell echo children to quit
class _TestConnection(BaseTestCase):
    """Tests for Connection objects created by Pipe()."""

    ALLOWED_TYPES = ('processes', 'threads')

    @classmethod
    def _echo(cls, conn):
        # Echo every bytes message back until SENTINEL is received.
        for msg in iter(conn.recv_bytes, SENTINEL):
            conn.send_bytes(msg)
        conn.close()

    def test_connection(self):
        conn, child_conn = self.Pipe()

        p = self.Process(target=self._echo, args=(child_conn,))
        p.daemon = True
        p.start()

        seq = [1, 2.25, None]
        msg = latin('hello world')
        longmsg = msg * 10
        arr = array.array('i', list(range(4)))

        if self.TYPE == 'processes':
            self.assertEqual(type(conn.fileno()), int)

        self.assertEqual(conn.send(seq), None)
        self.assertEqual(conn.recv(), seq)

        self.assertEqual(conn.send_bytes(msg), None)
        self.assertEqual(conn.recv_bytes(), msg)

        if self.TYPE == 'processes':
            # recv_bytes_into() with a buffer large enough for the message.
            buffer = array.array('i', [0]*10)
            expected = list(arr) + [0] * (10 - len(arr))
            self.assertEqual(conn.send_bytes(arr), None)
            self.assertEqual(conn.recv_bytes_into(buffer),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)

            # recv_bytes_into() with a byte offset into the buffer.
            buffer = array.array('i', [0]*10)
            expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
            self.assertEqual(conn.send_bytes(arr), None)
            self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)

            # A too-small buffer must raise BufferTooShort carrying the message.
            buffer = bytearray(latin(' ' * 40))
            self.assertEqual(conn.send_bytes(longmsg), None)
            try:
                res = conn.recv_bytes_into(buffer)
            except multiprocessing.BufferTooShort as e:
                self.assertEqual(e.args, (longmsg,))
            else:
                self.fail('expected BufferTooShort, got %s' % res)

        poll = TimingWrapper(conn.poll)

        self.assertEqual(poll(), False)
        self.assertTimingAlmostEqual(poll.elapsed, 0)

        self.assertEqual(poll(-1), False)
        self.assertTimingAlmostEqual(poll.elapsed, 0)

        self.assertEqual(poll(TIMEOUT1), False)
        self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)

        conn.send(None)
        time.sleep(.1)

        self.assertEqual(poll(TIMEOUT1), True)
        self.assertTimingAlmostEqual(poll.elapsed, 0)

        self.assertEqual(conn.recv(), None)

        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16MiB
        conn.send_bytes(really_big_msg)
        self.assertEqual(conn.recv_bytes(), really_big_msg)

        conn.send_bytes(SENTINEL)                          # tell child to quit
        child_conn.close()

        if self.TYPE == 'processes':
            self.assertEqual(conn.readable, True)
            self.assertEqual(conn.writable, True)
            # Both ends closed by the child now, so reads raise EOFError.
            self.assertRaises(EOFError, conn.recv)
            self.assertRaises(EOFError, conn.recv_bytes)

        p.join()

    def test_duplex_false(self):
        reader, writer = self.Pipe(duplex=False)
        self.assertEqual(writer.send(1), None)
        self.assertEqual(reader.recv(), 1)
        if self.TYPE == 'processes':
            self.assertEqual(reader.readable, True)
            self.assertEqual(reader.writable, False)
            self.assertEqual(writer.readable, False)
            self.assertEqual(writer.writable, True)
            # Using an end in the wrong direction must fail.
            self.assertRaises(OSError, reader.send, 2)
            self.assertRaises(OSError, writer.recv)
            self.assertRaises(OSError, writer.poll)

    def test_spawn_close(self):
        # Closing the child's end of the pipe in the parent immediately
        # after spawning must not break the child's copy of it.
        conn, child_conn = self.Pipe()

        p = self.Process(target=self._echo, args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()    # this might complete before child initializes

        msg = latin('hello')
        conn.send_bytes(msg)
        self.assertEqual(conn.recv_bytes(), msg)

        conn.send_bytes(SENTINEL)
        conn.close()
        p.join()

    def test_sendbytes(self):
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        msg = latin('abcdefghijklmnopqrstuvwxyz')
        a, b = self.Pipe()

        # send_bytes(msg[, offset[, size]]) slicing semantics.
        a.send_bytes(msg)
        self.assertEqual(b.recv_bytes(), msg)

        a.send_bytes(msg, 5)
        self.assertEqual(b.recv_bytes(), msg[5:])

        a.send_bytes(msg, 7, 8)
        self.assertEqual(b.recv_bytes(), msg[7:7+8])

        a.send_bytes(msg, 26)
        self.assertEqual(b.recv_bytes(), latin(''))

        a.send_bytes(msg, 26, 0)
        self.assertEqual(b.recv_bytes(), latin(''))

        # Out-of-range offsets/sizes must raise ValueError.
        self.assertRaises(ValueError, a.send_bytes, msg, 27)

        self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)

        self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)

        self.assertRaises(ValueError, a.send_bytes, msg, -1)

        self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)

    @classmethod
    def _is_fd_assigned(cls, fd):
        # Return True if *fd* refers to an open file descriptor.
        try:
            os.fstat(fd)
        except OSError as e:
            if e.errno == errno.EBADF:
                return False
            raise
        else:
            return True

    @classmethod
    def _writefd(cls, conn, data, create_dummy_fds=False):
        # Child side: receive an fd over *conn* and write *data* to it.
        # With create_dummy_fds, first fill all fds < 256 so the received
        # fd is forced above 256 (see test_large_fd_transfer).
        if create_dummy_fds:
            for i in range(0, 256):
                if not cls._is_fd_assigned(i):
                    os.dup2(conn.fileno(), i)
        fd = reduction.recv_handle(conn)
        if msvcrt:
            fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
        os.write(fd, data)
        os.close(fd)

    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    def test_fd_transfer(self):
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)

        p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
        p.daemon = True
        p.start()
        self.addCleanup(test.support.unlink, test.support.TESTFN)
        with open(test.support.TESTFN, "wb") as f:
            fd = f.fileno()
            if msvcrt:
                fd = msvcrt.get_osfhandle(fd)
            reduction.send_handle(conn, fd, p.pid)
        p.join()
        with open(test.support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"foo")

    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    @unittest.skipIf(sys.platform == "win32",
                     "test semantics don't make sense on Windows")
    @unittest.skipIf(MAXFD <= 256,
                     "largest assignable fd number is too small")
    @unittest.skipUnless(hasattr(os, "dup2"),
                         "test needs os.dup2()")
    def test_large_fd_transfer(self):
        # With fd > 256 (issue #11657)
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")

        conn, child_conn = self.Pipe(duplex=True)

        p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
        p.daemon = True
        p.start()
        self.addCleanup(test.support.unlink, test.support.TESTFN)
        with open(test.support.TESTFN, "wb") as f:
            fd = f.fileno()
            # Find an unassigned fd number above 256 and dup onto it.
            for newfd in range(256, MAXFD):
                if not self._is_fd_assigned(newfd):
                    break
            else:
                self.fail("could not find an unassigned large file descriptor")
            os.dup2(fd, newfd)
            try:
                reduction.send_handle(conn, newfd, p.pid)
            finally:
                os.close(newfd)
        p.join()
        with open(test.support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"bar")

    @classmethod
    def _send_data_without_fd(self, conn):
        # NOTE(review): classmethod whose first parameter is named 'self';
        # harmless but inconsistent with the other classmethods here.
        os.write(conn.fileno(), b"\0")

    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    @unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
    def test_missing_fd_transfer(self):
        # Data arriving without an accompanying fd in ancillary data
        # must make recv_handle() raise.
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)

        p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
        p.daemon = True
        p.start()
        self.assertRaises(RuntimeError, reduction.recv_handle, conn)
        p.join()

    def test_context(self):
        a, b = self.Pipe()

        with a, b:
            a.send(1729)
            self.assertEqual(b.recv(), 1729)
            if self.TYPE == 'processes':
                self.assertFalse(a.closed)
                self.assertFalse(b.closed)

        if self.TYPE == 'processes':
            # Leaving the with-block must have closed both ends.
            self.assertTrue(a.closed)
            self.assertTrue(b.closed)
            self.assertRaises(OSError, a.recv)
            self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
    """Tests for connection.Listener objects."""

    ALLOWED_TYPES = ('processes',)

    def test_multiple_bind(self):
        # Binding a second listener to an address already in use must fail.
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            self.addCleanup(l.close)
            self.assertRaises(OSError, self.connection.Listener,
                              l.address, family)

    def test_context(self):
        with self.connection.Listener() as l:
            with self.connection.Client(l.address) as c:
                with l.accept() as d:
                    c.send(1729)
                    self.assertEqual(d.recv(), 1729)

        if self.TYPE == 'processes':
            # Listener was closed on context exit, so accept() must fail.
            self.assertRaises(OSError, l.accept)

    @unittest.skipUnless(util.abstract_sockets_supported,
                         "test needs abstract socket support")
    def test_abstract_socket(self):
        # Leading NUL byte selects the Linux abstract socket namespace.
        with self.connection.Listener("\0something") as listener:
            with self.connection.Client(listener.address) as client:
                with listener.accept() as d:
                    client.send(1729)
                    self.assertEqual(d.recv(), 1729)

        if self.TYPE == 'processes':
            self.assertRaises(OSError, listener.accept)
class _TestListenerClient(BaseTestCase):
    """Tests pairing connection.Listener with connection.Client."""

    ALLOWED_TYPES = ('processes', 'threads')

    @classmethod
    def _test(cls, address):
        # Child side: connect, send one greeting, disconnect.
        conn = cls.connection.Client(address)
        conn.send('hello')
        conn.close()

    def test_listener_client(self):
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            p = self.Process(target=self._test, args=(l.address,))
            p.daemon = True
            p.start()
            conn = l.accept()
            self.assertEqual(conn.recv(), 'hello')
            p.join()
            l.close()

    def test_issue14725(self):
        l = self.connection.Listener()
        p = self.Process(target=self._test, args=(l.address,))
        p.daemon = True
        p.start()
        # Give the client time to connect, send and close its end
        # before we accept() (regression test for issue #14725).
        time.sleep(1)
        conn = l.accept()
        self.assertEqual(conn.recv(), 'hello')
        conn.close()
        p.join()
        l.close()

    def test_issue16955(self):
        # poll() must see data that was sent before the first read.
        for fam in self.connection.families:
            l = self.connection.Listener(family=fam)
            c = self.connection.Client(l.address)
            a = l.accept()
            a.send_bytes(b"hello")
            self.assertTrue(c.poll(1))
            a.close()
            c.close()
            l.close()
class _TestPoll(BaseTestCase):
    """Tests for Connection.poll() semantics."""

    ALLOWED_TYPES = ('processes', 'threads')

    def test_empty_string(self):
        # An empty bytes message still counts as a pending message.
        a, b = self.Pipe()
        self.assertEqual(a.poll(), False)
        b.send_bytes(b'')
        self.assertEqual(a.poll(), True)
        self.assertEqual(a.poll(), True)

    @classmethod
    def _child_strings(cls, conn, strings):
        # Child side: send each message with a small delay between them.
        for s in strings:
            time.sleep(0.1)
            conn.send_bytes(s)
        conn.close()

    def test_strings(self):
        strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
        a, b = self.Pipe()
        p = self.Process(target=self._child_strings, args=(b, strings))
        p.start()

        for s in strings:
            # Wait (bounded) for each message before reading it.
            for i in range(200):
                if a.poll(0.01):
                    break
            x = a.recv_bytes()
            self.assertEqual(s, x)

        p.join()

    @classmethod
    def _child_boundaries(cls, r):
        # Polling may "pull" a message into the child process, but we
        # don't want it to pull only part of a message, as that would
        # corrupt the pipe for any other processes which might later
        # read from it.
        r.poll(5)

    def test_boundaries(self):
        r, w = self.Pipe(False)
        p = self.Process(target=self._child_boundaries, args=(r,))
        p.start()
        time.sleep(2)
        L = [b"first", b"second"]
        for obj in L:
            w.send_bytes(obj)
        w.close()
        p.join()
        # Whichever message the child's poll consumed, the one we read
        # must still be an intact, complete message.
        self.assertIn(r.recv_bytes(), L)

    @classmethod
    def _child_dont_merge(cls, b):
        b.send_bytes(b'a')
        b.send_bytes(b'b')
        b.send_bytes(b'cd')

    def test_dont_merge(self):
        # Message boundaries must be preserved: repeated poll() calls
        # must never merge adjacent messages.
        a, b = self.Pipe()
        self.assertEqual(a.poll(0.0), False)
        self.assertEqual(a.poll(0.1), False)

        p = self.Process(target=self._child_dont_merge, args=(b,))
        p.start()

        self.assertEqual(a.recv_bytes(), b'a')
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.recv_bytes(), b'b')
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(0.0), True)
        self.assertEqual(a.recv_bytes(), b'cd')

        p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@hashlib_helper.requires_hashdigest('md5')
class _TestPicklingConnections(BaseTestCase):
    """Tests sending connection and socket objects between processes."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def tearDownClass(cls):
        from multiprocess import resource_sharer
        resource_sharer.stop(timeout=support.LONG_TIMEOUT)

    @classmethod
    def _listener(cls, conn, families):
        # Child: for each family, create a listener, report its address,
        # then send the accepted connection object itself back to the parent.
        for fam in families:
            l = cls.connection.Listener(family=fam)
            conn.send(l.address)
            new_conn = l.accept()
            conn.send(new_conn)
            new_conn.close()
            l.close()

        # Same dance with a raw socket instead of a Listener.
        l = socket.create_server((socket_helper.HOST, 0))
        conn.send(l.getsockname())
        new_conn, addr = l.accept()
        conn.send(new_conn)
        new_conn.close()
        l.close()

        conn.recv()

    @classmethod
    def _remote(cls, conn):
        # Child: for each (address, msg) pair, connect and echo msg uppercased.
        for (address, msg) in iter(conn.recv, None):
            client = cls.connection.Client(address)
            client.send(msg.upper())
            client.close()

        # Final round uses a plain socket.
        address, msg = conn.recv()
        client = socket.socket()
        client.connect(address)
        client.sendall(msg.upper())
        client.close()

        conn.close()

    def test_pickling(self):
        families = self.connection.families

        lconn, lconn0 = self.Pipe()
        lp = self.Process(target=self._listener, args=(lconn0, families))
        lp.daemon = True
        lp.start()
        lconn0.close()

        rconn, rconn0 = self.Pipe()
        rp = self.Process(target=self._remote, args=(rconn0,))
        rp.daemon = True
        rp.start()
        rconn0.close()

        for fam in families:
            msg = ('This connection uses family %s' % fam).encode('ascii')
            address = lconn.recv()
            rconn.send((address, msg))
            # The accepted Connection object was pickled across processes.
            new_conn = lconn.recv()
            self.assertEqual(new_conn.recv(), msg.upper())

        rconn.send(None)

        msg = latin('This connection uses a normal socket')
        address = lconn.recv()
        rconn.send((address, msg))
        new_conn = lconn.recv()
        buf = []
        while True:
            s = new_conn.recv(100)
            if not s:
                break
            buf.append(s)
        buf = b''.join(buf)
        self.assertEqual(buf, msg.upper())
        new_conn.close()

        lconn.send(None)

        rconn.close()
        lconn.close()

        lp.join()
        rp.join()

    @classmethod
    def child_access(cls, conn):
        # Child: receive a writable end, greet through it; then receive a
        # readable end and echo its message doubled.
        w = conn.recv()
        w.send('all is well')
        w.close()

        r = conn.recv()
        msg = r.recv()
        conn.send(msg*2)

        conn.close()

    def test_access(self):
        # On Windows, if we do not specify a destination pid when
        # using DupHandle then we need to be careful to use the
        # correct access flags for DuplicateHandle(), or else
        # DupHandle.detach() will raise PermissionError.  For example,
        # for a read only pipe handle we should use
        # access=FILE_GENERIC_READ.  (Unfortunately
        # DUPLICATE_SAME_ACCESS does not work.)
        conn, child_conn = self.Pipe()
        p = self.Process(target=self.child_access, args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()

        r, w = self.Pipe(duplex=False)
        conn.send(w)
        w.close()
        self.assertEqual(r.recv(), 'all is well')
        r.close()

        r, w = self.Pipe(duplex=False)
        conn.send(r)
        r.close()
        w.send('foobar')
        w.close()
        self.assertEqual(conn.recv(), 'foobar'*2)

        p.join()
#
#
#
class _TestHeap(BaseTestCase):
    """Tests for multiprocessing.heap's arena allocator."""

    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        super().setUp()
        # Make pristine heap for these tests
        self.old_heap = multiprocessing.heap.BufferWrapper._heap
        multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()

    def tearDown(self):
        multiprocessing.heap.BufferWrapper._heap = self.old_heap
        super().tearDown()

    def test_heap(self):
        iterations = 5000
        maxblocks = 50
        blocks = []

        # get the heap object
        heap = multiprocessing.heap.BufferWrapper._heap
        # Keep every arena around so the consistency checks below see them.
        heap._DISCARD_FREE_SPACE_LARGER_THAN = 0

        # create and destroy lots of blocks of different sizes
        for i in range(iterations):
            size = int(random.lognormvariate(0, 1) * 1000)
            b = multiprocessing.heap.BufferWrapper(size)
            blocks.append(b)
            if len(blocks) > maxblocks:
                i = random.randrange(maxblocks)
                del blocks[i]
            del b

        # verify the state of the heap
        with heap._lock:
            all = []
            free = 0
            occupied = 0
            for L in list(heap._len_to_seq.values()):
                # count all free blocks in arenas
                for arena, start, stop in L:
                    all.append((heap._arenas.index(arena), start, stop,
                                stop-start, 'free'))
                    free += (stop-start)
            for arena, arena_blocks in heap._allocated_blocks.items():
                # count all allocated blocks in arenas
                for start, stop in arena_blocks:
                    all.append((heap._arenas.index(arena), start, stop,
                                stop-start, 'occupied'))
                    occupied += (stop-start)

            # Free plus occupied bytes must account for every arena byte.
            self.assertEqual(free + occupied,
                             sum(arena.size for arena in heap._arenas))

            all.sort()

            # Adjacent blocks must tile each arena with no gaps or overlaps.
            for i in range(len(all)-1):
                (arena, start, stop) = all[i][:3]
                (narena, nstart, nstop) = all[i+1][:3]
                if arena != narena:
                    # Two different arenas
                    self.assertEqual(stop, heap._arenas[arena].size)  # last block
                    self.assertEqual(nstart, 0)                       # first block
                else:
                    # Same arena: two adjacent blocks
                    self.assertEqual(stop, nstart)

        # test free'ing all blocks
        random.shuffle(blocks)
        while blocks:
            blocks.pop()

        self.assertEqual(heap._n_frees, heap._n_mallocs)
        self.assertEqual(len(heap._pending_free_blocks), 0)
        self.assertEqual(len(heap._arenas), 0)
        self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
        self.assertEqual(len(heap._len_to_seq), 0)

    def test_free_from_gc(self):
        # Freeing of blocks by the garbage collector must not deadlock
        # (issue #12352).
        # Make sure the GC is enabled, and set lower collection thresholds to
        # make collections more frequent (and increase the probability of
        # deadlock).
        if not gc.isenabled():
            gc.enable()
            self.addCleanup(gc.disable)
        thresholds = gc.get_threshold()
        self.addCleanup(gc.set_threshold, *thresholds)
        gc.set_threshold(10)

        # perform numerous block allocations, with cyclic references to make
        # sure objects are collected asynchronously by the gc
        for i in range(5000):
            a = multiprocessing.heap.BufferWrapper(1)
            b = multiprocessing.heap.BufferWrapper(1)
            # circular references
            a.buddy = b
            b.buddy = a
#
#
#
class _Foo(Structure):
    # Simple ctypes structure shared across processes in _TestSharedCTypes.
    _fields_ = [
        ('x', c_int),
        ('y', c_double),
        ('z', c_longlong,)
        ]
class _TestSharedCTypes(BaseTestCase):
    """Tests for multiprocess.sharedctypes Value/Array."""

    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        if not HAS_SHAREDCTYPES:
            self.skipTest("requires multiprocess.sharedctypes")

    @classmethod
    def _double(cls, x, y, z, foo, arr, string):
        # Child side: double every shared value in place.
        x.value *= 2
        y.value *= 2
        z.value *= 2
        foo.x *= 2
        foo.y *= 2
        string.value *= 2
        for i in range(len(arr)):
            arr[i] *= 2

    def test_sharedctypes(self, lock=False):
        x = Value('i', 7, lock=lock)
        y = Value(c_double, 1.0/3.0, lock=lock)
        z = Value(c_longlong, 2 ** 33, lock=lock)
        foo = Value(_Foo, 3, 2, lock=lock)
        arr = self.Array('d', list(range(10)), lock=lock)
        string = self.Array('c', 20, lock=lock)
        string.value = latin('hello')

        p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
        p.daemon = True
        p.start()
        p.join()

        # The child's in-place doubling must be visible here.
        self.assertEqual(x.value, 14)
        self.assertAlmostEqual(y.value, 2.0/3.0)
        self.assertEqual(z.value, 2 ** 34)
        self.assertEqual(foo.x, 6)
        self.assertAlmostEqual(foo.y, 4.0)
        for i in range(10):
            self.assertAlmostEqual(arr[i], i*2)
        self.assertEqual(string.value, latin('hellohello'))

    def test_synchronize(self):
        # Same as test_sharedctypes but with lock-wrapped values.
        self.test_sharedctypes(lock=True)

    def test_copy(self):
        # copy() must produce an independent snapshot of the structure.
        foo = _Foo(2, 5.0, 2 ** 33)
        bar = copy(foo)
        foo.x = 0
        foo.y = 0
        foo.z = 0
        self.assertEqual(bar.x, 2)
        self.assertAlmostEqual(bar.y, 5.0)
        self.assertEqual(bar.z, 2 ** 33)
@unittest.skipUnless(HAS_SHMEM, "requires multiprocess.shared_memory")
@hashlib_helper.requires_hashdigest('md5')
class _TestSharedMemory(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@staticmethod
def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data):
if isinstance(shmem_name_or_obj, str):
local_sms = shared_memory.SharedMemory(shmem_name_or_obj)
else:
local_sms = shmem_name_or_obj
local_sms.buf[:len(binary_data)] = binary_data
local_sms.close()
def test_shared_memory_basics(self):
sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512)
self.addCleanup(sms.unlink)
# Verify attributes are readable.
self.assertEqual(sms.name, 'test01_tsmb')
self.assertGreaterEqual(sms.size, 512)
self.assertGreaterEqual(len(sms.buf), sms.size)
# Modify contents of shared memory segment through memoryview.
sms.buf[0] = 42
self.assertEqual(sms.buf[0], 42)
# Attach to existing shared memory segment.
also_sms = shared_memory.SharedMemory('test01_tsmb')
self.assertEqual(also_sms.buf[0], 42)
also_sms.close()
# Attach to existing shared memory segment but specify a new size.
same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size)
self.assertLess(same_sms.size, 20*sms.size) # Size was ignored.
same_sms.close()
# Creating Shared Memory Segment with -ve size
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=True, size=-2)
# Attaching Shared Memory Segment without a name
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=False)
# Test if shared memory segment is created properly,
# when _make_filename returns an existing shared memory segment name
with unittest.mock.patch(
'multiprocessing.shared_memory._make_filename') as mock_make_filename:
NAME_PREFIX = shared_memory._SHM_NAME_PREFIX
names = ['test01_fn', 'test02_fn']
# Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary
# because some POSIX compliant systems require name to start with /
names = [NAME_PREFIX + name for name in names]
mock_make_filename.side_effect = names
shm1 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm1.unlink)
self.assertEqual(shm1._name, names[0])
mock_make_filename.side_effect = names
shm2 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm2.unlink)
self.assertEqual(shm2._name, names[1])
if shared_memory._USE_POSIX:
# Posix Shared Memory can only be unlinked once. Here we
# test an implementation detail that is not observed across
# all supported platforms (since WindowsNamedSharedMemory
# manages unlinking on its own and unlink() does nothing).
# True release of shared memory segment does not necessarily
# happen until process exits, depending on the OS platform.
with self.assertRaises(FileNotFoundError):
sms_uno = shared_memory.SharedMemory(
'test01_dblunlink',
create=True,
size=5000
)
try:
self.assertGreaterEqual(sms_uno.size, 5000)
sms_duo = shared_memory.SharedMemory('test01_dblunlink')
sms_duo.unlink() # First shm_unlink() call.
sms_duo.close()
sms_uno.close()
finally:
sms_uno.unlink() # A second shm_unlink() call is bad.
with self.assertRaises(FileExistsError):
# Attempting to create a new shared memory segment with a
# name that is already in use triggers an exception.
there_can_only_be_one_sms = shared_memory.SharedMemory(
'test01_tsmb',
create=True,
size=512
)
if shared_memory._USE_POSIX:
# Requesting creation of a shared memory segment with the option
# to attach to an existing segment, if that name is currently in
# use, should not trigger an exception.
# Note: Using a smaller size could possibly cause truncation of
# the existing segment but is OS platform dependent. In the
# case of MacOS/darwin, requesting a smaller size is disallowed.
class OptionalAttachSharedMemory(shared_memory.SharedMemory):
_flags = os.O_CREAT | os.O_RDWR
ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb')
self.assertEqual(ok_if_exists_sms.size, sms.size)
ok_if_exists_sms.close()
# Attempting to attach to an existing shared memory segment when
# no segment exists with the supplied name triggers an exception.
with self.assertRaises(FileNotFoundError):
nonexisting_sms = shared_memory.SharedMemory('test01_notthere')
nonexisting_sms.unlink() # Error should occur on prior line.
sms.close()
# Test creating a shared memory segment with negative size
with self.assertRaises(ValueError):
sms_invalid = shared_memory.SharedMemory(create=True, size=-1)
# Test creating a shared memory segment with size 0
with self.assertRaises(ValueError):
sms_invalid = shared_memory.SharedMemory(create=True, size=0)
# Test creating a shared memory segment without size argument
with self.assertRaises(ValueError):
sms_invalid = shared_memory.SharedMemory(create=True)
def test_shared_memory_across_processes(self):
# bpo-40135: don't define shared memory block's name in case of
# the failure when we run multiprocessing tests in parallel.
sms = shared_memory.SharedMemory(create=True, size=512)
self.addCleanup(sms.unlink)
# Verify remote attachment to existing block by name is working.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms.name, b'howdy')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'howdy')
# Verify pickling of SharedMemory instance also works.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms, b'HELLO')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'HELLO')
sms.close()
@unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms")
def test_shared_memory_SharedMemoryServer_ignores_sigint(self):
# bpo-36368: protect SharedMemoryManager server process from
# KeyboardInterrupt signals.
smm = multiprocessing.managers.SharedMemoryManager()
smm.start()
# make sure the manager works properly at the beginning
sl = smm.ShareableList(range(10))
# the manager's server should ignore KeyboardInterrupt signals, and
os.kill(smm._process.pid, signal.SIGINT)
sl2 = smm.ShareableList(range(10))
with self.assertRaises(KeyboardInterrupt):
os.kill(os.getpid(), signal.SIGINT)
smm.shutdown()
@unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self):
cmd = '''if 1:
from multiprocessing.managers import SharedMemoryManager
smm = SharedMemoryManager()
smm.start()
sl = smm.ShareableList(range(10))
smm.shutdown()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
# tracker complain about sl being leaked even though smm.shutdown()
# properly released sl.
self.assertFalse(err)
def test_shared_memory_SharedMemoryManager_basics(self):
smm1 = multiprocessing.managers.SharedMemoryManager()
with self.assertRaises(ValueError):
smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started
smm1.start()
lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ]
lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ]
doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name)
self.assertEqual(len(doppleganger_list0), 5)
doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name)
self.assertGreaterEqual(len(doppleganger_shm0.buf), 32)
held_name = lom[0].name
smm1.shutdown()
if sys.platform != "win32":
# Calls to unlink() have no effect on Windows platform; shared
# memory will only be released once final process exits.
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_shm = shared_memory.SharedMemory(name=held_name)
with multiprocessing.managers.SharedMemoryManager() as smm2:
sl = smm2.ShareableList("howdy")
shm = smm2.SharedMemory(size=128)
held_name = sl.shm.name
if sys.platform != "win32":
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_sl = shared_memory.ShareableList(name=held_name)
def test_shared_memory_ShareableList_basics(self):
sl = shared_memory.ShareableList(
['howdy', b'HoWdY', -273.154, 100, None, True, 42]
)
self.addCleanup(sl.shm.unlink)
# Verify attributes are readable.
self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q')
# Exercise len().
self.assertEqual(len(sl), 7)
# Exercise index().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
with self.assertRaises(ValueError):
sl.index('100')
self.assertEqual(sl.index(100), 3)
# Exercise retrieving individual values.
self.assertEqual(sl[0], 'howdy')
self.assertEqual(sl[-2], True)
# Exercise iterability.
self.assertEqual(
tuple(sl),
('howdy', b'HoWdY', -273.154, 100, None, True, 42)
)
# Exercise modifying individual values.
sl[3] = 42
self.assertEqual(sl[3], 42)
sl[4] = 'some' # Change type at a given position.
self.assertEqual(sl[4], 'some')
self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[4] = 'far too many'
self.assertEqual(sl[4], 'some')
sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data
self.assertEqual(sl[0], 'encodés')
self.assertEqual(sl[1], b'HoWdY') # no spillage
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data
self.assertEqual(sl[1], b'HoWdY')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[1] = b'123456789'
self.assertEqual(sl[1], b'HoWdY')
# Exercise count().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
self.assertEqual(sl.count(42), 2)
self.assertEqual(sl.count(b'HoWdY'), 1)
self.assertEqual(sl.count(b'adios'), 0)
# Exercise creating a duplicate.
sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate')
try:
self.assertNotEqual(sl.shm.name, sl_copy.shm.name)
self.assertEqual('test03_duplicate', sl_copy.shm.name)
self.assertEqual(list(sl), list(sl_copy))
self.assertEqual(sl.format, sl_copy.format)
sl_copy[-1] = 77
self.assertEqual(sl_copy[-1], 77)
self.assertNotEqual(sl[-1], 77)
sl_copy.shm.close()
finally:
sl_copy.shm.unlink()
# Obtain a second handle on the same ShareableList.
sl_tethered = shared_memory.ShareableList(name=sl.shm.name)
self.assertEqual(sl.shm.name, sl_tethered.shm.name)
sl_tethered[-1] = 880
self.assertEqual(sl[-1], 880)
sl_tethered.shm.close()
sl.shm.close()
# Exercise creating an empty ShareableList.
empty_sl = shared_memory.ShareableList()
try:
self.assertEqual(len(empty_sl), 0)
self.assertEqual(empty_sl.format, '')
self.assertEqual(empty_sl.count('any'), 0)
with self.assertRaises(ValueError):
empty_sl.index(None)
empty_sl.shm.close()
finally:
empty_sl.shm.unlink()
def test_shared_memory_ShareableList_pickling(self):
sl = shared_memory.ShareableList(range(10))
self.addCleanup(sl.shm.unlink)
serialized_sl = pickle.dumps(sl)
deserialized_sl = pickle.loads(serialized_sl)
self.assertTrue(
isinstance(deserialized_sl, shared_memory.ShareableList)
)
self.assertTrue(deserialized_sl[-1], 9)
self.assertFalse(sl is deserialized_sl)
deserialized_sl[4] = "changed"
self.assertEqual(sl[4], "changed")
# Verify data is not being put into the pickled representation.
name = 'a' * len(sl.shm.name)
larger_sl = shared_memory.ShareableList(range(400))
self.addCleanup(larger_sl.shm.unlink)
serialized_larger_sl = pickle.dumps(larger_sl)
self.assertTrue(len(serialized_sl) == len(serialized_larger_sl))
larger_sl.shm.close()
deserialized_sl.shm.close()
sl.shm.close()
    def test_shared_memory_cleaned_after_process_termination(self):
        """Abruptly killing a process holding a SharedMemory segment must not
        leak the segment; the resource tracker reclaims it at shutdown."""
        # The subprocess creates a segment, prints its name, then sleeps so
        # we can terminate it while the segment is still held.
        cmd = '''if 1:
            import os, time, sys
            from multiprocessing import shared_memory
            # Create a shared_memory segment, and send the segment name
            sm = shared_memory.SharedMemory(create=True, size=10)
            sys.stdout.write(sm.name + '\\n')
            sys.stdout.flush()
            time.sleep(100)
        '''
        with subprocess.Popen([sys.executable, '-E', '-c', cmd],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as p:
            name = p.stdout.readline().strip().decode()
            # killing abruptly processes holding reference to a shared memory
            # segment should not leak the given memory segment.
            p.terminate()
            p.wait()
            # Poll with exponential backoff until the tracker has unlinked
            # the segment, or fail once the deadline expires.
            deadline = getattr(time,'monotonic',time.time)() + support.LONG_TIMEOUT
            t = 0.1
            while getattr(time,'monotonic',time.time)() < deadline:
                time.sleep(t)
                t = min(t*2, 5)
                try:
                    # Attaching succeeds only while the segment still exists.
                    smm = shared_memory.SharedMemory(name, create=False)
                except FileNotFoundError:
                    break
            else:
                raise AssertionError("A SharedMemory segment was leaked after"
                                     " a process was abruptly terminated.")
            if os.name == 'posix':
                # A warning was emitted by the subprocess' own
                # resource tracker about the segment it had to clean up.
                err = p.stderr.read().decode()
                self.assertIn(
                    "resource_tracker: There appear to be 1 leaked "
                    "shared_memory objects to clean up at shutdown", err)
class _TestFinalize(BaseTestCase):
    """Tests for util.Finalize: callback ordering by exitpriority, idempotent
    invocation, and thread-safety of the finalizer registry."""
    ALLOWED_TYPES = ('processes',)
    def setUp(self):
        # Run against a clean finalizer registry; restored in tearDown.
        self.registry_backup = util._finalizer_registry.copy()
        util._finalizer_registry.clear()
    def tearDown(self):
        # Every finalizer created during the test must have run.
        self.assertFalse(util._finalizer_registry)
        util._finalizer_registry.update(self.registry_backup)
    @classmethod
    def _test_finalize(cls, conn):
        # Child process: register a mix of finalizers and report, via the
        # pipe, the order in which they fire.
        class Foo(object):
            pass
        a = Foo()
        util.Finalize(a, conn.send, args=('a',))
        del a  # 'a' fires immediately when its referent is collected
        b = Foo()
        close_b = util.Finalize(b, conn.send, args=('b',))
        close_b()  # 'b' fires on explicit call ...
        close_b()  # ... and calling again is a no-op
        del b      # likewise a no-op: the callback already ran
        c = Foo()
        util.Finalize(c, conn.send, args=('c',))
        # Finalizers with an exitpriority only run at process exit, highest
        # priority first; equal priorities run newest-first.
        d10 = Foo()
        util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
        d01 = Foo()
        util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
        d02 = Foo()
        util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
        d03 = Foo()
        util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
        util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
        util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
        # Run multiprocessing's cleanup function then exit without
        # garbage collecting locals
        util._exit_function()
        conn.close()
        os._exit(0)
    def test_finalize(self):
        conn, child_conn = self.Pipe()
        p = self.Process(target=self._test_finalize, args=(child_conn,))
        p.daemon = True
        p.start()
        p.join()
        result = [obj for obj in iter(conn.recv, 'STOP')]
        # 'a' and 'b' fired during the child's lifetime; the rest fired at
        # exit in decreasing exitpriority, newest-first within a priority.
        # 'c' never fires: it has no exitpriority and was never collected.
        self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
    def test_thread_safety(self):
        # bpo-24484: _run_finalizers() should be thread-safe
        def cb():
            pass
        class Foo(object):
            def __init__(self):
                self.ref = self # create reference cycle
                # insert finalizer at random key
                util.Finalize(self, cb, exitpriority=random.randint(1, 100))
        finish = False
        exc = None
        def run_finalizers():
            nonlocal exc
            while not finish:
                time.sleep(random.random() * 1e-1)
                try:
                    # A GC run will eventually happen during this,
                    # collecting stale Foo's and mutating the registry
                    util._run_finalizers()
                except Exception as e:
                    exc = e
        def make_finalizers():
            nonlocal exc
            d = {}
            while not finish:
                try:
                    # Old Foo's are gradually replaced and later
                    # collected by the GC (because of the cyclic ref)
                    d[random.getrandbits(5)] = {Foo() for i in range(10)}
                except Exception as e:
                    exc = e
                    d.clear()
        old_interval = sys.getswitchinterval()
        old_threshold = gc.get_threshold()
        try:
            # Aggressive switching/GC settings to maximize the chance of
            # hitting the original race.
            sys.setswitchinterval(1e-6)
            gc.set_threshold(5, 5, 5)
            threads = [threading.Thread(target=run_finalizers),
                       threading.Thread(target=make_finalizers)]
            with test.support.start_threads(threads):
                time.sleep(4.0)  # Wait a bit to trigger race condition
                finish = True
            if exc is not None:
                raise exc
        finally:
            sys.setswitchinterval(old_interval)
            gc.set_threshold(*old_threshold)
            gc.collect()  # Collect remaining Foo's
class _TestImportStar(unittest.TestCase):
    """Check that every importable multiprocess submodule defines a truthful
    __all__ (each listed name actually exists on the module)."""
    def get_module_names(self):
        # Enumerate the package's *.py files and turn each into a dotted
        # module name; the package itself replaces its __init__ entry.
        import glob
        package_dir = os.path.dirname(multiprocessing.__file__)
        source_files = glob.glob(os.path.join(glob.escape(package_dir), '*.py'))
        names = []
        for path in source_files:
            stem = os.path.splitext(os.path.split(path)[1])[0]
            names.append('multiprocess.' + stem)
        names.remove('multiprocess.__init__')
        names.append('multiprocess')
        return names
    def test_import(self):
        names = self.get_module_names()
        # Drop modules that cannot be imported on this platform or build.
        if sys.platform == 'win32':
            names.remove('multiprocess.popen_fork')
            names.remove('multiprocess.popen_forkserver')
            names.remove('multiprocess.popen_spawn_posix')
        else:
            names.remove('multiprocess.popen_spawn_win32')
            if not HAS_REDUCTION:
                names.remove('multiprocess.popen_forkserver')
        if c_int is None:
            # sharedctypes requires _ctypes, which this build lacks.
            names.remove('multiprocess.sharedctypes')
        for name in names:
            __import__(name)
            mod = sys.modules[name]
            self.assertTrue(hasattr(mod, '__all__'), name)
            for attr in mod.__all__:
                self.assertTrue(
                    hasattr(mod, attr),
                    '%r does not have attribute %r' % (mod, attr)
                )
class _TestLogging(BaseTestCase):
    """Tests for multiprocessing.get_logger(): level changes and how the
    effective level propagates to child processes."""
    ALLOWED_TYPES = ('processes',)
    def test_enable_logging(self):
        logger = multiprocessing.get_logger()
        logger.setLevel(util.SUBWARNING)
        self.assertTrue(logger is not None)
        # With the level at SUBWARNING, debug/info messages are suppressed.
        logger.debug('this will not be printed')
        logger.info('nor will this')
        logger.setLevel(LOG_LEVEL)
    @classmethod
    def _test_level(cls, conn):
        # Child: report the logger's effective level back to the parent.
        logger = multiprocessing.get_logger()
        conn.send(logger.getEffectiveLevel())
    def test_level(self):
        LEVEL1 = 32
        LEVEL2 = 37
        logger = multiprocessing.get_logger()
        root_logger = logging.getLogger()
        root_level = root_logger.level
        reader, writer = multiprocessing.Pipe(duplex=False)
        # An explicit level on the multiprocessing logger is seen by the
        # child process.
        logger.setLevel(LEVEL1)
        p = self.Process(target=self._test_level, args=(writer,))
        p.start()
        self.assertEqual(LEVEL1, reader.recv())
        p.join()
        p.close()
        # With the level unset, the effective level falls back to the root
        # logger's level, which the child must also observe.
        logger.setLevel(logging.NOTSET)
        root_logger.setLevel(LEVEL2)
        p = self.Process(target=self._test_level, args=(writer,))
        p.start()
        self.assertEqual(LEVEL2, reader.recv())
        p.join()
        p.close()
        # Restore global logging state for subsequent tests.
        root_logger.setLevel(root_level)
        logger.setLevel(level=LOG_LEVEL)
class TestPollEintr(BaseTestCase):
    # NOTE(review): the original class-header line was truncated to
    # 'eTestCase):' (a SyntaxError). Restored from upstream CPython's
    # _test_multiprocessing.py, where this class is
    # 'class TestPollEintr(BaseTestCase):'.
    ALLOWED_TYPES = ('processes',)
    @classmethod
    def _killer(cls, pid):
        # Give the parent time to block in Process.join(), then interrupt
        # it with SIGUSR1; join() must retry rather than fail with EINTR.
        time.sleep(0.1)
        os.kill(pid, signal.SIGUSR1)
    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_poll_eintr(self):
        """join() interrupted by a signal must be transparently retried."""
        got_signal = [False]
        def record(*args):
            got_signal[0] = True
        pid = os.getpid()
        oldhandler = signal.signal(signal.SIGUSR1, record)
        try:
            killer = self.Process(target=self._killer, args=(pid,))
            killer.start()
            try:
                p = self.Process(target=time.sleep, args=(2,))
                p.start()
                p.join()
            finally:
                killer.join()
            # The signal arrived while join() was blocking, yet the child
            # still completed normally.
            self.assertTrue(got_signal[0])
            self.assertEqual(p.exitcode, 0)
        finally:
            signal.signal(signal.SIGUSR1, oldhandler)
class TestInvalidHandle(unittest.TestCase):
    """A Connection wrapping a bogus OS handle must fail cleanly."""
    @unittest.skipIf(WIN32, "skipped on Windows")
    def test_invalid_handles(self):
        bogus = multiprocessing.connection.Connection(44977608)
        try:
            try:
                bogus.poll()
            except (ValueError, OSError):
                pass
        finally:
            # Hack private attribute _handle to avoid printing an error
            # in conn.__del__
            bogus._handle = None
        # A negative handle is rejected outright at construction time.
        with self.assertRaises((ValueError, OSError)):
            multiprocessing.connection.Connection(-1)
@hashlib_helper.requires_hashdigest('md5')
class OtherTest(unittest.TestCase):
    # TODO: add more tests for deliver/answer challenge.
    def test_deliver_challenge_auth_failure(self):
        """A peer answering the challenge with garbage must be rejected."""
        class BogusConnection(object):
            def recv_bytes(self, size):
                return b'something bogus'
            def send_bytes(self, data):
                pass
        with self.assertRaises(multiprocessing.AuthenticationError):
            multiprocessing.connection.deliver_challenge(
                BogusConnection(), b'abc')
    def test_answer_challenge_auth_failure(self):
        """A server that sends a valid CHALLENGE but then a bogus verdict
        must be rejected on the client side."""
        class BogusConnection(object):
            def __init__(self):
                self.count = 0
            def recv_bytes(self, size):
                # First recv: a well-formed challenge; second: garbage.
                self.count += 1
                if self.count == 1:
                    return multiprocessing.connection.CHALLENGE
                if self.count == 2:
                    return b'something bogus'
                return b''
            def send_bytes(self, data):
                pass
        with self.assertRaises(multiprocessing.AuthenticationError):
            multiprocessing.connection.answer_challenge(
                BogusConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
    """Worker initializer used by TestInitializers: bump the shared
    namespace counter so the parent can observe that it ran."""
    ns.test = ns.test + 1
@hashlib_helper.requires_hashdigest('md5')
class TestInitializers(unittest.TestCase):
    """Issue 5585: Manager.start() and Pool.__init__() initializer feature.

    The shared namespace counter is incremented by initializer() (defined
    above) when it runs in the manager/pool worker process.
    """
    def setUp(self):
        self.mgr = multiprocessing.Manager()
        self.ns = self.mgr.Namespace()
        # Counter incremented by initializer() in the child process.
        self.ns.test = 0
    def tearDown(self):
        self.mgr.shutdown()
        self.mgr.join()
    def test_manager_initializer(self):
        m = multiprocessing.managers.SyncManager()
        # A non-callable initializer must be rejected.
        self.assertRaises(TypeError, m.start, 1)
        m.start(initializer, (self.ns,))
        self.assertEqual(self.ns.test, 1)
        m.shutdown()
        m.join()
    def test_pool_initializer(self):
        # A non-callable initializer must be rejected.
        self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
        p = multiprocessing.Pool(1, initializer, (self.ns,))
        p.close()
        p.join()
        self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
    """Regression helper (issues 5155/5313/5331): spawning a daemonic child
    that touches a Queue must work from inside another process."""
    q = multiprocessing.Queue()
    worker = multiprocessing.Process(target=_this_sub_process, args=(q,))
    worker.daemon = True
    worker.start()
    worker.join()
def _afunc(x):
return x*x
def pool_in_process():
    """Create and use a Pool from within a child process (issue 5331)."""
    workers = multiprocessing.Pool(processes=4)
    # The result itself is unimportant; only clean create/map/shutdown is.
    workers.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
    workers.close()
    workers.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
    """Issues 5155, 5313, 5331: process/pool creation inside a child process,
    plus flushing behaviour of the fork-aware _file_like buffer."""
    def test_queue_in_process(self):
        proc = multiprocessing.Process(target=_test_process)
        proc.start()
        proc.join()
    def test_pool_in_process(self):
        p = multiprocessing.Process(target=pool_in_process)
        p.start()
        p.join()
    def test_flushing(self):
        sio = io.StringIO()
        flike = _file_like(sio)
        flike.write('foo')
        # NOTE(review): this Process is created but never started — this
        # matches upstream CPython; only the parent-side flush is checked.
        proc = multiprocessing.Process(target=lambda: flike.flush())
        flike.flush()
        assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
    """Tests for multiprocess.connection.wait() over pipes, sockets,
    process sentinels, and with various timeouts."""
    @classmethod
    def _child_test_wait(cls, w, slow):
        # Child: send ten (index, pid) tuples, optionally with random
        # delays, then close the writing end so the parent sees EOF.
        for i in range(10):
            if slow:
                time.sleep(random.random()*0.1)
            w.send((i, os.getpid()))
        w.close()
    def test_wait(self, slow=False):
        from multiprocess.connection import wait
        readers = []
        procs = []
        messages = []
        for i in range(4):
            r, w = multiprocessing.Pipe(duplex=False)
            p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
            p.daemon = True
            p.start()
            # Parent must close its copy of the write end, otherwise EOF
            # is never delivered on the read end.
            w.close()
            readers.append(r)
            procs.append(p)
            self.addCleanup(p.join)
        # Drain all readers; remove each one once its EOF is reached.
        while readers:
            for r in wait(readers):
                try:
                    msg = r.recv()
                except EOFError:
                    readers.remove(r)
                    r.close()
                else:
                    messages.append(msg)
        messages.sort()
        expected = sorted((i, p.pid) for i in range(10) for p in procs)
        self.assertEqual(messages, expected)
    @classmethod
    def _child_test_wait_socket(cls, address, slow):
        # Child: connect to the parent's listening socket and send ten
        # newline-terminated numbers.
        s = socket.socket()
        s.connect(address)
        for i in range(10):
            if slow:
                time.sleep(random.random()*0.1)
            s.sendall(('%s\n' % i).encode('ascii'))
        s.close()
    def test_wait_socket(self, slow=False):
        from multiprocess.connection import wait
        l = socket.create_server((socket_helper.HOST, 0))
        addr = l.getsockname()
        readers = []
        procs = []
        dic = {}
        for i in range(4):
            p = multiprocessing.Process(target=self._child_test_wait_socket,
                                        args=(addr, slow))
            p.daemon = True
            p.start()
            procs.append(p)
            self.addCleanup(p.join)
        for i in range(4):
            r, _ = l.accept()
            readers.append(r)
            dic[r] = []
        l.close()
        # Collect per-connection data until every peer has closed.
        while readers:
            for r in wait(readers):
                msg = r.recv(32)
                if not msg:
                    readers.remove(r)
                    r.close()
                else:
                    dic[r].append(msg)
        expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
        for v in dic.values():
            self.assertEqual(b''.join(v), expected)
    def test_wait_slow(self):
        # Same as test_wait, but children sleep between sends.
        self.test_wait(True)
    def test_wait_socket_slow(self):
        self.test_wait_socket(True)
    def test_wait_timeout(self):
        from multiprocess.connection import wait
        expected = 5
        a, b = multiprocessing.Pipe()
        # Nothing readable: wait() must block roughly `expected` seconds.
        start = getattr(time,'monotonic',time.time)()
        res = wait([a, b], expected)
        delta = getattr(time,'monotonic',time.time)() - start
        self.assertEqual(res, [])
        self.assertLess(delta, expected * 2)
        self.assertGreater(delta, expected * 0.5)
        # With data pending, wait() must return quickly despite the timeout.
        b.send(None)
        start = getattr(time,'monotonic',time.time)()
        res = wait([a, b], 20)
        delta = getattr(time,'monotonic',time.time)() - start
        self.assertEqual(res, [a])
        self.assertLess(delta, 0.4)
    @classmethod
    def signal_and_sleep(cls, sem, period):
        # Child: announce readiness then stay alive for `period` seconds.
        sem.release()
        time.sleep(period)
    def test_wait_integer(self):
        from multiprocess.connection import wait
        expected = 3
        sorted_ = lambda l: sorted(l, key=lambda x: id(x))
        sem = multiprocessing.Semaphore(0)
        a, b = multiprocessing.Pipe()
        p = multiprocessing.Process(target=self.signal_and_sleep,
                                    args=(sem, expected))
        p.start()
        # A process sentinel is a plain integer that wait() accepts.
        self.assertIsInstance(p.sentinel, int)
        self.assertTrue(sem.acquire(timeout=20))
        # Only the sentinel becomes ready, once the child exits.
        start = getattr(time,'monotonic',time.time)()
        res = wait([a, p.sentinel, b], expected + 20)
        delta = getattr(time,'monotonic',time.time)() - start
        self.assertEqual(res, [p.sentinel])
        self.assertLess(delta, expected + 2)
        self.assertGreater(delta, expected - 2)
        # Data on one pipe end: sentinel and that end are ready.
        a.send(None)
        start = getattr(time,'monotonic',time.time)()
        res = wait([a, p.sentinel, b], 20)
        delta = getattr(time,'monotonic',time.time)() - start
        self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
        self.assertLess(delta, 0.4)
        # Data on both ends: everything is ready.
        b.send(None)
        start = getattr(time,'monotonic',time.time)()
        res = wait([a, p.sentinel, b], 20)
        delta = getattr(time,'monotonic',time.time)() - start
        self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
        self.assertLess(delta, 0.4)
        p.terminate()
        p.join()
    def test_neg_timeout(self):
        from multiprocess.connection import wait
        a, b = multiprocessing.Pipe()
        # A negative timeout behaves like a poll: return immediately.
        t = getattr(time,'monotonic',time.time)()
        res = wait([a], timeout=-1)
        t = getattr(time,'monotonic',time.time)() - t
        self.assertEqual(res, [])
        self.assertLess(t, 1)
        a.close()
        b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
    """Issue 14151: Listener must reject an address whose family does not
    exist on the current platform."""
    @unittest.skipIf(WIN32, "skipped on Windows")
    def test_invalid_family(self):
        # A Windows named-pipe path is not a valid POSIX address.
        self.assertRaises(ValueError,
                          multiprocessing.connection.Listener, r'\\.\test')
    @unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
    def test_invalid_family_win32(self):
        # A POSIX socket path is not a valid Windows named-pipe address.
        self.assertRaises(ValueError,
                          multiprocessing.connection.Listener, '/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
    """Issue 12098: sys.flags of a child process must match the parent's."""
    @classmethod
    def run_in_grandchild(cls, conn):
        # Deepest level: report this interpreter's flags to its parent.
        conn.send(tuple(sys.flags))
    @classmethod
    def run_in_child(cls):
        # Middle level: spawn a grandchild, collect its flags, and print
        # (own flags, grandchild flags) as JSON for the top-level test.
        import json
        r, w = multiprocessing.Pipe(duplex=False)
        p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
        p.start()
        grandchild_flags = r.recv()
        p.join()
        r.close()
        w.close()
        flags = (tuple(sys.flags), grandchild_flags)
        print(json.dumps(flags))
    def _test_flags(self):
        # NOTE(review): the leading underscore disables this test in this
        # distribution; upstream CPython names it test_flags.
        import json
        # start child process using unusual flags
        prog = ('from multiprocess.tests import TestFlags; ' +
                'TestFlags.run_in_child()')
        data = subprocess.check_output(
            [sys.executable, '-E', '-S', '-O', '-c', prog])
        child_flags, grandchild_flags = json.loads(data.decode('ascii'))
        self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
    """Issue #6056: a global default socket timeout must not break
    multiprocessing connections."""
    @classmethod
    def _test_timeout(cls, child, address):
        # Child: reply on the pipe, then connect back to the listener.
        time.sleep(1)
        child.send(123)
        child.close()
        conn = multiprocessing.connection.Client(address)
        conn.send(456)
        conn.close()
    def test_timeout(self):
        old_timeout = socket.getdefaulttimeout()
        try:
            # A very small global timeout; accept()/recv() must still work.
            socket.setdefaulttimeout(0.1)
            parent, child = multiprocessing.Pipe(duplex=True)
            l = multiprocessing.connection.Listener(family='AF_INET')
            p = multiprocessing.Process(target=self._test_timeout,
                                        args=(child, l.address))
            p.start()
            child.close()
            self.assertEqual(parent.recv(), 123)
            parent.close()
            conn = l.accept()
            self.assertEqual(conn.recv(), 456)
            conn.close()
            l.close()
            join_process(p)
        finally:
            socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
    """A script lacking an 'if __name__ == "__main__"' guard must not
    recursively spawn processes (fork bomb)."""
    def test_noforkbomb(self):
        start_method = multiprocessing.get_start_method()
        name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
        if start_method == 'fork':
            # With fork, the child never re-imports __main__, so the
            # unguarded script runs to completion and prints its result.
            rc, out, err = test.support.script_helper.assert_python_ok(name, start_method)
            self.assertEqual(out.rstrip(), b'123')
            self.assertEqual(err, b'')
        else:
            # spawn/forkserver re-import __main__, which must raise a
            # RuntimeError instead of spawning processes recursively.
            rc, out, err = test.support.script_helper.assert_python_failure(name, start_method)
            self.assertEqual(out, b'')
            self.assertIn(b'RuntimeError', err)
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
    # We recursively start processes. Issue #17555 meant that the
    # after fork registry would get duplicate entries for the same
    # lock. The size of the registry at generation n was ~2**n.
    @classmethod
    def child(cls, n, conn):
        # Recurse n generations deep; the deepest child reports the size
        # of the after-fork registry back through the pipe.
        if n > 1:
            p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
            p.start()
            conn.close()
            join_process(p)
        else:
            conn.send(len(util._afterfork_registry))
            conn.close()
    def test_lock(self):
        r, w = multiprocessing.Pipe(False)
        l = util.ForkAwareThreadLock()
        old_size = len(util._afterfork_registry)
        p = multiprocessing.Process(target=self.child, args=(5, w))
        p.start()
        w.close()
        new_size = r.recv()
        join_process(p)
        # Without the fix the registry would roughly double per generation;
        # with it, the size must not grow.
        self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
    """Non-forked child processes must not inherit unneeded fds/handles."""
    def get_high_socket_fd(self):
        if WIN32:
            # The child process will not have any socket handles, so
            # calling socket.fromfd() should produce WSAENOTSOCK even
            # if there is a handle of the same number.
            return socket.socket().detach()
        else:
            # We want to produce a socket with an fd high enough that a
            # freshly created child process will not have any fds as high.
            fd = socket.socket().detach()
            to_close = []
            while fd < 50:
                to_close.append(fd)
                fd = os.dup(fd)
            for x in to_close:
                os.close(x)
            return fd
    def close(self, fd):
        # Platform-appropriate release of the raw fd/handle.
        if WIN32:
            socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
        else:
            os.close(fd)
    @classmethod
    def _test_closefds(cls, conn, fd):
        # Child: report whether the inherited fd number is still a usable
        # socket (None on success, the exception otherwise).
        try:
            s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        except Exception as e:
            conn.send(e)
        else:
            s.close()
            conn.send(None)
    def test_closefd(self):
        if not HAS_REDUCTION:
            raise unittest.SkipTest('requires fd pickling')
        reader, writer = multiprocessing.Pipe()
        fd = self.get_high_socket_fd()
        try:
            p = multiprocessing.Process(target=self._test_closefds,
                                        args=(writer, fd))
            p.start()
            writer.close()
            e = reader.recv()
            join_process(p)
        finally:
            self.close(fd)
            writer.close()
            reader.close()
        # With 'fork' the fd is inherited and usable; with spawn/forkserver
        # the child must not have it and fromfd() fails.
        if multiprocessing.get_start_method() == 'fork':
            self.assertIs(e, None)
        else:
            WSAENOTSOCK = 10038
            self.assertIsInstance(e, OSError)
            self.assertTrue(e.errno == errno.EBADF or
                            e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
    """Issue #17097: EINTR should be ignored by recv(), send(), accept() etc."""
    # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
    CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
    @classmethod
    def _test_ignore(cls, conn):
        # Child: install a no-op SIGUSR1 handler, then perform blocking
        # recv/send operations that the parent will interrupt.
        def handler(signum, frame):
            pass
        signal.signal(signal.SIGUSR1, handler)
        conn.send('ready')
        x = conn.recv()
        conn.send(x)
        # This send blocks until the parent drains the pipe; a signal
        # arriving meanwhile must be retried, not surface as EINTR.
        conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_ignore(self):
        conn, child_conn = multiprocessing.Pipe()
        try:
            p = multiprocessing.Process(target=self._test_ignore,
                                        args=(child_conn,))
            p.daemon = True
            p.start()
            child_conn.close()
            self.assertEqual(conn.recv(), 'ready')
            time.sleep(0.1)
            # Interrupt the child while it is blocked in recv().
            os.kill(p.pid, signal.SIGUSR1)
            time.sleep(0.1)
            conn.send(1234)
            self.assertEqual(conn.recv(), 1234)
            time.sleep(0.1)
            # Interrupt the child while it is blocked in send_bytes().
            os.kill(p.pid, signal.SIGUSR1)
            self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
            time.sleep(0.1)
            p.join()
        finally:
            conn.close()
    @classmethod
    def _test_ignore_listener(cls, conn):
        # Child: install a no-op SIGUSR1 handler, then block in accept().
        def handler(signum, frame):
            pass
        signal.signal(signal.SIGUSR1, handler)
        with multiprocessing.connection.Listener() as l:
            conn.send(l.address)
            a = l.accept()
            a.send('welcome')
    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_ignore_listener(self):
        conn, child_conn = multiprocessing.Pipe()
        try:
            p = multiprocessing.Process(target=self._test_ignore_listener,
                                        args=(child_conn,))
            p.daemon = True
            p.start()
            child_conn.close()
            address = conn.recv()
            time.sleep(0.1)
            # Interrupt the child while it is blocked in accept().
            os.kill(p.pid, signal.SIGUSR1)
            time.sleep(0.1)
            client = multiprocessing.connection.Client(address)
            self.assertEqual(client.recv(), 'welcome')
            p.join()
        finally:
            conn.close()
class TestStartMethod(unittest.TestCase):
    """Tests for get/set of the start method and start-method contexts."""
    @classmethod
    def _check_context(cls, conn):
        # Child: report which start method it believes is active.
        conn.send(multiprocessing.get_start_method())
    def check_context(self, ctx):
        # Spawn a child through `ctx` and verify the child observes the
        # same start method the context claims.
        r, w = ctx.Pipe(duplex=False)
        p = ctx.Process(target=self._check_context, args=(w,))
        p.start()
        w.close()
        child_method = r.recv()
        r.close()
        p.join()
        self.assertEqual(child_method, ctx.get_start_method())
    def test_context(self):
        for method in ('fork', 'spawn', 'forkserver'):
            try:
                ctx = multiprocessing.get_context(method)
            except ValueError:
                # Method not supported on this platform.
                continue
            self.assertEqual(ctx.get_start_method(), method)
            self.assertIs(ctx.get_context(), ctx)
            # A concrete context's start method is fixed.
            self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
            self.assertRaises(ValueError, ctx.set_start_method, None)
            self.check_context(ctx)
    def test_set_get(self):
        multiprocessing.set_forkserver_preload(PRELOAD)
        count = 0
        old_method = multiprocessing.get_start_method()
        try:
            for method in ('fork', 'spawn', 'forkserver'):
                try:
                    multiprocessing.set_start_method(method, force=True)
                except ValueError:
                    continue
                self.assertEqual(multiprocessing.get_start_method(), method)
                ctx = multiprocessing.get_context()
                self.assertEqual(ctx.get_start_method(), method)
                # Context class and Process class names encode the method.
                self.assertTrue(type(ctx).__name__.lower().startswith(method))
                self.assertTrue(
                    ctx.Process.__name__.lower().startswith(method))
                self.check_context(multiprocessing)
                count += 1
        finally:
            multiprocessing.set_start_method(old_method, force=True)
        # At least one start method must be available everywhere.
        self.assertGreaterEqual(count, 1)
    def test_get_all(self):
        methods = multiprocessing.get_all_start_methods()
        if sys.platform == 'win32':
            self.assertEqual(methods, ['spawn'])
        else:
            # Order is platform-dependent; forkserver may be absent.
            self.assertTrue(methods == ['fork', 'spawn'] or
                            methods == ['spawn', 'fork'] or
                            methods == ['fork', 'spawn', 'forkserver'] or
                            methods == ['spawn', 'fork', 'forkserver'])
    def test_preload_resources(self):
        if multiprocessing.get_start_method() != 'forkserver':
            self.skipTest("test only relevant for 'forkserver' method")
        name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
        rc, out, err = test.support.script_helper.assert_python_ok(name)
        out = out.decode()
        err = err.decode()
        if out.rstrip() != 'ok' or err != '':
            print(out)
            print(err)
            self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
                 "test semantics don't make sense on Windows")
class TestResourceTracker(unittest.TestCase):
    """Tests for the resource_tracker process: leak reclamation and
    robustness of the tracker itself against signals and reuse."""
    def _test_resource_tracker(self):
        # NOTE(review): leading underscore disables this test here;
        # upstream CPython runs it as test_resource_tracker.
        # The subprocess creates two resources per type, reports their
        # names over a pipe, then sleeps so it can be killed abruptly.
        cmd = '''if 1:
            import time, os, tempfile
            import multiprocess as mp
            from multiprocess import resource_tracker
            from multiprocess.shared_memory import SharedMemory
            mp.set_start_method("spawn")
            rand = tempfile._RandomNameSequence()
            def create_and_register_resource(rtype):
                if rtype == "semaphore":
                    lock = mp.Lock()
                    return lock, lock._semlock.name
                elif rtype == "shared_memory":
                    sm = SharedMemory(create=True, size=10)
                    return sm, sm._name
                else:
                    raise ValueError(
                        "Resource type {{}} not understood".format(rtype))
            resource1, rname1 = create_and_register_resource("{rtype}")
            resource2, rname2 = create_and_register_resource("{rtype}")
            os.write({w}, rname1.encode("ascii") + b"\\n")
            os.write({w}, rname2.encode("ascii") + b"\\n")
            time.sleep(10)
        '''
        for rtype in resource_tracker._CLEANUP_FUNCS:
            with self.subTest(rtype=rtype):
                if rtype == "noop":
                    # No real resource backs the "noop" type.
                    continue
                r, w = os.pipe()
                p = subprocess.Popen([sys.executable,
                                     '-E', '-c', cmd.format(w=w, rtype=rtype)],
                                     pass_fds=[w],
                                     stderr=subprocess.PIPE)
                os.close(w)
                with open(r, 'rb', closefd=True) as f:
                    name1 = f.readline().rstrip().decode('ascii')
                    name2 = f.readline().rstrip().decode('ascii')
                # Unlink the first resource ourselves, then kill the child
                # while it still holds the second one.
                _resource_unlink(name1, rtype)
                p.terminate()
                p.wait()
                # The tracker must eventually unlink the second resource;
                # our own unlink then fails with ENOENT/EINVAL.
                deadline = getattr(time,'monotonic',time.time)() + support.LONG_TIMEOUT
                while getattr(time,'monotonic',time.time)() < deadline:
                    time.sleep(.5)
                    try:
                        _resource_unlink(name2, rtype)
                    except OSError as e:
                        self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL))
                        break
                else:
                    raise AssertionError(
                        f"A {rtype} resource was leaked after a process was "
                        f"abruptly terminated.")
                err = p.stderr.read().decode('utf-8')
                p.stderr.close()
                # Both resources were registered, so the tracker reports
                # two leaked objects on the child's stderr.
                expected = ('resource_tracker: There appear to be 2 leaked {} '
                            'objects'.format(
                            rtype))
                self.assertRegex(err, expected)
                self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1)
    def check_resource_tracker_death(self, signum, should_die):
        # bpo-31310: if the tracker process dies, it should be restarted
        # implicitly; SIGINT/SIGTERM are ignored, SIGKILL is fatal.
        from multiprocess.resource_tracker import _resource_tracker
        pid = _resource_tracker._pid
        if pid is not None:
            os.kill(pid, signal.SIGKILL)
            support.wait_process(pid, exitcode=-signal.SIGKILL)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            _resource_tracker.ensure_running()
        pid = _resource_tracker._pid
        os.kill(pid, signum)
        time.sleep(1.0)  # give it time to die
        ctx = multiprocessing.get_context("spawn")
        with warnings.catch_warnings(record=True) as all_warn:
            warnings.simplefilter("always")
            sem = ctx.Semaphore()
            sem.acquire()
            sem.release()
            wr = weakref.ref(sem)
            # ensure `sem` gets collected, which triggers communication with
            # the tracker process
            del sem
            gc.collect()
            self.assertIsNone(wr())
            if should_die:
                self.assertEqual(len(all_warn), 1)
                the_warn = all_warn[0]
                self.assertTrue(issubclass(the_warn.category, UserWarning))
                self.assertTrue("resource_tracker: process died"
                                in str(the_warn.message))
            else:
                self.assertEqual(len(all_warn), 0)
    def test_resource_tracker_sigint(self):
        # Catchable signals are ignored by the tracker.
        self.check_resource_tracker_death(signal.SIGINT, False)
    def test_resource_tracker_sigterm(self):
        # Catchable signals are ignored by the tracker.
        self.check_resource_tracker_death(signal.SIGTERM, False)
    def test_resource_tracker_sigkill(self):
        # Uncatchable signals kill the tracker; a warning is emitted.
        self.check_resource_tracker_death(signal.SIGKILL, True)
    @staticmethod
    def _is_resource_tracker_reused(conn, pid):
        from multiprocess.resource_tracker import _resource_tracker
        _resource_tracker.ensure_running()
        # The pid should be None in the child process, expect for the fork
        # context: in that case the tracker is inherited from the parent.
        reused = _resource_tracker._pid in (None, pid)
        reused &= _resource_tracker._check_alive()
        conn.send(reused)
    def test_resource_tracker_reused(self):
        from multiprocess.resource_tracker import _resource_tracker
        _resource_tracker.ensure_running()
        pid = _resource_tracker._pid
        r, w = multiprocessing.Pipe(duplex=False)
        p = multiprocessing.Process(target=self._is_resource_tracker_reused,
                                    args=(w, pid))
        p.start()
        is_resource_tracker_reused = r.recv()
        p.join()
        w.close()
        r.close()
        self.assertTrue(is_resource_tracker_reused)
class TestSimpleQueue(unittest.TestCase):
    """Tests for multiprocessing.SimpleQueue: empty() across processes and
    idempotent close()."""
    @classmethod
    def _test_empty(cls, queue, child_can_start, parent_can_continue):
        # Child: wait for the go-ahead, then report what empty() says from
        # this side of the queue (before and after the first put).
        child_can_start.wait()
        try:
            queue.put(queue.empty())
            queue.put(queue.empty())
        finally:
            parent_can_continue.set()
    def test_empty(self):
        queue = multiprocessing.SimpleQueue()
        child_can_start = multiprocessing.Event()
        parent_can_continue = multiprocessing.Event()
        proc = multiprocessing.Process(
            target=self._test_empty,
            args=(queue, child_can_start, parent_can_continue)
        )
        proc.daemon = True
        proc.start()
        self.assertTrue(queue.empty())
        child_can_start.set()
        parent_can_continue.wait()
        self.assertFalse(queue.empty())
        # First value: queue was empty when the child first checked;
        # second: it no longer was after the first put.
        self.assertEqual(queue.get(), True)
        self.assertEqual(queue.get(), False)
        self.assertTrue(queue.empty())
        proc.join()
    def test_close(self):
        queue = multiprocessing.SimpleQueue()
        queue.close()
        # closing a queue twice should not fail
        queue.close()
    # Test specific to CPython since it tests private attributes
    @test.support.cpython_only
    def test_closed(self):
        queue = multiprocessing.SimpleQueue()
        queue.close()
        self.assertTrue(queue._reader.closed)
        self.assertTrue(queue._writer.closed)
class TestPoolNotLeakOnFailure(unittest.TestCase):
    def test_release_unused_processes(self):
        """If a worker fails to start while the Pool is spinning up, the
        already-started workers must be terminated, not leaked (bpo-36368).
        """
        # Number of fake workers that start successfully before one raises.
        # Bug fix: this assignment was corrupted to 'il_in = 3', which left
        # 'will_fail_in' unbound and made the 'nonlocal' declaration below
        # a SyntaxError.
        will_fail_in = 3
        forked_processes = []
        class FailingForkProcess:
            """Process stand-in whose start() raises after a few successes."""
            def __init__(self, **kwargs):
                self.name = 'Fake Process'
                self.exitcode = None
                self.state = None
                forked_processes.append(self)
            def start(self):
                nonlocal will_fail_in
                if will_fail_in <= 0:
                    raise OSError("Manually induced OSError")
                will_fail_in -= 1
                self.state = 'started'
            def terminate(self):
                self.state = 'stopping'
            def join(self):
                if self.state == 'stopping':
                    self.state = 'stopped'
            def is_alive(self):
                return self.state == 'started' or self.state == 'stopping'
        with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
            p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
                Process=FailingForkProcess))
            p.close()
            p.join()
        # Every fake worker must have been stopped by Pool's cleanup.
        self.assertFalse(
            any(process.is_alive() for process in forked_processes))
@hashlib_helper.requires_hashdigest('md5')
class TestSyncManagerTypes(unittest.TestCase):
manager_class = multiprocessing.managers.SyncManager
def setUp(self):
self.manager = self.manager_class()
self.manager.start()
self.proc = None
def tearDown(self):
if self.proc is not None and self.proc.is_alive():
self.proc.terminate()
self.proc.join()
self.manager.shutdown()
self.manager = None
self.proc = None
@classmethod
def setUpClass(cls):
support.reap_children()
tearDownClass = setUpClass
def wait_proc_exit(self):
join_process(self.proc)
start_time = getattr(time,'monotonic',time.time)()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = getattr(time,'monotonic',time.time)() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocess.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
def run_worker(self, worker, obj):
self.proc = multiprocessing.Process(target=worker, args=(obj, ))
self.proc.daemon = True
self.proc.start()
self.wait_proc_exit()
self.assertEqual(self.proc.exitcode, 0)
@classmethod
def _test_event(cls, obj):
assert obj.is_set()
obj.wait()
obj.clear()
obj.wait(0.001)
def test_event(self):
o = self.manager.Event()
o.set()
self.run_worker(self._test_event, o)
assert not o.is_set()
o.wait(0.001)
@classmethod
def _test_lock(cls, obj):
obj.acquire()
def test_lock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_lock, o)
o.release()
self.assertRaises(RuntimeError, o.release)
@classmethod
def _test_rlock(cls, obj):
obj.acquire()
obj.release()
def test_rlock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_rlock, o)
@classmethod
def _test_semaphore(cls, obj):
obj.acquire()
def test_semaphore(self, sname="Semaphore"):
o = getattr(self.manager, sname)()
self.run_worker(self._test_semaphore, o)
o.release()
def test_bounded_semaphore(self):
self.test_semaphore(sname="BoundedSemaphore")
@classmethod
def _test_condition(cls, obj):
obj.acquire()
obj.release()
def test_condition(self):
o = self.manager.Condition()
self.run_worker(self._test_condition, o)
@classmethod
def _test_barrier(cls, obj):
assert obj.parties == 5
obj.reset()
def test_barrier(self):
o = self.manager.Barrier(5)
self.run_worker(self._test_barrier, o)
@classmethod
def _test_pool(cls, obj):
with obj:
pass
def test_pool(self):
o = self.manager.Pool(processes=4)
self.run_worker(self._test_pool, o)
@classmethod
def _test_queue(cls, obj):
assert obj.qsize() == 2
assert obj.full()
assert not obj.empty()
assert obj.get() == 5
assert not obj.empty()
assert obj.get() == 6
assert obj.empty()
def test_queue(self, qname="Queue"):
o = getattr(self.manager, qname)(2)
o.put(5)
o.put(6)
self.run_worker(self._test_queue, o)
assert o.empty()
assert not o.full()
def test_joinable_queue(self):
self.test_queue("JoinableQueue")
@classmethod
def _test_list(cls, obj):
    # Child exercises indexing, count/index, in-place sort/reverse,
    # iteration, len, and pop on the list proxy (leaving it empty).
    assert obj[0] == 5
    assert obj.count(5) == 1
    assert obj.index(5) == 0
    obj.sort()
    obj.reverse()
    for x in obj:
        pass
    assert len(obj) == 1
    assert obj.pop(0) == 5
def test_list(self):
    # Parent appends one item; child pops it; parent sees the empty list
    # (both via truthiness and len) through the proxy.
    o = self.manager.list()
    o.append(5)
    self.run_worker(self._test_list, o)
    assert not o
    self.assertEqual(len(o), 0)
@classmethod
def _test_dict(cls, obj):
    # Child exercises the dict-proxy read API and removes the single
    # entry via popitem (leaving the dict empty for the parent check).
    assert len(obj) == 1
    assert obj['foo'] == 5
    assert obj.get('foo') == 5
    assert list(obj.items()) == [('foo', 5)]
    assert list(obj.keys()) == ['foo']
    assert list(obj.values()) == [5]
    assert obj.copy() == {'foo': 5}
    assert obj.popitem() == ('foo', 5)
def test_dict(self):
    # Parent stores one key; child pops it; parent sees the empty dict.
    o = self.manager.dict()
    o['foo'] = 5
    self.run_worker(self._test_dict, o)
    assert not o
    self.assertEqual(len(o), 0)
@classmethod
def _test_value(cls, obj):
    # Child reads the initial value via both .value and .get(), then
    # writes 2 for the parent to observe.
    assert obj.value == 1
    assert obj.get() == 1
    obj.set(2)
def test_value(self):
    # Integer Value initialised to 1; child sets it to 2; parent verifies.
    o = self.manager.Value('i', 1)
    self.run_worker(self._test_value, o)
    self.assertEqual(o.value, 2)
    self.assertEqual(o.get(), 2)
@classmethod
def _test_array(cls, obj):
    # Child checks indexing, length, and iteration on the array proxy.
    assert obj[0] == 0
    assert obj[1] == 1
    assert len(obj) == 2
    assert list(obj) == [0, 1]
def test_array(self):
    # Integer Array proxy with two elements, read from the child.
    o = self.manager.Array('i', [0, 1])
    self.run_worker(self._test_array, o)
@classmethod
def _test_namespace(cls, obj):
    # Child reads the attributes the parent assigned on the Namespace proxy.
    assert obj.x == 0
    assert obj.y == 1
def test_namespace(self):
    # Attributes set in the parent must be visible in the child.
    o = self.manager.Namespace()
    o.x = 0
    o.y = 1
    self.run_worker(self._test_namespace, o)
class MiscTestCase(unittest.TestCase):
    def test__all__(self):
        # Verify the package's public API: multiprocessing.__all__ is passed
        # as `extra` (names are re-exported), and the legacy SUBDEBUG /
        # SUBWARNING log levels are deliberately excluded from the check.
        support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
                             blacklist=['SUBDEBUG', 'SUBWARNING'])
class BaseMixin(object):
    # Shared bookkeeping for all test mixins: snapshot the dangling
    # processes/threads at class setup so tearDownClass can warn about
    # anything the tests leaked.

    @classmethod
    def setUpClass(cls):
        cls.dangling = (multiprocessing.process._dangling.copy(),
                        threading._dangling.copy())

    @classmethod
    def tearDownClass(cls):
        test.support.gc_collect()
        # Anything dangling now that wasn't dangling at setup was leaked
        # by this class's tests; report it and flag the environment.
        processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
        if processes:
            test.support.environment_altered = True
            support.print_warning(f'Dangling processes: {processes}')
        processes = None
        threads = set(threading._dangling) - set(cls.dangling[1])
        if threads:
            test.support.environment_altered = True
            support.print_warning(f'Dangling threads: {threads}')
        threads = None
class ProcessesMixin(BaseMixin):
    # Runs the shared tests against the real multiprocessing API (separate
    # OS processes). The staticmethod wrappers give every mixin the same
    # attribute surface so test bodies stay type-agnostic.
    TYPE = 'processes'
    Process = multiprocessing.Process
    connection = multiprocessing.connection
    current_process = staticmethod(multiprocessing.current_process)
    parent_process = staticmethod(multiprocessing.parent_process)
    active_children = staticmethod(multiprocessing.active_children)
    Pool = staticmethod(multiprocessing.Pool)
    Pipe = staticmethod(multiprocessing.Pipe)
    Queue = staticmethod(multiprocessing.Queue)
    JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
    Lock = staticmethod(multiprocessing.Lock)
    RLock = staticmethod(multiprocessing.RLock)
    Semaphore = staticmethod(multiprocessing.Semaphore)
    BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
    Condition = staticmethod(multiprocessing.Condition)
    Event = staticmethod(multiprocessing.Event)
    Barrier = staticmethod(multiprocessing.Barrier)
    Value = staticmethod(multiprocessing.Value)
    Array = staticmethod(multiprocessing.Array)
    RawValue = staticmethod(multiprocessing.RawValue)
    RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
    # Runs the shared tests through a SyncManager. The class-level
    # properties late-bind to `cls.manager`, which only exists after
    # setUpClass has created it.
    TYPE = 'manager'
    Process = multiprocessing.Process
    Queue = property(operator.attrgetter('manager.Queue'))
    JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
    Lock = property(operator.attrgetter('manager.Lock'))
    RLock = property(operator.attrgetter('manager.RLock'))
    Semaphore = property(operator.attrgetter('manager.Semaphore'))
    BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
    Condition = property(operator.attrgetter('manager.Condition'))
    Event = property(operator.attrgetter('manager.Event'))
    Barrier = property(operator.attrgetter('manager.Barrier'))
    Value = property(operator.attrgetter('manager.Value'))
    Array = property(operator.attrgetter('manager.Array'))
    list = property(operator.attrgetter('manager.list'))
    dict = property(operator.attrgetter('manager.dict'))
    Namespace = property(operator.attrgetter('manager.Namespace'))

    @classmethod
    def Pool(cls, *args, **kwds):
        # Pools must come from the manager so the tests get proxy objects.
        return cls.manager.Pool(*args, **kwds)

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.manager = multiprocessing.Manager()

    @classmethod
    def tearDownClass(cls):
        # Wait (with exponential backoff) for the manager's children to
        # exit; warn and give up after roughly five seconds.
        start_time = getattr(time,'monotonic',time.time)()
        t = 0.01
        while len(multiprocessing.active_children()) > 1:
            time.sleep(t)
            t *= 2
            dt = getattr(time,'monotonic',time.time)() - start_time
            if dt >= 5.0:
                test.support.environment_altered = True
                support.print_warning(f"multiprocess.Manager still has "
                                      f"{multiprocessing.active_children()} "
                                      f"active children after {dt} seconds")
                break
        gc.collect()
        # Shared objects still registered at shutdown indicate a leak in
        # the tests; dump the manager's debug info for diagnosis.
        if cls.manager._number_of_objects() != 0:
            test.support.environment_altered = True
            support.print_warning('Shared objects which still exist '
                                  'at manager shutdown:')
            support.print_warning(cls.manager._debug_info())
        cls.manager.shutdown()
        cls.manager.join()
        cls.manager = None
        super().tearDownClass()
class ThreadsMixin(BaseMixin):
    # Runs the shared tests against multiprocessing.dummy, the thread-backed
    # re-implementation of the multiprocessing API.
    TYPE = 'threads'
    Process = multiprocessing.dummy.Process
    connection = multiprocessing.dummy.connection
    current_process = staticmethod(multiprocessing.dummy.current_process)
    active_children = staticmethod(multiprocessing.dummy.active_children)
    Pool = staticmethod(multiprocessing.dummy.Pool)
    Pipe = staticmethod(multiprocessing.dummy.Pipe)
    Queue = staticmethod(multiprocessing.dummy.Queue)
    JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
    Lock = staticmethod(multiprocessing.dummy.Lock)
    RLock = staticmethod(multiprocessing.dummy.RLock)
    Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
    BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
    Condition = staticmethod(multiprocessing.dummy.Condition)
    Event = staticmethod(multiprocessing.dummy.Event)
    Barrier = staticmethod(multiprocessing.dummy.Barrier)
    Value = staticmethod(multiprocessing.dummy.Value)
    Array = staticmethod(multiprocessing.dummy.Array)
def install_tests_in_module_dict(remote_globs, start_method):
    """Clone this module's test cases into `remote_globs`, specialised for
    `start_method`, and install matching setUpModule/tearDownModule hooks."""
    __module__ = remote_globs['__name__']
    local_globs = globals()
    ALL_TYPES = {'processes', 'threads', 'manager'}
    for name, base in local_globs.items():
        if not isinstance(base, type):
            continue
        if issubclass(base, BaseTestCase):
            if base is BaseTestCase:
                continue
            assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
            for type_ in base.ALLOWED_TYPES:
                # e.g. _TestFoo + 'processes' -> WithProcessesTestFoo
                newname = 'With' + type_.capitalize() + name[1:]
                Mixin = local_globs[type_.capitalize() + 'Mixin']
                class Temp(base, Mixin, unittest.TestCase):
                    pass
                if type_ == 'manager':
                    # Manager connections authenticate with an MD5-based
                    # digest, so these tests need MD5 support.
                    Temp = hashlib_helper.requires_hashdigest('md5')(Temp)
                Temp.__name__ = Temp.__qualname__ = newname
                Temp.__module__ = __module__
                remote_globs[newname] = Temp
        elif issubclass(base, unittest.TestCase):
            # Plain TestCases are re-exported as fresh subclasses so the
            # remote module owns its own class objects.
            class Temp(base, object):
                pass
            Temp.__name__ = Temp.__qualname__ = name
            Temp.__module__ = __module__
            remote_globs[name] = Temp
    # Mutable one-element containers so the nested hooks can write state.
    dangling = [None, None]
    old_start_method = [None]
    def setUpModule():
        multiprocessing.set_forkserver_preload(PRELOAD)
        multiprocessing.process._cleanup()
        dangling[0] = multiprocessing.process._dangling.copy()
        dangling[1] = threading._dangling.copy()
        old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
        try:
            multiprocessing.set_start_method(start_method, force=True)
        except ValueError:
            raise unittest.SkipTest(start_method +
                                    ' start method not supported')
        if sys.platform.startswith("linux"):
            try:
                lock = multiprocessing.RLock()
            except OSError:
                raise unittest.SkipTest("OSError raises on RLock creation, "
                                        "see issue 3111!")
        check_enough_semaphores()
        util.get_temp_dir()
        multiprocessing.get_logger().setLevel(LOG_LEVEL)
    def tearDownModule():
        need_sleep = False
        test.support.gc_collect()
        # Restore the start method the suite found on entry.
        multiprocessing.set_start_method(old_start_method[0], force=True)
        processes = set(multiprocessing.process._dangling) - set(dangling[0])
        if processes:
            need_sleep = True
            test.support.environment_altered = True
            support.print_warning(f'Dangling processes: {processes}')
        processes = None
        threads = set(threading._dangling) - set(dangling[1])
        if threads:
            need_sleep = True
            test.support.environment_altered = True
            support.print_warning(f'Dangling threads: {threads}')
        threads = None
        # Sleep 500 ms to give time to child processes to complete.
        if need_sleep:
            time.sleep(0.5)
        multiprocessing.util._cleanup_tests()
    remote_globs['setUpModule'] = setUpModule
    remote_globs['tearDownModule'] = tearDownModule
| true | true |
f7fb7232f1f0c5acce0ddc7da84dcf9e59f6cbb5 | 663 | py | Python | vispy/util/__init__.py | lcampagn/vispy | 28c25d6904d697cde9bb4c37909bc3f934621134 | [
"BSD-3-Clause"
] | 1 | 2015-12-03T02:03:50.000Z | 2015-12-03T02:03:50.000Z | vispy/util/__init__.py | lcampagn/vispy | 28c25d6904d697cde9bb4c37909bc3f934621134 | [
"BSD-3-Clause"
] | 19 | 2015-06-16T14:33:22.000Z | 2015-07-27T21:18:15.000Z | vispy/util/__init__.py | astrofrog/vispy | fa5e2eab9bb3d956f87ae68a56e342913e58a305 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
""" Utilities for Vispy. A collection of modules that are used in
one or more Vispy sub-packages.
"""
from .logs import logger, set_log_level, use_log_level # noqa
from .config import (config, sys_info, save_config, get_config_keys, # noqa
set_data_dir, _TempDir) # noqa
from .fetching import load_data_file # noqa
from . import fonts # noqa
from . import transforms # noqa
from .wrappers import use, run_subprocess # noqa
from .bunch import SimpleBunch # noqa
| 39 | 77 | 0.714932 |
from .logs import logger, set_log_level, use_log_level
from .config import (config, sys_info, save_config, get_config_keys,
set_data_dir, _TempDir)
from .fetching import load_data_file
from . import fonts
from . import transforms
from .wrappers import use, run_subprocess
from .bunch import SimpleBunch
| true | true |
f7fb728e2b24c9183d89d5ba8c0bbb315e2baa40 | 597 | py | Python | ingestion/src/metadata/generated/data/tags/personalDataTags.py | klimber/OpenMetadata | 1db18a50cfde00825a04b8a2c63c94fa6d5df615 | [
"Apache-2.0"
] | null | null | null | ingestion/src/metadata/generated/data/tags/personalDataTags.py | klimber/OpenMetadata | 1db18a50cfde00825a04b8a2c63c94fa6d5df615 | [
"Apache-2.0"
] | null | null | null | ingestion/src/metadata/generated/data/tags/personalDataTags.py | klimber/OpenMetadata | 1db18a50cfde00825a04b8a2c63c94fa6d5df615 | [
"Apache-2.0"
] | null | null | null | # generated by datamodel-codegen:
# filename: data/tags/personalDataTags.json
# timestamp: 2021-10-12T00:34:28+00:00
from __future__ import annotations
from typing import Any
from pydantic import BaseModel, Field
class Model(BaseModel):
    # Generated by datamodel-codegen from data/tags/personalDataTags.json.
    # The tag category is an arbitrary JSON payload, hence the untyped
    # custom-root field.
    __root__: Any = Field(
        ...,
        description='Tags related classifying **Personal data** as defined by **GDPR.**<br/><br/>_Note to Legal - This tag category is provided as a starting point. Please review and update the tags based on your company policy. Also, add a reference to your GDPR policy document in this description._',
    )
| 35.117647 | 303 | 0.730318 |
from __future__ import annotations
from typing import Any
from pydantic import BaseModel, Field
class Model(BaseModel):
__root__: Any = Field(
...,
description='Tags related classifying **Personal data** as defined by **GDPR.**<br/><br/>_Note to Legal - This tag category is provided as a starting point. Please review and update the tags based on your company policy. Also, add a reference to your GDPR policy document in this description._',
)
| true | true |
f7fb72b31c3deac4de138d13a1013e5246ad7577 | 2,689 | py | Python | server_info_collector/migrations/0002_auto__add_field_networkelement_created.py | weijia/server-info-collector | a4ab5423de7d59859fa83762f8ce7ff2d6c50620 | [
"BSD-3-Clause"
] | null | null | null | server_info_collector/migrations/0002_auto__add_field_networkelement_created.py | weijia/server-info-collector | a4ab5423de7d59859fa83762f8ce7ff2d6c50620 | [
"BSD-3-Clause"
] | null | null | null | server_info_collector/migrations/0002_auto__add_field_networkelement_created.py | weijia/server-info-collector | a4ab5423de7d59859fa83762f8ce7ff2d6c50620 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration: adds the auto-populated `created` timestamp
    # column to NetworkElement. `models` is South's frozen ORM snapshot of
    # the app at this migration, not live model definitions.

    def forwards(self, orm):
        # Adding field 'NetworkElement.created'
        db.add_column('server_info_collector_networkelement', 'created',
                      self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'NetworkElement.created'
        db.delete_column('server_info_collector_networkelement', 'created')

    models = {
        'server_info_collector.networkelement': {
            'Meta': {'object_name': 'NetworkElement'},
            'cpu_info': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'hard_disk_size_in_m': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identifier': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'memory_size_in_m': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'model_or_part_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'os': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'other_info': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_network_element'", 'null': 'True', 'to': "orm['server_info_collector.NetworkElement']"}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'port': ('django.db.models.fields.IntegerField', [], {'default': '22'}),
            'serial_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'vendor': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        }
    }
    complete_apps = ['server_info_collector']
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.add_column('server_info_collector_networkelement', 'created',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
db.delete_column('server_info_collector_networkelement', 'created')
models = {
'server_info_collector.networkelement': {
'Meta': {'object_name': 'NetworkElement'},
'cpu_info': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'hard_disk_size_in_m': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'memory_size_in_m': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'model_or_part_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'os': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'other_info': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_network_element'", 'null': 'True', 'to': "orm['server_info_collector.NetworkElement']"}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'port': ('django.db.models.fields.IntegerField', [], {'default': '22'}),
'serial_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'vendor': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['server_info_collector'] | true | true |
f7fb74612c44e33866a339ff3d38323a157f4e43 | 3,459 | py | Python | tests.py | dag/flask-attest | 0ffe92424c289d6a360004dd1ea943458c6d49c5 | [
"BSD-2-Clause"
] | 1 | 2015-12-29T12:14:05.000Z | 2015-12-29T12:14:05.000Z | tests.py | dag/flask-attest | 0ffe92424c289d6a360004dd1ea943458c6d49c5 | [
"BSD-2-Clause"
] | 1 | 2016-10-31T15:59:13.000Z | 2016-10-31T15:59:13.000Z | tests.py | dag/flask-attest | 0ffe92424c289d6a360004dd1ea943458c6d49c5 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import with_statement
from flask import (Module, request, redirect, Flask, Response, jsonify,
render_template_string)
from flaskext.attest import request_context, get, post, put, delete
from flaskext.genshi import Genshi, generate_template
from attest import Tests, raises, assert_hook
DEBUG = True
TESTING = True
db = {}
mod = Module(__name__, name='tests')
@mod.route('/', methods=('GET', 'POST', 'PUT', 'DELETE'))
def index():
    # Minimal REST-ish view backed by the module-level `db` dict:
    # GET returns the stored message, POST/PUT store the `message` form
    # field, DELETE removes the entry. Non-GET requests return 'Success!'.
    case = lambda x: request.method == x
    if case('GET'):
        return db['index']
    elif case('POST') or case('PUT'):
        db['index'] = request.form['message']
    elif case('DELETE'):
        del db['index']
    return 'Success!'
@mod.route('/error')
def error():
    # Deliberately raises ZeroDivisionError; exercised by trigger_error.
    1/0
    return 'Oh noes!'
@mod.route('/elsewhere')
def elsewhere():
    # Redirect target checked by the `redirection` test below.
    return redirect('/otherplace')
@mod.route('/json')
def json():
    # NOTE(review): this view shadows the stdlib `json` name at module level.
    return jsonify(status='Success!')
@mod.route('/hello/<name>')
def hello(name):
    # Renders an inline Jinja template so template capture can be tested.
    return render_template_string('Hello {{name.capitalize()}}!', name=name)
@request_context
def testapp():
    # flask-attest context: build a fresh app configured from this module's
    # upper-case globals (DEBUG/TESTING), register the views, enable Genshi,
    # and yield the app for the request context.
    app = Flask(__name__)
    app.config.from_object(__name__)
    app.register_module(mod)
    Genshi(app)
    yield app
app = Tests(contexts=[testapp])
@app.test
@post('/', data={'message': 'Hello, World!'})
def post_to_index(response, templates):
    # POST stores the form message in the module-level db.
    assert (request.method) == 'POST'
    assert (response) == Response('Success!')
    assert (db['index']) == 'Hello, World!'
@app.test
@put('/', data={'message': 'Hello, World!'})
def put_to_index(response, templates):
    # PUT behaves the same as POST for this view.
    assert (request.method) == 'PUT'
    assert (response) == Response('Success!')
    assert (db['index']) == 'Hello, World!'
@app.test
@get('/')
def get_index(response, templates):
    # Relies on db['index'] left behind by the POST/PUT tests above; also
    # checks that Response equality is status-sensitive.
    assert (request.method) == 'GET'
    assert (response) == Response('Hello, World!')
    assert (response) != Response('Hello, World!', status=404)
@app.test
@delete('/')
def delete_index(response, templates):
    # DELETE removes the stored entry from the module-level db.
    assert (request.method) == 'DELETE'
    assert (response) == Response('Success!')
    assert ('index') not in (db)
@app.test
@get('/404')
def request_persists(response, templates):
    # The request context created by the decorator GET stays active inside
    # the test body (even for a 404 path).
    assert (request.path) == '/404'
@app.test
def test_request_context(client, templates):
    # Without a request decorator the default context is '/'; each client
    # call swaps in the new request.
    assert (request.path) == '/'
    client.get('/404')
    assert (request.path) == '/404'
@app.test
def trigger_error(client, templates):
    # In debug mode the view's exception propagates to the test; with
    # debug off Flask converts it into a 500 response instead.
    with raises(ZeroDivisionError):
        client.get('/error')
    client.application.debug = False
    response = client.get('/error')
    assert (response.status_code) == 500
    client.application.debug = True
@app.test
@get('/elsewhere')
def redirection(response, templates):
    # Redirect responses compare equal to an equivalent redirect() and
    # unequal to one with a different target.
    assert (response) == redirect('/otherplace')
    assert (response) != redirect('/wrongplace')
@app.test
@get('/json')
def json_response(response, templates):
    # JSON responses compare equal to an equivalent jsonify() result.
    assert (response) == jsonify(status='Success!')
@app.test
@get('/hello/world')
def capture_templates(response, templates):
    # `templates` collects (template, context) pairs; string templates have
    # no name, hence templates[0][0] is None.
    assert (response) == Response('Hello World!')
    assert (len(templates)) == 1
    assert (templates[0][0]) is (None)
    assert (templates[0][1]['name']) == 'world'
@app.test
def genshi_templates(client, templates):
    # Genshi renders are captured through the same `templates` hook as
    # Jinja ones.
    generate_template(string='Hello ${name.capitalize}!', method='text',
                      context=dict(name='world'))
    assert (len(templates)) == 1
    assert (templates[0][0]) is (None)
    assert (templates[0][1]['name']) == 'world'
if __name__ == '__main__':
    # Run the Attest test collection when executed directly.
    app.main()
| 26.007519 | 76 | 0.64961 | from __future__ import with_statement
from flask import (Module, request, redirect, Flask, Response, jsonify,
render_template_string)
from flaskext.attest import request_context, get, post, put, delete
from flaskext.genshi import Genshi, generate_template
from attest import Tests, raises, assert_hook
DEBUG = True
TESTING = True
db = {}
mod = Module(__name__, name='tests')
@mod.route('/', methods=('GET', 'POST', 'PUT', 'DELETE'))
def index():
case = lambda x: request.method == x
if case('GET'):
return db['index']
elif case('POST') or case('PUT'):
db['index'] = request.form['message']
elif case('DELETE'):
del db['index']
return 'Success!'
@mod.route('/error')
def error():
1/0
return 'Oh noes!'
@mod.route('/elsewhere')
def elsewhere():
return redirect('/otherplace')
@mod.route('/json')
def json():
return jsonify(status='Success!')
@mod.route('/hello/<name>')
def hello(name):
return render_template_string('Hello {{name.capitalize()}}!', name=name)
@request_context
def testapp():
app = Flask(__name__)
app.config.from_object(__name__)
app.register_module(mod)
Genshi(app)
yield app
app = Tests(contexts=[testapp])
@app.test
@post('/', data={'message': 'Hello, World!'})
def post_to_index(response, templates):
assert (request.method) == 'POST'
assert (response) == Response('Success!')
assert (db['index']) == 'Hello, World!'
@app.test
@put('/', data={'message': 'Hello, World!'})
def put_to_index(response, templates):
assert (request.method) == 'PUT'
assert (response) == Response('Success!')
assert (db['index']) == 'Hello, World!'
@app.test
@get('/')
def get_index(response, templates):
assert (request.method) == 'GET'
assert (response) == Response('Hello, World!')
assert (response) != Response('Hello, World!', status=404)
@app.test
@delete('/')
def delete_index(response, templates):
assert (request.method) == 'DELETE'
assert (response) == Response('Success!')
assert ('index') not in (db)
@app.test
@get('/404')
def request_persists(response, templates):
assert (request.path) == '/404'
@app.test
def test_request_context(client, templates):
assert (request.path) == '/'
client.get('/404')
assert (request.path) == '/404'
@app.test
def trigger_error(client, templates):
with raises(ZeroDivisionError):
client.get('/error')
client.application.debug = False
response = client.get('/error')
assert (response.status_code) == 500
client.application.debug = True
@app.test
@get('/elsewhere')
def redirection(response, templates):
assert (response) == redirect('/otherplace')
assert (response) != redirect('/wrongplace')
@app.test
@get('/json')
def json_response(response, templates):
assert (response) == jsonify(status='Success!')
@app.test
@get('/hello/world')
def capture_templates(response, templates):
assert (response) == Response('Hello World!')
assert (len(templates)) == 1
assert (templates[0][0]) is (None)
assert (templates[0][1]['name']) == 'world'
@app.test
def genshi_templates(client, templates):
generate_template(string='Hello ${name.capitalize}!', method='text',
context=dict(name='world'))
assert (len(templates)) == 1
assert (templates[0][0]) is (None)
assert (templates[0][1]['name']) == 'world'
if __name__ == '__main__':
app.main()
| true | true |
f7fb755fd03f864132f593874b9d6029346f742b | 4,140 | py | Python | QDE/offline/offline_test_run.py | oxquantum-repo/drl_for_quantum_measurement | a02a8f3a7c5b40458f440a63355932409c66921c | [
"MIT"
] | 5 | 2021-05-18T01:07:04.000Z | 2022-01-29T13:31:18.000Z | QDE/offline/offline_test_run.py | oxquantum-repo/drl_for_quantum_measurement | a02a8f3a7c5b40458f440a63355932409c66921c | [
"MIT"
] | null | null | null | QDE/offline/offline_test_run.py | oxquantum-repo/drl_for_quantum_measurement | a02a8f3a7c5b40458f440a63355932409c66921c | [
"MIT"
] | 1 | 2021-05-18T01:07:20.000Z | 2021-05-18T01:07:20.000Z | import sys
import math
sys.path.append('../')
import mock_pygor
sys.path.append('../')
sys.path.append('../../')
sys.path.append('../environments')
sys.path.append('../utilities')
sys.path.append('../testing_code')
sys.path.append('../data')
from offline_test_play_episode import offline_test_play_episode
from drl_models import Dueling_DQN_PER_2D
from prioritized_experience_replay import Memory
from datetime import datetime
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import models
import random
import pickle
from tqdm import tqdm
from offline_test_environment_creation import double_dot_2d
def initiate():
    """Build the Dueling-DQN agent and restore the pretrained 2D checkpoint.

    Resets the default TF graph, constructs the network, opens an
    interactive session, restores the weights from
    ``../logs/2d/save_models/2d_mean_std`` and binds the session to the
    model.

    Returns:
        Dueling_DQN_PER_2D: model whose session holds the restored weights.
    """
    IM_SIZE = 2
    N_CHANEL = 9  # a block is represented by its 9 neighbouring blocks
    K = 6  # env.action_space.n
    D = IM_SIZE * N_CHANEL
    hidden_layer_sizes = [128, 64, 32]
    gamma = 0.5
    batch_sz = 32
    # (removed an unused `count = 0` local)
    tf.reset_default_graph()
    model = Dueling_DQN_PER_2D(D=D, K=K, batch_sz=batch_sz, hidden_layer_sizes=hidden_layer_sizes,
                               gamma=gamma, lr=2.3e-6, N_CHANEL=N_CHANEL, IM_SIZE=IM_SIZE, scope="DDQN")
    print("DRL model loaded")
    init = tf.global_variables_initializer()
    sess = tf.InteractiveSession()
    sess.run(init)
    saver = tf.train.Saver()
    MODEL_PATH = "../logs/2d/save_models/2d_mean_std"
    saver.restore(sess, MODEL_PATH)
    model.set_session(sess)
    return model
def reset_session(model):
    # Close the current TF session and rebuild the agent from the
    # checkpoint; returns the fresh model (callers must rebind).
    model.session.close()
    model = initiate()
    return model
def end_session(model):
    # Close the model's TF session; returns None.
    model.session.close()
    return
def run(model, epsilon, file_name, MaxStep=60, show_log=False, save=False):
    """Play one offline evaluation episode and optionally pickle the stats.

    Args:
        model: restored DRL agent (see ``initiate``).
        epsilon: exploration rate forwarded to the episode player.
        file_name: measurement file used to build the offline environment.
        MaxStep: step budget for the episode.
        show_log: forwarded to the episode player for verbose output.
        save: when True, pickle ``run_information`` under ``fine_tuning/``.

    Returns:
        tuple: (env, episode_reward, num_steps_in_episode).
    """
    name = file_name
    block_size = 32
    env = double_dot_2d(block_size, file_name)
    # NOTE: tuple targets are assigned left to right, so `env.visit_map` is
    # set on the original env before `env` is rebound to the returned one.
    episode_reward, num_steps_in_episode, total_time_training, env.visit_map, loc_state_list, env = \
        offline_test_play_episode(env, model, epsilon, MaxStep, show_log)
    run_information = {
        "Name": name,
        "Episode reward": episode_reward,
        "Number of steps": num_steps_in_episode,
        "Total training time (seconds)": total_time_training,
        "Location state list": loc_state_list,
        "Environment visit map": env.visit_map,
        "Bias triangle location": env.isquantum,
        "Small window measurements": env.small_window_measurements,
        "Small window statistics": env.small_window_statistics,
    }
    if save:
        # Bug fix: `date_time` was previously only computed inside a dead
        # (triple-quoted) block, so saving raised NameError. Compute it here
        # and close the file deterministically with a context manager.
        date_time = datetime.now().strftime("%m_%d_%Y__%H_%M")
        with open("fine_tuning/mock_run_information" + date_time + ".pickle", "wb") as pickle_out:
            pickle.dump(run_information, pickle_out)
    return env, episode_reward, num_steps_in_episode
| 28.356164 | 217 | 0.681159 | import sys
import math
sys.path.append('../')
import mock_pygor
sys.path.append('../')
sys.path.append('../../')
sys.path.append('../environments')
sys.path.append('../utilities')
sys.path.append('../testing_code')
sys.path.append('../data')
from offline_test_play_episode import offline_test_play_episode
from drl_models import Dueling_DQN_PER_2D
from prioritized_experience_replay import Memory
from datetime import datetime
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import models
import random
import pickle
from tqdm import tqdm
from offline_test_environment_creation import double_dot_2d
def initiate():
IM_SIZE = 2
N_CHANEL = 9
K = 6
D = IM_SIZE * N_CHANEL
hidden_layer_sizes = [128, 64, 32]
gamma = 0.5
batch_sz = 32
count = 0
tf.reset_default_graph()
model = Dueling_DQN_PER_2D(D=D, K=K, batch_sz=batch_sz, hidden_layer_sizes=hidden_layer_sizes,
gamma=gamma, lr=2.3e-6, N_CHANEL=N_CHANEL, IM_SIZE=IM_SIZE, scope="DDQN")
print("DRL model loaded")
init = tf.global_variables_initializer()
sess = tf.InteractiveSession()
sess.run(init)
saver = tf.train.Saver()
MODEL_PATH = "../logs/2d/save_models/2d_mean_std"
saver.restore(sess, MODEL_PATH)
model.set_session(sess)
return model
def reset_session(model):
model.session.close()
model = initiate()
return model
def end_session(model):
model.session.close()
return
def run(model,epsilon,file_name, MaxStep=60, show_log = False, save = False):
name = file_name
block_size = 32
env = double_dot_2d(block_size , file_name)
episode_reward, num_steps_in_episode, total_time_training, env.visit_map, loc_state_list, env = offline_test_play_episode(env, model, epsilon, MaxStep, show_log)
run_information = {
"Name": name,
"Episode reward": episode_reward,
"Number of steps": num_steps_in_episode,
"Total training time (seconds)": total_time_training,
"Location state list": loc_state_list,
"Environment visit map": env.visit_map,
"Bias triangle location": env.isquantum,
"Small window measurements": env.small_window_measurements,
"Small window statistics": env.small_window_statistics,
}
if save == True:
pickle_out = open("fine_tuning/mock_run_information"+date_time+".pickle","wb")
pickle.dump(run_information, pickle_out)
pickle_out.close()
return env,episode_reward,num_steps_in_episode
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.