code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import numpy as np
import argparse
class Env():
    """Mobile-edge-computing (MEC) offloading environment.

    Each of the K users holds a task (``Dn`` kb to upload, ``Cn`` megacycles
    to compute) and picks an offloading fraction in [0, 1].  The state is the
    total latency across users and the reward is its negation.
    """

    def __init__(self, W, F, K, Dn, Cn, f, dist, pn, pi):
        # W: uplink bandwidth, MHz (e.g. 10 MHz)
        # F: total edge-server compute capacity (GHz-scale units)
        # K: number of users
        # Dn: task sizes in kb (300-500); Cn: required CPU megacycles (900-1100)
        # f: local CPU speed (GHz-scale units)
        # dist: user distances, fed into the d^-3 path-loss term (must be > 0)
        # pn, pi: transmit / idle power in mW (pi is stored but unused by step())
        self.W, self.F, self.K = W, F, K
        self.pn, self.pi = pn, pi
        self.Dn, self.Cn, self.f, self.dist = Dn, Cn, f, dist
        self.state = 0
        self.reward = 0

    def step(self, action):
        """Apply offloading fractions and return ``(state, reward, done, info)``.

        ``action`` is sanitized on a *copy* (clamped to [0, 1], NaN -> 1), so
        the caller's array is never modified — the previous implementation
        clamped the input array in place, a surprising side effect.
        """
        action = np.asarray(action, dtype=float)
        # np.clip propagates NaN, so replace NaN afterwards (original mapped NaN -> 1).
        action = np.where(np.isnan(action), 1.0, np.clip(action, 0.0, 1.0))
        self.state = 0
        self.reward = 0
        # Number of offloading users; they share the uplink bandwidth equally.
        rk = np.sum(action > 0)
        # Total offloaded cycles; edge capacity is split proportionally to it.
        sum_c = 0
        for i in range(self.K):
            if action[i] > 0:
                sum_c += self.Cn[i] * action[i]
        mw = pow(10, -174 / 10) * 0.001  # noise power: -174 dBm converted to watts
        for i in range(self.K):
            if action[i] > 0:
                tmp_rn = self.W * 1000 / rk * 1000  # per-user bandwidth share
                # Shannon rate with d^-3 path loss; assumes dist[i] > 0.
                rn = tmp_rn * np.log2(1 + self.pn * 0.001 * pow(self.dist[i], -3) / (tmp_rn * mw))
                # Step 1: upload delay of the offloaded fraction.
                to1 = action[i] * self.Dn[i] * 1024 / rn
                # Step 2: edge compute delay.  The original expression
                # a*Cn / (F*1000 * a*Cn / sum_c) cancels algebraically:
                to2 = sum_c / (self.F * 1000)
                # Delay of the fraction (1 - action) computed locally.
                tl = (1 - action[i]) * self.Cn[i] / (self.f * 1000)
                # Per-user latency = max(offload path, local path); summed over users.
                self.state += max(to1 + to2, tl)
            else:
                # Fully local execution (action is 0 after clamping).
                self.state += (self.Cn[i]) / (self.f * 1000)
        # Reward is the negative total latency (lower latency is better).
        self.reward = -self.state
        return self.state, self.reward, False, {}

    def reset(self):
        """Reset by evaluating the all-local action and return that state."""
        state, _, _, _ = self.step(np.zeros(self.K))
        return state
def parse_args():
    """Parse command-line options for the MEC simulation."""
    parser = argparse.ArgumentParser()
    # Number of user equipments (UEs).
    parser.add_argument('--num-ue', type=int, default=5)
    # Edge-server compute capacity.
    parser.add_argument('--F', type=int, default=5)
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    print(args)
    num_ue = args.num_ue
    F = args.F
    # Pass the parsed capacity through: previously Env received a hard-coded
    # F=5, so the --F command-line flag was silently ignored.
    env = Env(W=10, F=F, K=num_ue,
              Dn=np.random.uniform(300, 500, num_ue), Cn=np.random.uniform(900, 1100, num_ue),
              f=1, dist=np.random.uniform(0, 200, num_ue), pn=500, pi=100)
    # Fully offload every task.
    state, reward, _, _ = env.step(np.ones(num_ue))
    print(state)
    # Offload half of each task (hard-coded for the default of 5 UEs).
    state, reward, _, _ = env.step(np.array([0.5, 0.5, 0.5, 0.5, 0.5]))
    print(state)
    # Mixed offloading fractions.
    state, reward, _, _ = env.step(np.array([1/3, 1/3, 1/3, 2/3, 2/3]))
print(state) | mec.py | import numpy as np
import argparse
class Env():
def __init__(self, W, F, K, Dn, Cn, f, dist, pn, pi):
# W 带宽 10 MHz
# F 边缘服务器总计算能力
# K 用户数量
# Dn, Cn 任务量大小,所需cpu周期数, (300~500kb), (900, 1100)兆周期数 1Mhz = 1000khz = 1000*1000hz
# f 用户本地计算能力 1GHz/s | [0.5, 1.5]GHz/s (1000*1000*1000)
# dist 用户距离
# pn, pi 上传功率,闲时功率 | mW (毫瓦)
# state 系统状态
self.W, self.F, self.K = W, F, K
self.pn, self.pi = pn, pi
self.Dn, self.Cn, self.f, self.dist = Dn, Cn, f, dist
self.state = 0
self.reward = 0
# self.pre_state = 5
def step(self, action):
# 把action 特殊处理了一下,防止出现算法bug,小于0的置为0,大于1的置为1,无限大的置为1
action[action < 0] = 0
action[action > 1] = 1
action[np.isnan(action)] = 1
# 用于返回的状态和奖励
self.state = 0
self.reward = 0
# 有几个需要计算卸载
rk = np.sum(action > 0)
# 所有用户卸载了多少到边缘服务器,之后依据这个按比例分配计算资源
sum_c = 0
for i in range(self.K):
if action[i] > 0:
sum_c += self.Cn[i] * action[i]
mw = pow(10, -174 / 10) * 0.001 # 噪声功率转化 -174dbm 转成瓦特
for i in range(self.K):
if action[i] > 0:
tmp_rn = self.W * 1000 / rk * 1000 # W / K 速率公式的一部分
rn = tmp_rn * np.log2(1 + self.pn * 0.001 * pow(self.dist[i], -3) / (tmp_rn * mw)) # 计算速率
# 部分卸载部分的第一步卸载延迟
to1 = action[i] * self.Dn[i] * 1024 / rn
# 部分卸载的第二步计算延迟
to2 = action[i] * self.Cn[i] / (self.F * 1000 * action[i] * self.Cn[i] / sum_c)
# 部分卸载的本地计算部分 1-action
tl = (1 - action[i]) * self.Cn[i] / (self.f * 1000)
# 时延是max(本地计算延迟,计算卸载的部分的延迟)
self.state += max(to1 + to2, tl)
elif action[i] == 0:
# 本地执行的延迟
self.state += (self.Cn[i]) / (self.f * 1000)
# self.reward = (self.pre_state - self.state) / self.pre_state
# 奖励是状态的相反数
self.reward = -self.state
return self.state, self.reward, False, {}
def reset(self):
# random_action = np.random.uniform(0, 1, self.K)
# state, _, _, _ = self.step(random_action)
# state, _, _, _ = self.step(np.array([0.5] * self.K))
state, _, _, _ = self.step(np.zeros(self.K))
return state
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--num-ue', type=int, default=5) # 用户数量
parser.add_argument('--F', type=int, default=5) # 边缘服务器计算量
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print(args)
num_ue = args.num_ue
F = args.F
env = Env(W=10, F=5, K=num_ue,
Dn=np.random.uniform(300, 500, num_ue), Cn=np.random.uniform(900, 1100, num_ue),
f=1, dist=np.random.uniform(0, 200, num_ue), pn=500, pi=100)
state, reward, _, _ = env.step(np.ones(num_ue))
print(state)
state, reward, _, _ = env.step(np.array([0.5, 0.5, 0.5, 0.5, 0.5]))
print(state)
state, reward, _, _ = env.step(np.array([1/3, 1/3, 1/3, 2/3, 2/3]))
print(state) | 0.215846 | 0.284635 |
import pytest
import redis
import random
from threading import Thread
from time import sleep
from utils import Env, Refresh_Cluster
from test_helper_classes import _get_series_value, calc_rule, ALLOWED_ERROR, _insert_data, \
_get_ts_info, _insert_agg_data
from includes import *
def testLibmrFail():
    """Verify TS.MRANGE fails while a shard is down and recovers after restart.

    Requires a cluster of at least 3 shards: shard #2 is stopped, a
    multi-shard query is expected to error with "multi shard cmd failed",
    then the shard is restarted and the same query must return the series.
    """
    env = Env()
    if env.shardsCount < 3:
        env.skip()
    if(not env.isCluster):
        env.skip()
    env.skipOnSlave() # There can't be 2 rdb save at the same time
    env.skipOnAOF()
    start_ts = 1
    samples_count = 10
    with env.getClusterConnectionIfNeeded() as r:
        assert r.execute_command('TS.CREATE', 'tester1{1}', 'LABELS', 'name', 'bob')
        _insert_data(r, 'tester1{1}', start_ts, samples_count, 1)
        # Take shard #2 down; ignore errors if it is already stopped.
        try:
            env.envRunner.shards[2].stopEnv()
        except Exception as e:
            pass
        Refresh_Cluster(env)
        # With a shard missing, the multi-shard command must raise.
        try:
            actual_result = env.getConnection(1).execute_command('TS.mrange', start_ts, start_ts + samples_count, 'WITHLABELS', 'FILTER',
                                                                 'name=bob')
            assert(False)
        except Exception as e:
            env.assertResponseError(e, "multi shard cmd failed")
        # Bring the shard back; the same query should now succeed.
        env.envRunner.shards[2].startEnv()
        Refresh_Cluster(env)
        expected_res = [[b'tester1{1}', [[b'name', b'bob']], [[1, b'1'], [2, b'1'], [3, b'1'], [4, b'1'], [5, b'1'], [6, b'1'], [7, b'1'], [8, b'1'], [9, b'1'], [10, b'1']]]]
        actual_result = env.getConnection(1).execute_command('TS.mrange', start_ts, start_ts + samples_count, 'WITHLABELS', 'FILTER',
                                                             'name=bob')
        env.assertEqual(actual_result, expected_res)
def libmr_query(con, env, start_ts, samples_count):
    """Run a TS.MRANGE over the cluster and verify the expected series."""
    # Ten samples of value 1 at timestamps 1..10, returned with the labels.
    samples = [[ts, b'1'] for ts in range(1, 11)]
    expected_res = [[b'tester1{1}', [[b'name', b'bob']], samples]]
    actual_result = con.execute_command(
        'TS.mrange', start_ts, start_ts + samples_count,
        'WITHLABELS', 'FILTER', 'name=bob')
    env.assertEqual(actual_result, expected_res)
def testLibmr_client_disconnect():
    """Fire 20 threaded queries whose client connections close immediately.

    Exercises the libmr path when clients disconnect mid-query; the test
    only checks that the server is still responsive afterwards.
    """
    env = Env()
    if env.shardsCount < 2:
        env.skip()
    if(not env.isCluster):
        env.skip()
    env.skipOnSlave() # There can't be 2 rdb save at the same time
    env.skipOnAOF()
    start_ts = 1
    samples_count = 10
    with env.getClusterConnectionIfNeeded() as r:
        assert r.execute_command('TS.CREATE', 'tester1{1}', 'LABELS', 'name', 'bob')
        _insert_data(r, 'tester1{1}', start_ts, samples_count, 1)
        threads = []
        cons = []
        for i in range(0,20):
            # Open a connection to a random shard; the server sees a new client.
            cons.append(env.getConnection(random.randint(0, env.shardsCount - 1)))
            threads.append(Thread(target=libmr_query, args=(cons[i], env, start_ts, samples_count)))
        for i in range(len(threads)):
            threads[i].start()
            # Close the connection right after starting its query thread.
            cons[i].close()
        # Wait for all query threads to finish.
        [th.join() for th in threads]
        # Make sure the server did not crash.
        r.ping()
r.close() | tests/flow/test_ts_libmr_failiure.py | import pytest
import redis
import random
from threading import Thread
from time import sleep
from utils import Env, Refresh_Cluster
from test_helper_classes import _get_series_value, calc_rule, ALLOWED_ERROR, _insert_data, \
_get_ts_info, _insert_agg_data
from includes import *
def testLibmrFail():
env = Env()
if env.shardsCount < 3:
env.skip()
if(not env.isCluster):
env.skip()
env.skipOnSlave() # There can't be 2 rdb save at the same time
env.skipOnAOF()
start_ts = 1
samples_count = 10
with env.getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', 'tester1{1}', 'LABELS', 'name', 'bob')
_insert_data(r, 'tester1{1}', start_ts, samples_count, 1)
try:
env.envRunner.shards[2].stopEnv()
except Exception as e:
pass
Refresh_Cluster(env)
try:
actual_result = env.getConnection(1).execute_command('TS.mrange', start_ts, start_ts + samples_count, 'WITHLABELS', 'FILTER',
'name=bob')
assert(False)
except Exception as e:
env.assertResponseError(e, "multi shard cmd failed")
env.envRunner.shards[2].startEnv()
Refresh_Cluster(env)
expected_res = [[b'tester1{1}', [[b'name', b'bob']], [[1, b'1'], [2, b'1'], [3, b'1'], [4, b'1'], [5, b'1'], [6, b'1'], [7, b'1'], [8, b'1'], [9, b'1'], [10, b'1']]]]
actual_result = env.getConnection(1).execute_command('TS.mrange', start_ts, start_ts + samples_count, 'WITHLABELS', 'FILTER',
'name=bob')
env.assertEqual(actual_result, expected_res)
def libmr_query(con, env, start_ts, samples_count):
expected_res = [[b'tester1{1}', [[b'name', b'bob']], [[1, b'1'], [2, b'1'], [3, b'1'], [4, b'1'], [5, b'1'], [6, b'1'], [7, b'1'], [8, b'1'], [9, b'1'], [10, b'1']]]]
actual_result = con.execute_command('TS.mrange', start_ts, start_ts + samples_count, 'WITHLABELS', 'FILTER',
'name=bob')
env.assertEqual(actual_result, expected_res)
def testLibmr_client_disconnect():
env = Env()
if env.shardsCount < 2:
env.skip()
if(not env.isCluster):
env.skip()
env.skipOnSlave() # There can't be 2 rdb save at the same time
env.skipOnAOF()
start_ts = 1
samples_count = 10
with env.getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', 'tester1{1}', 'LABELS', 'name', 'bob')
_insert_data(r, 'tester1{1}', start_ts, samples_count, 1)
threads = []
cons = []
for i in range(0,20):
# expect a new connection to arrive
cons.append(env.getConnection(random.randint(0, env.shardsCount - 1)))
threads.append(Thread(target=libmr_query, args=(cons[i], env, start_ts, samples_count)))
for i in range(len(threads)):
threads[i].start()
cons[i].close()
# wait for processes to join
[th.join() for th in threads]
# make sure we did not crashed
r.ping()
r.close() | 0.275422 | 0.322873 |
from __future__ import print_function
import unittest
import uuid
from biggraphite import metric as bg_metric
from biggraphite.drivers import _downsampling as bg_ds
from tests import test_utils
test_utils.setup_logging()
class TestDownsampler(unittest.TestCase):
    """Unit tests for the biggraphite downsampler over two retention stages.

    Stage 0 has precision PRECISION and stage 1 precision PRECISION ** 2,
    each with CAPACITY buffered points (retention "3*10s:3*100s").
    """
    METRIC_NAME_SUM = "test.metric.sum"  # metric aggregated with 'total'
    METRIC_NAME_AVG = "test.metric.avg"  # metric aggregated with 'average'
    PRECISION = 10  # stage-0 precision; stage 1 uses PRECISION ** 2
    CAPACITY = 3    # points kept per stage buffer
    def setUp(self):
        """Set up a Downsampler, aggregating with the sum and average function."""
        capacity_precisions = (
            self.CAPACITY,
            self.PRECISION,
            self.CAPACITY,
            self.PRECISION ** 2,
        )
        retention_string = "%d*%ds:%d*%ds" % (capacity_precisions)
        retention = bg_metric.Retention.from_string(retention_string)
        self.stage_0 = retention.stages[0]
        self.stage_1 = retention.stages[1]
        uid = uuid.uuid4()
        metric_metadata = bg_metric.MetricMetadata.create(
            aggregator=bg_metric.Aggregator.total, retention=retention
        )
        self.metric_sum = bg_metric.Metric(self.METRIC_NAME_SUM, uid, metric_metadata)
        uid = uuid.uuid4()
        metric_metadata = bg_metric.MetricMetadata.create(
            aggregator=bg_metric.Aggregator.average, retention=retention
        )
        self.metric_avg = bg_metric.Metric(self.METRIC_NAME_AVG, uid, metric_metadata)
        self.ds = bg_ds.Downsampler(self.CAPACITY)
    def test_feed_simple_sum(self):
        """Test feed with few points."""
        # 1. Put value 1 at timestamp 0.
        # 2. Check that it is used in the aggregates, even though it is not expired.
        points = [(0, 1)]
        expected = [(0, 1, 1, self.stage_0), (0, 1, 1, self.stage_1)]
        result = self.ds.feed(self.metric_sum, points)
        self.assertEqual(result, expected)
        # 1. Feed no point, and check that nothing is thrown.
        points = []
        result = self.ds.feed(self.metric_sum, points)
        self.assertEqual(result, [])
        # 1. Add point with value 3 that overrides the previous point.
        # 2. Check the result takes into account the override.
        points = [(0, 3)]
        expected = [(0, 3, 1, self.stage_0), (0, 3, 1, self.stage_1)]
        result = self.ds.feed(self.metric_sum, points)
        self.assertEqual(result, expected)
        # 1. Add point with value 9 at index 1 in stage0 buffer.
        # 2. Check that the aggregates are updated using both points.
        points = [(0, 5), (self.PRECISION, 9)]  # Overrides previous point.
        expected = [
            (0, 5, 1, self.stage_0),
            (self.PRECISION, 9, 1, self.stage_0),
            (0, 14, 2, self.stage_1),
        ]
        result = self.ds.feed(self.metric_sum, points)
        self.assertEqual(result, expected)
        # 1. Feed no point, and check that nothing is thrown.
        points = []
        result = self.ds.feed(self.metric_sum, points)
        self.assertEqual(result, [])
    def test_feed_simple_avg(self):
        """Test feed with few points."""
        # 1. Put value 1 at timestamp 0.
        # 2. Check that it is used in the aggregates, even though it is not expired.
        points = [(0, 1)]
        expected = [(0, 1, 1, self.stage_0), (0, 1, 1, self.stage_1)]
        result = self.ds.feed(self.metric_avg, points)
        self.assertEqual(result, expected)
        # 1. Add point with value 9 at index 1 in stage0 buffer.
        # 2. Check that the aggregates are updated using both points.
        points = [
            (0, 5),  # Overrides previous point.
            (self.PRECISION, 9),
            (self.PRECISION ** 2 * self.CAPACITY, 10),
        ]
        expected = [
            (0, 5, 1, self.stage_0),
            (self.PRECISION, 9, 1, self.stage_0),
            (300, 10.0, 1, self.stage_0),
            (0, 14.0, 2, self.stage_1),
            (300, 10.0, 1, self.stage_1),
        ]
        result = self.ds.feed(self.metric_avg, points)
        self.assertEqual(result, expected)
    def test_feed_multiple(self):
        """Test feed with one point per minute for 30 minutes."""
        for i in range(30):
            result = self.ds.feed(self.metric_sum, [(1, i)])
            # We should generate only one metric per retention.
            self.assertEqual(len(result), 2)
        for i in range(30):
            result = self.ds.feed(self.metric_sum, [(0, i)])
            self.assertEqual(len(result), 2)
    def test_feed_extended(self):
        """Test feed with several points."""
        # 1. Add point with value 15 which expires the point at index 0.
        # 2. Check that the aggregates are updated using the three points.
        points = [
            (0, 1),  # Point at index 0.
            (1, 2),  # Overrides previous point at index 0.
            (self.PRECISION, 15),  # Point at index 1.
            # Evicts the point at index 0.
            (self.PRECISION * self.CAPACITY, 25),
            # Evicts all previous points.
            (self.PRECISION * self.CAPACITY * 2, 150),
            (self.PRECISION ** 2 * self.CAPACITY, 1500),  # Bump stage 1 epoch.
            # Replace previous point.
            (self.PRECISION ** 2 * self.CAPACITY, 1501),
        ]
        expected_stage_0 = [
            (0, 2, 1, self.stage_0),  # Point at index 0.
            (self.PRECISION, 15, 1, self.stage_0),  # Point at index 1.
            (self.PRECISION * self.CAPACITY, 25, 1, self.stage_0),
            (self.PRECISION * self.CAPACITY * 2, 150, 1, self.stage_0),
            (self.CAPACITY * self.PRECISION ** 2, 1501, 1, self.stage_0),
        ]
        expected_stage_1 = [
            # 192 = 2 + 15 + 25 + 150, sum of stage_0 values
            (0, 192, 4, self.stage_1),
            (self.CAPACITY * self.PRECISION ** 2, 1501, 1, self.stage_1),
        ]
        expected = expected_stage_0 + expected_stage_1
        result = self.ds.feed(self.metric_sum, points)
        self.assertEqual(result, expected)
    def test_out_of_order(self):
        """Test feeding points out of order."""
        points = [
            (self.PRECISION ** 2 + 1, 42),  # Overrides next point once sorted.
            (self.PRECISION ** 2, 84),
            (self.PRECISION - 1, 1),  # Overrides next point once sorted
            (self.PRECISION, 2),
            (0, -10),
        ]
        expected_stage_0 = [
            (0, 1, 1, self.stage_0),
            (self.PRECISION, 2, 1, self.stage_0),
            (self.PRECISION ** 2, 42, 1, self.stage_0),
        ]
        expected_stage_1 = [
            (0, 3, 2, self.stage_1),  # 3 = 1 + 2.
            (self.PRECISION ** 2, 42, 1, self.stage_1),
        ]
        expected = expected_stage_0 + expected_stage_1
        result = self.ds.feed(self.metric_sum, points)
        self.assertEqual(result, expected)
    def test_purge(self):
        """Test that we purge old metrics correctly."""
        points = [(1, 1)]
        self.ds.PURGE_EVERY_S = 0
        self.ds.feed(self.metric_sum, points)
        # Should no remove anything.
        self.ds.purge(now=1)
        self.assertEqual(len(self.ds._names_to_aggregates), 1)
        # Should remove everything.
        self.ds.purge(now=(self.PRECISION ** 2) * 3)
        self.assertEqual(len(self.ds._names_to_aggregates), 0)
if __name__ == "__main__":
unittest.main() | tests/drivers/test_drivers_downsampling.py |
from __future__ import print_function
import unittest
import uuid
from biggraphite import metric as bg_metric
from biggraphite.drivers import _downsampling as bg_ds
from tests import test_utils
test_utils.setup_logging()
class TestDownsampler(unittest.TestCase):
METRIC_NAME_SUM = "test.metric.sum"
METRIC_NAME_AVG = "test.metric.avg"
PRECISION = 10
CAPACITY = 3
def setUp(self):
"""Set up a Downsampler, aggregating with the sum and average function."""
capacity_precisions = (
self.CAPACITY,
self.PRECISION,
self.CAPACITY,
self.PRECISION ** 2,
)
retention_string = "%d*%ds:%d*%ds" % (capacity_precisions)
retention = bg_metric.Retention.from_string(retention_string)
self.stage_0 = retention.stages[0]
self.stage_1 = retention.stages[1]
uid = uuid.uuid4()
metric_metadata = bg_metric.MetricMetadata.create(
aggregator=bg_metric.Aggregator.total, retention=retention
)
self.metric_sum = bg_metric.Metric(self.METRIC_NAME_SUM, uid, metric_metadata)
uid = uuid.uuid4()
metric_metadata = bg_metric.MetricMetadata.create(
aggregator=bg_metric.Aggregator.average, retention=retention
)
self.metric_avg = bg_metric.Metric(self.METRIC_NAME_AVG, uid, metric_metadata)
self.ds = bg_ds.Downsampler(self.CAPACITY)
def test_feed_simple_sum(self):
"""Test feed with few points."""
# 1. Put value 1 at timestamp 0.
# 2. Check that it is used in the aggregates, even though it is not expired.
points = [(0, 1)]
expected = [(0, 1, 1, self.stage_0), (0, 1, 1, self.stage_1)]
result = self.ds.feed(self.metric_sum, points)
self.assertEqual(result, expected)
# 1. Feed no point, and check that nothing is thrown.
points = []
result = self.ds.feed(self.metric_sum, points)
self.assertEqual(result, [])
# 1. Add point with value 3 that overrides the previous point.
# 2. Check the result takes into account the override.
points = [(0, 3)]
expected = [(0, 3, 1, self.stage_0), (0, 3, 1, self.stage_1)]
result = self.ds.feed(self.metric_sum, points)
self.assertEqual(result, expected)
# 1. Add point with value 9 at index 1 in stage0 buffer.
# 2. Check that the aggregates are updated using both points.
points = [(0, 5), (self.PRECISION, 9)] # Overrides previous point.
expected = [
(0, 5, 1, self.stage_0),
(self.PRECISION, 9, 1, self.stage_0),
(0, 14, 2, self.stage_1),
]
result = self.ds.feed(self.metric_sum, points)
self.assertEqual(result, expected)
# 1. Feed no point, and check that nothing is thrown.
points = []
result = self.ds.feed(self.metric_sum, points)
self.assertEqual(result, [])
def test_feed_simple_avg(self):
"""Test feed with few points."""
# 1. Put value 1 at timestamp 0.
# 2. Check that it is used in the aggregates, even though it is not expired.
points = [(0, 1)]
expected = [(0, 1, 1, self.stage_0), (0, 1, 1, self.stage_1)]
result = self.ds.feed(self.metric_avg, points)
self.assertEqual(result, expected)
# 1. Add point with value 9 at index 1 in stage0 buffer.
# 2. Check that the aggregates are updated using both points.
points = [
(0, 5), # Overrides previous point.
(self.PRECISION, 9),
(self.PRECISION ** 2 * self.CAPACITY, 10),
]
expected = [
(0, 5, 1, self.stage_0),
(self.PRECISION, 9, 1, self.stage_0),
(300, 10.0, 1, self.stage_0),
(0, 14.0, 2, self.stage_1),
(300, 10.0, 1, self.stage_1),
]
result = self.ds.feed(self.metric_avg, points)
self.assertEqual(result, expected)
def test_feed_multiple(self):
"""Test feed with one point per minute for 30 minutes."""
for i in range(30):
result = self.ds.feed(self.metric_sum, [(1, i)])
# We should generate only one metric per retention.
self.assertEqual(len(result), 2)
for i in range(30):
result = self.ds.feed(self.metric_sum, [(0, i)])
self.assertEqual(len(result), 2)
def test_feed_extended(self):
"""Test feed with several points."""
# 1. Add point with value 15 which expires the point at index 0.
# 2. Check that the aggregates are updated using the three points.
points = [
(0, 1), # Point at index 0.
(1, 2), # Overrides previous point at index 0.
(self.PRECISION, 15), # Point at index 1.
# Evicts the point at index 0.
(self.PRECISION * self.CAPACITY, 25),
# Evicts all previous points.
(self.PRECISION * self.CAPACITY * 2, 150),
(self.PRECISION ** 2 * self.CAPACITY, 1500), # Bump stage 1 epoch.
# Replace previous point.
(self.PRECISION ** 2 * self.CAPACITY, 1501),
]
expected_stage_0 = [
(0, 2, 1, self.stage_0), # Point at index 0.
(self.PRECISION, 15, 1, self.stage_0), # Point at index 1.
(self.PRECISION * self.CAPACITY, 25, 1, self.stage_0),
(self.PRECISION * self.CAPACITY * 2, 150, 1, self.stage_0),
(self.CAPACITY * self.PRECISION ** 2, 1501, 1, self.stage_0),
]
expected_stage_1 = [
# 192 = 2 + 15 + 25 + 150, sum of stage_0 values
(0, 192, 4, self.stage_1),
(self.CAPACITY * self.PRECISION ** 2, 1501, 1, self.stage_1),
]
expected = expected_stage_0 + expected_stage_1
result = self.ds.feed(self.metric_sum, points)
self.assertEqual(result, expected)
def test_out_of_order(self):
"""Test feeding points out of order."""
points = [
(self.PRECISION ** 2 + 1, 42), # Overrides next point once sorted.
(self.PRECISION ** 2, 84),
(self.PRECISION - 1, 1), # Overrides next point once sorted
(self.PRECISION, 2),
(0, -10),
]
expected_stage_0 = [
(0, 1, 1, self.stage_0),
(self.PRECISION, 2, 1, self.stage_0),
(self.PRECISION ** 2, 42, 1, self.stage_0),
]
expected_stage_1 = [
(0, 3, 2, self.stage_1), # 3 = 1 + 2.
(self.PRECISION ** 2, 42, 1, self.stage_1),
]
expected = expected_stage_0 + expected_stage_1
result = self.ds.feed(self.metric_sum, points)
self.assertEqual(result, expected)
def test_purge(self):
"""Test that we purge old metrics correctly."""
points = [(1, 1)]
self.ds.PURGE_EVERY_S = 0
self.ds.feed(self.metric_sum, points)
# Should no remove anything.
self.ds.purge(now=1)
self.assertEqual(len(self.ds._names_to_aggregates), 1)
# Should remove everything.
self.ds.purge(now=(self.PRECISION ** 2) * 3)
self.assertEqual(len(self.ds._names_to_aggregates), 0)
if __name__ == "__main__":
unittest.main() | 0.916332 | 0.456289 |
import reframe.utility.sanity as sn
import reframe as rfm
@rfm.parameterized_test(['peerAccess'], ['noPeerAccess'])
class P2pBandwidthCheck(rfm.RegressionTest):
    """GPU peer-to-peer bandwidth microbenchmark.

    Parameterized on whether CUDA peer access is enabled (-DP2P); reference
    bandwidths differ per cluster partition and per mode.
    """
    def __init__(self, peerAccess):
        self.valid_systems = ['cannon:local-gpu','cannon:gpu_test','fasse:fasse_gpu','test:gpu']
        self.valid_prog_environs = ['gpu']
        # Perform a single bandwidth test with a buffer size of 1024MB
        copy_size = 1073741824
        self.build_system = 'Make'
        self.executable = './p2p_bandwidth.x'
        self.build_system.cxxflags = [f'-DCOPY={copy_size}']
        # Compile with -DP2P only for the peer-access variant of the test.
        if (peerAccess == 'peerAccess'):
            self.build_system.cxxflags += ['-DP2P']
            p2p = True
        else:
            p2p = False
        # Performance metric: the minimum per-GPU bandwidth found in stdout.
        self.perf_patterns = {
            'bw': sn.min(sn.extractall(
                r'^[^,]*\[[^\]]*\]\s+GPU\s+\d+\s+(\s*\d+.\d+\s)+',
                self.stdout, 1, float))
        }
        # Reference bandwidths (GB/s) with a 5% lower tolerance on known
        # partitions; '*' entries have no tolerance bounds.
        if p2p:
            self.reference = {
                'cannon:local-gpu': {
                    'bw': (28, -0.05, None, 'GB/s'),
                },
                'cannon:gpu_test': {
                    'bw': (9, -0.05, None, 'GB/s'),
                },
                '*': {
                    'bw': (172.5, None, None, 'GB/s'),
                },
            }
        else:
            self.reference = {
                'cannon:local-gpu': {
                    'bw': (35, -0.05, None, 'GB/s'),
                },
                'cannon:gpu_test': {
                    'bw': (11, -0.05, None, 'GB/s'),
                },
                '*': {
                    'bw': (79.6, None, None, 'GB/s'),
                },
            }
    @run_after('setup')
    def select_makefile(self):
        """Point the Make build system at the CUDA p2p-bandwidth makefile."""
        self.build_system.makefile = 'makefile_p2pBandwidth.cuda'
    @run_before('run')
    def set_num_gpus_per_node(self):
        """Set GPU/CPU counts per node depending on the current partition."""
        cp = self.current_partition.fullname
        if cp in {'cannon:local-gpu','fasse:fasse_gpu', 'test:gpu'}:
            self.num_gpus_per_node = 4
            self.num_cpus_per_task = 4
            self.num_tasks = 1
        elif cp in {'cannon:gpu_test'}:
            self.num_gpus_per_node = 2
            self.num_cpus_per_task = 2
            self.num_tasks = 1
        else:
            # Fallback for unknown partitions: a single GPU/CPU task.
            self.num_gpus_per_node = 1
            self.num_cpus_per_task = 1
            self.num_tasks = 1
    @sanity_function
    def do_sanity_check(self):
        """Check every node found its GPUs and reported a PASS result."""
        # Nodes that reported the expected GPU count.
        node_names = set(sn.extractall(
            r'^\s*\[([^,]{1,100})\]\s*Found %s device\(s\).'
            % self.num_gpus_per_node, self.stdout, 1
        ))
        sn.evaluate(sn.assert_eq(
            self.job.num_tasks, len(node_names),
            msg='requested {0} node(s), got {1} (nodelist: %s)' %
            ','.join(sorted(node_names))))
        # Nodes whose benchmark run ended with "Test Result = PASS".
        good_nodes = set(sn.extractall(
            r'^\s*\[([^,]{1,100})\]\s*Test Result\s*=\s*PASS',
            self.stdout, 1
        ))
        sn.evaluate(sn.assert_eq(
            node_names, good_nodes,
            msg='check failed on the following node(s): %s' %
            ','.join(sorted(node_names - good_nodes)))
        )
return True | checks/microbenchmarks/gpu/memory_bandwidth/p2p_bandwidth.py |
import reframe.utility.sanity as sn
import reframe as rfm
@rfm.parameterized_test(['peerAccess'], ['noPeerAccess'])
class P2pBandwidthCheck(rfm.RegressionTest):
def __init__(self, peerAccess):
self.valid_systems = ['cannon:local-gpu','cannon:gpu_test','fasse:fasse_gpu','test:gpu']
self.valid_prog_environs = ['gpu']
# Perform a single bandwidth test with a buffer size of 1024MB
copy_size = 1073741824
self.build_system = 'Make'
self.executable = './p2p_bandwidth.x'
self.build_system.cxxflags = [f'-DCOPY={copy_size}']
if (peerAccess == 'peerAccess'):
self.build_system.cxxflags += ['-DP2P']
p2p = True
else:
p2p = False
self.perf_patterns = {
'bw': sn.min(sn.extractall(
r'^[^,]*\[[^\]]*\]\s+GPU\s+\d+\s+(\s*\d+.\d+\s)+',
self.stdout, 1, float))
}
if p2p:
self.reference = {
'cannon:local-gpu': {
'bw': (28, -0.05, None, 'GB/s'),
},
'cannon:gpu_test': {
'bw': (9, -0.05, None, 'GB/s'),
},
'*': {
'bw': (172.5, None, None, 'GB/s'),
},
}
else:
self.reference = {
'cannon:local-gpu': {
'bw': (35, -0.05, None, 'GB/s'),
},
'cannon:gpu_test': {
'bw': (11, -0.05, None, 'GB/s'),
},
'*': {
'bw': (79.6, None, None, 'GB/s'),
},
}
@run_after('setup')
def select_makefile(self):
self.build_system.makefile = 'makefile_p2pBandwidth.cuda'
@run_before('run')
def set_num_gpus_per_node(self):
cp = self.current_partition.fullname
if cp in {'cannon:local-gpu','fasse:fasse_gpu', 'test:gpu'}:
self.num_gpus_per_node = 4
self.num_cpus_per_task = 4
self.num_tasks = 1
elif cp in {'cannon:gpu_test'}:
self.num_gpus_per_node = 2
self.num_cpus_per_task = 2
self.num_tasks = 1
else:
self.num_gpus_per_node = 1
self.num_cpus_per_task = 1
self.num_tasks = 1
@sanity_function
def do_sanity_check(self):
node_names = set(sn.extractall(
r'^\s*\[([^,]{1,100})\]\s*Found %s device\(s\).'
% self.num_gpus_per_node, self.stdout, 1
))
sn.evaluate(sn.assert_eq(
self.job.num_tasks, len(node_names),
msg='requested {0} node(s), got {1} (nodelist: %s)' %
','.join(sorted(node_names))))
good_nodes = set(sn.extractall(
r'^\s*\[([^,]{1,100})\]\s*Test Result\s*=\s*PASS',
self.stdout, 1
))
sn.evaluate(sn.assert_eq(
node_names, good_nodes,
msg='check failed on the following node(s): %s' %
','.join(sorted(node_names - good_nodes)))
)
return True | 0.426799 | 0.227695 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.f5networks.f5_modules.plugins.modules.bigip_dns_cache_resolver import (
ApiParameters, ModuleParameters, ModuleManager, ArgumentSpec
)
from ansible_collections.f5networks.f5_modules.tests.unit.compat import unittest
from ansible_collections.f5networks.f5_modules.tests.unit.compat.mock import Mock, patch
from ansible_collections.f5networks.f5_modules.tests.unit.modules.utils import set_module_args
# Directory holding JSON/text fixtures, plus a per-path memoization cache.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Return the contents of fixture *name*, parsed as JSON when possible.

    Results are memoized in ``fixture_data`` keyed by full path, so each
    fixture file is read from disk at most once per test run.
    """
    path = os.path.join(fixture_path, name)
    if path not in fixture_data:
        with open(path) as handle:
            raw = handle.read()
        try:
            parsed = json.loads(raw)
        except Exception:
            # Non-JSON fixtures are cached as raw text.
            parsed = raw
        fixture_data[path] = parsed
    return fixture_data[path]
class TestParameters(unittest.TestCase):
    """Unit tests for the module/API parameter adapter classes."""

    def test_module_parameters(self):
        # A numeric route domain supplied by the playbook is expanded
        # into a full /Common/<id> path.
        params = {'name': 'foo', 'answer_default_zones': True, 'route_domain': 10}
        parameters = ModuleParameters(params=params)
        assert parameters.name == 'foo'
        assert parameters.route_domain == '/Common/10'

    def test_api_parameters(self):
        # The API fixture payload is parsed into the same adapter fields.
        payload = load_fixture('load_ltm_dns_cache_resolver_1.json')
        parameters = ApiParameters(params=payload)
        assert parameters.name == 'foo'
        assert parameters.route_domain == '/Common/0'
class TestManager(unittest.TestCase):
    """Tests for ModuleManager with device-facing calls patched out."""
    def setUp(self):
        # Patch version lookup and telemetry so no BIG-IP device is needed.
        self.spec = ArgumentSpec()
        self.p2 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_dns_cache_resolver.tmos_version')
        self.p3 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_dns_cache_resolver.send_teem')
        self.m2 = self.p2.start()
        self.m2.return_value = '14.1.0'
        self.m3 = self.p3.start()
        self.m3.return_value = True
    def tearDown(self):
        # Stop the patches started in setUp.
        self.p2.stop()
        self.p3.stop()
    def test_create_monitor(self, *args):
        """Creating a resolver that does not exist yet reports a change."""
        set_module_args(dict(
            name='foo',
            route_domain=20,
            partition='Common',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        # Override methods in the specific type of manager:
        # exists() is False before creation and True afterwards.
        mm = ModuleManager(module=module)
        mm.exists = Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)
        results = mm.exec_module()
assert results['changed'] is True | venv/lib/python3.6/site-packages/ansible_collections/f5networks/f5_modules/tests/unit/modules/network/f5/test_bigip_dns_cache_resolver.py |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.f5networks.f5_modules.plugins.modules.bigip_dns_cache_resolver import (
ApiParameters, ModuleParameters, ModuleManager, ArgumentSpec
)
from ansible_collections.f5networks.f5_modules.tests.unit.compat import unittest
from ansible_collections.f5networks.f5_modules.tests.unit.compat.mock import Mock, patch
from ansible_collections.f5networks.f5_modules.tests.unit.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
answer_default_zones=True,
route_domain=10,
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.route_domain == '/Common/10'
def test_api_parameters(self):
args = load_fixture('load_ltm_dns_cache_resolver_1.json')
p = ApiParameters(params=args)
assert p.name == 'foo'
assert p.route_domain == '/Common/0'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.p2 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_dns_cache_resolver.tmos_version')
self.p3 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_dns_cache_resolver.send_teem')
self.m2 = self.p2.start()
self.m2.return_value = '14.1.0'
self.m3 = self.p3.start()
self.m3.return_value = True
def tearDown(self):
self.p2.stop()
self.p3.stop()
def test_create_monitor(self, *args):
set_module_args(dict(
name='foo',
route_domain=20,
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True | 0.505859 | 0.226473 |
import subprocess
from charmhelpers.core import hookenv
def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=None, cn=None):
"""Generate selfsigned SSL key pair
You must provide one of the 3 optional arguments:
config, subject or cn
If more than one is provided the leftmost will be used
Arguments:
keyfile -- (required) full path to the keyfile to be created
certfile -- (required) full path to the certfile to be created
keysize -- (optional) SSL key length
config -- (optional) openssl configuration file
subject -- (optional) dictionary with SSL subject variables
cn -- (optional) cerfificate common name
Required keys in subject dict:
cn -- Common name (eq. FQDN)
Optional keys in subject dict
country -- Country Name (2 letter code)
state -- State or Province Name (full name)
locality -- Locality Name (eg, city)
organization -- Organization Name (eg, company)
organizational_unit -- Organizational Unit Name (eg, section)
email -- Email Address
"""
cmd = []
if config:
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
"-keyout", keyfile,
"-out", certfile, "-config", config]
elif subject:
ssl_subject = ""
if "country" in subject:
ssl_subject = ssl_subject + "/C={}".format(subject["country"])
if "state" in subject:
ssl_subject = ssl_subject + "/ST={}".format(subject["state"])
if "locality" in subject:
ssl_subject = ssl_subject + "/L={}".format(subject["locality"])
if "organization" in subject:
ssl_subject = ssl_subject + "/O={}".format(subject["organization"])
if "organizational_unit" in subject:
ssl_subject = ssl_subject + "/OU={}".format(subject["organizational_unit"])
if "cn" in subject:
ssl_subject = ssl_subject + "/CN={}".format(subject["cn"])
else:
hookenv.log("When using \"subject\" argument you must "
"provide \"cn\" field at very least")
return False
if "email" in subject:
ssl_subject = ssl_subject + "/emailAddress={}".format(subject["email"])
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
"-keyout", keyfile,
"-out", certfile, "-subj", ssl_subject]
elif cn:
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
"-keyout", keyfile,
"-out", certfile, "-subj", "/CN={}".format(cn)]
if not cmd:
hookenv.log("No config, subject or cn provided,"
"unable to generate self signed SSL certificates")
return False
try:
subprocess.check_call(cmd)
return True
except Exception as e:
print("Execution of openssl command failed:\n{}".format(e))
return False | charmhelpers/contrib/ssl/__init__.py |
import subprocess
from charmhelpers.core import hookenv
def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=None, cn=None):
"""Generate selfsigned SSL key pair
You must provide one of the 3 optional arguments:
config, subject or cn
If more than one is provided the leftmost will be used
Arguments:
keyfile -- (required) full path to the keyfile to be created
certfile -- (required) full path to the certfile to be created
keysize -- (optional) SSL key length
config -- (optional) openssl configuration file
subject -- (optional) dictionary with SSL subject variables
cn -- (optional) cerfificate common name
Required keys in subject dict:
cn -- Common name (eq. FQDN)
Optional keys in subject dict
country -- Country Name (2 letter code)
state -- State or Province Name (full name)
locality -- Locality Name (eg, city)
organization -- Organization Name (eg, company)
organizational_unit -- Organizational Unit Name (eg, section)
email -- Email Address
"""
cmd = []
if config:
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
"-keyout", keyfile,
"-out", certfile, "-config", config]
elif subject:
ssl_subject = ""
if "country" in subject:
ssl_subject = ssl_subject + "/C={}".format(subject["country"])
if "state" in subject:
ssl_subject = ssl_subject + "/ST={}".format(subject["state"])
if "locality" in subject:
ssl_subject = ssl_subject + "/L={}".format(subject["locality"])
if "organization" in subject:
ssl_subject = ssl_subject + "/O={}".format(subject["organization"])
if "organizational_unit" in subject:
ssl_subject = ssl_subject + "/OU={}".format(subject["organizational_unit"])
if "cn" in subject:
ssl_subject = ssl_subject + "/CN={}".format(subject["cn"])
else:
hookenv.log("When using \"subject\" argument you must "
"provide \"cn\" field at very least")
return False
if "email" in subject:
ssl_subject = ssl_subject + "/emailAddress={}".format(subject["email"])
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
"-keyout", keyfile,
"-out", certfile, "-subj", ssl_subject]
elif cn:
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
"-keyout", keyfile,
"-out", certfile, "-subj", "/CN={}".format(cn)]
if not cmd:
hookenv.log("No config, subject or cn provided,"
"unable to generate self signed SSL certificates")
return False
try:
subprocess.check_call(cmd)
return True
except Exception as e:
print("Execution of openssl command failed:\n{}".format(e))
return False | 0.518546 | 0.134349 |
from __future__ import annotations
from homeassistant.components.sensor import (
DEVICE_CLASS_TEMPERATURE,
STATE_CLASS_MEASUREMENT,
SensorEntity,
)
from homeassistant.const import TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import get_coordinator
from .const import DOMAIN
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigType,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the sensor platform."""
coordinator = await get_coordinator(hass, config_entry)
async_add_entities(
SenzSensor(coordinator, idx) for idx, ent in enumerate(coordinator.data)
)
class SenzSensor(CoordinatorEntity, SensorEntity):
"""Representation of a Sensor."""
def __init__(self, coordinator, idx):
"""Initialize the sensor."""
super().__init__(coordinator)
self._idx = idx
self._state = None
self._attr_name = self.coordinator.data[self._idx]["name"]
self._attr_device_class = DEVICE_CLASS_TEMPERATURE
self._attr_native_unit_of_measurement = TEMP_CELSIUS
self._attr_state_class = STATE_CLASS_MEASUREMENT
self._attr_unique_id = (
f"temp-{self.coordinator.data[self._idx]['serialNumber']}"
)
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, self.coordinator.data[self._idx]["serialNumber"])},
name=self.coordinator.data[self._idx]["name"],
manufacturer="nVent",
model="SENZ WiFi Thermostat",
)
@property
def native_value(self):
"""Return the state of the sensor."""
return round(self.coordinator.data[self._idx]["currentTemperature"] / 100, 1)
@property
def available(self):
"""Return the availability of the entity."""
if not self.coordinator.last_update_success:
return False
return self.coordinator.data[self._idx]["online"] | custom_components/senz/sensor.py | from __future__ import annotations
from homeassistant.components.sensor import (
DEVICE_CLASS_TEMPERATURE,
STATE_CLASS_MEASUREMENT,
SensorEntity,
)
from homeassistant.const import TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import get_coordinator
from .const import DOMAIN
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigType,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the sensor platform."""
coordinator = await get_coordinator(hass, config_entry)
async_add_entities(
SenzSensor(coordinator, idx) for idx, ent in enumerate(coordinator.data)
)
class SenzSensor(CoordinatorEntity, SensorEntity):
"""Representation of a Sensor."""
def __init__(self, coordinator, idx):
"""Initialize the sensor."""
super().__init__(coordinator)
self._idx = idx
self._state = None
self._attr_name = self.coordinator.data[self._idx]["name"]
self._attr_device_class = DEVICE_CLASS_TEMPERATURE
self._attr_native_unit_of_measurement = TEMP_CELSIUS
self._attr_state_class = STATE_CLASS_MEASUREMENT
self._attr_unique_id = (
f"temp-{self.coordinator.data[self._idx]['serialNumber']}"
)
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, self.coordinator.data[self._idx]["serialNumber"])},
name=self.coordinator.data[self._idx]["name"],
manufacturer="nVent",
model="SENZ WiFi Thermostat",
)
@property
def native_value(self):
"""Return the state of the sensor."""
return round(self.coordinator.data[self._idx]["currentTemperature"] / 100, 1)
@property
def available(self):
"""Return the availability of the entity."""
if not self.coordinator.last_update_success:
return False
return self.coordinator.data[self._idx]["online"] | 0.84572 | 0.119794 |
import json
class InvalidTypeException(Exception):
pass
class ConfigField:
def __init__(self, name, default, type):
self.name = name
self.default = default
self.type = type
self.value = default
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if not self.validate(value):
raise InvalidTypeException(
f'Invalid type for config field. Expected {self.type}, got {type(value)} for {value}')
self._value = value
def validate(self, v):
if not isinstance(v, self.type):
return False
return True
def serialize(self):
return {
'name': self.name,
'type': self.type_to_string(),
'default': self.default,
'value': self.value
}
def type_to_string(self):
return str(self.type)
def serialize_json(self):
return json.dumps(self.serialize())
class BoolField(ConfigField):
def __init__(self, name, default):
super().__init__(name, default, bool)
def type_to_string(self):
return 'bool'
class StringField(ConfigField):
def __init__(self, name, default):
super().__init__(name, default, str)
def type_to_string(self):
return 'str'
class IntField(ConfigField):
def __init__(self, name, default):
super().__init__(name, default, int)
def type_to_string(self):
return 'int'
class ChoiceField(ConfigField):
def __init__(self, name, default, choices: tuple, type):
self.choices = choices # needs to be assigned before super call for validate to work properly
super().__init__(name, default, type)
if default not in self.choices or not all([isinstance(i, self.type) for i in self.choices]):
raise InvalidTypeException(
f'Choices provided to choice field ({self.choices}) do not match given type ({self.type})')
def validate(self, v):
if v not in self.choices:
return False
return True
def serialize(self):
sup = super().serialize().copy()
sup.update({
'choices': self.choices,
})
return sup
def type_to_string(self):
return 'choice'
class ListField(ConfigField):
def __init__(self, name, default):
super().__init__(name, default, list)
def type_to_string(self):
return 'list'
class TupleListField(ListField):
def __init__(self, name, default, n_elems, element_names: tuple):
super().__init__(name, default)
self.n_elems = n_elems
self.elem_names = element_names
def validate(self, v):
if not isinstance(v, self.type):
for i in v:
if not isinstance(i, (list, tuple)) or len(i) != self.n_elems:
return False
return True
def serialize(self):
sup = super().serialize().copy()
sup.update({
'n_elems': self.n_elems,
'elem_names': self.elem_names
})
return sup
def type_to_string(self):
return 'tuple_list'
class DictField(ConfigField):
def __init__(self, name, default):
super().__init__(name, default, dict)
def type_to_string(self):
return 'dict' | modular_conf/fields.py | import json
class InvalidTypeException(Exception):
pass
class ConfigField:
def __init__(self, name, default, type):
self.name = name
self.default = default
self.type = type
self.value = default
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if not self.validate(value):
raise InvalidTypeException(
f'Invalid type for config field. Expected {self.type}, got {type(value)} for {value}')
self._value = value
def validate(self, v):
if not isinstance(v, self.type):
return False
return True
def serialize(self):
return {
'name': self.name,
'type': self.type_to_string(),
'default': self.default,
'value': self.value
}
def type_to_string(self):
return str(self.type)
def serialize_json(self):
return json.dumps(self.serialize())
class BoolField(ConfigField):
def __init__(self, name, default):
super().__init__(name, default, bool)
def type_to_string(self):
return 'bool'
class StringField(ConfigField):
def __init__(self, name, default):
super().__init__(name, default, str)
def type_to_string(self):
return 'str'
class IntField(ConfigField):
def __init__(self, name, default):
super().__init__(name, default, int)
def type_to_string(self):
return 'int'
class ChoiceField(ConfigField):
def __init__(self, name, default, choices: tuple, type):
self.choices = choices # needs to be assigned before super call for validate to work properly
super().__init__(name, default, type)
if default not in self.choices or not all([isinstance(i, self.type) for i in self.choices]):
raise InvalidTypeException(
f'Choices provided to choice field ({self.choices}) do not match given type ({self.type})')
def validate(self, v):
if v not in self.choices:
return False
return True
def serialize(self):
sup = super().serialize().copy()
sup.update({
'choices': self.choices,
})
return sup
def type_to_string(self):
return 'choice'
class ListField(ConfigField):
def __init__(self, name, default):
super().__init__(name, default, list)
def type_to_string(self):
return 'list'
class TupleListField(ListField):
def __init__(self, name, default, n_elems, element_names: tuple):
super().__init__(name, default)
self.n_elems = n_elems
self.elem_names = element_names
def validate(self, v):
if not isinstance(v, self.type):
for i in v:
if not isinstance(i, (list, tuple)) or len(i) != self.n_elems:
return False
return True
def serialize(self):
sup = super().serialize().copy()
sup.update({
'n_elems': self.n_elems,
'elem_names': self.elem_names
})
return sup
def type_to_string(self):
return 'tuple_list'
class DictField(ConfigField):
def __init__(self, name, default):
super().__init__(name, default, dict)
def type_to_string(self):
return 'dict' | 0.652795 | 0.124719 |
import argparse
from decomp import main as decomp, _str2bool
from sample import main as sample
from draw import main as draw
from vocab import vocab
def get_parser():
parser = argparse.ArgumentParser(
description=
'tool for analyze (parallel / non-parallel) translation corpus.')
subparsers = parser.add_subparsers()
decomp_parser = subparsers.add_parser('decomp')
decomp_parser.add_argument('input', help='the input fname.')
decomp_parser.add_argument('output', nargs='?', help='the output fname.')
decomp_parser.add_argument(
'--reverse',
default=False,
type=_str2bool,
help=
'whether to reverse process the input file. If reverse: compose back'
' to normal text file from input fname and vocab fname. Else: do the '
'normal decomposition.')
decomp_parser.add_argument(
'--vocab_decomp',
type=str,
help='the vocab_decomp fname. in decomp process, vocab file will be '
'generated automatically; in comp process, vocab file must exist to '
'be read from.')
decomp_parser.set_defaults(func=decomp)
sample_parser = subparsers.add_parser('sample')
sample_parser.add_argument('src_fname', type=str, help='source file name.')
sample_parser.add_argument('trg_fname', type=str, help='target file name.')
sample_parser.add_argument(
'-n',
type=int,
help=
'num of sampled sentences. should not larger than num of lines in either files.'
)
sample_parser.add_argument(
'-r', type=float, help='the target share token rate for sampling.')
sample_parser.add_argument(
'-k', type=int, help='num of sents extracted for each sample step.')
sample_parser.add_argument(
'-d',
'--draw',
type=str,
help='if given, draw a graph of sampling process. should end with .html'
)
sample_parser.set_defaults(func=sample)
draw_parser = subparsers.add_parser('draw')
draw_parser.add_argument(
'src_fname', type=str, help='the source file name.')
draw_parser.add_argument(
'trg_fname', type=str, help='the target file name')
draw_parser.add_argument(
'--type',
type=str,
choices=['scatter', 'rate', 'both'],
help='whether to only draw shared tokens')
draw_parser.add_argument(
'--output_prefix', default='pref', help='output prefix.')
sample_parser.add_argument(
'--src_output',
type=str,
default='src_sampled.txt',
help='source output filename.')
sample_parser.add_argument(
'--trg_output',
type=str,
default='trg_sampled.txt',
help='target output filename.')
draw_parser.set_defaults(func=draw)
vocab_parser = subparsers.add_parser('vocab')
vocab_parser.add_argument('input', nargs='*', help='input fnames.')
vocab_parser.add_argument('vocab', help='output vocab fname.')
vocab_parser.add_argument(
'vocab_decomp', help='output vocab_decomp fname.')
vocab_parser.add_argument(
'--level',
default='ideo_raw',
choices=['ideo_raw', 'ideo_finest', 'stroke'],
help='to what level should the decomposition be.')
vocab_parser.add_argument(
'--idc',
default=True,
type=_str2bool,
help='whether to include structual IDCs in the decomp. (yes/no)')
vocab_parser.set_defaults(func=vocab)
return parser
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
args.func(args) | textprep.py | import argparse
from decomp import main as decomp, _str2bool
from sample import main as sample
from draw import main as draw
from vocab import vocab
def get_parser():
parser = argparse.ArgumentParser(
description=
'tool for analyze (parallel / non-parallel) translation corpus.')
subparsers = parser.add_subparsers()
decomp_parser = subparsers.add_parser('decomp')
decomp_parser.add_argument('input', help='the input fname.')
decomp_parser.add_argument('output', nargs='?', help='the output fname.')
decomp_parser.add_argument(
'--reverse',
default=False,
type=_str2bool,
help=
'whether to reverse process the input file. If reverse: compose back'
' to normal text file from input fname and vocab fname. Else: do the '
'normal decomposition.')
decomp_parser.add_argument(
'--vocab_decomp',
type=str,
help='the vocab_decomp fname. in decomp process, vocab file will be '
'generated automatically; in comp process, vocab file must exist to '
'be read from.')
decomp_parser.set_defaults(func=decomp)
sample_parser = subparsers.add_parser('sample')
sample_parser.add_argument('src_fname', type=str, help='source file name.')
sample_parser.add_argument('trg_fname', type=str, help='target file name.')
sample_parser.add_argument(
'-n',
type=int,
help=
'num of sampled sentences. should not larger than num of lines in either files.'
)
sample_parser.add_argument(
'-r', type=float, help='the target share token rate for sampling.')
sample_parser.add_argument(
'-k', type=int, help='num of sents extracted for each sample step.')
sample_parser.add_argument(
'-d',
'--draw',
type=str,
help='if given, draw a graph of sampling process. should end with .html'
)
sample_parser.set_defaults(func=sample)
draw_parser = subparsers.add_parser('draw')
draw_parser.add_argument(
'src_fname', type=str, help='the source file name.')
draw_parser.add_argument(
'trg_fname', type=str, help='the target file name')
draw_parser.add_argument(
'--type',
type=str,
choices=['scatter', 'rate', 'both'],
help='whether to only draw shared tokens')
draw_parser.add_argument(
'--output_prefix', default='pref', help='output prefix.')
sample_parser.add_argument(
'--src_output',
type=str,
default='src_sampled.txt',
help='source output filename.')
sample_parser.add_argument(
'--trg_output',
type=str,
default='trg_sampled.txt',
help='target output filename.')
draw_parser.set_defaults(func=draw)
vocab_parser = subparsers.add_parser('vocab')
vocab_parser.add_argument('input', nargs='*', help='input fnames.')
vocab_parser.add_argument('vocab', help='output vocab fname.')
vocab_parser.add_argument(
'vocab_decomp', help='output vocab_decomp fname.')
vocab_parser.add_argument(
'--level',
default='ideo_raw',
choices=['ideo_raw', 'ideo_finest', 'stroke'],
help='to what level should the decomposition be.')
vocab_parser.add_argument(
'--idc',
default=True,
type=_str2bool,
help='whether to include structual IDCs in the decomp. (yes/no)')
vocab_parser.set_defaults(func=vocab)
return parser
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
args.func(args) | 0.582847 | 0.081813 |
import numpy as np
import glob
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import warnings
warnings.filterwarnings("ignore")
import matplotlib
mpl.rcParams.update({
"font.size": 34.0,
"axes.titlesize": 34.0,
"axes.labelsize": 34.0,
"xtick.labelsize": 34.0,
"ytick.labelsize": 34.0,
"legend.fontsize": 34.0,
"figure.figsize": (25, 10),
"figure.dpi": 300,
"savefig.dpi": 300,
"text.usetex": True
})
def plot_convergence():
deltapsi_0p1_pconv = np.load("../data/schwarzschild/schwarzschild_M_0p1_pconv")
deltapsi_0p3_pconv = np.load("../data/schwarzschild/schwarzschild_M_1_pconv")
deltapsi_0p5_pconv = np.load("../data/schwarzschild/schwarzschild_M_8_pconv")
deltapsi_0p1_hconv = np.load("../data/schwarzschild/schwarzschild_M_0p1_hconv")
deltapsi_0p3_hconv = np.load("../data/schwarzschild/schwarzschild_M_1_hconv")
deltapsi_0p5_hconv = np.load("../data/schwarzschild/schwarzschild_M_8_hconv")
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=False, sharex=False)
ax1.semilogy(deltapsi_0p1_pconv["n"], deltapsi_0p1_pconv["e"], "-o", label= r"$M = 0.125$")
ax1.semilogy(deltapsi_0p3_pconv["n"], deltapsi_0p3_pconv["e"], "-o", label= r"$M = 1.0$")
ax1.semilogy(deltapsi_0p5_pconv["n"], deltapsi_0p5_pconv["e"], "-o", label= r"$M = 8.0$")
ax2.plot(np.log2(deltapsi_0p5_hconv["n"][1:]), np.true_divide(deltapsi_0p5_hconv["e"][:-1], deltapsi_0p5_hconv["e"][1:])[0:], "--o", markersize=20.0, label= r"$M = 8.0$")
ax2.plot(np.log2(deltapsi_0p1_hconv["n"][1:]), np.true_divide(deltapsi_0p1_hconv["e"][:-1], deltapsi_0p1_hconv["e"][1:])[0:], "--o", markersize=10.0, label= r"$M = 0.125$")
ax2.plot(np.log2(deltapsi_0p3_hconv["n"][1:]), np.true_divide(deltapsi_0p3_hconv["e"][:-1], deltapsi_0p3_hconv["e"][1:])[0:], "k--o", markersize=5.0, label= r"$M = 1.0$")
ax1.set_xlabel(r"$p$")
ax1.set_ylabel(r"$\|\mathcal{C}_p\|_2$")
ax1.legend(frameon=False)
ax2.set_xlabel(r"$h$")
ax2.set_ylabel(r"$\|\mathcal{C}_{h}\|_2 ~/~ \|\mathcal{C}_{h+1}\|_2$")
ax2.legend(frameon=False)
ax2.set_xlim(1.5, 7.5)
ax1.tick_params(axis='both', which='major', size=10)
ax2.tick_params(axis='both', which='major', size=10)
fig.subplots_adjust(hspace=0.1)
plt.tight_layout()
fig.savefig("schwarzschild_convergence.pdf")
return 0
def plot_solution():
unames = glob.glob("../data/schwarzschild/schwarzschild_eta_*")
umax = np.nanmax(list(map(lambda x: np.nanmax(np.load(x)["w"]), unames)))
umin = np.nanmin(list(map(lambda x: np.nanmin(np.load(x)["w"]), unames)))
ulevels = np.linspace(umin, umax, 40)
print(ulevels)
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=False, sharex=False)
for fname in unames:
u = np.load(fname)
A1 = ax1.contourf(u["v"], u["u"], np.nan_to_num(u["w"]),
vmax=np.amax(ulevels), vmin=np.amin(ulevels), levels=ulevels)
# ax1.plot(u["v"], 1 / u["v"], "--k")
qnames = glob.glob("../data/schwarzschild/schwarzschild_constraints*")
qmax = np.nanmax(list(map(lambda x: np.nanmax(np.load(x)["w"]), qnames)))
qmin = np.nanmin(list(map(lambda x: np.nanmin(np.load(x)["w"]), qnames)))
print(qmax, qmin)
# qlevels = np.log10(np.abs(np.linspace(qmin, qmax, 40)))
qlevels = np.arange(-10, 1, 0.6)
for fname in qnames:
q = np.load(fname)
lp = np.log10(np.abs(np.nan_to_num(q["w"])))
A2 = ax2.contourf(q["v"], q["u"], lp, vmax=np.amax(qlevels), vmin=np.amin(qlevels), levels=qlevels)
# ax2.plot(q["v"], 1 / q["v"], "--k")
ax1.tick_params(axis='both', which='major', size=10)
ax1.set_xlabel(r"$v$")
ax1.set_ylabel(r"$u$")
ax1.set_ylim(-3, 0)
ax1.set_xlim(2, 5)
ax1.set_title("$r(u,v)$", pad=20)
ax2.set_title("$\log_{10} \mathcal{|C|}$", pad=20)
ax2.tick_params(axis='both', which='major', size=10)
ax2.set_xlabel(r"$v$")
ax2.set_ylabel(r"$u$")
ax2.set_ylim(-3, 0)
ax2.set_xlim(2, 5)
fig.colorbar(A1, ax=ax1)
fig.colorbar(A2, ax=ax2)
plt.tight_layout()
fig.savefig("schwarzschild-solution.pdf")
return 0
plot_convergence() | output/python/plot_schwarzschild.py |
import numpy as np
import glob
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import warnings
warnings.filterwarnings("ignore")
import matplotlib
mpl.rcParams.update({
"font.size": 34.0,
"axes.titlesize": 34.0,
"axes.labelsize": 34.0,
"xtick.labelsize": 34.0,
"ytick.labelsize": 34.0,
"legend.fontsize": 34.0,
"figure.figsize": (25, 10),
"figure.dpi": 300,
"savefig.dpi": 300,
"text.usetex": True
})
def plot_convergence():
deltapsi_0p1_pconv = np.load("../data/schwarzschild/schwarzschild_M_0p1_pconv")
deltapsi_0p3_pconv = np.load("../data/schwarzschild/schwarzschild_M_1_pconv")
deltapsi_0p5_pconv = np.load("../data/schwarzschild/schwarzschild_M_8_pconv")
deltapsi_0p1_hconv = np.load("../data/schwarzschild/schwarzschild_M_0p1_hconv")
deltapsi_0p3_hconv = np.load("../data/schwarzschild/schwarzschild_M_1_hconv")
deltapsi_0p5_hconv = np.load("../data/schwarzschild/schwarzschild_M_8_hconv")
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=False, sharex=False)
ax1.semilogy(deltapsi_0p1_pconv["n"], deltapsi_0p1_pconv["e"], "-o", label= r"$M = 0.125$")
ax1.semilogy(deltapsi_0p3_pconv["n"], deltapsi_0p3_pconv["e"], "-o", label= r"$M = 1.0$")
ax1.semilogy(deltapsi_0p5_pconv["n"], deltapsi_0p5_pconv["e"], "-o", label= r"$M = 8.0$")
ax2.plot(np.log2(deltapsi_0p5_hconv["n"][1:]), np.true_divide(deltapsi_0p5_hconv["e"][:-1], deltapsi_0p5_hconv["e"][1:])[0:], "--o", markersize=20.0, label= r"$M = 8.0$")
ax2.plot(np.log2(deltapsi_0p1_hconv["n"][1:]), np.true_divide(deltapsi_0p1_hconv["e"][:-1], deltapsi_0p1_hconv["e"][1:])[0:], "--o", markersize=10.0, label= r"$M = 0.125$")
ax2.plot(np.log2(deltapsi_0p3_hconv["n"][1:]), np.true_divide(deltapsi_0p3_hconv["e"][:-1], deltapsi_0p3_hconv["e"][1:])[0:], "k--o", markersize=5.0, label= r"$M = 1.0$")
ax1.set_xlabel(r"$p$")
ax1.set_ylabel(r"$\|\mathcal{C}_p\|_2$")
ax1.legend(frameon=False)
ax2.set_xlabel(r"$h$")
ax2.set_ylabel(r"$\|\mathcal{C}_{h}\|_2 ~/~ \|\mathcal{C}_{h+1}\|_2$")
ax2.legend(frameon=False)
ax2.set_xlim(1.5, 7.5)
ax1.tick_params(axis='both', which='major', size=10)
ax2.tick_params(axis='both', which='major', size=10)
fig.subplots_adjust(hspace=0.1)
plt.tight_layout()
fig.savefig("schwarzschild_convergence.pdf")
return 0
def plot_solution():
unames = glob.glob("../data/schwarzschild/schwarzschild_eta_*")
umax = np.nanmax(list(map(lambda x: np.nanmax(np.load(x)["w"]), unames)))
umin = np.nanmin(list(map(lambda x: np.nanmin(np.load(x)["w"]), unames)))
ulevels = np.linspace(umin, umax, 40)
print(ulevels)
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=False, sharex=False)
for fname in unames:
u = np.load(fname)
A1 = ax1.contourf(u["v"], u["u"], np.nan_to_num(u["w"]),
vmax=np.amax(ulevels), vmin=np.amin(ulevels), levels=ulevels)
# ax1.plot(u["v"], 1 / u["v"], "--k")
qnames = glob.glob("../data/schwarzschild/schwarzschild_constraints*")
qmax = np.nanmax(list(map(lambda x: np.nanmax(np.load(x)["w"]), qnames)))
qmin = np.nanmin(list(map(lambda x: np.nanmin(np.load(x)["w"]), qnames)))
print(qmax, qmin)
# qlevels = np.log10(np.abs(np.linspace(qmin, qmax, 40)))
qlevels = np.arange(-10, 1, 0.6)
for fname in qnames:
q = np.load(fname)
lp = np.log10(np.abs(np.nan_to_num(q["w"])))
A2 = ax2.contourf(q["v"], q["u"], lp, vmax=np.amax(qlevels), vmin=np.amin(qlevels), levels=qlevels)
# ax2.plot(q["v"], 1 / q["v"], "--k")
ax1.tick_params(axis='both', which='major', size=10)
ax1.set_xlabel(r"$v$")
ax1.set_ylabel(r"$u$")
ax1.set_ylim(-3, 0)
ax1.set_xlim(2, 5)
ax1.set_title("$r(u,v)$", pad=20)
ax2.set_title("$\log_{10} \mathcal{|C|}$", pad=20)
ax2.tick_params(axis='both', which='major', size=10)
ax2.set_xlabel(r"$v$")
ax2.set_ylabel(r"$u$")
ax2.set_ylim(-3, 0)
ax2.set_xlim(2, 5)
fig.colorbar(A1, ax=ax1)
fig.colorbar(A2, ax=ax2)
plt.tight_layout()
fig.savefig("schwarzschild-solution.pdf")
return 0
plot_convergence() | 0.402275 | 0.455441 |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import textwrap
from twitter.common.confluence import Confluence, ConfluenceError
from pants.backend.core.targets.doc import Page
from pants.backend.core.tasks.task import Task
from pants.base.exceptions import TaskError
from pants.binaries import binary_util
from pants.util.dirutil import safe_open
class ConfluencePublish(Task):
"""A task to publish Page targets to Confluence wikis."""
@classmethod
def register_options(cls, register):
super(ConfluencePublish, cls).register_options(register)
# TODO: https://github.com/pantsbuild/pants/issues/395:
# url should probably be a param of the wiki, not a config.
register('--url', help='The url of the confluence site to post to.')
register('--force', action='store_true', default=False,
help='Force publish the page even if its contents is '
'identical to the contents on confluence.')
register('--open', action='store_true', default=False,
help='Attempt to open the published confluence wiki page in a browser.')
register('--user', help='Confluence user name, defaults to unix user.')
@classmethod
def prepare(cls, options, round_manager):
round_manager.require('wiki_html')
def __init__(self, *args, **kwargs):
super(ConfluencePublish, self).__init__(*args, **kwargs)
self.url = self.get_options().url
if not self.url:
raise TaskError('Unable to proceed publishing to confluence. Please set the url option.')
self.force = self.get_options().force
self.open = self.get_options().open
self._wiki = None
self.user = self.get_options().user
def wiki(self):
raise NotImplementedError('Subclasses must provide the wiki target they are associated with')
def api(self):
return 'confluence1'
def execute(self):
pages = []
targets = self.context.targets()
for target in targets:
if isinstance(target, Page):
for wiki_artifact in target.payload.provides:
pages.append((target, wiki_artifact))
urls = list()
genmap = self.context.products.get('wiki_html')
for page, wiki_artifact in pages:
html_info = genmap.get((wiki_artifact, page))
if len(html_info) > 1:
raise TaskError('Unexpected resources for {}: {}'.format(page, html_info))
basedir, htmls = html_info.items()[0]
if len(htmls) != 1:
raise TaskError('Unexpected resources for {}: {}'.format(page, htmls))
with safe_open(os.path.join(basedir, htmls[0])) as contents:
url = self.publish_page(
page.address,
wiki_artifact.config['space'],
wiki_artifact.config['title'],
contents.read(),
# Default to none if not present in the hash.
parent=wiki_artifact.config.get('parent')
)
if url:
urls.append(url)
self.context.log.info('Published {} to {}'.format(page, url))
if self.open and urls:
binary_util.ui_open(*urls)
def publish_page(self, address, space, title, content, parent=None):
body = textwrap.dedent('''
<!-- DO NOT EDIT - generated by pants from {} -->
{}
''').strip().format(address, content)
pageopts = dict(
versionComment='updated by pants!'
)
wiki = self.login()
existing = wiki.getpage(space, title)
if existing:
if not self.force and existing['content'].strip() == body.strip():
self.context.log.warn("Skipping publish of '{}' - no changes".format(title))
return
pageopts['id'] = existing['id']
pageopts['version'] = existing['version']
try:
page = wiki.create_html_page(space, title, body, parent, **pageopts)
return page['url']
except ConfluenceError as e:
raise TaskError('Failed to update confluence: {}'.format(e))
def login(self):
if not self._wiki:
try:
self._wiki = Confluence.login(self.url, self.user, self.api())
except ConfluenceError as e:
raise TaskError('Failed to login to confluence: {}'.format(e))
return self._wiki | src/python/pants/backend/core/tasks/confluence_publish.py |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import textwrap
from twitter.common.confluence import Confluence, ConfluenceError
from pants.backend.core.targets.doc import Page
from pants.backend.core.tasks.task import Task
from pants.base.exceptions import TaskError
from pants.binaries import binary_util
from pants.util.dirutil import safe_open
class ConfluencePublish(Task):
  """A task to publish Page targets to Confluence wikis."""

  @classmethod
  def register_options(cls, register):
    """Register the url/force/open/user options for the publish task."""
    super(ConfluencePublish, cls).register_options(register)
    # TODO: https://github.com/pantsbuild/pants/issues/395:
    # url should probably be a param of the wiki, not a config.
    register('--url', help='The url of the confluence site to post to.')
    register('--force', action='store_true', default=False,
             help='Force publish the page even if its contents is '
                  'identical to the contents on confluence.')
    register('--open', action='store_true', default=False,
             help='Attempt to open the published confluence wiki page in a browser.')
    register('--user', help='Confluence user name, defaults to unix user.')

  @classmethod
  def prepare(cls, options, round_manager):
    # Publishing consumes the html produced by the wiki doc generation task.
    round_manager.require('wiki_html')

  def __init__(self, *args, **kwargs):
    super(ConfluencePublish, self).__init__(*args, **kwargs)
    self.url = self.get_options().url
    if not self.url:
      raise TaskError('Unable to proceed publishing to confluence. Please set the url option.')
    self.force = self.get_options().force
    self.open = self.get_options().open
    self._wiki = None  # Lazily-created Confluence session; see login().
    self.user = self.get_options().user

  def wiki(self):
    raise NotImplementedError('Subclasses must provide the wiki target they are associated with')

  def api(self):
    """Return the Confluence remote API version identifier to log in with."""
    return 'confluence1'

  def execute(self):
    """Publish the html rendered for every Page target, optionally opening it."""
    pages = []
    targets = self.context.targets()
    for target in targets:
      if isinstance(target, Page):
        for wiki_artifact in target.payload.provides:
          pages.append((target, wiki_artifact))
    urls = list()
    genmap = self.context.products.get('wiki_html')
    for page, wiki_artifact in pages:
      html_info = genmap.get((wiki_artifact, page))
      if len(html_info) > 1:
        raise TaskError('Unexpected resources for {}: {}'.format(page, html_info))
      # BUGFIX: dict.items() is a view on python 3 and does not support
      # indexing; materialize it first (still correct on python 2).
      basedir, htmls = list(html_info.items())[0]
      if len(htmls) != 1:
        raise TaskError('Unexpected resources for {}: {}'.format(page, htmls))
      with safe_open(os.path.join(basedir, htmls[0])) as contents:
        url = self.publish_page(
          page.address,
          wiki_artifact.config['space'],
          wiki_artifact.config['title'],
          contents.read(),
          # Default to none if not present in the hash.
          parent=wiki_artifact.config.get('parent')
        )
        if url:
          urls.append(url)
          self.context.log.info('Published {} to {}'.format(page, url))
    if self.open and urls:
      binary_util.ui_open(*urls)

  def publish_page(self, address, space, title, content, parent=None):
    """Create or update one Confluence page; return its url (None if skipped)."""
    body = textwrap.dedent('''
    <!-- DO NOT EDIT - generated by pants from {} -->
    {}
    ''').strip().format(address, content)
    pageopts = dict(
      versionComment='updated by pants!'
    )
    wiki = self.login()
    existing = wiki.getpage(space, title)
    if existing:
      # Skip a no-op republish (which would bump the page version) unless forced.
      if not self.force and existing['content'].strip() == body.strip():
        self.context.log.warn("Skipping publish of '{}' - no changes".format(title))
        return
      # Carrying over id/version makes the create call update the existing page.
      pageopts['id'] = existing['id']
      pageopts['version'] = existing['version']
    try:
      page = wiki.create_html_page(space, title, body, parent, **pageopts)
      return page['url']
    except ConfluenceError as e:
      raise TaskError('Failed to update confluence: {}'.format(e))

  def login(self):
    """Login to Confluence lazily, caching and reusing the session."""
    if not self._wiki:
      try:
        self._wiki = Confluence.login(self.url, self.user, self.api())
      except ConfluenceError as e:
        raise TaskError('Failed to login to confluence: {}'.format(e))
    return self._wiki
import tensorflow as tf
import numpy as np
import base64
from tensorflow.saved_model import simple_save
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model.signature_def_utils\
import predict_signature_def
from tensorflow.python.saved_model.tag_constants import SERVING
from tensorflow.python.saved_model.signature_constants\
import DEFAULT_SERVING_SIGNATURE_DEF_KEY
class FaceDetector:
    """Wraps a frozen face-detection graph and runs it on base64 jpeg input."""

    def __init__(self, model_path, gpu_memory_fraction=0.25, visible_device_list='0'):
        """
        Arguments:
            model_path: a string, path to a pb file.
            gpu_memory_fraction: a float number.
            visible_device_list: a string.
        """
        with tf.gfile.GFile(model_path, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())

        # BUGFIX: `n.op in ('Placeholder')` tested substring membership of a
        # string ('Placeholder' is not a tuple); compare for equality instead.
        nodes = [n.name + ' => ' + n.op for n in graph_def.node if n.op == 'Placeholder']
        print(nodes)

        # BUGFIX: the input placeholder, the base64 decoding pipeline and the
        # frozen graph must all live in the *same* graph the session runs.
        # The original imported the input-mapped graph into the default graph
        # but created the session on a fresh, unmapped graph, so feeding
        # self.input_image raised "Tensor ... is not an element of this graph".
        graph = tf.Graph()
        with graph.as_default():
            self.input_image = tf.placeholder(tf.string, shape=(None,), name="input_image")
            input_image_tensor = self.load_base64_tensor(self.input_image)
            tf.import_graph_def(graph_def, {'image_tensor': input_image_tensor}, name='import')
            self.output_ops = {
                "boxes": graph.get_tensor_by_name('import/boxes:0'),
                "scores": graph.get_tensor_by_name('import/scores:0'),
                "num_boxes": graph.get_tensor_by_name('import/num_boxes:0')
            }

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=gpu_memory_fraction,
            visible_device_list=visible_device_list
        )
        config_proto = tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False)
        self.sess = tf.Session(graph=graph, config=config_proto)
        self.graph = graph

    def __tf_jpeg_process(self, data):
        """Decode one jpeg byte string into a uint8 RGB image tensor."""
        # The whole jpeg encode/decode dance is neccessary to generate a result
        # that matches the original model's (caffe) preprocessing
        # (as good as possible)
        image = tf.image.decode_jpeg(data, channels=3,
                                     fancy_upscaling=True,
                                     dct_method="INTEGER_FAST")
        image = tf.image.convert_image_dtype(image, tf.uint8, saturate=True)
        return image

    def load_base64_tensor(self, _input):
        """Map a batch of web-safe base64 strings to a batch of uint8 images."""
        def decode_and_process(base64):
            _bytes = tf.decode_base64(base64)
            _image = self.__tf_jpeg_process(_bytes)
            return _image

        # we have to do some preprocessing with map_fn, since functions like
        # decode_*, resize_images and crop_to_bounding_box do not support
        # processing of batches
        image = tf.map_fn(decode_and_process, _input,
                          back_prop=False, dtype=tf.uint8)
        return image

    def export(self):
        """Export the wrapped graph as a SavedModel under models/1."""
        import os
        inputs = {'b64_image': self.input_image}
        export_path = os.path.join(tf.compat.as_bytes("models"), tf.compat.as_bytes(str("1")))
        builder = saved_model_builder.SavedModelBuilder(export_path)
        builder.add_meta_graph_and_variables(
            self.sess, [SERVING],
            signature_def_map={
                DEFAULT_SERVING_SIGNATURE_DEF_KEY: predict_signature_def(
                    inputs=inputs,
                    outputs=self.output_ops
                )
            }
        )
        builder.save()

    def detect(self, b64_image, score_threshold=0.5, image_size=None):
        """Detect faces on a batch (of one) base64-encoded jpeg image.

        Arguments:
            b64_image: iterable of web-safe base64 jpeg strings.
            score_threshold: minimum confidence for a detection to be kept.
            image_size: optional (height, width) of the decoded image. When
                given, boxes are scaled to pixel coordinates; otherwise the
                normalized coordinates are returned.
        Returns:
            (boxes, scores) numpy arrays for the first image of the batch.
        """
        # BUGFIX: fetching a dict returns a dict; tuple-unpacking it binds the
        # *keys* (strings), not the fetched arrays.
        outputs = self.sess.run(
            self.output_ops, feed_dict={self.input_image: b64_image}
        )
        num_boxes = outputs['num_boxes'][0]
        boxes = outputs['boxes'][0][:num_boxes]
        scores = outputs['scores'][0][:num_boxes]

        to_keep = scores > score_threshold
        boxes = boxes[to_keep]
        scores = scores[to_keep]

        # BUGFIX: the original referenced undefined names h and w here, which
        # raised NameError on every call; scale only when the caller supplies
        # the decoded image size.
        if image_size is not None:
            h, w = image_size
            scaler = np.array([h, w, h, w], dtype='float32')
            boxes = boxes * scaler
        return boxes, scores
import numpy as np
import base64
from tensorflow.saved_model import simple_save
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model.signature_def_utils\
import predict_signature_def
from tensorflow.python.saved_model.tag_constants import SERVING
from tensorflow.python.saved_model.signature_constants\
import DEFAULT_SERVING_SIGNATURE_DEF_KEY
class FaceDetector:
    """Wraps a frozen face-detection graph and runs it on base64 jpeg input."""

    def __init__(self, model_path, gpu_memory_fraction=0.25, visible_device_list='0'):
        """
        Arguments:
            model_path: a string, path to a pb file.
            gpu_memory_fraction: a float number.
            visible_device_list: a string.
        """
        with tf.gfile.GFile(model_path, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())

        # BUGFIX: `n.op in ('Placeholder')` tested substring membership of a
        # string; compare for equality instead.
        nodes = [n.name + ' => ' + n.op for n in graph_def.node if n.op == 'Placeholder']
        print(nodes)

        # BUGFIX: the input placeholder, base64 decoding and the frozen graph
        # must all be built in the same graph the session runs; the original
        # mapped the input into the default graph but ran a session on a
        # fresh, unmapped graph, so feeding self.input_image failed.
        graph = tf.Graph()
        with graph.as_default():
            self.input_image = tf.placeholder(tf.string, shape=(None,), name="input_image")
            input_image_tensor = self.load_base64_tensor(self.input_image)
            tf.import_graph_def(graph_def, {'image_tensor': input_image_tensor}, name='import')
            self.output_ops = {
                "boxes": graph.get_tensor_by_name('import/boxes:0'),
                "scores": graph.get_tensor_by_name('import/scores:0'),
                "num_boxes": graph.get_tensor_by_name('import/num_boxes:0')
            }

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=gpu_memory_fraction,
            visible_device_list=visible_device_list
        )
        config_proto = tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False)
        self.sess = tf.Session(graph=graph, config=config_proto)
        self.graph = graph

    def __tf_jpeg_process(self, data):
        """Decode one jpeg byte string into a uint8 RGB image tensor."""
        # The whole jpeg encode/decode dance is neccessary to generate a result
        # that matches the original model's (caffe) preprocessing
        # (as good as possible)
        image = tf.image.decode_jpeg(data, channels=3,
                                     fancy_upscaling=True,
                                     dct_method="INTEGER_FAST")
        image = tf.image.convert_image_dtype(image, tf.uint8, saturate=True)
        return image

    def load_base64_tensor(self, _input):
        """Map a batch of web-safe base64 strings to a batch of uint8 images."""
        def decode_and_process(base64):
            _bytes = tf.decode_base64(base64)
            _image = self.__tf_jpeg_process(_bytes)
            return _image

        # we have to do some preprocessing with map_fn, since functions like
        # decode_*, resize_images and crop_to_bounding_box do not support
        # processing of batches
        image = tf.map_fn(decode_and_process, _input,
                          back_prop=False, dtype=tf.uint8)
        return image

    def export(self):
        """Export the wrapped graph as a SavedModel under models/1."""
        import os
        inputs = {'b64_image': self.input_image}
        export_path = os.path.join(tf.compat.as_bytes("models"), tf.compat.as_bytes(str("1")))
        builder = saved_model_builder.SavedModelBuilder(export_path)
        builder.add_meta_graph_and_variables(
            self.sess, [SERVING],
            signature_def_map={
                DEFAULT_SERVING_SIGNATURE_DEF_KEY: predict_signature_def(
                    inputs=inputs,
                    outputs=self.output_ops
                )
            }
        )
        builder.save()

    def detect(self, b64_image, score_threshold=0.5, image_size=None):
        """Detect faces on a batch (of one) base64-encoded jpeg image.

        Arguments:
            b64_image: iterable of web-safe base64 jpeg strings.
            score_threshold: minimum confidence for a detection to be kept.
            image_size: optional (height, width); when given, boxes are scaled
                to pixel coordinates, otherwise normalized boxes are returned.
        Returns:
            (boxes, scores) numpy arrays for the first image of the batch.
        """
        # BUGFIX: fetching a dict returns a dict; tuple-unpacking it binds the
        # keys (strings), not the fetched arrays.
        outputs = self.sess.run(
            self.output_ops, feed_dict={self.input_image: b64_image}
        )
        num_boxes = outputs['num_boxes'][0]
        boxes = outputs['boxes'][0][:num_boxes]
        scores = outputs['scores'][0][:num_boxes]

        to_keep = scores > score_threshold
        boxes = boxes[to_keep]
        scores = scores[to_keep]

        # BUGFIX: the original referenced undefined names h and w (NameError);
        # scale only when the caller supplies the decoded image size.
        if image_size is not None:
            h, w = image_size
            scaler = np.array([h, w, h, w], dtype='float32')
            boxes = boxes * scaler
        return boxes, scores
from . import db
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from . import login_manager
from datetime import datetime
from sqlalchemy import desc
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload a User from the id stored in the session."""
    return User.query.get(int(user_id))
class User(UserMixin,db.Model):
    """An authenticated blog user (SQLAlchemy model for table 'users')."""
    __tablename__ = 'users'
    id = db.Column(db.Integer,primary_key = True)
    username = db.Column(db.String(255))
    email = db.Column(db.String(255),unique = True,index = True)
    bio = db.Column(db.String(255))
    profile_pic_path = db.Column(db.String())
    # Holds only the werkzeug password hash; the plaintext is never stored.
    pass_secure = db.Column(db.String(255))
    posts = db.relationship('Post',backref = 'user',lazy = "dynamic")
    comments = db.relationship('Comment',backref = 'user',lazy="dynamic")

    @property
    def password(self):
        # The password is write-only by design; reading it is always an error.
        raise AttributeError('You cannot read the password attribute')

    @password.setter
    def password(self, password):
        # Assigning the password stores its salted hash, not the plaintext.
        self.pass_secure = generate_password_hash(password)

    def verify_password(self,password):
        """Return True if `password` matches the stored hash."""
        return check_password_hash(self.pass_secure,password)

    def __repr__(self):
        return f'User {self.username}'
class Post(db.Model):
    """A blog post authored by a User (table 'posts')."""
    __tablename__ = 'posts'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String)
    post = db.Column(db.String)
    posted = db.Column(db.DateTime, default=datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    comments = db.relationship('Comment', backref='post', lazy='dynamic')

    def save_post(self):
        """Persist this post to the database."""
        db.session.add(self)
        db.session.commit()

    def delete_post(self):
        """Delete this post from the database."""
        db.session.delete(self)
        db.session.commit()

    @classmethod
    def get_post(cls, id):
        """Return the post(s) with the given id, newest first.

        BUGFIX: a classmethod receives the class as its first argument; the
        original signature `get_post(id)` bound the class object to `id`, so
        every real call `Post.get_post(n)` raised a TypeError.
        """
        post = cls.query.filter_by(id=id).order_by(desc('posted')).all()
        return post

    @classmethod
    def all_posts(cls):
        """Return every post, newest first."""
        post = cls.query.order_by(desc('posted')).all()
        return post

    def __repr__(self):
        return f'Post {self.title}'
class Comment(db.Model):
    """A user comment attached to a Post (table 'comments')."""
    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True)
    comment = db.Column(db.String(255))
    posted = db.Column(db.DateTime, default=datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    post_id = db.Column(db.Integer, db.ForeignKey("posts.id"))

    def save_comment(self):
        """Persist this comment to the database."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_comments(cls, id):
        """Return all comments for the post with the given id.

        BUGFIX: the original filtered by the undefined name `post_id`, which
        raised NameError; the `id` argument is the post id to filter on.
        """
        comments = cls.query.filter_by(post_id=id).all()
        return comments

    def delete_comment(self):
        """Delete this comment from the database."""
        db.session.delete(self)
        db.session.commit()

    def __repr__(self):
        return f'Comment: {self.comment}'
class Subscriber(db.Model):
    """A newsletter subscriber's email address (table 'subscribers')."""
    __tablename__='subscribers'
    id=db.Column(db.Integer,primary_key=True)
    email = db.Column(db.String(255),unique=True,index=True)

    def save_subscriber(self):
        """Persist this subscriber to the database."""
        db.session.add(self)
        db.session.commit()

    def __repr__(self):
        return f'Subscriber {self.email}' | app/models.py | from . import db
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from . import login_manager
from datetime import datetime
from sqlalchemy import desc
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload a User from the id stored in the session."""
    return User.query.get(int(user_id))
class User(UserMixin,db.Model):
    """An authenticated blog user (SQLAlchemy model for table 'users')."""
    __tablename__ = 'users'
    id = db.Column(db.Integer,primary_key = True)
    username = db.Column(db.String(255))
    email = db.Column(db.String(255),unique = True,index = True)
    bio = db.Column(db.String(255))
    profile_pic_path = db.Column(db.String())
    # Holds only the werkzeug password hash; the plaintext is never stored.
    pass_secure = db.Column(db.String(255))
    posts = db.relationship('Post',backref = 'user',lazy = "dynamic")
    comments = db.relationship('Comment',backref = 'user',lazy="dynamic")

    @property
    def password(self):
        # The password is write-only by design; reading it is always an error.
        raise AttributeError('You cannot read the password attribute')

    @password.setter
    def password(self, password):
        # Assigning the password stores its salted hash, not the plaintext.
        self.pass_secure = generate_password_hash(password)

    def verify_password(self,password):
        """Return True if `password` matches the stored hash."""
        return check_password_hash(self.pass_secure,password)

    def __repr__(self):
        return f'User {self.username}'
class Post(db.Model):
    """A blog post authored by a User (table 'posts')."""
    __tablename__ = 'posts'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String)
    post = db.Column(db.String)
    posted = db.Column(db.DateTime, default=datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    comments = db.relationship('Comment', backref='post', lazy='dynamic')

    def save_post(self):
        """Persist this post to the database."""
        db.session.add(self)
        db.session.commit()

    def delete_post(self):
        """Delete this post from the database."""
        db.session.delete(self)
        db.session.commit()

    @classmethod
    def get_post(cls, id):
        """Return the post(s) with the given id, newest first.

        BUGFIX: a classmethod receives the class as its first argument; the
        original signature `get_post(id)` bound the class object to `id`, so
        every real call `Post.get_post(n)` raised a TypeError.
        """
        post = cls.query.filter_by(id=id).order_by(desc('posted')).all()
        return post

    @classmethod
    def all_posts(cls):
        """Return every post, newest first."""
        post = cls.query.order_by(desc('posted')).all()
        return post

    def __repr__(self):
        return f'Post {self.title}'
class Comment(db.Model):
    """A user comment attached to a Post (table 'comments')."""
    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True)
    comment = db.Column(db.String(255))
    posted = db.Column(db.DateTime, default=datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    post_id = db.Column(db.Integer, db.ForeignKey("posts.id"))

    def save_comment(self):
        """Persist this comment to the database."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_comments(cls, id):
        """Return all comments for the post with the given id.

        BUGFIX: the original filtered by the undefined name `post_id`, which
        raised NameError; the `id` argument is the post id to filter on.
        """
        comments = cls.query.filter_by(post_id=id).all()
        return comments

    def delete_comment(self):
        """Delete this comment from the database."""
        db.session.delete(self)
        db.session.commit()

    def __repr__(self):
        return f'Comment: {self.comment}'
class Subscriber(db.Model):
    """A newsletter subscriber's email address (table 'subscribers')."""
    __tablename__='subscribers'
    id=db.Column(db.Integer,primary_key=True)
    email = db.Column(db.String(255),unique=True,index=True)

    def save_subscriber(self):
        """Persist this subscriber to the database."""
        db.session.add(self)
        db.session.commit()

    def __repr__(self):
        return f'Subscriber {self.email}' | 0.418459 | 0.053428
import json
import os
import time
import threading
from collections import namedtuple
from types import FunctionType
from mindspore import log as logger
from mindspore._c_expression import MSContext, ms_ctx_param
from mindspore._checkparam import args_type_check, Validator, args_unreset_check
from mindspore.parallel._auto_parallel_context import _set_auto_parallel_context, _get_auto_parallel_context, \
_reset_auto_parallel_context
from mindspore.parallel._ps_context import _set_ps_context, _get_ps_context, _reset_ps_context
from .default_config import __device_target__, __package_name__
# Public API of this module.
__all__ = ['GRAPH_MODE', 'PYNATIVE_MODE', 'set_context', 'get_context', 'set_auto_parallel_context',
           'get_auto_parallel_context', 'reset_auto_parallel_context', 'ParallelMode', 'set_ps_context',
           'get_ps_context', 'reset_ps_context', 'set_fl_context', 'get_fl_context']

# Execution modes: whole-graph compilation vs. eager op-by-op execution.
GRAPH_MODE = 0
PYNATIVE_MODE = 1
_DEVICE_APP_MEMORY_SIZE = 31 # The max memory size of graph plus variable.
# Validates memory-size strings such as "5GB" or "3.5GB".
_re_pattern = r'[1-9][0-9]*(\.)?[0-9]*GB|0\.[0-9]*GB'
# Module-level _Context singleton, created lazily by _context().
_k_context = None
def _make_directory(path):
    """Create `path` (including parents) if needed and return its real path.

    Args:
        path (str): directory path; must be a non-empty string.

    Returns:
        str, the absolute (real) path of the directory.

    Raises:
        ValueError: if `path` is not a non-empty string or cannot be created.
    """
    if path is None or not isinstance(path, str) or path.strip() == "":
        raise ValueError(f"For 'context.set_context', the 'save_graphs_path' or the 'print_file_path' is invalid "
                         f"type, it should be Non-empty string, but got '{path}'.")

    # convert the relative paths
    path = os.path.realpath(path)
    logger.debug("The absolute path is %r", path)

    # check whether the path is already existed and has written permissions
    if os.path.exists(path):
        return path

    # All exceptions need to be caught because create directory maybe have some limit(permissions)
    logger.debug("The directory(%s) doesn't exist, will create it", path)
    try:
        # exist_ok guards against the directory being created concurrently
        # between the exists() check above and this call.
        os.makedirs(path, exist_ok=True)
    except PermissionError as e:
        # BUGFIX: the original message contained a stray quote ("'{path}''").
        logger.critical(f"No write permission on the directory '{path}', error = {e}")
        raise ValueError(e.__str__() + f"\nNo write permission on the directory '{path}'.")
    return path
def _get_print_file_name(file_name):
    """Add timestamp suffix to file name. Rename the file name: file_name + "." + time(seconds)."""
    time_second = str(int(time.time()))
    file_name = file_name + "." + time_second
    if os.path.exists(file_name):
        # BUGFIX: the original built this exception but never raised it, so an
        # existing file would have been silently clobbered.
        raise ValueError("This file {} already exists.".format(file_name))
    return file_name
class _ThreadLocalInfo(threading.local):
    """Per-thread storage for context attributes."""

    def __init__(self):
        super(_ThreadLocalInfo, self).__init__()
        # Defaults: keep class names in scopes; debug runtime disabled.
        self._reserve_class_name_in_scope = True
        self.debug_runtime = False

    @property
    def reserve_class_name_in_scope(self):
        """Whether the network class name is saved in the scope."""
        return self._reserve_class_name_in_scope

    @reserve_class_name_in_scope.setter
    def reserve_class_name_in_scope(self, value):
        """Set whether the network class name is saved in the scope."""
        if not isinstance(value, bool):
            raise ValueError("For '_ThreadLocalInfo', the type of the property 'reserve_class_name_in_scope' must "
                             "be bool, but got {}.".format(type(value)))
        self._reserve_class_name_in_scope = value
# Entry pushed onto _ContextSwitchInfo.context_stack: records the target mode
# and the callable (if any) that performed the switch.
_ContextRecord = namedtuple(
    "_ContextRecord", ["is_pynative_mode", "switch_context_fn"])
class _ContextSwitchInfo(threading.local):
    """
    Per-thread record of context switch information.

    Args:
        is_pynative (bool): Whether to adopt the PyNative mode.
    """

    def __init__(self, is_pynative):
        super(_ContextSwitchInfo, self).__init__()
        self.context_stack = []
        if is_pynative:
            self.push(True, None)

    def push(self, is_pynative, switch_context_fn):
        """
        Push a context switch record onto the stack.

        Args:
            is_pynative (bool): Whether context switch to PyNative mode.
            switch_context_fn (Function): A callable that executes the context switch.
        """
        # Only plain functions are invoked eagerly before being recorded.
        if isinstance(switch_context_fn, FunctionType):
            switch_context_fn()
        record = _ContextRecord(is_pynative, switch_context_fn)
        self.context_stack.append(record)

    def pop(self):
        """Drop the most recent context switch record."""
        self.context_stack.pop()
class _Context:
    """
    _Context is the environment in which operations are executed
    Note:
        Create a context through instantiating Context object is not recommended.
        should use context() to get the context since Context is a singleton.
    """
    _instance = None
    _instance_lock = threading.Lock()

    def __init__(self):
        # Per-thread flags plus the per-thread stack of mode switches.
        self._thread_local_info = _ThreadLocalInfo()
        self._context_switches = _ContextSwitchInfo(False)
        # Handle to the native context singleton that actually stores params.
        self._context_handle = MSContext.get_instance()
        self.enable_compile_cache = None

    def __new__(cls, *args, **kwargs):
        # Lazily create the singleton instance under a class-level lock.
        # NOTE(review): the None check is not repeated inside the lock, so two
        # racing threads could each allocate an instance - confirm intent.
        if cls._instance is None:
            cls._instance_lock.acquire()
            cls._instance = object.__new__(cls)
            cls._instance_lock.release()
        return cls._instance

    def __getattribute__(self, attr):
        # Intercept every attribute access so a missing native handle fails loudly.
        value = object.__getattribute__(self, attr)
        if attr == "_context_handle" and value is None:
            raise ValueError("Context handle is none in context!!!")
        return value

    def get_param(self, param):
        """Read a parameter value from the native context handle."""
        return self._context_handle.get_param(param)

    def set_param(self, param, value):
        """Write a parameter value to the native context handle."""
        self._context_handle.set_param(param, value)

    def get_mode(self):
        """Get current mode."""
        return self.get_param(ms_ctx_param.mode)

    def set_mode(self, mode):
        """
        Switch between Graph mode and PyNative mode.
        Args:
            mode (int): GRAPH_MODE or PYNATIVE_MODE.
        """
        if mode == PYNATIVE_MODE:
            if self.enable_debug_runtime:
                self.set_backend_policy("vm")
            # PyNative is only compatible with these two parallel modes.
            parallel_mode = _get_auto_parallel_context("parallel_mode")
            if parallel_mode not in (ParallelMode.DATA_PARALLEL, ParallelMode.STAND_ALONE):
                raise ValueError(f"Pynative Only support STAND_ALONE and DATA_PARALLEL for ParallelMode,"
                                 f"but got {parallel_mode.upper()}.")
            self._context_switches.push(True, None)
        elif mode == GRAPH_MODE:
            if self.enable_debug_runtime:
                self.set_backend_policy("ge")
            self._context_switches.push(False, None)
        else:
            raise ValueError(f"For 'context.set_context', the argument 'mode' should be context.GRAPH_MODE (0) "
                             f"or context.PYNATIVE_MODE (1), but got {mode}.")
        self.set_param(ms_ctx_param.mode, mode)

    def set_backend_policy(self, policy):
        """Select the execution backend; one of 'ge', 'vm' or 'ms'."""
        success = self._context_handle.set_backend_policy(policy)
        if not success:
            raise RuntimeError("Backend policy must be one of values in ['ge', 'vm', 'ms']. "
                               "But got {}.".format(policy))

    def set_save_graphs_path(self, save_graphs_path):
        """Set (creating it if needed) the directory graphs are dumped to."""
        self.set_param(ms_ctx_param.save_graphs_path, _make_directory(save_graphs_path))

    def set_device_target(self, target):
        """Set the target device; 'Davinci' is accepted as an alias of 'Ascend'."""
        valid_targets = ["CPU", "GPU", "Ascend", "Davinci"]
        if not target in valid_targets:
            raise ValueError(f"For 'context.set_context', the argument 'device_target' must be one of "
                             f"{valid_targets}, but got {target}.")
        if target == "Davinci":
            target = "Ascend"
        self.set_param(ms_ctx_param.device_target, target)
        if self.enable_debug_runtime and target == "CPU":
            self.set_backend_policy("vm")

    def set_auto_tune_mode(self, tune_mode):
        """Set the operator auto-tune mode (combinations of RL and GA)."""
        candidate = ["NO_TUNE", "RL", "GA", "RL,GA", "GA,RL"]
        if tune_mode in candidate:
            self.set_param(ms_ctx_param.tune_mode, tune_mode)
        else:
            raise ValueError(f"For 'context.set_context', the argument 'auto_tune_mode' must be in "
                             f"['NO_TUNE', 'RL', 'GA', 'RL,GA', 'GA,RL'], but got {tune_mode}.")

    def set_device_id(self, device_id):
        """Set the device id; the valid range is [0, 4095]."""
        if device_id < 0 or device_id > 4095:
            raise ValueError(f"For 'context.set_context', the argument 'device_id' must be in range [0, 4095], "
                             f"but got {device_id}.")
        self.set_param(ms_ctx_param.device_id, device_id)

    def set_max_call_depth(self, max_call_depth):
        """Set the maximum function call depth; must be positive."""
        if max_call_depth <= 0:
            raise ValueError(f"For 'context.set_context', the argument 'max_call_depth' must be greater than 0, "
                             f"but got {max_call_depth}.")
        self.set_param(ms_ctx_param.max_call_depth, max_call_depth)

    def set_profiling_options(self, option):
        """Set the profiling option string; content is validated by the backend."""
        if not isinstance(option, str):
            raise TypeError("For 'context.set_context', the argument 'profiling_option' must be string, "
                            "but got {}.".format(type(option)))
        self.set_param(ms_ctx_param.profiling_options, option)

    def set_variable_memory_max_size(self, variable_memory_max_size):
        """set values of variable_memory_max_size and graph_memory_max_size"""
        if not Validator.check_str_by_regular(variable_memory_max_size, _re_pattern):
            raise ValueError("For 'context.set_context', the argument 'variable_memory_max_size' should be in correct"
                             " format! It must be a string ending with 'GB', in addition to that, it must contain "
                             "only numbers or decimal points, such as \"5GB\" or \"3.5GB\", but got {}."
                             .format(variable_memory_max_size))
        if int(variable_memory_max_size[:-2]) > _DEVICE_APP_MEMORY_SIZE:
            raise ValueError("For 'context.set_context', the argument 'variable_memory_max_size' should not be "
                             "greater than 31GB, but got {}.".format(variable_memory_max_size))
        # The device budget (_DEVICE_APP_MEMORY_SIZE) is split between variable
        # memory and graph memory; both are passed down as byte-expression strings.
        variable_memory_max_size_ = variable_memory_max_size[:-2] + " * 1024 * 1024 * 1024"
        graph_memory_max_size = _DEVICE_APP_MEMORY_SIZE - int(variable_memory_max_size[:-2])
        graph_memory_max_size_ = str(graph_memory_max_size) + " * 1024 * 1024 * 1024"
        self.set_param(ms_ctx_param.variable_memory_max_size, variable_memory_max_size_)
        self.set_param(ms_ctx_param._graph_memory_max_size, graph_memory_max_size_)

    def set_max_device_memory(self, max_device_memory):
        """Set the maximum device memory, given as a "<number>GB" string."""
        if not Validator.check_str_by_regular(max_device_memory, _re_pattern):
            raise ValueError("For 'context.set_context', the argument 'max_device_memory' should be in correct "
                             " format! It must be a string ending with 'GB', in addition to that, it must contain "
                             "only numbers or decimal points, such as \"5GB\" or \"3.5GB\", but got {}."
                             .format(max_device_memory))
        max_device_memory_value = float(max_device_memory[:-2])
        if max_device_memory_value == 0:
            raise ValueError("For 'context.set_context', the argument 'max_device_memory' should not be \"0GB\".")
        self.set_param(ms_ctx_param.max_device_memory, max_device_memory_value)

    def set_print_file_path(self, file_path):
        """Add timestamp suffix to file name. Sets print file path."""
        print_file_path = os.path.realpath(file_path)
        if os.path.isdir(print_file_path):
            raise IOError("For 'context.set_context', the argument 'print_file_path' should be file path, "
                          "but got directory {}.".format(file_path))
        if os.path.exists(print_file_path):
            # Existing file: append a timestamp so old print data isn't clobbered.
            _path, _file_name = os.path.split(print_file_path)
            path = _make_directory(_path)
            file_name = _get_print_file_name(_file_name)
            full_file_name = os.path.join(path, file_name)
        else:
            full_file_name = print_file_path
        self.set_param(ms_ctx_param.print_file_path, full_file_name)

    def set_env_config_path(self, env_config_path):
        """Check and set env_config_path."""
        if not self._context_handle.enable_dump_ir():
            raise ValueError("The 'env_config_path' is not supported, please enable ENABLE_DUMP_IR "
                             "with '-D on' and recompile source.")
        env_config_path = os.path.realpath(env_config_path)
        if not os.path.isfile(env_config_path):
            raise ValueError("For 'context.set_context', the 'env_config_path' file %r is not exists, "
                             "please check whether 'env_config_path' is correct." % env_config_path)
        try:
            # Parse once up front so a malformed json file fails here, not later.
            with open(env_config_path, 'r') as f:
                json.load(f)
        except (TypeError, ValueError) as exo:
            raise ValueError(str(exo) + "\nFor 'context.set_context', open or load the 'env_config_path' file {} "
                             "failed, please check whether 'env_config_path' is json file and correct, or may not "
                             "have permission to read it.".format(env_config_path))
        self.set_param(ms_ctx_param.env_config_path, env_config_path)

    # Dispatch table mapping set_context() keyword names to the (unbound)
    # setter methods above.
    setters = {
        'mode': set_mode,
        'save_graphs_path': set_save_graphs_path,
        'device_target': set_device_target,
        'device_id': set_device_id,
        'auto_tune_mode': set_auto_tune_mode,
        'max_call_depth': set_max_call_depth,
        'profiling_options': set_profiling_options,
        'variable_memory_max_size': set_variable_memory_max_size,
        'max_device_memory': set_max_device_memory,
        'print_file_path': set_print_file_path,
        'env_config_path': set_env_config_path
    }

    @property
    def reserve_class_name_in_scope(self):
        """Get whether to save the network class name in the scope."""
        return self._thread_local_info.reserve_class_name_in_scope

    @reserve_class_name_in_scope.setter
    def reserve_class_name_in_scope(self, reserve_class_name_in_scope):
        """Set whether to save the network class name in the scope."""
        self._thread_local_info.reserve_class_name_in_scope = reserve_class_name_in_scope

    @property
    def enable_ge(self):
        # True when the active backend policy is GraphEngine.
        return self._context_handle.get_backend_policy() == 'ge'

    @property
    def enable_debug_runtime(self):
        # Thread-local flag toggled by _context() during module setup.
        return self._thread_local_info.debug_runtime

    @enable_debug_runtime.setter
    def enable_debug_runtime(self, enable):
        thread_info = self._thread_local_info
        thread_info.debug_runtime = enable
def _context():
    """
    Get the global _context, if context is not created, create a new one.

    Returns:
        _Context, the global context in PyNative mode.
    """
    global _k_context
    if _k_context is not None:
        return _k_context

    # First use: pick the backend from the packaged default config, falling
    # back to the debug backend when the config cannot be imported.
    default_backend = 'debug'
    try:
        from mindspore import default_config
        default_backend = default_config.__backend__
    except ImportError:
        logger.error("import default config fail")

    _k_context = _Context()
    _k_context.enable_debug_runtime = False
    if default_backend == 'debug':
        _k_context.enable_debug_runtime = True
        default_backend = 'vm'
    _k_context.set_backend_policy(default_backend)
    return _k_context
@args_type_check(device_num=int, global_rank=int, gradients_mean=bool, gradient_fp32_sync=bool, parallel_mode=str,
                 search_mode=str, parameter_broadcast=bool, strategy_ckpt_load_file=str,
                 strategy_ckpt_save_file=str, full_batch=bool, enable_parallel_optimizer=bool,
                 all_reduce_fusion_config=list, pipeline_stages=int, grad_accumulation_step=int,
                 parallel_optimizer_config=dict)
def set_auto_parallel_context(**kwargs):
    r"""
    Set auto parallel context, which is valid only for Ascend and GPU target.

    Auto parallel context should be configured before the initialization of your network.

    Note:
        Attribute name is required for setting attributes.
        If a program has tasks on different parallel modes, before setting a new parallel mode for the
        next task, interface mindspore.context.reset_auto_parallel_context() should be called to reset
        the configuration.
        Setting or changing parallel modes must be called before creating any Initializer, otherwise,
        it may have RuntimeError when compiling the network.

        Some configurations are parallel mode specific, see the below table for details:

        =========================== ===========================
        Common                      AUTO_PARALLEL
        =========================== ===========================
        device_num                  gradient_fp32_sync
        global_rank                 loss_repeated_mean
        gradients_mean              search_mode
        parallel_mode               strategy_ckpt_load_file
        all_reduce_fusion_config    strategy_ckpt_save_file
        enable_parallel_optimizer   dataset_strategy
        parallel_optimizer_config   pipeline_stages
        \                           grad_accumulation_step
        =========================== ===========================

    Args:
        device_num (int): Available device number, the value must be in [1, 4096]. Default: 1.
        global_rank (int): Global rank id, the value must be in [0, 4095]. Default: 0.
        gradients_mean (bool): Whether to perform mean operator after allreduce of gradients.
            "stand_alone" do not support gradients_mean. Default: False.
        gradient_fp32_sync (bool): Run allreduce of gradients in fp32. "stand_alone", "data_parallel"
            and "hybrid_parallel" do not support gradient_fp32_sync. Default: True.
        parallel_mode (str): There are five kinds of parallel modes, "stand_alone", "data_parallel",
            "hybrid_parallel", "semi_auto_parallel" and "auto_parallel". Note the pynative mode only supports
            the "stand_alone" and "data_parallel" mode. Default: "stand_alone".

            - stand_alone: Only one processor is working.

            - data_parallel: Distributes the data across different processors.

            - hybrid_parallel: Achieves data parallelism and model parallelism manually.

            - semi_auto_parallel: Achieves data and model parallelism by setting parallel strategies.

            - auto_parallel: Achieving parallelism automatically.
        search_mode (str): There are three kinds of shard strategy search modes: "recursive_programming",
            "dynamic_programming" and "sharding_propagation". Default: "dynamic_programming".

            - recursive_programming: Recursive programming search mode.

            - dynamic_programming: Dynamic programming search mode.

            - sharding_propagation: Propagate shardings from configured ops to non-configured ops.
        parameter_broadcast (bool): Whether to broadcast parameters before training. Before training, in order to have
            the same network initialization parameter values for all devices, broadcast the parameters
            on device 0 to other devices. Parameter broadcasting in different parallel modes is different,
            data_parallel mode, all parameters are broadcast except for the parameter whose attribute
            layerwise_parallel is True. Hybrid_parallel, semi_auto_parallel and auto_parallel mode, the
            segmented parameters do not participate in broadcasting. Default: False.
        strategy_ckpt_load_file (str): The path to load parallel strategy checkpoint. Default: ''
        strategy_ckpt_save_file (str): The path to save parallel strategy checkpoint. Default: ''
        full_batch (bool): If you load whole batch datasets in auto_parallel mode, this parameter
            should be set as True. Default: False. The interface is not to be recommended currently,
            it is better using 'dataset_strategy' to replace it.
        dataset_strategy (Union[str, tuple]): Dataset sharding strategy. Default: "data_parallel".
            dataset_strategy="data_parallel" is equal to full_batch=False, dataset_strategy="full_batch" is
            equal to full_batch=True. For dataset load into net by model parallel strategy likes
            ds_stra ((1, 8), (1, 8)), it requires using set_auto_parallel_context(dataset_strategy=ds_stra).
        enable_parallel_optimizer (bool): This is a developing feature, which shards the weight update computation for
            data parallel training in the benefit of time and memory saving. Currently, auto and semi auto
            parallel mode support all optimizers in both Ascend and GPU. Data parallel mode only supports
            `Lamb` and `AdamWeightDecay` in Ascend. Default: False.
        all_reduce_fusion_config (list): Set allreduce fusion strategy by parameters indices. Only support ReduceOp.SUM
            and HCCL_WORLD_GROUP/NCCL_WORLD_GROUP. No Default, if it is not set, the fusion is closed.
        pipeline_stages (int): Set the stage information for pipeline parallel. This indicates how the devices are
            distributed alone in the pipeline. The total devices will be divided into 'pipeline_stages'
            stages. Currently, this could only be used when parallel mode semi_auto_parallel is enabled.
            Default: 1.
        grad_accumulation_step (int): Set the accumulation steps of gradients in auto and semi auto parallel mode.
            This should be a positive int. Default: 1.
        parallel_optimizer_config (dict): A dict contains the keys and values for setting the parallel optimizer
            configure. The configure provides more detailed behavior control about parallel training
            when parallel optimizer is enabled. Currently it supports the key `gradient_accumulation_shard`.
            The configure will be effective when we use
            context.set_auto_parallel_context(enable_parallel_optimizer=True).
            It supports the following keys.

            - gradient_accumulation_shard: If true, the accumulation gradient parameters will be
              sharded across the data parallel devices. This will
              introduce additional communication(ReduceScatter) at
              each step when accumulate the gradients, but saves a
              lot of device memories, thus can make model be trained
              with larger batch size. This configure is effective only
              when the model runs on pipeline training or gradient
              accumulation with data parallel.

    Raises:
        ValueError: If input key is not attribute in auto parallel context.

    Examples:
        >>> from mindspore import context
        >>> context.set_auto_parallel_context(device_num=8)
        >>> context.set_auto_parallel_context(global_rank=0)
        >>> context.set_auto_parallel_context(gradients_mean=True)
        >>> context.set_auto_parallel_context(gradient_fp32_sync=False)
        >>> context.set_auto_parallel_context(parallel_mode="auto_parallel")
        >>> context.set_auto_parallel_context(search_mode="dynamic_programming")
        >>> context.set_auto_parallel_context(parameter_broadcast=False)
        >>> context.set_auto_parallel_context(strategy_ckpt_load_file="./strategy_stage1.ckpt")
        >>> context.set_auto_parallel_context(strategy_ckpt_save_file="./strategy_stage1.ckpt")
        >>> context.set_auto_parallel_context(dataset_strategy=((1, 8), (1, 8)))
        >>> context.set_auto_parallel_context(enable_parallel_optimizer=False)
        >>> context.set_auto_parallel_context(all_reduce_fusion_config=[8, 160])
        >>> context.set_auto_parallel_context(pipeline_stages=2)
        >>> parallel_config = {"gradient_accumulation_shard": True}
        >>> context.set_auto_parallel_context(parallel_optimizer_config=parallel_config, enable_parallel_optimizer=True)
    """
    _set_auto_parallel_context(**kwargs)
def get_auto_parallel_context(attr_key):
    """
    Get auto parallel context attribute value according to the key.

    Please refer to `set_auto_parallel_context` for the candidate keys.

    Args:
        attr_key (str): The key of the attribute.

    Returns:
        Returns attribute value according to the key.

    Raises:
        ValueError: If input key is not an attribute in auto parallel context.

    Examples:
        >>> from mindspore import context
        >>> parallel_mode = context.get_auto_parallel_context("parallel_mode")
        >>> dataset_strategy = context.get_auto_parallel_context("dataset_strategy")
    """
    return _get_auto_parallel_context(attr_key)
def reset_auto_parallel_context():
    """
    Reset auto parallel context attributes to the default values.

    See `set_auto_parallel_context` for the meaning of each attribute.

    - device_num: 1.
    - global_rank: 0.
    - gradients_mean: False.
    - gradient_fp32_sync: True.
    - parallel_mode: 'stand_alone'.
    - search_mode: 'dynamic_programming'.
    - parameter_broadcast: False.
    - strategy_ckpt_load_file: ''.
    - strategy_ckpt_save_file: ''.
    - full_batch: False.
    - enable_parallel_optimizer: False.
    - pipeline_stages: 1.
    """
    _reset_auto_parallel_context()
def _check_target_specific_cfgs(device, arg_key):
"""Checking whether a config is suitable for a specified device"""
device_cfgs = {
'enable_dump': ['Ascend'],
'save_dump_path': ['Ascend'],
'enable_graph_kernel': ['Ascend', 'GPU', 'CPU'],
'graph_kernel_flags': ['Ascend', 'GPU', 'CPU'],
'enable_reduce_precision': ['Ascend'],
'enable_profiling': ['Ascend'],
'profiling_options': ['Ascend'],
'print_file_path': ['Ascend'],
'variable_memory_max_size': ['Ascend'],
'auto_tune_mode': ['Ascend'],
'max_device_memory': ['GPU']
}
# configs not in map device_cfgs are supposed to be suitable for all devices
if not arg_key in device_cfgs:
return True
supported_devices = device_cfgs[arg_key]
if device in supported_devices:
return True
logger.warning(f"Config '{arg_key}' only supports devices in {supported_devices}, current device is '{device}'"
", ignore it.")
return False
@args_unreset_check(device_id=int, variable_memory_max_size=str, max_device_memory=str)
@args_type_check(mode=int, precompile_only=bool, device_target=str, device_id=int, save_graphs=bool,
                 save_graphs_path=str, enable_dump=bool, auto_tune_mode=str,
                 save_dump_path=str, enable_reduce_precision=bool, variable_memory_max_size=str,
                 enable_profiling=bool, profiling_options=str, enable_auto_mixed_precision=bool,
                 enable_graph_kernel=bool, reserve_class_name_in_scope=bool, check_bprop=bool,
                 max_device_memory=str, print_file_path=str, enable_sparse=bool, max_call_depth=int,
                 env_config_path=str, graph_kernel_flags=str, enable_compile_cache=bool,
                 compile_cache_path=str, grad_for_scalar=bool, pynative_synchronize=bool)
def set_context(**kwargs):
    """
    Set context for running environment.

    Context should be configured before running your program. If there is no configuration,
    it will be automatically set according to the device target by default.

    Note:
        Attribute name is required for setting attributes.
        The mode is not recommended to be changed after net was initialized because the implementations of some
        operations are different in graph mode and pynative mode. Default: GRAPH_MODE.

    Some configurations are device specific, see the below table for details:

    +-------------------------+------------------------------+----------------------------+
    | Function Classification | Configuration Parameters     | Hardware Platform Support  |
    +=========================+==============================+============================+
    | System Configuration    | device_id                    | CPU/GPU/Ascend             |
    |                         +------------------------------+----------------------------+
    |                         | device_target                | CPU/GPU/Ascend             |
    |                         +------------------------------+----------------------------+
    |                         | max_device_memory            | GPU                        |
    |                         +------------------------------+----------------------------+
    |                         | variable_memory_max_size     | Ascend                     |
    +-------------------------+------------------------------+----------------------------+
    | Debug Configuration     | save_graphs                  | CPU/GPU/Ascend             |
    |                         +------------------------------+----------------------------+
    |                         | save_graphs_path             | CPU/GPU/Ascend             |
    |                         +------------------------------+----------------------------+
    |                         | enable_dump                  | Ascend                     |
    |                         +------------------------------+----------------------------+
    |                         | save_dump_path               | Ascend                     |
    |                         +------------------------------+----------------------------+
    |                         | enable_profiling             | Ascend                     |
    |                         +------------------------------+----------------------------+
    |                         | profiling_options            | Ascend                     |
    |                         +------------------------------+----------------------------+
    |                         | print_file_path              | Ascend                     |
    |                         +------------------------------+----------------------------+
    |                         | env_config_path              | CPU/GPU/Ascend             |
    |                         +------------------------------+----------------------------+
    |                         | precompile_only              | CPU/GPU/Ascend             |
    |                         +------------------------------+----------------------------+
    |                         | reserve_class_name_in_scope  | CPU/GPU/Ascend             |
    |                         +------------------------------+----------------------------+
    |                         | pynative_synchronize         | GPU/Ascend                 |
    +-------------------------+------------------------------+----------------------------+
    | Executive Control       | mode                         | CPU/GPU/Ascend             |
    |                         +------------------------------+----------------------------+
    |                         | enable_graph_kernel          | Ascend/GPU                 |
    |                         +------------------------------+----------------------------+
    |                         | graph_kernel_flags           | Ascend/GPU                 |
    |                         +------------------------------+----------------------------+
    |                         | enable_reduce_precision      | Ascend                     |
    |                         +------------------------------+----------------------------+
    |                         | auto_tune_mode               | Ascend                     |
    |                         +------------------------------+----------------------------+
    |                         | check_bprop                  | CPU/GPU/Ascend             |
    |                         +------------------------------+----------------------------+
    |                         | max_call_depth               | CPU/GPU/Ascend             |
    |                         +------------------------------+----------------------------+
    |                         | enable_sparse                | CPU/GPU/Ascend             |
    |                         +------------------------------+----------------------------+
    |                         | grad_for_scalar              | CPU/GPU/Ascend             |
    |                         +------------------------------+----------------------------+
    |                         | enable_compile_cache         | CPU/GPU/Ascend             |
    |                         +------------------------------+----------------------------+
    |                         | compile_cache_path           | CPU/GPU/Ascend             |
    +-------------------------+------------------------------+----------------------------+

    Args:
        device_id (int): ID of the target device, the value must be in [0, device_num_per_host-1],
            while device_num_per_host should be no more than 4096. Default: 0.
        device_target (str): The target device to run, support "Ascend", "GPU", and "CPU".
            If device target is not set, the version of MindSpore package is used.
        max_device_memory (str): Set the maximum memory available for devices.
            Currently, it is only supported on GPU. The format is "xxGB". Default: "1024GB".
            The actual used memory size is the minimum of the available memory of the device and max_device_memory.
        variable_memory_max_size (str): Set the maximum size of the variable memory max size. Default: "30GB".
            After this parameter is set, the maximum memory used by the framework is restricted to the configured value.
        save_graphs (bool): Whether to save graphs. Default: False.
            When the `save_graphs` attribute is set as True, attribute of `save_graphs_path` is used to set the
            intermediate compilation graph storage path. By default, the graphs are saved in the current directory.
        save_graphs_path (str): Path to save graphs. Default: ".".
            If the specified directory does not exist, the system will automatically create the directory.
            During distributed training, graphs will be saved to the directory of
            `save_graphs_path/rank_${rank_id}/`. `rank_id` is the ID of the current device in the cluster.
        enable_dump (bool): This parameter is deprecated, and will be deleted in the next version.
        save_dump_path (str): This parameter is deprecated, and will be deleted in the next version.
        enable_profiling (bool): This parameter is deprecated, and will be deleted in the next version.
            Please use mindspore.profiler.Profiler api instead.
        profiling_options (str): This parameter is deprecated, and will be deleted in the next version.
            Please use mindspore.profiler.Profiler api instead.
        print_file_path (str): The path of saving print data. If this parameter is set, print data is saved to
            a file by default, and print_file_path is not set, the screen will be displayed.
            If the saved file already exists, the timestamp suffix will be added to the file. Saving data to a file
            solves the problem of data loss in screen printing when a large amount of data is generated.
            If it is not set, an error will be reported: prompt to set the upper absolute path.
        env_config_path (str): Config path for DFX.
            Through context.set_context(env_config_path="./mindspore_config.json")

            configure RDR:

            - enable: controls whether the RDR is enabled to collect the key data during training and
              save key data in the fault scenario. When set to true, the RDR will be turned on.
              When set to false, the RDR will be turned off.

            - mode: sets the mode of RDR on exporting data. When set to 1, the RDR only exports data
              in the fault scenario. When set to 2, the RDR exports data in the fault scenario and the
              normal end scenario. Default is 1.

            - path: sets the path where RDR saves data. The current path must be absolute.

            Memory reuse:

            - mem_Reuse: controls whether the memory reuse function is turned on. When set to True,
            - the memory reuse function is turned on. When set to False, the memory reuse function is turned off.

        precompile_only (bool): Whether to only precompile the network. Default: False.
            If set to True, the network will only be compiled, not executed.
        reserve_class_name_in_scope (bool) : Whether to save the network class name in the scope. Default: True.
            Each node has a scope. A scope of a subnode is the name of its parent node. If reserve_class_name_in_scope
            is set to True, the class name will be saved after keyword 'net-' in the scope.
            For example:

            Default/net-Net1/net-Net2 (reserve_class_name_in_scope=True)

            Default/net/net (reserve_class_name_in_scope=False)

        pynative_synchronize (bool): Whether to enable synchronous execution of the device in PyNative mode.
            Default: False. When the value is set to False, the operator is executed asynchronously on the device.
            When an error occurs in the execution of the operator, the specific error script code location cannot
            be located, when the value is set to True, the operator is executed synchronously on the device. It will
            reduce the execution performance of the program. At this time, when an error occurs in the execution of
            the operator, the location of the error script code can be located according to the call stack of the error.
        mode (int): Running in GRAPH_MODE(0) or PYNATIVE_MODE(1). Default: GRAPH_MODE(0).
            GRAPH_MODE or PYNATIVE_MODE can be set by `mode` attribute and both modes support all backends, default
            mode is GRAPH_MODE.
        enable_graph_kernel (bool): Whether to enable graph kernel fusion to optimize network execution performance.
            Default: False.
            Indicates whether to enable image-computing convergence to optimize network execution performance.
            If enable_graph_kernel is set to True, acceleration can be enabled.
            For details of graph kernel fusion, please check
            `Enabling Graph Kernel Fusion <https://www.mindspore.cn/docs/programming_guide
            /en/master/enable_graph_kernel_fusion.html>`_.
        graph_kernel_flags (str):
            Optimization options of graph kernel fusion, and the priority is higher when it conflicts
            with enable_graph_kernel. Only for experienced users.
            For example, context.set_context(graph_kernel_flags="--opt_level=2 --dump_as_text"). Some general options:

            - opt_level: Set the optimization level.
              Default: 2. Graph kernel fusion can be enabled equivalently by setting opt_level greater than 0.
              Available values are:

              - 0: Disable graph kernel fusion;
              - 1: enable the basic fusion of operators;
              - 2: includes all optimizations of level 1,
                and turns on more optimizations such as CSE, arithmetic simplification and so on;
              - 3: includes all optimizations of level 2, and turns on more optimizations such as SitchingFusion,
                ParallelFusion and so on. Optimizations of this level are radical and unstable in some scenarios.
                Be caution when using this level.

            - dump_as_text: dump detail info as text files. Default: false.

            More options can refer to the implementation code. These options can also be set by environment
            variable MS_GRAPH_KERNEL_FLAGS, without modifying network source code.
            For example, export MS_GRAPH_KERNEL_FLAGS="--opt_level=2 --dump_as_text".
        enable_reduce_precision (bool): Whether to enable precision reduction.
            If the operator does not support the user-specified precision, the precision will
            be changed automatically. Default: True.
        auto_tune_mode (str): The mode of auto tune when op building, get the best tiling performance.
            Default: NO_TUNE. The value must be in ['RL', 'GA', 'RL,GA'].

            - RL: Reinforcement Learning tune.
            - GA: Genetic Algorithm tune.
            - RL,GA: When both RL and GA optimization are enabled, the tool automatically selects RL or GA based on
              different types of operators in the network model. The sequence of RL and GA is not differentiated.
              (Automatic selection).

            For more information about the enable operator tuning tool settings, please check
            `Enable the operator optimization tool <https://www.mindspore.cn/docs/programming_guide/en
            /master/enable_auto_tune.html>`_.
        check_bprop (bool): Whether to check back propagation nodes. The checking ensures that the shape and dtype
            of back propagation node outputs is the same as input parameters. Default: False.
        max_call_depth (int): Specify the maximum depth of function call. Must be positive integer. Default: 1000.
            The max_call_depth parameter needs to be set when the nested call is too deep or the number
            of subgraphs is too large. If max_call_depth is set larger than before, the system max stack depth should be
            set larger too, otherwise a `core dumped` exception may be raised because of system stack overflow.
        enable_sparse (bool): Whether to enable sparsity feature. Default: False.
            For details of sparsity and sparse tensor, please check
            `sparse tensor <https://www.mindspore.cn/docs/programming_guide/en/master/tensor.html#sparse-tensor>`_.
        grad_for_scalar (bool): Whether to get gradient for scalar. Default: False.
            When grad_for_scalar is set to True, the function's scalar input can be derived.
            The default value is False. Because the back-end does not support scaling operations currently,
            this interface only supports simple operations that can be deduced by the front-end.
        enable_compile_cache (bool): Whether to save or load the cache of the graph compiled by front-end.
            After enable_compile_cache is set to True, during the first execution, a hardware-independent
            compilation cache is generated and exported to a MINDIR file. When the network is executed again,
            if enable_compile_cache is still set to True and the network scripts are not changed,
            the compile cache is loaded. Note that only limited automatic detection for the changes of
            python scripts is supported by now, which means that there is a correctness risk. Default: False.
            This is an experimental prototype that is subject to change and/or deletion.
        compile_cache_path (str): Path to save the cache of the graph compiled by front-end. Default: ".".
            If the specified directory does not exist, the system will automatically create the directory.
            The cache will be saved to the directory of `compile_cache_path/rank_${rank_id}/`. The `rank_id` is
            the ID of the current device in the cluster.

    Raises:
        ValueError: If input key is not an attribute in context.

    Examples:
        >>> context.set_context(mode=context.PYNATIVE_MODE)
        >>> context.set_context(precompile_only=True)
        >>> context.set_context(device_target="Ascend")
        >>> context.set_context(device_id=0)
        >>> context.set_context(save_graphs=True, save_graphs_path="./model.ms")
        >>> context.set_context(enable_reduce_precision=True)
        >>> context.set_context(enable_dump=True, save_dump_path=".")
        >>> context.set_context(enable_graph_kernel=True)
        >>> context.set_context(graph_kernel_flags="--opt_level=2 --dump_as_text")
        >>> context.set_context(reserve_class_name_in_scope=True)
        >>> context.set_context(variable_memory_max_size="6GB")
        >>> context.set_context(enable_profiling=True,
        ...                     profiling_options='{"output":"/home/data/output","training_trace":"on"}')
        >>> context.set_context(check_bprop=True)
        >>> context.set_context(max_device_memory="3.5GB")
        >>> context.set_context(print_file_path="print.pb")
        >>> context.set_context(enable_sparse=True)
        >>> context.set_context(max_call_depth=80)
        >>> context.set_context(env_config_path="./env_config.json")
        >>> context.set_context(auto_tune_mode="GA,RL")
        >>> context.set_context(grad_for_scalar=True)
        >>> context.set_context(enable_compile_cache=True, compile_cache_path="./cache.ms")
        >>> context.set_context(pynative_synchronize=True)
    """
    ctx = _context()
    # set device target first
    if 'device_target' in kwargs:
        ctx.set_device_target(kwargs['device_target'])
        # Validate that the requested target is supported by this MindSpore package build.
        device = ctx.get_param(ms_ctx_param.device_target)
        if not device.lower() in __device_target__:
            raise ValueError(f"Error, package type {__package_name__} support device type {__device_target__}, "
                             f"but got device target {device}")
    # Re-read the effective target: it may have come from the default, not kwargs.
    device = ctx.get_param(ms_ctx_param.device_target)
    for key, value in kwargs.items():
        # Deprecated keys are warned about and skipped entirely.
        if key in ('enable_profiling', 'profiling_options', 'enable_auto_mixed_precision',
                   'enable_dump', 'save_dump_path'):
            logger.warning(f" '{key}' parameters will be deprecated."
                           "For details, please see the interface parameter API comments")
            continue
        # Keys not applicable to the current device target are ignored (with a warning).
        if not _check_target_specific_cfgs(device, key):
            continue
        # Dispatch order: context property -> registered setter -> raw ms_ctx_param member.
        if hasattr(ctx, key):
            setattr(ctx, key, value)
            continue
        if key in ctx.setters:
            ctx.setters[key](ctx, value)
            continue
        # enum variables beginning with '_' are for internal use
        if key in ms_ctx_param.__members__ and key[0] != '_':
            ctx.set_param(ms_ctx_param.__members__[key], value)
            continue
        raise ValueError(f"For 'context.set_context', the keyword argument {key} is not recognized! For detailed "
                         f"usage of 'set_context', please refer to the Mindspore official website.")
def get_context(attr_key):
    """
    Get context attribute value according to the input key.

    If some attributes are not set, they will be automatically obtained.

    Args:
        attr_key (str): The key of the attribute.

    Returns:
        Object, The value of given attribute key.

    Raises:
        ValueError: If input key is not an attribute in context.

    Examples:
        >>> context.get_context("device_target")
        >>> context.get_context("device_id")
    """
    ctx = _context()
    # Warn (but do not fail) when the key is device-specific and mismatched.
    _ = _check_target_specific_cfgs(ctx.get_param(ms_ctx_param.device_target), attr_key)
    if hasattr(ctx, attr_key):
        return getattr(ctx, attr_key)
    # enum variables beginning with '_' are for internal use
    members = ms_ctx_param.__members__
    if attr_key in members and not attr_key.startswith('_'):
        return ctx.get_param(members[attr_key])
    raise ValueError(f"For 'context.get_context', the argument {attr_key} is not recognized! For detailed "
                     f"usage of 'get_context', please refer to the Mindspore official website.")
def _get_mode():
    """
    Get execution mode. Only for internal using.

    Returns:
        Object: The Value of execution mode.
    """
    return _context().get_mode()
class ParallelMode:
    """
    Enumeration-style holder for the supported parallel mode names.

    There are five kinds of parallel modes, "STAND_ALONE", "DATA_PARALLEL",
    "HYBRID_PARALLEL", "SEMI_AUTO_PARALLEL" and "AUTO_PARALLEL". Default: "STAND_ALONE".

    - STAND_ALONE: Only one processor is working.
    - DATA_PARALLEL: Distributes the data across different processors.
    - HYBRID_PARALLEL: Achieves data parallelism and model parallelism manually.
    - SEMI_AUTO_PARALLEL: Achieves data parallelism and model parallelism by setting parallel strategies.
    - AUTO_PARALLEL: Achieves parallelism automatically.

    MODE_LIST: The list of all supported parallel modes.
    """

    # String values are part of the public API and must stay stable.
    STAND_ALONE = "stand_alone"
    DATA_PARALLEL = "data_parallel"
    HYBRID_PARALLEL = "hybrid_parallel"
    SEMI_AUTO_PARALLEL = "semi_auto_parallel"
    AUTO_PARALLEL = "auto_parallel"

    MODE_LIST = [STAND_ALONE, DATA_PARALLEL, HYBRID_PARALLEL, SEMI_AUTO_PARALLEL, AUTO_PARALLEL]
@args_type_check(enable_ps=bool)
def set_ps_context(**kwargs):
    """
    Set parameter server training mode context.

    Note:
        Some other environment variables should also be set for parameter server training mode.
        These environment variables are listed below:

        - MS_SERVER_NUM: Server number
        - MS_WORKER_NUM: Worker number
        - MS_SCHED_HOST: Scheduler IP address
        - MS_SCHED_PORT: Scheduler port
        - MS_ROLE: The role of this process:

          - MS_SCHED: represents the scheduler,
          - MS_WORKER: represents the worker,
          - MS_PSERVER: represents the Server

    Args:
        enable_ps (bool): Whether to enable parameter server training mode.
            Only after enable_ps is set True, the environment variables will be effective.
            Default: False.

    Raises:
        ValueError: If input key is not the attribute in parameter server training mode context.

    Examples:
        >>> context.set_ps_context(enable_ps=True)
    """
    _set_ps_context(**kwargs)
def get_ps_context(attr_key):
    """
    Get parameter server training mode context attribute value according to the key.

    Args:
        attr_key (str): The key of the attribute:

            - enable_ps (bool): Whether to enable parameter server training mode.

    Returns:
        Returns attribute value according to the key.

    Raises:
        ValueError: If input key is not an attribute in parameter server training mode context.

    Examples:
        >>> context.get_ps_context("enable_ps")
    """
    return _get_ps_context(attr_key)
def reset_ps_context():
    """
    Reset parameter server training mode context attributes to the default values:

    - enable_ps: False.

    See `set_ps_context` for the meaning of each attribute.
    """
    _reset_ps_context()
def set_fl_context(**kwargs):
    """
    Set federated learning training mode context.

    Args:
        enable_fl (bool): Whether to enable federated learning training mode.
            Default: False.
        server_mode (str): Describe the server mode, which must be one of 'FEDERATED_LEARNING' and 'HYBRID_TRAINING'.
            Default: 'FEDERATED_LEARNING'.
        ms_role (str): The process's role in the federated learning mode,
            which must be one of 'MS_SERVER', 'MS_WORKER' and 'MS_SCHED'.
            Default: 'MS_SERVER'.
        worker_num (int): The number of workers. For current version, this must be set to 1 or 0.
        server_num (int): The number of federated learning servers. Default: 0.
        scheduler_ip (str): The scheduler IP. Default: '0.0.0.0'.
        scheduler_port (int): The scheduler port. Default: 6667.
        fl_server_port (int): The http port of the federated learning server.
            Normally for each server this should be set to the same value. Default: 6668.
        enable_fl_client (bool): Whether this process is federated learning client. Default: False.
        start_fl_job_threshold (int): The threshold count of startFLJob. Default: 1.
        start_fl_job_time_window (int): The time window duration for startFLJob in millisecond. Default: 3000.
        share_secrets_ratio (float): The ratio for computing the threshold count of share secrets. Default: 1.0.
        update_model_ratio (float): The ratio for computing the threshold count of updateModel. Default: 1.0.
        cipher_time_window (int): The time window duration for each cipher round in millisecond. Default: 300000.
        reconstruct_secrets_threshold (int): The threshold count of reconstruct threshold. Default: 0.
        update_model_time_window (int): The time window duration for updateModel in millisecond. Default: 3000.
        fl_name (string): The federated learning job name. Default: ''.
        fl_iteration_num (int): Iteration number of federated learning,
            which is the number of interactions between client and server. Default: 20.
        client_epoch_num (int): Client training epoch number. Default: 25.
        client_batch_size (int): Client training data batch size. Default: 32.
        client_learning_rate (float): Client training learning rate. Default: 0.001.
        worker_step_num_per_iteration (int): The worker's standalone training step number before communicating with
            server. Default: 65.
        dp_eps (float): Epsilon budget of differential privacy mechanism. The smaller the dp_eps, the better the
            privacy protection effect. Default: 50.0.
        dp_delta (float): Delta budget of differential privacy mechanism, which is usually equals the reciprocal of
            client number. The smaller the dp_delta, the better the privacy protection effect. Default: 0.01.
        dp_norm_clip (float): A factor used for clipping model's weights for differential mechanism. Its value is
            suggested to be 0.5~2. Default: 1.0.
        encrypt_type (string): Secure schema for federated learning, which can be 'NOT_ENCRYPT', 'DP_ENCRYPT',
            'PW_ENCRYPT' or 'STABLE_PW_ENCRYPT'. If 'DP_ENCRYPT', differential privacy schema would be applied
            for clients and the privacy protection effect would be determined by dp_eps, dp_delta and dp_norm_clip
            as described above. If 'PW_ENCRYPT', pairwise secure aggregation would be applied to protect clients'
            model from stealing in cross-device scenario. If 'STABLE_PW_ENCRYPT', pairwise secure aggregation would
            be applied to protect clients' model from stealing in cross-silo scenario. Default: 'NOT_ENCRYPT'.
        config_file_path (string): Configuration file path used by recovery. Default: ''.
        scheduler_manage_port (int): scheduler manage port used to scale out/in. Default: 11202.
        enable_ssl (bool): Set PS SSL mode enabled or disabled. Default: true.
        client_password (str): Password to decrypt the secret key stored in the client certificate.
        server_password (str): Password to decrypt the secret key stored in the server certificate.

    Raises:
        ValueError: If input key is not the attribute in federated learning mode context.

    Examples:
        >>> context.set_fl_context(enable_fl=True, server_mode='FEDERATED_LEARNING')
    """
    _set_ps_context(**kwargs)
def get_fl_context(attr_key):
    """
    Get federated learning mode context attribute value according to the key.
    Args:
        attr_key (str): The key of the attribute.
            Please refer to `set_fl_context`'s parameters to decide what key should be passed.
    Returns:
        Returns attribute value according to the key.
    Raises:
        ValueError: If input key is not attribute in federated learning mode context.
    Examples:
        >>> context.get_fl_context("server_mode")
    """
    # Federated learning attributes are stored in the parameter-server context,
    # so this simply delegates to the PS context backend.
    return _get_ps_context(attr_key)
import os
import time
import threading
from collections import namedtuple
from types import FunctionType
from mindspore import log as logger
from mindspore._c_expression import MSContext, ms_ctx_param
from mindspore._checkparam import args_type_check, Validator, args_unreset_check
from mindspore.parallel._auto_parallel_context import _set_auto_parallel_context, _get_auto_parallel_context, \
_reset_auto_parallel_context
from mindspore.parallel._ps_context import _set_ps_context, _get_ps_context, _reset_ps_context
from .default_config import __device_target__, __package_name__
# Public API of this module.
__all__ = ['GRAPH_MODE', 'PYNATIVE_MODE', 'set_context', 'get_context', 'set_auto_parallel_context',
           'get_auto_parallel_context', 'reset_auto_parallel_context', 'ParallelMode', 'set_ps_context',
           'get_ps_context', 'reset_ps_context', 'set_fl_context', 'get_fl_context']
# Execution-mode constants accepted by set_context(mode=...).
GRAPH_MODE = 0
PYNATIVE_MODE = 1
_DEVICE_APP_MEMORY_SIZE = 31  # The max memory size of graph plus variable.
# Regex validating "xGB" memory-size strings, e.g. "5GB", "3.5GB", "0.5GB".
_re_pattern = r'[1-9][0-9]*(\.)?[0-9]*GB|0\.[0-9]*GB'
# Global _Context singleton, lazily created by _context().
_k_context = None
def _make_directory(path):
    """Validate a directory path and create the directory if it is missing.

    Args:
        path (str): Directory path; must be a non-empty string. Relative paths
            are resolved to absolute paths.

    Returns:
        str: The absolute path of the (now existing) directory.

    Raises:
        ValueError: If `path` is not a non-empty string, or the directory
            cannot be created (e.g. no write permission).
    """
    if path is None or not isinstance(path, str) or path.strip() == "":
        raise ValueError(f"For 'context.set_context', the 'save_graphs_path' or the 'print_file_path' is invalid "
                         f"type, it should be Non-empty string, but got '{path}'.")
    # Convert relative paths to absolute so later checks and logs are unambiguous.
    path = os.path.realpath(path)
    logger.debug("The absolute path is %r", path)
    # Check whether the path already exists; otherwise try to create it.
    if os.path.exists(path):
        real_path = path
    else:
        # Directory creation may fail due to permission limits, so catch and re-raise
        # with a clear, user-facing message.
        logger.debug("The directory(%s) doesn't exist, will create it", path)
        try:
            os.makedirs(path)
            real_path = path
        except PermissionError as e:
            # Bug fix: the original message had a stray doubled quote ('{path}'').
            logger.critical(f"No write permission on the directory '{path}', error = {e}")
            raise ValueError(e.__str__() + f"\nNo write permission on the directory '{path}'.")
    return real_path
def _get_print_file_name(file_name):
"""Add timestamp suffix to file name. Rename the file name: file_name + "." + time(seconds)."""
time_second = str(int(time.time()))
file_name = file_name + "." + time_second
if os.path.exists(file_name):
ValueError("This file {} already exists.".format(file_name))
return file_name
class _ThreadLocalInfo(threading.local):
"""
Thread local Info used for store thread local attributes.
"""
def __init__(self):
super(_ThreadLocalInfo, self).__init__()
self._reserve_class_name_in_scope = True
self.debug_runtime = False
@property
def reserve_class_name_in_scope(self):
"""Get whether to save the network class name in the scope."""
return self._reserve_class_name_in_scope
@reserve_class_name_in_scope.setter
def reserve_class_name_in_scope(self, reserve_class_name_in_scope):
"""Set whether to save the network class name in the scope."""
if not isinstance(reserve_class_name_in_scope, bool):
raise ValueError("For '_ThreadLocalInfo', the type of the property 'reserve_class_name_in_scope' must "
"be bool, but got {}.".format(type(reserve_class_name_in_scope)))
self._reserve_class_name_in_scope = reserve_class_name_in_scope
# Record of one mode switch: whether PyNative mode was entered, and the
# callable (possibly None) that performed the switch.
_ContextRecord = namedtuple(
    "_ContextRecord", ["is_pynative_mode", "switch_context_fn"])
class _ContextSwitchInfo(threading.local):
"""
Record of context switch information.
Args:
is_pynative (bool): Whether to adopt the PyNative mode.
"""
def __init__(self, is_pynative):
super(_ContextSwitchInfo, self).__init__()
self.context_stack = []
if is_pynative:
self.push(True, None)
def push(self, is_pynative, switch_context_fn):
"""
Push a context switch record onto the stack.
Args:
is_pynative (bool): Whether context switch to PyNative mode.
switch_context_fn (Function): A callable that executes the context switch.
"""
if isinstance(switch_context_fn, FunctionType):
switch_context_fn()
self.context_stack.append(
_ContextRecord(is_pynative, switch_context_fn))
def pop(self):
self.context_stack.pop()
class _Context:
    """
    _Context is the environment in which operations are executed
    Note:
        Create a context through instantiating Context object is not recommended.
        should use context() to get the context since Context is a singleton.
    """
    # Singleton instance and the lock guarding its creation.
    _instance = None
    _instance_lock = threading.Lock()
    def __init__(self):
        # Per-thread flags (debug runtime, scope naming).
        self._thread_local_info = _ThreadLocalInfo()
        # Per-thread stack of graph/pynative mode switches.
        self._context_switches = _ContextSwitchInfo(False)
        # Handle to the native context backend; all get_param/set_param go through it.
        self._context_handle = MSContext.get_instance()
        self.enable_compile_cache = None
    def __new__(cls, *args, **kwargs):
        # NOTE(review): the None check is outside the lock, so two threads could
        # in principle both pass it and race on creation — confirm first use is
        # effectively single-threaded.
        if cls._instance is None:
            cls._instance_lock.acquire()
            cls._instance = object.__new__(cls)
            cls._instance_lock.release()
        return cls._instance
    def __getattribute__(self, attr):
        # Fail fast if the native context handle was never initialized.
        value = object.__getattribute__(self, attr)
        if attr == "_context_handle" and value is None:
            raise ValueError("Context handle is none in context!!!")
        return value
    def get_param(self, param):
        """Read a raw parameter value from the native context handle."""
        return self._context_handle.get_param(param)
    def set_param(self, param, value):
        """Write a raw parameter value through the native context handle."""
        self._context_handle.set_param(param, value)
    def get_mode(self):
        """Get current mode."""
        return self.get_param(ms_ctx_param.mode)
    def set_mode(self, mode):
        """
        Switch between Graph mode and PyNative mode.
        Args:
            mode (int): GRAPH_MODE or PYNATIVE_MODE.
        Raises:
            ValueError: If `mode` is neither GRAPH_MODE nor PYNATIVE_MODE, or
                PyNative is requested under an unsupported parallel mode.
        """
        if mode == PYNATIVE_MODE:
            if self.enable_debug_runtime:
                self.set_backend_policy("vm")
            # PyNative only supports stand-alone and data-parallel execution.
            parallel_mode = _get_auto_parallel_context("parallel_mode")
            if parallel_mode not in (ParallelMode.DATA_PARALLEL, ParallelMode.STAND_ALONE):
                raise ValueError(f"Pynative Only support STAND_ALONE and DATA_PARALLEL for ParallelMode,"
                                 f"but got {parallel_mode.upper()}.")
            self._context_switches.push(True, None)
        elif mode == GRAPH_MODE:
            if self.enable_debug_runtime:
                self.set_backend_policy("ge")
            self._context_switches.push(False, None)
        else:
            raise ValueError(f"For 'context.set_context', the argument 'mode' should be context.GRAPH_MODE (0) "
                             f"or context.PYNATIVE_MODE (1), but got {mode}.")
        self.set_param(ms_ctx_param.mode, mode)
    def set_backend_policy(self, policy):
        """Set the execution backend; must be one of 'ge', 'vm' or 'ms'."""
        success = self._context_handle.set_backend_policy(policy)
        if not success:
            raise RuntimeError("Backend policy must be one of values in ['ge', 'vm', 'ms']. "
                               "But got {}.".format(policy))
    def set_save_graphs_path(self, save_graphs_path):
        """Set the graph-dump path, creating the directory if needed."""
        self.set_param(ms_ctx_param.save_graphs_path, _make_directory(save_graphs_path))
    def set_device_target(self, target):
        """Set the device target; 'Davinci' is accepted as an alias of 'Ascend'."""
        valid_targets = ["CPU", "GPU", "Ascend", "Davinci"]
        if not target in valid_targets:
            raise ValueError(f"For 'context.set_context', the argument 'device_target' must be one of "
                             f"{valid_targets}, but got {target}.")
        if target == "Davinci":
            target = "Ascend"
        self.set_param(ms_ctx_param.device_target, target)
        if self.enable_debug_runtime and target == "CPU":
            self.set_backend_policy("vm")
    def set_auto_tune_mode(self, tune_mode):
        """Set the op auto-tune mode; one of NO_TUNE / RL / GA / combinations."""
        candidate = ["NO_TUNE", "RL", "GA", "RL,GA", "GA,RL"]
        if tune_mode in candidate:
            self.set_param(ms_ctx_param.tune_mode, tune_mode)
        else:
            raise ValueError(f"For 'context.set_context', the argument 'auto_tune_mode' must be in "
                             f"['NO_TUNE', 'RL', 'GA', 'RL,GA', 'GA,RL'], but got {tune_mode}.")
    def set_device_id(self, device_id):
        """Set the device id; valid range is [0, 4095]."""
        if device_id < 0 or device_id > 4095:
            raise ValueError(f"For 'context.set_context', the argument 'device_id' must be in range [0, 4095], "
                             f"but got {device_id}.")
        self.set_param(ms_ctx_param.device_id, device_id)
    def set_max_call_depth(self, max_call_depth):
        """Set the maximum function call depth; must be a positive integer."""
        if max_call_depth <= 0:
            raise ValueError(f"For 'context.set_context', the argument 'max_call_depth' must be greater than 0, "
                             f"but got {max_call_depth}.")
        self.set_param(ms_ctx_param.max_call_depth, max_call_depth)
    def set_profiling_options(self, option):
        """Set profiling options; must be a string."""
        if not isinstance(option, str):
            raise TypeError("For 'context.set_context', the argument 'profiling_option' must be string, "
                            "but got {}.".format(type(option)))
        self.set_param(ms_ctx_param.profiling_options, option)
    def set_variable_memory_max_size(self, variable_memory_max_size):
        """set values of variable_memory_max_size and graph_memory_max_size"""
        if not Validator.check_str_by_regular(variable_memory_max_size, _re_pattern):
            raise ValueError("For 'context.set_context', the argument 'variable_memory_max_size' should be in correct"
                             " format! It must be a string ending with 'GB', in addition to that, it must contain "
                             "only numbers or decimal points, such as \"5GB\" or \"3.5GB\", but got {}."
                             .format(variable_memory_max_size))
        if int(variable_memory_max_size[:-2]) > _DEVICE_APP_MEMORY_SIZE:
            raise ValueError("For 'context.set_context', the argument 'variable_memory_max_size' should not be "
                             "greater than 31GB, but got {}.".format(variable_memory_max_size))
        # Graph memory gets whatever is left of the fixed device budget.
        variable_memory_max_size_ = variable_memory_max_size[:-2] + " * 1024 * 1024 * 1024"
        graph_memory_max_size = _DEVICE_APP_MEMORY_SIZE - int(variable_memory_max_size[:-2])
        graph_memory_max_size_ = str(graph_memory_max_size) + " * 1024 * 1024 * 1024"
        self.set_param(ms_ctx_param.variable_memory_max_size, variable_memory_max_size_)
        self.set_param(ms_ctx_param._graph_memory_max_size, graph_memory_max_size_)
    def set_max_device_memory(self, max_device_memory):
        """Set the maximum device memory; accepts a non-zero "xGB" string."""
        if not Validator.check_str_by_regular(max_device_memory, _re_pattern):
            raise ValueError("For 'context.set_context', the argument 'max_device_memory' should be in correct "
                             " format! It must be a string ending with 'GB', in addition to that, it must contain "
                             "only numbers or decimal points, such as \"5GB\" or \"3.5GB\", but got {}."
                             .format(max_device_memory))
        max_device_memory_value = float(max_device_memory[:-2])
        if max_device_memory_value == 0:
            raise ValueError("For 'context.set_context', the argument 'max_device_memory' should not be \"0GB\".")
        self.set_param(ms_ctx_param.max_device_memory, max_device_memory_value)
    def set_print_file_path(self, file_path):
        """Add timestamp suffix to file name. Sets print file path."""
        print_file_path = os.path.realpath(file_path)
        if os.path.isdir(print_file_path):
            raise IOError("For 'context.set_context', the argument 'print_file_path' should be file path, "
                          "but got directory {}.".format(file_path))
        # If the target file already exists, write to a timestamped sibling instead.
        if os.path.exists(print_file_path):
            _path, _file_name = os.path.split(print_file_path)
            path = _make_directory(_path)
            file_name = _get_print_file_name(_file_name)
            full_file_name = os.path.join(path, file_name)
        else:
            full_file_name = print_file_path
        self.set_param(ms_ctx_param.print_file_path, full_file_name)
    def set_env_config_path(self, env_config_path):
        """Check and set env_config_path."""
        if not self._context_handle.enable_dump_ir():
            raise ValueError("The 'env_config_path' is not supported, please enable ENABLE_DUMP_IR "
                             "with '-D on' and recompile source.")
        env_config_path = os.path.realpath(env_config_path)
        if not os.path.isfile(env_config_path):
            raise ValueError("For 'context.set_context', the 'env_config_path' file %r is not exists, "
                             "please check whether 'env_config_path' is correct." % env_config_path)
        # Validate that the file parses as JSON before accepting it.
        try:
            with open(env_config_path, 'r') as f:
                json.load(f)
        except (TypeError, ValueError) as exo:
            raise ValueError(str(exo) + "\nFor 'context.set_context', open or load the 'env_config_path' file {} "
                             "failed, please check whether 'env_config_path' is json file and correct, or may not "
                             "have permission to read it.".format(env_config_path))
        self.set_param(ms_ctx_param.env_config_path, env_config_path)
    # Dispatch table used by set_context(): maps a keyword to its setter method.
    setters = {
        'mode': set_mode,
        'save_graphs_path': set_save_graphs_path,
        'device_target': set_device_target,
        'device_id': set_device_id,
        'auto_tune_mode': set_auto_tune_mode,
        'max_call_depth': set_max_call_depth,
        'profiling_options': set_profiling_options,
        'variable_memory_max_size': set_variable_memory_max_size,
        'max_device_memory': set_max_device_memory,
        'print_file_path': set_print_file_path,
        'env_config_path': set_env_config_path
    }
    @property
    def reserve_class_name_in_scope(self):
        """Get whether to save the network class name in the scope."""
        return self._thread_local_info.reserve_class_name_in_scope
    @reserve_class_name_in_scope.setter
    def reserve_class_name_in_scope(self, reserve_class_name_in_scope):
        """Set whether to save the network class name in the scope."""
        self._thread_local_info.reserve_class_name_in_scope = reserve_class_name_in_scope
    @property
    def enable_ge(self):
        """Return True when the current backend policy is 'ge'."""
        return self._context_handle.get_backend_policy() == 'ge'
    @property
    def enable_debug_runtime(self):
        """Return the per-thread debug-runtime flag."""
        return self._thread_local_info.debug_runtime
    @enable_debug_runtime.setter
    def enable_debug_runtime(self, enable):
        """Set the per-thread debug-runtime flag."""
        thread_info = self._thread_local_info
        thread_info.debug_runtime = enable
def _context():
    """
    Return the global _Context singleton, creating and configuring it on first use.
    Returns:
        _Context, the global context in PyNative mode.
    """
    global _k_context
    if _k_context is not None:
        return _k_context
    # First call: resolve the default backend from the packaged config.
    backend = 'debug'
    try:
        from mindspore import default_config
        backend = default_config.__backend__
    except ImportError:
        logger.error("import default config fail")
    _k_context = _Context()
    _k_context.enable_debug_runtime = False
    # A 'debug' backend means: run on the vm backend with debug runtime enabled.
    if backend == 'debug':
        _k_context.enable_debug_runtime = True
        backend = 'vm'
    _k_context.set_backend_policy(backend)
    return _k_context
@args_type_check(device_num=int, global_rank=int, gradients_mean=bool, gradient_fp32_sync=bool, parallel_mode=str,
                 search_mode=str, parameter_broadcast=bool, strategy_ckpt_load_file=str,
                 strategy_ckpt_save_file=str, full_batch=bool, enable_parallel_optimizer=bool,
                 all_reduce_fusion_config=list, pipeline_stages=int, grad_accumulation_step=int,
                 parallel_optimizer_config=dict)
def set_auto_parallel_context(**kwargs):
    r"""
    Set auto parallel context, which is valid only for Ascend and GPU target.
    Auto parallel context should be configured before the initialization of your network.
    Note:
        Attribute name is required for setting attributes.
        If a program has tasks on different parallel modes, before setting a new parallel mode for the
        next task, interface mindspore.context.reset_auto_parallel_context() should be called to reset
        the configuration.
        Setting or changing parallel modes must be called before creating any Initializer, otherwise,
        it may have RuntimeError when compiling the network.
    Some configurations are parallel mode specific, see the below table for details:
    ===========================  ===========================
    Common                       AUTO_PARALLEL
    ===========================  ===========================
    device_num                   gradient_fp32_sync
    global_rank                  loss_repeated_mean
    gradients_mean               search_mode
    parallel_mode                strategy_ckpt_load_file
    all_reduce_fusion_config     strategy_ckpt_save_file
    enable_parallel_optimizer    dataset_strategy
    parallel_optimizer_config    pipeline_stages
    \                            grad_accumulation_step
    ===========================  ===========================
    Args:
        device_num (int): Available device number, the value must be in [1, 4096]. Default: 1.
        global_rank (int): Global rank id, the value must be in [0, 4095]. Default: 0.
        gradients_mean (bool): Whether to perform mean operator after allreduce of gradients.
                     "stand_alone" do not support gradients_mean. Default: False.
        gradient_fp32_sync (bool): Run allreduce of gradients in fp32. "stand_alone", "data_parallel"
                     and "hybrid_parallel" do not support gradient_fp32_sync. Default: True.
        parallel_mode (str): There are five kinds of parallel modes, "stand_alone", "data_parallel",
                     "hybrid_parallel", "semi_auto_parallel" and "auto_parallel". Note the pynative mode only supports
                     the "stand_alone" and "data_parallel" mode. Default: "stand_alone".
                     - stand_alone: Only one processor is working.
                     - data_parallel: Distributes the data across different processors.
                     - hybrid_parallel: Achieves data parallelism and model parallelism manually.
                     - semi_auto_parallel: Achieves data and model parallelism by setting parallel strategies.
                     - auto_parallel: Achieving parallelism automatically.
        search_mode (str): There are three kinds of shard strategy search modes: "recursive_programming",
                     "dynamic_programming" and "sharding_propagation". Default: "dynamic_programming".
                     - recursive_programming: Recursive programming search mode.
                     - dynamic_programming: Dynamic programming search mode.
                     - sharding_propagation: Propagate shardings from configured ops to non-configured ops.
        parameter_broadcast (bool): Whether to broadcast parameters before training. Before training, in order to have
                     the same network initialization parameter values for all devices, broadcast the parameters
                     on device 0 to other devices. Parameter broadcasting in different parallel modes is different,
                     data_parallel mode, all parameters are broadcast except for the parameter whose attribute
                     layerwise_parallel is True. Hybrid_parallel, semi_auto_parallel and auto_parallel mode, the
                     segmented parameters do not participate in broadcasting. Default: False.
        strategy_ckpt_load_file (str): The path to load parallel strategy checkpoint. Default: ''
        strategy_ckpt_save_file (str): The path to save parallel strategy checkpoint. Default: ''
        full_batch (bool): If you load whole batch datasets in auto_parallel mode, this parameter
                     should be set as True. Default: False. The interface is not to be recommended currently,
                     it is better using 'dataset_strategy' to replace it.
        dataset_strategy (Union[str, tuple]): Dataset sharding strategy. Default: "data_parallel".
                     dataset_strategy="data_parallel" is equal to full_batch=False, dataset_strategy="full_batch" is
                     equal to full_batch=True. For dataset load into net by model parallel strategy likes
                     ds_stra ((1, 8), (1, 8)), it requires using set_auto_parallel_context(dataset_strategy=ds_stra).
        enable_parallel_optimizer (bool): This is a developing feature, which shards the weight update computation for
                     data parallel training in the benefit of time and memory saving. Currently, auto and semi auto
                     parallel mode support all optimizers in both Ascend and GPU. Data parallel mode only supports
                     `Lamb` and `AdamWeightDecay` in Ascend. Default: False.
        all_reduce_fusion_config (list): Set allreduce fusion strategy by parameters indices. Only support ReduceOp.SUM
                     and HCCL_WORLD_GROUP/NCCL_WORLD_GROUP. No Default, if it is not set, the fusion is closed.
        pipeline_stages (int): Set the stage information for pipeline parallel. This indicates how the devices are
                     distributed alone in the pipeline. The total devices will be divided into 'pipeline_stags'
                     stages. Currently, this could only be used when parallel mode semi_auto_parallel is enabled.
                     Default: 1.
        grad_accumulation_step (int): Set the accumulation steps of gradients in auto and semi auto parallel mode.
                     This should be a positive int. Default: 1.
        parallel_optimizer_config (dict): A dict contains the keys and values for setting the parallel optimizer
                     configure. The configure provides more detailed behavior control about parallel training
                     when parallel optimizer is enabled. Currently it supports the key `gradient_accumulation_shard`.
                     The configure will be effective when we use
                     context.set_auto_parallel_context(enable_parallel_optimizer=True).
                     It supports the following keys.
                     - gradient_accumulation_shard: If true, the accumulation gradient parameters will be
                       sharded across the data parallel devices. This will
                       introduce additional communication(ReduceScatter) at
                       each step when accumulate the gradients, but saves a
                       lot of device memories, thus can make model be trained
                       with larger batch size. This configure is effective only
                       when the model runs on pipeline training or gradient
                       accumulation with data parallel.
    Raises:
        ValueError: If input key is not attribute in auto parallel context.
    Examples:
        >>> from mindspore import context
        >>> context.set_auto_parallel_context(device_num=8)
        >>> context.set_auto_parallel_context(global_rank=0)
        >>> context.set_auto_parallel_context(gradients_mean=True)
        >>> context.set_auto_parallel_context(gradient_fp32_sync=False)
        >>> context.set_auto_parallel_context(parallel_mode="auto_parallel")
        >>> context.set_auto_parallel_context(search_mode="dynamic_programming")
        >>> context.set_auto_parallel_context(parameter_broadcast=False)
        >>> context.set_auto_parallel_context(strategy_ckpt_load_file="./strategy_stage1.ckpt")
        >>> context.set_auto_parallel_context(strategy_ckpt_save_file="./strategy_stage1.ckpt")
        >>> context.set_auto_parallel_context(dataset_strategy=((1, 8), (1, 8)))
        >>> context.set_auto_parallel_context(enable_parallel_optimizer=False)
        >>> context.set_auto_parallel_context(all_reduce_fusion_config=[8, 160])
        >>> context.set_auto_parallel_context(pipeline_stages=2)
        >>> parallel_config = {"gradient_accumulation_shard": True}
        >>> context.set_auto_parallel_context(parallel_optimizer_config=parallel_config, enable_parallel_optimizer=True)
    """
    # Delegate to the auto-parallel context backend.
    _set_auto_parallel_context(**kwargs)
def get_auto_parallel_context(attr_key):
    """
    Get auto parallel context attribute value according to the key.
    Args:
        attr_key (str): The key of the attribute.
    Returns:
        Returns attribute value according to the key.
    Raises:
        ValueError: If input key is not attribute in auto parallel context.
    Examples:
        >>> from mindspore import context
        >>> parallel_mode = context.get_auto_parallel_context("parallel_mode")
        >>> dataset_strategy = context.get_auto_parallel_context("dataset_strategy")
    """
    # Thin wrapper: delegates to the auto-parallel context backend.
    return _get_auto_parallel_context(attr_key)
def reset_auto_parallel_context():
    """
    Reset auto parallel context attributes to the default values:
    - device_num: 1.
    - global_rank: 0.
    - gradients_mean: False.
    - gradient_fp32_sync: True.
    - parallel_mode: 'stand_alone'.
    - search_mode: 'dynamic_programming'.
    - parameter_broadcast: False.
    - strategy_ckpt_load_file: ''.
    - strategy_ckpt_save_file: ''.
    - full_batch: False.
    - enable_parallel_optimizer: False.
    - pipeline_stages: 1.
    """
    # Thin wrapper: delegates the reset to the auto-parallel context backend.
    _reset_auto_parallel_context()
def _check_target_specific_cfgs(device, arg_key):
"""Checking whether a config is suitable for a specified device"""
device_cfgs = {
'enable_dump': ['Ascend'],
'save_dump_path': ['Ascend'],
'enable_graph_kernel': ['Ascend', 'GPU', 'CPU'],
'graph_kernel_flags': ['Ascend', 'GPU', 'CPU'],
'enable_reduce_precision': ['Ascend'],
'enable_profiling': ['Ascend'],
'profiling_options': ['Ascend'],
'print_file_path': ['Ascend'],
'variable_memory_max_size': ['Ascend'],
'auto_tune_mode': ['Ascend'],
'max_device_memory': ['GPU']
}
# configs not in map device_cfgs are supposed to be suitable for all devices
if not arg_key in device_cfgs:
return True
supported_devices = device_cfgs[arg_key]
if device in supported_devices:
return True
logger.warning(f"Config '{arg_key}' only supports devices in {supported_devices}, current device is '{device}'"
", ignore it.")
return False
@args_unreset_check(device_id=int, variable_memory_max_size=str, max_device_memory=str)
@args_type_check(mode=int, precompile_only=bool, device_target=str, device_id=int, save_graphs=bool,
save_graphs_path=str, enable_dump=bool, auto_tune_mode=str,
save_dump_path=str, enable_reduce_precision=bool, variable_memory_max_size=str,
enable_profiling=bool, profiling_options=str, enable_auto_mixed_precision=bool,
enable_graph_kernel=bool, reserve_class_name_in_scope=bool, check_bprop=bool,
max_device_memory=str, print_file_path=str, enable_sparse=bool, max_call_depth=int,
env_config_path=str, graph_kernel_flags=str, enable_compile_cache=bool,
compile_cache_path=str, grad_for_scalar=bool, pynative_synchronize=bool)
def set_context(**kwargs):
"""
Set context for running environment.
Context should be configured before running your program. If there is no configuration,
it will be automatically set according to the device target by default.
Note:
Attribute name is required for setting attributes.
The mode is not recommended to be changed after net was initialized because the implementations of some
operations are different in graph mode and pynative mode. Default: GRAPH_MODE.
Some configurations are device specific, see the below table for details:
+-------------------------+------------------------------+----------------------------+
| Function Classification | Configuration Parameters | Hardware Platform Support|
+=========================+==============================+============================+
| System Configuration | device_id | CPU/GPU/Ascend |
| +------------------------------+----------------------------+
| | device_target | CPU/GPU/Ascend |
| +------------------------------+----------------------------+
| | max_device_memory | GPU |
| +------------------------------+----------------------------+
| | variable_memory_max_size | Ascend |
+-------------------------+------------------------------+----------------------------+
| Debug Configuration | save_graphs | CPU/GPU/Ascend |
| +------------------------------+----------------------------+
| | save_graphs_path | CPU/GPU/Ascend |
| +------------------------------+----------------------------+
| | enable_dump | Ascend |
| +------------------------------+----------------------------+
| | save_dump_path | Ascend |
| +------------------------------+----------------------------+
| | enable_profiling | Ascend |
| +------------------------------+----------------------------+
| | profiling_options | Ascend |
| +------------------------------+----------------------------+
| | print_file_path | Ascend |
| +------------------------------+----------------------------+
| | env_config_path | CPU/GPU/Ascend |
| +------------------------------+----------------------------+
| | precompile_only | CPU/GPU/Ascend |
| +------------------------------+----------------------------+
| | reserve_class_name_in_scope | CPU/GPU/Ascend |
| +------------------------------+----------------------------+
| | pynative_synchronize | GPU/Ascend |
+-------------------------+------------------------------+----------------------------+
| Executive Control | mode | CPU/GPU/Ascend |
| +------------------------------+----------------------------+
| | enable_graph_kernel | Ascend/GPU |
| +------------------------------+----------------------------+
| | graph_kernel_flags | Ascend/GPU |
| +------------------------------+----------------------------+
| | enable_reduce_precision | Ascend |
| +------------------------------+----------------------------+
| | auto_tune_mode | Ascend |
| +------------------------------+----------------------------+
| | check_bprop | CPU/GPU/Ascend |
| +------------------------------+----------------------------+
| | max_call_depth | CPU/GPU/Ascend |
| +------------------------------+----------------------------+
| | enable_sparse | CPU/GPU/Ascend |
| +------------------------------+----------------------------+
| | grad_for_scalar | CPU/GPU/Ascend |
| +------------------------------+----------------------------+
| | enable_compile_cache | CPU/GPU/Ascend |
| +------------------------------+----------------------------+
| | compile_cache_path | CPU/GPU/Ascend |
+-------------------------+------------------------------+----------------------------+
Args:
device_id (int): ID of the target device, the value must be in [0, device_num_per_host-1],
while device_num_per_host should be no more than 4096. Default: 0.
device_target (str): The target device to run, support "Ascend", "GPU", and "CPU".
If device target is not set, the version of MindSpore package is used.
max_device_memory (str): Set the maximum memory available for devices.
Currently, it is only supported on GPU. The format is "xxGB". Default: "1024GB".
The actual used memory size is the minimum of the available memory of the device and max_device_memory.
variable_memory_max_size (str): Set the maximum size of the variable memory max size. Default: "30GB".
After this parameter is set, the maximum memory used by the framework is restricted to the configured value.
save_graphs (bool): Whether to save graphs. Default: False.
When the `save_graphs` attribute is set as True, attribute of `save_graphs_path` is used to set the
intermediate compilation graph storage path. By default, the graphs are saved in the current directory.
save_graphs_path (str): Path to save graphs. Default: ".".
If the specified directory does not exist, the system will automatically create the directory.
During distributed training, graphs will be saved to the directory of
`save_graphs_path/rank_${rank_id}/`. `rank_id` is the ID of the current device in the cluster.
enable_dump (bool): This parameters is deprecated, and will be deleted in the next version.
save_dump_path (str): This parameters is deprecated, and will be deleted in the next version.
enable_profiling (bool): This parameters is deprecated, and will be deleted in the next version.
Please use mindspore.profiler.Profiler api instead.
profiling_options (str): This parameters is deprecated, and will be deleted in the next version.
Please use mindspore.profiler.Profiler api instead.
print_file_path (str): The path of saving print data. If this parameter is set, print data is saved to
a file by default, and print_file_path is not set, the screen will be displayed.
If the saved file already exists, the timestamp suffix will be added to the file. Saving data to a file
solves the problem of data loss in screen printing when a large amount of data is generated.
If it is not set, an error will be reported: prompt to set the upper absolute path.
env_config_path (str): Config path for DFX.
Through context.set_context(env_config_path="./mindspore_config.json")
configure RDR:
- enable: controls whether the RDR is enabled to collect the key data during training and
save key data in the fault scenario. When set to true, the RDR will be turned on.
When set to false, the RDR will be turned off.
- mode: sets the mode of RDR on exporting data. When set to 1, the RDR only exports data
in the fault scenario. When set to 2, the RDR exports data in the fault scenario and the
normal end scenario. Default is 1.
- path: sets the path where RDR saves data. The current path must be absolute.
Memory reuse:
- mem_Reuse: controls whether the memory reuse function is turned on. When set to True,
- the memory reuse function is turned on. When set to False, the memory reuse function is turned off.
precompile_only (bool): Whether to only precompile the network. Default: False.
If set to True, the network will only be compiled, not executed.
reserve_class_name_in_scope (bool) : Whether to save the network class name in the scope. Default: True.
Each node has a scope. A scope of a subnode is the name of its parent node. If reserve_class_name_in_scope
is set to True, the class name will be saved after keyword 'net-' in the scope.
For example:
Default/net-Net1/net-Net2 (reserve_class_name_in_scope=True)
Default/net/net (reserve_class_name_in_scope=False)
pynative_synchronize (bool): Whether to enable synchronous execution of the device in PyNative mode.
Default: False. When the value is set to False, the operator is executed asynchronously on the device.
When an error occurs in the execution of the operator, the specific error script code location cannot
be located, when the value is set to True, the operator is executed synchronously on the device. It will
reduce the execution performance of the program. At this time, when an error occurs in the execution of
the operator, the location of the error script code can be located according to the call stack of the error.
mode (int): Running in GRAPH_MODE(0) or PYNATIVE_MODE(1). Default: GRAPH_MODE(0).
GRAPH_MODE or PYNATIVE_MODE can be set by `mode` attribute and both modes support all backends, default
mode is GRAPH_MODE.
enable_graph_kernel (bool): Whether to enable graph kernel fusion to optimize network execution performance.
Default: False.
Indicates whether to enable image-computing convergence to optimize network execution performance.
If enable_graph_kernel is set to True, acceleration can be enabled.
For details of graph kernel fusion, please check
`Enabling Graph Kernel Fusion <https://www.mindspore.cn/docs/programming_guide
/en/master/enable_graph_kernel_fusion.html>`_.
        graph_kernel_flags (str):
Optimization options of graph kernel fusion, and the priority is higher when it conflicts
with enable_graph_kernel. Only for experienced users.
For example, context.set_context(graph_kernel_flags="--opt_level=2 --dump_as_text"). Some general options:
- opt_level: Set the optimization level.
Default: 2. Graph kernel fusion can be enabled equivalently by setting opt_level greater than 0.
Available values are:
- 0: Disable graph kernel fusion;
- 1: enable the basic fusion of operators;
- 2: includes all optimizations of level 1,
and turns on more optimizations such as CSE, arithmetic simplification and so on;
- 3: includes all optimizations of level 2, and turns on more optimizations such as SitchingFusion,
ParallelFusion and so on. Optimizations of this level are radical and unstable in some scenarios.
Be caution when using this level.
- dump_as_text: dump detail info as text files. Default: false.
More options can refer to the implementation code. These options can also be set by environment
variable MS_GRAPH_KERNEL_FLAGS, without modifying network source code.
For example, export MS_GRAPH_KERNEL_FLAGS="--opt_level=2 --dump_as_text".
enable_reduce_precision (bool): Whether to enable precision reduction.
If the operator does not support the user-specified precision, the precision will
be changed automatically. Default: True.
auto_tune_mode (str): The mode of auto tune when op building, get the best tiling performance.
Default: NO_TUNE. The value must be in ['RL', 'GA', 'RL,GA'].
- RL: Reinforcement Learning tune.
- GA: Genetic Algorithm tune.
- RL,GA: When both RL and GA optimization are enabled, the tool automatically selects RL or GA based on
different types of operators in the network model. The sequence of RL and GA is not differentiated.
(Automatic selection).
For more information about the enable operator tuning tool settings, please check
`Enable the operator optimization tool <https://www.mindspore.cn/docs/programming_guide/en
/master/enable_auto_tune.html>`_.
check_bprop (bool): Whether to check back propagation nodes. The checking ensures that the shape and dtype
of back propagation node outputs is the same as input parameters. Default: False.
max_call_depth (int): Specify the maximum depth of function call. Must be positive integer. Default: 1000.
The max_call_depth parameter needs to be set when the nested call is too deep or the number
of subgraphs is too large. If max_call_depth is set larger than before, the system max stack depth should be
set larger too, otherwise a `core dumped` exception may be raised because of system stack overflow.
enable_sparse (bool): Whether to enable sparsity feature. Default: False.
For details of sparsity and sparse tensor, please check
`sparse tensor <https://www.mindspore.cn/docs/programming_guide/en/master/tensor.html#sparse-tensor>`_.
grad_for_scalar (bool): Whether to get gradient for scalar. Default: False.
When grad_for_scalar is set to True, the function's scalar input can be derived.
The default value is False. Because the back-end does not support scaling operations currently,
this interface only supports simple operations that can be deduced by the front-end.
enable_compile_cache (bool): Whether to save or load the cache of the graph compiled by front-end.
After enable_compile_cache is set to True, during the first execution, a hardware-independent
compilation cache is generated and exported to a MINDIR file. When the network is executed again,
if enable_compile_cache is still set to True and the network scripts are not changed,
the compile cache is loaded. Note that only limited automatic detection for the changes of
python scripts is supported by now, which means that there is a correctness risk. Default: False.
This is an experimental prototype that is subject to change and/or deletion.
compile_cache_path (str): Path to save the cache of the graph compiled by front-end. Default: ".".
If the specified directory does not exist, the system will automatically create the directory.
The cache will be saved to the directory of `compile_cache_path/rank_${rank_id}/`. The `rank_id` is
the ID of the current device in the cluster.
Raises:
ValueError: If input key is not an attribute in context.
Examples:
>>> context.set_context(mode=context.PYNATIVE_MODE)
>>> context.set_context(precompile_only=True)
>>> context.set_context(device_target="Ascend")
>>> context.set_context(device_id=0)
>>> context.set_context(save_graphs=True, save_graphs_path="./model.ms")
>>> context.set_context(enable_reduce_precision=True)
>>> context.set_context(enable_dump=True, save_dump_path=".")
>>> context.set_context(enable_graph_kernel=True)
>>> context.set_context(graph_kernel_flags="--opt_level=2 --dump_as_text")
>>> context.set_context(reserve_class_name_in_scope=True)
>>> context.set_context(variable_memory_max_size="6GB")
>>> context.set_context(enable_profiling=True,
... profiling_options='{"output":"/home/data/output","training_trace":"on"}')
>>> context.set_context(check_bprop=True)
>>> context.set_context(max_device_memory="3.5GB")
>>> context.set_context(print_file_path="print.pb")
>>> context.set_context(enable_sparse=True)
>>> context.set_context(max_call_depth=80)
>>> context.set_context(env_config_path="./env_config.json")
>>> context.set_context(auto_tune_mode="GA,RL")
>>> context.set_context(grad_for_scalar=True)
>>> context.set_context(enable_compile_cache=True, compile_cache_path="./cache.ms")
>>> context.set_context(pynative_synchronize=True)
"""
ctx = _context()
# set device target first
if 'device_target' in kwargs:
ctx.set_device_target(kwargs['device_target'])
device = ctx.get_param(ms_ctx_param.device_target)
if not device.lower() in __device_target__:
raise ValueError(f"Error, package type {__package_name__} support device type {__device_target__}, "
f"but got device target {device}")
device = ctx.get_param(ms_ctx_param.device_target)
for key, value in kwargs.items():
if key in ('enable_profiling', 'profiling_options', 'enable_auto_mixed_precision',
'enable_dump', 'save_dump_path'):
logger.warning(f" '{key}' parameters will be deprecated."
"For details, please see the interface parameter API comments")
continue
if not _check_target_specific_cfgs(device, key):
continue
if hasattr(ctx, key):
setattr(ctx, key, value)
continue
if key in ctx.setters:
ctx.setters[key](ctx, value)
continue
# enum variables beginning with '_' are for internal use
if key in ms_ctx_param.__members__ and key[0] != '_':
ctx.set_param(ms_ctx_param.__members__[key], value)
continue
raise ValueError(f"For 'context.set_context', the keyword argument {key} is not recognized! For detailed "
f"usage of 'set_context', please refer to the Mindspore official website.")
def get_context(attr_key):
    """
    Query the value of a context attribute by key.

    Attributes that have not been set explicitly are resolved to their
    automatically determined values.

    Args:
        attr_key (str): The key of the attribute.

    Returns:
        Object, the value associated with the given attribute key.

    Raises:
        ValueError: If the key does not name a context attribute.

    Examples:
        >>> context.get_context("device_target")
        >>> context.get_context("device_id")
    """
    ctx = _context()
    device = ctx.get_param(ms_ctx_param.device_target)
    # Emit any device-specific warnings; the result itself is not needed here.
    _ = _check_target_specific_cfgs(device, attr_key)
    if hasattr(ctx, attr_key):
        return getattr(ctx, attr_key)
    # Enum members whose names start with '_' are reserved for internal use.
    if attr_key in ms_ctx_param.__members__ and not attr_key.startswith('_'):
        return ctx.get_param(ms_ctx_param.__members__[attr_key])
    raise ValueError(f"For 'context.get_context', the argument {attr_key} is not recognized! For detailed "
                     f"usage of 'get_context', please refer to the Mindspore official website.")
def _get_mode():
    """
    Return the current execution mode. Internal use only.

    Returns:
        Object: the value of the execution mode.
    """
    return _context().get_mode()
class ParallelMode:
    """
    Parallel mode options.

    Five parallel modes are supported: "STAND_ALONE", "DATA_PARALLEL",
    "HYBRID_PARALLEL", "SEMI_AUTO_PARALLEL" and "AUTO_PARALLEL".
    Default: "STAND_ALONE".

    - STAND_ALONE: Only one processor is working.
    - DATA_PARALLEL: Distributes the data across different processors.
    - HYBRID_PARALLEL: Achieves data parallelism and model parallelism manually.
    - SEMI_AUTO_PARALLEL: Achieves data parallelism and model parallelism by setting parallel strategies.
    - AUTO_PARALLEL: Achieves parallelism automatically.

    MODE_LIST: The list of all supported parallel modes.
    """
    STAND_ALONE = "stand_alone"
    DATA_PARALLEL = "data_parallel"
    HYBRID_PARALLEL = "hybrid_parallel"
    SEMI_AUTO_PARALLEL = "semi_auto_parallel"
    AUTO_PARALLEL = "auto_parallel"
    MODE_LIST = [STAND_ALONE, DATA_PARALLEL, HYBRID_PARALLEL,
                 SEMI_AUTO_PARALLEL, AUTO_PARALLEL]
@args_type_check(enable_ps=bool)
def set_ps_context(**kwargs):
    """
    Set the context for parameter server training mode.

    Note:
        Parameter server training also relies on several environment
        variables, listed below:

        MS_SERVER_NUM: Server number
        MS_WORKER_NUM: Worker number
        MS_SCHED_HOST: Scheduler IP address
        MS_SCHED_PORT: Scheduler port
        MS_ROLE: The role of this process:
        MS_SCHED: represents the scheduler,
        MS_WORKER: represents the worker,
        MS_PSERVER: represents the Server

    Args:
        enable_ps (bool): Whether parameter server training mode is enabled.
            The environment variables above only take effect after enable_ps
            is set to True. Default: False.

    Raises:
        ValueError: If a keyword is not an attribute of the parameter server
            training mode context.

    Examples:
        >>> context.set_ps_context(enable_ps=True)
    """
    _set_ps_context(**kwargs)
def get_ps_context(attr_key):
    """
    Query a parameter server training mode context attribute by key.

    Args:
        attr_key (str): The key of the attribute:

            - enable_ps (bool): Whether parameter server training mode is enabled.

    Returns:
        The attribute value associated with the given key.

    Raises:
        ValueError: If input key is not attribute in auto parallel context.

    Examples:
        >>> context.get_ps_context(enable_ps)
    """
    return _get_ps_context(attr_key)
def reset_ps_context():
    """
    Restore the parameter server training mode context to its default values:

    - enable_ps: False.
    """
    _reset_ps_context()
def set_fl_context(**kwargs):
    """
    Set federated learning training mode context.

    Args:
        enable_fl (bool): Whether to enable federated learning training mode.
            Default: False.
        server_mode (str): Describe the server mode, which must be one of 'FEDERATED_LEARNING' and 'HYBRID_TRAINING'.
            Default: 'FEDERATED_LEARNING'.
        ms_role (str): The process's role in the federated learning mode,
            which must be one of 'MS_SERVER', 'MS_WORKER' and 'MS_SCHED'.
            Default: 'MS_SERVER'.
        worker_num (int): The number of workers. For current version, this must be set to 1 or 0.
        server_num (int): The number of federated learning servers. Default: 0.
        scheduler_ip (str): The scheduler IP. Default: '0.0.0.0'.
        scheduler_port (int): The scheduler port. Default: 6667.
        fl_server_port (int): The http port of the federated learning server.
            Normally for each server this should be set to the same value. Default: 6668.
        enable_fl_client (bool): Whether this process is federated learning client. Default: False.
        start_fl_job_threshold (int): The threshold count of startFLJob. Default: 1.
        start_fl_job_time_window (int): The time window duration for startFLJob in millisecond. Default: 3000.
        share_secrets_ratio (float): The ratio for computing the threshold count of share secrets. Default: 1.0.
        update_model_ratio (float): The ratio for computing the threshold count of updateModel. Default: 1.0.
        cipher_time_window (int): The time window duration for each cipher round in millisecond. Default: 300000.
        reconstruct_secrets_threshold (int): The threshold count of reconstruct threshold. Default: 0.
        update_model_time_window (int): The time window duration for updateModel in millisecond. Default: 3000.
        fl_name (string): The federated learning job name. Default: ''.
        fl_iteration_num (int): Iteration number of federated learning,
            which is the number of interactions between client and server. Default: 20.
        client_epoch_num (int): Client training epoch number. Default: 25.
        client_batch_size (int): Client training data batch size. Default: 32.
        client_learning_rate (float): Client training learning rate. Default: 0.001.
        worker_step_num_per_iteration (int): The worker's standalone training step number before communicating with
            server. Default: 65.
        dp_eps (float): Epsilon budget of differential privacy mechanism. The smaller the dp_eps, the better the
            privacy protection effect. Default: 50.0.
        dp_delta (float): Delta budget of differential privacy mechanism, which usually equals the reciprocal of
            client number. The smaller the dp_delta, the better the privacy protection effect. Default: 0.01.
        dp_norm_clip (float): A factor used for clipping model's weights for differential mechanism. Its value is
            suggested to be 0.5~2. Default: 1.0.
        encrypt_type (string): Secure schema for federated learning, which can be 'NOT_ENCRYPT', 'DP_ENCRYPT',
            'PW_ENCRYPT' or 'STABLE_PW_ENCRYPT'. If 'DP_ENCRYPT', differential privacy schema would be applied
            for clients and the privacy protection effect would be determined by dp_eps, dp_delta and dp_norm_clip
            as described above. If 'PW_ENCRYPT', pairwise secure aggregation would be applied to protect clients'
            model from stealing in cross-device scenario. If 'STABLE_PW_ENCRYPT', pairwise secure aggregation would
            be applied to protect clients' model from stealing in cross-silo scenario. Default: 'NOT_ENCRYPT'.
        config_file_path (string): Configuration file path used by recovery. Default: ''.
        scheduler_manage_port (int): scheduler manage port used to scale out/in. Default: 11202.
        enable_ssl (bool): Set PS SSL mode enabled or disabled. Default: true.
        client_password (str): Password to decrypt the secret key stored in the client certificate.
        server_password (str): Password to decrypt the secret key stored in the server certificate.

    Raises:
        ValueError: If input key is not the attribute in federated learning mode context.

    Examples:
        >>> context.set_fl_context(enable_fl=True, server_mode='FEDERATED_LEARNING')
    """
    # Federated learning settings are stored in the same underlying
    # parameter-server context, hence the shared setter.
    _set_ps_context(**kwargs)
def get_fl_context(attr_key):
    """
    Query a federated learning mode context attribute by key.

    Args:
        attr_key (str): The key of the attribute.
            See the parameters of `set_fl_context` for the set of valid keys.

    Returns:
        The attribute value associated with the given key.

    Raises:
        ValueError: If input key is not attribute in federated learning mode context.

    Examples:
        >>> context.get_fl_context("server_mode")
    """
    return _get_ps_context(attr_key)
"""Tests for substrate."""
import dataclasses
from absl.testing import absltest
from absl.testing import parameterized
import dm_env
import numpy as np
from meltingpot.python import substrate
REWARD_SPEC = dm_env.specs.Array(shape=[], dtype=np.float64, name='REWARD')
ACTION_SPEC = dm_env.specs.DiscreteArray(
num_values=1, dtype=np.int32, name='action')
def _get_lua_randomization_map():
"""Replaces first row of walls with items randomized by Lua."""
config = substrate.get_config('running_with_scissors_in_the_matrix')
head, line, *tail = config.lab2d_settings.simulation.map.split('\n')
# Replace line 1 (walls) with a row of 'a' (items randomized by Lua).
new_map = '\n'.join([head, 'a' * len(line), *tail])
return new_map
_LUA_RANDOMIZED_LINE = 1
_LUA_RANDOMIZATION_MAP = _get_lua_randomization_map()
class SubstrateTest(parameterized.TestCase):
@parameterized.product(seed=[42, 123, 1337, 12481632])
def test_seed_causes_determinism(self, seed):
config = substrate.get_config('running_with_scissors_in_the_matrix')
with config.unlocked():
config.env_seed = seed
env1 = substrate.build(config)
env2 = substrate.build(config)
for episode in range(5):
obs1 = env1.reset().observation[0]['WORLD.RGB']
obs2 = env2.reset().observation[0]['WORLD.RGB']
np.testing.assert_equal(
obs1, obs2, f'Episode {episode} mismatch: {obs1} != {obs2} ')
@parameterized.product(seed=[None, 42, 123, 1337, 12481632])
def test_episodes_are_randomized(self, seed):
config = substrate.get_config('running_with_scissors_in_the_matrix')
with config.unlocked():
config.env_seed = seed
env = substrate.build(config)
obs = env.reset().observation[0]['WORLD.RGB']
for episode in range(4):
last_obs = obs
obs = env.reset().observation[0]['WORLD.RGB']
with self.assertRaises(
AssertionError,
msg=f'Episodes {episode} and {episode+1} match: {last_obs} == {obs}'):
np.testing.assert_equal(last_obs, obs)
def test_no_seed_causes_nondeterminism(self):
config = substrate.get_config('running_with_scissors_in_the_matrix')
with config.unlocked():
config.env_seed = None
env1 = substrate.build(config)
env2 = substrate.build(config)
for episode in range(5):
obs1 = env1.reset().observation[0]['WORLD.RGB']
obs2 = env2.reset().observation[0]['WORLD.RGB']
with self.assertRaises(
AssertionError, msg=f'Episode {episode} match: {obs1} == {obs2}'):
np.testing.assert_equal(obs1, obs2)
@parameterized.product(seed=[None, 42, 123, 1337, 12481632])
def test_episodes_are_randomized_in_lua(self, seed):
config = substrate.get_config('running_with_scissors_in_the_matrix')
with config.unlocked():
config.env_seed = seed
config.lab2d_settings.simulation.map = _LUA_RANDOMIZATION_MAP
env = substrate.build(config)
obs = env.reset().observation[0]['WORLD.RGB'][_LUA_RANDOMIZED_LINE]
for episode in range(4):
last_obs = obs
obs = env.reset().observation[0]['WORLD.RGB'][_LUA_RANDOMIZED_LINE]
with self.assertRaises(
AssertionError,
msg=f'Episodes {episode} and {episode+1} match: {last_obs} == {obs}'):
np.testing.assert_equal(last_obs, obs)
def test_no_seed_causes_nondeterminism_for_lua(self):
config = substrate.get_config('running_with_scissors_in_the_matrix')
with config.unlocked():
config.env_seed = None
config.lab2d_settings.simulation.map = _LUA_RANDOMIZATION_MAP
env1 = substrate.build(config)
env2 = substrate.build(config)
for episode in range(5):
obs1 = env1.reset().observation[0]['WORLD.RGB'][_LUA_RANDOMIZED_LINE]
obs2 = env2.reset().observation[0]['WORLD.RGB'][_LUA_RANDOMIZED_LINE]
with self.assertRaises(
AssertionError, msg=f'Episode {episode} match {obs1} == {obs2}'):
np.testing.assert_equal(obs1, obs2)
@parameterized.named_parameters(
(name, name) for name in substrate.AVAILABLE_SUBSTRATES)
def test_substrate_creation(self, substrate_name):
config = substrate.get_config(substrate_name)
with substrate.build(config) as env:
reset_timestep = env.reset()
action_spec = env.action_spec()
observation_spec = env.observation_spec()
reward_spec = env.reward_spec()
with self.subTest('reset_reward'):
self.assertNotEqual(reset_timestep.reward, None)
with self.subTest('reset_discount'):
self.assertNotEqual(reset_timestep.discount, None)
with self.subTest('observation_spec'):
self.assertLen(observation_spec, config.num_players)
with self.subTest('action_spec'):
spec = ACTION_SPEC.replace(num_values=len(config.action_set))
self.assertEqual(action_spec, (spec,) * config.num_players)
with self.subTest('reward_spec'):
self.assertEqual(reward_spec, [REWARD_SPEC] * config.num_players)
def test_observables(self):
config = substrate.get_config('running_with_scissors_in_the_matrix')
with substrate.build(config) as env:
received = []
observables = env.observables()
for field in dataclasses.fields(observables):
getattr(observables, field.name).subscribe(
on_next=received.append,
on_error=lambda e: received.append(type(e)),
on_completed=lambda: received.append('DONE'),
)
expected = []
timestep = env.reset()
events = list(env.events())
expected.extend([timestep] + events)
for n in range(2):
action = [n] * config.num_players
timestep = env.step(action)
events = list(env.events())
expected.extend([action, timestep] + events)
expected.extend(['DONE', 'DONE', 'DONE'])
self.assertEqual(received, expected)
if __name__ == '__main__':
absltest.main() | meltingpot/python/substrate_test.py | """Tests for substrate."""
import dataclasses
from absl.testing import absltest
from absl.testing import parameterized
import dm_env
import numpy as np
from meltingpot.python import substrate
REWARD_SPEC = dm_env.specs.Array(shape=[], dtype=np.float64, name='REWARD')
ACTION_SPEC = dm_env.specs.DiscreteArray(
num_values=1, dtype=np.int32, name='action')
def _get_lua_randomization_map():
"""Replaces first row of walls with items randomized by Lua."""
config = substrate.get_config('running_with_scissors_in_the_matrix')
head, line, *tail = config.lab2d_settings.simulation.map.split('\n')
# Replace line 1 (walls) with a row of 'a' (items randomized by Lua).
new_map = '\n'.join([head, 'a' * len(line), *tail])
return new_map
_LUA_RANDOMIZED_LINE = 1
_LUA_RANDOMIZATION_MAP = _get_lua_randomization_map()
class SubstrateTest(parameterized.TestCase):
@parameterized.product(seed=[42, 123, 1337, 12481632])
def test_seed_causes_determinism(self, seed):
config = substrate.get_config('running_with_scissors_in_the_matrix')
with config.unlocked():
config.env_seed = seed
env1 = substrate.build(config)
env2 = substrate.build(config)
for episode in range(5):
obs1 = env1.reset().observation[0]['WORLD.RGB']
obs2 = env2.reset().observation[0]['WORLD.RGB']
np.testing.assert_equal(
obs1, obs2, f'Episode {episode} mismatch: {obs1} != {obs2} ')
@parameterized.product(seed=[None, 42, 123, 1337, 12481632])
def test_episodes_are_randomized(self, seed):
config = substrate.get_config('running_with_scissors_in_the_matrix')
with config.unlocked():
config.env_seed = seed
env = substrate.build(config)
obs = env.reset().observation[0]['WORLD.RGB']
for episode in range(4):
last_obs = obs
obs = env.reset().observation[0]['WORLD.RGB']
with self.assertRaises(
AssertionError,
msg=f'Episodes {episode} and {episode+1} match: {last_obs} == {obs}'):
np.testing.assert_equal(last_obs, obs)
def test_no_seed_causes_nondeterminism(self):
config = substrate.get_config('running_with_scissors_in_the_matrix')
with config.unlocked():
config.env_seed = None
env1 = substrate.build(config)
env2 = substrate.build(config)
for episode in range(5):
obs1 = env1.reset().observation[0]['WORLD.RGB']
obs2 = env2.reset().observation[0]['WORLD.RGB']
with self.assertRaises(
AssertionError, msg=f'Episode {episode} match: {obs1} == {obs2}'):
np.testing.assert_equal(obs1, obs2)
@parameterized.product(seed=[None, 42, 123, 1337, 12481632])
def test_episodes_are_randomized_in_lua(self, seed):
config = substrate.get_config('running_with_scissors_in_the_matrix')
with config.unlocked():
config.env_seed = seed
config.lab2d_settings.simulation.map = _LUA_RANDOMIZATION_MAP
env = substrate.build(config)
obs = env.reset().observation[0]['WORLD.RGB'][_LUA_RANDOMIZED_LINE]
for episode in range(4):
last_obs = obs
obs = env.reset().observation[0]['WORLD.RGB'][_LUA_RANDOMIZED_LINE]
with self.assertRaises(
AssertionError,
msg=f'Episodes {episode} and {episode+1} match: {last_obs} == {obs}'):
np.testing.assert_equal(last_obs, obs)
def test_no_seed_causes_nondeterminism_for_lua(self):
config = substrate.get_config('running_with_scissors_in_the_matrix')
with config.unlocked():
config.env_seed = None
config.lab2d_settings.simulation.map = _LUA_RANDOMIZATION_MAP
env1 = substrate.build(config)
env2 = substrate.build(config)
for episode in range(5):
obs1 = env1.reset().observation[0]['WORLD.RGB'][_LUA_RANDOMIZED_LINE]
obs2 = env2.reset().observation[0]['WORLD.RGB'][_LUA_RANDOMIZED_LINE]
with self.assertRaises(
AssertionError, msg=f'Episode {episode} match {obs1} == {obs2}'):
np.testing.assert_equal(obs1, obs2)
@parameterized.named_parameters(
(name, name) for name in substrate.AVAILABLE_SUBSTRATES)
def test_substrate_creation(self, substrate_name):
config = substrate.get_config(substrate_name)
with substrate.build(config) as env:
reset_timestep = env.reset()
action_spec = env.action_spec()
observation_spec = env.observation_spec()
reward_spec = env.reward_spec()
with self.subTest('reset_reward'):
self.assertNotEqual(reset_timestep.reward, None)
with self.subTest('reset_discount'):
self.assertNotEqual(reset_timestep.discount, None)
with self.subTest('observation_spec'):
self.assertLen(observation_spec, config.num_players)
with self.subTest('action_spec'):
spec = ACTION_SPEC.replace(num_values=len(config.action_set))
self.assertEqual(action_spec, (spec,) * config.num_players)
with self.subTest('reward_spec'):
self.assertEqual(reward_spec, [REWARD_SPEC] * config.num_players)
def test_observables(self):
config = substrate.get_config('running_with_scissors_in_the_matrix')
with substrate.build(config) as env:
received = []
observables = env.observables()
for field in dataclasses.fields(observables):
getattr(observables, field.name).subscribe(
on_next=received.append,
on_error=lambda e: received.append(type(e)),
on_completed=lambda: received.append('DONE'),
)
expected = []
timestep = env.reset()
events = list(env.events())
expected.extend([timestep] + events)
for n in range(2):
action = [n] * config.num_players
timestep = env.step(action)
events = list(env.events())
expected.extend([action, timestep] + events)
expected.extend(['DONE', 'DONE', 'DONE'])
self.assertEqual(received, expected)
if __name__ == '__main__':
absltest.main() | 0.641422 | 0.464112 |
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
PI = 3.1415926535897
def main():
K = int(args[2])
cancer_type_list = load_types(args[1])
cancer_type_num = []
for x in cancer_type_list:
File = open('data/data' + args[1] + '_' + x + '.txt', 'r')
cancer_type_num.append(int(File.readline().split()[0]))
File.close()
doc_num = sum(cancer_type_num)
doc_arrange, alpha_list, Average_arrange = load_result(doc_num, \
cancer_type_list, cancer_type_num)
for i,x in enumerate(cancer_type_list):
left = range(1, K+1)
height = alpha_list[i]
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.bar(left, height, width=1, align="center")
ax1.set_xlim(0, K+1)
ax1.set_ylim(0, 5.0)
ax1.set_xticks(left)
ax1.tick_params(labelsize=10)
ax1.set_xlabel("Signature", fontsize=10)
ax1.set_ylabel("Alpha", fontsize=10)
Title = x + " alpha"
ax1.set_title(Title, fontsize=10)
fig.tight_layout()
name = "result/data" + args[1] + "/figure/" + args[2] + "_arrangement/" +\
x + "_alpha.png"
fig.savefig(name, dpi=200)
plt.close(1)
def load_types(data_type):
File = open('data/PL_data' + data_type + '.txt', 'r')
File.readline()
cancer_type_list = []
for line in File.readlines():
cancer_type_list.append(line[:-1])
File.close()
return cancer_type_list
def load_result(doc_num, cancer_type_list, cancer_type_num):
K = int(args[2])
doc_arrange = []
alpha_list = []
if(K <= 9):
topic = '0' + args[2]
else:
topic = args[2]
File = open('result/data' + args[1] + '/result_k' + topic + '.txt', 'r')
File.readline(); File.readline();
for i in range(K):
File.readline()
for i in range(doc_num):
doc_arrange.append([])
temp_list = File.readline().split()
for j in range(K):
doc_arrange[i].append(float(temp_list[j]))
for i in range(len(cancer_type_list)):
alpha_list.append([])
temp_list = File.readline().split()
for j in range(K):
alpha_list[i].append(float(temp_list[j]))
File.close()
Average_arrange = []
index = 0
for i in range(len(cancer_type_list)):
Average_arrange.append([0 for k in range(K)])
for j in range(cancer_type_num[i]):
for k in range(K):
Average_arrange[i][k] += doc_arrange[index][k]
index += 1
for k in range(K):
Average_arrange[i][k] /= cancer_type_num[i]
return doc_arrange, alpha_list, Average_arrange
if __name__ == '__main__':
args = sys.argv ## args[1] : data_type, [2] : num_topic
main() | Drawing/draw_alpha.py | import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
PI = 3.1415926535897
def main():
K = int(args[2])
cancer_type_list = load_types(args[1])
cancer_type_num = []
for x in cancer_type_list:
File = open('data/data' + args[1] + '_' + x + '.txt', 'r')
cancer_type_num.append(int(File.readline().split()[0]))
File.close()
doc_num = sum(cancer_type_num)
doc_arrange, alpha_list, Average_arrange = load_result(doc_num, \
cancer_type_list, cancer_type_num)
for i,x in enumerate(cancer_type_list):
left = range(1, K+1)
height = alpha_list[i]
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.bar(left, height, width=1, align="center")
ax1.set_xlim(0, K+1)
ax1.set_ylim(0, 5.0)
ax1.set_xticks(left)
ax1.tick_params(labelsize=10)
ax1.set_xlabel("Signature", fontsize=10)
ax1.set_ylabel("Alpha", fontsize=10)
Title = x + " alpha"
ax1.set_title(Title, fontsize=10)
fig.tight_layout()
name = "result/data" + args[1] + "/figure/" + args[2] + "_arrangement/" +\
x + "_alpha.png"
fig.savefig(name, dpi=200)
plt.close(1)
def load_types(data_type):
File = open('data/PL_data' + data_type + '.txt', 'r')
File.readline()
cancer_type_list = []
for line in File.readlines():
cancer_type_list.append(line[:-1])
File.close()
return cancer_type_list
def load_result(doc_num, cancer_type_list, cancer_type_num):
K = int(args[2])
doc_arrange = []
alpha_list = []
if(K <= 9):
topic = '0' + args[2]
else:
topic = args[2]
File = open('result/data' + args[1] + '/result_k' + topic + '.txt', 'r')
File.readline(); File.readline();
for i in range(K):
File.readline()
for i in range(doc_num):
doc_arrange.append([])
temp_list = File.readline().split()
for j in range(K):
doc_arrange[i].append(float(temp_list[j]))
for i in range(len(cancer_type_list)):
alpha_list.append([])
temp_list = File.readline().split()
for j in range(K):
alpha_list[i].append(float(temp_list[j]))
File.close()
Average_arrange = []
index = 0
for i in range(len(cancer_type_list)):
Average_arrange.append([0 for k in range(K)])
for j in range(cancer_type_num[i]):
for k in range(K):
Average_arrange[i][k] += doc_arrange[index][k]
index += 1
for k in range(K):
Average_arrange[i][k] /= cancer_type_num[i]
return doc_arrange, alpha_list, Average_arrange
if __name__ == '__main__':
args = sys.argv ## args[1] : data_type, [2] : num_topic
main() | 0.075833 | 0.28951 |
import os
import xgboost as xgb
from time_series_detector.feature import feature_service
from time_series_detector.common.tsd_errorcode import *
from time_series_detector.common.tsd_common import *
MODEL_PATH = os.path.join(os.path.dirname(__file__), '../model/')
DEFAULT_MODEL = MODEL_PATH + "xgb_default_model"
class XGBoosting(object):
    """
    XGBoost-based time-series anomaly detector.

    XGBoost is an optimized distributed gradient boosting library designed to be highly efficient,
    flexible and portable. It implements machine learning algorithms under the Gradient Boosting framework.
    XGBoost provides a parallel tree boosting (also known as GBDT, GBM) that solve many data science problems
    in a fast and accurate way. The same code runs on major distributed environment (Hadoop, SGE, MPI)
    and can solve problems beyond billions of examples.
    https://github.com/dmlc/xgboost
    """

    def __init__(self,
                 threshold=0.15,
                 max_depth=10,
                 eta=0.05,
                 gamma=0.1,
                 silent=1,
                 min_child_weight=1,
                 subsample=0.8,
                 colsample_bytree=1,
                 booster='gbtree',
                 objective='binary:logistic',
                 eval_metric='auc'):
        """
        :param threshold: The critical point of normal; scores below it are classified abnormal.
        :param max_depth: Maximum tree depth for base learners.
        :param eta: Learning rate; smaller values are more robust to overfitting but slower to compute.
        :param gamma: Minimum loss reduction required to make a further partition on a leaf node of the tree.
        :param silent: If 1, it will print information about performance. If 2, some additional information will be printed out.
        :param min_child_weight: Minimum sum of instance weight (hessian) needed in a child.
        :param subsample: Subsample ratio of the training instances.
        :param colsample_bytree: Subsample ratio of columns when constructing each tree.
        :param booster: Which booster to use: gbtree, gblinear or dart.
        :param objective: The learning task and corresponding learning objective, or a custom objective function.
        :param eval_metric: If a str, a built-in evaluation metric to use. If callable, a custom evaluation metric.
        """
        self.threshold = threshold
        self.max_depth = max_depth
        self.eta = eta
        self.gamma = gamma
        self.silent = silent
        self.min_child_weight = min_child_weight
        self.subsample = subsample
        self.colsample_bytree = colsample_bytree
        self.booster = booster
        self.objective = objective
        self.eval_metric = eval_metric

    def __save_libsvm_format(self, data, feature_file_name):
        """
        Save the time features to libsvm format ("<label> <idx>:<value> ...").

        :param data: list of [feature_values, label] pairs
        :param feature_file_name: file that saves the time features and label
        :return: (error_code, error_message) tuple
        """
        try:
            # `with` guarantees the handle is closed on every path; the
            # previous version leaked the open file object, especially when
            # a write failed.
            with open(feature_file_name, "w") as f:
                for row_index, temp in enumerate(data):
                    # Newline *between* rows only, so no trailing blank line.
                    if row_index > 0:
                        f.write("\n")
                    # libsvm feature indices are 1-based.
                    result = ['{0}:{1}'.format(int(index) + 1, value) for index, value in enumerate(temp[0])]
                    f.write(str(temp[1]))
                    for x in result:
                        f.write(' ' + x)
        except Exception as ex:
            return TSD_CAL_FEATURE_ERR, str(ex)
        return TSD_OP_SUCCESS, ""

    def __calculate_features(self, data, feature_file_name, window=DEFAULT_WINDOW):
        """
        Calculate time features and save them as libsvm format.

        :param data: iterable of samples, each a dict with "data" (series) and "flag" (label)
        :param feature_file_name: the file to write features to
        :param window: the length of the feature window
        :return: (error_code, error_message) tuple
        """
        features = []
        for sample in data:
            # Only well-formed series of the expected window length are usable.
            if is_standard_time_series(sample["data"], window):
                features.append([feature_service.extract_features(sample["data"], window), sample["flag"]])
        try:
            ret_code, ret_data = self.__save_libsvm_format(features, feature_file_name)
        except Exception as ex:
            ret_code = TSD_CAL_FEATURE_ERR
            ret_data = str(ex)
        return ret_code, ret_data

    def xgb_train(self, data, task_id, num_round=300):
        """
        Train an xgboost model and save it under MODEL_PATH.

        :param data: Training dataset.
        :param task_id: The id of the training task; used to name model/feature files.
        :param num_round: Max number of boosting iterations.
        :return: (error_code, error_message) tuple
        """
        model_name = MODEL_PATH + task_id + "_model"
        feature_file_name = MODEL_PATH + task_id + "_features"
        ret_code, ret_data = self.__calculate_features(data, feature_file_name)
        if ret_code != TSD_OP_SUCCESS:
            return ret_code, ret_data
        try:
            # DMatrix can load the libsvm file written above directly.
            dtrain = xgb.DMatrix(feature_file_name)
        except Exception as ex:
            return TSD_READ_FEATURE_FAILED, str(ex)
        params = {
            'max_depth': self.max_depth,
            'eta': self.eta,
            'gamma': self.gamma,
            'silent': self.silent,
            'min_child_weight': self.min_child_weight,
            'subsample': self.subsample,
            'colsample_bytree': self.colsample_bytree,
            'booster': self.booster,
            'objective': self.objective,
            'eval_metric': self.eval_metric,
        }
        try:
            bst = xgb.train(params, dtrain, num_round)
            bst.save_model(model_name)
        except Exception as ex:
            return TSD_TRAIN_ERR, str(ex)
        return TSD_OP_SUCCESS, ""

    def predict(self, X, window=DEFAULT_WINDOW, model_name=DEFAULT_MODEL):
        """
        Use an xgboost model to predict whether a sample is an outlier.

        :param X: the time series to detect of
        :type X: pandas.Series
        :param window: the length of window
        :param model_name: path of the trained xgboost model to load
        :return: [label, score]; label 1 denotes normal, 0 denotes abnormal.
        """
        if is_standard_time_series(X, window):
            ts_features = []
            # Leading 10 — presumably a placeholder label column so the
            # vector lines up with the training layout; TODO confirm.
            features = [10]
            features.extend(feature_service.extract_features(X, window))
            ts_features.append(features)
            res_pred = xgb.DMatrix(np.array(ts_features))
            bst = xgb.Booster({'nthread': 4})
            bst.load_model(model_name)
            xgb_ret = bst.predict(res_pred)
            # Scores below the threshold are classified as abnormal.
            if xgb_ret[0] < self.threshold:
                value = 0
            else:
                value = 1
            return [value, xgb_ret[0]]
        else:
            return [0, 0]


import os
import xgboost as xgb
from time_series_detector.feature import feature_service
from time_series_detector.common.tsd_errorcode import *
from time_series_detector.common.tsd_common import *
# Directory that holds trained models, resolved relative to this module.
MODEL_PATH = os.path.join(os.path.dirname(__file__), '../model/')
# Fallback model used by predict() when no task-specific model is given.
DEFAULT_MODEL = MODEL_PATH + "xgb_default_model"
class XGBoosting(object):
    """
    XGBoost-based time-series anomaly detector.

    XGBoost is an optimized distributed gradient boosting library designed to be highly efficient,
    flexible and portable. It implements machine learning algorithms under the Gradient Boosting framework.
    XGBoost provides a parallel tree boosting (also known as GBDT, GBM) that solve many data science problems
    in a fast and accurate way. The same code runs on major distributed environment (Hadoop, SGE, MPI)
    and can solve problems beyond billions of examples.
    https://github.com/dmlc/xgboost
    """

    def __init__(self,
                 threshold=0.15,
                 max_depth=10,
                 eta=0.05,
                 gamma=0.1,
                 silent=1,
                 min_child_weight=1,
                 subsample=0.8,
                 colsample_bytree=1,
                 booster='gbtree',
                 objective='binary:logistic',
                 eval_metric='auc'):
        """
        :param threshold: The critical point of normal; scores below it are classified abnormal.
        :param max_depth: Maximum tree depth for base learners.
        :param eta: Learning rate; smaller values are more robust to overfitting but slower to compute.
        :param gamma: Minimum loss reduction required to make a further partition on a leaf node of the tree.
        :param silent: If 1, it will print information about performance. If 2, some additional information will be printed out.
        :param min_child_weight: Minimum sum of instance weight (hessian) needed in a child.
        :param subsample: Subsample ratio of the training instances.
        :param colsample_bytree: Subsample ratio of columns when constructing each tree.
        :param booster: Which booster to use: gbtree, gblinear or dart.
        :param objective: The learning task and corresponding learning objective, or a custom objective function.
        :param eval_metric: If a str, a built-in evaluation metric to use. If callable, a custom evaluation metric.
        """
        self.threshold = threshold
        self.max_depth = max_depth
        self.eta = eta
        self.gamma = gamma
        self.silent = silent
        self.min_child_weight = min_child_weight
        self.subsample = subsample
        self.colsample_bytree = colsample_bytree
        self.booster = booster
        self.objective = objective
        self.eval_metric = eval_metric

    def __save_libsvm_format(self, data, feature_file_name):
        """
        Save the time features to libsvm format ("<label> <idx>:<value> ...").

        :param data: list of [feature_values, label] pairs
        :param feature_file_name: file that saves the time features and label
        :return: (error_code, error_message) tuple
        """
        try:
            # `with` guarantees the handle is closed on every path; the
            # previous version leaked the open file object, especially when
            # a write failed.
            with open(feature_file_name, "w") as f:
                for row_index, temp in enumerate(data):
                    # Newline *between* rows only, so no trailing blank line.
                    if row_index > 0:
                        f.write("\n")
                    # libsvm feature indices are 1-based.
                    result = ['{0}:{1}'.format(int(index) + 1, value) for index, value in enumerate(temp[0])]
                    f.write(str(temp[1]))
                    for x in result:
                        f.write(' ' + x)
        except Exception as ex:
            return TSD_CAL_FEATURE_ERR, str(ex)
        return TSD_OP_SUCCESS, ""

    def __calculate_features(self, data, feature_file_name, window=DEFAULT_WINDOW):
        """
        Calculate time features and save them as libsvm format.

        :param data: iterable of samples, each a dict with "data" (series) and "flag" (label)
        :param feature_file_name: the file to write features to
        :param window: the length of the feature window
        :return: (error_code, error_message) tuple
        """
        features = []
        for sample in data:
            # Only well-formed series of the expected window length are usable.
            if is_standard_time_series(sample["data"], window):
                features.append([feature_service.extract_features(sample["data"], window), sample["flag"]])
        try:
            ret_code, ret_data = self.__save_libsvm_format(features, feature_file_name)
        except Exception as ex:
            ret_code = TSD_CAL_FEATURE_ERR
            ret_data = str(ex)
        return ret_code, ret_data

    def xgb_train(self, data, task_id, num_round=300):
        """
        Train an xgboost model and save it under MODEL_PATH.

        :param data: Training dataset.
        :param task_id: The id of the training task; used to name model/feature files.
        :param num_round: Max number of boosting iterations.
        :return: (error_code, error_message) tuple
        """
        model_name = MODEL_PATH + task_id + "_model"
        feature_file_name = MODEL_PATH + task_id + "_features"
        ret_code, ret_data = self.__calculate_features(data, feature_file_name)
        if ret_code != TSD_OP_SUCCESS:
            return ret_code, ret_data
        try:
            # DMatrix can load the libsvm file written above directly.
            dtrain = xgb.DMatrix(feature_file_name)
        except Exception as ex:
            return TSD_READ_FEATURE_FAILED, str(ex)
        params = {
            'max_depth': self.max_depth,
            'eta': self.eta,
            'gamma': self.gamma,
            'silent': self.silent,
            'min_child_weight': self.min_child_weight,
            'subsample': self.subsample,
            'colsample_bytree': self.colsample_bytree,
            'booster': self.booster,
            'objective': self.objective,
            'eval_metric': self.eval_metric,
        }
        try:
            bst = xgb.train(params, dtrain, num_round)
            bst.save_model(model_name)
        except Exception as ex:
            return TSD_TRAIN_ERR, str(ex)
        return TSD_OP_SUCCESS, ""

    def predict(self, X, window=DEFAULT_WINDOW, model_name=DEFAULT_MODEL):
        """
        Use an xgboost model to predict whether a sample is an outlier.

        :param X: the time series to detect of
        :type X: pandas.Series
        :param window: the length of window
        :param model_name: path of the trained xgboost model to load
        :return: [label, score]; label 1 denotes normal, 0 denotes abnormal.
        """
        if is_standard_time_series(X, window):
            ts_features = []
            # Leading 10 — presumably a placeholder label column so the
            # vector lines up with the training layout; TODO confirm.
            features = [10]
            features.extend(feature_service.extract_features(X, window))
            ts_features.append(features)
            res_pred = xgb.DMatrix(np.array(ts_features))
            bst = xgb.Booster({'nthread': 4})
            bst.load_model(model_name)
            xgb_ret = bst.predict(res_pred)
            # Scores below the threshold are classified as abnormal.
            if xgb_ret[0] < self.threshold:
                value = 0
            else:
                value = 1
            return [value, xgb_ret[0]]
        else:
            return [0, 0]
import torch
"""
checkpoint and state generation
"""
from ..databases.restarter import Restartable
DEFAULT_STRUCTURE_FNAME= 'experiment_structure.pt'
def create_state(model, controller, metric_tracker):
    """
    Build a checkpointable snapshot of the experiment state.

    :param model: module whose ``state_dict`` is captured
    :param controller: controller whose ``state_dict`` is captured
    :param metric_tracker: metric tracker object, stored as-is
    :return: dictionary containing experiment state.
    """
    state = dict()
    state['model'] = model.state_dict()
    state['controller'] = controller.state_dict()
    state['metric_tracker'] = metric_tracker
    # Capture torch's global RNG so training can resume reproducibly.
    state['torch_rng_state'] = torch.random.get_rng_state()
    return state
def create_structure_file(training_modules, database, controller, fname=DEFAULT_STRUCTURE_FNAME):
    """
    Save an experiment structure (i.e. the full objects, not just state_dict).

    :param training_modules: training modules container to serialize
    :param database: database; its restarter is stored when it is Restartable
    :param controller: controller object to serialize
    :param fname: output file name
    :return: Nothing
    """
    structure = dict(training_modules=training_modules, controller=controller)
    # Only a Restartable database records how to re-open itself later.
    if isinstance(database, Restartable):
        structure['database'] = database.restarter
    with open(fname, 'wb') as pfile:
        torch.save(structure, pfile)
def restore_checkpoint(structure, state, restore_db=True):
    """
    Restore model/controller parameters, metrics, and RNG state into a structure.

    :param structure: experiment structure object
    :param state: experiment state object
    :param restore_db: Attempt to restore database (true/false)
    :return: experiment structure
    """
    model = structure['training_modules'][0]
    model.load_state_dict(state['model'])
    controller = structure['controller']
    controller.load_state_dict(state['controller'])
    if restore_db and 'database' in structure:
        # The stored restarter knows how to re-open the database.
        structure['database'] = structure['database'].attempt_reload()
    structure['metric_tracker'] = state['metric_tracker']
    # Restore torch's global RNG so training resumes reproducibly.
    torch.random.set_rng_state(state['torch_rng_state'])
    return structure
def load_checkpoint(structure_fname, state_fname, restore_db=True, **kwargs):
    """
    Load a checkpoint from file names; kwargs are forwarded to ``torch.load``
    (e.g. ``map_location`` to load onto a specific device).

    :param structure_fname: file holding the experiment structure
    :param state_fname: file holding the experiment state
    :param restore_db: attempt to restore the database (true/false)
    :param kwargs: passed to torch.load
    :return: restored experiment structure
    """
    with open(structure_fname, 'rb') as s_file, open(state_fname, 'rb') as st_file:
        structure = torch.load(s_file, **kwargs)
        state = torch.load(st_file, **kwargs)
    return restore_checkpoint(structure, state, restore_db=restore_db)
def load_checkpoint_from_cwd(**kwargs):
    """
    Load the default-named checkpoint from the current working directory.

    See :func:`load_checkpoint`; kwargs are forwarded to it.
    :return: restored experiment structure
    """
    structure_fname = DEFAULT_STRUCTURE_FNAME
    state_fname = 'best_checkpoint.pt'
    return load_checkpoint(structure_fname, state_fname, **kwargs)
def load_model_from_cwd(**kwargs):
    """
    Load the experiment structure and best model parameters from the current
    working directory and return the model only.

    :param kwargs: passed to torch.load (e.g. ``map_location``)
    :return: model with the best parameters loaded
    """
    with open('experiment_structure.pt', 'rb') as pfile:
        structure = torch.load(pfile, **kwargs)
    with open('best_model.pkl', 'rb') as pfile:
        state = torch.load(pfile, **kwargs)
    model = structure['training_modules'].model
    model.load_state_dict(state)
    return model


import torch
"""
checkpoint and state generation
"""
from ..databases.restarter import Restartable
DEFAULT_STRUCTURE_FNAME= 'experiment_structure.pt'
def create_state(model,controller,metric_tracker):
    """
    Create an experiment state dictionary.
    :param model: module whose state_dict is captured
    :param controller: controller whose state_dict is captured
    :param metric_tracker: metric tracker object, stored as-is
    :return: dictionary containing experiment state.
    """
    return {
        'model': model.state_dict(),
        'controller': controller.state_dict(),
        'metric_tracker': metric_tracker,
        # Capture torch's global RNG so training can resume reproducibly.
        'torch_rng_state':torch.random.get_rng_state(),
    }
def create_structure_file(training_modules, database, controller, fname=DEFAULT_STRUCTURE_FNAME):
    """
    Save an experiment structure. (i.e. full model, not just state_dict).
    :param training_modules: training modules container to serialize
    :param database: database; its restarter is stored when it is Restartable
    :param controller: controller object to serialize
    :param fname: output file name
    :return: Nothing
    """
    structure = {
        'training_modules': training_modules,
        'controller': controller,
    }
    # Only a Restartable database records how to re-open itself later.
    if isinstance(database,Restartable):
        structure['database'] = database.restarter
    with open(fname, 'wb') as pfile:
        torch.save(structure,pfile)
def restore_checkpoint(structure,state,restore_db=True):
    """
    Restore parameters, metrics and RNG state from a state dict into a structure.
    :param structure: experiment structure object
    :param state: experiment state object
    :param restore_db: Attempt to restore database (true/false)
    :return: experiment structure
    """
    # training_modules[0] is presumably the model — TODO confirm against the
    # training-modules container definition.
    structure['training_modules'][0].load_state_dict(state['model'])
    structure['controller'].load_state_dict(state['controller'])
    if 'database' in structure and restore_db:
        # The stored restarter knows how to re-open the database.
        structure['database'] = structure['database'].attempt_reload()
    structure['metric_tracker'] = state['metric_tracker']
    # Restore torch's global RNG so training resumes reproducibly.
    torch.random.set_rng_state(state['torch_rng_state'])
    return structure
def load_checkpoint(structure_fname,state_fname,restore_db=True,**kwargs):
    """
    Load a checkpoint from filenames. kwargs are passed to torch.load
    :param structure_fname: file holding the experiment structure
    :param state_fname: file holding the experiment state
    :param restore_db: attempt to restore the database (true/false)
    :param kwargs: passed to torch.load, i.e. use `map_location` to load the model
        on a specific device
    :return: restored experiment structure
    """
    with open(structure_fname,'rb') as pfile:
        structure = torch.load(pfile,**kwargs)
    with open(state_fname,'rb') as pfile:
        state = torch.load(pfile,**kwargs)
    return restore_checkpoint(structure,state,restore_db=restore_db)
def load_checkpoint_from_cwd(**kwargs):
    """
    See load_checkpoint, but using default filenames in the current directory.
    :param kwargs: forwarded to load_checkpoint (and thus torch.load)
    :return: restored experiment structure
    """
    return load_checkpoint(DEFAULT_STRUCTURE_FNAME,'best_checkpoint.pt',**kwargs)
def load_model_from_cwd(**kwargs):
    """
    Load structure and best model params from cwd; return the model only.

    :param kwargs: passed to torch.load (e.g. ``map_location``)
    :return: model with the best parameters loaded
    """
    with open('experiment_structure.pt','rb') as pfile:
        structure = torch.load(pfile,**kwargs)
    with open('best_model.pkl','rb') as pfile:
        state = torch.load(pfile,**kwargs)
    model = structure['training_modules'].model
    model.load_state_dict(state)
    return model
from collections import defaultdict, deque
import datetime
import logging
import threading
import weakref
import tornado.locks
from tornado import gen
from .core import CommClosedError
from .utils import sync
from .protocol.serialize import to_serialize
logger = logging.getLogger(__name__)
class PubSubSchedulerExtension(object):
    """ Extend Dask's scheduler with routes to handle PubSub machinery """

    def __init__(self, scheduler):
        self.scheduler = scheduler
        # topic name -> set of publishing / subscribing worker addresses
        self.publishers = defaultdict(set)
        self.subscribers = defaultdict(set)
        # topic name -> set of subscribing client ids
        self.client_subscribers = defaultdict(set)
        self.scheduler.handlers.update({"pubsub_add_publisher": self.add_publisher})
        self.scheduler.stream_handlers.update(
            {
                "pubsub-add-subscriber": self.add_subscriber,
                "pubsub-remove-publisher": self.remove_publisher,
                "pubsub-remove-subscriber": self.remove_subscriber,
                "pubsub-msg": self.handle_message,
            }
        )
        self.scheduler.extensions["pubsub"] = self

    def add_publisher(self, comm=None, name=None, worker=None):
        """Register a worker-side Pub; reply with the current subscribers."""
        logger.debug("Add publisher: %s %s", name, worker)
        self.publishers[name].add(worker)
        return {
            "subscribers": {addr: {} for addr in self.subscribers[name]},
            # Publishers must mirror messages to the scheduler whenever at
            # least one *client* is subscribed to this topic.
            "publish-scheduler": name in self.client_subscribers
            and len(self.client_subscribers[name]) > 0,
        }

    def add_subscriber(self, comm=None, name=None, worker=None, client=None):
        """Register a worker or client Sub and notify all publishers."""
        if worker:
            logger.debug("Add worker subscriber: %s %s", name, worker)
            self.subscribers[name].add(worker)
            for pub in self.publishers[name]:
                self.scheduler.worker_send(
                    pub,
                    {"op": "pubsub-add-subscriber", "address": worker, "name": name},
                )
        elif client:
            logger.debug("Add client subscriber: %s %s", name, client)
            for pub in self.publishers[name]:
                self.scheduler.worker_send(
                    pub,
                    {"op": "pubsub-publish-scheduler", "name": name, "publish": True},
                )
            self.client_subscribers[name].add(client)

    def remove_publisher(self, comm=None, name=None, worker=None):
        """Unregister a worker-side Pub; drop the topic once fully empty."""
        if worker in self.publishers[name]:
            logger.debug("Remove publisher: %s %s", name, worker)
            self.publishers[name].remove(worker)
        if not self.subscribers[name] and not self.publishers[name]:
            del self.subscribers[name]
            del self.publishers[name]

    def remove_subscriber(self, comm=None, name=None, worker=None, client=None):
        """Unregister a worker or client Sub and notify publishers."""
        if worker:
            # Fixed copy-pasted log text: this path *removes* a subscriber.
            logger.debug("Remove worker subscriber: %s %s", name, worker)
            self.subscribers[name].remove(worker)
            for pub in self.publishers[name]:
                self.scheduler.worker_send(
                    pub,
                    {"op": "pubsub-remove-subscriber", "address": worker, "name": name},
                )
        elif client:
            # Fixed copy-pasted log text (was "Add client subscriber").
            logger.debug("Remove client subscriber: %s %s", name, client)
            self.client_subscribers[name].remove(client)
            if not self.client_subscribers[name]:
                del self.client_subscribers[name]
                # No clients left: publishers may stop mirroring to us.
                for pub in self.publishers[name]:
                    self.scheduler.worker_send(
                        pub,
                        {
                            "op": "pubsub-publish-scheduler",
                            "name": name,
                            "publish": False,
                        },
                    )
        if not self.subscribers[name] and not self.publishers[name]:
            logger.debug("Remove PubSub topic %s", name)
            del self.subscribers[name]
            del self.publishers[name]

    def handle_message(self, name=None, msg=None, worker=None, client=None):
        """Fan a message out to client subscribers (and, for client-origin
        messages, to worker subscribers as well)."""
        for c in list(self.client_subscribers[name]):
            try:
                self.scheduler.client_comms[c].send(
                    {"op": "pubsub-msg", "name": name, "msg": msg}
                )
            except (KeyError, CommClosedError):
                # Client went away; drop its subscription.
                self.remove_subscriber(name=name, client=c)
        if client:
            # Worker-origin messages reach workers directly; only messages
            # published by a client need scheduler-side fan-out to workers.
            for sub in self.subscribers[name]:
                self.scheduler.worker_send(
                    sub, {"op": "pubsub-msg", "name": name, "msg": msg}
                )
class PubSubWorkerExtension(object):
    """ Extend Dask's Worker with routes to handle PubSub machinery """
    def __init__(self, worker):
        self.worker = worker
        self.worker.stream_handlers.update(
            {
                "pubsub-add-subscriber": self.add_subscriber,
                "pubsub-remove-subscriber": self.remove_subscriber,
                "pubsub-msg": self.handle_message,
                "pubsub-publish-scheduler": self.publish_scheduler,
            }
        )
        # WeakSets so garbage-collected Pub/Sub objects drop out
        # automatically; cleanup() then notifies the scheduler.
        self.subscribers = defaultdict(weakref.WeakSet)
        self.publishers = defaultdict(weakref.WeakSet)
        self.publish_to_scheduler = defaultdict(lambda: False)
        self.worker.extensions["pubsub"] = self  # circular reference
    def add_subscriber(self, name=None, address=None, **info):
        # From the scheduler: register a remote subscriber address with
        # every local Pub on this topic.
        for pub in self.publishers[name]:
            pub.subscribers[address] = info
    def remove_subscriber(self, name=None, address=None):
        # From the scheduler: drop a remote subscriber from local Pubs.
        for pub in self.publishers[name]:
            del pub.subscribers[address]
    def publish_scheduler(self, name=None, publish=None):
        # Toggle whether local Pubs also mirror messages to the scheduler
        # (needed while clients -- not just workers -- subscribe).
        self.publish_to_scheduler[name] = publish
    def handle_message(self, name=None, msg=None):
        # Deliver an incoming message to every local Sub on this topic.
        for sub in self.subscribers.get(name, []):
            sub._put(msg)
    def trigger_cleanup(self):
        # Schedule cleanup on the worker's event loop (used by finalizers).
        self.worker.loop.add_callback(self.cleanup)
    def cleanup(self):
        """Unregister topics whose local Pubs/Subs were all collected."""
        for name, s in dict(self.subscribers).items():
            if not len(s):
                msg = {"op": "pubsub-remove-subscriber", "name": name}
                self.worker.batched_stream.send(msg)
                del self.subscribers[name]
        for name, p in dict(self.publishers).items():
            if not len(p):
                msg = {"op": "pubsub-remove-publisher", "name": name}
                self.worker.batched_stream.send(msg)
                del self.publishers[name]
                del self.publish_to_scheduler[name]
class PubSubClientExtension(object):
    """ Extend Dask's Client with handlers to handle PubSub machinery """

    def __init__(self, client):
        self.client = client
        self.client._stream_handlers.update({"pubsub-msg": self.handle_message})
        # topic name -> WeakSet of local Sub objects; collected Subs vanish.
        self.subscribers = defaultdict(weakref.WeakSet)
        self.client.extensions["pubsub"] = self  # TODO: circular reference

    def handle_message(self, name=None, msg=None):
        """Deliver a message to local Subs; unsubscribe when none remain."""
        for sub in self.subscribers[name]:
            sub._put(msg)
        if not self.subscribers[name]:
            # Fix: the op must match the scheduler's registered stream
            # handler "pubsub-remove-subscriber"; the previous plural form
            # ("pubsub-remove-subscribers") had no handler, so the
            # unsubscribe message was dropped. cleanup() below already used
            # the singular form.
            self.client.scheduler_comm.send(
                {"op": "pubsub-remove-subscriber", "name": name}
            )

    def trigger_cleanup(self):
        # Schedule cleanup on the client's event loop (used by finalizers).
        self.client.loop.add_callback(self.cleanup)

    def cleanup(self):
        """Tell the scheduler about topics whose Subs were all collected."""
        for name, s in self.subscribers.items():
            if not s:
                msg = {"op": "pubsub-remove-subscriber", "name": name}
                self.client.scheduler_comm.send(msg)
class Pub(object):
    """ Publish data with Publish-Subscribe pattern
    This allows clients and workers to directly communicate data between each
    other with a typical Publish-Subscribe pattern. This involves two
    components,
    Pub objects, into which we put data:
    >>> pub = Pub('my-topic')
    >>> pub.put(123)
    And Sub objects, from which we collect data:
    >>> sub = Sub('my-topic')
    >>> sub.get()
    123
    Many Pub and Sub objects can exist for the same topic. All data sent from
    any Pub will be sent to all Sub objects on that topic that are currently
    connected. Pub's and Sub's find each other using the scheduler, but they
    communicate directly with each other without coordination from the
    scheduler.
    Pubs and Subs use the central scheduler to find each other, but not to
    mediate the communication. This means that there is very little additional
    latency or overhead, and they are appropriate for very frequent data
    transfers. For context, most data transfer first checks with the scheduler to find which
    workers should participate, and then does direct worker-to-worker
    transfers. This checking in with the scheduler provides some stability
    guarantees, but also adds in a few extra network hops. PubSub doesn't do
    this, and so is faster, but also can easily drop messages if Pubs or Subs
    disappear without notice.
    When using a Pub or Sub from a Client all communications will be routed
    through the scheduler. This can cause some performance degradation. Pubs
    and Subs only operate at top-speed when they are both on workers.
    Parameters
    ----------
    name: object (msgpack serializable)
        The name of the group of Pubs and Subs on which to participate
    Examples
    --------
    >>> pub = Pub('my-topic')
    >>> sub = Sub('my-topic')
    >>> pub.put([1, 2, 3])
    >>> sub.get()
    [1, 2, 3]
    You can also use sub within a for loop:
    >>> for msg in sub: # doctest: +SKIP
    ... print(msg)
    or an async for loop
    >>> async for msg in sub: # doctest: +SKIP
    ... print(msg)
    Similarly the ``.get`` method will return an awaitable if used by an async
    client or within the IOLoop thread of a worker
    >>> await sub.get() # doctest: +SKIP
    You can see the set of connected worker subscribers by looking at the
    ``.subscribers`` attribute:
    >>> pub.subscribers
    {'tcp://...': {},
    'tcp://...': {}}
    See Also
    --------
    Sub
    """
    def __init__(self, name, worker=None, client=None):
        # Infer context when not given: inside a worker task we publish from
        # the worker; otherwise fall back to the client.
        if worker is None and client is None:
            from distributed import get_worker, get_client
            try:
                worker = get_worker()
            except Exception:
                client = get_client()
        self.subscribers = dict()
        self.worker = worker
        self.client = client
        assert client or worker
        if self.worker:
            self.scheduler = self.worker.scheduler
            self.loop = self.worker.loop
        elif self.client:
            self.scheduler = self.client.scheduler
            self.loop = self.client.loop
        self.name = name
        # Messages put() before registration completes are buffered and
        # flushed by _start().
        self._started = False
        self._buffer = []
        self.loop.add_callback(self._start)
        if self.worker:
            pubsub = self.worker.extensions["pubsub"]
            self.loop.add_callback(pubsub.publishers[name].add, self)
            # Notify the worker extension when this Pub is garbage collected.
            weakref.finalize(self, pubsub.trigger_cleanup)
    async def _start(self):
        # Register with the scheduler and learn the current subscribers.
        if self.worker:
            result = await self.scheduler.pubsub_add_publisher(
                name=self.name, worker=self.worker.address
            )
            pubsub = self.worker.extensions["pubsub"]
            self.subscribers.update(result["subscribers"])
            pubsub.publish_to_scheduler[self.name] = result["publish-scheduler"]
        self._started = True
        # Flush messages that were published before registration finished.
        for msg in self._buffer:
            self.put(msg)
        del self._buffer[:]
    def _put(self, msg):
        if not self._started:
            self._buffer.append(msg)
            return
        data = {"op": "pubsub-msg", "name": self.name, "msg": to_serialize(msg)}
        if self.worker:
            # Send directly to each subscribed worker; mirror to the
            # scheduler only while clients are subscribed to this topic.
            for sub in self.subscribers:
                self.worker.send_to_worker(sub, data)
            if self.worker.extensions["pubsub"].publish_to_scheduler[self.name]:
                self.worker.batched_stream.send(data)
        elif self.client:
            self.client.scheduler_comm.send(data)
    def put(self, msg):
        """ Publish a message to all subscribers of this topic """
        # Hop onto the event loop thread, where all Pub state is managed.
        self.loop.add_callback(self._put, msg)
class Sub(object):
    """ Subscribe to a Publish/Subscribe topic

    See Also
    --------
    Pub: for full docstring
    """

    def __init__(self, name, worker=None, client=None):
        # Infer context when not given: inside a worker task we subscribe
        # from the worker; otherwise fall back to the client.
        if worker is None and client is None:
            from distributed.worker import get_worker, get_client
            try:
                worker = get_worker()
            except Exception:
                client = get_client()
        self.worker = worker
        self.client = client
        if self.worker:
            self.loop = self.worker.loop
        elif self.client:
            self.loop = self.client.loop
        self.name = name
        # Incoming-message queue; the condition wakes waiting consumers.
        self.buffer = deque()
        self.condition = tornado.locks.Condition()
        if self.worker:
            pubsub = self.worker.extensions["pubsub"]
        elif self.client:
            pubsub = self.client.extensions["pubsub"]
        self.loop.add_callback(pubsub.subscribers[name].add, self)
        msg = {"op": "pubsub-add-subscriber", "name": self.name}
        if self.worker:
            self.loop.add_callback(self.worker.batched_stream.send, msg)
        elif self.client:
            self.loop.add_callback(self.client.scheduler_comm.send, msg)
        else:
            raise Exception()
        # Notify the extension when this Sub is garbage collected.
        weakref.finalize(self, pubsub.trigger_cleanup)

    async def _get(self, timeout=None):
        """Wait for and pop the next message, with an optional timeout."""
        if timeout is not None:
            timeout = datetime.timedelta(seconds=timeout)
        start = datetime.datetime.now()
        while not self.buffer:
            if timeout is not None:
                # Shrink the remaining timeout on every wakeup.
                timeout2 = timeout - (datetime.datetime.now() - start)
                if timeout2.total_seconds() < 0:
                    raise gen.TimeoutError()
            else:
                timeout2 = None
            await self.condition.wait(timeout=timeout2)
        return self.buffer.popleft()

    __anext__ = _get

    def get(self, timeout=None):
        """ Get a single message """
        if self.client:
            return self.client.sync(self._get, timeout=timeout)
        elif self.worker.thread_id == threading.get_ident():
            # Already on the worker's event loop thread: return an awaitable.
            return self._get()
        else:
            if self.buffer:  # fastpath
                return self.buffer.popleft()
            return sync(self.loop, self._get, timeout=timeout)

    next = __next__ = get

    def __iter__(self):
        return self

    def __aiter__(self):
        return self

    def _put(self, msg):
        self.buffer.append(msg)
        self.condition.notify()


from collections import defaultdict, deque
import datetime
import logging
import threading
import weakref
import tornado.locks
from tornado import gen
from .core import CommClosedError
from .utils import sync
from .protocol.serialize import to_serialize
logger = logging.getLogger(__name__)
class PubSubSchedulerExtension(object):
    """ Extend Dask's scheduler with routes to handle PubSub machinery """
    def __init__(self, scheduler):
        self.scheduler = scheduler
        # topic name -> worker addresses (publishers/subscribers) and
        # topic name -> client ids (client_subscribers)
        self.publishers = defaultdict(set)
        self.subscribers = defaultdict(set)
        self.client_subscribers = defaultdict(set)
        self.scheduler.handlers.update({"pubsub_add_publisher": self.add_publisher})
        self.scheduler.stream_handlers.update(
            {
                "pubsub-add-subscriber": self.add_subscriber,
                "pubsub-remove-publisher": self.remove_publisher,
                "pubsub-remove-subscriber": self.remove_subscriber,
                "pubsub-msg": self.handle_message,
            }
        )
        self.scheduler.extensions["pubsub"] = self
    def add_publisher(self, comm=None, name=None, worker=None):
        # Register a worker-side Pub and reply with current subscribers.
        logger.debug("Add publisher: %s %s", name, worker)
        self.publishers[name].add(worker)
        return {
            "subscribers": {addr: {} for addr in self.subscribers[name]},
            # Publishers mirror to the scheduler only while clients subscribe.
            "publish-scheduler": name in self.client_subscribers
            and len(self.client_subscribers[name]) > 0,
        }
    def add_subscriber(self, comm=None, name=None, worker=None, client=None):
        # Register a worker or client Sub and notify all publishers.
        if worker:
            logger.debug("Add worker subscriber: %s %s", name, worker)
            self.subscribers[name].add(worker)
            for pub in self.publishers[name]:
                self.scheduler.worker_send(
                    pub,
                    {"op": "pubsub-add-subscriber", "address": worker, "name": name},
                )
        elif client:
            logger.debug("Add client subscriber: %s %s", name, client)
            for pub in self.publishers[name]:
                self.scheduler.worker_send(
                    pub,
                    {"op": "pubsub-publish-scheduler", "name": name, "publish": True},
                )
            self.client_subscribers[name].add(client)
    def remove_publisher(self, comm=None, name=None, worker=None):
        # Unregister a worker-side Pub; drop the topic once fully empty.
        if worker in self.publishers[name]:
            logger.debug("Remove publisher: %s %s", name, worker)
            self.publishers[name].remove(worker)
        if not self.subscribers[name] and not self.publishers[name]:
            del self.subscribers[name]
            del self.publishers[name]
    def remove_subscriber(self, comm=None, name=None, worker=None, client=None):
        # Unregister a worker or client Sub and notify publishers.
        if worker:
            # NOTE(review): log text says "Add" but this path *removes* a
            # subscriber -- likely copy-pasted from add_subscriber.
            logger.debug("Add worker subscriber: %s %s", name, worker)
            self.subscribers[name].remove(worker)
            for pub in self.publishers[name]:
                self.scheduler.worker_send(
                    pub,
                    {"op": "pubsub-remove-subscriber", "address": worker, "name": name},
                )
        elif client:
            # NOTE(review): same copy-pasted "Add" wording on a remove path.
            logger.debug("Add client subscriber: %s %s", name, client)
            self.client_subscribers[name].remove(client)
            if not self.client_subscribers[name]:
                del self.client_subscribers[name]
                # No clients left: publishers may stop mirroring to us.
                for pub in self.publishers[name]:
                    self.scheduler.worker_send(
                        pub,
                        {
                            "op": "pubsub-publish-scheduler",
                            "name": name,
                            "publish": False,
                        },
                    )
        if not self.subscribers[name] and not self.publishers[name]:
            logger.debug("Remove PubSub topic %s", name)
            del self.subscribers[name]
            del self.publishers[name]
    def handle_message(self, name=None, msg=None, worker=None, client=None):
        # Fan out to client subscribers; for client-origin messages, also
        # relay to worker subscribers (worker Pubs reach workers directly).
        for c in list(self.client_subscribers[name]):
            try:
                self.scheduler.client_comms[c].send(
                    {"op": "pubsub-msg", "name": name, "msg": msg}
                )
            except (KeyError, CommClosedError):
                # Client went away; drop its subscription.
                self.remove_subscriber(name=name, client=c)
        if client:
            for sub in self.subscribers[name]:
                self.scheduler.worker_send(
                    sub, {"op": "pubsub-msg", "name": name, "msg": msg}
                )
class PubSubWorkerExtension(object):
    """ Extend Dask's Worker with routes to handle PubSub machinery """
    def __init__(self, worker):
        self.worker = worker
        self.worker.stream_handlers.update(
            {
                "pubsub-add-subscriber": self.add_subscriber,
                "pubsub-remove-subscriber": self.remove_subscriber,
                "pubsub-msg": self.handle_message,
                "pubsub-publish-scheduler": self.publish_scheduler,
            }
        )
        # WeakSets so garbage-collected Pub/Sub objects drop out
        # automatically; cleanup() then notifies the scheduler.
        self.subscribers = defaultdict(weakref.WeakSet)
        self.publishers = defaultdict(weakref.WeakSet)
        self.publish_to_scheduler = defaultdict(lambda: False)
        self.worker.extensions["pubsub"] = self  # circular reference
    def add_subscriber(self, name=None, address=None, **info):
        # From the scheduler: register a remote subscriber address with
        # every local Pub on this topic.
        for pub in self.publishers[name]:
            pub.subscribers[address] = info
    def remove_subscriber(self, name=None, address=None):
        # From the scheduler: drop a remote subscriber from local Pubs.
        for pub in self.publishers[name]:
            del pub.subscribers[address]
    def publish_scheduler(self, name=None, publish=None):
        # Toggle whether local Pubs also mirror messages to the scheduler
        # (needed while clients -- not just workers -- subscribe).
        self.publish_to_scheduler[name] = publish
    def handle_message(self, name=None, msg=None):
        # Deliver an incoming message to every local Sub on this topic.
        for sub in self.subscribers.get(name, []):
            sub._put(msg)
    def trigger_cleanup(self):
        # Schedule cleanup on the worker's event loop (used by finalizers).
        self.worker.loop.add_callback(self.cleanup)
    def cleanup(self):
        """Unregister topics whose local Pubs/Subs were all collected."""
        for name, s in dict(self.subscribers).items():
            if not len(s):
                msg = {"op": "pubsub-remove-subscriber", "name": name}
                self.worker.batched_stream.send(msg)
                del self.subscribers[name]
        for name, p in dict(self.publishers).items():
            if not len(p):
                msg = {"op": "pubsub-remove-publisher", "name": name}
                self.worker.batched_stream.send(msg)
                del self.publishers[name]
                del self.publish_to_scheduler[name]
class PubSubClientExtension(object):
    """ Extend Dask's Client with handlers to handle PubSub machinery """
    def __init__(self, client):
        self.client = client
        self.client._stream_handlers.update({"pubsub-msg": self.handle_message})
        # topic name -> WeakSet of local Sub objects; collected Subs vanish.
        self.subscribers = defaultdict(weakref.WeakSet)
        self.client.extensions["pubsub"] = self  # TODO: circular reference
    def handle_message(self, name=None, msg=None):
        # Deliver to every local Sub on this topic.
        for sub in self.subscribers[name]:
            sub._put(msg)
        if not self.subscribers[name]:
            # NOTE(review): op name is plural here, but the scheduler only
            # registers "pubsub-remove-subscriber" (singular) -- likely a
            # typo; confirm before relying on this unsubscribe path.
            self.client.scheduler_comm.send(
                {"op": "pubsub-remove-subscribers", "name": name}
            )
    def trigger_cleanup(self):
        # Schedule cleanup on the client's event loop (used by finalizers).
        self.client.loop.add_callback(self.cleanup)
    def cleanup(self):
        # Tell the scheduler about topics whose Subs were all collected.
        for name, s in self.subscribers.items():
            if not s:
                msg = {"op": "pubsub-remove-subscriber", "name": name}
                self.client.scheduler_comm.send(msg)
class Pub(object):
""" Publish data with Publish-Subscribe pattern
This allows clients and workers to directly communicate data between each
other with a typical Publish-Subscribe pattern. This involves two
components,
Pub objects, into which we put data:
>>> pub = Pub('my-topic')
>>> pub.put(123)
And Sub objects, from which we collect data:
>>> sub = Sub('my-topic')
>>> sub.get()
123
Many Pub and Sub objects can exist for the same topic. All data sent from
any Pub will be sent to all Sub objects on that topic that are currently
connected. Pub's and Sub's find each other using the scheduler, but they
communicate directly with each other without coordination from the
scheduler.
Pubs and Subs use the central scheduler to find each other, but not to
mediate the communication. This means that there is very little additional
latency or overhead, and they are appropriate for very frequent data
transfers. For context, most data transfer first checks with the scheduler to find which
workers should participate, and then does direct worker-to-worker
transfers. This checking in with the scheduler provides some stability
guarantees, but also adds in a few extra network hops. PubSub doesn't do
this, and so is faster, but also can easily drop messages if Pubs or Subs
disappear without notice.
When using a Pub or Sub from a Client all communications will be routed
through the scheduler. This can cause some performance degradation. Pubs
and Subs only operate at top-speed when they are both on workers.
Parameters
----------
name: object (msgpack serializable)
The name of the group of Pubs and Subs on which to participate
Examples
--------
>>> pub = Pub('my-topic')
>>> sub = Sub('my-topic')
>>> pub.put([1, 2, 3])
>>> sub.get()
[1, 2, 3]
You can also use sub within a for loop:
>>> for msg in sub: # doctest: +SKIP
... print(msg)
or an async for loop
>>> async for msg in sub: # doctest: +SKIP
... print(msg)
Similarly the ``.get`` method will return an awaitable if used by an async
client or within the IOLoop thread of a worker
>>> await sub.get() # doctest: +SKIP
You can see the set of connected worker subscribers by looking at the
``.subscribers`` attribute:
>>> pub.subscribers
{'tcp://...': {},
'tcp://...': {}}
See Also
--------
Sub
"""
def __init__(self, name, worker=None, client=None):
if worker is None and client is None:
from distributed import get_worker, get_client
try:
worker = get_worker()
except Exception:
client = get_client()
self.subscribers = dict()
self.worker = worker
self.client = client
assert client or worker
if self.worker:
self.scheduler = self.worker.scheduler
self.loop = self.worker.loop
elif self.client:
self.scheduler = self.client.scheduler
self.loop = self.client.loop
self.name = name
self._started = False
self._buffer = []
self.loop.add_callback(self._start)
if self.worker:
pubsub = self.worker.extensions["pubsub"]
self.loop.add_callback(pubsub.publishers[name].add, self)
weakref.finalize(self, pubsub.trigger_cleanup)
async def _start(self):
if self.worker:
result = await self.scheduler.pubsub_add_publisher(
name=self.name, worker=self.worker.address
)
pubsub = self.worker.extensions["pubsub"]
self.subscribers.update(result["subscribers"])
pubsub.publish_to_scheduler[self.name] = result["publish-scheduler"]
self._started = True
for msg in self._buffer:
self.put(msg)
del self._buffer[:]
def _put(self, msg):
if not self._started:
self._buffer.append(msg)
return
data = {"op": "pubsub-msg", "name": self.name, "msg": to_serialize(msg)}
if self.worker:
for sub in self.subscribers:
self.worker.send_to_worker(sub, data)
if self.worker.extensions["pubsub"].publish_to_scheduler[self.name]:
self.worker.batched_stream.send(data)
elif self.client:
self.client.scheduler_comm.send(data)
def put(self, msg):
""" Publish a message to all subscribers of this topic """
self.loop.add_callback(self._put, msg)
class Sub(object):
""" Subscribe to a Publish/Subscribe topic
See Also
--------
Pub: for full docstring
"""
def __init__(self, name, worker=None, client=None):
if worker is None and client is None:
from distributed.worker import get_worker, get_client
try:
worker = get_worker()
except Exception:
client = get_client()
self.worker = worker
self.client = client
if self.worker:
self.loop = self.worker.loop
elif self.client:
self.loop = self.client.loop
self.name = name
self.buffer = deque()
self.condition = tornado.locks.Condition()
if self.worker:
pubsub = self.worker.extensions["pubsub"]
elif self.client:
pubsub = self.client.extensions["pubsub"]
self.loop.add_callback(pubsub.subscribers[name].add, self)
msg = {"op": "pubsub-add-subscriber", "name": self.name}
if self.worker:
self.loop.add_callback(self.worker.batched_stream.send, msg)
elif self.client:
self.loop.add_callback(self.client.scheduler_comm.send, msg)
else:
raise Exception()
weakref.finalize(self, pubsub.trigger_cleanup)
async def _get(self, timeout=None):
if timeout is not None:
timeout = datetime.timedelta(seconds=timeout)
start = datetime.datetime.now()
while not self.buffer:
if timeout is not None:
timeout2 = timeout - (datetime.datetime.now() - start)
if timeout2.total_seconds() < 0:
raise gen.TimeoutError()
else:
timeout2 = None
await self.condition.wait(timeout=timeout2)
return self.buffer.popleft()
__anext__ = _get
def get(self, timeout=None):
""" Get a single message """
if self.client:
return self.client.sync(self._get, timeout=timeout)
elif self.worker.thread_id == threading.get_ident():
return self._get()
else:
if self.buffer: # fastpath
return self.buffer.popleft()
return sync(self.loop, self._get, timeout=timeout)
next = __next__ = get
def __iter__(self):
return self
def __aiter__(self):
return self
def _put(self, msg):
self.buffer.append(msg)
self.condition.notify() | 0.487551 | 0.07971 |
import itertools
from typing import Dict, Iterator, Optional, Tuple, Type, TypeVar, Union
from .. import (
SPECIFIED_DIRECTIVES,
Directive,
EnumType,
Field,
GraphQLType,
InputObjectType,
InterfaceType,
ListType,
NamedType,
NonNullType,
ObjectType,
Schema,
UnionType,
is_introspection_type,
)
from .changes import (
DirectiveAdded,
DirectiveArgumentAdded,
DirectiveArgumentChangedType,
DirectiveArgumentDefaultValueChange,
DirectiveArgumentRemoved,
DirectiveLocationAdded,
DirectiveLocationRemoved,
DirectiveRemoved,
EnumValueAdded,
EnumValueDeprecated,
EnumValueDeprecationReasonChanged,
EnumValueDeprecationRemoved,
EnumValueRemoved,
FieldAdded,
FieldArgumentAdded,
FieldArgumentChangedType,
FieldArgumentDefaultValueChange,
FieldArgumentRemoved,
FieldChangedType,
FieldDeprecated,
FieldDeprecationReasonChanged,
FieldDeprecationRemoved,
FieldRemoved,
InputFieldAdded,
InputFieldChangedType,
InputFieldDefaultValueChange,
InputFieldRemoved,
SchemaChange,
SchemaChangeSeverity,
TypeAdded,
TypeAddedToInterface,
TypeAddedToUnion,
TypeChangedKind,
TypeRemoved,
TypeRemovedFromInterface,
TypeRemovedFromUnion,
)
TGraphQLType = TypeVar("TGraphQLType", bound=GraphQLType)
__all__ = (
"diff_schema",
"SchemaChange",
"SchemaChangeSeverity",
"DirectiveAdded",
"DirectiveArgumentAdded",
"DirectiveArgumentChangedType",
"DirectiveArgumentDefaultValueChange",
"DirectiveArgumentRemoved",
"DirectiveLocationAdded",
"DirectiveLocationRemoved",
"DirectiveRemoved",
"EnumValueAdded",
"EnumValueDeprecated",
"EnumValueDeprecationReasonChanged",
"EnumValueDeprecationRemoved",
"EnumValueRemoved",
"FieldAdded",
"FieldArgumentAdded",
"FieldArgumentChangedType",
"FieldArgumentDefaultValueChange",
"FieldArgumentRemoved",
"FieldChangedType",
"FieldDeprecated",
"FieldDeprecationReasonChanged",
"FieldDeprecationRemoved",
"FieldRemoved",
"InputFieldAdded",
"InputFieldChangedType",
"InputFieldDefaultValueChange",
"InputFieldRemoved",
"TypeAdded",
"TypeAddedToInterface",
"TypeAddedToUnion",
"TypeChangedKind",
"TypeRemoved",
"TypeRemovedFromInterface",
"TypeRemovedFromUnion",
)
def _iterate_matching_pairs(
old_schema: Schema, new_schema: Schema, cls: Type[TGraphQLType]
) -> Iterator[Tuple[TGraphQLType, TGraphQLType]]:
old_types = {
n: t for n, t in old_schema.types.items() if isinstance(t, cls)
} # type: Dict[str, TGraphQLType]
new_types = {
n: t for n, t in new_schema.types.items() if isinstance(t, cls)
} # type: Dict[str, TGraphQLType]
for name, old_type in old_types.items():
if is_introspection_type(old_type):
continue
try:
yield old_type, new_types[name]
except KeyError:
pass
def diff_schema(
old_schema: Schema,
new_schema: Schema,
min_severity: Optional[SchemaChangeSeverity] = None,
) -> Iterator[SchemaChange]:
"""
Iterate over all changes between `old_schema` and `new_schema`.
Some ``BREAKING`` and ``DANGEROUS`` changes could be safe depending on the
actual queries made by clients of your schema. However it is not possible
to detect this without looking at the queries being run against the schema
so this classification errs on the side of safety.
Some compatible type changes are ignored given that they should not lead to
any change in client behavior.
Args:
old_schema: Source schema
new_schema: Updated schema
min_severity: Set this to filter for changes of a given severity
Yields:
SchemaChange: All detected changes.
"""
old_schema.validate()
new_schema.validate()
diffs = [
_find_removed_types(old_schema, new_schema),
_find_added_types(old_schema, new_schema),
_diff_directives(old_schema, new_schema),
_find_changed_types(old_schema, new_schema),
_diff_union_types(old_schema, new_schema),
_diff_enum_types(old_schema, new_schema),
_diff_object_types(old_schema, new_schema),
_diff_interface_types(old_schema, new_schema),
_diff_input_types(old_schema, new_schema),
]
for change in itertools.chain(*diffs):
if min_severity is None or change.severity >= min_severity:
yield change
def _find_removed_types(old: Schema, new: Schema) -> Iterator[SchemaChange]:
for name in old.types.keys():
if name not in new.types:
yield TypeRemoved(name)
def _find_added_types(old: Schema, new: Schema) -> Iterator[SchemaChange]:
for name in new.types.keys():
if name not in old.types:
yield TypeAdded(name)
def _find_changed_types(old: Schema, new: Schema) -> Iterator[SchemaChange]:
for name, old_type in old.types.items():
try:
new_type = new.types[name]
except KeyError:
pass
else:
if old_type.__class__ != new_type.__class__:
yield TypeChangedKind(
name, old_type.__class__, new_type.__class__
)
def _diff_union_types(old: Schema, new: Schema) -> Iterator[SchemaChange]:
for old_union, new_union in _iterate_matching_pairs(old, new, UnionType):
old_type_names = set(t.name for t in old_union.types)
new_type_names = set(t.name for t in new_union.types)
for t in old_type_names - new_type_names:
yield TypeRemovedFromUnion(t, old_union)
for t in new_type_names - old_type_names:
yield TypeAddedToUnion(t, new_union)
def _diff_enum_types(old: Schema, new: Schema) -> Iterator[SchemaChange]:
for old_enum, new_enum in _iterate_matching_pairs(old, new, EnumType):
for old_ev in old_enum.values:
try:
new_ev = new_enum._values[old_ev.name]
except KeyError:
yield EnumValueRemoved(old_enum, old_ev)
else:
if old_ev.deprecated:
if not new_ev.deprecated:
yield EnumValueDeprecationRemoved(
old_enum, old_ev, new_ev
)
elif old_ev.deprecation_reason != new_ev.deprecation_reason:
yield EnumValueDeprecationReasonChanged(
old_enum, old_ev, new_ev
)
elif new_ev.deprecated:
yield EnumValueDeprecated(old_enum, old_ev, new_ev)
for new_ev in new_enum.values:
if new_ev.name not in old_enum._values:
yield EnumValueAdded(new_enum, new_ev)
def _diff_directives(old: Schema, new: Schema) -> Iterator[SchemaChange]:
for name, old_directive in old.directives.items():
if old_directive in SPECIFIED_DIRECTIVES:
continue
try:
new_directive = new.directives[name]
except KeyError:
yield DirectiveRemoved(old_directive)
else:
old_locs = set(old_directive.locations)
new_locs = set(new_directive.locations)
for loc in old_locs - new_locs:
yield DirectiveLocationRemoved(old_directive, loc)
for loc in new_locs - old_locs:
yield DirectiveLocationAdded(old_directive, loc)
for d in _diff_directive_arguments(old_directive, new_directive):
yield d
for name, new_directive in new.directives.items():
if new_directive in SPECIFIED_DIRECTIVES:
continue
if name not in old.directives:
yield DirectiveAdded(new_directive)
def _diff_directive_arguments(
old_directive: Directive, new_directive: Directive
) -> Iterator[SchemaChange]:
old_args = old_directive.argument_map
new_args = new_directive.argument_map
for name, old_arg in old_args.items():
try:
new_arg = new_args[name]
except KeyError:
yield DirectiveArgumentRemoved(old_directive, old_arg)
else:
if not _is_safe_input_type_change(old_arg.type, new_arg.type):
yield DirectiveArgumentChangedType(
old_directive, old_arg, new_arg
)
elif (
(old_arg.has_default_value and not new_arg.has_default_value)
or (not old_arg.has_default_value and new_arg.has_default_value)
or (
old_arg.has_default_value
and old_arg.default_value != new_arg.default_value
)
):
yield DirectiveArgumentDefaultValueChange(
old_directive, old_arg, new_arg
)
for name, new_arg in new_args.items():
if name not in old_args:
yield DirectiveArgumentAdded(new_directive, new_arg)
def _diff_field_arguments(
parent: Union[ObjectType, InterfaceType], old_field: Field, new_field: Field
) -> Iterator[SchemaChange]:
old_args = old_field.argument_map
new_args = new_field.argument_map
for name, old_arg in old_args.items():
try:
new_arg = new_args[name]
except KeyError:
yield FieldArgumentRemoved(parent, old_field, old_arg)
else:
if not _is_safe_input_type_change(old_arg.type, new_arg.type):
yield FieldArgumentChangedType(
parent, old_field, old_arg, new_arg
)
elif (
(old_arg.has_default_value and not new_arg.has_default_value)
or (not old_arg.has_default_value and new_arg.has_default_value)
or (
old_arg.has_default_value
and old_arg.default_value != new_arg.default_value
)
):
yield FieldArgumentDefaultValueChange(
parent, old_field, old_arg, new_arg
)
for name, new_arg in new_args.items():
if name not in old_args:
yield FieldArgumentAdded(parent, new_field, new_arg)
def _is_safe_input_type_change(
old_type: GraphQLType, new_type: GraphQLType
) -> bool:
if isinstance(old_type, NamedType):
return bool(
isinstance(new_type, NamedType) and old_type.name == new_type.name
)
elif isinstance(old_type, ListType):
return isinstance(new_type, ListType) and _is_safe_input_type_change(
old_type.type, new_type.type
)
elif isinstance(old_type, NonNullType):
return (
isinstance(new_type, NonNullType)
and _is_safe_input_type_change(old_type.type, new_type.type)
) or (
not isinstance(new_type, NonNullType)
and _is_safe_input_type_change(old_type.type, new_type)
)
return False
def _is_safe_output_type_change(
old_type: GraphQLType, new_type: GraphQLType
) -> bool:
if isinstance(old_type, NamedType):
return (
isinstance(new_type, NamedType) and old_type.name == new_type.name
) or (
isinstance(new_type, NonNullType)
and _is_safe_output_type_change(old_type, new_type.type)
)
elif isinstance(old_type, ListType):
return (
isinstance(new_type, ListType)
and _is_safe_input_type_change(old_type.type, new_type.type)
) or (
isinstance(new_type, NonNullType)
and _is_safe_output_type_change(old_type, new_type.type)
)
elif isinstance(old_type, NonNullType):
return isinstance(
new_type, NonNullType
) and _is_safe_output_type_change(old_type.type, new_type.type)
return False
def _diff_object_types(old: Schema, new: Schema) -> Iterator[SchemaChange]:
for old_object, new_object in _iterate_matching_pairs(old, new, ObjectType):
for field_name, old_field in old_object.field_map.items():
try:
new_field = new_object.field_map[field_name]
except KeyError:
yield FieldRemoved(old_object, old_field)
else:
for d in _diff_field(old_field, new_field, old_object):
yield d
for new_field in new_object.field_map.values():
if new_field.name not in old_object.field_map:
yield FieldAdded(new_object, new_field)
old_interfaces = {i.name: i for i in old_object.interfaces}
new_interfaces = {i.name: i for i in new_object.interfaces}
for iname, i in old_interfaces.items():
if iname not in new_interfaces:
yield TypeRemovedFromInterface(i, old_object)
for iname, i in new_interfaces.items():
if iname not in old_interfaces:
yield TypeAddedToInterface(i, old_object)
def _diff_interface_types(old: Schema, new: Schema) -> Iterator[SchemaChange]:
for old_interface, new_interface in _iterate_matching_pairs(
old, new, InterfaceType
):
for field_name, old_field in old_interface.field_map.items():
try:
new_field = new_interface.field_map[field_name]
except KeyError:
yield FieldRemoved(old_interface, old_field)
else:
for d in _diff_field(old_field, new_field, old_interface):
yield d
for new_field in new_interface.field_map.values():
if new_field.name not in old_interface.field_map:
yield FieldAdded(new_interface, new_field)
def _diff_field(
old: Field, new: Field, parent_type: Union[ObjectType, InterfaceType]
) -> Iterator[SchemaChange]:
if not _is_safe_output_type_change(old.type, new.type):
yield FieldChangedType(parent_type, old, new)
for d in _diff_field_arguments(parent_type, old, new):
yield d
if old.deprecated:
if not new.deprecated:
yield FieldDeprecationRemoved(parent_type, old, new)
elif old.deprecation_reason != new.deprecation_reason:
yield FieldDeprecationReasonChanged(parent_type, old, new)
elif new.deprecated:
yield FieldDeprecated(parent_type, old, new)
def _diff_input_types(old: Schema, new: Schema) -> Iterator[SchemaChange]:
for old_type, new_type in _iterate_matching_pairs(
old, new, InputObjectType
):
old_fields = old_type.field_map
new_fields = new_type.field_map
for name, old_field in old_fields.items():
try:
new_field = new_fields[name]
except KeyError:
yield InputFieldRemoved(old_type, old_field)
else:
if not _is_safe_input_type_change(
old_field.type, new_field.type
):
yield InputFieldChangedType(old_type, old_field, new_field)
elif (
(
old_field.has_default_value
and not new_field.has_default_value
)
or (
not old_field.has_default_value
and new_field.has_default_value
)
or (
old_field.has_default_value
and old_field.default_value != new_field.default_value
)
):
yield InputFieldDefaultValueChange(
old_type, old_field, new_field
)
for name, new_field in new_fields.items():
if name not in old_fields:
yield InputFieldAdded(new_type, new_field) | src/py_gql/schema/differ/__init__.py | import itertools
from typing import Dict, Iterator, Optional, Tuple, Type, TypeVar, Union
from .. import (
SPECIFIED_DIRECTIVES,
Directive,
EnumType,
Field,
GraphQLType,
InputObjectType,
InterfaceType,
ListType,
NamedType,
NonNullType,
ObjectType,
Schema,
UnionType,
is_introspection_type,
)
from .changes import (
DirectiveAdded,
DirectiveArgumentAdded,
DirectiveArgumentChangedType,
DirectiveArgumentDefaultValueChange,
DirectiveArgumentRemoved,
DirectiveLocationAdded,
DirectiveLocationRemoved,
DirectiveRemoved,
EnumValueAdded,
EnumValueDeprecated,
EnumValueDeprecationReasonChanged,
EnumValueDeprecationRemoved,
EnumValueRemoved,
FieldAdded,
FieldArgumentAdded,
FieldArgumentChangedType,
FieldArgumentDefaultValueChange,
FieldArgumentRemoved,
FieldChangedType,
FieldDeprecated,
FieldDeprecationReasonChanged,
FieldDeprecationRemoved,
FieldRemoved,
InputFieldAdded,
InputFieldChangedType,
InputFieldDefaultValueChange,
InputFieldRemoved,
SchemaChange,
SchemaChangeSeverity,
TypeAdded,
TypeAddedToInterface,
TypeAddedToUnion,
TypeChangedKind,
TypeRemoved,
TypeRemovedFromInterface,
TypeRemovedFromUnion,
)
TGraphQLType = TypeVar("TGraphQLType", bound=GraphQLType)
__all__ = (
"diff_schema",
"SchemaChange",
"SchemaChangeSeverity",
"DirectiveAdded",
"DirectiveArgumentAdded",
"DirectiveArgumentChangedType",
"DirectiveArgumentDefaultValueChange",
"DirectiveArgumentRemoved",
"DirectiveLocationAdded",
"DirectiveLocationRemoved",
"DirectiveRemoved",
"EnumValueAdded",
"EnumValueDeprecated",
"EnumValueDeprecationReasonChanged",
"EnumValueDeprecationRemoved",
"EnumValueRemoved",
"FieldAdded",
"FieldArgumentAdded",
"FieldArgumentChangedType",
"FieldArgumentDefaultValueChange",
"FieldArgumentRemoved",
"FieldChangedType",
"FieldDeprecated",
"FieldDeprecationReasonChanged",
"FieldDeprecationRemoved",
"FieldRemoved",
"InputFieldAdded",
"InputFieldChangedType",
"InputFieldDefaultValueChange",
"InputFieldRemoved",
"TypeAdded",
"TypeAddedToInterface",
"TypeAddedToUnion",
"TypeChangedKind",
"TypeRemoved",
"TypeRemovedFromInterface",
"TypeRemovedFromUnion",
)
def _iterate_matching_pairs(
old_schema: Schema, new_schema: Schema, cls: Type[TGraphQLType]
) -> Iterator[Tuple[TGraphQLType, TGraphQLType]]:
old_types = {
n: t for n, t in old_schema.types.items() if isinstance(t, cls)
} # type: Dict[str, TGraphQLType]
new_types = {
n: t for n, t in new_schema.types.items() if isinstance(t, cls)
} # type: Dict[str, TGraphQLType]
for name, old_type in old_types.items():
if is_introspection_type(old_type):
continue
try:
yield old_type, new_types[name]
except KeyError:
pass
def diff_schema(
old_schema: Schema,
new_schema: Schema,
min_severity: Optional[SchemaChangeSeverity] = None,
) -> Iterator[SchemaChange]:
"""
Iterate over all changes between `old_schema` and `new_schema`.
Some ``BREAKING`` and ``DANGEROUS`` changes could be safe depending on the
actual queries made by clients of your schema. However it is not possible
to detect this without looking at the queries being run against the schema
so this classification errs on the side of safety.
Some compatible type changes are ignored given that they should not lead to
any change in client behavior.
Args:
old_schema: Source schema
new_schema: Updated schema
min_severity: Set this to filter for changes of a given severity
Yields:
SchemaChange: All detected changes.
"""
old_schema.validate()
new_schema.validate()
diffs = [
_find_removed_types(old_schema, new_schema),
_find_added_types(old_schema, new_schema),
_diff_directives(old_schema, new_schema),
_find_changed_types(old_schema, new_schema),
_diff_union_types(old_schema, new_schema),
_diff_enum_types(old_schema, new_schema),
_diff_object_types(old_schema, new_schema),
_diff_interface_types(old_schema, new_schema),
_diff_input_types(old_schema, new_schema),
]
for change in itertools.chain(*diffs):
if min_severity is None or change.severity >= min_severity:
yield change
def _find_removed_types(old: Schema, new: Schema) -> Iterator[SchemaChange]:
for name in old.types.keys():
if name not in new.types:
yield TypeRemoved(name)
def _find_added_types(old: Schema, new: Schema) -> Iterator[SchemaChange]:
for name in new.types.keys():
if name not in old.types:
yield TypeAdded(name)
def _find_changed_types(old: Schema, new: Schema) -> Iterator[SchemaChange]:
for name, old_type in old.types.items():
try:
new_type = new.types[name]
except KeyError:
pass
else:
if old_type.__class__ != new_type.__class__:
yield TypeChangedKind(
name, old_type.__class__, new_type.__class__
)
def _diff_union_types(old: Schema, new: Schema) -> Iterator[SchemaChange]:
for old_union, new_union in _iterate_matching_pairs(old, new, UnionType):
old_type_names = set(t.name for t in old_union.types)
new_type_names = set(t.name for t in new_union.types)
for t in old_type_names - new_type_names:
yield TypeRemovedFromUnion(t, old_union)
for t in new_type_names - old_type_names:
yield TypeAddedToUnion(t, new_union)
def _diff_enum_types(old: Schema, new: Schema) -> Iterator[SchemaChange]:
for old_enum, new_enum in _iterate_matching_pairs(old, new, EnumType):
for old_ev in old_enum.values:
try:
new_ev = new_enum._values[old_ev.name]
except KeyError:
yield EnumValueRemoved(old_enum, old_ev)
else:
if old_ev.deprecated:
if not new_ev.deprecated:
yield EnumValueDeprecationRemoved(
old_enum, old_ev, new_ev
)
elif old_ev.deprecation_reason != new_ev.deprecation_reason:
yield EnumValueDeprecationReasonChanged(
old_enum, old_ev, new_ev
)
elif new_ev.deprecated:
yield EnumValueDeprecated(old_enum, old_ev, new_ev)
for new_ev in new_enum.values:
if new_ev.name not in old_enum._values:
yield EnumValueAdded(new_enum, new_ev)
def _diff_directives(old: Schema, new: Schema) -> Iterator[SchemaChange]:
for name, old_directive in old.directives.items():
if old_directive in SPECIFIED_DIRECTIVES:
continue
try:
new_directive = new.directives[name]
except KeyError:
yield DirectiveRemoved(old_directive)
else:
old_locs = set(old_directive.locations)
new_locs = set(new_directive.locations)
for loc in old_locs - new_locs:
yield DirectiveLocationRemoved(old_directive, loc)
for loc in new_locs - old_locs:
yield DirectiveLocationAdded(old_directive, loc)
for d in _diff_directive_arguments(old_directive, new_directive):
yield d
for name, new_directive in new.directives.items():
if new_directive in SPECIFIED_DIRECTIVES:
continue
if name not in old.directives:
yield DirectiveAdded(new_directive)
def _diff_directive_arguments(
old_directive: Directive, new_directive: Directive
) -> Iterator[SchemaChange]:
old_args = old_directive.argument_map
new_args = new_directive.argument_map
for name, old_arg in old_args.items():
try:
new_arg = new_args[name]
except KeyError:
yield DirectiveArgumentRemoved(old_directive, old_arg)
else:
if not _is_safe_input_type_change(old_arg.type, new_arg.type):
yield DirectiveArgumentChangedType(
old_directive, old_arg, new_arg
)
elif (
(old_arg.has_default_value and not new_arg.has_default_value)
or (not old_arg.has_default_value and new_arg.has_default_value)
or (
old_arg.has_default_value
and old_arg.default_value != new_arg.default_value
)
):
yield DirectiveArgumentDefaultValueChange(
old_directive, old_arg, new_arg
)
for name, new_arg in new_args.items():
if name not in old_args:
yield DirectiveArgumentAdded(new_directive, new_arg)
def _diff_field_arguments(
parent: Union[ObjectType, InterfaceType], old_field: Field, new_field: Field
) -> Iterator[SchemaChange]:
old_args = old_field.argument_map
new_args = new_field.argument_map
for name, old_arg in old_args.items():
try:
new_arg = new_args[name]
except KeyError:
yield FieldArgumentRemoved(parent, old_field, old_arg)
else:
if not _is_safe_input_type_change(old_arg.type, new_arg.type):
yield FieldArgumentChangedType(
parent, old_field, old_arg, new_arg
)
elif (
(old_arg.has_default_value and not new_arg.has_default_value)
or (not old_arg.has_default_value and new_arg.has_default_value)
or (
old_arg.has_default_value
and old_arg.default_value != new_arg.default_value
)
):
yield FieldArgumentDefaultValueChange(
parent, old_field, old_arg, new_arg
)
for name, new_arg in new_args.items():
if name not in old_args:
yield FieldArgumentAdded(parent, new_field, new_arg)
def _is_safe_input_type_change(
old_type: GraphQLType, new_type: GraphQLType
) -> bool:
if isinstance(old_type, NamedType):
return bool(
isinstance(new_type, NamedType) and old_type.name == new_type.name
)
elif isinstance(old_type, ListType):
return isinstance(new_type, ListType) and _is_safe_input_type_change(
old_type.type, new_type.type
)
elif isinstance(old_type, NonNullType):
return (
isinstance(new_type, NonNullType)
and _is_safe_input_type_change(old_type.type, new_type.type)
) or (
not isinstance(new_type, NonNullType)
and _is_safe_input_type_change(old_type.type, new_type)
)
return False
def _is_safe_output_type_change(
old_type: GraphQLType, new_type: GraphQLType
) -> bool:
if isinstance(old_type, NamedType):
return (
isinstance(new_type, NamedType) and old_type.name == new_type.name
) or (
isinstance(new_type, NonNullType)
and _is_safe_output_type_change(old_type, new_type.type)
)
elif isinstance(old_type, ListType):
return (
isinstance(new_type, ListType)
and _is_safe_input_type_change(old_type.type, new_type.type)
) or (
isinstance(new_type, NonNullType)
and _is_safe_output_type_change(old_type, new_type.type)
)
elif isinstance(old_type, NonNullType):
return isinstance(
new_type, NonNullType
) and _is_safe_output_type_change(old_type.type, new_type.type)
return False
def _diff_object_types(old: Schema, new: Schema) -> Iterator[SchemaChange]:
for old_object, new_object in _iterate_matching_pairs(old, new, ObjectType):
for field_name, old_field in old_object.field_map.items():
try:
new_field = new_object.field_map[field_name]
except KeyError:
yield FieldRemoved(old_object, old_field)
else:
for d in _diff_field(old_field, new_field, old_object):
yield d
for new_field in new_object.field_map.values():
if new_field.name not in old_object.field_map:
yield FieldAdded(new_object, new_field)
old_interfaces = {i.name: i for i in old_object.interfaces}
new_interfaces = {i.name: i for i in new_object.interfaces}
for iname, i in old_interfaces.items():
if iname not in new_interfaces:
yield TypeRemovedFromInterface(i, old_object)
for iname, i in new_interfaces.items():
if iname not in old_interfaces:
yield TypeAddedToInterface(i, old_object)
def _diff_interface_types(old: Schema, new: Schema) -> Iterator[SchemaChange]:
for old_interface, new_interface in _iterate_matching_pairs(
old, new, InterfaceType
):
for field_name, old_field in old_interface.field_map.items():
try:
new_field = new_interface.field_map[field_name]
except KeyError:
yield FieldRemoved(old_interface, old_field)
else:
for d in _diff_field(old_field, new_field, old_interface):
yield d
for new_field in new_interface.field_map.values():
if new_field.name not in old_interface.field_map:
yield FieldAdded(new_interface, new_field)
def _diff_field(
old: Field, new: Field, parent_type: Union[ObjectType, InterfaceType]
) -> Iterator[SchemaChange]:
if not _is_safe_output_type_change(old.type, new.type):
yield FieldChangedType(parent_type, old, new)
for d in _diff_field_arguments(parent_type, old, new):
yield d
if old.deprecated:
if not new.deprecated:
yield FieldDeprecationRemoved(parent_type, old, new)
elif old.deprecation_reason != new.deprecation_reason:
yield FieldDeprecationReasonChanged(parent_type, old, new)
elif new.deprecated:
yield FieldDeprecated(parent_type, old, new)
def _diff_input_types(old: Schema, new: Schema) -> Iterator[SchemaChange]:
for old_type, new_type in _iterate_matching_pairs(
old, new, InputObjectType
):
old_fields = old_type.field_map
new_fields = new_type.field_map
for name, old_field in old_fields.items():
try:
new_field = new_fields[name]
except KeyError:
yield InputFieldRemoved(old_type, old_field)
else:
if not _is_safe_input_type_change(
old_field.type, new_field.type
):
yield InputFieldChangedType(old_type, old_field, new_field)
elif (
(
old_field.has_default_value
and not new_field.has_default_value
)
or (
not old_field.has_default_value
and new_field.has_default_value
)
or (
old_field.has_default_value
and old_field.default_value != new_field.default_value
)
):
yield InputFieldDefaultValueChange(
old_type, old_field, new_field
)
for name, new_field in new_fields.items():
if name not in old_fields:
yield InputFieldAdded(new_type, new_field) | 0.743727 | 0.224565 |
from __future__ import print_function
import os
import numpy as np
import networkx as nx
from utils import common_utils
def read_graph(dataset, is_weighted, is_directed, num_classes):
    '''
    Reads the graph structure using networkx.
    # Arguments
        dataset: Name of the dataset
        is_weighted: set to true if the graph is weighted
        is_directed: set to true if the graph is directed
        num_classes: integer denoting number of unique labels for the dataset
    # Returns
        Tuple of (networkx graph object, numpy array of one-hot label rows
        with shape (num_lines, num_classes))
    '''
    print("Loading the data...")
    # Resolve <project>/data/<dataset>/ relative to this module's location.
    FILE_PATH = os.path.abspath(__file__)
    MODULE_PATH = os.path.dirname(FILE_PATH)
    SRC_PATH = os.path.dirname(MODULE_PATH)
    PROJ_PATH = os.path.dirname(SRC_PATH)
    DATA_PATH = os.path.join(PROJ_PATH, 'data/{}/'.format(dataset))
    # Read edgelist file; the context manager closes the file, so no
    # explicit close() call is needed.
    with open("{}{}.edgelist".format(DATA_PATH, dataset), 'rb') as edgelist_file:
        if is_weighted:
            graph = nx.read_edgelist(edgelist_file, nodetype = int, data = (('weight',float),), create_using = nx.DiGraph())
        else:
            graph = nx.read_edgelist(edgelist_file, nodetype = int, create_using = nx.DiGraph())
            # set all weights as default weight = 1
            for edge in graph.edges():
                graph[edge[0]][edge[1]]['weight'] = 1
    # set if graph should be directed or undirected
    if not is_directed:
        graph = graph.to_undirected()
    # Read labels file: one integer class id per line, converted into a
    # one-hot row vector by common_utils.sample_mask.
    labels = []
    with open("{}{}_label.csv".format(DATA_PATH, dataset), 'r') as f:
        for line in f:
            # np.int was removed in NumPy 1.24; the builtin int is the
            # documented replacement and behaves identically here.
            labels.append(common_utils.sample_mask(int(line), num_classes, int))
    labels = np.asarray(labels)
    return graph, labels
def print_graph(graph):
'''
Prints the graph using networkx module
# Arguments
graph: A networkx graph object
'''
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
nx.draw(graph)
plt.show()
def convert_to_adj_matrix(nx_G, is_directed):
'''
Converts a networkx graph object to numpy adjacency matrix
# Arguments
nx_G: networkx graph object
is_directed: Flag to indicate if graph is directed
# Returns
Adjacency Matrix Representation
'''
num_nodes = nx_G.order()
adj = np.zeros((num_nodes, num_nodes), dtype = np.int64)
for edge in nx_G.edges():
adj[edge[0]][edge[1]] = 1
if not is_directed:
adj[edge[1]][edge[0]] = 1
return adj
def leakyReLU(x, alpha = 0.2):
'''
Computes LeakyReLU of a numpy array
# Arguments
x: Numpy array
alpha: Negative slope coefficient
# Returns
Numpy array
'''
x_np_array = np.asarray(x)
return np.where(x_np_array > 0, x_np_array, np.multiply(x_np_array, alpha))
def softmax(x):
'''
Computes Softmax of a numpy array
# Arguments
x: Numpy array
# Returns
Numpy array
'''
return (np.exp(x).transpose() / np.sum(np.exp(x), axis = 1)).transpose()
def compute_dense(X, A, kernel, attention_kernel, bias):
'''
Computes the alpha embeddings for a pair of nodes with an edge between them
# Arguments
X: Feature Matrix
A: Adjacency Matrix
kernel: Weight Matrix of the layer
attention_kernel: Weight Matrix of the attention kernel layer
# Returns
Dense matrix corresponding to importance of a node on its neighbours
'''
features = np.dot(X, kernel) # (N x F')
#Compute feature combinations
# Note: [[a_1], [a_2]]^T [[Wh_i], [Wh_2]] = [a_1]^T [Wh_i] + [a_2]^T [Wh_j]
attn_for_self = np.dot(features, attention_kernel[0]) # (N x 1), [a_1]^T [Wh_i]
# attn_for_self = K.print_tensor(attn_for_self, message = "attn_for_self is: ")
attn_for_neighs = np.dot(features, attention_kernel[1]) # (N x 1), [a_2]^T [Wh_j]
# attn_for_neighs = K.print_tensor(attn_for_neighs, message = "attn_for_neighs is: ")
# Attention head a(Wh_i, Wh_j) = a^T [[Wh_i], [Wh_j]]
dense = attn_for_self + attn_for_neighs.transpose() # (N x N) via broadcasting
# Add nonlinearty
dense = leakyReLU(dense)
# Mask values before activation (Vaswani et al., 2017)
mask = -10e9 * (1.0 - A)
dense += mask
# Apply softmax to get attention coefficients
dense = softmax(dense) # (N x N)
embeddings = np.dot(dense, features)
return dense, embeddings | src/utils/graph_utils.py | from __future__ import print_function
import os
import numpy as np
import networkx as nx
from utils import common_utils
def read_graph(dataset, is_weighted, is_directed, num_classes):
'''
Reads the graph structure using networkx.
# Arguments
dataset: Name of the dataset
is_weighted: set to true if the graph is weighted
is_directed: set to true if the graph is directed
num_classes: integer denoting number of unique labels for the dataset
# Returns
A networkx graph object
'''
print("Loading the data...")
FILE_PATH = os.path.abspath(__file__)
MODULE_PATH = os.path.dirname(FILE_PATH)
SRC_PATH = os.path.dirname(MODULE_PATH)
PROJ_PATH = os.path.dirname(SRC_PATH)
DATA_PATH = os.path.join(PROJ_PATH, 'data/{}/'.format(dataset))
# Read edgelist file
with open("{}{}.edgelist".format(DATA_PATH, dataset), 'rb') as edgelist_file:
if is_weighted:
graph = nx.read_edgelist(edgelist_file, nodetype = int, data = (('weight',float),), create_using = nx.DiGraph())
else:
graph = nx.read_edgelist(edgelist_file, nodetype = int, create_using = nx.DiGraph())
# set all weights as default weight = 1
for edge in graph.edges():
graph[edge[0]][edge[1]]['weight'] = 1
edgelist_file.close()
# set if graph should be directed or undirected
if not is_directed:
graph = graph.to_undirected()
# Read labels file
labels = []
with open("{}{}_label.csv".format(DATA_PATH, dataset), 'r') as f:
for line in f:
labels.append(common_utils.sample_mask(int(line), num_classes, np.int))
labels = np.asarray(labels)
return graph, labels
def print_graph(graph):
'''
Prints the graph using networkx module
# Arguments
graph: A networkx graph object
'''
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
nx.draw(graph)
plt.show()
def convert_to_adj_matrix(nx_G, is_directed):
'''
Converts a networkx graph object to numpy adjacency matrix
# Arguments
nx_G: networkx graph object
is_directed: Flag to indicate if graph is directed
# Returns
Adjacency Matrix Representation
'''
num_nodes = nx_G.order()
adj = np.zeros((num_nodes, num_nodes), dtype = np.int64)
for edge in nx_G.edges():
adj[edge[0]][edge[1]] = 1
if not is_directed:
adj[edge[1]][edge[0]] = 1
return adj
def leakyReLU(x, alpha = 0.2):
'''
Computes LeakyReLU of a numpy array
# Arguments
x: Numpy array
alpha: Negative slope coefficient
# Returns
Numpy array
'''
x_np_array = np.asarray(x)
return np.where(x_np_array > 0, x_np_array, np.multiply(x_np_array, alpha))
def softmax(x):
'''
Computes Softmax of a numpy array
# Arguments
x: Numpy array
# Returns
Numpy array
'''
return (np.exp(x).transpose() / np.sum(np.exp(x), axis = 1)).transpose()
def compute_dense(X, A, kernel, attention_kernel, bias):
'''
Computes the alpha embeddings for a pair of nodes with an edge between them
# Arguments
X: Feature Matrix
A: Adjacency Matrix
kernel: Weight Matrix of the layer
attention_kernel: Weight Matrix of the attention kernel layer
# Returns
Dense matrix corresponding to importance of a node on its neighbours
'''
features = np.dot(X, kernel) # (N x F')
#Compute feature combinations
# Note: [[a_1], [a_2]]^T [[Wh_i], [Wh_2]] = [a_1]^T [Wh_i] + [a_2]^T [Wh_j]
attn_for_self = np.dot(features, attention_kernel[0]) # (N x 1), [a_1]^T [Wh_i]
# attn_for_self = K.print_tensor(attn_for_self, message = "attn_for_self is: ")
attn_for_neighs = np.dot(features, attention_kernel[1]) # (N x 1), [a_2]^T [Wh_j]
# attn_for_neighs = K.print_tensor(attn_for_neighs, message = "attn_for_neighs is: ")
# Attention head a(Wh_i, Wh_j) = a^T [[Wh_i], [Wh_j]]
dense = attn_for_self + attn_for_neighs.transpose() # (N x N) via broadcasting
# Add nonlinearty
dense = leakyReLU(dense)
# Mask values before activation (Vaswani et al., 2017)
mask = -10e9 * (1.0 - A)
dense += mask
# Apply softmax to get attention coefficients
dense = softmax(dense) # (N x N)
embeddings = np.dot(dense, features)
return dense, embeddings | 0.743634 | 0.486027 |
class InputStream:
def __init__(self, _input):
self.input = _input
self.pos = 0
self.line = 1
self.col = 0
def next(self):
ch = self.input[self.pos]
self.pos += 1
if ch == '\n':
self.line += 1
self.col = 0
else:
self.col += 1
return ch
def peek(self):
if self.pos < len(self.input):
return self.input[self.pos]
def eof(self):
return self.peek() is None
def croak(self, msg):
raise Exception('%s (%d:%d)' %(msg, self.line, self.col))
class TokenStream:
def __init__(self, _input):
self.input = _input
self.current = None
self.croak = self.input.croak
def _is_whitespace(self, ch):
return ch == ' ' or ch == '\t' or ch == '\n'
def _is_parenthesis(self, ch):
return ch == '(' or ch == ')'
def _is_symbol(self, ch):
return (not self._is_whitespace(ch)) and (not self._is_parenthesis(ch))
def _read_while(self, predicate):
string = ''
while (not self.input.eof() and predicate(self.input.peek())):
string += self.input.next()
return string
def _read_escaped(self, end):
escaped = False
string = ''
self.input.next()
while not self.input.eof():
ch = self.input.next()
if escaped:
string += ch
escaped = False
elif ch == '\\':
escaped = True
elif ch == end:
break
else:
string += ch
return string
def _read_string(self):
return {'type': 'symbol', 'value': self._read_escaped('"')}
def _read_symbol(self):
identifier = self._read_while(self._is_symbol);
return {'type': 'symbol', 'value': identifier}
def _skip_comment(self):
self._read_while(lambda ch: ch != '\n')
self.input.next()
def _read_next(self):
self._read_while(self._is_whitespace)
if self.input.eof():
return None
ch = self.input.peek()
if ch == ';':
self._skip_comment()
return self._read_next()
if ch == '"':
return self._read_string()
if self._is_symbol(ch):
return self._read_symbol()
if self._is_parenthesis(ch):
return {'type': 'punc', 'value': self.input.next()}
self.input.croak("Can't handler character %c" % ch)
def peek(self):
if self.current:
return self.current
else:
self.current = self._read_next()
return self.current
def next(self):
tok = self.current
self.current = None
return tok or self._read_next()
def eof(self):
return self.peek() is None
def _parse(_input):
ast = []
tok = _input.next()
while tok is not None and not (tok['type'] == 'punc' and
tok['value'] == ')'):
if tok['type'] == 'symbol':
ast.append(tok['value'])
elif tok['type'] == 'punc' and tok['value'] == '(':
ast.append(_parse(_input))
tok = _input.next()
return ast
def parse(code):
return _parse(TokenStream(InputStream(code))); | python/parser.py | class InputStream:
def __init__(self, _input):
self.input = _input
self.pos = 0
self.line = 1
self.col = 0
def next(self):
ch = self.input[self.pos]
self.pos += 1
if ch == '\n':
self.line += 1
self.col = 0
else:
self.col += 1
return ch
def peek(self):
if self.pos < len(self.input):
return self.input[self.pos]
def eof(self):
return self.peek() is None
def croak(self, msg):
raise Exception('%s (%d:%d)' %(msg, self.line, self.col))
class TokenStream:
def __init__(self, _input):
self.input = _input
self.current = None
self.croak = self.input.croak
def _is_whitespace(self, ch):
return ch == ' ' or ch == '\t' or ch == '\n'
def _is_parenthesis(self, ch):
return ch == '(' or ch == ')'
def _is_symbol(self, ch):
return (not self._is_whitespace(ch)) and (not self._is_parenthesis(ch))
def _read_while(self, predicate):
string = ''
while (not self.input.eof() and predicate(self.input.peek())):
string += self.input.next()
return string
def _read_escaped(self, end):
escaped = False
string = ''
self.input.next()
while not self.input.eof():
ch = self.input.next()
if escaped:
string += ch
escaped = False
elif ch == '\\':
escaped = True
elif ch == end:
break
else:
string += ch
return string
def _read_string(self):
return {'type': 'symbol', 'value': self._read_escaped('"')}
def _read_symbol(self):
identifier = self._read_while(self._is_symbol);
return {'type': 'symbol', 'value': identifier}
def _skip_comment(self):
self._read_while(lambda ch: ch != '\n')
self.input.next()
def _read_next(self):
self._read_while(self._is_whitespace)
if self.input.eof():
return None
ch = self.input.peek()
if ch == ';':
self._skip_comment()
return self._read_next()
if ch == '"':
return self._read_string()
if self._is_symbol(ch):
return self._read_symbol()
if self._is_parenthesis(ch):
return {'type': 'punc', 'value': self.input.next()}
self.input.croak("Can't handler character %c" % ch)
def peek(self):
if self.current:
return self.current
else:
self.current = self._read_next()
return self.current
def next(self):
tok = self.current
self.current = None
return tok or self._read_next()
def eof(self):
return self.peek() is None
def _parse(_input):
ast = []
tok = _input.next()
while tok is not None and not (tok['type'] == 'punc' and
tok['value'] == ')'):
if tok['type'] == 'symbol':
ast.append(tok['value'])
elif tok['type'] == 'punc' and tok['value'] == '(':
ast.append(_parse(_input))
tok = _input.next()
return ast
def parse(code):
return _parse(TokenStream(InputStream(code))); | 0.529263 | 0.339992 |
import pytest
from bondhon import bijoy_classic
def test_swap_kar_location():
assert bijoy_classic.swap_kar_location('আমি') == 'আিম'
@pytest.mark.parametrize('given,expected', [
('১', '1'),
('২', '2'),
('৩', '3'),
('৪', '4'),
('৫', '5'),
('৬', '6'),
('৭', '7'),
('৮', '8'),
('৯', '9'),
('০', '0'),
])
def test_numbers(given, expected):
assert bijoy_classic.from_unicode(given) == expected
@pytest.mark.parametrize('given,expected', [
('ং', 's'),
('ঃ', 't'),
('অ', 'A'),
('আ', 'Av'),
('ই', 'B'),
('ঈ', 'C'),
('উ', 'D'),
('ঊ', 'E'),
('ঋ', 'F'),
('এ', 'G'),
('ঐ', 'H'),
('ও', 'I'),
('ঔ', 'J'),
('ক', 'K'),
('খ', 'L'),
('গ', 'M'),
('ঘ', 'N'),
('ঙ', 'O'),
('চ', 'P'),
('ছ', 'Q'),
('জ', 'R'),
('ঝ', 'S'),
('ঞ', 'T'),
('ট', 'U'),
('ঠ', 'V'),
('ড', 'W'),
('ঢ', 'X'),
('ণ', 'Y'),
('ত', 'Z'),
('থ', '_'),
('দ', '`'),
('ধ', 'a'),
('ন', 'b'),
('প', 'c'),
('ফ', 'd'),
('ব', 'e'),
('ভ', 'f'),
('ম', 'g'),
('য', 'h'),
('র', 'i'),
('ল', 'j'),
('শ', 'k'),
('ষ', 'l'),
('স', 'm'),
('হ', 'n'),
('া', 'v'),
('ি', 'w'),
('ী', 'x')
])
def test_individual_chars(given, expected):
assert bijoy_classic.from_unicode(given) == expected
@pytest.mark.parametrize('given,expected', [
('ক্ক', '°'),
('ক্ট', '±'),
('ক্ট্র', '±ª'),
('ক্ত', '³'),
('ক্ত্র', '³«'),
('ক্ব', 'K¡'),
('ক্ম', '´'),
('ক্য', 'K¨'),
('ক্র', 'µ'),
('ক্ল', 'K¬'),
('ক্ষ', '¶'),
('ক্ষ্ণ', '¶è'),
('ক্ষ্ব', '¶¡'),
('ক্ষ্ম', '²'),
('ক্ষ্ম্য', '²¨'),
('ক্ষ্য', '¶¨'),
('ক্স', '·'),
('খ্য', 'L¨'),
('খ্র', 'Lª'),
('গ্ণ', 'M&Y'),
('গ্ধ', '»'),
('গ্ধ্য', '»¨'),
('গ্ধ্র', '»ª'),
('গ্ন', 'Mœ'),
('গ্ন্য', 'Mœ¨'),
('গ্ব', 'M¦'),
('গ্ম', 'M¥'),
('গ্য', 'M¨'),
('গ্র', 'MÖ'),
('গ্র্য', 'M¨©'),
('গ্ল', 'M'),
('ঘ্ন', 'Nœ'),
('ঘ্য', 'N¨'),
('ঘ্র', 'Nª'),
('ঙ্ক', '¼'),
('ঙ্ক্ত', 'O&³'),
('ঙ্ক্য', '¼¨'),
('ঙ্ক্ষ', '•¶'),
('ঙ্খ', '•L'),
('ঙ্গ', '½'),
('ঙ্গ্য', '½¨'),
('ঙ্ঘ', '•N'),
('ঙ্ঘ্য', '•N¨'),
('ঙ্ঘ্র', '•Nª'),
('ঙ্ম', '•g'),
('চ্চ', '”P'),
('চ্ছ', '”Q'),
('চ্ছ্ব', '”Q¡'),
('চ্ছ্র', '”Qª'),
('চ্ঞ', '”T'),
('চ্ব', '”¡'),
('চ্য', 'P¨'),
('জ্জ', '¾'),
('জ্জ্ব', '¾¡'),
('জ্ঝ', 'À'),
('জ্ঞ', 'Á'),
('জ্ব', 'R¡'),
('জ্য', 'R¨'),
('জ্র', 'Rª'),
('ঞ্চ', 'Â'),
('ঞ্ছ', 'Ã'),
('ঞ্জ', 'Ä'),
('ঞ্ঝ', 'Å'),
('ট্ট', 'Æ'),
('ট্ব', 'U¡'),
('ট্ম', 'U¥'),
('ট্য', 'U¨'),
('ট্র', 'Uª'),
('ড্ড', 'Ç'),
('ড্ব', 'W¡'),
('ড্য', 'W¨'),
('ড্র', 'Wª'),
('ড়্গ', 'ÿ'),
('ঢ্য', 'X¨'),
('ঢ্র', 'Xª'),
('ণ্ট', 'È'),
('ণ্ঠ', 'É'),
('ণ্ঠ্য', 'ɨ'),
('ণ্ড', 'Ê'),
('ণ্ড্য', 'ʨ'),
('ণ্ড্র', 'ʪ'),
('ণ্ঢ', 'YX'),
('ণ্ণ', 'Yè'),
('ণ্ব', 'Y¡'),
('ণ্ম', 'Y¥'),
('ণ্য', 'Y¨'),
('ত্ত', 'Ë'),
('ত্ত্র', 'Ë«'),
('ত্ত্ব', 'Ë¡'),
('ত্ত্য', '˨'),
('ত্থ', 'Ì'),
('ত্ন', 'Zœ'),
('ত্ব', 'Z¡'),
('ত্ম', 'Í'),
('ত্ম্য', 'ͨ'),
('ত্য', 'Z¨'),
('ত্র', 'Î'),
('ত্র্য', 'Z¨©'),
('ৎল', 'rj'),
('ৎস', 'rm'),
('থ্ব', '_¡'),
('থ্য', '_¨'),
('থ্র', '_ª'),
('দ্গ', '˜M'),
('দ্ঘ', '™N'),
('দ্দ', 'Ï'),
('দ্দ্ব', 'Ï¡'),
('দ্ধ', '×'),
('দ্ব', 'Ø'),
('দ্ভ', '™¢'),
('দ্ভ্র', '™£'),
('দ্ম', 'Ù'),
('দ্য', '`¨'),
('দ্র', '`ª'),
('দ্র্য', '`¨©'),
('ধ্ন', 'aœ'),
('ধ্ব', 'aŸ'),
('ধ্ম', 'a¥'),
('ধ্য', 'a¨'),
('ধ্র', 'aª'),
('ন্ট', '›U'),
('ন্ট্র', '›Uª'),
('ন্ঠ', 'Ú'),
('ন্ড', 'Û'),
('ন্ড্র', 'Ûª'),
('ন্ত', 'š—'),
('ন্ত্ব', 'š—¡'),
('ন্ত্য', 'š—¨'),
('ন্ত্র', 'š¿'),
('ন্ত্র্য', '𗍩'),
('ন্থ', 'š’'),
('ন্থ্র', 'š’ª'),
('ন্দ', '›`'),
('ন্দ্য', '›`¨'),
('ন্দ্ব', '›Ø'),
('ন্দ্র', '›`ª'),
('ন্ধ', 'Ü'),
('ন্ধ্য', 'ܨ'),
('ন্ধ্র', 'ܪ'),
('ন্ন', 'bœ'),
('ন্ব', 'š^'),
('ন্ম', 'b¥'),
('ন্য', 'b¨'),
('প্ট', 'Þ'),
('প্ত', 'ß'),
('প্ন', 'cœ'),
('প্প', 'à'),
('প্য', 'c¨'),
('প্র', 'cÖ'),
('প্র্য', 'c¨©'),
('প্ল', 'c'),
('প্স', 'á'),
('ফ্র', 'd«'),
('ফ্ল', 'd¬'),
('ব্জ', 'â'),
('ব্দ', 'ã'),
('ব্ধ', 'ä'),
('ব্ব', 'eŸ'),
('ব্য', 'e¨'),
('ব্র', 'eª'),
('ব্ল', 'e'),
('ভ্ব', 'f¡'),
('ভ্য', 'f¨'),
('ভ্র', 'å'),
('ম্ন', 'æ'),
('ম্প', '¤c'),
('ম্প্র', '¤cÖ'),
('ম্ফ', 'ç'),
('ম্ব', '¤^'),
('ম্ব্র', '¤^ª'),
('ম্ভ', '¤¢'),
('ম্ভ্র', '¤£'),
('ম্ম', '¤§'),
('ম্য', 'g¨'),
('ম্র', 'gª'),
('ম্ল', '¤¬'),
('য্য', 'h¨'),
('র্ক', 'K©'),
('র্ক্য', 'K¨©'),
('র্গ্য', 'M¨©'),
('র্ঘ্য', 'N¨©'),
('র্চ্য', 'P¨©'),
('র্জ্য', 'R¨©'),
('র্ণ্য', 'Y¨©'),
('র্ত্য', 'Z¨©'),
('র্থ্য', '_¨©'),
('র্ব্য', 'e¨©'),
('র্ম্য', 'g¨©'),
('র্শ্য', 'k¨©'),
('র্ষ্য', 'l¨©'),
('র্হ্য', 'n¨©'),
('র্খ', 'L©'),
('র্গ', 'M©'),
('র্গ্র', 'MÖ©'),
('র্ঘ', 'N©'),
('র্চ', 'P©'),
('র্ছ', 'Q©'),
('র্জ', 'R©'),
('র্ঝ', 'S©'),
('র্ট', 'U©'),
('র্ড', 'W©'),
('র্ণ', 'Y©'),
('র্ত', 'Z©'),
('র্ত্র', 'Ω'),
('র্থ', '_©'),
('র্দ', '`©'),
('র্দ্ব', 'Ø©'),
('র্দ্র', '`ª©'),
('র্ধ', 'a©'),
('র্ধ্ব', 'aŸ©'),
('র্ন', 'b©'),
('র্প', 'c©'),
('র্ফ', 'd©'),
('র্ভ', 'f©'),
('র্ম', 'g©'),
('র্য', 'h©'),
('র্ল', 'j©'),
('র্শ', 'k©'),
('র্শ্ব', 'k¦©'),
('র্ষ', 'l©'),
('র্স', 'm©'),
('র্হ', 'n©'),
('র্ঢ্য', 'X¨©'),
('ল্ক', 'é'),
('ল্ক্য', 'é¨'),
('ল্গ', 'ê'),
('ল্ট', 'ë'),
('ল্ড', 'ì'),
('ল্প', 'í'),
('ল্ফ', 'j&d'),
('ল্ব', 'j¡'),
('ল্ভ', 'j&f'),
('ল্ম', 'j¥'),
('ল্য', 'j¨'),
('ল্ল', 'j'),
('শ্চ', 'ð'),
('শ্ছ', 'ñ'),
('শ্ন', 'kœ'),
('শ্ব', 'k¦'),
('শ্ম', 'k¥'),
('শ্য', 'k¨'),
('শ্র', 'kª'),
('শ্ল', 'k'),
('ষ্ক', '®‹'),
('ষ্ক্র', '®Œ'),
('ষ্ট', 'ó'),
('ষ্ট্য', 'ó¨'),
('ষ্ট্র', 'óª'),
('ষ্ঠ', 'ô'),
('ষ্ঠ্য', 'ô¨'),
('ষ্ণ', 'ò'),
('ষ্প', '®c'),
('ষ্প্র', '®cÖ'),
('ষ্ফ', 'õ'),
('ষ্ব', '®^'),
('ষ্ম', '®§'),
('ষ্য', 'l¨'),
('স্ক', '¯‹'),
('স্ক্র', '¯Œ'),
('স্খ', 'ö'),
('স্ট', '÷'),
('স্ট্র', '÷ª'),
('স্ত', '¯—'),
('স্ত্ব', '¯—¡'),
('স্ত্য', '¯—¨'),
('স্ত্র', '¯¿'),
('স্থ', '¯’'),
('স্থ্য', '¯’¨'),
('স্ন', 'ø'),
('স্প', '¯c'),
('স্প্র', '¯cÖ'),
('স্প্ল', '¯c&j'),
('স্ফ', 'ù'),
('স্ব', '¯^'),
('স্ম', '¯§'),
('স্য', 'm¨'),
('স্র', 'mª'),
('স্ল', '¯¬'),
('হ্ণ', 'nœ'),
('হ্ন', 'ý'),
('হ্ব', 'nŸ'),
('হ্ম', 'þ'),
('হ্য', 'n¨'),
('হ্র', 'nª'),
('হ্ল', 'n'),
])
def test_conjugated(given, expected):
assert bijoy_classic.from_unicode(given) == expected
@pytest.mark.parametrize('given,expected', [
('কোন', '†Kvb'),
('মৌনতা', '†gŠbZv'),
])
def test_surrounding_kars(given, expected):
assert bijoy_classic.from_unicode(given) == expected
@pytest.mark.parametrize('given,bijoy', [
('কুরুক্ষেত্র', 'Kzi“‡¶Î'),
('কিংকর্তব্যবিমূঢ়', 'wKsKZ©e¨weg‚‚p'),
('অনিরুদ্ধ', 'Awbi“×'),
('বাংলাদেশ', 'evsjv‡`k'),
])
def test_words(given, bijoy):
assert bijoy_classic.from_unicode(given) == bijoy
@pytest.mark.parametrize('given,bijoy', [
('নাকের নাসারন্ধ্র দিয়ে শ্বাসক্রিয়ার বায়ু প্রবেশ ও প্রস্থান করে।',
'bv‡Ki bvmviܪ w`‡q k¦vmwµqvi evqz cÖ‡ek I cÖ¯’vb K‡i|'),
('আমাদের দেশে যে একবার বিবাহ করিয়াছে বিবাহ সম্বন্ধে তাহার মনে আর কোন উদ্বেগ থাকে না।', # noqa: E501
'Avgv‡`i †`‡k †h GKevi weevn Kwiqv‡Q weevn m¤^‡Ü Zvnvi g‡b Avi †Kvb D‡ØM _v‡K bv|'), # noqa: E501
])
def test_sentences(given, bijoy):
assert bijoy_classic.from_unicode(given) == bijoy | tests/test_bijoy_from_unicode.py | import pytest
from bondhon import bijoy_classic
def test_swap_kar_location():
assert bijoy_classic.swap_kar_location('আমি') == 'আিম'
@pytest.mark.parametrize('given,expected', [
('১', '1'),
('২', '2'),
('৩', '3'),
('৪', '4'),
('৫', '5'),
('৬', '6'),
('৭', '7'),
('৮', '8'),
('৯', '9'),
('০', '0'),
])
def test_numbers(given, expected):
assert bijoy_classic.from_unicode(given) == expected
@pytest.mark.parametrize('given,expected', [
('ং', 's'),
('ঃ', 't'),
('অ', 'A'),
('আ', 'Av'),
('ই', 'B'),
('ঈ', 'C'),
('উ', 'D'),
('ঊ', 'E'),
('ঋ', 'F'),
('এ', 'G'),
('ঐ', 'H'),
('ও', 'I'),
('ঔ', 'J'),
('ক', 'K'),
('খ', 'L'),
('গ', 'M'),
('ঘ', 'N'),
('ঙ', 'O'),
('চ', 'P'),
('ছ', 'Q'),
('জ', 'R'),
('ঝ', 'S'),
('ঞ', 'T'),
('ট', 'U'),
('ঠ', 'V'),
('ড', 'W'),
('ঢ', 'X'),
('ণ', 'Y'),
('ত', 'Z'),
('থ', '_'),
('দ', '`'),
('ধ', 'a'),
('ন', 'b'),
('প', 'c'),
('ফ', 'd'),
('ব', 'e'),
('ভ', 'f'),
('ম', 'g'),
('য', 'h'),
('র', 'i'),
('ল', 'j'),
('শ', 'k'),
('ষ', 'l'),
('স', 'm'),
('হ', 'n'),
('া', 'v'),
('ি', 'w'),
('ী', 'x')
])
def test_individual_chars(given, expected):
assert bijoy_classic.from_unicode(given) == expected
@pytest.mark.parametrize('given,expected', [
('ক্ক', '°'),
('ক্ট', '±'),
('ক্ট্র', '±ª'),
('ক্ত', '³'),
('ক্ত্র', '³«'),
('ক্ব', 'K¡'),
('ক্ম', '´'),
('ক্য', 'K¨'),
('ক্র', 'µ'),
('ক্ল', 'K¬'),
('ক্ষ', '¶'),
('ক্ষ্ণ', '¶è'),
('ক্ষ্ব', '¶¡'),
('ক্ষ্ম', '²'),
('ক্ষ্ম্য', '²¨'),
('ক্ষ্য', '¶¨'),
('ক্স', '·'),
('খ্য', 'L¨'),
('খ্র', 'Lª'),
('গ্ণ', 'M&Y'),
('গ্ধ', '»'),
('গ্ধ্য', '»¨'),
('গ্ধ্র', '»ª'),
('গ্ন', 'Mœ'),
('গ্ন্য', 'Mœ¨'),
('গ্ব', 'M¦'),
('গ্ম', 'M¥'),
('গ্য', 'M¨'),
('গ্র', 'MÖ'),
('গ্র্য', 'M¨©'),
('গ্ল', 'M'),
('ঘ্ন', 'Nœ'),
('ঘ্য', 'N¨'),
('ঘ্র', 'Nª'),
('ঙ্ক', '¼'),
('ঙ্ক্ত', 'O&³'),
('ঙ্ক্য', '¼¨'),
('ঙ্ক্ষ', '•¶'),
('ঙ্খ', '•L'),
('ঙ্গ', '½'),
('ঙ্গ্য', '½¨'),
('ঙ্ঘ', '•N'),
('ঙ্ঘ্য', '•N¨'),
('ঙ্ঘ্র', '•Nª'),
('ঙ্ম', '•g'),
('চ্চ', '”P'),
('চ্ছ', '”Q'),
('চ্ছ্ব', '”Q¡'),
('চ্ছ্র', '”Qª'),
('চ্ঞ', '”T'),
('চ্ব', '”¡'),
('চ্য', 'P¨'),
('জ্জ', '¾'),
('জ্জ্ব', '¾¡'),
('জ্ঝ', 'À'),
('জ্ঞ', 'Á'),
('জ্ব', 'R¡'),
('জ্য', 'R¨'),
('জ্র', 'Rª'),
('ঞ্চ', 'Â'),
('ঞ্ছ', 'Ã'),
('ঞ্জ', 'Ä'),
('ঞ্ঝ', 'Å'),
('ট্ট', 'Æ'),
('ট্ব', 'U¡'),
('ট্ম', 'U¥'),
('ট্য', 'U¨'),
('ট্র', 'Uª'),
('ড্ড', 'Ç'),
('ড্ব', 'W¡'),
('ড্য', 'W¨'),
('ড্র', 'Wª'),
('ড়্গ', 'ÿ'),
('ঢ্য', 'X¨'),
('ঢ্র', 'Xª'),
('ণ্ট', 'È'),
('ণ্ঠ', 'É'),
('ণ্ঠ্য', 'ɨ'),
('ণ্ড', 'Ê'),
('ণ্ড্য', 'ʨ'),
('ণ্ড্র', 'ʪ'),
('ণ্ঢ', 'YX'),
('ণ্ণ', 'Yè'),
('ণ্ব', 'Y¡'),
('ণ্ম', 'Y¥'),
('ণ্য', 'Y¨'),
('ত্ত', 'Ë'),
('ত্ত্র', 'Ë«'),
('ত্ত্ব', 'Ë¡'),
('ত্ত্য', '˨'),
('ত্থ', 'Ì'),
('ত্ন', 'Zœ'),
('ত্ব', 'Z¡'),
('ত্ম', 'Í'),
('ত্ম্য', 'ͨ'),
('ত্য', 'Z¨'),
('ত্র', 'Î'),
('ত্র্য', 'Z¨©'),
('ৎল', 'rj'),
('ৎস', 'rm'),
('থ্ব', '_¡'),
('থ্য', '_¨'),
('থ্র', '_ª'),
('দ্গ', '˜M'),
('দ্ঘ', '™N'),
('দ্দ', 'Ï'),
('দ্দ্ব', 'Ï¡'),
('দ্ধ', '×'),
('দ্ব', 'Ø'),
('দ্ভ', '™¢'),
('দ্ভ্র', '™£'),
('দ্ম', 'Ù'),
('দ্য', '`¨'),
('দ্র', '`ª'),
('দ্র্য', '`¨©'),
('ধ্ন', 'aœ'),
('ধ্ব', 'aŸ'),
('ধ্ম', 'a¥'),
('ধ্য', 'a¨'),
('ধ্র', 'aª'),
('ন্ট', '›U'),
('ন্ট্র', '›Uª'),
('ন্ঠ', 'Ú'),
('ন্ড', 'Û'),
('ন্ড্র', 'Ûª'),
('ন্ত', 'š—'),
('ন্ত্ব', 'š—¡'),
('ন্ত্য', 'š—¨'),
('ন্ত্র', 'š¿'),
('ন্ত্র্য', '𗍩'),
('ন্থ', 'š’'),
('ন্থ্র', 'š’ª'),
('ন্দ', '›`'),
('ন্দ্য', '›`¨'),
('ন্দ্ব', '›Ø'),
('ন্দ্র', '›`ª'),
('ন্ধ', 'Ü'),
('ন্ধ্য', 'ܨ'),
('ন্ধ্র', 'ܪ'),
('ন্ন', 'bœ'),
('ন্ব', 'š^'),
('ন্ম', 'b¥'),
('ন্য', 'b¨'),
('প্ট', 'Þ'),
('প্ত', 'ß'),
('প্ন', 'cœ'),
('প্প', 'à'),
('প্য', 'c¨'),
('প্র', 'cÖ'),
('প্র্য', 'c¨©'),
('প্ল', 'c'),
('প্স', 'á'),
('ফ্র', 'd«'),
('ফ্ল', 'd¬'),
('ব্জ', 'â'),
('ব্দ', 'ã'),
('ব্ধ', 'ä'),
('ব্ব', 'eŸ'),
('ব্য', 'e¨'),
('ব্র', 'eª'),
('ব্ল', 'e'),
('ভ্ব', 'f¡'),
('ভ্য', 'f¨'),
('ভ্র', 'å'),
('ম্ন', 'æ'),
('ম্প', '¤c'),
('ম্প্র', '¤cÖ'),
('ম্ফ', 'ç'),
('ম্ব', '¤^'),
('ম্ব্র', '¤^ª'),
('ম্ভ', '¤¢'),
('ম্ভ্র', '¤£'),
('ম্ম', '¤§'),
('ম্য', 'g¨'),
('ম্র', 'gª'),
('ম্ল', '¤¬'),
('য্য', 'h¨'),
('র্ক', 'K©'),
('র্ক্য', 'K¨©'),
('র্গ্য', 'M¨©'),
('র্ঘ্য', 'N¨©'),
('র্চ্য', 'P¨©'),
('র্জ্য', 'R¨©'),
('র্ণ্য', 'Y¨©'),
('র্ত্য', 'Z¨©'),
('র্থ্য', '_¨©'),
('র্ব্য', 'e¨©'),
('র্ম্য', 'g¨©'),
('র্শ্য', 'k¨©'),
('র্ষ্য', 'l¨©'),
('র্হ্য', 'n¨©'),
('র্খ', 'L©'),
('র্গ', 'M©'),
('র্গ্র', 'MÖ©'),
('র্ঘ', 'N©'),
('র্চ', 'P©'),
('র্ছ', 'Q©'),
('র্জ', 'R©'),
('র্ঝ', 'S©'),
('র্ট', 'U©'),
('র্ড', 'W©'),
('র্ণ', 'Y©'),
('র্ত', 'Z©'),
('র্ত্র', 'Ω'),
('র্থ', '_©'),
('র্দ', '`©'),
('র্দ্ব', 'Ø©'),
('র্দ্র', '`ª©'),
('র্ধ', 'a©'),
('র্ধ্ব', 'aŸ©'),
('র্ন', 'b©'),
('র্প', 'c©'),
('র্ফ', 'd©'),
('র্ভ', 'f©'),
('র্ম', 'g©'),
('র্য', 'h©'),
('র্ল', 'j©'),
('র্শ', 'k©'),
('র্শ্ব', 'k¦©'),
('র্ষ', 'l©'),
('র্স', 'm©'),
('র্হ', 'n©'),
('র্ঢ্য', 'X¨©'),
('ল্ক', 'é'),
('ল্ক্য', 'é¨'),
('ল্গ', 'ê'),
('ল্ট', 'ë'),
('ল্ড', 'ì'),
('ল্প', 'í'),
('ল্ফ', 'j&d'),
('ল্ব', 'j¡'),
('ল্ভ', 'j&f'),
('ল্ম', 'j¥'),
('ল্য', 'j¨'),
('ল্ল', 'j'),
('শ্চ', 'ð'),
('শ্ছ', 'ñ'),
('শ্ন', 'kœ'),
('শ্ব', 'k¦'),
('শ্ম', 'k¥'),
('শ্য', 'k¨'),
('শ্র', 'kª'),
('শ্ল', 'k'),
('ষ্ক', '®‹'),
('ষ্ক্র', '®Œ'),
('ষ্ট', 'ó'),
('ষ্ট্য', 'ó¨'),
('ষ্ট্র', 'óª'),
('ষ্ঠ', 'ô'),
('ষ্ঠ্য', 'ô¨'),
('ষ্ণ', 'ò'),
('ষ্প', '®c'),
('ষ্প্র', '®cÖ'),
('ষ্ফ', 'õ'),
('ষ্ব', '®^'),
('ষ্ম', '®§'),
('ষ্য', 'l¨'),
('স্ক', '¯‹'),
('স্ক্র', '¯Œ'),
('স্খ', 'ö'),
('স্ট', '÷'),
('স্ট্র', '÷ª'),
('স্ত', '¯—'),
('স্ত্ব', '¯—¡'),
('স্ত্য', '¯—¨'),
('স্ত্র', '¯¿'),
('স্থ', '¯’'),
('স্থ্য', '¯’¨'),
('স্ন', 'ø'),
('স্প', '¯c'),
('স্প্র', '¯cÖ'),
('স্প্ল', '¯c&j'),
('স্ফ', 'ù'),
('স্ব', '¯^'),
('স্ম', '¯§'),
('স্য', 'm¨'),
('স্র', 'mª'),
('স্ল', '¯¬'),
('হ্ণ', 'nœ'),
('হ্ন', 'ý'),
('হ্ব', 'nŸ'),
('হ্ম', 'þ'),
('হ্য', 'n¨'),
('হ্র', 'nª'),
('হ্ল', 'n'),
])
def test_conjugated(given, expected):
assert bijoy_classic.from_unicode(given) == expected
@pytest.mark.parametrize('given,expected', [
('কোন', '†Kvb'),
('মৌনতা', '†gŠbZv'),
])
def test_surrounding_kars(given, expected):
assert bijoy_classic.from_unicode(given) == expected
@pytest.mark.parametrize('given,bijoy', [
('কুরুক্ষেত্র', 'Kzi“‡¶Î'),
('কিংকর্তব্যবিমূঢ়', 'wKsKZ©e¨weg‚‚p'),
('অনিরুদ্ধ', 'Awbi“×'),
('বাংলাদেশ', 'evsjv‡`k'),
])
def test_words(given, bijoy):
assert bijoy_classic.from_unicode(given) == bijoy
@pytest.mark.parametrize('given,bijoy', [
('নাকের নাসারন্ধ্র দিয়ে শ্বাসক্রিয়ার বায়ু প্রবেশ ও প্রস্থান করে।',
'bv‡Ki bvmviܪ w`‡q k¦vmwµqvi evqz cÖ‡ek I cÖ¯’vb K‡i|'),
('আমাদের দেশে যে একবার বিবাহ করিয়াছে বিবাহ সম্বন্ধে তাহার মনে আর কোন উদ্বেগ থাকে না।', # noqa: E501
'Avgv‡`i †`‡k †h GKevi weevn Kwiqv‡Q weevn m¤^‡Ü Zvnvi g‡b Avi †Kvb D‡ØM _v‡K bv|'), # noqa: E501
])
def test_sentences(given, bijoy):
assert bijoy_classic.from_unicode(given) == bijoy | 0.343562 | 0.402333 |
from dash.dependencies import Input, Output
# Plotly graph objects to render graph plots
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
# Import dash html, bootstrap components, and tables for datatables
from dash import html
import dash_bootstrap_components as dbc
from dash import dcc
# Import app
from app import app
# Import custom data.py
import data
# Import data from data.py file
# DataFrames prepared in the project-local data.py module.
sales_dt_df = data.sales_dt_df  # per-date sales series; used with 'Marketplace', 'Date', 'Median Sales' columns below
sales_df = data.sales_df  # Prime Day vs Non-Prime Day aggregates; used with 'Marketplace', 'Period' and 'Median 2-Day *' columns
ads_df = data.ads_df  # ad aggregates; used with 'Marketplace', 'Sponsored Type', 'Period' columns
sales_customer_aggs = data.sales_customer_aggs  # NOTE(review): not referenced in this module chunk
ads_customer_aggs = data.ads_customer_aggs  # NOTE(review): not referenced in this module chunk
from datetime import datetime, timedelta
# Prime Day event window (inclusive date strings, YYYY-MM-DD); used to shade
# the event period on the time-series charts.
prime_day_start = '2020-10-13'
prime_day_end = '2020-10-14'
def _comparison_figure(df, var, is_dollar=False):
    """Build a Prime Day vs Non-Prime Day comparison figure for one metric.

    The figure has two panels: a bar chart of *var* for both periods, and an
    indicator showing the Prime Day value with its delta against the
    Non-Prime Day baseline.

    Parameters
    ----------
    df : pandas.DataFrame
        Two-row frame with a 'Period' column ('Prime Day' / 'Non-Prime Day')
        and a numeric column named *var*.
    var : str
        Name of the metric column to plot (also used as the figure title).
    is_dollar : bool, optional
        When True, format the indicator number and the bar-chart y axis with
        a '$' prefix.

    Returns
    -------
    plotly.graph_objects.Figure
    """
    prime_val = df.loc[df['Period'] == 'Prime Day', var].values[0]
    baseline = df.loc[df['Period'] == 'Non-Prime Day', var].values[0]

    fig = make_subplots(
        rows=1, cols=2,
        column_widths=[0.7, 0.3],
        specs=[[{'type': 'bar'}, {'type': 'indicator'}]],
    )
    fig.add_trace(
        go.Bar(
            x=df['Period'],
            y=df[var],
            # Brand colors: dark navy for Non-Prime, light blue for Prime Day.
            marker=dict(color=['#242C40', '#00AAE2']),
        ),
        row=1, col=1,
    )
    fig.add_trace(
        go.Indicator(
            mode='number+delta',
            value=prime_val,
            number={'prefix': '$'} if is_dollar else None,
            delta={'position': 'top', 'reference': baseline},
            domain={'x': [0, 1], 'y': [0, 1]},
            title='Prime Day',
        ),
        row=1, col=2,
    )
    layout = dict(
        title={'text': var},
        template='none',
        hovermode='x',
        margin=dict(l=80, r=80, b=50, t=80),
    )
    if is_dollar:
        layout['yaxis_tickprefix'] = '$'
    fig.update_layout(**layout)
    return fig


@app.callback(
    [
        Output("sales-chart", "figure"),
        Output("orders-chart", "figure"),
        Output("quantity-chart", "figure"),
        Output("med-sales-dt-chart", "figure"),
    ],
    [
        Input("marketplace-dropdown", "value"),
    ],
)
def update_sales_stats(marketplace):
    """Refresh the sales dashboard figures for the selected marketplace.

    Parameters
    ----------
    marketplace : str
        Marketplace name selected in the "marketplace-dropdown" component.

    Returns
    -------
    tuple of plotly.graph_objects.Figure
        (sales comparison, orders comparison, quantity comparison,
        median-sales time series) — one figure per Output, in order.
    """
    filtered_data = sales_df.loc[sales_df['Marketplace'] == marketplace, :]
    filtered_dt_data = sales_dt_df.loc[sales_dt_df['Marketplace'] == marketplace, :]

    # Prime Day vs Non-Prime Day comparisons; only sales is a dollar amount.
    chart1 = _comparison_figure(filtered_data, 'Median 2-Day Sales', is_dollar=True)
    chart2 = _comparison_figure(filtered_data, 'Median 2-Day Orders')
    chart3 = _comparison_figure(filtered_data, 'Median 2-Day Quantity')

    # Daily median-sales time series with the Prime Day window shaded.
    var = 'Median Sales'
    chart5 = px.line(
        filtered_dt_data,
        x='Date', y=var,
        title='''{} Market {}'''.format(marketplace, var),
        template='none',
        markers=True,
    )
    chart5.update_traces(hovertemplate=None)
    chart5.update_layout(
        hovermode='x',
        yaxis_tickprefix='$',
        shapes=[
            dict(
                type='rect',
                xref='x',
                yref='y',
                # Pad the rectangle 0.2 days on each side so the shading
                # fully covers the event-day markers.
                x0=datetime.strptime(prime_day_start, '%Y-%m-%d') - timedelta(0.2),
                y0='0',
                x1=datetime.strptime(prime_day_end, '%Y-%m-%d') + timedelta(0.2),
                y1=filtered_dt_data[var].max() + filtered_dt_data[var].max() * .05,
                fillcolor='#00AAE2',
                opacity=0.2,
                line_width=0,
                layer='below',
            ),
        ],
    )
    chart5.add_annotation(
        x=datetime.strptime(prime_day_start, '%Y-%m-%d') + timedelta(0.5),
        y=filtered_dt_data[var].max() + filtered_dt_data[var].max() * .1,
        text='<b>Prime Day</b>',
        showarrow=False,
        font={'family': 'Franklin Gothic'},
    )

    return chart1, chart2, chart3, chart5
@app.callback(
[
Output("ad-revenue-chart", "figure"),
Output("ad-spending-chart", "figure"),
Output("roas-chart", "figure"),
Output("acos-chart", "figure"),
Output("ctr-chart", "figure"),
Output("cpc-chart", "figure"),
],
[
Input("marketplace-dropdown", "value"),
Input("sponsored-type-dropdown", "value"),
],
)
def update_ad_stats(marketplace, stype):
mask = (
(ads_df['Marketplace'] == marketplace)
& (ads_df['Sponsored Type'] == stype)
)
filtered_data = ads_df.loc[mask, :]
var = 'Median 2-Day Ad Revenue'
chart1 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
# subplot_titles=("Plot 1", "Plot 2"),
)
chart1.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart1.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'prefix': '$'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart1.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
yaxis_tickprefix = '$',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
var = 'Median 2-Day Ad Spending'
chart2 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
# subplot_titles=("Plot 1", "Plot 2"),
)
chart2.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart2.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'prefix': '$'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
'decreasing': {'color': '#3D9970'},
'increasing': {'color': '#FF4136'},
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart2.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
yaxis_tickprefix = '$',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
var = 'Median ROAS'
chart3 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
)
chart3.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart3.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'suffix': '%'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart3.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
yaxis_ticksuffix = '%',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
var = 'Median ACoS'
chart4 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
)
chart4.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart4.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'suffix': '%'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
'decreasing': {'color': '#3D9970'},
'increasing': {'color': '#FF4136'},
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart4.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
yaxis_ticksuffix = '%',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
var = 'Median CTR'
chart5 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
)
chart5.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart5.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'suffix': '%'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart5.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
yaxis_ticksuffix = '%',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
var = 'Median CPC'
chart6 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
# subplot_titles=("Plot 1", "Plot 2"),
)
chart6.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart6.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'prefix': '$'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
'decreasing': {'color': '#3D9970'},
'increasing': {'color': '#FF4136'},
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart6.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
yaxis_tickprefix = '$',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
return chart1, chart2, chart3, chart4, chart5, chart6
@app.callback(
[
Output("customer-dropdown", "options"),
Output("customer-dropdown", "value"),
],
Input("marketplace-dropdown-2", "value"),
)
def set_customer_options(marketplace):
customers = [customer for customer in (
sales_customer_aggs[sales_customer_aggs['Marketplace'] == marketplace]['Customer'].unique()
) if customer in ads_customer_aggs[ads_customer_aggs['Marketplace'] == marketplace]['Customer'].unique()
]
return [{"label": customer, "value": customer} for customer in customers], customers[4]
@app.callback(
[
Output("sales-dt-cust-chart", "figure"),
Output("ad-dt-cust-chart", "figure"),
],
[
Input("marketplace-dropdown-2", "value"),
Input("customer-dropdown", "value"),
Input("sponsored-type-dropdown-2", "value"),
Input("sales-var-dropdown", "value"),
Input("ad-var-a-dropdown", "value"),
Input("ad-var-b-dropdown", "value"),
],
)
def update_cust_stats(marketplace, customer, stype, var, var1, var2):
mask = (
(sales_customer_aggs['Marketplace'] == marketplace)
& (sales_customer_aggs['Customer'] == customer)
)
filtered_data = sales_customer_aggs.loc[mask, :]
if stype != None:
mask = (
(ads_customer_aggs['Marketplace'] == marketplace)
& (ads_customer_aggs['Customer'] == customer)
& (ads_customer_aggs['Sponsored Type'] == stype)
)
else:
mask = (
(ads_customer_aggs['Marketplace'] == marketplace)
& (ads_customer_aggs['Customer'] == customer)
& (ads_customer_aggs['Sponsored Type'] != 'All')
)
filtered_ad_data = ads_customer_aggs.loc[mask, :]
dollar_cols = ['Sales', 'Average Price', 'Median Price', 'Ad Costs', 'Ad Revenue', 'CPC']
pct_cols = ['CTR', 'ACoS', 'ROAS']
chart1 = px.line(
filtered_data,
x= 'Date', y= var,
title= '''Customer's {}'''.format(var),
template= 'none',
markers= True,
)
chart1.update_layout(
hovermode= 'x',
yaxis_tickprefix = '{}'.format('$' if var in dollar_cols else ''),
yaxis_ticksuffix = '{}'.format('%' if var in pct_cols else ''),
# yaxis_tickformat = '.2f',
shapes=[
dict(
type= 'rect',
xref= 'x',
yref= 'y',
x0= datetime.strptime(prime_day_start, '%Y-%m-%d') - timedelta(0.2),
y0= '0',
x1= datetime.strptime(prime_day_end, '%Y-%m-%d') + timedelta(0.2),
y1= filtered_data[var].max() + filtered_data[var].max() * .05,
# fillcolor= 'lightgray',
fillcolor= '#00AAE2',
opacity= 0.2,
line_width= 0,
layer= 'below',
),
],
)
chart1.add_annotation(
x= datetime.strptime(prime_day_start, '%Y-%m-%d') + timedelta(0.5),
y= filtered_data[var].max() + filtered_data[var].max() * .1,
text= '<b>Prime Day</b>',
showarrow= False,
font= {'family': 'Franklin Gothic'},
)
chart2 = make_subplots(specs=[[{'secondary_y': True}]])
for stype in filtered_ad_data['Sponsored Type'].unique():
chart2.add_trace(go.Bar(x= filtered_ad_data[filtered_ad_data['Sponsored Type'] == stype]['Date'],
y= filtered_ad_data[filtered_ad_data['Sponsored Type'] == stype][var1],
name= '{}, {}'.format(var1, stype),
)
)
for stype in filtered_ad_data['Sponsored Type'].unique():
chart2.add_trace(
go.Scatter(x= filtered_ad_data[filtered_ad_data['Sponsored Type'] == stype]['Date'],
y= filtered_ad_data[filtered_ad_data['Sponsored Type'] == stype][var2],
name= '{}, {}'.format(var2, stype),
yaxis= 'y2',
mode= 'lines+markers',
)
)
chart2.update_yaxes(rangemode= 'tozero',
)
chart2.update_layout(
template= 'none',
hovermode= 'x',
yaxis_tickprefix = '{}'.format('$' if var1 in dollar_cols else ''),
yaxis_ticksuffix = '{}'.format('%' if var1 in pct_cols else ''),
title= '{} and {}'.format(var1, var2),
xaxis_title= 'Date',
yaxis_title= '{}'.format(var1),
yaxis2= dict(
tickprefix= '{}'.format('$' if var2 in dollar_cols else ''),
ticksuffix= '{}'.format('%' if var2 in pct_cols else ''),
title= '{}'.format(var2)
),
legend= dict(
title= 'Variable, Sponsored Type'
),
shapes= [
dict(
type= 'rect',
xref= 'x',
yref= 'y',
x0= datetime.strptime(prime_day_start, '%Y-%m-%d') - timedelta(0.5),
y0= '0',
x1= datetime.strptime(prime_day_end, '%Y-%m-%d') + timedelta(0.5),
y1= filtered_ad_data[var1].max() + filtered_ad_data[var1].max() * .05,
# fillcolor= 'lightgray',
fillcolor= '#00AAE2',
opacity= 0.2,
line_width= 0,
layer= 'below',
),
],
)
chart2.add_annotation(
x= datetime.strptime(prime_day_start, '%Y-%m-%d') + timedelta(0.5),
y= filtered_ad_data[var1].max() + filtered_ad_data[var1].max() * .1,
text= '<b>Prime Day</b>',
showarrow= False,
font= {'family': 'Franklin Gothic'},
)
return chart1, chart2 | callbacks.py | from dash.dependencies import Input, Output
# Plotly graph objects to render graph plots
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
# Import dash html, bootstrap components, and tables for datatables
from dash import html
import dash_bootstrap_components as dbc
from dash import dcc
# Import app
from app import app
# Import custom data.py
import data
# Import data from data.py file
sales_dt_df = data.sales_dt_df
sales_df = data.sales_df
ads_df = data.ads_df
sales_customer_aggs = data.sales_customer_aggs
ads_customer_aggs = data.ads_customer_aggs
from datetime import datetime, timedelta
prime_day_start = '2020-10-13'
prime_day_end = '2020-10-14'
@app.callback(
[
Output("sales-chart", "figure"),
Output("orders-chart", "figure"),
Output("quantity-chart", "figure"),
# Output("sales-dt-chart", "figure"),
Output("med-sales-dt-chart", "figure"),
],
[
Input("marketplace-dropdown", "value"),
],
)
def update_sales_stats(marketplace):
mask = (
(sales_df['Marketplace'] == marketplace)
)
filtered_data = sales_df.loc[mask, :]
mask = (
(sales_dt_df['Marketplace'] == marketplace)
)
filtered_dt_data = sales_dt_df.loc[mask, :]
var = 'Median 2-Day Sales'
chart1 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
# subplot_titles=("Plot 1", "Plot 2"),
)
chart1.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart1.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'prefix': '$'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart1.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
yaxis_tickprefix = '$',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
var = 'Median 2-Day Orders'
chart2 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
# subplot_titles=("Plot 1", "Plot 2"),
)
chart2.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart2.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart2.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
var = 'Median 2-Day Quantity'
chart3 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
# subplot_titles=("Plot 1", "Plot 2"),
)
chart3.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart3.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart3.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
# var = 'Total Sales'
# chart4 = px.line(
# filtered_dt_data,
# x= 'Date', y= var,
# title= '''{} Market {}'''.format(marketplace, var),
# template= 'none',
# markers= True,
# # hover_data= {variable: '{}'.format(':$.2f' if variable in dollar_cols else ':.2f')},
# )
# chart4.update_traces(hovertemplate= None)
# chart4.update_layout(
# hovermode= 'x',
# yaxis_tickprefix = '$',
# # yaxis_tickformat = '.2f',
# shapes=[
# dict(
# type= 'rect',
# xref= 'x',
# yref= 'y',
# x0= datetime.strptime(prime_day_start, '%Y-%m-%d') - timedelta(0.2),
# y0= '0',
# x1= datetime.strptime(prime_day_end, '%Y-%m-%d') + timedelta(0.2),
# y1= filtered_dt_data[var].max() + filtered_dt_data[var].max() * .05,
# # fillcolor= 'lightgray',
# fillcolor= '#00AAE2',
# opacity= 0.2,
# line_width= 0,
# layer= 'below',
# ),
# ],
# )
# chart4.add_annotation(
# x= datetime.strptime(prime_day_start, '%Y-%m-%d') + timedelta(0.5),
# y= filtered_dt_data[var].max() + filtered_dt_data[var].max() * .1,
# text= '<b>Prime Day</b>',
# showarrow= False,
# font= {'family': 'Franklin Gothic'},
# )
var = 'Median Sales'
chart5 = px.line(
filtered_dt_data,
x= 'Date', y= var,
title= '''{} Market {}'''.format(marketplace, var),
template= 'none',
markers= True,
# hover_data= {variable: '{}'.format(':$.2f' if variable in dollar_cols else ':.2f')},
)
chart5.update_traces(hovertemplate= None)
chart5.update_layout(
hovermode= 'x',
yaxis_tickprefix = '$',
# yaxis_tickformat = '.2f',
shapes=[
dict(
type= 'rect',
xref= 'x',
yref= 'y',
x0= datetime.strptime(prime_day_start, '%Y-%m-%d') - timedelta(0.2),
y0= '0',
x1= datetime.strptime(prime_day_end, '%Y-%m-%d') + timedelta(0.2),
y1= filtered_dt_data[var].max() + filtered_dt_data[var].max() * .05,
# fillcolor= 'lightgray',
fillcolor= '#00AAE2',
opacity= 0.2,
line_width= 0,
layer= 'below',
),
],
)
chart5.add_annotation(
x= datetime.strptime(prime_day_start, '%Y-%m-%d') + timedelta(0.5),
y= filtered_dt_data[var].max() + filtered_dt_data[var].max() * .1,
text= '<b>Prime Day</b>',
showarrow= False,
font= {'family': 'Franklin Gothic'},
)
return chart1, chart2, chart3, chart5
@app.callback(
[
Output("ad-revenue-chart", "figure"),
Output("ad-spending-chart", "figure"),
Output("roas-chart", "figure"),
Output("acos-chart", "figure"),
Output("ctr-chart", "figure"),
Output("cpc-chart", "figure"),
],
[
Input("marketplace-dropdown", "value"),
Input("sponsored-type-dropdown", "value"),
],
)
def update_ad_stats(marketplace, stype):
mask = (
(ads_df['Marketplace'] == marketplace)
& (ads_df['Sponsored Type'] == stype)
)
filtered_data = ads_df.loc[mask, :]
var = 'Median 2-Day Ad Revenue'
chart1 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
# subplot_titles=("Plot 1", "Plot 2"),
)
chart1.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart1.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'prefix': '$'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart1.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
yaxis_tickprefix = '$',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
var = 'Median 2-Day Ad Spending'
chart2 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
# subplot_titles=("Plot 1", "Plot 2"),
)
chart2.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart2.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'prefix': '$'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
'decreasing': {'color': '#3D9970'},
'increasing': {'color': '#FF4136'},
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart2.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
yaxis_tickprefix = '$',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
var = 'Median ROAS'
chart3 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
)
chart3.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart3.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'suffix': '%'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart3.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
yaxis_ticksuffix = '%',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
var = 'Median ACoS'
chart4 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
)
chart4.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart4.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'suffix': '%'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
'decreasing': {'color': '#3D9970'},
'increasing': {'color': '#FF4136'},
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart4.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
yaxis_ticksuffix = '%',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
var = 'Median CTR'
chart5 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
)
chart5.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart5.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'suffix': '%'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart5.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
yaxis_ticksuffix = '%',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
var = 'Median CPC'
chart6 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
# subplot_titles=("Plot 1", "Plot 2"),
)
chart6.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart6.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'prefix': '$'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
'decreasing': {'color': '#3D9970'},
'increasing': {'color': '#FF4136'},
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart6.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
yaxis_tickprefix = '$',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
return chart1, chart2, chart3, chart4, chart5, chart6
@app.callback(
[
Output("customer-dropdown", "options"),
Output("customer-dropdown", "value"),
],
Input("marketplace-dropdown-2", "value"),
)
def set_customer_options(marketplace):
customers = [customer for customer in (
sales_customer_aggs[sales_customer_aggs['Marketplace'] == marketplace]['Customer'].unique()
) if customer in ads_customer_aggs[ads_customer_aggs['Marketplace'] == marketplace]['Customer'].unique()
]
return [{"label": customer, "value": customer} for customer in customers], customers[4]
@app.callback(
[
Output("sales-dt-cust-chart", "figure"),
Output("ad-dt-cust-chart", "figure"),
],
[
Input("marketplace-dropdown-2", "value"),
Input("customer-dropdown", "value"),
Input("sponsored-type-dropdown-2", "value"),
Input("sales-var-dropdown", "value"),
Input("ad-var-a-dropdown", "value"),
Input("ad-var-b-dropdown", "value"),
],
)
def update_cust_stats(marketplace, customer, stype, var, var1, var2):
mask = (
(sales_customer_aggs['Marketplace'] == marketplace)
& (sales_customer_aggs['Customer'] == customer)
)
filtered_data = sales_customer_aggs.loc[mask, :]
if stype != None:
mask = (
(ads_customer_aggs['Marketplace'] == marketplace)
& (ads_customer_aggs['Customer'] == customer)
& (ads_customer_aggs['Sponsored Type'] == stype)
)
else:
mask = (
(ads_customer_aggs['Marketplace'] == marketplace)
& (ads_customer_aggs['Customer'] == customer)
& (ads_customer_aggs['Sponsored Type'] != 'All')
)
filtered_ad_data = ads_customer_aggs.loc[mask, :]
dollar_cols = ['Sales', 'Average Price', 'Median Price', 'Ad Costs', 'Ad Revenue', 'CPC']
pct_cols = ['CTR', 'ACoS', 'ROAS']
chart1 = px.line(
filtered_data,
x= 'Date', y= var,
title= '''Customer's {}'''.format(var),
template= 'none',
markers= True,
)
chart1.update_layout(
hovermode= 'x',
yaxis_tickprefix = '{}'.format('$' if var in dollar_cols else ''),
yaxis_ticksuffix = '{}'.format('%' if var in pct_cols else ''),
# yaxis_tickformat = '.2f',
shapes=[
dict(
type= 'rect',
xref= 'x',
yref= 'y',
x0= datetime.strptime(prime_day_start, '%Y-%m-%d') - timedelta(0.2),
y0= '0',
x1= datetime.strptime(prime_day_end, '%Y-%m-%d') + timedelta(0.2),
y1= filtered_data[var].max() + filtered_data[var].max() * .05,
# fillcolor= 'lightgray',
fillcolor= '#00AAE2',
opacity= 0.2,
line_width= 0,
layer= 'below',
),
],
)
chart1.add_annotation(
x= datetime.strptime(prime_day_start, '%Y-%m-%d') + timedelta(0.5),
y= filtered_data[var].max() + filtered_data[var].max() * .1,
text= '<b>Prime Day</b>',
showarrow= False,
font= {'family': 'Franklin Gothic'},
)
chart2 = make_subplots(specs=[[{'secondary_y': True}]])
for stype in filtered_ad_data['Sponsored Type'].unique():
chart2.add_trace(go.Bar(x= filtered_ad_data[filtered_ad_data['Sponsored Type'] == stype]['Date'],
y= filtered_ad_data[filtered_ad_data['Sponsored Type'] == stype][var1],
name= '{}, {}'.format(var1, stype),
)
)
for stype in filtered_ad_data['Sponsored Type'].unique():
chart2.add_trace(
go.Scatter(x= filtered_ad_data[filtered_ad_data['Sponsored Type'] == stype]['Date'],
y= filtered_ad_data[filtered_ad_data['Sponsored Type'] == stype][var2],
name= '{}, {}'.format(var2, stype),
yaxis= 'y2',
mode= 'lines+markers',
)
)
chart2.update_yaxes(rangemode= 'tozero',
)
chart2.update_layout(
template= 'none',
hovermode= 'x',
yaxis_tickprefix = '{}'.format('$' if var1 in dollar_cols else ''),
yaxis_ticksuffix = '{}'.format('%' if var1 in pct_cols else ''),
title= '{} and {}'.format(var1, var2),
xaxis_title= 'Date',
yaxis_title= '{}'.format(var1),
yaxis2= dict(
tickprefix= '{}'.format('$' if var2 in dollar_cols else ''),
ticksuffix= '{}'.format('%' if var2 in pct_cols else ''),
title= '{}'.format(var2)
),
legend= dict(
title= 'Variable, Sponsored Type'
),
shapes= [
dict(
type= 'rect',
xref= 'x',
yref= 'y',
x0= datetime.strptime(prime_day_start, '%Y-%m-%d') - timedelta(0.5),
y0= '0',
x1= datetime.strptime(prime_day_end, '%Y-%m-%d') + timedelta(0.5),
y1= filtered_ad_data[var1].max() + filtered_ad_data[var1].max() * .05,
# fillcolor= 'lightgray',
fillcolor= '#00AAE2',
opacity= 0.2,
line_width= 0,
layer= 'below',
),
],
)
chart2.add_annotation(
x= datetime.strptime(prime_day_start, '%Y-%m-%d') + timedelta(0.5),
y= filtered_ad_data[var1].max() + filtered_ad_data[var1].max() * .1,
text= '<b>Prime Day</b>',
showarrow= False,
font= {'family': 'Franklin Gothic'},
)
return chart1, chart2 | 0.711331 | 0.371707 |
import datetime
from oslo_serialization import jsonutils
import six
from manila.scheduler import scheduler_options
from manila import test
class FakeSchedulerOptions(scheduler_options.SchedulerOptions):
def __init__(self, last_checked, now, file_old, file_now, data, filedata):
super(FakeSchedulerOptions, self).__init__()
# Change internals ...
self.last_modified = file_old
self.last_checked = last_checked
self.data = data
# For overrides ...
self._time_now = now
self._file_now = file_now
self._file_data = six.b(filedata)
self.file_was_loaded = False
def _get_file_timestamp(self, filename):
return self._file_now
def _get_file_handle(self, filename):
self.file_was_loaded = True
if six.PY2:
import StringIO
return StringIO.StringIO(self._file_data)
else:
import io
return io.BytesIO(self._file_data)
def _get_time_now(self):
return self._time_now
class SchedulerOptionsTestCase(test.TestCase):
def test_get_configuration_first_time_no_flag(self):
last_checked = None
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = None
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
data = dict(a=1, b=2, c=3)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
{}, jdata)
self.assertEqual({}, fake.get_configuration())
self.assertFalse(fake.file_was_loaded)
def test_get_configuration_first_time_empty_file(self):
last_checked = None
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = None
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
jdata = ""
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
{}, jdata)
self.assertEqual({}, fake.get_configuration('foo.json'))
self.assertTrue(fake.file_was_loaded)
def test_get_configuration_first_time_happy_day(self):
last_checked = None
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = None
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
data = dict(a=1, b=2, c=3)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
{}, jdata)
self.assertEqual(data, fake.get_configuration('foo.json'))
self.assertTrue(fake.file_was_loaded)
def test_get_configuration_second_time_no_change(self):
last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
data = dict(a=1, b=2, c=3)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
data, jdata)
self.assertEqual(data, fake.get_configuration('foo.json'))
self.assertFalse(fake.file_was_loaded)
def test_get_configuration_second_time_too_fast(self):
last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
now = datetime.datetime(2011, 1, 1, 1, 1, 2)
file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
old_data = dict(a=1, b=2, c=3)
data = dict(a=11, b=12, c=13)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
old_data, jdata)
self.assertEqual(old_data, fake.get_configuration('foo.json'))
self.assertFalse(fake.file_was_loaded)
def test_get_configuration_second_time_change(self):
last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
old_data = dict(a=1, b=2, c=3)
data = dict(a=11, b=12, c=13)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
old_data, jdata)
self.assertEqual(data, fake.get_configuration('foo.json'))
self.assertTrue(fake.file_was_loaded) | manila/tests/scheduler/test_scheduler_options.py | import datetime
from oslo_serialization import jsonutils
import six
from manila.scheduler import scheduler_options
from manila import test
class FakeSchedulerOptions(scheduler_options.SchedulerOptions):
def __init__(self, last_checked, now, file_old, file_now, data, filedata):
super(FakeSchedulerOptions, self).__init__()
# Change internals ...
self.last_modified = file_old
self.last_checked = last_checked
self.data = data
# For overrides ...
self._time_now = now
self._file_now = file_now
self._file_data = six.b(filedata)
self.file_was_loaded = False
def _get_file_timestamp(self, filename):
return self._file_now
def _get_file_handle(self, filename):
self.file_was_loaded = True
if six.PY2:
import StringIO
return StringIO.StringIO(self._file_data)
else:
import io
return io.BytesIO(self._file_data)
def _get_time_now(self):
return self._time_now
class SchedulerOptionsTestCase(test.TestCase):
def test_get_configuration_first_time_no_flag(self):
last_checked = None
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = None
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
data = dict(a=1, b=2, c=3)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
{}, jdata)
self.assertEqual({}, fake.get_configuration())
self.assertFalse(fake.file_was_loaded)
def test_get_configuration_first_time_empty_file(self):
last_checked = None
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = None
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
jdata = ""
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
{}, jdata)
self.assertEqual({}, fake.get_configuration('foo.json'))
self.assertTrue(fake.file_was_loaded)
def test_get_configuration_first_time_happy_day(self):
last_checked = None
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = None
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
data = dict(a=1, b=2, c=3)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
{}, jdata)
self.assertEqual(data, fake.get_configuration('foo.json'))
self.assertTrue(fake.file_was_loaded)
def test_get_configuration_second_time_no_change(self):
last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
data = dict(a=1, b=2, c=3)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
data, jdata)
self.assertEqual(data, fake.get_configuration('foo.json'))
self.assertFalse(fake.file_was_loaded)
def test_get_configuration_second_time_too_fast(self):
last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
now = datetime.datetime(2011, 1, 1, 1, 1, 2)
file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
old_data = dict(a=1, b=2, c=3)
data = dict(a=11, b=12, c=13)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
old_data, jdata)
self.assertEqual(old_data, fake.get_configuration('foo.json'))
self.assertFalse(fake.file_was_loaded)
def test_get_configuration_second_time_change(self):
last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
old_data = dict(a=1, b=2, c=3)
data = dict(a=11, b=12, c=13)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
old_data, jdata)
self.assertEqual(data, fake.get_configuration('foo.json'))
self.assertTrue(fake.file_was_loaded) | 0.511473 | 0.3229 |
from azure.cli.core.commands import CliCommandType
def load_command_table(self, _):
from azext_diskpool.generated._client_factory import cf_disk_pool
diskpool_disk_pool = CliCommandType(
operations_tmpl='azext_diskpool.vendored_sdks.storagepool.operations._disk_pools_operations#DiskPoolsOperations'
'.{}',
client_factory=cf_disk_pool)
with self.command_group('disk-pool', diskpool_disk_pool, client_factory=cf_disk_pool) as g:
g.custom_command('list', 'disk_pool_list')
g.custom_show_command('show', 'disk_pool_show')
g.custom_command('create', 'disk_pool_create', supports_no_wait=True)
g.custom_command('update', 'disk_pool_update', supports_no_wait=True)
g.custom_command('delete', 'disk_pool_delete', supports_no_wait=True, confirmation=True)
g.custom_command('list-outbound-network-dependency-endpoint', 'disk_pool_list_outbound_network_dependency_endpo'
'int')
g.custom_command('list-skus', 'disk_pool_list_skus')
g.custom_command('start', 'disk_pool_start', supports_no_wait=True)
g.custom_command('stop', 'disk_pool_stop', supports_no_wait=True)
g.custom_wait_command('wait', 'disk_pool_show')
from azext_diskpool.generated._client_factory import cf_iscsi_target
diskpool_iscsi_target = CliCommandType(
operations_tmpl='azext_diskpool.vendored_sdks.storagepool.operations._iscsi_targets_operations#IscsiTargetsOper'
'ations.{}',
client_factory=cf_iscsi_target)
with self.command_group('disk-pool iscsi-target', diskpool_iscsi_target, client_factory=cf_iscsi_target) as g:
g.custom_command('list', 'disk_pool_iscsi_target_list')
g.custom_show_command('show', 'disk_pool_iscsi_target_show')
g.custom_command('create', 'disk_pool_iscsi_target_create', supports_no_wait=True)
g.custom_command('update', 'disk_pool_iscsi_target_update', supports_no_wait=True)
g.custom_command('delete', 'disk_pool_iscsi_target_delete', supports_no_wait=True, confirmation=True)
g.custom_wait_command('wait', 'disk_pool_iscsi_target_show')
with self.command_group('diskpool', is_experimental=True):
pass | src/diskpool/azext_diskpool/generated/commands.py |
from azure.cli.core.commands import CliCommandType
def load_command_table(self, _):
from azext_diskpool.generated._client_factory import cf_disk_pool
diskpool_disk_pool = CliCommandType(
operations_tmpl='azext_diskpool.vendored_sdks.storagepool.operations._disk_pools_operations#DiskPoolsOperations'
'.{}',
client_factory=cf_disk_pool)
with self.command_group('disk-pool', diskpool_disk_pool, client_factory=cf_disk_pool) as g:
g.custom_command('list', 'disk_pool_list')
g.custom_show_command('show', 'disk_pool_show')
g.custom_command('create', 'disk_pool_create', supports_no_wait=True)
g.custom_command('update', 'disk_pool_update', supports_no_wait=True)
g.custom_command('delete', 'disk_pool_delete', supports_no_wait=True, confirmation=True)
g.custom_command('list-outbound-network-dependency-endpoint', 'disk_pool_list_outbound_network_dependency_endpo'
'int')
g.custom_command('list-skus', 'disk_pool_list_skus')
g.custom_command('start', 'disk_pool_start', supports_no_wait=True)
g.custom_command('stop', 'disk_pool_stop', supports_no_wait=True)
g.custom_wait_command('wait', 'disk_pool_show')
from azext_diskpool.generated._client_factory import cf_iscsi_target
diskpool_iscsi_target = CliCommandType(
operations_tmpl='azext_diskpool.vendored_sdks.storagepool.operations._iscsi_targets_operations#IscsiTargetsOper'
'ations.{}',
client_factory=cf_iscsi_target)
with self.command_group('disk-pool iscsi-target', diskpool_iscsi_target, client_factory=cf_iscsi_target) as g:
g.custom_command('list', 'disk_pool_iscsi_target_list')
g.custom_show_command('show', 'disk_pool_iscsi_target_show')
g.custom_command('create', 'disk_pool_iscsi_target_create', supports_no_wait=True)
g.custom_command('update', 'disk_pool_iscsi_target_update', supports_no_wait=True)
g.custom_command('delete', 'disk_pool_iscsi_target_delete', supports_no_wait=True, confirmation=True)
g.custom_wait_command('wait', 'disk_pool_iscsi_target_show')
with self.command_group('diskpool', is_experimental=True):
pass | 0.442516 | 0.061904 |
import tinctest
import os
from mpp.models import SQLTestCase
from tinctest.lib import run_shell_command, local_path, Gpdiff
from mpp.lib.PSQL import PSQL
MYD = os.path.abspath(os.path.dirname(__file__))
GPHOME = os.environ['GPHOME']
class EnhancedTableFunctionTest(SQLTestCase):
"""
@description This contains several test cases for possible ways of manipulating objects. This test case specifically deals with enhanced table functionality. For more details refer QA task - QA-143
@created 2009-01-27 14:00:00
@modified 2013-10-17 17:10:15
@tags ddl schema_topology
"""
sql_dir = 'sqls/ddls/enhanced_tables'
ans_dir = 'sqls/ddls/enhanced_tables'
out_dir = 'sqls/ddls/enhanced_tables'
@classmethod
def setUpClass(cls):
super(EnhancedTableFunctionTest, cls).setUpClass()
tinctest.logger.info("*** Running the pre-requisite sql files drop.sql and setup.sql")
PSQL.run_sql_file(local_path('sqls/setup/drop.sql'))
PSQL.run_sql_file(local_path('sqls/setup/create.sql'))
tinctest.logger.info("*** Starting the Enhaced table test")
def getMultinodeHosts(self):
"""
Returns distinct no. of nodes for a mult-node cluster environment, else returns None
"""
cmd = "SELECT DISTINCT(hostname) FROM gp_segment_configuration"
hosts = PSQL.run_sql_command(cmd).split('\n')[3:-3]
if len(hosts) > 1:
return hosts
else:
return None
@classmethod
def get_out_dir(cls):
# If the sqls are located in a different directory than the source file, create an output
# directory at the same level as the sql dir
if cls.get_source_dir() == cls.get_sql_dir():
out_dir = os.path.join(cls.get_sql_dir(), 'output/EnhancedTableFunctionTest/')
else:
out_dir = os.path.join(cls.get_sql_dir(), '../output/EnhancedTableFunctionTest/')
if not os.path.exists(out_dir):
os.makedirs(out_dir)
return out_dir
def setUp(self):
# compile tablefunc_demo.c and install the tablefunc_demo.so
cmdMakeInstall = 'cd '+MYD+'/%s/data && make && make install' %self.sql_dir
ok = run_shell_command(cmdMakeInstall)
# Current make file works for linux, but not for Solaris or OSX.
# If compilation fails or installation fails, force system quit: os._exit(1)
if not ok:
tinctest.logger.error("***** make command failed!! Executed Command : %s"%cmdMakeInstall)
self.fail("ERROR: make command failed!!")
sharedObj = GPHOME+'/lib/postgresql/tabfunc_demo.so'
if not os.path.isfile(sharedObj):
tinctest.logger.error("***** Shared object '%s' does not exist!!"%sharedObj)
self.fail("ERROR: Shared object '%s' does not exist!!"%sharedObj)
# For multinode cluster, need to copy shared object tabfunc_demo.so to all primary segments
hosts = self.getMultinodeHosts()
if hosts is not None:
for host in hosts:
cmd_str = "scp "+GPHOME+"/lib/postgresql/tabfunc_demo.so "+host.strip()+":"+GPHOME+"/lib/postgresql"
ok = run_shell_command(cmd_str)
if not ok:
tinctest.logger.error('***** Could not copy shared object to primary segment: '+cmd_str)
self.fail('Could not copy shared object to primary segment: '+cmd_str) | src/test/tinc/tincrepo/mpp/gpdb/tests/catalog/schema_topology/test_ST_EnhancedTableFunctionTest.py | import tinctest
import os
from mpp.models import SQLTestCase
from tinctest.lib import run_shell_command, local_path, Gpdiff
from mpp.lib.PSQL import PSQL
MYD = os.path.abspath(os.path.dirname(__file__))
GPHOME = os.environ['GPHOME']
class EnhancedTableFunctionTest(SQLTestCase):
"""
@description This contains several test cases for possible ways of manipulating objects. This test case specifically deals with enhanced table functionality. For more details refer QA task - QA-143
@created 2009-01-27 14:00:00
@modified 2013-10-17 17:10:15
@tags ddl schema_topology
"""
sql_dir = 'sqls/ddls/enhanced_tables'
ans_dir = 'sqls/ddls/enhanced_tables'
out_dir = 'sqls/ddls/enhanced_tables'
@classmethod
def setUpClass(cls):
super(EnhancedTableFunctionTest, cls).setUpClass()
tinctest.logger.info("*** Running the pre-requisite sql files drop.sql and setup.sql")
PSQL.run_sql_file(local_path('sqls/setup/drop.sql'))
PSQL.run_sql_file(local_path('sqls/setup/create.sql'))
tinctest.logger.info("*** Starting the Enhaced table test")
def getMultinodeHosts(self):
"""
Returns distinct no. of nodes for a mult-node cluster environment, else returns None
"""
cmd = "SELECT DISTINCT(hostname) FROM gp_segment_configuration"
hosts = PSQL.run_sql_command(cmd).split('\n')[3:-3]
if len(hosts) > 1:
return hosts
else:
return None
@classmethod
def get_out_dir(cls):
# If the sqls are located in a different directory than the source file, create an output
# directory at the same level as the sql dir
if cls.get_source_dir() == cls.get_sql_dir():
out_dir = os.path.join(cls.get_sql_dir(), 'output/EnhancedTableFunctionTest/')
else:
out_dir = os.path.join(cls.get_sql_dir(), '../output/EnhancedTableFunctionTest/')
if not os.path.exists(out_dir):
os.makedirs(out_dir)
return out_dir
def setUp(self):
# compile tablefunc_demo.c and install the tablefunc_demo.so
cmdMakeInstall = 'cd '+MYD+'/%s/data && make && make install' %self.sql_dir
ok = run_shell_command(cmdMakeInstall)
# Current make file works for linux, but not for Solaris or OSX.
# If compilation fails or installation fails, force system quit: os._exit(1)
if not ok:
tinctest.logger.error("***** make command failed!! Executed Command : %s"%cmdMakeInstall)
self.fail("ERROR: make command failed!!")
sharedObj = GPHOME+'/lib/postgresql/tabfunc_demo.so'
if not os.path.isfile(sharedObj):
tinctest.logger.error("***** Shared object '%s' does not exist!!"%sharedObj)
self.fail("ERROR: Shared object '%s' does not exist!!"%sharedObj)
# For multinode cluster, need to copy shared object tabfunc_demo.so to all primary segments
hosts = self.getMultinodeHosts()
if hosts is not None:
for host in hosts:
cmd_str = "scp "+GPHOME+"/lib/postgresql/tabfunc_demo.so "+host.strip()+":"+GPHOME+"/lib/postgresql"
ok = run_shell_command(cmd_str)
if not ok:
tinctest.logger.error('***** Could not copy shared object to primary segment: '+cmd_str)
self.fail('Could not copy shared object to primary segment: '+cmd_str) | 0.296756 | 0.185246 |
import argparse
import logging
import os
import sys
import OpenSSL
import zope.component
from letsencrypt import account
from letsencrypt import configuration
from letsencrypt import constants
from letsencrypt import colored_logging
from letsencrypt import cli
from letsencrypt import client
from letsencrypt import crypto_util
from letsencrypt import errors
from letsencrypt import le_util
from letsencrypt import notify
from letsencrypt import storage
from letsencrypt.display import util as display_util
from letsencrypt.plugins import disco as plugins_disco
logger = logging.getLogger(__name__)
class _AttrDict(dict):
"""Attribute dictionary.
A trick to allow accessing dictionary keys as object attributes.
"""
def __init__(self, *args, **kwargs):
super(_AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def renew(cert, old_version):
"""Perform automated renewal of the referenced cert, if possible.
:param letsencrypt.storage.RenewableCert cert: The certificate
lineage to attempt to renew.
:param int old_version: The version of the certificate lineage
relative to which the renewal should be attempted.
:returns: A number referring to newly created version of this cert
lineage, or ``False`` if renewal was not successful.
:rtype: `int` or `bool`
"""
# TODO: handle partial success (some names can be renewed but not
# others)
# TODO: handle obligatory key rotation vs. optional key rotation vs.
# requested key rotation
if "renewalparams" not in cert.configfile:
# TODO: notify user?
return False
renewalparams = cert.configfile["renewalparams"]
if "authenticator" not in renewalparams:
# TODO: notify user?
return False
# Instantiate the appropriate authenticator
plugins = plugins_disco.PluginsRegistry.find_all()
config = configuration.NamespaceConfig(_AttrDict(renewalparams))
# XXX: this loses type data (for example, the fact that key_size
# was an int, not a str)
config.rsa_key_size = int(config.rsa_key_size)
config.tls_sni_01_port = int(config.tls_sni_01_port)
config.namespace.http01_port = int(config.namespace.http01_port)
zope.component.provideUtility(config)
try:
authenticator = plugins[renewalparams["authenticator"]]
except KeyError:
# TODO: Notify user? (authenticator could not be found)
return False
authenticator = authenticator.init(config)
authenticator.prepare()
acc = account.AccountFileStorage(config).load(
account_id=renewalparams["account"])
le_client = client.Client(config, acc, authenticator, None)
with open(cert.version("cert", old_version)) as f:
sans = crypto_util.get_sans_from_cert(f.read())
new_certr, new_chain, new_key, _ = le_client.obtain_certificate(sans)
if new_chain:
# XXX: Assumes that there was a key change. We need logic
# for figuring out whether there was or not. Probably
# best is to have obtain_certificate return None for
# new_key if the old key is to be used (since save_successor
# already understands this distinction!)
return cert.save_successor(
old_version, OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM, new_certr.body),
new_key.pem, crypto_util.dump_pyopenssl_chain(new_chain))
# TODO: Notify results
else:
# TODO: Notify negative results
return False
# TODO: Consider the case where the renewal was partially successful
# (where fewer than all names were renewed)
def _cli_log_handler(args, level, fmt): # pylint: disable=unused-argument
handler = colored_logging.StreamHandler()
handler.setFormatter(logging.Formatter(fmt))
return handler
def _paths_parser(parser):
add = parser.add_argument_group("paths").add_argument
add("--config-dir", default=cli.flag_default("config_dir"),
help=cli.config_help("config_dir"))
add("--work-dir", default=cli.flag_default("work_dir"),
help=cli.config_help("work_dir"))
add("--logs-dir", default=cli.flag_default("logs_dir"),
help="Path to a directory where logs are stored.")
return parser
def _create_parser():
parser = argparse.ArgumentParser()
#parser.add_argument("--cron", action="store_true", help="Run as cronjob.")
parser.add_argument(
"-v", "--verbose", dest="verbose_count", action="count",
default=cli.flag_default("verbose_count"), help="This flag can be used "
"multiple times to incrementally increase the verbosity of output, "
"e.g. -vvv.")
return _paths_parser(parser)
def main(cli_args=sys.argv[1:]):
"""Main function for autorenewer script."""
# TODO: Distinguish automated invocation from manual invocation,
# perhaps by looking at sys.argv[0] and inhibiting automated
# invocations if /etc/letsencrypt/renewal.conf defaults have
# turned it off. (The boolean parameter should probably be
# called renewer_enabled.)
# TODO: When we have a more elaborate renewer command line, we will
# presumably also be able to specify a config file on the
# command line, which, if provided, should take precedence over
# te default config files
zope.component.provideUtility(display_util.FileDisplay(sys.stdout))
args = _create_parser().parse_args(cli_args)
uid = os.geteuid()
le_util.make_or_verify_dir(args.logs_dir, 0o700, uid)
cli.setup_logging(args, _cli_log_handler, logfile='renewer.log')
cli_config = configuration.RenewerConfiguration(args)
# Ensure that all of the needed folders have been created before continuing
le_util.make_or_verify_dir(cli_config.work_dir,
constants.CONFIG_DIRS_MODE, uid)
for renewal_file in os.listdir(cli_config.renewal_configs_dir):
print "Processing", renewal_file
try:
# TODO: Before trying to initialize the RenewableCert object,
# we could check here whether the combination of the config
# and the rc_config together disables all autorenewal and
# autodeployment applicable to this cert. In that case, we
# can simply continue and don't need to instantiate a
# RenewableCert object for this cert at all, which could
# dramatically improve performance for large deployments
# where autorenewal is widely turned off.
cert = storage.RenewableCert(renewal_file, cli_config)
except errors.CertStorageError:
# This indicates an invalid renewal configuration file, such
# as one missing a required parameter (in the future, perhaps
# also one that is internally inconsistent or is missing a
# required parameter). As a TODO, maybe we should warn the
# user about the existence of an invalid or corrupt renewal
# config rather than simply ignoring it.
continue
if cert.should_autorenew():
# Note: not cert.current_version() because the basis for
# the renewal is the latest version, even if it hasn't been
# deployed yet!
old_version = cert.latest_common_version()
renew(cert, old_version)
notify.notify("Autorenewed a cert!!!", "root", "It worked!")
# TODO: explain what happened
if cert.should_autodeploy():
cert.update_all_links_to(cert.latest_common_version())
# TODO: restart web server (invoke IInstaller.restart() method)
notify.notify("Autodeployed a cert!!!", "root", "It worked!")
# TODO: explain what happened | letsencrypt/renewer.py | import argparse
import logging
import os
import sys
import OpenSSL
import zope.component
from letsencrypt import account
from letsencrypt import configuration
from letsencrypt import constants
from letsencrypt import colored_logging
from letsencrypt import cli
from letsencrypt import client
from letsencrypt import crypto_util
from letsencrypt import errors
from letsencrypt import le_util
from letsencrypt import notify
from letsencrypt import storage
from letsencrypt.display import util as display_util
from letsencrypt.plugins import disco as plugins_disco
logger = logging.getLogger(__name__)
class _AttrDict(dict):
"""Attribute dictionary.
A trick to allow accessing dictionary keys as object attributes.
"""
def __init__(self, *args, **kwargs):
super(_AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def renew(cert, old_version):
"""Perform automated renewal of the referenced cert, if possible.
:param letsencrypt.storage.RenewableCert cert: The certificate
lineage to attempt to renew.
:param int old_version: The version of the certificate lineage
relative to which the renewal should be attempted.
:returns: A number referring to newly created version of this cert
lineage, or ``False`` if renewal was not successful.
:rtype: `int` or `bool`
"""
# TODO: handle partial success (some names can be renewed but not
# others)
# TODO: handle obligatory key rotation vs. optional key rotation vs.
# requested key rotation
if "renewalparams" not in cert.configfile:
# TODO: notify user?
return False
renewalparams = cert.configfile["renewalparams"]
if "authenticator" not in renewalparams:
# TODO: notify user?
return False
# Instantiate the appropriate authenticator
plugins = plugins_disco.PluginsRegistry.find_all()
config = configuration.NamespaceConfig(_AttrDict(renewalparams))
# XXX: this loses type data (for example, the fact that key_size
# was an int, not a str)
config.rsa_key_size = int(config.rsa_key_size)
config.tls_sni_01_port = int(config.tls_sni_01_port)
config.namespace.http01_port = int(config.namespace.http01_port)
zope.component.provideUtility(config)
try:
authenticator = plugins[renewalparams["authenticator"]]
except KeyError:
# TODO: Notify user? (authenticator could not be found)
return False
authenticator = authenticator.init(config)
authenticator.prepare()
acc = account.AccountFileStorage(config).load(
account_id=renewalparams["account"])
le_client = client.Client(config, acc, authenticator, None)
with open(cert.version("cert", old_version)) as f:
sans = crypto_util.get_sans_from_cert(f.read())
new_certr, new_chain, new_key, _ = le_client.obtain_certificate(sans)
if new_chain:
# XXX: Assumes that there was a key change. We need logic
# for figuring out whether there was or not. Probably
# best is to have obtain_certificate return None for
# new_key if the old key is to be used (since save_successor
# already understands this distinction!)
return cert.save_successor(
old_version, OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM, new_certr.body),
new_key.pem, crypto_util.dump_pyopenssl_chain(new_chain))
# TODO: Notify results
else:
# TODO: Notify negative results
return False
# TODO: Consider the case where the renewal was partially successful
# (where fewer than all names were renewed)
def _cli_log_handler(args, level, fmt): # pylint: disable=unused-argument
handler = colored_logging.StreamHandler()
handler.setFormatter(logging.Formatter(fmt))
return handler
def _paths_parser(parser):
add = parser.add_argument_group("paths").add_argument
add("--config-dir", default=cli.flag_default("config_dir"),
help=cli.config_help("config_dir"))
add("--work-dir", default=cli.flag_default("work_dir"),
help=cli.config_help("work_dir"))
add("--logs-dir", default=cli.flag_default("logs_dir"),
help="Path to a directory where logs are stored.")
return parser
def _create_parser():
parser = argparse.ArgumentParser()
#parser.add_argument("--cron", action="store_true", help="Run as cronjob.")
parser.add_argument(
"-v", "--verbose", dest="verbose_count", action="count",
default=cli.flag_default("verbose_count"), help="This flag can be used "
"multiple times to incrementally increase the verbosity of output, "
"e.g. -vvv.")
return _paths_parser(parser)
def main(cli_args=sys.argv[1:]):
"""Main function for autorenewer script."""
# TODO: Distinguish automated invocation from manual invocation,
# perhaps by looking at sys.argv[0] and inhibiting automated
# invocations if /etc/letsencrypt/renewal.conf defaults have
# turned it off. (The boolean parameter should probably be
# called renewer_enabled.)
# TODO: When we have a more elaborate renewer command line, we will
# presumably also be able to specify a config file on the
# command line, which, if provided, should take precedence over
# te default config files
zope.component.provideUtility(display_util.FileDisplay(sys.stdout))
args = _create_parser().parse_args(cli_args)
uid = os.geteuid()
le_util.make_or_verify_dir(args.logs_dir, 0o700, uid)
cli.setup_logging(args, _cli_log_handler, logfile='renewer.log')
cli_config = configuration.RenewerConfiguration(args)
# Ensure that all of the needed folders have been created before continuing
le_util.make_or_verify_dir(cli_config.work_dir,
constants.CONFIG_DIRS_MODE, uid)
for renewal_file in os.listdir(cli_config.renewal_configs_dir):
print "Processing", renewal_file
try:
# TODO: Before trying to initialize the RenewableCert object,
# we could check here whether the combination of the config
# and the rc_config together disables all autorenewal and
# autodeployment applicable to this cert. In that case, we
# can simply continue and don't need to instantiate a
# RenewableCert object for this cert at all, which could
# dramatically improve performance for large deployments
# where autorenewal is widely turned off.
cert = storage.RenewableCert(renewal_file, cli_config)
except errors.CertStorageError:
# This indicates an invalid renewal configuration file, such
# as one missing a required parameter (in the future, perhaps
# also one that is internally inconsistent or is missing a
# required parameter). As a TODO, maybe we should warn the
# user about the existence of an invalid or corrupt renewal
# config rather than simply ignoring it.
continue
if cert.should_autorenew():
# Note: not cert.current_version() because the basis for
# the renewal is the latest version, even if it hasn't been
# deployed yet!
old_version = cert.latest_common_version()
renew(cert, old_version)
notify.notify("Autorenewed a cert!!!", "root", "It worked!")
# TODO: explain what happened
if cert.should_autodeploy():
cert.update_all_links_to(cert.latest_common_version())
# TODO: restart web server (invoke IInstaller.restart() method)
notify.notify("Autodeployed a cert!!!", "root", "It worked!")
# TODO: explain what happened | 0.309441 | 0.099426 |
from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class IfPerfHourlyRemote(RemoteModel):
"""
This table list out the entries of interface hourly performance.
| ``DataSourceID:`` The internal NetMRI identifier for the collector NetMRI that collected this data record.
| ``attribute type:`` number
| ``StartTime:`` The date and time the record was initially created in NetMRI.
| ``attribute type:`` datetime
| ``EndTime:`` The date and time the record was last modified in NetMRI.
| ``attribute type:`` datetime
| ``DeviceID:`` The internal NetMRI identifier for the device from which interface hourly performance information was collected.
| ``attribute type:`` number
| ``ifIndex:`` The current index of hourly performance of an interface.
| ``attribute type:`` number
| ``ifSpeed:`` The speed of packets sends per hour.
| ``attribute type:`` number
| ``ifTotalChanges:`` The total number of changes occurs in each hour of an interface.
| ``attribute type:`` number
| ``ifInOctets:`` The total number of incoming octets.
| ``attribute type:`` number
| ``ifInUcastPkts:`` The number of incoming unicast packets.
| ``attribute type:`` number
| ``ifInNUcastPkts:`` The number of non unicasting packets.
| ``attribute type:`` number
| ``ifInMulticastPkts:`` The number of incoming multicast packets.
| ``attribute type:`` number
| ``ifInBroadcastPkts:`` The number of incoming broadcast packets.
| ``attribute type:`` number
| ``ifInDiscards:`` The number of incoming discarded packets.
| ``attribute type:`` number
| ``ifInErrors:`` The number of incoming errors in each packet of an interface.
| ``attribute type:`` number
| ``ifOutOctets:`` The number of outgoing octets of an interface.
| ``attribute type:`` number
| ``ifOutUcastPkts:`` The number of outgoing unicast packets of an interface.
| ``attribute type:`` number
| ``ifOutNUcastPkts:`` The number of outgoing non unicast packets of an interface.
| ``attribute type:`` number
| ``ifOutMulticastPkts:`` The number of outgoing multicast packets of an interface.
| ``attribute type:`` number
| ``ifOutBroadcastPkts:`` The number of outgoing broadcast packets of an interface.
| ``attribute type:`` number
| ``ifOutDiscards:`` The number of outgoing discarded packets.
| ``attribute type:`` number
| ``ifOutErrors:`` The number of outgoing error packets.
| ``attribute type:`` number
| ``ifAlignmentErrors:`` The number of alignment errors of each packet in the interface.
| ``attribute type:`` number
| ``ifFCSErrors:`` The number of FCS errors of each packet in the interface.
| ``attribute type:`` number
| ``ifLateCollisions:`` The number of late collisions occurs while sending the packets.
| ``attribute type:`` number
"""
properties = ("DataSourceID",
"StartTime",
"EndTime",
"DeviceID",
"ifIndex",
"ifSpeed",
"ifTotalChanges",
"ifInOctets",
"ifInUcastPkts",
"ifInNUcastPkts",
"ifInMulticastPkts",
"ifInBroadcastPkts",
"ifInDiscards",
"ifInErrors",
"ifOutOctets",
"ifOutUcastPkts",
"ifOutNUcastPkts",
"ifOutMulticastPkts",
"ifOutBroadcastPkts",
"ifOutDiscards",
"ifOutErrors",
"ifAlignmentErrors",
"ifFCSErrors",
"ifLateCollisions",
)
@property
@check_api_availability
def data_source(self):
"""
The collector NetMRI that collected this data record.
``attribute type:`` model
"""
return self.broker.data_source(**{"DeviceID": self.DeviceID }) | infoblox_netmri/api/remote/models/if_perf_hourly_remote.py | from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class IfPerfHourlyRemote(RemoteModel):
"""
This table list out the entries of interface hourly performance.
| ``DataSourceID:`` The internal NetMRI identifier for the collector NetMRI that collected this data record.
| ``attribute type:`` number
| ``StartTime:`` The date and time the record was initially created in NetMRI.
| ``attribute type:`` datetime
| ``EndTime:`` The date and time the record was last modified in NetMRI.
| ``attribute type:`` datetime
| ``DeviceID:`` The internal NetMRI identifier for the device from which interface hourly performance information was collected.
| ``attribute type:`` number
| ``ifIndex:`` The current index of hourly performance of an interface.
| ``attribute type:`` number
| ``ifSpeed:`` The speed of packets sends per hour.
| ``attribute type:`` number
| ``ifTotalChanges:`` The total number of changes occurs in each hour of an interface.
| ``attribute type:`` number
| ``ifInOctets:`` The total number of incoming octets.
| ``attribute type:`` number
| ``ifInUcastPkts:`` The number of incoming unicast packets.
| ``attribute type:`` number
| ``ifInNUcastPkts:`` The number of non unicasting packets.
| ``attribute type:`` number
| ``ifInMulticastPkts:`` The number of incoming multicast packets.
| ``attribute type:`` number
| ``ifInBroadcastPkts:`` The number of incoming broadcast packets.
| ``attribute type:`` number
| ``ifInDiscards:`` The number of incoming discarded packets.
| ``attribute type:`` number
| ``ifInErrors:`` The number of incoming errors in each packet of an interface.
| ``attribute type:`` number
| ``ifOutOctets:`` The number of outgoing octets of an interface.
| ``attribute type:`` number
| ``ifOutUcastPkts:`` The number of outgoing unicast packets of an interface.
| ``attribute type:`` number
| ``ifOutNUcastPkts:`` The number of outgoing non unicast packets of an interface.
| ``attribute type:`` number
| ``ifOutMulticastPkts:`` The number of outgoing multicast packets of an interface.
| ``attribute type:`` number
| ``ifOutBroadcastPkts:`` The number of outgoing broadcast packets of an interface.
| ``attribute type:`` number
| ``ifOutDiscards:`` The number of outgoing discarded packets.
| ``attribute type:`` number
| ``ifOutErrors:`` The number of outgoing error packets.
| ``attribute type:`` number
| ``ifAlignmentErrors:`` The number of alignment errors of each packet in the interface.
| ``attribute type:`` number
| ``ifFCSErrors:`` The number of FCS errors of each packet in the interface.
| ``attribute type:`` number
| ``ifLateCollisions:`` The number of late collisions occurs while sending the packets.
| ``attribute type:`` number
"""
properties = ("DataSourceID",
"StartTime",
"EndTime",
"DeviceID",
"ifIndex",
"ifSpeed",
"ifTotalChanges",
"ifInOctets",
"ifInUcastPkts",
"ifInNUcastPkts",
"ifInMulticastPkts",
"ifInBroadcastPkts",
"ifInDiscards",
"ifInErrors",
"ifOutOctets",
"ifOutUcastPkts",
"ifOutNUcastPkts",
"ifOutMulticastPkts",
"ifOutBroadcastPkts",
"ifOutDiscards",
"ifOutErrors",
"ifAlignmentErrors",
"ifFCSErrors",
"ifLateCollisions",
)
@property
@check_api_availability
def data_source(self):
"""
The collector NetMRI that collected this data record.
``attribute type:`` model
"""
return self.broker.data_source(**{"DeviceID": self.DeviceID }) | 0.691289 | 0.582194 |
from lark import Lark, Transformer, Tree, Token
from collections import namedtuple
import typing
import json
BNF = r'''
OCTALDIGIT: "0..7"
IDENT: ( "_" )* LETTER ( LETTER | DECIMALDIGIT | "_" )*
FULLIDENT: IDENT ( "." IDENT )*
MESSAGENAME: IDENT
ENUMNAME: IDENT
FIELDNAME: IDENT
ONEOFNAME: IDENT
MAPNAME: IDENT
SERVICENAME: IDENT
TAGNAME: IDENT
TAGVALUE: IDENT
RPCNAME: IDENT
QUALIFIER: ( "stream" )
MESSAGETYPE: [ "." ] ( IDENT "." )* MESSAGENAME
ENUMTYPE: [ "." ] ( IDENT "." )* ENUMNAME
INTLIT : DECIMALLIT | OCTALLIT | HEXLIT
DECIMALLIT: ( "1".."9" ) ( DECIMALDIGIT )*
OCTALLIT : "0" ( OCTALDIGIT )*
HEXLIT : "0" ( "x" | "X" ) HEXDIGIT ( HEXDIGIT )*
FLOATLIT: ( DECIMALS "." [ DECIMALS ] [ EXPONENT ] | DECIMALS EXPONENT | "."DECIMALS [ EXPONENT ] ) | "inf" | "nan"
DECIMALS : DECIMALDIGIT ( DECIMALDIGIT )*
EXPONENT : ( "e" | "E" ) [ "+" | "-" ] DECIMALS
BOOLLIT: "true" | "false"
STRLIT: ( "'" ( CHARVALUE )* "'" ) | ( "\"" ( CHARVALUE )* "\"" )
CHARVALUE: HEXESCAPE | OCTESCAPE | CHARESCAPE | /[^\0\n\\]/
HEXESCAPE: "\\" ( "x" | "X" ) HEXDIGIT HEXDIGIT
OCTESCAPE: "\\" OCTALDIGIT OCTALDIGIT OCTALDIGIT
CHARESCAPE: "\\" ( "a" | "b" | "f" | "n" | "r" | "t" | "v" | "\\" | "'" | "\"" )
QUOTE: "'" | "\""
EMPTYSTATEMENT: ";"
CONSTANT: FULLIDENT | ( [ "-" | "+" ] INTLIT ) | ( [ "-" | "+" ] FLOATLIT ) | STRLIT | BOOLLIT
syntax: "syntax" "=" QUOTE "proto3" QUOTE ";"
import: "import" [ "weak" | "public" ] STRLIT ";"
package: "package" FULLIDENT ";"
option: "option" OPTIONNAME "=" CONSTANT ";"
OPTIONNAME: ( IDENT | "(" FULLIDENT ")" ) ( "." IDENT )*
TYPE: "double" | "float" | "int32" | "int64" | "uint32" | "uint64" | "sint32" | "sint64" | "fixed32" | "fixed64" | "sfixed32" | "sfixed64" | "bool" | "string" | "bytes" | MESSAGETYPE | ENUMTYPE
FIELDNUMBER: INTLIT
field: [ comments ] TYPE FIELDNAME "=" FIELDNUMBER [ "[" fieldoptions "]" ] TAIL
fieldoptions: fieldoption ( "," fieldoption )*
fieldoption: OPTIONNAME "=" CONSTANT
repeatedfield: [ comments ] "repeated" field
oneof: "oneof" ONEOFNAME "{" ( oneoffield | EMPTYSTATEMENT )* "}"
oneoffield: TYPE FIELDNAME "=" FIELDNUMBER [ "[" fieldoptions "]" ] ";"
mapfield: [ comments ] "map" "<" KEYTYPE "," TYPE ">" MAPNAME "=" FIELDNUMBER [ "[" fieldoptions "]" ] TAIL
KEYTYPE: "int32" | "int64" | "uint32" | "uint64" | "sint32" | "sint64" | "fixed32" | "fixed64" | "sfixed32" | "sfixed64" | "bool" | "string"
reserved: "reserved" ( ranges | fieldnames ) ";"
ranges: range ( "," range )*
range: INTLIT [ "to" ( INTLIT | "max" ) ]
fieldnames: FIELDNAME ( "," FIELDNAME )*
enum: [ comments ] "enum" ENUMNAME enumbody
enumbody: "{" ( enumfield | EMPTYSTATEMENT )* "}"
enumfield: [ COMMENTS ] IDENT "=" INTLIT [ "[" enumvalueoption ( "," enumvalueoption )* "]" ] TAIL
enumvalueoption: OPTIONNAME "=" CONSTANT
message: [ comments ] "message" MESSAGENAME messagebody
messagebody: "{" ( repeatedfield | field | enum | message | option | oneof | mapfield | reserved | EMPTYSTATEMENT )* "}"
googleoption: "option" "(google.api.http)" "=" "{" [ "post:" CONSTANT [ "body:" CONSTANT ] ] "}" ";"
service: [ comments ] "service" SERVICENAME "{" ( option | rpc | EMPTYSTATEMENT )* "}"
rpc: [ comments ] "rpc" RPCNAME "(" ( QUALIFIER )* MESSAGETYPE ")" "returns" "(" ( QUALIFIER )* MESSAGETYPE ")" ( ( "{" ( googleoption | option | EMPTYSTATEMENT )* "}" ) | ";" )
proto:[ comments ] syntax ( import | package | option | topleveldef | EMPTYSTATEMENT )*
topleveldef: message | enum | service | comments
TAIL: ";" [/[\s|\t]/] [ COMMENT ]
COMMENT: "//" /.*/ [ "\n" ]
comments: COMMENT ( COMMENT )*
COMMENTS: COMMENT ( COMMENT )*
%import common.HEXDIGIT
%import common.DIGIT -> DECIMALDIGIT
%import common.LETTER
%import common.WS
%import common.NEWLINE
%ignore WS
'''
Comment = typing.NamedTuple('Comment', [('content', str), ('tags', typing.Dict[str, typing.Any])])
Field = typing.NamedTuple('Field', [('comment', 'Comment'), ('type', str), ('key_type', str), ('val_type', str), ('name', str), ('number', int)])
Enum = typing.NamedTuple('Enum', [('comment', 'Comment'), ('name', str), ('fields', typing.Dict[str, 'Field'])])
Message = typing.NamedTuple('Message', [('comment', 'Comment'), ('name', str), ('fields', typing.List['Field']),
('messages', typing.Dict[str, 'Message']), ('enums', typing.Dict[str, 'Enum'])])
Service = typing.NamedTuple('Service', [('name', str), ('functions', typing.Dict[str, 'RpcFunc'])])
RpcFunc = typing.NamedTuple('RpcFunc', [('name', str), ('in_stream', bool), ('in_type', str), ('out_stream', bool), ('out_type', str), ('uri', str)])
ProtoFile = typing.NamedTuple('ProtoFile',
[('messages', typing.Dict[str, 'Message']), ('enums', typing.Dict[str, 'Enum']),
('services', typing.Dict[str, 'Service']), ('imports', typing.List[str]),
('options', typing.Dict[str, str]), ('package', str)])
class ProtoTransformer(Transformer):
    '''Converts syntax tree token into more easily usable namedtuple objects'''

    def message(self, tokens):
        '''Returns a Message namedtuple'''
        comment = Comment("", {})
        # Grammar: message: [ comments ] "message" MESSAGENAME messagebody --
        # tokens has 2 entries when the optional leading comment is absent.
        if len(tokens) < 3:
            name_token, body = tokens
        else:
            comment, name_token, body = tokens
        # body is the (fields, messages, enums) tuple built by messagebody().
        return Message(comment, name_token.value, *body)

    def messagebody(self, items):
        '''Returns a tuple of message body namedtuples'''
        # Partition transformed children by kind: nested messages and enums
        # are keyed by name, plain fields keep declaration order.
        messages = {}
        enums = {}
        fields = []
        for item in items:
            if isinstance(item, Message):
                messages[item.name] = item
            elif isinstance(item, Enum):
                enums[item.name] = item
            elif isinstance(item, Field):
                fields.append(item)
        return fields, messages, enums

    def field(self, tokens):
        '''Returns a Field namedtuple'''
        comment = Comment("", {})
        # Empty placeholder tokens stand in for pieces the rule may omit.
        # ('type' shadows the builtin; kept as-is in this doc-only pass.)
        type = Token("TYPE", "")
        fieldname = Token("FIELDNAME", "")
        fieldnumber = Token("FIELDNUMBER", "")
        for token in tokens:
            if isinstance(token, Comment):
                comment = token
            elif isinstance(token, Token):
                if token.type == "TYPE":
                    type = token
                elif token.type == "FIELDNAME":
                    fieldname = token
                elif token.type == "FIELDNUMBER":
                    fieldnumber = token
                elif token.type == "COMMENT":
                    comment = Comment(token.value, {})
        # Scalar fields reuse the declared type for key_type/val_type.
        # NOTE(review): int('') raises ValueError if FIELDNUMBER was never
        # seen -- confirm the grammar always supplies it.
        return Field(comment, type.value, type.value, type.value, fieldname.value, int(fieldnumber.value))

    def repeatedfield(self, tokens):
        '''Returns a Field namedtuple'''
        comment = Comment("", {})
        # Optional comment first, then the already-transformed inner field.
        if len(tokens) < 2:
            field = tokens[0]
        else:
            comment, field = tuple(tokens)
        # Element type is preserved in key_type/val_type while 'type' is
        # overwritten with the 'repeated' marker.
        return Field(comment, 'repeated', field.type, field.type, field.name, field.number)

    def mapfield(self, tokens):
        '''Returns a Field namedtuple'''
        comment = Comment("", {})
        # map<KEYTYPE, TYPE> MAPNAME = FIELDNUMBER; placeholders as in field().
        val_type = Token("TYPE", "")
        key_type = Token("KEYTYPE", "")
        fieldname = Token("MAPNAME", "")
        fieldnumber = Token("FIELDNUMBER", "")
        for token in tokens:
            if isinstance(token, Comment):
                comment = token
            elif isinstance(token, Token):
                if token.type == "TYPE":
                    val_type = token
                elif token.type == "KEYTYPE":
                    key_type = token
                elif token.type == "MAPNAME":
                    fieldname = token
                elif token.type == "FIELDNUMBER":
                    fieldnumber = token
                elif token.type == "COMMENT":
                    comment = Comment(token.value, {})
        return Field(comment, 'map', key_type.value, val_type.value, fieldname.value, int(fieldnumber.value))

    def comments(self, tokens):
        '''Returns a Tag namedtuple'''
        # Concatenate all comment lines and harvest "@key=value" / "@flag"
        # annotations embedded in them into the tags dict.
        comment = ''
        tags = {}
        for token in tokens:
            comment += token
            if token.find('@') < 0:
                continue
            kvs = token.strip(" /\n").split('@')
            for kv in kvs:
                kv = kv.strip(" /\n")
                if not kv:
                    continue
                tmp = kv.split('=')
                key = tmp[0].strip(" /\n").lower()
                # A "key" containing spaces is prose, not an annotation.
                if key.find(" ") >= 0:
                    continue
                if len(tmp) > 1:
                    tags[key] = tmp[1].lower()
                else:
                    tags[key] = True
        return Comment(comment, tags)

    def enum(self, tokens):
        '''Returns an Enum namedtuple'''
        comment = Comment("", {})
        if len(tokens) < 3:
            name, fields = tokens
        else:
            comment, name, fields = tokens
        return Enum(comment, name.value, fields)

    def enumbody(self, tokens):
        '''Returns a sequence of enum identifiers'''
        enumitems = []
        for tree in tokens:
            # NOTE(review): assumes every child is a Tree; a bare
            # EMPTYSTATEMENT Token has no .data attribute -- confirm the
            # grammar never passes one through here.
            if tree.data != 'enumfield':
                continue
            comment = Comment("", {})
            name = Token("IDENT", "")
            value = Token("INTLIT", "")
            for token in tree.children:
                if isinstance(token, Comment):
                    comment = token
                elif isinstance(token, Token):
                    if token.type == "IDENT":
                        name = token
                    elif token.type == "INTLIT":
                        value = token
                    elif token.type == "COMMENTS":
                        comment = Comment(token.value, {})
            # NOTE(review): value.value is the raw string, so Field.number
            # holds a str for enum members (int everywhere else).
            enumitems.append(Field(comment, 'enum', 'enum', 'enum', name.value, value.value))
        return enumitems

    def service(self, tokens):
        '''Returns a Service namedtuple'''
        # NOTE(review): functions is built as a list although the Service
        # annotation declares a Dict -- confirm what consumers expect.
        functions = []
        name = ''
        for i in range(0, len(tokens)):
            if not isinstance(tokens[i], Comment):
                if isinstance(tokens[i], RpcFunc):
                    functions.append(tokens[i])
                else:
                    name = tokens[i].value
        return Service(name, functions)

    def rpc(self, tokens):
        '''Returns a RpcFunc namedtuple'''
        uri = ''
        in_type = ''
        in_stream = False
        out_stream = False
        for token in tokens:
            if isinstance(token, Token):
                if token.type == "RPCNAME":
                    name = token
                elif token.type == "MESSAGETYPE":
                    # First MESSAGETYPE is the request type, second the response.
                    if in_type:
                        out_type = token
                    else:
                        in_type = token
                elif token.type == "QUALIFIER":
                    # A "stream" qualifier seen before/after the request type
                    # marks request/response streaming respectively.
                    if in_type:
                        out_stream = token.value == "stream"
                    else:
                        in_stream = token.value == "stream"
            elif not isinstance(token, Comment):
                # Presumably the googleoption/option subtree; its first
                # child's value is taken as the HTTP uri -- verify.
                option_token = token
                uri = option_token.children[0].value
        # NOTE(review): name/out_type are referenced unconditionally; an rpc
        # missing them would raise UnboundLocalError here.
        return RpcFunc(name.value, in_stream, in_type.value, out_stream, out_type.value, uri.strip('"'))
def _recursive_to_dict(obj):
    """Convert a parse-result namedtuple into plain dicts/lists for JSON.

    Non-tuple inputs yield an empty dict.  List entries are converted
    recursively; dict values are converted one level deep, in place.
    """
    result = {}
    if not isinstance(obj, tuple):
        return result
    for key, value in obj._asdict().items():
        if isinstance(value, list):
            result[key] = [_recursive_to_dict(element) for element in value]
        elif isinstance(value, tuple):
            result[key] = _recursive_to_dict(value)
        elif isinstance(value, dict):
            for sub_key in value:
                if isinstance(value[sub_key], tuple):
                    value[sub_key] = _recursive_to_dict(value[sub_key])
            result[key] = value
        else:
            result[key] = value
    return result
def parse_from_file(file: str):
    """Read *file* and parse its contents; returns None for an empty file."""
    with open(file, 'r') as handle:
        contents = handle.read()
    return parse(contents) if contents else None
def parse(data: str):
    """Parse proto3 source text into a ProtoFile namedtuple."""
    # A fresh LALR parser is built from the module-level grammar per call.
    parser = Lark(BNF, start='proto', parser='lalr')
    tree = parser.parse(data)
    # Replace raw lark nodes with the namedtuples defined above.
    trans_tree = ProtoTransformer().transform(tree)
    enums = {}
    messages = {}
    services = {}
    imports = []
    # NOTE(review): the loop variable 'tree' below shadows the parse tree
    # bound above; harmless here since 'tree' is not reused afterwards.
    import_tree = trans_tree.find_data('import')
    for tree in import_tree:
        for child in tree.children:
            imports.append(child.value.strip('"'))
    options = {}
    option_tree = trans_tree.find_data('option')
    for tree in option_tree:
        options[tree.children[0]] = tree.children[1].strip('"')
    package = ''
    # If several package statements are present, the last one wins.
    package_tree = trans_tree.find_data('package')
    for tree in package_tree:
        package = tree.children[0]
    # Collect top-level messages/enums/services keyed by name.
    top_data = trans_tree.find_data('topleveldef')
    for top_level in top_data:
        for child in top_level.children:
            if isinstance(child, Message):
                messages[child.name] = child
            if isinstance(child, Enum):
                enums[child.name] = child
            if isinstance(child, Service):
                services[child.name] = child
    return ProtoFile(messages, enums, services, imports, options, package)
def serialize2json(data):
    """Parse proto3 source text and serialize the result to a JSON string."""
    parsed = parse(data)
    return json.dumps(_recursive_to_dict(parsed))
def serialize2json_from_file(file: str):
with open(file, 'r') as f:
data = f.read()
if data:
return json.dumps(_recursive_to_dict(parse(data))) | protoparser/parser.py | from lark import Lark, Transformer, Tree, Token
from collections import namedtuple
import typing
import json
BNF = r'''
OCTALDIGIT: "0..7"
IDENT: ( "_" )* LETTER ( LETTER | DECIMALDIGIT | "_" )*
FULLIDENT: IDENT ( "." IDENT )*
MESSAGENAME: IDENT
ENUMNAME: IDENT
FIELDNAME: IDENT
ONEOFNAME: IDENT
MAPNAME: IDENT
SERVICENAME: IDENT
TAGNAME: IDENT
TAGVALUE: IDENT
RPCNAME: IDENT
QUALIFIER: ( "stream" )
MESSAGETYPE: [ "." ] ( IDENT "." )* MESSAGENAME
ENUMTYPE: [ "." ] ( IDENT "." )* ENUMNAME
INTLIT : DECIMALLIT | OCTALLIT | HEXLIT
DECIMALLIT: ( "1".."9" ) ( DECIMALDIGIT )*
OCTALLIT : "0" ( OCTALDIGIT )*
HEXLIT : "0" ( "x" | "X" ) HEXDIGIT ( HEXDIGIT )*
FLOATLIT: ( DECIMALS "." [ DECIMALS ] [ EXPONENT ] | DECIMALS EXPONENT | "."DECIMALS [ EXPONENT ] ) | "inf" | "nan"
DECIMALS : DECIMALDIGIT ( DECIMALDIGIT )*
EXPONENT : ( "e" | "E" ) [ "+" | "-" ] DECIMALS
BOOLLIT: "true" | "false"
STRLIT: ( "'" ( CHARVALUE )* "'" ) | ( "\"" ( CHARVALUE )* "\"" )
CHARVALUE: HEXESCAPE | OCTESCAPE | CHARESCAPE | /[^\0\n\\]/
HEXESCAPE: "\\" ( "x" | "X" ) HEXDIGIT HEXDIGIT
OCTESCAPE: "\\" OCTALDIGIT OCTALDIGIT OCTALDIGIT
CHARESCAPE: "\\" ( "a" | "b" | "f" | "n" | "r" | "t" | "v" | "\\" | "'" | "\"" )
QUOTE: "'" | "\""
EMPTYSTATEMENT: ";"
CONSTANT: FULLIDENT | ( [ "-" | "+" ] INTLIT ) | ( [ "-" | "+" ] FLOATLIT ) | STRLIT | BOOLLIT
syntax: "syntax" "=" QUOTE "proto3" QUOTE ";"
import: "import" [ "weak" | "public" ] STRLIT ";"
package: "package" FULLIDENT ";"
option: "option" OPTIONNAME "=" CONSTANT ";"
OPTIONNAME: ( IDENT | "(" FULLIDENT ")" ) ( "." IDENT )*
TYPE: "double" | "float" | "int32" | "int64" | "uint32" | "uint64" | "sint32" | "sint64" | "fixed32" | "fixed64" | "sfixed32" | "sfixed64" | "bool" | "string" | "bytes" | MESSAGETYPE | ENUMTYPE
FIELDNUMBER: INTLIT
field: [ comments ] TYPE FIELDNAME "=" FIELDNUMBER [ "[" fieldoptions "]" ] TAIL
fieldoptions: fieldoption ( "," fieldoption )*
fieldoption: OPTIONNAME "=" CONSTANT
repeatedfield: [ comments ] "repeated" field
oneof: "oneof" ONEOFNAME "{" ( oneoffield | EMPTYSTATEMENT )* "}"
oneoffield: TYPE FIELDNAME "=" FIELDNUMBER [ "[" fieldoptions "]" ] ";"
mapfield: [ comments ] "map" "<" KEYTYPE "," TYPE ">" MAPNAME "=" FIELDNUMBER [ "[" fieldoptions "]" ] TAIL
KEYTYPE: "int32" | "int64" | "uint32" | "uint64" | "sint32" | "sint64" | "fixed32" | "fixed64" | "sfixed32" | "sfixed64" | "bool" | "string"
reserved: "reserved" ( ranges | fieldnames ) ";"
ranges: range ( "," range )*
range: INTLIT [ "to" ( INTLIT | "max" ) ]
fieldnames: FIELDNAME ( "," FIELDNAME )*
enum: [ comments ] "enum" ENUMNAME enumbody
enumbody: "{" ( enumfield | EMPTYSTATEMENT )* "}"
enumfield: [ COMMENTS ] IDENT "=" INTLIT [ "[" enumvalueoption ( "," enumvalueoption )* "]" ] TAIL
enumvalueoption: OPTIONNAME "=" CONSTANT
message: [ comments ] "message" MESSAGENAME messagebody
messagebody: "{" ( repeatedfield | field | enum | message | option | oneof | mapfield | reserved | EMPTYSTATEMENT )* "}"
googleoption: "option" "(google.api.http)" "=" "{" [ "post:" CONSTANT [ "body:" CONSTANT ] ] "}" ";"
service: [ comments ] "service" SERVICENAME "{" ( option | rpc | EMPTYSTATEMENT )* "}"
rpc: [ comments ] "rpc" RPCNAME "(" ( QUALIFIER )* MESSAGETYPE ")" "returns" "(" ( QUALIFIER )* MESSAGETYPE ")" ( ( "{" ( googleoption | option | EMPTYSTATEMENT )* "}" ) | ";" )
proto:[ comments ] syntax ( import | package | option | topleveldef | EMPTYSTATEMENT )*
topleveldef: message | enum | service | comments
TAIL: ";" [/[\s|\t]/] [ COMMENT ]
COMMENT: "//" /.*/ [ "\n" ]
comments: COMMENT ( COMMENT )*
COMMENTS: COMMENT ( COMMENT )*
%import common.HEXDIGIT
%import common.DIGIT -> DECIMALDIGIT
%import common.LETTER
%import common.WS
%import common.NEWLINE
%ignore WS
'''
Comment = typing.NamedTuple('Comment', [('content', str), ('tags', typing.Dict[str, typing.Any])])
Field = typing.NamedTuple('Field', [('comment', 'Comment'), ('type', str), ('key_type', str), ('val_type', str), ('name', str), ('number', int)])
Enum = typing.NamedTuple('Enum', [('comment', 'Comment'), ('name', str), ('fields', typing.Dict[str, 'Field'])])
Message = typing.NamedTuple('Message', [('comment', 'Comment'), ('name', str), ('fields', typing.List['Field']),
('messages', typing.Dict[str, 'Message']), ('enums', typing.Dict[str, 'Enum'])])
Service = typing.NamedTuple('Service', [('name', str), ('functions', typing.Dict[str, 'RpcFunc'])])
RpcFunc = typing.NamedTuple('RpcFunc', [('name', str), ('in_stream', bool), ('in_type', str), ('out_stream', bool), ('out_type', str), ('uri', str)])
ProtoFile = typing.NamedTuple('ProtoFile',
[('messages', typing.Dict[str, 'Message']), ('enums', typing.Dict[str, 'Enum']),
('services', typing.Dict[str, 'Service']), ('imports', typing.List[str]),
('options', typing.Dict[str, str]), ('package', str)])
class ProtoTransformer(Transformer):
'''Converts syntax tree token into more easily usable namedtuple objects'''
def message(self, tokens):
'''Returns a Message namedtuple'''
comment = Comment("", {})
if len(tokens) < 3:
name_token, body = tokens
else:
comment, name_token, body = tokens
return Message(comment, name_token.value, *body)
def messagebody(self, items):
'''Returns a tuple of message body namedtuples'''
messages = {}
enums = {}
fields = []
for item in items:
if isinstance(item, Message):
messages[item.name] = item
elif isinstance(item, Enum):
enums[item.name] = item
elif isinstance(item, Field):
fields.append(item)
return fields, messages, enums
def field(self, tokens):
'''Returns a Field namedtuple'''
comment = Comment("", {})
type = Token("TYPE", "")
fieldname = Token("FIELDNAME", "")
fieldnumber = Token("FIELDNUMBER", "")
for token in tokens:
if isinstance(token, Comment):
comment = token
elif isinstance(token, Token):
if token.type == "TYPE":
type = token
elif token.type == "FIELDNAME":
fieldname = token
elif token.type == "FIELDNUMBER":
fieldnumber = token
elif token.type == "COMMENT":
comment = Comment(token.value, {})
return Field(comment, type.value, type.value, type.value, fieldname.value, int(fieldnumber.value))
def repeatedfield(self, tokens):
'''Returns a Field namedtuple'''
comment = Comment("", {})
if len(tokens) < 2:
field = tokens[0]
else:
comment, field = tuple(tokens)
return Field(comment, 'repeated', field.type, field.type, field.name, field.number)
def mapfield(self, tokens):
'''Returns a Field namedtuple'''
comment = Comment("", {})
val_type = Token("TYPE", "")
key_type = Token("KEYTYPE", "")
fieldname = Token("MAPNAME", "")
fieldnumber = Token("FIELDNUMBER", "")
for token in tokens:
if isinstance(token, Comment):
comment = token
elif isinstance(token, Token):
if token.type == "TYPE":
val_type = token
elif token.type == "KEYTYPE":
key_type = token
elif token.type == "MAPNAME":
fieldname = token
elif token.type == "FIELDNUMBER":
fieldnumber = token
elif token.type == "COMMENT":
comment = Comment(token.value, {})
return Field(comment, 'map', key_type.value, val_type.value, fieldname.value, int(fieldnumber.value))
def comments(self, tokens):
'''Returns a Tag namedtuple'''
comment = ''
tags = {}
for token in tokens:
comment += token
if token.find('@') < 0:
continue
kvs = token.strip(" /\n").split('@')
for kv in kvs:
kv = kv.strip(" /\n")
if not kv:
continue
tmp = kv.split('=')
key = tmp[0].strip(" /\n").lower()
if key.find(" ") >= 0:
continue
if len(tmp) > 1:
tags[key] = tmp[1].lower()
else:
tags[key] = True
return Comment(comment, tags)
def enum(self, tokens):
'''Returns an Enum namedtuple'''
comment = Comment("", {})
if len(tokens) < 3:
name, fields = tokens
else:
comment, name, fields = tokens
return Enum(comment, name.value, fields)
def enumbody(self, tokens):
'''Returns a sequence of enum identifiers'''
enumitems = []
for tree in tokens:
if tree.data != 'enumfield':
continue
comment = Comment("", {})
name = Token("IDENT", "")
value = Token("INTLIT", "")
for token in tree.children:
if isinstance(token, Comment):
comment = token
elif isinstance(token, Token):
if token.type == "IDENT":
name = token
elif token.type == "INTLIT":
value = token
elif token.type == "COMMENTS":
comment = Comment(token.value, {})
enumitems.append(Field(comment, 'enum', 'enum', 'enum', name.value, value.value))
return enumitems
def service(self, tokens):
'''Returns a Service namedtuple'''
functions = []
name = ''
for i in range(0, len(tokens)):
if not isinstance(tokens[i], Comment):
if isinstance(tokens[i], RpcFunc):
functions.append(tokens[i])
else:
name = tokens[i].value
return Service(name, functions)
def rpc(self, tokens):
'''Returns a RpcFunc namedtuple'''
uri = ''
in_type = ''
in_stream = False
out_stream = False
for token in tokens:
if isinstance(token, Token):
if token.type == "RPCNAME":
name = token
elif token.type == "MESSAGETYPE":
if in_type:
out_type = token
else:
in_type = token
elif token.type == "QUALIFIER":
if in_type:
out_stream = token.value == "stream"
else:
in_stream = token.value == "stream"
elif not isinstance(token, Comment):
option_token = token
uri = option_token.children[0].value
return RpcFunc(name.value, in_stream, in_type.value, out_stream, out_type.value, uri.strip('"'))
def _recursive_to_dict(obj):
    """Recursively convert namedtuples/lists/dicts into JSON-ready values.

    Fixes two gaps in the previous version: non-tuple elements inside lists
    are returned unchanged instead of being collapsed to {}, and nested
    dict values are converted at any depth rather than only one level deep.
    Scalars (and other unrecognized values) pass through untouched.
    """
    # Namedtuples expose _asdict(); plain tuples are passed through below.
    if isinstance(obj, tuple) and hasattr(obj, '_asdict'):
        return {key: _recursive_to_dict(value) for key, value in obj._asdict().items()}
    if isinstance(obj, list):
        return [_recursive_to_dict(element) for element in obj]
    if isinstance(obj, dict):
        return {key: _recursive_to_dict(value) for key, value in obj.items()}
    # Leaf value: str/int/bool/etc. are already JSON-serializable.
    return obj
def parse_from_file(file: str):
    """Read a .proto file and parse it.

    Returns the parse result, or None when the file is empty.  The file is
    read as UTF-8 explicitly so decoding does not depend on the locale.
    """
    with open(file, 'r', encoding='utf-8') as f:
        data = f.read()
    if not data:
        return None
    return parse(data)
def parse(data: str):
    """Parse proto3 source text into a ProtoFile namedtuple."""
    # A fresh LALR parser is built from the module-level grammar per call.
    parser = Lark(BNF, start='proto', parser='lalr')
    tree = parser.parse(data)
    # Replace raw lark nodes with the namedtuples defined above.
    trans_tree = ProtoTransformer().transform(tree)
    enums = {}
    messages = {}
    services = {}
    imports = []
    # NOTE(review): the loop variable 'tree' below shadows the parse tree
    # bound above; harmless here since 'tree' is not reused afterwards.
    import_tree = trans_tree.find_data('import')
    for tree in import_tree:
        for child in tree.children:
            imports.append(child.value.strip('"'))
    options = {}
    option_tree = trans_tree.find_data('option')
    for tree in option_tree:
        options[tree.children[0]] = tree.children[1].strip('"')
    package = ''
    # If several package statements are present, the last one wins.
    package_tree = trans_tree.find_data('package')
    for tree in package_tree:
        package = tree.children[0]
    # Collect top-level messages/enums/services keyed by name.
    top_data = trans_tree.find_data('topleveldef')
    for top_level in top_data:
        for child in top_level.children:
            if isinstance(child, Message):
                messages[child.name] = child
            if isinstance(child, Enum):
                enums[child.name] = child
            if isinstance(child, Service):
                services[child.name] = child
    return ProtoFile(messages, enums, services, imports, options, package)
def serialize2json(data):
    """Parse proto3 source text and serialize the result to a JSON string."""
    as_dict = _recursive_to_dict(parse(data))
    return json.dumps(as_dict)
def serialize2json_from_file(file: str):
with open(file, 'r') as f:
data = f.read()
if data:
return json.dumps(_recursive_to_dict(parse(data))) | 0.562297 | 0.113236 |
from functools import wraps
from itertools import takewhile
class Combinators(object):
    """Wraps a value and exposes combinator-style call helpers.

    Unlike ChainedCombinators, every combinator here returns a raw value
    rather than a new wrapper.
    """

    def __init__(self, value):
        self._value = value

    def K(self, f, *args, **kwargs):
        """The Kestrel combinator, invokes a method, and returns the original value"""
        target = self._value
        if callable(f):
            # f is a free function: call it with the value first.
            f(target, *args, **kwargs)
        else:
            # f is a method name on the wrapped value.
            getattr(target, f)(*args, **kwargs)
        return target

    def T(self, function, *args, **kwargs):
        """The Thrush combinator, makes a function call look like a method"""
        return function(self._value, *args, **kwargs)

    def R(self, function, *args, **kwargs):
        """The Robin combinator, like the thrust, except appends to the end of the argument list"""
        return function(*args, self._value, **kwargs)

    def chain(self):
        """Switch to the chainable wrapper for fluent pipelines."""
        return ChainedCombinators(self._value)
class ChainedCombinators(object):
    """Chainable combinator wrapper: every step rewraps its result."""

    def __init__(self, value):
        self._value = value

    def K(self, f, *args, **kwargs):
        """The Kestrel combinator, invokes a method, and returns the original value"""
        action = f if callable(f) else getattr(self._value, f)
        if callable(f):
            action(self._value, *args, **kwargs)
        else:
            action(*args, **kwargs)
        return self

    def T(self, function, *args, **kwargs):
        """The Thrush combinator, makes a function call look like a method"""
        return self.__class__(function(self._value, *args, **kwargs))

    def R(self, function, *args, **kwargs):
        """The Robin combinator, like the thrust, except appends to the end of the argument list."""
        return self.__class__(function(*args, self._value, **kwargs))

    def value(self):
        """Unwrap and return the final value of the pipeline."""
        return self._value
def bw(value):
    """Wrap *value* in a Combinators helper (entry point for the DSL)."""
    return Combinators(value)
def fluent_combinator_style(xs):
    """Pipeline demo: take while < 7, keep values < 2, multiply by 4.

    Returns a list.  The previous version returned the lazy ``map`` object
    produced by the last step, which under Python 3 made the module-level
    ``== [0, 4]`` assertion below fail; ``.T(list)`` materializes it.
    """
    return bw(xs).chain()\
        .R(takewhile, lambda x: x < 7)\
        .R(filter, lambda x: x < 2)\
        .R(map, lambda x: 4 * x)\
        .T(list)\
        .value()
assert fluent_combinator_style(range(10)) == [0, 4] | combinators.py | from functools import wraps
from itertools import takewhile
class Combinators(object):
    # Non-chaining wrapper: each combinator returns a raw value, not a new
    # wrapper.  See ChainedCombinators for the fluent variant.

    def __init__(self, value):
        self._value = value

    def K(self, f, *args, **kwargs):
        """The Kestrel combinator, invokes a method, and returns the original value"""
        # f is either a callable (invoked as f(value, ...)) or the name of
        # a method on the wrapped value.
        if callable(f):
            f(self._value, *args, **kwargs)
        else:
            getattr(self._value, f)(*args, **kwargs)
        return self._value

    def T(self, function, *args, **kwargs):
        """The Thrush combinator, makes a function call look like a method"""
        # The wrapped value is passed as the FIRST positional argument.
        return function(self._value, *args, **kwargs)

    def R(self, function, *args, **kwargs):
        """The Robin combinator, like the thrust, except appends to the end of the argument list"""
        # The wrapped value is passed as the LAST positional argument.
        return function(*(args + (self._value,)), **kwargs)

    def chain(self):
        # Switch to the chainable wrapper that keeps rewrapping results.
        return ChainedCombinators(self._value)
class ChainedCombinators(object):
    # Chainable variant: every combinator returns a wrapper again, so calls
    # can be stacked fluently; unwrap with .value().

    def __init__(self, value):
        self._value = value

    def K(self, f, *args, **kwargs):
        """The Kestrel combinator, invokes a method, and returns the original value"""
        # f is either a callable (invoked as f(value, ...)) or the name of
        # a method on the wrapped value.  Returns self to keep the chain.
        if callable(f):
            f(self._value, *args, **kwargs)
        else:
            getattr(self._value, f)(*args, **kwargs)
        return self

    def T(self, function, *args, **kwargs):
        """The Thrush combinator, makes a function call look like a method"""
        # Value passed first; result is rewrapped for further chaining.
        return self.__class__(function(self._value, *args, **kwargs))

    def R(self, function, *args, **kwargs):
        """The Robin combinator, like the thrust, except appends to the end of the argument list."""
        # Value passed last; result is rewrapped for further chaining.
        return self.__class__(function(*(args + (self._value,)), **kwargs))

    def value(self):
        # Unwrap the final value of the pipeline.
        return self._value
def bw(value):
    """Wrap *value* in a Combinators helper (entry point for the DSL)."""
    return Combinators(value)
def fluent_combinator_style(xs):
    """Pipeline demo: take while < 7, keep values < 2, multiply by 4.

    Returns a list.  The previous version returned the lazy ``map`` object
    produced by the last step, which under Python 3 made the module-level
    ``== [0, 4]`` assertion below fail; ``.T(list)`` materializes it.
    """
    return bw(xs).chain()\
        .R(takewhile, lambda x: x < 7)\
        .R(filter, lambda x: x < 2)\
        .R(map, lambda x: 4 * x)\
        .T(list)\
        .value()
assert fluent_combinator_style(range(10)) == [0, 4] | 0.661048 | 0.227255 |
"""Generate pynessie docs script."""
import os
from pathlib import Path
from typing import IO, List, Optional
from click import Group
from click.testing import CliRunner
from pynessie import cli
PATH_DOCS = "docs"
def generate_docs() -> None:
    """Generate all the commands docs."""
    print("Generating docs....\n\n")
    # Write the initial CLI help doc
    # ([] denotes the bare CLI root, documented as main.rst).
    _write_command_doc([])
    # Go through all the commands items and generate the docs
    _generate_commands_docs([], cli.cli)
def _write_command_doc(command: List[str]) -> None:
    """Write the --help output of *command* into its own .rst file."""
    help_invocation = command + ["--help"]
    with _open_doc_file(command) as doc_file:
        _write_command_output_to_file(doc_file, help_invocation)
def _open_doc_file(command: List[str]) -> IO:
    """Open (for writing, UTF-8) the doc file that belongs to *command*.

    The caller is responsible for closing the returned handle; call sites
    use it as a context manager.
    """
    file_name = _get_file_name_from_command(command)
    file_full_path = Path(os.getcwd()) / PATH_DOCS / file_name
    print(f"Writing file: {file_full_path.parent}/{file_full_path.name}")
    return open(file_full_path, "w", encoding="UTF-8")
def _get_file_name_from_command(command: List[str]) -> str:
    """Map a command path to its .rst file name ('main.rst' for the root)."""
    if not command:
        return "main.rst"
    return "_".join(command) + ".rst"
def _write_command_output_to_file(file_io: IO, command: List[str]) -> None:
    """Run *command* through the CLI and write its output as an rst code block."""
    result = _run_cli(command)
    # Indent for the body of the ".. code-block::" directive.
    # NOTE(review): rst requires the block body to be indented relative to
    # the directive; confirm this literal survived copy/paste intact.
    space = " "
    file_io.write(f".. code-block:: bash\n\n{space}")
    for line in result.split("\n"):
        file_io.write(line + f"\n{space}")
    file_io.write("\n\n")
def _run_cli(args: List[str], input_data: Optional[str] = None) -> str:
    """Invoke the pynessie CLI in-process and return its captured output."""
    return CliRunner().invoke(cli.cli, args, input=input_data).output
def _generate_commands_docs(parent_commands: List[str], command_group: Group) -> None:
    """Recursively emit docs for every command reachable from *command_group*."""
    for sub_name, sub_command in command_group.commands.items():
        command = parent_commands + [sub_name]
        if not isinstance(sub_command, Group):
            _write_command_doc(command)
        else:
            # Groups get an overview page, then recursion into children.
            _write_command_group_doc(command, list(sub_command.commands.keys()))
            _generate_commands_docs(command, sub_command)
def _write_command_group_doc(command: List[str], command_items: List[str]) -> None:
    """Write a group's --help page followed by references to its sub-commands."""
    with _open_doc_file(command) as doc_file:
        _write_command_output_to_file(doc_file, command + ["--help"])
        doc_file.write("It contains the following sub-commands:\n\n")
        for sub_command in command_items:
            _write_sub_command_reference_to_file(doc_file, command + [sub_command])
def _write_sub_command_reference_to_file(file_io: IO, command: List[str]) -> None:
    """Write an rst section header for *command* plus an include directive.

    The section underline is sized to the title: reStructuredText requires
    the underline to be at least as long as the title text, and the fixed
    9-character underline used previously was too short for most titles.
    """
    command_title = " ".join([c.capitalize() for c in command])
    title = f"{command_title} Command"
    file_io.write(f"{title}\n")
    file_io.write("~" * len(title) + "\n\n")
    file_io.write(f".. include:: {_get_file_name_from_command(command)}\n\n")
if __name__ == "__main__":
generate_docs() # pragma: no cover | python/tools/generate_docs.py | """Generate pynessie docs script."""
import os
from pathlib import Path
from typing import IO, List, Optional
from click import Group
from click.testing import CliRunner
from pynessie import cli
PATH_DOCS = "docs"
def generate_docs() -> None:
    """Generate all the commands docs."""
    print("Generating docs....\n\n")
    # Write the initial CLI help doc
    # ([] denotes the bare CLI root, documented as main.rst).
    _write_command_doc([])
    # Go through all the commands items and generate the docs
    _generate_commands_docs([], cli.cli)
def _write_command_doc(command: List[str]) -> None:
    """Write the --help output of *command* into its own .rst file."""
    with _open_doc_file(command) as f:
        _write_command_output_to_file(f, command + ["--help"])
def _open_doc_file(command: List[str]) -> IO:
    """Open the UTF-8 doc file for *command* under ./docs for writing.

    The caller is responsible for closing the returned handle; call sites
    use it as a context manager.
    """
    file_name = _get_file_name_from_command(command)
    cwd = os.getcwd()
    # Path(Path(cwd), ...) is equivalent to Path(cwd) / PATH_DOCS / file_name.
    file_full_path = Path(Path(cwd), PATH_DOCS, file_name)
    print(f"Writing file: {file_full_path.parent}/{file_full_path.name}")
    return open(file_full_path, "w", encoding="UTF-8")
def _get_file_name_from_command(command: List[str]) -> str:
    """Map a command path to its .rst file name ('main.rst' for the root)."""
    stem = "_".join(command) if command else "main"
    return f"{stem}.rst"
def _write_command_output_to_file(file_io: IO, command: List[str]) -> None:
    """Run *command* through the CLI and write its output as an rst code block."""
    result = _run_cli(command)
    # Indent for the body of the ".. code-block::" directive.
    # NOTE(review): rst requires the block body to be indented relative to
    # the directive; confirm this literal survived copy/paste intact.
    space = " "
    file_io.write(f".. code-block:: bash\n\n{space}")
    for line in result.split("\n"):
        file_io.write(line + f"\n{space}")
    file_io.write("\n\n")
def _run_cli(args: List[str], input_data: Optional[str] = None) -> str:
    """Invoke the pynessie CLI in-process and return its captured output."""
    return CliRunner().invoke(cli.cli, args, input=input_data).output
def _generate_commands_docs(parent_commands: List[str], command_group: Group) -> None:
    """Recursively emit docs for every command reachable from *command_group*."""
    for name, value in command_group.commands.items():
        command = parent_commands + [name]
        if isinstance(value, Group):
            # Groups get an overview page, then recursion into children.
            _write_command_group_doc(command, list(value.commands.keys()))
            _generate_commands_docs(command, value)
        else:
            _write_command_doc(command)
def _write_command_group_doc(command: List[str], command_items: List[str]) -> None:
    """Write a group's --help page followed by references to its sub-commands."""
    with _open_doc_file(command) as f:
        _write_command_output_to_file(f, command + ["--help"])
        f.write("It contains the following sub-commands:\n\n")
        for item in command_items:
            _write_sub_command_reference_to_file(f, command + [item])
def _write_sub_command_reference_to_file(file_io: IO, command: List[str]) -> None:
    """Write an rst section header for *command* plus an include directive.

    The section underline is sized to the title: reStructuredText requires
    the underline to be at least as long as the title text, and the fixed
    9-character underline used previously was too short for most titles.
    """
    command_title = " ".join([c.capitalize() for c in command])
    title = f"{command_title} Command"
    file_io.write(f"{title}\n")
    file_io.write("~" * len(title) + "\n\n")
    file_io.write(f".. include:: {_get_file_name_from_command(command)}\n\n")
if __name__ == "__main__":
generate_docs() # pragma: no cover | 0.778649 | 0.279122 |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.mlab import csv2rec
import scipy.stats.distributions as dist
from nitime.timeseries import TimeSeries
from nitime import utils
import nitime.algorithms as alg
import nitime.viz
from nitime.viz import drawmatrix_channels
from nitime.analysis import CoherenceAnalyzer,MTCoherenceAnalyzer
# Acquisition/analysis parameters.
TR=1.89  # fMRI repetition time, used as the sampling interval (seconds)
f_ub = 0.15  # upper bound of the frequency band of interest (Hz)
f_lb = 0.02  # lower bound of the frequency band of interest (Hz)
"""
We read in the data into a recarray from a csv file:
"""
# NOTE(review): matplotlib.mlab.csv2rec is long-deprecated/removed in modern
# matplotlib; this example targets an old matplotlib / Python 2 stack.
data_rec = csv2rec('data/fmri_timeseries.csv')
"""
The first line in the file contains the names of the different brain regions
(or ROI = regions of interest) from which the time-series were derived. We
extract the data into a regular array, while keeping the names to be used later:
"""
roi_names= np.array(data_rec.dtype.names)
nseq = len(roi_names)
n_samples = data_rec.shape[0]
# One row per ROI, one column per time point.
data = np.zeros((nseq, n_samples))
for n_idx, roi in enumerate(roi_names):
    data[n_idx] = data_rec[roi]
"""
We normalize the data in each of the ROIs to be in units of % change:
"""
pdata = utils.percent_change(data)
"""
We start by performing the detailed analysis, but note that a significant
short-cut is presented below, so if you just want to know how to do this
(without needing to understand the details), skip on down.
We start by defining how many tapers will be used and calculate the values of
the tapers and the associated eigenvalues of each taper:
"""
NW = 4
# Number of tapers for time-bandwidth product NW.
K = 2*NW-1
tapers, eigs = alg.DPSS_windows(n_samples, NW, 2*NW-1)
"""
We multiply the data by the tapers and derive the fourier transform and the
magnitude of the squared spectra (the power) for each tapered time-series:
"""
tdata = tapers[None,:,:] * pdata[:,None,:]
tspectra = np.fft.fft(tdata)
mag_sqr_spectra = np.abs(tspectra)
np.power(mag_sqr_spectra, 2, mag_sqr_spectra)  # square in place
"""
Coherence for real sequences is symmetric, so we calculate this for only half
the spectrum (the other half is equal):
"""
# Floor division so L stays an int under Python 3 (plain / produces a float,
# which breaks np.empty below and the array indexing later on).
L = n_samples // 2 + 1
"""
We estimate adaptive weighting of the tapers, based on the data (see
:ref:`multi-taper-psd` for an explanation and references):
"""
w = np.empty( (nseq, K, L) )
for i in range(nseq):  # range(), not the Python-2-only xrange()
    w[i], _ = utils.adaptive_weights(mag_sqr_spectra[i], eigs, L)
"""
We proceed to calculate the coherence. We initialize empty data containers:
"""
csd_mat = np.zeros((nseq, nseq, L), 'D')
psd_mat = np.zeros((2, nseq, nseq, L), 'd')
coh_mat = np.zeros((nseq, nseq, L), 'd')
coh_var = np.zeros_like(coh_mat)
"""
Looping over the ROIs :
"""
for i in xrange(nseq):
for j in xrange(i):
"""
We calculate the multi-tapered cross spectrum between each two
time-series:
"""
sxy = alg.mtm_cross_spectrum(
tspectra[i], tspectra[j], (w[i], w[j]), sides='onesided'
)
"""
And the individual PSD for each:
"""
sxx = alg.mtm_cross_spectrum(
tspectra[i], tspectra[i], (w[i], w[i]), sides='onesided'
).real
syy = alg.mtm_cross_spectrum(
tspectra[j], tspectra[j], (w[i], w[j]), sides='onesided'
).real
psd_mat[0,i,j] = sxx
psd_mat[1,i,j] = syy
"""
Coherence is : $Coh_{xy}(\lambda) = \frac{|{f_{xy}(\lambda)}|^2}{f_{xx}(\lambda) \cdot f_{yy}(\lambda)}$
"""
coh_mat[i,j] = np.abs(sxy)**2
coh_mat[i,j] /= (sxx * syy)
csd_mat[i,j] = sxy
"""
The variance from the different samples is calculated using a jack-knife
approach:
"""
if i != j:
coh_var[i,j] = utils.jackknifed_coh_variance(
tspectra[i], tspectra[j], weights=(w[i], w[j]), last_freq=L
)
"""
This measure is normalized, based on the number of tapers:
"""
coh_mat_xform = utils.normalize_coherence(coh_mat, 2*K-2)
"""
We calculate 95% confidence intervals based on the jack-knife variance
calculation:
"""
t025_limit = coh_mat_xform + dist.t.ppf(.025, K-1)*np.sqrt(coh_var)
t975_limit = coh_mat_xform + dist.t.ppf(.975, K-1)*np.sqrt(coh_var)
utils.normal_coherence_to_unit(t025_limit, 2*K-2, t025_limit)
utils.normal_coherence_to_unit(t975_limit, 2*K-2, t975_limit)
if L < n_samples:
freqs = np.linspace(0, 1/(2*TR), L)
else:
freqs = np.linspace(0, 1/TR, L, endpoint=False)
"""
We look only at frequencies between 0.02 and 0.15 (the physiologically
relevant band, see http://imaging.mrc-cbu.cam.ac.uk/imaging/DesignEfficiency:
"""
freq_idx = np.where((freqs>f_lb) * (freqs<f_ub))[0]
"""
We extract the coherence and average over all these frequency bands:
"""
coh = np.mean(coh_mat[:,:,freq_idx],-1) #Averaging on the last dimension
"""
The next line calls the visualization routine which displays the data
"""
fig01 = drawmatrix_channels(coh,roi_names,size=[10.,10.],color_anchor=0,
title='MTM Coherence')
"""
.. image:: fig/multi_taper_coh_01.png
Next we perform the same analysis, using the nitime object oriented interface.
We start by initializing a TimeSeries object with this data and with the
sampling_interval provided above. We set the metadata 'roi' field with the ROI
names.
"""
T = TimeSeries(pdata,sampling_interval=TR)
T.metadata['roi'] = roi_names
"""
We initialize an MTCoherenceAnalyzer object with the TimeSeries object
"""
C2 = MTCoherenceAnalyzer(T)
"""
The relevant indices in the Analyzer object are derived:
"""
freq_idx = np.where((C2.frequencies>0.02) * (C2.frequencies<0.15))[0]
"""
The call to C2.coherence triggers the computation and this is averaged over the
frequency range of interest in the same line and then displayed:
"""
coh = np.mean(C2.coherence[:,:,freq_idx],-1) #Averaging on the last dimension
fig02 = drawmatrix_channels(coh,roi_names,size=[10.,10.],color_anchor=0,
title='MTCoherenceAnalyzer')
"""
.. image:: fig/multi_taper_coh_02.png
For comparison, we also perform the analysis using the standard
CoherenceAnalyzer object, which does the analysis using Welch's windowed
periodogram, instead of the multi-taper spectral estimation method (see
:ref:`resting_state` for a more thorough analysis of this data using this
method):
"""
C3 = CoherenceAnalyzer(T)
freq_idx = np.where((C3.frequencies>f_lb) * (C3.frequencies<f_ub))[0]
#Extract the coherence and average across these frequency bands:
coh = np.mean(C3.coherence[:,:,freq_idx],-1) #Averaging on the last dimension
fig03 = drawmatrix_channels(coh,roi_names,size=[10.,10.],color_anchor=0,
title='CoherenceAnalyzer')
"""
.. image:: fig/multi_taper_coh_03.png
plt.show() is called in order to display the figures:
"""
plt.show() | doc/examples/multi_taper_coh.py | import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): csv2rec was removed from matplotlib >= 3.1 -- confirm the
# pinned matplotlib version, or switch to numpy.genfromtxt/pandas.read_csv.
from matplotlib.mlab import csv2rec
import scipy.stats.distributions as dist
from nitime.timeseries import TimeSeries
from nitime import utils
import nitime.algorithms as alg
import nitime.viz
from nitime.viz import drawmatrix_channels
from nitime.analysis import CoherenceAnalyzer, MTCoherenceAnalyzer

# Acquisition / analysis constants: repetition time (seconds) and the
# frequency band of interest (Hz) used throughout this example.
TR = 1.89
f_ub = 0.15
f_lb = 0.02
"""
We read in the data into a recarray from a csv file:
"""
data_rec = csv2rec('data/fmri_timeseries.csv')
"""
The first line in the file contains the names of the different brain regions
(or ROI = regions of interest) from which the time-series were derived. We
extract the data into a regular array, while keeping the names to be used later:
"""
roi_names = np.array(data_rec.dtype.names)
nseq = len(roi_names)
n_samples = data_rec.shape[0]
data = np.zeros((nseq, n_samples))
for n_idx, roi in enumerate(roi_names):
    data[n_idx] = data_rec[roi]
"""
We normalize the data in each of the ROIs to be in units of % change:
"""
pdata = utils.percent_change(data)
"""
We start by performing the detailed analysis, but note that a significant
short-cut is presented below, so if you just want to know how to do this
(without needing to understand the details), skip on down.
We start by defining how many tapers will be used and calculate the values of
the tapers and the associated eigenvalues of each taper:
"""
NW = 4
K = 2 * NW - 1
# Reuse K instead of re-spelling 2*NW-1, so the taper count has one source of truth.
tapers, eigs = alg.DPSS_windows(n_samples, NW, K)
"""
We multiply the data by the tapers and derive the fourier transform and the
magnitude of the squared spectra (the power) for each tapered time-series:
"""
tdata = tapers[None, :, :] * pdata[:, None, :]
tspectra = np.fft.fft(tdata)
mag_sqr_spectra = np.abs(tspectra)
np.power(mag_sqr_spectra, 2, mag_sqr_spectra)  # squared in place, no extra copy
"""
Coherence for real sequences is symmetric, so we calculate this for only half
the spectrum (the other half is equal):
"""
# BUG FIX: use floor division. Under Python 3, `n_samples/2 + 1` is a float,
# which breaks every place L is used as an array dimension or index bound.
# Under Python 2 `//` is identical to `/` on ints, so this is backward-compatible.
L = n_samples // 2 + 1
"""
We estimate adaptive weighting of the tapers, based on the data (see
:ref:`multi-taper-psd` for an explanation and references):
"""
w = np.empty((nseq, K, L))
for i in range(nseq):  # FIX: range, not the Python-2-only xrange
    w[i], _ = utils.adaptive_weights(mag_sqr_spectra[i], eigs, L)
"""
We proceed to calculate the coherence. We initialize empty data containers:
"""
csd_mat = np.zeros((nseq, nseq, L), 'D')   # complex cross-spectral densities
psd_mat = np.zeros((2, nseq, nseq, L), 'd')  # per-pair auto-spectra (sxx, syy)
coh_mat = np.zeros((nseq, nseq, L), 'd')   # squared coherence per pair
coh_var = np.zeros_like(coh_mat)           # jack-knife variance estimates
"""
Looping over the ROIs :
"""
# Pairwise multi-taper coherence over the lower triangle (j < i); the matrix
# is symmetric for real data, so only one triangle is filled.
for i in range(nseq):  # FIX: range, not the Python-2-only xrange
    for j in range(i):
        """
        We calculate the multi-tapered cross spectrum between each two
        time-series:
        """
        sxy = alg.mtm_cross_spectrum(
            tspectra[i], tspectra[j], (w[i], w[j]), sides='onesided'
        )
        """
        And the individual PSD for each:
        """
        sxx = alg.mtm_cross_spectrum(
            tspectra[i], tspectra[i], (w[i], w[i]), sides='onesided'
        ).real
        # BUG FIX: the auto-spectrum of channel j must be weighted by j's own
        # adaptive weights on BOTH sides, mirroring sxx's (w[i], w[i]) above.
        # The original passed (w[i], w[j]) here, which biases the denominator
        # of the coherence estimate below.
        syy = alg.mtm_cross_spectrum(
            tspectra[j], tspectra[j], (w[j], w[j]), sides='onesided'
        ).real
        psd_mat[0, i, j] = sxx
        psd_mat[1, i, j] = syy
        """
        Coherence is : $Coh_{xy}(\lambda) = \frac{|{f_{xy}(\lambda)}|^2}{f_{xx}(\lambda) \cdot f_{yy}(\lambda)}$
        """
        coh_mat[i, j] = np.abs(sxy) ** 2
        coh_mat[i, j] /= (sxx * syy)
        csd_mat[i, j] = sxy
        """
        The variance from the different samples is calculated using a jack-knife
        approach:
        """
        # j < i always holds here, so this guard is never False; kept for
        # clarity/safety should the loop bounds ever change.
        if i != j:
            coh_var[i, j] = utils.jackknifed_coh_variance(
                tspectra[i], tspectra[j], weights=(w[i], w[j]), last_freq=L
            )
"""
This measure is normalized, based on the number of tapers:
"""
coh_mat_xform = utils.normalize_coherence(coh_mat, 2*K-2)
"""
We calculate 95% confidence intervals based on the jack-knife variance
calculation:
"""
t025_limit = coh_mat_xform + dist.t.ppf(.025, K-1)*np.sqrt(coh_var)
t975_limit = coh_mat_xform + dist.t.ppf(.975, K-1)*np.sqrt(coh_var)
utils.normal_coherence_to_unit(t025_limit, 2*K-2, t025_limit)
utils.normal_coherence_to_unit(t975_limit, 2*K-2, t975_limit)
if L < n_samples:
freqs = np.linspace(0, 1/(2*TR), L)
else:
freqs = np.linspace(0, 1/TR, L, endpoint=False)
"""
We look only at frequencies between 0.02 and 0.15 (the physiologically
relevant band, see http://imaging.mrc-cbu.cam.ac.uk/imaging/DesignEfficiency:
"""
freq_idx = np.where((freqs>f_lb) * (freqs<f_ub))[0]
"""
We extract the coherence and average over all these frequency bands:
"""
coh = np.mean(coh_mat[:,:,freq_idx],-1) #Averaging on the last dimension
"""
The next line calls the visualization routine which displays the data
"""
fig01 = drawmatrix_channels(coh,roi_names,size=[10.,10.],color_anchor=0,
title='MTM Coherence')
"""
.. image:: fig/multi_taper_coh_01.png
Next we perform the same analysis, using the nitime object oriented interface.
We start by initializing a TimeSeries object with this data and with the
sampling_interval provided above. We set the metadata 'roi' field with the ROI
names.
"""
T = TimeSeries(pdata,sampling_interval=TR)
T.metadata['roi'] = roi_names
"""
We initialize an MTCoherenceAnalyzer object with the TimeSeries object
"""
C2 = MTCoherenceAnalyzer(T)
"""
The relevant indices in the Analyzer object are derived:
"""
freq_idx = np.where((C2.frequencies>0.02) * (C2.frequencies<0.15))[0]
"""
The call to C2.coherence triggers the computation and this is averaged over the
frequency range of interest in the same line and then displayed:
"""
coh = np.mean(C2.coherence[:,:,freq_idx],-1) #Averaging on the last dimension
fig02 = drawmatrix_channels(coh,roi_names,size=[10.,10.],color_anchor=0,
title='MTCoherenceAnalyzer')
"""
.. image:: fig/multi_taper_coh_02.png
For comparison, we also perform the analysis using the standard
CoherenceAnalyzer object, which does the analysis using Welch's windowed
periodogram, instead of the multi-taper spectral estimation method (see
:ref:`resting_state` for a more thorough analysis of this data using this
method):
"""
C3 = CoherenceAnalyzer(T)
freq_idx = np.where((C3.frequencies>f_lb) * (C3.frequencies<f_ub))[0]
#Extract the coherence and average across these frequency bands:
coh = np.mean(C3.coherence[:,:,freq_idx],-1) #Averaging on the last dimension
fig03 = drawmatrix_channels(coh,roi_names,size=[10.,10.],color_anchor=0,
title='CoherenceAnalyzer')
"""
.. image:: fig/multi_taper_coh_03.png
plt.show() is called in order to display the figures:
"""
plt.show() | 0.657209 | 0.741768 |
import functools
import inspect
import logging

from fvcore.common.config import CfgNode as _CfgNode
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
"""
# Note that the default value of allow_unsafe is changed to True
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
# Load the YAML (following fvcore's _BASE_ inheritance) and rewrap the
# result in this class so subclass behavior applies to the loaded tree.
loaded_cfg = _CfgNode.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
# ###### _immutable: ON
# loaded_cfg.defrost()
# defaults.py needs to import CfgNode
# (deferred import here avoids the circular import)
from .defaults import _C
latest_ver = _C.VERSION
assert (
latest_ver == self.VERSION
), "CfgNode.merge_from_file is only allowed on a config of latest version!"
logger = logging.getLogger(__name__)
# Files written before versioning carry no VERSION key; guess it from content.
loaded_ver = loaded_cfg.get("VERSION", None)
if loaded_ver is None:
from .compat import guess_version
loaded_ver = guess_version(loaded_cfg, cfg_filename)
assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
loaded_ver, self.VERSION
)
if loaded_ver == self.VERSION:
# Same version: a plain fvcore merge suffices.
self.merge_from_other_cfg(loaded_cfg)
else:
# compat.py needs to import CfgNode
from .compat import upgrade_config, downgrade_config
logger.warning(
"Loading an old v{} config file '{}' by automatically upgrading to v{}. "
"See docs/CHANGELOG.md for instructions to update your files.".format(
loaded_ver, cfg_filename, self.VERSION
)
)
# To convert, first obtain a full config at an old version
# (downgrade self, merge the old file into it, then upgrade the result).
old_self = downgrade_config(self, to_version=loaded_ver)
old_self.merge_from_other_cfg(loaded_cfg)
new_config = upgrade_config(old_self)
# Replace this object's contents in place so callers' references stay valid.
self.clear()
self.update(new_config)
# Module-level singleton holding the "global" config. set_global_cfg() mutates
# it in place so `from detectron2.config import global_cfg` references stay valid.
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
"""
Get a copy of the default config.
Returns:
a detectron2 CfgNode instance.
"""
# Deferred import: defaults.py imports CfgNode from this module, so a
# top-level import would create an import cycle.
from .defaults import _C
# clone() so callers can freely mutate without touching the defaults.
return _C.clone()
# Replaces the contents of the module-level global_cfg in place (clear + update)
# so that existing references to the object observe the new values.
def set_global_cfg(cfg: CfgNode) -> None:
"""
Let the global config point to the given cfg.
Assume that the given "cfg" has the key "KEY", after calling
`set_global_cfg(cfg)`, the key can be accessed by:
.. code-block:: python
from detectron2.config import global_cfg
print(global_cfg.KEY)
By using a hacky global config, you can access these configs anywhere,
without having to pass the config object or the values deep into the code.
This is a hacky feature introduced for quick prototyping / research exploration.
"""
global global_cfg
global_cfg.clear()
global_cfg.update(cfg)
def configurable(init_func):
"""
Decorate a class's __init__ method so that it can be called with a CfgNode
object using the class's from_config classmethod.
Examples:
.. code-block:: python
class A:
@configurable
def __init__(self, a, b=2, c=3):
pass
@classmethod
def from_config(cls, cfg):
# Returns kwargs to be passed to __init__
return {"a": cfg.A, "b": cfg.B}
a1 = A(a=1, b=2) # regular construction
a2 = A(cfg) # construct with a cfg
a3 = A(cfg, b=3, c=4) # construct with extra overwrite
"""
# Only __init__ may be decorated; the wrapper's first argument must be self.
assert init_func.__name__ == "__init__", "@configurable should only be used for __init__!"
# Internal detectron2 uses are required to document the API as experimental.
if init_func.__module__.startswith("detectron2."):
assert (
init_func.__doc__ is not None and "experimental" in init_func.__doc__
), f"configurable {init_func} should be marked experimental"
# NOTE(review): relies on `functools` and `inspect` being imported at module
# scope -- confirm the file's import block provides them.
@functools.wraps(init_func)
def wrapped(self, *args, **kwargs):
# Resolve from_config lazily on the concrete class so subclasses can override it.
try:
from_config_func = type(self).from_config
except AttributeError:
raise AttributeError("Class with @configurable must have a 'from_config' classmethod.")
if not inspect.ismethod(from_config_func):
raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
# Dispatch: cfg-style call goes through from_config; otherwise plain __init__.
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
init_func(self, **explicit_args)
else:
init_func(self, *args, **kwargs)
return wrapped
def _get_args_from_config(from_config_func, *args, **kwargs):
"""
Use `from_config` to obtain explicit arguments.
Returns:
dict: arguments to be used for cls.__init__
"""
signature = inspect.signature(from_config_func)
if list(signature.parameters.keys())[0] != "cfg":
raise TypeError(
f"{from_config_func.__self__}.from_config must take 'cfg' as the first argument!"
)
support_var_arg = any(
param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
for param in signature.parameters.values()
)
if support_var_arg: # forward all arguments to from_config, if from_config accepts them
ret = from_config_func(*args, **kwargs)
else:
# forward supported arguments to from_config
supported_arg_names = set(signature.parameters.keys())
extra_kwargs = {}
for name in list(kwargs.keys()):
if name not in supported_arg_names:
extra_kwargs[name] = kwargs.pop(name)
ret = from_config_func(*args, **kwargs)
# forward the other arguments to __init__
ret.update(extra_kwargs)
return ret
def _called_with_cfg(*args, **kwargs):
"""
Returns:
bool: whether the arguments contain CfgNode and should be considered
forwarded to from_config.
"""
# isinstance against the fvcore base class so any CfgNode subclass matches.
if len(args) and isinstance(args[0], _CfgNode):
return True
# **kwargs is a fresh dict built at call time, so pop() here cannot affect
# the caller's own keyword arguments.
if isinstance(kwargs.pop("cfg", None), _CfgNode):
return True
# `from_config`'s first argument is forced to be "cfg".
# So the above check covers all cases.
return False | detectron2/config/config.py |
import logging
from fvcore.common.config import CfgNode as _CfgNode
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
"""
# Note that the default value of allow_unsafe is changed to True
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
loaded_cfg = _CfgNode.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
# ###### _immutable: ON
# loaded_cfg.defrost()
# defaults.py needs to import CfgNode
from .defaults import _C
latest_ver = _C.VERSION
assert (
latest_ver == self.VERSION
), "CfgNode.merge_from_file is only allowed on a config of latest version!"
logger = logging.getLogger(__name__)
loaded_ver = loaded_cfg.get("VERSION", None)
if loaded_ver is None:
from .compat import guess_version
loaded_ver = guess_version(loaded_cfg, cfg_filename)
assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
loaded_ver, self.VERSION
)
if loaded_ver == self.VERSION:
self.merge_from_other_cfg(loaded_cfg)
else:
# compat.py needs to import CfgNode
from .compat import upgrade_config, downgrade_config
logger.warning(
"Loading an old v{} config file '{}' by automatically upgrading to v{}. "
"See docs/CHANGELOG.md for instructions to update your files.".format(
loaded_ver, cfg_filename, self.VERSION
)
)
# To convert, first obtain a full config at an old version
old_self = downgrade_config(self, to_version=loaded_ver)
old_self.merge_from_other_cfg(loaded_cfg)
new_config = upgrade_config(old_self)
self.clear()
self.update(new_config)
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
"""
Get a copy of the default config.
Returns:
a detectron2 CfgNode instance.
"""
from .defaults import _C
return _C.clone()
# +
def set_global_cfg(cfg: CfgNode) -> None:
"""
Let the global config point to the given cfg.
Assume that the given "cfg" has the key "KEY", after calling
`set_global_cfg(cfg)`, the key can be accessed by:
.. code-block:: python
from detectron2.config import global_cfg
print(global_cfg.KEY)
By using a hacky global config, you can access these configs anywhere,
without having to pass the config object or the values deep into the code.
This is a hacky feature introduced for quick prototyping / research exploration.
"""
global global_cfg
global_cfg.clear()
global_cfg.update(cfg)
def configurable(init_func):
"""
Decorate a class's __init__ method so that it can be called with a CfgNode
object using the class's from_config classmethod.
Examples:
.. code-block:: python
class A:
@configurable
def __init__(self, a, b=2, c=3):
pass
@classmethod
def from_config(cls, cfg):
# Returns kwargs to be passed to __init__
return {"a": cfg.A, "b": cfg.B}
a1 = A(a=1, b=2) # regular construction
a2 = A(cfg) # construct with a cfg
a3 = A(cfg, b=3, c=4) # construct with extra overwrite
"""
assert init_func.__name__ == "__init__", "@configurable should only be used for __init__!"
if init_func.__module__.startswith("detectron2."):
assert (
init_func.__doc__ is not None and "experimental" in init_func.__doc__
), f"configurable {init_func} should be marked experimental"
@functools.wraps(init_func)
def wrapped(self, *args, **kwargs):
try:
from_config_func = type(self).from_config
except AttributeError:
raise AttributeError("Class with @configurable must have a 'from_config' classmethod.")
if not inspect.ismethod(from_config_func):
raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
init_func(self, **explicit_args)
else:
init_func(self, *args, **kwargs)
return wrapped
def _get_args_from_config(from_config_func, *args, **kwargs):
"""
Use `from_config` to obtain explicit arguments.
Returns:
dict: arguments to be used for cls.__init__
"""
signature = inspect.signature(from_config_func)
if list(signature.parameters.keys())[0] != "cfg":
raise TypeError(
f"{from_config_func.__self__}.from_config must take 'cfg' as the first argument!"
)
support_var_arg = any(
param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
for param in signature.parameters.values()
)
if support_var_arg: # forward all arguments to from_config, if from_config accepts them
ret = from_config_func(*args, **kwargs)
else:
# forward supported arguments to from_config
supported_arg_names = set(signature.parameters.keys())
extra_kwargs = {}
for name in list(kwargs.keys()):
if name not in supported_arg_names:
extra_kwargs[name] = kwargs.pop(name)
ret = from_config_func(*args, **kwargs)
# forward the other arguments to __init__
ret.update(extra_kwargs)
return ret
def _called_with_cfg(*args, **kwargs):
"""
Returns:
bool: whether the arguments contain CfgNode and should be considered
forwarded to from_config.
"""
if len(args) and isinstance(args[0], _CfgNode):
return True
if isinstance(kwargs.pop("cfg", None), _CfgNode):
return True
# `from_config`'s first argument is forced to be "cfg".
# So the above check covers all cases.
return False | 0.75101 | 0.192824 |
import pytest
import pybnesian as pbn
from pybnesian import FactorType, Factor
def test_factor_type():
# Factor types behave as per-class singletons: every LinearGaussianCPD shares
# one type regardless of its variable/evidence.
lg1 = pbn.LinearGaussianCPD("a", [])
lg2 = pbn.LinearGaussianCPD("b", ["a"])
lg3 = pbn.LinearGaussianCPD("c", ["b", "a"])
assert lg1.type() == pbn.LinearGaussianCPDType()
assert lg1.type() == lg2.type()
assert lg1.type() == lg3.type()
assert lg2.type() == lg3.type()
# Same property for CKDE factors.
c1 = pbn.CKDE("a", [])
c2 = pbn.CKDE("b", ["a"])
c3 = pbn.CKDE("c", ["b", "a"])
assert c1.type() == pbn.CKDEType()
assert c1.type() == c2.type()
assert c1.type() == c3.type()
assert c2.type() == c3.type()
# ... and for discrete factors.
d1 = pbn.DiscreteFactor("a", [])
d2 = pbn.DiscreteFactor("b", ["a"])
d3 = pbn.DiscreteFactor("c", ["b", "a"])
assert d1.type() == pbn.DiscreteFactorType()
assert d1.type() == d2.type()
assert d1.type() == d3.type()
assert d2.type() == d3.type()
# Types of distinct factor classes never compare equal.
assert lg1.type() != c1.type()
assert lg1.type() != d1.type()
assert c1.type() != d1.type()
def test_new_factor_type():
# Python-defined FactorType subclasses keep the singleton-per-class equality:
# all instances of A compare equal to each other ...
class A(FactorType):
def __init__(self):
FactorType.__init__(self)
a1 = A()
a2 = A()
a3 = A()
assert a1 == a2
assert a1 == a3
assert a2 == a3
class B(FactorType):
def __init__(self):
FactorType.__init__(self)
b1 = B()
b2 = B()
b3 = B()
assert b1 == b2
assert b1 == b3
assert b2 == b3
# ... but instances of different subclasses do not.
assert a1 != b1
def test_factor_defined_factor_type():
# F_type does NOT override new_factor, so calling it must surface the C++
# pure-virtual error (checked below).
class F_type(FactorType):
def __init__(self):
FactorType.__init__(self)
def __str__(self):
return "FType"
class F(Factor):
def __init__(self, variable, evidence):
Factor.__init__(self, variable, evidence)
def type(self):
return F_type()
f1 = F("a", [])
f2 = F("b", ["a"])
f3 = F("c", ["a", "b"])
assert f1.type() == f2.type()
assert f1.type() == f3.type()
assert f2.type() == f3.type()
assert str(f1.type()) == str(f2.type()) == str(f3.type()) == "FType"
# new_factor without an override raises from the C++ side.
dummy_network = pbn.GaussianNetwork(["a", "b", "c", "d"])
with pytest.raises(RuntimeError) as ex:
f4 = f1.type().new_factor(dummy_network, "d", ["a", "b", "c"])
assert 'Tried to call pure virtual function "FactorType::new_factor"' in str(ex.value)
# G_type overrides new_factor, so constructing factors through the type works.
class G_type(FactorType):
def __init__(self):
FactorType.__init__(self)
def new_factor(self, model, variable, evidence):
return G(variable, evidence)
def __str__(self):
return "GType"
class G(Factor):
def __init__(self, variable, evidence):
Factor.__init__(self, variable, evidence)
def type(self):
return G_type()
g1 = G("a", [])
g2 = G("b", ["a"])
g3 = G("c", ["a", "b"])
assert g1.type() == g2.type()
assert g1.type() == g3.type()
assert g2.type() == g3.type()
assert f1.type() != g1.type()
assert str(g1.type()) == str(g2.type()) == str(g3.type()) == "GType"
# A factor built via new_factor carries the same type and the requested scope.
g4 = g1.type().new_factor(dummy_network, "d", ["a", "b", "c"])
assert g1.type() == g4.type()
assert g4.variable() == "d"
assert g4.evidence() == ["a", "b", "c"] | tests/factors/factor_type_test.py | import pytest
import pybnesian as pbn
from pybnesian import FactorType, Factor
def test_factor_type():
lg1 = pbn.LinearGaussianCPD("a", [])
lg2 = pbn.LinearGaussianCPD("b", ["a"])
lg3 = pbn.LinearGaussianCPD("c", ["b", "a"])
assert lg1.type() == pbn.LinearGaussianCPDType()
assert lg1.type() == lg2.type()
assert lg1.type() == lg3.type()
assert lg2.type() == lg3.type()
c1 = pbn.CKDE("a", [])
c2 = pbn.CKDE("b", ["a"])
c3 = pbn.CKDE("c", ["b", "a"])
assert c1.type() == pbn.CKDEType()
assert c1.type() == c2.type()
assert c1.type() == c3.type()
assert c2.type() == c3.type()
d1 = pbn.DiscreteFactor("a", [])
d2 = pbn.DiscreteFactor("b", ["a"])
d3 = pbn.DiscreteFactor("c", ["b", "a"])
assert d1.type() == pbn.DiscreteFactorType()
assert d1.type() == d2.type()
assert d1.type() == d3.type()
assert d2.type() == d3.type()
assert lg1.type() != c1.type()
assert lg1.type() != d1.type()
assert c1.type() != d1.type()
def test_new_factor_type():
class A(FactorType):
def __init__(self):
FactorType.__init__(self)
a1 = A()
a2 = A()
a3 = A()
assert a1 == a2
assert a1 == a3
assert a2 == a3
class B(FactorType):
def __init__(self):
FactorType.__init__(self)
b1 = B()
b2 = B()
b3 = B()
assert b1 == b2
assert b1 == b3
assert b2 == b3
assert a1 != b1
def test_factor_defined_factor_type():
class F_type(FactorType):
def __init__(self):
FactorType.__init__(self)
def __str__(self):
return "FType"
class F(Factor):
def __init__(self, variable, evidence):
Factor.__init__(self, variable, evidence)
def type(self):
return F_type()
f1 = F("a", [])
f2 = F("b", ["a"])
f3 = F("c", ["a", "b"])
assert f1.type() == f2.type()
assert f1.type() == f3.type()
assert f2.type() == f3.type()
assert str(f1.type()) == str(f2.type()) == str(f3.type()) == "FType"
dummy_network = pbn.GaussianNetwork(["a", "b", "c", "d"])
with pytest.raises(RuntimeError) as ex:
f4 = f1.type().new_factor(dummy_network, "d", ["a", "b", "c"])
assert 'Tried to call pure virtual function "FactorType::new_factor"' in str(ex.value)
class G_type(FactorType):
def __init__(self):
FactorType.__init__(self)
def new_factor(self, model, variable, evidence):
return G(variable, evidence)
def __str__(self):
return "GType"
class G(Factor):
def __init__(self, variable, evidence):
Factor.__init__(self, variable, evidence)
def type(self):
return G_type()
g1 = G("a", [])
g2 = G("b", ["a"])
g3 = G("c", ["a", "b"])
assert g1.type() == g2.type()
assert g1.type() == g3.type()
assert g2.type() == g3.type()
assert f1.type() != g1.type()
assert str(g1.type()) == str(g2.type()) == str(g3.type()) == "GType"
g4 = g1.type().new_factor(dummy_network, "d", ["a", "b", "c"])
assert g1.type() == g4.type()
assert g4.variable() == "d"
assert g4.evidence() == ["a", "b", "c"] | 0.79166 | 0.844697 |
import argparse
import os
import re
import glob
import shutil
import subprocess as sp
from tempfile import TemporaryDirectory
from contextlib import contextmanager
# YAML imports: prefer PyYAML, fall back to ruamel_yaml from this environment,
# and finally try to load ruamel_yaml straight out of the base conda env.
try:
import yaml # PyYAML
# NOTE(review): bare yaml.load without an explicit Loader is deprecated in
# PyYAML >= 5.1 (emits a warning, defaults to FullLoader) -- confirm intent.
loader = yaml.load
except ImportError:
try:
import ruamel_yaml as yaml # Ruamel YAML
except ImportError:
try:
# Load Ruamel YAML from the base conda environment
from importlib import util as import_util
CONDA_BIN = os.path.dirname(os.environ['CONDA_EXE'])
ruamel_yaml_path = glob.glob(os.path.join(CONDA_BIN, '..',
'lib', 'python*.*', 'site-packages',
'ruamel_yaml', '__init__.py'))[0]
# Based on importlib example, but only needs to load_module since its the whole package, not just
# a module
spec = import_util.spec_from_file_location('ruamel_yaml', ruamel_yaml_path)
yaml = spec.loader.load_module()
except (KeyError, ImportError, IndexError):
# KeyError: CONDA_EXE unset; IndexError: glob found nothing.
raise ImportError("No YAML parser could be found in this or the conda environment. "
"Could not find PyYAML or Ruamel YAML in the current environment, "
"AND could not find Ruamel YAML in the base conda environment through CONDA_EXE path. "
"Environment not created!")
# Reached only on the ruamel paths: use its safe round-trip loader.
loader = yaml.YAML(typ="safe").load # typ="safe" avoids odd typing on output
@contextmanager
def temp_cd():
    """Run the with-block inside a fresh temporary directory.

    The previous working directory is always restored on exit, and the
    temporary directory is removed by TemporaryDirectory's own cleanup.
    """
    original_dir = os.getcwd()
    with TemporaryDirectory() as scratch_dir:
        try:
            os.chdir(scratch_dir)
            yield
        finally:
            # Restore the old cwd even if the with-block raised.
            os.chdir(original_dir)
# Args
parser = argparse.ArgumentParser(description='Creates a conda environment from file for a given Python version.')
parser.add_argument('-n', '--name', type=str,
help='The name of the created Python environment')
parser.add_argument('-p', '--python', type=str,
help='The version of the created Python environment')
parser.add_argument('conda_file',
help='The file for the created Python environment')
args = parser.parse_args()
# Open the base file
with open(args.conda_file, "r") as handle:
yaml_script = loader(handle.read())
# Spec that pins the requested interpreter, e.g. "python 3.9*".
python_replacement_string = "python {}*".format(args.python)
try:
for dep_index, dep_value in enumerate(yaml_script['dependencies']):
# NOTE(review): conda env files may contain a dict entry (e.g. {'pip': [...]})
# in dependencies; re.match on a dict raises TypeError, which the handler
# below then treats as "no dependencies" and WIPES the list -- confirm inputs.
if re.match('python([ ><=*]+[0-9.*]*)?$', dep_value): # Match explicitly 'python' and its formats
yaml_script['dependencies'].pop(dep_index)
break # Making the assumption there is only one Python entry, also avoids need to enumerate in reverse
except (KeyError, TypeError):
# Case of no dependencies key, or dependencies: None
yaml_script['dependencies'] = []
finally:
# Ensure the python version is added in. Even if the code does not need it, we assume the env does
yaml_script['dependencies'].insert(0, python_replacement_string)
# Figure out conda path
if "CONDA_EXE" in os.environ:
conda_path = os.environ["CONDA_EXE"]
else:
conda_path = shutil.which("conda")
if conda_path is None:
raise RuntimeError("Could not find a conda binary in CONDA_EXE variable or in executable search path")
print("CONDA ENV NAME {}".format(args.name))
print("PYTHON VERSION {}".format(args.python))
print("CONDA FILE NAME {}".format(args.conda_file))
print("CONDA PATH {}".format(conda_path))
# Write to a temp directory which will always be cleaned up
with temp_cd():
temp_file_name = "temp_script.yaml"
with open(temp_file_name, 'w') as f:
f.write(yaml.dump(yaml_script))
# NOTE(review): shell=True with interpolated args is a shell-injection surface;
# prefer sp.call([conda_path, 'env', 'create', '-n', args.name, '-f', temp_file_name]).
sp.call("{} env create -n {} -f {}".format(conda_path, args.name, temp_file_name), shell=True) | devtools/scripts/create_conda_env.py | import argparse
import os
import re
import glob
import shutil
import subprocess as sp
from tempfile import TemporaryDirectory
from contextlib import contextmanager
# YAML imports
try:
import yaml # PyYAML
loader = yaml.load
except ImportError:
try:
import ruamel_yaml as yaml # Ruamel YAML
except ImportError:
try:
# Load Ruamel YAML from the base conda environment
from importlib import util as import_util
CONDA_BIN = os.path.dirname(os.environ['CONDA_EXE'])
ruamel_yaml_path = glob.glob(os.path.join(CONDA_BIN, '..',
'lib', 'python*.*', 'site-packages',
'ruamel_yaml', '__init__.py'))[0]
# Based on importlib example, but only needs to load_module since its the whole package, not just
# a module
spec = import_util.spec_from_file_location('ruamel_yaml', ruamel_yaml_path)
yaml = spec.loader.load_module()
except (KeyError, ImportError, IndexError):
raise ImportError("No YAML parser could be found in this or the conda environment. "
"Could not find PyYAML or Ruamel YAML in the current environment, "
"AND could not find Ruamel YAML in the base conda environment through CONDA_EXE path. "
"Environment not created!")
loader = yaml.YAML(typ="safe").load # typ="safe" avoids odd typing on output
@contextmanager
def temp_cd():
"""Temporary CD Helper"""
cwd = os.getcwd()
with TemporaryDirectory() as td:
try:
os.chdir(td)
yield
finally:
os.chdir(cwd)
# Args
parser = argparse.ArgumentParser(description='Creates a conda environment from file for a given Python version.')
parser.add_argument('-n', '--name', type=str,
help='The name of the created Python environment')
parser.add_argument('-p', '--python', type=str,
help='The version of the created Python environment')
parser.add_argument('conda_file',
help='The file for the created Python environment')
args = parser.parse_args()
# Open the base file
with open(args.conda_file, "r") as handle:
yaml_script = loader(handle.read())
python_replacement_string = "python {}*".format(args.python)
try:
for dep_index, dep_value in enumerate(yaml_script['dependencies']):
if re.match('python([ ><=*]+[0-9.*]*)?$', dep_value): # Match explicitly 'python' and its formats
yaml_script['dependencies'].pop(dep_index)
break # Making the assumption there is only one Python entry, also avoids need to enumerate in reverse
except (KeyError, TypeError):
# Case of no dependencies key, or dependencies: None
yaml_script['dependencies'] = []
finally:
# Ensure the python version is added in. Even if the code does not need it, we assume the env does
yaml_script['dependencies'].insert(0, python_replacement_string)
# Figure out conda path
if "CONDA_EXE" in os.environ:
conda_path = os.environ["CONDA_EXE"]
else:
conda_path = shutil.which("conda")
if conda_path is None:
raise RuntimeError("Could not find a conda binary in CONDA_EXE variable or in executable search path")
print("CONDA ENV NAME {}".format(args.name))
print("PYTHON VERSION {}".format(args.python))
print("CONDA FILE NAME {}".format(args.conda_file))
print("CONDA PATH {}".format(conda_path))
# Write to a temp directory which will always be cleaned up
with temp_cd():
temp_file_name = "temp_script.yaml"
with open(temp_file_name, 'w') as f:
f.write(yaml.dump(yaml_script))
sp.call("{} env create -n {} -f {}".format(conda_path, args.name, temp_file_name), shell=True) | 0.446495 | 0.114567 |
from hashlib import sha512
from openprocurement.api.utils import (
json_view,
APIResource,
save_tender,
ROUTE_PREFIX,
context_unpack
)
from openprocurement.tender.openeu.utils import qualifications_resource
from openprocurement.relocation.api.utils import (
extract_transfer, update_ownership, save_transfer
)
from openprocurement.relocation.api.validation import (
validate_ownership_data, validate_complaint_accreditation_level
)
# Endpoint that transfers ownership of a qualification complaint: the caller
# proves ownership with a transfer token, and on success the complaint is
# re-bound to the new owner and the Transfer document records this location.
@qualifications_resource(name='Qualification complaint ownership',
path='/tenders/{tender_id}/qualifications/{qualification_id}/complaints/{complaint_id}/ownership',
description="Qualification complaint Ownership")
class QualificationComplaintOwnershipResource(APIResource):
@json_view(permission='create_complaint',
validators=(validate_complaint_accreditation_level,
validate_ownership_data,))
def post(self):
complaint = self.request.context
tender = self.request.validated['tender']
# NOTE(review): award_id is assigned but never used (and duplicates
# qualification_id) -- candidate for removal.
award_id = self.request.validated['qualification_id']
qualification_id = self.request.validated['qualification_id']
data = self.request.validated['ownership_data']
# NOTE(review): sha512() requires bytes on Python 3; this passes the raw
# request string, so the code assumes Python 2 -- confirm before porting.
if complaint.transfer_token == sha512(data['transfer']).hexdigest():
location = self.request.route_path('Tender EU Qualification Complaints', tender_id=tender.id, qualification_id=qualification_id, complaint_id=complaint.id)
location = location[len(ROUTE_PREFIX):] # strips /api/<version>
transfer = extract_transfer(self.request, transfer_id=data['id'])
# A transfer may only ever be used for a single resource location.
if transfer.get('usedFor') and transfer.get('usedFor') != location:
self.request.errors.add('body', 'transfer', 'Transfer already used')
self.request.errors.status = 403
return
else:
self.request.errors.add('body', 'transfer', 'Invalid transfer')
self.request.errors.status = 403
return
# Token and transfer validated: rebind ownership, then persist the transfer
# first and the tender second, logging each successful save.
update_ownership(complaint, transfer)
transfer.usedFor = location
self.request.validated['transfer'] = transfer
if save_transfer(self.request):
self.LOGGER.info('Updated transfer relation {}'.format(transfer.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'transfer_relation_update'}))
if save_tender(self.request):
self.LOGGER.info('Updated qualification {} complaint {} ownership of tender {}'.format(complaint.id, qualification_id, tender.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'qualification_complaint_ownership_update'}, {'complaint_id': complaint.id, 'qualification_id': qualification_id, 'tender_id': tender.id}))
return {'data': complaint.serialize('view')} | openprocurement/relocation/api/views/qualification_complaint.py | from hashlib import sha512
from openprocurement.api.utils import (
json_view,
APIResource,
save_tender,
ROUTE_PREFIX,
context_unpack
)
from openprocurement.tender.openeu.utils import qualifications_resource
from openprocurement.relocation.api.utils import (
extract_transfer, update_ownership, save_transfer
)
from openprocurement.relocation.api.validation import (
validate_ownership_data, validate_complaint_accreditation_level
)
@qualifications_resource(name='Qualification complaint ownership',
                         path='/tenders/{tender_id}/qualifications/{qualification_id}/complaints/{complaint_id}/ownership',
                         description="Qualification complaint Ownership")
class QualificationComplaintOwnershipResource(APIResource):
    """Endpoint that switches ownership of a qualification complaint
    when the caller presents a valid transfer token."""

    @json_view(permission='create_complaint',
               validators=(validate_complaint_accreditation_level,
                           validate_ownership_data,))
    def post(self):
        """Transfer complaint ownership.

        Expects validated ownership data containing 'transfer' (the secret
        whose sha512 must equal the stored transfer_token) and 'id' (the
        Transfer document id).  Responds 403 for an invalid token or a
        transfer already consumed by a different resource location.
        """
        complaint = self.request.context
        tender = self.request.validated['tender']
        # NOTE(review): award_id duplicates qualification_id and is never
        # used below -- looks like copy/paste leftover; confirm.
        award_id = self.request.validated['qualification_id']
        qualification_id = self.request.validated['qualification_id']
        data = self.request.validated['ownership_data']
        # The stored token is the hex sha512 of the client-held secret.
        # (sha512() taking a plain str suggests this targets Python 2 --
        # on Python 3 it would require bytes; confirm runtime.)
        if complaint.transfer_token == sha512(data['transfer']).hexdigest():
            location = self.request.route_path('Tender EU Qualification Complaints', tender_id=tender.id, qualification_id=qualification_id, complaint_id=complaint.id)
            location = location[len(ROUTE_PREFIX):]  # strips /api/<version>
            transfer = extract_transfer(self.request, transfer_id=data['id'])
            # A transfer may only ever be bound to one resource location.
            if transfer.get('usedFor') and transfer.get('usedFor') != location:
                self.request.errors.add('body', 'transfer', 'Transfer already used')
                self.request.errors.status = 403
                return
        else:
            self.request.errors.add('body', 'transfer', 'Invalid transfer')
            self.request.errors.status = 403
            return
        # Token matched and the transfer is fresh (or already bound to this
        # very location): switch ownership and mark the transfer consumed.
        # NOTE(review): nesting below reconstructed from a whitespace-mangled
        # dump -- verify against the original file.
        update_ownership(complaint, transfer)
        transfer.usedFor = location
        self.request.validated['transfer'] = transfer
        if save_transfer(self.request):
            self.LOGGER.info('Updated transfer relation {}'.format(transfer.id),
                             extra=context_unpack(self.request, {'MESSAGE_ID': 'transfer_relation_update'}))
            if save_tender(self.request):
                self.LOGGER.info('Updated qualification {} complaint {} ownership of tender {}'.format(complaint.id, qualification_id, tender.id),
                                 extra=context_unpack(self.request, {'MESSAGE_ID': 'qualification_complaint_ownership_update'}, {'complaint_id': complaint.id, 'qualification_id': qualification_id, 'tender_id': tender.id}))
        return {'data': complaint.serialize('view')} | 0.388038 | 0.106784
import os
import platform
import shutil
import typer
import flybirds.template as template
import flybirds.utils.flybirds_log as log
from flybirds.utils.file_helper import get_files_from_dir, \
get_paths_from_dir, \
replace_file_content, update, update_json_data
from flybirds.utils.pkg_helper import find_package_base_path
def create_demo():
    """
    Create project cli demo.

    Interactive wizard: prompts for project name, test platform
    (android/ios/web) and the platform-specific options, then copies the
    bundled template while a 900-unit progress bar advances.  All answers
    are collected into ``user_dict`` before any file is written.
    """
    typer.secho(
        "Welcome to flybirds cli. Please enter any information to continue.",
        fg=typer.colors.MAGENTA,
    )
    # Defaults used whenever the user skips an optional step.
    user_dict = {
        'device_id': "127.0.0.1:8200",
        'package_name': "ctrip.android.view",
        'web_driver_agent': "com.fd.test.WebDriverAgentLib.xctrunner",
        'headless': True,
        'browser_type': ['chromium']
    }
    project_name = typer.prompt("Please input your project name>>")
    user_dict['project_name'] = project_name
    platform_start = "Please input your test platform? "
    platform_ending = typer.style("(Android/IOS/Web)", fg=typer.colors.CYAN,
                                  bold=True)
    p_message = platform_start + platform_ending
    test_platform = typer.prompt(p_message)
    # Any unrecognised answer silently falls back to android.
    if test_platform is None or test_platform.strip().lower() not in [
            'android', 'ios', 'web']:
        test_platform = 'android'
    test_platform = test_platform.strip().lower()
    user_dict['test_platform'] = test_platform
    if test_platform in ['android', 'ios']:
        if test_platform == 'ios':
            user_dict['package_name'] = "com.ctrip.inner.wireless"
            is_bundle = typer.confirm(
                "Do you want to configure your webDriverAgent now?"
                "(this step can be skipped)")
            if is_bundle:
                web_driver_agent = typer.prompt(
                    "Please input your Bundle ID of"
                    " webDriverAgent?")
                user_dict['web_driver_agent'] = web_driver_agent
            else:
                typer.secho(
                    "You can configure your Bundle ID of webDriverAgent later"
                    " in the project's"
                    " flybirds_config.json file.", fg=typer.colors.YELLOW)
        connect_device = typer.confirm(
            "Do you want to configure your deviceId now?"
            "(this step can be skipped)")
        if connect_device:
            device_id = typer.prompt("Please input your deviceId?")
            user_dict['device_id'] = device_id
        else:
            typer.secho(
                "You can configure your deviceId later in the project's"
                " flybirds_config.json file.", fg=typer.colors.YELLOW)
        if_package = typer.confirm(
            "Do you want to configure your packageName now?"
            "(this step can be skipped)")
        if if_package:
            package_name = typer.prompt(
                "Please input your packageName?(You can use"
                " the ADB command to view your package name"
                ", such as: adb shell pm list packages |"
                " findstr 'trip')"
            )
            user_dict['package_name'] = package_name
        else:
            typer.secho(
                "You can configure your packageName later in the project's"
                " flybirds_config.json file.", fg=typer.colors.YELLOW)
    if test_platform == 'web':
        message_start = "Please enter the number represented by the " \
                        "browserType you want to test? Multiple browsers " \
                        "are separated by commas(,)."
        ending = typer.style("(1:chromium 2:firefox 3:webkit)",
                             fg=typer.colors.CYAN, bold=True)
        message = message_start + ending
        out_index = typer.prompt(message)
        index_arr = out_index.strip().split(',')
        browser_dict = {
            '1': "chromium",
            '2': "firefox",
            '3': "webkit"
        }
        # Map selected numbers to browser names, dropping unknown entries.
        # (Replaces the original side-effect list comprehension
        # ``[browser_types.append(...) for i in ...]`` with a proper one.)
        browser_types = [browser_dict[i] for i in index_arr
                         if i in browser_dict]
        # add default value
        if len(browser_types) < 1:
            browser_types.append('chromium')
        user_dict['browser_type'] = browser_types
        headless = typer.confirm(
            "Do you want to launch browser in headless mode?")
        user_dict['headless'] = headless
    try:
        typer.echo(f"Cloning into '{project_name}'...")
        # 9 progress.update(100) calls inside copy_from_template == 900.
        total = 900
        with typer.progressbar(length=total, label="Processing") as progress:
            demo_path = copy_from_template(progress, user_dict)
        typer.secho(
            f"Done it! Create Project {project_name} has success!\n"
            f"You can find it at: {demo_path}",
            fg=typer.colors.MAGENTA,
        )
    except Exception as e:
        typer.secho(
            f"Error!! create project {project_name} has error, errMsg: {e}",
            fg=typer.colors.MAGENTA,
            err=True,
        )
def copy_from_template(progress, user_dict):
    """
    Generate project files from template.

    Copies the installed ``flybirds.template`` package into
    ``<cwd>/<project_name>`` (replacing any existing directory) and patches
    the generated config/flybirds_config.json with the answers collected in
    ``user_dict``.  ``progress`` is advanced by 100 units per step
    (9 steps, matching the caller's 900-unit bar).

    Returns the absolute path of the generated project.
    """
    # Source: directory containing the flybirds.template package.
    src_file_path = template.__file__
    src_path = os.path.normpath(src_file_path[0: src_file_path.rfind(os.sep)])
    target_path = os.path.normpath(
        os.path.join(os.path.normpath(os.getcwd()),
                     user_dict.get('project_name'))
    )
    if os.path.isdir(target_path):
        # target_path already exists: start from a clean tree
        shutil.rmtree(target_path)
    shutil.copytree(src_path, target_path)
    progress.update(100)
    try:
        # process extend pkg (best effort: a broken plugin must not abort
        # project creation)
        add_extend_pkg(target_path)
    except Exception as e:
        log.error(f"[create_project][add_extend_pkg] has error, msg: {e}")
    progress.update(100)
    # The template package's __init__.py must not ship with the project.
    os.remove(os.path.normpath(os.path.join(target_path, "__init__.py")))
    progress.update(100)
    # All remaining steps edit the same generated config file.
    config_file = os.path.join(target_path, "config/flybirds_config.json")
    # modify platform
    test_platform = user_dict.get('test_platform')
    if test_platform is not None:
        replace_file_content(config_file, "platform", test_platform)
    progress.update(100)
    # modify deviceId
    device_id = user_dict.get('device_id')
    if device_id is not None:
        replace_file_content(config_file, "deviceId", device_id)
    progress.update(100)
    # modify packageName (original had this lookup duplicated on two
    # consecutive lines; the redundant one is removed)
    package_name = user_dict.get('package_name')
    if package_name is not None:
        replace_file_content(config_file, "packageName", package_name)
    progress.update(100)
    # modify webDriverAgent
    web_driver_agent = user_dict.get('web_driver_agent')
    if web_driver_agent is not None:
        replace_file_content(config_file, "webDriverAgent", web_driver_agent)
    progress.update(100)
    # modify browserType
    browser_type = user_dict.get('browser_type')
    if browser_type is not None:
        update_json_data(config_file, "web_info.browserType", browser_type)
    progress.update(100)
    # modify headless
    headless = user_dict.get('headless')
    if headless is not None:
        update_json_data(config_file, "web_info.headless", headless)
    progress.update(100)
    return target_path
def run_project(progress, target_path):
    """
    install packages: create a virtualenv inside the project, activate it
    and pip-install the project's requirements.
    """
    # Platform-specific activation script path inside the new project.
    venv_activate = os.path.normpath(
        os.path.join(target_path, activate_venv())
    )
    command = (
        f"cd {target_path} && virtualenv venv && {venv_activate} "
        f"&& pip install -r requirements.txt"
    )
    os.system(command)
    progress.update(100)
def activate_venv():
    """
    Activate venv.

    :return: the shell snippet that activates the project's virtualenv --
        the Scripts\\ batch path on Windows, a POSIX ``source`` command
        everywhere else.
    """
    on_windows = platform.system() == "Windows"
    return "./venv/Scripts/activate" if on_windows else "source venv/bin/activate"
def add_extend_pkg(demo_path):
    """
    Add expansion pack: locate every installed ``*-flybirds-plugin``
    package and copy its bundled files into the generated project.
    """
    plugins = find_package_base_path("-flybirds-plugin")
    # Nothing to do when no plugin package is installed.
    if not plugins:
        return
    copy_extend_files(plugins, demo_path)
def copy_extend_files(pkg_list, demo_pro_path):
    """
    Add extend features and config files.

    For every plugin package in ``pkg_list`` (dicts with 'path' and 'name'),
    copies the plugin's bundled ``template/features`` files into each
    android/ios feature directory of the demo project and replaces the
    project's ``config`` and ``pscript/custom_handle`` directories with the
    plugin's versions.
    """
    if demo_pro_path is None:
        return
    for pkg in pkg_list:
        # features src path: <site-packages>/<pkg name>/template
        extend_path = os.path.normpath(
            os.path.join(pkg.get("path"), pkg.get("name"), 'template'))
        if extend_path is None or not os.path.exists(extend_path):
            log.info(
                "[create_project][copy_extend_files]extend_path is none or not"
                "existed.")
            continue
        feat_path = os.path.join(os.path.normpath(extend_path), 'features')
        config_path = os.path.join(os.path.normpath(extend_path), 'config')
        custom_handle_path = os.path.join(os.path.normpath(extend_path),
                                          'pscript', "custom_handle")
        # add extend features: copy every plugin feature file into each
        # 'android' and 'ios' directory found inside the demo project
        if feat_path is not None and os.path.exists(feat_path):
            feat_files = get_files_from_dir(feat_path)
            # features target path
            demo_an_paths = get_paths_from_dir(demo_pro_path, 'android')
            demo_ios_paths = get_paths_from_dir(demo_pro_path, 'ios')
            for an_path in demo_an_paths:
                for file in feat_files:
                    shutil.copy(file, an_path)
            for ios_path in demo_ios_paths:
                for file in feat_files:
                    shutil.copy(file, ios_path)
        # add extend config: plugin config replaces the project's config
        # directory wholesale
        if config_path is not None and os.path.exists(config_path):
            # config target path
            demo_config_path = os.path.join(os.path.normpath(demo_pro_path),
                                            'config')
            if os.path.isdir(demo_config_path):
                # target_path is existed -- remove before copytree
                shutil.rmtree(demo_config_path)
            shutil.copytree(config_path, demo_config_path)
        # add extend custom operation (same replace-wholesale strategy)
        if custom_handle_path is not None and os.path.exists(
                custom_handle_path):
            # config target path
            demo_custom_handle_path = os.path.join(
                os.path.normpath(demo_pro_path),
                'pscript', "custom_handle")
            if os.path.isdir(demo_custom_handle_path):
                # target_path is existed -- remove before copytree
                shutil.rmtree(demo_custom_handle_path)
            shutil.copytree(custom_handle_path, demo_custom_handle_path)
def write_import_steps(pkg_list, demo_pro_path, site_path):
    """
    Write the steps that needs to be imported.

    Builds one ``from <pkg>.dsl.step import mod1, mod2`` line per plugin
    package that ships step modules and appends the result to the demo
    project's features/steps/steps.py.
    """
    if site_path is None:
        return
    import_str = ''
    # str that need to be imported
    for pkg in pkg_list:
        step_path = os.path.normpath(
            os.path.join(pkg.get("path"), pkg.get("name"), 'dsl', 'step'))
        if step_path is None or not os.path.exists(step_path):
            log.info(
                "[create_project][write_import_steps] extend_step path is none"
                "or not existed.")
            continue
        step_files = os.listdir(step_path)
        pkg_import_str = ''
        if step_files is not None and len(step_files) > 0:
            pkg_import_str = f'from {pkg.get("name")}.dsl.step import'
            # Only real step modules are imported; package internals skipped.
            for file in step_files:
                stem, suffix = os.path.splitext(file)
                if '__init__' == stem or '__pycache__' == stem:
                    continue
                pkg_import_str += " " + stem + ","
            # NOTE(review): if every entry is filtered out this still emits a
            # dangling "from ... import" line -- confirm that plugins always
            # ship at least one step module.
        import_str += pkg_import_str.strip(',') + "\n"
    # write the extension steps that need to be imported into the file
    steps_file = os.path.join(os.path.normpath(demo_pro_path),
                              'features/steps/steps.py')
    update(steps_file, import_str) | flybirds/cli/create_project.py | import os
import platform
import shutil
import typer
import flybirds.template as template
import flybirds.utils.flybirds_log as log
from flybirds.utils.file_helper import get_files_from_dir, \
get_paths_from_dir, \
replace_file_content, update, update_json_data
from flybirds.utils.pkg_helper import find_package_base_path
def create_demo():
    """
    Create project cli demo.

    Interactive wizard: prompts for project name, test platform
    (android/ios/web) and the platform-specific options, then copies the
    bundled template while a 900-unit progress bar advances.  All answers
    are collected into ``user_dict`` before any file is written.
    """
    typer.secho(
        "Welcome to flybirds cli. Please enter any information to continue.",
        fg=typer.colors.MAGENTA,
    )
    # Defaults used whenever the user skips an optional step.
    user_dict = {
        'device_id': "127.0.0.1:8200",
        'package_name': "ctrip.android.view",
        'web_driver_agent': "com.fd.test.WebDriverAgentLib.xctrunner",
        'headless': True,
        'browser_type': ['chromium']
    }
    project_name = typer.prompt("Please input your project name>>")
    user_dict['project_name'] = project_name
    platform_start = "Please input your test platform? "
    platform_ending = typer.style("(Android/IOS/Web)", fg=typer.colors.CYAN,
                                  bold=True)
    p_message = platform_start + platform_ending
    test_platform = typer.prompt(p_message)
    # Any unrecognised answer silently falls back to android.
    if test_platform is None or test_platform.strip().lower() not in [
            'android', 'ios', 'web']:
        test_platform = 'android'
    test_platform = test_platform.strip().lower()
    user_dict['test_platform'] = test_platform
    if test_platform in ['android', 'ios']:
        if test_platform == 'ios':
            user_dict['package_name'] = "com.ctrip.inner.wireless"
            is_bundle = typer.confirm(
                "Do you want to configure your webDriverAgent now?"
                "(this step can be skipped)")
            if is_bundle:
                web_driver_agent = typer.prompt(
                    "Please input your Bundle ID of"
                    " webDriverAgent?")
                user_dict['web_driver_agent'] = web_driver_agent
            else:
                typer.secho(
                    "You can configure your Bundle ID of webDriverAgent later"
                    " in the project's"
                    " flybirds_config.json file.", fg=typer.colors.YELLOW)
        connect_device = typer.confirm(
            "Do you want to configure your deviceId now?"
            "(this step can be skipped)")
        if connect_device:
            device_id = typer.prompt("Please input your deviceId?")
            user_dict['device_id'] = device_id
        else:
            typer.secho(
                "You can configure your deviceId later in the project's"
                " flybirds_config.json file.", fg=typer.colors.YELLOW)
        if_package = typer.confirm(
            "Do you want to configure your packageName now?"
            "(this step can be skipped)")
        if if_package:
            package_name = typer.prompt(
                "Please input your packageName?(You can use"
                " the ADB command to view your package name"
                ", such as: adb shell pm list packages |"
                " findstr 'trip')"
            )
            user_dict['package_name'] = package_name
        else:
            typer.secho(
                "You can configure your packageName later in the project's"
                " flybirds_config.json file.", fg=typer.colors.YELLOW)
    if test_platform == 'web':
        message_start = "Please enter the number represented by the " \
                        "browserType you want to test? Multiple browsers " \
                        "are separated by commas(,)."
        ending = typer.style("(1:chromium 2:firefox 3:webkit)",
                             fg=typer.colors.CYAN, bold=True)
        message = message_start + ending
        out_index = typer.prompt(message)
        index_arr = out_index.strip().split(',')
        browser_dict = {
            '1': "chromium",
            '2': "firefox",
            '3': "webkit"
        }
        # Map selected numbers to browser names, dropping unknown entries.
        # (Replaces the original side-effect list comprehension
        # ``[browser_types.append(...) for i in ...]`` with a proper one.)
        browser_types = [browser_dict[i] for i in index_arr
                         if i in browser_dict]
        # add default value
        if len(browser_types) < 1:
            browser_types.append('chromium')
        user_dict['browser_type'] = browser_types
        headless = typer.confirm(
            "Do you want to launch browser in headless mode?")
        user_dict['headless'] = headless
    try:
        typer.echo(f"Cloning into '{project_name}'...")
        # 9 progress.update(100) calls inside copy_from_template == 900.
        total = 900
        with typer.progressbar(length=total, label="Processing") as progress:
            demo_path = copy_from_template(progress, user_dict)
        typer.secho(
            f"Done it! Create Project {project_name} has success!\n"
            f"You can find it at: {demo_path}",
            fg=typer.colors.MAGENTA,
        )
    except Exception as e:
        typer.secho(
            f"Error!! create project {project_name} has error, errMsg: {e}",
            fg=typer.colors.MAGENTA,
            err=True,
        )
def copy_from_template(progress, user_dict):
    """
    Generate project files from template.

    Copies the installed ``flybirds.template`` package into
    ``<cwd>/<project_name>`` (replacing any existing directory) and patches
    the generated config/flybirds_config.json with the answers collected in
    ``user_dict``.  ``progress`` is advanced by 100 units per step
    (9 steps, matching the caller's 900-unit bar).

    Returns the absolute path of the generated project.
    """
    # Source: directory containing the flybirds.template package.
    src_file_path = template.__file__
    src_path = os.path.normpath(src_file_path[0: src_file_path.rfind(os.sep)])
    target_path = os.path.normpath(
        os.path.join(os.path.normpath(os.getcwd()),
                     user_dict.get('project_name'))
    )
    if os.path.isdir(target_path):
        # target_path already exists: start from a clean tree
        shutil.rmtree(target_path)
    shutil.copytree(src_path, target_path)
    progress.update(100)
    try:
        # process extend pkg (best effort: a broken plugin must not abort
        # project creation)
        add_extend_pkg(target_path)
    except Exception as e:
        log.error(f"[create_project][add_extend_pkg] has error, msg: {e}")
    progress.update(100)
    # The template package's __init__.py must not ship with the project.
    os.remove(os.path.normpath(os.path.join(target_path, "__init__.py")))
    progress.update(100)
    # All remaining steps edit the same generated config file.
    config_file = os.path.join(target_path, "config/flybirds_config.json")
    # modify platform
    test_platform = user_dict.get('test_platform')
    if test_platform is not None:
        replace_file_content(config_file, "platform", test_platform)
    progress.update(100)
    # modify deviceId
    device_id = user_dict.get('device_id')
    if device_id is not None:
        replace_file_content(config_file, "deviceId", device_id)
    progress.update(100)
    # modify packageName (original had this lookup duplicated on two
    # consecutive lines; the redundant one is removed)
    package_name = user_dict.get('package_name')
    if package_name is not None:
        replace_file_content(config_file, "packageName", package_name)
    progress.update(100)
    # modify webDriverAgent
    web_driver_agent = user_dict.get('web_driver_agent')
    if web_driver_agent is not None:
        replace_file_content(config_file, "webDriverAgent", web_driver_agent)
    progress.update(100)
    # modify browserType
    browser_type = user_dict.get('browser_type')
    if browser_type is not None:
        update_json_data(config_file, "web_info.browserType", browser_type)
    progress.update(100)
    # modify headless
    headless = user_dict.get('headless')
    if headless is not None:
        update_json_data(config_file, "web_info.headless", headless)
    progress.update(100)
    return target_path
def run_project(progress, target_path):
    """
    install packages: create a virtualenv inside the project, activate it
    and pip-install the project's requirements.
    """
    # Platform-specific activation script path inside the new project.
    venv_activate = os.path.normpath(
        os.path.join(target_path, activate_venv())
    )
    command = (
        f"cd {target_path} && virtualenv venv && {venv_activate} "
        f"&& pip install -r requirements.txt"
    )
    os.system(command)
    progress.update(100)
def activate_venv():
    """
    Activate venv.

    :return: the shell snippet that activates the project's virtualenv --
        the Scripts\\ batch path on Windows, a POSIX ``source`` command
        everywhere else.
    """
    on_windows = platform.system() == "Windows"
    return "./venv/Scripts/activate" if on_windows else "source venv/bin/activate"
def add_extend_pkg(demo_path):
    """
    Add expansion pack: locate every installed ``*-flybirds-plugin``
    package and copy its bundled files into the generated project.
    """
    plugins = find_package_base_path("-flybirds-plugin")
    # Nothing to do when no plugin package is installed.
    if not plugins:
        return
    copy_extend_files(plugins, demo_path)
def copy_extend_files(pkg_list, demo_pro_path):
    """
    Add extend features and config files.

    For every plugin package in ``pkg_list`` (dicts with 'path' and 'name'),
    copies the plugin's bundled ``template/features`` files into each
    android/ios feature directory of the demo project and replaces the
    project's ``config`` and ``pscript/custom_handle`` directories with the
    plugin's versions.
    """
    if demo_pro_path is None:
        return
    for pkg in pkg_list:
        # features src path: <site-packages>/<pkg name>/template
        extend_path = os.path.normpath(
            os.path.join(pkg.get("path"), pkg.get("name"), 'template'))
        if extend_path is None or not os.path.exists(extend_path):
            log.info(
                "[create_project][copy_extend_files]extend_path is none or not"
                "existed.")
            continue
        feat_path = os.path.join(os.path.normpath(extend_path), 'features')
        config_path = os.path.join(os.path.normpath(extend_path), 'config')
        custom_handle_path = os.path.join(os.path.normpath(extend_path),
                                          'pscript', "custom_handle")
        # add extend features: copy every plugin feature file into each
        # 'android' and 'ios' directory found inside the demo project
        if feat_path is not None and os.path.exists(feat_path):
            feat_files = get_files_from_dir(feat_path)
            # features target path
            demo_an_paths = get_paths_from_dir(demo_pro_path, 'android')
            demo_ios_paths = get_paths_from_dir(demo_pro_path, 'ios')
            for an_path in demo_an_paths:
                for file in feat_files:
                    shutil.copy(file, an_path)
            for ios_path in demo_ios_paths:
                for file in feat_files:
                    shutil.copy(file, ios_path)
        # add extend config: plugin config replaces the project's config
        # directory wholesale
        if config_path is not None and os.path.exists(config_path):
            # config target path
            demo_config_path = os.path.join(os.path.normpath(demo_pro_path),
                                            'config')
            if os.path.isdir(demo_config_path):
                # target_path is existed -- remove before copytree
                shutil.rmtree(demo_config_path)
            shutil.copytree(config_path, demo_config_path)
        # add extend custom operation (same replace-wholesale strategy)
        if custom_handle_path is not None and os.path.exists(
                custom_handle_path):
            # config target path
            demo_custom_handle_path = os.path.join(
                os.path.normpath(demo_pro_path),
                'pscript', "custom_handle")
            if os.path.isdir(demo_custom_handle_path):
                # target_path is existed -- remove before copytree
                shutil.rmtree(demo_custom_handle_path)
            shutil.copytree(custom_handle_path, demo_custom_handle_path)
def write_import_steps(pkg_list, demo_pro_path, site_path):
    """
    Write the steps that needs to be imported.

    Builds one ``from <pkg>.dsl.step import mod1, mod2`` line per plugin
    package that ships step modules and appends the result to the demo
    project's features/steps/steps.py.
    """
    if site_path is None:
        return
    import_str = ''
    # str that need to be imported
    for pkg in pkg_list:
        step_path = os.path.normpath(
            os.path.join(pkg.get("path"), pkg.get("name"), 'dsl', 'step'))
        if step_path is None or not os.path.exists(step_path):
            log.info(
                "[create_project][write_import_steps] extend_step path is none"
                "or not existed.")
            continue
        step_files = os.listdir(step_path)
        pkg_import_str = ''
        if step_files is not None and len(step_files) > 0:
            pkg_import_str = f'from {pkg.get("name")}.dsl.step import'
            # Only real step modules are imported; package internals skipped.
            for file in step_files:
                stem, suffix = os.path.splitext(file)
                if '__init__' == stem or '__pycache__' == stem:
                    continue
                pkg_import_str += " " + stem + ","
            # NOTE(review): if every entry is filtered out this still emits a
            # dangling "from ... import" line -- confirm that plugins always
            # ship at least one step module.
        import_str += pkg_import_str.strip(',') + "\n"
    # write the extension steps that need to be imported into the file
    steps_file = os.path.join(os.path.normpath(demo_pro_path),
                              'features/steps/steps.py')
    update(steps_file, import_str) | 0.160595 | 0.077553
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import re
import librosa
from magenta.models.onsets_frames_transcription import create_dataset_util
from magenta.music import audio_io
from magenta.music import midi_io
from magenta.music import sequences_lib
from magenta.protobuf import music_pb2
import numpy as np
import tensorflow as tf
# Command-line flags: where MAPS lives, where the TFRecords go, and the
# segment-length bounds (in seconds, per their use against split points).
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('input_dir', None,
                           'Directory where the un-zipped MAPS files are.')
tf.app.flags.DEFINE_string('output_dir', './',
                           'Directory where the two output TFRecord files '
                           '(train and test) will be placed.')
tf.app.flags.DEFINE_integer('min_length', 5, 'minimum segment length')
tf.app.flags.DEFINE_integer('max_length', 20, 'maximum segment length')
tf.app.flags.DEFINE_integer('sample_rate', 16000, 'desired sample rate')

# ENSTDk* directories form the test split, the rest the train split
# (presumably the standard MAPS "configuration 2" split, matching the
# maps_config2_* output filenames -- confirm against the dataset docs).
TEST_DIRS = ['ENSTDkCl/MUS', 'ENSTDkAm/MUS']
TRAIN_DIRS = ['AkPnBcht/MUS', 'AkPnBsdf/MUS', 'AkPnCGdD/MUS', 'AkPnStgb/MUS',
              'SptkBGAm/MUS', 'SptkBGCl/MUS', 'StbgTGd2/MUS']
def filename_to_id(filename):
  """Translate a .wav or .mid path to a MAPS sequence id."""
  base = os.path.basename(filename)
  # The id sits between the last 'MUS-' marker and the trailing
  # '_<instrument>.<ext>' suffix, e.g.
  # MAPS_MUS-chpn_op25_e4_ENSTDkCl.wav -> 'chpn_op25_e4'.
  return re.match(r'.*MUS-(.*)_[^_]+\.\w{3}', base).group(1)
def generate_train_set(exclude_ids):
  """Generate the train TFRecord.

  Scans TRAIN_DIRS for wav files (each paired with a same-stem .mid),
  skips any piece whose MAPS id is in ``exclude_ids`` (the test-set ids),
  splits every recording into segments and writes one tf.train.Example
  per segment.

  Args:
    exclude_ids: iterable of MAPS sequence ids to leave out of training.
  """
  train_file_pairs = []
  for directory in TRAIN_DIRS:
    path = os.path.join(FLAGS.input_dir, directory)
    path = os.path.join(path, '*.wav')
    wav_files = glob.glob(path)
    # find matching mid files
    for wav_file in wav_files:
      base_name_root, _ = os.path.splitext(wav_file)
      mid_file = base_name_root + '.mid'
      if filename_to_id(wav_file) not in exclude_ids:
        train_file_pairs.append((wav_file, mid_file))
  train_output_name = os.path.join(FLAGS.output_dir,
                                   'maps_config2_train.tfrecord')
  with tf.python_io.TFRecordWriter(train_output_name) as writer:
    for idx, pair in enumerate(train_file_pairs):
      print('{} of {}: {}'.format(idx, len(train_file_pairs), pair[0]))
      # load the wav data
      wav_data = tf.gfile.Open(pair[0], 'rb').read()
      samples = audio_io.wav_data_to_samples(wav_data, FLAGS.sample_rate)
      # Normalized copy is only used for choosing split points; the raw
      # samples are what get written out below.
      norm_samples = librosa.util.normalize(samples, norm=np.inf)
      # load the midi data and convert to a notesequence
      ns = midi_io.midi_file_to_note_sequence(pair[1])
      splits = create_dataset_util.find_split_points(
          ns, norm_samples, FLAGS.sample_rate, FLAGS.min_length,
          FLAGS.max_length)
      # Velocity range of the whole piece, stored with every segment.
      velocities = [note.velocity for note in ns.notes]
      velocity_max = np.max(velocities)
      velocity_min = np.min(velocities)
      new_velocity_tuple = music_pb2.VelocityRange(
          min=velocity_min, max=velocity_max)
      # Consecutive split points delimit one segment each.
      for start, end in zip(splits[:-1], splits[1:]):
        # Drop segments shorter than the configured minimum.
        if end - start < FLAGS.min_length:
          continue
        new_ns = sequences_lib.extract_subsequence(ns, start, end)
        samples_start = int(start * FLAGS.sample_rate)
        samples_end = samples_start + int((end-start) * FLAGS.sample_rate)
        new_samples = samples[samples_start:samples_end]
        new_wav_data = audio_io.samples_to_wav_data(new_samples,
                                                    FLAGS.sample_rate)
        example = tf.train.Example(features=tf.train.Features(feature={
            'id':
                tf.train.Feature(bytes_list=tf.train.BytesList(
                    value=[pair[0].encode()]
                )),
            'sequence':
                tf.train.Feature(bytes_list=tf.train.BytesList(
                    value=[new_ns.SerializeToString()]
                )),
            'audio':
                tf.train.Feature(bytes_list=tf.train.BytesList(
                    value=[new_wav_data]
                )),
            'velocity_range':
                tf.train.Feature(bytes_list=tf.train.BytesList(
                    value=[new_velocity_tuple.SerializeToString()]
                )),
        }))
        writer.write(example.SerializeToString())
def generate_test_set():
  """Generate the test TFRecord.

  Writes one Example per full-length recording found in TEST_DIRS (no
  segmentation, unlike the train set) and returns the list of MAPS ids
  used, so the caller can exclude them from the train split.
  """
  test_file_pairs = []
  for directory in TEST_DIRS:
    path = os.path.join(FLAGS.input_dir, directory)
    path = os.path.join(path, '*.wav')
    wav_files = glob.glob(path)
    # find matching mid files
    for wav_file in wav_files:
      base_name_root, _ = os.path.splitext(wav_file)
      mid_file = base_name_root + '.mid'
      test_file_pairs.append((wav_file, mid_file))
  test_output_name = os.path.join(FLAGS.output_dir,
                                  'maps_config2_test.tfrecord')
  with tf.python_io.TFRecordWriter(test_output_name) as writer:
    for idx, pair in enumerate(test_file_pairs):
      print('{} of {}: {}'.format(idx, len(test_file_pairs), pair[0]))
      # load the wav data and resample it.
      samples = audio_io.load_audio(pair[0], FLAGS.sample_rate)
      wav_data = audio_io.samples_to_wav_data(samples, FLAGS.sample_rate)
      # load the midi data and convert to a notesequence
      ns = midi_io.midi_file_to_note_sequence(pair[1])
      # Velocity range of the whole piece, stored alongside the audio.
      velocities = [note.velocity for note in ns.notes]
      velocity_max = np.max(velocities)
      velocity_min = np.min(velocities)
      new_velocity_tuple = music_pb2.VelocityRange(
          min=velocity_min, max=velocity_max)
      example = tf.train.Example(features=tf.train.Features(feature={
          'id':
              tf.train.Feature(bytes_list=tf.train.BytesList(
                  value=[pair[0].encode()]
              )),
          'sequence':
              tf.train.Feature(bytes_list=tf.train.BytesList(
                  value=[ns.SerializeToString()]
              )),
          'audio':
              tf.train.Feature(bytes_list=tf.train.BytesList(
                  value=[wav_data]
              )),
          'velocity_range':
              tf.train.Feature(bytes_list=tf.train.BytesList(
                  value=[new_velocity_tuple.SerializeToString()]
              )),
      }))
      writer.write(example.SerializeToString())
  return [filename_to_id(wav) for wav, _ in test_file_pairs]
def main(unused_argv):
  """Build the test split first so its ids can be excluded from training."""
  test_ids = generate_test_set()
  generate_train_set(test_ids)


def console_entry_point():
  """setuptools console-script entry point."""
  tf.app.run(main)


if __name__ == '__main__':
  console_entry_point() | magenta/models/onsets_frames_transcription/onsets_frames_transcription_create_dataset.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import re
import librosa
from magenta.models.onsets_frames_transcription import create_dataset_util
from magenta.music import audio_io
from magenta.music import midi_io
from magenta.music import sequences_lib
from magenta.protobuf import music_pb2
import numpy as np
import tensorflow as tf
# Command-line flags: where MAPS lives, where the TFRecords go, and the
# segment-length bounds (in seconds, per their use against split points).
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('input_dir', None,
                           'Directory where the un-zipped MAPS files are.')
tf.app.flags.DEFINE_string('output_dir', './',
                           'Directory where the two output TFRecord files '
                           '(train and test) will be placed.')
tf.app.flags.DEFINE_integer('min_length', 5, 'minimum segment length')
tf.app.flags.DEFINE_integer('max_length', 20, 'maximum segment length')
tf.app.flags.DEFINE_integer('sample_rate', 16000, 'desired sample rate')

# ENSTDk* directories form the test split, the rest the train split
# (presumably the standard MAPS "configuration 2" split, matching the
# maps_config2_* output filenames -- confirm against the dataset docs).
TEST_DIRS = ['ENSTDkCl/MUS', 'ENSTDkAm/MUS']
TRAIN_DIRS = ['AkPnBcht/MUS', 'AkPnBsdf/MUS', 'AkPnCGdD/MUS', 'AkPnStgb/MUS',
              'SptkBGAm/MUS', 'SptkBGCl/MUS', 'StbgTGd2/MUS']
def filename_to_id(filename):
  """Translate a .wav or .mid path to a MAPS sequence id."""
  base = os.path.basename(filename)
  # The id sits between the last 'MUS-' marker and the trailing
  # '_<instrument>.<ext>' suffix, e.g.
  # MAPS_MUS-chpn_op25_e4_ENSTDkCl.wav -> 'chpn_op25_e4'.
  return re.match(r'.*MUS-(.*)_[^_]+\.\w{3}', base).group(1)
def generate_train_set(exclude_ids):
  """Generate the train TFRecord.

  Scans TRAIN_DIRS for wav files (each paired with a same-stem .mid),
  skips any piece whose MAPS id is in ``exclude_ids`` (the test-set ids),
  splits every recording into segments and writes one tf.train.Example
  per segment.

  Args:
    exclude_ids: iterable of MAPS sequence ids to leave out of training.
  """
  train_file_pairs = []
  for directory in TRAIN_DIRS:
    path = os.path.join(FLAGS.input_dir, directory)
    path = os.path.join(path, '*.wav')
    wav_files = glob.glob(path)
    # find matching mid files
    for wav_file in wav_files:
      base_name_root, _ = os.path.splitext(wav_file)
      mid_file = base_name_root + '.mid'
      if filename_to_id(wav_file) not in exclude_ids:
        train_file_pairs.append((wav_file, mid_file))
  train_output_name = os.path.join(FLAGS.output_dir,
                                   'maps_config2_train.tfrecord')
  with tf.python_io.TFRecordWriter(train_output_name) as writer:
    for idx, pair in enumerate(train_file_pairs):
      print('{} of {}: {}'.format(idx, len(train_file_pairs), pair[0]))
      # load the wav data
      wav_data = tf.gfile.Open(pair[0], 'rb').read()
      samples = audio_io.wav_data_to_samples(wav_data, FLAGS.sample_rate)
      # Normalized copy is only used for choosing split points; the raw
      # samples are what get written out below.
      norm_samples = librosa.util.normalize(samples, norm=np.inf)
      # load the midi data and convert to a notesequence
      ns = midi_io.midi_file_to_note_sequence(pair[1])
      splits = create_dataset_util.find_split_points(
          ns, norm_samples, FLAGS.sample_rate, FLAGS.min_length,
          FLAGS.max_length)
      # Velocity range of the whole piece, stored with every segment.
      velocities = [note.velocity for note in ns.notes]
      velocity_max = np.max(velocities)
      velocity_min = np.min(velocities)
      new_velocity_tuple = music_pb2.VelocityRange(
          min=velocity_min, max=velocity_max)
      # Consecutive split points delimit one segment each.
      for start, end in zip(splits[:-1], splits[1:]):
        # Drop segments shorter than the configured minimum.
        if end - start < FLAGS.min_length:
          continue
        new_ns = sequences_lib.extract_subsequence(ns, start, end)
        samples_start = int(start * FLAGS.sample_rate)
        samples_end = samples_start + int((end-start) * FLAGS.sample_rate)
        new_samples = samples[samples_start:samples_end]
        new_wav_data = audio_io.samples_to_wav_data(new_samples,
                                                    FLAGS.sample_rate)
        example = tf.train.Example(features=tf.train.Features(feature={
            'id':
                tf.train.Feature(bytes_list=tf.train.BytesList(
                    value=[pair[0].encode()]
                )),
            'sequence':
                tf.train.Feature(bytes_list=tf.train.BytesList(
                    value=[new_ns.SerializeToString()]
                )),
            'audio':
                tf.train.Feature(bytes_list=tf.train.BytesList(
                    value=[new_wav_data]
                )),
            'velocity_range':
                tf.train.Feature(bytes_list=tf.train.BytesList(
                    value=[new_velocity_tuple.SerializeToString()]
                )),
        }))
        writer.write(example.SerializeToString())
def generate_test_set():
  """Generate the test TFRecord.

  Writes one Example per full-length recording found in TEST_DIRS (no
  segmentation, unlike the train set) and returns the list of MAPS ids
  used, so the caller can exclude them from the train split.
  """
  test_file_pairs = []
  for directory in TEST_DIRS:
    path = os.path.join(FLAGS.input_dir, directory)
    path = os.path.join(path, '*.wav')
    wav_files = glob.glob(path)
    # find matching mid files
    for wav_file in wav_files:
      base_name_root, _ = os.path.splitext(wav_file)
      mid_file = base_name_root + '.mid'
      test_file_pairs.append((wav_file, mid_file))
  test_output_name = os.path.join(FLAGS.output_dir,
                                  'maps_config2_test.tfrecord')
  with tf.python_io.TFRecordWriter(test_output_name) as writer:
    for idx, pair in enumerate(test_file_pairs):
      print('{} of {}: {}'.format(idx, len(test_file_pairs), pair[0]))
      # load the wav data and resample it.
      samples = audio_io.load_audio(pair[0], FLAGS.sample_rate)
      wav_data = audio_io.samples_to_wav_data(samples, FLAGS.sample_rate)
      # load the midi data and convert to a notesequence
      ns = midi_io.midi_file_to_note_sequence(pair[1])
      # Velocity range of the whole piece, stored alongside the audio.
      velocities = [note.velocity for note in ns.notes]
      velocity_max = np.max(velocities)
      velocity_min = np.min(velocities)
      new_velocity_tuple = music_pb2.VelocityRange(
          min=velocity_min, max=velocity_max)
      example = tf.train.Example(features=tf.train.Features(feature={
          'id':
              tf.train.Feature(bytes_list=tf.train.BytesList(
                  value=[pair[0].encode()]
              )),
          'sequence':
              tf.train.Feature(bytes_list=tf.train.BytesList(
                  value=[ns.SerializeToString()]
              )),
          'audio':
              tf.train.Feature(bytes_list=tf.train.BytesList(
                  value=[wav_data]
              )),
          'velocity_range':
              tf.train.Feature(bytes_list=tf.train.BytesList(
                  value=[new_velocity_tuple.SerializeToString()]
              )),
      }))
      writer.write(example.SerializeToString())
  return [filename_to_id(wav) for wav, _ in test_file_pairs]
def main(unused_argv):
  """Build the test split first so its ids can be excluded from training."""
  test_ids = generate_test_set()
  generate_train_set(test_ids)


def console_entry_point():
  """setuptools console-script entry point."""
  tf.app.run(main)


if __name__ == '__main__':
  console_entry_point() | 0.727685 | 0.124745
from __future__ import absolute_import, print_function
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import regularizers
from niftynet.network.vnet import VNet
from tests.niftynet_testcase import NiftyNetTestCase
class VNetTest(NiftyNetTestCase):
    """Shape smoke tests for VNet.

    Each test runs one forward pass and checks that the spatial dimensions
    are preserved while the channel dimension becomes ``num_classes``
    (160), in 3-D and 2-D, with and without L2 w/b regularisers.
    """

    def test_3d_shape(self):
        # (batch, x, y, z, channels)
        input_shape = (2, 32, 32, 32, 1)
        x = tf.ones(input_shape)
        # vnet_instance = VNet(num_classes=160)
        vnet_instance = VNet(num_classes=160)
        out = vnet_instance(x, is_training=True)
        print(vnet_instance.num_trainable_params())
        with self.cached_session() as sess:
            sess.run(tf.global_variables_initializer())
            out = sess.run(out)
            self.assertAllClose((2, 32, 32, 32, 160), out.shape)

    def test_2d_shape(self):
        # (batch, x, y, channels)
        input_shape = (2, 32, 32, 1)
        x = tf.ones(input_shape)
        # vnet_instance = VNet(num_classes=160)
        vnet_instance = VNet(num_classes=160)
        out = vnet_instance(x, is_training=True)
        print(vnet_instance.num_trainable_params())
        with self.cached_session() as sess:
            sess.run(tf.global_variables_initializer())
            out = sess.run(out)
            self.assertAllClose((2, 32, 32, 160), out.shape)

    def test_3d_reg_shape(self):
        input_shape = (2, 32, 32, 32, 1)
        x = tf.ones(input_shape)
        # vnet_instance = VNet(num_classes=160)
        # Attaching regularisers must not change the output shape.
        vnet_instance = VNet(
            num_classes=160,
            w_regularizer=regularizers.l2_regularizer(0.4),
            b_regularizer=regularizers.l2_regularizer(0.4))
        out = vnet_instance(x, is_training=True)
        print(vnet_instance.num_trainable_params())
        # print(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
        with self.cached_session() as sess:
            sess.run(tf.global_variables_initializer())
            out = sess.run(out)
            self.assertAllClose((2, 32, 32, 32, 160), out.shape)

    def test_2d_reg_shape(self):
        input_shape = (2, 32, 32, 1)
        x = tf.ones(input_shape)
        # vnet_instance = VNet(num_classes=160)
        vnet_instance = VNet(
            num_classes=160,
            w_regularizer=regularizers.l2_regularizer(0.4),
            b_regularizer=regularizers.l2_regularizer(0.4))
        out = vnet_instance(x, is_training=True)
        print(vnet_instance.num_trainable_params())
        # print(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
        # print(vnet_instance.regularizer_loss())
        with self.cached_session() as sess:
            sess.run(tf.global_variables_initializer())
            out = sess.run(out)
            self.assertAllClose((2, 32, 32, 160), out.shape)


if __name__ == "__main__":
    tf.test.main() | tests/vnet_test.py | from __future__ import absolute_import, print_function
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import regularizers
from niftynet.network.vnet import VNet
from tests.niftynet_testcase import NiftyNetTestCase
class VNetTest(NiftyNetTestCase):
def test_3d_shape(self):
input_shape = (2, 32, 32, 32, 1)
x = tf.ones(input_shape)
# vnet_instance = VNet(num_classes=160)
vnet_instance = VNet(num_classes=160)
out = vnet_instance(x, is_training=True)
print(vnet_instance.num_trainable_params())
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
out = sess.run(out)
self.assertAllClose((2, 32, 32, 32, 160), out.shape)
def test_2d_shape(self):
input_shape = (2, 32, 32, 1)
x = tf.ones(input_shape)
# vnet_instance = VNet(num_classes=160)
vnet_instance = VNet(num_classes=160)
out = vnet_instance(x, is_training=True)
print(vnet_instance.num_trainable_params())
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
out = sess.run(out)
self.assertAllClose((2, 32, 32, 160), out.shape)
def test_3d_reg_shape(self):
input_shape = (2, 32, 32, 32, 1)
x = tf.ones(input_shape)
# vnet_instance = VNet(num_classes=160)
vnet_instance = VNet(
num_classes=160,
w_regularizer=regularizers.l2_regularizer(0.4),
b_regularizer=regularizers.l2_regularizer(0.4))
out = vnet_instance(x, is_training=True)
print(vnet_instance.num_trainable_params())
# print(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
out = sess.run(out)
self.assertAllClose((2, 32, 32, 32, 160), out.shape)
def test_2d_reg_shape(self):
input_shape = (2, 32, 32, 1)
x = tf.ones(input_shape)
# vnet_instance = VNet(num_classes=160)
vnet_instance = VNet(
num_classes=160,
w_regularizer=regularizers.l2_regularizer(0.4),
b_regularizer=regularizers.l2_regularizer(0.4))
out = vnet_instance(x, is_training=True)
print(vnet_instance.num_trainable_params())
# print(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
# print(vnet_instance.regularizer_loss())
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
out = sess.run(out)
self.assertAllClose((2, 32, 32, 160), out.shape)
if __name__ == "__main__":
tf.test.main() | 0.537527 | 0.253503 |
import ftplib
import hashlib
import logging
import os
import os.path as op
import shutil
import sys
import tarfile
import tempfile
import time
from functools import partial
from math import log, ceil
from subprocess import check_output
from zipfile import ZipFile

from six.moves import urllib
from tqdm import tqdm
if sys.version_info[0] == 3:
string_types = str
else:
string_types = basestring
ALLOWED_KINDS = ["file", "tar", "zip", "tar.gz"]
ZIP_KINDS = ["tar", "zip", "tar.gz"]
remote_file_size_default = 1
def download(
    url, path, kind="file", progressbar=True, replace=False, timeout=10.0, verbose=True
):
    """Download a URL.

    This will download a file and store it in a '~/data/` folder,
    creating directories if need be. It will also work for zip
    files, in which case it will unzip all of the files to the
    desired location.

    Parameters
    ----------
    url : string
        The url of the file to download. This may be a dropbox
        or google drive "share link", or a regular URL. If it
        is a share link, then it should point to a single file and
        not a folder. To download folders, zip them first.
    path : string
        The path where the downloaded file will be stored. If ``zipfile``
        is True, then this must be a folder into which files will be zipped.
    kind : one of ['file', 'zip', 'tar', 'tar.gz']
        The kind of file to be downloaded. If not 'file', then the file
        contents will be unpackaged according to the kind specified. Package
        contents will be placed in ``root_destination/<name>``.
    progressbar : bool
        Whether to display a progress bar during file download.
    replace : bool
        If True and the URL points to a single file, overwrite the
        old file if possible.
    timeout : float
        The URL open timeout.
    verbose : bool
        Whether to print download status to the screen.

    Returns
    -------
    out_path : string
        A path to the downloaded file (or folder, in the case of
        a zip file).
    """
    if kind not in ALLOWED_KINDS:
        raise ValueError("`kind` must be one of {}, got {}".format(ALLOWED_KINDS, kind))
    # Make sure we have directories to dump files
    path = op.expanduser(path)
    if len(path) == 0:
        raise ValueError("You must specify a path. For current directory use .")
    download_url = _convert_url_to_downloadable(url)
    if replace is False and op.exists(path):
        msg = (
            "Replace is False and data exists, so doing nothing. "
            "Use replace==True to re-download the data."
        )
    elif kind in ZIP_KINDS:
        # Create new folder for data if we need it
        if not op.isdir(path):
            if verbose:
                tqdm.write("Creating data folder...", file=sys.stdout)
            os.makedirs(path)
        # Download the archive to a temporary folder, then unpack it into
        # the destination folder.
        path_temp = _TempDir()
        path_temp_file = op.join(path_temp, "tmp.{}".format(kind))
        _fetch_file(
            download_url,
            path_temp_file,
            timeout=timeout,
            verbose=verbose,
            progressbar=progressbar,
        )
        # Unzip the file to the out path
        if verbose:
            tqdm.write("Extracting {} file...".format(kind), file=sys.stdout)
        if kind == "zip":
            zipper = ZipFile
        elif kind == "tar":
            zipper = tarfile.open
        else:  # "tar.gz" — the only remaining member of ZIP_KINDS
            zipper = partial(tarfile.open, mode="r:gz")
        with zipper(path_temp_file) as myobj:
            myobj.extractall(path)
        msg = "Successfully downloaded / unzipped to {}".format(path)
    else:
        # Plain file download. Guard against an empty dirname (a bare
        # filename in the current directory), which would otherwise make
        # os.makedirs("") raise FileNotFoundError.
        parent = op.dirname(path)
        if parent and not op.isdir(parent):
            os.makedirs(parent)
        _fetch_file(
            download_url,
            path,
            timeout=timeout,
            verbose=verbose,
            progressbar=progressbar,
        )
        msg = "Successfully downloaded file to {}".format(path)
    if verbose:
        tqdm.write(msg, file=sys.stdout)
    return path
def _convert_url_to_downloadable(url):
"""Convert a url to the proper style depending on its website."""
if "drive.google.com" in url:
# For future support of google drive
file_id = url.split("d/")[1].split("/")[0]
base_url = "https://drive.google.com/uc?export=download&id="
out = "{}{}".format(base_url, file_id)
elif "dropbox.com" in url:
if url.endswith(".png"):
out = url + "?dl=1"
else:
out = url.replace("dl=0", "dl=1")
elif "github.com" in url:
out = url.replace("github.com", "raw.githubusercontent.com")
out = out.replace("blob/", "")
else:
out = url
return out
def _fetch_file(
    url,
    file_name,
    resume=True,
    hash_=None,
    timeout=10.0,
    progressbar=True,
    verbose=True,
):
    """Load requested file, downloading it if needed or requested.

    Parameters
    ----------
    url: string
        The url of file to be downloaded.
    file_name: string
        Name, along with the path, of where downloaded file will be saved.
    resume: bool, optional
        If true, try to resume partially downloaded files.
    hash_ : str | None
        The hash of the file to check. If None, no checking is
        performed.
    timeout : float
        The URL open timeout.
    verbose : bool
        Whether to print download status.
    """
    # Adapted from NISL and MNE-python:
    # https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
    # https://martinos.org/mne
    # An MD5 hex digest is always 32 characters; reject anything else early.
    if hash_ is not None and (not isinstance(hash_, string_types) or len(hash_) != 32):
        raise ValueError(
            "Bad hash value given, should be a 32-character " "string:\n%s" % (hash_,)
        )
    # Download into a ".part" file; only move into place once complete.
    temp_file_name = file_name + ".part"
    try:
        remote_file_size = remote_file_size_default
        if "dropbox.com" in url:
            # Use requests to handle cookies.
            # XXX In the future, we should probably use requests everywhere.
            # Unless we want to minimize dependencies.
            try:
                import requests
            except ModuleNotFoundError:
                raise ValueError(
                    "To download Dropbox links, you need to "
                    "install the `requests` module."
                )
            resp = requests.get(url)
            chunk_size = 8192  # 2 ** 13
            with open(temp_file_name, "wb") as ff:
                for chunk in resp.iter_content(chunk_size=chunk_size):
                    if chunk:  # filter out keep-alive new chunks
                        ff.write(chunk)
        else:
            # Check file size and displaying it alongside the download url
            req = request_agent(url)
            u = urllib.request.urlopen(req, timeout=timeout)
            u.close()
            # this is necessary to follow any redirects
            url = u.geturl()
            req = request_agent(url)
            u = urllib.request.urlopen(req, timeout=timeout)
            try:
                # Fall back to the sentinel default when the server sends
                # no Content-Length header.
                remote_file_size = int(
                    u.headers.get(
                        "Content-Length", str(remote_file_size_default)
                    ).strip()
                )
            finally:
                u.close()
                del u
            if verbose:
                tqdm.write(
                    "Downloading data from %s (%s)\n"
                    % (url, sizeof_fmt(remote_file_size)),
                    file=sys.stdout,
                )
            # Triage resume: only meaningful when a partial file exists.
            if not os.path.exists(temp_file_name):
                resume = False
            if resume:
                initial_size = get_file_size(temp_file_name)
            else:
                initial_size = 0
            # This should never happen if our functions work properly
            if initial_size > remote_file_size:
                raise RuntimeError(
                    "Local file (%s) is larger than remote "
                    "file (%s), cannot resume download"
                    % (sizeof_fmt(initial_size), sizeof_fmt(remote_file_size))
                )
            # Dispatch on scheme: HTTP(S) vs FTP use different resume logic.
            scheme = urllib.parse.urlparse(url).scheme
            fun = _get_http if scheme in ("http", "https") else _get_ftp
            fun(
                url,
                temp_file_name,
                initial_size,
                remote_file_size,
                verbose,
                progressbar,
                ncols=80,
            )
        # check md5sum
        if hash_ is not None:
            if verbose:
                tqdm.write("Verifying download hash.", file=sys.stdout)
            md5 = md5sum(temp_file_name)
            if hash_ != md5:
                raise RuntimeError(
                    "Hash mismatch for downloaded file %s, "
                    "expected %s but got %s" % (temp_file_name, hash_, md5)
                )
        local_file_size = get_file_size(temp_file_name)
        if local_file_size != remote_file_size:
            # The sentinel default means the server never reported a size,
            # so a mismatch is expected and not an error in that case.
            if remote_file_size != remote_file_size_default:
                raise Exception(
                    "Error: File size is %d and should be %d"
                    "* Please wait some time and try re-downloading the file again."
                    % (local_file_size, remote_file_size)
                )
        # Atomically promote the completed ".part" file to its final name.
        shutil.move(temp_file_name, file_name)
    except Exception as ee:
        raise RuntimeError(
            "Error while fetching file %s."
            " Dataset fetching aborted.\nError: %s" % (url, ee)
        )
def _get_ftp(
    url, temp_file_name, initial_size, file_size, verbose_bool, progressbar, ncols=80
):
    """Safely (resume a) download to a file from FTP."""
    # Adapted from: https://pypi.python.org/pypi/fileDownloader.py
    # but with changes
    parsed_url = urllib.parse.urlparse(url)
    file_name = os.path.basename(parsed_url.path)
    server_path = parsed_url.path.replace(file_name, "")
    unquoted_server_path = urllib.parse.unquote(server_path)
    data = ftplib.FTP()
    # Anonymous login; honour an explicit port when the URL carries one.
    if parsed_url.port is not None:
        data.connect(parsed_url.hostname, parsed_url.port)
    else:
        data.connect(parsed_url.hostname)
    data.login()
    if len(server_path) > 1:
        data.cwd(unquoted_server_path)
    # Binary transfer mode, then restart the transfer at the byte offset
    # already present on disk (REST) so resuming appends, not restarts.
    data.sendcmd("TYPE I")
    data.sendcmd("REST " + str(initial_size))
    down_cmd = "RETR " + file_name
    assert file_size == data.size(file_name)
    if progressbar:
        progress = tqdm(
            total=file_size,
            initial=initial_size,
            desc="file_sizes",
            ncols=ncols,
            unit="B",
            unit_scale=True,
            file=sys.stdout,
        )
    else:
        progress = None
    # Callback lambda function that will be passed the downloaded data
    # chunk and will write it to file and update the progress bar
    mode = "ab" if initial_size > 0 else "wb"
    with open(temp_file_name, mode) as local_file:
        def chunk_write(chunk):
            return _chunk_write(chunk, local_file, progress)
        data.retrbinary(down_cmd, chunk_write)
        data.close()
    if progressbar:
        progress.close()
def _get_http(
    url, temp_file_name, initial_size, file_size, verbose_bool, progressbar, ncols=80
):
    """Safely (resume a) download to a file from http(s)."""
    # Actually do the reading
    req = request_agent(url)
    if initial_size > 0:
        # Ask the server to resume from where the partial file left off.
        req.headers["Range"] = "bytes=%s-" % (initial_size,)
    try:
        response = urllib.request.urlopen(req)
    except Exception:
        # There is a problem that may be due to resuming, some
        # servers may not support the "Range" header. Switch
        # back to complete download method
        tqdm.write(
            "Resuming download failed (server "
            "rejected the request). Attempting to "
            "restart downloading the entire file.",
            file=sys.stdout,
        )
        del req.headers["Range"]
        response = urllib.request.urlopen(req)
    total_size = int(
        response.headers.get("Content-Length", str(remote_file_size_default)).strip()
    )
    if initial_size > 0 and file_size == total_size:
        # Server ignored the Range header and is re-sending the whole file.
        tqdm.write(
            "Resuming download failed (resume file size "
            "mismatch). Attempting to restart downloading the "
            "entire file.",
            file=sys.stdout,
        )
        initial_size = 0
    total_size += initial_size
    if total_size != file_size:
        raise RuntimeError("URL could not be parsed properly")
    mode = "ab" if initial_size > 0 else "wb"
    if progressbar is True:
        progress = tqdm(
            total=total_size,
            initial=initial_size,
            desc="file_sizes",
            ncols=ncols,
            unit="B",
            unit_scale=True,
            file=sys.stdout,
        )
    chunk_size = 8192  # 2 ** 13
    with open(temp_file_name, mode) as local_file:
        while True:
            t0 = time.time()
            chunk = response.read(chunk_size)
            dt = time.time() - t0
            # Adapt chunk size to throughput: grow when reads return fast,
            # shrink (never below 8 KiB) when they are slow.
            if dt < 0.005:
                chunk_size *= 2
            elif dt > 0.1 and chunk_size > 8192:
                chunk_size = chunk_size // 2
            if not chunk:
                break
            local_file.write(chunk)
            if progressbar is True:
                progress.update(len(chunk))
    if progressbar is True:
        progress.close()
def md5sum(fname, block_size=1048576):  # 2 ** 20
    """Calculate the md5sum for a file.

    Reads the file in fixed-size binary blocks so arbitrarily large files
    can be hashed without loading them fully into memory.

    Parameters
    ----------
    fname : str
        Filename.
    block_size : int
        Block size to use when reading.

    Returns
    -------
    hash_ : str
        The hexadecimal digest of the hash.
    """
    # NOTE: `hashlib` must be imported at module level; the original file
    # never imported it, which made this function raise NameError on use.
    md5 = hashlib.md5()
    with open(fname, "rb") as fid:
        # iter() with a b"" sentinel yields blocks until EOF.
        for data in iter(lambda: fid.read(block_size), b""):
            md5.update(data)
    return md5.hexdigest()
def _chunk_write(chunk, local_file, progress):
"""Write a chunk to file and update the progress bar."""
local_file.write(chunk)
if progress is not None:
progress.update(len(chunk))
def sizeof_fmt(num):
    """Turn number of bytes into human-readable str.

    Parameters
    ----------
    num : int
        The number of bytes. Must be non-negative; negative values are
        unsupported and yield None (as in the original implementation).

    Returns
    -------
    size : str
        The size in human-readable format.
    """
    # Guard clauses first: the original checked 0/1 *after* the main branch,
    # and silently fell off the end (returning None) for negative input.
    if num == 0:
        return "0 bytes"
    if num == 1:
        return "1 byte"
    units = ["bytes", "kB", "MB", "GB", "TB", "PB"]
    decimals = [0, 0, 1, 2, 2, 2]
    if num > 1:
        # Pick the largest power of 1024 that fits, capped at PB.
        exponent = min(int(log(num, 1024)), len(units) - 1)
        quotient = float(num) / 1024 ** exponent
        # Build e.g. "{0:.1f} {1}" with the per-unit decimal count.
        format_string = "{0:.%sf} {1}" % (decimals[exponent])
        return format_string.format(quotient, units[exponent])
    # num < 0 (or a fraction below 1): explicitly mirror the original's None.
    return None
class _TempDir(str):
    """Create and auto-destroy temp dir.

    This is designed to be used with testing modules. Instances should be
    defined inside test functions. Instances defined at module level can not
    guarantee proper destruction of the temporary directory.

    When used at module level, the current use of the __del__() method for
    cleanup can fail because the rmtree function may be cleaned up before this
    object (an alternative could be using the atexit module instead).
    """
    def __new__(self):  # noqa: D105
        # The str subclass *is* the directory path; mkdtemp creates it here.
        new = str.__new__(self, tempfile.mkdtemp(prefix="tmp_download_tempdir_"))
        return new
    def __init__(self):  # noqa: D102
        # Keep a plain copy of the path for use during __del__, when the
        # instance itself may be partially torn down.
        self._path = self.__str__()
    def __del__(self):  # noqa: D105
        # Best-effort cleanup; ignore_errors avoids noise at interpreter exit.
        shutil.rmtree(self._path, ignore_errors=True)
def request_agent(url):
    """Build a urllib Request carrying a desktop-browser User-Agent.

    Some hosts refuse requests made with the default Python client string,
    so we spoof a common Chrome/Safari UA instead.
    """
    browser_headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36"
    }
    return urllib.request.Request(url, data=None, headers=browser_headers)
def get_file_size(file_name):
    """Return the size of *file_name* in bytes.

    Uses a single ``stat`` call via :func:`os.path.getsize` instead of the
    original open/seek-to-end/tell dance, which also avoids holding an open
    file handle just to measure its length.
    """
    return os.path.getsize(file_name)
import os.path as op
from subprocess import check_output
from six.moves import urllib
from zipfile import ZipFile
import tarfile
import logging
from math import log, ceil
import time
import sys
import shutil
import tempfile
import ftplib
from functools import partial
from tqdm import tqdm
if sys.version_info[0] == 3:
string_types = str
else:
string_types = basestring
ALLOWED_KINDS = ["file", "tar", "zip", "tar.gz"]
ZIP_KINDS = ["tar", "zip", "tar.gz"]
remote_file_size_default = 1
def download(
url, path, kind="file", progressbar=True, replace=False, timeout=10.0, verbose=True
):
"""Download a URL.
This will download a file and store it in a '~/data/` folder,
creating directories if need be. It will also work for zip
files, in which case it will unzip all of the files to the
desired location.
Parameters
----------
url : string
The url of the file to download. This may be a dropbox
or google drive "share link", or a regular URL. If it
is a share link, then it should point to a single file and
not a folder. To download folders, zip them first.
path : string
The path where the downloaded file will be stored. If ``zipfile``
is True, then this must be a folder into which files will be zipped.
kind : one of ['file', 'zip', 'tar', 'tar.gz']
The kind of file to be downloaded. If not 'file', then the file
contents will be unpackaged according to the kind specified. Package
contents will be placed in ``root_destination/<name>``.
progressbar : bool
Whether to display a progress bar during file download.
replace : bool
If True and the URL points to a single file, overwrite the
old file if possible.
timeout : float
The URL open timeout.
verbose : bool
Whether to print download status to the screen.
Returns
-------
out_path : string
A path to the downloaded file (or folder, in the case of
a zip file).
"""
if kind not in ALLOWED_KINDS:
raise ValueError("`kind` must be one of {}, got {}".format(ALLOWED_KINDS, kind))
# Make sure we have directories to dump files
path = op.expanduser(path)
if len(path) == 0:
raise ValueError("You must specify a path. For current directory use .")
download_url = _convert_url_to_downloadable(url)
if replace is False and op.exists(path):
msg = (
"Replace is False and data exists, so doing nothing. "
"Use replace==True to re-download the data."
)
elif kind in ZIP_KINDS:
# Create new folder for data if we need it
if not op.isdir(path):
if verbose:
tqdm.write("Creating data folder...", file=sys.stdout)
os.makedirs(path)
# Download the file to a temporary folder to unzip
path_temp = _TempDir()
path_temp_file = op.join(path_temp, "tmp.{}".format(kind))
_fetch_file(
download_url,
path_temp_file,
timeout=timeout,
verbose=verbose,
progressbar=progressbar,
)
# Unzip the file to the out path
if verbose:
tqdm.write("Extracting {} file...".format(kind), file=sys.stdout)
if kind == "zip":
zipper = ZipFile
elif kind == "tar":
zipper = tarfile.open
elif kind == "tar.gz":
zipper = partial(tarfile.open, mode="r:gz")
with zipper(path_temp_file) as myobj:
myobj.extractall(path)
msg = "Successfully downloaded / unzipped to {}".format(path)
else:
if not op.isdir(op.dirname(path)):
os.makedirs(op.dirname(path))
_fetch_file(
download_url,
path,
timeout=timeout,
verbose=verbose,
progressbar=progressbar,
)
msg = "Successfully downloaded file to {}".format(path)
if verbose:
tqdm.write(msg, file=sys.stdout)
return path
def _convert_url_to_downloadable(url):
"""Convert a url to the proper style depending on its website."""
if "drive.google.com" in url:
# For future support of google drive
file_id = url.split("d/")[1].split("/")[0]
base_url = "https://drive.google.com/uc?export=download&id="
out = "{}{}".format(base_url, file_id)
elif "dropbox.com" in url:
if url.endswith(".png"):
out = url + "?dl=1"
else:
out = url.replace("dl=0", "dl=1")
elif "github.com" in url:
out = url.replace("github.com", "raw.githubusercontent.com")
out = out.replace("blob/", "")
else:
out = url
return out
def _fetch_file(
url,
file_name,
resume=True,
hash_=None,
timeout=10.0,
progressbar=True,
verbose=True,
):
"""Load requested file, downloading it if needed or requested.
Parameters
----------
url: string
The url of file to be downloaded.
file_name: string
Name, along with the path, of where downloaded file will be saved.
resume: bool, optional
If true, try to resume partially downloaded files.
hash_ : str | None
The hash of the file to check. If None, no checking is
performed.
timeout : float
The URL open timeout.
verbose : bool
Whether to print download status.
"""
# Adapted from NISL and MNE-python:
# https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
# https://martinos.org/mne
if hash_ is not None and (not isinstance(hash_, string_types) or len(hash_) != 32):
raise ValueError(
"Bad hash value given, should be a 32-character " "string:\n%s" % (hash_,)
)
temp_file_name = file_name + ".part"
try:
remote_file_size = remote_file_size_default
if "dropbox.com" in url:
# Use requests to handle cookies.
# XXX In the future, we should probably use requests everywhere.
# Unless we want to minimize dependencies.
try:
import requests
except ModuleNotFoundError:
raise ValueError(
"To download Dropbox links, you need to "
"install the `requests` module."
)
resp = requests.get(url)
chunk_size = 8192 # 2 ** 13
with open(temp_file_name, "wb") as ff:
for chunk in resp.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive new chunks
ff.write(chunk)
else:
# Check file size and displaying it alongside the download url
req = request_agent(url)
u = urllib.request.urlopen(req, timeout=timeout)
u.close()
# this is necessary to follow any redirects
url = u.geturl()
req = request_agent(url)
u = urllib.request.urlopen(req, timeout=timeout)
try:
remote_file_size = int(
u.headers.get(
"Content-Length", str(remote_file_size_default)
).strip()
)
finally:
u.close()
del u
if verbose:
tqdm.write(
"Downloading data from %s (%s)\n"
% (url, sizeof_fmt(remote_file_size)),
file=sys.stdout,
)
# Triage resume
if not os.path.exists(temp_file_name):
resume = False
if resume:
initial_size = get_file_size(temp_file_name)
else:
initial_size = 0
# This should never happen if our functions work properly
if initial_size > remote_file_size:
raise RuntimeError(
"Local file (%s) is larger than remote "
"file (%s), cannot resume download"
% (sizeof_fmt(initial_size), sizeof_fmt(remote_file_size))
)
scheme = urllib.parse.urlparse(url).scheme
fun = _get_http if scheme in ("http", "https") else _get_ftp
fun(
url,
temp_file_name,
initial_size,
remote_file_size,
verbose,
progressbar,
ncols=80,
)
# check md5sum
if hash_ is not None:
if verbose:
tqdm.write("Verifying download hash.", file=sys.stdout)
md5 = md5sum(temp_file_name)
if hash_ != md5:
raise RuntimeError(
"Hash mismatch for downloaded file %s, "
"expected %s but got %s" % (temp_file_name, hash_, md5)
)
local_file_size = get_file_size(temp_file_name)
if local_file_size != remote_file_size:
if remote_file_size != remote_file_size_default:
raise Exception(
"Error: File size is %d and should be %d"
"* Please wait some time and try re-downloading the file again."
% (local_file_size, remote_file_size)
)
shutil.move(temp_file_name, file_name)
except Exception as ee:
raise RuntimeError(
"Error while fetching file %s."
" Dataset fetching aborted.\nError: %s" % (url, ee)
)
def _get_ftp(
url, temp_file_name, initial_size, file_size, verbose_bool, progressbar, ncols=80
):
"""Safely (resume a) download to a file from FTP."""
# Adapted from: https://pypi.python.org/pypi/fileDownloader.py
# but with changes
parsed_url = urllib.parse.urlparse(url)
file_name = os.path.basename(parsed_url.path)
server_path = parsed_url.path.replace(file_name, "")
unquoted_server_path = urllib.parse.unquote(server_path)
data = ftplib.FTP()
if parsed_url.port is not None:
data.connect(parsed_url.hostname, parsed_url.port)
else:
data.connect(parsed_url.hostname)
data.login()
if len(server_path) > 1:
data.cwd(unquoted_server_path)
data.sendcmd("TYPE I")
data.sendcmd("REST " + str(initial_size))
down_cmd = "RETR " + file_name
assert file_size == data.size(file_name)
if progressbar:
progress = tqdm(
total=file_size,
initial=initial_size,
desc="file_sizes",
ncols=ncols,
unit="B",
unit_scale=True,
file=sys.stdout,
)
else:
progress = None
# Callback lambda function that will be passed the downloaded data
# chunk and will write it to file and update the progress bar
mode = "ab" if initial_size > 0 else "wb"
with open(temp_file_name, mode) as local_file:
def chunk_write(chunk):
return _chunk_write(chunk, local_file, progress)
data.retrbinary(down_cmd, chunk_write)
data.close()
if progressbar:
progress.close()
def _get_http(
url, temp_file_name, initial_size, file_size, verbose_bool, progressbar, ncols=80
):
"""Safely (resume a) download to a file from http(s)."""
# Actually do the reading
req = request_agent(url)
if initial_size > 0:
req.headers["Range"] = "bytes=%s-" % (initial_size,)
try:
response = urllib.request.urlopen(req)
except Exception:
# There is a problem that may be due to resuming, some
# servers may not support the "Range" header. Switch
# back to complete download method
tqdm.write(
"Resuming download failed (server "
"rejected the request). Attempting to "
"restart downloading the entire file.",
file=sys.stdout,
)
del req.headers["Range"]
response = urllib.request.urlopen(req)
total_size = int(
response.headers.get("Content-Length", str(remote_file_size_default)).strip()
)
if initial_size > 0 and file_size == total_size:
tqdm.write(
"Resuming download failed (resume file size "
"mismatch). Attempting to restart downloading the "
"entire file.",
file=sys.stdout,
)
initial_size = 0
total_size += initial_size
if total_size != file_size:
raise RuntimeError("URL could not be parsed properly")
mode = "ab" if initial_size > 0 else "wb"
if progressbar is True:
progress = tqdm(
total=total_size,
initial=initial_size,
desc="file_sizes",
ncols=ncols,
unit="B",
unit_scale=True,
file=sys.stdout,
)
chunk_size = 8192 # 2 ** 13
with open(temp_file_name, mode) as local_file:
while True:
t0 = time.time()
chunk = response.read(chunk_size)
dt = time.time() - t0
if dt < 0.005:
chunk_size *= 2
elif dt > 0.1 and chunk_size > 8192:
chunk_size = chunk_size // 2
if not chunk:
break
local_file.write(chunk)
if progressbar is True:
progress.update(len(chunk))
if progressbar is True:
progress.close()
def md5sum(fname, block_size=1048576): # 2 ** 20
"""Calculate the md5sum for a file.
Parameters
----------
fname : str
Filename.
block_size : int
Block size to use when reading.
Returns
-------
hash_ : str
The hexadecimal digest of the hash.
"""
md5 = hashlib.md5()
with open(fname, "rb") as fid:
while True:
data = fid.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
def _chunk_write(chunk, local_file, progress):
"""Write a chunk to file and update the progress bar."""
local_file.write(chunk)
if progress is not None:
progress.update(len(chunk))
def sizeof_fmt(num):
"""Turn number of bytes into human-readable str.
Parameters
----------
num : int
The number of bytes.
Returns
-------
size : str
The size in human-readable format.
"""
units = ["bytes", "kB", "MB", "GB", "TB", "PB"]
decimals = [0, 0, 1, 2, 2, 2]
if num > 1:
exponent = min(int(log(num, 1024)), len(units) - 1)
quotient = float(num) / 1024 ** exponent
unit = units[exponent]
num_decimals = decimals[exponent]
format_string = "{0:.%sf} {1}" % (num_decimals)
return format_string.format(quotient, unit)
if num == 0:
return "0 bytes"
if num == 1:
return "1 byte"
class _TempDir(str):
"""Create and auto-destroy temp dir.
This is designed to be used with testing modules. Instances should be
defined inside test functions. Instances defined at module level can not
guarantee proper destruction of the temporary directory.
When used at module level, the current use of the __del__() method for
cleanup can fail because the rmtree function may be cleaned up before this
object (an alternative could be using the atexit module instead).
"""
def __new__(self): # noqa: D105
new = str.__new__(self, tempfile.mkdtemp(prefix="tmp_download_tempdir_"))
return new
def __init__(self): # noqa: D102
self._path = self.__str__()
def __del__(self): # noqa: D105
shutil.rmtree(self._path, ignore_errors=True)
def request_agent(url):
req = urllib.request.Request(
url,
data=None,
# Simulate a user-agent because some websites require it for this to work
headers={
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36"
},
)
return req
def get_file_size(file_name):
with open(file_name, "rb", buffering=0) as local_file:
local_file.seek(0, 2) # move the cursor to the end of the file
local_file_size = local_file.tell()
del local_file
return local_file_size | 0.451327 | 0.22595 |
import os
import sys
import pandas as pd
import numpy as np
from sklearn.decomposition import IncrementalPCA, PCA
import random
import pickle
import matplotlib.pyplot as plt
import time
from multiprocessing import Process, Manager
import subprocess
# input_path = "/shares/perception-temp/voxceleb2/facemesh/train/"
# output_path = "/shares/perception-working/minh/transformer_data/facemesh_pca6/train/"
input_path, output_path = sys.argv[1], sys.argv[2]
model_path = "../data/pca_6.pkl"
pca_model = pickle.load(open(model_path, 'rb'))
def rescale(landmarks):
    """Normalize facemesh landmark frames.

    Each row of *landmarks* holds 468 x-, then 468 y-, then 468 z-coordinates.
    Per frame: the z-coordinates are mean-centred, and x/y are scaled by the
    inverse of the y-extent and flipped in sign. Returns an array with the
    same (frames, 1404) layout.

    NOTE: the z-centering is done in place on the caller's array.
    """
    n_pts = 468
    xs = landmarks[:, 0:n_pts]
    ys = landmarks[:, n_pts:2 * n_pts]
    zs = landmarks[:, 2 * n_pts:3 * n_pts]
    out_x, out_y, out_z = [], [], []
    for row in range(xs.shape[0]):
        zs[row] -= zs[row].mean()  # in-place, mutates `landmarks`
        row_x, row_y, row_z = xs[row], ys[row], zs[row]
        # Normalise by the vertical extent of the face in this frame.
        scale = 1.0 / (max(row_y) - min(row_y))
        out_x.append(row_x * scale * (-1))
        out_y.append(row_y * scale * (-1))
        out_z.append(row_z)
    return np.concatenate(
        (np.array(out_x), np.array(out_y), np.array(out_z)), axis=1)
def convert(pca_model, file_paths):
    """Project each landmarks CSV through *pca_model* and write the result.

    Output goes to the module-level ``output_path`` under the same base
    file name. CSVs are assumed to have no header row.
    """
    for file in file_paths:
        # Rescale the raw landmark frames before projecting.
        data = rescale(pd.read_csv(file, header=None).values)
        transformed_data = pca_model.transform(data)
        # NOTE(review): assumes '/'-separated paths — verify on Windows.
        file_out = os.path.join(output_path, file.split('/')[-1])
        pd.DataFrame(transformed_data).to_csv(file_out, header=None, index=False)
def convert_in_parallel(concurreny_count, files, fn):
    """Split *files* into ``concurreny_count`` chunks and run *fn* on each
    chunk in its own process, blocking until all processes finish.

    NOTE(review): *fn* is always invoked as ``fn(pca_model, chunk)`` with
    the module-level ``pca_model``, so despite the generic signature this
    helper is specific to `convert`-style callables.
    """
    Processes = []
    # Evenly sized chunks; integer division leaves a remainder handled below.
    files_ = [files[(i * (len(files)//concurreny_count)):((i+1) * (len(files)//concurreny_count))]
              for i in range(concurreny_count)]
    # Distribute the remainder one file per chunk
    # (len(leftovers) < concurreny_count always holds).
    leftovers = files[(concurreny_count * (len(files)//concurreny_count)): len(files)]
    for i in range(len(leftovers)):
        files_[i] += [leftovers[i]]
    for files_list_ in files_:
        p = Process(target=fn, args=(pca_model, files_list_))
        Processes.append(p)
        p.start()
    # block until all the threads finish (i.e. block until all function_x calls finish)
    for t in Processes:
        t.join()
file_list = os.listdir(input_path)
convert_in_parallel(200, file_list, convert) | bert/preprocess/convert_pca.py | import os
import sys
import pandas as pd
import numpy as np
from sklearn.decomposition import IncrementalPCA, PCA
import random
import pickle
import matplotlib.pyplot as plt
import time
from multiprocessing import Process, Manager
import subprocess
# input_path = "/shares/perception-temp/voxceleb2/facemesh/train/"
# output_path = "/shares/perception-working/minh/transformer_data/facemesh_pca6/train/"
input_path, output_path = sys.argv[1], sys.argv[2]
model_path = "../data/pca_6.pkl"
pca_model = pickle.load(open(model_path, 'rb'))
def rescale(landmarks):
x = landmarks[:, 0:468]
y = landmarks[:, 468:468*2]
z = landmarks[:, 468*2:468*3]
processed_x = []
processed_y = []
processed_z = []
for i in range(x.shape[0]):
z[i] -= z[i].mean()
current_x, current_y, current_z = x[i], y[i], z[i]
scale = 1.0 / (max(current_y)-min(current_y))
current_x = current_x * scale * (-1)
current_y = current_y * scale * (-1)
# plt.scatter(current_x, current_y, s = 4)
# plt.show()
processed_x.append(current_x)
processed_y.append(current_y)
processed_z.append(current_z)
return np.concatenate((np.array(processed_x), np.array(processed_y), np.array(processed_z)), axis=1)
def convert(pca_model, file_paths):
for file in file_paths:
data = rescale(pd.read_csv(file, header=None).values)
transformed_data = pca_model.transform(data)
file_out = os.path.join(output_path, file.split('/')[-1])
pd.DataFrame(transformed_data).to_csv(file_out, header=None, index=False)
def convert_in_parallel(concurreny_count, files, fn):
Processes = []
files_ = [files[(i * (len(files)//concurreny_count)):((i+1) * (len(files)//concurreny_count))]
for i in range(concurreny_count)]
leftovers = files[(concurreny_count * (len(files)//concurreny_count)): len(files)]
for i in range(len(leftovers)):
files_[i] += [leftovers[i]]
for files_list_ in files_:
p = Process(target=fn, args=(pca_model, files_list_))
Processes.append(p)
p.start()
# block until all the threads finish (i.e. block until all function_x calls finish)
for t in Processes:
t.join()
file_list = os.listdir(input_path)
convert_in_parallel(200, file_list, convert) | 0.252016 | 0.189427 |
import dbutils
import htmlutils
import page.utils
def renderSearch(req, db, user):
    """Render the review search page.

    Shows the search form and, when at least one filter parameter was
    supplied, a table of matching reviews below it.  Filters (summary
    words, branch-name glob, owner, touched path) are combined with AND.
    """
    summary_value = req.getParameter("summary", None)
    summary_mode_value = req.getParameter("summarymode", None)
    branch_value = req.getParameter("branch", None)
    owner_value = req.getParameter("owner", None)
    path_value = req.getParameter("path", None)
    document = htmlutils.Document(req)
    document.setTitle("Search")
    html = document.html()
    head = html.head()
    body = html.body()
    page.utils.generateHeader(body, db, user, current_page="search")
    document.addExternalStylesheet("resource/search.css")
    document.addExternalScript("resource/search.js")
    document.addInternalScript(user.getJS())
    # Auto-completion data for the owner field: every user who owns at
    # least one review.
    cursor = db.cursor()
    cursor.execute("SELECT DISTINCT name, fullname FROM users JOIN reviewusers ON (reviewusers.uid=users.id) WHERE reviewusers.owner")
    users = [("{ label: %s, value: %s }" % (htmlutils.jsify("%s (%s)" % (fullname, name)),
                                            htmlutils.jsify(name)))
             for name, fullname in cursor]
    document.addInternalScript("var users = [ %s ];" % ", ".join(users))
    search = page.utils.PaleYellowTable(body, "Search")
    def renderSummary(target):
        target.input(name="summary", value=summary_value or "")
        # NOTE(review): the select is named "summary_mode" while the request
        # parameter read above is "summarymode" -- presumably search.js maps
        # between the two; confirm.
        summary_mode = target.select(name="summary_mode")
        summary_mode.option(value="all", selected="selected" if summary_mode_value == "all" else None).text("All words")
        summary_mode.option(value="any", selected="selected" if summary_mode_value == "any" else None).text("Any word")
    def renderBranch(target):
        target.input(name="branch", value=branch_value or "")
    def renderOwner(target):
        target.input(name="owner", value=owner_value or "")
    def renderPath(target):
        target.input(name="path", value=path_value or "")
    def renderButton(target):
        target.button(onclick="search();").text("Search")
    search.addItem("Summary", renderSummary, "Words occurring in the review's summary.")
    search.addItem("Branch", renderBranch, "Name of review branch.")
    search.addItem("Owner", renderOwner, "Owner of the review.")
    search.addItem("Path", renderPath, "Path (file or directory) that the review contains changes in.")
    search.addCentered(renderButton)
    if summary_value is not None: summary_value = summary_value.strip()
    if branch_value is not None: branch_value = branch_value.strip()
    if owner_value is not None: owner_value = owner_value.strip()
    if path_value is not None: path_value = path_value.strip()
    if summary_value or branch_value or owner_value or path_value:
        # Build the filtered query; every active filter contributes a JOINed
        # table and/or a parameterized WHERE condition.  (A dead, unused
        # initial `query = ...` template assignment was removed here -- it
        # was unconditionally overwritten below before use.)
        tables = ["reviews", "branches ON (branches.id=reviews.branch)"]
        conditions = []
        arguments = []
        if summary_value:
            words = summary_value.split()
            operator = " AND " if summary_mode_value == "all" else " OR "
            conditions.append("(%s)" % operator.join(["reviews.summary ~* %s"] * len(words)))
            # \m / \M are PostgreSQL regex word-boundary markers.
            arguments.extend([".*\\m" + word + "\\M.*" for word in words])
        if branch_value:
            def globToSQLPattern(glob):
                # Translate a shell-style glob into a SQL LIKE pattern and
                # widen it into a substring match.
                pattern = glob.replace("\\", "\\\\").replace("%", "\\%").replace("?", "_").replace("*", "%")
                if pattern[0] != "%": pattern = "%" + pattern
                if pattern[-1] != "%": pattern = pattern + "%"
                return pattern
            conditions.append("branches.name LIKE %s")
            arguments.append(globToSQLPattern(branch_value))
        if owner_value:
            owner = dbutils.User.fromName(db, owner_value)
            tables.append("reviewusers ON (reviewusers.review=reviews.id)")
            conditions.append("reviewusers.uid=%s")
            conditions.append("reviewusers.owner")
            arguments.append(owner.id)
        if path_value:
            file_ids = dbutils.contained_files(db, dbutils.find_directory(db, path_value))
            if path_value[-1] != '/':
                file_ids.append(dbutils.find_file(db, path_value))
            tables.append("reviewfiles ON (reviewfiles.review=reviews.id)")
            conditions.append("reviewfiles.file=ANY (%s)")
            arguments.append(file_ids)
        query = """SELECT DISTINCT reviews.id, reviews.summary, branches.name
                     FROM %s
                    WHERE %s
                 ORDER BY reviews.id""" % (" JOIN ".join(tables), " AND ".join(conditions))
        cursor.execute(query, arguments)
        table = body.div("main").table("paleyellow reviews", align="center")
        table.col(width="20%")
        table.col(width="80%")
        header = table.tr().td("h1", colspan=4).h1()
        header.text("Reviews")
        for review_id, summary, branch_name in cursor:
            row = table.tr("review")
            row.td("name").text(branch_name)
            row.td("title").a(href="r/%d" % review_id).text(summary)
return document | page/search.py |
import dbutils
import htmlutils
import page.utils
def renderSearch(req, db, user):
summary_value = req.getParameter("summary", None)
summary_mode_value = req.getParameter("summarymode", None)
branch_value = req.getParameter("branch", None)
owner_value = req.getParameter("owner", None)
path_value = req.getParameter("path", None)
document = htmlutils.Document(req)
document.setTitle("Search")
html = document.html()
head = html.head()
body = html.body()
page.utils.generateHeader(body, db, user, current_page="search")
document.addExternalStylesheet("resource/search.css")
document.addExternalScript("resource/search.js")
document.addInternalScript(user.getJS())
cursor = db.cursor()
cursor.execute("SELECT DISTINCT name, fullname FROM users JOIN reviewusers ON (reviewusers.uid=users.id) WHERE reviewusers.owner")
users = [("{ label: %s, value: %s }" % (htmlutils.jsify("%s (%s)" % (fullname, name)),
htmlutils.jsify(name)))
for name, fullname in cursor]
document.addInternalScript("var users = [ %s ];" % ", ".join(users))
search = page.utils.PaleYellowTable(body, "Search")
def renderSummary(target):
target.input(name="summary", value=summary_value or "")
summary_mode = target.select(name="summary_mode")
summary_mode.option(value="all", selected="selected" if summary_mode_value == "all" else None).text("All words")
summary_mode.option(value="any", selected="selected" if summary_mode_value == "any" else None).text("Any word")
def renderBranch(target):
target.input(name="branch", value=branch_value or "")
def renderOwner(target):
target.input(name="owner", value=owner_value or "")
def renderPath(target):
target.input(name="path", value=path_value or "")
def renderButton(target):
target.button(onclick="search();").text("Search")
search.addItem("Summary", renderSummary, "Words occurring in the review's summary.")
search.addItem("Branch", renderBranch, "Name of review branch.")
search.addItem("Owner", renderOwner, "Owner of the review.")
search.addItem("Path", renderPath, "Path (file or directory) that the review contains changes in.")
search.addCentered(renderButton)
if summary_value is not None: summary_value = summary_value.strip()
if branch_value is not None: branch_value = branch_value.strip()
if owner_value is not None: owner_value = owner_value.strip()
if path_value is not None: path_value = path_value.strip()
if summary_value or branch_value or owner_value or path_value:
query = """SELECT DISTINCT reviews.id, reviews.summary, branches.name
FROM %s
WHERE %s"""
tables = ["reviews", "branches ON (branches.id=reviews.branch)"]
conditions = []
arguments = []
if summary_value:
words = summary_value.split()
operator = " AND " if summary_mode_value == "all" else " OR "
conditions.append("(%s)" % operator.join(["reviews.summary ~* %s"] * len(words)))
arguments.extend([".*\\m" + word + "\\M.*" for word in words])
if branch_value:
def globToSQLPattern(glob):
pattern = glob.replace("\\", "\\\\").replace("%", "\\%").replace("?", "_").replace("*", "%")
if pattern[0] != "%": pattern = "%" + pattern
if pattern[-1] != "%": pattern = pattern + "%"
return pattern
conditions.append("branches.name LIKE %s")
arguments.append(globToSQLPattern(branch_value))
if owner_value:
owner = dbutils.User.fromName(db, owner_value)
tables.append("reviewusers ON (reviewusers.review=reviews.id)")
conditions.append("reviewusers.uid=%s")
conditions.append("reviewusers.owner")
arguments.append(owner.id)
if path_value:
file_ids = dbutils.contained_files(db, dbutils.find_directory(db, path_value))
if path_value[-1] != '/':
file_ids.append(dbutils.find_file(db, path_value))
tables.append("reviewfiles ON (reviewfiles.review=reviews.id)")
conditions.append("reviewfiles.file=ANY (%s)")
arguments.append(file_ids)
query = """SELECT DISTINCT reviews.id, reviews.summary, branches.name
FROM %s
WHERE %s
ORDER BY reviews.id""" % (" JOIN ".join(tables), " AND ".join(conditions))
cursor.execute(query, arguments)
table = body.div("main").table("paleyellow reviews", align="center")
table.col(width="20%")
table.col(width="80%")
header = table.tr().td("h1", colspan=4).h1()
header.text("Reviews")
for review_id, summary, branch_name in cursor:
row = table.tr("review")
row.td("name").text(branch_name)
row.td("title").a(href="r/%d" % review_id).text(summary)
return document | 0.352425 | 0.064212 |
import discord
import os
import traceback
import sys
from discord.ext import commands
from utils import usefull
# Based off of https://github.com/AlexFlipnote/discord_bot.py/blob/master/utils/permissions.py
# Bug fix: os.getenv returns a string (or None), but this list is compared
# against integer discord user ids (ctx.author.id / member.id) throughout
# this module, so the owner check could never match.  Parse to int and drop
# the entry entirely when OWNER_ID is unset or malformed.
# NOTE(review): discord.py user ids are ints -- confirm no caller relies on
# string comparison.
_owner_id = os.getenv("OWNER_ID")
owners = [int(_owner_id)] if _owner_id and _owner_id.isdigit() else []
c = usefull.colors
def is_owner(ctx):
    """Return True when the invoking user is one of the bot owners."""
    return any(owner == ctx.author.id for owner in owners)
# Custom errors
class AuthorLacksPermssions(commands.CheckFailure):
    """Raised when the author of a command does not have sufficient permissions to execute it."""
    def __init__(self, missing_perms, *args):
        # Raw permission names, kept for programmatic error handlers.
        self.missing_perms = missing_perms
        # Humanise the names: e.g. "manage_guild" -> "Manage Server".
        missing = [perm.replace('_', ' ').replace('guild', 'server').title() for perm in missing_perms]
        if len(missing) > 2:
            # Oxford-comma join for three or more permissions.
            fmt = '{}, and {}'.format(", ".join(missing[:-1]), missing[-1])
        else:
            fmt = ' and '.join(missing)
        message = 'Author requires {} permission(s) to run this command.'.format(fmt)
        super().__init__(message, *args)
class BotLacksPermssions(commands.CheckFailure):
    """Raised when the bot does not have sufficient permissions to execute the command."""
    def __init__(self, missing_perms, *args):
        # Raw permission names, kept for programmatic error handlers.
        self.missing_perms = missing_perms
        # Humanise the names: e.g. "manage_guild" -> "Manage Server".
        missing = [perm.replace('_', ' ').replace('guild', 'server').title() for perm in missing_perms]
        if len(missing) > 2:
            # Oxford-comma join for three or more permissions.
            fmt = '{}, and {}'.format(", ".join(missing[:-1]), missing[-1])
        else:
            fmt = ' and '.join(missing)
        message = 'Bot requires {} permission(s) to run this command.'.format(fmt)
        super().__init__(message, *args)
# author checks
async def check_author_permissions(ctx, perms, *, check=all):
    """Return True when the command author satisfies ``perms``.

    Bot owners bypass the check entirely.  ``perms`` maps permission names
    to required boolean values; ``check`` decides whether all or any of
    them must match.
    """
    if any(owner == ctx.author.id for owner in owners):
        return True
    author_perms = ctx.channel.permissions_for(ctx.author)
    return check(
        getattr(author_perms, perm_name, None) == wanted
        for perm_name, wanted in perms.items()
    )
# decorator
def author_has_permissions(*, check=all, **perms):
    """Command decorator: raise AuthorLacksPermssions unless the author has ``perms``."""
    async def predicate(ctx):
        allowed = await check_author_permissions(ctx, perms, check=check)
        if allowed:
            return True
        raise AuthorLacksPermssions(perms)
    return commands.check(predicate)
# Bot checks
async def check_bot_permissions(ctx, perms, *, check=all):
    """Return True when the bot itself satisfies ``perms`` in this channel.

    ``perms`` maps permission names to required boolean values; ``check``
    decides whether all or any of them must match.
    """
    bot_perms = ctx.channel.permissions_for(ctx.me)
    return check(
        getattr(bot_perms, perm_name, None) == wanted
        for perm_name, wanted in perms.items()
    )
# decorator
def bot_has_permissions(*, check=all, **perms):
    """Command decorator: raise BotLacksPermssions unless the bot has ``perms``."""
    async def predicate(ctx):
        allowed = await check_bot_permissions(ctx, perms, check=check)
        if allowed:
            return True
        raise BotLacksPermssions(perms)
    return commands.check(predicate)
async def check_priv(ctx, member):
    """Moderation hierarchy check.

    Returns True when the action must be BLOCKED (an explanatory message has
    already been sent to the channel), False when the author may punish
    ``member``.

    NOTE(review): on an unexpected exception this function falls through and
    implicitly returns None (falsy), i.e. the action is allowed -- confirm
    that best-effort behaviour is intended.
    NOTE(review): ensure ``owners`` entries have the same type as
    ``member.id`` (int), otherwise the owner checks never match.
    """
    try:
        # Self checks
        if member == ctx.author:
            await ctx.error(ctx.strings.ERR_MOD_CANNOT_PUNNISH_SELF.format(ctx))
            return True
        if member.id == ctx.bot.user.id:
            await ctx.error(ctx.strings.ERR_MOD_CANNOT_PUNNISH_BOT.format(ctx))
            return True
        # Check if the bot can do stuff: the bot must sit strictly above the
        # target in the role hierarchy.
        if ctx.guild.me.top_role == member.top_role:
            await ctx.error(ctx.strings.ERR_MOD_CANNOT_PUNNISH_BOT_EQUAL.format(ctx))
            return True
        if ctx.guild.me.top_role < member.top_role:
            await ctx.error(ctx.strings.ERR_MOD_CANNOT_PUNNISH_BOT_HIGHER.format(ctx))
            return True
        # Now permission check: bot owners can never be punished.
        if member.id in owners:
            await ctx.error(ctx.strings.ERR_MOD_CANNOT_PUNNISH_BOT_OWNER.format(ctx))
            return True
        # Check if user bypasses: the guild owner and bot owners may punish
        # anyone below this point.
        if ctx.author.id == ctx.guild.owner_id:
            return False
        if ctx.author.id in owners:
            return False
        if member.id == ctx.guild.owner.id:
            await ctx.error(ctx.strings.ERR_MOD_CANNOT_PUNNISH_GUILD_OWNER.format(ctx))
            return True
        # Finally: the author must sit strictly above the target.
        if ctx.author.top_role == member.top_role:
            await ctx.error(ctx.strings.ERR_MOD_CANNOT_PUNNISH_AUTHOR_EQUAL.format(ctx))
            return True
        if ctx.author.top_role < member.top_role:
            await ctx.error(ctx.strings.ERR_MOD_CANNOT_PUNNISH_AUTHOR_HIGHER.format(ctx))
            return True
        return False
    except Exception as err:
        if (ctx.bot.dev):
            tracebackString = "".join(traceback.format_exception(type(err), err, err.__traceback__))
            await ctx.warning(f"Ignoring exception in command {ctx.command}:")
            await ctx.send(f"```py\n{tracebackString}\n```")
        else:
            print(f"{c.FAIL}Error checking privilages:{c.END} {err}")
            traceback.print_exception(type(err), err, err.__traceback__, file=sys.stderr)
def can_send(ctx):
    """Whether the bot may send messages in this channel (DMs always allow it)."""
    if isinstance(ctx.channel, discord.DMChannel):
        return True
    return ctx.channel.permissions_for(ctx.guild.me).send_messages
def can_embed(ctx):
    """Whether the bot may post embeds in this channel (DMs always allow it)."""
    if isinstance(ctx.channel, discord.DMChannel):
        return True
    return ctx.channel.permissions_for(ctx.guild.me).embed_links
def can_upload(ctx):
    """Whether the bot may attach files in this channel (DMs always allow it)."""
    if isinstance(ctx.channel, discord.DMChannel):
        return True
    return ctx.channel.permissions_for(ctx.guild.me).attach_files
def can_react(ctx):
    """Whether the bot may add reactions in this channel (DMs always allow it)."""
    if isinstance(ctx.channel, discord.DMChannel):
        return True
    return ctx.channel.permissions_for(ctx.guild.me).add_reactions
def is_nsfw(ctx):
return isinstance(ctx.channel, discord.DMChannel) or ctx.channel.is_nsfw() | utils/permissions.py | import discord
import os
import traceback
import sys
from discord.ext import commands
from utils import usefull
# Based off of https://github.com/AlexFlipnote/discord_bot.py/blob/master/utils/permissions.py
owners = [os.getenv("OWNER_ID")]
c = usefull.colors
def is_owner(ctx):
return ctx.author.id in owners
# Custom errors
class AuthorLacksPermssions(commands.CheckFailure):
"""Raised when the author of a command does not have sufficent permissions to execute it"""
def __init__(self, missing_perms, *args):
self.missing_perms = missing_perms
missing = [perm.replace('_', ' ').replace('guild', 'server').title() for perm in missing_perms]
if len(missing) > 2:
fmt = '{}, and {}'.format(", ".join(missing[:-1]), missing[-1])
else:
fmt = ' and '.join(missing)
message = 'Author requires {} permission(s) to run this command.'.format(fmt)
super().__init__(message, *args)
class BotLacksPermssions(commands.CheckFailure):
"""Raised when the bot does not have sufficent permissions to execute the command"""
def __init__(self, missing_perms, *args):
self.missing_perms = missing_perms
missing = [perm.replace('_', ' ').replace('guild', 'server').title() for perm in missing_perms]
if len(missing) > 2:
fmt = '{}, and {}'.format(", ".join(missing[:-1]), missing[-1])
else:
fmt = ' and '.join(missing)
message = 'Bot requires {} permission(s) to run this command.'.format(fmt)
super().__init__(message, *args)
# author checks
async def check_author_permissions(ctx, perms, *, check=all):
if ctx.author.id in owners:
return True
resolved = ctx.channel.permissions_for(ctx.author)
return check(getattr(resolved, name, None) == value for name, value in perms.items())
# decorator
def author_has_permissions(*, check=all, **perms):
async def pred(ctx):
if (not await check_author_permissions(ctx, perms, check=check)):
raise AuthorLacksPermssions(perms)
return True
return commands.check(pred)
# Bot checks
async def check_bot_permissions(ctx, perms, *, check=all):
resolved = ctx.channel.permissions_for(ctx.me)
return check(getattr(resolved, name, None) == value for name, value in perms.items())
# decorator
def bot_has_permissions(*, check=all, **perms):
async def pred(ctx):
if (not await check_bot_permissions(ctx, perms, check=check)):
raise BotLacksPermssions(perms)
return True
return commands.check(pred)
async def check_priv(ctx, member):
try:
# Self checks
if member == ctx.author:
await ctx.error(ctx.strings.ERR_MOD_CANNOT_PUNNISH_SELF.format(ctx))
return True
if member.id == ctx.bot.user.id:
await ctx.error(ctx.strings.ERR_MOD_CANNOT_PUNNISH_BOT.format(ctx))
return True
# Check if the bot can do stuff
if ctx.guild.me.top_role == member.top_role:
await ctx.error(ctx.strings.ERR_MOD_CANNOT_PUNNISH_BOT_EQUAL.format(ctx))
return True
if ctx.guild.me.top_role < member.top_role:
await ctx.error(ctx.strings.ERR_MOD_CANNOT_PUNNISH_BOT_HIGHER.format(ctx))
return True
# Now permission check
if member.id in owners:
await ctx.error(ctx.strings.ERR_MOD_CANNOT_PUNNISH_BOT_OWNER.format(ctx))
return True
# Check if user bypasses
if ctx.author.id == ctx.guild.owner_id:
return False
if ctx.author.id in owners:
return False
if member.id == ctx.guild.owner.id:
await ctx.error(ctx.strings.ERR_MOD_CANNOT_PUNNISH_GUILD_OWNER.format(ctx))
return True
if ctx.author.top_role == member.top_role:
await ctx.error(ctx.strings.ERR_MOD_CANNOT_PUNNISH_AUTHOR_EQUAL.format(ctx))
return True
if ctx.author.top_role < member.top_role:
await ctx.error(ctx.strings.ERR_MOD_CANNOT_PUNNISH_AUTHOR_HIGHER.format(ctx))
return True
return False
except Exception as err:
if (ctx.bot.dev):
tracebackString = "".join(traceback.format_exception(type(err), err, err.__traceback__))
await ctx.warning(f"Ignoring exception in command {ctx.command}:")
await ctx.send(f"```py\n{tracebackString}\n```")
else:
print(f"{c.FAIL}Error checking privilages:{c.END} {err}")
traceback.print_exception(type(err), err, err.__traceback__, file=sys.stderr)
def can_send(ctx):
return isinstance(ctx.channel, discord.DMChannel) or ctx.channel.permissions_for(ctx.guild.me).send_messages
def can_embed(ctx):
return isinstance(ctx.channel, discord.DMChannel) or ctx.channel.permissions_for(ctx.guild.me).embed_links
def can_upload(ctx):
return isinstance(ctx.channel, discord.DMChannel) or ctx.channel.permissions_for(ctx.guild.me).attach_files
def can_react(ctx):
return isinstance(ctx.channel, discord.DMChannel) or ctx.channel.permissions_for(ctx.guild.me).add_reactions
def is_nsfw(ctx):
return isinstance(ctx.channel, discord.DMChannel) or ctx.channel.is_nsfw() | 0.371707 | 0.258891 |
import json
import sys
import unittest
from contextlib import contextmanager
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
from mock import patch
from auth0_client.Auth0Client import Auth0Client as class_to_test
@contextmanager
def captured_output():
    """Temporarily replace sys.stdout/sys.stderr with StringIO buffers.

    Yields the (stdout, stderr) buffers; the real streams are restored on
    exit even if the body raises.
    """
    captured_out, captured_err = StringIO(), StringIO()
    saved_out, saved_err = sys.stdout, sys.stderr
    sys.stdout, sys.stderr = captured_out, captured_err
    try:
        yield captured_out, captured_err
    finally:
        sys.stdout, sys.stderr = saved_out, saved_err
class TestRulesConfig(unittest.TestCase):
    """
    Tests for the rules-config commands of Auth0Client.

    The management-API layer is patched out; each test only verifies that
    the client JSON-serialises whatever the API layer returned.
    """
    @staticmethod
    def _make_client():
        # Shared fixture: an Auth0Client wired with dummy credentials
        # (previously copy-pasted into every test method).
        config_dict = {
            'debug': False,
            'domain': 'test',
            'client_id': 'id',
            'client_secret': 'secret',
        }
        return class_to_test(config_dict)
    @patch('sys.exit')
    @patch('auth0_client.v3.management.rules_configs.RulesConfigs.all')
    def test_list_config_variable_keys_for_rules(self, rules, exit):
        rules.return_value = '123'
        exit.return_value = None
        client = self._make_client()
        real_results = client.list_config_variable_keys_for_rules()
        self.assertEqual('"123"', real_results)
    @patch('sys.exit')
    @patch('auth0_client.v3.management.rules_configs.RulesConfigs.remove')
    def test_remove_rules_config_for_given_key(self, rules, exit):
        rules.return_value = '123'
        exit.return_value = None
        client = self._make_client()
        real_results = client.remove_rules_config_for_given_key(
            key='123'
        )
        self.assertEqual('"123"', real_results)
    @patch('sys.exit')
    @patch('auth0_client.v3.management.rules_configs.RulesConfigs.set_rule_for_key')
    def test_set_the_rules_config_for_a_given_key(self, rules, exit):
        rules.return_value = '123'
        exit.return_value = None
        client = self._make_client()
        real_results = client.set_the_rules_config_for_a_given_key(
            key='123',
            body='{"123":"xxx"}'
        )
self.assertEqual('"123"', real_results) | test/test_rules_config.py | import json
import sys
import unittest
from contextlib import contextmanager
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
from mock import patch
from auth0_client.Auth0Client import Auth0Client as class_to_test
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
class TestRulesConfig(unittest.TestCase):
"""
Test command class
"""
@patch('sys.exit')
@patch('auth0_client.v3.management.rules_configs.RulesConfigs.all')
def test_list_config_variable_keys_for_rules(self, rules, exit):
rules.return_value='123'
debug = False
exit.return_value=None
config_dict = {}
config_dict['debug'] = debug
config_dict['domain'] = 'test'
config_dict['client_id'] = 'id'
config_dict['client_secret'] = 'secret'
client= class_to_test(config_dict)
real_results = client.list_config_variable_keys_for_rules(
)
self.assertEqual('"123"', real_results)
@patch('sys.exit')
@patch('auth0_client.v3.management.rules_configs.RulesConfigs.remove')
def test_remove_rules_config_for_given_key(self, rules, exit):
rules.return_value='123'
debug = False
exit.return_value=None
config_dict = {}
config_dict['debug'] = debug
config_dict['domain'] = 'test'
config_dict['client_id'] = 'id'
config_dict['client_secret'] = 'secret'
client= class_to_test(config_dict)
real_results = client.remove_rules_config_for_given_key(
key='123'
)
self.assertEqual('"123"', real_results)
@patch('sys.exit')
@patch('auth0_client.v3.management.rules_configs.RulesConfigs.set_rule_for_key')
def test_set_the_rules_config_for_a_given_key(self, rules, exit):
rules.return_value='123'
debug = False
exit.return_value=None
config_dict = {}
config_dict['debug'] = debug
config_dict['domain'] = 'test'
config_dict['client_id'] = 'id'
config_dict['client_secret'] = 'secret'
client= class_to_test(config_dict)
body='{"123":"xxx"}'
real_results = client.set_the_rules_config_for_a_given_key(
key='123',
body=body
)
self.assertEqual('"123"', real_results) | 0.14069 | 0.065485 |
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.event import listens_for
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum
from indico.core.db.sqlalchemy.descriptions import DescriptionMixin, RenderMode
from indico.modules.events.surveys.fields import get_field_types
from indico.util.enum import IndicoEnum
from indico.util.string import text_to_repr
def _get_next_position(context):
    """Get the next question position for the event.

    Column-default callable: positions are 1-based and scoped per
    (survey, parent section) pair.
    """
    survey_id = context.current_parameters['survey_id']
    parent_id = context.current_parameters['parent_id']
    max_position = (db.session.query(db.func.max(SurveyItem.position))
                    .filter(SurveyItem.survey_id == survey_id,
                            SurveyItem.parent_id == parent_id)
                    .one())[0]
    return (max_position or 0) + 1
def _get_item_default_title(context):
    """Column-default callable: empty-string title for sections, NULL otherwise."""
    item_type = context.current_parameters['type']
    return '' if item_type == SurveyItemType.section else None
class SurveyItemType(int, IndicoEnum):
    # NOTE: these integer values are baked into the CHECK constraints on the
    # `event_surveys.items` table below -- never renumber existing members.
    question = 1
    section = 2
    text = 3
class SurveyItem(DescriptionMixin, db.Model):
    """Polymorphic base row for everything that can appear in a survey form.

    Concrete kinds (see :class:`SurveyItemType`): questions, sections and
    static text blocks.  The CHECK constraints below enforce, per kind,
    which columns must be NULL / NOT NULL.
    """
    __tablename__ = 'items'
    __table_args__ = (db.CheckConstraint("type != {type} OR ("
                                         "title IS NOT NULL AND "
                                         "is_required IS NOT NULL AND "
                                         "field_type IS NOT NULL AND "
                                         "parent_id IS NOT NULL AND "
                                         "display_as_section IS NULL)"
                                         .format(type=SurveyItemType.question), 'valid_question'),
                      db.CheckConstraint("type != {type} OR ("
                                         "title IS NOT NULL AND "
                                         "is_required IS NULL AND "
                                         "field_type IS NULL AND "
                                         "field_data::text = '{{}}' AND "
                                         "parent_id IS NULL AND "
                                         "display_as_section IS NOT NULL)"
                                         .format(type=SurveyItemType.section), 'valid_section'),
                      db.CheckConstraint("type != {type} OR ("
                                         "title IS NULL AND "
                                         "is_required IS NULL AND "
                                         "field_type IS NULL AND "
                                         "field_data::text = '{{}}' AND "
                                         "parent_id IS NOT NULL AND "
                                         "display_as_section IS NULL)"
                                         .format(type=SurveyItemType.text), 'valid_text'),
                      {'schema': 'event_surveys'})
    __mapper_args__ = {
        'polymorphic_on': 'type',
        'polymorphic_identity': None
    }
    possible_render_modes = {RenderMode.markdown}
    default_render_mode = RenderMode.markdown
    #: The ID of the item
    id = db.Column(
        db.Integer,
        primary_key=True
    )
    #: The ID of the survey
    survey_id = db.Column(
        db.Integer,
        db.ForeignKey('event_surveys.surveys.id'),
        index=True,
        nullable=False,
    )
    #: The ID of the parent section item (NULL for top-level items, i.e. sections)
    parent_id = db.Column(
        db.Integer,
        db.ForeignKey('event_surveys.items.id'),
        index=True,
        nullable=True,
    )
    #: The position of the item in the survey form
    position = db.Column(
        db.Integer,
        nullable=False,
        default=_get_next_position
    )
    #: The type of the survey item
    type = db.Column(
        PyIntEnum(SurveyItemType),
        nullable=False
    )
    #: The title of the item
    title = db.Column(
        db.String,
        nullable=True,
        default=_get_item_default_title
    )
    #: If a section should be rendered as a section
    display_as_section = db.Column(
        db.Boolean,
        nullable=True
    )
    # The following columns are only used for SurveyQuestion objects, but by
    # specifying them here we can access them without an extra query when we
    # query SurveyItem objects directly instead of going through a subclass.
    # This is done e.g. when using the Survey.top_level_items relationship.
    #: If the question must be answered (wtforms DataRequired)
    is_required = db.Column(
        db.Boolean,
        nullable=True
    )
    #: The type of the field used for the question
    field_type = db.Column(
        db.String,
        nullable=True
    )
    #: Field-specific data (such as choices for multi-select fields)
    # NOTE(review): a mutable dict as a Column default -- presumably
    # SQLAlchemy serialises a fresh copy per INSERT; confirm it is never
    # mutated in place by callers.
    field_data = db.Column(
        JSONB,
        nullable=False,
        default={}
    )
    # relationship backrefs:
    # - parent (SurveySection.children)
    # - survey (Survey.items)
    def to_dict(self):
        """Return a json-serializable representation of this object.
        Subclasses must add their own data to the dict.
        """
        return {'type': self.type.name, 'title': self.title, 'description': self.description}
class SurveyQuestion(SurveyItem):
    """A survey item that collects an answer through a form field."""
    __mapper_args__ = {
        'polymorphic_identity': SurveyItemType.question
    }
    # relationship backrefs:
    # - answers (SurveyAnswer.question)
    @property
    def field(self):
        """Field implementation matching ``field_type``, or None if unknown."""
        try:
            impl = get_field_types()[self.field_type]
        except KeyError:
            return None
        return impl(self)
    @property
    def locator(self):
        """URL locator: survey locator plus this question's section and id."""
        return dict(self.survey.locator, section_id=self.parent_id, question_id=self.id)
    @property
    def not_empty_answers(self):
        """All submitted answers that actually contain a value."""
        return [a for a in self.answers if not a.is_empty]
    def get_summary(self, **kwargs):
        """Return the summary of answers submitted for this question."""
        # Implicitly returns None when the field type is unknown.
        if self.field:
            return self.field.get_summary(**kwargs)
    def __repr__(self):
        return f'<SurveyQuestion({self.id}, {self.survey_id}, {self.field_type}, {self.title})>'
    def to_dict(self):
        """Extend the base dict with question-specific attributes."""
        data = super().to_dict()
        data.update({'is_required': self.is_required, 'field_type': self.field_type,
                     'field_data': self.field.copy_field_data()})
        return data
class SurveySection(SurveyItem):
    """A survey item grouping child items (questions and text blocks)."""
    __mapper_args__ = {
        'polymorphic_identity': SurveyItemType.section
    }
    #: The child items of this section
    children = db.relationship(
        'SurveyItem',
        order_by='SurveyItem.position',
        cascade='all, delete-orphan',
        backref=db.backref(
            'parent',
            remote_side=[SurveyItem.id]
        )
    )
    @property
    def locator(self):
        """URL locator: survey locator plus this section's id."""
        return dict(self.survey.locator, section_id=self.id)
    def __repr__(self):
        return f'<SurveySection({self.id}, {self.survey_id}, {self.title})>'
    def to_dict(self):
        """Extend the base dict with the serialized children."""
        data = super().to_dict()
        content = [child.to_dict() for child in self.children]
        data.update({'content': content, 'display_as_section': self.display_as_section})
        if not self.display_as_section:
            # Invisible sections are pure containers; their title and
            # description are never shown, so they are omitted.
            del data['title']
            del data['description']
        return data
class SurveyText(SurveyItem):
    """A static text block shown between questions (no answer collected)."""
    __mapper_args__ = {
        'polymorphic_identity': SurveyItemType.text
    }
    @property
    def locator(self):
        """URL locator: survey locator plus this text block's section and id."""
        return dict(self.survey.locator, section_id=self.parent_id, text_id=self.id)
    def __repr__(self):
        desc = text_to_repr(self.description)
        return f'<SurveyText({self.id}, {self.survey_id}): "{desc}")>'
    def to_dict(self):
        """Extend the base dict; text blocks have no title."""
        data = super().to_dict()
        del data['title']
        return data
@listens_for(SurveySection.children, 'append')
def _set_survey(target, value, *unused):
    # Propagate the parent section's survey to a newly appended child so
    # callers don't have to set it explicitly.
    if value.survey is None and target.survey is not None:
        value.survey = target.survey
assert value.survey in {target.survey, None} | indico/modules/events/surveys/models/items.py |
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.event import listens_for
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum
from indico.core.db.sqlalchemy.descriptions import DescriptionMixin, RenderMode
from indico.modules.events.surveys.fields import get_field_types
from indico.util.enum import IndicoEnum
from indico.util.string import text_to_repr
def _get_next_position(context):
"""Get the next question position for the event."""
survey_id = context.current_parameters['survey_id']
parent_id = context.current_parameters['parent_id']
res = (db.session.query(db.func.max(SurveyItem.position))
.filter(SurveyItem.survey_id == survey_id, SurveyItem.parent_id == parent_id)
.one())
return (res[0] or 0) + 1
def _get_item_default_title(context):
return '' if context.current_parameters['type'] == SurveyItemType.section else None
class SurveyItemType(int, IndicoEnum):
question = 1
section = 2
text = 3
class SurveyItem(DescriptionMixin, db.Model):
    """Base row for anything that can appear in a survey form.

    Uses single-table inheritance discriminated on ``type``; the CHECK
    constraints below enforce which subset of columns each concrete kind
    (question / section / text) may use.
    """

    __tablename__ = 'items'
    # NOTE: ``{{}}`` in the section/text constraints renders as the literal
    # ``{}`` (empty JSON object) once ``.format`` is applied.
    __table_args__ = (db.CheckConstraint("type != {type} OR ("
                                         "title IS NOT NULL AND "
                                         "is_required IS NOT NULL AND "
                                         "field_type IS NOT NULL AND "
                                         "parent_id IS NOT NULL AND "
                                         "display_as_section IS NULL)"
                                         .format(type=SurveyItemType.question), 'valid_question'),
                      db.CheckConstraint("type != {type} OR ("
                                         "title IS NOT NULL AND "
                                         "is_required IS NULL AND "
                                         "field_type IS NULL AND "
                                         "field_data::text = '{{}}' AND "
                                         "parent_id IS NULL AND "
                                         "display_as_section IS NOT NULL)"
                                         .format(type=SurveyItemType.section), 'valid_section'),
                      db.CheckConstraint("type != {type} OR ("
                                         "title IS NULL AND "
                                         "is_required IS NULL AND "
                                         "field_type IS NULL AND "
                                         "field_data::text = '{{}}' AND "
                                         "parent_id IS NOT NULL AND "
                                         "display_as_section IS NULL)"
                                         .format(type=SurveyItemType.text), 'valid_text'),
                      {'schema': 'event_surveys'})
    __mapper_args__ = {
        'polymorphic_on': 'type',
        # The base class itself is never used as a concrete row kind.
        'polymorphic_identity': None
    }

    possible_render_modes = {RenderMode.markdown}
    default_render_mode = RenderMode.markdown

    #: The ID of the item
    id = db.Column(
        db.Integer,
        primary_key=True
    )
    #: The ID of the survey
    survey_id = db.Column(
        db.Integer,
        db.ForeignKey('event_surveys.surveys.id'),
        index=True,
        nullable=False,
    )
    #: The ID of the parent section item (NULL for top-level items, i.e. sections)
    parent_id = db.Column(
        db.Integer,
        db.ForeignKey('event_surveys.items.id'),
        index=True,
        nullable=True,
    )
    #: The position of the item in the survey form
    position = db.Column(
        db.Integer,
        nullable=False,
        default=_get_next_position
    )
    #: The type of the survey item
    type = db.Column(
        PyIntEnum(SurveyItemType),
        nullable=False
    )
    #: The title of the item
    title = db.Column(
        db.String,
        nullable=True,
        default=_get_item_default_title
    )
    #: If a section should be rendered as a section
    display_as_section = db.Column(
        db.Boolean,
        nullable=True
    )

    # The following columns are only used for SurveyQuestion objects, but by
    # specifying them here we can access them without an extra query when we
    # query SurveyItem objects directly instead of going through a subclass.
    # This is done e.g. when using the Survey.top_level_items relationship.

    #: If the question must be answered (wtforms DataRequired)
    is_required = db.Column(
        db.Boolean,
        nullable=True
    )
    #: The type of the field used for the question
    field_type = db.Column(
        db.String,
        nullable=True
    )
    #: Field-specific data (such as choices for multi-select fields)
    field_data = db.Column(
        JSONB,
        nullable=False,
        default={}
    )

    # relationship backrefs:
    # - parent (SurveySection.children)
    # - survey (Survey.items)

    def to_dict(self):
        """Return a json-serializable representation of this object.

        Subclasses must add their own data to the dict.
        """
        return {'type': self.type.name, 'title': self.title, 'description': self.description}
class SurveyQuestion(SurveyItem):
    """A survey item that collects an answer from the respondent."""

    __mapper_args__ = {
        'polymorphic_identity': SurveyItemType.question
    }

    # relationship backrefs:
    # - answers (SurveyAnswer.question)

    @property
    def field(self):
        """The field implementation for this question, or ``None`` if unknown."""
        impl = get_field_types().get(self.field_type)
        return impl(self) if impl is not None else None

    @property
    def locator(self):
        return dict(self.survey.locator, section_id=self.parent_id, question_id=self.id)

    @property
    def not_empty_answers(self):
        return [answer for answer in self.answers if not answer.is_empty]

    def get_summary(self, **kwargs):
        """Return the summary of answers submitted for this question."""
        field = self.field
        if field:
            return field.get_summary(**kwargs)

    def __repr__(self):
        return f'<SurveyQuestion({self.id}, {self.survey_id}, {self.field_type}, {self.title})>'

    def to_dict(self):
        result = super().to_dict()
        result.update({'is_required': self.is_required,
                       'field_type': self.field_type,
                       'field_data': self.field.copy_field_data()})
        return result
class SurveySection(SurveyItem):
    """A survey item grouping child items (questions / text blocks)."""

    __mapper_args__ = {
        'polymorphic_identity': SurveyItemType.section
    }

    #: The child items of this section
    children = db.relationship(
        'SurveyItem',
        order_by='SurveyItem.position',
        cascade='all, delete-orphan',
        backref=db.backref(
            'parent',
            remote_side=[SurveyItem.id]
        )
    )

    @property
    def locator(self):
        return dict(self.survey.locator, section_id=self.id)

    def __repr__(self):
        return f'<SurveySection({self.id}, {self.survey_id}, {self.title})>'

    def to_dict(self):
        """Serialize the section with its children inlined.

        Sections not displayed as sections drop their own title/description.
        """
        data = super().to_dict()
        content = [child.to_dict() for child in self.children]
        data.update({'content': content, 'display_as_section': self.display_as_section})
        if not self.display_as_section:
            del data['title']
            del data['description']
        return data
class SurveyText(SurveyItem):
    """A survey item containing static text (takes no user input)."""

    __mapper_args__ = {
        'polymorphic_identity': SurveyItemType.text
    }

    @property
    def locator(self):
        return dict(self.survey.locator, section_id=self.parent_id, text_id=self.id)

    def __repr__(self):
        desc = text_to_repr(self.description)
        # Fixed: the repr previously contained a stray ')' before '>'
        # (`..."{desc}")>'`), producing unbalanced output.
        return f'<SurveyText({self.id}, {self.survey_id}): "{desc}">'

    def to_dict(self):
        # Text items have no title; only type/description are exposed.
        data = super().to_dict()
        del data['title']
        return data
@listens_for(SurveySection.children, 'append')
def _set_survey(target, value, *unused):
    """Propagate the parent section's survey onto a newly appended child."""
    parent_survey = target.survey
    if parent_survey is not None and value.survey is None:
        value.survey = parent_survey
    assert value.survey in {parent_survey, None}
import logging
import torch
class Client(object):
    """Simulated federated learning client."""

    def __init__(self, client_id):
        self.client_id = client_id

    def __repr__(self):
        return 'Client #{}: {} samples in labels: {}'.format(
            self.client_id, len(self.data), {label for _, label in self.data})

    # Set non-IID data configurations
    def set_bias(self, pref, bias):
        """Configure a preferred label and bias factor for non-IID sampling."""
        self.pref = pref
        self.bias = bias

    def set_shard(self, shard):
        """Assign a data shard for shard-based non-IID partitioning."""
        self.shard = shard

    # Server interactions
    def download(self, argv):
        """Simulate downloading *argv* from the server.

        Returns a shallow copy when the payload supports ``.copy()``;
        payloads without it (ints, None, ...) are passed through as-is.
        """
        try:
            return argv.copy()
        except AttributeError:  # was a bare except; only .copy() can be missing
            return argv

    def upload(self, argv):
        """Simulate uploading *argv* to the server (shallow copy if possible)."""
        try:
            return argv.copy()
        except AttributeError:
            return argv

    # Federated learning phases
    def set_data(self, data, config):
        """Store the local dataset, optionally holding out a test partition.

        The last ``config.clients.test_partition`` fraction of the samples
        becomes the testset when ``config.clients.do_test`` is set.
        """
        do_test = self.do_test = config.clients.do_test
        test_partition = self.test_partition = config.clients.test_partition

        # Download data
        self.data = self.download(data)

        data = self.data
        if do_test:  # Partition for testset if applicable
            split = int(len(data) * (1 - test_partition))
            self.trainset = data[:split]
            self.testset = data[split:]
        else:
            self.trainset = data

    def configure(self, config):
        """Download hyperparameters and the most recent global model."""
        import fl_model  # pylint: disable=import-error

        model_path = self.model_path = config.paths.model

        # Download from server
        config = self.download(config)

        # Extract machine learning task from config
        self.task = config.fl.task
        self.epochs = config.fl.epochs
        self.batch_size = config.fl.batch_size

        # Download most recent global model
        path = model_path + '/global'
        self.model = fl_model.Net()
        self.model.load_state_dict(torch.load(path))
        self.model.eval()

        # Create optimizer
        self.optimizer = fl_model.get_optimizer(self.model)

    def run(self):
        """Dispatch to the configured federated learning task.

        Raises KeyError for an unknown task. (Bug fix: the dispatch table
        previously stored ``self.train()`` — the *result* of an eager call —
        so training ran while building the dict, even for unknown tasks.)
        """
        {
            'train': self.train,
        }[self.task]()

    def get_report(self):
        """Report results to the server."""
        return self.upload(self.report)

    # Machine learning tasks
    def train(self):
        """Train the local model and assemble the report for the server."""
        import fl_model  # pylint: disable=import-error

        logging.info('Training on client #%s', self.client_id)

        # Perform model training
        trainloader = fl_model.get_trainloader(self.trainset, self.batch_size)
        fl_model.train(self.model, trainloader, self.optimizer, self.epochs)

        # Extract model weights and biases
        weights = fl_model.extract_weights(self.model)

        # Generate report for server
        self.report = Report(self)
        self.report.weights = weights

        # Perform model testing if applicable
        if self.do_test:
            testloader = fl_model.get_testloader(self.testset, 1000)
            self.report.accuracy = fl_model.test(self.model, testloader)

    def test(self):
        # Local evaluation outside of training is not implemented.
        raise NotImplementedError
class Report(object):
"""Federated learning client report."""
def __init__(self, client):
self.client_id = client.client_id
self.num_samples = len(client.data) | simulation/client.py | import logging
import torch
class Client(object):
"""Simulated federated learning client."""
def __init__(self, client_id):
self.client_id = client_id
def __repr__(self):
return 'Client #{}: {} samples in labels: {}'.format(
self.client_id, len(self.data), set([label for _, label in self.data]))
# Set non-IID data configurations
def set_bias(self, pref, bias):
self.pref = pref
self.bias = bias
def set_shard(self, shard):
self.shard = shard
# Server interactions
def download(self, argv):
# Download from the server.
try:
return argv.copy()
except:
return argv
def upload(self, argv):
# Upload to the server
try:
return argv.copy()
except:
return argv
# Federated learning phases
def set_data(self, data, config):
# Extract from config
do_test = self.do_test = config.clients.do_test
test_partition = self.test_partition = config.clients.test_partition
# Download data
self.data = self.download(data)
# Extract trainset, testset (if applicable)
data = self.data
if do_test: # Partition for testset if applicable
self.trainset = data[:int(len(data) * (1 - test_partition))]
self.testset = data[int(len(data) * (1 - test_partition)):]
else:
self.trainset = data
def configure(self, config):
import fl_model # pylint: disable=import-error
# Extract from config
model_path = self.model_path = config.paths.model
# Download from server
config = self.download(config)
# Extract machine learning task from config
self.task = config.fl.task
self.epochs = config.fl.epochs
self.batch_size = config.fl.batch_size
# Download most recent global model
path = model_path + '/global'
self.model = fl_model.Net()
self.model.load_state_dict(torch.load(path))
self.model.eval()
# Create optimizer
self.optimizer = fl_model.get_optimizer(self.model)
def run(self):
# Perform federated learning task
{
"train": self.train()
}[self.task]
def get_report(self):
# Report results to server.
return self.upload(self.report)
# Machine learning tasks
def train(self):
import fl_model # pylint: disable=import-error
logging.info('Training on client #{}'.format(self.client_id))
# Perform model training
trainloader = fl_model.get_trainloader(self.trainset, self.batch_size)
fl_model.train(self.model, trainloader,
self.optimizer, self.epochs)
# Extract model weights and biases
weights = fl_model.extract_weights(self.model)
# Generate report for server
self.report = Report(self)
self.report.weights = weights
# Perform model testing if applicable
if self.do_test:
testloader = fl_model.get_testloader(self.testset, 1000)
self.report.accuracy = fl_model.test(self.model, testloader)
def test(self):
# Perform model testing
raise NotImplementedError
class Report(object):
"""Federated learning client report."""
def __init__(self, client):
self.client_id = client.client_id
self.num_samples = len(client.data) | 0.741861 | 0.243294 |
"""Library of compiler functions for usage in the native execution context."""
from absl import logging
from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.core.impl import computation_impl
from tensorflow_federated.python.core.impl.compiler import building_blocks
from tensorflow_federated.python.core.impl.compiler import transformations
from tensorflow_federated.python.core.impl.wrappers import computation_wrapper_instances
def transform_to_native_form(
    comp: computation_base.Computation) -> computation_base.Computation:
  """Compiles a computation for execution in the TFF native runtime.

  The proto underlying `comp` is transformed to call-dominant form (see
  `tff.framework.transform_to_call_dominant` for the definition). If the
  transformation fails, the original computation is returned unchanged.

  Args:
    comp: Instance of `computation_base.Computation` to compile.

  Returns:
    A new `computation_base.Computation` representing the compiled version of
    `comp`.
  """
  comp_proto = computation_impl.ComputationImpl.get_proto(comp)
  block = building_blocks.ComputationBuildingBlock.from_proto(comp_proto)
  try:
    logging.debug('Compiling TFF computation.')
    cdf, _ = transformations.transform_to_call_dominant(block)
    logging.debug('Computation compiled to:')
    logging.debug(cdf.formatted_representation())
    return computation_wrapper_instances.building_block_to_computation(cdf)
  except ValueError as err:
    # Compilation is best-effort: fall back to the uncompiled computation.
    logging.debug('Compilation for native runtime failed with error %s', err)
    logging.debug('computation: %s', block.compact_representation())
    return comp
def transform_mathematical_functions_to_tensorflow(
    comp: computation_base.Computation,) -> computation_base.Computation:
  """Compiles all mathematical functions in `comp` to TensorFlow blocks.

  Notice that this does not necessarily represent a strict performance
  improvement. In particular, this compilation will not attempt to deduplicate
  across the boundaries of communication operators, and therefore it may be
  the case that compiling eagerly to TensorFlow hides the opportunity for
  a dynamic cache to be used.

  Args:
    comp: Instance of `computation_base.Computation` to compile.

  Returns:
    A new `computation_base.Computation` representing the compiled version of
    `comp`.
  """
  comp_proto = computation_impl.ComputationImpl.get_proto(comp)
  block = building_blocks.ComputationBuildingBlock.from_proto(comp_proto)
  try:
    logging.debug('Compiling local computations to TensorFlow.')
    tf_block, _ = transformations.compile_local_computation_to_tensorflow(block)
    logging.debug('Local computations compiled to TF:')
    logging.debug(tf_block.formatted_representation())
    return computation_wrapper_instances.building_block_to_computation(tf_block)
  except ValueError as err:
    # Best-effort compilation: fall back to the original computation.
    logging.debug(
        'Compilation of local computation to TensorFlow failed with error %s',
        err)
    logging.debug('computation: %s', block.compact_representation())
    return comp
from absl import logging
from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.core.impl import computation_impl
from tensorflow_federated.python.core.impl.compiler import building_blocks
from tensorflow_federated.python.core.impl.compiler import transformations
from tensorflow_federated.python.core.impl.wrappers import computation_wrapper_instances
def transform_to_native_form(
comp: computation_base.Computation) -> computation_base.Computation:
"""Compiles a computation for execution in the TFF native runtime.
This function transforms the proto underlying `comp` by transforming it
to call-dominant form (see `tff.framework.transform_to_call_dominant` for
definition).
Args:
comp: Instance of `computation_base.Computation` to compile.
Returns:
A new `computation_base.Computation` representing the compiled version of
`comp`.
"""
proto = computation_impl.ComputationImpl.get_proto(comp)
computation_building_block = building_blocks.ComputationBuildingBlock.from_proto(
proto)
try:
logging.debug('Compiling TFF computation.')
call_dominant_form, _ = transformations.transform_to_call_dominant(
computation_building_block)
logging.debug('Computation compiled to:')
logging.debug(call_dominant_form.formatted_representation())
return computation_wrapper_instances.building_block_to_computation(
call_dominant_form)
except ValueError as e:
logging.debug('Compilation for native runtime failed with error %s', e)
logging.debug('computation: %s',
computation_building_block.compact_representation())
return comp
def transform_mathematical_functions_to_tensorflow(
comp: computation_base.Computation,) -> computation_base.Computation:
"""Compiles all mathematical functions in `comp` to TensorFlow blocks.
Notice that this does not necessarily represent a strict performance
improvement. In particular, this compilation will not attempt to deduplicate
across the boundaries of communication operators, and therefore it may be
the case that compiling eagerly to TensorFlow hides the opportunity for
a dynamic cache to be used.
Args:
comp: Instance of `computation_base.Computation` to compile.
Returns:
A new `computation_base.Computation` representing the compiled version of
`comp`.
"""
proto = computation_impl.ComputationImpl.get_proto(comp)
computation_building_block = building_blocks.ComputationBuildingBlock.from_proto(
proto)
try:
logging.debug('Compiling local computations to TensorFlow.')
tf_compiled, _ = transformations.compile_local_computation_to_tensorflow(
computation_building_block)
logging.debug('Local computations compiled to TF:')
logging.debug(tf_compiled.formatted_representation())
return computation_wrapper_instances.building_block_to_computation(
tf_compiled)
except ValueError as e:
logging.debug(
'Compilation of local computation to TensorFlow failed with error %s',
e)
logging.debug('computation: %s',
computation_building_block.compact_representation())
return comp | 0.955569 | 0.520435 |
from collections import Counter
from pathlib import Path
from random import Random
from typing import List, Tuple, Iterable
import tqdm
from dp.model.model import ModelType
from dp.preprocessing.text import Preprocessor
from dp.utils.io import read_config, pickle_binary
from dp.utils.logging import get_logger
logger = get_logger(__name__)
def preprocess(config_file: str,
               train_data: List[Tuple[str, Iterable[str], Iterable[str]]],
               val_data: List[Tuple[str, Iterable[str], Iterable[str]]] = None,
               deduplicate_train_data: bool = True) -> None:
    """
    Preprocesses a given dataset to enable model training. The preprocessing result is stored in
    a folder provied by the config.

    Args:
        config_file (str): Path to the config.yaml that provides all necessary hyperparameters.
        train_data (List[Tuple[str, Iterable[str], Iterable[str]]]):
            Training data as a list of Tuples (language, grapheme sequence, phoneme sequence).
        val_data (List[Tuple[str, Iterable[str], Iterable[str]]], optional):
            Validation data as a list of Tuples (language, grapheme sequence, phoneme sequence).
        deduplicate_train_data (bool): Whether to deduplicate multiple occurences of the same word,
            the first is taken (Default value = True).

    Returns:
        None: the preprocessing result is stored in a folder provided by the config.
    """
    config = read_config(config_file)
    model_type = config['model']['type']
    model_type = ModelType(model_type)
    # char_repeats > 1 only makes sense for non-autoregressive models.
    if model_type.is_autoregressive() and config['preprocessing']['char_repeats'] > 1:
        char_repeats = config['preprocessing']['char_repeats']
        logger.warning(f'WARNING: You are training autoregressive model with char_repeats={char_repeats}. '
                       f'It is recommended to set char_repeats=1 in the config and preprocess again.')
    languages = set(config['preprocessing']['languages'])
    logger.info(f'Preprocessing, train data: with {len(train_data)} files.')
    data_dir = Path(config['paths']['data_dir'])
    data_dir.mkdir(parents=True, exist_ok=True)

    # Group entries by (language, word) so duplicates of the same word can be
    # split / deduplicated as a unit.
    train_dict = {(l, w): [] for l, w, p in train_data}
    for l, w, p in train_data:
        train_dict[(l, w)] = train_dict[(l, w)] + [(l, w, p)]
    # Sorted for deterministic ordering before the seeded shuffle below.
    train_keys = sorted(list(train_dict.keys()))
    if val_data is not None:
        # NOTE(review): the language filter is applied only to the provided
        # val_data, not to train_data — confirm this asymmetry is intended.
        val_data = [(l, w, p) for l, w, p in val_data if l in languages]
    else:
        # No validation set provided: carve n_val keys out of the train keys
        # with a fixed seed for reproducibility.
        n_val = config['preprocessing']['n_val']
        logger.info(f'Performing random split with num val: {n_val}')
        random = Random(42)
        random.shuffle(train_keys)
        val_keys = train_keys[:n_val]
        train_keys = train_keys[n_val:]
        val_data = []
        for k in val_keys:
            val_data.extend(train_dict[k])

    # Rebuild train_data from the (possibly reduced) key set; keep either the
    # first occurrence per word or all occurrences.
    train_data = []
    for key in train_keys:
        data_list = train_dict[key]
        if deduplicate_train_data:
            train_data.append(data_list[0])
        else:
            train_data.extend(data_list)

    preprocessor = Preprocessor.from_config(config)
    train_count = Counter()
    val_count = Counter()
    logger.info('Processing train data...')
    # Tokenize every (lang, graphemes, phonemes) triple and count per language.
    train_dataset = []
    for i, (lang, text, phonemes) in enumerate(tqdm.tqdm(train_data, total=len(train_data))):
        tokens = preprocessor((lang, text, phonemes))
        train_dataset.append(tokens)
        train_count.update([lang])
    val_dataset = []
    for i, (lang, text, phonemes) in enumerate(val_data):
        tokens = preprocessor((lang, text, phonemes))
        val_dataset.append(tokens)
        val_count.update([lang])
    logger.info(f'\nSaving datasets to: {data_dir.absolute()}')
    pickle_binary(train_dataset, data_dir / 'train_dataset.pkl')
    pickle_binary(val_dataset, data_dir / 'val_dataset.pkl')

    # Build a per-language word -> phoneme dictionary over ALL data (train +
    # val), dropping symbols outside the configured alphabets. First
    # occurrence of a word wins.
    phoneme_dictionary = dict()
    all_data = []
    text_symbols = set(config['preprocessing']['text_symbols'])
    phoneme_symbols = set(config['preprocessing']['phoneme_symbols'])
    for lang, text, phon in sorted(train_data + val_data):
        text = ''.join([t for t in text if t in text_symbols])
        phons = ''.join([p for p in phon if p in phoneme_symbols])
        all_data.append((lang, text, phons))
    for l, w, p in all_data:
        lang_dict = phoneme_dictionary.setdefault(l, {})
        if w not in lang_dict:
            lang_dict[w] = p
    pickle_binary(phoneme_dictionary, data_dir / 'phoneme_dict.pkl')
    # Human-readable dump of the cleaned data for inspection/debugging.
    with open(data_dir / 'combined_dataset.txt', 'w+', encoding='utf-8') as f:
        for lang, text, phoneme in all_data:
            f.write(f'{lang}\t{text}\t{phoneme}\n')

    logger.info(f'Preprocessing. \nTrain counts (deduplicated): {train_count.most_common()}'
                f'\nVal counts (including duplicates): {val_count.most_common()}')
    assert len(train_count) > 0, 'Preprocessing resulted in zero train counts!'
assert len(val_count) > 0, 'Preprocessing resulted in zero validation counts!' | dp/preprocess.py | from collections import Counter
from pathlib import Path
from random import Random
from typing import List, Tuple, Iterable
import tqdm
from dp.model.model import ModelType
from dp.preprocessing.text import Preprocessor
from dp.utils.io import read_config, pickle_binary
from dp.utils.logging import get_logger
logger = get_logger(__name__)
def preprocess(config_file: str,
train_data: List[Tuple[str, Iterable[str], Iterable[str]]],
val_data: List[Tuple[str, Iterable[str], Iterable[str]]] = None,
deduplicate_train_data=True) -> None:
"""
Preprocesses a given dataset to enable model training. The preprocessing result is stored in
a folder provied by the config.
Args:
config_file (str): Path to the config.yaml that provides all necessary hyperparameters.
train_data (List[Tuple[str, Iterable[str], Iterable[str]]]):
Training data as a list of Tuples (language, grapheme sequence, phoneme sequence).
val_data (List[Tuple[str, Iterable[str], Iterable[str]]], optional):
Validation data as a list of Tuples (language, grapheme sequence, phoneme sequence).
deduplicate_train_data (bool): Whether to deduplicate multiple occurences of the same word,
the first is taken (Default value = True).
Returns:
None: the preprocessing result is stored in a folder provided by the config.
"""
config = read_config(config_file)
model_type = config['model']['type']
model_type = ModelType(model_type)
if model_type.is_autoregressive() and config['preprocessing']['char_repeats'] > 1:
char_repeats = config['preprocessing']['char_repeats']
logger.warning(f'WARNING: You are training autoregressive model with char_repeats={char_repeats}. '
f'It is recommended to set char_repeats=1 in the config and preprocess again.')
languages = set(config['preprocessing']['languages'])
logger.info(f'Preprocessing, train data: with {len(train_data)} files.')
data_dir = Path(config['paths']['data_dir'])
data_dir.mkdir(parents=True, exist_ok=True)
train_dict = {(l, w): [] for l, w, p in train_data}
for l, w, p in train_data:
train_dict[(l, w)] = train_dict[(l, w)] + [(l, w, p)]
train_keys = sorted(list(train_dict.keys()))
if val_data is not None:
val_data = [(l, w, p) for l, w, p in val_data if l in languages]
else:
n_val = config['preprocessing']['n_val']
logger.info(f'Performing random split with num val: {n_val}')
random = Random(42)
random.shuffle(train_keys)
val_keys = train_keys[:n_val]
train_keys = train_keys[n_val:]
val_data = []
for k in val_keys:
val_data.extend(train_dict[k])
train_data = []
for key in train_keys:
data_list = train_dict[key]
if deduplicate_train_data:
train_data.append(data_list[0])
else:
train_data.extend(data_list)
preprocessor = Preprocessor.from_config(config)
train_count = Counter()
val_count = Counter()
logger.info('Processing train data...')
train_dataset = []
for i, (lang, text, phonemes) in enumerate(tqdm.tqdm(train_data, total=len(train_data))):
tokens = preprocessor((lang, text, phonemes))
train_dataset.append(tokens)
train_count.update([lang])
val_dataset = []
for i, (lang, text, phonemes) in enumerate(val_data):
tokens = preprocessor((lang, text, phonemes))
val_dataset.append(tokens)
val_count.update([lang])
logger.info(f'\nSaving datasets to: {data_dir.absolute()}')
pickle_binary(train_dataset, data_dir / 'train_dataset.pkl')
pickle_binary(val_dataset, data_dir / 'val_dataset.pkl')
phoneme_dictionary = dict()
all_data = []
text_symbols = set(config['preprocessing']['text_symbols'])
phoneme_symbols = set(config['preprocessing']['phoneme_symbols'])
for lang, text, phon in sorted(train_data + val_data):
text = ''.join([t for t in text if t in text_symbols])
phons = ''.join([p for p in phon if p in phoneme_symbols])
all_data.append((lang, text, phons))
for l, w, p in all_data:
lang_dict = phoneme_dictionary.setdefault(l, {})
if w not in lang_dict:
lang_dict[w] = p
pickle_binary(phoneme_dictionary, data_dir / 'phoneme_dict.pkl')
with open(data_dir / 'combined_dataset.txt', 'w+', encoding='utf-8') as f:
for lang, text, phoneme in all_data:
f.write(f'{lang}\t{text}\t{phoneme}\n')
logger.info(f'Preprocessing. \nTrain counts (deduplicated): {train_count.most_common()}'
f'\nVal counts (including duplicates): {val_count.most_common()}')
assert len(train_count) > 0, 'Preprocessing resulted in zero train counts!'
assert len(val_count) > 0, 'Preprocessing resulted in zero validation counts!' | 0.829181 | 0.383786 |
import os.path as osp
from datasets.roidb import Roidb
from datasets.refer import Refer
from opt import parse_opt
import json
import io
# Parse command-line/config options once at import time and expose them as a
# plain dict for module-wide lookup (e.g. opt['data_root']).
opt = parse_opt()
opt = vars(opt)
class Refvg(object):
    """Referring-expression dataset wrapper for Visual Genome (refvg).

    Loads referring expressions (Refer), region proposals (Roidb) and — for
    the 'sgmn' model — parsed scene graphs / scene-graph sequences.
    """

    def __init__(self, split, model_method):
        self._dataset = 'refvg'
        self._imageset = 'vg'
        self._split = split
        self._ref_db = Refer(opt['data_root'], self._dataset, split)
        # Scene graphs are only required by the SGMN model.
        if model_method == 'sgmn':
            self._ref_sg = self._load_sg()
            self._ref_sg_seq = self._load_sg_seq()
        else:
            self._ref_sg = None
            self._ref_sg_seq = None
        self._sent_ids = self._ref_db.get_sentIds()
        self._image_ids = self._ref_db.get_imgIds(self._sent_ids)
        roidb = Roidb(self._imageset, model_method)
        self._rois_db = {}
        self.max_num_box = 0
        for img_id in self._image_ids:
            # Bug fix: ``dict.has_key`` was removed in Python 3; use ``in``.
            assert img_id in roidb.roidb
            self._rois_db[img_id] = roidb.roidb[img_id].copy()
            self.max_num_box = max(self.max_num_box, int(self._rois_db[img_id]['num_objs']))
        self._h5_files = roidb.h5_files
        self._h5_lrel_files = roidb.h5_lrel_files

    @property
    def sent_ids(self):
        return self._sent_ids

    @property
    def ref_db(self):
        return self._ref_db

    @property
    def image_ids(self):
        return self._image_ids

    @property
    def rois_db(self):
        return self._rois_db

    @property
    def h5_files(self):
        return self._h5_files

    @property
    def h5_lrel_files(self):
        return self._h5_lrel_files

    @property
    def ref_sg(self):
        return self._ref_sg

    @property
    def ref_sg_seq(self):
        return self._ref_sg_seq

    @property
    def id_to_path(self):
        """Mapping image id -> jpg path (rebuilt on every access)."""
        path = {}
        for img_id in self.image_ids:
            file_name = str(img_id) + '.jpg'
            path[img_id] = osp.join(opt['data_root'], 'images', file_name)
        return path

    def get_imgIds(self, sent_ids):
        return self._ref_db.get_imgIds(sent_ids)

    def _load_sg(self):
        """Load per-sentence scene graphs from JSON (file closed properly)."""
        sg_file_path = osp.join(opt['data_root'], self._dataset, self._split + '_sgs.json')
        with open(sg_file_path, 'r') as f:  # was json.load(open(...)): leaked the handle
            return json.load(f)

    def _load_sg_seq(self):
        """Load per-sentence scene-graph sequences from JSON."""
        sg_seq_file_path = osp.join(opt['data_root'], self._dataset, self._split + '_sg_seqs.json')
        with open(sg_seq_file_path, 'r') as f:
            return json.load(f)

    def load_dictionary(self, pad_at_first=True):
        """Load the word -> index vocabulary used by the word embeddings.

        Raises if ``pad_at_first`` and the first entry is not ``<pad>``.
        """
        dict_file = osp.join(opt['data_root'], 'word_embedding', 'vocabulary_72700.txt')
        with io.open(dict_file, encoding='utf-8') as f:
            words = [w.strip() for w in f.readlines()]
        if pad_at_first and words[0] != '<pad>':
            raise Exception("The first word needs to be <pad> in the word list.")
        vocab_dict = {words[n]: n for n in range(len(words))}
        return vocab_dict

    def get_img_path(self, id):  # noqa: A002 - `id` kept for caller compatibility
        return self.id_to_path[id]

    def get_sent(self, sent_id):
        return self.ref_db.load_sent(sent_id)
from datasets.roidb import Roidb
from datasets.refer import Refer
from opt import parse_opt
import json
import io
opt = parse_opt()
opt = vars(opt)
class Refvg(object):
def __init__(self, split, model_method):
self._dataset = 'refvg'
self._imageset = 'vg'
self._split = split
self._ref_db = Refer(opt['data_root'], self._dataset, split)
if model_method == 'sgmn':
self._ref_sg = self._load_sg()
self._ref_sg_seq = self._load_sg_seq()
else:
self._ref_sg = None
self._ref_sg_seq = None
self._sent_ids = self._ref_db.get_sentIds()
self._image_ids = self._ref_db.get_imgIds(self._sent_ids)
roidb = Roidb(self._imageset, model_method)
self._rois_db = {}
self.max_num_box = 0
for img_id in self._image_ids:
assert roidb.roidb.has_key(img_id)
self._rois_db[img_id] = roidb.roidb[img_id].copy()
self.max_num_box = max(self.max_num_box, int(self._rois_db[img_id]['num_objs']))
self._h5_files = roidb.h5_files
self._h5_lrel_files = roidb.h5_lrel_files
@property
def sent_ids(self):
return self._sent_ids
@property
def ref_db(self):
return self._ref_db
@property
def image_ids(self):
return self._image_ids
@property
def rois_db(self):
return self._rois_db
@property
def h5_files(self):
return self._h5_files
@property
def h5_lrel_files(self):
return self._h5_lrel_files
@property
def ref_sg(self):
return self._ref_sg
@property
def ref_sg_seq(self):
return self._ref_sg_seq
@property
def id_to_path(self):
path = {}
for img_id in self.image_ids:
file_name = str(img_id)+ '.jpg'
image_path = osp.join(opt['data_root'], 'images/') + file_name
path[img_id] = image_path
return path
def get_imgIds(self, sent_ids):
return self._ref_db.get_imgIds(sent_ids)
def _load_sg(self):
sgs = {}
sg_file_path = osp.join(opt['data_root'], self._dataset, self._split + '_sgs.json')
data = json.load(open(sg_file_path, 'r'))
for key in list(data.keys()):
sgs[key] = data[key]
return sgs
def _load_sg_seq(self):
sg_seqs = {}
sg_seq_file_path = osp.join(opt['data_root'], self._dataset, self._split + '_sg_seqs.json')
data = json.load(open(sg_seq_file_path, 'r'))
for key in list(data.keys()):
sg_seqs[key] = data[key]
return sg_seqs
def load_dictionary(self, pad_at_first=True):
dict_file = osp.join(opt['data_root'], 'word_embedding', 'vocabulary_72700.txt')
with io.open(dict_file, encoding='utf-8') as f:
words = [w.strip() for w in f.readlines()]
if pad_at_first and words[0] != '<pad>':
raise Exception("The first word needs to be <pad> in the word list.")
vocab_dict = {words[n]: n for n in range(len(words))}
return vocab_dict
def get_img_path(self, id):
return self.id_to_path[id]
def get_sent(self, sent_id):
return self.ref_db.load_sent(sent_id) | 0.548915 | 0.275094 |
import asyncio
import logging
import voluptuous as vol
from aiohttp import web
from homeassistant.components.climate import ATTR_CURRENT_TEMPERATURE
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
EVENT_STATE_CHANGED, TEMP_FAHRENHEIT, CONTENT_TYPE_TEXT_PLAIN,
ATTR_TEMPERATURE, ATTR_UNIT_OF_MEASUREMENT)
from homeassistant import core as hacore
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import entityfilter, state as state_helper
from homeassistant.util.temperature import fahrenheit_to_celsius
# Third-party requirement installed by Home Assistant for this component.
REQUIREMENTS = ['prometheus_client==0.2.0']

_LOGGER = logging.getLogger(__name__)

# URL under which the Prometheus scrape endpoint is exposed.
API_ENDPOINT = '/api/prometheus'

DOMAIN = 'prometheus'
DEPENDENCIES = ['http']

# Configuration keys for the component's YAML section.
CONF_FILTER = 'filter'
CONF_PROM_NAMESPACE = 'namespace'

CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.All({
        # Entity filter restricting which states get exported.
        vol.Optional(CONF_FILTER, default={}): entityfilter.FILTER_SCHEMA,
        # Optional prefix prepended to every exported metric name.
        vol.Optional(CONF_PROM_NAMESPACE): cv.string,
    })
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Activate Prometheus component."""
    import prometheus_client

    hass.http.register_view(PrometheusView(prometheus_client))

    conf = config[DOMAIN]
    metrics = PrometheusMetrics(
        prometheus_client,
        conf[CONF_FILTER],
        conf.get(CONF_PROM_NAMESPACE),
    )
    hass.bus.listen(EVENT_STATE_CHANGED, metrics.handle_event)
    return True
class PrometheusMetrics(object):
"""Model all of the metrics which should be exposed to Prometheus."""
def __init__(self, prometheus_client, entity_filter, namespace):
"""Initialize Prometheus Metrics."""
self.prometheus_client = prometheus_client
self._filter = entity_filter
if namespace:
self.metrics_prefix = "{}_".format(namespace)
else:
self.metrics_prefix = ""
self._metrics = {}
def handle_event(self, event):
"""Listen for new messages on the bus, and add them to Prometheus."""
state = event.data.get('new_state')
if state is None:
return
entity_id = state.entity_id
_LOGGER.debug("Handling state update for %s", entity_id)
domain, _ = hacore.split_entity_id(entity_id)
if not self._filter(state.entity_id):
return
handler = '_handle_{}'.format(domain)
if hasattr(self, handler):
getattr(self, handler)(state)
metric = self._metric(
'state_change',
self.prometheus_client.Counter,
'The number of state changes',
)
metric.labels(**self._labels(state)).inc()
def _metric(self, metric, factory, documentation, labels=None):
if labels is None:
labels = ['entity', 'friendly_name', 'domain']
try:
return self._metrics[metric]
except KeyError:
full_metric_name = "{}{}".format(self.metrics_prefix, metric)
self._metrics[metric] = factory(
full_metric_name, documentation, labels)
return self._metrics[metric]
@staticmethod
def _labels(state):
return {
'entity': state.entity_id,
'domain': state.domain,
'friendly_name': state.attributes.get('friendly_name'),
}
def _battery(self, state):
if 'battery_level' in state.attributes:
metric = self._metric(
'battery_level_percent',
self.prometheus_client.Gauge,
'Battery level as a percentage of its capacity',
)
try:
value = float(state.attributes['battery_level'])
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
def _handle_binary_sensor(self, state):
metric = self._metric(
'binary_sensor_state',
self.prometheus_client.Gauge,
'State of the binary sensor (0/1)',
)
value = state_helper.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_device_tracker(self, state):
metric = self._metric(
'device_tracker_state',
self.prometheus_client.Gauge,
'State of the device tracker (0/1)',
)
value = state_helper.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_light(self, state):
metric = self._metric(
'light_state',
self.prometheus_client.Gauge,
'Load level of a light (0..1)',
)
try:
if 'brightness' in state.attributes:
value = state.attributes['brightness'] / 255.0
else:
value = state_helper.state_as_number(state)
value = value * 100
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
def _handle_lock(self, state):
metric = self._metric(
'lock_state',
self.prometheus_client.Gauge,
'State of the lock (0/1)',
)
value = state_helper.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_climate(self, state):
temp = state.attributes.get(ATTR_TEMPERATURE)
if temp:
unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
if unit == TEMP_FAHRENHEIT:
temp = fahrenheit_to_celsius(temp)
metric = self._metric(
'temperature_c', self.prometheus_client.Gauge,
'Temperature in degrees Celsius')
metric.labels(**self._labels(state)).set(temp)
current_temp = state.attributes.get(ATTR_CURRENT_TEMPERATURE)
if current_temp:
if unit == TEMP_FAHRENHEIT:
current_temp = fahrenheit_to_celsius(current_temp)
metric = self._metric(
'current_temperature_c', self.prometheus_client.Gauge,
'Current Temperature in degrees Celsius')
metric.labels(**self._labels(state)).set(current_temp)
metric = self._metric(
'climate_state', self.prometheus_client.Gauge,
'State of the thermostat (0/1)')
try:
value = state_helper.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
def _handle_sensor(self, state):
unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
metric = state.entity_id.split(".")[1]
if '_' not in str(metric):
metric = state.entity_id.replace('.', '_')
try:
int(metric.split("_")[-1])
metric = "_".join(metric.split("_")[:-1])
except ValueError:
pass
_metric = self._metric(metric, self.prometheus_client.Gauge,
state.entity_id)
try:
value = state_helper.state_as_number(state)
if unit == TEMP_FAHRENHEIT:
value = fahrenheit_to_celsius(value)
_metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
self._battery(state)
def _handle_switch(self, state):
metric = self._metric(
'switch_state',
self.prometheus_client.Gauge,
'State of the switch (0/1)',
)
try:
value = state_helper.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
def _handle_zwave(self, state):
self._battery(state)
def _handle_automation(self, state):
metric = self._metric(
'automation_triggered_count',
self.prometheus_client.Counter,
'Count of times an automation has been triggered',
)
metric.labels(**self._labels(state)).inc()
class PrometheusView(HomeAssistantView):
"""Handle Prometheus requests."""
url = API_ENDPOINT
name = 'api:prometheus'
def __init__(self, prometheus_client):
"""Initialize Prometheus view."""
self.prometheus_client = prometheus_client
@asyncio.coroutine
def get(self, request):
"""Handle request for Prometheus metrics."""
_LOGGER.debug("Received Prometheus metrics request")
return web.Response(
body=self.prometheus_client.generate_latest(),
content_type=CONTENT_TYPE_TEXT_PLAIN) | homeassistant/components/prometheus.py | import asyncio
import logging
import voluptuous as vol
from aiohttp import web
from homeassistant.components.climate import ATTR_CURRENT_TEMPERATURE
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
EVENT_STATE_CHANGED, TEMP_FAHRENHEIT, CONTENT_TYPE_TEXT_PLAIN,
ATTR_TEMPERATURE, ATTR_UNIT_OF_MEASUREMENT)
from homeassistant import core as hacore
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import entityfilter, state as state_helper
from homeassistant.util.temperature import fahrenheit_to_celsius
REQUIREMENTS = ['prometheus_client==0.2.0']
_LOGGER = logging.getLogger(__name__)
API_ENDPOINT = '/api/prometheus'
DOMAIN = 'prometheus'
DEPENDENCIES = ['http']
CONF_FILTER = 'filter'
CONF_PROM_NAMESPACE = 'namespace'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.All({
vol.Optional(CONF_FILTER, default={}): entityfilter.FILTER_SCHEMA,
vol.Optional(CONF_PROM_NAMESPACE): cv.string,
})
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Activate Prometheus component."""
import prometheus_client
hass.http.register_view(PrometheusView(prometheus_client))
conf = config[DOMAIN]
entity_filter = conf[CONF_FILTER]
namespace = conf.get(CONF_PROM_NAMESPACE)
metrics = PrometheusMetrics(prometheus_client, entity_filter, namespace)
hass.bus.listen(EVENT_STATE_CHANGED, metrics.handle_event)
return True
class PrometheusMetrics(object):
"""Model all of the metrics which should be exposed to Prometheus."""
def __init__(self, prometheus_client, entity_filter, namespace):
"""Initialize Prometheus Metrics."""
self.prometheus_client = prometheus_client
self._filter = entity_filter
if namespace:
self.metrics_prefix = "{}_".format(namespace)
else:
self.metrics_prefix = ""
self._metrics = {}
def handle_event(self, event):
"""Listen for new messages on the bus, and add them to Prometheus."""
state = event.data.get('new_state')
if state is None:
return
entity_id = state.entity_id
_LOGGER.debug("Handling state update for %s", entity_id)
domain, _ = hacore.split_entity_id(entity_id)
if not self._filter(state.entity_id):
return
handler = '_handle_{}'.format(domain)
if hasattr(self, handler):
getattr(self, handler)(state)
metric = self._metric(
'state_change',
self.prometheus_client.Counter,
'The number of state changes',
)
metric.labels(**self._labels(state)).inc()
def _metric(self, metric, factory, documentation, labels=None):
if labels is None:
labels = ['entity', 'friendly_name', 'domain']
try:
return self._metrics[metric]
except KeyError:
full_metric_name = "{}{}".format(self.metrics_prefix, metric)
self._metrics[metric] = factory(
full_metric_name, documentation, labels)
return self._metrics[metric]
@staticmethod
def _labels(state):
return {
'entity': state.entity_id,
'domain': state.domain,
'friendly_name': state.attributes.get('friendly_name'),
}
def _battery(self, state):
if 'battery_level' in state.attributes:
metric = self._metric(
'battery_level_percent',
self.prometheus_client.Gauge,
'Battery level as a percentage of its capacity',
)
try:
value = float(state.attributes['battery_level'])
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
def _handle_binary_sensor(self, state):
metric = self._metric(
'binary_sensor_state',
self.prometheus_client.Gauge,
'State of the binary sensor (0/1)',
)
value = state_helper.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_device_tracker(self, state):
metric = self._metric(
'device_tracker_state',
self.prometheus_client.Gauge,
'State of the device tracker (0/1)',
)
value = state_helper.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_light(self, state):
metric = self._metric(
'light_state',
self.prometheus_client.Gauge,
'Load level of a light (0..1)',
)
try:
if 'brightness' in state.attributes:
value = state.attributes['brightness'] / 255.0
else:
value = state_helper.state_as_number(state)
value = value * 100
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
def _handle_lock(self, state):
metric = self._metric(
'lock_state',
self.prometheus_client.Gauge,
'State of the lock (0/1)',
)
value = state_helper.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_climate(self, state):
temp = state.attributes.get(ATTR_TEMPERATURE)
if temp:
unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
if unit == TEMP_FAHRENHEIT:
temp = fahrenheit_to_celsius(temp)
metric = self._metric(
'temperature_c', self.prometheus_client.Gauge,
'Temperature in degrees Celsius')
metric.labels(**self._labels(state)).set(temp)
current_temp = state.attributes.get(ATTR_CURRENT_TEMPERATURE)
if current_temp:
if unit == TEMP_FAHRENHEIT:
current_temp = fahrenheit_to_celsius(current_temp)
metric = self._metric(
'current_temperature_c', self.prometheus_client.Gauge,
'Current Temperature in degrees Celsius')
metric.labels(**self._labels(state)).set(current_temp)
metric = self._metric(
'climate_state', self.prometheus_client.Gauge,
'State of the thermostat (0/1)')
try:
value = state_helper.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
def _handle_sensor(self, state):
unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
metric = state.entity_id.split(".")[1]
if '_' not in str(metric):
metric = state.entity_id.replace('.', '_')
try:
int(metric.split("_")[-1])
metric = "_".join(metric.split("_")[:-1])
except ValueError:
pass
_metric = self._metric(metric, self.prometheus_client.Gauge,
state.entity_id)
try:
value = state_helper.state_as_number(state)
if unit == TEMP_FAHRENHEIT:
value = fahrenheit_to_celsius(value)
_metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
self._battery(state)
def _handle_switch(self, state):
metric = self._metric(
'switch_state',
self.prometheus_client.Gauge,
'State of the switch (0/1)',
)
try:
value = state_helper.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
def _handle_zwave(self, state):
self._battery(state)
def _handle_automation(self, state):
metric = self._metric(
'automation_triggered_count',
self.prometheus_client.Counter,
'Count of times an automation has been triggered',
)
metric.labels(**self._labels(state)).inc()
class PrometheusView(HomeAssistantView):
"""Handle Prometheus requests."""
url = API_ENDPOINT
name = 'api:prometheus'
def __init__(self, prometheus_client):
"""Initialize Prometheus view."""
self.prometheus_client = prometheus_client
@asyncio.coroutine
def get(self, request):
"""Handle request for Prometheus metrics."""
_LOGGER.debug("Received Prometheus metrics request")
return web.Response(
body=self.prometheus_client.generate_latest(),
content_type=CONTENT_TYPE_TEXT_PLAIN) | 0.609059 | 0.122078 |
from time import time, sleep
from datetime import datetime, timezone, timedelta
import json
import re
import random
import configparser
import telegram
from telegram.error import NetworkError, Unauthorized
import cotoha
class TelegramBot:
def __init__(self):
self.REPLY_SETTINGS = './reply.json'
config_file = './settings/config.ini'
config = configparser.ConfigParser()
config.read(config_file, 'utf-8')
self.TELEGRAM_TOKEN = str(config.get('login', 'api_key'))
self.bot = telegram.Bot(self.TELEGRAM_TOKEN)
try:
self.update_id = self.bot.get_updates()[0].update_id
except IndexError:
self.update_id = None
try:
self.replyLists = self.jsonReader()
except json.JSONDecodeError as e:
raise('JSONファイルが不正です。')
except Exception as e:
raise(e)
def jsonReader(self):
"""Read json file for autoreply"""
replyLoad = open(self.REPLY_SETTINGS,encoding="utf-8")
replyLists = json.load(replyLoad)
return replyLists
def main(self):
"""Run the bot."""
while True:
try:
self.autoreply()
except NetworkError:
sleep(1)
except Unauthorized:
# BOTをリムーブ、ブロックされた場合はupdate_idを+1
self.update_id += 1
def autoreply(self):
"""autoreply the message the user sent."""
# 前回更新以降の更新がないかを確認
for update in self.bot.get_updates(offset=self.update_id, timeout=10):
self.update_id = update.update_id + 1
try:
rcv_text = str(update.message.text)
except:
rcv_text = str("")
self.rcv_user = str(update.effective_user['first_name'])
if not update.message:
continue
for reply_ptn in self.replyLists['autoreplies']:
for reply_trg in reply_ptn[0]:
if (re.match('regex:', reply_trg) is None or re.match(reply_trg.replace('regex:', ''), rcv_text) is None) and rcv_text != reply_trg:
continue
reply_text = reply_ptn[1][random.randrange(len(reply_ptn[1]))]
reply_text = reply_text.replace('{event.user.full_name}', self.rcv_user)
# リプライに"/がついていたら機能判定実施
if re.match('/', reply_text) is not None:
reply_text = self.checkReply(reply_text, rcv_text)
# Reply to the message
update.message.reply_text(reply_text)
def checkReply(self, reply_text, rcv_text):
# 判定を実施し、機能呼び出し
if reply_text == '/reload':
try:
self.replyLists = self.jsonReader()
return_text = 'Reload reply-setting...OK!'
except json.JSONDecodeError:
return_text = 'JSON Error in ' + str(self.REPLY_SETTINGS)
elif reply_text == '/whoami':
return_text = self.rcv_user
elif reply_text == '/checkemote':
#感情分析
try:
cth = cotoha.CotohaController()
emote = cth.emotion_analysis(rcv_text)
reply_list = self.replyLists['cotoha'][emote]
return_text = reply_list[random.randrange(len(reply_list))]
except Exception:
return_text = "・・・"
else:
return_text = reply_text
return(return_text)
if __name__ == '__main__':
bot = TelegramBot()
bot.main() | autoreply.py | from time import time, sleep
from datetime import datetime, timezone, timedelta
import json
import re
import random
import configparser
import telegram
from telegram.error import NetworkError, Unauthorized
import cotoha
class TelegramBot:
def __init__(self):
self.REPLY_SETTINGS = './reply.json'
config_file = './settings/config.ini'
config = configparser.ConfigParser()
config.read(config_file, 'utf-8')
self.TELEGRAM_TOKEN = str(config.get('login', 'api_key'))
self.bot = telegram.Bot(self.TELEGRAM_TOKEN)
try:
self.update_id = self.bot.get_updates()[0].update_id
except IndexError:
self.update_id = None
try:
self.replyLists = self.jsonReader()
except json.JSONDecodeError as e:
raise('JSONファイルが不正です。')
except Exception as e:
raise(e)
def jsonReader(self):
"""Read json file for autoreply"""
replyLoad = open(self.REPLY_SETTINGS,encoding="utf-8")
replyLists = json.load(replyLoad)
return replyLists
def main(self):
"""Run the bot."""
while True:
try:
self.autoreply()
except NetworkError:
sleep(1)
except Unauthorized:
# BOTをリムーブ、ブロックされた場合はupdate_idを+1
self.update_id += 1
def autoreply(self):
"""autoreply the message the user sent."""
# 前回更新以降の更新がないかを確認
for update in self.bot.get_updates(offset=self.update_id, timeout=10):
self.update_id = update.update_id + 1
try:
rcv_text = str(update.message.text)
except:
rcv_text = str("")
self.rcv_user = str(update.effective_user['first_name'])
if not update.message:
continue
for reply_ptn in self.replyLists['autoreplies']:
for reply_trg in reply_ptn[0]:
if (re.match('regex:', reply_trg) is None or re.match(reply_trg.replace('regex:', ''), rcv_text) is None) and rcv_text != reply_trg:
continue
reply_text = reply_ptn[1][random.randrange(len(reply_ptn[1]))]
reply_text = reply_text.replace('{event.user.full_name}', self.rcv_user)
# リプライに"/がついていたら機能判定実施
if re.match('/', reply_text) is not None:
reply_text = self.checkReply(reply_text, rcv_text)
# Reply to the message
update.message.reply_text(reply_text)
def checkReply(self, reply_text, rcv_text):
# 判定を実施し、機能呼び出し
if reply_text == '/reload':
try:
self.replyLists = self.jsonReader()
return_text = 'Reload reply-setting...OK!'
except json.JSONDecodeError:
return_text = 'JSON Error in ' + str(self.REPLY_SETTINGS)
elif reply_text == '/whoami':
return_text = self.rcv_user
elif reply_text == '/checkemote':
#感情分析
try:
cth = cotoha.CotohaController()
emote = cth.emotion_analysis(rcv_text)
reply_list = self.replyLists['cotoha'][emote]
return_text = reply_list[random.randrange(len(reply_list))]
except Exception:
return_text = "・・・"
else:
return_text = reply_text
return(return_text)
if __name__ == '__main__':
bot = TelegramBot()
bot.main() | 0.285571 | 0.042029 |
import typing
from typing import List, Optional
from uuid import UUID
from marshmallow import fields
from commercetools import schemas, types
from commercetools.services import abstract
from commercetools.typing import OptionalListStr
__all__ = ["ProductService"]
class ProductDeleteSchema(abstract.AbstractDeleteSchema):
price_currency = fields.String(data_key="priceCurrency", required=False)
price_country = fields.String(data_key="priceCountry", required=False)
price_customer_group = fields.UUID(data_key="priceCustomerGroup", required=False)
price_channel = fields.UUID(data_key="priceChannel", required=False)
class ProductQuerySchema(abstract.AbstractQuerySchema):
price_currency = fields.String(data_key="priceCurrency", required=False)
price_country = fields.String(data_key="priceCountry", required=False)
price_customer_group = fields.UUID(data_key="priceCustomerGroup", required=False)
price_channel = fields.UUID(data_key="priceChannel", required=False)
class ProductService(abstract.AbstractService):
def get_by_id(self, id: str, price_currency: str = None,
price_country: str = None, price_customer_group: UUID = None,
price_channel: UUID = None, expand: OptionalListStr = None
) -> Optional[types.Product]:
params = ProductQuerySchema().dump(
{
"expand": expand,
"price_currency": price_currency,
"price_country": price_country,
"price_customer_group": price_customer_group,
"price_channel": price_channel,
}
)
return self._client._get(f"products/{id}", params, schemas.ProductSchema)
def get_by_key(self, key: str, price_currency: str = None, price_country: str = None,
price_customer_group: UUID = None, price_channel: UUID = None,
expand: OptionalListStr = None) -> Optional[types.Product]:
params = ProductQuerySchema().dump(
{
"expand": expand,
"price_currency": price_currency,
"price_country": price_country,
"price_customer_group": price_customer_group,
"price_channel": price_channel,
}
)
return self._client._get(f"products/key={key}", params, schemas.ProductSchema)
def query(
self,
where: OptionalListStr = None,
sort: OptionalListStr = None,
expand: OptionalListStr = None,
limit: typing.Optional[int] = None,
offset: typing.Optional[int] = None,
price_currency: typing.Optional[str] = None,
price_country: typing.Optional[str] = None,
price_customer_group: typing.Optional[UUID] = None,
price_channel: typing.Optional[UUID] = None,
) -> types.ProductPagedQueryResponse:
params = ProductQuerySchema().dump(
{
"where": where,
"sort": sort,
"expand": expand,
"limit": limit,
"offset": offset,
"price_currency": price_currency,
"price_country": price_country,
"price_customer_group": price_customer_group,
"price_channel": price_channel,
}
)
return self._client._get(
"products", params, schemas.ProductPagedQueryResponseSchema
)
def create(self, draft: types.ProductDraft, expand: OptionalListStr = None) -> types.Product:
query_params = {}
if expand:
query_params["expand"] = expand
return self._client._post(
"products", query_params, draft, schemas.ProductDraftSchema, schemas.ProductSchema
)
def update_by_id(
self,
id: str,
version: int,
actions: List[types.ProductUpdateAction],
expand: OptionalListStr = None,
*,
force_update: bool = False,
) -> types.Product:
query_params = {}
if expand:
query_params["expand"] = expand
update_action = types.ProductUpdate(version=version, actions=actions)
return self._client._post(
endpoint=f"products/{id}",
params=expand,
data_object=update_action,
request_schema_cls=schemas.ProductUpdateSchema,
response_schema_cls=schemas.ProductSchema,
force_update=force_update,
)
def update_by_key(
self,
key: str,
version: int,
actions: List[types.ProductUpdateAction],
expand: OptionalListStr = None,
*,
force_update: bool = False,
) -> types.Product:
query_params = {}
if expand:
query_params["expand"] = expand
update_action = types.ProductUpdate(version=version, actions=actions)
return self._client._post(
endpoint=f"products/key={key}",
params=query_params,
data_object=update_action,
request_schema_cls=schemas.ProductUpdateSchema,
response_schema_cls=schemas.ProductSchema,
force_update=force_update,
)
def delete_by_id(
self,
id: str,
version: int,
price_currency: str = None,
price_country: str = None,
price_customer_group: UUID = None,
price_channel: UUID = None,
expand: OptionalListStr = None,
*,
force_delete: bool = False,
) -> types.Product:
params = ProductDeleteSchema().dump(
{
"version": version,
"price_currency": price_currency,
"price_country": price_country,
"price_customer_group": price_customer_group,
"price_channel": price_channel,
"expand": expand,
}
)
return self._client._delete(
endpoint=f"products/{id}",
params=params,
response_schema_cls=schemas.ProductSchema,
force_delete=force_delete,
)
def delete_by_key(
self,
key: str,
version: int,
price_currency: str = None,
price_country: str = None,
price_customer_group: UUID = None,
price_channel: UUID = None,
expand: OptionalListStr = None,
*,
force_delete: bool = False,
) -> types.Product:
params = ProductDeleteSchema().dump(
{
"version": version,
"price_currency": price_currency,
"price_country": price_country,
"price_customer_group": price_customer_group,
"price_channel": price_channel,
"expand": expand,
}
)
return self._client._delete(
endpoint=f"products/key={key}",
params=params,
response_schema_cls=schemas.ProductSchema,
force_delete=force_delete,
)
def upload_image(
self,
product_id: str,
fh: typing.BinaryIO,
sku: str = None,
filename: str = "img",
staged: bool = True,
):
params = {"filename": filename, "staged": staged}
if sku:
params["sku"] = sku
return self._client._upload(
endpoint=f"products/{product_id}/images",
params=params,
file=fh,
response_schema_cls=schemas.ProductSchema,
) | src/commercetools/services/products.py | import typing
from typing import List, Optional
from uuid import UUID
from marshmallow import fields
from commercetools import schemas, types
from commercetools.services import abstract
from commercetools.typing import OptionalListStr
__all__ = ["ProductService"]
class ProductDeleteSchema(abstract.AbstractDeleteSchema):
price_currency = fields.String(data_key="priceCurrency", required=False)
price_country = fields.String(data_key="priceCountry", required=False)
price_customer_group = fields.UUID(data_key="priceCustomerGroup", required=False)
price_channel = fields.UUID(data_key="priceChannel", required=False)
class ProductQuerySchema(abstract.AbstractQuerySchema):
price_currency = fields.String(data_key="priceCurrency", required=False)
price_country = fields.String(data_key="priceCountry", required=False)
price_customer_group = fields.UUID(data_key="priceCustomerGroup", required=False)
price_channel = fields.UUID(data_key="priceChannel", required=False)
class ProductService(abstract.AbstractService):
def get_by_id(self, id: str, price_currency: str = None,
price_country: str = None, price_customer_group: UUID = None,
price_channel: UUID = None, expand: OptionalListStr = None
) -> Optional[types.Product]:
params = ProductQuerySchema().dump(
{
"expand": expand,
"price_currency": price_currency,
"price_country": price_country,
"price_customer_group": price_customer_group,
"price_channel": price_channel,
}
)
return self._client._get(f"products/{id}", params, schemas.ProductSchema)
def get_by_key(self, key: str, price_currency: str = None, price_country: str = None,
price_customer_group: UUID = None, price_channel: UUID = None,
expand: OptionalListStr = None) -> Optional[types.Product]:
params = ProductQuerySchema().dump(
{
"expand": expand,
"price_currency": price_currency,
"price_country": price_country,
"price_customer_group": price_customer_group,
"price_channel": price_channel,
}
)
return self._client._get(f"products/key={key}", params, schemas.ProductSchema)
def query(
self,
where: OptionalListStr = None,
sort: OptionalListStr = None,
expand: OptionalListStr = None,
limit: typing.Optional[int] = None,
offset: typing.Optional[int] = None,
price_currency: typing.Optional[str] = None,
price_country: typing.Optional[str] = None,
price_customer_group: typing.Optional[UUID] = None,
price_channel: typing.Optional[UUID] = None,
) -> types.ProductPagedQueryResponse:
params = ProductQuerySchema().dump(
{
"where": where,
"sort": sort,
"expand": expand,
"limit": limit,
"offset": offset,
"price_currency": price_currency,
"price_country": price_country,
"price_customer_group": price_customer_group,
"price_channel": price_channel,
}
)
return self._client._get(
"products", params, schemas.ProductPagedQueryResponseSchema
)
def create(self, draft: types.ProductDraft, expand: OptionalListStr = None) -> types.Product:
query_params = {}
if expand:
query_params["expand"] = expand
return self._client._post(
"products", query_params, draft, schemas.ProductDraftSchema, schemas.ProductSchema
)
def update_by_id(
self,
id: str,
version: int,
actions: List[types.ProductUpdateAction],
expand: OptionalListStr = None,
*,
force_update: bool = False,
) -> types.Product:
query_params = {}
if expand:
query_params["expand"] = expand
update_action = types.ProductUpdate(version=version, actions=actions)
return self._client._post(
endpoint=f"products/{id}",
params=expand,
data_object=update_action,
request_schema_cls=schemas.ProductUpdateSchema,
response_schema_cls=schemas.ProductSchema,
force_update=force_update,
)
def update_by_key(
self,
key: str,
version: int,
actions: List[types.ProductUpdateAction],
expand: OptionalListStr = None,
*,
force_update: bool = False,
) -> types.Product:
query_params = {}
if expand:
query_params["expand"] = expand
update_action = types.ProductUpdate(version=version, actions=actions)
return self._client._post(
endpoint=f"products/key={key}",
params=query_params,
data_object=update_action,
request_schema_cls=schemas.ProductUpdateSchema,
response_schema_cls=schemas.ProductSchema,
force_update=force_update,
)
def delete_by_id(
self,
id: str,
version: int,
price_currency: str = None,
price_country: str = None,
price_customer_group: UUID = None,
price_channel: UUID = None,
expand: OptionalListStr = None,
*,
force_delete: bool = False,
) -> types.Product:
params = ProductDeleteSchema().dump(
{
"version": version,
"price_currency": price_currency,
"price_country": price_country,
"price_customer_group": price_customer_group,
"price_channel": price_channel,
"expand": expand,
}
)
return self._client._delete(
endpoint=f"products/{id}",
params=params,
response_schema_cls=schemas.ProductSchema,
force_delete=force_delete,
)
def delete_by_key(
self,
key: str,
version: int,
price_currency: str = None,
price_country: str = None,
price_customer_group: UUID = None,
price_channel: UUID = None,
expand: OptionalListStr = None,
*,
force_delete: bool = False,
) -> types.Product:
params = ProductDeleteSchema().dump(
{
"version": version,
"price_currency": price_currency,
"price_country": price_country,
"price_customer_group": price_customer_group,
"price_channel": price_channel,
"expand": expand,
}
)
return self._client._delete(
endpoint=f"products/key={key}",
params=params,
response_schema_cls=schemas.ProductSchema,
force_delete=force_delete,
)
def upload_image(
self,
product_id: str,
fh: typing.BinaryIO,
sku: str = None,
filename: str = "img",
staged: bool = True,
):
params = {"filename": filename, "staged": staged}
if sku:
params["sku"] = sku
return self._client._upload(
endpoint=f"products/{product_id}/images",
params=params,
file=fh,
response_schema_cls=schemas.ProductSchema,
) | 0.791539 | 0.219819 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import app
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
import inception_preprocessing
import vgg_preprocessing
from tensorflow.contrib import summary
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.slim.nets import inception
from tensorflow.contrib.training.python.training import evaluation
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
    'tpu', default=None,
    help='The Cloud TPU to use for training. This should be either the name '
    'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_string(
    'gcp_project', default=None,
    help='Project name for the Cloud TPU-enabled project. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
    'tpu_zone', default=None,
    help='GCE zone where the Cloud TPU is located in. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')

# Model specific parameters
flags.DEFINE_string(
    'data_dir', '',
    'Directory where input data is stored')
flags.DEFINE_string(
    'model_dir', None,
    'Directory where model output is stored')
flags.DEFINE_string(
    'export_dir',
    default=None,
    help=('The directory where the exported SavedModel will be stored.'))
flags.DEFINE_integer(
    'num_shards', 8,
    'Number of shards (workers).')
flags.DEFINE_integer(
    'iterations', 100,
    'Number of iterations per TPU training loop.')
flags.DEFINE_bool(
    'skip_host_call', default=True,
    help=('Skip the host call which is executed every training step. This is'
          ' generally used for generating training summaries (train loss,'
          ' learning rate, etc...). When --skip_host_call=false, there could'
          ' be a performance drop if host_call function is slow and cannot'
          ' keep up with the computation running on the TPU.'))
flags.DEFINE_integer(
    'train_batch_size', 1024,
    'Global (not per-shard) batch size for training')
flags.DEFINE_integer(
    'eval_total_size', 0,
    'Total batch size for evaluation, use the entire validation set if 0')
flags.DEFINE_integer(
    'eval_batch_size', 1024,
    'Global (not per-shard) batch size for evaluation')
flags.DEFINE_integer(
    'train_steps', 213000,
    'Number of steps use for training.')
flags.DEFINE_integer(
    'train_steps_per_eval', 2000,
    'Number of training steps to run between evaluations.')
flags.DEFINE_string(
    'mode', 'train_and_eval',
    'Mode to run: train, eval, train_and_eval')
flags.DEFINE_integer(
    'min_eval_interval', 180,
    'Minimum number of seconds between evaluations')
flags.DEFINE_integer(
    'eval_timeout', None,
    'Evaluation timeout: Maximum number of seconds that '
    'may elapse while no new checkpoints are observed')
flags.DEFINE_bool(
    'use_tpu', True,
    'Use TPUs rather than plain CPUs')
flags.DEFINE_string(
    'use_data', 'real',
    'One of "fake","real"')
flags.DEFINE_float(
    'learning_rate', 0.165,
    'Learning rate.')
flags.DEFINE_string(
    'optimizer', 'RMS',
    'Optimizer (one of sgd, RMS, momentum)')
flags.DEFINE_integer(
    'num_classes', 1001,
    'Number of classes to distinguish')
flags.DEFINE_integer(
    'width', 299,
    'Width of input image')
flags.DEFINE_integer(
    'height', 299,
    'Height of input image')
flags.DEFINE_bool(
    'transpose_enabled', False,
    'Boolean to enable/disable explicit I/O transpose')
flags.DEFINE_bool(
    'log_device_placement', False,
    'Boolean to enable/disable log device placement')
flags.DEFINE_integer(
    'save_summary_steps', 100,
    'Number of steps which must have run before showing summaries.')
flags.DEFINE_integer(
    'save_checkpoints_secs', 1000,
    'Interval (in seconds) at which the model data '
    'should be checkpointed. Set to 0 to disable.')
flags.DEFINE_bool(
    'moving_average', True,
    'Whether to enable moving average computation on variables')
flags.DEFINE_string(
    'preprocessing', 'inception',
    'Preprocessing stage to use: one of inception or vgg')
flags.DEFINE_bool(
    'use_annotated_bbox', False,
    'If true, use annotated bounding box as input to cropping function, '
    'else use full image size')
flags.DEFINE_float(
    'learning_rate_decay', 0.94,
    'Exponential decay rate used in learning rate adjustment')
flags.DEFINE_integer(
    'learning_rate_decay_epochs', 3,
    'Exponential decay epochs used in learning rate adjustment')
flags.DEFINE_bool(
    'display_tensors', False,
    'Whether to dump prediction tensors for comparison')
flags.DEFINE_bool(
    'clear_update_collections', True,
    'Set batchnorm update_collections to None if true, else use default value')
flags.DEFINE_integer(
    'cold_epochs', 2,
    'Number of epochs using cold learning rate')
flags.DEFINE_integer(
    'warmup_epochs', 7,
    'Number of epochs using linearly increasing learning rate')
flags.DEFINE_bool(
    'use_learning_rate_warmup', False,
    'Apply learning rate warmup if true')

# Dataset specific parameters
flags.DEFINE_bool(
    'prefetch_enabled', True,
    'Boolean to enable/disable prefetching')
flags.DEFINE_integer(
    'prefetch_dataset_buffer_size', 8*1024*1024,
    'Number of bytes in read buffer. 0 means no buffering.')
flags.DEFINE_integer(
    'num_files_infeed', 8,
    'Number of training files to read in parallel.')
flags.DEFINE_integer(
    'num_parallel_calls', 64,
    'Number of elements to process in parallel (by mapper)')
flags.DEFINE_integer(
    'initial_shuffle_buffer_size', 1024,
    'Number of elements from dataset that shuffler will sample from. '
    'This shuffling is done before any other operations. '
    'Set to 0 to disable')
flags.DEFINE_integer(
    'followup_shuffle_buffer_size', 1000,
    'Number of elements from dataset that shuffler will sample from. '
    'This shuffling is done after prefetching is done. '
    'Set to 0 to disable')
flags.DEFINE_string(
    'precision', 'float32',
    help=('Precision to use; one of: {bfloat16, float32}'))

# Parsed command-line flags; read throughout the module (e.g. by
# preprocess_raw_bytes and the input pipeline).
FLAGS = flags.FLAGS
# Dataset constants (standard ImageNet ILSVRC-2012 split sizes).
_NUM_TRAIN_IMAGES = 1281167
_NUM_EVAL_IMAGES = 50000

# Random cropping constants: bounds for the shorter-side resize used by the
# vgg preprocessing path (see preprocess_raw_bytes).
_RESIZE_SIDE_MIN = 300
_RESIZE_SIDE_MAX = 600

# Constants dictating the learning rate schedule.
RMSPROP_DECAY = 0.9                # Decay term for RMSProp.
RMSPROP_MOMENTUM = 0.9             # Momentum in RMSProp.
RMSPROP_EPSILON = 1.0              # Epsilon term for RMSProp.

# Constants dictating moving average.
MOVING_AVERAGE_DECAY = 0.995       # Decay rate for the variable EMA.

# Batchnorm moving mean/variance parameters
BATCH_NORM_DECAY = 0.996
BATCH_NORM_EPSILON = 1e-3

# NOTE(review): presumably the L2 regularization coefficient applied to model
# weights — confirm against the model_fn (outside this chunk).
WEIGHT_DECAY = 0.00004
def preprocess_raw_bytes(image_bytes, is_training=False, bbox=None):
  """Preprocesses a raw JPEG image.

  This implementation is shared in common between train/eval pipelines,
  and when serving the model.

  Args:
    image_bytes: A string Tensor, containing the encoded JPEG.
    is_training: Whether or not to preprocess for training.
    bbox: In inception preprocessing, this bbox can be used for cropping.

  Returns:
    A 3-Tensor [height, width, RGB channels] of type float32.

  Raises:
    ValueError: if FLAGS.preprocessing is not 'vgg' or 'inception'.
  """
  image = tf.image.decode_jpeg(image_bytes, channels=3)
  # Convert to floats in [0, 1) before any resizing/cropping.
  image = tf.image.convert_image_dtype(image, dtype=tf.float32)
  if FLAGS.preprocessing == 'vgg':
    image = vgg_preprocessing.preprocess_image(
        image=image,
        output_height=FLAGS.height,
        output_width=FLAGS.width,
        is_training=is_training,
        resize_side_min=_RESIZE_SIDE_MIN,
        resize_side_max=_RESIZE_SIDE_MAX)
  elif FLAGS.preprocessing == 'inception':
    image = inception_preprocessing.preprocess_image(
        image=image,
        output_height=FLAGS.height,
        output_width=FLAGS.width,
        is_training=is_training,
        bbox=bbox)
  else:
    # Was `assert False, ...`: asserts are stripped under `python -O`, so a bad
    # flag value could silently fall through. Raise explicitly instead.
    raise ValueError('Unknown preprocessing type: %s' % FLAGS.preprocessing)
  return image
class InputPipeline(object):
  """Generates ImageNet input_fn for training or evaluation.

  The training data is assumed to be in TFRecord format with keys as specified
  in the dataset_parser below, sharded across 1024 files, named sequentially:
      train-00000-of-01024
      train-00001-of-01024
      ...
      train-01023-of-01024
  The validation data is in the same format but sharded in 128 files.
  The format of the data required is created by the script at:
      https://github.com/tensorflow/tpu/blob/master/tools/datasets/imagenet_to_gcs.py

  Args:
    is_training: `bool` for whether the input is for training
  """
  def __init__(self, is_training, data_dir, use_bfloat16):
    self.is_training = is_training      # selects file pattern and shuffling
    self.data_dir = data_dir            # root directory of TFRecord shards
    self.use_bfloat16 = use_bfloat16    # cast images for bfloat16 training
  def dataset_parser(self, serialized_proto):
    """Parse an Imagenet record from value."""
    keys_to_features = {
        'image/encoded':
            tf.FixedLenFeature((), tf.string, default_value=''),
        'image/format':
            tf.FixedLenFeature((), tf.string, default_value='jpeg'),
        'image/class/label':
            tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
        'image/class/text':
            tf.FixedLenFeature([], dtype=tf.string, default_value=''),
        'image/object/bbox/xmin':
            tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymin':
            tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/xmax':
            tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymax':
            tf.VarLenFeature(dtype=tf.float32),
        'image/object/class/label':
            tf.VarLenFeature(dtype=tf.int64),
    }
    features = tf.parse_single_example(serialized_proto, keys_to_features)
    bbox = None
    if FLAGS.use_annotated_bbox:
      xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
      ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
      xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
      ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
      # Boxes are assembled (y, x)-ordered: [ymin, xmin, ymax, xmax].
      bbox = tf.concat([ymin, xmin, ymax, xmax], 0)
      # Force the variable number of bounding boxes into the shape
      # [1, num_boxes, coords].
      bbox = tf.expand_dims(bbox, 0)
      bbox = tf.transpose(bbox, [0, 2, 1])
    image = features['image/encoded']
    image = preprocess_raw_bytes(image, is_training=self.is_training, bbox=bbox)
    # Scalar int32 label for sparse losses/metrics downstream.
    label = tf.cast(
        tf.reshape(features['image/class/label'], shape=[]), dtype=tf.int32)
    if self.use_bfloat16:
      image = tf.cast(image, tf.bfloat16)
    return image, label
  def input_fn(self, params):
    """Input function which provides a single batch for train or eval.

    Args:
      params: `dict` of parameters passed from the `TPUEstimator`.
        `params['batch_size']` is always provided and should be used as the
        effective batch size.

    Returns:
      A `tf.data.Dataset` object.
    """
    # Per-host batch size chosen by the TPUEstimator.
    batch_size = params['batch_size']
    if FLAGS.use_data == 'real':
      assert self.data_dir, 'data_dir is required'
      shuffle = self.is_training
      file_pattern = os.path.join(
          self.data_dir, 'train-*' if self.is_training else 'validation-*')
      dataset = tf.data.Dataset.list_files(file_pattern, shuffle=shuffle)
      if self.is_training:
        # Loop over the shard files indefinitely during training.
        dataset = dataset.repeat()
      def prefetch_dataset(filename):
        dataset = tf.data.TFRecordDataset(
            filename, buffer_size=FLAGS.prefetch_dataset_buffer_size)
        return dataset
      # Read several shard files in parallel; sloppy=True trades element
      # ordering determinism for throughput.
      dataset = dataset.apply(
          tf.contrib.data.parallel_interleave(
              prefetch_dataset,
              cycle_length=FLAGS.num_files_infeed,
              sloppy=True))
      if shuffle and FLAGS.followup_shuffle_buffer_size > 0:
        dataset = dataset.shuffle(
            buffer_size=FLAGS.followup_shuffle_buffer_size)
      dataset = dataset.map(
          self.dataset_parser, num_parallel_calls=FLAGS.num_parallel_calls)
    else:
      # Synthetic-data path (--use_data=fake): a single random image/label
      # pair repeated forever — useful for infeed benchmarking.
      random_image = tf.random.uniform(
          [FLAGS.height, FLAGS.width, 3],
          minval=-1,
          maxval=1,
          dtype=tf.bfloat16 if self.use_bfloat16 else tf.float32)
      # NOTE(review): integer maxval is exclusive in tf.random.uniform, so
      # fake labels span 0..998 rather than 0..1000 — harmless for fake data.
      random_label = tf.random.uniform([], minval=0, maxval=999, dtype=tf.int32)
      dataset = tf.data.Dataset.range(1).repeat().map(
          lambda data: (random_image, random_label))
    dataset = dataset.prefetch(batch_size)
    # drop_remainder=True: TPUs require statically-shaped batches.
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.prefetch(2)  # Prefetch overlaps in-feed with training
    if FLAGS.transpose_enabled:
      def transpose_images(images):
        return tf.transpose(images, params['output_perm'])
      # Host-side transpose; the inverse on the TPU is effectively elided
      # (see tensor_transform_fn).
      dataset = dataset.map(
          lambda images, labels: (transpose_images(images), labels),
          num_parallel_calls=FLAGS.num_parallel_calls)
    return dataset
def image_serving_input_fn():
  """Serving input fn for raw images.

  Consumed when exporting a SavedModel: accepts a batch of encoded JPEG
  strings and runs each through the shared preprocessing pipeline.

  Returns:
    A ServingInputReceiver wrapping the preprocessed float32 image batch.
  """
  raw_bytes = tf.placeholder(dtype=tf.string, shape=[None])
  preprocessed = tf.map_fn(
      preprocess_raw_bytes,
      raw_bytes,
      back_prop=False,
      dtype=tf.float32)
  receiver_tensors = {'image_bytes': raw_bytes}
  return tf.estimator.export.ServingInputReceiver(
      preprocessed, receiver_tensors)
def tensor_transform_fn(data, perm):
  """Conditionally transpose `data` for the host/TPU transpose trick.

  The transpose performed here on the host is paired with an inverse
  transpose on the TPU, which the compiler effectively elides, voiding any
  associated computational cost.

  NOTE: Eventually the compiler will be able to detect when this kind of
  operation may prove beneficial and perform these types of transformations
  implicitly, voiding the need for user intervention.

  Args:
    data: Tensor to be transposed.
    perm: New ordering of dimensions.

  Returns:
    The transposed tensor, or `data` unchanged when --transpose_enabled is
    false.
  """
  if not FLAGS.transpose_enabled:
    return data
  return tf.transpose(data, perm)
def inception_model_fn(features, labels, mode, params):
  """Inception v3 model using Estimator API.

  Args:
    features: image batch (or a dict holding it under key 'feature'),
      possibly transposed on the host (see tensor_transform_fn).
    labels: `Tensor` of integer class ids, shape [batch].
    mode: one of tf.estimator.ModeKeys (TRAIN / EVAL / PREDICT).
    params: `dict` from TPUEstimator; 'input_perm' undoes the host transpose.

  Returns:
    An EstimatorSpec for PREDICT, otherwise a TPUEstimatorSpec.

  Raises:
    ValueError: if FLAGS.optimizer is not one of sgd, momentum, RMS.
  """
  num_classes = FLAGS.num_classes
  is_training = (mode == tf.estimator.ModeKeys.TRAIN)
  is_eval = (mode == tf.estimator.ModeKeys.EVAL)

  if isinstance(features, dict):
    features = features['feature']

  # Undo the host-side transpose (no-op unless --transpose_enabled).
  features = tensor_transform_fn(features, params['input_perm'])

  # This nested function allows us to avoid duplicating the logic which
  # builds the network, for different values of --precision.
  def build_network():
    if FLAGS.precision == 'bfloat16':
      with tf.contrib.tpu.bfloat16_scope():
        logits, end_points = inception.inception_v3(
            features,
            num_classes,
            is_training=is_training)
      # Compute the loss in full precision even when the body ran in bfloat16.
      logits = tf.cast(logits, tf.float32)
    elif FLAGS.precision == 'float32':
      logits, end_points = inception.inception_v3(
          features,
          num_classes,
          is_training=is_training)
    return logits, end_points

  if FLAGS.clear_update_collections:
    # updates_collections must be set to None in order to use fused batchnorm
    with arg_scope(inception.inception_v3_arg_scope(
        weight_decay=0.0,
        batch_norm_decay=BATCH_NORM_DECAY,
        batch_norm_epsilon=BATCH_NORM_EPSILON,
        updates_collections=None)):
      logits, end_points = build_network()
  else:
    with arg_scope(inception.inception_v3_arg_scope(
        batch_norm_decay=BATCH_NORM_DECAY,
        batch_norm_epsilon=BATCH_NORM_EPSILON)):
      logits, end_points = build_network()

  predictions = {
      'classes': tf.argmax(input=logits, axis=1),
      'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
  }

  if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        export_outputs={
            'classify': tf.estimator.export.PredictOutput(predictions)
        })

  # Optionally dump predictions/labels when evaluating off-TPU.
  if mode == tf.estimator.ModeKeys.EVAL and FLAGS.display_tensors and (
      not FLAGS.use_tpu):
    with tf.control_dependencies([
        tf.Print(
            predictions['classes'], [predictions['classes']],
            summarize=FLAGS.eval_batch_size,
            message='prediction: ')
    ]):
      labels = tf.Print(
          labels, [labels], summarize=FLAGS.eval_batch_size, message='label: ')

  one_hot_labels = tf.one_hot(labels, FLAGS.num_classes, dtype=tf.int32)

  # Auxiliary-classifier loss with weight 0.4, when the head is present.
  if 'AuxLogits' in end_points:
    tf.losses.softmax_cross_entropy(
        onehot_labels=one_hot_labels,
        logits=tf.cast(end_points['AuxLogits'], tf.float32),
        weights=0.4,
        label_smoothing=0.1,
        scope='aux_loss')

  tf.losses.softmax_cross_entropy(
      onehot_labels=one_hot_labels,
      logits=logits,
      weights=1.0,
      label_smoothing=0.1)

  losses = tf.add_n(tf.losses.get_losses())
  # Manual L2 penalty over weight tensors; batchnorm parameters are excluded
  # (the arg_scope above sets weight_decay=0.0, so this is the only decay).
  l2_loss = []
  for v in tf.trainable_variables():
    if 'BatchNorm' not in v.name and 'weights' in v.name:
      l2_loss.append(tf.nn.l2_loss(v))
  loss = losses + WEIGHT_DECAY * tf.add_n(l2_loss)

  # Linear-scaling rule: base learning rate scales with global batch / 256.
  initial_learning_rate = FLAGS.learning_rate * FLAGS.train_batch_size / 256
  if FLAGS.use_learning_rate_warmup:
    # Adjust initial learning rate to match final warmup rate
    warmup_decay = FLAGS.learning_rate_decay**(
        (FLAGS.warmup_epochs + FLAGS.cold_epochs) /
        FLAGS.learning_rate_decay_epochs)
    adj_initial_learning_rate = initial_learning_rate * warmup_decay

  final_learning_rate = 0.0001 * initial_learning_rate

  host_call = None
  train_op = None
  if is_training:
    batches_per_epoch = _NUM_TRAIN_IMAGES / FLAGS.train_batch_size
    global_step = tf.train.get_or_create_global_step()
    current_epoch = tf.cast(
        (tf.cast(global_step, tf.float32) / batches_per_epoch), tf.int32)

    learning_rate = tf.train.exponential_decay(
        learning_rate=initial_learning_rate,
        global_step=global_step,
        decay_steps=int(FLAGS.learning_rate_decay_epochs * batches_per_epoch),
        decay_rate=FLAGS.learning_rate_decay,
        staircase=True)

    if FLAGS.use_learning_rate_warmup:
      # Piecewise schedule: constant `wlr` for the cold epochs, then a linear
      # ramp, then the exponential decay computed above.
      wlr = 0.1 * adj_initial_learning_rate
      wlr_height = tf.cast(
          0.9 * adj_initial_learning_rate /
          (FLAGS.warmup_epochs + FLAGS.learning_rate_decay_epochs - 1),
          tf.float32)
      epoch_offset = tf.cast(FLAGS.cold_epochs - 1, tf.int32)
      exp_decay_start = (FLAGS.warmup_epochs + FLAGS.cold_epochs +
                         FLAGS.learning_rate_decay_epochs)
      lin_inc_lr = tf.add(
          wlr, tf.multiply(
              tf.cast(tf.subtract(current_epoch, epoch_offset), tf.float32),
              wlr_height))
      learning_rate = tf.where(
          tf.greater_equal(current_epoch, FLAGS.cold_epochs),
          (tf.where(tf.greater_equal(current_epoch, exp_decay_start),
                    learning_rate, lin_inc_lr)),
          wlr)

    # Set a minimum boundary for the learning rate.
    learning_rate = tf.maximum(
        learning_rate, final_learning_rate, name='learning_rate')

    if FLAGS.optimizer == 'sgd':
      tf.logging.info('Using SGD optimizer')
      optimizer = tf.train.GradientDescentOptimizer(
          learning_rate=learning_rate)
    elif FLAGS.optimizer == 'momentum':
      tf.logging.info('Using Momentum optimizer')
      optimizer = tf.train.MomentumOptimizer(
          learning_rate=learning_rate, momentum=0.9)
    elif FLAGS.optimizer == 'RMS':
      tf.logging.info('Using RMS optimizer')
      optimizer = tf.train.RMSPropOptimizer(
          learning_rate,
          RMSPROP_DECAY,
          momentum=RMSPROP_MOMENTUM,
          epsilon=RMSPROP_EPSILON)
    else:
      # BUG FIX: was `tf.logging.fatal('Unknown optimizer:', FLAGS.optimizer)`,
      # which mis-formats the message (no %s placeholder) and does not stop
      # execution, so `optimizer` was later referenced unbound. Fail fast.
      raise ValueError('Unknown optimizer: %s' % FLAGS.optimizer)

    if FLAGS.use_tpu:
      # Aggregate gradients across TPU shards before applying them.
      optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
      train_op = optimizer.minimize(loss, global_step=global_step)

    if FLAGS.moving_average:
      ema = tf.train.ExponentialMovingAverage(
          decay=MOVING_AVERAGE_DECAY, num_updates=global_step)
      variables_to_average = (
          tf.trainable_variables() + tf.moving_average_variables())
      with tf.control_dependencies([train_op]), tf.name_scope('moving_average'):
        train_op = ema.apply(variables_to_average)

    # To log the loss, current learning rate, and epoch for Tensorboard, the
    # summary op needs to be run on the host CPU via host_call. host_call
    # expects [batch_size, ...] Tensors, thus reshape to introduce a batch
    # dimension. These Tensors are implicitly concatenated to
    # [params['batch_size']].
    gs_t = tf.reshape(global_step, [1])
    loss_t = tf.reshape(loss, [1])
    lr_t = tf.reshape(learning_rate, [1])
    ce_t = tf.reshape(current_epoch, [1])

    if not FLAGS.skip_host_call:
      def host_call_fn(gs, loss, lr, ce):
        """Training host call. Creates scalar summaries for training metrics.

        This function is executed on the CPU and should not directly reference
        any Tensors in the rest of the `model_fn`. To pass Tensors from the
        model to the `metric_fn`, provide them as part of the `host_call`. See
        https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
        for more information.

        Arguments should match the list of `Tensor` objects passed as the second
        element in the tuple passed to `host_call`.

        Args:
          gs: `Tensor with shape `[batch]` for the global_step
          loss: `Tensor` with shape `[batch]` for the training loss.
          lr: `Tensor` with shape `[batch]` for the learning_rate.
          ce: `Tensor` with shape `[batch]` for the current_epoch.

        Returns:
          List of summary ops to run on the CPU host.
        """
        gs = gs[0]
        with summary.create_file_writer(FLAGS.model_dir).as_default():
          with summary.always_record_summaries():
            summary.scalar('loss', tf.reduce_mean(loss), step=gs)
            summary.scalar('learning_rate', tf.reduce_mean(lr), step=gs)
            summary.scalar('current_epoch', tf.reduce_mean(ce), step=gs)
          return summary.all_summary_ops()

      host_call = (host_call_fn, [gs_t, loss_t, lr_t, ce_t])

  eval_metrics = None
  if is_eval:
    def metric_fn(labels, logits):
      """Evaluation metric function. Evaluates accuracy.

      This function is executed on the CPU and should not directly reference
      any Tensors in the rest of the `model_fn`. To pass Tensors from the model
      to the `metric_fn`, provide as part of the `eval_metrics`. See
      https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
      for more information.

      Arguments should match the list of `Tensor` objects passed as the second
      element in the tuple passed to `eval_metrics`.

      Args:
        labels: `Tensor` with shape `[batch, ]`.
        logits: `Tensor` with shape `[batch, num_classes]`.

      Returns:
        A dict of the metrics to return from evaluation.
      """
      predictions = tf.argmax(logits, axis=1)
      top_1_accuracy = tf.metrics.accuracy(labels, predictions)
      in_top_5 = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)
      top_5_accuracy = tf.metrics.mean(in_top_5)
      return {
          'accuracy': top_1_accuracy,
          'accuracy@5': top_5_accuracy,
      }

    eval_metrics = (metric_fn, [labels, logits])

  return tf.contrib.tpu.TPUEstimatorSpec(
      mode=mode,
      loss=loss,
      train_op=train_op,
      host_call=host_call,
      eval_metrics=eval_metrics)
class LoadEMAHook(tf.train.SessionRunHook):
  """Session hook that loads EMA shadow values into the model variables.

  Attached at evaluation time so metrics are computed against the
  exponential-moving-average weights instead of the raw trained weights.
  """

  def __init__(self, model_dir):
    super(LoadEMAHook, self).__init__()
    # Directory whose latest checkpoint supplies the EMA values.
    self._model_dir = model_dir

  def begin(self):
    # Build the restore function while the graph is still mutable.
    averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
    shadow_var_map = averages.variables_to_restore()
    latest_ckpt = tf.train.latest_checkpoint(self._model_dir)
    self._load_ema = tf.contrib.framework.assign_from_checkpoint_fn(
        latest_ckpt, shadow_var_map)

  def after_create_session(self, sess, coord):
    tf.logging.info('Reloading EMA...')
    self._load_ema(sess)
def main(unused_argv):
  """Builds the TPUEstimator and runs train / eval / train_and_eval."""
  del unused_argv  # Unused

  tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
      FLAGS.tpu,
      zone=FLAGS.tpu_zone,
      project=FLAGS.gcp_project)

  assert FLAGS.precision == 'bfloat16' or FLAGS.precision == 'float32', (
      'Invalid value for --precision flag; must be bfloat16 or float32.')
  tf.logging.info('Precision: %s', FLAGS.precision)

  # Identity permutations by default; replaced below when the host/TPU
  # transpose trick is enabled.
  params = {
      'input_perm': [0, 1, 2, 3],
      'output_perm': [0, 1, 2, 3],
  }

  batch_axis = 0
  if FLAGS.transpose_enabled:
    params['input_perm'] = [3, 0, 1, 2]
    params['output_perm'] = [1, 2, 3, 0]
    batch_axis = 3

  if FLAGS.eval_total_size > 0:
    eval_size = FLAGS.eval_total_size
  else:
    eval_size = _NUM_EVAL_IMAGES
  eval_steps = eval_size // FLAGS.eval_batch_size

  # In eval-only mode each TPU loop covers exactly one full evaluation pass.
  iterations = (eval_steps if FLAGS.mode == 'eval' else
                FLAGS.iterations)

  eval_batch_size = (None if FLAGS.mode == 'train' else
                     FLAGS.eval_batch_size)

  tpu_config = tf.contrib.tpu.TPUConfig(
      iterations_per_loop=iterations, num_shards=FLAGS.num_shards)

  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      model_dir=FLAGS.model_dir,
      save_checkpoints_secs=FLAGS.save_checkpoints_secs,
      save_summary_steps=FLAGS.save_summary_steps,
      session_config=tf.ConfigProto(
          allow_soft_placement=True,
          log_device_placement=FLAGS.log_device_placement),
      tpu_config=tpu_config)

  inception_classifier = tf.contrib.tpu.TPUEstimator(
      model_fn=inception_model_fn,
      use_tpu=FLAGS.use_tpu,
      config=run_config,
      params=params,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=eval_batch_size,
      batch_axis=(batch_axis, 0))

  # Input pipelines are slightly different (with regards to shuffling and
  # preprocessing) between training and evaluation.
  use_bfloat16 = FLAGS.precision == 'bfloat16'
  imagenet_train = InputPipeline(
      is_training=True,
      data_dir=FLAGS.data_dir,
      use_bfloat16=use_bfloat16)
  imagenet_eval = InputPipeline(
      is_training=False,
      data_dir=FLAGS.data_dir,
      use_bfloat16=use_bfloat16)

  # Evaluate against the EMA weights when moving averages are enabled.
  if FLAGS.moving_average:
    eval_hooks = [LoadEMAHook(FLAGS.model_dir)]
  else:
    eval_hooks = []

  if FLAGS.mode == 'eval':
    # Run evaluation when there is a new checkpoint.
    # NOTE(review): FLAGS.min_eval_interval is defined but not consulted here;
    # pacing comes solely from checkpoints_iterator — verify intent.
    for checkpoint in evaluation.checkpoints_iterator(
        FLAGS.model_dir, timeout=FLAGS.eval_timeout):
      tf.logging.info('Starting to evaluate.')
      try:
        start_timestamp = time.time()  # Includes compilation time
        eval_results = inception_classifier.evaluate(
            input_fn=imagenet_eval.input_fn,
            steps=eval_steps,
            hooks=eval_hooks,
            checkpoint_path=checkpoint)
        elapsed_time = int(time.time() - start_timestamp)
        tf.logging.info(
            'Eval results: %s. Elapsed seconds: %d', eval_results, elapsed_time)
        # Terminate eval job when final checkpoint is reached.
        # Checkpoint filenames look like model.ckpt-<step>.
        current_step = int(os.path.basename(checkpoint).split('-')[1])
        if current_step >= FLAGS.train_steps:
          tf.logging.info(
              'Evaluation finished after training step %d', current_step)
          break
      except tf.errors.NotFoundError:
        # Since the coordinator is on a different job than the TPU worker,
        # sometimes the TPU worker does not finish initializing until long after
        # the CPU job tells it to start evaluating. In this case, the checkpoint
        # file could have been deleted already.
        tf.logging.info(
            'Checkpoint %s no longer exists, skipping checkpoint', checkpoint)
  elif FLAGS.mode == 'train_and_eval':
    # Alternate fixed-length training cycles with full evaluations.
    for cycle in range(FLAGS.train_steps // FLAGS.train_steps_per_eval):
      tf.logging.info('Starting training cycle %d.' % cycle)
      inception_classifier.train(
          input_fn=imagenet_train.input_fn, steps=FLAGS.train_steps_per_eval)
      tf.logging.info('Starting evaluation cycle %d .' % cycle)
      eval_results = inception_classifier.evaluate(
          input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)
      tf.logging.info('Evaluation results: %s' % eval_results)
  else:
    tf.logging.info('Starting training ...')
    inception_classifier.train(
        input_fn=imagenet_train.input_fn, max_steps=FLAGS.train_steps)

  if FLAGS.export_dir is not None:
    tf.logging.info('Starting to export model.')
    inception_classifier.export_saved_model(
        export_dir_base=FLAGS.export_dir,
        serving_input_receiver_fn=image_serving_input_fn)
if __name__ == '__main__':
  tf.logging.set_verbosity(tf.logging.INFO)
  # BUG FIX: the original final line had dataset-export column separators
  # ("| models/experimental/... | from __future__ import absolute_import")
  # fused onto `app.run(main)`, which is a syntax error; they are removed.
  app.run(main)
from __future__ import division
from __future__ import print_function
import os
import time
from absl import app
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
import inception_preprocessing
import vgg_preprocessing
from tensorflow.contrib import summary
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.slim.nets import inception
from tensorflow.contrib.training.python.training import evaluation
# NOTE(review): everything from here down duplicates definitions that already
# appear earlier in this dump (the file seems to be included twice by a
# dataset-export artifact). Re-executing these DEFINE_* calls in one process
# would presumably raise absl's duplicate-flag error — confirm before running.
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
    'tpu', default=None,
    help='The Cloud TPU to use for training. This should be either the name '
    'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_string(
    'gcp_project', default=None,
    help='Project name for the Cloud TPU-enabled project. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
    'tpu_zone', default=None,
    help='GCE zone where the Cloud TPU is located in. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')
# Model specific parameters
flags.DEFINE_string(
    'data_dir', '',
    'Directory where input data is stored')
flags.DEFINE_string(
    'model_dir', None,
    'Directory where model output is stored')
flags.DEFINE_string(
    'export_dir',
    default=None,
    help=('The directory where the exported SavedModel will be stored.'))
flags.DEFINE_integer(
    'num_shards', 8,
    'Number of shards (workers).')
flags.DEFINE_integer(
    'iterations', 100,
    'Number of iterations per TPU training loop.')
flags.DEFINE_bool(
    'skip_host_call', default=True,
    help=('Skip the host call which is executed every training step. This is'
          ' generally used for generating training summaries (train loss,'
          ' learning rate, etc...). When --skip_host_call=false, there could'
          ' be a performance drop if host_call function is slow and cannot'
          ' keep up with the computation running on the TPU.'))
flags.DEFINE_integer(
    'train_batch_size', 1024,
    'Global (not per-shard) batch size for training')
flags.DEFINE_integer(
    'eval_total_size', 0,
    'Total batch size for evaluation, use the entire validation set if 0')
flags.DEFINE_integer(
    'eval_batch_size', 1024,
    'Global (not per-shard) batch size for evaluation')
flags.DEFINE_integer(
    'train_steps', 213000,
    'Number of steps use for training.')
flags.DEFINE_integer(
    'train_steps_per_eval', 2000,
    'Number of training steps to run between evaluations.')
flags.DEFINE_string(
    'mode', 'train_and_eval',
    'Mode to run: train, eval, train_and_eval')
flags.DEFINE_integer(
    'min_eval_interval', 180,
    'Minimum number of seconds between evaluations')
flags.DEFINE_integer(
    'eval_timeout', None,
    'Evaluation timeout: Maximum number of seconds that '
    'may elapse while no new checkpoints are observed')
flags.DEFINE_bool(
    'use_tpu', True,
    'Use TPUs rather than plain CPUs')
flags.DEFINE_string(
    'use_data', 'real',
    'One of "fake","real"')
flags.DEFINE_float(
    'learning_rate', 0.165,
    'Learning rate.')
flags.DEFINE_string(
    'optimizer', 'RMS',
    'Optimizer (one of sgd, RMS, momentum)')
flags.DEFINE_integer(
    'num_classes', 1001,
    'Number of classes to distinguish')
flags.DEFINE_integer(
    'width', 299,
    'Width of input image')
flags.DEFINE_integer(
    'height', 299,
    'Height of input image')
flags.DEFINE_bool(
    'transpose_enabled', False,
    'Boolean to enable/disable explicit I/O transpose')
flags.DEFINE_bool(
    'log_device_placement', False,
    'Boolean to enable/disable log device placement')
flags.DEFINE_integer(
    'save_summary_steps', 100,
    'Number of steps which must have run before showing summaries.')
flags.DEFINE_integer(
    'save_checkpoints_secs', 1000,
    'Interval (in seconds) at which the model data '
    'should be checkpointed. Set to 0 to disable.')
flags.DEFINE_bool(
    'moving_average', True,
    'Whether to enable moving average computation on variables')
flags.DEFINE_string(
    'preprocessing', 'inception',
    'Preprocessing stage to use: one of inception or vgg')
flags.DEFINE_bool(
    'use_annotated_bbox', False,
    'If true, use annotated bounding box as input to cropping function, '
    'else use full image size')
flags.DEFINE_float(
    'learning_rate_decay', 0.94,
    'Exponential decay rate used in learning rate adjustment')
flags.DEFINE_integer(
    'learning_rate_decay_epochs', 3,
    'Exponential decay epochs used in learning rate adjustment')
flags.DEFINE_bool(
    'display_tensors', False,
    'Whether to dump prediction tensors for comparison')
flags.DEFINE_bool(
    'clear_update_collections', True,
    'Set batchnorm update_collections to None if true, else use default value')
flags.DEFINE_integer(
    'cold_epochs', 2,
    'Number of epochs using cold learning rate')
flags.DEFINE_integer(
    'warmup_epochs', 7,
    'Number of epochs using linearly increasing learning rate')
flags.DEFINE_bool(
    'use_learning_rate_warmup', False,
    'Apply learning rate warmup if true')
# Dataset specific parameters
flags.DEFINE_bool(
    'prefetch_enabled', True,
    'Boolean to enable/disable prefetching')
flags.DEFINE_integer(
    'prefetch_dataset_buffer_size', 8*1024*1024,
    'Number of bytes in read buffer. 0 means no buffering.')
flags.DEFINE_integer(
    'num_files_infeed', 8,
    'Number of training files to read in parallel.')
flags.DEFINE_integer(
    'num_parallel_calls', 64,
    'Number of elements to process in parallel (by mapper)')
flags.DEFINE_integer(
    'initial_shuffle_buffer_size', 1024,
    'Number of elements from dataset that shuffler will sample from. '
    'This shuffling is done before any other operations. '
    'Set to 0 to disable')
flags.DEFINE_integer(
    'followup_shuffle_buffer_size', 1000,
    'Number of elements from dataset that shuffler will sample from. '
    'This shuffling is done after prefetching is done. '
    'Set to 0 to disable')
flags.DEFINE_string(
    'precision', 'float32',
    help=('Precision to use; one of: {bfloat16, float32}'))
FLAGS = flags.FLAGS
# Dataset constants (standard ImageNet ILSVRC-2012 split sizes).
_NUM_TRAIN_IMAGES = 1281167
_NUM_EVAL_IMAGES = 50000
# Random cropping constants
_RESIZE_SIDE_MIN = 300
_RESIZE_SIDE_MAX = 600
# Constants dictating the learning rate schedule.
RMSPROP_DECAY = 0.9                # Decay term for RMSProp.
RMSPROP_MOMENTUM = 0.9             # Momentum in RMSProp.
RMSPROP_EPSILON = 1.0              # Epsilon term for RMSProp.
# Constants dictating moving average.
MOVING_AVERAGE_DECAY = 0.995
# Batchnorm moving mean/variance parameters
BATCH_NORM_DECAY = 0.996
BATCH_NORM_EPSILON = 1e-3
WEIGHT_DECAY = 0.00004
def preprocess_raw_bytes(image_bytes, is_training=False, bbox=None):
  """Preprocesses a raw JPEG image.

  This implementation is shared in common between train/eval pipelines,
  and when serving the model.

  Args:
    image_bytes: A string Tensor, containing the encoded JPEG.
    is_training: Whether or not to preprocess for training.
    bbox: In inception preprocessing, this bbox can be used for cropping.

  Returns:
    A 3-Tensor [height, width, RGB channels] of type float32.

  Raises:
    ValueError: if FLAGS.preprocessing is not 'vgg' or 'inception'.
  """
  image = tf.image.decode_jpeg(image_bytes, channels=3)
  # Convert to floats in [0, 1) before any resizing/cropping.
  image = tf.image.convert_image_dtype(image, dtype=tf.float32)
  if FLAGS.preprocessing == 'vgg':
    image = vgg_preprocessing.preprocess_image(
        image=image,
        output_height=FLAGS.height,
        output_width=FLAGS.width,
        is_training=is_training,
        resize_side_min=_RESIZE_SIDE_MIN,
        resize_side_max=_RESIZE_SIDE_MAX)
  elif FLAGS.preprocessing == 'inception':
    image = inception_preprocessing.preprocess_image(
        image=image,
        output_height=FLAGS.height,
        output_width=FLAGS.width,
        is_training=is_training,
        bbox=bbox)
  else:
    # Was `assert False, ...`: asserts are stripped under `python -O`, so a bad
    # flag value could silently fall through. Raise explicitly instead.
    raise ValueError('Unknown preprocessing type: %s' % FLAGS.preprocessing)
  return image
class InputPipeline(object):
  """Generates ImageNet input_fn for training or evaluation.

  The training data is assumed to be in TFRecord format with keys as specified
  in the dataset_parser below, sharded across 1024 files, named sequentially:
      train-00000-of-01024
      train-00001-of-01024
      ...
      train-01023-of-01024
  The validation data is in the same format but sharded in 128 files.
  The format of the data required is created by the script at:
      https://github.com/tensorflow/tpu/blob/master/tools/datasets/imagenet_to_gcs.py

  Args:
    is_training: `bool` for whether the input is for training
  """
  def __init__(self, is_training, data_dir, use_bfloat16):
    self.is_training = is_training      # selects file pattern and shuffling
    self.data_dir = data_dir            # root directory of TFRecord shards
    self.use_bfloat16 = use_bfloat16    # cast images for bfloat16 training
  def dataset_parser(self, serialized_proto):
    """Parse an Imagenet record from value."""
    keys_to_features = {
        'image/encoded':
            tf.FixedLenFeature((), tf.string, default_value=''),
        'image/format':
            tf.FixedLenFeature((), tf.string, default_value='jpeg'),
        'image/class/label':
            tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
        'image/class/text':
            tf.FixedLenFeature([], dtype=tf.string, default_value=''),
        'image/object/bbox/xmin':
            tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymin':
            tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/xmax':
            tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymax':
            tf.VarLenFeature(dtype=tf.float32),
        'image/object/class/label':
            tf.VarLenFeature(dtype=tf.int64),
    }
    features = tf.parse_single_example(serialized_proto, keys_to_features)
    bbox = None
    if FLAGS.use_annotated_bbox:
      xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
      ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
      xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
      ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
      # Boxes are assembled (y, x)-ordered: [ymin, xmin, ymax, xmax].
      bbox = tf.concat([ymin, xmin, ymax, xmax], 0)
      # Force the variable number of bounding boxes into the shape
      # [1, num_boxes, coords].
      bbox = tf.expand_dims(bbox, 0)
      bbox = tf.transpose(bbox, [0, 2, 1])
    image = features['image/encoded']
    image = preprocess_raw_bytes(image, is_training=self.is_training, bbox=bbox)
    # Scalar int32 label for sparse losses/metrics downstream.
    label = tf.cast(
        tf.reshape(features['image/class/label'], shape=[]), dtype=tf.int32)
    if self.use_bfloat16:
      image = tf.cast(image, tf.bfloat16)
    return image, label
  def input_fn(self, params):
    """Input function which provides a single batch for train or eval.

    Args:
      params: `dict` of parameters passed from the `TPUEstimator`.
        `params['batch_size']` is always provided and should be used as the
        effective batch size.

    Returns:
      A `tf.data.Dataset` object.
    """
    # Per-host batch size chosen by the TPUEstimator.
    batch_size = params['batch_size']
    if FLAGS.use_data == 'real':
      assert self.data_dir, 'data_dir is required'
      shuffle = self.is_training
      file_pattern = os.path.join(
          self.data_dir, 'train-*' if self.is_training else 'validation-*')
      dataset = tf.data.Dataset.list_files(file_pattern, shuffle=shuffle)
      if self.is_training:
        # Loop over the shard files indefinitely during training.
        dataset = dataset.repeat()
      def prefetch_dataset(filename):
        dataset = tf.data.TFRecordDataset(
            filename, buffer_size=FLAGS.prefetch_dataset_buffer_size)
        return dataset
      # Read several shard files in parallel; sloppy=True trades element
      # ordering determinism for throughput.
      dataset = dataset.apply(
          tf.contrib.data.parallel_interleave(
              prefetch_dataset,
              cycle_length=FLAGS.num_files_infeed,
              sloppy=True))
      if shuffle and FLAGS.followup_shuffle_buffer_size > 0:
        dataset = dataset.shuffle(
            buffer_size=FLAGS.followup_shuffle_buffer_size)
      dataset = dataset.map(
          self.dataset_parser, num_parallel_calls=FLAGS.num_parallel_calls)
    else:
      # Synthetic-data path (--use_data=fake): a single random image/label
      # pair repeated forever — useful for infeed benchmarking.
      random_image = tf.random.uniform(
          [FLAGS.height, FLAGS.width, 3],
          minval=-1,
          maxval=1,
          dtype=tf.bfloat16 if self.use_bfloat16 else tf.float32)
      # NOTE(review): integer maxval is exclusive in tf.random.uniform, so
      # fake labels span 0..998 rather than 0..1000 — harmless for fake data.
      random_label = tf.random.uniform([], minval=0, maxval=999, dtype=tf.int32)
      dataset = tf.data.Dataset.range(1).repeat().map(
          lambda data: (random_image, random_label))
    dataset = dataset.prefetch(batch_size)
    # drop_remainder=True: TPUs require statically-shaped batches.
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.prefetch(2)  # Prefetch overlaps in-feed with training
    if FLAGS.transpose_enabled:
      def transpose_images(images):
        return tf.transpose(images, params['output_perm'])
      # Host-side transpose; the inverse on the TPU is effectively elided
      # (see tensor_transform_fn).
      dataset = dataset.map(
          lambda images, labels: (transpose_images(images), labels),
          num_parallel_calls=FLAGS.num_parallel_calls)
    return dataset
def image_serving_input_fn():
  """Serving input fn for raw images.

  Consumed when exporting a SavedModel: accepts a batch of encoded JPEG
  strings and runs each through the shared preprocessing pipeline.

  Returns:
    A ServingInputReceiver wrapping the preprocessed float32 image batch.
  """
  raw_bytes = tf.placeholder(dtype=tf.string, shape=[None])
  preprocessed = tf.map_fn(
      preprocess_raw_bytes,
      raw_bytes,
      back_prop=False,
      dtype=tf.float32)
  receiver_tensors = {'image_bytes': raw_bytes}
  return tf.estimator.export.ServingInputReceiver(
      preprocessed, receiver_tensors)
def tensor_transform_fn(data, perm):
  """Conditionally transpose `data` for the host/TPU transpose trick.

  The transpose performed here on the host is paired with an inverse
  transpose on the TPU, which the compiler effectively elides, voiding any
  associated computational cost.

  NOTE: Eventually the compiler will be able to detect when this kind of
  operation may prove beneficial and perform these types of transformations
  implicitly, voiding the need for user intervention.

  Args:
    data: Tensor to be transposed.
    perm: New ordering of dimensions.

  Returns:
    The transposed tensor, or `data` unchanged when --transpose_enabled is
    false.
  """
  if not FLAGS.transpose_enabled:
    return data
  return tf.transpose(data, perm)
def inception_model_fn(features, labels, mode, params):
  """Inception v3 model using Estimator API.

  Args:
    features: input images as a `Tensor`, or a dict holding them under the
      key 'feature' (the serving/export path wraps features in a dict).
    labels: integer class-id labels.
    mode: one of `tf.estimator.ModeKeys` (TRAIN / EVAL / PREDICT).
    params: dict carrying 'input_perm' / 'output_perm' transpose
      permutations built in main() (plus whatever TPUEstimator injects).

  Returns:
    A `TPUEstimatorSpec` (or plain `EstimatorSpec` for PREDICT).
  """
  num_classes = FLAGS.num_classes
  is_training = (mode == tf.estimator.ModeKeys.TRAIN)
  is_eval = (mode == tf.estimator.ModeKeys.EVAL)
  if isinstance(features, dict):
    features = features['feature']
  # Undo the host-side transpose (no-op unless --transpose_enabled).
  features = tensor_transform_fn(features, params['input_perm'])
  # This nested function allows us to avoid duplicating the logic which
  # builds the network, for different values of --precision.
  def build_network():
    # NOTE(review): if FLAGS.precision is neither 'bfloat16' nor 'float32',
    # `logits`/`end_points` are never assigned and this raises
    # UnboundLocalError; main() asserts the flag value before reaching here.
    if FLAGS.precision == 'bfloat16':
      with tf.contrib.tpu.bfloat16_scope():
        logits, end_points = inception.inception_v3(
            features,
            num_classes,
            is_training=is_training)
      # Compute loss/metrics in float32 for numeric stability.
      logits = tf.cast(logits, tf.float32)
    elif FLAGS.precision == 'float32':
      logits, end_points = inception.inception_v3(
          features,
          num_classes,
          is_training=is_training)
    return logits, end_points
  if FLAGS.clear_update_collections:
    # updates_collections must be set to None in order to use fused batchnorm
    # (weight decay is applied manually below, hence weight_decay=0.0 here).
    with arg_scope(inception.inception_v3_arg_scope(
        weight_decay=0.0,
        batch_norm_decay=BATCH_NORM_DECAY,
        batch_norm_epsilon=BATCH_NORM_EPSILON,
        updates_collections=None)):
      logits, end_points = build_network()
  else:
    with arg_scope(inception.inception_v3_arg_scope(
        batch_norm_decay=BATCH_NORM_DECAY,
        batch_norm_epsilon=BATCH_NORM_EPSILON)):
      logits, end_points = build_network()
  predictions = {
      'classes': tf.argmax(input=logits, axis=1),
      'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
  }
  if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        export_outputs={
            'classify': tf.estimator.export.PredictOutput(predictions)
        })
  # Optionally print predictions and labels during CPU-side evaluation,
  # for debugging only (tf.Print is unsupported on TPU).
  if mode == tf.estimator.ModeKeys.EVAL and FLAGS.display_tensors and (
      not FLAGS.use_tpu):
    with tf.control_dependencies([
        tf.Print(
            predictions['classes'], [predictions['classes']],
            summarize=FLAGS.eval_batch_size,
            message='prediction: ')
    ]):
      labels = tf.Print(
          labels, [labels], summarize=FLAGS.eval_batch_size, message='label: ')
  one_hot_labels = tf.one_hot(labels, FLAGS.num_classes, dtype=tf.int32)
  # Auxiliary classifier head contributes a down-weighted (0.4) loss term.
  if 'AuxLogits' in end_points:
    tf.losses.softmax_cross_entropy(
        onehot_labels=one_hot_labels,
        logits=tf.cast(end_points['AuxLogits'], tf.float32),
        weights=0.4,
        label_smoothing=0.1,
        scope='aux_loss')
  tf.losses.softmax_cross_entropy(
      onehot_labels=one_hot_labels,
      logits=logits,
      weights=1.0,
      label_smoothing=0.1)
  losses = tf.add_n(tf.losses.get_losses())
  # Manual L2 weight decay over conv/fc weights, excluding batch-norm params.
  l2_loss = []
  for v in tf.trainable_variables():
    if 'BatchNorm' not in v.name and 'weights' in v.name:
      l2_loss.append(tf.nn.l2_loss(v))
  loss = losses + WEIGHT_DECAY * tf.add_n(l2_loss)
  # Linear-scaling rule: scale the base LR with the global batch size.
  initial_learning_rate = FLAGS.learning_rate * FLAGS.train_batch_size / 256
  if FLAGS.use_learning_rate_warmup:
    # Adjust initial learning rate to match final warmup rate
    warmup_decay = FLAGS.learning_rate_decay**(
        (FLAGS.warmup_epochs + FLAGS.cold_epochs) /
        FLAGS.learning_rate_decay_epochs)
    adj_initial_learning_rate = initial_learning_rate * warmup_decay
  final_learning_rate = 0.0001 * initial_learning_rate
  host_call = None
  train_op = None
  if is_training:
    batches_per_epoch = _NUM_TRAIN_IMAGES / FLAGS.train_batch_size
    global_step = tf.train.get_or_create_global_step()
    current_epoch = tf.cast(
        (tf.cast(global_step, tf.float32) / batches_per_epoch), tf.int32)
    learning_rate = tf.train.exponential_decay(
        learning_rate=initial_learning_rate,
        global_step=global_step,
        decay_steps=int(FLAGS.learning_rate_decay_epochs * batches_per_epoch),
        decay_rate=FLAGS.learning_rate_decay,
        staircase=True)
    if FLAGS.use_learning_rate_warmup:
      # Piecewise schedule: constant `wlr` during the cold epochs, then a
      # linear ramp (`lin_inc_lr`) until exponential decay takes over at
      # `exp_decay_start`.
      wlr = 0.1 * adj_initial_learning_rate
      wlr_height = tf.cast(
          0.9 * adj_initial_learning_rate /
          (FLAGS.warmup_epochs + FLAGS.learning_rate_decay_epochs - 1),
          tf.float32)
      epoch_offset = tf.cast(FLAGS.cold_epochs - 1, tf.int32)
      exp_decay_start = (FLAGS.warmup_epochs + FLAGS.cold_epochs +
                         FLAGS.learning_rate_decay_epochs)
      lin_inc_lr = tf.add(
          wlr, tf.multiply(
              tf.cast(tf.subtract(current_epoch, epoch_offset), tf.float32),
              wlr_height))
      learning_rate = tf.where(
          tf.greater_equal(current_epoch, FLAGS.cold_epochs),
          (tf.where(tf.greater_equal(current_epoch, exp_decay_start),
                    learning_rate, lin_inc_lr)),
          wlr)
    # Set a minimum boundary for the learning rate.
    learning_rate = tf.maximum(
        learning_rate, final_learning_rate, name='learning_rate')
    if FLAGS.optimizer == 'sgd':
      tf.logging.info('Using SGD optimizer')
      optimizer = tf.train.GradientDescentOptimizer(
          learning_rate=learning_rate)
    elif FLAGS.optimizer == 'momentum':
      tf.logging.info('Using Momentum optimizer')
      optimizer = tf.train.MomentumOptimizer(
          learning_rate=learning_rate, momentum=0.9)
    elif FLAGS.optimizer == 'RMS':
      tf.logging.info('Using RMS optimizer')
      optimizer = tf.train.RMSPropOptimizer(
          learning_rate,
          RMSPROP_DECAY,
          momentum=RMSPROP_MOMENTUM,
          epsilon=RMSPROP_EPSILON)
    else:
      # NOTE(review): if tf.logging.fatal does not abort the process, an
      # unknown optimizer falls through and `optimizer` is unbound below;
      # the message also skips %-formatting for its extra arg — confirm.
      tf.logging.fatal('Unknown optimizer:', FLAGS.optimizer)
    if FLAGS.use_tpu:
      # Aggregate gradients across TPU shards.
      optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
    # Run batch-norm moving-average updates together with the train step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
      train_op = optimizer.minimize(loss, global_step=global_step)
    if FLAGS.moving_average:
      # Maintain EMA shadows of all trainable + moving-average variables;
      # the train op completes only after the EMA update.
      ema = tf.train.ExponentialMovingAverage(
          decay=MOVING_AVERAGE_DECAY, num_updates=global_step)
      variables_to_average = (
          tf.trainable_variables() + tf.moving_average_variables())
      with tf.control_dependencies([train_op]), tf.name_scope('moving_average'):
        train_op = ema.apply(variables_to_average)
    # To log the loss, current learning rate, and epoch for Tensorboard, the
    # summary op needs to be run on the host CPU via host_call. host_call
    # expects [batch_size, ...] Tensors, thus reshape to introduce a batch
    # dimension. These Tensors are implicitly concatenated to
    # [params['batch_size']].
    gs_t = tf.reshape(global_step, [1])
    loss_t = tf.reshape(loss, [1])
    lr_t = tf.reshape(learning_rate, [1])
    ce_t = tf.reshape(current_epoch, [1])
    if not FLAGS.skip_host_call:
      def host_call_fn(gs, loss, lr, ce):
        """Training host call. Creates scalar summaries for training metrics.
        This function is executed on the CPU and should not directly reference
        any Tensors in the rest of the `model_fn`. To pass Tensors from the
        model to the `metric_fn`, provide them as part of the `host_call`. See
        https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
        for more information.
        Arguments should match the list of `Tensor` objects passed as the second
        element in the tuple passed to `host_call`.
        Args:
          gs: `Tensor` with shape `[batch]` for the global_step
          loss: `Tensor` with shape `[batch]` for the training loss.
          lr: `Tensor` with shape `[batch]` for the learning_rate.
          ce: `Tensor` with shape `[batch]` for the current_epoch.
        Returns:
          List of summary ops to run on the CPU host.
        """
        gs = gs[0]
        with summary.create_file_writer(FLAGS.model_dir).as_default():
          with summary.always_record_summaries():
            summary.scalar('loss', tf.reduce_mean(loss), step=gs)
            summary.scalar('learning_rate', tf.reduce_mean(lr), step=gs)
            summary.scalar('current_epoch', tf.reduce_mean(ce), step=gs)
            return summary.all_summary_ops()
      host_call = (host_call_fn, [gs_t, loss_t, lr_t, ce_t])
  eval_metrics = None
  if is_eval:
    def metric_fn(labels, logits):
      """Evaluation metric function. Evaluates accuracy.
      This function is executed on the CPU and should not directly reference
      any Tensors in the rest of the `model_fn`. To pass Tensors from the model
      to the `metric_fn`, provide as part of the `eval_metrics`. See
      https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
      for more information.
      Arguments should match the list of `Tensor` objects passed as the second
      element in the tuple passed to `eval_metrics`.
      Args:
        labels: `Tensor` with shape `[batch, ]`.
        logits: `Tensor` with shape `[batch, num_classes]`.
      Returns:
        A dict of the metrics to return from evaluation.
      """
      predictions = tf.argmax(logits, axis=1)
      top_1_accuracy = tf.metrics.accuracy(labels, predictions)
      in_top_5 = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)
      top_5_accuracy = tf.metrics.mean(in_top_5)
      return {
          'accuracy': top_1_accuracy,
          'accuracy@5': top_5_accuracy,
      }
    eval_metrics = (metric_fn, [labels, logits])
  return tf.contrib.tpu.TPUEstimatorSpec(
      mode=mode,
      loss=loss,
      train_op=train_op,
      host_call=host_call,
      eval_metrics=eval_metrics)
class LoadEMAHook(tf.train.SessionRunHook):
  """Hook to load exponential moving averages into corresponding variables.

  Once the session exists, every variable tracked by the EMA is overwritten
  with its shadow (averaged) value from the latest checkpoint in `model_dir`.
  """

  def __init__(self, model_dir):
    super(LoadEMAHook, self).__init__()
    self._model_dir = model_dir

  def begin(self):
    # Build the restore function while the graph is still mutable.
    averager = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
    restore_map = averager.variables_to_restore()
    latest_ckpt = tf.train.latest_checkpoint(self._model_dir)
    self._load_ema = tf.contrib.framework.assign_from_checkpoint_fn(
        latest_ckpt, restore_map)

  def after_create_session(self, sess, coord):
    tf.logging.info('Reloading EMA...')
    self._load_ema(sess)
def main(unused_argv):
  """Builds the TPUEstimator and runs train / eval / train_and_eval.

  Mode is selected by --mode; after training, optionally exports a
  SavedModel when --export_dir is set.
  """
  del unused_argv  # Unused
  tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
      FLAGS.tpu,
      zone=FLAGS.tpu_zone,
      project=FLAGS.gcp_project)
  assert FLAGS.precision == 'bfloat16' or FLAGS.precision == 'float32', (
      'Invalid value for --precision flag; must be bfloat16 or float32.')
  tf.logging.info('Precision: %s', FLAGS.precision)
  # Identity permutations by default; replaced below when host-side
  # transposition is enabled (see tensor_transform_fn).
  params = {
      'input_perm': [0, 1, 2, 3],
      'output_perm': [0, 1, 2, 3],
  }
  batch_axis = 0
  if FLAGS.transpose_enabled:
    params['input_perm'] = [3, 0, 1, 2]
    params['output_perm'] = [1, 2, 3, 0]
    batch_axis = 3
  if FLAGS.eval_total_size > 0:
    eval_size = FLAGS.eval_total_size
  else:
    eval_size = _NUM_EVAL_IMAGES
  eval_steps = eval_size // FLAGS.eval_batch_size
  # In pure-eval mode, run exactly one eval pass per TPU loop iteration.
  iterations = (eval_steps if FLAGS.mode == 'eval' else
                FLAGS.iterations)
  eval_batch_size = (None if FLAGS.mode == 'train' else
                     FLAGS.eval_batch_size)
  tpu_config = tf.contrib.tpu.TPUConfig(
      iterations_per_loop=iterations, num_shards=FLAGS.num_shards)
  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      model_dir=FLAGS.model_dir,
      save_checkpoints_secs=FLAGS.save_checkpoints_secs,
      save_summary_steps=FLAGS.save_summary_steps,
      session_config=tf.ConfigProto(
          allow_soft_placement=True,
          log_device_placement=FLAGS.log_device_placement),
      tpu_config=tpu_config)
  inception_classifier = tf.contrib.tpu.TPUEstimator(
      model_fn=inception_model_fn,
      use_tpu=FLAGS.use_tpu,
      config=run_config,
      params=params,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=eval_batch_size,
      batch_axis=(batch_axis, 0))
  # Input pipelines are slightly different (with regards to shuffling and
  # preprocessing) between training and evaluation.
  use_bfloat16 = FLAGS.precision == 'bfloat16'
  imagenet_train = InputPipeline(
      is_training=True,
      data_dir=FLAGS.data_dir,
      use_bfloat16=use_bfloat16)
  imagenet_eval = InputPipeline(
      is_training=False,
      data_dir=FLAGS.data_dir,
      use_bfloat16=use_bfloat16)
  # When EMA is on, evaluate against the averaged weights, not the raw ones.
  if FLAGS.moving_average:
    eval_hooks = [LoadEMAHook(FLAGS.model_dir)]
  else:
    eval_hooks = []
  if FLAGS.mode == 'eval':
    # Run evaluation when there is a new checkpoint
    for checkpoint in evaluation.checkpoints_iterator(
        FLAGS.model_dir, timeout=FLAGS.eval_timeout):
      tf.logging.info('Starting to evaluate.')
      try:
        start_timestamp = time.time()  # Includes compilation time
        eval_results = inception_classifier.evaluate(
            input_fn=imagenet_eval.input_fn,
            steps=eval_steps,
            hooks=eval_hooks,
            checkpoint_path=checkpoint)
        elapsed_time = int(time.time() - start_timestamp)
        tf.logging.info(
            'Eval results: %s. Elapsed seconds: %d', eval_results, elapsed_time)
        # Terminate eval job when final checkpoint is reached; the step is
        # parsed from the checkpoint filename ('model.ckpt-<step>').
        current_step = int(os.path.basename(checkpoint).split('-')[1])
        if current_step >= FLAGS.train_steps:
          tf.logging.info(
              'Evaluation finished after training step %d', current_step)
          break
      except tf.errors.NotFoundError:
        # Since the coordinator is on a different job than the TPU worker,
        # sometimes the TPU worker does not finish initializing until long after
        # the CPU job tells it to start evaluating. In this case, the checkpoint
        # file could have been deleted already.
        tf.logging.info(
            'Checkpoint %s no longer exists, skipping checkpoint', checkpoint)
  elif FLAGS.mode == 'train_and_eval':
    # Alternate fixed-length training cycles with full evaluations.
    for cycle in range(FLAGS.train_steps // FLAGS.train_steps_per_eval):
      tf.logging.info('Starting training cycle %d.' % cycle)
      inception_classifier.train(
          input_fn=imagenet_train.input_fn, steps=FLAGS.train_steps_per_eval)
      tf.logging.info('Starting evaluation cycle %d .' % cycle)
      eval_results = inception_classifier.evaluate(
          input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)
      tf.logging.info('Evaluation results: %s' % eval_results)
  else:
    tf.logging.info('Starting training ...')
    inception_classifier.train(
        input_fn=imagenet_train.input_fn, max_steps=FLAGS.train_steps)
  if FLAGS.export_dir is not None:
    tf.logging.info('Starting to export model.')
    inception_classifier.export_saved_model(
        export_dir_base=FLAGS.export_dir,
        serving_input_receiver_fn=image_serving_input_fn)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main) | 0.680772 | 0.200812 |
import tensorflow as tf
import awesome_gans.modules as t
tf.set_random_seed(777) # reproducibility
class LSGAN:
    """Least-Squares GAN (LSGAN) with a DCGAN-style conv generator/discriminator."""

    def __init__(
        self,
        s,
        batch_size=64,
        height=32,
        width=32,
        channel=3,
        n_classes=10,
        sample_num=10 * 10,
        sample_size=10,
        df_dim=64,
        gf_dim=64,
        fc_unit=1024,
        z_dim=128,
        lr=2e-4,
    ):
        """
        # General Settings
        :param s: TF Session
        :param batch_size: training batch size, default 64
        :param height: input image height, default 32
        :param width: input image width, default 32
        :param channel: input image channel, default 3 (RGB)
        :param n_classes: input DataSet's classes
        # Output Settings
        :param sample_num: the number of output images, default 100
        :param sample_size: sample image size, default 10
        # For CNN model
        :param df_dim: discriminator filter, default 64
        :param gf_dim: generator filter, default 64
        :param fc_unit: the number of fully connected filters, default 1024
        # Training Option
        :param z_dim: z dimension (kinda noise), default 128
        :param lr: learning rate, default 2e-4
        """
        self.s = s
        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.channel = channel
        self.image_shape = [self.batch_size, self.height, self.width, self.channel]
        self.n_classes = n_classes
        self.sample_num = sample_num
        self.sample_size = sample_size
        self.df_dim = df_dim
        self.gf_dim = gf_dim
        self.fc_unit = fc_unit
        self.z_dim = z_dim
        # Adam beta1; 0.5 is the usual DCGAN-family choice.
        self.beta1 = 0.5
        self.lr = lr
        # pre-defined (populated by build_lsgan)
        self.g_loss = 0.0
        self.d_loss = 0.0
        self.g = None
        self.d_op = None
        self.g_op = None
        self.merged = None
        self.writer = None
        self.saver = None
        # Placeholder
        self.x = tf.placeholder(
            tf.float32, shape=[None, self.height, self.width, self.channel], name="x-image"
        )  # (-1, 32, 32, 3) by default
        self.z = tf.placeholder(tf.float32, shape=[None, self.z_dim], name='z-noise')  # (-1, 128)
        self.build_lsgan()  # build LSGAN model

    def discriminator(self, x, reuse=None):
        """ Same as DCGAN Disc Net.

        :param x: image batch (N, H, W, C)
        :param reuse: pass True to share variables on repeated calls
        :return: (sigmoid probability, raw logits) for "x is real"
        """
        with tf.variable_scope('discriminator', reuse=reuse):
            # Three stride-2 convs halve the spatial size each time;
            # the first conv has no batch norm, as in DCGAN.
            x = t.conv2d(x, self.df_dim * 1, 5, 2, name='disc-conv2d-1')
            x = tf.nn.leaky_relu(x)
            x = t.conv2d(x, self.df_dim * 2, 5, 2, name='disc-conv2d-2')
            x = t.batch_norm(x, name='disc-bn-1')
            x = tf.nn.leaky_relu(x)
            x = t.conv2d(x, self.df_dim * 4, 5, 2, name='disc-conv2d-3')
            x = t.batch_norm(x, name='disc-bn-2')
            x = tf.nn.leaky_relu(x)
            x = tf.layers.flatten(x)
            logits = t.dense(x, 1, name='disc-fc-1')
            prob = tf.nn.sigmoid(logits)
        return prob, logits

    def generator(self, z, reuse=None, is_train=True):
        """ Same as DCGAN Gen Net.

        :param z: noise batch (N, z_dim)
        :param reuse: pass True to share variables on repeated calls
        :param is_train: batch-norm training mode
        :return: tanh image batch in [-1, 1]
        """
        with tf.variable_scope('generator', reuse=reuse):
            # Project noise to a 4x4 feature map, then upsample 3x by
            # stride-2 deconvs: 4 -> 8 -> 16 -> 32.
            x = t.dense(z, self.gf_dim * 4 * 4 * 4, name='gen-fc-1')
            x = tf.reshape(x, [-1, 4, 4, self.gf_dim * 4])
            x = t.batch_norm(x, is_train=is_train, name='gen-bn-1')
            x = tf.nn.relu(x)
            x = t.deconv2d(x, self.gf_dim * 2, 5, 2, name='gen-deconv2d-1')
            x = t.batch_norm(x, is_train=is_train, name='gen-bn-2')
            x = tf.nn.relu(x)
            x = t.deconv2d(x, self.gf_dim * 1, 5, 2, name='gen-deconv2d-2')
            x = t.batch_norm(x, is_train=is_train, name='gen-bn-3')
            x = tf.nn.relu(x)
            x = t.deconv2d(x, self.channel, 5, 2, name='gen-deconv2d-3')
            x = tf.nn.tanh(x)
        return x

    def build_lsgan(self):
        """Wire up generator, discriminator, losses, optimizers, summaries."""
        # Generator
        self.g = self.generator(self.z)
        # Discriminator
        # NOTE(review): discriminator() returns a (prob, logits) tuple and the
        # tuple itself is fed to t.mse_loss / tf.ones_like below — confirm
        # t.mse_loss accepts this, otherwise the logits should be unpacked.
        d_real = self.discriminator(self.x)
        d_fake = self.discriminator(self.g, reuse=True)
        # LSGAN Loss: least-squares targets, 1 for real and 0 for fake.
        d_real_loss = t.mse_loss(d_real, tf.ones_like(d_real), self.batch_size)
        d_fake_loss = t.mse_loss(d_fake, tf.zeros_like(d_fake), self.batch_size)
        self.d_loss = (d_real_loss + d_fake_loss) / 2.0
        self.g_loss = t.mse_loss(d_fake, tf.ones_like(d_fake), self.batch_size)
        # Summary
        tf.summary.scalar("loss/d_real_loss", d_real_loss)
        tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)
        # optimizer
        # Variable names begin with their scope ('discriminator/...',
        # 'generator/...'), so the 'd'/'g' prefix split selects each net.
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]
        self.d_op = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=self.beta1).minimize(
            self.d_loss, var_list=d_params
        )
        self.g_op = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=self.beta1).minimize(
            self.g_loss, var_list=g_params
        )
        # Merge summary
        self.merged = tf.summary.merge_all()
        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
self.writer = tf.summary.FileWriter('./model/', self.s.graph) | awesome_gans/lsgan/lsgan_model.py | import tensorflow as tf
import awesome_gans.modules as t
tf.set_random_seed(777) # reproducibility
class LSGAN:
    """Least-Squares GAN (LSGAN) with a DCGAN-style conv net.

    NOTE(review): this is a byte-for-byte duplicate of the LSGAN class that
    appears earlier in this file — keep the two in sync (or deduplicate).
    """

    def __init__(
        self,
        s,
        batch_size=64,
        height=32,
        width=32,
        channel=3,
        n_classes=10,
        sample_num=10 * 10,
        sample_size=10,
        df_dim=64,
        gf_dim=64,
        fc_unit=1024,
        z_dim=128,
        lr=2e-4,
    ):
        """
        # General Settings
        :param s: TF Session
        :param batch_size: training batch size, default 64
        :param height: input image height, default 32
        :param width: input image width, default 32
        :param channel: input image channel, default 3 (RGB)
        :param n_classes: input DataSet's classes
        # Output Settings
        :param sample_num: the number of output images, default 100
        :param sample_size: sample image size, default 10
        # For CNN model
        :param df_dim: discriminator filter, default 64
        :param gf_dim: generator filter, default 64
        :param fc_unit: the number of fully connected filters, default 1024
        # Training Option
        :param z_dim: z dimension (kinda noise), default 128
        :param lr: learning rate, default 2e-4
        """
        self.s = s
        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.channel = channel
        self.image_shape = [self.batch_size, self.height, self.width, self.channel]
        self.n_classes = n_classes
        self.sample_num = sample_num
        self.sample_size = sample_size
        self.df_dim = df_dim
        self.gf_dim = gf_dim
        self.fc_unit = fc_unit
        self.z_dim = z_dim
        self.beta1 = 0.5
        self.lr = lr
        # pre-defined (populated by build_lsgan)
        self.g_loss = 0.0
        self.d_loss = 0.0
        self.g = None
        self.d_op = None
        self.g_op = None
        self.merged = None
        self.writer = None
        self.saver = None
        # Placeholder
        self.x = tf.placeholder(
            tf.float32, shape=[None, self.height, self.width, self.channel], name="x-image"
        )  # (-1, 32, 32, 3) by default
        self.z = tf.placeholder(tf.float32, shape=[None, self.z_dim], name='z-noise')  # (-1, 128)
        self.build_lsgan()  # build LSGAN model

    def discriminator(self, x, reuse=None):
        """ Same as DCGAN Disc Net: returns (sigmoid prob, raw logits). """
        with tf.variable_scope('discriminator', reuse=reuse):
            x = t.conv2d(x, self.df_dim * 1, 5, 2, name='disc-conv2d-1')
            x = tf.nn.leaky_relu(x)
            x = t.conv2d(x, self.df_dim * 2, 5, 2, name='disc-conv2d-2')
            x = t.batch_norm(x, name='disc-bn-1')
            x = tf.nn.leaky_relu(x)
            x = t.conv2d(x, self.df_dim * 4, 5, 2, name='disc-conv2d-3')
            x = t.batch_norm(x, name='disc-bn-2')
            x = tf.nn.leaky_relu(x)
            x = tf.layers.flatten(x)
            logits = t.dense(x, 1, name='disc-fc-1')
            prob = tf.nn.sigmoid(logits)
        return prob, logits

    def generator(self, z, reuse=None, is_train=True):
        """ Same as DCGAN Gen Net: project to 4x4, upsample to 32x32, tanh. """
        with tf.variable_scope('generator', reuse=reuse):
            x = t.dense(z, self.gf_dim * 4 * 4 * 4, name='gen-fc-1')
            x = tf.reshape(x, [-1, 4, 4, self.gf_dim * 4])
            x = t.batch_norm(x, is_train=is_train, name='gen-bn-1')
            x = tf.nn.relu(x)
            x = t.deconv2d(x, self.gf_dim * 2, 5, 2, name='gen-deconv2d-1')
            x = t.batch_norm(x, is_train=is_train, name='gen-bn-2')
            x = tf.nn.relu(x)
            x = t.deconv2d(x, self.gf_dim * 1, 5, 2, name='gen-deconv2d-2')
            x = t.batch_norm(x, is_train=is_train, name='gen-bn-3')
            x = tf.nn.relu(x)
            x = t.deconv2d(x, self.channel, 5, 2, name='gen-deconv2d-3')
            x = tf.nn.tanh(x)
        return x

    def build_lsgan(self):
        """Wire up generator, discriminator, losses, optimizers, summaries."""
        # Generator
        self.g = self.generator(self.z)
        # Discriminator
        # NOTE(review): discriminator() returns a (prob, logits) tuple that is
        # fed directly into t.mse_loss / tf.ones_like — verify t.mse_loss
        # accepts tuples, otherwise unpack the logits here.
        d_real = self.discriminator(self.x)
        d_fake = self.discriminator(self.g, reuse=True)
        # LSGAN Loss: least-squares targets, 1 for real and 0 for fake.
        d_real_loss = t.mse_loss(d_real, tf.ones_like(d_real), self.batch_size)
        d_fake_loss = t.mse_loss(d_fake, tf.zeros_like(d_fake), self.batch_size)
        self.d_loss = (d_real_loss + d_fake_loss) / 2.0
        self.g_loss = t.mse_loss(d_fake, tf.ones_like(d_fake), self.batch_size)
        # Summary
        tf.summary.scalar("loss/d_real_loss", d_real_loss)
        tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)
        # optimizer
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]
        self.d_op = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=self.beta1).minimize(
            self.d_loss, var_list=d_params
        )
        self.g_op = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=self.beta1).minimize(
            self.g_loss, var_list=g_params
        )
        # Merge summary
        self.merged = tf.summary.merge_all()
        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
self.writer = tf.summary.FileWriter('./model/', self.s.graph) | 0.845113 | 0.328826 |
import numpy as np
import acl
import atlas_utils.utils as utils
from atlas_utils.acl_image import AclImage
from atlas_utils.acl_logger import log_error, log_info
from atlas_utils.resource_list import resource_list
import atlas_utils.constants as constants
class Dvpp(object):
"""
dvpp class
"""
def __init__(self, acl_resource=None):
if acl_resource is None:
self._stream, ret = acl.rt.create_stream()
utils.check_ret("acl.rt.create_stream", ret)
self._run_mode, ret = acl.rt.get_run_mode()
utils.check_ret("acl.rt.get_run_mode", ret)
else:
self._stream = acl_resource.stream
self._run_mode = acl_resource.run_mode
self._dvpp_channel_desc = None
self._crop_config = None
self._paste_config = None
self._init_resource()
# Dvpp involves acl resources, which need to be released \
# before the acl ends when the program exits, \
# register here to the resource table to ensure the release timing
self._is_destroyed = False
resource_list.register(self)
def _init_resource(self):
# Create dvpp channel
self._dvpp_channel_desc = acl.media.dvpp_create_channel_desc()
ret = acl.media.dvpp_create_channel(self._dvpp_channel_desc)
utils.check_ret("acl.media.dvpp_create_channel", ret)
# Create a resize configuration
self._resize_config = acl.media.dvpp_create_resize_config()
# Create yuv to jpeg configuration
self._jpege_config = acl.media.dvpp_create_jpege_config()
ret = acl.media.dvpp_set_jpege_config_level(self._jpege_config, 100)
utils.check_ret("acl.media.dvpp_set_jpege_config_level", ret)
def _gen_input_pic_desc(self, image,
width_align_factor=16, height_align_factor=2):
# Create input image
stride_width = utils.align_up(image.width, width_align_factor)
stride_height = utils.align_up(image.height, height_align_factor)
pic_desc = acl.media.dvpp_create_pic_desc()
acl.media.dvpp_set_pic_desc_data(pic_desc, image.data())
acl.media.dvpp_set_pic_desc_format(
pic_desc, constants.PIXEL_FORMAT_YUV_SEMIPLANAR_420)
acl.media.dvpp_set_pic_desc_width(pic_desc, image.width)
acl.media.dvpp_set_pic_desc_height(pic_desc, image.height)
acl.media.dvpp_set_pic_desc_width_stride(pic_desc, stride_width)
acl.media.dvpp_set_pic_desc_height_stride(pic_desc, stride_height)
acl.media.dvpp_set_pic_desc_size(pic_desc, image.size)
return pic_desc
def _gen_output_pic_desc(self, width, height,
output_buffer, output_buffer_size,
width_align_factor=16, height_align_factor=2):
# Create output image
stride_width = utils.align_up(width, width_align_factor)
stride_height = utils.align_up(height, height_align_factor)
pic_desc = acl.media.dvpp_create_pic_desc()
acl.media.dvpp_set_pic_desc_data(pic_desc, output_buffer)
acl.media.dvpp_set_pic_desc_format(
pic_desc, constants.PIXEL_FORMAT_YUV_SEMIPLANAR_420)
acl.media.dvpp_set_pic_desc_width(pic_desc, width)
acl.media.dvpp_set_pic_desc_height(pic_desc, height)
acl.media.dvpp_set_pic_desc_width_stride(pic_desc, stride_width)
acl.media.dvpp_set_pic_desc_height_stride(pic_desc, stride_height)
acl.media.dvpp_set_pic_desc_size(pic_desc, output_buffer_size)
return pic_desc
def _stride_yuv_size(self, width, height,
width_align_factor=16, height_align_factor=2):
stride_width = utils.align_up(width, width_align_factor)
stride_height = utils.align_up(height, height_align_factor)
stride_size = utils.yuv420sp_size(stride_width, stride_height)
return stride_width, stride_height, stride_size
def jpegd(self, image):
"""
jepg image to yuv image
"""
# Create conversion output image desc
output_desc, out_buffer = self._gen_jpegd_out_pic_desc(image)
ret = acl.media.dvpp_jpeg_decode_async(self._dvpp_channel_desc,
image.data(),
image.size,
output_desc,
self._stream)
if ret != constants.ACL_ERROR_NONE:
log_error("dvpp_jpeg_decode_async failed ret={}".format(ret))
return None
ret = acl.rt.synchronize_stream(self._stream)
if ret != constants.ACL_ERROR_NONE:
log_error("dvpp_jpeg_decode_async failed ret={}".format(ret))
return None
# Return the decoded AclImage instance
stride_width = utils.align_up128(image.width)
stride_height = utils.align_up16(image.height)
stride_size = utils.yuv420sp_size(stride_width, stride_height)
return AclImage(out_buffer, stride_width,
stride_height, stride_size, constants.MEMORY_DVPP)
def _gen_jpegd_out_pic_desc(self, image):
# Predict the memory size required to decode jpeg into yuv pictures
ret, out_buffer_size = self._get_jpegd_memory_size(image)
if not ret:
return None
# Apply for memory for storing decoded yuv pictures
out_buffer, ret = acl.media.dvpp_malloc(out_buffer_size)
if ret != constants.ACL_ERROR_NONE:
log_error("Dvpp malloc failed, error: ", ret)
return None
# Create output image desc
pic_desc = self._gen_output_pic_desc(
image.width,
image.height,
out_buffer,
out_buffer_size,
width_align_factor=128,
height_align_factor=16)
return pic_desc, out_buffer
def _get_jpegd_memory_size(self, image):
if image.is_local():
size, ret = acl.media.dvpp_jpeg_predict_dec_size(
image.data(), image.size, constants.PIXEL_FORMAT_YUV_SEMIPLANAR_420)
if ret != constants.ACL_ERROR_NONE:
log_error("Predict jpeg decode size failed, return ", ret)
return False, 0
return True, size
else:
return True, int(
utils.yuv420sp_size(
image.width, image.height) * 3)
def resize(self, image, resize_width, resize_height):
"""
Scale yuvsp420 picture to specified size
"""
# Generate input picture desc
input_desc = self._gen_input_pic_desc(image)
# Calculate the image size after scaling
stride_width = utils.align_up16(resize_width)
stride_height = utils.align_up2(resize_height)
output_size = utils.yuv420sp_size(stride_width, stride_height)
# Request memory for the zoomed picture
out_buffer, ret = acl.media.dvpp_malloc(output_size)
if ret != constants.ACL_ERROR_NONE:
log_error("Dvpp malloc failed, error: ", ret)
return None
# Create output image
output_desc = self._gen_output_pic_desc(resize_width, resize_height,
out_buffer, output_size)
if output_desc is None:
log_error("Gen resize output desc failed")
return None
# Call dvpp asynchronous zoom interface to zoom pictures
ret = acl.media.dvpp_vpc_resize_async(self._dvpp_channel_desc,
input_desc,
output_desc,
self._resize_config,
self._stream)
if ret != constants.ACL_ERROR_NONE:
log_error("Vpc resize async failed, error: ", ret)
return None
# Wait for the zoom operation to complete
ret = acl.rt.synchronize_stream(self._stream)
if ret != constants.ACL_ERROR_NONE:
log_error("Resize synchronize stream failed, error: ", ret)
return None
# Release the resources requested for scaling
acl.media.dvpp_destroy_pic_desc(input_desc)
acl.media.dvpp_destroy_pic_desc(output_desc)
return AclImage(out_buffer, stride_width,
stride_height, output_size, constants.MEMORY_DVPP)
def _gen_resize_out_pic_desc(self, resize_width,
resize_height, output_size):
out_buffer, ret = acl.media.dvpp_malloc(output_size)
if ret != constants.ACL_ERROR_NONE:
log_error("Dvpp malloc failed, error: ", ret)
return None
pic_desc = self._gen_output_pic_desc(resize_width, resize_height,
out_buffer, output_size)
return pic_desc, out_buffer
def crop_and_paste(
self,
image,
width,
height,
crop_and_paste_width,
crop_and_paste_height):
"""
crop_and_paste
"""
# print('[Dvpp] vpc crop and paste stage:')
input_desc = self._gen_input_pic_desc(image)
stride_width = utils.align_up16(crop_and_paste_width)
stride_height = utils.align_up2(crop_and_paste_height)
out_buffer_size = utils.yuv420sp_size(stride_width, stride_height)
out_buffer, ret = acl.media.dvpp_malloc(out_buffer_size)
output_desc = self._gen_output_pic_desc(
crop_and_paste_width,
crop_and_paste_height,
out_buffer,
out_buffer_size)
self._crop_config = acl.media.dvpp_create_roi_config(
0, (width >> 1 << 1) - 1, 0, (height >> 1 << 1) - 1)
# set crop area:
rx = float(width) / float(crop_and_paste_width)
ry = float(height) / float(crop_and_paste_height)
if rx > ry:
dx = 0
r = rx
dy = int((crop_and_paste_height - height / r) / 2)
else:
dy = 0
r = ry
dx = int((crop_and_paste_width - width / r) / 2)
pasteRightOffset = int(crop_and_paste_width - 2 * dx)
pasteBottomOffset = int(crop_and_paste_height - 2 * dy)
if (pasteRightOffset % 2) == 0:
pasteRightOffset = pasteRightOffset - 1
if (pasteBottomOffset % 2) == 0:
pasteBottomOffset = pasteBottomOffset - 1
self._paste_config = acl.media.dvpp_create_roi_config(
0, pasteRightOffset, 0, pasteBottomOffset)
ret = acl.media.dvpp_vpc_crop_and_paste_async(self._dvpp_channel_desc,
input_desc,
output_desc,
self._crop_config,
self._paste_config,
self._stream)
utils.check_ret("acl.media.dvpp_vpc_crop_and_paste_async", ret)
ret = acl.rt.synchronize_stream(self._stream)
utils.check_ret("acl.rt.synchronize_stream", ret)
# print('[Dvpp] vpc crop and paste stage success')
stride_width = utils.align_up16(crop_and_paste_width)
stride_height = utils.align_up2(crop_and_paste_height)
return AclImage(out_buffer, stride_width,
stride_height, out_buffer_size, constants.MEMORY_DVPP)
def crop_and_paste_get_roi(
self,
image,
width,
height,
crop_and_paste_width,
crop_and_paste_height):
"""
:image: input image
:width: input image width
:height: input image height
:crop_and_paste_width: crop_and_paste_width
:crop_and_paste_height: crop_and_paste_height
:return: return AclImage
"""
# print('[Dvpp] vpc crop and paste stage:')
input_desc = self._gen_input_pic_desc(image)
stride_width = utils.align_up16(crop_and_paste_width)
stride_height = utils.align_up2(crop_and_paste_height)
out_buffer_size = utils.yuv420sp_size(stride_width, stride_height)
out_buffer, ret = acl.media.dvpp_malloc(out_buffer_size)
output_desc = self._gen_output_pic_desc(
crop_and_paste_width,
crop_and_paste_height,
out_buffer,
out_buffer_size)
self._crop_config = acl.media.dvpp_create_roi_config(
0, (width >> 1 << 1) - 1, 0, (height >> 1 << 1) - 1)
self._paste_config = acl.media.dvpp_create_roi_config(
0, crop_and_paste_width - 1, 0, crop_and_paste_height - 1)
ret = acl.media.dvpp_vpc_crop_and_paste_async(self._dvpp_channel_desc,
input_desc,
output_desc,
self._crop_config,
self._paste_config,
self._stream)
utils.check_ret("acl.media.dvpp_vpc_crop_and_paste_async", ret)
ret = acl.rt.synchronize_stream(self._stream)
utils.check_ret("acl.rt.synchronize_stream", ret)
# print('[Dvpp] vpc crop and paste stage success')
stride_width = utils.align_up16(crop_and_paste_width)
stride_height = utils.align_up2(crop_and_paste_height)
return AclImage(out_buffer, stride_width,
stride_height, out_buffer_size, constants.MEMORY_DVPP)
def jpege(self, image):
    """
    Convert a yuv420sp picture to a jpeg picture.

    :image: input yuv420sp image
    :return: AclImage holding the encoded jpeg, or None on failure
    """
    # create input image
    input_desc = self._gen_input_pic_desc(image)
    # Predict the memory size required for conversion
    output_size, ret = acl.media.dvpp_jpeg_predict_enc_size(
        input_desc, self._jpege_config)
    if ret != constants.ACL_ERROR_NONE:
        log_error("Predict jpege output size failed")
        # Fix: release the input descriptor on every error path
        acl.media.dvpp_destroy_pic_desc(input_desc)
        return None
    # Request memory required for conversion
    output_buffer, ret = acl.media.dvpp_malloc(output_size)
    if ret != constants.ACL_ERROR_NONE:
        log_error("Malloc jpege output memory failed")
        acl.media.dvpp_destroy_pic_desc(input_desc)
        return None
    # The encoder updates the actual encoded size through this pointer
    output_size_array = np.array([output_size], dtype=np.int32)
    output_size_ptr = acl.util.numpy_to_ptr(output_size_array)
    # Call jpege asynchronous interface to convert pictures
    ret = acl.media.dvpp_jpeg_encode_async(self._dvpp_channel_desc,
                                           input_desc, output_buffer,
                                           output_size_ptr,
                                           self._jpege_config,
                                           self._stream)
    if ret != constants.ACL_ERROR_NONE:
        log_error("Jpege failed, ret ", ret)
        # Fix: free the output buffer that would otherwise leak
        acl.media.dvpp_free(output_buffer)
        acl.media.dvpp_destroy_pic_desc(input_desc)
        return None
    # Wait for the conversion to complete
    ret = acl.rt.synchronize_stream(self._stream)
    if ret != constants.ACL_ERROR_NONE:
        # Fix: was print(); use the module logger like every other path
        log_error("Jpege synchronize stream, failed, ret ", ret)
        acl.media.dvpp_free(output_buffer)
        acl.media.dvpp_destroy_pic_desc(input_desc)
        return None
    # Release resources
    acl.media.dvpp_destroy_pic_desc(input_desc)
    return AclImage(
        output_buffer, image.width, image.height, int(
            output_size_array[0]), constants.MEMORY_DVPP)
def destroy(self):
    """
    dvpp resource release

    Idempotent: only the first call releases anything. Frees the resize
    and jpege configurations, then the dvpp channel, and unregisters
    this instance from the global resource table.
    """
    # Guard against double release (called from both user code and __del__)
    if self._is_destroyed:
        return
    if self._resize_config:
        acl.media.dvpp_destroy_resize_config(self._resize_config)
    if self._dvpp_channel_desc:
        # The channel must be destroyed before its descriptor
        acl.media.dvpp_destroy_channel(self._dvpp_channel_desc)
        acl.media.dvpp_destroy_channel_desc(self._dvpp_channel_desc)
    if self._jpege_config:
        acl.media.dvpp_destroy_jpege_config(self._jpege_config)
    self._is_destroyed = True
    resource_list.unregister(self)
    log_info("dvpp resource release success")
def __del__(self):
    # Best-effort cleanup in case the owner never called destroy() itself;
    # destroy() is idempotent, so an explicit earlier call is harmless.
    self.destroy()
import numpy as np
import acl
import atlas_utils.utils as utils
from atlas_utils.acl_image import AclImage
from atlas_utils.acl_logger import log_error, log_info
from atlas_utils.resource_list import resource_list
import atlas_utils.constants as constants
class Dvpp(object):
    """
    Wrapper around the Ascend ACL DVPP media interfaces: jpeg
    decode/encode, resize and crop&paste of yuv420sp pictures.
    """
    def __init__(self, acl_resource=None):
        # Reuse the caller's stream/run mode when an acl resource is
        # supplied; otherwise create a private stream for this instance.
        if acl_resource is None:
            self._stream, ret = acl.rt.create_stream()
            utils.check_ret("acl.rt.create_stream", ret)
            self._run_mode, ret = acl.rt.get_run_mode()
            utils.check_ret("acl.rt.get_run_mode", ret)
        else:
            self._stream = acl_resource.stream
            self._run_mode = acl_resource.run_mode
        self._dvpp_channel_desc = None
        self._crop_config = None
        self._paste_config = None
        self._init_resource()
        # Dvpp involves acl resources, which need to be released \
        # before the acl ends when the program exits, \
        # register here to the resource table to ensure the release timing
        self._is_destroyed = False
        resource_list.register(self)

    def _init_resource(self):
        """Create the dvpp channel plus the resize and jpege configs."""
        # Create dvpp channel
        self._dvpp_channel_desc = acl.media.dvpp_create_channel_desc()
        ret = acl.media.dvpp_create_channel(self._dvpp_channel_desc)
        utils.check_ret("acl.media.dvpp_create_channel", ret)
        # Create a resize configuration
        self._resize_config = acl.media.dvpp_create_resize_config()
        # Create yuv to jpeg configuration (encode quality level 100)
        self._jpege_config = acl.media.dvpp_create_jpege_config()
        ret = acl.media.dvpp_set_jpege_config_level(self._jpege_config, 100)
        utils.check_ret("acl.media.dvpp_set_jpege_config_level", ret)

    def _gen_input_pic_desc(self, image,
                            width_align_factor=16, height_align_factor=2):
        """Build a dvpp picture descriptor for *image* (yuv420sp input)."""
        # Create input image
        stride_width = utils.align_up(image.width, width_align_factor)
        stride_height = utils.align_up(image.height, height_align_factor)
        pic_desc = acl.media.dvpp_create_pic_desc()
        acl.media.dvpp_set_pic_desc_data(pic_desc, image.data())
        acl.media.dvpp_set_pic_desc_format(
            pic_desc, constants.PIXEL_FORMAT_YUV_SEMIPLANAR_420)
        acl.media.dvpp_set_pic_desc_width(pic_desc, image.width)
        acl.media.dvpp_set_pic_desc_height(pic_desc, image.height)
        acl.media.dvpp_set_pic_desc_width_stride(pic_desc, stride_width)
        acl.media.dvpp_set_pic_desc_height_stride(pic_desc, stride_height)
        acl.media.dvpp_set_pic_desc_size(pic_desc, image.size)
        return pic_desc

    def _gen_output_pic_desc(self, width, height,
                             output_buffer, output_buffer_size,
                             width_align_factor=16, height_align_factor=2):
        """Build a dvpp picture descriptor over an output buffer."""
        # Create output image
        stride_width = utils.align_up(width, width_align_factor)
        stride_height = utils.align_up(height, height_align_factor)
        pic_desc = acl.media.dvpp_create_pic_desc()
        acl.media.dvpp_set_pic_desc_data(pic_desc, output_buffer)
        acl.media.dvpp_set_pic_desc_format(
            pic_desc, constants.PIXEL_FORMAT_YUV_SEMIPLANAR_420)
        acl.media.dvpp_set_pic_desc_width(pic_desc, width)
        acl.media.dvpp_set_pic_desc_height(pic_desc, height)
        acl.media.dvpp_set_pic_desc_width_stride(pic_desc, stride_width)
        acl.media.dvpp_set_pic_desc_height_stride(pic_desc, stride_height)
        acl.media.dvpp_set_pic_desc_size(pic_desc, output_buffer_size)
        return pic_desc

    def _stride_yuv_size(self, width, height,
                         width_align_factor=16, height_align_factor=2):
        """Return (stride_width, stride_height, buffer_size) for a
        yuv420sp picture of the given logical size."""
        stride_width = utils.align_up(width, width_align_factor)
        stride_height = utils.align_up(height, height_align_factor)
        stride_size = utils.yuv420sp_size(stride_width, stride_height)
        return stride_width, stride_height, stride_size

    def jpegd(self, image):
        """
        Decode a jpeg image into a yuv420sp image.

        :image: input jpeg image
        :return: decoded AclImage, or None on failure
        """
        # Create conversion output image desc
        output_desc, out_buffer = self._gen_jpegd_out_pic_desc(image)
        ret = acl.media.dvpp_jpeg_decode_async(self._dvpp_channel_desc,
                                               image.data(),
                                               image.size,
                                               output_desc,
                                               self._stream)
        if ret != constants.ACL_ERROR_NONE:
            log_error("dvpp_jpeg_decode_async failed ret={}".format(ret))
            return None
        ret = acl.rt.synchronize_stream(self._stream)
        if ret != constants.ACL_ERROR_NONE:
            # NOTE(review): this message reuses the decode_async text even
            # though the failure is in synchronize_stream
            log_error("dvpp_jpeg_decode_async failed ret={}".format(ret))
            return None
        # Return the decoded AclImage instance
        # jpegd output strides are 128-aligned width / 16-aligned height
        stride_width = utils.align_up128(image.width)
        stride_height = utils.align_up16(image.height)
        stride_size = utils.yuv420sp_size(stride_width, stride_height)
        return AclImage(out_buffer, stride_width,
                        stride_height, stride_size, constants.MEMORY_DVPP)

    def _gen_jpegd_out_pic_desc(self, image):
        """Allocate the decode output buffer and build its descriptor.

        Returns (pic_desc, out_buffer) on success.
        NOTE(review): error paths return a bare None, but jpegd() unpacks
        two values — a failure here would raise TypeError; confirm intent.
        """
        # Predict the memory size required to decode jpeg into yuv pictures
        ret, out_buffer_size = self._get_jpegd_memory_size(image)
        if not ret:
            return None
        # Apply for memory for storing decoded yuv pictures
        out_buffer, ret = acl.media.dvpp_malloc(out_buffer_size)
        if ret != constants.ACL_ERROR_NONE:
            log_error("Dvpp malloc failed, error: ", ret)
            return None
        # Create output image desc
        pic_desc = self._gen_output_pic_desc(
            image.width,
            image.height,
            out_buffer,
            out_buffer_size,
            width_align_factor=128,
            height_align_factor=16)
        return pic_desc, out_buffer

    def _get_jpegd_memory_size(self, image):
        """Return (ok, size) for the decode output buffer.

        Local images are measured via the dvpp predictor; otherwise a
        conservative 3x yuv420sp estimate is used.
        """
        if image.is_local():
            size, ret = acl.media.dvpp_jpeg_predict_dec_size(
                image.data(), image.size, constants.PIXEL_FORMAT_YUV_SEMIPLANAR_420)
            if ret != constants.ACL_ERROR_NONE:
                log_error("Predict jpeg decode size failed, return ", ret)
                return False, 0
            return True, size
        else:
            return True, int(
                utils.yuv420sp_size(
                    image.width, image.height) * 3)

    def resize(self, image, resize_width, resize_height):
        """
        Scale yuvsp420 picture to specified size
        """
        # Generate input picture desc
        input_desc = self._gen_input_pic_desc(image)
        # Calculate the image size after scaling
        stride_width = utils.align_up16(resize_width)
        stride_height = utils.align_up2(resize_height)
        output_size = utils.yuv420sp_size(stride_width, stride_height)
        # Request memory for the zoomed picture
        out_buffer, ret = acl.media.dvpp_malloc(output_size)
        if ret != constants.ACL_ERROR_NONE:
            log_error("Dvpp malloc failed, error: ", ret)
            return None
        # Create output image
        output_desc = self._gen_output_pic_desc(resize_width, resize_height,
                                                out_buffer, output_size)
        if output_desc is None:
            log_error("Gen resize output desc failed")
            return None
        # Call dvpp asynchronous zoom interface to zoom pictures
        ret = acl.media.dvpp_vpc_resize_async(self._dvpp_channel_desc,
                                              input_desc,
                                              output_desc,
                                              self._resize_config,
                                              self._stream)
        if ret != constants.ACL_ERROR_NONE:
            log_error("Vpc resize async failed, error: ", ret)
            return None
        # Wait for the zoom operation to complete
        ret = acl.rt.synchronize_stream(self._stream)
        if ret != constants.ACL_ERROR_NONE:
            log_error("Resize synchronize stream failed, error: ", ret)
            return None
        # Release the resources requested for scaling
        acl.media.dvpp_destroy_pic_desc(input_desc)
        acl.media.dvpp_destroy_pic_desc(output_desc)
        return AclImage(out_buffer, stride_width,
                        stride_height, output_size, constants.MEMORY_DVPP)

    def _gen_resize_out_pic_desc(self, resize_width,
                                 resize_height, output_size):
        """Allocate a resize output buffer and build its descriptor."""
        out_buffer, ret = acl.media.dvpp_malloc(output_size)
        if ret != constants.ACL_ERROR_NONE:
            log_error("Dvpp malloc failed, error: ", ret)
            return None
        pic_desc = self._gen_output_pic_desc(resize_width, resize_height,
                                             out_buffer, output_size)
        return pic_desc, out_buffer

    def crop_and_paste(
            self,
            image,
            width,
            height,
            crop_and_paste_width,
            crop_and_paste_height):
        """
        crop_and_paste

        Crops the (width x height) area and pastes it into the output,
        preserving the aspect ratio (letterbox-style centering).
        """
        # print('[Dvpp] vpc crop and paste stage:')
        input_desc = self._gen_input_pic_desc(image)
        stride_width = utils.align_up16(crop_and_paste_width)
        stride_height = utils.align_up2(crop_and_paste_height)
        out_buffer_size = utils.yuv420sp_size(stride_width, stride_height)
        out_buffer, ret = acl.media.dvpp_malloc(out_buffer_size)
        output_desc = self._gen_output_pic_desc(
            crop_and_paste_width,
            crop_and_paste_height,
            out_buffer,
            out_buffer_size)
        # Crop area edges must be even, hence the (x >> 1 << 1) rounding
        self._crop_config = acl.media.dvpp_create_roi_config(
            0, (width >> 1 << 1) - 1, 0, (height >> 1 << 1) - 1)
        # set crop area:
        # Scale by the larger of the two ratios and center the result
        rx = float(width) / float(crop_and_paste_width)
        ry = float(height) / float(crop_and_paste_height)
        if rx > ry:
            dx = 0
            r = rx
            dy = int((crop_and_paste_height - height / r) / 2)
        else:
            dy = 0
            r = ry
            dx = int((crop_and_paste_width - width / r) / 2)
        pasteRightOffset = int(crop_and_paste_width - 2 * dx)
        pasteBottomOffset = int(crop_and_paste_height - 2 * dy)
        # Paste right/bottom offsets are forced odd before use
        if (pasteRightOffset % 2) == 0:
            pasteRightOffset = pasteRightOffset - 1
        if (pasteBottomOffset % 2) == 0:
            pasteBottomOffset = pasteBottomOffset - 1
        self._paste_config = acl.media.dvpp_create_roi_config(
            0, pasteRightOffset, 0, pasteBottomOffset)
        ret = acl.media.dvpp_vpc_crop_and_paste_async(self._dvpp_channel_desc,
                                                      input_desc,
                                                      output_desc,
                                                      self._crop_config,
                                                      self._paste_config,
                                                      self._stream)
        utils.check_ret("acl.media.dvpp_vpc_crop_and_paste_async", ret)
        ret = acl.rt.synchronize_stream(self._stream)
        utils.check_ret("acl.rt.synchronize_stream", ret)
        # print('[Dvpp] vpc crop and paste stage success')
        stride_width = utils.align_up16(crop_and_paste_width)
        stride_height = utils.align_up2(crop_and_paste_height)
        return AclImage(out_buffer, stride_width,
                        stride_height, out_buffer_size, constants.MEMORY_DVPP)

    def crop_and_paste_get_roi(
            self,
            image,
            width,
            height,
            crop_and_paste_width,
            crop_and_paste_height):
        """
        :image: input image
        :width: input image width
        :height: input image height
        :crop_and_paste_width: crop_and_paste_width
        :crop_and_paste_height: crop_and_paste_height
        :return: return AclImage
        """
        # print('[Dvpp] vpc crop and paste stage:')
        input_desc = self._gen_input_pic_desc(image)
        stride_width = utils.align_up16(crop_and_paste_width)
        stride_height = utils.align_up2(crop_and_paste_height)
        out_buffer_size = utils.yuv420sp_size(stride_width, stride_height)
        # NOTE(review): malloc return code is not checked here
        out_buffer, ret = acl.media.dvpp_malloc(out_buffer_size)
        output_desc = self._gen_output_pic_desc(
            crop_and_paste_width,
            crop_and_paste_height,
            out_buffer,
            out_buffer_size)
        # Crop area edges must be even, hence the (x >> 1 << 1) rounding
        self._crop_config = acl.media.dvpp_create_roi_config(
            0, (width >> 1 << 1) - 1, 0, (height >> 1 << 1) - 1)
        # Paste over the entire output picture (stretches the crop)
        self._paste_config = acl.media.dvpp_create_roi_config(
            0, crop_and_paste_width - 1, 0, crop_and_paste_height - 1)
        ret = acl.media.dvpp_vpc_crop_and_paste_async(self._dvpp_channel_desc,
                                                      input_desc,
                                                      output_desc,
                                                      self._crop_config,
                                                      self._paste_config,
                                                      self._stream)
        utils.check_ret("acl.media.dvpp_vpc_crop_and_paste_async", ret)
        ret = acl.rt.synchronize_stream(self._stream)
        utils.check_ret("acl.rt.synchronize_stream", ret)
        # print('[Dvpp] vpc crop and paste stage success')
        stride_width = utils.align_up16(crop_and_paste_width)
        stride_height = utils.align_up2(crop_and_paste_height)
        return AclImage(out_buffer, stride_width,
                        stride_height, out_buffer_size, constants.MEMORY_DVPP)

    def jpege(self, image):
        """
        Convert yuv420sp pictures to jpeg pictures
        """
        # create input image
        input_desc = self._gen_input_pic_desc(image)
        # Predict the memory size required for conversion
        output_size, ret = acl.media.dvpp_jpeg_predict_enc_size(
            input_desc, self._jpege_config)
        if (ret != constants.ACL_ERROR_NONE):
            log_error("Predict jpege output size failed")
            return None
        # Request memory required for conversion
        output_buffer, ret = acl.media.dvpp_malloc(output_size)
        if (ret != constants.ACL_ERROR_NONE):
            log_error("Malloc jpege output memory failed")
            return None
        # The encoder updates the actual encoded size through this pointer
        output_size_array = np.array([output_size], dtype=np.int32)
        output_size_ptr = acl.util.numpy_to_ptr(output_size_array)
        # Call jpege asynchronous interface to convert pictures
        ret = acl.media.dvpp_jpeg_encode_async(self._dvpp_channel_desc,
                                               input_desc, output_buffer,
                                               output_size_ptr,
                                               self._jpege_config,
                                               self._stream)
        if (ret != constants.ACL_ERROR_NONE):
            log_error("Jpege failed, ret ", ret)
            return None
        # Wait for the conversion to complete
        ret = acl.rt.synchronize_stream(self._stream)
        if (ret != constants.ACL_ERROR_NONE):
            # NOTE(review): uses print() while all other paths use
            # log_error — inconsistent logging
            print("Jpege synchronize stream, failed, ret ", ret)
            return None
        # Release resources
        acl.media.dvpp_destroy_pic_desc(input_desc)
        return AclImage(
            output_buffer, image.width, image.height, int(
                output_size_array[0]), constants.MEMORY_DVPP)

    def destroy(self):
        """
        dvpp resource release

        Idempotent: only the first call releases anything.
        """
        if self._is_destroyed:
            return
        if self._resize_config:
            acl.media.dvpp_destroy_resize_config(self._resize_config)
        if self._dvpp_channel_desc:
            # The channel must be destroyed before its descriptor
            acl.media.dvpp_destroy_channel(self._dvpp_channel_desc)
            acl.media.dvpp_destroy_channel_desc(self._dvpp_channel_desc)
        if self._jpege_config:
            acl.media.dvpp_destroy_jpege_config(self._jpege_config)
        self._is_destroyed = True
        resource_list.unregister(self)
        log_info("dvpp resource release success")

    def __del__(self):
        # Best-effort cleanup if destroy() was never called explicitly
        self.destroy()
from numpy import ndarray
from livia.input.FrameInput import FrameInput
from livia.output.FrameOutput import FrameOutput
from livia.process.FrameProcessor import FrameProcessor
from livia.process.analyzer.FrameAnalyzer import FrameAnalyzer
from livia.process.analyzer.listener.FrameAnalyzerChangeEvent import FrameAnalyzerChangeEvent
from livia.process.analyzer.listener.FrameAnalyzerChangeListener import FrameAnalyzerChangeListener
from livia.process.listener.EventListeners import EventListeners
class AnalyzerFrameProcessor(FrameProcessor):
    """A FrameProcessor that delegates per-frame work to a FrameAnalyzer.

    Every frame is handed to the configured analyzer, and the modification
    it returns is applied to the frame before it reaches the output.
    Listeners can subscribe to be notified when the analyzer is replaced.
    """

    def __init__(self, input: FrameInput, output: FrameOutput, frame_analyzer: FrameAnalyzer, daemon: bool = True):
        super().__init__(input, output, daemon)
        self._frame_analyzer: FrameAnalyzer = frame_analyzer
        self._frame_analyzer_change_listeners: EventListeners[FrameAnalyzerChangeListener] = \
            EventListeners[FrameAnalyzerChangeListener]()

    def _manipulate_frame(self, frame: ndarray) -> ndarray:
        """Run the analyzer on *frame* and return the modified frame."""
        if self._num_frame is None:
            raise RuntimeError("self._num_frame should not be None")
        frame_modification = self._frame_analyzer.analyze(self._num_frame, frame)
        return frame_modification.modify(self._num_frame, frame)

    @property
    def frame_analyzer(self) -> FrameAnalyzer:
        """The analyzer currently applied to incoming frames."""
        return self._frame_analyzer

    @frame_analyzer.setter
    def frame_analyzer(self, frame_analyzer: FrameAnalyzer):
        # Listeners are only notified on an actual change of analyzer
        if self._frame_analyzer == frame_analyzer:
            return
        previous_analyzer = self._frame_analyzer
        self._frame_analyzer = frame_analyzer
        self._frame_analyzer_change_listeners.notify(
            FrameAnalyzerChangeListener.analyzer_changed,
            FrameAnalyzerChangeEvent(self, self._frame_analyzer, previous_analyzer))

    def add_frame_analyzer_change_listener(self, listener: FrameAnalyzerChangeListener):
        """Subscribe *listener* to analyzer replacement events."""
        self._frame_analyzer_change_listeners.append(listener)

    def remove_frame_analyzer_change_listener(self, listener: FrameAnalyzerChangeListener):
        """Unsubscribe *listener* from analyzer replacement events."""
        self._frame_analyzer_change_listeners.remove(listener)

    def has_frame_analyzer_change_listener(self, listener: FrameAnalyzerChangeListener) -> bool:
        """Tell whether *listener* is currently subscribed."""
        return listener in self._frame_analyzer_change_listeners
from livia.input.FrameInput import FrameInput
from livia.output.FrameOutput import FrameOutput
from livia.process.FrameProcessor import FrameProcessor
from livia.process.analyzer.FrameAnalyzer import FrameAnalyzer
from livia.process.analyzer.listener.FrameAnalyzerChangeEvent import FrameAnalyzerChangeEvent
from livia.process.analyzer.listener.FrameAnalyzerChangeListener import FrameAnalyzerChangeListener
from livia.process.listener.EventListeners import EventListeners
class AnalyzerFrameProcessor(FrameProcessor):
    """Frame processor that runs each frame through a FrameAnalyzer and
    applies the resulting modification before output."""

    def __init__(self, input: FrameInput, output: FrameOutput, frame_analyzer: FrameAnalyzer, daemon: bool = True):
        super().__init__(input, output, daemon)
        self._frame_analyzer: FrameAnalyzer = frame_analyzer
        # Listeners notified whenever the analyzer is swapped at runtime
        self._frame_analyzer_change_listeners: EventListeners[FrameAnalyzerChangeListener] =\
            EventListeners[FrameAnalyzerChangeListener]()

    def _manipulate_frame(self, frame: ndarray) -> ndarray:
        """Analyze *frame* and return the modified frame."""
        if self._num_frame is None:
            raise RuntimeError("self._num_frame should not be None")
        modification = self._frame_analyzer.analyze(self._num_frame, frame)
        return modification.modify(self._num_frame, frame)

    @property
    def frame_analyzer(self) -> FrameAnalyzer:
        """The analyzer currently applied to incoming frames."""
        return self._frame_analyzer

    @frame_analyzer.setter
    def frame_analyzer(self, frame_analyzer: FrameAnalyzer):
        # Only notify listeners on an actual change of analyzer
        if self._frame_analyzer != frame_analyzer:
            old_frame_analyzer = self._frame_analyzer
            self._frame_analyzer = frame_analyzer
            event = FrameAnalyzerChangeEvent(self, self._frame_analyzer, old_frame_analyzer)
            self._frame_analyzer_change_listeners.notify(FrameAnalyzerChangeListener.analyzer_changed, event)

    def add_frame_analyzer_change_listener(self, listener: FrameAnalyzerChangeListener):
        """Subscribe *listener* to analyzer replacement events."""
        self._frame_analyzer_change_listeners.append(listener)

    def remove_frame_analyzer_change_listener(self, listener: FrameAnalyzerChangeListener):
        """Unsubscribe *listener* from analyzer replacement events."""
        self._frame_analyzer_change_listeners.remove(listener)

    def has_frame_analyzer_change_listener(self, listener: FrameAnalyzerChangeListener) -> bool:
        """Tell whether *listener* is currently subscribed."""
        return listener in self._frame_analyzer_change_listeners
from contextlib import suppress
from urllib.parse import quote, urljoin
import pytz
from django.conf import settings
from django.shortcuts import get_object_or_404, redirect, reverse
from django.urls import resolve
from django.utils import timezone, translation
from django.utils.translation.trans_real import (
get_supported_language_variant,
language_code_re,
parse_accept_lang_header,
)
from django_scopes import scope, scopes_disabled
from pretalx.event.models import Event, Organiser, Team
class EventPermissionMiddleware:
    """
    Attach organiser/event context and permission flags
    (``request.is_orga`` / ``request.is_reviewer``) to each request,
    activate the request's locale and timezone, and handle redirects for
    organiser pages and for events served from custom domains.
    """
    # Orga url names that are reachable without being logged in
    UNAUTHENTICATED_ORGA_URLS = (
        "invitation.view",
        "auth",
        "login",
        "auth.reset",
        "auth.recover",
        "event.login",
        "event.auth.reset",
        "event.auth.recover",
    )

    def __init__(self, get_response):
        self.get_response = get_response

    @staticmethod
    def _set_orga_events(request):
        """Set request.orga_events / is_orga / is_reviewer for the user."""
        request.is_orga = False
        request.is_reviewer = False
        request.orga_events = []
        if not request.user.is_anonymous:
            # Administrators implicitly organise and review every event
            if request.user.is_administrator:
                request.orga_events = Event.objects.order_by("date_from")
                request.is_orga = True
                request.is_reviewer = True
            else:
                request.orga_events = request.user.get_events_for_permission().order_by(
                    "date_from"
                )
                event = getattr(request, "event", None)
                if event:
                    request.is_orga = event in request.orga_events
                    request.is_reviewer = event.teams.filter(
                        members__in=[request.user], is_reviewer=True
                    ).exists()

    def _handle_orga_url(self, request, url):
        """Return a redirect URL for orga pages, or None to continue."""
        if request.uses_custom_domain:
            # Orga pages are only served from the main domain
            return urljoin(settings.SITE_URL, request.get_full_path())
        if (
            request.user.is_anonymous
            and url.url_name not in self.UNAUTHENTICATED_ORGA_URLS
        ):
            # Anonymous users are sent to the (event) login page, keeping
            # the original path and query string in ?next=
            params = "&" + request.GET.urlencode() if request.GET else ""
            event = getattr(request, "event", None)
            if event:
                return (
                    reverse("orga:event.login", kwargs={"event": event.slug})
                    + f"?next={quote(request.path)}"
                    + params
                )
            return reverse("orga:login") + f"?next={quote(request.path)}" + params
        return None

    def __call__(self, request):
        url = resolve(request.path_info)
        # Resolve the organiser from the URL, if present
        organiser_slug = url.kwargs.get("organiser")
        if organiser_slug:
            request.organiser = get_object_or_404(
                Organiser, slug__iexact=organiser_slug
            )
        if hasattr(request, "organiser") and request.organiser:
            request.is_orga = False
            if not request.user.is_anonymous:
                has_perms = Team.objects.filter(
                    organiser=request.organiser,
                    members__in=[request.user],
                    can_change_organiser_settings=True,
                ).exists()
                request.is_orga = request.user.is_administrator or has_perms
        # Resolve the event from the URL, if present (outside any scope)
        event_slug = url.kwargs.get("event")
        if event_slug:
            with scopes_disabled():
                request.event = get_object_or_404(
                    Event.objects.prefetch_related("schedules", "submissions"),
                    slug__iexact=event_slug,
                )
        event = getattr(request, "event", None)
        self._set_orga_events(request)
        self._select_locale(request)
        # Schedule exports and API calls may be fetched from any domain
        is_exempt = (
            url.url_name == "export"
            if "agenda" in url.namespaces
            else request.path.startswith("/api/")
        )
        if "orga" in url.namespaces or (
            "plugins" in url.namespaces and request.path.startswith("/orga")
        ):
            url = self._handle_orga_url(request, url)
            if url:
                return redirect(url)
        elif (
            event
            and request.event.settings.custom_domain
            and not request.uses_custom_domain
            and not is_exempt
        ):
            # Public event pages must be served from the configured domain
            return redirect(
                urljoin(request.event.settings.custom_domain, request.get_full_path())
            )
        if event:
            # Run the rest of the request inside the event's query scope
            with scope(event=event):
                return self.get_response(request)
        return self.get_response(request)

    def _select_locale(self, request):
        """Activate the request language (user > cookie > browser >
        event default) and the event's / user's timezone."""
        supported = (
            request.event.locales
            if (hasattr(request, "event") and request.event)
            else settings.LANGUAGE_CODES
        )
        language = (
            self._language_from_user(request, supported)
            or self._language_from_cookie(request, supported)
            or self._language_from_browser(request, supported)
        )
        if hasattr(request, "event") and request.event:
            language = language or request.event.locale
        translation.activate(language)
        request.LANGUAGE_CODE = translation.get_language()
        with suppress(pytz.UnknownTimeZoneError):
            if hasattr(request, "event") and request.event:
                tzname = request.event.timezone
            elif request.user.is_authenticated:
                tzname = request.user.timezone
            else:
                tzname = settings.TIME_ZONE
            timezone.activate(pytz.timezone(tzname))
            request.timezone = tzname

    @staticmethod
    def _language_from_browser(request, supported):
        """Pick the first supported language from the Accept-Language header."""
        accept_value = request.headers.get("Accept-Language", "")
        for accept_lang, _ in parse_accept_lang_header(accept_value):
            if accept_lang == "*":
                break
            if not language_code_re.search(accept_lang):
                continue
            try:
                val = get_supported_language_variant(accept_lang)
                if val and val in supported:
                    return val
            except LookupError:
                continue
        return None

    @staticmethod
    def _language_from_cookie(request, supported):
        """Pick the language from the Django language cookie, if supported."""
        cookie_value = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
        with suppress(LookupError):
            cookie_value = get_supported_language_variant(cookie_value)
            if cookie_value and cookie_value in supported:
                return cookie_value
        return None

    @staticmethod
    def _language_from_user(request, supported):
        """Pick the authenticated user's configured locale, if supported."""
        if request.user.is_authenticated:
            with suppress(LookupError):
                value = get_supported_language_variant(request.user.locale)
                if value and value in supported:
                    return value
        return None
from urllib.parse import quote, urljoin
import pytz
from django.conf import settings
from django.shortcuts import get_object_or_404, redirect, reverse
from django.urls import resolve
from django.utils import timezone, translation
from django.utils.translation.trans_real import (
get_supported_language_variant,
language_code_re,
parse_accept_lang_header,
)
from django_scopes import scope, scopes_disabled
from pretalx.event.models import Event, Organiser, Team
class EventPermissionMiddleware:
    """
    Attach organiser/event context and permission flags
    (``request.is_orga`` / ``request.is_reviewer``) to each request,
    activate the request's locale and timezone, and handle redirects for
    organiser pages and for events served from custom domains.
    """
    # Orga url names that are reachable without being logged in
    UNAUTHENTICATED_ORGA_URLS = (
        "invitation.view",
        "auth",
        "login",
        "auth.reset",
        "auth.recover",
        "event.login",
        "event.auth.reset",
        "event.auth.recover",
    )

    def __init__(self, get_response):
        self.get_response = get_response

    @staticmethod
    def _set_orga_events(request):
        """Set request.orga_events / is_orga / is_reviewer for the user."""
        request.is_orga = False
        request.is_reviewer = False
        request.orga_events = []
        if not request.user.is_anonymous:
            # Administrators implicitly organise and review every event
            if request.user.is_administrator:
                request.orga_events = Event.objects.order_by("date_from")
                request.is_orga = True
                request.is_reviewer = True
            else:
                request.orga_events = request.user.get_events_for_permission().order_by(
                    "date_from"
                )
                event = getattr(request, "event", None)
                if event:
                    request.is_orga = event in request.orga_events
                    request.is_reviewer = event.teams.filter(
                        members__in=[request.user], is_reviewer=True
                    ).exists()

    def _handle_orga_url(self, request, url):
        """Return a redirect URL for orga pages, or None to continue."""
        if request.uses_custom_domain:
            # Orga pages are only served from the main domain
            return urljoin(settings.SITE_URL, request.get_full_path())
        if (
            request.user.is_anonymous
            and url.url_name not in self.UNAUTHENTICATED_ORGA_URLS
        ):
            # Anonymous users are sent to the (event) login page, keeping
            # the original path and query string in ?next=
            params = "&" + request.GET.urlencode() if request.GET else ""
            event = getattr(request, "event", None)
            if event:
                return (
                    reverse("orga:event.login", kwargs={"event": event.slug})
                    + f"?next={quote(request.path)}"
                    + params
                )
            return reverse("orga:login") + f"?next={quote(request.path)}" + params
        return None

    def __call__(self, request):
        url = resolve(request.path_info)
        # Resolve the organiser from the URL, if present
        organiser_slug = url.kwargs.get("organiser")
        if organiser_slug:
            request.organiser = get_object_or_404(
                Organiser, slug__iexact=organiser_slug
            )
        if hasattr(request, "organiser") and request.organiser:
            request.is_orga = False
            if not request.user.is_anonymous:
                has_perms = Team.objects.filter(
                    organiser=request.organiser,
                    members__in=[request.user],
                    can_change_organiser_settings=True,
                ).exists()
                request.is_orga = request.user.is_administrator or has_perms
        # Resolve the event from the URL, if present (outside any scope)
        event_slug = url.kwargs.get("event")
        if event_slug:
            with scopes_disabled():
                request.event = get_object_or_404(
                    Event.objects.prefetch_related("schedules", "submissions"),
                    slug__iexact=event_slug,
                )
        event = getattr(request, "event", None)
        self._set_orga_events(request)
        self._select_locale(request)
        # Schedule exports and API calls may be fetched from any domain
        is_exempt = (
            url.url_name == "export"
            if "agenda" in url.namespaces
            else request.path.startswith("/api/")
        )
        if "orga" in url.namespaces or (
            "plugins" in url.namespaces and request.path.startswith("/orga")
        ):
            url = self._handle_orga_url(request, url)
            if url:
                return redirect(url)
        elif (
            event
            and request.event.settings.custom_domain
            and not request.uses_custom_domain
            and not is_exempt
        ):
            # Public event pages must be served from the configured domain
            return redirect(
                urljoin(request.event.settings.custom_domain, request.get_full_path())
            )
        if event:
            # Run the rest of the request inside the event's query scope
            with scope(event=event):
                return self.get_response(request)
        return self.get_response(request)

    def _select_locale(self, request):
        """Activate the request language (user > cookie > browser >
        event default) and the event's / user's timezone."""
        supported = (
            request.event.locales
            if (hasattr(request, "event") and request.event)
            else settings.LANGUAGE_CODES
        )
        language = (
            self._language_from_user(request, supported)
            or self._language_from_cookie(request, supported)
            or self._language_from_browser(request, supported)
        )
        if hasattr(request, "event") and request.event:
            language = language or request.event.locale
        translation.activate(language)
        request.LANGUAGE_CODE = translation.get_language()
        with suppress(pytz.UnknownTimeZoneError):
            if hasattr(request, "event") and request.event:
                tzname = request.event.timezone
            elif request.user.is_authenticated:
                tzname = request.user.timezone
            else:
                tzname = settings.TIME_ZONE
            timezone.activate(pytz.timezone(tzname))
            request.timezone = tzname

    @staticmethod
    def _language_from_browser(request, supported):
        """Pick the first supported language from the Accept-Language header."""
        accept_value = request.headers.get("Accept-Language", "")
        for accept_lang, _ in parse_accept_lang_header(accept_value):
            if accept_lang == "*":
                break
            if not language_code_re.search(accept_lang):
                continue
            try:
                val = get_supported_language_variant(accept_lang)
                if val and val in supported:
                    return val
            except LookupError:
                continue
        return None

    @staticmethod
    def _language_from_cookie(request, supported):
        """Pick the language from the Django language cookie, if supported."""
        cookie_value = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
        with suppress(LookupError):
            cookie_value = get_supported_language_variant(cookie_value)
            if cookie_value and cookie_value in supported:
                return cookie_value
        return None

    @staticmethod
    def _language_from_user(request, supported):
        """Pick the authenticated user's configured locale, if supported."""
        if request.user.is_authenticated:
            with suppress(LookupError):
                value = get_supported_language_variant(request.user.locale)
                if value and value in supported:
                    return value
        return None
import unittest
import shutil
import os
import logging
import pxelinux_cfg
import network_manager
import hw_node
# Log everything at DEBUG with a uniform "time | logger | message" format
logging.basicConfig(format='%(asctime)s | %(name)s | %(message)s',
                    level=logging.DEBUG)
# Scratch locations used by the tests; recreated in setUp, wiped in tearDown
tftp_cfg_dir = '/tmp/tftp'
pxelinux_cfg_dir = '/tmp/tftp/pxelinux_cfg'
class PxelinuxCfgTest(unittest.TestCase):
    """Tests for pxelinux_cfg against a scratch tftp tree under /tmp/tftp."""

    def setUp(self):
        # Point the modules under test at throwaway test data/directories
        network_manager.data_file = 'test/test_network_cfg.json'
        shutil.rmtree(tftp_cfg_dir, ignore_errors=True)
        os.makedirs(tftp_cfg_dir)
        os.makedirs(pxelinux_cfg_dir, exist_ok=True)
        pxelinux_cfg.tftp_cfg_dir = tftp_cfg_dir
        pxelinux_cfg.default_pxe_server = "192.168.3.11"
        pxelinux_cfg.pxelinux_cfg_dir = pxelinux_cfg_dir
        shutil.rmtree(pxelinux_cfg.pxelinux_cfg_dir, ignore_errors=True)
        os.mkdir(pxelinux_cfg.pxelinux_cfg_dir)

    def tearDown(self):
        # Remove the whole scratch tree after each test
        shutil.rmtree(tftp_cfg_dir, ignore_errors=True)
        pass

    def test_list_os(self):
        """get_os_dir resolves both plain directories and symlink aliases."""
        tdir = 'sle-15.1-0.1.1-29.1'
        os.makedirs(tftp_cfg_dir + '/' + tdir)
        found = pxelinux_cfg.get_os_dir(tdir)
        logging.debug("found os directory: " + found)
        self.assertEqual(found, tdir)
        # A symlinked alias must resolve to the versioned directory
        os.symlink(tftp_cfg_dir + '/' + tdir,
                   tftp_cfg_dir + '/sle-15.1')
        found = pxelinux_cfg.get_os_dir('sle-15.1')
        logging.debug("found os directory: " + found)
        self.assertEqual(found, tdir)

    def test_get_boot_record_for_os(self):
        """The boot record contains image name, server URL and console args."""
        os_id = 'sle-15.1-0.1.1-29.1'
        os.makedirs(tftp_cfg_dir + '/' + os_id)
        boot = pxelinux_cfg.get_boot_record_for_os(
            {'node': 'test_node'}, os_id)
        logging.debug("Boot records: " + boot)
        self.assertRegex(boot,
                         '/minimal-sle-15-sp1.x86_64-0.1.1.xz',
                         'check image')
        self.assertRegex(boot,
                         'http://1.2.3.4/sle-15.1-0.1.1-29.1/', 'check host')
        # NOTE(review): '11520' looks like a truncated 115200 baud rate, but
        # the assertion matches what the generator currently emits
        self.assertRegex(boot,
                         'console=tty1 console=ttyS1,11520', 'check tty')

    def test_prepare_tftp(self):
        """set_tftp_dir writes the per-MAC pxelinux config, and
        cleanup_tftp_dir removes it again."""
        tdir = 'sle-15.1-0.1.1-29.1'
        os.makedirs(tftp_cfg_dir + '/' + tdir)
        network_manager.data_file = 'test/test_network_cfg.json'
        node = network_manager.get_node_by_name('test_node')
        mac_file = pxelinux_cfg.get_macfile(node)
        # pxelinux config files are named 01-<mac-with-dashes>
        self.assertEqual(mac_file,
                         pxelinux_cfg.pxelinux_cfg_dir +
                         '/01-aa-bb-cc-dd-00-73',
                         'mac file calculation check')
        pxelinux_cfg.set_tftp_dir(node, 'local')
        self.assertTrue(os.path.isfile(mac_file),
                        'local cfg file generated')
        with open(mac_file, 'r') as ifile:
            lines = ifile.readlines()
        cfg = "\n".join(lines)
        self.assertRegex(cfg,
                         r'MENU\s+LABEL\s+Boot\s+local\s+'
                         r'hard\s+drive\s+LOCALBOOT\s-1',
                         'check generated local data #1')
        self.assertRegex(cfg,
                         r'APPEND\s+pxelinux\.cfg\/default',
                         'check generated local data #2')
        # Switching to a specific OS rewrites the config for netboot
        pxelinux_cfg.set_tftp_dir(node, 'sle-15.1-0.1.1-29.1')
        self.assertTrue(os.path.isfile(mac_file),
                        'special os cfg file generated')
        with open(mac_file, 'r') as ifile:
            lines = ifile.readlines()
        cfg = "\n".join(lines)
        self.assertRegex(cfg,
                         'KERNEL sle-15.1-0.1.1-29.1/pxeboot.kernel',
                         'check os specific generated data #1')
        self.assertRegex(cfg,
                         'INITRD sle-15.1-0.1.1-29.1/pxeboot.initrd.xz',
                         'check os specific generated data #2')
        self.assertRegex(cfg,
                         r'rd.kiwi.install.pxe\s+rd.kiwi.install.image=',
                         'check os specific generated data #3')
        pxelinux_cfg.cleanup_tftp_dir(node)
        self.assertFalse(os.path.isfile(mac_file),
                         'cfg file removed')

    def test_refresh_symlinks(self):
        """refresh_symlinks repoints the unversioned alias at the newest
        versioned directory."""
        os.makedirs(tftp_cfg_dir + '/oem_sle_15sp1-0.1.1')
        logging.debug("Write test file: "
                      + tftp_cfg_dir + '/oem_sle_15sp1-0.1.1/test')
        with open(tftp_cfg_dir + '/oem_sle_15sp1-0.1.1/test', 'w') as ofile:
            ofile.writelines("011")
        pxelinux_cfg.refresh_symlinks('oem_sle_15sp1', '0.1.1')
        logging.debug("Read test file via symlink: "
                      + tftp_cfg_dir + '/oem_sle_15sp1/test')
        with open(tftp_cfg_dir + '/oem_sle_15sp1/test', 'r') as ifile:
            lines = ifile.readlines()
        self.assertFalse(os.path.islink(tftp_cfg_dir + '/oem_sle_15sp1/test'))
        self.assertEqual(lines[0], '011')
        # A newer version must win after refreshing again
        os.makedirs(tftp_cfg_dir + '/oem_sle_15sp1-1.1.1')
        with open(tftp_cfg_dir + '/oem_sle_15sp1-1.1.1/test', 'w') as ofile:
            ofile.writelines("111")
        pxelinux_cfg.refresh_symlinks('oem_sle_15sp1', '1.1.1')
        with open(tftp_cfg_dir + '/oem_sle_15sp1/test', 'r') as ifile:
            lines = ifile.readlines()
        self.assertFalse(os.path.islink(tftp_cfg_dir + '/oem_sle_15sp1/test'))
        self.assertEqual(lines[0], '111')
        logging.debug("---")

    def test_provision_node(self):
        """provision_node fails when salt states fail, and reports 1 or 2
        depending on whether a reboot is required."""
        # test awaited open port 22 locally
        # TODO add for case when no ssh port on local port
        # code for open port
        tdir = 'sle-15.1-0.1.1-29.1'
        os.makedirs(tftp_cfg_dir + '/' + tdir)
        os.symlink(tftp_cfg_dir + '/' + tdir,
                   tftp_cfg_dir + '/sle-15.1')
        # Shrink timeouts and stub out ipmitool so the test runs fast
        hw_node.default_cold_restart_timeout = 1
        pxelinux_cfg.default_undoubted_hw_start_timeout = 1
        hw_node.ipmitool_bin = 'echo'
        node = network_manager.get_node_by_name('test_local_node')
        # check provision failure in salt
        hw_node.sls_list = ['setup_hsm']
        with self.assertRaises(Exception):
            pxelinux_cfg.provision_node(node, 'sle-15.1')
        hw_node.sls_list = []
        # provision without failure
        self.assertEqual(
            pxelinux_cfg.provision_node(node, 'sle-15.1'), 1)
        # check provision need reboot
        node['provision_need_reboot'] = 'no'
        self.assertEqual(
            pxelinux_cfg.provision_node(node, 'sle-15.1'), 1)
        node['provision_need_reboot'] = 'yes'
        self.assertEqual(
            pxelinux_cfg.provision_node(node, 'sle-15.1'), 2)
if __name__ == '__main__':
unittest.main() | test/test_pxelinux_cfg.py | import unittest
import shutil
import os
import logging
import pxelinux_cfg
import network_manager
import hw_node
logging.basicConfig(format='%(asctime)s | %(name)s | %(message)s',
level=logging.DEBUG)
tftp_cfg_dir = '/tmp/tftp'
pxelinux_cfg_dir = '/tmp/tftp/pxelinux_cfg'
class PxelinuxCfgTest(unittest.TestCase):
def setUp(self):
network_manager.data_file = 'test/test_network_cfg.json'
shutil.rmtree(tftp_cfg_dir, ignore_errors=True)
os.makedirs(tftp_cfg_dir)
os.makedirs(pxelinux_cfg_dir, exist_ok=True)
pxelinux_cfg.tftp_cfg_dir = tftp_cfg_dir
pxelinux_cfg.default_pxe_server = "192.168.3.11"
pxelinux_cfg.pxelinux_cfg_dir = pxelinux_cfg_dir
shutil.rmtree(pxelinux_cfg.pxelinux_cfg_dir, ignore_errors=True)
os.mkdir(pxelinux_cfg.pxelinux_cfg_dir)
def tearDown(self):
shutil.rmtree(tftp_cfg_dir, ignore_errors=True)
pass
def test_list_os(self):
tdir = 'sle-15.1-0.1.1-29.1'
os.makedirs(tftp_cfg_dir + '/' + tdir)
found = pxelinux_cfg.get_os_dir(tdir)
logging.debug("found os directory: " + found)
self.assertEqual(found, tdir)
os.symlink(tftp_cfg_dir + '/' + tdir,
tftp_cfg_dir + '/sle-15.1')
found = pxelinux_cfg.get_os_dir('sle-15.1')
logging.debug("found os directory: " + found)
self.assertEqual(found, tdir)
def test_get_boot_record_for_os(self):
os_id = 'sle-15.1-0.1.1-29.1'
os.makedirs(tftp_cfg_dir + '/' + os_id)
boot = pxelinux_cfg.get_boot_record_for_os(
{'node': 'test_node'}, os_id)
logging.debug("Boot records: " + boot)
self.assertRegex(boot,
'/minimal-sle-15-sp1.x86_64-0.1.1.xz',
'check image')
self.assertRegex(boot,
'http://1.2.3.4/sle-15.1-0.1.1-29.1/', 'check host')
self.assertRegex(boot,
'console=tty1 console=ttyS1,11520', 'check tty')
def test_prepare_tftp(self):
tdir = 'sle-15.1-0.1.1-29.1'
os.makedirs(tftp_cfg_dir + '/' + tdir)
network_manager.data_file = 'test/test_network_cfg.json'
node = network_manager.get_node_by_name('test_node')
mac_file = pxelinux_cfg.get_macfile(node)
self.assertEqual(mac_file,
pxelinux_cfg.pxelinux_cfg_dir +
'/01-aa-bb-cc-dd-00-73',
'mac file calculation check')
pxelinux_cfg.set_tftp_dir(node, 'local')
self.assertTrue(os.path.isfile(mac_file),
'local cfg file generated')
with open(mac_file, 'r') as ifile:
lines = ifile.readlines()
cfg = "\n".join(lines)
self.assertRegex(cfg,
r'MENU\s+LABEL\s+Boot\s+local\s+'
r'hard\s+drive\s+LOCALBOOT\s-1',
'check generated local data #1')
self.assertRegex(cfg,
r'APPEND\s+pxelinux\.cfg\/default',
'check generated local data #2')
pxelinux_cfg.set_tftp_dir(node, 'sle-15.1-0.1.1-29.1')
self.assertTrue(os.path.isfile(mac_file),
'special os cfg file generated')
with open(mac_file, 'r') as ifile:
lines = ifile.readlines()
cfg = "\n".join(lines)
self.assertRegex(cfg,
'KERNEL sle-15.1-0.1.1-29.1/pxeboot.kernel',
'check os specific generated data #1')
self.assertRegex(cfg,
'INITRD sle-15.1-0.1.1-29.1/pxeboot.initrd.xz',
'check os specific generated data #2')
self.assertRegex(cfg,
r'rd.kiwi.install.pxe\s+rd.kiwi.install.image=',
'check os specific generated data #3')
pxelinux_cfg.cleanup_tftp_dir(node)
self.assertFalse(os.path.isfile(mac_file),
'cfg file removed')
def test_refresh_symlinks(self):
os.makedirs(tftp_cfg_dir + '/oem_sle_15sp1-0.1.1')
logging.debug("Write test file: "
+ tftp_cfg_dir + '/oem_sle_15sp1-0.1.1/test')
with open(tftp_cfg_dir + '/oem_sle_15sp1-0.1.1/test', 'w') as ofile:
ofile.writelines("011")
pxelinux_cfg.refresh_symlinks('oem_sle_15sp1', '0.1.1')
logging.debug("Read test file via symlink: "
+ tftp_cfg_dir + '/oem_sle_15sp1/test')
with open(tftp_cfg_dir + '/oem_sle_15sp1/test', 'r') as ifile:
lines = ifile.readlines()
self.assertFalse(os.path.islink(tftp_cfg_dir + '/oem_sle_15sp1/test'))
self.assertEqual(lines[0], '011')
os.makedirs(tftp_cfg_dir + '/oem_sle_15sp1-1.1.1')
with open(tftp_cfg_dir + '/oem_sle_15sp1-1.1.1/test', 'w') as ofile:
ofile.writelines("111")
pxelinux_cfg.refresh_symlinks('oem_sle_15sp1', '1.1.1')
with open(tftp_cfg_dir + '/oem_sle_15sp1/test', 'r') as ifile:
lines = ifile.readlines()
self.assertFalse(os.path.islink(tftp_cfg_dir + '/oem_sle_15sp1/test'))
self.assertEqual(lines[0], '111')
logging.debug("---")
def test_provision_node(self):
# test awaited open port 22 locally
# TODO add for case when no ssh port on local port
# code for open port
tdir = 'sle-15.1-0.1.1-29.1'
os.makedirs(tftp_cfg_dir + '/' + tdir)
os.symlink(tftp_cfg_dir + '/' + tdir,
tftp_cfg_dir + '/sle-15.1')
hw_node.default_cold_restart_timeout = 1
pxelinux_cfg.default_undoubted_hw_start_timeout = 1
hw_node.ipmitool_bin = 'echo'
node = network_manager.get_node_by_name('test_local_node')
# check provision failure in salt
hw_node.sls_list = ['setup_hsm']
with self.assertRaises(Exception):
pxelinux_cfg.provision_node(node, 'sle-15.1')
hw_node.sls_list = []
# provision without failure
self.assertEqual(
pxelinux_cfg.provision_node(node, 'sle-15.1'), 1)
# check provision need reboot
node['provision_need_reboot'] = 'no'
self.assertEqual(
pxelinux_cfg.provision_node(node, 'sle-15.1'), 1)
node['provision_need_reboot'] = 'yes'
self.assertEqual(
pxelinux_cfg.provision_node(node, 'sle-15.1'), 2)
if __name__ == '__main__':
unittest.main() | 0.122839 | 0.158467 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='search_total.proto',
package='instance',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x12search_total.proto\x12\x08instance\x1a\x1cgoogle/protobuf/struct.proto\"|\n\x12SearchTotalRequest\x12\x10\n\x08objectId\x18\x01 \x01(\t\x12&\n\x05query\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x12\n\npermission\x18\x03 \x03(\t\x12\x18\n\x10only_my_instance\x18\x04 \x01(\x08\"Q\n\x13SearchTotalResponse\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 \x01(\t\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x05\"{\n\x1aSearchTotalResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12+\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x1d.instance.SearchTotalResponseb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_SEARCHTOTALREQUEST = _descriptor.Descriptor(
name='SearchTotalRequest',
full_name='instance.SearchTotalRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='objectId', full_name='instance.SearchTotalRequest.objectId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='query', full_name='instance.SearchTotalRequest.query', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='permission', full_name='instance.SearchTotalRequest.permission', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='only_my_instance', full_name='instance.SearchTotalRequest.only_my_instance', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=62,
serialized_end=186,
)
_SEARCHTOTALRESPONSE = _descriptor.Descriptor(
name='SearchTotalResponse',
full_name='instance.SearchTotalResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='instance.SearchTotalResponse.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='instance.SearchTotalResponse.error', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='instance.SearchTotalResponse.message', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='instance.SearchTotalResponse.data', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=188,
serialized_end=269,
)
_SEARCHTOTALRESPONSEWRAPPER = _descriptor.Descriptor(
name='SearchTotalResponseWrapper',
full_name='instance.SearchTotalResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='instance.SearchTotalResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='instance.SearchTotalResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='instance.SearchTotalResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='instance.SearchTotalResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=271,
serialized_end=394,
)
_SEARCHTOTALREQUEST.fields_by_name['query'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_SEARCHTOTALRESPONSEWRAPPER.fields_by_name['data'].message_type = _SEARCHTOTALRESPONSE
DESCRIPTOR.message_types_by_name['SearchTotalRequest'] = _SEARCHTOTALREQUEST
DESCRIPTOR.message_types_by_name['SearchTotalResponse'] = _SEARCHTOTALRESPONSE
DESCRIPTOR.message_types_by_name['SearchTotalResponseWrapper'] = _SEARCHTOTALRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SearchTotalRequest = _reflection.GeneratedProtocolMessageType('SearchTotalRequest', (_message.Message,), {
'DESCRIPTOR' : _SEARCHTOTALREQUEST,
'__module__' : 'search_total_pb2'
# @@protoc_insertion_point(class_scope:instance.SearchTotalRequest)
})
_sym_db.RegisterMessage(SearchTotalRequest)
SearchTotalResponse = _reflection.GeneratedProtocolMessageType('SearchTotalResponse', (_message.Message,), {
'DESCRIPTOR' : _SEARCHTOTALRESPONSE,
'__module__' : 'search_total_pb2'
# @@protoc_insertion_point(class_scope:instance.SearchTotalResponse)
})
_sym_db.RegisterMessage(SearchTotalResponse)
SearchTotalResponseWrapper = _reflection.GeneratedProtocolMessageType('SearchTotalResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _SEARCHTOTALRESPONSEWRAPPER,
'__module__' : 'search_total_pb2'
# @@protoc_insertion_point(class_scope:instance.SearchTotalResponseWrapper)
})
_sym_db.RegisterMessage(SearchTotalResponseWrapper)
# @@protoc_insertion_point(module_scope) | cmdb_sdk/api/instance/search_total_pb2.py |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='search_total.proto',
package='instance',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x12search_total.proto\x12\x08instance\x1a\x1cgoogle/protobuf/struct.proto\"|\n\x12SearchTotalRequest\x12\x10\n\x08objectId\x18\x01 \x01(\t\x12&\n\x05query\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x12\n\npermission\x18\x03 \x03(\t\x12\x18\n\x10only_my_instance\x18\x04 \x01(\x08\"Q\n\x13SearchTotalResponse\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 \x01(\t\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x05\"{\n\x1aSearchTotalResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12+\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x1d.instance.SearchTotalResponseb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_SEARCHTOTALREQUEST = _descriptor.Descriptor(
name='SearchTotalRequest',
full_name='instance.SearchTotalRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='objectId', full_name='instance.SearchTotalRequest.objectId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='query', full_name='instance.SearchTotalRequest.query', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='permission', full_name='instance.SearchTotalRequest.permission', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='only_my_instance', full_name='instance.SearchTotalRequest.only_my_instance', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=62,
serialized_end=186,
)
_SEARCHTOTALRESPONSE = _descriptor.Descriptor(
name='SearchTotalResponse',
full_name='instance.SearchTotalResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='instance.SearchTotalResponse.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='instance.SearchTotalResponse.error', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='instance.SearchTotalResponse.message', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='instance.SearchTotalResponse.data', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=188,
serialized_end=269,
)
_SEARCHTOTALRESPONSEWRAPPER = _descriptor.Descriptor(
name='SearchTotalResponseWrapper',
full_name='instance.SearchTotalResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='instance.SearchTotalResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='instance.SearchTotalResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='instance.SearchTotalResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='instance.SearchTotalResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=271,
serialized_end=394,
)
_SEARCHTOTALREQUEST.fields_by_name['query'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_SEARCHTOTALRESPONSEWRAPPER.fields_by_name['data'].message_type = _SEARCHTOTALRESPONSE
DESCRIPTOR.message_types_by_name['SearchTotalRequest'] = _SEARCHTOTALREQUEST
DESCRIPTOR.message_types_by_name['SearchTotalResponse'] = _SEARCHTOTALRESPONSE
DESCRIPTOR.message_types_by_name['SearchTotalResponseWrapper'] = _SEARCHTOTALRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SearchTotalRequest = _reflection.GeneratedProtocolMessageType('SearchTotalRequest', (_message.Message,), {
'DESCRIPTOR' : _SEARCHTOTALREQUEST,
'__module__' : 'search_total_pb2'
# @@protoc_insertion_point(class_scope:instance.SearchTotalRequest)
})
_sym_db.RegisterMessage(SearchTotalRequest)
SearchTotalResponse = _reflection.GeneratedProtocolMessageType('SearchTotalResponse', (_message.Message,), {
'DESCRIPTOR' : _SEARCHTOTALRESPONSE,
'__module__' : 'search_total_pb2'
# @@protoc_insertion_point(class_scope:instance.SearchTotalResponse)
})
_sym_db.RegisterMessage(SearchTotalResponse)
SearchTotalResponseWrapper = _reflection.GeneratedProtocolMessageType('SearchTotalResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _SEARCHTOTALRESPONSEWRAPPER,
'__module__' : 'search_total_pb2'
# @@protoc_insertion_point(class_scope:instance.SearchTotalResponseWrapper)
})
_sym_db.RegisterMessage(SearchTotalResponseWrapper)
# @@protoc_insertion_point(module_scope) | 0.257952 | 0.111895 |
from __future__ import unicode_literals
from snapshottest import GenericRepr, Snapshot
snapshots = Snapshot()
snapshots['TestCreate.test[uvloop-none] 1'] = {
'all_read': True,
'all_write': True,
'created_at': '2015-10-06T20:00:00Z',
'files': [
{
'id': 'test.fq'
}
],
'format': 'fastq',
'group': 'none',
'group_read': True,
'group_write': True,
'hold': True,
'id': '9pfsom1b',
'is_legacy': False,
'library_type': 'normal',
'name': 'Foobar',
'nuvs': False,
'paired': False,
'pathoscope': False,
'quality': None,
'ready': False,
'subtraction': {
'id': 'apple'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[uvloop-users_primary_group] 1'] = {
'all_read': True,
'all_write': True,
'created_at': '2015-10-06T20:00:00Z',
'files': [
{
'id': 'test.fq'
}
],
'format': 'fastq',
'group': 'technician',
'group_read': True,
'group_write': True,
'hold': True,
'id': '9pfsom1b',
'is_legacy': False,
'library_type': 'normal',
'name': 'Foobar',
'nuvs': False,
'paired': False,
'pathoscope': False,
'quality': None,
'ready': False,
'subtraction': {
'id': 'apple'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[uvloop-force_choice] 1'] = {
'all_read': True,
'all_write': True,
'created_at': '2015-10-06T20:00:00Z',
'files': [
{
'id': 'test.fq'
}
],
'format': 'fastq',
'group': 'diagnostics',
'group_read': True,
'group_write': True,
'hold': True,
'id': '9pfsom1b',
'is_legacy': False,
'library_type': 'normal',
'name': 'Foobar',
'nuvs': False,
'paired': False,
'pathoscope': False,
'quality': None,
'ready': False,
'subtraction': {
'id': 'apple'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[uvloop-none] 2'] = {
'_id': '9pfsom1b',
'all_read': True,
'all_write': True,
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'files': [
{
'id': 'test.fq'
}
],
'format': 'fastq',
'group': 'none',
'group_read': True,
'group_write': True,
'hold': True,
'is_legacy': False,
'library_type': 'normal',
'name': 'Foobar',
'nuvs': False,
'paired': False,
'pathoscope': False,
'quality': None,
'ready': False,
'subtraction': {
'id': 'apple'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[uvloop-users_primary_group] 2'] = {
'_id': '9pfsom1b',
'all_read': True,
'all_write': True,
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'files': [
{
'id': 'test.fq'
}
],
'format': 'fastq',
'group': 'technician',
'group_read': True,
'group_write': True,
'hold': True,
'is_legacy': False,
'library_type': 'normal',
'name': 'Foobar',
'nuvs': False,
'paired': False,
'pathoscope': False,
'quality': None,
'ready': False,
'subtraction': {
'id': 'apple'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[uvloop-force_choice] 2'] = {
'_id': '9pfsom1b',
'all_read': True,
'all_write': True,
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'files': [
{
'id': 'test.fq'
}
],
'format': 'fastq',
'group': 'diagnostics',
'group_read': True,
'group_write': True,
'hold': True,
'is_legacy': False,
'library_type': 'normal',
'name': 'Foobar',
'nuvs': False,
'paired': False,
'pathoscope': False,
'quality': None,
'ready': False,
'subtraction': {
'id': 'apple'
},
'user': {
'id': 'test'
}
}
snapshots['test_find[uvloop-fred-None-None-d_range5-meta5] 1'] = {
'documents': [
{
'created_at': '2015-10-06T22:00:00Z',
'host': '',
'id': 'cb400e6d',
'isolate': '',
'name': '16SPP044',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
},
{
'created_at': '2015-10-06T20:00:00Z',
'host': '',
'id': '72bb8b31',
'isolate': 'Test',
'name': '16GVP043',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
}
],
'found_count': 2,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
}
snapshots['test_get[uvloop-True-None] 1'] = {
'caches': [
],
'created_at': '2015-10-06T20:00:00Z',
'files': [
{
'download_url': '/download/samples/files/file_1.fq.gz',
'id': 'foo',
'name': 'Bar.fq.gz',
'replace_url': '/upload/samples/test/files/1'
}
],
'id': 'test',
'name': 'Test',
'ready': True
}
snapshots['test_find[uvloop-None-None-None-d_range0-meta0] 1'] = {
'documents': [
{
'created_at': '2015-10-06T22:00:00Z',
'host': '',
'id': 'cb400e6d',
'isolate': '',
'name': '16SPP044',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
},
{
'created_at': '2015-10-06T21:00:00Z',
'host': '',
'id': 'beb1eb10',
'isolate': 'Thing',
'name': '16GVP042',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'bob'
}
},
{
'created_at': '2015-10-06T20:00:00Z',
'host': '',
'id': '72bb8b31',
'isolate': 'Test',
'name': '16GVP043',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
}
],
'found_count': 3,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
}
snapshots['test_find[uvloop-None-2-1-d_range1-meta1] 1'] = {
'documents': [
{
'created_at': '2015-10-06T22:00:00Z',
'host': '',
'id': 'cb400e6d',
'isolate': '',
'name': '16SPP044',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
},
{
'created_at': '2015-10-06T21:00:00Z',
'host': '',
'id': 'beb1eb10',
'isolate': 'Thing',
'name': '16GVP042',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'bob'
}
}
],
'found_count': 3,
'page': 1,
'page_count': 2,
'per_page': 2,
'total_count': 3
}
snapshots['test_find[uvloop-None-2-2-d_range2-meta2] 1'] = {
'documents': [
{
'created_at': '2015-10-06T20:00:00Z',
'host': '',
'id': '72bb8b31',
'isolate': 'Test',
'name': '16GVP043',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
}
],
'found_count': 3,
'page': 2,
'page_count': 2,
'per_page': 2,
'total_count': 3
}
snapshots['test_find[uvloop-gv-None-None-d_range3-meta3] 1'] = {
'documents': [
{
'created_at': '2015-10-06T21:00:00Z',
'host': '',
'id': 'beb1eb10',
'isolate': 'Thing',
'name': '16GVP042',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'bob'
}
},
{
'created_at': '2015-10-06T20:00:00Z',
'host': '',
'id': '72bb8b31',
'isolate': 'Test',
'name': '16GVP043',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
}
],
'found_count': 2,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
}
snapshots['test_find[uvloop-sp-None-None-d_range4-meta4] 1'] = {
'documents': [
{
'created_at': '2015-10-06T22:00:00Z',
'host': '',
'id': 'cb400e6d',
'isolate': '',
'name': '16SPP044',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
}
],
'found_count': 1,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
}
snapshots['test_get[uvloop-False-None] 1'] = {
'caches': [
],
'created_at': '2015-10-06T20:00:00Z',
'files': [
{
'download_url': '/download/samples/files/file_1.fq.gz',
'id': 'foo',
'name': 'Bar.fq.gz'
}
],
'id': 'test',
'name': 'Test',
'ready': False
}
snapshots['test_find_analyses[uvloop-None-None] 1'] = {
'documents': [
{
'created_at': '2015-10-06T20:00:00Z',
'id': 'test_1',
'index': {
'id': 'foo',
'version': 2
},
'job': {
'id': 'test'
},
'ready': True,
'reference': {
'id': 'baz',
'name': 'Baz'
},
'sample': {
'id': 'test'
},
'user': {
'id': 'bob'
},
'workflow': 'pathoscope_bowtie'
},
{
'created_at': '2015-10-06T20:00:00Z',
'id': 'test_2',
'index': {
'id': 'foo',
'version': 2
},
'job': {
'id': 'test'
},
'ready': True,
'reference': {
'id': 'baz',
'name': 'Baz'
},
'sample': {
'id': 'test'
},
'user': {
'id': 'fred'
},
'workflow': 'pathoscope_bowtie'
},
{
'created_at': '2015-10-06T20:00:00Z',
'id': 'test_3',
'index': {
'id': 'foo',
'version': 2
},
'job': {
'id': 'test'
},
'ready': True,
'reference': {
'id': 'foo',
'name': 'Foo'
},
'sample': {
'id': 'test'
},
'user': {
'id': 'fred'
},
'workflow': 'pathoscope_bowtie'
}
],
'found_count': 3,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
}
snapshots['test_find_analyses[uvloop-bob-None] 1'] = {
'documents': [
{
'created_at': '2015-10-06T20:00:00Z',
'id': 'test_1',
'index': {
'id': 'foo',
'version': 2
},
'job': {
'id': 'test'
},
'ready': True,
'reference': {
'id': 'baz',
'name': 'Baz'
},
'sample': {
'id': 'test'
},
'user': {
'id': 'bob'
},
'workflow': 'pathoscope_bowtie'
}
],
'found_count': 1,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
}
snapshots['test_find_analyses[uvloop-Baz-None] 1'] = {
'documents': [
{
'created_at': '2015-10-06T20:00:00Z',
'id': 'test_1',
'index': {
'id': 'foo',
'version': 2
},
'job': {
'id': 'test'
},
'ready': True,
'reference': {
'id': 'baz',
'name': 'Baz'
},
'sample': {
'id': 'test'
},
'user': {
'id': 'bob'
},
'workflow': 'pathoscope_bowtie'
},
{
'created_at': '2015-10-06T20:00:00Z',
'id': 'test_2',
'index': {
'id': 'foo',
'version': 2
},
'job': {
'id': 'test'
},
'ready': True,
'reference': {
'id': 'baz',
'name': 'Baz'
},
'sample': {
'id': 'test'
},
'user': {
'id': 'fred'
},
'workflow': 'pathoscope_bowtie'
}
],
'found_count': 2,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
} | tests/samples/snapshots/snap_test_api.py | from __future__ import unicode_literals
from snapshottest import GenericRepr, Snapshot
snapshots = Snapshot()
snapshots['TestCreate.test[uvloop-none] 1'] = {
'all_read': True,
'all_write': True,
'created_at': '2015-10-06T20:00:00Z',
'files': [
{
'id': 'test.fq'
}
],
'format': 'fastq',
'group': 'none',
'group_read': True,
'group_write': True,
'hold': True,
'id': '9pfsom1b',
'is_legacy': False,
'library_type': 'normal',
'name': 'Foobar',
'nuvs': False,
'paired': False,
'pathoscope': False,
'quality': None,
'ready': False,
'subtraction': {
'id': 'apple'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[uvloop-users_primary_group] 1'] = {
'all_read': True,
'all_write': True,
'created_at': '2015-10-06T20:00:00Z',
'files': [
{
'id': 'test.fq'
}
],
'format': 'fastq',
'group': 'technician',
'group_read': True,
'group_write': True,
'hold': True,
'id': '9pfsom1b',
'is_legacy': False,
'library_type': 'normal',
'name': 'Foobar',
'nuvs': False,
'paired': False,
'pathoscope': False,
'quality': None,
'ready': False,
'subtraction': {
'id': 'apple'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[uvloop-force_choice] 1'] = {
'all_read': True,
'all_write': True,
'created_at': '2015-10-06T20:00:00Z',
'files': [
{
'id': 'test.fq'
}
],
'format': 'fastq',
'group': 'diagnostics',
'group_read': True,
'group_write': True,
'hold': True,
'id': '9pfsom1b',
'is_legacy': False,
'library_type': 'normal',
'name': 'Foobar',
'nuvs': False,
'paired': False,
'pathoscope': False,
'quality': None,
'ready': False,
'subtraction': {
'id': 'apple'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[uvloop-none] 2'] = {
'_id': '9pfsom1b',
'all_read': True,
'all_write': True,
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'files': [
{
'id': 'test.fq'
}
],
'format': 'fastq',
'group': 'none',
'group_read': True,
'group_write': True,
'hold': True,
'is_legacy': False,
'library_type': 'normal',
'name': 'Foobar',
'nuvs': False,
'paired': False,
'pathoscope': False,
'quality': None,
'ready': False,
'subtraction': {
'id': 'apple'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[uvloop-users_primary_group] 2'] = {
'_id': '9pfsom1b',
'all_read': True,
'all_write': True,
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'files': [
{
'id': 'test.fq'
}
],
'format': 'fastq',
'group': 'technician',
'group_read': True,
'group_write': True,
'hold': True,
'is_legacy': False,
'library_type': 'normal',
'name': 'Foobar',
'nuvs': False,
'paired': False,
'pathoscope': False,
'quality': None,
'ready': False,
'subtraction': {
'id': 'apple'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[uvloop-force_choice] 2'] = {
'_id': '9pfsom1b',
'all_read': True,
'all_write': True,
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'files': [
{
'id': 'test.fq'
}
],
'format': 'fastq',
'group': 'diagnostics',
'group_read': True,
'group_write': True,
'hold': True,
'is_legacy': False,
'library_type': 'normal',
'name': 'Foobar',
'nuvs': False,
'paired': False,
'pathoscope': False,
'quality': None,
'ready': False,
'subtraction': {
'id': 'apple'
},
'user': {
'id': 'test'
}
}
snapshots['test_find[uvloop-fred-None-None-d_range5-meta5] 1'] = {
'documents': [
{
'created_at': '2015-10-06T22:00:00Z',
'host': '',
'id': 'cb400e6d',
'isolate': '',
'name': '16SPP044',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
},
{
'created_at': '2015-10-06T20:00:00Z',
'host': '',
'id': '72bb8b31',
'isolate': 'Test',
'name': '16GVP043',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
}
],
'found_count': 2,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
}
snapshots['test_get[uvloop-True-None] 1'] = {
'caches': [
],
'created_at': '2015-10-06T20:00:00Z',
'files': [
{
'download_url': '/download/samples/files/file_1.fq.gz',
'id': 'foo',
'name': 'Bar.fq.gz',
'replace_url': '/upload/samples/test/files/1'
}
],
'id': 'test',
'name': 'Test',
'ready': True
}
snapshots['test_find[uvloop-None-None-None-d_range0-meta0] 1'] = {
'documents': [
{
'created_at': '2015-10-06T22:00:00Z',
'host': '',
'id': 'cb400e6d',
'isolate': '',
'name': '16SPP044',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
},
{
'created_at': '2015-10-06T21:00:00Z',
'host': '',
'id': 'beb1eb10',
'isolate': 'Thing',
'name': '16GVP042',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'bob'
}
},
{
'created_at': '2015-10-06T20:00:00Z',
'host': '',
'id': '72bb8b31',
'isolate': 'Test',
'name': '16GVP043',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
}
],
'found_count': 3,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
}
snapshots['test_find[uvloop-None-2-1-d_range1-meta1] 1'] = {
'documents': [
{
'created_at': '2015-10-06T22:00:00Z',
'host': '',
'id': 'cb400e6d',
'isolate': '',
'name': '16SPP044',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
},
{
'created_at': '2015-10-06T21:00:00Z',
'host': '',
'id': 'beb1eb10',
'isolate': 'Thing',
'name': '16GVP042',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'bob'
}
}
],
'found_count': 3,
'page': 1,
'page_count': 2,
'per_page': 2,
'total_count': 3
}
snapshots['test_find[uvloop-None-2-2-d_range2-meta2] 1'] = {
'documents': [
{
'created_at': '2015-10-06T20:00:00Z',
'host': '',
'id': '72bb8b31',
'isolate': 'Test',
'name': '16GVP043',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
}
],
'found_count': 3,
'page': 2,
'page_count': 2,
'per_page': 2,
'total_count': 3
}
snapshots['test_find[uvloop-gv-None-None-d_range3-meta3] 1'] = {
'documents': [
{
'created_at': '2015-10-06T21:00:00Z',
'host': '',
'id': 'beb1eb10',
'isolate': 'Thing',
'name': '16GVP042',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'bob'
}
},
{
'created_at': '2015-10-06T20:00:00Z',
'host': '',
'id': '72bb8b31',
'isolate': 'Test',
'name': '16GVP043',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
}
],
'found_count': 2,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
}
snapshots['test_find[uvloop-sp-None-None-d_range4-meta4] 1'] = {
'documents': [
{
'created_at': '2015-10-06T22:00:00Z',
'host': '',
'id': 'cb400e6d',
'isolate': '',
'name': '16SPP044',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
}
],
'found_count': 1,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
}
snapshots['test_get[uvloop-False-None] 1'] = {
'caches': [
],
'created_at': '2015-10-06T20:00:00Z',
'files': [
{
'download_url': '/download/samples/files/file_1.fq.gz',
'id': 'foo',
'name': 'Bar.fq.gz'
}
],
'id': 'test',
'name': 'Test',
'ready': False
}
snapshots['test_find_analyses[uvloop-None-None] 1'] = {
'documents': [
{
'created_at': '2015-10-06T20:00:00Z',
'id': 'test_1',
'index': {
'id': 'foo',
'version': 2
},
'job': {
'id': 'test'
},
'ready': True,
'reference': {
'id': 'baz',
'name': 'Baz'
},
'sample': {
'id': 'test'
},
'user': {
'id': 'bob'
},
'workflow': 'pathoscope_bowtie'
},
{
'created_at': '2015-10-06T20:00:00Z',
'id': 'test_2',
'index': {
'id': 'foo',
'version': 2
},
'job': {
'id': 'test'
},
'ready': True,
'reference': {
'id': 'baz',
'name': 'Baz'
},
'sample': {
'id': 'test'
},
'user': {
'id': 'fred'
},
'workflow': 'pathoscope_bowtie'
},
{
'created_at': '2015-10-06T20:00:00Z',
'id': 'test_3',
'index': {
'id': 'foo',
'version': 2
},
'job': {
'id': 'test'
},
'ready': True,
'reference': {
'id': 'foo',
'name': 'Foo'
},
'sample': {
'id': 'test'
},
'user': {
'id': 'fred'
},
'workflow': 'pathoscope_bowtie'
}
],
'found_count': 3,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
}
snapshots['test_find_analyses[uvloop-bob-None] 1'] = {
'documents': [
{
'created_at': '2015-10-06T20:00:00Z',
'id': 'test_1',
'index': {
'id': 'foo',
'version': 2
},
'job': {
'id': 'test'
},
'ready': True,
'reference': {
'id': 'baz',
'name': 'Baz'
},
'sample': {
'id': 'test'
},
'user': {
'id': 'bob'
},
'workflow': 'pathoscope_bowtie'
}
],
'found_count': 1,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
}
snapshots['test_find_analyses[uvloop-Baz-None] 1'] = {
'documents': [
{
'created_at': '2015-10-06T20:00:00Z',
'id': 'test_1',
'index': {
'id': 'foo',
'version': 2
},
'job': {
'id': 'test'
},
'ready': True,
'reference': {
'id': 'baz',
'name': 'Baz'
},
'sample': {
'id': 'test'
},
'user': {
'id': 'bob'
},
'workflow': 'pathoscope_bowtie'
},
{
'created_at': '2015-10-06T20:00:00Z',
'id': 'test_2',
'index': {
'id': 'foo',
'version': 2
},
'job': {
'id': 'test'
},
'ready': True,
'reference': {
'id': 'baz',
'name': 'Baz'
},
'sample': {
'id': 'test'
},
'user': {
'id': 'fred'
},
'workflow': 'pathoscope_bowtie'
}
],
'found_count': 2,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
} | 0.411111 | 0.22213 |
import shutil
from typing import Mapping, Tuple, Optional
from rlbot.parsing.bot_config_bundle import BotConfigBundle
from rlbot.setup_manager import setup_manager_context
from rlbot.training.training import Fail
from rlbottraining.exercise_runner import run_playlist, RenderPolicy
from bots import BotID
from match import MatchDetails, MatchResult
from match_exercise import MatchExercise, MatchGrader
from overlay import make_overlay
from paths import LeagueDir
from replays import ReplayPreference, ReplayMonitor, ReplayData
from popup import confirm_match_result
def run_match(ld: LeagueDir, match_details: MatchDetails, bots: Mapping[BotID, BotConfigBundle],
replay_preference: ReplayPreference) -> Tuple[MatchResult, Optional[ReplayData]]:
"""
Run a match, wait for it to finish, and return the result.
"""
with setup_manager_context() as setup_manager:
# Expose data to overlay
make_overlay(ld, match_details, bots)
# Prepare the match exercise
print(f"Starting match: {match_details.blue} vs {match_details.orange}. Waiting for match to finish...")
match = MatchExercise(
name=match_details.name,
match_config=match_details.to_config(bots),
grader=MatchGrader(
replay_monitor=ReplayMonitor(replay_preference=replay_preference),
)
)
# If any bots have signed up for early start, give them 10 seconds.
# This is typically enough for Scratch.
setup_manager.early_start_seconds = 10
# For loop, but should only run exactly once
for exercise_result in run_playlist([match], setup_manager=setup_manager,
render_policy=RenderPolicy.DEFAULT):
replay_data = None
# Warn if no replay was found
replay_data = exercise_result.exercise.grader.replay_monitor.replay_data()
if isinstance(exercise_result.grade, Fail) and replay_data.replay_id is None:
print(f"WARNING: No replay was found for the match '{match_details.name}'.")
else:
if replay_preference != ReplayPreference.NONE and replay_data.replay_path is not None:
try:
dst = ld.replays / f"{replay_data.replay_id}.replay"
shutil.copy(replay_data.replay_path, dst)
print("Replay successfully copied to replays directory")
except:
pass
match_result = confirm_match_result(exercise_result.exercise.grader.match_result)
return match_result, replay_data | autoleague/match_runner.py | import shutil
from typing import Mapping, Tuple, Optional
from rlbot.parsing.bot_config_bundle import BotConfigBundle
from rlbot.setup_manager import setup_manager_context
from rlbot.training.training import Fail
from rlbottraining.exercise_runner import run_playlist, RenderPolicy
from bots import BotID
from match import MatchDetails, MatchResult
from match_exercise import MatchExercise, MatchGrader
from overlay import make_overlay
from paths import LeagueDir
from replays import ReplayPreference, ReplayMonitor, ReplayData
from popup import confirm_match_result
def run_match(ld: LeagueDir, match_details: MatchDetails, bots: Mapping[BotID, BotConfigBundle],
replay_preference: ReplayPreference) -> Tuple[MatchResult, Optional[ReplayData]]:
"""
Run a match, wait for it to finish, and return the result.
"""
with setup_manager_context() as setup_manager:
# Expose data to overlay
make_overlay(ld, match_details, bots)
# Prepare the match exercise
print(f"Starting match: {match_details.blue} vs {match_details.orange}. Waiting for match to finish...")
match = MatchExercise(
name=match_details.name,
match_config=match_details.to_config(bots),
grader=MatchGrader(
replay_monitor=ReplayMonitor(replay_preference=replay_preference),
)
)
# If any bots have signed up for early start, give them 10 seconds.
# This is typically enough for Scratch.
setup_manager.early_start_seconds = 10
# For loop, but should only run exactly once
for exercise_result in run_playlist([match], setup_manager=setup_manager,
render_policy=RenderPolicy.DEFAULT):
replay_data = None
# Warn if no replay was found
replay_data = exercise_result.exercise.grader.replay_monitor.replay_data()
if isinstance(exercise_result.grade, Fail) and replay_data.replay_id is None:
print(f"WARNING: No replay was found for the match '{match_details.name}'.")
else:
if replay_preference != ReplayPreference.NONE and replay_data.replay_path is not None:
try:
dst = ld.replays / f"{replay_data.replay_id}.replay"
shutil.copy(replay_data.replay_path, dst)
print("Replay successfully copied to replays directory")
except:
pass
match_result = confirm_match_result(exercise_result.exercise.grader.match_result)
return match_result, replay_data | 0.68215 | 0.242834 |
import Queue
import logging
import traceback
import threading
import pprint
import os
import ambari_simplejson as json
import time
from AgentException import AgentException
from LiveStatus import LiveStatus
from ActualConfigHandler import ActualConfigHandler
from CommandStatusDict import CommandStatusDict
from CustomServiceOrchestrator import CustomServiceOrchestrator
from ambari_agent.BackgroundCommandExecutionHandle import BackgroundCommandExecutionHandle
# Module-wide logger; handlers/levels are configured by the agent entry point.
logger = logging.getLogger()
# NOTE(review): not referenced anywhere in this module's visible code —
# presumably kept for backward compatibility; confirm before removing.
installScriptHash = -1
class ActionQueue(threading.Thread):
  """ Action Queue for the agent. We pick one command at a time from the queue
  and execute it
  Note: Action and command terms in this and related classes are used interchangeably

  The consumer loop runs on this thread (see run()). Producers feed commands
  in via put() / put_status() and may remove them via cancel(). Three queues
  are kept: regular execution commands, status commands, and background
  execution commands. Results are accumulated in a CommandStatusDict and
  drained by the controller through result().
  """

  # How many actions can be performed in parallel. Feel free to change
  # NOTE(review): not referenced in this class's visible code — parallel mode
  # below spawns an unbounded number of worker threads instead.
  MAX_CONCURRENT_ACTIONS = 5

  # How much time(in seconds) we need wait for new incoming execution command before checking
  # status command queue
  EXECUTION_COMMAND_WAIT_TIME = 2

  # Command types dispatched by process_command()
  STATUS_COMMAND = 'STATUS_COMMAND'
  EXECUTION_COMMAND = 'EXECUTION_COMMAND'
  AUTO_EXECUTION_COMMAND = 'AUTO_EXECUTION_COMMAND'
  BACKGROUND_EXECUTION_COMMAND = 'BACKGROUND_EXECUTION_COMMAND'

  # Role-level commands carried inside an execution command ('roleCommand' key)
  ROLE_COMMAND_INSTALL = 'INSTALL'
  ROLE_COMMAND_START = 'START'
  ROLE_COMMAND_STOP = 'STOP'
  ROLE_COMMAND_CUSTOM_COMMAND = 'CUSTOM_COMMAND'
  CUSTOM_COMMAND_RESTART = 'RESTART'

  # Statuses reported back for a command
  IN_PROGRESS_STATUS = 'IN_PROGRESS'
  COMPLETED_STATUS = 'COMPLETED'
  FAILED_STATUS = 'FAILED'

  def __init__(self, config, controller):
    """
    config: agent configuration object; must support get('agent', 'prefix')
            and get_parallel_exec_option().
    controller: owning controller; used for the recovery manager and for
            triggering heartbeats on status changes.
    """
    super(ActionQueue, self).__init__()
    self.commandQueue = Queue.Queue()            # EXECUTION / AUTO_EXECUTION commands
    self.statusCommandQueue = Queue.Queue()      # STATUS commands (replaced wholesale by put_status)
    self.backgroundCommandQueue = Queue.Queue()  # BACKGROUND_EXECUTION commands
    # Every status change triggers a heartbeat via status_update_callback.
    self.commandStatuses = CommandStatusDict(callback_action =
      self.status_update_callback)
    self.config = config
    self.controller = controller
    self.configTags = {}
    self._stop = threading.Event()
    # Directory prefix for per-task output/error/structured-out files.
    self.tmpdir = config.get('agent', 'prefix')
    self.customServiceOrchestrator = CustomServiceOrchestrator(config, controller)
    self.parallel_execution = config.get_parallel_exec_option()
    if self.parallel_execution == 1:
      logger.info("Parallel execution is enabled, will start Agent commands in parallel")

  def stop(self):
    """Signal the consumer loop in run() to exit after its current iteration."""
    self._stop.set()

  def stopped(self):
    """Return True once stop() has been called."""
    return self._stop.isSet()

  def put_status(self, commands):
    """Replace the pending status commands with a fresh full set."""
    # Was supposed that we got all set of statuses, we don't need to keep old ones
    # NOTE(review): clearing the underlying deque directly bypasses the
    # Queue's lock/condition bookkeeping — relies on Queue internals.
    self.statusCommandQueue.queue.clear()
    for command in commands:
      logger.info("Adding " + command['commandType'] + " for service " + \
                  command['serviceName'] + " of cluster " + \
                  command['clusterName'] + " to the queue.")
      self.statusCommandQueue.put(command)

  def put(self, commands):
    """
    Enqueue execution commands. Background commands get a handle attached
    and go to the background queue; everything else goes to commandQueue.
    """
    for command in commands:
      # Fill in defaults so downstream logging/reporting never hits KeyError.
      if not command.has_key('serviceName'):
        command['serviceName'] = "null"
      if not command.has_key('clusterName'):
        command['clusterName'] = 'null'

      logger.info("Adding " + command['commandType'] + " for role " + \
                  command['role'] + " for service " + \
                  command['serviceName'] + " of cluster " + \
                  command['clusterName'] + " to the queue.")
      if command['commandType'] == self.BACKGROUND_EXECUTION_COMMAND :
        self.backgroundCommandQueue.put(self.createCommandHandle(command))
      else:
        self.commandQueue.put(command)

  def cancel(self, commands):
    """
    Cancel commands by their 'target_task_id': drop matching entries that are
    still queued, and ask the orchestrator to kill them if already running.
    """
    for command in commands:

      logger.info("Canceling command {tid}".format(tid = str(command['target_task_id'])))
      logger.debug(pprint.pformat(command))

      task_id = command['target_task_id']
      reason = command['reason']

      # Remove from the command queue by task_id
      # NOTE(review): swapping self.commandQueue and draining the old one is
      # not atomic w.r.t. the consumer thread — a command taken by run()
      # between these statements is unaffected; confirm this race is acceptable.
      queue = self.commandQueue
      self.commandQueue = Queue.Queue()

      while not queue.empty():
        queued_command = queue.get(False)
        if queued_command['task_id'] != task_id:
          self.commandQueue.put(queued_command)
        else:
          logger.info("Canceling " + queued_command['commandType'] + \
                      " for service " + queued_command['serviceName'] + \
                      " of cluster " + queued_command['clusterName'] + \
                      " to the queue.")

      # Kill if in progress
      self.customServiceOrchestrator.cancel_command(task_id, reason)

  def run(self):
    """
    Consumer loop: drain background and status queues, then take one
    execution command (sequential mode) or spawn a daemon thread per
    available command (parallel mode). Queue.get blocks up to
    EXECUTION_COMMAND_WAIT_TIME so the side queues are re-checked regularly.
    """
    while not self.stopped():
      self.processBackgroundQueueSafeEmpty();
      self.processStatusCommandQueueSafeEmpty();
      try:
        if self.parallel_execution == 0:
          command = self.commandQueue.get(True, self.EXECUTION_COMMAND_WAIT_TIME)
          self.process_command(command)
        else:
          # If parallel execution is enabled, just kick off all available
          # commands using separate threads
          while (True):
            # Inner loop exits via Queue.Empty once the queue is drained.
            command = self.commandQueue.get(True, self.EXECUTION_COMMAND_WAIT_TIME)
            logger.info("Kicking off a thread for the command, id=" +
                        str(command['commandId']) + " taskId=" + str(command['taskId']))
            t = threading.Thread(target=self.process_command, args=(command,))
            t.daemon = True
            t.start()
      except (Queue.Empty):
        # Timed out waiting for work — loop back and service the other queues.
        pass

  def processBackgroundQueueSafeEmpty(self):
    """Start every queued background command whose handle has no status yet."""
    while not self.backgroundCommandQueue.empty():
      try:
        command = self.backgroundCommandQueue.get(False)
        # Only launch commands that have not been started yet
        # (handle status is set once execution begins).
        if(command.has_key('__handle') and command['__handle'].status == None):
          self.process_command(command)
      except (Queue.Empty):
        pass

  def processStatusCommandQueueSafeEmpty(self):
    """Drain and execute all pending status commands without blocking."""
    while not self.statusCommandQueue.empty():
      try:
        command = self.statusCommandQueue.get(False)
        self.process_command(command)
      except (Queue.Empty):
        pass

  def createCommandHandle(self, command):
    """
    Attach a BackgroundCommandExecutionHandle under '__handle' so background
    completion can be reported asynchronously. Raises AgentException if the
    command was already given a handle.
    """
    if(command.has_key('__handle')):
      raise AgentException("Command already has __handle")
    command['__handle'] = BackgroundCommandExecutionHandle(command, command['commandId'], None, self.on_background_command_complete_callback)
    return command

  def process_command(self, command):
    """
    Dispatch a single command by its 'commandType'. Execution-type commands
    are bracketed by recovery-manager start/stop notifications; any exception
    is logged rather than propagated so the consumer loop survives.
    """
    # make sure we log failures
    commandType = command['commandType']
    logger.debug("Took an element of Queue (command type = %s)." % commandType)
    try:
      if commandType in [self.EXECUTION_COMMAND, self.BACKGROUND_EXECUTION_COMMAND, self.AUTO_EXECUTION_COMMAND]:
        try:
          if self.controller.recovery_manager.enabled():
            self.controller.recovery_manager.start_execution_command()
          self.execute_command(command)
        finally:
          # Always notify the recovery manager, even if execution raised.
          if self.controller.recovery_manager.enabled():
            self.controller.recovery_manager.stop_execution_command()
      elif commandType == self.STATUS_COMMAND:
        self.execute_status_command(command)
      else:
        logger.error("Unrecognized command " + pprint.pformat(command))
    except Exception, err:
      # Should not happen
      traceback.print_exc()
      logger.warn(err)

  def tasks_in_progress_or_pending(self):
    """
    Return True if any execution command is queued or (per the recovery
    manager) currently running.
    """
    return_val = False
    if not self.commandQueue.empty():
      return_val = True
    if self.controller.recovery_manager.has_active_command():
      return_val = True
    return return_val
    # NOTE(review): unreachable statement after return — dead code.
    pass

  def execute_command(self, command):
    '''
    Executes commands of type EXECUTION_COMMAND

    Reports an IN_PROGRESS status first, runs the command through the
    orchestrator (retrying within 'max_duration_for_retries' seconds when
    'command_retry_enabled' is "true"), then reports COMPLETED or FAILED.
    Background commands return immediately after launch; their result is
    delivered via on_background_command_complete_callback. On success the
    recovery manager and actual-config bookkeeping are updated.
    '''
    clusterName = command['clusterName']
    commandId = command['commandId']
    isCommandBackground = command['commandType'] == self.BACKGROUND_EXECUTION_COMMAND
    isAutoExecuteCommand = command['commandType'] == self.AUTO_EXECUTION_COMMAND
    message = "Executing command with id = {commandId} for role = {role} of " \
              "cluster {cluster}.".format(
              commandId = str(commandId), role=command['role'],
              cluster=clusterName)
    logger.info(message)

    taskId = command['taskId']
    # Preparing 'IN_PROGRESS' report
    in_progress_status = self.commandStatuses.generate_report_template(command)
    # The path of the files that contain the output log and error log use a prefix that the agent advertises to the
    # server. The prefix is defined in agent-config.ini
    # Auto-execute (recovery) commands use 'auto_'-prefixed files so they do
    # not clobber the logs of operator-initiated commands.
    if not isAutoExecuteCommand:
      in_progress_status.update({
        'tmpout': self.tmpdir + os.sep + 'output-' + str(taskId) + '.txt',
        'tmperr': self.tmpdir + os.sep + 'errors-' + str(taskId) + '.txt',
        'structuredOut' : self.tmpdir + os.sep + 'structured-out-' + str(taskId) + '.json',
        'status': self.IN_PROGRESS_STATUS
      })
    else:
      in_progress_status.update({
        'tmpout': self.tmpdir + os.sep + 'auto_output-' + str(taskId) + '.txt',
        'tmperr': self.tmpdir + os.sep + 'auto_errors-' + str(taskId) + '.txt',
        'structuredOut' : self.tmpdir + os.sep + 'auto_structured-out-' + str(taskId) + '.json',
        'status': self.IN_PROGRESS_STATUS
      })
    self.commandStatuses.put_command_status(command, in_progress_status)

    # Retry bookkeeping: retryDuration is the remaining wall-clock budget.
    numAttempts = 0
    retryDuration = 0  # even with 0 allow one attempt
    retryAble = False
    delay = 1
    if 'commandParams' in command:
      if 'max_duration_for_retries' in command['commandParams']:
        retryDuration = int(command['commandParams']['max_duration_for_retries'])
      if 'command_retry_enabled' in command['commandParams']:
        retryAble = command['commandParams']['command_retry_enabled'] == "true"
    if isAutoExecuteCommand:
      # Recovery commands are never retried here; the recovery manager
      # re-issues them itself.
      retryAble = False

    logger.debug("Command execution metadata - retry enabled = {retryAble}, max retry duration (sec) = {retryDuration}".
                 format(retryAble=retryAble, retryDuration=retryDuration))
    while retryDuration >= 0:
      numAttempts += 1
      start = 0
      if retryAble:
        start = int(time.time())
      # running command
      # First attempt overwrites any stale output files; retries append.
      commandresult = self.customServiceOrchestrator.runCommand(command,
                                                               in_progress_status['tmpout'],
                                                               in_progress_status['tmperr'],
                                                               override_output_files=numAttempts == 1,
                                                               retry=numAttempts > 1)
      end = 1
      if retryAble:
        end = int(time.time())
      # Charge the time spent on this attempt against the retry budget.
      retryDuration -= (end - start)

      # dumping results
      if isCommandBackground:
        # Background command was launched; its final status arrives later
        # through the '__handle' completion callback.
        return
      else:
        if commandresult['exitcode'] == 0:
          status = self.COMPLETED_STATUS
        else:
          status = self.FAILED_STATUS

      if status != self.COMPLETED_STATUS and retryAble == True and retryDuration > 0:
        # Exponential backoff, clamped so the sleep never exceeds the
        # remaining budget.
        delay = self.get_retry_delay(delay)
        if delay > retryDuration:
          delay = retryDuration
        retryDuration -= delay  # allow one last attempt
        logger.info("Retrying command id {cid} after a wait of {delay}".format(cid=taskId, delay=delay))
        time.sleep(delay)
        continue
      else:
        break

    # Final report for the server: stdout/stderr/exit code and status.
    roleResult = self.commandStatuses.generate_report_template(command)
    roleResult.update({
      'stdout': commandresult['stdout'],
      'stderr': commandresult['stderr'],
      'exitCode': commandresult['exitcode'],
      'status': status,
    })

    # Empty streams are reported as the literal string 'None'.
    if roleResult['stdout'] == '':
      roleResult['stdout'] = 'None'
    if roleResult['stderr'] == '':
      roleResult['stderr'] = 'None'

    # let ambari know name of custom command
    if command['hostLevelParams'].has_key('custom_command'):
      roleResult['customCommand'] = command['hostLevelParams']['custom_command']

    if 'structuredOut' in commandresult:
      roleResult['structuredOut'] = str(json.dumps(commandresult['structuredOut']))
    else:
      roleResult['structuredOut'] = ''

    # let recovery manager know the current state
    if status == self.COMPLETED_STATUS:
      if self.controller.recovery_manager.enabled() and command.has_key('roleCommand'):
        if command['roleCommand'] == self.ROLE_COMMAND_START:
          # Component successfully started: mark LIVE and configs fresh.
          self.controller.recovery_manager.update_current_status(command['role'], LiveStatus.LIVE_STATUS)
          self.controller.recovery_manager.update_config_staleness(command['role'], False)
          logger.info("After EXECUTION_COMMAND (START), current state of " + command['role'] + " to " +
                      self.controller.recovery_manager.get_current_status(command['role']) )
        elif command['roleCommand'] == self.ROLE_COMMAND_STOP or command['roleCommand'] == self.ROLE_COMMAND_INSTALL:
          self.controller.recovery_manager.update_current_status(command['role'], LiveStatus.DEAD_STATUS)
          logger.info("After EXECUTION_COMMAND (STOP/INSTALL), current state of " + command['role'] + " to " +
                      self.controller.recovery_manager.get_current_status(command['role']) )
        elif command['roleCommand'] == self.ROLE_COMMAND_CUSTOM_COMMAND:
          # A custom RESTART behaves like a successful START.
          if command['hostLevelParams'].has_key('custom_command') and \
                  command['hostLevelParams']['custom_command'] == self.CUSTOM_COMMAND_RESTART:
            self.controller.recovery_manager.update_current_status(command['role'], LiveStatus.LIVE_STATUS)
            self.controller.recovery_manager.update_config_staleness(command['role'], False)
            logger.info("After EXECUTION_COMMAND (RESTART), current state of " + command['role'] + " to " +
                        self.controller.recovery_manager.get_current_status(command['role']) )
      pass

      # let ambari know that configuration tags were applied
      configHandler = ActualConfigHandler(self.config, self.configTags)
      #update
      if command.has_key('forceRefreshConfigTags') and len(command['forceRefreshConfigTags']) > 0 :
        forceRefreshConfigTags = command['forceRefreshConfigTags']
        logger.info("Got refresh additional component tags command")

        for configTag in forceRefreshConfigTags :
          configHandler.update_component_tag(command['role'], configTag, command['configurationTags'][configTag])

        roleResult['customCommand'] = self.CUSTOM_COMMAND_RESTART # force restart for component to evict stale_config on server side
        command['configurationTags'] = configHandler.read_actual_component(command['role'])

      if command.has_key('configurationTags'):
        configHandler.write_actual(command['configurationTags'])
        roleResult['configurationTags'] = command['configurationTags']
      component = {'serviceName':command['serviceName'],'componentName':command['role']}
      # Per-component tags are persisted for START, client INSTALL, and
      # custom RESTART commands.
      if command.has_key('roleCommand') and \
          (command['roleCommand'] == self.ROLE_COMMAND_START or \
          (command['roleCommand'] == self.ROLE_COMMAND_INSTALL \
          and component in LiveStatus.CLIENT_COMPONENTS) or \
          (command['roleCommand'] == self.ROLE_COMMAND_CUSTOM_COMMAND and \
          command['hostLevelParams'].has_key('custom_command') and \
          command['hostLevelParams']['custom_command'] == self.CUSTOM_COMMAND_RESTART)):
        configHandler.write_actual_component(command['role'], command['configurationTags'])
        if command['hostLevelParams'].has_key('clientsToUpdateConfigs') and \
            command['hostLevelParams']['clientsToUpdateConfigs']:
          configHandler.write_client_components(command['serviceName'], command['configurationTags'],
                                                command['hostLevelParams']['clientsToUpdateConfigs'])
        roleResult['configurationTags'] = configHandler.read_actual_component(command['role'])

    self.commandStatuses.put_command_status(command, roleResult)

  def get_retry_delay(self, last_delay):
    """
    Returns exponentially growing delay. The idea being if number of retries is high then the reason to retry
    is probably a host or environment specific issue requiring longer waits
    """
    return last_delay * 2

  def command_was_canceled(self):
    """NOTE(review): this method is a no-op — the statement below is a bare
    attribute access with no effect. Looks like incomplete/dead code; confirm
    against callers before removing."""
    self.customServiceOrchestrator

  def on_background_command_complete_callback(self, process_condensed_result, handle):
    """
    Completion callback for background commands: derive final status from the
    handle's exit code (forced to FAILED if the command was canceled, with
    the cancellation reason appended to both streams) and publish the report.
    """
    logger.debug('Start callback: %s' % process_condensed_result)
    logger.debug('The handle is: %s' % handle)
    status = self.COMPLETED_STATUS if handle.exitCode == 0 else self.FAILED_STATUS

    aborted_postfix = self.customServiceOrchestrator.command_canceled_reason(handle.command['taskId'])
    if aborted_postfix:
      status = self.FAILED_STATUS
      logger.debug('Set status to: %s , reason = %s' % (status, aborted_postfix))
    else:
      aborted_postfix = ''

    roleResult = self.commandStatuses.generate_report_template(handle.command)

    roleResult.update({
      'stdout': process_condensed_result['stdout'] + aborted_postfix,
      'stderr': process_condensed_result['stderr'] + aborted_postfix,
      'exitCode': process_condensed_result['exitcode'],
      'structuredOut': str(json.dumps(process_condensed_result['structuredOut'])) if 'structuredOut' in process_condensed_result else '',
      'status': status,
    })

    self.commandStatuses.put_command_status(handle.command, roleResult)

  def execute_status_command(self, command):
    '''
    Executes commands of type STATUS_COMMAND

    Asks the orchestrator for the component's status and security state,
    updates the recovery manager, and publishes a LiveStatus-built report.
    Exceptions are logged and swallowed so a bad status check cannot kill
    the consumer loop.
    '''
    try:
      cluster = command['clusterName']
      service = command['serviceName']
      component = command['componentName']
      configurations = command['configurations']
      if configurations.has_key('global'):
        globalConfig = configurations['global']
      else:
        globalConfig = {}

      livestatus = LiveStatus(cluster, service, component,
                              globalConfig, self.config, self.configTags)

      component_extra = None
      request_execution_cmd = False

      # For custom services, responsibility to determine service status is
      # delegated to python scripts
      component_status_result = self.customServiceOrchestrator.requestComponentStatus(command)
      component_security_status_result = self.customServiceOrchestrator.requestComponentSecurityState(command)

      # Exit code 0 from the status script means the component is alive.
      if component_status_result['exitcode'] == 0:
        component_status = LiveStatus.LIVE_STATUS
        self.controller.recovery_manager.update_current_status(component, component_status)
      else:
        component_status = LiveStatus.DEAD_STATUS
        self.controller.recovery_manager.update_current_status(component, component_status)
        # Ask whether the server should be told to issue a recovery command.
        request_execution_cmd = self.controller.recovery_manager.requires_recovery(component)

      if component_status_result.has_key('structuredOut'):
        component_extra = component_status_result['structuredOut']

      result = livestatus.build(forced_component_status= component_status)
      if self.controller.recovery_manager.enabled():
        result['sendExecCmdDet'] = str(request_execution_cmd)

      # Add security state to the result
      result['securityState'] = component_security_status_result

      if component_extra is not None and len(component_extra) != 0:
        # Alerts are lifted to the top level; the rest goes under 'extra'.
        if component_extra.has_key('alerts'):
          result['alerts'] = component_extra['alerts']
          del component_extra['alerts']

        result['extra'] = component_extra

      logger.debug("Got live status for component " + component + \
                   " of service " + str(service) + \
                   " of cluster " + str(cluster))

      logger.debug(pprint.pformat(result))
      if result is not None:
        self.commandStatuses.put_command_status(command, result)
    except Exception, err:
      traceback.print_exc()
      logger.warn(err)
    pass

  # Store action result to agent response queue
  def result(self):
    """Return the accumulated command-status report for the next heartbeat."""
    return self.commandStatuses.generate_report()

  def status_update_callback(self):
    """
    Actions that are executed every time when command status changes
    """
    self.controller.trigger_heartbeat()

  # Removes all commands from the queue
  def reset(self):
    """Drop every queued execution command.

    NOTE(review): holds the Queue's mutex while clearing the underlying
    deque — depends on Queue internals ('mutex', 'queue').
    """
    queue = self.commandQueue
    with queue.mutex:
      queue.queue.clear()
import logging
import traceback
import threading
import pprint
import os
import ambari_simplejson as json
import time
from AgentException import AgentException
from LiveStatus import LiveStatus
from ActualConfigHandler import ActualConfigHandler
from CommandStatusDict import CommandStatusDict
from CustomServiceOrchestrator import CustomServiceOrchestrator
from ambari_agent.BackgroundCommandExecutionHandle import BackgroundCommandExecutionHandle
# Module-wide logger; handlers/levels are configured by the agent entry point.
logger = logging.getLogger()
# NOTE(review): not referenced anywhere in this module's visible code —
# presumably kept for backward compatibility; confirm before removing.
installScriptHash = -1
class ActionQueue(threading.Thread):
""" Action Queue for the agent. We pick one command at a time from the queue
and execute it
Note: Action and command terms in this and related classes are used interchangeably
"""
# How many actions can be performed in parallel. Feel free to change
MAX_CONCURRENT_ACTIONS = 5
#How much time(in seconds) we need wait for new incoming execution command before checking
#status command queue
EXECUTION_COMMAND_WAIT_TIME = 2
STATUS_COMMAND = 'STATUS_COMMAND'
EXECUTION_COMMAND = 'EXECUTION_COMMAND'
AUTO_EXECUTION_COMMAND = 'AUTO_EXECUTION_COMMAND'
BACKGROUND_EXECUTION_COMMAND = 'BACKGROUND_EXECUTION_COMMAND'
ROLE_COMMAND_INSTALL = 'INSTALL'
ROLE_COMMAND_START = 'START'
ROLE_COMMAND_STOP = 'STOP'
ROLE_COMMAND_CUSTOM_COMMAND = 'CUSTOM_COMMAND'
CUSTOM_COMMAND_RESTART = 'RESTART'
IN_PROGRESS_STATUS = 'IN_PROGRESS'
COMPLETED_STATUS = 'COMPLETED'
FAILED_STATUS = 'FAILED'
  def __init__(self, config, controller):
    """Set up the three command queues, status dict, and orchestrator.

    config     -- agent configuration object (provides 'agent'/'prefix' and
                  the parallel-execution option)
    controller -- agent controller; used for heartbeats and recovery manager
    """
    super(ActionQueue, self).__init__()
    # Separate queues: regular execution, status polls, and long-running
    # background commands, so they cannot starve one another.
    self.commandQueue = Queue.Queue()
    self.statusCommandQueue = Queue.Queue()
    self.backgroundCommandQueue = Queue.Queue()
    # Status dict pings the controller (heartbeat) on every status change.
    self.commandStatuses = CommandStatusDict(callback_action =
      self.status_update_callback)
    self.config = config
    self.controller = controller
    self.configTags = {}
    self._stop = threading.Event()
    self.tmpdir = config.get('agent', 'prefix')
    self.customServiceOrchestrator = CustomServiceOrchestrator(config, controller)
    self.parallel_execution = config.get_parallel_exec_option()
    if self.parallel_execution == 1:
      logger.info("Parallel execution is enabled, will start Agent commands in parallel")
  def stop(self):
    """Ask the worker thread to exit its run() loop (cooperative stop)."""
    self._stop.set()
  def stopped(self):
    """Return True once stop() has been requested."""
    return self._stop.isSet()
  def put_status(self, commands):
    """Replace the queued status commands with the freshly received set."""
    #Was supposed that we got all set of statuses, we don't need to keep old ones
    self.statusCommandQueue.queue.clear()
    for command in commands:
      logger.info("Adding " + command['commandType'] + " for service " + \
                  command['serviceName'] + " of cluster " + \
                  command['clusterName'] + " to the queue.")
      self.statusCommandQueue.put(command)
  def put(self, commands):
    """Enqueue execution commands.

    Background commands get an execution handle attached and go to their
    own queue so they never block regular execution commands. Missing
    service/cluster names are normalized to the string "null".
    """
    for command in commands:
      if not command.has_key('serviceName'):
        command['serviceName'] = "null"
      if not command.has_key('clusterName'):
        command['clusterName'] = 'null'
      logger.info("Adding " + command['commandType'] + " for role " + \
                  command['role'] + " for service " + \
                  command['serviceName'] + " of cluster " + \
                  command['clusterName'] + " to the queue.")
      if command['commandType'] == self.BACKGROUND_EXECUTION_COMMAND :
        self.backgroundCommandQueue.put(self.createCommandHandle(command))
      else:
        self.commandQueue.put(command)
  def cancel(self, commands):
    """Cancel commands by target task id.

    Still-queued commands are filtered out by draining the queue into a
    fresh Queue; a command already running is killed through the
    orchestrator (cancel_command is a no-op for unknown task ids).
    """
    for command in commands:
      logger.info("Canceling command {tid}".format(tid = str(command['target_task_id'])))
      logger.debug(pprint.pformat(command))
      task_id = command['target_task_id']
      reason = command['reason']
      # Remove from the command queue by task_id
      queue = self.commandQueue
      self.commandQueue = Queue.Queue()
      while not queue.empty():
        queued_command = queue.get(False)
        if queued_command['task_id'] != task_id:
          self.commandQueue.put(queued_command)
        else:
          logger.info("Canceling " + queued_command['commandType'] + \
                      " for service " + queued_command['serviceName'] + \
                      " of cluster " + queued_command['clusterName'] + \
                      " to the queue.")
      # Kill if in progress
      self.customServiceOrchestrator.cancel_command(task_id, reason)
  def run(self):
    """Worker loop: drain background/status queues, then take execution
    commands one at a time (serial mode) or one thread each (parallel mode).

    The timed get() doubles as the loop's polling interval; Queue.Empty
    simply restarts the iteration so stop() is honored promptly.
    """
    while not self.stopped():
      self.processBackgroundQueueSafeEmpty();
      self.processStatusCommandQueueSafeEmpty();
      try:
        if self.parallel_execution == 0:
          command = self.commandQueue.get(True, self.EXECUTION_COMMAND_WAIT_TIME)
          self.process_command(command)
        else:
          # If parallel execution is enabled, just kick off all available
          # commands using separate threads
          while (True):
            command = self.commandQueue.get(True, self.EXECUTION_COMMAND_WAIT_TIME)
            logger.info("Kicking off a thread for the command, id=" +
                        str(command['commandId']) + " taskId=" + str(command['taskId']))
            t = threading.Thread(target=self.process_command, args=(command,))
            t.daemon = True
            t.start()
      except (Queue.Empty):
        pass
  def processBackgroundQueueSafeEmpty(self):
    """Start every queued background command whose handle has no status yet.

    Non-blocking get() plus the Queue.Empty guard makes the empty()/get()
    race harmless.
    """
    while not self.backgroundCommandQueue.empty():
      try:
        command = self.backgroundCommandQueue.get(False)
        if(command.has_key('__handle') and command['__handle'].status == None):
          self.process_command(command)
      except (Queue.Empty):
        pass
  def processStatusCommandQueueSafeEmpty(self):
    """Drain and execute all queued status commands (non-blocking gets)."""
    while not self.statusCommandQueue.empty():
      try:
        command = self.statusCommandQueue.get(False)
        self.process_command(command)
      except (Queue.Empty):
        pass
  def createCommandHandle(self, command):
    """Attach a BackgroundCommandExecutionHandle to *command* and return it.

    Raises AgentException if a handle is already attached (would indicate
    the same command object was enqueued twice).
    """
    if(command.has_key('__handle')):
      raise AgentException("Command already has __handle")
    command['__handle'] = BackgroundCommandExecutionHandle(command, command['commandId'], None, self.on_background_command_complete_callback)
    return command
  def process_command(self, command):
    """Dispatch one command to the proper executor by command type.

    Execution-style commands are bracketed by recovery-manager start/stop
    bookkeeping (the finally guarantees the stop even on failure). Any
    unexpected exception is logged, not propagated, so the worker loop
    survives.
    """
    # make sure we log failures
    commandType = command['commandType']
    logger.debug("Took an element of Queue (command type = %s)." % commandType)
    try:
      if commandType in [self.EXECUTION_COMMAND, self.BACKGROUND_EXECUTION_COMMAND, self.AUTO_EXECUTION_COMMAND]:
        try:
          if self.controller.recovery_manager.enabled():
            self.controller.recovery_manager.start_execution_command()
          self.execute_command(command)
        finally:
          if self.controller.recovery_manager.enabled():
            self.controller.recovery_manager.stop_execution_command()
      elif commandType == self.STATUS_COMMAND:
        self.execute_status_command(command)
      else:
        logger.error("Unrecognized command " + pprint.pformat(command))
    except Exception, err:
      # Should not happen
      traceback.print_exc()
      logger.warn(err)
def tasks_in_progress_or_pending(self):
return_val = False
if not self.commandQueue.empty():
return_val = True
if self.controller.recovery_manager.has_active_command():
return_val = True
return return_val
pass
  def execute_command(self, command):
    '''
    Executes commands of type EXECUTION_COMMAND.

    Publishes an IN_PROGRESS report, runs the command through the
    orchestrator with optional time-budgeted retries, then publishes the
    final role result and updates recovery state and actual config tags.
    Background commands return early: their result arrives via
    on_background_command_complete_callback instead.
    '''
    clusterName = command['clusterName']
    commandId = command['commandId']
    isCommandBackground = command['commandType'] == self.BACKGROUND_EXECUTION_COMMAND
    isAutoExecuteCommand = command['commandType'] == self.AUTO_EXECUTION_COMMAND
    message = "Executing command with id = {commandId} for role = {role} of " \
              "cluster {cluster}.".format(
              commandId = str(commandId), role=command['role'],
              cluster=clusterName)
    logger.info(message)
    taskId = command['taskId']
    # Preparing 'IN_PROGRESS' report
    in_progress_status = self.commandStatuses.generate_report_template(command)
    # The path of the files that contain the output log and error log use a prefix that the agent advertises to the
    # server. The prefix is defined in agent-config.ini
    if not isAutoExecuteCommand:
      in_progress_status.update({
        'tmpout': self.tmpdir + os.sep + 'output-' + str(taskId) + '.txt',
        'tmperr': self.tmpdir + os.sep + 'errors-' + str(taskId) + '.txt',
        'structuredOut' : self.tmpdir + os.sep + 'structured-out-' + str(taskId) + '.json',
        'status': self.IN_PROGRESS_STATUS
      })
    else:
      # Auto (recovery-initiated) commands log to separate files so they do
      # not clobber operator-initiated output for the same task id scheme.
      in_progress_status.update({
        'tmpout': self.tmpdir + os.sep + 'auto_output-' + str(taskId) + '.txt',
        'tmperr': self.tmpdir + os.sep + 'auto_errors-' + str(taskId) + '.txt',
        'structuredOut' : self.tmpdir + os.sep + 'auto_structured-out-' + str(taskId) + '.json',
        'status': self.IN_PROGRESS_STATUS
      })
    self.commandStatuses.put_command_status(command, in_progress_status)
    numAttempts = 0
    retryDuration = 0  # even with 0 allow one attempt
    retryAble = False
    delay = 1
    if 'commandParams' in command:
      if 'max_duration_for_retries' in command['commandParams']:
        retryDuration = int(command['commandParams']['max_duration_for_retries'])
      if 'command_retry_enabled' in command['commandParams']:
        retryAble = command['commandParams']['command_retry_enabled'] == "true"
    if isAutoExecuteCommand:
      # Recovery commands are rescheduled by the recovery manager itself.
      retryAble = False
    logger.debug("Command execution metadata - retry enabled = {retryAble}, max retry duration (sec) = {retryDuration}".
                 format(retryAble=retryAble, retryDuration=retryDuration))
    while retryDuration >= 0:
      numAttempts += 1
      start = 0
      if retryAble:
        start = int(time.time())
      # running command
      commandresult = self.customServiceOrchestrator.runCommand(command,
                                                                in_progress_status['tmpout'],
                                                                in_progress_status['tmperr'],
                                                                override_output_files=numAttempts == 1,
                                                                retry=numAttempts > 1)
      end = 1
      if retryAble:
        end = int(time.time())
      # Charge the attempt's wall time against the retry budget.
      retryDuration -= (end - start)
      # dumping results
      if isCommandBackground:
        # Background commands report through the completion callback.
        return
      else:
        if commandresult['exitcode'] == 0:
          status = self.COMPLETED_STATUS
        else:
          status = self.FAILED_STATUS
      if status != self.COMPLETED_STATUS and retryAble == True and retryDuration > 0:
        delay = self.get_retry_delay(delay)
        if delay > retryDuration:
          delay = retryDuration
        retryDuration -= delay  # allow one last attempt
        logger.info("Retrying command id {cid} after a wait of {delay}".format(cid=taskId, delay=delay))
        time.sleep(delay)
        continue
      else:
        break
    roleResult = self.commandStatuses.generate_report_template(command)
    roleResult.update({
      'stdout': commandresult['stdout'],
      'stderr': commandresult['stderr'],
      'exitCode': commandresult['exitcode'],
      'status': status,
    })
    # Empty stdout/stderr are reported as the literal string 'None'.
    if roleResult['stdout'] == '':
      roleResult['stdout'] = 'None'
    if roleResult['stderr'] == '':
      roleResult['stderr'] = 'None'
    # let ambari know name of custom command
    if command['hostLevelParams'].has_key('custom_command'):
      roleResult['customCommand'] = command['hostLevelParams']['custom_command']
    if 'structuredOut' in commandresult:
      roleResult['structuredOut'] = str(json.dumps(commandresult['structuredOut']))
    else:
      roleResult['structuredOut'] = ''
    # let recovery manager know the current state
    if status == self.COMPLETED_STATUS:
      if self.controller.recovery_manager.enabled() and command.has_key('roleCommand'):
        if command['roleCommand'] == self.ROLE_COMMAND_START:
          self.controller.recovery_manager.update_current_status(command['role'], LiveStatus.LIVE_STATUS)
          self.controller.recovery_manager.update_config_staleness(command['role'], False)
          logger.info("After EXECUTION_COMMAND (START), current state of " + command['role'] + " to " +
                      self.controller.recovery_manager.get_current_status(command['role']) )
        elif command['roleCommand'] == self.ROLE_COMMAND_STOP or command['roleCommand'] == self.ROLE_COMMAND_INSTALL:
          self.controller.recovery_manager.update_current_status(command['role'], LiveStatus.DEAD_STATUS)
          logger.info("After EXECUTION_COMMAND (STOP/INSTALL), current state of " + command['role'] + " to " +
                      self.controller.recovery_manager.get_current_status(command['role']) )
        elif command['roleCommand'] == self.ROLE_COMMAND_CUSTOM_COMMAND:
          # Only RESTART among custom commands implies the component is live.
          if command['hostLevelParams'].has_key('custom_command') and \
                  command['hostLevelParams']['custom_command'] == self.CUSTOM_COMMAND_RESTART:
            self.controller.recovery_manager.update_current_status(command['role'], LiveStatus.LIVE_STATUS)
            self.controller.recovery_manager.update_config_staleness(command['role'], False)
            logger.info("After EXECUTION_COMMAND (RESTART), current state of " + command['role'] + " to " +
                        self.controller.recovery_manager.get_current_status(command['role']) )
      pass
    # let ambari know that configuration tags were applied
    configHandler = ActualConfigHandler(self.config, self.configTags)
    #update
    if command.has_key('forceRefreshConfigTags') and len(command['forceRefreshConfigTags']) > 0 :
      forceRefreshConfigTags = command['forceRefreshConfigTags']
      logger.info("Got refresh additional component tags command")
      for configTag in forceRefreshConfigTags :
        configHandler.update_component_tag(command['role'], configTag, command['configurationTags'][configTag])
      roleResult['customCommand'] = self.CUSTOM_COMMAND_RESTART  # force restart for component to evict stale_config on server side
      command['configurationTags'] = configHandler.read_actual_component(command['role'])
    if command.has_key('configurationTags'):
      configHandler.write_actual(command['configurationTags'])
      roleResult['configurationTags'] = command['configurationTags']
      component = {'serviceName':command['serviceName'],'componentName':command['role']}
      if command.has_key('roleCommand') and \
              (command['roleCommand'] == self.ROLE_COMMAND_START or \
               (command['roleCommand'] == self.ROLE_COMMAND_INSTALL \
                and component in LiveStatus.CLIENT_COMPONENTS) or \
               (command['roleCommand'] == self.ROLE_COMMAND_CUSTOM_COMMAND and \
                command['hostLevelParams'].has_key('custom_command') and \
                command['hostLevelParams']['custom_command'] == self.CUSTOM_COMMAND_RESTART)):
        configHandler.write_actual_component(command['role'], command['configurationTags'])
        if command['hostLevelParams'].has_key('clientsToUpdateConfigs') and \
                command['hostLevelParams']['clientsToUpdateConfigs']:
          configHandler.write_client_components(command['serviceName'], command['configurationTags'],
                                                command['hostLevelParams']['clientsToUpdateConfigs'])
        roleResult['configurationTags'] = configHandler.read_actual_component(command['role'])
    self.commandStatuses.put_command_status(command, roleResult)
def get_retry_delay(self, last_delay):
"""
Returns exponentially growing delay. The idea being if number of retries is high then the reason to retry
is probably a host or environment specific issue requiring longer waits
"""
return last_delay * 2
  def command_was_canceled(self):
    # NOTE(review): this body is a bare attribute access with no effect --
    # it looks like an unfinished or dead method. Confirm intent with the
    # callers before removing or implementing it.
    self.customServiceOrchestrator
  def on_background_command_complete_callback(self, process_condensed_result, handle):
    """Completion callback for background commands.

    Builds the final role report from the condensed process result; a
    cancellation reason (if any) forces FAILED status and is appended to
    both stdout and stderr.
    """
    logger.debug('Start callback: %s' % process_condensed_result)
    logger.debug('The handle is: %s' % handle)
    status = self.COMPLETED_STATUS if handle.exitCode == 0 else self.FAILED_STATUS
    aborted_postfix = self.customServiceOrchestrator.command_canceled_reason(handle.command['taskId'])
    if aborted_postfix:
      status = self.FAILED_STATUS
      logger.debug('Set status to: %s , reason = %s' % (status, aborted_postfix))
    else:
      aborted_postfix = ''
    roleResult = self.commandStatuses.generate_report_template(handle.command)
    roleResult.update({
      'stdout': process_condensed_result['stdout'] + aborted_postfix,
      'stderr': process_condensed_result['stderr'] + aborted_postfix,
      'exitCode': process_condensed_result['exitcode'],
      'structuredOut': str(json.dumps(process_condensed_result['structuredOut'])) if 'structuredOut' in process_condensed_result else '',
      'status': status,
    })
    self.commandStatuses.put_command_status(handle.command, roleResult)
  def execute_status_command(self, command):
    '''
    Executes commands of type STATUS_COMMAND.

    Delegates status (and security state) determination to the custom
    service orchestrator, feeds the result into the recovery manager, and
    publishes the assembled live status. All errors are logged and
    swallowed: a failed status poll must not kill the worker loop.
    '''
    try:
      cluster = command['clusterName']
      service = command['serviceName']
      component = command['componentName']
      configurations = command['configurations']
      if configurations.has_key('global'):
        globalConfig = configurations['global']
      else:
        globalConfig = {}
      livestatus = LiveStatus(cluster, service, component,
                              globalConfig, self.config, self.configTags)
      component_extra = None
      request_execution_cmd = False
      # For custom services, responsibility to determine service status is
      # delegated to python scripts
      component_status_result = self.customServiceOrchestrator.requestComponentStatus(command)
      component_security_status_result = self.customServiceOrchestrator.requestComponentSecurityState(command)
      if component_status_result['exitcode'] == 0:
        component_status = LiveStatus.LIVE_STATUS
        self.controller.recovery_manager.update_current_status(component, component_status)
      else:
        component_status = LiveStatus.DEAD_STATUS
        self.controller.recovery_manager.update_current_status(component, component_status)
        # Ask the server for a recovery execution command if policy demands.
        request_execution_cmd = self.controller.recovery_manager.requires_recovery(component)
      if component_status_result.has_key('structuredOut'):
        component_extra = component_status_result['structuredOut']
      result = livestatus.build(forced_component_status= component_status)
      if self.controller.recovery_manager.enabled():
        result['sendExecCmdDet'] = str(request_execution_cmd)
      # Add security state to the result
      result['securityState'] = component_security_status_result
      if component_extra is not None and len(component_extra) != 0:
        # Alerts are promoted to a top-level key; the rest rides in 'extra'.
        if component_extra.has_key('alerts'):
          result['alerts'] = component_extra['alerts']
          del component_extra['alerts']
        result['extra'] = component_extra
      logger.debug("Got live status for component " + component + \
                   " of service " + str(service) + \
                   " of cluster " + str(cluster))
      logger.debug(pprint.pformat(result))
      if result is not None:
        self.commandStatuses.put_command_status(command, result)
    except Exception, err:
      traceback.print_exc()
      logger.warn(err)
    pass
  # Store action result to agent response queue
  def result(self):
    """Return the aggregated command-status report consumed by the heartbeat."""
    return self.commandStatuses.generate_report()
  def status_update_callback(self):
    """
    Actions that are executed every time when command status changes:
    trigger an immediate heartbeat so the server learns of the change quickly.
    """
    self.controller.trigger_heartbeat()
# Removes all commands from the queue
def reset(self):
queue = self.commandQueue
with queue.mutex:
queue.queue.clear() | 0.25618 | 0.045058 |
import random
import numpy as np
from sklearn.model_selection import train_test_split
def get_square(img, pos):
    """Extract a left or a right square from ndarray shape : (H, W, C)).

    pos == 0 selects the leftmost H columns, anything else the rightmost H.
    """
    side = img.shape[0]
    return img[:, :side] if pos == 0 else img[:, -side:]
def split_img_into_squares(img):
    """Return the (left, right) H-by-H squares of an (H, W, C) ndarray."""
    return tuple(get_square(img, side) for side in (0, 1))
def hwc_to_chw(img):
    """Reorder an (H, W, C) array into channel-first (C, H, W) layout."""
    return np.transpose(img, (2, 0, 1))
def resize_and_crop(pilimg, scale=0.5, final_height=None):
    """Scale a PIL image by *scale* and vertically center-crop it.

    pilimg       -- PIL.Image instance (has .size, .resize, .crop)
    scale        -- multiplier applied to both dimensions
    final_height -- target height after cropping; None (or 0) keeps the
                    scaled height unchanged
    Returns a float32 ndarray.
    NOTE(review): when newH - final_height is odd, the crop keeps
    newH - 2*(diff // 2) rows, i.e. one more than final_height -- confirm
    this is intended before relying on an exact output height.
    """
    w = pilimg.size[0]
    h = pilimg.size[1]
    newW = int(w * scale)
    newH = int(h * scale)
    if not final_height:
        diff = 0
    else:
        diff = newH - final_height
    img = pilimg.resize((newW, newH))
    # Crop equal margins off top and bottom (vertical center crop).
    img = img.crop((0, diff // 2, newW, newH - diff // 2))
    return np.array(img, dtype=np.float32)
def batch(iterable, batch_size):
    """Yields lists by batch; the final list may be shorter than batch_size."""
    chunk = []
    count = 0
    for item in iterable:
        chunk.append(item)
        count += 1
        if count % batch_size == 0:
            yield chunk
            chunk = []
    if len(chunk) > 0:
        yield chunk
def split_train_val(dataset, val_percent=0.25):
    """Split *dataset* into train/val lists with a fixed random seed.

    Delegates to sklearn's train_test_split (random_state=42, so the split
    is reproducible) and prints the resulting sizes.
    Returns {'train': [...], 'val': [...]}.
    """
    dataset = list(dataset)
    _train, _val = train_test_split(dataset, test_size=val_percent, random_state=42)
    print('train dataset length: {}, validation dataset length: {}'.format(len(_train), len(_val)))
    # length = len(dataset)
    # n = int(length * val_percent)
    # random.shuffle(dataset)
    return {'train': _train, 'val': _val}
def normalize(x):
    """Map 8-bit pixel values onto [0, 1] via true division by 255."""
    scaled = x / 255
    return scaled
def merge_masks(img1, img2, full_w):
    """Stitch left/right half-masks into one (h, full_w) float32 array.

    Columns [0, full_w//2] come from img1; the remainder is filled from the
    trailing full_w//2 - 1 columns of img2 (original overlap convention kept).
    """
    height = img1.shape[0]
    split = full_w // 2 + 1
    merged = np.zeros((height, full_w), np.float32)
    merged[:, :split] = img1[:, :split]
    merged[:, split:] = img2[:, -(full_w // 2 - 1):]
    return merged
# credits to https://stackoverflow.com/users/6076729/manuel-lagunas
def rle_encode(mask_image):
    # Run-length encode a flattened binary mask: the output alternates
    # 1-based run start positions and run lengths (Kaggle RLE convention).
    # NOTE: mutates mask_image's underlying data only if flatten() did not
    # copy -- flatten() always copies, so the input is left untouched.
    pixels = mask_image.flatten()
    # We avoid issues with '1' at the start or end (at the corners of
    # the original image) by setting those pixels to '0' explicitly.
    # We do not expect these to be non-zero for an accurate mask,
    # so this should not harm the score.
    pixels[0] = 0
    pixels[-1] = 0
    # +2 = +1 for the diff's index offset, +1 to convert to 1-based positions.
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 2
    # Turn every second entry (run end) into a run length.
    runs[1::2] = runs[1::2] - runs[:-1:2]
return runs | models/unet/utils/utils.py | import random
import numpy as np
from sklearn.model_selection import train_test_split
def get_square(img, pos):
    """Extract a left or a right square from ndarray shape : (H, W, C))"""
    # pos == 0 -> leftmost H columns, otherwise rightmost H columns.
    h = img.shape[0]
    if pos == 0:
        return img[:, :h]
    else:
        return img[:, -h:]
def split_img_into_squares(img):
    """Return the (left, right) H-by-H squares of an (H, W, C) ndarray."""
    return get_square(img, 0), get_square(img, 1)
def hwc_to_chw(img):
    """Reorder an (H, W, C) array into channel-first (C, H, W) layout."""
    return np.transpose(img, axes=[2, 0, 1])
def resize_and_crop(pilimg, scale=0.5, final_height=None):
    """Scale a PIL image by *scale*, vertically center-crop toward
    *final_height* (None keeps the scaled height), return a float32 ndarray.
    """
    w = pilimg.size[0]
    h = pilimg.size[1]
    newW = int(w * scale)
    newH = int(h * scale)
    if not final_height:
        diff = 0
    else:
        diff = newH - final_height
    img = pilimg.resize((newW, newH))
    # Equal margins trimmed from top and bottom.
    img = img.crop((0, diff // 2, newW, newH - diff // 2))
    return np.array(img, dtype=np.float32)
def batch(iterable, batch_size):
    """Yields lists by batch; the final list may be shorter than batch_size."""
    b = []
    for i, t in enumerate(iterable):
        b.append(t)
        if (i + 1) % batch_size == 0:
            yield b
            b = []
    # Flush the trailing partial batch, if any.
    if len(b) > 0:
        yield b
def split_train_val(dataset, val_percent=0.25):
    """Reproducible train/val split (sklearn, random_state=42); returns
    {'train': [...], 'val': [...]} and prints the sizes.
    """
    dataset = list(dataset)
    _train, _val = train_test_split(dataset, test_size=val_percent, random_state=42)
    print('train dataset length: {}, validation dataset length: {}'.format(len(_train), len(_val)))
    # length = len(dataset)
    # n = int(length * val_percent)
    # random.shuffle(dataset)
    return {'train': _train, 'val': _val}
def normalize(x):
    """Map 8-bit pixel values onto [0, 1] via true division by 255."""
    return x / 255
def merge_masks(img1, img2, full_w):
    """Stitch left/right half-masks into one (h, full_w) float32 array:
    columns [0, full_w//2] from img1, the rest from img2's trailing columns.
    """
    h = img1.shape[0]
    new = np.zeros((h, full_w), np.float32)
    new[:, :full_w // 2 + 1] = img1[:, :full_w // 2 + 1]
    new[:, full_w // 2 + 1:] = img2[:, -(full_w // 2 - 1):]
    return new
# credits to https://stackoverflow.com/users/6076729/manuel-lagunas
def rle_encode(mask_image):
    # Run-length encode a flattened binary mask: alternating 1-based run
    # start positions and run lengths (Kaggle RLE convention).
    pixels = mask_image.flatten()
    # We avoid issues with '1' at the start or end (at the corners of
    # the original image) by setting those pixels to '0' explicitly.
    # We do not expect these to be non-zero for an accurate mask,
    # so this should not harm the score.
    pixels[0] = 0
    pixels[-1] = 0
    # +2 = +1 for the diff's index offset, +1 for 1-based positions.
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 2
    # Every second entry becomes a run length instead of a run end.
    runs[1::2] = runs[1::2] - runs[:-1:2]
return runs | 0.664214 | 0.491029 |
import numpy as np
import matplotlib.pyplot as plt
from .. import fig_to_dict
from numpy.testing import assert_equal
def test_line():
    """Line style (alpha, color, dash, width, zorder) round-trips to dict."""
    fig, ax = plt.subplots()
    ax.plot(np.arange(10), np.random.random(10),
            '--k', alpha=0.3, zorder=10, lw=2)
    rep = fig_to_dict(fig)
    axrep = rep['axes'][0]
    line = axrep['lines'][0]
    assert_equal(list(sorted(line.keys())),
                 ['alpha', 'color', 'coordinates', 'dasharray', 'data', 'id',
                  'linewidth', 'xindex', 'yindex', 'zorder'])
    assert_equal(line['alpha'], 0.3)
    assert_equal(line['color'], "#000000")
    assert_equal(line['coordinates'], 'data')
    assert_equal(line['dasharray'], '6,6')
    assert_equal(line['zorder'], 10)
    assert_equal(line['linewidth'], 2)
def test_markers():
    """Marker styling and triangle marker path serialize as expected."""
    fig, ax = plt.subplots()
    ax.plot(np.arange(10), np.random.random(10),
            '^k', alpha=0.3, zorder=10, mec='r', mew=2, c='b')
    rep = fig_to_dict(fig)
    axrep = rep['axes'][0]
    markers = axrep['markers'][0]
    assert_equal(list(sorted(markers.keys())),
                 ['alpha', 'coordinates', 'data', 'edgecolor', 'edgewidth',
                  'facecolor', 'id', 'markerpath', 'xindex', 'yindex',
                  'zorder'])
    assert_equal(markers['alpha'], 0.3)
    assert_equal(markers['zorder'], 10)
    assert_equal(markers['coordinates'], 'data')
    assert_equal(markers['edgecolor'], '#FF0000')
    assert_equal(markers['edgewidth'], 2)
    assert_equal(markers['facecolor'], '#0000FF')
    assert_equal(markers['markerpath'][0],
                 [[0.0, -3.0], [-3.0, 3.0], [3.0, 3.0]])
    assert_equal(markers['markerpath'][1],
                 ['M', 'L', 'L', 'Z'])
def test_scatter():
    """Scatter collections expose per-point style arrays and path transforms."""
    fig, ax = plt.subplots()
    ax.scatter(np.arange(10), np.random.random(10), c='r', s=30,
               marker='^', alpha=0.3, lw=2, edgecolors='b', zorder=10)
    rep = fig_to_dict(fig)
    axrep = rep['axes'][0]
    points = axrep['collections'][0]
    assert_equal(list(sorted(points.keys())),
                 ['alphas', 'edgecolors', 'edgewidths', 'facecolors', 'id',
                  'offsetcoordinates', 'offsets', 'pathcoordinates', 'paths',
                  'pathtransforms', 'xindex', 'yindex', 'zorder'])
    assert_equal(points['alphas'], [0.3])
    assert_equal(points['zorder'], 10)
    assert_equal(points['edgecolors'], ['#0000FF'])
    assert_equal(points['facecolors'], ['#FF0000'])
    assert_equal(points['edgewidths'], (2.0,))
    assert_equal(points['paths'][0][0],
                 [[0.0, 0.5], [-0.5, -0.5], [0.5, -0.5]])
    assert_equal(points['paths'][0][1],
                 ['M', 'L', 'L', 'Z'])
    assert_equal(points['pathtransforms'],
                 [[6.085806194501846, 0.0, 0.0, 6.085806194501846, 0.0, 0.0]])
def test_patch():
    """Rectangle patches serialize edge/face colors, width and zorder."""
    fig, ax = plt.subplots()
    ax.add_patch(plt.Rectangle((0, 0), 1, 2, alpha=0.2, linewidth=2,
                               edgecolor='green', facecolor='red', zorder=3))
    rep = fig_to_dict(fig)
    axrep = rep['axes'][0]
    path = axrep['paths'][0]
    assert_equal(list(sorted(path.keys())),
                 ['alpha', 'coordinates', 'dasharray', 'data', 'edgecolor',
                  'edgewidth', 'facecolor', 'id', 'pathcodes',
                  'xindex', 'yindex', 'zorder'])
    assert_equal(path['alpha'], 0.2)
    assert_equal(path['edgecolor'], "#008000")
    assert_equal(path['facecolor'], "#FF0000")
    assert_equal(path['edgewidth'], 2)
    assert_equal(path['zorder'], 3)
def test_text():
    """Text serializes style/anchors; note rotation is negated in the dict."""
    fig, ax = plt.subplots()
    ax.text(0.1, 0.1, "abcde", size=14, color='red', alpha=0.7,
            rotation=15, ha='center', va='center')
    rep = fig_to_dict(fig)
    axrep = rep['axes'][0]
    text = axrep['texts'][0]
    assert_equal(list(sorted(text.keys())),
                 ['alpha', 'color', 'coordinates', 'fontsize', 'h_anchor',
                  'id', 'position', 'rotation', 'text', 'v_baseline',
                  'zorder'])
    assert_equal(text['alpha'], 0.7)
    assert_equal(text['color'], "#FF0000")
    assert_equal(text['text'], "abcde")
    # Serialized rotation is the negation of the matplotlib rotation.
    assert_equal(text['rotation'], -15)
    assert_equal(text['fontsize'], 14)
    assert_equal(text['position'], [0.1, 0.1])
    assert_equal(text['h_anchor'], 'middle')
    assert_equal(text['v_baseline'], 'central')
    assert_equal(text['zorder'], 3)
    assert_equal(text['coordinates'], "data")
def test_image():
    """Images serialize alpha, extent, zorder and data coordinates."""
    fig, ax = plt.subplots()
    ax.imshow(np.random.random((20, 20)), cmap=plt.cm.binary,
              alpha=0.2, zorder=4, extent=(2, 4, 3, 5))
    rep = fig_to_dict(fig)
    axrep = rep['axes'][0]
    image = axrep['images'][0]
    # TODO: how to test data?
    assert_equal(list(sorted(image.keys())),
                 ['alpha', 'coordinates', 'data', 'extent', 'id', 'zorder'])
    assert_equal(image['alpha'], 0.2)
    assert_equal(image['extent'], (2, 4, 3, 5))
    assert_equal(image['zorder'], 4)
assert_equal(image['coordinates'], "data") | mpld3/tests/test_elements.py | import numpy as np
import matplotlib.pyplot as plt
from .. import fig_to_dict
from numpy.testing import assert_equal
def test_line():
    """Line style (alpha, color, dash, width, zorder) round-trips to dict."""
    fig, ax = plt.subplots()
    ax.plot(np.arange(10), np.random.random(10),
            '--k', alpha=0.3, zorder=10, lw=2)
    rep = fig_to_dict(fig)
    axrep = rep['axes'][0]
    line = axrep['lines'][0]
    assert_equal(list(sorted(line.keys())),
                 ['alpha', 'color', 'coordinates', 'dasharray', 'data', 'id',
                  'linewidth', 'xindex', 'yindex', 'zorder'])
    assert_equal(line['alpha'], 0.3)
    assert_equal(line['color'], "#000000")
    assert_equal(line['coordinates'], 'data')
    assert_equal(line['dasharray'], '6,6')
    assert_equal(line['zorder'], 10)
    assert_equal(line['linewidth'], 2)
def test_markers():
    """Marker styling and triangle marker path serialize as expected."""
    fig, ax = plt.subplots()
    ax.plot(np.arange(10), np.random.random(10),
            '^k', alpha=0.3, zorder=10, mec='r', mew=2, c='b')
    rep = fig_to_dict(fig)
    axrep = rep['axes'][0]
    markers = axrep['markers'][0]
    assert_equal(list(sorted(markers.keys())),
                 ['alpha', 'coordinates', 'data', 'edgecolor', 'edgewidth',
                  'facecolor', 'id', 'markerpath', 'xindex', 'yindex',
                  'zorder'])
    assert_equal(markers['alpha'], 0.3)
    assert_equal(markers['zorder'], 10)
    assert_equal(markers['coordinates'], 'data')
    assert_equal(markers['edgecolor'], '#FF0000')
    assert_equal(markers['edgewidth'], 2)
    assert_equal(markers['facecolor'], '#0000FF')
    assert_equal(markers['markerpath'][0],
                 [[0.0, -3.0], [-3.0, 3.0], [3.0, 3.0]])
    assert_equal(markers['markerpath'][1],
                 ['M', 'L', 'L', 'Z'])
def test_scatter():
    """Scatter collections expose per-point style arrays and path transforms."""
    fig, ax = plt.subplots()
    ax.scatter(np.arange(10), np.random.random(10), c='r', s=30,
               marker='^', alpha=0.3, lw=2, edgecolors='b', zorder=10)
    rep = fig_to_dict(fig)
    axrep = rep['axes'][0]
    points = axrep['collections'][0]
    assert_equal(list(sorted(points.keys())),
                 ['alphas', 'edgecolors', 'edgewidths', 'facecolors', 'id',
                  'offsetcoordinates', 'offsets', 'pathcoordinates', 'paths',
                  'pathtransforms', 'xindex', 'yindex', 'zorder'])
    assert_equal(points['alphas'], [0.3])
    assert_equal(points['zorder'], 10)
    assert_equal(points['edgecolors'], ['#0000FF'])
    assert_equal(points['facecolors'], ['#FF0000'])
    assert_equal(points['edgewidths'], (2.0,))
    assert_equal(points['paths'][0][0],
                 [[0.0, 0.5], [-0.5, -0.5], [0.5, -0.5]])
    assert_equal(points['paths'][0][1],
                 ['M', 'L', 'L', 'Z'])
    assert_equal(points['pathtransforms'],
                 [[6.085806194501846, 0.0, 0.0, 6.085806194501846, 0.0, 0.0]])
def test_patch():
    """Rectangle patches serialize edge/face colors, width and zorder."""
    fig, ax = plt.subplots()
    ax.add_patch(plt.Rectangle((0, 0), 1, 2, alpha=0.2, linewidth=2,
                               edgecolor='green', facecolor='red', zorder=3))
    rep = fig_to_dict(fig)
    axrep = rep['axes'][0]
    path = axrep['paths'][0]
    assert_equal(list(sorted(path.keys())),
                 ['alpha', 'coordinates', 'dasharray', 'data', 'edgecolor',
                  'edgewidth', 'facecolor', 'id', 'pathcodes',
                  'xindex', 'yindex', 'zorder'])
    assert_equal(path['alpha'], 0.2)
    assert_equal(path['edgecolor'], "#008000")
    assert_equal(path['facecolor'], "#FF0000")
    assert_equal(path['edgewidth'], 2)
    assert_equal(path['zorder'], 3)
def test_text():
    """Text serializes style/anchors; note rotation is negated in the dict."""
    fig, ax = plt.subplots()
    ax.text(0.1, 0.1, "abcde", size=14, color='red', alpha=0.7,
            rotation=15, ha='center', va='center')
    rep = fig_to_dict(fig)
    axrep = rep['axes'][0]
    text = axrep['texts'][0]
    assert_equal(list(sorted(text.keys())),
                 ['alpha', 'color', 'coordinates', 'fontsize', 'h_anchor',
                  'id', 'position', 'rotation', 'text', 'v_baseline',
                  'zorder'])
    assert_equal(text['alpha'], 0.7)
    assert_equal(text['color'], "#FF0000")
    assert_equal(text['text'], "abcde")
    # Serialized rotation is the negation of the matplotlib rotation.
    assert_equal(text['rotation'], -15)
    assert_equal(text['fontsize'], 14)
    assert_equal(text['position'], [0.1, 0.1])
    assert_equal(text['h_anchor'], 'middle')
    assert_equal(text['v_baseline'], 'central')
    assert_equal(text['zorder'], 3)
    assert_equal(text['coordinates'], "data")
def test_image():
    """Images serialize alpha, extent, zorder and data coordinates."""
    fig, ax = plt.subplots()
    ax.imshow(np.random.random((20, 20)), cmap=plt.cm.binary,
              alpha=0.2, zorder=4, extent=(2, 4, 3, 5))
    rep = fig_to_dict(fig)
    axrep = rep['axes'][0]
    image = axrep['images'][0]
    # TODO: how to test data?
    assert_equal(list(sorted(image.keys())),
                 ['alpha', 'coordinates', 'data', 'extent', 'id', 'zorder'])
    assert_equal(image['alpha'], 0.2)
    assert_equal(image['extent'], (2, 4, 3, 5))
    assert_equal(image['zorder'], 4)
assert_equal(image['coordinates'], "data") | 0.649245 | 0.786848 |
import unittest
import os
import json
from io import open
import warnings
from pymatgen.electronic_structure.bandstructure import Kpoint
from pymatgen import Lattice
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.io.vasp import BSVasprun
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine, get_reconstructed_band_structure, \
LobsterBandStructureSymmLine
from pymatgen.util.testing import PymatgenTest
from monty.serialization import loadfn
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class KpointTest(unittest.TestCase):
    """Sanity checks for Kpoint fractional/cartesian coordinates and label."""

    def setUp(self):
        # A 10 A cubic lattice makes cartesian = 10 * fractional, which keeps
        # the expected values below trivial to verify by eye.
        self.lattice = Lattice.cubic(10.0)
        self.kpoint = Kpoint([0.1, 0.4, -0.5], self.lattice, label="X")

    def test_properties(self):
        """frac_coords, a/b/c accessors, cart_coords and label accessors."""
        self.assertEqual(self.kpoint.frac_coords[0], 0.1)
        self.assertEqual(self.kpoint.frac_coords[1], 0.4)
        self.assertEqual(self.kpoint.frac_coords[2], -0.5)
        self.assertEqual(self.kpoint.a, 0.1)
        self.assertEqual(self.kpoint.b, 0.4)
        self.assertEqual(self.kpoint.c, -0.5)
        self.assertEqual(self.lattice, Lattice.cubic(10.0))
        self.assertEqual(self.kpoint.cart_coords[0], 1.0)
        self.assertEqual(self.kpoint.cart_coords[1], 4.0)
        self.assertEqual(self.kpoint.cart_coords[2], -5.0)
        self.assertEqual(self.kpoint.label, "X")
class BandStructureSymmLine_test(PymatgenTest):
    def setUp(self):
        """Load the serialized band structures used by all tests below."""
        self.bs = loadfn(os.path.join(test_dir, "Cu2O_361_bandstructure.json"))
        self.bs2 = loadfn(os.path.join(test_dir, "CaO_2605_bandstructure.json"))
        self.bs_spin = loadfn(os.path.join(test_dir, "NiO_19009_bandstructure.json"))
        self.bs_cbm0 = loadfn(os.path.join(test_dir, "InN_22205_bandstructure.json"))
        self.bs_cu = loadfn(os.path.join(test_dir, "Cu_30_bandstructure.json"))
        self.bs_diff_spins = loadfn(os.path.join(test_dir, "VBr2_971787_bandstructure.json"))
        # Silence deprecation chatter from the fixture loads.
        warnings.simplefilter("ignore")
    def tearDown(self):
        """Restore the default warning filter changed in setUp."""
        warnings.simplefilter("default")
    def test_basic(self):
        """Projections, element/orbital projections, bands, branches,
        distances, and spin-polarized band values match the fixtures."""
        self.assertArrayAlmostEqual(self.bs.projections[Spin.up][10][12][0],
                                    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        self.assertArrayAlmostEqual(self.bs.projections[Spin.up][25][0][
                                        Orbital.dyz.value],
                                    [0.0, 0.0, 0.0011, 0.0219, 0.0219, 0.069])
        self.assertAlmostEqual(
            self.bs.get_projection_on_elements()[Spin.up][25][10]['O'], 0.0328)
        self.assertAlmostEqual(
            self.bs.get_projection_on_elements()[Spin.up][22][25]['Cu'], 0.8327)
        proj = self.bs.get_projections_on_elements_and_orbitals({'Cu': ['s',
                                                                        'd']})
        self.assertAlmostEqual(
            proj[Spin.up][25][0]['Cu']['s'], 0.0027)
        self.assertAlmostEqual(
            proj[Spin.up][25][0]['Cu']['d'], 0.8495999999999999)
        self.assertEqual(self.bs2.nb_bands, 16)
        self.assertAlmostEqual(self.bs2.bands[Spin.up][5][10], 0.5608)
        self.assertAlmostEqual(self.bs2.bands[Spin.up][5][10], 0.5608)
        self.assertEqual(self.bs2.branches[5]['name'], "L-U")
        self.assertEqual(self.bs2.branches[5]['start_index'], 80)
        self.assertEqual(self.bs2.branches[5]['end_index'], 95)
        self.assertAlmostEqual(self.bs2.distance[70], 4.2335127528765737)
        self.assertEqual(self.bs_spin.nb_bands, 27)
        self.assertAlmostEqual(self.bs_spin.bands[Spin.up][5][10], 0.262)
        self.assertAlmostEqual(self.bs_spin.bands[Spin.down][5][10],
                               1.6156)
def test_properties(self):
self.one_kpoint = self.bs2.kpoints[31]
self.assertEqual(self.one_kpoint.frac_coords[0], 0.5)
self.assertEqual(self.one_kpoint.frac_coords[1], 0.25)
self.assertEqual(self.one_kpoint.frac_coords[2], 0.75)
self.assertAlmostEqual(self.one_kpoint.cart_coords[0], 0.64918757)
self.assertAlmostEqual(self.one_kpoint.cart_coords[1], 1.29837513)
self.assertAlmostEqual(self.one_kpoint.cart_coords[2], 0.0)
self.assertEqual(self.one_kpoint.label, "W")
self.assertAlmostEqual(self.bs2.efermi, 2.6211967, "wrong fermi energy")
def test_get_branch(self):
self.assertAlmostEqual(self.bs2.get_branch(110)[0]['name'], "U-W")
def test_get_direct_band_gap_dict(self):
    """The reported direct gap must match the band energies it points at."""
    diff_spin_gaps = self.bs_diff_spins.get_direct_band_gap_dict()
    self.assertEqual(diff_spin_gaps[Spin.down]['value'], 4.5365)
    # Cross-check the dict against the raw eigenvalues for each spin channel.
    for band_struct in (self.bs2, self.bs_spin):
        gap_info = band_struct.get_direct_band_gap_dict()
        for spin, energies in band_struct.bands.items():
            entry = gap_info[spin]
            k_index = entry['kpoint_index']
            valence, conduction = entry['band_indices']
            recomputed = energies[conduction][k_index] - energies[valence][k_index]
            self.assertEqual(recomputed, entry['value'])
    # Metals have no direct band gap; the call must raise.
    self.assertRaises(ValueError, self.bs_cu.get_direct_band_gap_dict)
def test_get_direct_band_gap(self):
    # Reference direct-gap value for CaO.
    self.assertAlmostEqual(self.bs2.get_direct_band_gap(),
                           4.0125999999999999)
    # Spin-polarized VBr2: only require a positive direct gap.
    self.assertTrue(self.bs_diff_spins.get_direct_band_gap() > 0)
    # Metallic Cu: direct gap is zero.
    self.assertEqual(self.bs_cu.get_direct_band_gap(), 0)
def test_is_metal(self):
    """Insulating CaO and NiO are not metals; elemental Cu is."""
    for insulator in (self.bs2, self.bs_spin):
        self.assertFalse(insulator.is_metal(), "wrong metal assignment")
    self.assertTrue(self.bs_cu.is_metal(), "wrong metal assignment")
def test_get_cbm(self):
    """Check CBM energy, band index and kpoint info for CaO and spin-polarized NiO."""
    cbm = self.bs2.get_cbm()
    # BUG FIX: assertAlmostEqual messages must use the msg= keyword — the
    # third positional argument is `places`, and a string there raises
    # TypeError whenever the values are not exactly equal.
    self.assertAlmostEqual(cbm['energy'], 5.8709, msg="wrong CBM energy")
    self.assertEqual(cbm['band_index'][Spin.up][0], 8, "wrong CBM band index")
    self.assertEqual(cbm['kpoint_index'][0], 15, "wrong CBM kpoint index")
    self.assertEqual(cbm['kpoint'].frac_coords[0], 0.5, "wrong CBM kpoint frac coords")
    self.assertEqual(cbm['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
    self.assertEqual(cbm['kpoint'].frac_coords[2], 0.5, "wrong CBM kpoint frac coords")
    self.assertEqual(cbm['kpoint'].label, "X", "wrong CBM kpoint label")
    cbm_spin = self.bs_spin.get_cbm()
    self.assertAlmostEqual(cbm_spin['energy'], 8.0458, msg="wrong CBM energy")
    self.assertEqual(cbm_spin['band_index'][Spin.up][0], 12, "wrong CBM band index")
    self.assertEqual(len(cbm_spin['band_index'][Spin.down]), 0, "wrong CBM band index")
    self.assertEqual(cbm_spin['kpoint_index'][0], 0, "wrong CBM kpoint index")
    self.assertEqual(cbm_spin['kpoint'].frac_coords[0], 0.0, "wrong CBM kpoint frac coords")
    self.assertEqual(cbm_spin['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
    self.assertEqual(cbm_spin['kpoint'].frac_coords[2], 0.0, "wrong CBM kpoint frac coords")
    self.assertEqual(cbm_spin['kpoint'].label, "\\Gamma", "wrong CBM kpoint label")
def test_get_vbm(self):
    """Check VBM energy, band indices and kpoint info for CaO and spin-polarized NiO."""
    vbm = self.bs2.get_vbm()
    # BUG FIX: assertAlmostEqual messages must use the msg= keyword — the
    # third positional argument is `places`, and a string there raises
    # TypeError whenever the values are not exactly equal.
    self.assertAlmostEqual(vbm['energy'], 2.2361, msg="wrong VBM energy")
    self.assertEqual(len(vbm['band_index'][Spin.up]), 3, "wrong VBM number of bands")
    self.assertEqual(vbm['band_index'][Spin.up][0], 5, "wrong VBM band index")
    self.assertEqual(vbm['kpoint_index'][0], 0, "wrong VBM kpoint index")
    self.assertEqual(vbm['kpoint'].frac_coords[0], 0.0, "wrong VBM kpoint frac coords")
    self.assertEqual(vbm['kpoint'].frac_coords[1], 0.0, "wrong VBM kpoint frac coords")
    self.assertEqual(vbm['kpoint'].frac_coords[2], 0.0, "wrong VBM kpoint frac coords")
    self.assertEqual(vbm['kpoint'].label, "\\Gamma", "wrong VBM kpoint label")
    vbm_spin = self.bs_spin.get_vbm()
    self.assertAlmostEqual(vbm_spin['energy'], 5.731, msg="wrong VBM energy")
    self.assertEqual(len(vbm_spin['band_index'][Spin.up]), 2, "wrong VBM number of bands")
    self.assertEqual(len(vbm_spin['band_index'][Spin.down]), 0, "wrong VBM number of bands")
    self.assertEqual(vbm_spin['band_index'][Spin.up][0], 10, "wrong VBM band index")
    self.assertEqual(vbm_spin['kpoint_index'][0], 79, "wrong VBM kpoint index")
    self.assertEqual(vbm_spin['kpoint'].frac_coords[0], 0.5, "wrong VBM kpoint frac coords")
    self.assertEqual(vbm_spin['kpoint'].frac_coords[1], 0.5, "wrong VBM kpoint frac coords")
    self.assertEqual(vbm_spin['kpoint'].frac_coords[2], 0.5, "wrong VBM kpoint frac coords")
    self.assertEqual(vbm_spin['kpoint'].label, "L", "wrong VBM kpoint label")
def test_get_band_gap(self):
    """Indirect gaps of CaO and NiO; InN has a zero (closed) gap."""
    bg = self.bs2.get_band_gap()
    # BUG FIX: assertAlmostEqual messages must use the msg= keyword — the
    # third positional argument is `places`, and a string there raises
    # TypeError whenever the values are not exactly equal.
    self.assertAlmostEqual(bg['energy'], 3.6348, msg="wrong gap energy")
    self.assertEqual(bg['transition'], "\\Gamma-X", "wrong kpoint transition")
    self.assertFalse(bg['direct'], "wrong nature of the gap")
    bg_spin = self.bs_spin.get_band_gap()
    self.assertAlmostEqual(bg_spin['energy'], 2.3148, msg="wrong gap energy")
    self.assertEqual(bg_spin['transition'], "L-\\Gamma", "wrong kpoint transition")
    self.assertFalse(bg_spin['direct'], "wrong nature of the gap")
    bg_cbm0 = self.bs_cbm0.get_band_gap()
    self.assertAlmostEqual(bg_cbm0['energy'], 0, places=3, msg="wrong gap energy")
def test_get_sym_eq_kpoints_and_degeneracy(self):
bs = self.bs2
cbm_k = bs.get_cbm()['kpoint'].frac_coords
vbm_k = bs.get_vbm()['kpoint'].frac_coords
self.assertEqual(bs.get_kpoint_degeneracy(cbm_k), None)
bs.structure = loadfn(os.path.join(test_dir, "CaO_2605_structure.json"))
self.assertEqual(bs.get_kpoint_degeneracy(cbm_k), 3)
self.assertEqual(bs.get_kpoint_degeneracy(vbm_k), 1)
cbm_eqs = bs.get_sym_eq_kpoints(cbm_k)
self.assertTrue([0.5, 0., 0.5] in cbm_eqs)
self.assertTrue([0., 0.5, 0.5] in cbm_eqs)
self.assertTrue([0.5, 0.5, 0.] in cbm_eqs)
vbm_eqs = bs.get_sym_eq_kpoints(vbm_k)
self.assertTrue([0., 0., 0.] in vbm_eqs)
def test_as_dict(self):
    """Every stored band structure must serialize to JSON without error."""
    for band_struct in (self.bs, self.bs2, self.bs_spin):
        serialized = json.dumps(band_struct.as_dict())
        self.assertIsNotNone(serialized)
def test_old_format_load(self):
    # Band structures serialized with the legacy dict layout must still
    # deserialize via from_dict, with projections intact.
    with open(os.path.join(test_dir, "bs_ZnS_old.json"),
              "r", encoding='utf-8') as f:
        d = json.load(f)
    bs_old = BandStructureSymmLine.from_dict(d)
    self.assertEqual(bs_old.get_projection_on_elements()[
                         Spin.up][0][0]['Zn'], 0.0971)
class ReconstructBandStructureTest(PymatgenTest):
    def setUp(self):
        """Load the same Cu band structure twice, to be stitched back together."""
        cu_path = os.path.join(test_dir, "Cu_30_bandstructure.json")
        self.bs_cu = loadfn(cu_path)
        self.bs_cu2 = loadfn(cu_path)
        warnings.simplefilter("ignore")

    def tearDown(self):
        # Restore the default warnings filter.
        warnings.simplefilter("default")

    def test_reconstruct_band_structure(self):
        """Two identical halves combine into one structure with doubled kpoints."""
        combined = get_reconstructed_band_structure([self.bs_cu, self.bs_cu2])
        self.assertEqual(combined.bands[Spin.up].shape, (20, 700), "wrong number of bands or kpoints")

    def test_vasprun_bs(self):
        """A band structure parsed from vasprun.xml exposes element projections."""
        vasp_run = BSVasprun(os.path.join(test_dir, "vasprun.xml"),
                             parse_projected_eigen=True,
                             parse_potcar_file=True)
        band_struct = vasp_run.get_band_structure(
            kpoints_filename=os.path.join(test_dir, "KPOINTS.band"),
            line_mode=True)
        band_struct.get_projection_on_elements()
class LobsterBandStructureSymmLine_test(PymatgenTest):
    """Tests for band structures produced by Lobster fatband runs on SiO2."""

    def setUp(self):
        warnings.simplefilter("ignore")
        # Read the JSON fixtures explicitly as UTF-8 for consistency with the
        # other tests in this file and independence from the platform default.
        with open(os.path.join(test_dir, "cohp/Fatband_SiO2/Test_p/lobster_band_structure_spin.json"),
                  'r', encoding='utf-8') as f:
            bs_spin_dict = json.load(f)
        self.bs_spin = LobsterBandStructureSymmLine.from_dict(bs_spin_dict)
        with open(os.path.join(test_dir, "cohp/Fatband_SiO2/Test_p/lobster_band_structure.json"),
                  'r', encoding='utf-8') as f:
            bs_dict = json.load(f)
        self.bs_p = LobsterBandStructureSymmLine.from_dict(bs_dict)

    def tearDown(self):
        # Restore the default warnings filter.
        warnings.simplefilter("default")

    def test_basic(self):
        """Structure, lattice, kpoints, distances and projections round-trip correctly."""
        bs_p = self.bs_p
        bs_spin = self.bs_spin
        # Structure attached to the band structure.
        self.assertAlmostEqual(bs_p.structure[0].frac_coords[0], 0.)
        self.assertAlmostEqual(bs_p.structure[0].frac_coords[1], 0.47634315)
        self.assertAlmostEqual(bs_p.structure[0].frac_coords[2], 0.666667)
        self.assertEqual(bs_p.structure[0].species_string, 'Si')
        self.assertAlmostEqual(bs_p.structure[0].coords[0], -1.19607309)
        self.assertAlmostEqual(bs_p.structure[0].coords[1], 2.0716597)
        self.assertAlmostEqual(bs_p.structure[0].coords[2], 3.67462144)
        self.assertAlmostEqual(bs_p.efermi, 1.06470288)
        # Reciprocal lattice matrix.
        lattice = bs_p.lattice_rec.as_dict()
        self.assertAlmostEqual(lattice["matrix"][0][0], 1.2511575194890285)
        self.assertAlmostEqual(lattice["matrix"][0][1], 0.7223560132915973)
        self.assertAlmostEqual(lattice["matrix"][0][2], 0.0)
        self.assertAlmostEqual(lattice["matrix"][1][0], 0.0)
        self.assertAlmostEqual(lattice["matrix"][1][1], 1.4447123171425553)
        self.assertAlmostEqual(lattice["matrix"][1][2], 0.0)
        self.assertAlmostEqual(lattice["matrix"][2][0], 0.0)
        self.assertAlmostEqual(lattice["matrix"][2][1], 0.0)
        self.assertAlmostEqual(lattice["matrix"][2][2], 1.1399248502312707)
        # Selected kpoints in fractional and cartesian coordinates.
        self.assertAlmostEqual(bs_p.kpoints[8].frac_coords[0], 0.09090909)
        self.assertAlmostEqual(bs_p.kpoints[8].frac_coords[1], 0.0)
        self.assertAlmostEqual(bs_p.kpoints[8].frac_coords[2], 0.0)
        self.assertAlmostEqual(bs_p.kpoints[8].cart_coords[0], 0.11374159)
        self.assertAlmostEqual(bs_p.kpoints[8].cart_coords[1], 0.06566873)
        self.assertAlmostEqual(bs_p.kpoints[8].cart_coords[2], 0.)
        self.assertAlmostEqual(bs_p.kpoints[50].frac_coords[0], 0.46153846)
        self.assertAlmostEqual(bs_p.kpoints[50].frac_coords[1], 0.07692308)
        self.assertAlmostEqual(bs_p.kpoints[50].frac_coords[2], 0.0)
        self.assertAlmostEqual(bs_p.kpoints[50].cart_coords[0], 0.57745732)
        self.assertAlmostEqual(bs_p.kpoints[50].cart_coords[1], 0.4445268)
        self.assertAlmostEqual(bs_p.kpoints[50].cart_coords[2], 0.0)
        self.assertAlmostEqual(bs_p.distance[30], 0.49251552363382556)
        # BUG FIX: assertTrue(a, b) only checks that `a` is truthy (b is the
        # failure message); use assertEqual to actually compare the name.
        self.assertEqual(bs_p.branches[0]["name"], '\\Gamma-K')
        self.assertAlmostEqual(bs_p.get_band_gap()["energy"], 5.6739999999999995)
        # (stray debug print of the projection keys removed)
        self.assertAlmostEqual(bs_p.get_projection_on_elements()[Spin.up][0][0]["Si0+"], 3 * (0.001 + 0.064))
        self.assertAlmostEqual(
            bs_p.get_projections_on_elements_and_orbitals({"Si0+": ["3p"]})[Spin.up][0][0]["Si0+"]["3p"],
            0.003)
        self.assertAlmostEqual(
            bs_p.get_projections_on_elements_and_orbitals({"O0+": ["2p"]})[Spin.up][0][0]["O0+"]["2p"],
            0.002 * 3 + 0.003 * 3)
        dict_here = \
            bs_p.get_projections_on_elements_and_orbitals({"Si0+": ["3s", "3p"], "O0+": ["2s", "2p"]})[Spin.up][0][0]
        self.assertAlmostEqual(dict_here["Si0+"]["3s"], 0.192)
        self.assertAlmostEqual(dict_here["Si0+"]["3p"], 0.003)
        self.assertAlmostEqual(dict_here["O0+"]["2s"], 0.792)
        self.assertAlmostEqual(dict_here["O0+"]["2p"], 0.015)
        # Spin-polarized run, Spin.up channel.
        self.assertAlmostEqual(bs_spin.get_projection_on_elements()[Spin.up][0][0]["Si0+"], 3 * (0.001 + 0.064))
        self.assertAlmostEqual(
            bs_spin.get_projections_on_elements_and_orbitals({"Si0+": ["3p"]})[Spin.up][0][0]["Si0+"]["3p"],
            0.003)
        self.assertAlmostEqual(
            bs_spin.get_projections_on_elements_and_orbitals({"O0+": ["2p"]})[Spin.up][0][0]["O0+"]["2p"],
            0.002 * 3 + 0.003 * 3)
        dict_here = \
            bs_spin.get_projections_on_elements_and_orbitals({"Si0+": ["3s", "3p"], "O0+": ["2s", "2p"]})[Spin.up][0][0]
        self.assertAlmostEqual(dict_here["Si0+"]["3s"], 0.192)
        self.assertAlmostEqual(dict_here["Si0+"]["3p"], 0.003)
        self.assertAlmostEqual(dict_here["O0+"]["2s"], 0.792)
        self.assertAlmostEqual(dict_here["O0+"]["2p"], 0.015)
        # Spin-polarized run, Spin.down channel.
        self.assertAlmostEqual(bs_spin.get_projection_on_elements()[Spin.up][0][0]["Si0+"], 3 * (0.001 + 0.064))
        self.assertAlmostEqual(
            bs_spin.get_projections_on_elements_and_orbitals({"Si0+": ["3p"]})[Spin.down][0][0]["Si0+"]["3p"],
            0.003)
        self.assertAlmostEqual(
            bs_spin.get_projections_on_elements_and_orbitals({"O0+": ["2p"]})[Spin.down][0][0]["O0+"]["2p"],
            0.002 * 3 + 0.003 * 3)
        dict_here = \
            bs_spin.get_projections_on_elements_and_orbitals({"Si0+": ["3s", "3p"], "O0+": ["2s", "2p"]})[Spin.down][0][0]
        self.assertAlmostEqual(dict_here["Si0+"]["3s"], 0.192)
        self.assertAlmostEqual(dict_here["Si0+"]["3p"], 0.003)
        self.assertAlmostEqual(dict_here["O0+"]["2s"], 0.792)
        self.assertAlmostEqual(dict_here["O0+"]["2p"], 0.015)

    def test_get_branch(self):
        """Kpoint 0 lies on the first branch, \\Gamma-K, spanning indices 0-70."""
        branch = self.bs_p.get_branch(0)[0]
        self.assertEqual(branch["name"], '\\Gamma-K')
        self.assertEqual(branch["start_index"], 0)
        self.assertEqual(branch["end_index"], 70)
        self.assertEqual(branch["index"], 0)

    def test_get_direct_band_gap_dict(self):
        """Direct gap of ~6.006 eV at kpoint 0 between bands 22/24, for both spins."""
        direct_dict = self.bs_p.get_direct_band_gap_dict()
        self.assertAlmostEqual(direct_dict[Spin.up]["value"], 6.005999999999999)
        self.assertEqual(direct_dict[Spin.up]["kpoint_index"], 0)
        self.assertListEqual(direct_dict[Spin.up]["band_indices"], [22, 24])
        direct_dict = self.bs_spin.get_direct_band_gap_dict()
        self.assertAlmostEqual(direct_dict[Spin.up]["value"], 6.005999999999999)
        self.assertEqual(direct_dict[Spin.up]["kpoint_index"], 0)
        self.assertListEqual(direct_dict[Spin.up]["band_indices"], [22, 24])
        self.assertAlmostEqual(direct_dict[Spin.down]["value"], 6.005999999999999)
        self.assertEqual(direct_dict[Spin.down]["kpoint_index"], 0)
        self.assertListEqual(direct_dict[Spin.down]["band_indices"], [22, 24])

    def test_get_direct_band_gap(self):
        self.assertAlmostEqual(self.bs_p.get_direct_band_gap(), 6.005999999999999)
        self.assertAlmostEqual(self.bs_spin.get_direct_band_gap(), 6.005999999999999)

    def test_is_metal(self):
        # SiO2 is an insulator in both calculations.
        self.assertFalse(self.bs_p.is_metal(), "wrong metal assignment")
        self.assertFalse(self.bs_spin.is_metal(), "wrong metal assignment")

    def test_get_cbm(self):
        cbm = self.bs_p.get_cbm()
        # BUG FIX: assertAlmostEqual messages must use the msg= keyword — the
        # third positional argument is `places`, and a string there raises
        # TypeError whenever the values are not exactly equal.
        self.assertAlmostEqual(cbm['energy'], 6.3037028799999995, msg="wrong CBM energy")
        self.assertEqual(cbm['band_index'][Spin.up][0], 24, "wrong CBM band index")
        self.assertEqual(cbm['kpoint_index'][0], 0, "wrong CBM kpoint index")
        self.assertEqual(cbm['kpoint'].frac_coords[0], 0.0, "wrong CBM kpoint frac coords")
        self.assertEqual(cbm['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
        self.assertEqual(cbm['kpoint'].frac_coords[2], 0.0, "wrong CBM kpoint frac coords")
        self.assertEqual(cbm['kpoint'].label, "\\Gamma", "wrong CBM kpoint label")
        cbm_spin = self.bs_spin.get_cbm()
        self.assertAlmostEqual(cbm_spin['energy'], 6.30370274, msg="wrong CBM energy")
        self.assertEqual(cbm_spin['band_index'][Spin.up][0], 24, "wrong CBM band index")
        self.assertEqual(len(cbm_spin['band_index'][Spin.down]), 1, "wrong CBM band index")
        self.assertEqual(cbm_spin['kpoint_index'][0], 0, "wrong CBM kpoint index")
        self.assertEqual(cbm_spin['kpoint'].frac_coords[0], 0.0, "wrong CBM kpoint frac coords")
        self.assertEqual(cbm_spin['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
        self.assertEqual(cbm_spin['kpoint'].frac_coords[2], 0.0, "wrong CBM kpoint frac coords")
        self.assertEqual(cbm_spin['kpoint'].label, "\\Gamma", "wrong CBM kpoint label")

    def test_get_vbm(self):
        vbm = self.bs_p.get_vbm()
        # BUG FIX: msg= keyword for all assertAlmostEqual messages (the third
        # positional argument is `places`, not the message).
        self.assertAlmostEqual(vbm['energy'], 0.62970288, msg="wrong VBM energy")
        self.assertEqual(len(vbm['band_index'][Spin.up]), 1, "wrong VBM number of bands")
        self.assertEqual(vbm['band_index'][Spin.up][0], 23, "wrong VBM band index")
        self.assertEqual(vbm['kpoint_index'][0], 68, "wrong VBM kpoint index")
        self.assertAlmostEqual(vbm['kpoint'].frac_coords[0], 0.34615384615385, msg="wrong VBM kpoint frac coords")
        self.assertAlmostEqual(vbm['kpoint'].frac_coords[1], 0.30769230769231, msg="wrong VBM kpoint frac coords")
        self.assertAlmostEqual(vbm['kpoint'].frac_coords[2], 0.0, msg="wrong VBM kpoint frac coords")
        self.assertEqual(vbm['kpoint'].label, None, "wrong VBM kpoint label")
        vbm_spin = self.bs_spin.get_vbm()
        self.assertAlmostEqual(vbm_spin['energy'], 0.6297027399999999, msg="wrong VBM energy")
        self.assertEqual(len(vbm_spin['band_index'][Spin.up]), 1, "wrong VBM number of bands")
        self.assertEqual(len(vbm_spin['band_index'][Spin.down]), 1, "wrong VBM number of bands")
        self.assertEqual(vbm_spin['band_index'][Spin.up][0], 23, "wrong VBM band index")
        self.assertEqual(vbm_spin['kpoint_index'][0], 68, "wrong VBM kpoint index")
        self.assertAlmostEqual(vbm_spin['kpoint'].frac_coords[0], 0.34615384615385, msg="wrong VBM kpoint frac coords")
        self.assertAlmostEqual(vbm_spin['kpoint'].frac_coords[1], 0.30769230769231, msg="wrong VBM kpoint frac coords")
        self.assertAlmostEqual(vbm_spin['kpoint'].frac_coords[2], 0.0, msg="wrong VBM kpoint frac coords")
        self.assertEqual(vbm_spin['kpoint'].label, None, "wrong VBM kpoint label")

    def test_get_band_gap(self):
        bg = self.bs_p.get_band_gap()
        # BUG FIX: msg= keyword (see test_get_cbm above).
        self.assertAlmostEqual(bg['energy'], 5.6739999999999995, msg="wrong gap energy")
        self.assertEqual(bg['transition'], "(0.346,0.308,0.000)-\\Gamma", "wrong kpoint transition")
        self.assertFalse(bg['direct'], "wrong nature of the gap")
        bg_spin = self.bs_spin.get_band_gap()
        self.assertAlmostEqual(bg_spin['energy'], 5.674, msg="wrong gap energy")
        self.assertEqual(bg_spin['transition'], "(0.346,0.308,0.000)-\\Gamma", "wrong kpoint transition")
        self.assertFalse(bg_spin['direct'], "wrong nature of the gap")

    def test_get_sym_eq_kpoints_and_degeneracy(self):
        bs = self.bs_p
        cbm_k = bs.get_cbm()['kpoint'].frac_coords
        vbm_k = bs.get_vbm()['kpoint'].frac_coords
        self.assertEqual(bs.get_kpoint_degeneracy(cbm_k), 1)
        self.assertEqual(bs.get_kpoint_degeneracy(vbm_k), 3)

    def test_as_dict(self):
        s = json.dumps(self.bs_p.as_dict())
        self.assertIsNotNone(s)
        s = json.dumps(self.bs_spin.as_dict())
        self.assertIsNotNone(s)

    def test_old_format_load(self):
        # This exercises loading from the old dict layout via apply_scissor.
        self.bs_spin.apply_scissor(3.0)
# BUG FIX: the original line had repository-path text fused onto it
# ("unittest.main() | pymatgen/... |"), which is a syntax error left by a
# file-concatenation artifact.
if __name__ == '__main__':
    unittest.main()
import unittest
import os
import json
from io import open
import warnings
from pymatgen.electronic_structure.bandstructure import Kpoint
from pymatgen import Lattice
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.io.vasp import BSVasprun
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine, get_reconstructed_band_structure, \
LobsterBandStructureSymmLine
from pymatgen.util.testing import PymatgenTest
from monty.serialization import loadfn
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class KpointTest(unittest.TestCase):
    def setUp(self):
        """Build a kpoint at (0.1, 0.4, -0.5) in a 10 Å cubic lattice, labelled X."""
        self.lattice = Lattice.cubic(10.0)
        self.kpoint = Kpoint([0.1, 0.4, -0.5], self.lattice, label="X")

    def test_properties(self):
        """Fractional coords, a/b/c shortcuts, cartesian coords and label."""
        frac = self.kpoint.frac_coords
        for axis, expected in enumerate((0.1, 0.4, -0.5)):
            self.assertEqual(frac[axis], expected)
        self.assertEqual(self.kpoint.a, 0.1)
        self.assertEqual(self.kpoint.b, 0.4)
        self.assertEqual(self.kpoint.c, -0.5)
        self.assertEqual(self.lattice, Lattice.cubic(10.0))
        cart = self.kpoint.cart_coords
        for axis, expected in enumerate((1.0, 4.0, -5.0)):
            self.assertEqual(cart[axis], expected)
        self.assertEqual(self.kpoint.label, "X")
class BandStructureSymmLine_test(PymatgenTest):
def setUp(self):
    """Load reference band structures for several test systems from JSON fixtures."""
    fixtures = (
        ("bs", "Cu2O_361_bandstructure.json"),
        ("bs2", "CaO_2605_bandstructure.json"),
        ("bs_spin", "NiO_19009_bandstructure.json"),
        ("bs_cbm0", "InN_22205_bandstructure.json"),
        ("bs_cu", "Cu_30_bandstructure.json"),
        ("bs_diff_spins", "VBr2_971787_bandstructure.json"),
    )
    for attr, fname in fixtures:
        setattr(self, attr, loadfn(os.path.join(test_dir, fname)))
    warnings.simplefilter("ignore")
def tearDown(self):
    # Restore the default warnings filter that setUp switched off.
    warnings.simplefilter("default")
def test_basic(self):
    # Orbital-projected weights of the Cu2O band structure:
    # projections[spin][band][kpoint][orbital] -> per-site contributions.
    self.assertArrayAlmostEqual(self.bs.projections[Spin.up][10][12][0],
                                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    self.assertArrayAlmostEqual(self.bs.projections[Spin.up][25][0][
                                    Orbital.dyz.value],
                                [0.0, 0.0, 0.0011, 0.0219, 0.0219, 0.069])
    # Element-resolved projections (summed over orbitals and sites).
    self.assertAlmostEqual(
        self.bs.get_projection_on_elements()[Spin.up][25][10]['O'], 0.0328)
    self.assertAlmostEqual(
        self.bs.get_projection_on_elements()[Spin.up][22][25]['Cu'], 0.8327)
    # Element + orbital-character resolved projections for Cu s and d.
    proj = self.bs.get_projections_on_elements_and_orbitals({'Cu': ['s',
                                                                    'd']})
    self.assertAlmostEqual(
        proj[Spin.up][25][0]['Cu']['s'], 0.0027)
    self.assertAlmostEqual(
        proj[Spin.up][25][0]['Cu']['d'], 0.8495999999999999)
    # Band counts, eigenvalues, branch metadata and k-path distances (CaO).
    self.assertEqual(self.bs2.nb_bands, 16)
    self.assertAlmostEqual(self.bs2.bands[Spin.up][5][10], 0.5608)
    self.assertAlmostEqual(self.bs2.bands[Spin.up][5][10], 0.5608)
    self.assertEqual(self.bs2.branches[5]['name'], "L-U")
    self.assertEqual(self.bs2.branches[5]['start_index'], 80)
    self.assertEqual(self.bs2.branches[5]['end_index'], 95)
    self.assertAlmostEqual(self.bs2.distance[70], 4.2335127528765737)
    # Spin-polarized NiO: both spin channels carry independent eigenvalues.
    self.assertEqual(self.bs_spin.nb_bands, 27)
    self.assertAlmostEqual(self.bs_spin.bands[Spin.up][5][10], 0.262)
    self.assertAlmostEqual(self.bs_spin.bands[Spin.down][5][10],
                           1.6156)
def test_properties(self):
    """Check frac/cart coordinates and label of one kpoint, plus the Fermi level."""
    self.one_kpoint = self.bs2.kpoints[31]
    self.assertEqual(self.one_kpoint.frac_coords[0], 0.5)
    self.assertEqual(self.one_kpoint.frac_coords[1], 0.25)
    self.assertEqual(self.one_kpoint.frac_coords[2], 0.75)
    self.assertAlmostEqual(self.one_kpoint.cart_coords[0], 0.64918757)
    self.assertAlmostEqual(self.one_kpoint.cart_coords[1], 1.29837513)
    self.assertAlmostEqual(self.one_kpoint.cart_coords[2], 0.0)
    self.assertEqual(self.one_kpoint.label, "W")
    # BUG FIX: the message must be passed as msg=... — the third positional
    # argument of assertAlmostEqual is `places`, and a string there raises
    # TypeError whenever the two values are not exactly equal.
    self.assertAlmostEqual(self.bs2.efermi, 2.6211967, msg="wrong fermi energy")
def test_get_branch(self):
    """The branch containing kpoint 110 of the CaO band structure is U-W."""
    # Fix: branch names are strings, so compare with assertEqual rather
    # than assertAlmostEqual (which is meant for numeric comparisons).
    self.assertEqual(self.bs2.get_branch(110)[0]['name'], "U-W")
def test_get_direct_band_gap_dict(self):
    """The reported direct gap must match the band energies it points at."""
    diff_spin_gaps = self.bs_diff_spins.get_direct_band_gap_dict()
    self.assertEqual(diff_spin_gaps[Spin.down]['value'], 4.5365)
    # Cross-check the dict against the raw eigenvalues for each spin channel.
    for band_struct in (self.bs2, self.bs_spin):
        gap_info = band_struct.get_direct_band_gap_dict()
        for spin, energies in band_struct.bands.items():
            entry = gap_info[spin]
            k_index = entry['kpoint_index']
            valence, conduction = entry['band_indices']
            recomputed = energies[conduction][k_index] - energies[valence][k_index]
            self.assertEqual(recomputed, entry['value'])
    # Metals have no direct band gap; the call must raise.
    self.assertRaises(ValueError, self.bs_cu.get_direct_band_gap_dict)
def test_get_direct_band_gap(self):
    # Reference direct-gap value for CaO.
    self.assertAlmostEqual(self.bs2.get_direct_band_gap(),
                           4.0125999999999999)
    # Spin-polarized VBr2: only require a positive direct gap.
    self.assertTrue(self.bs_diff_spins.get_direct_band_gap() > 0)
    # Metallic Cu: direct gap is zero.
    self.assertEqual(self.bs_cu.get_direct_band_gap(), 0)
def test_is_metal(self):
    """Insulating CaO and NiO are not metals; elemental Cu is."""
    for insulator in (self.bs2, self.bs_spin):
        self.assertFalse(insulator.is_metal(), "wrong metal assignment")
    self.assertTrue(self.bs_cu.is_metal(), "wrong metal assignment")
def test_get_cbm(self):
    """Check CBM energy, band index and kpoint info for CaO and spin-polarized NiO."""
    cbm = self.bs2.get_cbm()
    # BUG FIX: assertAlmostEqual messages must use the msg= keyword — the
    # third positional argument is `places`, and a string there raises
    # TypeError whenever the values are not exactly equal.
    self.assertAlmostEqual(cbm['energy'], 5.8709, msg="wrong CBM energy")
    self.assertEqual(cbm['band_index'][Spin.up][0], 8, "wrong CBM band index")
    self.assertEqual(cbm['kpoint_index'][0], 15, "wrong CBM kpoint index")
    self.assertEqual(cbm['kpoint'].frac_coords[0], 0.5, "wrong CBM kpoint frac coords")
    self.assertEqual(cbm['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
    self.assertEqual(cbm['kpoint'].frac_coords[2], 0.5, "wrong CBM kpoint frac coords")
    self.assertEqual(cbm['kpoint'].label, "X", "wrong CBM kpoint label")
    cbm_spin = self.bs_spin.get_cbm()
    self.assertAlmostEqual(cbm_spin['energy'], 8.0458, msg="wrong CBM energy")
    self.assertEqual(cbm_spin['band_index'][Spin.up][0], 12, "wrong CBM band index")
    self.assertEqual(len(cbm_spin['band_index'][Spin.down]), 0, "wrong CBM band index")
    self.assertEqual(cbm_spin['kpoint_index'][0], 0, "wrong CBM kpoint index")
    self.assertEqual(cbm_spin['kpoint'].frac_coords[0], 0.0, "wrong CBM kpoint frac coords")
    self.assertEqual(cbm_spin['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
    self.assertEqual(cbm_spin['kpoint'].frac_coords[2], 0.0, "wrong CBM kpoint frac coords")
    self.assertEqual(cbm_spin['kpoint'].label, "\\Gamma", "wrong CBM kpoint label")
def test_get_vbm(self):
    """Check VBM energy, band indices and kpoint info for CaO and spin-polarized NiO."""
    vbm = self.bs2.get_vbm()
    # BUG FIX: assertAlmostEqual messages must use the msg= keyword — the
    # third positional argument is `places`, and a string there raises
    # TypeError whenever the values are not exactly equal.
    self.assertAlmostEqual(vbm['energy'], 2.2361, msg="wrong VBM energy")
    self.assertEqual(len(vbm['band_index'][Spin.up]), 3, "wrong VBM number of bands")
    self.assertEqual(vbm['band_index'][Spin.up][0], 5, "wrong VBM band index")
    self.assertEqual(vbm['kpoint_index'][0], 0, "wrong VBM kpoint index")
    self.assertEqual(vbm['kpoint'].frac_coords[0], 0.0, "wrong VBM kpoint frac coords")
    self.assertEqual(vbm['kpoint'].frac_coords[1], 0.0, "wrong VBM kpoint frac coords")
    self.assertEqual(vbm['kpoint'].frac_coords[2], 0.0, "wrong VBM kpoint frac coords")
    self.assertEqual(vbm['kpoint'].label, "\\Gamma", "wrong VBM kpoint label")
    vbm_spin = self.bs_spin.get_vbm()
    self.assertAlmostEqual(vbm_spin['energy'], 5.731, msg="wrong VBM energy")
    self.assertEqual(len(vbm_spin['band_index'][Spin.up]), 2, "wrong VBM number of bands")
    self.assertEqual(len(vbm_spin['band_index'][Spin.down]), 0, "wrong VBM number of bands")
    self.assertEqual(vbm_spin['band_index'][Spin.up][0], 10, "wrong VBM band index")
    self.assertEqual(vbm_spin['kpoint_index'][0], 79, "wrong VBM kpoint index")
    self.assertEqual(vbm_spin['kpoint'].frac_coords[0], 0.5, "wrong VBM kpoint frac coords")
    self.assertEqual(vbm_spin['kpoint'].frac_coords[1], 0.5, "wrong VBM kpoint frac coords")
    self.assertEqual(vbm_spin['kpoint'].frac_coords[2], 0.5, "wrong VBM kpoint frac coords")
    self.assertEqual(vbm_spin['kpoint'].label, "L", "wrong VBM kpoint label")
def test_get_band_gap(self):
    """Indirect gaps of CaO and NiO; InN has a zero (closed) gap."""
    bg = self.bs2.get_band_gap()
    # BUG FIX: assertAlmostEqual messages must use the msg= keyword — the
    # third positional argument is `places`, and a string there raises
    # TypeError whenever the values are not exactly equal.
    self.assertAlmostEqual(bg['energy'], 3.6348, msg="wrong gap energy")
    self.assertEqual(bg['transition'], "\\Gamma-X", "wrong kpoint transition")
    self.assertFalse(bg['direct'], "wrong nature of the gap")
    bg_spin = self.bs_spin.get_band_gap()
    self.assertAlmostEqual(bg_spin['energy'], 2.3148, msg="wrong gap energy")
    self.assertEqual(bg_spin['transition'], "L-\\Gamma", "wrong kpoint transition")
    self.assertFalse(bg_spin['direct'], "wrong nature of the gap")
    bg_cbm0 = self.bs_cbm0.get_band_gap()
    self.assertAlmostEqual(bg_cbm0['energy'], 0, places=3, msg="wrong gap energy")
def test_get_sym_eq_kpoints_and_degeneracy(self):
    """Kpoint degeneracy needs a structure; check the star of the CBM/VBM kpoints."""
    band_struct = self.bs2
    cbm_k = band_struct.get_cbm()['kpoint'].frac_coords
    vbm_k = band_struct.get_vbm()['kpoint'].frac_coords
    # Without a structure attached, degeneracy cannot be determined.
    self.assertEqual(band_struct.get_kpoint_degeneracy(cbm_k), None)
    band_struct.structure = loadfn(os.path.join(test_dir, "CaO_2605_structure.json"))
    self.assertEqual(band_struct.get_kpoint_degeneracy(cbm_k), 3)
    self.assertEqual(band_struct.get_kpoint_degeneracy(vbm_k), 1)
    cbm_eqs = band_struct.get_sym_eq_kpoints(cbm_k)
    for equivalent in ([0.5, 0., 0.5], [0., 0.5, 0.5], [0.5, 0.5, 0.]):
        self.assertTrue(equivalent in cbm_eqs)
    vbm_eqs = band_struct.get_sym_eq_kpoints(vbm_k)
    self.assertTrue([0., 0., 0.] in vbm_eqs)
def test_as_dict(self):
    """Every stored band structure must serialize to JSON without error."""
    for band_struct in (self.bs, self.bs2, self.bs_spin):
        serialized = json.dumps(band_struct.as_dict())
        self.assertIsNotNone(serialized)
def test_old_format_load(self):
    # Band structures serialized with the legacy dict layout must still
    # deserialize via from_dict, with projections intact.
    with open(os.path.join(test_dir, "bs_ZnS_old.json"),
              "r", encoding='utf-8') as f:
        d = json.load(f)
    bs_old = BandStructureSymmLine.from_dict(d)
    self.assertEqual(bs_old.get_projection_on_elements()[
                         Spin.up][0][0]['Zn'], 0.0971)
class ReconstructBandStructureTest(PymatgenTest):
    def setUp(self):
        """Load the same Cu band structure twice, to be stitched back together."""
        cu_path = os.path.join(test_dir, "Cu_30_bandstructure.json")
        self.bs_cu = loadfn(cu_path)
        self.bs_cu2 = loadfn(cu_path)
        warnings.simplefilter("ignore")

    def tearDown(self):
        # Restore the default warnings filter.
        warnings.simplefilter("default")

    def test_reconstruct_band_structure(self):
        """Two identical halves combine into one structure with doubled kpoints."""
        combined = get_reconstructed_band_structure([self.bs_cu, self.bs_cu2])
        self.assertEqual(combined.bands[Spin.up].shape, (20, 700), "wrong number of bands or kpoints")

    def test_vasprun_bs(self):
        """A band structure parsed from vasprun.xml exposes element projections."""
        vasp_run = BSVasprun(os.path.join(test_dir, "vasprun.xml"),
                             parse_projected_eigen=True,
                             parse_potcar_file=True)
        band_struct = vasp_run.get_band_structure(
            kpoints_filename=os.path.join(test_dir, "KPOINTS.band"),
            line_mode=True)
        band_struct.get_projection_on_elements()
class LobsterBandStructureSymmLine_test(PymatgenTest):
def setUp(self):
    """Load Lobster fatband band structures for SiO2 (plain and spin-polarized)."""
    warnings.simplefilter("ignore")
    # Fix: read the JSON fixtures explicitly as UTF-8, consistent with the
    # other tests in this file and independent of the platform default.
    with open(os.path.join(test_dir, "cohp/Fatband_SiO2/Test_p/lobster_band_structure_spin.json"),
              'r', encoding='utf-8') as f:
        bs_spin_dict = json.load(f)
    self.bs_spin = LobsterBandStructureSymmLine.from_dict(bs_spin_dict)
    with open(os.path.join(test_dir, "cohp/Fatband_SiO2/Test_p/lobster_band_structure.json"),
              'r', encoding='utf-8') as f:
        bs_dict = json.load(f)
    self.bs_p = LobsterBandStructureSymmLine.from_dict(bs_dict)
def tearDown(self):
    # Restore the default warnings filter that setUp switched off.
    warnings.simplefilter("default")
def test_basic(self):
    """Structure, lattice, kpoints, distances and projections round-trip correctly."""
    bs_p = self.bs_p
    bs_spin = self.bs_spin
    # Structure attached to the band structure.
    self.assertAlmostEqual(bs_p.structure[0].frac_coords[0], 0.)
    self.assertAlmostEqual(bs_p.structure[0].frac_coords[1], 0.47634315)
    self.assertAlmostEqual(bs_p.structure[0].frac_coords[2], 0.666667)
    self.assertEqual(bs_p.structure[0].species_string, 'Si')
    self.assertAlmostEqual(bs_p.structure[0].coords[0], -1.19607309)
    self.assertAlmostEqual(bs_p.structure[0].coords[1], 2.0716597)
    self.assertAlmostEqual(bs_p.structure[0].coords[2], 3.67462144)
    self.assertAlmostEqual(bs_p.efermi, 1.06470288)
    # Reciprocal lattice matrix.
    lattice = bs_p.lattice_rec.as_dict()
    self.assertAlmostEqual(lattice["matrix"][0][0], 1.2511575194890285)
    self.assertAlmostEqual(lattice["matrix"][0][1], 0.7223560132915973)
    self.assertAlmostEqual(lattice["matrix"][0][2], 0.0)
    self.assertAlmostEqual(lattice["matrix"][1][0], 0.0)
    self.assertAlmostEqual(lattice["matrix"][1][1], 1.4447123171425553)
    self.assertAlmostEqual(lattice["matrix"][1][2], 0.0)
    self.assertAlmostEqual(lattice["matrix"][2][0], 0.0)
    self.assertAlmostEqual(lattice["matrix"][2][1], 0.0)
    self.assertAlmostEqual(lattice["matrix"][2][2], 1.1399248502312707)
    # Selected kpoints in fractional and cartesian coordinates.
    self.assertAlmostEqual(bs_p.kpoints[8].frac_coords[0], 0.09090909)
    self.assertAlmostEqual(bs_p.kpoints[8].frac_coords[1], 0.0)
    self.assertAlmostEqual(bs_p.kpoints[8].frac_coords[2], 0.0)
    self.assertAlmostEqual(bs_p.kpoints[8].cart_coords[0], 0.11374159)
    self.assertAlmostEqual(bs_p.kpoints[8].cart_coords[1], 0.06566873)
    self.assertAlmostEqual(bs_p.kpoints[8].cart_coords[2], 0.)
    self.assertAlmostEqual(bs_p.kpoints[50].frac_coords[0], 0.46153846)
    self.assertAlmostEqual(bs_p.kpoints[50].frac_coords[1], 0.07692308)
    self.assertAlmostEqual(bs_p.kpoints[50].frac_coords[2], 0.0)
    self.assertAlmostEqual(bs_p.kpoints[50].cart_coords[0], 0.57745732)
    self.assertAlmostEqual(bs_p.kpoints[50].cart_coords[1], 0.4445268)
    self.assertAlmostEqual(bs_p.kpoints[50].cart_coords[2], 0.0)
    self.assertAlmostEqual(bs_p.distance[30], 0.49251552363382556)
    # BUG FIX: assertTrue(a, b) only checks that `a` is truthy (b is the
    # failure message); use assertEqual to actually compare the name.
    self.assertEqual(bs_p.branches[0]["name"], '\\Gamma-K')
    self.assertAlmostEqual(bs_p.get_band_gap()["energy"], 5.6739999999999995)
    # (stray debug print of the projection keys removed)
    self.assertAlmostEqual(bs_p.get_projection_on_elements()[Spin.up][0][0]["Si0+"], 3 * (0.001 + 0.064))
    self.assertAlmostEqual(
        bs_p.get_projections_on_elements_and_orbitals({"Si0+": ["3p"]})[Spin.up][0][0]["Si0+"]["3p"],
        0.003)
    self.assertAlmostEqual(
        bs_p.get_projections_on_elements_and_orbitals({"O0+": ["2p"]})[Spin.up][0][0]["O0+"]["2p"],
        0.002 * 3 + 0.003 * 3)
    dict_here = \
        bs_p.get_projections_on_elements_and_orbitals({"Si0+": ["3s", "3p"], "O0+": ["2s", "2p"]})[Spin.up][0][0]
    self.assertAlmostEqual(dict_here["Si0+"]["3s"], 0.192)
    self.assertAlmostEqual(dict_here["Si0+"]["3p"], 0.003)
    self.assertAlmostEqual(dict_here["O0+"]["2s"], 0.792)
    self.assertAlmostEqual(dict_here["O0+"]["2p"], 0.015)
    # Spin-polarized run, Spin.up channel.
    self.assertAlmostEqual(bs_spin.get_projection_on_elements()[Spin.up][0][0]["Si0+"], 3 * (0.001 + 0.064))
    self.assertAlmostEqual(
        bs_spin.get_projections_on_elements_and_orbitals({"Si0+": ["3p"]})[Spin.up][0][0]["Si0+"]["3p"],
        0.003)
    self.assertAlmostEqual(
        bs_spin.get_projections_on_elements_and_orbitals({"O0+": ["2p"]})[Spin.up][0][0]["O0+"]["2p"],
        0.002 * 3 + 0.003 * 3)
    dict_here = \
        bs_spin.get_projections_on_elements_and_orbitals({"Si0+": ["3s", "3p"], "O0+": ["2s", "2p"]})[Spin.up][0][0]
    self.assertAlmostEqual(dict_here["Si0+"]["3s"], 0.192)
    self.assertAlmostEqual(dict_here["Si0+"]["3p"], 0.003)
    self.assertAlmostEqual(dict_here["O0+"]["2s"], 0.792)
    self.assertAlmostEqual(dict_here["O0+"]["2p"], 0.015)
    # Spin-polarized run, Spin.down channel.
    self.assertAlmostEqual(bs_spin.get_projection_on_elements()[Spin.up][0][0]["Si0+"], 3 * (0.001 + 0.064))
    self.assertAlmostEqual(
        bs_spin.get_projections_on_elements_and_orbitals({"Si0+": ["3p"]})[Spin.down][0][0]["Si0+"]["3p"],
        0.003)
    self.assertAlmostEqual(
        bs_spin.get_projections_on_elements_and_orbitals({"O0+": ["2p"]})[Spin.down][0][0]["O0+"]["2p"],
        0.002 * 3 + 0.003 * 3)
    dict_here = \
        bs_spin.get_projections_on_elements_and_orbitals({"Si0+": ["3s", "3p"], "O0+": ["2s", "2p"]})[Spin.down][0][0]
    self.assertAlmostEqual(dict_here["Si0+"]["3s"], 0.192)
    self.assertAlmostEqual(dict_here["Si0+"]["3p"], 0.003)
    self.assertAlmostEqual(dict_here["O0+"]["2s"], 0.792)
    self.assertAlmostEqual(dict_here["O0+"]["2p"], 0.015)
def test_get_branch(self):
    # Kpoint 0 lies on the first branch, \Gamma-K, spanning indices 0-70.
    branch = self.bs_p.get_branch(0)[0]
    self.assertEqual(branch["name"], '\\Gamma-K')
    self.assertEqual(branch["start_index"], 0)
    self.assertEqual(branch["end_index"], 70)
    self.assertEqual(branch["index"], 0)
def test_get_direct_band_gap_dict(self):
    """Direct gap ~6.006 eV at kpoint 0 between bands 22/24, for every spin channel."""
    for band_struct, spins in ((self.bs_p, (Spin.up,)),
                               (self.bs_spin, (Spin.up, Spin.down))):
        direct_dict = band_struct.get_direct_band_gap_dict()
        for spin in spins:
            self.assertAlmostEqual(direct_dict[spin]["value"], 6.005999999999999)
            self.assertEqual(direct_dict[spin]["kpoint_index"], 0)
            self.assertListEqual(direct_dict[spin]["band_indices"], [22, 24])
def test_get_direct_band_gap(self):
self.assertAlmostEqual(self.bs_p.get_direct_band_gap(), 6.005999999999999)
self.assertAlmostEqual(self.bs_spin.get_direct_band_gap(), 6.005999999999999)
def test_is_metal(self):
self.assertFalse(self.bs_p.is_metal(), "wrong metal assignment")
self.assertFalse(self.bs_spin.is_metal(), "wrong metal assignment")
def test_get_cbm(self):
cbm = self.bs_p.get_cbm()
self.assertAlmostEqual(cbm['energy'], 6.3037028799999995, "wrong CBM energy")
self.assertEqual(cbm['band_index'][Spin.up][0], 24, "wrong CBM band index")
self.assertEqual(cbm['kpoint_index'][0], 0, "wrong CBM kpoint index")
self.assertEqual(cbm['kpoint'].frac_coords[0], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].frac_coords[2], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].label, "\\Gamma", "wrong CBM kpoint label")
cbm_spin = self.bs_spin.get_cbm()
self.assertAlmostEqual(cbm_spin['energy'], 6.30370274, "wrong CBM energy")
self.assertEqual(cbm_spin['band_index'][Spin.up][0], 24, "wrong CBM band index")
self.assertEqual(len(cbm_spin['band_index'][Spin.down]), 1, "wrong CBM band index")
self.assertEqual(cbm_spin['kpoint_index'][0], 0, "wrong CBM kpoint index")
self.assertEqual(cbm_spin['kpoint'].frac_coords[0], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].frac_coords[2], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].label, "\\Gamma", "wrong CBM kpoint label")
def test_get_vbm(self):
vbm = self.bs_p.get_vbm()
self.assertAlmostEqual(vbm['energy'], 0.62970288, "wrong VBM energy")
self.assertEqual(len(vbm['band_index'][Spin.up]), 1, "wrong VBM number of bands")
self.assertEqual(vbm['band_index'][Spin.up][0], 23, "wrong VBM band index")
self.assertEqual(vbm['kpoint_index'][0], 68, "wrong VBM kpoint index")
self.assertAlmostEqual(vbm['kpoint'].frac_coords[0], 0.34615384615385, "wrong VBM kpoint frac coords")
self.assertAlmostEqual(vbm['kpoint'].frac_coords[1], 0.30769230769231, "wrong VBM kpoint frac coords")
self.assertAlmostEqual(vbm['kpoint'].frac_coords[2], 0.0, "wrong VBM kpoint frac coords")
self.assertEqual(vbm['kpoint'].label, None, "wrong VBM kpoint label")
vbm_spin = self.bs_spin.get_vbm()
self.assertAlmostEqual(vbm_spin['energy'], 0.6297027399999999, "wrong VBM energy")
self.assertEqual(len(vbm_spin['band_index'][Spin.up]), 1, "wrong VBM number of bands")
self.assertEqual(len(vbm_spin['band_index'][Spin.down]), 1, "wrong VBM number of bands")
self.assertEqual(vbm_spin['band_index'][Spin.up][0], 23, "wrong VBM band index")
self.assertEqual(vbm_spin['kpoint_index'][0], 68, "wrong VBM kpoint index")
self.assertAlmostEqual(vbm_spin['kpoint'].frac_coords[0], 0.34615384615385, "wrong VBM kpoint frac coords")
self.assertAlmostEqual(vbm_spin['kpoint'].frac_coords[1], 0.30769230769231, "wrong VBM kpoint frac coords")
self.assertAlmostEqual(vbm_spin['kpoint'].frac_coords[2], 0.0, "wrong VBM kpoint frac coords")
self.assertEqual(vbm_spin['kpoint'].label, None, "wrong VBM kpoint label")
def test_get_band_gap(self):
bg = self.bs_p.get_band_gap()
self.assertAlmostEqual(bg['energy'], 5.6739999999999995, "wrong gap energy")
self.assertEqual(bg['transition'], "(0.346,0.308,0.000)-\\Gamma", "wrong kpoint transition")
self.assertFalse(bg['direct'], "wrong nature of the gap")
bg_spin = self.bs_spin.get_band_gap()
self.assertAlmostEqual(bg_spin['energy'], 5.674, "wrong gap energy")
self.assertEqual(bg_spin['transition'], "(0.346,0.308,0.000)-\\Gamma", "wrong kpoint transition")
self.assertFalse(bg_spin['direct'], "wrong nature of the gap")
def test_get_sym_eq_kpoints_and_degeneracy(self):
bs = self.bs_p
cbm_k = bs.get_cbm()['kpoint'].frac_coords
vbm_k = bs.get_vbm()['kpoint'].frac_coords
self.assertEqual(bs.get_kpoint_degeneracy(cbm_k), 1)
self.assertEqual(bs.get_kpoint_degeneracy(vbm_k), 3)
def test_as_dict(self):
s = json.dumps(self.bs_p.as_dict())
self.assertIsNotNone(s)
s = json.dumps(self.bs_spin.as_dict())
self.assertIsNotNone(s)
    def test_old_format_load(self):
        # this method will use the loading from the old dict
        # NOTE(review): only smoke-tests apply_scissor (no assertions);
        # presumably bs_spin was built from an old-format dict in setUp --
        # confirm against the fixture.
        self.bs_spin.apply_scissor(3.0)
if __name__ == '__main__':
unittest.main() | 0.557845 | 0.570271 |
import requests
import sys
import urllib.parse
class StreamCamel:
def __fetch(self, url):
max_retry = 3
attempt = 1
while True:
try:
print("Fetch URL: {}".format(url))
r = requests.get(url = url, timeout=10)
break
except requests.exceptions.RequestException as err:
print('timed out')
attempt = attempt + 1
if attempt > max_retry:
print("HTTP Exception: {}".format(err))
fakeData = ""
return fakeData
else:
print("(Going to retry) HTTP Exception: {}".format(err))
return r.json()
def __page_anything(self, count, max_count, route):
remaining = count
cursor = ""
final_json = []
while (remaining > 0):
batch_size = min(remaining, max_count)
json = self.__fetch('https://api.streamcamel.com/{}?limit={}&cursor={}'.format(route, batch_size, cursor))
if 'data' in json:
final_json += json['data']
cursor = ""
if 'pagination' in json and 'cursor' in json['pagination']:
cursor = json['pagination']['cursor']
if cursor == "":
break
remaining -= batch_size
return final_json
    def companies(self, count=sys.maxsize):
        """Return up to *count* company records (paged 100 per request)."""
        return self.__page_anything(count, 100, 'companies')
    def games(self, count=sys.maxsize):
        """Return up to *count* game records (paged 100 per request)."""
        return self.__page_anything(count, 100, 'games')
    def top_streamers(self, count=sys.maxsize):
        """Return up to *count* user records (paged 500 per request)."""
        return self.__page_anything(count, 500, 'users')
    def company_games(self, company, count=sys.maxsize):
        """Return up to *count* games for *company* (paged 100 per request)."""
        return self.__page_anything(count, 100, 'companies/{}/games'.format(company))
    def missing_games(self):
        """Single fetch (limit 5000) of games without IGDB metadata."""
        return self.__fetch('https://api.streamcamel.com/games_without_igdb?limit=5000')
    def users_stats(self):
        """Single fetch (limit 5000) of per-user statistics."""
        return self.__fetch('https://api.streamcamel.com/users_stats?limit=5000')
def games_stats(self, company=None):
if company is None:
return self.__fetch('https://api.streamcamel.com/games_stats?limit=5000')
else:
company_encoded = urllib.parse.quote(company)
return self.__fetch('https://api.streamcamel.com/games_stats?limit=5000&company=' + company_encoded) | streamcamel.py | import requests
import sys
import urllib.parse
class StreamCamel:
def __fetch(self, url):
max_retry = 3
attempt = 1
while True:
try:
print("Fetch URL: {}".format(url))
r = requests.get(url = url, timeout=10)
break
except requests.exceptions.RequestException as err:
print('timed out')
attempt = attempt + 1
if attempt > max_retry:
print("HTTP Exception: {}".format(err))
fakeData = ""
return fakeData
else:
print("(Going to retry) HTTP Exception: {}".format(err))
return r.json()
def __page_anything(self, count, max_count, route):
remaining = count
cursor = ""
final_json = []
while (remaining > 0):
batch_size = min(remaining, max_count)
json = self.__fetch('https://api.streamcamel.com/{}?limit={}&cursor={}'.format(route, batch_size, cursor))
if 'data' in json:
final_json += json['data']
cursor = ""
if 'pagination' in json and 'cursor' in json['pagination']:
cursor = json['pagination']['cursor']
if cursor == "":
break
remaining -= batch_size
return final_json
def companies(self, count=sys.maxsize):
return self.__page_anything(count, 100, 'companies')
def games(self, count=sys.maxsize):
return self.__page_anything(count, 100, 'games')
def top_streamers(self, count=sys.maxsize):
return self.__page_anything(count, 500, 'users')
def company_games(self, company, count=sys.maxsize):
return self.__page_anything(count, 100, 'companies/{}/games'.format(company))
def missing_games(self):
return self.__fetch('https://api.streamcamel.com/games_without_igdb?limit=5000')
def users_stats(self):
return self.__fetch('https://api.streamcamel.com/users_stats?limit=5000')
def games_stats(self, company=None):
if company is None:
return self.__fetch('https://api.streamcamel.com/games_stats?limit=5000')
else:
company_encoded = urllib.parse.quote(company)
return self.__fetch('https://api.streamcamel.com/games_stats?limit=5000&company=' + company_encoded) | 0.262464 | 0.089654 |
# Import Python libs
import logging
import time
logger = logging.getLogger(__name__)
logging.getLogger('boto').setLevel(logging.INFO)
# Import third party libs
try:
import boto
import boto.dynamodb2
from boto.dynamodb2.fields import HashKey, RangeKey
from boto.dynamodb2.fields import AllIndex, GlobalAllIndex
from boto.dynamodb2.table import Table
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from salt._compat import string_types
def __virtual__():
    '''
    Only load if boto libraries exist.

    Salt calls this loader hook; returning False keeps this module from
    being exposed when the optional boto import failed at module load.
    '''
    if not HAS_BOTO:
        return False
    return True
def _create_connection(region=None, key=None, keyid=None, profile=None):
    '''
    Get a boto connection to DynamoDB.

    Credentials/region resolution order: the ``profile`` argument (a config
    option name or an inline dict) overrides explicit arguments, then the
    ``dynamodb.*`` config options are consulted, and the region finally
    defaults to us-east-1.  Returns None when boto finds no credentials.
    '''
    if profile:
        if isinstance(profile, string_types):
            # A string names a minion/master config option holding the dict.
            _profile = __salt__['config.option'](profile)
        elif isinstance(profile, dict):
            _profile = profile
        # NOTE(review): a profile that is neither str nor dict leaves
        # _profile unbound and raises NameError below -- confirm intended.
        key = _profile.get('key', None)
        keyid = _profile.get('keyid', None)
        region = _profile.get('region', None)
    if not region and __salt__['config.option']('dynamodb.region'):
        region = __salt__['config.option']('dynamodb.region')
    if not region:
        region = 'us-east-1'
    if not key and __salt__['config.option']('dynamodb.key'):
        key = __salt__['config.option']('dynamodb.key')
    if not keyid and __salt__['config.option']('dynamodb.keyid'):
        keyid = __salt__['config.option']('dynamodb.keyid')
    try:
        conn = boto.dynamodb2.connect_to_region(
            region,
            aws_access_key_id=keyid,
            aws_secret_access_key=key
        )
    except boto.exception.NoAuthHandlerFound:
        logger.error('No authentication credentials found when attempting to'
                     ' make boto dynamodb connection.')
        return None
    return conn
def create_table(table_name, region=None, key=None, keyid=None, profile=None,
                 read_capacity_units=None, write_capacity_units=None,
                 hash_key=None, hash_key_data_type=None, range_key=None,
                 range_key_data_type=None, local_indexes=None,
                 global_indexes=None):
    '''
    Creates a DynamoDB table and waits for it to become visible.

    Returns True once the new table is listed, or False if it still has not
    appeared after MAX_ATTEMPTS one-second polls.

    CLI example::

        salt myminion boto_dynamodb.create_table table_name \\
        region=us-east-1 \\
        hash_key=id \\
        hash_key_data_type=N \\
        range_key=created_at \\
        range_key_data_type=N \\
        read_capacity_units=1 \\
        write_capacity_units=1
    '''
    schema = []
    primary_index_fields = []
    primary_index_name = ''
    if hash_key:
        hash_key_obj = HashKey(hash_key, data_type=hash_key_data_type)
        schema.append(hash_key_obj)
        primary_index_fields.append(hash_key_obj)
        primary_index_name += hash_key
    if range_key:
        range_key_obj = RangeKey(range_key, data_type=range_key_data_type)
        schema.append(range_key_obj)
        primary_index_fields.append(range_key_obj)
        # With a range key the primary index is named "<hash>_<range>_index".
        primary_index_name += '_' + range_key + '_index'
    throughput = {
        'read': read_capacity_units,
        'write': write_capacity_units
    }
    local_table_indexes = []
    # Add the table's key
    local_table_indexes.append(
        AllIndex(primary_index_name, parts=primary_index_fields)
    )
    if local_indexes:
        for index in local_indexes:
            local_table_indexes.append(_extract_index(index))
    global_table_indexes = []
    if global_indexes:
        for index in global_indexes:
            global_table_indexes.append(
                _extract_index(index, global_index=True)
            )
    conn = _create_connection(region, key, keyid, profile)
    Table.create(
        table_name,
        schema=schema,
        throughput=throughput,
        indexes=local_table_indexes,
        global_indexes=global_table_indexes,
        connection=conn
    )
    # Table creation propagates asynchronously; poll until it is listed.
    MAX_ATTEMPTS = 30
    for _ in range(MAX_ATTEMPTS):
        if exists(table_name, region, key, keyid, profile):
            return True
        time.sleep(1)  # sleep for one second and try again
    return False
def exists(table_name, region=None, key=None, keyid=None, profile=None):
    '''
    Check to see if a table exists.

    CLI example::

        salt myminion boto_dynamodb.exists table_name region=us-east-1
    '''
    # NOTE(review): _create_connection returns None when credentials are
    # missing; list_tables() would then raise AttributeError here.
    conn = _create_connection(region, key, keyid, profile)
    tables = conn.list_tables()
    return tables and table_name in tables['TableNames']
def delete(table_name, region=None, key=None, keyid=None, profile=None):
    '''
    Delete a DynamoDB table and wait for the deletion to propagate.

    Returns True once the table is no longer listed, or False if it is
    still present after MAX_ATTEMPTS one-second polls.

    CLI example::

        salt myminion boto_dynamodb.delete table_name region=us-east-1
    '''
    conn = _create_connection(region, key, keyid, profile)
    table = Table(table_name, connection=conn)
    table.delete()
    # Deletion propagates asynchronously; poll until the table disappears.
    MAX_ATTEMPTS = 30
    for _ in range(MAX_ATTEMPTS):
        if not exists(table_name, region, key, keyid, profile):
            return True
        time.sleep(1)  # sleep for one second and try again
    return False
def _extract_index(index_data, global_index=False):
'''
Instantiates and returns an AllIndex object given a valid index
configuration
'''
parsed_data = {}
keys = []
for key, value in index_data.iteritems():
for item in value:
for field, data in item.iteritems():
if field == 'hash_key':
parsed_data['hash_key'] = data
elif field == 'hash_key_data_type':
parsed_data['hash_key_data_type'] = data
elif field == 'range_key':
parsed_data['range_key'] = data
elif field == 'range_key_data_type':
parsed_data['range_key_data_type'] = data
elif field == 'name':
parsed_data['name'] = data
elif field == 'read_capacity_units':
parsed_data['read_capacity_units'] = data
elif field == 'write_capacity_units':
parsed_data['write_capacity_units'] = data
if parsed_data['hash_key']:
keys.append(
HashKey(
parsed_data['hash_key'],
data_type=parsed_data['hash_key_data_type']
)
)
if parsed_data['range_key']:
keys.append(
RangeKey(
parsed_data['range_key'],
data_type=parsed_data['range_key_data_type']
)
)
if (
global_index and
parsed_data['read_capacity_units'] and
parsed_data['write_capacity_units']):
parsed_data['throughput'] = {
'read': parsed_data['read_capacity_units'],
'write': parsed_data['write_capacity_units']
}
if parsed_data['name'] and len(keys) > 0:
if global_index:
return GlobalAllIndex(
parsed_data['name'],
parts=keys,
throughput=parsed_data['throughput']
)
else:
return AllIndex(
parsed_data['name'],
parts=keys
) | salt/modules/boto_dynamodb.py | # Import Python libs
import logging
import time
logger = logging.getLogger(__name__)
logging.getLogger('boto').setLevel(logging.INFO)
# Import third party libs
try:
import boto
import boto.dynamodb2
from boto.dynamodb2.fields import HashKey, RangeKey
from boto.dynamodb2.fields import AllIndex, GlobalAllIndex
from boto.dynamodb2.table import Table
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from salt._compat import string_types
def __virtual__():
'''
Only load if boto libraries exist.
'''
if not HAS_BOTO:
return False
return True
def _create_connection(region=None, key=None, keyid=None, profile=None):
'''
Get a boto connection to DynamoDB.
'''
if profile:
if isinstance(profile, string_types):
_profile = __salt__['config.option'](profile)
elif isinstance(profile, dict):
_profile = profile
key = _profile.get('key', None)
keyid = _profile.get('keyid', None)
region = _profile.get('region', None)
if not region and __salt__['config.option']('dynamodb.region'):
region = __salt__['config.option']('dynamodb.region')
if not region:
region = 'us-east-1'
if not key and __salt__['config.option']('dynamodb.key'):
key = __salt__['config.option']('dynamodb.key')
if not keyid and __salt__['config.option']('dynamodb.keyid'):
keyid = __salt__['config.option']('dynamodb.keyid')
try:
conn = boto.dynamodb2.connect_to_region(
region,
aws_access_key_id=keyid,
aws_secret_access_key=key
)
except boto.exception.NoAuthHandlerFound:
logger.error('No authentication credentials found when attempting to'
' make boto dynamodb connection.')
return None
return conn
def create_table(table_name, region=None, key=None, keyid=None, profile=None,
read_capacity_units=None, write_capacity_units=None,
hash_key=None, hash_key_data_type=None, range_key=None,
range_key_data_type=None, local_indexes=None,
global_indexes=None):
'''
Creates a DynamoDB table.
CLI example::
salt myminion boto_dynamodb.create_table table_name /
region=us-east-1 /
hash_key=id /
hash_key_data_type=N /
range_key=created_at /
range_key_data_type=N /
read_capacity_units=1 /
write_capacity_units=1
'''
schema = []
primary_index_fields = []
primary_index_name = ''
if hash_key:
hash_key_obj = HashKey(hash_key, data_type=hash_key_data_type)
schema.append(hash_key_obj)
primary_index_fields.append(hash_key_obj)
primary_index_name += hash_key
if range_key:
range_key_obj = RangeKey(range_key, data_type=range_key_data_type)
schema.append(range_key_obj)
primary_index_fields.append(range_key_obj)
primary_index_name += '_'
primary_index_name += range_key
primary_index_name += '_index'
throughput = {
'read': read_capacity_units,
'write': write_capacity_units
}
local_table_indexes = []
# Add the table's key
local_table_indexes.append(
AllIndex(primary_index_name, parts=primary_index_fields)
)
if local_indexes:
for index in local_indexes:
local_table_indexes.append(_extract_index(index))
global_table_indexes = []
if global_indexes:
for index in global_indexes:
global_table_indexes.append(
_extract_index(index, global_index=True)
)
conn = _create_connection(region, key, keyid, profile)
Table.create(
table_name,
schema=schema,
throughput=throughput,
indexes=local_table_indexes,
global_indexes=global_table_indexes,
connection=conn
)
# Table creation can take several seconds to propagate.
# We will check MAX_ATTEMPTS times.
MAX_ATTEMPTS = 30
for i in range(MAX_ATTEMPTS):
if exists(
table_name,
region,
key,
keyid,
profile
):
return True
else:
time.sleep(1) # sleep for one second and try again
return False
def exists(table_name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if a table exists.
CLI example::
salt myminion boto_dynamodb.exists table_name region=us-east-1
'''
conn = _create_connection(region, key, keyid, profile)
tables = conn.list_tables()
return tables and table_name in tables['TableNames']
def delete(table_name, region=None, key=None, keyid=None, profile=None):
'''
Delete a DynamoDB table.
CLI example::
salt myminion boto_dynamodb.delete table_name region=us-east-1
'''
conn = _create_connection(region, key, keyid, profile)
table = Table(table_name, connection=conn)
table.delete()
# Table deletion can take several seconds to propagate.
# We will retry MAX_ATTEMPTS times.
MAX_ATTEMPTS = 30
for i in range(MAX_ATTEMPTS):
if not exists(table_name, region, key, keyid, profile):
return True
else:
time.sleep(1) # sleep for one second and try again
return False
def _extract_index(index_data, global_index=False):
'''
Instantiates and returns an AllIndex object given a valid index
configuration
'''
parsed_data = {}
keys = []
for key, value in index_data.iteritems():
for item in value:
for field, data in item.iteritems():
if field == 'hash_key':
parsed_data['hash_key'] = data
elif field == 'hash_key_data_type':
parsed_data['hash_key_data_type'] = data
elif field == 'range_key':
parsed_data['range_key'] = data
elif field == 'range_key_data_type':
parsed_data['range_key_data_type'] = data
elif field == 'name':
parsed_data['name'] = data
elif field == 'read_capacity_units':
parsed_data['read_capacity_units'] = data
elif field == 'write_capacity_units':
parsed_data['write_capacity_units'] = data
if parsed_data['hash_key']:
keys.append(
HashKey(
parsed_data['hash_key'],
data_type=parsed_data['hash_key_data_type']
)
)
if parsed_data['range_key']:
keys.append(
RangeKey(
parsed_data['range_key'],
data_type=parsed_data['range_key_data_type']
)
)
if (
global_index and
parsed_data['read_capacity_units'] and
parsed_data['write_capacity_units']):
parsed_data['throughput'] = {
'read': parsed_data['read_capacity_units'],
'write': parsed_data['write_capacity_units']
}
if parsed_data['name'] and len(keys) > 0:
if global_index:
return GlobalAllIndex(
parsed_data['name'],
parts=keys,
throughput=parsed_data['throughput']
)
else:
return AllIndex(
parsed_data['name'],
parts=keys
) | 0.396419 | 0.113826 |
import numpy as np
import re
import csv
class Data(object):
"""
Class to handle loading and processing of raw datasets.
"""
    def __init__(self, data_source,
                 alphabet="abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}",
                 input_size=1014, num_of_classes=8):
        """
        Initialization of a Data object.
        Args:
            data_source (str): Raw data file path
            alphabet (str): Alphabet of characters to index
            input_size (int): Size of input features
            num_of_classes (int): Number of classes in data
        """
        self.alphabet = alphabet
        self.alphabet_size = len(self.alphabet)
        self.dict = {}  # Maps each character to an integer
        self.no_of_classes = num_of_classes
        for idx, char in enumerate(self.alphabet):
            # Indexes start at 1; 0 is implicitly reserved for characters
            # outside the alphabet (see str_to_indexes).
            self.dict[char] = idx + 1
        self.length = input_size
        self.data_source = data_source
def load_data(self):
"""
Load raw data from the source file into data variable.
Returns: None
"""
data = []
with open(self.data_source, 'r', encoding='utf-8') as f:
rdr = csv.reader(f, delimiter=',', quotechar='"')
for row in rdr:
txt = ""
for s in row[1:]:
txt = txt + " " + re.sub("^\s*(.-)\s*$", "%1", s).replace("\\n", "\n")
data.append((int(row[0]), txt)) # format: (label, text)
self.data = np.array(data)
print("Data loaded from " + self.data_source)
    def get_all_data(self):
        """
        Return all loaded data from data variable.

        Returns:
            tuple: (indices, labels, rows) where ``indices`` is an int64
            array of per-character indexes, ``labels`` the matching one-hot
            vectors, and ``rows`` the raw (label, text) records.
        """
        data_size = len(self.data)
        start_index = 0
        end_index = data_size
        batch_texts = self.data[start_index:end_index]
        batch_indices = []
        one_hot = np.eye(self.no_of_classes, dtype='int64')
        classes = []
        for c, s in batch_texts:
            batch_indices.append(self.str_to_indexes(s))
            #c = int(c) - 1
            # NOTE(review): labels are used as-is, so they must already be
            # 0-based (the commented-out line above shifted 1-based labels);
            # a label equal to no_of_classes raises IndexError at one_hot[c]
            # -- confirm which convention the data files use.
            c = int(c)
            classes.append(one_hot[c])
        return np.asarray(batch_indices, dtype='int64'), np.asarray(classes),batch_texts
def str_to_indexes(self, s):
"""
Convert a string to character indexes based on character dictionary.
Args:
s (str): String to be converted to indexes
Returns:
str2idx (np.ndarray): Indexes of characters in s
"""
s = s.lower()
max_length = min(len(s), self.length)
str2idx = np.zeros(self.length, dtype='int64')
for i in range(1, max_length + 1):
c = s[-i]
if c in self.dict:
str2idx[i - 1] = self.dict[c]
return str2idx | data_utils.py | import numpy as np
import re
import csv
class Data(object):
"""
Class to handle loading and processing of raw datasets.
"""
def __init__(self, data_source,
alphabet="abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}",
input_size=1014, num_of_classes=8):
"""
Initialization of a Data object.
Args:
data_source (str): Raw data file path
alphabet (str): Alphabet of characters to index
input_size (int): Size of input features
num_of_classes (int): Number of classes in data
"""
self.alphabet = alphabet
self.alphabet_size = len(self.alphabet)
self.dict = {} # Maps each character to an integer
self.no_of_classes = num_of_classes
for idx, char in enumerate(self.alphabet):
self.dict[char] = idx + 1
self.length = input_size
self.data_source = data_source
def load_data(self):
"""
Load raw data from the source file into data variable.
Returns: None
"""
data = []
with open(self.data_source, 'r', encoding='utf-8') as f:
rdr = csv.reader(f, delimiter=',', quotechar='"')
for row in rdr:
txt = ""
for s in row[1:]:
txt = txt + " " + re.sub("^\s*(.-)\s*$", "%1", s).replace("\\n", "\n")
data.append((int(row[0]), txt)) # format: (label, text)
self.data = np.array(data)
print("Data loaded from " + self.data_source)
def get_all_data(self):
"""
Return all loaded data from data variable.
Returns:
(np.ndarray) Data transformed from raw to indexed form with associated one-hot label.
"""
data_size = len(self.data)
start_index = 0
end_index = data_size
batch_texts = self.data[start_index:end_index]
batch_indices = []
one_hot = np.eye(self.no_of_classes, dtype='int64')
classes = []
for c, s in batch_texts:
batch_indices.append(self.str_to_indexes(s))
#c = int(c) - 1
c = int(c)
classes.append(one_hot[c])
return np.asarray(batch_indices, dtype='int64'), np.asarray(classes),batch_texts
def str_to_indexes(self, s):
"""
Convert a string to character indexes based on character dictionary.
Args:
s (str): String to be converted to indexes
Returns:
str2idx (np.ndarray): Indexes of characters in s
"""
s = s.lower()
max_length = min(len(s), self.length)
str2idx = np.zeros(self.length, dtype='int64')
for i in range(1, max_length + 1):
c = s[-i]
if c in self.dict:
str2idx[i - 1] = self.dict[c]
return str2idx | 0.84556 | 0.489626 |
import logging
from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import Codes, ShadowBanError, SynapseError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
parse_json_object_from_request,
)
from synapse.http.site import SynapseRequest
from synapse.types import JsonDict
from synapse.util import stringutils
from ._base import client_patterns
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class RoomUpgradeRestServlet(RestServlet):
    """Handler for room upgrade requests.
    Handles requests of the form:
    POST /_matrix/client/r0/rooms/$roomid/upgrade HTTP/1.1
    Content-Type: application/json
    {
        "new_version": "2",
    }
    Creates a new room and shuts down the old one. Returns the ID of the new room.
    """
    PATTERNS = client_patterns(
        # /rooms/$roomid/upgrade
        "/rooms/(?P<room_id>[^/]*)/upgrade$"
    )
    def __init__(self, hs: "HomeServer"):
        super().__init__()
        self._hs = hs
        self._room_creation_handler = hs.get_room_creation_handler()
        self._auth = hs.get_auth()
    async def on_POST(
        self, request: SynapseRequest, room_id: str
    ) -> Tuple[int, JsonDict]:
        """Upgrade ``room_id`` to the room version named in the JSON body.

        Returns (200, {"replacement_room": <new room id>}); raises
        SynapseError 400 when the requested version is unknown.
        """
        requester = await self._auth.get_user_by_req(request)
        content = parse_json_object_from_request(request)
        assert_params_in_dict(content, ("new_version",))
        new_version = KNOWN_ROOM_VERSIONS.get(content["new_version"])
        if new_version is None:
            raise SynapseError(
                400,
                "Your homeserver does not support this room version",
                Codes.UNSUPPORTED_ROOM_VERSION,
            )
        try:
            new_room_id = await self._room_creation_handler.upgrade_room(
                requester, room_id, new_version
            )
        except ShadowBanError:
            # Generate a random room ID.
            # Returning a made-up ID keeps the request looking successful,
            # so the shadow ban is not revealed to the user.
            new_room_id = stringutils.random_string(18)
        ret = {"replacement_room": new_room_id}
        return 200, ret
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
RoomUpgradeRestServlet(hs).register(http_server) | synapse/rest/client/room_upgrade_rest_servlet.py |
import logging
from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import Codes, ShadowBanError, SynapseError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
parse_json_object_from_request,
)
from synapse.http.site import SynapseRequest
from synapse.types import JsonDict
from synapse.util import stringutils
from ._base import client_patterns
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class RoomUpgradeRestServlet(RestServlet):
"""Handler for room upgrade requests.
Handles requests of the form:
POST /_matrix/client/r0/rooms/$roomid/upgrade HTTP/1.1
Content-Type: application/json
{
"new_version": "2",
}
Creates a new room and shuts down the old one. Returns the ID of the new room.
"""
PATTERNS = client_patterns(
# /rooms/$roomid/upgrade
"/rooms/(?P<room_id>[^/]*)/upgrade$"
)
def __init__(self, hs: "HomeServer"):
super().__init__()
self._hs = hs
self._room_creation_handler = hs.get_room_creation_handler()
self._auth = hs.get_auth()
async def on_POST(
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
requester = await self._auth.get_user_by_req(request)
content = parse_json_object_from_request(request)
assert_params_in_dict(content, ("new_version",))
new_version = KNOWN_ROOM_VERSIONS.get(content["new_version"])
if new_version is None:
raise SynapseError(
400,
"Your homeserver does not support this room version",
Codes.UNSUPPORTED_ROOM_VERSION,
)
try:
new_room_id = await self._room_creation_handler.upgrade_room(
requester, room_id, new_version
)
except ShadowBanError:
# Generate a random room ID.
new_room_id = stringutils.random_string(18)
ret = {"replacement_room": new_room_id}
return 200, ret
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
RoomUpgradeRestServlet(hs).register(http_server) | 0.652795 | 0.133105 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
from nose.tools import *
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import (
set_ev_cls,
MAIN_DISPATCHER,
)
from ryu.lib.packet import vlan, ethernet, ipv4
from ryu.lib.ofp_pktinfilter import packet_in_filter, RequiredTypeFilter
from ryu.lib import mac
from ryu.ofproto import ether, ofproto_v1_3, ofproto_v1_3_parser
LOG = logging.getLogger('test_pktinfilter')
class _Datapath(object):
    # Minimal stand-in for an OpenFlow 1.3 datapath: only the two class
    # attributes the packet-in machinery reads.
    ofproto = ofproto_v1_3
    ofproto_parser = ofproto_v1_3_parser
class _PacketInFilterApp(app_manager.RyuApp):
    """Test app whose packet-in handler only fires for VLAN-tagged frames
    (RequiredTypeFilter is configured with ``types=[vlan.vlan]``)."""
    def __init__(self, *args, **kwargs):
        super(_PacketInFilterApp, self).__init__(*args, **kwargs)
    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    @packet_in_filter(RequiredTypeFilter, {'types': [
        vlan.vlan,
    ]})
    def packet_in_handler(self, ev):
        # Only reached when the filter let the event through; the tests
        # treat a truthy return as "packet accepted".
        return True
class Test_packet_in_filter(unittest.TestCase):
""" Test case for pktinfilter
"""
    def setUp(self):
        # Build a fresh filtered app for each test case.
        self.app = _PacketInFilterApp()
    def tearDown(self):
        # Nothing to clean up.
        pass
    def test_pkt_in_filter_pass(self):
        # A VLAN-tagged packet satisfies the RequiredTypeFilter and must
        # reach the handler, which returns True.
        datapath = _Datapath()
        e = ethernet.ethernet(mac.BROADCAST_STR,
                              mac.BROADCAST_STR,
                              ether.ETH_TYPE_8021Q)
        v = vlan.vlan()
        i = ipv4.ipv4()
        pkt = (e / v / i)
        pkt.serialize()
        # NOTE(review): ``buffer`` is Python 2 only; this test cannot run
        # unchanged on Python 3.
        pkt_in = ofproto_v1_3_parser.OFPPacketIn(datapath,
                                                 data=buffer(pkt.data))
        ev = ofp_event.EventOFPPacketIn(pkt_in)
        ok_(self.app.packet_in_handler(ev))
    def test_pkt_in_filter_discard(self):
        # A plain IPv4 packet (no VLAN tag) must be filtered out before the
        # handler runs, so the decorated call returns a falsy value.
        datapath = _Datapath()
        e = ethernet.ethernet(mac.BROADCAST_STR,
                              mac.BROADCAST_STR,
                              ether.ETH_TYPE_IP)
        i = ipv4.ipv4()
        pkt = (e / i)
        pkt.serialize()
        # NOTE(review): ``buffer`` is Python 2 only.
        pkt_in = ofproto_v1_3_parser.OFPPacketIn(datapath,
                                                 data=buffer(pkt.data))
        ev = ofp_event.EventOFPPacketIn(pkt_in)
        ok_(not self.app.packet_in_handler(ev))
def test_pkt_in_filter_truncated(self):
datapath = _Datapath()
truncated_data = buffer('')
pkt_in = ofproto_v1_3_parser.OFPPacketIn(datapath,
data=truncated_data)
ev = ofp_event.EventOFPPacketIn(pkt_in)
ok_(not self.app.packet_in_handler(ev)) | ryu/tests/unit/lib/test_ofp_pktinfilter.py |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
from nose.tools import *
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import (
set_ev_cls,
MAIN_DISPATCHER,
)
from ryu.lib.packet import vlan, ethernet, ipv4
from ryu.lib.ofp_pktinfilter import packet_in_filter, RequiredTypeFilter
from ryu.lib import mac
from ryu.ofproto import ether, ofproto_v1_3, ofproto_v1_3_parser
LOG = logging.getLogger('test_pktinfilter')
class _Datapath(object):
ofproto = ofproto_v1_3
ofproto_parser = ofproto_v1_3_parser
class _PacketInFilterApp(app_manager.RyuApp):
def __init__(self, *args, **kwargs):
super(_PacketInFilterApp, self).__init__(*args, **kwargs)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
@packet_in_filter(RequiredTypeFilter, {'types': [
vlan.vlan,
]})
def packet_in_handler(self, ev):
return True
class Test_packet_in_filter(unittest.TestCase):
""" Test case for pktinfilter
"""
def setUp(self):
self.app = _PacketInFilterApp()
def tearDown(self):
pass
def test_pkt_in_filter_pass(self):
datapath = _Datapath()
e = ethernet.ethernet(mac.BROADCAST_STR,
mac.BROADCAST_STR,
ether.ETH_TYPE_8021Q)
v = vlan.vlan()
i = ipv4.ipv4()
pkt = (e / v / i)
pkt.serialize()
pkt_in = ofproto_v1_3_parser.OFPPacketIn(datapath,
data=buffer(pkt.data))
ev = ofp_event.EventOFPPacketIn(pkt_in)
ok_(self.app.packet_in_handler(ev))
def test_pkt_in_filter_discard(self):
datapath = _Datapath()
e = ethernet.ethernet(mac.BROADCAST_STR,
mac.BROADCAST_STR,
ether.ETH_TYPE_IP)
i = ipv4.ipv4()
pkt = (e / i)
pkt.serialize()
pkt_in = ofproto_v1_3_parser.OFPPacketIn(datapath,
data=buffer(pkt.data))
ev = ofp_event.EventOFPPacketIn(pkt_in)
ok_(not self.app.packet_in_handler(ev))
def test_pkt_in_filter_truncated(self):
datapath = _Datapath()
truncated_data = buffer('')
pkt_in = ofproto_v1_3_parser.OFPPacketIn(datapath,
data=truncated_data)
ev = ofp_event.EventOFPPacketIn(pkt_in)
ok_(not self.app.packet_in_handler(ev)) | 0.494873 | 0.117243 |
import os
from contextlib import contextmanager
from funcy import get_in
from dvc.exceptions import DvcException
class SCMError(DvcException):
"""Base class for source control management errors."""
class FileNotInRepoError(SCMError):
"""Thrown when trying to find .gitignore for a file that is not in a scm
repository.
"""
class CloneError(SCMError):
def __init__(self, url, path):
super().__init__(f"Failed to clone repo '{url}' to '{path}'")
class RevError(SCMError):
pass
class NoSCMError(SCMError):
def __init__(self):
msg = (
"Only supported for Git repositories. If you're "
"seeing this error in a Git repo, try updating the DVC "
"configuration with `dvc config core.no_scm false`."
)
super().__init__(msg)
class MergeConflictError(SCMError):
pass
class InvalidRemoteSCMRepo(SCMError):
def __init__(self, url: str):
msg = f"'{url}' is not a valid Git remote or URL"
super().__init__(msg)
class GitAuthError(SCMError):
def __init__(self, url: str):
super().__init__(
f"HTTP Git authentication is not supported: '{url}'"
"\nSee https://dvc.org/doc//user-guide/"
"troubleshooting#git-auth"
)
class Base:
"""Base class for source control management driver implementations."""
def __init__(self, root_dir=os.curdir):
self._root_dir = os.path.realpath(root_dir)
@property
def root_dir(self) -> str:
return self._root_dir
def __repr__(self):
return "{class_name}: '{directory}'".format(
class_name=type(self).__name__, directory=self.dir
)
@property
def dir(self):
"""Path to a directory with SCM specific information."""
return None
@staticmethod
def is_repo(root_dir): # pylint: disable=unused-argument
"""Returns whether or not root_dir is a valid SCM repository."""
return True
@staticmethod
def is_submodule(root_dir): # pylint: disable=unused-argument
"""Returns whether or not root_dir is a valid SCM repository
submodule.
"""
return True
def is_ignored(self, path): # pylint: disable=unused-argument
"""Returns whether or not path is ignored by SCM."""
return False
def ignore(self, path): # pylint: disable=unused-argument
"""Makes SCM ignore a specified path."""
def ignore_remove(self, path): # pylint: disable=unused-argument
"""Makes SCM stop ignoring a specified path."""
@property
def ignore_file(self):
"""Filename for a file that contains ignored paths for this SCM."""
def ignore_list(self, p_list):
"""Makes SCM ignore all paths specified in a list."""
return [self.ignore(path) for path in p_list]
def add(self, paths):
"""Makes SCM track every path from a specified list of paths."""
def commit(self, msg):
"""Makes SCM create a commit."""
def checkout(self, branch, create_new=False):
"""Makes SCM checkout a branch."""
def branch(self, branch):
"""Makes SCM create a branch with a specified name."""
def tag(self, tag):
"""Makes SCM create a tag with a specified name."""
def untracked_files(self): # pylint: disable=no-self-use
"""Returns a list of untracked files."""
return []
def is_tracked(self, path): # pylint: disable=no-self-use, unused-argument
"""Returns whether or not a specified path is tracked."""
return False
def is_dirty(self):
"""Return whether the SCM contains uncommitted changes."""
return False
def active_branch(self): # pylint: disable=no-self-use
"""Returns current branch in the repo."""
return ""
def list_branches(self): # pylint: disable=no-self-use
"""Returns a list of available branches in the repo."""
return []
def list_tags(self): # pylint: disable=no-self-use
"""Returns a list of available tags in the repo."""
return []
def list_all_commits(self): # pylint: disable=no-self-use
"""Returns a list of commits in the repo."""
return []
def cleanup_ignores(self):
"""
This method should clean up ignores (eg. entries in .gitignore),
use, when method editing ignores (eg. add, run, import) fails to
perform its task.
"""
def reset_ignores(self):
"""
Method to reset in-memory ignore storing mechanism.
"""
def reset_tracked_files(self):
"""
Method to reset in-memory tracked files storing mechanism.
"""
def remind_to_track(self):
"""
Method to remind user to track newly created files handled by scm
"""
def track_changed_files(self):
"""
Method to stage files that have changed
"""
def track_file(self, path):
"""
Method to add file to mechanism that will remind user
to track new files
"""
@contextmanager
def track_file_changes(self, config=None, autostage=False):
autostage = get_in(
config or {}, ["core", "autostage"], default=autostage
)
try:
yield
except Exception:
self.cleanup_ignores()
raise
self.reset_ignores()
if autostage:
self.track_changed_files()
else:
self.remind_to_track()
self.reset_tracked_files()
def belongs_to_scm(self, path):
"""Return boolean whether file belongs to scm"""
def close(self):
"""Method to close the files"""
def _reset(self) -> None:
pass | dvc/scm/base.py |
import os
from contextlib import contextmanager
from funcy import get_in
from dvc.exceptions import DvcException
class SCMError(DvcException):
"""Base class for source control management errors."""
class FileNotInRepoError(SCMError):
"""Thrown when trying to find .gitignore for a file that is not in a scm
repository.
"""
class CloneError(SCMError):
def __init__(self, url, path):
super().__init__(f"Failed to clone repo '{url}' to '{path}'")
class RevError(SCMError):
pass
class NoSCMError(SCMError):
def __init__(self):
msg = (
"Only supported for Git repositories. If you're "
"seeing this error in a Git repo, try updating the DVC "
"configuration with `dvc config core.no_scm false`."
)
super().__init__(msg)
class MergeConflictError(SCMError):
pass
class InvalidRemoteSCMRepo(SCMError):
def __init__(self, url: str):
msg = f"'{url}' is not a valid Git remote or URL"
super().__init__(msg)
class GitAuthError(SCMError):
def __init__(self, url: str):
super().__init__(
f"HTTP Git authentication is not supported: '{url}'"
"\nSee https://dvc.org/doc//user-guide/"
"troubleshooting#git-auth"
)
class Base:
"""Base class for source control management driver implementations."""
def __init__(self, root_dir=os.curdir):
self._root_dir = os.path.realpath(root_dir)
@property
def root_dir(self) -> str:
return self._root_dir
def __repr__(self):
return "{class_name}: '{directory}'".format(
class_name=type(self).__name__, directory=self.dir
)
@property
def dir(self):
"""Path to a directory with SCM specific information."""
return None
@staticmethod
def is_repo(root_dir): # pylint: disable=unused-argument
"""Returns whether or not root_dir is a valid SCM repository."""
return True
@staticmethod
def is_submodule(root_dir): # pylint: disable=unused-argument
"""Returns whether or not root_dir is a valid SCM repository
submodule.
"""
return True
def is_ignored(self, path): # pylint: disable=unused-argument
"""Returns whether or not path is ignored by SCM."""
return False
def ignore(self, path): # pylint: disable=unused-argument
"""Makes SCM ignore a specified path."""
def ignore_remove(self, path): # pylint: disable=unused-argument
"""Makes SCM stop ignoring a specified path."""
@property
def ignore_file(self):
"""Filename for a file that contains ignored paths for this SCM."""
def ignore_list(self, p_list):
"""Makes SCM ignore all paths specified in a list."""
return [self.ignore(path) for path in p_list]
def add(self, paths):
"""Makes SCM track every path from a specified list of paths."""
def commit(self, msg):
"""Makes SCM create a commit."""
def checkout(self, branch, create_new=False):
"""Makes SCM checkout a branch."""
def branch(self, branch):
"""Makes SCM create a branch with a specified name."""
def tag(self, tag):
"""Makes SCM create a tag with a specified name."""
def untracked_files(self): # pylint: disable=no-self-use
"""Returns a list of untracked files."""
return []
def is_tracked(self, path): # pylint: disable=no-self-use, unused-argument
"""Returns whether or not a specified path is tracked."""
return False
def is_dirty(self):
"""Return whether the SCM contains uncommitted changes."""
return False
def active_branch(self): # pylint: disable=no-self-use
"""Returns current branch in the repo."""
return ""
def list_branches(self): # pylint: disable=no-self-use
"""Returns a list of available branches in the repo."""
return []
def list_tags(self): # pylint: disable=no-self-use
"""Returns a list of available tags in the repo."""
return []
def list_all_commits(self): # pylint: disable=no-self-use
"""Returns a list of commits in the repo."""
return []
def cleanup_ignores(self):
"""
This method should clean up ignores (eg. entries in .gitignore),
use, when method editing ignores (eg. add, run, import) fails to
perform its task.
"""
def reset_ignores(self):
"""
Method to reset in-memory ignore storing mechanism.
"""
def reset_tracked_files(self):
"""
Method to reset in-memory tracked files storing mechanism.
"""
def remind_to_track(self):
"""
Method to remind user to track newly created files handled by scm
"""
def track_changed_files(self):
"""
Method to stage files that have changed
"""
def track_file(self, path):
"""
Method to add file to mechanism that will remind user
to track new files
"""
@contextmanager
def track_file_changes(self, config=None, autostage=False):
autostage = get_in(
config or {}, ["core", "autostage"], default=autostage
)
try:
yield
except Exception:
self.cleanup_ignores()
raise
self.reset_ignores()
if autostage:
self.track_changed_files()
else:
self.remind_to_track()
self.reset_tracked_files()
def belongs_to_scm(self, path):
"""Return boolean whether file belongs to scm"""
def close(self):
"""Method to close the files"""
def _reset(self) -> None:
pass | 0.867219 | 0.101947 |
import os
from ruamel import yaml
import great_expectations as ge
from great_expectations.core.batch import BatchRequest
context = ge.get_context()
datasource_yaml = f"""
name: taxi_datasource
class_name: Datasource
module_name: great_expectations.datasource
execution_engine:
module_name: great_expectations.execution_engine
class_name: PandasExecutionEngine
data_connectors:
configured_data_connector_name:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: <PATH_TO_YOUR_DATA_HERE>
glob_directive: "*.csv"
default_regex:
pattern: (.*)
group_names:
- data_asset_name
assets:
taxi_data_flat:
base_directory: samples_2020
pattern: (yellow_trip_data_sample_.*)\\.csv
group_names:
- filename
taxi_data_year_month:
base_directory: samples_2020
pattern: ([\\w]+)_trip_data_sample_(\\d{{4}})-(\\d{{2}})\\.csv
group_names:
- name
- year
- month
default_inferred_data_connector_name:
class_name: InferredAssetFilesystemDataConnector
base_directory: <PATH_TO_YOUR_DATA_HERE>
glob_directive: "*.csv"
default_regex:
pattern: (.*)
group_names:
- data_asset_name
"""
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the yaml above.
data_dir_path = os.path.join("..", "data")
datasource_yaml = datasource_yaml.replace("<PATH_TO_YOUR_DATA_HERE>", data_dir_path)
context.test_yaml_config(datasource_yaml)
context.add_datasource(**yaml.load(datasource_yaml))
available_data_asset_names = context.datasources[
"taxi_datasource"
].get_available_data_asset_names(
data_connector_names="default_inferred_data_connector_name"
)[
"default_inferred_data_connector_name"
]
assert len(available_data_asset_names) == 36
context.create_expectation_suite(
expectation_suite_name="test_suite", overwrite_existing=True
)
# Here is a BatchRequest naming an inferred data_asset.
batch_request = BatchRequest(
datasource_name="taxi_datasource",
data_connector_name="default_inferred_data_connector_name",
data_asset_name="<YOUR_DATA_ASSET_NAME>",
)
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your data asset name directly in the BatchRequest above.
batch_request.data_asset_name = "yellow_trip_data_sample_2019-01.csv"
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name="test_suite"
)
print(validator.head())
batch_list = context.get_batch_list(batch_request=batch_request)
assert len(batch_list) == 1
# Here is a BatchRequest naming a configured data_asset representing an un-partitioned (flat) filename structure.
batch_request = BatchRequest(
datasource_name="taxi_datasource",
data_connector_name="configured_data_connector_name",
data_asset_name="<YOUR_DATA_ASSET_NAME>",
)
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your data asset name directly in the BatchRequest above.
batch_request.data_asset_name = "taxi_data_flat"
batch_list = context.get_batch_list(batch_request=batch_request)
assert len(batch_list) == 12
# Here is a BatchRequest naming a configured data_asset representing a filename structure partitioned by year and month.
# This BatchRequest specifies multiple batches, which is useful for dataset exploration.
batch_request = BatchRequest(
datasource_name="taxi_datasource",
data_connector_name="configured_data_connector_name",
data_asset_name="<YOUR_DATA_ASSET_NAME>",
data_connector_query={"custom_filter_function": "<YOUR_CUSTOM_FILTER_FUNCTION>"},
)
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your data asset name and other arguments directly in the BatchRequest above.
batch_request.data_asset_name = "taxi_data_year_month"
batch_request.data_connector_query["custom_filter_function"] = (
lambda batch_identifiers: batch_identifiers["name"] == "yellow"
and 1 < int(batch_identifiers["month"]) < 11
)
batch_list = context.get_batch_list(batch_request=batch_request)
assert len(batch_list) == 9
assert batch_list[0].data.dataframe.shape[0] == 10000
# Here is a BatchRequest naming a configured data_asset representing a filename structure partitioned by year and month.
# This BatchRequest specifies one batch, which is useful for data analysis.
batch_request = BatchRequest(
datasource_name="taxi_datasource",
data_connector_name="configured_data_connector_name",
data_asset_name="<YOUR_DATA_ASSET_NAME>",
data_connector_query={
"batch_filter_parameters": {
"<YOUR_BATCH_FILTER_PARAMETER_KEY>": "<YOUR_BATCH_FILTER_PARAMETER_VALUE>",
}
},
)
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your data asset name and other arguments directly in the BatchRequest above.
batch_request.data_asset_name = "taxi_data_year_month"
batch_request.data_connector_query["batch_filter_parameters"] = {
"month": "01",
}
batch_list = context.get_batch_list(batch_request=batch_request)
assert len(batch_list) == 1
assert batch_list[0].data.dataframe.shape[0] == 10000
# Here is a BatchRequest naming a configured data_asset representing a filename structure partitioned by year and month.
# This BatchRequest specifies one batch, which is useful for data analysis.
# In addition, the resulting batch is split according to "passenger_count" column with the focus on two-passenger rides.
# Moreover, a randomly sampled fraction of this subset of the batch data is obtained and returned as the final result.
batch_request = BatchRequest(
datasource_name="taxi_datasource",
data_connector_name="configured_data_connector_name",
data_asset_name="<YOUR_DATA_ASSET_NAME>",
data_connector_query={
"batch_filter_parameters": {
"<YOUR_BATCH_FILTER_PARAMETER_KEY>": "<YOUR_BATCH_FILTER_PARAMETER_VALUE>",
}
},
batch_spec_passthrough={
"splitter_method": "<YOUR_SPLITTER_METHOD>",
"splitter_kwargs": {
"<YOUR_SPLITTER_OBJECTIVE_NAME>": "<YOUR_SPLITTER_OBJECTIVE_KEYS>",
"batch_identifiers": {
"<YOUR_SPLITTER_OBJECTIVE_0_KEY>": "<YOUR_SPLITTER_OBJECTIVE_0_VALUE>",
"<YOUR_SPLITTER_OBJECTIVE_1_KEY>": "<YOUR_SPLITTER_OBJECTIVE_1_VALUE>",
"<YOUR_SPLITTER_OBJECTIVE_2_KEY>": "<YOUR_SPLITTER_OBJECTIVE_2_VALUE>",
# ...
},
},
"sampling_method": "<YOUR_SAMPLING_METHOD>",
"sampling_kwargs": {
"<YOUR_SAMPLING_ARGUMENT_0_NAME>": "<YOUR_SAMPLING_ARGUMENT_0_VALUE>",
"<YOUR_SAMPLING_ARGUMENT_1_NAME>": "<YOUR_SAMPLING_ARGUMENT_1_VALUE>",
"<YOUR_SAMPLING_ARGUMENT_2_NAME>": "<YOUR_SAMPLING_ARGUMENT_2_VALUE>",
# ...
},
},
)
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your data asset name and other arguments directly in the BatchRequest above.
batch_request.data_asset_name = "taxi_data_year_month"
batch_request.data_connector_query["batch_filter_parameters"] = {
"month": "01",
}
batch_request.batch_spec_passthrough["splitter_method"] = "_split_on_column_value"
batch_request.batch_spec_passthrough["splitter_kwargs"] = {
"column_name": "passenger_count",
"batch_identifiers": {"passenger_count": 2},
}
batch_request.batch_spec_passthrough["sampling_method"] = "_sample_using_random"
batch_request.batch_spec_passthrough["sampling_kwargs"] = {"p": 1.0e-1}
batch_list = context.get_batch_list(batch_request=batch_request)
assert len(batch_list) == 1
assert batch_list[0].data.dataframe.shape[0] < 200
# NOTE: The following code is only for testing and can be ignored by users.
assert isinstance(validator, ge.validator.validator.Validator)
assert "taxi_datasource" in [ds["name"] for ds in context.list_datasources()]
assert "yellow_trip_data_sample_2019-01.csv" in set(
context.get_available_data_asset_names()["taxi_datasource"][
"default_inferred_data_connector_name"
]
) | tests/integration/docusaurus/connecting_to_your_data/how_to_introspect_and_partition_your_data/files/yaml_example.py | import os
from ruamel import yaml
import great_expectations as ge
from great_expectations.core.batch import BatchRequest
context = ge.get_context()
datasource_yaml = f"""
name: taxi_datasource
class_name: Datasource
module_name: great_expectations.datasource
execution_engine:
module_name: great_expectations.execution_engine
class_name: PandasExecutionEngine
data_connectors:
configured_data_connector_name:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: <PATH_TO_YOUR_DATA_HERE>
glob_directive: "*.csv"
default_regex:
pattern: (.*)
group_names:
- data_asset_name
assets:
taxi_data_flat:
base_directory: samples_2020
pattern: (yellow_trip_data_sample_.*)\\.csv
group_names:
- filename
taxi_data_year_month:
base_directory: samples_2020
pattern: ([\\w]+)_trip_data_sample_(\\d{{4}})-(\\d{{2}})\\.csv
group_names:
- name
- year
- month
default_inferred_data_connector_name:
class_name: InferredAssetFilesystemDataConnector
base_directory: <PATH_TO_YOUR_DATA_HERE>
glob_directive: "*.csv"
default_regex:
pattern: (.*)
group_names:
- data_asset_name
"""
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the yaml above.
data_dir_path = os.path.join("..", "data")
datasource_yaml = datasource_yaml.replace("<PATH_TO_YOUR_DATA_HERE>", data_dir_path)
context.test_yaml_config(datasource_yaml)
context.add_datasource(**yaml.load(datasource_yaml))
available_data_asset_names = context.datasources[
"taxi_datasource"
].get_available_data_asset_names(
data_connector_names="default_inferred_data_connector_name"
)[
"default_inferred_data_connector_name"
]
assert len(available_data_asset_names) == 36
context.create_expectation_suite(
expectation_suite_name="test_suite", overwrite_existing=True
)
# Here is a BatchRequest naming an inferred data_asset.
batch_request = BatchRequest(
datasource_name="taxi_datasource",
data_connector_name="default_inferred_data_connector_name",
data_asset_name="<YOUR_DATA_ASSET_NAME>",
)
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your data asset name directly in the BatchRequest above.
batch_request.data_asset_name = "yellow_trip_data_sample_2019-01.csv"
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name="test_suite"
)
print(validator.head())
batch_list = context.get_batch_list(batch_request=batch_request)
assert len(batch_list) == 1
# Here is a BatchRequest naming a configured data_asset representing an un-partitioned (flat) filename structure.
batch_request = BatchRequest(
datasource_name="taxi_datasource",
data_connector_name="configured_data_connector_name",
data_asset_name="<YOUR_DATA_ASSET_NAME>",
)
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your data asset name directly in the BatchRequest above.
batch_request.data_asset_name = "taxi_data_flat"
batch_list = context.get_batch_list(batch_request=batch_request)
assert len(batch_list) == 12
# Here is a BatchRequest naming a configured data_asset representing a filename structure partitioned by year and month.
# This BatchRequest specifies multiple batches, which is useful for dataset exploration.
batch_request = BatchRequest(
datasource_name="taxi_datasource",
data_connector_name="configured_data_connector_name",
data_asset_name="<YOUR_DATA_ASSET_NAME>",
data_connector_query={"custom_filter_function": "<YOUR_CUSTOM_FILTER_FUNCTION>"},
)
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your data asset name and other arguments directly in the BatchRequest above.
batch_request.data_asset_name = "taxi_data_year_month"
batch_request.data_connector_query["custom_filter_function"] = (
lambda batch_identifiers: batch_identifiers["name"] == "yellow"
and 1 < int(batch_identifiers["month"]) < 11
)
batch_list = context.get_batch_list(batch_request=batch_request)
assert len(batch_list) == 9
assert batch_list[0].data.dataframe.shape[0] == 10000
# Here is a BatchRequest naming a configured data_asset representing a filename structure partitioned by year and month.
# This BatchRequest specifies one batch, which is useful for data analysis.
batch_request = BatchRequest(
datasource_name="taxi_datasource",
data_connector_name="configured_data_connector_name",
data_asset_name="<YOUR_DATA_ASSET_NAME>",
data_connector_query={
"batch_filter_parameters": {
"<YOUR_BATCH_FILTER_PARAMETER_KEY>": "<YOUR_BATCH_FILTER_PARAMETER_VALUE>",
}
},
)
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your data asset name and other arguments directly in the BatchRequest above.
batch_request.data_asset_name = "taxi_data_year_month"
batch_request.data_connector_query["batch_filter_parameters"] = {
"month": "01",
}
batch_list = context.get_batch_list(batch_request=batch_request)
assert len(batch_list) == 1
assert batch_list[0].data.dataframe.shape[0] == 10000
# Here is a BatchRequest naming a configured data_asset representing a filename structure partitioned by year and month.
# This BatchRequest specifies one batch, which is useful for data analysis.
# In addition, the resulting batch is split according to "passenger_count" column with the focus on two-passenger rides.
# Moreover, a randomly sampled fraction of this subset of the batch data is obtained and returned as the final result.
batch_request = BatchRequest(
datasource_name="taxi_datasource",
data_connector_name="configured_data_connector_name",
data_asset_name="<YOUR_DATA_ASSET_NAME>",
data_connector_query={
"batch_filter_parameters": {
"<YOUR_BATCH_FILTER_PARAMETER_KEY>": "<YOUR_BATCH_FILTER_PARAMETER_VALUE>",
}
},
batch_spec_passthrough={
"splitter_method": "<YOUR_SPLITTER_METHOD>",
"splitter_kwargs": {
"<YOUR_SPLITTER_OBJECTIVE_NAME>": "<YOUR_SPLITTER_OBJECTIVE_KEYS>",
"batch_identifiers": {
"<YOUR_SPLITTER_OBJECTIVE_0_KEY>": "<YOUR_SPLITTER_OBJECTIVE_0_VALUE>",
"<YOUR_SPLITTER_OBJECTIVE_1_KEY>": "<YOUR_SPLITTER_OBJECTIVE_1_VALUE>",
"<YOUR_SPLITTER_OBJECTIVE_2_KEY>": "<YOUR_SPLITTER_OBJECTIVE_2_VALUE>",
# ...
},
},
"sampling_method": "<YOUR_SAMPLING_METHOD>",
"sampling_kwargs": {
"<YOUR_SAMPLING_ARGUMENT_0_NAME>": "<YOUR_SAMPLING_ARGUMENT_0_VALUE>",
"<YOUR_SAMPLING_ARGUMENT_1_NAME>": "<YOUR_SAMPLING_ARGUMENT_1_VALUE>",
"<YOUR_SAMPLING_ARGUMENT_2_NAME>": "<YOUR_SAMPLING_ARGUMENT_2_VALUE>",
# ...
},
},
)
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your data asset name and other arguments directly in the BatchRequest above.
batch_request.data_asset_name = "taxi_data_year_month"
batch_request.data_connector_query["batch_filter_parameters"] = {
"month": "01",
}
batch_request.batch_spec_passthrough["splitter_method"] = "_split_on_column_value"
batch_request.batch_spec_passthrough["splitter_kwargs"] = {
"column_name": "passenger_count",
"batch_identifiers": {"passenger_count": 2},
}
batch_request.batch_spec_passthrough["sampling_method"] = "_sample_using_random"
batch_request.batch_spec_passthrough["sampling_kwargs"] = {"p": 1.0e-1}
batch_list = context.get_batch_list(batch_request=batch_request)
assert len(batch_list) == 1
assert batch_list[0].data.dataframe.shape[0] < 200
# NOTE: The following code is only for testing and can be ignored by users.
assert isinstance(validator, ge.validator.validator.Validator)
assert "taxi_datasource" in [ds["name"] for ds in context.list_datasources()]
assert "yellow_trip_data_sample_2019-01.csv" in set(
context.get_available_data_asset_names()["taxi_datasource"][
"default_inferred_data_connector_name"
]
) | 0.624637 | 0.420838 |
from __future__ import unicode_literals
# pytest.raises is used as a context manager throughout these tests
import pytest
import time
import json
import boto3
from botocore.exceptions import ClientError
import sure # noqa
from moto import mock_ec2, mock_iam, mock_cloudformation
def quick_instance_creation():
    """Launch a single throwaway EC2 instance and return its instance id."""
    ec2 = boto3.resource("ec2", "us-east-1")
    instances = ec2.create_instances(ImageId="ami-1234abcd", MinCount=1, MaxCount=1)
    # Only the instance id is needed by these tests.
    return instances[0].id
def quick_instance_profile_creation(name):
    """Create an IAM instance profile and return its (arn, name) pair."""
    iam = boto3.resource("iam", "us-east-1")
    profile = iam.create_instance_profile(InstanceProfileName=name, Path="/")
    return profile.arn, profile.name
@mock_ec2
@mock_iam
def test_associate():
    """A fresh association reports the instance, the profile ARN and state 'associating'."""
    client = boto3.client("ec2", region_name="us-east-1")
    instance_id = quick_instance_creation()
    profile_arn, profile_name = quick_instance_profile_creation("test_profile")

    response = client.associate_iam_instance_profile(
        IamInstanceProfile={"Arn": profile_arn, "Name": profile_name},
        InstanceId=instance_id,
    )

    assoc = response["IamInstanceProfileAssociation"]
    assert assoc["InstanceId"] == instance_id
    assert assoc["IamInstanceProfile"]["Arn"] == profile_arn
    assert assoc["State"] == "associating"
@mock_ec2
@mock_iam
def test_invalid_associate():
    """Duplicate, unknown-profile, and unknown-instance associations all raise ClientError."""
    client = boto3.client("ec2", region_name="us-east-1")
    instance_id = quick_instance_creation()
    profile_arn, profile_name = quick_instance_profile_creation("test_profile")
    profile = {"Arn": profile_arn, "Name": profile_name}
    client.associate_iam_instance_profile(
        IamInstanceProfile=profile, InstanceId=instance_id
    )

    # A second association on the same instance is rejected.
    with pytest.raises(ClientError) as ex:
        client.associate_iam_instance_profile(
            IamInstanceProfile=profile, InstanceId=instance_id
        )
    err = ex.value.response["Error"]
    assert err["Code"] == "IncorrectState"
    assert "There is an existing association for" in err["Message"]

    # A non-existent instance profile is rejected.
    with pytest.raises(ClientError) as ex:
        client.associate_iam_instance_profile(
            IamInstanceProfile={"Arn": "fake", "Name": "fake"}, InstanceId=instance_id,
        )
    err = ex.value.response["Error"]
    assert err["Code"] == "NoSuchEntity"
    assert "not found" in err["Message"]

    # A non-existent instance id is rejected.
    with pytest.raises(ClientError) as ex:
        client.associate_iam_instance_profile(
            IamInstanceProfile=profile, InstanceId="fake"
        )
    err = ex.value.response["Error"]
    assert err["Code"] == "InvalidInstanceID.NotFound"
    assert "does not exist" in err["Message"]
@mock_ec2
@mock_iam
def test_describe():
    """describe_iam_instance_profile_associations supports full, id-based and filtered listing."""
    client = boto3.client("ec2", region_name="us-east-1")
    instance_id = quick_instance_creation()
    profile_arn, profile_name = quick_instance_profile_creation("test_profile")
    client.associate_iam_instance_profile(
        IamInstanceProfile={"Arn": profile_arn, "Name": profile_name},
        InstanceId=instance_id,
    )

    listing = client.describe_iam_instance_profile_associations()
    assert len(listing["IamInstanceProfileAssociations"]) == 1
    first = listing["IamInstanceProfileAssociations"][0]
    assert first["InstanceId"] == instance_id
    assert first["IamInstanceProfile"]["Arn"] == profile_arn
    assert first["State"] == "associated"

    # Add a second association so id/filter lookups have something to exclude.
    instance_id = quick_instance_creation()
    profile_arn, profile_name = quick_instance_profile_creation("test_profile1")
    client.associate_iam_instance_profile(
        IamInstanceProfile={"Arn": profile_arn, "Name": profile_name},
        InstanceId=instance_id,
    )
    all_assocs = client.describe_iam_instance_profile_associations()
    assert len(all_assocs["IamInstanceProfileAssociations"]) == 2
    target = all_assocs["IamInstanceProfileAssociations"][0]

    # Lookup by association id returns exactly the requested association.
    by_id = client.describe_iam_instance_profile_associations(
        AssociationIds=[target["AssociationId"]]
    )
    assert len(by_id["IamInstanceProfileAssociations"]) == 1
    assert (
        by_id["IamInstanceProfileAssociations"][0]["IamInstanceProfile"]["Arn"]
        == target["IamInstanceProfile"]["Arn"]
    )

    # Lookup by instance-id and state filters returns the same association.
    filtered = client.describe_iam_instance_profile_associations(
        Filters=[
            {"Name": "instance-id", "Values": [target["InstanceId"]]},
            {"Name": "state", "Values": ["associated"]},
        ]
    )
    assert len(filtered["IamInstanceProfileAssociations"]) == 1
    assert (
        filtered["IamInstanceProfileAssociations"][0]["IamInstanceProfile"]["Arn"]
        == target["IamInstanceProfile"]["Arn"]
    )
@mock_ec2
@mock_iam
def test_replace():
    """Replacing an association swaps in the new profile and reports 'associating'."""
    client = boto3.client("ec2", region_name="us-east-1")
    instance_id = quick_instance_creation()
    arn1, name1 = quick_instance_profile_creation("test_profile1")
    arn2, name2 = quick_instance_profile_creation("test_profile2")

    created = client.associate_iam_instance_profile(
        IamInstanceProfile={"Arn": arn1, "Name": name1},
        InstanceId=instance_id,
    )
    replaced = client.replace_iam_instance_profile_association(
        IamInstanceProfile={"Arn": arn2, "Name": name2},
        AssociationId=created["IamInstanceProfileAssociation"]["AssociationId"],
    )

    assoc = replaced["IamInstanceProfileAssociation"]
    assert assoc["IamInstanceProfile"]["Arn"] == arn2
    assert assoc["State"] == "associating"
@mock_ec2
@mock_iam
def test_invalid_replace():
    """Replacement with a bad association id or a bad profile raises ClientError."""
    client = boto3.client("ec2", region_name="us-east-1")
    instance_id = quick_instance_creation()
    arn1, name1 = quick_instance_profile_creation("test_profile")
    arn2, name2 = quick_instance_profile_creation("test_profile2")
    created = client.associate_iam_instance_profile(
        IamInstanceProfile={"Arn": arn1, "Name": name1},
        InstanceId=instance_id,
    )

    # Unknown association id.
    with pytest.raises(ClientError) as ex:
        client.replace_iam_instance_profile_association(
            IamInstanceProfile={"Arn": arn2, "Name": name2},
            AssociationId="fake",
        )
    err = ex.value.response["Error"]
    assert err["Code"] == "InvalidAssociationID.NotFound"
    assert "An invalid association-id of" in err["Message"]

    # Unknown replacement profile.
    with pytest.raises(ClientError) as ex:
        client.replace_iam_instance_profile_association(
            IamInstanceProfile={"Arn": "fake", "Name": "fake"},
            AssociationId=created["IamInstanceProfileAssociation"]["AssociationId"],
        )
    err = ex.value.response["Error"]
    assert err["Code"] == "NoSuchEntity"
    assert "not found" in err["Message"]
@mock_ec2
@mock_iam
def test_disassociate():
client = boto3.client("ec2", region_name="us-east-1")
instance_id = quick_instance_creation()
instance_profile_arn, instance_profile_name = quick_instance_profile_creation(
"test_profile"
)
association = client.associate_iam_instance_profile(
IamInstanceProfile={
"Arn": instance_profile_arn,
"Name": instance_profile_name,
},
InstanceId=instance_id,
)
associations = client.describe_iam_instance_profile_associations()
associations["IamInstanceProfileAssociations"].should.have.length_of(1)
disassociation = client.disassociate_iam_instance_profile(
AssociationId=association["IamInstanceProfileAssociation"]["AssociationId"],
)
disassociation["IamInstanceProfileAssociation"]["IamInstanceProfile"][
"Arn"
].should.equal(instance_profile_arn)
disassociation["IamInstanceProfileAssociation"]["State"].should.equal(
"disassociating"
)
associations = client.describe_iam_instance_profile_associations()
associations["IamInstanceProfileAssociations"].should.have.length_of(0)
@mock_ec2
@mock_iam
def test_invalid_disassociate():
client = boto3.client("ec2", region_name="us-east-1")
# Wrong id
with pytest.raises(ClientError) as ex:
client.disassociate_iam_instance_profile(AssociationId="fake",)
ex.value.response["Error"]["Code"].should.equal("InvalidAssociationID.NotFound")
ex.value.response["Error"]["Message"].should.contain("An invalid association-id of")
@mock_ec2
@mock_cloudformation
def test_cloudformation():
dummy_template_json = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"InstanceProfile": {
"Type": "AWS::IAM::InstanceProfile",
"Properties": {"Path": "/", "Roles": []},
},
"Ec2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"IamInstanceProfile": {"Ref": "InstanceProfile"},
"KeyName": "mykey1",
"ImageId": "ami-7a11e213",
},
},
},
}
client = boto3.client("ec2", region_name="us-east-1")
cf_conn = boto3.client("cloudformation", region_name="us-east-1")
cf_conn.create_stack(
StackName="test_stack", TemplateBody=json.dumps(dummy_template_json)
)
associations = client.describe_iam_instance_profile_associations()
associations["IamInstanceProfileAssociations"].should.have.length_of(1)
associations["IamInstanceProfileAssociations"][0]["IamInstanceProfile"][
"Arn"
].should.contain("test_stack")
cf_conn.delete_stack(StackName="test_stack")
associations = client.describe_iam_instance_profile_associations()
associations["IamInstanceProfileAssociations"].should.have.length_of(0) | tests/test_ec2/test_iam_instance_profile_associations.py | from __future__ import unicode_literals
# Ensure 'pytest.raises' context manager support for Python 2.6
import pytest
import time
import json
import boto3
from botocore.exceptions import ClientError
import sure # noqa
from moto import mock_ec2, mock_iam, mock_cloudformation
def quick_instance_creation():
image_id = "ami-1234abcd"
conn_ec2 = boto3.resource("ec2", "us-east-1")
test_instance = conn_ec2.create_instances(ImageId=image_id, MinCount=1, MaxCount=1)
# We only need instance id for this tests
return test_instance[0].id
def quick_instance_profile_creation(name):
conn_iam = boto3.resource("iam", "us-east-1")
test_instance_profile = conn_iam.create_instance_profile(
InstanceProfileName=name, Path="/"
)
return test_instance_profile.arn, test_instance_profile.name
@mock_ec2
@mock_iam
def test_associate():
client = boto3.client("ec2", region_name="us-east-1")
instance_id = quick_instance_creation()
instance_profile_arn, instance_profile_name = quick_instance_profile_creation(
"test_profile"
)
association = client.associate_iam_instance_profile(
IamInstanceProfile={
"Arn": instance_profile_arn,
"Name": instance_profile_name,
},
InstanceId=instance_id,
)
association["IamInstanceProfileAssociation"]["InstanceId"].should.equal(instance_id)
association["IamInstanceProfileAssociation"]["IamInstanceProfile"][
"Arn"
].should.equal(instance_profile_arn)
association["IamInstanceProfileAssociation"]["State"].should.equal("associating")
@mock_ec2
@mock_iam
def test_invalid_associate():
client = boto3.client("ec2", region_name="us-east-1")
instance_id = quick_instance_creation()
instance_profile_arn, instance_profile_name = quick_instance_profile_creation(
"test_profile"
)
client.associate_iam_instance_profile(
IamInstanceProfile={
"Arn": instance_profile_arn,
"Name": instance_profile_name,
},
InstanceId=instance_id,
)
# Duplicate
with pytest.raises(ClientError) as ex:
client.associate_iam_instance_profile(
IamInstanceProfile={
"Arn": instance_profile_arn,
"Name": instance_profile_name,
},
InstanceId=instance_id,
)
ex.value.response["Error"]["Code"].should.equal("IncorrectState")
ex.value.response["Error"]["Message"].should.contain(
"There is an existing association for"
)
# Wrong instance profile
with pytest.raises(ClientError) as ex:
client.associate_iam_instance_profile(
IamInstanceProfile={"Arn": "fake", "Name": "fake"}, InstanceId=instance_id,
)
ex.value.response["Error"]["Code"].should.equal("NoSuchEntity")
ex.value.response["Error"]["Message"].should.contain("not found")
# Wrong instance id
with pytest.raises(ClientError) as ex:
client.associate_iam_instance_profile(
IamInstanceProfile={
"Arn": instance_profile_arn,
"Name": instance_profile_name,
},
InstanceId="fake",
)
ex.value.response["Error"]["Code"].should.equal("InvalidInstanceID.NotFound")
ex.value.response["Error"]["Message"].should.contain("does not exist")
@mock_ec2
@mock_iam
def test_describe():
client = boto3.client("ec2", region_name="us-east-1")
instance_id = quick_instance_creation()
instance_profile_arn, instance_profile_name = quick_instance_profile_creation(
"test_profile"
)
client.associate_iam_instance_profile(
IamInstanceProfile={
"Arn": instance_profile_arn,
"Name": instance_profile_name,
},
InstanceId=instance_id,
)
associations = client.describe_iam_instance_profile_associations()
associations["IamInstanceProfileAssociations"].should.have.length_of(1)
associations["IamInstanceProfileAssociations"][0]["InstanceId"].should.equal(
instance_id
)
associations["IamInstanceProfileAssociations"][0]["IamInstanceProfile"][
"Arn"
].should.equal(instance_profile_arn)
associations["IamInstanceProfileAssociations"][0]["State"].should.equal(
"associated"
)
instance_id = quick_instance_creation()
instance_profile_arn, instance_profile_name = quick_instance_profile_creation(
"test_profile1"
)
client.associate_iam_instance_profile(
IamInstanceProfile={
"Arn": instance_profile_arn,
"Name": instance_profile_name,
},
InstanceId=instance_id,
)
next_test_associations = client.describe_iam_instance_profile_associations()
next_test_associations["IamInstanceProfileAssociations"].should.have.length_of(2)
associations = client.describe_iam_instance_profile_associations(
AssociationIds=[
next_test_associations["IamInstanceProfileAssociations"][0][
"AssociationId"
],
]
)
associations["IamInstanceProfileAssociations"].should.have.length_of(1)
associations["IamInstanceProfileAssociations"][0]["IamInstanceProfile"][
"Arn"
].should.equal(
next_test_associations["IamInstanceProfileAssociations"][0][
"IamInstanceProfile"
]["Arn"]
)
associations = client.describe_iam_instance_profile_associations(
Filters=[
{
"Name": "instance-id",
"Values": [
next_test_associations["IamInstanceProfileAssociations"][0][
"InstanceId"
],
],
},
{"Name": "state", "Values": ["associated"]},
]
)
associations["IamInstanceProfileAssociations"].should.have.length_of(1)
associations["IamInstanceProfileAssociations"][0]["IamInstanceProfile"][
"Arn"
].should.equal(
next_test_associations["IamInstanceProfileAssociations"][0][
"IamInstanceProfile"
]["Arn"]
)
@mock_ec2
@mock_iam
def test_replace():
client = boto3.client("ec2", region_name="us-east-1")
instance_id1 = quick_instance_creation()
instance_profile_arn1, instance_profile_name1 = quick_instance_profile_creation(
"test_profile1"
)
instance_profile_arn2, instance_profile_name2 = quick_instance_profile_creation(
"test_profile2"
)
association = client.associate_iam_instance_profile(
IamInstanceProfile={
"Arn": instance_profile_arn1,
"Name": instance_profile_name1,
},
InstanceId=instance_id1,
)
association = client.replace_iam_instance_profile_association(
IamInstanceProfile={
"Arn": instance_profile_arn2,
"Name": instance_profile_name2,
},
AssociationId=association["IamInstanceProfileAssociation"]["AssociationId"],
)
association["IamInstanceProfileAssociation"]["IamInstanceProfile"][
"Arn"
].should.equal(instance_profile_arn2)
association["IamInstanceProfileAssociation"]["State"].should.equal("associating")
@mock_ec2
@mock_iam
def test_invalid_replace():
client = boto3.client("ec2", region_name="us-east-1")
instance_id = quick_instance_creation()
instance_profile_arn, instance_profile_name = quick_instance_profile_creation(
"test_profile"
)
instance_profile_arn2, instance_profile_name2 = quick_instance_profile_creation(
"test_profile2"
)
association = client.associate_iam_instance_profile(
IamInstanceProfile={
"Arn": instance_profile_arn,
"Name": instance_profile_name,
},
InstanceId=instance_id,
)
# Wrong id
with pytest.raises(ClientError) as ex:
client.replace_iam_instance_profile_association(
IamInstanceProfile={
"Arn": instance_profile_arn2,
"Name": instance_profile_name2,
},
AssociationId="fake",
)
ex.value.response["Error"]["Code"].should.equal("InvalidAssociationID.NotFound")
ex.value.response["Error"]["Message"].should.contain("An invalid association-id of")
# Wrong instance profile
with pytest.raises(ClientError) as ex:
client.replace_iam_instance_profile_association(
IamInstanceProfile={"Arn": "fake", "Name": "fake",},
AssociationId=association["IamInstanceProfileAssociation"]["AssociationId"],
)
ex.value.response["Error"]["Code"].should.equal("NoSuchEntity")
ex.value.response["Error"]["Message"].should.contain("not found")
@mock_ec2
@mock_iam
def test_disassociate():
client = boto3.client("ec2", region_name="us-east-1")
instance_id = quick_instance_creation()
instance_profile_arn, instance_profile_name = quick_instance_profile_creation(
"test_profile"
)
association = client.associate_iam_instance_profile(
IamInstanceProfile={
"Arn": instance_profile_arn,
"Name": instance_profile_name,
},
InstanceId=instance_id,
)
associations = client.describe_iam_instance_profile_associations()
associations["IamInstanceProfileAssociations"].should.have.length_of(1)
disassociation = client.disassociate_iam_instance_profile(
AssociationId=association["IamInstanceProfileAssociation"]["AssociationId"],
)
disassociation["IamInstanceProfileAssociation"]["IamInstanceProfile"][
"Arn"
].should.equal(instance_profile_arn)
disassociation["IamInstanceProfileAssociation"]["State"].should.equal(
"disassociating"
)
associations = client.describe_iam_instance_profile_associations()
associations["IamInstanceProfileAssociations"].should.have.length_of(0)
@mock_ec2
@mock_iam
def test_invalid_disassociate():
client = boto3.client("ec2", region_name="us-east-1")
# Wrong id
with pytest.raises(ClientError) as ex:
client.disassociate_iam_instance_profile(AssociationId="fake",)
ex.value.response["Error"]["Code"].should.equal("InvalidAssociationID.NotFound")
ex.value.response["Error"]["Message"].should.contain("An invalid association-id of")
@mock_ec2
@mock_cloudformation
def test_cloudformation():
dummy_template_json = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"InstanceProfile": {
"Type": "AWS::IAM::InstanceProfile",
"Properties": {"Path": "/", "Roles": []},
},
"Ec2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"IamInstanceProfile": {"Ref": "InstanceProfile"},
"KeyName": "mykey1",
"ImageId": "ami-7a11e213",
},
},
},
}
client = boto3.client("ec2", region_name="us-east-1")
cf_conn = boto3.client("cloudformation", region_name="us-east-1")
cf_conn.create_stack(
StackName="test_stack", TemplateBody=json.dumps(dummy_template_json)
)
associations = client.describe_iam_instance_profile_associations()
associations["IamInstanceProfileAssociations"].should.have.length_of(1)
associations["IamInstanceProfileAssociations"][0]["IamInstanceProfile"][
"Arn"
].should.contain("test_stack")
cf_conn.delete_stack(StackName="test_stack")
associations = client.describe_iam_instance_profile_associations()
associations["IamInstanceProfileAssociations"].should.have.length_of(0) | 0.606615 | 0.137619 |
import sys, os, os.path, re
__all__ = ['require']
_top = os.path.dirname(os.path.abspath(os.path.normpath(__file__)))
_pkg_cache = None
_import_cache = {}
_pat = re.compile('^([a-z]+)-([0-9]+).([0-9]+)$')
def _load_pkg_cache():
global _pkg_cache
if _pkg_cache is not None:
return _pkg_cache
_pkg_cache = {}
for dir in os.listdir(_top):
m = _pat.match(dir)
if not m:
continue
modname = m.group(1)
modver = (int(m.group(2)), int(m.group(3)))
_pkg_cache.setdefault(modname, []).append((modver, dir))
for vlist in _pkg_cache.values():
vlist.sort(reverse = True)
return _pkg_cache
def _install_path(pkg, newpath):
for p in sys.path:
pname = os.path.basename(p)
m = _pat.match(pname)
if m and m.group(1) == pkg:
sys.path.remove(p)
sys.path.insert(0, newpath)
def require(pkg, reqver):
# parse arg
reqval = tuple([int(n) for n in reqver.split('.')])
need = reqval[:2] # cut minor ver
# check if we already have one installed
if pkg in _import_cache:
got = _import_cache[pkg]
if need[0] != got[0] or reqval > got:
raise ImportError("Request for package '%s' ver '%s', have '%s'" % (
pkg, reqver, '.'.join(got)))
return
# pick best ver from available ones
cache = _load_pkg_cache()
if pkg not in cache:
return
for pkgver, pkgdir in cache[pkg]:
if pkgver[0] == need[0] and pkgver >= need:
# install the best on
_install_path(pkg, os.path.join(_top, pkgdir))
break
inst_ver = reqval
# now import whatever is available
mod = __import__(pkg)
# check if it is actually useful
ver_str = mod.__version__
for i, c in enumerate(ver_str):
if c != '.' and not c.isdigit():
ver_str = ver_str[:i]
break
full_ver = tuple([int(x) for x in ver_str.split('.')])
if full_ver[0] != reqval[0] or reqval > full_ver:
raise ImportError("Request for package '%s' ver '%s', have '%s'" % (
pkg, reqver, '.'.join(full_ver)))
inst_ver = full_ver
# remember full version
_import_cache[pkg] = inst_ver
return mod | python/pkgloader.py | import sys, os, os.path, re
__all__ = ['require']
_top = os.path.dirname(os.path.abspath(os.path.normpath(__file__)))
_pkg_cache = None
_import_cache = {}
_pat = re.compile('^([a-z]+)-([0-9]+).([0-9]+)$')
def _load_pkg_cache():
global _pkg_cache
if _pkg_cache is not None:
return _pkg_cache
_pkg_cache = {}
for dir in os.listdir(_top):
m = _pat.match(dir)
if not m:
continue
modname = m.group(1)
modver = (int(m.group(2)), int(m.group(3)))
_pkg_cache.setdefault(modname, []).append((modver, dir))
for vlist in _pkg_cache.values():
vlist.sort(reverse = True)
return _pkg_cache
def _install_path(pkg, newpath):
for p in sys.path:
pname = os.path.basename(p)
m = _pat.match(pname)
if m and m.group(1) == pkg:
sys.path.remove(p)
sys.path.insert(0, newpath)
def require(pkg, reqver):
# parse arg
reqval = tuple([int(n) for n in reqver.split('.')])
need = reqval[:2] # cut minor ver
# check if we already have one installed
if pkg in _import_cache:
got = _import_cache[pkg]
if need[0] != got[0] or reqval > got:
raise ImportError("Request for package '%s' ver '%s', have '%s'" % (
pkg, reqver, '.'.join(got)))
return
# pick best ver from available ones
cache = _load_pkg_cache()
if pkg not in cache:
return
for pkgver, pkgdir in cache[pkg]:
if pkgver[0] == need[0] and pkgver >= need:
# install the best on
_install_path(pkg, os.path.join(_top, pkgdir))
break
inst_ver = reqval
# now import whatever is available
mod = __import__(pkg)
# check if it is actually useful
ver_str = mod.__version__
for i, c in enumerate(ver_str):
if c != '.' and not c.isdigit():
ver_str = ver_str[:i]
break
full_ver = tuple([int(x) for x in ver_str.split('.')])
if full_ver[0] != reqval[0] or reqval > full_ver:
raise ImportError("Request for package '%s' ver '%s', have '%s'" % (
pkg, reqver, '.'.join(full_ver)))
inst_ver = full_ver
# remember full version
_import_cache[pkg] = inst_ver
return mod | 0.098285 | 0.072538 |
import os
from datetime import datetime, timedelta
import numpy
def salishseacast_paths(timestart, timeend, path, filetype):
"""Generate paths for Salish Seacast forcing
:arg timestart: date from when to start concatenating
:type string: :py:class:'str'
:arg timeend: date at which to stop concatenating
:type string: :py:class:'str'
:arg path: path of input files
:type string: :py:class:'str'
:returns tuple: three tuples containing the arguments to pass to hdf5 file generator functions
:rtype: :py:class:`tuple'
"""
# generate list of dates from daterange given
daterange = [timestart, timeend]
# append all filename strings within daterange to lists
filelist = []
for day in range(numpy.diff(daterange)[0].days + 1):
datestamp = daterange[0] + timedelta(days = day)
datestr1 = datestamp.strftime('%d%b%y').lower()
datestr2 = datestamp.strftime('%Y%m%d')
# check if file exists. exit if it does not. add path to list if it does.
file_path = f'{path}{datestr1}/SalishSea_1h_{datestr2}_{datestr2}_{filetype}.nc'
if not os.path.exists(file_path):
print(f'File {file_path} not found. Check Directory and/or Date Range.')
return False
filelist.append(file_path)
return filelist
def hrdps_paths(timestart, timeend, path):
"""Generate wind input file paths
:arg timestart: date from when to start concatenating
:type string: :py:class:'str'
:arg timeend: date at which to stop concatenating
:type string: :py:class:'str'
:arg path: path of input files
:type string: :py:class:'str'
:returns tuple: tuple containing the arguments to pass to hdf5 file generator function
:rtype: :py:class:`tuple'
"""
# generate list of dates from daterange given
daterange = [timestart, timeend]
# append all filename strings within daterange to list
wind_files = []
for day in range(numpy.diff(daterange)[0].days + 1):
datestamp = daterange[0] + timedelta(days=day)
month = datestamp.month
if month < 10:
month = f'0{str(month)}'
day = datestamp.day
if day < 10:
day = f'0{str(day)}'
year = str(datestamp.year)
# check if file exists. exit if it does not. add path to list if it does.
wind_path = f'{path}ops_y{year}m{month}d{day}.nc'
if not os.path.exists(wind_path):
print(f'File {wind_path} not found. Check Directory and/or Date Range.')
return
wind_files.append(wind_path)
return wind_files
def ww3_paths(timestart, timeend, path):
"""Generate Wave Watch 3 input files paths
:arg timestart: date from when to start concatenating
:type string: :py:class:'str'
:arg timeend: date at which to stop concatenating
:type string: :py:class:'str'
:arg path: path of input files
:type string: :py:class:'str'
:returns tuple: tuple containing the arguments to pass to hdf5 file generator function
:rtype: :py:class:`tuple'
"""
# generate list of dates from daterange given
months = {1: 'jan', 2: 'feb', 3: 'mar', 4: 'apr', 5 : 'may', 6: 'jun', 7: 'jul', 8: 'aug', 9 : 'sep', 10: 'oct', 11 :'nov',12: 'dec'}
daterange = [timestart, timeend]
hindcast = "hindcast" in path
# append all filename strings within daterange to list
wave_files = []
for day in range(numpy.diff(daterange)[0].days + 1):
datestamp = daterange[0] + timedelta(days=day)
datestr2 = datestamp.strftime('%Y%m%d').lower()
monthnm = months[datestamp.month]
day = datestamp.day
if day < 10:
day = f'0{str(day)}'
year = str(datestamp.year)[2:4]
if hindcast:
wave_path = f'{path}{day}{monthnm}{year}/SoG_ww3_fields_{datestr2}.nc'
else:
wave_path = f'{path}{day}{monthnm}{year}/SoG_ww3_fields_{datestr2}_{datestr2}.nc'
if not os.path.exists(wave_path):
print(f'File {wave_path} not found. Check Directory and/or Date Range.')
return False
wave_files.append(wave_path)
return wave_files | scripts/make-hdf5/forcing_paths.py | import os
from datetime import datetime, timedelta
import numpy
def salishseacast_paths(timestart, timeend, path, filetype):
"""Generate paths for Salish Seacast forcing
:arg timestart: date from when to start concatenating
:type string: :py:class:'str'
:arg timeend: date at which to stop concatenating
:type string: :py:class:'str'
:arg path: path of input files
:type string: :py:class:'str'
:returns tuple: three tuples containing the arguments to pass to hdf5 file generator functions
:rtype: :py:class:`tuple'
"""
# generate list of dates from daterange given
daterange = [timestart, timeend]
# append all filename strings within daterange to lists
filelist = []
for day in range(numpy.diff(daterange)[0].days + 1):
datestamp = daterange[0] + timedelta(days = day)
datestr1 = datestamp.strftime('%d%b%y').lower()
datestr2 = datestamp.strftime('%Y%m%d')
# check if file exists. exit if it does not. add path to list if it does.
file_path = f'{path}{datestr1}/SalishSea_1h_{datestr2}_{datestr2}_{filetype}.nc'
if not os.path.exists(file_path):
print(f'File {file_path} not found. Check Directory and/or Date Range.')
return False
filelist.append(file_path)
return filelist
def hrdps_paths(timestart, timeend, path):
"""Generate wind input file paths
:arg timestart: date from when to start concatenating
:type string: :py:class:'str'
:arg timeend: date at which to stop concatenating
:type string: :py:class:'str'
:arg path: path of input files
:type string: :py:class:'str'
:returns tuple: tuple containing the arguments to pass to hdf5 file generator function
:rtype: :py:class:`tuple'
"""
# generate list of dates from daterange given
daterange = [timestart, timeend]
# append all filename strings within daterange to list
wind_files = []
for day in range(numpy.diff(daterange)[0].days + 1):
datestamp = daterange[0] + timedelta(days=day)
month = datestamp.month
if month < 10:
month = f'0{str(month)}'
day = datestamp.day
if day < 10:
day = f'0{str(day)}'
year = str(datestamp.year)
# check if file exists. exit if it does not. add path to list if it does.
wind_path = f'{path}ops_y{year}m{month}d{day}.nc'
if not os.path.exists(wind_path):
print(f'File {wind_path} not found. Check Directory and/or Date Range.')
return
wind_files.append(wind_path)
return wind_files
def ww3_paths(timestart, timeend, path):
"""Generate Wave Watch 3 input files paths
:arg timestart: date from when to start concatenating
:type string: :py:class:'str'
:arg timeend: date at which to stop concatenating
:type string: :py:class:'str'
:arg path: path of input files
:type string: :py:class:'str'
:returns tuple: tuple containing the arguments to pass to hdf5 file generator function
:rtype: :py:class:`tuple'
"""
# generate list of dates from daterange given
months = {1: 'jan', 2: 'feb', 3: 'mar', 4: 'apr', 5 : 'may', 6: 'jun', 7: 'jul', 8: 'aug', 9 : 'sep', 10: 'oct', 11 :'nov',12: 'dec'}
daterange = [timestart, timeend]
hindcast = "hindcast" in path
# append all filename strings within daterange to list
wave_files = []
for day in range(numpy.diff(daterange)[0].days + 1):
datestamp = daterange[0] + timedelta(days=day)
datestr2 = datestamp.strftime('%Y%m%d').lower()
monthnm = months[datestamp.month]
day = datestamp.day
if day < 10:
day = f'0{str(day)}'
year = str(datestamp.year)[2:4]
if hindcast:
wave_path = f'{path}{day}{monthnm}{year}/SoG_ww3_fields_{datestr2}.nc'
else:
wave_path = f'{path}{day}{monthnm}{year}/SoG_ww3_fields_{datestr2}_{datestr2}.nc'
if not os.path.exists(wave_path):
print(f'File {wave_path} not found. Check Directory and/or Date Range.')
return False
wave_files.append(wave_path)
return wave_files | 0.647464 | 0.480235 |
import numpy as np
from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler
from sklearn.preprocessing import RobustScaler, StandardScaler
from ..algebra.onnx_ops import OnnxSub, OnnxDiv, OnnxCast
from ..common._registration import register_converter
from ..common.data_types import guess_numpy_type, guess_proto_type
from ..proto import onnx_proto
from .common import concatenate_variables
def convert_sklearn_scaler(scope, operator, container):
# If there are multiple input variables, we need to combine them as a
# whole tensor. Integer(s) would be converted to float(s).
# Options div use true division instead of Scaler operator
# which replaces a division by a multiplication.
# This leads to discrepencies in some cases.
if len(operator.inputs) > 1:
feature_name = concatenate_variables(scope, operator.inputs, container)
else:
feature_name = operator.inputs[0].full_name
op = operator.raw_operator
op_type = 'Scaler'
attrs = {'name': scope.get_unique_operator_name(op_type)}
if isinstance(op, StandardScaler):
C = (operator.inputs[0].type.shape[1]
if len(operator.inputs[0].type.shape) == 2 else 1)
attrs['offset'] = op.mean_ if op.with_mean else [0.0] * C
attrs['scale'] = 1.0 / op.scale_ if op.with_std else [1.0] * C
inv_scale = op.scale_ if op.with_std else None
elif isinstance(op, RobustScaler):
C = (operator.inputs[0].type.shape[1]
if len(operator.inputs[0].type.shape) == 2 else 1)
attrs['offset'] = op.center_ if op.with_centering else [0.0] * C
attrs['scale'] = 1.0 / op.scale_ if op.with_scaling else [1.0] * C
inv_scale = op.scale_ if op.with_scaling else None
elif isinstance(op, MinMaxScaler):
attrs['scale'] = op.scale_
# Add 1e-8 to avoid divided by 0
attrs['offset'] = -op.min_/(op.scale_ + 1e-8)
inv_scale = None
elif isinstance(op, MaxAbsScaler):
C = (operator.inputs[0].type.shape[1]
if len(operator.inputs[0].type.shape) == 2 else 1)
attrs['scale'] = 1.0 / op.scale_
attrs['offset'] = [0.] * C
inv_scale = op.scale_
else:
raise ValueError('Only scikit-learn StandardScaler and RobustScaler '
'are supported but got %s. You may raise '
'an issue at '
'https://github.com/onnx/sklearn-onnx/issues.'
'' % type(op))
proto_dtype = guess_proto_type(operator.inputs[0].type)
if proto_dtype != onnx_proto.TensorProto.DOUBLE:
proto_dtype = onnx_proto.TensorProto.FLOAT
dtype = guess_numpy_type(operator.inputs[0].type)
if dtype != np.float64:
dtype = np.float32
for k in attrs:
v = attrs[k]
if isinstance(v, np.ndarray) and v.dtype != dtype:
attrs[k] = v.astype(dtype)
if dtype == np.float64:
opv = container.target_opset
sub = OnnxSub(
feature_name, attrs['offset'].astype(dtype),
op_version=opv)
div = OnnxDiv(sub, inv_scale.astype(dtype),
op_version=opv,
output_names=[operator.outputs[0].full_name])
div.add_to(scope, container)
return
if inv_scale is not None:
options = container.get_options(op, dict(div='std'))
div = options['div']
if div == 'div':
opv = container.target_opset
sub = OnnxSub(
feature_name, attrs['offset'].astype(dtype),
op_version=opv)
div = OnnxDiv(sub, inv_scale.astype(dtype),
op_version=opv,
output_names=[operator.outputs[0].full_name])
div.add_to(scope, container)
return
if div == 'div_cast':
opv = container.target_opset
cast = OnnxCast(feature_name, to=onnx_proto.TensorProto.DOUBLE,
op_version=opv)
sub = OnnxSub(cast, attrs['offset'].astype(np.float64),
op_version=opv)
div = OnnxDiv(sub, inv_scale.astype(np.float64), op_version=opv)
cast = OnnxCast(div, to=proto_dtype, op_version=opv,
output_names=[operator.outputs[0].full_name])
cast.add_to(scope, container)
return
container.add_node(
op_type, feature_name, operator.outputs[0].full_name,
op_domain='ai.onnx.ml', **attrs)
register_converter('SklearnRobustScaler', convert_sklearn_scaler,
options={'div': ['std', 'div', 'div_cast']})
register_converter('SklearnScaler', convert_sklearn_scaler,
options={'div': ['std', 'div', 'div_cast']})
register_converter('SklearnMinMaxScaler', convert_sklearn_scaler)
register_converter('SklearnMaxAbsScaler', convert_sklearn_scaler,
options={'div': ['std', 'div', 'div_cast']}) | skl2onnx/operator_converters/scaler_op.py |
import numpy as np
from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler
from sklearn.preprocessing import RobustScaler, StandardScaler
from ..algebra.onnx_ops import OnnxSub, OnnxDiv, OnnxCast
from ..common._registration import register_converter
from ..common.data_types import guess_numpy_type, guess_proto_type
from ..proto import onnx_proto
from .common import concatenate_variables
def convert_sklearn_scaler(scope, operator, container):
# If there are multiple input variables, we need to combine them as a
# whole tensor. Integer(s) would be converted to float(s).
# Options div use true division instead of Scaler operator
# which replaces a division by a multiplication.
# This leads to discrepencies in some cases.
if len(operator.inputs) > 1:
feature_name = concatenate_variables(scope, operator.inputs, container)
else:
feature_name = operator.inputs[0].full_name
op = operator.raw_operator
op_type = 'Scaler'
attrs = {'name': scope.get_unique_operator_name(op_type)}
if isinstance(op, StandardScaler):
C = (operator.inputs[0].type.shape[1]
if len(operator.inputs[0].type.shape) == 2 else 1)
attrs['offset'] = op.mean_ if op.with_mean else [0.0] * C
attrs['scale'] = 1.0 / op.scale_ if op.with_std else [1.0] * C
inv_scale = op.scale_ if op.with_std else None
elif isinstance(op, RobustScaler):
C = (operator.inputs[0].type.shape[1]
if len(operator.inputs[0].type.shape) == 2 else 1)
attrs['offset'] = op.center_ if op.with_centering else [0.0] * C
attrs['scale'] = 1.0 / op.scale_ if op.with_scaling else [1.0] * C
inv_scale = op.scale_ if op.with_scaling else None
elif isinstance(op, MinMaxScaler):
attrs['scale'] = op.scale_
# Add 1e-8 to avoid divided by 0
attrs['offset'] = -op.min_/(op.scale_ + 1e-8)
inv_scale = None
elif isinstance(op, MaxAbsScaler):
C = (operator.inputs[0].type.shape[1]
if len(operator.inputs[0].type.shape) == 2 else 1)
attrs['scale'] = 1.0 / op.scale_
attrs['offset'] = [0.] * C
inv_scale = op.scale_
else:
raise ValueError('Only scikit-learn StandardScaler and RobustScaler '
'are supported but got %s. You may raise '
'an issue at '
'https://github.com/onnx/sklearn-onnx/issues.'
'' % type(op))
proto_dtype = guess_proto_type(operator.inputs[0].type)
if proto_dtype != onnx_proto.TensorProto.DOUBLE:
proto_dtype = onnx_proto.TensorProto.FLOAT
dtype = guess_numpy_type(operator.inputs[0].type)
if dtype != np.float64:
dtype = np.float32
for k in attrs:
v = attrs[k]
if isinstance(v, np.ndarray) and v.dtype != dtype:
attrs[k] = v.astype(dtype)
if dtype == np.float64:
opv = container.target_opset
sub = OnnxSub(
feature_name, attrs['offset'].astype(dtype),
op_version=opv)
div = OnnxDiv(sub, inv_scale.astype(dtype),
op_version=opv,
output_names=[operator.outputs[0].full_name])
div.add_to(scope, container)
return
if inv_scale is not None:
options = container.get_options(op, dict(div='std'))
div = options['div']
if div == 'div':
opv = container.target_opset
sub = OnnxSub(
feature_name, attrs['offset'].astype(dtype),
op_version=opv)
div = OnnxDiv(sub, inv_scale.astype(dtype),
op_version=opv,
output_names=[operator.outputs[0].full_name])
div.add_to(scope, container)
return
if div == 'div_cast':
opv = container.target_opset
cast = OnnxCast(feature_name, to=onnx_proto.TensorProto.DOUBLE,
op_version=opv)
sub = OnnxSub(cast, attrs['offset'].astype(np.float64),
op_version=opv)
div = OnnxDiv(sub, inv_scale.astype(np.float64), op_version=opv)
cast = OnnxCast(div, to=proto_dtype, op_version=opv,
output_names=[operator.outputs[0].full_name])
cast.add_to(scope, container)
return
container.add_node(
op_type, feature_name, operator.outputs[0].full_name,
op_domain='ai.onnx.ml', **attrs)
register_converter('SklearnRobustScaler', convert_sklearn_scaler,
options={'div': ['std', 'div', 'div_cast']})
register_converter('SklearnScaler', convert_sklearn_scaler,
options={'div': ['std', 'div', 'div_cast']})
register_converter('SklearnMinMaxScaler', convert_sklearn_scaler)
register_converter('SklearnMaxAbsScaler', convert_sklearn_scaler,
options={'div': ['std', 'div', 'div_cast']}) | 0.709019 | 0.262529 |
import numpy as np
from scipy.optimize import curve_fit
def sigmoid_fxn(x, b, r, m):
    """Evaluate the sigmoid at ``x`` (scalar or numpy array).

    ``b`` is the steepness, ``r`` the maximum y-value of the curve, and
    ``m`` the x-value where the curve sits at 50% height.  The form is
    constrained so the lower y-bound is always 0.
    """
    # Algebraic sigmoid: t / sqrt(t^2 + 1) maps (-inf, inf) -> (-1, 1),
    # shifted and scaled into (0, r).
    t = b * (m - x)
    return r / 2.0 * (t / np.sqrt(t ** 2 + 1) + 1)
def initial_sigmoid_params(x, y):
    """Return reasonable initial guesses ``[b, r, m]`` for a sigmoid fit.

    Steepness ``b`` is scaled to the x-range, the plateau ``r`` is taken
    as the largest observed y, and the midpoint ``m`` is the center of
    the x-range.
    """
    x_hi = max(x)
    x_lo = min(x)
    steepness = 7.0 / x_hi
    plateau = max(y)
    midpoint = (x_hi + x_lo) / 2.0
    return [steepness, plateau, midpoint]
def fit_to_sigmoid(x, y, r_value=None):
"""Expects x and y to be lists of floats. If r_value is None, the r parameter will be estimated from the data. Otherwise it will be constrained to r_value."""
init_params = initial_sigmoid_params(x, y)
if r_value == None:
opt_params, cov = curve_fit(sigmoid_fxn, xdata=x, ydata=y, p0=init_params)
else:
def sigmoid_constrained(x, b, m):
return sigmoid_fxn(x, b, r_value, m)
init_params = [init_params[0], init_params[2]]
opt_params, cov = curve_fit(sigmoid_constrained, xdata=x, ydata=y, p0=init_params)
opt_params = np.append(opt_params, r_value) # Why do I have both
opt_params = np.array([opt_params[0], r_value, opt_params[1]]) # of these calls?
return opt_params
def multi_sigmoid_fxn(*args):
"""Expects args = xs, b, r, m0, m1, m2...; xs is a list of lists, where each sublist contains the x_values for one data set; b is the steepness; r is the max y_value of the curves; m is the x_value where the curve is at 50% height. The b and r parameters are shared and so will be applied to all data sets, while each data set has its own m value. The equation has been constrained such that the lower y_bound is always 0."""
xs, b, r = args[:3]
ms = args[3:]
y_vals = [r/2.0 * (b*(m-x_vals) / np.sqrt((b*(m-x_vals))**2 + 1) + 1) for x_vals, m in zip(xs, ms)]
return np.hstack(y_vals) # flattens the values
def fit_multiple_sigmoids(xs, ys, r_value=None):
y_data = np.hstack(ys)
bs, rs, ms = [], [], []
for x_vals, y_vals in zip(xs, ys):
b, r, m = initial_sigmoid_params(x_vals, y_vals)
bs.append(b)
rs.append(r)
ms.append(m)
if r_value == None:
init_params = [sum(bs)/float(len(bs)), sum(rs)/float(len(rs))] + ms
opt_params, cov = curve_fit(multi_sigmoid_fxn, xdata=xs, ydata=y_data, p0=init_params)
else:
def multi_sigmoid_constrained(*args):
xs, b = args[:2]
ms = args[2:]
return multi_sigmoid_fxn(xs, b, r_value, *ms)
init_params = [init_params[0]] + init_params[2:]
opt_params, cov = curve_fit(multi_sigmoid_constrained, xdata=xs, ydata=y_data, p0=init_params)
opt_params = np.array([opt_params[0], r_value] + opt_params[1:])
return opt_params | navargator_resources/curve_fitting.py | import numpy as np
from scipy.optimize import curve_fit
def sigmoid_fxn(x, b, r, m):
"""x can be a single value or a numpy array of x_values; b is the steepness; r is the max y_value of the curve; m is the x_value where the curve is at 50% height. The equation has been constrained such that the lower y_bound is always 0."""
return r/2.0 * (b*(m-x) / np.sqrt((b*(m-x))**2 + 1) + 1)
def initial_sigmoid_params(x, y):
"""Reasonable initial guesses for the parameters."""
b = 7.0 / max(x)
r = max(y)
m = (max(x) + min(x)) / 2.0
return [b, r, m]
def fit_to_sigmoid(x, y, r_value=None):
"""Expects x and y to be lists of floats. If r_value is None, the r parameter will be estimated from the data. Otherwise it will be constrained to r_value."""
init_params = initial_sigmoid_params(x, y)
if r_value == None:
opt_params, cov = curve_fit(sigmoid_fxn, xdata=x, ydata=y, p0=init_params)
else:
def sigmoid_constrained(x, b, m):
return sigmoid_fxn(x, b, r_value, m)
init_params = [init_params[0], init_params[2]]
opt_params, cov = curve_fit(sigmoid_constrained, xdata=x, ydata=y, p0=init_params)
opt_params = np.append(opt_params, r_value) # Why do I have both
opt_params = np.array([opt_params[0], r_value, opt_params[1]]) # of these calls?
return opt_params
def multi_sigmoid_fxn(*args):
"""Expects args = xs, b, r, m0, m1, m2...; xs is a list of lists, where each sublist contains the x_values for one data set; b is the steepness; r is the max y_value of the curves; m is the x_value where the curve is at 50% height. The b and r parameters are shared and so will be applied to all data sets, while each data set has its own m value. The equation has been constrained such that the lower y_bound is always 0."""
xs, b, r = args[:3]
ms = args[3:]
y_vals = [r/2.0 * (b*(m-x_vals) / np.sqrt((b*(m-x_vals))**2 + 1) + 1) for x_vals, m in zip(xs, ms)]
return np.hstack(y_vals) # flattens the values
def fit_multiple_sigmoids(xs, ys, r_value=None):
y_data = np.hstack(ys)
bs, rs, ms = [], [], []
for x_vals, y_vals in zip(xs, ys):
b, r, m = initial_sigmoid_params(x_vals, y_vals)
bs.append(b)
rs.append(r)
ms.append(m)
if r_value == None:
init_params = [sum(bs)/float(len(bs)), sum(rs)/float(len(rs))] + ms
opt_params, cov = curve_fit(multi_sigmoid_fxn, xdata=xs, ydata=y_data, p0=init_params)
else:
def multi_sigmoid_constrained(*args):
xs, b = args[:2]
ms = args[2:]
return multi_sigmoid_fxn(xs, b, r_value, *ms)
init_params = [init_params[0]] + init_params[2:]
opt_params, cov = curve_fit(multi_sigmoid_constrained, xdata=xs, ydata=y_data, p0=init_params)
opt_params = np.array([opt_params[0], r_value] + opt_params[1:])
return opt_params | 0.836988 | 0.771801 |
from typing import Optional, Tuple
import torch
import torch.nn as nn
from kornia.testing import KORNIA_CHECK_SHAPE
def match_nn(
    desc1: torch.Tensor, desc2: torch.Tensor, dm: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
    r"""Find the nearest neighbour in ``desc2`` for every descriptor in ``desc1``.

    If the distance matrix ``dm`` is not given, it is computed with
    :py:func:`torch.cdist`.

    Args:
        desc1: Batch of descriptors of a shape :math:`(B1, D)`.
        desc2: Batch of descriptors of a shape :math:`(B2, D)`.
        dm: Precomputed distance matrix, shape of :math:`(B1, B2)`.

    Returns:
        - Descriptor distance of matching descriptors, shape of :math:`(B1, 1)`.
        - Long tensor indexes of matching descriptors in desc1 and desc2, shape of :math:`(B1, 2)`.
    """
    KORNIA_CHECK_SHAPE(desc1, ["B", "DIM"])
    KORNIA_CHECK_SHAPE(desc2, ["B", "DIM"])
    if dm is None:
        dm = torch.cdist(desc1, desc2)
    elif dm.size(0) != desc1.size(0) or dm.size(1) != desc2.size(0):
        # A user-supplied matrix must agree with the descriptor counts.
        raise AssertionError
    # Row-wise minimum: the best candidate in desc2 for each desc1 row.
    best_dists, nn_in_2 = dm.min(dim=1)
    row_ids = torch.arange(nn_in_2.size(0), device=nn_in_2.device)
    pairs = torch.stack([row_ids, nn_in_2], dim=1)
    return best_dists.view(-1, 1), pairs.view(-1, 2)
def match_mnn(
desc1: torch.Tensor, desc2: torch.Tensor, dm: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Function, which finds mutual nearest neighbors in desc2 for each vector in desc1.
If the distance matrix dm is not provided, :py:func:`torch.cdist` is used.
Args:
desc1: Batch of descriptors of a shape :math:`(B1, D)`.
desc2: Batch of descriptors of a shape :math:`(B2, D)`.
dm: Tensor containing the distances from each descriptor in desc1
to each descriptor in desc2, shape of :math:`(B1, B2)`.
Return:
- Descriptor distance of matching descriptors, shape of. :math:`(B3, 1)`.
- Long tensor indexes of matching descriptors in desc1 and desc2, shape of :math:`(B3, 2)`,
where 0 <= B3 <= min(B1, B2)
"""
KORNIA_CHECK_SHAPE(desc1, ["B", "DIM"])
KORNIA_CHECK_SHAPE(desc2, ["B", "DIM"])
if dm is None:
dm = torch.cdist(desc1, desc2)
else:
# A user-supplied distance matrix must match the descriptor counts (B1, B2).
if not ((dm.size(0) == desc1.size(0)) and (dm.size(1) == desc2.size(0))):
raise AssertionError
# At most min(B1, B2) mutual pairs can exist.
ms = min(dm.size(0), dm.size(1))
# Nearest neighbour in each direction: per-row (desc1 -> desc2) and per-column (desc2 -> desc1).
match_dists, idxs_in_2 = torch.min(dm, dim=1)
match_dists2, idxs_in_1 = torch.min(dm, dim=0)
minsize_idxs = torch.arange(ms, device=dm.device)
# Mutuality is checked from the smaller side: index i is kept only when the
# nearest neighbour of its nearest neighbour is i itself.
if dm.size(0) <= dm.size(1):
mutual_nns = minsize_idxs == idxs_in_1[idxs_in_2][:ms]
matches_idxs = torch.cat([minsize_idxs.view(-1, 1), idxs_in_2.view(-1, 1)], dim=1)[mutual_nns]
match_dists = match_dists[mutual_nns]
else:
mutual_nns = minsize_idxs == idxs_in_2[idxs_in_1][:ms]
matches_idxs = torch.cat([idxs_in_1.view(-1, 1), minsize_idxs.view(-1, 1)], dim=1)[mutual_nns]
match_dists = match_dists2[mutual_nns]
return match_dists.view(-1, 1), matches_idxs.view(-1, 2)
def match_snn(
    desc1: torch.Tensor, desc2: torch.Tensor, th: float = 0.8, dm: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Match each descriptor in ``desc1`` to its nearest neighbour in ``desc2``,
    keeping only matches that pass the first-to-second NN distance ratio test.

    If the distance matrix ``dm`` is not provided, :py:func:`torch.cdist` is used.

    Args:
        desc1: Batch of descriptors of a shape :math:`(B1, D)`.
        desc2: Batch of descriptors of a shape :math:`(B2, D)`.
        th: distance ratio threshold; a match survives when first/second <= th.
        dm: Precomputed distance matrix, shape of :math:`(B1, B2)`.

    Return:
        - First-to-second NN distance ratio per surviving match, shape of :math:`(B3, 1)`.
        - Long tensor of (desc1, desc2) index pairs, shape of :math:`(B3, 2)`,
          where 0 <= B3 <= B1.
    """
    KORNIA_CHECK_SHAPE(desc1, ["B", "DIM"])
    KORNIA_CHECK_SHAPE(desc2, ["B", "DIM"])
    # The ratio test needs at least two candidates in desc2.
    if desc2.shape[0] < 2:
        raise AssertionError
    if dm is None:
        dm = torch.cdist(desc1, desc2)
    elif dm.size(0) != desc1.size(0) or dm.size(1) != desc2.size(0):
        raise AssertionError
    # Two smallest distances per row: the first and second nearest neighbours.
    two_best, two_idxs = torch.topk(dm, 2, dim=1, largest=False)
    ratio = two_best[:, 0] / two_best[:, 1]
    keep = ratio <= th
    query_ids = torch.arange(0, two_idxs.size(0), device=dm.device)[keep]
    nn_ids = two_idxs[keep, 0]
    matches_idxs = torch.cat([query_ids.view(-1, 1), nn_ids.view(-1, 1)], dim=1)
    return ratio[keep].view(-1, 1), matches_idxs.view(-1, 2)
def match_smnn(
desc1: torch.Tensor, desc2: torch.Tensor, th: float = 0.8, dm: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Function, which finds mutual nearest neighbors in desc2 for each vector in desc1.
the method satisfies first to second nearest neighbor distance <= th.
If the distance matrix dm is not provided, :py:func:`torch.cdist` is used.
Args:
desc1: Batch of descriptors of a shape :math:`(B1, D)`.
desc2: Batch of descriptors of a shape :math:`(B2, D)`.
th: distance ratio threshold.
dm: Tensor containing the distances from each descriptor in desc1
to each descriptor in desc2, shape of :math:`(B1, B2)`.
Return:
- Descriptor distance of matching descriptors, shape of. :math:`(B3, 1)`.
- Long tensor indexes of matching descriptors in desc1 and desc2,
shape of :math:`(B3, 2)` where 0 <= B3 <= B1.
"""
KORNIA_CHECK_SHAPE(desc1, ["B", "DIM"])
KORNIA_CHECK_SHAPE(desc2, ["B", "DIM"])
# The underlying ratio test needs at least two candidates on each side.
if desc1.shape[0] < 2:
raise AssertionError
if desc2.shape[0] < 2:
raise AssertionError
if dm is None:
dm = torch.cdist(desc1, desc2)
else:
if not ((dm.size(0) == desc1.size(0)) and (dm.size(1) == desc2.size(0))):
raise AssertionError
# Run the one-directional ratio test in both directions; dm is transposed for
# the reverse pass so it is not recomputed.
dists1, idx1 = match_snn(desc1, desc2, th, dm)
dists2, idx2 = match_snn(desc2, desc1, th, dm.t())
if len(dists2) > 0 and len(dists1) > 0:
# Bring idx2 pairs into (desc1, desc2) column order so both sets compare.
idx2 = idx2.flip(1)
# L1 distance between index pairs: exactly 0 (< 1e-8) means the same pair
# was found in both directions, i.e. the match is mutual.
idxs_dm = torch.cdist(idx1.float(), idx2.float(), p=1.0)
mutual_idxs1 = idxs_dm.min(dim=1)[0] < 1e-8
mutual_idxs2 = idxs_dm.min(dim=0)[0] < 1e-8
good_idxs1 = idx1[mutual_idxs1.view(-1)]
good_idxs2 = idx2[mutual_idxs2.view(-1)]
dists1_good = dists1[mutual_idxs1.view(-1)]
dists2_good = dists2[mutual_idxs2.view(-1)]
# Sort both surviving sets by the desc1 index; assumes the two sets contain
# the same mutual pairs so sorted rows line up element-wise — TODO confirm
# this alignment when several pairs share a desc1 index.
_, idx_upl1 = torch.sort(good_idxs1[:, 0])
_, idx_upl2 = torch.sort(good_idxs2[:, 0])
good_idxs1 = good_idxs1[idx_upl1]
# Keep the worse (larger) of the two directional ratios as the match score.
match_dists = torch.max(dists1_good[idx_upl1], dists2_good[idx_upl2])
matches_idxs = good_idxs1
else:
# No candidate survived the ratio test in at least one direction.
matches_idxs, match_dists = torch.empty(0, 2, device=dm.device), torch.empty(0, 1, device=dm.device)
return match_dists.view(-1, 1), matches_idxs.view(-1, 2)
class DescriptorMatcher(nn.Module):
"""Module version of matching functions.
See :func:`~kornia.feature.match_nn`, :func:`~kornia.feature.match_snn`,
:func:`~kornia.feature.match_mnn` or :func:`~kornia.feature.match_smnn` for more details.
Args:
match_mode: type of matching, can be `nn`, `snn`, `mnn`, `smnn`.
th: threshold on distance ratio, or other quality measure.
"""
# Accepted values for ``match_mode`` (compared after lowercasing in __init__).
known_modes = ['nn', 'mnn', 'snn', 'smnn']
def __init__(self, match_mode: str = 'snn', th: float = 0.8) -> None:
super().__init__()
# Normalise the mode once so forward() can compare against lowercase names.
_match_mode: str = match_mode.lower()
if _match_mode not in self.known_modes:
raise NotImplementedError(f"{match_mode} is not supported. Try one of {self.known_modes}")
self.match_mode = _match_mode
self.th = th
def forward(self, desc1: torch.Tensor, desc2: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
desc1: Batch of descriptors of a shape :math:`(B1, D)`.
desc2: Batch of descriptors of a shape :math:`(B2, D)`.
Return:
- Descriptor distance of matching descriptors, shape of :math:`(B3, 1)`.
- Long tensor indexes of matching descriptors in desc1 and desc2,
shape of :math:`(B3, 2)` where :math:`0 <= B3 <= B1`.
"""
# Dispatch to the matching function chosen at construction time; ``th``
# is only used by the ratio-test modes ('snn' and 'smnn').
if self.match_mode == 'nn':
out = match_nn(desc1, desc2)
elif self.match_mode == 'mnn':
out = match_mnn(desc1, desc2)
elif self.match_mode == 'snn':
out = match_snn(desc1, desc2, self.th)
elif self.match_mode == 'smnn':
out = match_smnn(desc1, desc2, self.th)
else:
# Unreachable in practice: __init__ already validated the mode.
raise NotImplementedError
return out | kornia/feature/matching.py | from typing import Optional, Tuple
import torch
import torch.nn as nn
from kornia.testing import KORNIA_CHECK_SHAPE
def match_nn(
    desc1: torch.Tensor, desc2: torch.Tensor, dm: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
    r"""Find the nearest neighbour in ``desc2`` for every descriptor in ``desc1``.

    If the distance matrix ``dm`` is not given, it is computed with
    :py:func:`torch.cdist`.

    Args:
        desc1: Batch of descriptors of a shape :math:`(B1, D)`.
        desc2: Batch of descriptors of a shape :math:`(B2, D)`.
        dm: Precomputed distance matrix, shape of :math:`(B1, B2)`.

    Returns:
        - Descriptor distance of matching descriptors, shape of :math:`(B1, 1)`.
        - Long tensor indexes of matching descriptors in desc1 and desc2, shape of :math:`(B1, 2)`.
    """
    KORNIA_CHECK_SHAPE(desc1, ["B", "DIM"])
    KORNIA_CHECK_SHAPE(desc2, ["B", "DIM"])
    if dm is None:
        dm = torch.cdist(desc1, desc2)
    elif dm.size(0) != desc1.size(0) or dm.size(1) != desc2.size(0):
        # A user-supplied matrix must agree with the descriptor counts.
        raise AssertionError
    # Row-wise minimum: the best candidate in desc2 for each desc1 row.
    best_dists, nn_in_2 = dm.min(dim=1)
    row_ids = torch.arange(nn_in_2.size(0), device=nn_in_2.device)
    pairs = torch.stack([row_ids, nn_in_2], dim=1)
    return best_dists.view(-1, 1), pairs.view(-1, 2)
def match_mnn(
desc1: torch.Tensor, desc2: torch.Tensor, dm: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Function, which finds mutual nearest neighbors in desc2 for each vector in desc1.
If the distance matrix dm is not provided, :py:func:`torch.cdist` is used.
Args:
desc1: Batch of descriptors of a shape :math:`(B1, D)`.
desc2: Batch of descriptors of a shape :math:`(B2, D)`.
dm: Tensor containing the distances from each descriptor in desc1
to each descriptor in desc2, shape of :math:`(B1, B2)`.
Return:
- Descriptor distance of matching descriptors, shape of. :math:`(B3, 1)`.
- Long tensor indexes of matching descriptors in desc1 and desc2, shape of :math:`(B3, 2)`,
where 0 <= B3 <= min(B1, B2)
"""
KORNIA_CHECK_SHAPE(desc1, ["B", "DIM"])
KORNIA_CHECK_SHAPE(desc2, ["B", "DIM"])
if dm is None:
dm = torch.cdist(desc1, desc2)
else:
# A user-supplied distance matrix must match the descriptor counts (B1, B2).
if not ((dm.size(0) == desc1.size(0)) and (dm.size(1) == desc2.size(0))):
raise AssertionError
# At most min(B1, B2) mutual pairs can exist.
ms = min(dm.size(0), dm.size(1))
# Nearest neighbour in each direction: per-row (desc1 -> desc2) and per-column (desc2 -> desc1).
match_dists, idxs_in_2 = torch.min(dm, dim=1)
match_dists2, idxs_in_1 = torch.min(dm, dim=0)
minsize_idxs = torch.arange(ms, device=dm.device)
# Mutuality is checked from the smaller side: index i is kept only when the
# nearest neighbour of its nearest neighbour is i itself.
if dm.size(0) <= dm.size(1):
mutual_nns = minsize_idxs == idxs_in_1[idxs_in_2][:ms]
matches_idxs = torch.cat([minsize_idxs.view(-1, 1), idxs_in_2.view(-1, 1)], dim=1)[mutual_nns]
match_dists = match_dists[mutual_nns]
else:
mutual_nns = minsize_idxs == idxs_in_2[idxs_in_1][:ms]
matches_idxs = torch.cat([idxs_in_1.view(-1, 1), minsize_idxs.view(-1, 1)], dim=1)[mutual_nns]
match_dists = match_dists2[mutual_nns]
return match_dists.view(-1, 1), matches_idxs.view(-1, 2)
def match_snn(
    desc1: torch.Tensor, desc2: torch.Tensor, th: float = 0.8, dm: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Match each descriptor in ``desc1`` to its nearest neighbour in ``desc2``,
    keeping only matches that pass the first-to-second NN distance ratio test.

    If the distance matrix ``dm`` is not provided, :py:func:`torch.cdist` is used.

    Args:
        desc1: Batch of descriptors of a shape :math:`(B1, D)`.
        desc2: Batch of descriptors of a shape :math:`(B2, D)`.
        th: distance ratio threshold; a match survives when first/second <= th.
        dm: Precomputed distance matrix, shape of :math:`(B1, B2)`.

    Return:
        - First-to-second NN distance ratio per surviving match, shape of :math:`(B3, 1)`.
        - Long tensor of (desc1, desc2) index pairs, shape of :math:`(B3, 2)`,
          where 0 <= B3 <= B1.
    """
    KORNIA_CHECK_SHAPE(desc1, ["B", "DIM"])
    KORNIA_CHECK_SHAPE(desc2, ["B", "DIM"])
    # The ratio test needs at least two candidates in desc2.
    if desc2.shape[0] < 2:
        raise AssertionError
    if dm is None:
        dm = torch.cdist(desc1, desc2)
    elif dm.size(0) != desc1.size(0) or dm.size(1) != desc2.size(0):
        raise AssertionError
    # Two smallest distances per row: the first and second nearest neighbours.
    two_best, two_idxs = torch.topk(dm, 2, dim=1, largest=False)
    ratio = two_best[:, 0] / two_best[:, 1]
    keep = ratio <= th
    query_ids = torch.arange(0, two_idxs.size(0), device=dm.device)[keep]
    nn_ids = two_idxs[keep, 0]
    matches_idxs = torch.cat([query_ids.view(-1, 1), nn_ids.view(-1, 1)], dim=1)
    return ratio[keep].view(-1, 1), matches_idxs.view(-1, 2)
def match_smnn(
desc1: torch.Tensor, desc2: torch.Tensor, th: float = 0.8, dm: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Function, which finds mutual nearest neighbors in desc2 for each vector in desc1.
the method satisfies first to second nearest neighbor distance <= th.
If the distance matrix dm is not provided, :py:func:`torch.cdist` is used.
Args:
desc1: Batch of descriptors of a shape :math:`(B1, D)`.
desc2: Batch of descriptors of a shape :math:`(B2, D)`.
th: distance ratio threshold.
dm: Tensor containing the distances from each descriptor in desc1
to each descriptor in desc2, shape of :math:`(B1, B2)`.
Return:
- Descriptor distance of matching descriptors, shape of. :math:`(B3, 1)`.
- Long tensor indexes of matching descriptors in desc1 and desc2,
shape of :math:`(B3, 2)` where 0 <= B3 <= B1.
"""
KORNIA_CHECK_SHAPE(desc1, ["B", "DIM"])
KORNIA_CHECK_SHAPE(desc2, ["B", "DIM"])
# The underlying ratio test needs at least two candidates on each side.
if desc1.shape[0] < 2:
raise AssertionError
if desc2.shape[0] < 2:
raise AssertionError
if dm is None:
dm = torch.cdist(desc1, desc2)
else:
if not ((dm.size(0) == desc1.size(0)) and (dm.size(1) == desc2.size(0))):
raise AssertionError
# Run the one-directional ratio test in both directions; dm is transposed for
# the reverse pass so it is not recomputed.
dists1, idx1 = match_snn(desc1, desc2, th, dm)
dists2, idx2 = match_snn(desc2, desc1, th, dm.t())
if len(dists2) > 0 and len(dists1) > 0:
# Bring idx2 pairs into (desc1, desc2) column order so both sets compare.
idx2 = idx2.flip(1)
# L1 distance between index pairs: exactly 0 (< 1e-8) means the same pair
# was found in both directions, i.e. the match is mutual.
idxs_dm = torch.cdist(idx1.float(), idx2.float(), p=1.0)
mutual_idxs1 = idxs_dm.min(dim=1)[0] < 1e-8
mutual_idxs2 = idxs_dm.min(dim=0)[0] < 1e-8
good_idxs1 = idx1[mutual_idxs1.view(-1)]
good_idxs2 = idx2[mutual_idxs2.view(-1)]
dists1_good = dists1[mutual_idxs1.view(-1)]
dists2_good = dists2[mutual_idxs2.view(-1)]
# Sort both surviving sets by the desc1 index; assumes the two sets contain
# the same mutual pairs so sorted rows line up element-wise — TODO confirm
# this alignment when several pairs share a desc1 index.
_, idx_upl1 = torch.sort(good_idxs1[:, 0])
_, idx_upl2 = torch.sort(good_idxs2[:, 0])
good_idxs1 = good_idxs1[idx_upl1]
# Keep the worse (larger) of the two directional ratios as the match score.
match_dists = torch.max(dists1_good[idx_upl1], dists2_good[idx_upl2])
matches_idxs = good_idxs1
else:
# No candidate survived the ratio test in at least one direction.
matches_idxs, match_dists = torch.empty(0, 2, device=dm.device), torch.empty(0, 1, device=dm.device)
return match_dists.view(-1, 1), matches_idxs.view(-1, 2)
class DescriptorMatcher(nn.Module):
"""Module version of matching functions.
See :func:`~kornia.feature.match_nn`, :func:`~kornia.feature.match_snn`,
:func:`~kornia.feature.match_mnn` or :func:`~kornia.feature.match_smnn` for more details.
Args:
match_mode: type of matching, can be `nn`, `snn`, `mnn`, `smnn`.
th: threshold on distance ratio, or other quality measure.
"""
# Accepted values for ``match_mode`` (compared after lowercasing in __init__).
known_modes = ['nn', 'mnn', 'snn', 'smnn']
def __init__(self, match_mode: str = 'snn', th: float = 0.8) -> None:
super().__init__()
# Normalise the mode once so forward() can compare against lowercase names.
_match_mode: str = match_mode.lower()
if _match_mode not in self.known_modes:
raise NotImplementedError(f"{match_mode} is not supported. Try one of {self.known_modes}")
self.match_mode = _match_mode
self.th = th
def forward(self, desc1: torch.Tensor, desc2: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
desc1: Batch of descriptors of a shape :math:`(B1, D)`.
desc2: Batch of descriptors of a shape :math:`(B2, D)`.
Return:
- Descriptor distance of matching descriptors, shape of :math:`(B3, 1)`.
- Long tensor indexes of matching descriptors in desc1 and desc2,
shape of :math:`(B3, 2)` where :math:`0 <= B3 <= B1`.
"""
# Dispatch to the matching function chosen at construction time; ``th``
# is only used by the ratio-test modes ('snn' and 'smnn').
if self.match_mode == 'nn':
out = match_nn(desc1, desc2)
elif self.match_mode == 'mnn':
out = match_mnn(desc1, desc2)
elif self.match_mode == 'snn':
out = match_snn(desc1, desc2, self.th)
elif self.match_mode == 'smnn':
out = match_smnn(desc1, desc2, self.th)
else:
# Unreachable in practice: __init__ already validated the mode.
raise NotImplementedError
return out | 0.967884 | 0.782496 |
from django.core.management.base import BaseCommand, CommandError
from api.models import Subject, Course, Campus
import xml.etree.ElementTree as ET
import os
class Command(BaseCommand):
# Management command that seeds the Campus, Course and Subject tables from the
# disciplinas.xml fixture shipped with the project.
help = 'Populates the subjects table'
def handle(self, *args, **options):
# Resolve the fixture path relative to this file so the command works
# regardless of the current working directory.
dirr = os.path.dirname(__file__)
filename = os.path.join(dirr, '../../fixtures/disciplinas.xml')
tree = ET.parse(filename)
root = tree.getroot()
subjects = []
subject = Subject.objects.all()
# Campuses and courses must exist before subjects can reference them.
self.create_campuses()
self.create_courses()
# Only populate when the Subject table is empty (crude idempotence guard).
if subject.count() <= 0:
for child in root:
for v in child:
# In the fixture, each element's tag is a course name and its text
# is a subject name belonging to that course — TODO confirm schema.
c = Subject(name=v.text, course=Course.objects.get(name=v.tag))
# De-duplicate by subject name before the bulk insert
# (O(n^2) scan; acceptable for a fixture-sized list).
match = [c1.name for c1 in subjects if c1.name == c.name]
if match == []:
subjects.append(c)
if len(subjects):
Subject.objects.bulk_create(subjects)
self.stdout.write("Subjects added!")
else:
self.stdout.write("No subjects added. Please remove already inserted subjects from db!")
def create_campuses(self):
# Seed the four known campuses when the Campus table is empty.
c = Campus.objects.all()
if c.count() <= 0:
campuses_name = ["FGA" ,"FCE", "DARCY RIBEIRO", "FUP"]
campuses = []
for name in campuses_name:
campus = Campus(name=name)
campuses.append(campus)
Campus.objects.bulk_create(campuses)
self.stdout.write("Campuses added!")
else:
self.stdout.write("No campuses added. Please remove already inserted campuses from db!")
def create_courses(self):
# Seed engineering courses when the Course table is empty. Every course is
# attached to campus pk=1 — presumably FGA; verify, see inline note below.
c = Course.objects.all()
if c.count() <= 0:
courses_name = ["ENGENHARIA" ,"SOFTWARE", "ELETRONICA", "AEROESPACIAL", "ENERGIA", "AUTOMOTIVA"]
courses = []
for name in courses_name:
course = Course(name=name, campus=Campus.objects.get(pk=1)) # fix logic to add other campuses courses
courses.append(course)
Course.objects.bulk_create(courses)
self.stdout.write("Courses added!")
else:
self.stdout.write("No courses added. Please remove already inserted courses from db!") | api/management/commands/populatedb.py | from django.core.management.base import BaseCommand, CommandError
from api.models import Subject, Course, Campus
import xml.etree.ElementTree as ET
import os
class Command(BaseCommand):
# Management command that seeds the Campus, Course and Subject tables from the
# disciplinas.xml fixture shipped with the project.
help = 'Populates the subjects table'
def handle(self, *args, **options):
# Resolve the fixture path relative to this file so the command works
# regardless of the current working directory.
dirr = os.path.dirname(__file__)
filename = os.path.join(dirr, '../../fixtures/disciplinas.xml')
tree = ET.parse(filename)
root = tree.getroot()
subjects = []
subject = Subject.objects.all()
# Campuses and courses must exist before subjects can reference them.
self.create_campuses()
self.create_courses()
# Only populate when the Subject table is empty (crude idempotence guard).
if subject.count() <= 0:
for child in root:
for v in child:
# In the fixture, each element's tag is a course name and its text
# is a subject name belonging to that course — TODO confirm schema.
c = Subject(name=v.text, course=Course.objects.get(name=v.tag))
# De-duplicate by subject name before the bulk insert
# (O(n^2) scan; acceptable for a fixture-sized list).
match = [c1.name for c1 in subjects if c1.name == c.name]
if match == []:
subjects.append(c)
if len(subjects):
Subject.objects.bulk_create(subjects)
self.stdout.write("Subjects added!")
else:
self.stdout.write("No subjects added. Please remove already inserted subjects from db!")
def create_campuses(self):
# Seed the four known campuses when the Campus table is empty.
c = Campus.objects.all()
if c.count() <= 0:
campuses_name = ["FGA" ,"FCE", "DARCY RIBEIRO", "FUP"]
campuses = []
for name in campuses_name:
campus = Campus(name=name)
campuses.append(campus)
Campus.objects.bulk_create(campuses)
self.stdout.write("Campuses added!")
else:
self.stdout.write("No campuses added. Please remove already inserted campuses from db!")
def create_courses(self):
# Seed engineering courses when the Course table is empty. Every course is
# attached to campus pk=1 — presumably FGA; verify, see inline note below.
c = Course.objects.all()
if c.count() <= 0:
courses_name = ["ENGENHARIA" ,"SOFTWARE", "ELETRONICA", "AEROESPACIAL", "ENERGIA", "AUTOMOTIVA"]
courses = []
for name in courses_name:
course = Course(name=name, campus=Campus.objects.get(pk=1)) # fix logic to add other campuses courses
courses.append(course)
Course.objects.bulk_create(courses)
self.stdout.write("Courses added!")
else:
self.stdout.write("No courses added. Please remove already inserted courses from db!") | 0.257765 | 0.10725 |
import json
import numpy as np
from collections import OrderedDict
from src.evaluation.summary_loader import load_processed_dataset
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
# Global seaborn theme for the plot below.
sns.set()
sns.set_style("darkgrid")
# Scratch containers — presumably left over from an earlier revision; nothing
# below reads them (read_scores rebinds ``videos`` locally). TODO confirm.
videos = {}
x_axis = []
y_axis = []
# Directories holding one JSON score file per cross-validation split.
original_splits = '../../results/TVSum/video_scores/original splits/'
non_overlapping_splits = '../../results/TVSum/video_scores/non overlapping splits/'
# TVSum: 50 videos, evaluated over 5 splits.
n_videos = 50
n_splits = 5
def read_scores(dir, n_splits, n_videos):
    """Load per-video F1-scores from ``n_splits`` score files and average them.

    Each file ``<dir>/video_scores<i>.txt`` holds a JSON object mapping video
    id (string) -> F1-score. Scores for the same video across splits are
    averaged; videos absent from every file get a score of 0 so the result
    always covers ids 1..n_videos.

    Args:
        dir: directory containing the ``video_scores{i}.txt`` files.
            (Name shadows the builtin but is kept for call compatibility.)
        n_splits: number of split files to read.
        n_videos: total number of videos expected in the dataset.

    Returns:
        pandas.Series indexed by video id (1..n_videos, ascending) holding the
        mean F1-score per video.
    """
    records = []
    for split in range(n_splits):
        path = dir + '/video_scores{}.txt'.format(split)
        print(path)
        with open(path, 'r') as infile:
            scores = json.load(infile)
        print(scores.keys())
        # Collect plain records instead of DataFrame.append: append was
        # deprecated in pandas 1.4 and removed in 2.0, and was O(n^2).
        records.extend({'Videos': int(video), 'F1-scores': score}
                       for video, score in scores.items())
    df = pd.DataFrame(records, columns=['Videos', 'F1-scores'])
    series = df.groupby('Videos')['F1-scores'].mean()
    print(list(series.index.values))
    # reindex pads missing video ids with 0 and yields an ascending index in
    # one step (replaces the removed Series.append + sort_index idiom).
    series = series.reindex(range(1, n_videos + 1), fill_value=0)
    return series
# Average per-video F1-scores for both split strategies.
original_videos = read_scores(original_splits,n_splits,n_videos)
non_overlapping_videos = read_scores(non_overlapping_splits,n_splits,n_videos)
# NOTE(review): despite its name, ``diff`` is the MEAN of the two series,
# (a + b) / 2, while the plot title below says "delta F1-score". The intended
# expression may be ``non_overlapping_videos.values - original_videos.values``
# — confirm before trusting the plot.
diff= (non_overlapping_videos.values + original_videos.values)/2
print (diff)
# 'included' and 'F1-score difference' are constant helper columns used only
# to drive the seaborn marker style / legend.
df = pd.DataFrame({'Video Names':original_videos.index, 'F1-scores': diff, 'included':'', 'F1-score difference':'Values' })
plot=sns.scatterplot(x="Video Names", y="F1-scores", markers=['o'], style= 'F1-score difference', data=df)
plt.xticks(np.arange(1, n_videos + 1))
# Red dashed zero line marks equal performance between the two split types.
plt.axhline(y=0, c='red', linestyle='dashed', label="horizontal")
#plt.yticks(np.arange(-90, 20, step=10))
plt.title("delta F1-score between original and non-overlapping video splits", fontsize= 15)
labels = ["Equivalent F1-score"]
handles, _ = plot.get_legend_handles_labels()
# Slice list to remove first handle
# NOTE(review): ``handles[:]`` copies the whole list and removes nothing — the
# comment above suggests ``handles[1:]`` was intended; confirm.
plt.legend(handles = handles[:], labels = labels)
plt.show() | src/visualization/visualize_fscores_differences.py | import json
import numpy as np
from collections import OrderedDict
from src.evaluation.summary_loader import load_processed_dataset
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
# Global seaborn theme for the plot below.
sns.set()
sns.set_style("darkgrid")
# Scratch containers — presumably left over from an earlier revision; nothing
# below reads them (read_scores rebinds ``videos`` locally). TODO confirm.
videos = {}
x_axis = []
y_axis = []
# Directories holding one JSON score file per cross-validation split.
original_splits = '../../results/TVSum/video_scores/original splits/'
non_overlapping_splits = '../../results/TVSum/video_scores/non overlapping splits/'
# TVSum: 50 videos, evaluated over 5 splits.
n_videos = 50
n_splits = 5
def read_scores(dir, n_splits, n_videos):
    """Load per-video F1-scores from ``n_splits`` score files and average them.

    Each file ``<dir>/video_scores<i>.txt`` holds a JSON object mapping video
    id (string) -> F1-score. Scores for the same video across splits are
    averaged; videos absent from every file get a score of 0 so the result
    always covers ids 1..n_videos.

    Args:
        dir: directory containing the ``video_scores{i}.txt`` files.
            (Name shadows the builtin but is kept for call compatibility.)
        n_splits: number of split files to read.
        n_videos: total number of videos expected in the dataset.

    Returns:
        pandas.Series indexed by video id (1..n_videos, ascending) holding the
        mean F1-score per video.
    """
    records = []
    for split in range(n_splits):
        path = dir + '/video_scores{}.txt'.format(split)
        print(path)
        with open(path, 'r') as infile:
            scores = json.load(infile)
        print(scores.keys())
        # Collect plain records instead of DataFrame.append: append was
        # deprecated in pandas 1.4 and removed in 2.0, and was O(n^2).
        records.extend({'Videos': int(video), 'F1-scores': score}
                       for video, score in scores.items())
    df = pd.DataFrame(records, columns=['Videos', 'F1-scores'])
    series = df.groupby('Videos')['F1-scores'].mean()
    print(list(series.index.values))
    # reindex pads missing video ids with 0 and yields an ascending index in
    # one step (replaces the removed Series.append + sort_index idiom).
    series = series.reindex(range(1, n_videos + 1), fill_value=0)
    return series
# Average per-video F1-scores for both split strategies.
original_videos = read_scores(original_splits,n_splits,n_videos)
non_overlapping_videos = read_scores(non_overlapping_splits,n_splits,n_videos)
# NOTE(review): despite its name, ``diff`` is the MEAN of the two series,
# (a + b) / 2, while the plot title below says "delta F1-score". The intended
# expression may be ``non_overlapping_videos.values - original_videos.values``
# — confirm before trusting the plot.
diff= (non_overlapping_videos.values + original_videos.values)/2
print (diff)
# 'included' and 'F1-score difference' are constant helper columns used only
# to drive the seaborn marker style / legend.
df = pd.DataFrame({'Video Names':original_videos.index, 'F1-scores': diff, 'included':'', 'F1-score difference':'Values' })
plot=sns.scatterplot(x="Video Names", y="F1-scores", markers=['o'], style= 'F1-score difference', data=df)
plt.xticks(np.arange(1, n_videos + 1))
# Red dashed zero line marks equal performance between the two split types.
plt.axhline(y=0, c='red', linestyle='dashed', label="horizontal")
#plt.yticks(np.arange(-90, 20, step=10))
plt.title("delta F1-score between original and non-overlapping video splits", fontsize= 15)
labels = ["Equivalent F1-score"]
handles, _ = plot.get_legend_handles_labels()
# Slice list to remove first handle
# NOTE(review): ``handles[:]`` copies the whole list and removes nothing — the
# comment above suggests ``handles[1:]`` was intended; confirm.
plt.legend(handles = handles[:], labels = labels)
plt.show() | 0.22093 | 0.3217 |
from . import epanet2
from .objectcollection import ObjectCollection
from .baseobject import BaseObject, lazy_property
from .pattern import Pattern
class Node(BaseObject):
    """Base EPANET node: behaviour shared by junctions, reservoirs and tanks."""

    # Network inputs (settable through the toolkit).
    static_properties = {'elevation': epanet2.EN_ELEVATION}
    # Solver results (read-only).
    properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE, 'quality': epanet2.EN_QUALITY}

    def __init__(self, uid, network):
        super(Node, self).__init__(uid, network)
        # Links attached to this node; populated while the network is assembled.
        self.links = ObjectCollection()

    def get_index(self, uid):
        """Return (and cache) the EPANET toolkit index for this node's uid."""
        if not self._index:
            self._index = self.network().ep.ENgetnodeindex(uid)
        return self._index

    def set_object_value(self, code, value):
        """Write a node property through the toolkit (``code`` is an EN_* constant)."""
        return self.network().ep.ENsetnodevalue(self.index, code, value)

    def get_object_value(self, code):
        """Read a node property through the toolkit (``code`` is an EN_* constant)."""
        return self.network().ep.ENgetnodevalue(self.index, code)

    @property
    def comment(self):
        # Object type 0 selects the NODE table.
        return self.network().ep.ENgetcomment(0, self.index)

    @comment.setter
    def comment(self, value):
        # Object type 0 selects the NODE table (old comment wrongly said LINK).
        return self.network().ep.ENsetcomment(0, self.index, value)

    @property
    def index(self):
        return self.get_index(self.uid)

    @lazy_property
    def coordinates(self):
        return self.network().ep.ENgetcoord(self.index)

    # extra functionality

    @lazy_property
    def upstream_links(self):
        """Return the links carrying flow into this node (steady state only)."""
        if self.results != {}:
            raise ValueError("This method is only supported for steady state simulations")
        links = ObjectCollection()
        for link in self.links:
            # A link feeds the node when it points here with flow >= 1e-3, or
            # points away with flow < -1e-3; smaller flows count as stagnant.
            if (link.to_node == self and link.flow >= 1e-3) or (link.from_node == self and link.flow < -1e-3):
                links[link.uid] = link
        return links

    @lazy_property
    def downstream_links(self):
        """Return the links carrying flow out of this node (steady state only)."""
        if self.results != {}:
            raise ValueError("This method is only supported for steady state simulations")
        links = ObjectCollection()
        for link in self.links:
            if (link.from_node == self and link.flow >= 1e-3) or (link.to_node == self and link.flow < -1e-3):
                links[link.uid] = link
        return links

    @lazy_property
    def inflow(self):
        """Total absolute flow entering the node through its upstream links."""
        return sum(abs(link.flow) for link in self.upstream_links)

    @lazy_property
    def outflow(self):
        """Total absolute flow leaving the node through its downstream links.

        (Absorbs the stray class-level string that used to sit after this
        method: "calculates all the water flowing out of the node".)
        """
        return sum(abs(link.flow) for link in self.downstream_links)
class Reservoir(Node):
""" EPANET Reservoir Class """
# Reservoirs reuse all Node behaviour; only the EPANET object kind tag differs.
node_type = "Reservoir"
class Junction(Node):
""" EPANET Junction Class """
# Junctions add base demand and emitter coefficient as writable inputs, and
# expose solver-computed demand alongside the base Node result properties.
static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND, 'emitter': epanet2.EN_EMITTER}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE, 'demand': epanet2.EN_DEMAND, 'quality': epanet2.EN_QUALITY}
node_type = "Junction"
@property
def pattern(self):
# Resolve the junction's demand-pattern index to a Pattern wrapper object.
pattern_index = int(self.get_property(epanet2.EN_PATTERN))
uid = self.network().ep.ENgetpatternid(pattern_index)
return Pattern(uid, self.network())
@pattern.setter
def pattern(self, value):
# Accept a raw toolkit index (int), a pattern uid (str) or a Pattern object.
if isinstance(value, int):
pattern_index = value
elif isinstance(value, str):
pattern_index = self.network().ep.ENgetpatternindex(value)
else:
pattern_index = value.index
# Changing the pattern invalidates any previously computed results.
self.network().solved = False
self.set_object_value(epanet2.EN_PATTERN, pattern_index)
class Tank(Node):
""" EPANET Tank Class """
node_type = "Tank"
static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND,
'initvolume': epanet2.EN_INITVOLUME, 'diameter': epanet2.EN_TANKDIAM,
'minvolume': epanet2.EN_MINVOLUME, 'minlevel': epanet2.EN_MINLEVEL,
'maxlevel': epanet2.EN_MAXLEVEL, 'maxvolume': 25, 'tanklevel': epanet2.EN_TANKLEVEL}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE,
'demand': epanet2.EN_DEMAND, 'volume': 24, 'level': epanet2.EN_TANKLEVEL} | epynet/node.py | from . import epanet2
from .objectcollection import ObjectCollection
from .baseobject import BaseObject, lazy_property
from .pattern import Pattern
class Node(BaseObject):
    """ Base EPANET Node class """
    # Settable input parameters vs. computed simulation results.
    static_properties = {'elevation': epanet2.EN_ELEVATION}
    properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE, 'quality': epanet2.EN_QUALITY}

    def __init__(self, uid, network):
        super(Node, self).__init__(uid, network)
        # Links attached to this node; filled in externally after construction.
        self.links = ObjectCollection()

    def get_index(self, uid):
        """Resolve and cache the EPANET index for this node's uid."""
        if not self._index:
            self._index = self.network().ep.ENgetnodeindex(uid)
        return self._index

    def set_object_value(self, code, value):
        return self.network().ep.ENsetnodevalue(self.index, code, value)

    def get_object_value(self, code):
        return self.network().ep.ENgetnodevalue(self.index, code)

    @property
    def comment(self):
        return self.network().ep.ENgetcomment(0, self.index)  # get comment from NODE table

    @comment.setter
    def comment(self, value):
        # Object type 0 = NODE table, same as the getter.
        return self.network().ep.ENsetcomment(0, self.index, value)

    @property
    def index(self):
        return self.get_index(self.uid)

    @lazy_property
    def coordinates(self):
        return self.network().ep.ENgetcoord(self.index)

    # extra functionality
    @lazy_property
    def upstream_links(self):
        """ return a list of upstream links """
        # Only meaningful for steady-state runs: with time-series results a
        # link has no single flow direction.
        if self.results != {}:
            raise ValueError("This method is only supported for steady state simulations")
        links = ObjectCollection()
        for link in self.links:
            # Upstream = flow entering this node; 1e-3 is the tolerance for
            # treating a flow as non-zero.
            if (link.to_node == self and link.flow >= 1e-3) or (link.from_node == self and link.flow < -1e-3):
                links[link.uid] = link
        return links

    @lazy_property
    def downstream_links(self):
        """ return a list of downstream links """
        if self.results != {}:
            raise ValueError("This method is only supported for steady state simulations")
        links = ObjectCollection()
        for link in self.links:
            if (link.from_node == self and link.flow >= 1e-3) or (link.to_node == self and link.flow < -1e-3):
                links[link.uid] = link
        return links

    @lazy_property
    def inflow(self):
        """ calculates all the water flowing into the node """
        # NOTE(review): local is named 'outflow' (copy-paste from outflow
        # below) but it accumulates inflow.
        outflow = 0
        for link in self.upstream_links:
            outflow += abs(link.flow)
        return outflow

    @lazy_property
    def outflow(self):
        """ calculates all the water flowing out of the node """
        outflow = 0
        for link in self.downstream_links:
            outflow += abs(link.flow)
        return outflow
class Reservoir(Node):
    """ EPANET Reservoir Class """
    # Only the base Node interface applies; node_type tags the subtype.
    node_type = "Reservoir"
class Junction(Node):
    """ EPANET Junction Class """
    # Settable input parameters vs. computed simulation results.
    static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND, 'emitter': epanet2.EN_EMITTER}
    properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE, 'demand': epanet2.EN_DEMAND, 'quality': epanet2.EN_QUALITY}
    node_type = "Junction"

    @property
    def pattern(self):
        """Demand pattern assigned to this junction, wrapped as a Pattern."""
        pattern_index = int(self.get_property(epanet2.EN_PATTERN))
        uid = self.network().ep.ENgetpatternid(pattern_index)
        return Pattern(uid, self.network())

    @pattern.setter
    def pattern(self, value):
        # Accept an EPANET index (int), a pattern uid (str), or a Pattern object.
        if isinstance(value, int):
            pattern_index = value
        elif isinstance(value, str):
            pattern_index = self.network().ep.ENgetpatternindex(value)
        else:
            pattern_index = value.index
        # Changing the pattern invalidates any previously computed solution.
        self.network().solved = False
        self.set_object_value(epanet2.EN_PATTERN, pattern_index)
class Tank(Node):
    """ EPANET Tank Class """
    node_type = "Tank"
    # 'maxvolume' (25) and 'volume' (24) are raw EPANET parameter codes;
    # presumably this epanet2 wrapper does not name those constants —
    # TODO confirm against the epanet2 module.
    static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND,
                         'initvolume': epanet2.EN_INITVOLUME, 'diameter': epanet2.EN_TANKDIAM,
                         'minvolume': epanet2.EN_MINVOLUME, 'minlevel': epanet2.EN_MINLEVEL,
                         'maxlevel': epanet2.EN_MAXLEVEL, 'maxvolume': 25, 'tanklevel': epanet2.EN_TANKLEVEL}
    properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE,
                  'demand': epanet2.EN_DEMAND, 'volume': 24, 'level': epanet2.EN_TANKLEVEL}
# dataset-dump row metadata (not code): quality_prob=0.722037 learning_prob=0.207014
import re
import mindspore.nn as nn
from mindspore import context
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops import composite as C
from mindspore.common.tensor import Tensor
from mindspore.common import dtype as mstype
from mindspore.common.parameter import Parameter
from mindspore.communication.management import get_group_size
from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
from mindspore.context import ParallelMode
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from .tinybert_model import BertModel, TinyBertModel, BertModelCLS
GRADIENT_CLIP_TYPE = 1  # 0 = clip by value, 1 = clip by norm (see _clip_grad)
GRADIENT_CLIP_VALUE = 1.0

clip_grad = C.MultitypeFuncGraph("clip_grad")


@clip_grad.register("Number", "Number", "Tensor")
def _clip_grad(clip_type, clip_value, grad):
    """
    Clip gradients.

    Inputs:
        clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.
        clip_value (float): Specifies how much to clip.
        grad (tuple[Tensor]): Gradients.

    Outputs:
        tuple[Tensor], clipped gradients.
    """
    if clip_type not in (0, 1):
        # Unknown clip type: pass the gradient through unchanged.
        return grad
    dt = F.dtype(grad)
    if clip_type == 0:
        # Element-wise clip to [-clip_value, clip_value], cast to grad dtype.
        new_grad = C.clip_by_value(grad, F.cast(F.tuple_to_array((-clip_value,)), dt),
                                   F.cast(F.tuple_to_array((clip_value,)), dt))
    else:
        # Clip by norm.
        new_grad = nn.ClipByNorm()(grad, F.cast(F.tuple_to_array((clip_value,)), dt))
    return new_grad
grad_scale = C.MultitypeFuncGraph("grad_scale")
reciprocal = P.Reciprocal()


@grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale(scale, grad):
    """Undo loss scaling: multiply each gradient by 1/scale."""
    return grad * reciprocal(scale)
class ClipGradients(nn.Cell):
    """
    Clip gradients.

    Args:
        grads (list): List of gradient tuples.
        clip_type (Tensor): The way to clip, 'value' or 'norm'.
        clip_value (Tensor): Specifies how much to clip.

    Returns:
        List, a list of clipped_grad tuples.
    """
    def __init__(self):
        super(ClipGradients, self).__init__()
        self.clip_by_norm = nn.ClipByNorm()
        self.cast = P.Cast()
        self.dtype = P.DType()

    def construct(self,
                  grads,
                  clip_type,
                  clip_value):
        """clip gradients"""
        if clip_type not in (0, 1):
            # Unknown clip type: return gradients unchanged.
            return grads
        new_grads = ()
        for grad in grads:
            dt = self.dtype(grad)
            if clip_type == 0:
                # Element-wise clip to [-clip_value, clip_value].
                t = C.clip_by_value(grad, self.cast(F.tuple_to_array((-clip_value,)), dt),
                                    self.cast(F.tuple_to_array((clip_value,)), dt))
            else:
                # Clip by norm.
                t = self.clip_by_norm(grad, self.cast(F.tuple_to_array((clip_value,)), dt))
            new_grads = new_grads + (t,)
        return new_grads
class SoftCrossEntropy(nn.Cell):
    """Soft cross-entropy: mean of -softmax(targets) * log_softmax(predicts)."""

    def __init__(self):
        super(SoftCrossEntropy, self).__init__()
        self.softmax = P.Softmax(axis=-1)
        self.log_softmax = P.LogSoftmax(axis=-1)
        self.reduce_mean = P.ReduceMean()
        self.cast = P.Cast()

    def construct(self, predicts, targets):
        """Return the soft cross-entropy between predicted and target logits."""
        soft_targets = self.softmax(targets)
        log_probs = self.log_softmax(predicts)
        mean_loss = self.reduce_mean(-soft_targets * log_probs)
        return self.cast(mean_loss, mstype.float32)
class BertNetworkWithLoss_gd(nn.Cell):
    """
    General-distillation (gd) loss network: distills a teacher BertModel into
    a student TinyBertModel via attention-map and hidden-representation MSE.

    Args:
        teacher_config (BertConfig): The config of the teacher BertModel.
        teacher_ckpt (str): Path of the teacher checkpoint to load.
        student_config (BertConfig): The config of the student TinyBertModel.
        is_training (bool): Specifies whether to use the training mode.
        use_one_hot_embeddings (bool): Specifies whether to use one-hot for embeddings. Default: False.
        is_att_fit (bool): Include the attention-map loss. Default: True.
        is_rep_fit (bool): Include the hidden-representation loss. Default: True.

    Returns:
        Tensor, the loss of the network.
    """
    def __init__(self, teacher_config, teacher_ckpt, student_config, is_training, use_one_hot_embeddings=False,
                 is_att_fit=True, is_rep_fit=True):
        super(BertNetworkWithLoss_gd, self).__init__()
        # load teacher model
        self.teacher = BertModel(teacher_config, False, use_one_hot_embeddings)
        param_dict = load_checkpoint(teacher_ckpt)
        new_param_dict = {}
        for key, value in param_dict.items():
            # Remap checkpoint keys into this cell's 'teacher.' scope.
            new_key = re.sub('^bert.bert.', 'teacher.', key)
            new_param_dict[new_key] = value
        load_param_into_net(self.teacher, new_param_dict)
        # no_grad: freeze the teacher so only the student is trained
        self.teacher.set_train(False)
        params = self.teacher.trainable_params()
        for param in params:
            param.requires_grad = False
        # student model
        self.bert = TinyBertModel(student_config, is_training, use_one_hot_embeddings)
        self.cast = P.Cast()
        # Projects student hidden states to the teacher hidden size so the
        # two can be compared with MSE.
        self.fit_dense = nn.Dense(student_config.hidden_size,
                                  teacher_config.hidden_size).to_float(teacher_config.compute_type)
        self.teacher_layers_num = teacher_config.num_hidden_layers
        self.student_layers_num = student_config.num_hidden_layers
        # Each student layer is matched against one teacher "block" of layers.
        self.layers_per_block = int(self.teacher_layers_num / self.student_layers_num)
        self.is_att_fit = is_att_fit
        self.is_rep_fit = is_rep_fit
        self.loss_mse = nn.MSELoss()
        self.select = P.Select()
        self.zeroslike = P.ZerosLike()
        self.dtype = teacher_config.dtype

    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id):
        """general distill network with loss"""
        # teacher model (frozen)
        _, _, _, teacher_seq_output, teacher_att_output = self.teacher(input_ids, token_type_id, input_mask)
        # student model
        _, _, _, student_seq_output, student_att_output = self.bert(input_ids, token_type_id, input_mask)
        total_loss = 0
        if self.is_att_fit:
            # Pair student attention layer i with the last teacher layer of
            # its block: (i + 1) * layers_per_block - 1.
            selected_teacher_att_output = ()
            selected_student_att_output = ()
            for i in range(self.student_layers_num):
                selected_teacher_att_output += (teacher_att_output[(i + 1) * self.layers_per_block - 1],)
                selected_student_att_output += (student_att_output[i],)
            att_loss = 0
            for i in range(self.student_layers_num):
                student_att = selected_student_att_output[i]
                teacher_att = selected_teacher_att_output[i]
                # Zero out entries <= -100 (presumably masked attention
                # positions — confirm) so they do not dominate the MSE.
                student_att = self.select(student_att <= self.cast(-100.0, mstype.float32), self.zeroslike(student_att),
                                          student_att)
                teacher_att = self.select(teacher_att <= self.cast(-100.0, mstype.float32), self.zeroslike(teacher_att),
                                          teacher_att)
                att_loss += self.loss_mse(student_att, teacher_att)
            total_loss += att_loss
        if self.is_rep_fit:
            # +1: the sequence outputs include the embedding layer output.
            selected_teacher_seq_output = ()
            selected_student_seq_output = ()
            for i in range(self.student_layers_num + 1):
                selected_teacher_seq_output += (teacher_seq_output[i * self.layers_per_block],)
                fit_dense_out = self.fit_dense(student_seq_output[i])
                fit_dense_out = self.cast(fit_dense_out, self.dtype)
                selected_student_seq_output += (fit_dense_out,)
            rep_loss = 0
            for i in range(self.student_layers_num + 1):
                teacher_rep = selected_teacher_seq_output[i]
                student_rep = selected_student_seq_output[i]
                rep_loss += self.loss_mse(student_rep, teacher_rep)
            total_loss += rep_loss
        return self.cast(total_loss, mstype.float32)
class BertTrainWithLossScaleCell(nn.Cell):
    """
    Encapsulation class of bert network training.

    Append an optimizer to the training network after that the construct
    function can be called to create the backward graph.

    Args:
        network (Cell): The training network. Note that loss function should have been added.
        optimizer (Optimizer): Optimizer for updating the weights.
        scale_update_cell (Cell): Cell to do the loss scale. Default: None.
    """
    def __init__(self, network, optimizer, scale_update_cell=None):
        super(BertTrainWithLossScaleCell, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.weights = optimizer.parameters
        self.optimizer = optimizer
        self.grad = C.GradOperation(get_by_list=True,
                                    sens_param=True)
        self.reducer_flag = False
        self.allreduce = P.AllReduce()
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
            self.reducer_flag = True
        # Identity reducer for single-device runs; replaced when distributed.
        self.grad_reducer = F.identity
        self.degree = 1
        if self.reducer_flag:
            self.degree = get_group_size()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, False, self.degree)
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        self.cast = P.Cast()
        # NPU float-status ops used below for overflow detection.
        self.alloc_status = P.NPUAllocFloatStatus()
        self.get_status = P.NPUGetFloatStatus()
        self.clear_status = P.NPUClearFloatStatus()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.base = Tensor(1, mstype.float32)
        self.less_equal = P.LessEqual()
        self.hyper_map = C.HyperMap()
        self.loss_scale = None
        self.loss_scaling_manager = scale_update_cell
        if scale_update_cell:
            self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32))

    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id,
                  sens=None):
        """Defines the computation performed."""
        weights = self.weights
        loss = self.network(input_ids,
                            input_mask,
                            token_type_id)
        # Use the managed loss scale unless an explicit sens override is given.
        if sens is None:
            scaling_sens = self.loss_scale
        else:
            scaling_sens = sens
        # alloc status and clear should be right before gradoperation
        init = self.alloc_status()
        init = F.depend(init, loss)
        clear_status = self.clear_status(init)
        scaling_sens = F.depend(scaling_sens, clear_status)
        grads = self.grad(self.network, weights)(input_ids,
                                                 input_mask,
                                                 token_type_id,
                                                 self.cast(scaling_sens,
                                                           mstype.float32))
        # apply grad reducer on grads
        grads = self.grad_reducer(grads)
        # Undo loss scaling (times device count) then clip.
        grads = self.hyper_map(F.partial(grad_scale, scaling_sens * self.degree), grads)
        grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
        init = F.depend(init, grads)
        get_status = self.get_status(init)
        init = F.depend(init, get_status)
        flag_sum = self.reduce_sum(init, (0,))
        if self.is_distributed:
            # sum overflow flag over devices
            flag_reduce = self.allreduce(flag_sum)
            cond = self.less_equal(self.base, flag_reduce)
        else:
            cond = self.less_equal(self.base, flag_sum)
        overflow = cond
        if sens is None:
            overflow = self.loss_scaling_manager(self.loss_scale, cond)
        # Skip the optimizer step when an overflow was detected.
        if overflow:
            succ = False
        else:
            succ = self.optimizer(grads)
        ret = (loss, cond, scaling_sens)
        return F.depend(ret, succ)
class BertTrainCell(nn.Cell):
    """
    Encapsulation class of bert network training.

    Append an optimizer to the training network after that the construct
    function can be called to create the backward graph.

    Args:
        network (Cell): The training network. Note that loss function should have been added.
        optimizer (Optimizer): Optimizer for updating the weights.
        sens (Number): The adjust parameter. Default: 1.0.
    """
    def __init__(self, network, optimizer, sens=1.0):
        super(BertTrainCell, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.weights = optimizer.parameters
        self.optimizer = optimizer
        # Fixed gradient sensitivity (no dynamic loss scaling in this cell).
        self.sens = sens
        self.grad = C.GradOperation(get_by_list=True,
                                    sens_param=True)
        self.reducer_flag = False
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
            self.reducer_flag = True
        self.grad_reducer = F.identity
        self.degree = 1
        if self.reducer_flag:
            mean = context.get_auto_parallel_context("gradients_mean")
            self.degree = get_group_size()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, self.degree)
        self.cast = P.Cast()
        self.hyper_map = C.HyperMap()

    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id):
        """Defines the computation performed."""
        weights = self.weights
        loss = self.network(input_ids,
                            input_mask,
                            token_type_id)
        grads = self.grad(self.network, weights)(input_ids,
                                                 input_mask,
                                                 token_type_id,
                                                 self.cast(F.tuple_to_array((self.sens,)),
                                                           mstype.float32))
        # apply grad reducer on grads
        grads = self.grad_reducer(grads)
        grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
        succ = self.optimizer(grads)
        return F.depend(loss, succ)
class BertNetworkWithLoss_td(nn.Cell):
    """
    Task-distillation (td) loss network: distills a fine-tuned teacher
    BertModelCLS into a student BertModelCLS.

    Args:
        teacher_config (BertConfig): The config of the teacher model.
        teacher_ckpt (str): Path of the teacher checkpoint to load.
        student_config (BertConfig): The config of the student model.
        student_ckpt (str): Path of the student checkpoint to load.
        is_training (bool): Specifies whether to use the training mode.
        task_type (str): Task kind, e.g. "classification".
        num_labels (int): Number of output labels.
        use_one_hot_embeddings (bool): Specifies whether to use one-hot for embeddings. Default: False.
        is_predistill (bool): True for the intermediate-layer distillation
            phase; False for the prediction-layer (logit) phase. Default: True.
        is_att_fit (bool): Include the attention-map loss when pre-distilling. Default: True.
        is_rep_fit (bool): Include the hidden-representation loss when pre-distilling. Default: True.
        temperature (float): Softmax temperature for logit distillation. Default: 1.0.
        dropout_prob (float): Dropout probability. Default: 0.1.

    Returns:
        Tensor, the loss of the network.
    """
    def __init__(self, teacher_config, teacher_ckpt, student_config, student_ckpt,
                 is_training, task_type, num_labels, use_one_hot_embeddings=False,
                 is_predistill=True, is_att_fit=True, is_rep_fit=True,
                 temperature=1.0, dropout_prob=0.1):
        super(BertNetworkWithLoss_td, self).__init__()
        # load teacher model
        self.teacher = BertModelCLS(teacher_config, False, num_labels, dropout_prob,
                                    use_one_hot_embeddings, "teacher")
        param_dict = load_checkpoint(teacher_ckpt)
        new_param_dict = {}
        for key, value in param_dict.items():
            # Remap checkpoint keys into this cell's 'teacher.' scope.
            new_key = re.sub('^bert.', 'teacher.', key)
            new_param_dict[new_key] = value
        load_param_into_net(self.teacher, new_param_dict)
        # no_grad: freeze the teacher so only the student is trained
        self.teacher.set_train(False)
        params = self.teacher.trainable_params()
        for param in params:
            param.requires_grad = False
        # load student model
        self.bert = BertModelCLS(student_config, is_training, num_labels, dropout_prob,
                                 use_one_hot_embeddings, "student")
        param_dict = load_checkpoint(student_ckpt)
        if is_predistill:
            # Pre-distill checkpoints need the extra 'bert.' prefix before the
            # 'tinybert_' -> 'bert_' rename.
            new_param_dict = {}
            for key, value in param_dict.items():
                new_key = re.sub('tinybert_', 'bert_', 'bert.' + key)
                new_param_dict[new_key] = value
            load_param_into_net(self.bert, new_param_dict)
        else:
            new_param_dict = {}
            for key, value in param_dict.items():
                new_key = re.sub('tinybert_', 'bert_', key)
                new_param_dict[new_key] = value
            load_param_into_net(self.bert, new_param_dict)
        self.cast = P.Cast()
        # Projects student hidden states to the teacher hidden size for MSE.
        self.fit_dense = nn.Dense(student_config.hidden_size,
                                  teacher_config.hidden_size).to_float(teacher_config.compute_type)
        self.teacher_layers_num = teacher_config.num_hidden_layers
        self.student_layers_num = student_config.num_hidden_layers
        self.layers_per_block = int(self.teacher_layers_num / self.student_layers_num)
        self.is_predistill = is_predistill
        self.is_att_fit = is_att_fit
        self.is_rep_fit = is_rep_fit
        self.task_type = task_type
        self.temperature = temperature
        self.loss_mse = nn.MSELoss()
        self.select = P.Select()
        self.zeroslike = P.ZerosLike()
        # NOTE(review): the original assigned student_config.dtype here and
        # immediately overwrote it with teacher_config.dtype; the dead store
        # was removed — the effective value is unchanged.
        self.dtype = teacher_config.dtype
        self.num_labels = num_labels
        self.soft_cross_entropy = SoftCrossEntropy()

    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id,
                  label_ids):
        """task distill network with loss"""
        # teacher model (frozen)
        teacher_seq_output, teacher_att_output, teacher_logits, _ = self.teacher(input_ids, token_type_id, input_mask)
        # student model
        student_seq_output, student_att_output, student_logits, _ = self.bert(input_ids, token_type_id, input_mask)
        total_loss = 0
        if self.is_predistill:
            # Phase 1: intermediate-layer distillation.
            if self.is_att_fit:
                selected_teacher_att_output = ()
                selected_student_att_output = ()
                for i in range(self.student_layers_num):
                    # Student layer i vs. last teacher layer of its block.
                    selected_teacher_att_output += (teacher_att_output[(i + 1) * self.layers_per_block - 1],)
                    selected_student_att_output += (student_att_output[i],)
                att_loss = 0
                for i in range(self.student_layers_num):
                    student_att = selected_student_att_output[i]
                    teacher_att = selected_teacher_att_output[i]
                    # Zero out entries <= -100 (presumably masked attention
                    # positions — confirm) so they do not dominate the MSE.
                    student_att = self.select(student_att <= self.cast(-100.0, mstype.float32),
                                              self.zeroslike(student_att),
                                              student_att)
                    teacher_att = self.select(teacher_att <= self.cast(-100.0, mstype.float32),
                                              self.zeroslike(teacher_att),
                                              teacher_att)
                    att_loss += self.loss_mse(student_att, teacher_att)
                total_loss += att_loss
            if self.is_rep_fit:
                # +1: sequence outputs include the embedding layer output.
                selected_teacher_seq_output = ()
                selected_student_seq_output = ()
                for i in range(self.student_layers_num + 1):
                    selected_teacher_seq_output += (teacher_seq_output[i * self.layers_per_block],)
                    fit_dense_out = self.fit_dense(student_seq_output[i])
                    fit_dense_out = self.cast(fit_dense_out, self.dtype)
                    selected_student_seq_output += (fit_dense_out,)
                rep_loss = 0
                for i in range(self.student_layers_num + 1):
                    teacher_rep = selected_teacher_seq_output[i]
                    student_rep = selected_student_seq_output[i]
                    rep_loss += self.loss_mse(student_rep, teacher_rep)
                total_loss += rep_loss
        else:
            # Phase 2: prediction-layer distillation.
            if self.task_type == "classification":
                cls_loss = self.soft_cross_entropy(student_logits / self.temperature, teacher_logits / self.temperature)
            else:
                # Regression-style task: MSE on the last logits entry.
                cls_loss = self.loss_mse(student_logits[len(student_logits) - 1], label_ids[len(label_ids) - 1])
            total_loss += cls_loss
        return self.cast(total_loss, mstype.float32)
class BertEvaluationWithLossScaleCell(nn.Cell):
    """
    Especially defined for finetuning where only four inputs tensor are needed.

    Training wrapper with dynamic loss scaling: computes the loss, scales the
    sens, derives gradients, detects overflow via NPU float-status ops, and
    skips the optimizer step on overflow.
    """
    def __init__(self, network, optimizer, scale_update_cell=None):
        super(BertEvaluationWithLossScaleCell, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.weights = optimizer.parameters
        self.optimizer = optimizer
        self.grad = C.GradOperation(get_by_list=True,
                                    sens_param=True)
        self.reducer_flag = False
        self.allreduce = P.AllReduce()
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
            self.reducer_flag = True
        # Identity reducer for single-device runs; replaced when distributed.
        self.grad_reducer = F.identity
        self.degree = 1
        if self.reducer_flag:
            self.degree = get_group_size()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, False, self.degree)
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        self.cast = P.Cast()
        self.alloc_status = P.NPUAllocFloatStatus()
        self.get_status = P.NPUGetFloatStatus()
        self.clear_status = P.NPUClearFloatStatus()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.base = Tensor(1, mstype.float32)
        self.less_equal = P.LessEqual()
        self.hyper_map = C.HyperMap()
        self.loss_scale = None
        self.loss_scaling_manager = scale_update_cell
        if scale_update_cell:
            self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32))

    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id,
                  label_ids,
                  sens=None):
        """Defines the computation performed."""
        weights = self.weights
        loss = self.network(input_ids,
                            input_mask,
                            token_type_id,
                            label_ids)
        # Use the managed loss scale unless an explicit sens override is given.
        if sens is None:
            scaling_sens = self.loss_scale
        else:
            scaling_sens = sens
        # alloc status and clear should be right before gradoperation
        init = self.alloc_status()
        init = F.depend(init, loss)
        clear_status = self.clear_status(init)
        scaling_sens = F.depend(scaling_sens, clear_status)
        grads = self.grad(self.network, weights)(input_ids,
                                                 input_mask,
                                                 token_type_id,
                                                 label_ids,
                                                 self.cast(scaling_sens,
                                                           mstype.float32))
        # apply grad reducer on grads
        grads = self.grad_reducer(grads)
        # Undo loss scaling (times device count) then clip.
        grads = self.hyper_map(F.partial(grad_scale, scaling_sens * self.degree), grads)
        grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
        init = F.depend(init, grads)
        get_status = self.get_status(init)
        init = F.depend(init, get_status)
        flag_sum = self.reduce_sum(init, (0,))
        if self.is_distributed:
            # sum overflow flag over devices
            flag_reduce = self.allreduce(flag_sum)
            cond = self.less_equal(self.base, flag_reduce)
        else:
            cond = self.less_equal(self.base, flag_sum)
        overflow = cond
        if sens is None:
            overflow = self.loss_scaling_manager(self.loss_scale, cond)
        # Skip the optimizer step when an overflow was detected.
        if overflow:
            succ = False
        else:
            succ = self.optimizer(grads)
        ret = (loss, cond, scaling_sens)
        return F.depend(ret, succ)
class BertEvaluationCell(nn.Cell):
    """
    Especially defined for finetuning where only four inputs tensor are needed.

    Fixed-sens training cell (no loss scaling): computes the loss, derives
    gradients, reduces and clips them, then applies the optimizer.
    """
    def __init__(self, network, optimizer, sens=1.0):
        super(BertEvaluationCell, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.weights = optimizer.parameters
        self.optimizer = optimizer
        # Fixed gradient sensitivity.
        self.sens = sens
        self.grad = C.GradOperation(get_by_list=True,
                                    sens_param=True)
        self.reducer_flag = False
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
            self.reducer_flag = True
        self.grad_reducer = F.identity
        self.degree = 1
        if self.reducer_flag:
            mean = context.get_auto_parallel_context("gradients_mean")
            self.degree = get_group_size()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, self.degree)
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        self.cast = P.Cast()
        self.hyper_map = C.HyperMap()

    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id,
                  label_ids):
        """Defines the computation performed."""
        weights = self.weights
        loss = self.network(input_ids,
                            input_mask,
                            token_type_id,
                            label_ids)
        grads = self.grad(self.network, weights)(input_ids,
                                                 input_mask,
                                                 token_type_id,
                                                 label_ids,
                                                 self.cast(F.tuple_to_array((self.sens,)),
                                                           mstype.float32))
        # apply grad reducer on grads
        grads = self.grad_reducer(grads)
        grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
        succ = self.optimizer(grads)
        return F.depend(loss, succ)
# dataset-dump row separator (repo_path column):
# model_zoo/official/nlp/tinybert/src/tinybert_for_gd_td.py
# First line of the parsed_code column, restored as code:
import re
import mindspore.nn as nn
from mindspore import context
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops import composite as C
from mindspore.common.tensor import Tensor
from mindspore.common import dtype as mstype
from mindspore.common.parameter import Parameter
from mindspore.communication.management import get_group_size
from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
from mindspore.context import ParallelMode
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from .tinybert_model import BertModel, TinyBertModel, BertModelCLS
GRADIENT_CLIP_TYPE = 1  # 0 = clip by value, 1 = clip by norm (see _clip_grad)
GRADIENT_CLIP_VALUE = 1.0

clip_grad = C.MultitypeFuncGraph("clip_grad")


@clip_grad.register("Number", "Number", "Tensor")
def _clip_grad(clip_type, clip_value, grad):
    """
    Clip gradients.

    Inputs:
        clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.
        clip_value (float): Specifies how much to clip.
        grad (tuple[Tensor]): Gradients.

    Outputs:
        tuple[Tensor], clipped gradients.
    """
    if clip_type not in (0, 1):
        # Unknown clip type: pass the gradient through unchanged.
        return grad
    dt = F.dtype(grad)
    if clip_type == 0:
        # Element-wise clip to [-clip_value, clip_value], cast to grad dtype.
        new_grad = C.clip_by_value(grad, F.cast(F.tuple_to_array((-clip_value,)), dt),
                                   F.cast(F.tuple_to_array((clip_value,)), dt))
    else:
        # Clip by norm.
        new_grad = nn.ClipByNorm()(grad, F.cast(F.tuple_to_array((clip_value,)), dt))
    return new_grad
grad_scale = C.MultitypeFuncGraph("grad_scale")
reciprocal = P.Reciprocal()


@grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale(scale, grad):
    """Undo loss scaling: multiply each gradient by 1/scale."""
    return grad * reciprocal(scale)
class ClipGradients(nn.Cell):
    """
    Clip gradients.

    Args:
        grads (list): List of gradient tuples.
        clip_type (Tensor): The way to clip, 'value' or 'norm'.
        clip_value (Tensor): Specifies how much to clip.

    Returns:
        List, a list of clipped_grad tuples.
    """
    def __init__(self):
        super(ClipGradients, self).__init__()
        self.clip_by_norm = nn.ClipByNorm()
        self.cast = P.Cast()
        self.dtype = P.DType()

    def construct(self,
                  grads,
                  clip_type,
                  clip_value):
        """clip gradients"""
        if clip_type not in (0, 1):
            # Unknown clip type: return gradients unchanged.
            return grads
        new_grads = ()
        for grad in grads:
            dt = self.dtype(grad)
            if clip_type == 0:
                # Element-wise clip to [-clip_value, clip_value].
                t = C.clip_by_value(grad, self.cast(F.tuple_to_array((-clip_value,)), dt),
                                    self.cast(F.tuple_to_array((clip_value,)), dt))
            else:
                # Clip by norm.
                t = self.clip_by_norm(grad, self.cast(F.tuple_to_array((clip_value,)), dt))
            new_grads = new_grads + (t,)
        return new_grads
class SoftCrossEntropy(nn.Cell):
    """Soft cross-entropy: mean of -softmax(targets) * log_softmax(predicts)."""

    def __init__(self):
        super(SoftCrossEntropy, self).__init__()
        self.softmax = P.Softmax(axis=-1)
        self.log_softmax = P.LogSoftmax(axis=-1)
        self.reduce_mean = P.ReduceMean()
        self.cast = P.Cast()

    def construct(self, predicts, targets):
        """Return the soft cross-entropy between predicted and target logits."""
        soft_targets = self.softmax(targets)
        log_probs = self.log_softmax(predicts)
        mean_loss = self.reduce_mean(-soft_targets * log_probs)
        return self.cast(mean_loss, mstype.float32)
class BertNetworkWithLoss_gd(nn.Cell):
    """
    General-distillation (gd) loss network: distills a teacher BertModel into
    a student TinyBertModel via attention-map and hidden-representation MSE.

    Args:
        teacher_config (BertConfig): The config of the teacher BertModel.
        teacher_ckpt (str): Path of the teacher checkpoint to load.
        student_config (BertConfig): The config of the student TinyBertModel.
        is_training (bool): Specifies whether to use the training mode.
        use_one_hot_embeddings (bool): Specifies whether to use one-hot for embeddings. Default: False.
        is_att_fit (bool): Include the attention-map loss. Default: True.
        is_rep_fit (bool): Include the hidden-representation loss. Default: True.

    Returns:
        Tensor, the loss of the network.
    """
    def __init__(self, teacher_config, teacher_ckpt, student_config, is_training, use_one_hot_embeddings=False,
                 is_att_fit=True, is_rep_fit=True):
        super(BertNetworkWithLoss_gd, self).__init__()
        # load teacher model
        self.teacher = BertModel(teacher_config, False, use_one_hot_embeddings)
        param_dict = load_checkpoint(teacher_ckpt)
        new_param_dict = {}
        for key, value in param_dict.items():
            # Remap checkpoint keys into this cell's 'teacher.' scope.
            new_key = re.sub('^bert.bert.', 'teacher.', key)
            new_param_dict[new_key] = value
        load_param_into_net(self.teacher, new_param_dict)
        # no_grad: freeze the teacher so only the student is trained
        self.teacher.set_train(False)
        params = self.teacher.trainable_params()
        for param in params:
            param.requires_grad = False
        # student model
        self.bert = TinyBertModel(student_config, is_training, use_one_hot_embeddings)
        self.cast = P.Cast()
        # Projects student hidden states to the teacher hidden size so the
        # two can be compared with MSE.
        self.fit_dense = nn.Dense(student_config.hidden_size,
                                  teacher_config.hidden_size).to_float(teacher_config.compute_type)
        self.teacher_layers_num = teacher_config.num_hidden_layers
        self.student_layers_num = student_config.num_hidden_layers
        # Each student layer is matched against one teacher "block" of layers.
        self.layers_per_block = int(self.teacher_layers_num / self.student_layers_num)
        self.is_att_fit = is_att_fit
        self.is_rep_fit = is_rep_fit
        self.loss_mse = nn.MSELoss()
        self.select = P.Select()
        self.zeroslike = P.ZerosLike()
        self.dtype = teacher_config.dtype

    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id):
        """general distill network with loss"""
        # teacher model (frozen)
        _, _, _, teacher_seq_output, teacher_att_output = self.teacher(input_ids, token_type_id, input_mask)
        # student model
        _, _, _, student_seq_output, student_att_output = self.bert(input_ids, token_type_id, input_mask)
        total_loss = 0
        if self.is_att_fit:
            # Pair student attention layer i with the last teacher layer of
            # its block: (i + 1) * layers_per_block - 1.
            selected_teacher_att_output = ()
            selected_student_att_output = ()
            for i in range(self.student_layers_num):
                selected_teacher_att_output += (teacher_att_output[(i + 1) * self.layers_per_block - 1],)
                selected_student_att_output += (student_att_output[i],)
            att_loss = 0
            for i in range(self.student_layers_num):
                student_att = selected_student_att_output[i]
                teacher_att = selected_teacher_att_output[i]
                # Zero out entries <= -100 (presumably masked attention
                # positions — confirm) so they do not dominate the MSE.
                student_att = self.select(student_att <= self.cast(-100.0, mstype.float32), self.zeroslike(student_att),
                                          student_att)
                teacher_att = self.select(teacher_att <= self.cast(-100.0, mstype.float32), self.zeroslike(teacher_att),
                                          teacher_att)
                att_loss += self.loss_mse(student_att, teacher_att)
            total_loss += att_loss
        if self.is_rep_fit:
            # +1: the sequence outputs include the embedding layer output.
            selected_teacher_seq_output = ()
            selected_student_seq_output = ()
            for i in range(self.student_layers_num + 1):
                selected_teacher_seq_output += (teacher_seq_output[i * self.layers_per_block],)
                fit_dense_out = self.fit_dense(student_seq_output[i])
                fit_dense_out = self.cast(fit_dense_out, self.dtype)
                selected_student_seq_output += (fit_dense_out,)
            rep_loss = 0
            for i in range(self.student_layers_num + 1):
                teacher_rep = selected_teacher_seq_output[i]
                student_rep = selected_student_seq_output[i]
                rep_loss += self.loss_mse(student_rep, teacher_rep)
            total_loss += rep_loss
        return self.cast(total_loss, mstype.float32)
class BertTrainWithLossScaleCell(nn.Cell):
    """
    Encapsulation class of bert network training.

    Append an optimizer to the training network after that the construct
    function can be called to create the backward graph.

    Args:
        network (Cell): The training network. Note that loss function should have been added.
        optimizer (Optimizer): Optimizer for updating the weights.
        scale_update_cell (Cell): Cell to do the loss scale. Default: None.
    """
    def __init__(self, network, optimizer, scale_update_cell=None):
        super(BertTrainWithLossScaleCell, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.weights = optimizer.parameters
        self.optimizer = optimizer
        self.grad = C.GradOperation(get_by_list=True,
                                    sens_param=True)
        self.reducer_flag = False
        self.allreduce = P.AllReduce()
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
            self.reducer_flag = True
        # Identity reducer for single-device runs; replaced when distributed.
        self.grad_reducer = F.identity
        self.degree = 1
        if self.reducer_flag:
            self.degree = get_group_size()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, False, self.degree)
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        self.cast = P.Cast()
        # NPU float-status ops used below for overflow detection.
        self.alloc_status = P.NPUAllocFloatStatus()
        self.get_status = P.NPUGetFloatStatus()
        self.clear_status = P.NPUClearFloatStatus()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.base = Tensor(1, mstype.float32)
        self.less_equal = P.LessEqual()
        self.hyper_map = C.HyperMap()
        self.loss_scale = None
        self.loss_scaling_manager = scale_update_cell
        if scale_update_cell:
            self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32))

    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id,
                  sens=None):
        """Defines the computation performed."""
        weights = self.weights
        loss = self.network(input_ids,
                            input_mask,
                            token_type_id)
        # Use the managed loss scale unless an explicit sens override is given.
        if sens is None:
            scaling_sens = self.loss_scale
        else:
            scaling_sens = sens
        # alloc status and clear should be right before gradoperation
        init = self.alloc_status()
        init = F.depend(init, loss)
        clear_status = self.clear_status(init)
        scaling_sens = F.depend(scaling_sens, clear_status)
        grads = self.grad(self.network, weights)(input_ids,
                                                 input_mask,
                                                 token_type_id,
                                                 self.cast(scaling_sens,
                                                           mstype.float32))
        # apply grad reducer on grads
        grads = self.grad_reducer(grads)
        # Undo loss scaling (times device count) then clip.
        grads = self.hyper_map(F.partial(grad_scale, scaling_sens * self.degree), grads)
        grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
        init = F.depend(init, grads)
        get_status = self.get_status(init)
        init = F.depend(init, get_status)
        flag_sum = self.reduce_sum(init, (0,))
        if self.is_distributed:
            # sum overflow flag over devices
            flag_reduce = self.allreduce(flag_sum)
            cond = self.less_equal(self.base, flag_reduce)
        else:
            cond = self.less_equal(self.base, flag_sum)
        overflow = cond
        if sens is None:
            overflow = self.loss_scaling_manager(self.loss_scale, cond)
        # Skip the optimizer step when an overflow was detected.
        if overflow:
            succ = False
        else:
            succ = self.optimizer(grads)
        ret = (loss, cond, scaling_sens)
        return F.depend(ret, succ)
class BertTrainCell(nn.Cell):
    """
    Encapsulation class of bert network training.
    Append an optimizer to the training network after that the construct
    function can be called to create the backward graph.
    Args:
        network (Cell): The training network. Note that loss function should have been added.
        optimizer (Optimizer): Optimizer for updating the weights.
        sens (Number): The adjust parameter. Default: 1.0.
    """
    def __init__(self, network, optimizer, sens=1.0):
        super(BertTrainCell, self).__init__(auto_prefix=False)
        self.network = network
        # Enable gradient computation on the wrapped network.
        self.network.set_grad()
        self.weights = optimizer.parameters
        self.optimizer = optimizer
        # Static sensitivity value fed to the backward pass (no dynamic
        # loss scaling in this cell).
        self.sens = sens
        self.grad = C.GradOperation(get_by_list=True,
                                    sens_param=True)
        self.reducer_flag = False
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
            self.reducer_flag = True
        # Identity reducer for single-device training; replaced below when
        # running data/hybrid parallel.
        self.grad_reducer = F.identity
        self.degree = 1
        if self.reducer_flag:
            mean = context.get_auto_parallel_context("gradients_mean")
            self.degree = get_group_size()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, self.degree)
        self.cast = P.Cast()
        self.hyper_map = C.HyperMap()

    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id):
        """Defines the computation performed."""
        weights = self.weights
        loss = self.network(input_ids,
                            input_mask,
                            token_type_id)
        grads = self.grad(self.network, weights)(input_ids,
                                                 input_mask,
                                                 token_type_id,
                                                 self.cast(F.tuple_to_array((self.sens,)),
                                                           mstype.float32))
        # apply grad reducer on grads
        grads = self.grad_reducer(grads)
        grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
        succ = self.optimizer(grads)
        # Return the loss with a control dependency on the optimizer update.
        return F.depend(loss, succ)
class BertNetworkWithLoss_td(nn.Cell):
    """
    Provide bert pre-training loss through network.

    Wraps a frozen teacher (BertModelCLS) and a trainable student and
    computes the TinyBERT distillation loss: attention + hidden-state MSE
    losses during pre-distillation, or a task loss (soft cross-entropy /
    MSE) during task distillation.

    Args:
        teacher_config (BertConfig): Config of the teacher model.
        teacher_ckpt (str): Path to the teacher checkpoint.
        student_config (BertConfig): Config of the student model.
        student_ckpt (str): Path to the student checkpoint.
        is_training (bool): Whether the student runs in training mode.
        task_type (str): "classification" selects soft cross-entropy,
            anything else selects an MSE regression loss.
        num_labels (int): Number of output labels.
        use_one_hot_embeddings (bool): Use one-hot embeddings. Default: False.
        is_predistill (bool): Pre-distillation (layer fitting) vs task
            distillation. Default: True.
        is_att_fit (bool): Include the attention-map loss. Default: True.
        is_rep_fit (bool): Include the hidden-state loss. Default: True.
        temperature (float): Softmax temperature for soft labels. Default: 1.0.
        dropout_prob (float): Dropout probability. Default: 0.1.

    Returns:
        Tensor, the loss of the network.
    """
    def __init__(self, teacher_config, teacher_ckpt, student_config, student_ckpt,
                 is_training, task_type, num_labels, use_one_hot_embeddings=False,
                 is_predistill=True, is_att_fit=True, is_rep_fit=True,
                 temperature=1.0, dropout_prob=0.1):
        super(BertNetworkWithLoss_td, self).__init__()
        # load teacher model
        self.teacher = BertModelCLS(teacher_config, False, num_labels, dropout_prob,
                                    use_one_hot_embeddings, "teacher")
        param_dict = load_checkpoint(teacher_ckpt)
        # Remap checkpoint keys from the 'bert.' namespace to 'teacher.'.
        new_param_dict = {}
        for key, value in param_dict.items():
            new_key = re.sub('^bert.', 'teacher.', key)
            new_param_dict[new_key] = value
        load_param_into_net(self.teacher, new_param_dict)
        # no_grad: freeze every teacher parameter
        self.teacher.set_train(False)
        params = self.teacher.trainable_params()
        for param in params:
            param.requires_grad = False
        # load student model
        self.bert = BertModelCLS(student_config, is_training, num_labels, dropout_prob,
                                 use_one_hot_embeddings, "student")
        param_dict = load_checkpoint(student_ckpt)
        if is_predistill:
            # NOTE(review): prefixes 'bert.' before substituting
            # 'tinybert_' -> 'bert_'; assumes pre-distill checkpoints carry
            # 'tinybert_'-style keys -- confirm against the checkpoint layout.
            new_param_dict = {}
            for key, value in param_dict.items():
                new_key = re.sub('tinybert_', 'bert_', 'bert.' + key)
                new_param_dict[new_key] = value
            load_param_into_net(self.bert, new_param_dict)
        else:
            new_param_dict = {}
            for key, value in param_dict.items():
                new_key = re.sub('tinybert_', 'bert_', key)
                new_param_dict[new_key] = value
            load_param_into_net(self.bert, new_param_dict)
        self.cast = P.Cast()
        # Projects student hidden states into the teacher's hidden size for
        # the representation (MSE) loss.
        self.fit_dense = nn.Dense(student_config.hidden_size,
                                  teacher_config.hidden_size).to_float(teacher_config.compute_type)
        self.teacher_layers_num = teacher_config.num_hidden_layers
        self.student_layers_num = student_config.num_hidden_layers
        # Each student layer distills from one teacher "block" of layers.
        self.layers_per_block = int(self.teacher_layers_num / self.student_layers_num)
        self.is_predistill = is_predistill
        self.is_att_fit = is_att_fit
        self.is_rep_fit = is_rep_fit
        self.task_type = task_type
        self.temperature = temperature
        self.loss_mse = nn.MSELoss()
        self.select = P.Select()
        self.zeroslike = P.ZerosLike()
        # Fix: the original assigned self.dtype = student_config.dtype and then
        # immediately overwrote it with teacher_config.dtype two lines later;
        # the dead first assignment has been removed.
        self.num_labels = num_labels
        self.dtype = teacher_config.dtype
        self.soft_cross_entropy = SoftCrossEntropy()

    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id,
                  label_ids):
        """task distill network with loss"""
        # teacher model (frozen) forward pass
        teacher_seq_output, teacher_att_output, teacher_logits, _ = self.teacher(input_ids, token_type_id, input_mask)
        # student model forward pass
        student_seq_output, student_att_output, student_logits, _ = self.bert(input_ids, token_type_id, input_mask)
        total_loss = 0
        if self.is_predistill:
            if self.is_att_fit:
                # Pair each student attention map with the last teacher layer
                # of the corresponding block.
                selected_teacher_att_output = ()
                selected_student_att_output = ()
                for i in range(self.student_layers_num):
                    selected_teacher_att_output += (teacher_att_output[(i + 1) * self.layers_per_block - 1],)
                    selected_student_att_output += (student_att_output[i],)
                att_loss = 0
                for i in range(self.student_layers_num):
                    student_att = selected_student_att_output[i]
                    teacher_att = selected_teacher_att_output[i]
                    # Zero heavily-masked positions (<= -100) before the MSE.
                    student_att = self.select(student_att <= self.cast(-100.0, mstype.float32),
                                              self.zeroslike(student_att),
                                              student_att)
                    teacher_att = self.select(teacher_att <= self.cast(-100.0, mstype.float32),
                                              self.zeroslike(teacher_att),
                                              teacher_att)
                    att_loss += self.loss_mse(student_att, teacher_att)
                total_loss += att_loss
            if self.is_rep_fit:
                # Hidden-state loss over student_layers_num + 1 entries
                # (the extra one covers the embedding output).
                selected_teacher_seq_output = ()
                selected_student_seq_output = ()
                for i in range(self.student_layers_num + 1):
                    selected_teacher_seq_output += (teacher_seq_output[i * self.layers_per_block],)
                    fit_dense_out = self.fit_dense(student_seq_output[i])
                    fit_dense_out = self.cast(fit_dense_out, self.dtype)
                    selected_student_seq_output += (fit_dense_out,)
                rep_loss = 0
                for i in range(self.student_layers_num + 1):
                    teacher_rep = selected_teacher_seq_output[i]
                    student_rep = selected_student_seq_output[i]
                    rep_loss += self.loss_mse(student_rep, teacher_rep)
                total_loss += rep_loss
        else:
            if self.task_type == "classification":
                # Soft-label distillation with temperature scaling.
                cls_loss = self.soft_cross_entropy(student_logits / self.temperature, teacher_logits / self.temperature)
            else:
                cls_loss = self.loss_mse(student_logits[len(student_logits) - 1], label_ids[len(label_ids) - 1])
            total_loss += cls_loss
        return self.cast(total_loss, mstype.float32)
class BertEvaluationWithLossScaleCell(nn.Cell):
    """
    Especially defined for finetuning where only four inputs tensor are needed.
    """
    def __init__(self, network, optimizer, scale_update_cell=None):
        super(BertEvaluationWithLossScaleCell, self).__init__(auto_prefix=False)
        self.network = network
        # Enable gradient computation on the wrapped network.
        self.network.set_grad()
        self.weights = optimizer.parameters
        self.optimizer = optimizer
        self.grad = C.GradOperation(get_by_list=True,
                                    sens_param=True)
        self.reducer_flag = False
        self.allreduce = P.AllReduce()
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
            self.reducer_flag = True
        # Identity reducer for single-device runs.
        self.grad_reducer = F.identity
        self.degree = 1
        if self.reducer_flag:
            self.degree = get_group_size()
            # NOTE(review): `mean` is hardcoded to False here, whereas
            # BertTrainCell reads the "gradients_mean" context value --
            # confirm the asymmetry is intentional.
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, False, self.degree)
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        self.cast = P.Cast()
        # NPU float-status ops used for inf/nan (overflow) detection.
        self.alloc_status = P.NPUAllocFloatStatus()
        self.get_status = P.NPUGetFloatStatus()
        self.clear_status = P.NPUClearFloatStatus()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.base = Tensor(1, mstype.float32)
        self.less_equal = P.LessEqual()
        self.hyper_map = C.HyperMap()
        self.loss_scale = None
        self.loss_scaling_manager = scale_update_cell
        if scale_update_cell:
            self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32))

    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id,
                  label_ids,
                  sens=None):
        """Defines the computation performed."""
        weights = self.weights
        loss = self.network(input_ids,
                            input_mask,
                            token_type_id,
                            label_ids)
        if sens is None:
            scaling_sens = self.loss_scale
        else:
            scaling_sens = sens
        # alloc status and clear should be right before gradoperation
        init = self.alloc_status()
        # F.depend pins execution order: clear NPU float status only after
        # the forward pass has run.
        init = F.depend(init, loss)
        clear_status = self.clear_status(init)
        scaling_sens = F.depend(scaling_sens, clear_status)
        grads = self.grad(self.network, weights)(input_ids,
                                                 input_mask,
                                                 token_type_id,
                                                 label_ids,
                                                 self.cast(scaling_sens,
                                                           mstype.float32))
        # apply grad reducer on grads
        grads = self.grad_reducer(grads)
        # Undo the loss scale (times the reduce degree) before clipping.
        grads = self.hyper_map(F.partial(grad_scale, scaling_sens * self.degree), grads)
        grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
        # Read float status after the backward pass to catch inf/nan.
        init = F.depend(init, grads)
        get_status = self.get_status(init)
        init = F.depend(init, get_status)
        flag_sum = self.reduce_sum(init, (0,))
        if self.is_distributed:
            # sum overflow flag over devices
            flag_reduce = self.allreduce(flag_sum)
            cond = self.less_equal(self.base, flag_reduce)
        else:
            cond = self.less_equal(self.base, flag_sum)
        overflow = cond
        if sens is None:
            # The manager may adjust the loss scale and overrides the flag.
            overflow = self.loss_scaling_manager(self.loss_scale, cond)
        if overflow:
            # Skip the parameter update on overflow.
            succ = False
        else:
            succ = self.optimizer(grads)
        ret = (loss, cond, scaling_sens)
        return F.depend(ret, succ)
class BertEvaluationCell(nn.Cell):
    """
    Especially defined for finetuning where only four inputs tensor are needed.
    """
    def __init__(self, network, optimizer, sens=1.0):
        super(BertEvaluationCell, self).__init__(auto_prefix=False)
        self.network = network
        # Enable gradient computation on the wrapped network.
        self.network.set_grad()
        self.weights = optimizer.parameters
        self.optimizer = optimizer
        # Static sensitivity value fed to the backward pass.
        self.sens = sens
        self.grad = C.GradOperation(get_by_list=True,
                                    sens_param=True)
        self.reducer_flag = False
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
            self.reducer_flag = True
        # Identity reducer for single-device runs.
        self.grad_reducer = F.identity
        self.degree = 1
        if self.reducer_flag:
            mean = context.get_auto_parallel_context("gradients_mean")
            self.degree = get_group_size()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, self.degree)
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        self.cast = P.Cast()
        self.hyper_map = C.HyperMap()

    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id,
                  label_ids):
        """Defines the computation performed."""
        weights = self.weights
        loss = self.network(input_ids,
                            input_mask,
                            token_type_id,
                            label_ids)
        grads = self.grad(self.network, weights)(input_ids,
                                                 input_mask,
                                                 token_type_id,
                                                 label_ids,
                                                 self.cast(F.tuple_to_array((self.sens,)),
                                                           mstype.float32))
        # apply grad reducer on grads
        grads = self.grad_reducer(grads)
        grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
        succ = self.optimizer(grads)
        # Return the loss with a control dependency on the optimizer update.
        return F.depend(loss, succ)
import netmiko
# Configure command line arguments for the firewall-control CLI.
import argparse
parser=argparse.ArgumentParser(description='Switch on and off preconfigured edge router firewall rules from command line')
parser.add_argument('-t', '--target', type=str, metavar='', required=False, help='The user being targeted')
# Only one action flag may be given per invocation.
group = parser.add_mutually_exclusive_group()
group.add_argument('-e', '--enable', action='store_true', help='Enable Liams Internet')
group.add_argument('-d', '--disable', action='store_true', help='Disable Liams internet')
group.add_argument('-c', '--configure', action='store_true', help='Put the router in configuration mode')
group.add_argument('-u', '--uptime', action='store_true', help='Is the router up?')
group.add_argument('-s', '--show', action='store_true', help='Show the firewall rules')
group.add_argument('-x', '--exit', action='store_true', help='Leave the configuration console?')
group.add_argument('-q', '--quit', action='store_true', help='Leave and shut connection')
# NOTE(review): --host is a store_true flag although its help text reads
# like it should take a hostname value -- confirm intended usage.
group.add_argument('-n', '--host', action='store_true', help='Router host name')
# NOTE(review): parsing happens at import time; importing this module with
# unrelated argv will exit on unknown flags -- consider guarding with
# `if __name__ == '__main__':`.
args=parser.parse_args()
def establishConnection(ip, device, user, password) -> object:
    """Open an SSH session to a network device via netmiko.

    Args:
        ip: Device IP address.
        device: netmiko device_type string.
        user: Login username.
        password: Login password.

    Returns:
        A live netmiko connection object, or None when the connection
        attempt fails for any reason (the error is printed).
    """
    try:
        return netmiko.ConnectHandler(ip=ip, device_type=device, username=user, password=password)
    except Exception as e:
        print("There has been a connection error", e)
        # Fix: return None explicitly; the original assigned a dead local
        # (`connection = None`) and fell off the end of the function.
        return None
def commit(connection):
    """Commit pending configuration changes on the router.

    Args:
        connection: An open netmiko connection in configuration mode.
    """
    try:
        connection.send_command("commit")
    except Exception as e:
        print("That command was not successful ", e)
    else:
        # Fix: report success only when the command succeeded; the original
        # printed the 'committed' banner even after an exception.
        print('..............committed...............')
def save(connection):
    """Persist the router configuration so the change survives a reboot.

    Args:
        connection: An open netmiko connection.
    """
    try:
        connection.send_command("save")
    except Exception as err:
        print("That command was not successful ", err)
def upTime(connection):
    """Query the router and return its uptime string.

    Args:
        connection: An open netmiko connection.

    Returns:
        The device's 'uptime' command output, or None on failure.
    """
    try:
        result = connection.send_command("uptime")
    except Exception as err:
        print("That command was not successful ", err)
    else:
        return result
def disconnect(connection):
    """Tear down the SSH session to the router, reporting the outcome.

    Args:
        connection: An open netmiko connection.
    """
    try:
        connection.disconnect()
    except Exception:
        print('Could not disconnect')
    else:
        print('.............disconnected.............')
def show(connection):
    """Print the device's current firewall configuration.

    Args:
        connection: An open netmiko connection.
    """
    try:
        output = connection.send_command('show firewall')
    except Exception as e:
        # Fix: corrected typo in the error message ('shoe' -> 'show').
        print('Could not show connection', e)
    else:
        print(output)
def configure(connection) -> object:
    """Enter the router's configuration mode so firewall rules can be edited.

    Args:
        connection: An open netmiko connection.
    """
    try:
        connection.config_mode('configure', 'configure\r\n')
    except Exception as err:
        print("That command was not successful ", err)
def exit(connection):
    """Method to get out of edit mode"""
    # NOTE(review): this function shadows the builtin `exit`. Renaming would
    # break existing callers, so it is left as-is; avoid importing this
    # module with `from ... import *`.
    try:
        connection.exit_config_mode('exit')
    except Exception as e:
        print("That command was not successful ", e)
def internetOffHttp(connection):
    """Remove the 'disable' flag from firewall rule 1 (the rule
    preconfigured in the edge router for this computer's MAC address),
    re-activating that rule.

    Args:
        connection: An open netmiko connection in configuration mode.
    """
    try:
        connection.send_command("delete firewall name Internet rule 1 disable")
    except Exception as err:
        print("That command was not successful ", err)
def internetOffHttps(connection):
    """turns rule number 1 off - preconfigured in the edge router to enable internet traffic going to the internet
    from the mac address of this computer"""
    # NOTE(review): this sends the exact same command as internetOffHttp
    # (rule 1). If HTTPS traffic is governed by a separate rule this looks
    # like a copy-paste bug -- confirm the intended rule number.
    try:
        connection.send_command("delete firewall name Internet rule 1 disable")
    except Exception as e:
        print("That command was not successful ", e)
def internetOnHttp(connection):
    """Set the 'disable' flag on firewall rule 1 (the rule preconfigured
    in the edge router for this computer's MAC address), deactivating
    that rule.

    Args:
        connection: An open netmiko connection in configuration mode.
    """
    try:
        connection.send_command("set firewall name Internet rule 1 disable")
    except Exception as err:
        print("That command was not successful ", err)
def internetOnHttps(connection):
    """turns rule number 1 on - preconfigured in the edge router to block internet traffic going to the internet
    from the mac address of this computer"""
    # NOTE(review): this sends the exact same command as internetOnHttp
    # (rule 1). If HTTPS traffic is governed by a separate rule this looks
    # like a copy-paste bug -- confirm the intended rule number.
    try:
        connection.send_command("set firewall name Internet rule 1 disable")
    except Exception as e:
        print("That command was not successful ", e)
# configure command line arguements
import argparse
parser=argparse.ArgumentParser(description='Switch on and off preconfigured edge router firewall rules from command line')
parser.add_argument('-t', '--target', type=str, metavar='', required=False, help='The user being targeted')
group = parser.add_mutually_exclusive_group()
group.add_argument('-e', '--enable', action='store_true', help='Enable Liams Internet')
group.add_argument('-d', '--disable', action='store_true', help='Disable Liams internet')
group.add_argument('-c', '--configure', action='store_true', help='Put the router in configuration mode')
group.add_argument('-u', '--uptime', action='store_true', help='Is the router up?')
group.add_argument('-s', '--show', action='store_true', help='Show the firewall rules')
group.add_argument('-x', '--exit', action='store_true', help='Leave the configuration console?')
group.add_argument('-q', '--quit', action='store_true', help='Leave and shut connection')
group.add_argument('-n', '--host', action='store_true', help='Router host name')
args=parser.parse_args()
def establishConnection(ip, device, user, password) -> object:
"""
:rtype: object
"""
try:
return netmiko.ConnectHandler(ip=ip, device_type=device, username=user, password=password)
except Exception as e:
print("There has been a connection error", e)
connection = None
def commit(connection):
"""Commits a change in the router and enforces the rule"""
try:
connection.send_command("commit")
except Exception as e:
print("That command was not successful ", e)
print('..............committed...............')
def save(connection):
"""Saves the configuration so that it is a permanent change"""
try:
connection.send_command("save")
except Exception as e:
print("That command was not successful ", e)
def upTime(connection):
"""Return the amount of time that the edge router has been up"""
try:
return (connection.send_command("uptime"))
except Exception as e:
print("That command was not successful ", e)
def disconnect(connection):
"""method to disconnect from the router """
try:
connection.disconnect()
except Exception as e:
print('Could not disconnect')
else:
print('.............disconnected.............')
def show(connection):
"""method to show firewall configuration """
try:
a=connection.send_command('show firewall')
except Exception as e:
print('Could not shoe connection', e)
else:
print(a)
def configure(connection) -> object:
"""Method to put the router into a configuration mode so that you can configure the router firewall."""
try:
connection.config_mode('configure', 'configure\r\n')
except Exception as e:
print("That command was not successful ", e)
def exit(connection):
"""Method to get out of edit mode"""
try:
connection.exit_config_mode('exit')
except Exception as e:
print("That command was not successful ", e)
def internetOffHttp(connection):
"""turns rule number 1 off - preconfigured in the edge router to enable internet traffic going to the internet
from the mac address of this computer"""
try:
connection.send_command("delete firewall name Internet rule 1 disable")
except Exception as e:
print("That command was not successful ", e)
def internetOffHttps(connection):
"""turns rule number 1 off - preconfigured in the edge router to enable internet traffic going to the internet
from the mac address of this computer"""
try:
connection.send_command("delete firewall name Internet rule 1 disable")
except Exception as e:
print("That command was not successful ", e)
def internetOnHttp(connection):
"""turns rule number 1 on - preconfigured in the edge router to block internet traffic going to the internet
from the mac address of this computer"""
try:
connection.send_command("set firewall name Internet rule 1 disable")
except Exception as e:
print("That command was not successful ", e)
def internetOnHttps(connection):
"""turns rule number 1 on - preconfigured in the edge router to block internet traffic going to the internet
from the mac address of this computer"""
try:
connection.send_command("set firewall name Internet rule 1 disable")
except Exception as e:
print("That command was not successful ", e) | 0.270769 | 0.108472 |
"""Builder function for image resizing operations."""
import functools
import tensorflow.compat.v1 as tf
from object_detection.core import preprocessor
from object_detection.protos import image_resizer_pb2
def _tf_resize_method(resize_method):
    """Maps image resize method from enumeration type to TensorFlow.

    Args:
        resize_method: The resize_method attribute of keep_aspect_ratio_resizer
            or fixed_shape_resizer.

    Returns:
        The corresponding tf.image.ResizeMethod value.

    Raises:
        ValueError: if `resize_method` is of unknown type.
    """
    proto_to_tf = {
        image_resizer_pb2.BILINEAR: tf.image.ResizeMethod.BILINEAR,
        image_resizer_pb2.NEAREST_NEIGHBOR: tf.image.ResizeMethod.NEAREST_NEIGHBOR,
        image_resizer_pb2.BICUBIC: tf.image.ResizeMethod.BICUBIC,
        image_resizer_pb2.AREA: tf.image.ResizeMethod.AREA,
    }
    try:
        return proto_to_tf[resize_method]
    except KeyError:
        raise ValueError('Unknown resize_method')
def build(image_resizer_config):
    """Builds callable for image resizing operations.

    Args:
        image_resizer_config: image_resizer.proto object containing parameters for
            an image resizing operation.

    Returns:
        image_resizer_fn: Callable for image resizing. This callable always takes
            a rank-3 image tensor (corresponding to a single image) and returns a
            rank-3 image tensor, possibly with new spatial dimensions.

    Raises:
        ValueError: if `image_resizer_config` is of incorrect type.
        ValueError: if `image_resizer_config.image_resizer_oneof` is of expected
            type.
        ValueError: if min_dimension > max_dimension when keep_aspect_ratio_resizer
            is used.
    """
    if not isinstance(image_resizer_config, image_resizer_pb2.ImageResizer):
        raise ValueError('image_resizer_config not of type '
                         'image_resizer_pb2.ImageResizer.')
    image_resizer_oneof = image_resizer_config.WhichOneof('image_resizer_oneof')
    if image_resizer_oneof == 'keep_aspect_ratio_resizer':
        keep_aspect_ratio_config = image_resizer_config.keep_aspect_ratio_resizer
        if not (keep_aspect_ratio_config.min_dimension <=
                keep_aspect_ratio_config.max_dimension):
            raise ValueError('min_dimension > max_dimension')
        method = _tf_resize_method(keep_aspect_ratio_config.resize_method)
        image_resizer_fn = functools.partial(
            preprocessor.resize_to_range,
            min_dimension=keep_aspect_ratio_config.min_dimension,
            max_dimension=keep_aspect_ratio_config.max_dimension,
            method=method,
            pad_to_max_dimension=keep_aspect_ratio_config.pad_to_max_dimension)
        # Without grayscale conversion the resizer is returned directly;
        # otherwise control falls through to the grayscale wrapper below.
        if not keep_aspect_ratio_config.convert_to_grayscale:
            return image_resizer_fn
    elif image_resizer_oneof == 'fixed_shape_resizer':
        fixed_shape_resizer_config = image_resizer_config.fixed_shape_resizer
        method = _tf_resize_method(fixed_shape_resizer_config.resize_method)
        image_resizer_fn = functools.partial(
            preprocessor.resize_image,
            new_height=fixed_shape_resizer_config.height,
            new_width=fixed_shape_resizer_config.width,
            method=method)
        if not fixed_shape_resizer_config.convert_to_grayscale:
            return image_resizer_fn
    else:
        raise ValueError(
            'Invalid image resizer option: \'%s\'.' % image_resizer_oneof)

    def grayscale_image_resizer(image):
        # Resize first, then convert the result to single-channel grayscale
        # and patch the last entry of the reported shape accordingly.
        [resized_image, resized_image_shape] = image_resizer_fn(image)
        grayscale_image = preprocessor.rgb_to_gray(resized_image)
        grayscale_image_shape = tf.concat([resized_image_shape[:-1], [1]], 0)
        return [grayscale_image, grayscale_image_shape]
    return functools.partial(grayscale_image_resizer)
import functools
import tensorflow.compat.v1 as tf
from object_detection.core import preprocessor
from object_detection.protos import image_resizer_pb2
def _tf_resize_method(resize_method):
"""Maps image resize method from enumeration type to TensorFlow.
Args:
resize_method: The resize_method attribute of keep_aspect_ratio_resizer or
fixed_shape_resizer.
Returns:
method: The corresponding TensorFlow ResizeMethod.
Raises:
ValueError: if `resize_method` is of unknown type.
"""
dict_method = {
image_resizer_pb2.BILINEAR:
tf.image.ResizeMethod.BILINEAR,
image_resizer_pb2.NEAREST_NEIGHBOR:
tf.image.ResizeMethod.NEAREST_NEIGHBOR,
image_resizer_pb2.BICUBIC:
tf.image.ResizeMethod.BICUBIC,
image_resizer_pb2.AREA:
tf.image.ResizeMethod.AREA
}
if resize_method in dict_method:
return dict_method[resize_method]
else:
raise ValueError('Unknown resize_method')
def build(image_resizer_config):
"""Builds callable for image resizing operations.
Args:
image_resizer_config: image_resizer.proto object containing parameters for
an image resizing operation.
Returns:
image_resizer_fn: Callable for image resizing. This callable always takes
a rank-3 image tensor (corresponding to a single image) and returns a
rank-3 image tensor, possibly with new spatial dimensions.
Raises:
ValueError: if `image_resizer_config` is of incorrect type.
ValueError: if `image_resizer_config.image_resizer_oneof` is of expected
type.
ValueError: if min_dimension > max_dimension when keep_aspect_ratio_resizer
is used.
"""
if not isinstance(image_resizer_config, image_resizer_pb2.ImageResizer):
raise ValueError('image_resizer_config not of type '
'image_resizer_pb2.ImageResizer.')
image_resizer_oneof = image_resizer_config.WhichOneof('image_resizer_oneof')
if image_resizer_oneof == 'keep_aspect_ratio_resizer':
keep_aspect_ratio_config = image_resizer_config.keep_aspect_ratio_resizer
if not (keep_aspect_ratio_config.min_dimension <=
keep_aspect_ratio_config.max_dimension):
raise ValueError('min_dimension > max_dimension')
method = _tf_resize_method(keep_aspect_ratio_config.resize_method)
image_resizer_fn = functools.partial(
preprocessor.resize_to_range,
min_dimension=keep_aspect_ratio_config.min_dimension,
max_dimension=keep_aspect_ratio_config.max_dimension,
method=method,
pad_to_max_dimension=keep_aspect_ratio_config.pad_to_max_dimension)
if not keep_aspect_ratio_config.convert_to_grayscale:
return image_resizer_fn
elif image_resizer_oneof == 'fixed_shape_resizer':
fixed_shape_resizer_config = image_resizer_config.fixed_shape_resizer
method = _tf_resize_method(fixed_shape_resizer_config.resize_method)
image_resizer_fn = functools.partial(
preprocessor.resize_image,
new_height=fixed_shape_resizer_config.height,
new_width=fixed_shape_resizer_config.width,
method=method)
if not fixed_shape_resizer_config.convert_to_grayscale:
return image_resizer_fn
else:
raise ValueError(
'Invalid image resizer option: \'%s\'.' % image_resizer_oneof)
def grayscale_image_resizer(image):
[resized_image, resized_image_shape] = image_resizer_fn(image)
grayscale_image = preprocessor.rgb_to_gray(resized_image)
grayscale_image_shape = tf.concat([resized_image_shape[:-1], [1]], 0)
return [grayscale_image, grayscale_image_shape]
return functools.partial(grayscale_image_resizer) | 0.946782 | 0.436922 |
from parglare import Parser, Grammar
from utils.rvs.expressions import RVFuncs
### Production rules for outer expressions, inner expressions, and events
# Terminals shared by every grammar: a variable name (optionally prefixed
# with '#') and an int/float number literal.
expression_terminals = r"""
variable: /[#]?[a-zA-Z_$][a-zA-Z_$0-9]*/;
number: /\d+(\.\d+)?/;
"""
# Outer-expression grammar: arithmetic over expected values, with
# abs-bars, unary signs, and (nan)min/max over comma-separated lists.
expression_productions = r"""
expr
: term
| expr '+' term
| expr '-' term
;
exprs
: expr
| exprs ',' expr
;
term
: unary
| term '*' unary
| term '/' unary
;
unary
: primary
| '|' expr '|'
| '+' unary
| '-' unary
| 'max(' exprs ')'
| 'min(' exprs ')'
| 'nanmax(' exprs ')'
| 'nanmin(' exprs ')'
;
primary
: expected_value
| number
| '(' expr ')'
;
expected_value
: 'E{' variable '(' inner_expr ')}[' sample_set ']'
| 'E[' sample_set ']'
;
sample_set
: inner_expr
| inner_expr '|' event_expr
| event_expr
| event_expr '|' event_expr
;
"""
# Inner-expression grammar: plain arithmetic over variables and numbers
# (no expected values).
inner_expression_productions = r"""
inner_expr
: inner_term
| inner_expr '+' inner_term
| inner_expr '-' inner_term
;
inner_term
: inner_unary
| inner_term '*' inner_unary
| inner_term '/' inner_unary
;
inner_unary
: inner_primary
| '|' inner_expr '|'
| '+' inner_unary
| '-' inner_unary
;
inner_primary
: number
| variable
| '(' inner_expr ')'
;
"""
# Event grammar: boolean combinations of comparisons between inner
# expressions ('||' = or, ',' = and, '~' = not).
event_productions = r"""
event_expr
: event_term
| event_expr '||' event_term
;
event_term
: event_unary
| event_unary ',' event_term
;
event_unary
: comparison
| '~' event_unary
| '(' event_expr ')'
;
comparison
: inner_expr inequality inner_expr
| inner_expr equality inner_expr
| inner_expr inequality equality inner_expr
;
"""
# NOTE(review): /[<|>|!]/ is a character class, so it also matches a
# literal '|'; if only <, >, ! were intended, /[<>!]/ would be the
# precise form -- confirm before changing.
event_terminals = r"""
inequality: /[<|>|!]/;
equality: /[=]/;
"""
### Actions for outer expressions, inner expressions, and events
# Semantic actions for the outer-expression grammar. Each key maps a
# nonterminal to a list of callables positionally aligned with that
# nonterminal's alternatives in expression_productions; `nodes` holds the
# already-reduced child values of the matched production.
expression_production_actions = {
    "expr": [lambda _, nodes: nodes[0],
             lambda _, nodes: RVFuncs.sum(nodes[0], nodes[2]),
             # a - b is encoded as a + (-b)
             lambda _, nodes: RVFuncs.sum(nodes[0], RVFuncs.negative(nodes[2]))],
    # Accumulates a comma-separated expression list into a flat Python list.
    "exprs": [lambda _, nodes: nodes[0],
              lambda _, nodes: ([*nodes[0], nodes[2]] if isinstance(nodes[0], list) else [nodes[0], nodes[2]])],
    "term": [lambda _, nodes: nodes[0],
             lambda _, nodes: RVFuncs.product(nodes[0], nodes[2]),
             lambda _, nodes: RVFuncs.fraction(nodes[0], nodes[2])],
    "unary": [lambda _, nodes: nodes[0],
              lambda _, nodes: RVFuncs.abs(nodes[1]),
              lambda _, nodes: nodes[1],
              lambda _, nodes: RVFuncs.negative(nodes[1]),
              lambda _, nodes: RVFuncs.max(*nodes[1]),
              lambda _, nodes: RVFuncs.min(*nodes[1]),
              lambda _, nodes: RVFuncs.nanmax(*nodes[1]),
              lambda _, nodes: RVFuncs.nanmin(*nodes[1])],
    "primary": [lambda _, nodes: nodes[0],
                lambda _, nodes: RVFuncs.constant(nodes[0]),
                lambda _, nodes: nodes[1]],
    "expected_value": [
        # E{func(expr)}[sample_set]: nodes[1] is the function name,
        # nodes[3] the inner expression, nodes[5] the sample set.
        lambda _, nodes: RVFuncs.expected_value(nodes[5], is_func=nodes[1], is_expr=nodes[3]),
        lambda _, nodes: RVFuncs.expected_value(nodes[1])],
    "sample_set": [
        lambda _, nodes: RVFuncs.sample_set(nodes[0]),
        lambda _, nodes: RVFuncs.sample_set(nodes[0], nodes[2]),
        lambda _, nodes: RVFuncs.sample_set(nodes[0]),
        lambda _, nodes: RVFuncs.sample_set(nodes[0], nodes[2])],
}
# Terminal actions pass the matched lexeme through unchanged.
expression_terminal_actions = {
    "number": lambda _, value: value,
    "variable": lambda _, value: value,
}
# Actions for the inner-expression grammar (arithmetic only).
inner_expression_production_actions = {
    "inner_expr": [
        lambda _, nodes: nodes[0],
        lambda _, nodes: RVFuncs.sum(nodes[0], nodes[2]),
        lambda _, nodes: RVFuncs.sum(nodes[0], RVFuncs.negative(nodes[2]))],
    "inner_term": [
        lambda _, nodes: nodes[0],
        lambda _, nodes: RVFuncs.product(nodes[0], nodes[2]),
        lambda _, nodes: RVFuncs.fraction(nodes[0], nodes[2])],
    "inner_unary": [
        lambda _, nodes: nodes[0],
        lambda _, nodes: RVFuncs.abs(nodes[1]),
        lambda _, nodes: nodes[1],
        lambda _, nodes: RVFuncs.negative(nodes[1])],
    "inner_primary": [
        lambda _, nodes: RVFuncs.constant(nodes[0]),
        lambda _, nodes: RVFuncs.variable(nodes[0]),
        lambda _, nodes: nodes[1]],
}
# Actions for the event grammar (boolean algebra over comparisons).
event_production_actions = {
    "event_expr": [
        lambda _, nodes: nodes[0],
        lambda _, nodes: RVFuncs.logical_or([nodes[0], nodes[2]])],
    "event_term": [
        lambda _, nodes: nodes[0],
        lambda _, nodes: RVFuncs.logical_and([nodes[0], nodes[2]])],
    "event_unary": [
        lambda _, nodes: nodes[0],
        lambda _, nodes: RVFuncs.logical_not(nodes[1]),
        lambda _, nodes: nodes[1]],
    "comparison": [
        lambda _, nodes: RVFuncs.comparator_variable(nodes[0], nodes[1], nodes[2]),
        lambda _, nodes: RVFuncs.comparator_variable(nodes[0], nodes[1], nodes[2]),
        # Two-character operators such as '<=' arrive as separate
        # inequality + equality tokens and are re-joined here.
        lambda _, nodes: RVFuncs.comparator_variable(nodes[0], nodes[1] + nodes[2], nodes[3])]
}
event_terminal_actions = {
    "inequality": lambda _, value: value,
    "equality": lambda _, value: value
}
### Final grammar and action specifications
# Each full grammar pastes together the production sections it needs,
# followed by a 'terminals' section as required by parglare's grammar
# language; the matching action dicts are merged the same way.
expression_grammar = '%s\n%s\n%s\nterminals\n%s\n%s' % (expression_productions, inner_expression_productions, event_productions, expression_terminals, event_terminals)
expression_actions = dict(**expression_production_actions,
                          **inner_expression_production_actions,
                          **event_production_actions,
                          **expression_terminal_actions,
                          **event_terminal_actions)
inner_expression_grammar = '%s\n%s\nterminals\n%s\n%s' % (inner_expression_productions, event_productions, expression_terminals, event_terminals)
inner_expression_actions = dict(**inner_expression_production_actions,
                                **event_production_actions,
                                **expression_terminal_actions,
                                **event_terminal_actions)
event_grammar = '%s\n%s\nterminals\n%s\n%s' % (event_productions, inner_expression_productions, expression_terminals, event_terminals)
event_actions = dict(**inner_expression_production_actions,
                     **event_production_actions,
                     **expression_terminal_actions,
                     **event_terminal_actions)
# Lookup tables keyed by parser mode ('outer' | 'inner' | 'event').
grammars = {
    'outer' : expression_grammar,
    'inner' : inner_expression_grammar,
    'event' : event_grammar
}
action_sets = {
    'outer' : expression_actions,
    'inner' : inner_expression_actions,
    'event' : event_actions
}
def get_parser(debug=False, mode='outer'):
    """Build a parglare Parser for one of the three grammars.

    Args:
        debug: Forwarded to parglare.Parser for verbose parse logging.
        mode: One of 'outer', 'inner' or 'event' (case-insensitive),
            selecting which grammar and action set to use.

    Returns:
        A configured parglare.Parser.

    Raises:
        ValueError: if `mode` is not a recognized grammar name.
    """
    # Normalize once instead of calling mode.lower() twice.
    key = mode.lower()
    if key not in grammars:
        # Fix: fail with a clear message instead of an opaque KeyError.
        raise ValueError("Unknown parser mode: %r (expected 'outer', 'inner' or 'event')" % mode)
    g = Grammar.from_string(grammars[key])
    return Parser(g, debug=debug, actions=action_sets[key])
from utils.rvs.expressions import RVFuncs
### Production rules for outer expressions, inner expressions, and events
expression_terminals = r"""
variable: /[#]?[a-zA-Z_$][a-zA-Z_$0-9]*/;
number: /\d+(\.\d+)?/;
"""
expression_productions = r"""
expr
: term
| expr '+' term
| expr '-' term
;
exprs
: expr
| exprs ',' expr
;
term
: unary
| term '*' unary
| term '/' unary
;
unary
: primary
| '|' expr '|'
| '+' unary
| '-' unary
| 'max(' exprs ')'
| 'min(' exprs ')'
| 'nanmax(' exprs ')'
| 'nanmin(' exprs ')'
;
primary
: expected_value
| number
| '(' expr ')'
;
expected_value
: 'E{' variable '(' inner_expr ')}[' sample_set ']'
| 'E[' sample_set ']'
;
sample_set
: inner_expr
| inner_expr '|' event_expr
| event_expr
| event_expr '|' event_expr
;
"""
inner_expression_productions = r"""
inner_expr
: inner_term
| inner_expr '+' inner_term
| inner_expr '-' inner_term
;
inner_term
: inner_unary
| inner_term '*' inner_unary
| inner_term '/' inner_unary
;
inner_unary
: inner_primary
| '|' inner_expr '|'
| '+' inner_unary
| '-' inner_unary
;
inner_primary
: number
| variable
| '(' inner_expr ')'
;
"""
event_productions = r"""
event_expr
: event_term
| event_expr '||' event_term
;
event_term
: event_unary
| event_unary ',' event_term
;
event_unary
: comparison
| '~' event_unary
| '(' event_expr ')'
;
comparison
: inner_expr inequality inner_expr
| inner_expr equality inner_expr
| inner_expr inequality equality inner_expr
;
"""
event_terminals = r"""
inequality: /[<|>|!]/;
equality: /[=]/;
"""
### Actions for outer expressions, inner expressions, and events
expression_production_actions = {
"expr": [ lambda _, nodes: nodes[0],
lambda _, nodes: RVFuncs.sum(nodes[0], nodes[2]),
lambda _, nodes: RVFuncs.sum(nodes[0], RVFuncs.negative(nodes[2]))],
"exprs": [ lambda _, nodes: nodes[0],
lambda _, nodes: ([*nodes[0], nodes[2]] if isinstance(nodes[0], list) else [nodes[0],nodes[2]])],
"term": [ lambda _, nodes: nodes[0],
lambda _, nodes: RVFuncs.product(nodes[0], nodes[2]),
lambda _, nodes: RVFuncs.fraction(nodes[0], nodes[2])],
"unary": [ lambda _, nodes: nodes[0],
lambda _, nodes: RVFuncs.abs(nodes[1]),
lambda _, nodes: nodes[1],
lambda _, nodes: RVFuncs.negative(nodes[1]),
lambda _, nodes: RVFuncs.max(*nodes[1]),
lambda _, nodes: RVFuncs.min(*nodes[1]),
lambda _, nodes: RVFuncs.nanmax(*nodes[1]),
lambda _, nodes: RVFuncs.nanmin(*nodes[1])],
"primary": [ lambda _, nodes: nodes[0],
lambda _, nodes: RVFuncs.constant(nodes[0]),
lambda _, nodes: nodes[1]],
"expected_value": [
lambda _, nodes: RVFuncs.expected_value(nodes[5], is_func=nodes[1], is_expr=nodes[3]),
lambda _, nodes: RVFuncs.expected_value(nodes[1])],
"sample_set": [
lambda _, nodes: RVFuncs.sample_set(nodes[0]),
lambda _, nodes: RVFuncs.sample_set(nodes[0],nodes[2]),
lambda _, nodes: RVFuncs.sample_set(nodes[0]),
lambda _, nodes: RVFuncs.sample_set(nodes[0],nodes[2])],
}
expression_terminal_actions = {
"number": lambda _, value: value,
"variable": lambda _, value: value,
}
inner_expression_production_actions = {
"inner_expr": [
lambda _, nodes: nodes[0],
lambda _, nodes: RVFuncs.sum(nodes[0], nodes[2]),
lambda _, nodes: RVFuncs.sum(nodes[0], RVFuncs.negative(nodes[2]))],
"inner_term": [
lambda _, nodes: nodes[0],
lambda _, nodes: RVFuncs.product(nodes[0], nodes[2]),
lambda _, nodes: RVFuncs.fraction(nodes[0], nodes[2])],
"inner_unary": [
lambda _, nodes: nodes[0],
lambda _, nodes: RVFuncs.abs(nodes[1]),
lambda _, nodes: nodes[1],
lambda _, nodes: RVFuncs.negative(nodes[1])],
"inner_primary": [
lambda _, nodes: RVFuncs.constant(nodes[0]),
lambda _, nodes: RVFuncs.variable(nodes[0]),
lambda _, nodes: nodes[1]],
}
event_production_actions = {
"event_expr": [
lambda _, nodes: nodes[0],
lambda _, nodes: RVFuncs.logical_or([ nodes[0], nodes[2] ])],
"event_term": [
lambda _, nodes: nodes[0],
lambda _, nodes: RVFuncs.logical_and([ nodes[0], nodes[2] ])],
"event_unary": [
lambda _, nodes: nodes[0],
lambda _, nodes: RVFuncs.logical_not(nodes[1]),
lambda _, nodes: nodes[1]],
"comparison": [
lambda _, nodes: RVFuncs.comparator_variable(nodes[0], nodes[1], nodes[2]),
lambda _, nodes: RVFuncs.comparator_variable(nodes[0], nodes[1], nodes[2]),
lambda _, nodes: RVFuncs.comparator_variable(nodes[0], nodes[1]+nodes[2], nodes[3])]
}
event_terminal_actions = {
"inequality": lambda _, value: value,
"equality": lambda _, value: value
}
### Final grammar and action specifications
expression_grammar = '%s\n%s\n%s\nterminals\n%s\n%s' % (expression_productions, inner_expression_productions, event_productions, expression_terminals, event_terminals)
expression_actions = dict( **expression_production_actions,
**inner_expression_production_actions,
**event_production_actions,
**expression_terminal_actions,
**event_terminal_actions)
inner_expression_grammar = '%s\n%s\nterminals\n%s\n%s' % (inner_expression_productions, event_productions, expression_terminals, event_terminals)
inner_expression_actions = dict(**inner_expression_production_actions,
**event_production_actions,
**expression_terminal_actions,
**event_terminal_actions)
event_grammar = '%s\n%s\nterminals\n%s\n%s' % (event_productions, inner_expression_productions, expression_terminals, event_terminals)
event_actions = dict(**inner_expression_production_actions,
**event_production_actions,
**expression_terminal_actions,
**event_terminal_actions)
grammars = {
'outer' : expression_grammar,
'inner' : inner_expression_grammar,
'event' : event_grammar
}
action_sets = {
'outer' : expression_actions,
'inner' : inner_expression_actions,
'event' : event_actions
}
def get_parser(debug=False, mode='outer'):
g = Grammar.from_string(grammars[mode.lower()])
return Parser(g, debug=debug, actions=action_sets[mode.lower()]) | 0.355775 | 0.544922 |
import sys
import os
import glob
import datetime
import time
import yaml
from functools import cmp_to_key
from optparse import OptionParser
from urllib.parse import urlparse, urljoin
from util import CONFIG_YML, \
STANDARD_YML, \
AIRPORTS_YML, \
BADGES_YML, \
BADGES_URL, AIRPORTS_URL, WORKSHOPS_URL, \
WORKSHOPS_YML, \
WORKSHOP_CACHE, \
DASHBOARD_CACHE, \
P_BLOG_EXCERPT, \
harvest_metadata, \
load_info, fetch_info
# Translate two-digit month identifiers into short names.
MONTHS = {
'01' : 'Jan', '02' : 'Feb', '03' : 'Mar', '04' : 'Apr',
'05' : 'May', '06' : 'Jun', '07' : 'Jul', '08' : 'Aug',
'09' : 'Sep', '10' : 'Oct', '11' : 'Nov', '12' : 'Dec'
}
# Template for recent blog posts.
RECENT_POST = '''\
<h4><a href="{{page.root}}/%(path)s">%(title)s</a></h4>
<small>By %(author)s / <a href="{{page.root}}/%(path)s">%(date)s</a> </small>
<p>
%(excerpt)s
<a class="pull-right" href="{{page.root}}/%(path)s">...read more</a>
</p><br /><br />
'''
#----------------------------------------
def main():
'''
Main driver for regenerating _config.yml for web site.
This program also creates _includes/recent_blog_posts.html.
'''
# Get the standard stuff.
options, args = parse_args()
# Load other information.
config = load_info(options.config_dir, STANDARD_YML)
# Insert standing values into configuration.
config.update({
'month_names' : MONTHS,
'months' : sorted(MONTHS.keys()),
'site' : options.site,
'timestamp' : time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
'today' : options.today
})
# Load cached dashboard info. Do this early to avoid wasting time
# on everything else if it's not available.
config['dashboard'] = load_cached_info(os.curdir, DASHBOARD_CACHE, 'dashboard cache')
# Fetch information from AMY.
config['badges'] = fetch_info(options.amy_url, BADGES_URL)
config['airports'] = fetch_info(options.amy_url, AIRPORTS_URL)
config['workshops'] = fetch_info(options.amy_url, WORKSHOPS_URL)
# Lower-case and coalesce national flags.
for a in config['airports']:
a['country'] = a['country'].lower()
for w in config['workshops']:
w['country'] = w['country'].lower()
config['flags'] = {
'workshops': sorted({w['country'] for w in config['workshops'] if w['country']}),
'airports': sorted({a['country'] for a in config['airports'] if a['country']})
}
# Select workshops that will be displayed on the home page (soonest first).
workshops_upcoming = [w for w in config['workshops'] if w['start'] >= config['today']]
workshops_upcoming.reverse()
config['workshops_upcoming'] = workshops_upcoming
config['workshops_upcoming_short'] = workshops_upcoming[ :config['upcoming_length'] ]
# Load people and projects.
config['people'] = list(map(lambda x: os.path.relpath(x, '_includes'),
sorted(glob.glob('_includes/people/*.html'))))
config['projects'] = list(map(lambda x: os.path.relpath(x, '_includes'),
sorted(glob.glob('_includes/projects/*.html'))))
# Get information from blog entries.
config['blog'] = harvest_blog(config)
# Sanity checks on blog posts.
check_blog_sanity(config['blog'])
# Select those that'll be displayed on the home page, the index page, etc.
config['blog_recent'] = config['blog'][ -config['recent_length']: ]
config['blog_recent'].reverse()
# Create _includes/recent_blog_posts.html for inclusion in blog index page.
# This is done programmatically because we want snippets to be rendered properly.
for post in config['blog_recent']:
post['excerpt'] = get_blog_excerpt(post['path'])
write_recent_blog_posts(config['blog_recent'])
# Organize all posts by year and month.
blog_lookup, blog_count = organize_blog_entries(config['blog'])
config['blog_lookup'] = blog_lookup
config['blog_count'] = blog_count
config['blog_years'] = sorted(blog_lookup.keys())
config['blog_years'].reverse()
# Construct list of favorite blog posts.
config['blog_favorites'] = [p for p in config['blog'] if p['favorite']]
config['blog_favorites'].reverse()
# Save.
with open(CONFIG_YML, 'w') as writer:
yaml.dump(config, writer, encoding='utf-8', allow_unicode=True)
#----------------------------------------
def parse_args():
'''Parse command-line arguments.'''
parser = OptionParser()
parser.add_option('-c', '--config', dest='config_dir', help='configuration directory')
parser.add_option('-o', '--output', dest='output', help='output directory')
parser.add_option('-s', '--site', dest='site', help='site')
parser.add_option('-t', '--today', dest='today', help='build date',
default=datetime.date.today())
parser.add_option('-a', '--amy-url', dest='amy_url',
default='https://amy.software-carpentry.org/api/',
help='AMY API address')
parser.add_option('-v', '--verbose', dest='verbose', help='enable verbose logging',
default=False, action='store_true')
options, args = parser.parse_args()
return options, args
#----------------------------------------
def load_cached_info(folder, filename, message):
'''Load cached info if available, fail if not.'''
path = os.path.join(folder, filename)
if not os.path.isfile(path):
print('{0} file "{1}" does not exist.'.format(message, path), file=sys.stderr)
print('Please use "make cache" before building site,', file=sys.stderr)
sys.exit(1)
return load_info(folder, filename)
#----------------------------------------
def harvest_blog(config):
'''Harvest metadata for all blog entries.
Note that the YAML parser reads times with a leading 0 like '09:00:00' as strings,
not as times, so we have to convert manually.
'''
all_meta = []
for folder in glob.glob('blog/????/??'):
for post in glob.glob('{0}/*.html'.format(folder)):
m = harvest_metadata(post)
m['folder'] = folder
fill_optional_metadata(m, 'favorite')
all_meta.append(m)
decorated = [(x['date'], x['time'], x) for x in all_meta]
decorated.sort()
all_meta = [x[2] for x in decorated]
return all_meta
#----------------------------------------
def fill_optional_metadata(post, *fields):
'''
Fill in metadata fields that are only provided for some posts.
'''
for f in fields:
if f not in post:
post[f] = None
#----------------------------------------
def check_blog_sanity(posts):
'''Make sure all blog posts have sensible metadata.'''
seen = {}
errors = False
for p in posts:
timestamp = (p['date'], p['time'])
if timestamp in seen:
print('Timestamp {0} in {1} duplicated in {2}'.format(timestamp, seen[timestamp], p['path']), file=sys.stderr)
errors = True
else:
seen[timestamp] = p['path']
if errors:
sys.exit(1)
#----------------------------------------
def organize_blog_entries(posts):
'''Organize blog entries by year and month.'''
blog_lookup = {}
blog_count = {}
for p in posts:
year = '%4d' % p['date'].year
month = '%02d' % p['date'].month
if year not in blog_lookup:
blog_lookup[year] = dict([(m,list()) for m in MONTHS.keys()])
blog_count[year] = dict([(m,0) for m in MONTHS.keys()])
blog_lookup[year][month].append(p)
blog_count[year][month] += 1
return blog_lookup, blog_count
#----------------------------------------
def get_blog_excerpt(path):
'''Get excerpt from blog post for inclusion in blog index page.
Have to turn newlines into spaces so that older versions of Jekyll
(like the one on the server) won't turn them into single backslashes
when doing inclusion expansion.'''
with open(path, 'r') as reader:
temp = reader.read()
temp = P_BLOG_EXCERPT.search(temp)
assert temp, 'Blog post {0} lacks excerpt'.format(path)
return temp.group(1).replace('\n', ' ')
#----------------------------------------
def write_recent_blog_posts(posts):
'''Write out recent blog posts for inclusion in blog index page.'''
with open('_includes/recent_blog_posts.html', 'w') as writer:
for p in posts:
print(RECENT_POST % p, file=writer)
#----------------------------------------
if __name__ == '__main__':
main() | bin/preprocess.py | import sys
import os
import glob
import datetime
import time
import yaml
from functools import cmp_to_key
from optparse import OptionParser
from urllib.parse import urlparse, urljoin
from util import CONFIG_YML, \
STANDARD_YML, \
AIRPORTS_YML, \
BADGES_YML, \
BADGES_URL, AIRPORTS_URL, WORKSHOPS_URL, \
WORKSHOPS_YML, \
WORKSHOP_CACHE, \
DASHBOARD_CACHE, \
P_BLOG_EXCERPT, \
harvest_metadata, \
load_info, fetch_info
# Translate two-digit month identifiers into short names.
MONTHS = {
'01' : 'Jan', '02' : 'Feb', '03' : 'Mar', '04' : 'Apr',
'05' : 'May', '06' : 'Jun', '07' : 'Jul', '08' : 'Aug',
'09' : 'Sep', '10' : 'Oct', '11' : 'Nov', '12' : 'Dec'
}
# Template for recent blog posts.
RECENT_POST = '''\
<h4><a href="{{page.root}}/%(path)s">%(title)s</a></h4>
<small>By %(author)s / <a href="{{page.root}}/%(path)s">%(date)s</a> </small>
<p>
%(excerpt)s
<a class="pull-right" href="{{page.root}}/%(path)s">...read more</a>
</p><br /><br />
'''
#----------------------------------------
def main():
'''
Main driver for regenerating _config.yml for web site.
This program also creates _includes/recent_blog_posts.html.
'''
# Get the standard stuff.
options, args = parse_args()
# Load other information.
config = load_info(options.config_dir, STANDARD_YML)
# Insert standing values into configuration.
config.update({
'month_names' : MONTHS,
'months' : sorted(MONTHS.keys()),
'site' : options.site,
'timestamp' : time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
'today' : options.today
})
# Load cached dashboard info. Do this early to avoid wasting time
# on everything else if it's not available.
config['dashboard'] = load_cached_info(os.curdir, DASHBOARD_CACHE, 'dashboard cache')
# Fetch information from AMY.
config['badges'] = fetch_info(options.amy_url, BADGES_URL)
config['airports'] = fetch_info(options.amy_url, AIRPORTS_URL)
config['workshops'] = fetch_info(options.amy_url, WORKSHOPS_URL)
# Lower-case and coalesce national flags.
for a in config['airports']:
a['country'] = a['country'].lower()
for w in config['workshops']:
w['country'] = w['country'].lower()
config['flags'] = {
'workshops': sorted({w['country'] for w in config['workshops'] if w['country']}),
'airports': sorted({a['country'] for a in config['airports'] if a['country']})
}
# Select workshops that will be displayed on the home page (soonest first).
workshops_upcoming = [w for w in config['workshops'] if w['start'] >= config['today']]
workshops_upcoming.reverse()
config['workshops_upcoming'] = workshops_upcoming
config['workshops_upcoming_short'] = workshops_upcoming[ :config['upcoming_length'] ]
# Load people and projects.
config['people'] = list(map(lambda x: os.path.relpath(x, '_includes'),
sorted(glob.glob('_includes/people/*.html'))))
config['projects'] = list(map(lambda x: os.path.relpath(x, '_includes'),
sorted(glob.glob('_includes/projects/*.html'))))
# Get information from blog entries.
config['blog'] = harvest_blog(config)
# Sanity checks on blog posts.
check_blog_sanity(config['blog'])
# Select those that'll be displayed on the home page, the index page, etc.
config['blog_recent'] = config['blog'][ -config['recent_length']: ]
config['blog_recent'].reverse()
# Create _includes/recent_blog_posts.html for inclusion in blog index page.
# This is done programmatically because we want snippets to be rendered properly.
for post in config['blog_recent']:
post['excerpt'] = get_blog_excerpt(post['path'])
write_recent_blog_posts(config['blog_recent'])
# Organize all posts by year and month.
blog_lookup, blog_count = organize_blog_entries(config['blog'])
config['blog_lookup'] = blog_lookup
config['blog_count'] = blog_count
config['blog_years'] = sorted(blog_lookup.keys())
config['blog_years'].reverse()
# Construct list of favorite blog posts.
config['blog_favorites'] = [p for p in config['blog'] if p['favorite']]
config['blog_favorites'].reverse()
# Save.
with open(CONFIG_YML, 'w') as writer:
yaml.dump(config, writer, encoding='utf-8', allow_unicode=True)
#----------------------------------------
def parse_args():
'''Parse command-line arguments.'''
parser = OptionParser()
parser.add_option('-c', '--config', dest='config_dir', help='configuration directory')
parser.add_option('-o', '--output', dest='output', help='output directory')
parser.add_option('-s', '--site', dest='site', help='site')
parser.add_option('-t', '--today', dest='today', help='build date',
default=datetime.date.today())
parser.add_option('-a', '--amy-url', dest='amy_url',
default='https://amy.software-carpentry.org/api/',
help='AMY API address')
parser.add_option('-v', '--verbose', dest='verbose', help='enable verbose logging',
default=False, action='store_true')
options, args = parser.parse_args()
return options, args
#----------------------------------------
def load_cached_info(folder, filename, message):
'''Load cached info if available, fail if not.'''
path = os.path.join(folder, filename)
if not os.path.isfile(path):
print('{0} file "{1}" does not exist.'.format(message, path), file=sys.stderr)
print('Please use "make cache" before building site,', file=sys.stderr)
sys.exit(1)
return load_info(folder, filename)
#----------------------------------------
def harvest_blog(config):
'''Harvest metadata for all blog entries.
Note that the YAML parser reads times with a leading 0 like '09:00:00' as strings,
not as times, so we have to convert manually.
'''
all_meta = []
for folder in glob.glob('blog/????/??'):
for post in glob.glob('{0}/*.html'.format(folder)):
m = harvest_metadata(post)
m['folder'] = folder
fill_optional_metadata(m, 'favorite')
all_meta.append(m)
decorated = [(x['date'], x['time'], x) for x in all_meta]
decorated.sort()
all_meta = [x[2] for x in decorated]
return all_meta
#----------------------------------------
def fill_optional_metadata(post, *fields):
'''
Fill in metadata fields that are only provided for some posts.
'''
for f in fields:
if f not in post:
post[f] = None
#----------------------------------------
def check_blog_sanity(posts):
'''Make sure all blog posts have sensible metadata.'''
seen = {}
errors = False
for p in posts:
timestamp = (p['date'], p['time'])
if timestamp in seen:
print('Timestamp {0} in {1} duplicated in {2}'.format(timestamp, seen[timestamp], p['path']), file=sys.stderr)
errors = True
else:
seen[timestamp] = p['path']
if errors:
sys.exit(1)
#----------------------------------------
def organize_blog_entries(posts):
'''Organize blog entries by year and month.'''
blog_lookup = {}
blog_count = {}
for p in posts:
year = '%4d' % p['date'].year
month = '%02d' % p['date'].month
if year not in blog_lookup:
blog_lookup[year] = dict([(m,list()) for m in MONTHS.keys()])
blog_count[year] = dict([(m,0) for m in MONTHS.keys()])
blog_lookup[year][month].append(p)
blog_count[year][month] += 1
return blog_lookup, blog_count
#----------------------------------------
def get_blog_excerpt(path):
'''Get excerpt from blog post for inclusion in blog index page.
Have to turn newlines into spaces so that older versions of Jekyll
(like the one on the server) won't turn them into single backslashes
when doing inclusion expansion.'''
with open(path, 'r') as reader:
temp = reader.read()
temp = P_BLOG_EXCERPT.search(temp)
assert temp, 'Blog post {0} lacks excerpt'.format(path)
return temp.group(1).replace('\n', ' ')
#----------------------------------------
def write_recent_blog_posts(posts):
'''Write out recent blog posts for inclusion in blog index page.'''
with open('_includes/recent_blog_posts.html', 'w') as writer:
for p in posts:
print(RECENT_POST % p, file=writer)
#----------------------------------------
if __name__ == '__main__':
main() | 0.391406 | 0.140189 |
import os
import sys
import json
import pygal
import re
import urllib.request
USER = os.popen('cat ./_user.db').read() # Create _user.db with your username inside
PASS = os.popen('cat ./_pass.db').read() # Create _pass.db with your password inside
SVCGRP = 'YourServiceGRP' # Change to your desired SERVICE GROUP
APIKEY = 'YourDomainPAD' # Change to your desired APIKEY (website)
TRAFFICDATA = '&fromDate=20170201&toDate=20170201&timeInterval=1' # Change to your desired graph date/time
GRAPHFILE = 'api_example_python_graph' # Change to your desired graph filename
APIENDPOINT = 'https://openapi.cdnetworks.com/api/rest/' # Don't change
APIFORMAT = '&output=json' # Don't change
API_SUCCESS = 0 # Don't change
# Command: LOGIN : send login, receive list of service groups (logial grouping, like a directory)
print('Control Groups')
url = APIENDPOINT + 'login?user=' + USER + '&pass=' + PASS + APIFORMAT;
print('\tURL: ' + APIENDPOINT + 'login?user=xxx&pass=xxx')
parsed = json.load(urllib.request.urlopen(url))
retval = parsed['loginResponse']['resultCode']
print('\tloginResponse: resultCode = %s' % retval)
# Loop through and find SVCGRP specific Service Group
sessToken = '';
sessions = parsed['loginResponse']['session']
for session in sessions:
if session['svcGroupName'] == SVCGRP:
print('\tFound: %s' % session['svcGroupName'])
print('\t\tSelected: %s' % session['sessionToken'])
sessToken = session['sessionToken']
break
# Command: APIKEYLIST : get list of APIs for service groups
print('\nAPI Key List')
url = APIENDPOINT + 'getApiKeyList?sessionToken=' + sessToken + APIFORMAT;
print('\tURL: %s' % url)
parsed = json.load(urllib.request.urlopen(url))
retval = parsed['apiKeyInfo']['returnCode']
if retval != API_SUCCESS:
print('API Failed, code: %s' % retval)
sys.exit()
print('\tapiKeyInfo: returnCode = %s' % retval)
# Loop through and find the APIKEY specific API Key
apiKey = ''
apikeys = parsed['apiKeyInfo']['apiKeyInfoItem']
for apikey in apikeys:
if apikey['serviceName'] == APIKEY:
print('\tFound: %s' % apikey['serviceName'])
print('\t\tSelected: %s' % apikey['apiKey'])
apiKey = apikey['apiKey']
break
# Command: EDGE TRAFFIC : get edge traffic raw data
print('\nTraffic/Edge')
url = APIENDPOINT + 'traffic/edge?sessionToken=' + sessToken + '&apiKey=' + apiKey + TRAFFICDATA + APIFORMAT;
print('\tURL: %s' % url)
parsed = json.load(urllib.request.urlopen(url))
retval = parsed['trafficResponse']['returnCode']
if retval != API_SUCCESS:
print('API Failed, code: %s' % retval)
sys.exit()
print('\tapiKeyInfo: returnCode = %s' % retval)
# Show all Traffic details
chartListTimes = []
chartListTrans = []
trafficItems = parsed['trafficResponse']['trafficItem']
for item in trafficItems:
print('\tFound: %s' % item['dateTime'])
print('\tFound: %s' % item['dataTransferred'])
chartListTimes.append(item['dateTime'])
chartListTrans.append(item['dataTransferred'])
# Generate and save graph (create nice looking labels first)
chartListTimesPretty = []
for date in chartListTimes: #format with hyphens: 201702011700
chartListTimesPretty.append( "%s-%s-%s-%s" % (str(date)[:4], str(date)[4:6], str(date)[6:8], str(date)[8:]))
bar_chart = pygal.Bar(width=1024, height=768)
bar_chart.title = "Edge Traffic"
bar_chart.x_title = "Date/Time"
bar_chart.y_title = "Data Transferred (bytes)"
bar_chart.x_label_rotation = 270
bar_chart.legend_at_bottom = 1
bar_chart.x_labels = chartListTimesPretty
bar_chart.add(APIKEY, chartListTrans)
bar_chart.render_to_file(GRAPHFILE + '.svg')
bar_chart.render_to_png(GRAPHFILE + '.png')
# Command: LOGOUT : send token to invalidate
print('\nLogout')
url = APIENDPOINT + 'logout?sessionToken=' + sessToken + APIFORMAT
print('\tURL: %s' % url)
parsed = json.load(urllib.request.urlopen(url))
retval = parsed['logoutResponse']['resultCode']
# Ignoring retval
print('\tlogout: resultCode = %s' % retval) | api_example_python.py | import os
import sys
import json
import pygal
import re
import urllib.request
USER = os.popen('cat ./_user.db').read() # Create _user.db with your username inside
PASS = os.popen('cat ./_pass.db').read() # Create _pass.db with your password inside
SVCGRP = 'YourServiceGRP' # Change to your desired SERVICE GROUP
APIKEY = 'YourDomainPAD' # Change to your desired APIKEY (website)
TRAFFICDATA = '&fromDate=20170201&toDate=20170201&timeInterval=1' # Change to your desired graph date/time
GRAPHFILE = 'api_example_python_graph' # Change to your desired graph filename
APIENDPOINT = 'https://openapi.cdnetworks.com/api/rest/' # Don't change
APIFORMAT = '&output=json' # Don't change
API_SUCCESS = 0 # Don't change
# Command: LOGIN : send login, receive list of service groups (logial grouping, like a directory)
print('Control Groups')
url = APIENDPOINT + 'login?user=' + USER + '&pass=' + PASS + APIFORMAT;
print('\tURL: ' + APIENDPOINT + 'login?user=xxx&pass=xxx')
parsed = json.load(urllib.request.urlopen(url))
retval = parsed['loginResponse']['resultCode']
print('\tloginResponse: resultCode = %s' % retval)
# Loop through and find SVCGRP specific Service Group
sessToken = '';
sessions = parsed['loginResponse']['session']
for session in sessions:
if session['svcGroupName'] == SVCGRP:
print('\tFound: %s' % session['svcGroupName'])
print('\t\tSelected: %s' % session['sessionToken'])
sessToken = session['sessionToken']
break
# Command: APIKEYLIST : get list of APIs for service groups
print('\nAPI Key List')
url = APIENDPOINT + 'getApiKeyList?sessionToken=' + sessToken + APIFORMAT;
print('\tURL: %s' % url)
parsed = json.load(urllib.request.urlopen(url))
retval = parsed['apiKeyInfo']['returnCode']
if retval != API_SUCCESS:
print('API Failed, code: %s' % retval)
sys.exit()
print('\tapiKeyInfo: returnCode = %s' % retval)
# Loop through and find the APIKEY specific API Key
apiKey = ''
apikeys = parsed['apiKeyInfo']['apiKeyInfoItem']
for apikey in apikeys:
if apikey['serviceName'] == APIKEY:
print('\tFound: %s' % apikey['serviceName'])
print('\t\tSelected: %s' % apikey['apiKey'])
apiKey = apikey['apiKey']
break
# Command: EDGE TRAFFIC : get edge traffic raw data
print('\nTraffic/Edge')
url = APIENDPOINT + 'traffic/edge?sessionToken=' + sessToken + '&apiKey=' + apiKey + TRAFFICDATA + APIFORMAT;
print('\tURL: %s' % url)
parsed = json.load(urllib.request.urlopen(url))
retval = parsed['trafficResponse']['returnCode']
if retval != API_SUCCESS:
print('API Failed, code: %s' % retval)
sys.exit()
print('\tapiKeyInfo: returnCode = %s' % retval)
# Show all Traffic details
chartListTimes = []
chartListTrans = []
trafficItems = parsed['trafficResponse']['trafficItem']
for item in trafficItems:
print('\tFound: %s' % item['dateTime'])
print('\tFound: %s' % item['dataTransferred'])
chartListTimes.append(item['dateTime'])
chartListTrans.append(item['dataTransferred'])
# Generate and save graph (create nice looking labels first)
chartListTimesPretty = []
for date in chartListTimes: #format with hyphens: 201702011700
chartListTimesPretty.append( "%s-%s-%s-%s" % (str(date)[:4], str(date)[4:6], str(date)[6:8], str(date)[8:]))
bar_chart = pygal.Bar(width=1024, height=768)
bar_chart.title = "Edge Traffic"
bar_chart.x_title = "Date/Time"
bar_chart.y_title = "Data Transferred (bytes)"
bar_chart.x_label_rotation = 270
bar_chart.legend_at_bottom = 1
bar_chart.x_labels = chartListTimesPretty
bar_chart.add(APIKEY, chartListTrans)
bar_chart.render_to_file(GRAPHFILE + '.svg')
bar_chart.render_to_png(GRAPHFILE + '.png')
# Command: LOGOUT : send token to invalidate
print('\nLogout')
url = APIENDPOINT + 'logout?sessionToken=' + sessToken + APIFORMAT
print('\tURL: %s' % url)
parsed = json.load(urllib.request.urlopen(url))
retval = parsed['logoutResponse']['resultCode']
# Ignoring retval
print('\tlogout: resultCode = %s' % retval) | 0.211906 | 0.051774 |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Provider'
db.create_table('activitysync_provider', (
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('prefix', self.gf('django.db.models.fields.CharField')(max_length=50)),
('link', self.gf('django.db.models.fields.URLField')(max_length=500)),
('sourceid', self.gf('django.db.models.fields.CharField')(unique=True, max_length=20, primary_key=True, db_index=True)),
))
db.send_create_signal('activitysync', ['Provider'])
# Adding field 'Activity.provider'
db.add_column('activitysync_activity', 'provider', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['activitysync.Provider'], null=True), keep_default=False)
def backwards(self, orm):
# Deleting model 'Provider'
db.delete_table('activitysync_provider')
# Deleting field 'Activity.provider'
db.delete_column('activitysync_activity', 'provider_id')
models = {
'activitysync.activity': {
'Meta': {'ordering': "('-pub_date',)", 'object_name': 'Activity'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '500'}),
'provider': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['activitysync.Provider']", 'null': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'})
},
'activitysync.provider': {
'Meta': {'ordering': "('name',)", 'object_name': 'Provider'},
'link': ('django.db.models.fields.URLField', [], {'max_length': '500'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'prefix': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'sourceid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'primary_key': 'True', 'db_index': 'True'})
}
}
complete_apps = ['activitysync'] | activitysync/migrations/0002_auto__add_provider__add_field_activity_provider.py | import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the Provider model and a nullable
    Activity.provider foreign key pointing at it."""

    def forwards(self, orm):
        """Apply the migration."""
        # Adding model 'Provider'
        db.create_table('activitysync_provider', (
            ('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('prefix', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('link', self.gf('django.db.models.fields.URLField')(max_length=500)),
            ('sourceid', self.gf('django.db.models.fields.CharField')(unique=True, max_length=20, primary_key=True, db_index=True)),
        ))
        db.send_create_signal('activitysync', ['Provider'])
        # Adding field 'Activity.provider' (null=True so existing rows stay valid)
        db.add_column('activitysync_activity', 'provider', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['activitysync.Provider'], null=True), keep_default=False)

    def backwards(self, orm):
        """Revert the migration."""
        # Deleting model 'Provider'
        db.delete_table('activitysync_provider')
        # Deleting field 'Activity.provider'
        db.delete_column('activitysync_activity', 'provider_id')

    # Frozen ORM snapshot South uses to build the 'orm' argument above.
    models = {
        'activitysync.activity': {
            'Meta': {'ordering': "('-pub_date',)", 'object_name': 'Activity'},
            'author': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'guid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'link': ('django.db.models.fields.URLField', [], {'max_length': '500'}),
            'provider': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['activitysync.Provider']", 'null': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'})
        },
        'activitysync.provider': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Provider'},
            'link': ('django.db.models.fields.URLField', [], {'max_length': '500'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'prefix': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'sourceid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'primary_key': 'True', 'db_index': 'True'})
        }
    }
complete_apps = ['activitysync'] | 0.353428 | 0.073897 |
from __future__ import print_function
import numpy as np
from wisdem.commonse.utilities import nodal2sectional
import openmdao.api as om
class CylindricalShellProperties(om.ExplicitComponent):
    """
    OpenMDAO wrapper for the Tube class to obtain cylindrical shell properties.

    Nodal diameters are averaged to sectional values, then each section is
    treated as a tube with the corresponding wall thickness.

    Parameters
    ----------
    d : numpy array[nFull], [m]
        tower diameter at corresponding locations
    t : numpy array[nFull-1], [m]
        shell thickness at corresponding locations

    Returns
    -------
    Az : numpy array[nFull-1], [m**2]
        cross-sectional area
    Asx : numpy array[nFull-1], [m**2]
        x shear area
    Asy : numpy array[nFull-1], [m**2]
        y shear area
    Jz : numpy array[nFull-1], [m**4]
        polar moment of inertia
    Ixx : numpy array[nFull-1], [m**4]
        area moment of inertia about x-axis
    Iyy : numpy array[nFull-1], [m**4]
        area moment of inertia about y-axis
    """

    def initialize(self):
        # nFull: number of nodes; sections are nFull-1.
        self.options.declare("nFull")

    def setup(self):
        nFull = self.options["nFull"]

        self.add_input("d", np.zeros(nFull), units="m")
        self.add_input("t", np.zeros(nFull - 1), units="m")

        self.add_output("Az", np.zeros(nFull - 1), units="m**2")
        self.add_output("Asx", np.zeros(nFull - 1), units="m**2")
        self.add_output("Asy", np.zeros(nFull - 1), units="m**2")
        self.add_output("Jz", np.zeros(nFull - 1), units="m**4")
        self.add_output("Ixx", np.zeros(nFull - 1), units="m**4")
        self.add_output("Iyy", np.zeros(nFull - 1), units="m**4")

        # Derivatives via central finite differences.
        self.declare_partials("*", "*", method="fd", form="central", step=1e-6)

    def compute(self, inputs, outputs):
        # Average nodal diameters to section diameters, then delegate to Tube.
        d, _ = nodal2sectional(inputs["d"])
        tube = Tube(d, inputs["t"])

        outputs["Az"] = tube.Area
        outputs["Asx"] = tube.Asx
        outputs["Asy"] = tube.Asy
        outputs["Jz"] = tube.J0
        outputs["Ixx"] = tube.Jxx
        outputs["Iyy"] = tube.Jyy
class Tube:
    """Section properties of a hollow circular (tubular) cross-section.

    Inputs may be scalars or numpy arrays; all properties broadcast
    elementwise.

    Parameters
    ----------
    D : float or numpy array
        Outer diameter.
    t : float or numpy array
        Wall thickness.
    Lgth : float or numpy array, optional
        Member length (used only by the Klr buckling parameter).
    Kbuck : float or numpy array, optional
        Effective-length (buckling) factor.
    """

    def __init__(self, D, t, Lgth=np.nan, Kbuck=1.0):
        # np.nan (not the removed-in-NumPy-2.0 alias np.NaN).
        self.D = D
        self.t = t
        # Expand scalars so L and Kbuck match the size of D when D is an array.
        self.L = Lgth * np.ones(np.size(D))
        self.Kbuck = Kbuck * np.ones(np.size(D))

    @property
    def Area(self):
        """Cross-sectional area: pi/4 * (D^2 - (D - 2t)^2)."""
        return (self.D ** 2 - (self.D - 2 * self.t) ** 2) * np.pi / 4

    @property
    def derivArea(self):
        """Partial derivatives of Area w.r.t. D and t.

        A = pi/4 * (D^2 - (D - 2t)^2) simplifies to pi*t*(D - t), so
        dA/dD = pi*t and dA/dt = pi*(D - 2t).  (Bug fix: the previous
        expression was algebraically wrong and claimed dA/dt = 0.)
        """
        return {"D": np.pi * self.t * np.ones(np.size(self.D)),
                "t": np.pi * (self.D - 2.0 * self.t)}

    @property
    def Amid(self):
        """Mid-thickness enclosed area (thin-wall torsion calculation)."""
        return (self.D - self.t) ** 2 * np.pi / 4

    @property
    def Jxx(self):
        """2nd area moment of inertia w.r.t. x-x axis (Jxx = Jyy for a tube)."""
        return (self.D ** 4 - (self.D - 2 * self.t) ** 4) * np.pi / 64

    @property
    def Jyy(self):
        """2nd area moment of inertia w.r.t. y-y axis (equals Jxx)."""
        return self.Jxx

    @property
    def J0(self):
        """Polar moment of inertia w.r.t. z-z axis (torsional)."""
        return 2.0 * self.Jxx

    @property
    def Asy(self):
        """Shear area (empirical polynomial in Ri/Ro)."""
        Ri = self.D / 2 - self.t
        Ro = self.D / 2
        return self.Area / (1.124235 + 0.055610 * (Ri / Ro) + 1.097134 * (Ri / Ro) ** 2 - 0.630057 * (Ri / Ro) ** 3)

    @property
    def Asx(self):
        """Shear area (symmetric section: equals Asy)."""
        return self.Asy

    @property
    def BdgMxx(self):
        """Bending (section) modulus about x-x: Jxx / (D/2)."""
        return self.Jxx / (self.D / 2)

    @property
    def BdgMyy(self):
        """Bending (section) modulus about y-y (equals BdgMxx)."""
        return self.Jyy / (self.D / 2)

    @property
    def TorsConst(self):
        """Torsion shear constant: J0 / (D/2)."""
        return self.J0 / (self.D / 2)

    @property
    def S(self):
        """Alias of BdgMxx."""
        return self.BdgMxx

    @property
    def C(self):
        """Alias of TorsConst."""
        return self.TorsConst

    @property
    def Rgyr(self):
        """Radius of gyration: sqrt(Jxx / Area)."""
        return np.sqrt(self.Jxx / self.Area)

    @property
    def Klr(self):
        """Slenderness (Klr) buckling parameter: Kbuck * L / Rgyr."""
        return self.Kbuck * self.L / self.Rgyr
class IBeam:
    """Section properties of a doubly-symmetric I-beam.

    Geometry: two flanges of width Lf and thickness tf attached to a web of
    height Hw and thickness tw; total depth H = Hw + 2*tf.
    """

    def __init__(self, L_flange, t_flange, H_web, t_web):
        self.Lf = L_flange
        self.tf = t_flange
        self.Hw = H_web
        self.tw = t_web
        self.H = H_web + 2 * t_flange

    @property
    def AreaFlange(self):
        """Area of a single flange."""
        return self.Lf * self.tf

    @property
    def AreaWeb(self):
        """Area of the web."""
        return self.Hw * self.tw

    @property
    def Area(self):
        """Total cross-sectional area (web plus both flanges)."""
        return self.AreaWeb + 2 * self.AreaFlange

    @property
    def Iyy(self):
        """2nd area moment about the flange-parallel axis through the CG."""
        b, h = self.Lf, self.H
        return (b * h ** 3 - (b - self.tw) * self.Hw ** 3) / 12.0

    @property
    def Izz(self):
        """2nd area moment about the axis running through the web center."""
        # NOTE(review): the flange term uses tw where the standard formula
        # uses the flange thickness tf — confirm intent; kept as-is.
        return (2 * self.tw * self.Lf ** 3 + self.Hw * self.tw ** 3) / 12.0

    @property
    def Jxx(self):
        """Torsion constant (thin-walled open-section approximation)."""
        # NOTE(review): the classic approximation is sum(b*t^3)/3; this form
        # omits the 1/3 factor — kept as-is to preserve behavior.
        return 2 * self.Lf * self.tf ** 3 + self.H * self.tw ** 3

    @property
    def Asy(self):
        """Shear area for loads parallel to the flanges."""
        return 1.64 * self.Lf * self.tf

    @property
    def Asz(self):
        """Shear area for loads parallel to the web."""
        return self.tw * self.H

    @property
    def BdgMyy(self):
        """Bending (section) modulus about y-y: Iyy / (H/2)."""
        return 2 * self.Iyy / self.H

    @property
    def BdgMzz(self):
        """Bending (section) modulus about z-z: Izz / (Lf/2)."""
        return 2 * self.Izz / self.Lf

    @property
    def TorsConst(self):
        """Torsion shear constant."""
        return self.Jxx / (1.28 * self.tf)

    @property
    def Syy(self):
        """Alias of BdgMyy."""
        return self.BdgMyy

    @property
    def Szz(self):
        """Alias of BdgMzz."""
        return self.BdgMzz

    @property
    def C(self):
        """Alias of TorsConst."""
        return self.TorsConst

    @property
    def Rgyr(self):
        """Radius of gyration: sqrt(Jxx / Area)."""
        return np.sqrt(self.Jxx / self.Area)
@property
def CG(self): # Radius of Gyration for circular tube
return 0.5 * self.Hw + self.tf | wisdem/commonse/cross_sections.py | from __future__ import print_function
import numpy as np
from wisdem.commonse.utilities import nodal2sectional
import openmdao.api as om
class CylindricalShellProperties(om.ExplicitComponent):
"""
OpenMDAO wrapper for tube class to obtain cylindrical sheel properties.
Parameters
----------
d : numpy array[nFull], [m]
tower diameter at corresponding locations
t : numpy array[nFull-1], [m]
shell thickness at corresponding locations
Returns
-------
Az : numpy array[nFull-1], [m**2]
cross-sectional area
Asx : numpy array[nFull-1], [m**2]
x shear area
Asy : numpy array[nFull-1], [m**2]
y shear area
Jz : numpy array[nFull-1], [m**4]
polar moment of inertia
Ixx : numpy array[nFull-1], [m**4]
area moment of inertia about x-axis
Iyy : numpy array[nFull-1], [m**4]
area moment of inertia about y-axis
"""
def initialize(self):
self.options.declare("nFull")
def setup(self):
nFull = self.options["nFull"]
self.add_input("d", np.zeros(nFull), units="m")
self.add_input("t", np.zeros(nFull - 1), units="m")
self.add_output("Az", np.zeros(nFull - 1), units="m**2")
self.add_output("Asx", np.zeros(nFull - 1), units="m**2")
self.add_output("Asy", np.zeros(nFull - 1), units="m**2")
self.add_output("Jz", np.zeros(nFull - 1), units="m**4")
self.add_output("Ixx", np.zeros(nFull - 1), units="m**4")
self.add_output("Iyy", np.zeros(nFull - 1), units="m**4")
# Derivatives
self.declare_partials("*", "*", method="fd", form="central", step=1e-6)
def compute(self, inputs, outputs):
d, _ = nodal2sectional(inputs["d"])
tube = Tube(d, inputs["t"])
outputs["Az"] = tube.Area
outputs["Asx"] = tube.Asx
outputs["Asy"] = tube.Asy
outputs["Jz"] = tube.J0
outputs["Ixx"] = tube.Jxx
outputs["Iyy"] = tube.Jyy
class Tube:
"""The Tube Class contains functions to calculate properties of tubular circular cross-sections
for structural analyses."""
def __init__(self, D, t, Lgth=np.NaN, Kbuck=1.0):
self.D = D
self.t = t
self.L = Lgth * np.ones(np.size(D)) # this makes sure we exapnd Lght if D,t, arrays
self.Kbuck = Kbuck * np.ones(np.size(D)) # this makes sure we exapnd Kbuck if D,t, arrays
@property
def Area(self): # Cross sectional area of tube
return (self.D ** 2 - (self.D - 2 * self.t) ** 2) * np.pi / 4
@property
def derivArea(self):
return {"D": 2 * np.pi / 4 * (self.D ** 2 - (self.D - 2 * self.t)) * (2 * self.D - 1), "t": 0}
@property
def Amid(self): # mid-thickness inscribed area of tube (thin wall torsion calculation)
return (self.D - self.t) ** 2 * np.pi / 4
@property
def Jxx(self): # 2nd area moment of inertia w.r.t. x-x axis (Jxx=Jyy for tube)
return (self.D ** 4 - (self.D - 2 * self.t) ** 4) * np.pi / 64
@property
def Jyy(self): # 2nd area moment of inertia w.r.t. x-x axis (Jxx=Jyy for tube)
return self.Jxx
@property
def J0(self): # polar moment of inertia w.r.t. z-z axis (torsional)
return 2.0 * self.Jxx
@property
def Asy(self): # Shear Area for tubular cross-section
Ri = self.D / 2 - self.t
Ro = self.D / 2
return self.Area / (1.124235 + 0.055610 * (Ri / Ro) + 1.097134 * (Ri / Ro) ** 2 - 0.630057 * (Ri / Ro) ** 3)
@property
def Asx(self): # Shear Area for tubular cross-section
return self.Asy
@property
def BdgMxx(self): # Bending modulus for tubular cross-section
return self.Jxx / (self.D / 2)
@property
def BdgMyy(self): # Bending modulus for tubular cross-section =BdgMxx
return self.Jyy / (self.D / 2)
@property
def TorsConst(self): # Torsion shear constant for tubular cross-section
return self.J0 / (self.D / 2)
@property
def S(self): # Bending modulus for tubular cross-section
return self.BdgMxx
@property
def C(self): # Torsion shear constant for tubular cross-section
return self.TorsConst
@property
def Rgyr(self): # Radius of Gyration for circular tube
return np.sqrt(self.Jxx / self.Area)
@property
def Klr(self): # Klr buckling parameter
return self.Kbuck * self.L / self.Rgyr
class IBeam:
def __init__(self, L_flange, t_flange, H_web, t_web):
self.Lf = L_flange
self.tf = t_flange
self.Hw = H_web
self.tw = t_web
self.H = H_web + 2 * t_flange
@property
def AreaFlange(self): # Cross sectional area of tube
return self.Lf * self.tf
@property
def AreaWeb(self): # Cross sectional area of tube
return self.Hw * self.tw
@property
def Area(self): # Cross sectional area of tube
return self.AreaWeb + 2 * self.AreaFlange
@property
def Iyy(self): # 2nd area moment of inertia w.r.t. y-y axis running parallel to flange through CG
return (self.Lf * self.H ** 3 - (self.Lf - self.tw) * self.Hw ** 3) / 12.0
@property
def Izz(self): # 2nd area moment of inertia w.r.t. z-z running through center of web
return (2 * self.tw * self.Lf ** 3 + self.Hw * self.tw ** 3) / 12.0
@property
def Jxx(self): # polar moment of inertia w.r.t. z-z axis (torsional)
return 2 * self.Lf * self.tf ** 3 + self.H * self.tw ** 3
@property
def Asy(self): # Shear Area for tubular cross-section
return 1.64 * self.Lf * self.tf
@property
def Asz(self): # Shear Area for tubular cross-section
return self.tw * self.H
@property
def BdgMyy(self): # Bending modulus for tubular cross-section
return 2 * self.Iyy / self.H
@property
def BdgMzz(self): # Bending modulus for tubular cross-section =BdgMxx
return 2 * self.Izz / self.Lf
@property
def TorsConst(self): # Torsion shear constant for tubular cross-section
return self.Jxx / (1.28 * self.tf)
@property
def Syy(self): # Bending modulus for tubular cross-section
return self.BdgMyy
@property
def Szz(self): # Bending modulus for tubular cross-section
return self.BdgMzz
@property
def C(self): # Torsion shear constant for tubular cross-section
return self.TorsConst
@property
def Rgyr(self): # Radius of Gyration for circular tube
return np.sqrt(self.Jxx / self.Area)
@property
def CG(self): # Radius of Gyration for circular tube
return 0.5 * self.Hw + self.tf | 0.918475 | 0.467332 |
import os
import re
from testrunner.local import testsuite
from testrunner.objects import testcase
# WPT wasm-js tests are all named *.any.js and import helpers via absolute
# paths rooted at /wasm/jsapi/.
ANY_JS = ".any.js"
WPT_ROOT = "/wasm/jsapi/"
# 'META: script=...' / 'META: timeout=...' directives in the test headers.
META_SCRIPT_REGEXP = re.compile(r"META:\s*script=(.*)")
META_TIMEOUT_REGEXP = re.compile(r"META:\s*timeout=(.*)")

# Wasm proposals with staged tests under tests/proposals/<name>; each maps
# to the d8 flags needed to enable that proposal.
proposal_flags = [{
                    'name': 'js-types',
                    'flags': ['--experimental-wasm-type-reflection',
                              '--wasm-staging']
                  },
                  {
                    'name': 'tail-call',
                    'flags': ['--experimental-wasm-tail-call',
                              '--wasm-staging']
                  },
                  {
                    'name': 'simd',
                    'flags': ['--experimental-wasm-simd',
                              '--wasm-staging']
                  },
                  {
                    'name': 'memory64',
                    'flags': ['--experimental-wasm-memory64',
                              '--wasm-staging']
                  },
                  ]
class TestLoader(testsuite.JSTestLoader):
  """Discovers only WPT-style '.any.js' test files."""
  @property
  def extensions(self):
    return [ANY_JS]
class TestSuite(testsuite.TestSuite):
  """Suite configuration: tests live under <root>/tests and share the
  sibling mjsunit suite's mjsunit.js harness."""

  def __init__(self, *args, **kwargs):
    super(TestSuite, self).__init__(*args, **kwargs)
    # mjsunit.js lives next to this suite, in the 'mjsunit' directory.
    self.mjsunit_js = os.path.join(os.path.dirname(self.root), "mjsunit",
                                   "mjsunit.js")
    self.test_root = os.path.join(self.root, "tests")
    self._test_loader.test_root = self.test_root

  def _test_loader_class(self):
    return TestLoader

  def _test_class(self):
    return TestCase
def get_proposal_path_identifier(proposal):
  """Return the 'proposals<sep><name>' path fragment for a proposal dict."""
  parts = ('proposals', proposal['name'])
  return os.sep.join(parts)
class TestCase(testcase.D8TestCase):
  """One WPT wasm-js test executed under d8."""

  def _get_timeout_param(self):
    """Return the 'META: timeout=...' value from the test source.

    Only 'long' is recognized; any other value is reported and ignored.
    """
    source = self.get_source()
    timeout_params = META_TIMEOUT_REGEXP.findall(source)
    if not timeout_params:
      return None

    if timeout_params[0] in ["long"]:
      return timeout_params[0]
    else:
      print("unknown timeout param %s in %s%s"
            % (timeout_params[0], self.path, ANY_JS))
      return None

  def _get_files_params(self):
    """Assemble the ordered list of JS files d8 loads for this test:
    harness files, META:script dependencies, the test itself, after.js."""
    files = [self.suite.mjsunit_js,
             os.path.join(self.suite.root, "third_party", "testharness.js"),
             os.path.join(self.suite.root, "testharness-additions.js"),
             os.path.join(self.suite.root, "report.js")]

    source = self.get_source()
    current_dir = os.path.dirname(self._get_source_path())
    for script in META_SCRIPT_REGEXP.findall(source):
      if script.startswith(WPT_ROOT):
        # Matched an absolute path, strip the root and replace it with our
        # local root.  Proposal tests resolve against their proposal copy.
        found = False
        for proposal in proposal_flags:
          if get_proposal_path_identifier(proposal) in current_dir:
            found = True
            script = os.path.join(self.suite.test_root,
                                  os.sep.join(['proposals', proposal['name']]),
                                  script[len(WPT_ROOT):])
        if not found:
          script = os.path.join(self.suite.test_root, script[len(WPT_ROOT):])
      elif not script.startswith("/"):
        # Matched a relative path, prepend this test's directory.
        script = os.path.join(current_dir, script)
      else:
        raise Exception("Unexpected absolute path for script: \"%s\"" % script);

      files.append(script)

    files.extend([self._get_source_path(),
                  os.path.join(self.suite.root, "after.js")])
    return files

  def _get_source_flags(self):
    """Return the d8 flags for the proposal this test belongs to (default:
    just --wasm-staging)."""
    for proposal in proposal_flags:
      if get_proposal_path_identifier(proposal) in self.path:
        return proposal['flags']
    return ['--wasm-staging']

  def _get_source_path(self):
    # All tests are named `path/name.any.js`
    return os.path.join(self.suite.test_root, self.path + ANY_JS)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs) | test/wasm-js/testcfg.py |
import os
import re
from testrunner.local import testsuite
from testrunner.objects import testcase
ANY_JS = ".any.js"
WPT_ROOT = "/wasm/jsapi/"
META_SCRIPT_REGEXP = re.compile(r"META:\s*script=(.*)")
META_TIMEOUT_REGEXP = re.compile(r"META:\s*timeout=(.*)")
proposal_flags = [{
'name': 'js-types',
'flags': ['--experimental-wasm-type-reflection',
'--wasm-staging']
},
{
'name': 'tail-call',
'flags': ['--experimental-wasm-tail-call',
'--wasm-staging']
},
{
'name': 'simd',
'flags': ['--experimental-wasm-simd',
'--wasm-staging']
},
{
'name': 'memory64',
'flags': ['--experimental-wasm-memory64',
'--wasm-staging']
},
]
class TestLoader(testsuite.JSTestLoader):
@property
def extensions(self):
return [ANY_JS]
class TestSuite(testsuite.TestSuite):
def __init__(self, *args, **kwargs):
super(TestSuite, self).__init__(*args, **kwargs)
self.mjsunit_js = os.path.join(os.path.dirname(self.root), "mjsunit",
"mjsunit.js")
self.test_root = os.path.join(self.root, "tests")
self._test_loader.test_root = self.test_root
def _test_loader_class(self):
return TestLoader
def _test_class(self):
return TestCase
def get_proposal_path_identifier(proposal):
return os.sep.join(['proposals', proposal['name']])
class TestCase(testcase.D8TestCase):
def _get_timeout_param(self):
source = self.get_source()
timeout_params = META_TIMEOUT_REGEXP.findall(source)
if not timeout_params:
return None
if timeout_params[0] in ["long"]:
return timeout_params[0]
else:
print("unknown timeout param %s in %s%s"
% (timeout_params[0], self.path, ANY_JS))
return None
def _get_files_params(self):
files = [self.suite.mjsunit_js,
os.path.join(self.suite.root, "third_party", "testharness.js"),
os.path.join(self.suite.root, "testharness-additions.js"),
os.path.join(self.suite.root, "report.js")]
source = self.get_source()
current_dir = os.path.dirname(self._get_source_path())
for script in META_SCRIPT_REGEXP.findall(source):
if script.startswith(WPT_ROOT):
# Matched an absolute path, strip the root and replace it with our
# local root.
found = False
for proposal in proposal_flags:
if get_proposal_path_identifier(proposal) in current_dir:
found = True
script = os.path.join(self.suite.test_root,
os.sep.join(['proposals', proposal['name']]),
script[len(WPT_ROOT):])
if not found:
script = os.path.join(self.suite.test_root, script[len(WPT_ROOT):])
elif not script.startswith("/"):
# Matched a relative path, prepend this test's directory.
script = os.path.join(current_dir, script)
else:
raise Exception("Unexpected absolute path for script: \"%s\"" % script);
files.append(script)
files.extend([self._get_source_path(),
os.path.join(self.suite.root, "after.js")])
return files
def _get_source_flags(self):
for proposal in proposal_flags:
if get_proposal_path_identifier(proposal) in self.path:
return proposal['flags']
return ['--wasm-staging']
def _get_source_path(self):
# All tests are named `path/name.any.js`
return os.path.join(self.suite.test_root, self.path + ANY_JS)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs) | 0.444565 | 0.126111 |
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList, type_name_to_flow_type
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def _test_argsort(test_case, data_shape, axis, descending, data_type, device):
    """Check flow.argsort against numpy's argsort on random data.

    Descending order is emulated by negating the input before np.argsort.
    NOTE(review): with tied values argsort order may legitimately differ
    between implementations; random float data makes ties unlikely.
    """
    input = flow.tensor(
        np.random.randn(*data_shape),
        dtype=type_name_to_flow_type[data_type],
        device=flow.device(device),
    )
    of_out = flow.argsort(input, dim=axis, descending=descending)
    np_input = -input.numpy() if descending else input.numpy()
    np_out = np.argsort(np_input, axis=axis)
    # Consistency with _test_tensor_argsort: validate shape as well as values.
    test_case.assertTrue(np.array_equal(of_out.numpy().shape, np_out.shape))
    test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))
def _test_tensor_argsort(test_case, data_shape, axis, descending, data_type, device):
    """Check the Tensor.argsort method against numpy's argsort.

    Descending order is emulated by negating the input before np.argsort.
    """
    input = flow.tensor(
        np.random.randn(*data_shape),
        dtype=type_name_to_flow_type[data_type],
        device=flow.device(device),
    )
    of_out = input.argsort(dim=axis, descending=descending)
    np_input = -input.numpy() if descending else input.numpy()
    np_out = np.argsort(np_input, axis=axis)
    # Compare both the output shape and the flattened index values.
    test_case.assertTrue(np.array_equal(of_out.numpy().shape, np_out.shape))
    test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))
@flow.unittest.skip_unless_1n1d()
class TestArgsort(flow.unittest.TestCase):
    """Argsort tests, restricted to a single node / single device."""

    def test_argsort(test_case):
        # Exercise the Cartesian product of implementation, shape, axis,
        # sort order, dtype and device.
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [_test_argsort, _test_tensor_argsort]
        arg_dict["data_shape"] = [(2, 6, 5, 4), (3, 4, 8)]
        arg_dict["axis"] = [-1, 0, 2]
        arg_dict["descending"] = [True, False]
        arg_dict["data_type"] = ["double", "float32", "int32"]
        arg_dict["device"] = ["cpu", "cuda"]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])

    @autotest(auto_backward=False, check_graph=False)
    def test_argsort_with_random_data(test_case):
        # Automated consistency check against PyTorch on a random 4-D tensor.
        device = random_device()
        x = random_pytorch_tensor(ndim=4).to(device)
        y = torch.argsort(
            x, dim=random(low=-4, high=4).to(int), descending=random_bool()
        )
        return y
if __name__ == "__main__":
unittest.main() | python/oneflow/test/modules/test_argsort.py | import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList, type_name_to_flow_type
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def _test_argsort(test_case, data_shape, axis, descending, data_type, device):
input = flow.tensor(
np.random.randn(*data_shape),
dtype=type_name_to_flow_type[data_type],
device=flow.device(device),
)
of_out = flow.argsort(input, dim=axis, descending=descending)
np_input = -input.numpy() if descending else input.numpy()
np_out = np.argsort(np_input, axis=axis)
test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))
def _test_tensor_argsort(test_case, data_shape, axis, descending, data_type, device):
input = flow.tensor(
np.random.randn(*data_shape),
dtype=type_name_to_flow_type[data_type],
device=flow.device(device),
)
of_out = input.argsort(dim=axis, descending=descending)
np_input = -input.numpy() if descending else input.numpy()
np_out = np.argsort(np_input, axis=axis)
test_case.assertTrue(np.array_equal(of_out.numpy().shape, np_out.shape))
test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))
@flow.unittest.skip_unless_1n1d()
class TestArgsort(flow.unittest.TestCase):
def test_argsort(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [_test_argsort, _test_tensor_argsort]
arg_dict["data_shape"] = [(2, 6, 5, 4), (3, 4, 8)]
arg_dict["axis"] = [-1, 0, 2]
arg_dict["descending"] = [True, False]
arg_dict["data_type"] = ["double", "float32", "int32"]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@autotest(auto_backward=False, check_graph=False)
def test_argsort_with_random_data(test_case):
device = random_device()
x = random_pytorch_tensor(ndim=4).to(device)
y = torch.argsort(
x, dim=random(low=-4, high=4).to(int), descending=random_bool()
)
return y
if __name__ == "__main__":
unittest.main() | 0.54359 | 0.665302 |
import re
class MetaOperation(type):
    """
    Metaclass that registers every class inheriting from Operation.
    Note that subclasses must be imported before use so that they are
    available in the registry.
    """
    registry = {}

    def __new__(cls, clsname, bases, attrs):
        newclass = super(MetaOperation, cls).__new__(cls, clsname, bases, attrs)
        # Expose the class name as a plain attribute for display/logging.
        newclass.name = clsname
        MetaOperation.register(newclass)
        return newclass

    @staticmethod
    def register(cls):
        MetaOperation.registry[cls.__name__] = cls
        return cls

    @staticmethod
    def get(clsname):
        """Look up a registered operation class by name."""
        try:
            return MetaOperation.registry[clsname]
        except KeyError:
            raise OperationNotImplemented("{" + clsname + "}")
class Operation(metaclass=MetaOperation):
    """
    Base class for crane ('pont') operations.

    The precondition, pretache, tache, postcondition and posttache methods
    return True by default so that the behavior tree is not blocked when a
    subclass does not override them.
    """
    duree = 0          # default duration of the operation
    color = (0,0,0)    # display color of the operation

    @classmethod
    def get_duree(cls, context=None):
        return cls.duree

    @classmethod
    def get_color(cls):
        return cls.color

    @classmethod
    def precondition(cls, context=None):
        return True

    @classmethod
    def pretache(cls, pont):
        # Mark the crane as busy with an operation.
        pont.is_operation = True
        return True

    @classmethod
    def tache(cls, kanban):
        # Paint the current tank node with the operation color and advance
        # the kanban to the next tank.
        kanban.noeud.box.contour_color = (0, 0, 0)
        kanban.noeud.box.color = kanban.operation.get_color()
        kanban.set_next_target()
        return True

    @classmethod
    def postcondition(cls, context=None):
        return True

    @classmethod
    def posttache(cls, pont):
        # Mark the crane as free again.
        pont.is_operation = False
        return True
class Kanban(object):
    """Work order asking a crane ('pont') to run one operation over a
    contiguous range of tanks ('cuves') in a sector."""

    def __init__(self, token):
        self.operation = MetaOperation.get(token)
        self.croissant = True          # traversal direction flag
        self.debut = 0                 # index of the first tank
        self.cuve_max = 24             # number of tanks to visit
        self.cuve_courante = 0         # tanks completed so far
        self.noeud = None              # current target node
        self.extra = None              # optional extra token from the parser
        self.pont = None               # crane currently assigned
        self.actif = True              # True: the crane may start as soon as the kanban is received
        self.actif_defaut = True
        self.completed = False
        self.temps_termine = 0
        self.temps_restant = 0

    def reset(self):
        """Return the kanban to its initial, unassigned state."""
        self.cuve_courante = 0
        self.temps_termine = 0
        self.pont = None
        self.completed = False
        self.actif = self.actif_defaut

    def _resolve_target(self, secteur):
        # Shared lookup (previously duplicated in init_current_target and
        # get_current_target): node of the current tank, or None past the end.
        indice = self.debut + self.cuve_courante
        if indice < len(secteur.noeuds_cuves):
            self.noeud = secteur.noeuds_cuves[indice]
            return self.noeud
        return None

    def init_current_target(self, secteur):
        """Resolve (and cache) the node for the current tank."""
        return self._resolve_target(secteur)

    def get_current_target(self, secteur):
        """Same lookup as init_current_target; kept for API compatibility."""
        return self._resolve_target(secteur)

    def set_next_target(self):
        """Advance to the next tank, flagging completion past the last one."""
        if self.cuve_courante < self.cuve_max:
            self.cuve_courante += 1
        if self.cuve_courante >= self.cuve_max:
            self.completed = True

    def is_completed(self):
        return self.completed

    def __str__(self):
        return "{0} {1} {2}".format(self.operation.name, self.debut, self.cuve_max)

    __repr__ = __str__

    def to_str(self):
        return str(self)
class KanbanParser(object):
    """Parses comma-separated kanban tokens of the form
    <op-name><begin>[_<count>][_<extra>] into Kanban objects."""
    regex_pattern = r"([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?"

    @staticmethod
    def string_to_kanbans_list(s):
        """Split *s* on commas (whitespace ignored) and build one Kanban per
        expanded token."""
        kanbans_str = s.replace(" ", "").replace("\t", "").replace("\n", "")
        tokens = kanbans_str.split(",")
        kanbans = []
        for token1 in tokens:
            preprocessed_tokens = KanbanParser.preprocess_token(token1)
            for token2 in preprocessed_tokens:
                kanbans.append(KanbanParser.process_token(token2))
        return kanbans

    @staticmethod
    def preprocess_token(token):
        """Expand a token carrying a count into `count` single-tank tokens
        with consecutive begin indices; raise ParsingException on bad input."""
        match = re.match(KanbanParser.regex_pattern, token, re.I)
        if match:
            operation_type = "NONE"
            begin = 0
            count = 0
            extra = ""
            items = match.groups()
            preprocessed_tokens = []
            if items[0]:
                operation_type = items[0]
            if items[1]:
                begin = int(items[1])
            if items[2]:
                count = int(items[2])
            else:
                count = 1
            if items[3]:
                extra = str(items[3]).upper()
            for i in range(count):
                if extra == "":
                    preprocessed_tokens.append("{type}{begin}_{number_to_visit}".format(type=operation_type, begin=begin+i,
                                                                                        number_to_visit=1))
                else:
                    preprocessed_tokens.append("{type}{begin}_{number_to_visit}_{extra}".format(type=operation_type, begin=begin+i,
                                                                                                number_to_visit=1, extra=extra))
            return preprocessed_tokens
        else:
            raise ParsingException("{0} doit être dans le format: ([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?".format(token))

    @staticmethod
    def process_token(token):
        """Build one Kanban from a single-tank token; raise ParsingException
        on malformed input."""
        kanban = None
        match = re.match(KanbanParser.regex_pattern, token, re.I)
        # NOTE(review): op is a hard-coded space, so op.isupper() below is
        # always False and 'croissant' never reflects the token's case —
        # looks like dead/broken code; confirm the intended behavior.
        op = ' '
        if match:
            items = match.groups()
            if items[0]:
                kanban = Kanban(items[0])
                kanban.croissant = op.isupper()
            if items[1]:
                # Tokens are 1-based; internal tank indices are 0-based.
                kanban.debut = int(items[1]) - 1
            if items[2]:
                kanban.cuve_max = int(items[2])
            else:
                kanban.cuve_max = 1
            if items[3]:
                kanban.extra = str(items[3]).upper()
        else:
            raise ParsingException("{0} doit être dans le format: ([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?".format(token))
        return kanban
return kanban
class DelayedKanban(object):
    """Kanban variant describing an operation with a duration that fires at
    a scheduled trigger time instead of targeting tanks."""

    def __init__(self, token):
        self.operation = MetaOperation.get(token)
        self.duree = 0           # duration of the operation
        self.t_trigger = 0       # simulation time at which it triggers
        self.pont = None         # crane currently assigned
        self.completed = False
        self.temps_termine = 0
        self.extra = None        # optional extra token from the parser
        self.temps_restant = 0

    def reset(self):
        """Clear assignment and completion state."""
        self.temps_termine = 0
        self.pont = None
        self.completed = False

    def is_completed(self):
        return self.completed

    def to_str(self):
        return "{0} {1} {2}".format(self.operation.name, self.duree, self.t_trigger)

    def __str__(self):
        return self.to_str()

    def __repr__(self):
        return self.to_str()
class DelayedKanbanParser(object):
    """Parses comma-separated tokens of the form
    <op-name><duration>[_<trigger-time>][_<extra>] into DelayedKanban objects."""
    regex_pattern = r"([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?"

    @staticmethod
    def string_to_list(s):
        """Split *s* on commas (whitespace ignored) and parse each token."""
        pauses_str = s.replace(" ", "").replace("\t", "").replace("\n", "")
        tokens = pauses_str.split(",")
        pauses = []
        for token1 in tokens:
            pauses.append(DelayedKanbanParser.process_token(token1))
        return pauses

    @staticmethod
    def process_token(token):
        """Build one DelayedKanban from a token; raise ParsingException on
        malformed input."""
        pause = None
        match = re.match(DelayedKanbanParser.regex_pattern, token, re.I)
        if match:
            items = match.groups()
            if items[0]:
                pause = DelayedKanban(items[0])
            if items[1]:
                pause.duree = int(items[1])
            if items[2]:
                pause.t_trigger = int(items[2])
            else:
                pause.t_trigger = 0
            if items[3]:
                pause.extra = str(items[3]).upper()
        else:
            raise ParsingException("{0} doit être dans le format: ([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?".format(token))
        return pause
class DeltaKanban(object):
    """Kanban targeting a tank range, with an extra 'delta' time parameter."""

    def __init__(self, token):
        self.operation = MetaOperation.get(token)
        self.debut = 0                 # index of the first tank
        self.cuve_max = 1              # number of tanks to visit
        self.cuve_courante = 0         # tanks completed so far
        self.delta = 0                 # time offset parameter
        self.pont = None               # crane currently assigned
        self.completed = False
        self.temps_termine = 0
        self.noeud = None              # current target node
        self.extra = None              # optional extra token from the parser
        self.temps_restant = 0

    def is_completed(self):
        return self.completed

    def reset(self):
        """Return the kanban to its initial, unassigned state."""
        self.cuve_courante = 0
        self.temps_termine = 0
        self.pont = None
        self.completed = False

    def _resolve_target(self, secteur):
        # Shared lookup (previously duplicated in init_current_target and
        # get_current_target): node of the current tank, or None past the end.
        indice = self.debut + self.cuve_courante
        if indice < len(secteur.noeuds_cuves):
            self.noeud = secteur.noeuds_cuves[indice]
            return self.noeud
        return None

    def init_current_target(self, secteur):
        """Resolve (and cache) the node for the current tank."""
        return self._resolve_target(secteur)

    def get_current_target(self, secteur):
        """Same lookup as init_current_target; kept for API compatibility."""
        return self._resolve_target(secteur)

    def set_next_target(self):
        """Advance to the next tank, flagging completion past the last one."""
        if self.cuve_courante < self.cuve_max:
            self.cuve_courante += 1
        if self.cuve_courante >= self.cuve_max:
            self.completed = True

    def __str__(self):
        return "{0} {1} {2}".format(self.operation.name, self.debut, self.delta)

    __repr__ = __str__

    def to_str(self):
        return str(self)
class DeltaKanbanParser(object):
    """Parses comma-separated tokens of the form
    <op-name><begin>[_<delta>][_<extra>] into DeltaKanban objects."""
    regex_pattern = r"([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?"

    @staticmethod
    def string_to_list(s):
        """Split *s* on commas (whitespace ignored) and parse each token."""
        dk_str = s.replace(" ", "").replace("\t", "").replace("\n", "")
        tokens = dk_str.split(",")
        dk = []
        for token1 in tokens:
            dk.append(DeltaKanbanParser.process_token(token1))
        return dk

    @staticmethod
    def process_token(token):
        """Build one DeltaKanban from a token; raise ParsingException on
        malformed input."""
        dk = None
        match = re.match(DeltaKanbanParser.regex_pattern, token, re.I)
        if match:
            items = match.groups()
            if items[0]:
                dk = DeltaKanban(items[0])
            if items[1]:
                # Tokens are 1-based; internal tank indices are 0-based.
                dk.debut = int(items[1]) - 1
            if items[2]:
                dk.delta = int(items[2])
            else:
                dk.delta = 0
            if items[3]:
                dk.extra = str(items[3]).upper()
        else:
            raise ParsingException("{0} doit être dans le format: ([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?".format(token))
        return dk
class ParsingException(Exception):
    """Raised when a kanban token does not match the expected grammar."""
class OperationNotImplemented(Exception):
pass | simulation/kanban.py | import re
class MetaOperation(type):
"""
Metaclasse qui permet l'enregistrement de tous les classes qui héritent de la
classe Operation. À noter qu'il faut les importer avant MetaOperation pour qu'elles
soient disponibles dans le registre
"""
registry = {}
def __new__(cls, clsname, bases, attrs):
newclass = super(MetaOperation, cls).__new__(cls, clsname, bases, attrs)
newclass.name = clsname
MetaOperation.register(newclass)
return newclass
@staticmethod
def register(cls):
MetaOperation.registry[cls.__name__] = cls
return cls
@staticmethod
def get(clsname):
try:
return MetaOperation.registry[clsname]
except KeyError:
raise OperationNotImplemented("{" + clsname + "}")
class Operation(metaclass=MetaOperation):
"""
Classe de base de pour les opérations des ponts.
Les méthodes precondtion, pretache, tache, postcondition et posttache
retournent par défaut true pour ne pas empêcher l'exécution du bt si
elles n'ont pas été implémenté dans les classes qui héritent de cette classe.
"""
duree = 0
color = (0,0,0)
@classmethod
def get_duree(cls, context=None):
return cls.duree
@classmethod
def get_color(cls):
return cls.color
@classmethod
def precondition(cls, context=None):
return True
@classmethod
def pretache(cls, pont):
pont.is_operation = True
return True
@classmethod
def tache(cls, kanban):
kanban.noeud.box.contour_color = (0, 0, 0)
kanban.noeud.box.color = kanban.operation.get_color()
kanban.set_next_target()
return True
@classmethod
def postcondition(cls, context=None):
return True
@classmethod
def posttache(cls, pont):
pont.is_operation = False
return True
class Kanban(object):
def __init__(self, token):
self.operation = MetaOperation.get(token)
self.croissant = True
self.debut = 0
self.cuve_max = 24
self.cuve_courante = 0
self.noeud = None
self.extra = None
self.pont = None
self.actif = True #True, le pont peut commencer aussitôt recu
self.actif_defaut = True
self.completed = False
self.temps_termine = 0
self.temps_restant = 0
def reset(self):
self.cuve_courante = 0
self.temps_termine = 0
self.pont = None
self.completed = False
self.actif = self.actif_defaut
def init_current_target(self, secteur):
indice = self.debut + self.cuve_courante
if indice < len(secteur.noeuds_cuves):
self.noeud = secteur.noeuds_cuves[indice]
return self.noeud
else:
return None
def get_current_target(self, secteur):
indice = self.debut + self.cuve_courante
if indice < len(secteur.noeuds_cuves):
self.noeud = secteur.noeuds_cuves[indice]
return self.noeud
else:
return None
def set_next_target(self):
if self.cuve_courante < self.cuve_max:
self.cuve_courante += 1
if self.cuve_courante >= self.cuve_max:
self.completed = True
def is_completed(self):
return self.completed
def __str__(self):
return "{0} {1} {2}".format(self.operation.name, self.debut, self.cuve_max)
def __repr__(self):
return "{0} {1} {2}".format(self.operation.name, self.debut, self.cuve_max)
def to_str(self):
return "{0} {1} {2}".format(self.operation.name, self.debut, self.cuve_max)
class KanbanParser(object):
regex_pattern = r"([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?"
@staticmethod
def string_to_kanbans_list(s):
kanbans_str = s.replace(" ", "").replace("\t", "").replace("\n", "")
tokens = kanbans_str.split(",")
kanbans = []
for token1 in tokens:
preprocessed_tokens = KanbanParser.preprocess_token(token1)
for token2 in preprocessed_tokens:
kanbans.append(KanbanParser.process_token(token2))
return kanbans
@staticmethod
def preprocess_token(token):
match = re.match(KanbanParser.regex_pattern, token, re.I)
if match:
operation_type = "NONE"
begin = 0
count = 0
extra = ""
items = match.groups()
preprocessed_tokens = []
if items[0]:
operation_type = items[0]
if items[1]:
begin = int(items[1])
if items[2]:
count = int(items[2])
else:
count = 1
if items[3]:
extra = str(items[3]).upper()
for i in range(count):
if extra == "":
preprocessed_tokens.append("{type}{begin}_{number_to_visit}".format(type=operation_type, begin=begin+i,
number_to_visit=1))
else:
preprocessed_tokens.append("{type}{begin}_{number_to_visit}_{extra}".format(type=operation_type, begin=begin+i,
number_to_visit=1, extra=extra))
return preprocessed_tokens
else:
raise ParsingException("{0} doit être dans le format: ([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?".format(token))
@staticmethod
def process_token(token):
kanban = None
match = re.match(KanbanParser.regex_pattern, token, re.I)
op = ' '
if match:
items = match.groups()
if items[0]:
kanban = Kanban(items[0])
kanban.croissant = op.isupper()
if items[1]:
kanban.debut = int(items[1]) - 1
if items[2]:
kanban.cuve_max = int(items[2])
else:
kanban.cuve_max = 1
if items[3]:
kanban.extra = str(items[3]).upper()
else:
raise ParsingException("{0} doit être dans le format: ([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?".format(token))
return kanban
class DelayedKanban(object):
def __init__(self, token):
self.operation = MetaOperation.get(token)
self.duree = 0
self.t_trigger = 0
self.pont = None
self.completed = False
self.temps_termine = 0
self.extra = None
self.temps_restant = 0
def reset(self):
self.temps_termine = 0
self.pont = None
self.completed = False
def is_completed(self):
return self.completed
def __str__(self):
return "{0} {1} {2}".format(self.operation.name, self.duree, self.t_trigger)
def __repr__(self):
return "{0} {1} {2}".format(self.operation.name, self.duree, self.t_trigger)
def to_str(self):
return "{0} {1} {2}".format(self.operation.name, self.duree, self.t_trigger)
class DelayedKanbanParser(object):
regex_pattern = r"([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?"
@staticmethod
def string_to_list(s):
pauses_str = s.replace(" ", "").replace("\t", "").replace("\n", "")
tokens = pauses_str.split(",")
pauses = []
for token1 in tokens:
pauses.append(DelayedKanbanParser.process_token(token1))
return pauses
@staticmethod
def process_token(token):
pause = None
match = re.match(DelayedKanbanParser.regex_pattern, token, re.I)
if match:
items = match.groups()
if items[0]:
pause = DelayedKanban(items[0])
if items[1]:
pause.duree = int(items[1])
if items[2]:
pause.t_trigger = int(items[2])
else:
pause.t_trigger = 0
if items[3]:
pause.extra = str(items[3]).upper()
else:
raise ParsingException("{0} doit être dans le format: ([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?".format(token))
return pause
class DeltaKanban(object):
def __init__(self, token):
self.operation = MetaOperation.get(token)
self.debut = 0
self.cuve_max = 1
self.cuve_courante = 0
self.delta = 0
self.pont = None
self.completed = False
self.temps_termine = 0
self.noeud = None
self.extra = None
self.temps_restant = 0
def is_completed(self):
return self.completed
def reset(self):
self.cuve_courante = 0
self.temps_termine = 0
self.pont = None
self.completed = False
def init_current_target(self, secteur):
indice = self.debut + self.cuve_courante
if indice < len(secteur.noeuds_cuves):
self.noeud = secteur.noeuds_cuves[indice]
return self.noeud
else:
return None
def get_current_target(self, secteur):
indice = self.debut + self.cuve_courante
if indice < len(secteur.noeuds_cuves):
self.noeud = secteur.noeuds_cuves[indice]
return self.noeud
else:
return None
def set_next_target(self):
if self.cuve_courante < self.cuve_max:
self.cuve_courante += 1
if self.cuve_courante >= self.cuve_max:
self.completed = True
def __str__(self):
return "{0} {1} {2}".format(self.operation.name, self.debut, self.delta)
def __repr__(self):
return "{0} {1} {2}".format(self.operation.name, self.debut, self.delta)
def to_str(self):
return "{0} {1} {2}".format(self.operation.name, self.debut, self.delta)
class DeltaKanbanParser(object):
regex_pattern = r"([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?"
@staticmethod
def string_to_list(s):
dk_str = s.replace(" ", "").replace("\t", "").replace("\n", "")
tokens = dk_str.split(",")
dk = []
for token1 in tokens:
dk.append(DeltaKanbanParser.process_token(token1))
return dk
@staticmethod
def process_token(token):
dk = None
match = re.match(DeltaKanbanParser.regex_pattern, token, re.I)
if match:
items = match.groups()
if items[0]:
dk = DeltaKanban(items[0])
if items[1]:
dk.debut = int(items[1]) - 1
if items[2]:
dk.delta = int(items[2])
else:
dk.delta = 0
if items[3]:
dk.extra = str(items[3]).upper()
else:
raise ParsingException("{0} doit être dans le format: ([a-zA-Z]+)([0-9]+)_?([0-9]+)?_?([a-z]+)?".format(token))
return dk
class ParsingException(Exception):
pass
class OperationNotImplemented(Exception):
pass | 0.274838 | 0.166641 |
# Export this package's modules as members:
from ._enums import *
from .afd_custom_domain import *
from .afd_endpoint import *
from .afd_origin import *
from .afd_origin_group import *
from .custom_domain import *
from .endpoint import *
from .get_afd_custom_domain import *
from .get_afd_endpoint import *
from .get_afd_origin import *
from .get_afd_origin_group import *
from .get_custom_domain import *
from .get_endpoint import *
from .get_origin import *
from .get_origin_group import *
from .get_policy import *
from .get_profile import *
from .get_profile_supported_optimization_types import *
from .get_route import *
from .get_rule import *
from .get_rule_set import *
from .get_secret import *
from .get_security_policy import *
from .origin import *
from .origin_group import *
from .policy import *
from .profile import *
from .route import *
from .rule import *
from .rule_set import *
from .secret import *
from .security_policy import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:cdn/v20200901:AFDCustomDomain":
return AFDCustomDomain(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:AFDEndpoint":
return AFDEndpoint(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:AFDOrigin":
return AFDOrigin(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:AFDOriginGroup":
return AFDOriginGroup(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:CustomDomain":
return CustomDomain(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Endpoint":
return Endpoint(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Origin":
return Origin(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:OriginGroup":
return OriginGroup(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Policy":
return Policy(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Profile":
return Profile(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Route":
return Route(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Rule":
return Rule(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:RuleSet":
return RuleSet(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Secret":
return Secret(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:SecurityPolicy":
return SecurityPolicy(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "cdn/v20200901", _module_instance)
_register_module() | sdk/python/pulumi_azure_native/cdn/v20200901/__init__.py |
# Export this package's modules as members:
from ._enums import *
from .afd_custom_domain import *
from .afd_endpoint import *
from .afd_origin import *
from .afd_origin_group import *
from .custom_domain import *
from .endpoint import *
from .get_afd_custom_domain import *
from .get_afd_endpoint import *
from .get_afd_origin import *
from .get_afd_origin_group import *
from .get_custom_domain import *
from .get_endpoint import *
from .get_origin import *
from .get_origin_group import *
from .get_policy import *
from .get_profile import *
from .get_profile_supported_optimization_types import *
from .get_route import *
from .get_rule import *
from .get_rule_set import *
from .get_secret import *
from .get_security_policy import *
from .origin import *
from .origin_group import *
from .policy import *
from .profile import *
from .route import *
from .rule import *
from .rule_set import *
from .secret import *
from .security_policy import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:cdn/v20200901:AFDCustomDomain":
return AFDCustomDomain(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:AFDEndpoint":
return AFDEndpoint(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:AFDOrigin":
return AFDOrigin(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:AFDOriginGroup":
return AFDOriginGroup(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:CustomDomain":
return CustomDomain(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Endpoint":
return Endpoint(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Origin":
return Origin(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:OriginGroup":
return OriginGroup(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Policy":
return Policy(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Profile":
return Profile(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Route":
return Route(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Rule":
return Rule(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:RuleSet":
return RuleSet(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:Secret":
return Secret(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:cdn/v20200901:SecurityPolicy":
return SecurityPolicy(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "cdn/v20200901", _module_instance)
_register_module() | 0.399577 | 0.042842 |
from collections import OrderedDict
import pytest
from pandas.util._validators import validate_args_and_kwargs
_fname = "func"
def test_invalid_total_length_max_length_one():
compat_args = ("foo",)
kwargs = {"foo": "FOO"}
args = ("FoO", "BaZ")
min_fname_arg_count = 0
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(kwargs) + len(args) + min_fname_arg_count
msg = (
r"{fname}\(\) takes at most {max_length} "
r"argument \({actual_length} given\)".format(
fname=_fname, max_length=max_length, actual_length=actual_length
)
)
with pytest.raises(TypeError, match=msg):
validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)
def test_invalid_total_length_max_length_multiple():
compat_args = ("foo", "bar", "baz")
kwargs = {"foo": "FOO", "bar": "BAR"}
args = ("FoO", "BaZ")
min_fname_arg_count = 2
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(kwargs) + len(args) + min_fname_arg_count
msg = (
r"{fname}\(\) takes at most {max_length} "
r"arguments \({actual_length} given\)".format(
fname=_fname, max_length=max_length, actual_length=actual_length
)
)
with pytest.raises(TypeError, match=msg):
validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)
@pytest.mark.parametrize("args,kwargs", [((), {"foo": -5, "bar": 2}), ((-5, 2), {})])
def test_missing_args_or_kwargs(args, kwargs):
bad_arg = "bar"
min_fname_arg_count = 2
compat_args = OrderedDict()
compat_args["foo"] = -5
compat_args[bad_arg] = 1
msg = (
r"the '{arg}' parameter is not supported "
r"in the pandas implementation of {func}\(\)".format(arg=bad_arg, func=_fname)
)
with pytest.raises(ValueError, match=msg):
validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)
def test_duplicate_argument():
min_fname_arg_count = 2
compat_args = OrderedDict()
compat_args["foo"] = None
compat_args["bar"] = None
compat_args["baz"] = None
kwargs = {"foo": None, "bar": None}
args = (None,) # duplicate value for "foo"
msg = fr"{_fname}\(\) got multiple values for keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)
def test_validation():
# No exceptions should be raised.
compat_args = OrderedDict()
compat_args["foo"] = 1
compat_args["bar"] = None
compat_args["baz"] = -2
kwargs = {"baz": -2}
args = (1, None)
min_fname_arg_count = 2
validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args) | pandas/tests/util/test_validate_args_and_kwargs.py | from collections import OrderedDict
import pytest
from pandas.util._validators import validate_args_and_kwargs
_fname = "func"
def test_invalid_total_length_max_length_one():
compat_args = ("foo",)
kwargs = {"foo": "FOO"}
args = ("FoO", "BaZ")
min_fname_arg_count = 0
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(kwargs) + len(args) + min_fname_arg_count
msg = (
r"{fname}\(\) takes at most {max_length} "
r"argument \({actual_length} given\)".format(
fname=_fname, max_length=max_length, actual_length=actual_length
)
)
with pytest.raises(TypeError, match=msg):
validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)
def test_invalid_total_length_max_length_multiple():
compat_args = ("foo", "bar", "baz")
kwargs = {"foo": "FOO", "bar": "BAR"}
args = ("FoO", "BaZ")
min_fname_arg_count = 2
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(kwargs) + len(args) + min_fname_arg_count
msg = (
r"{fname}\(\) takes at most {max_length} "
r"arguments \({actual_length} given\)".format(
fname=_fname, max_length=max_length, actual_length=actual_length
)
)
with pytest.raises(TypeError, match=msg):
validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)
@pytest.mark.parametrize("args,kwargs", [((), {"foo": -5, "bar": 2}), ((-5, 2), {})])
def test_missing_args_or_kwargs(args, kwargs):
bad_arg = "bar"
min_fname_arg_count = 2
compat_args = OrderedDict()
compat_args["foo"] = -5
compat_args[bad_arg] = 1
msg = (
r"the '{arg}' parameter is not supported "
r"in the pandas implementation of {func}\(\)".format(arg=bad_arg, func=_fname)
)
with pytest.raises(ValueError, match=msg):
validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)
def test_duplicate_argument():
min_fname_arg_count = 2
compat_args = OrderedDict()
compat_args["foo"] = None
compat_args["bar"] = None
compat_args["baz"] = None
kwargs = {"foo": None, "bar": None}
args = (None,) # duplicate value for "foo"
msg = fr"{_fname}\(\) got multiple values for keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)
def test_validation():
# No exceptions should be raised.
compat_args = OrderedDict()
compat_args["foo"] = 1
compat_args["bar"] = None
compat_args["baz"] = -2
kwargs = {"baz": -2}
args = (1, None)
min_fname_arg_count = 2
validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args) | 0.703957 | 0.313906 |
import tifffile
import tqdm
import os
import numpy as np
import sys
from scandir import scandir
from multiprocessing import Pool
def split_image(img, psplit, add_bg):
img_int = img.astype(int)
img_int[img_int<0] = 0
return np.random.binomial(img_int, psplit) + np.random.poisson(add_bg*psplit,size=img.shape)
# First nbg frames of input: Estimate median bg, generate sample
# f >= nbg < total-nbg:
def split_tiff(tiffinput, tiffoutput, psplit, offset=0, gain=1, nbg=100, bgfile= None):
print (tiffoutput)
print(bgfile)
with tifffile.TiffWriter(tiffoutput) as out_tif, tifffile.TiffWriter(bgfile) as bg_tif:
with tifffile.TiffFile(tiffinput) as in_tif:
total = len(in_tif.pages)
buffer = np.zeros((nbg, * in_tif.pages[0].shape), dtype=np.int)
for f in range(nbg):
buffer[f] = in_tif.pages[f].asarray()
bufindex = 0
bg = (np.median(buffer,0)-offset)/gain
for f in range(total):
# for f in tqdm.trange(total):
if f < nbg:
img = buffer[f]
else:
img = in_tif.pages[f].asarray()
buffer[bufindex] = img; bufindex+=1
if(bufindex==nbg):
bufindex=0
bg = ((np.median(buffer,0)-offset)/gain).astype(np.uint16)
bg_tif.save(bg)
img = (img-offset)/gain
out_tif.save(np.ascontiguousarray(split_image(img, psplit, bg), dtype=np.uint16))
sys.stdout.write(f"\rframe {f}/{total} ({f/total*100:.2f}%)")
print()
def _process(args):
path, outdir, psplit, offset, gain, nbg = args
print(f"pid={os.getpid()}: {path}")
filename = os.path.split(path)[1]
outfile = outdir + filename
bgfile = outdir + "bg/" + filename
os.makedirs(outdir + "bg/",exist_ok=True)
split_tiff(path,outfile,psplit,offset,gain,nbg,bgfile)
def split_dir(inputdir, outputdir, psplit, nbg, offset=0, gain=1):
params = []
def cb(fn):
args=[ fn, outputdir, psplit, offset, gain, nbg]
#params.append(args)
_process(args)
scandir(inputdir, "*.tif", cb)
# p = Pool(8)
# p.map(_process, params)
if __name__ == "__main__":
split_dir('O:/mod/', 'O:/mod-psplit/', 0.5, nbg=1000, offset=100.2,gain=1/0.47)
# split_tiff('../../../SMLM/data/gattaquant 80nm thirdshift/80nm-3rdshift-1_0.tif',
# '../../../SMLM/data/gattaquant 80nm thirdshift/80nm-3rdshift-psplit-1_0.tif', 0.5, 100.2, 1/0.47, 300,
# '../../../SMLM/data/gattaquant 80nm thirdshift/80nm-3rdshift-psplit-1_0-bg.tif') | python/utils/tiff_split_signal_photons.py |
import tifffile
import tqdm
import os
import numpy as np
import sys
from scandir import scandir
from multiprocessing import Pool
def split_image(img, psplit, add_bg):
img_int = img.astype(int)
img_int[img_int<0] = 0
return np.random.binomial(img_int, psplit) + np.random.poisson(add_bg*psplit,size=img.shape)
# First nbg frames of input: Estimate median bg, generate sample
# f >= nbg < total-nbg:
def split_tiff(tiffinput, tiffoutput, psplit, offset=0, gain=1, nbg=100, bgfile= None):
print (tiffoutput)
print(bgfile)
with tifffile.TiffWriter(tiffoutput) as out_tif, tifffile.TiffWriter(bgfile) as bg_tif:
with tifffile.TiffFile(tiffinput) as in_tif:
total = len(in_tif.pages)
buffer = np.zeros((nbg, * in_tif.pages[0].shape), dtype=np.int)
for f in range(nbg):
buffer[f] = in_tif.pages[f].asarray()
bufindex = 0
bg = (np.median(buffer,0)-offset)/gain
for f in range(total):
# for f in tqdm.trange(total):
if f < nbg:
img = buffer[f]
else:
img = in_tif.pages[f].asarray()
buffer[bufindex] = img; bufindex+=1
if(bufindex==nbg):
bufindex=0
bg = ((np.median(buffer,0)-offset)/gain).astype(np.uint16)
bg_tif.save(bg)
img = (img-offset)/gain
out_tif.save(np.ascontiguousarray(split_image(img, psplit, bg), dtype=np.uint16))
sys.stdout.write(f"\rframe {f}/{total} ({f/total*100:.2f}%)")
print()
def _process(args):
path, outdir, psplit, offset, gain, nbg = args
print(f"pid={os.getpid()}: {path}")
filename = os.path.split(path)[1]
outfile = outdir + filename
bgfile = outdir + "bg/" + filename
os.makedirs(outdir + "bg/",exist_ok=True)
split_tiff(path,outfile,psplit,offset,gain,nbg,bgfile)
def split_dir(inputdir, outputdir, psplit, nbg, offset=0, gain=1):
params = []
def cb(fn):
args=[ fn, outputdir, psplit, offset, gain, nbg]
#params.append(args)
_process(args)
scandir(inputdir, "*.tif", cb)
# p = Pool(8)
# p.map(_process, params)
if __name__ == "__main__":
split_dir('O:/mod/', 'O:/mod-psplit/', 0.5, nbg=1000, offset=100.2,gain=1/0.47)
# split_tiff('../../../SMLM/data/gattaquant 80nm thirdshift/80nm-3rdshift-1_0.tif',
# '../../../SMLM/data/gattaquant 80nm thirdshift/80nm-3rdshift-psplit-1_0.tif', 0.5, 100.2, 1/0.47, 300,
# '../../../SMLM/data/gattaquant 80nm thirdshift/80nm-3rdshift-psplit-1_0-bg.tif') | 0.138491 | 0.157234 |
import sys
from pathlib import Path
from unittest import TestCase
import pytest
import yaml
from click.testing import CliRunner
from peekingduck.cli import cli
# Minimal default pipeline used when generating throwaway pipeline files.
DEFAULT_NODES = ["input.visual", "model.yolo", "draw.bbox", "output.screen"]
# Known-good values for a custom node's subdir/type/name.
GOOD_SUBDIR = "custom_nodes"
GOOD_TYPE = "dabble"
GOOD_NAME = "name"
PROJECT_DIR = Path("tmp_dir")
# Repository layout: this file lives two levels below the repo root, next to
# the ``peekingduck`` package.
PKD_DIR = Path(__file__).resolve().parents[2] / "peekingduck"
PKD_CONFIG_DIR = PKD_DIR / "configs"
PKD_NODES_DIR = PKD_DIR / "pipeline" / "nodes"
# Canned interactive inputs fed to the ``create-node`` prompts.
with open(
    PKD_DIR.parent / "tests" / "data" / "user_inputs" / "create_node.yml"
) as infile:
    CREATE_NODE_INPUT = yaml.safe_load(infile.read())
# Canned ``--config_path`` pipeline configurations.
with open(
    PKD_DIR.parent / "tests" / "data" / "user_configs" / "create_node.yml"
) as infile:
    CREATE_NODE_CONFIG = yaml.safe_load(infile.read())
@pytest.fixture(params=[0, 1])
def create_node_input_abort(request):
    """Yield simulated user input ending with a rejected confirmation prompt.

    Parametrized over the two sets of "good" inputs in the test data.
    """
    # Windows has a different absolute path format
    path_key = "bad_paths_win" if sys.platform == "win32" else "bad_paths"
    idx = request.param
    inputs = CREATE_NODE_INPUT
    yield (
        inputs[path_key],
        inputs["bad_types"],
        inputs["bad_names"],
        inputs["good_paths"][idx],
        inputs["good_types"][idx],
        inputs["good_names"][idx],
        inputs["proceed"]["reject"],
    )
@pytest.fixture(params=[0, 1])
def create_node_input_accept(request):
    """Yield simulated user input ending with an accepted confirmation prompt.

    Parametrized over the two sets of "good" inputs in the test data.
    """
    # Windows has a different absolute path format (mirrors the abort fixture).
    path_key = "bad_paths_win" if sys.platform == "win32" else "bad_paths"
    idx = request.param
    inputs = CREATE_NODE_INPUT
    yield (
        inputs[path_key],
        inputs["bad_types"],
        inputs["bad_names"],
        inputs["good_paths"][idx],
        inputs["good_types"][idx],
        inputs["good_names"][idx],
        inputs["proceed"]["accept"][idx],
    )
@pytest.fixture
def cwd():
    """Return the current working directory at test time."""
    current_dir = Path.cwd()
    return current_dir
def get_custom_node_subpaths(node_subdir, node_type, node_name):
    """Return the (config, script) relative path strings for a custom node."""
    base = Path(node_subdir)
    config_subpath = base / "configs" / node_type / f"{node_name}.yml"
    script_subpath = base / node_type / f"{node_name}.py"
    return str(config_subpath), str(script_subpath)
def setup_custom_node(node_subdir, node_type, node_name):
    """Create empty config and script stubs for a custom node under ./src."""
    base_dir = Path.cwd() / "src" / node_subdir
    config_dir = base_dir / "configs" / node_type
    script_dir = base_dir / node_type
    for directory in (config_dir, script_dir):
        directory.mkdir(parents=True, exist_ok=True)
    (config_dir / f"{node_name}.yml").touch()
    (script_dir / f"{node_name}.py").touch()
@pytest.mark.usefixtures("tmp_dir", "tmp_project_dir")
class TestCliCreateNode:
def test_abort(self, create_node_input_abort):
(
bad_paths,
bad_types,
bad_names,
good_path,
good_type,
good_name,
proceed,
) = create_node_input_abort
result = CliRunner().invoke(
cli,
["create-node"],
input=bad_paths
+ good_path
+ bad_types
+ good_type
+ bad_names
+ good_name
+ proceed,
)
# Count only substring we create so we are unaffected by click changes
config_subpath, script_subpath = get_custom_node_subpaths(
good_path.strip(), good_type.strip(), good_name.strip()
)
assert result.output.count("Path cannot") == bad_paths.count("\n")
assert result.output.count("is not one of") == bad_types.count("\n")
assert result.output.count("Invalid node name") == bad_names.count("\n")
assert result.output.count(config_subpath) == 1
assert result.output.count(script_subpath) == 1
assert result.output.count("Aborted!") == 1
    def test_accept(self, create_node_input_accept):
        """create-node re-prompts on invalid input and, once confirmed,
        creates config/script files matching the packaged templates."""
        (
            bad_paths,
            bad_types,
            bad_names,
            good_path,
            good_type,
            good_name,
            proceed,
        ) = create_node_input_accept
        result = CliRunner().invoke(
            cli,
            ["create-node"],
            input=bad_paths
            + good_path
            + bad_types
            + good_type
            + bad_names
            + good_name
            + proceed,
        )
        # Count only substring we create so we are unaffected by click changes
        config_subpath, script_subpath = get_custom_node_subpaths(
            good_path.strip(), good_type.strip(), good_name.strip()
        )
        # One error message per rejected line of input (newline-delimited).
        assert result.output.count("Path cannot") == bad_paths.count("\n")
        assert result.output.count("is not one of") == bad_types.count("\n")
        assert result.output.count("Invalid node name") == bad_names.count("\n")
        assert result.output.count(config_subpath) == 1
        assert result.output.count(script_subpath) == 1
        assert result.output.count("Created node!") == 1
        node_subdir = good_path.strip()
        node_type = good_type.strip()
        node_name = good_name.strip()
        cwd = Path.cwd()
        config_path = (
            cwd / "src" / node_subdir / "configs" / node_type / f"{node_name}.yml"
        )
        script_path = cwd / "src" / node_subdir / node_type / f"{node_name}.py"
        assert config_path.exists()
        assert script_path.exists()
        # The generated config must be a byte-for-byte copy of the template.
        with open(config_path) as actual_file, open(
            PKD_CONFIG_DIR / "node_template.yml"
        ) as expected_file:
            assert actual_file.read() == expected_file.read()
        # The generated script must match the template from its first
        # docstring line onward (lines before it are dropped from the copy).
        with open(script_path) as actual_file, open(
            PKD_NODES_DIR / "node_template.py"
        ) as expected_file:
            lines = expected_file.readlines()
            # Ensuring start exists/is valid is not done here since we expect
            # it to always be valid
            for i, line in enumerate(lines):
                if line.startswith('"'):
                    start = i
                    break
            assert actual_file.readlines() == lines[start:]
    def test_duplicate_node_name(self, create_node_input_accept):
        """create-node rejects a node name that already exists and accepts the
        next (free) name entered by the user."""
        _, _, _, good_path, good_type, good_name, proceed = create_node_input_accept
        node_subdir = good_path.strip()
        node_type = good_type.strip()
        node_name = good_name.strip()
        node_name_2 = "available_node_name"
        # First enter the taken name, then a free one on the next prompt.
        name_input = f"{good_name}{node_name_2}\n"
        # Pre-create the node so the first name entered collides.
        setup_custom_node(node_subdir, node_type, node_name)
        result = CliRunner().invoke(
            cli,
            ["create-node"],
            input=good_path + good_type + name_input + proceed,
        )
        # Only check the "Node name exists" message, others are checked by
        # previous tests.
        assert result.output.count("Node name already exists!") == 1
        cwd = Path.cwd()
        config_path = (
            cwd / "src" / node_subdir / "configs" / node_type / f"{node_name_2}.yml"
        )
        script_path = cwd / "src" / node_subdir / node_type / f"{node_name_2}.py"
        assert config_path.exists()
        assert script_path.exists()
        # The generated config must be a byte-for-byte copy of the template.
        with open(config_path) as actual_file, open(
            PKD_CONFIG_DIR / "node_template.yml"
        ) as expected_file:
            assert actual_file.read() == expected_file.read()
        # The generated script must match the template from its first
        # docstring line onward (lines before it are dropped from the copy).
        with open(script_path) as actual_file, open(
            PKD_NODES_DIR / "node_template.py"
        ) as expected_file:
            lines = expected_file.readlines()
            for i, line in enumerate(lines):
                if line.startswith('"'):
                    start = i
                    break
            assert actual_file.readlines() == lines[start:]
def test_cli_options_abort(self, create_node_input_abort):
(
bad_paths,
bad_types,
bad_names,
good_path,
good_type,
good_name,
proceed,
) = create_node_input_abort
result = CliRunner().invoke(
cli,
[
"create-node",
"--node_subdir",
"../some/path",
"--node_type",
"some type",
"--node_name",
"some name",
],
input=bad_paths
+ good_path
+ bad_types
+ good_type
+ bad_names
+ good_name
+ proceed,
)
# Count only substring we create so we are unaffected by click changes
config_subpath, script_subpath = get_custom_node_subpaths(
good_path.strip(), good_type.strip(), good_name.strip()
)
assert result.output.count("Path cannot") == bad_paths.count("\n") + 1
assert result.output.count("is not one of") == bad_types.count("\n") + 1
assert result.output.count("Invalid node name") == bad_names.count("\n") + 1
assert result.output.count(config_subpath) == 1
assert result.output.count(script_subpath) == 1
assert result.output.count("Aborted!") == 1
    def test_cli_options_accept(self, create_node_input_accept):
        """create-node accepts all values via CLI options, needing only the
        final confirmation as interactive input, and creates the node files."""
        _, _, _, good_path, good_type, good_name, proceed = create_node_input_accept
        node_subdir = good_path.strip()
        node_type = good_type.strip()
        node_name = good_name.strip()
        # Creates only using CLI options with minimal user input
        result = CliRunner().invoke(
            cli,
            [
                "create-node",
                "--node_subdir",
                node_subdir,
                "--node_type",
                node_type,
                "--node_name",
                node_name,
            ],
            input=proceed,
        )
        config_subpath, script_subpath = get_custom_node_subpaths(
            node_subdir, node_type, node_name
        )
        assert result.output.count(config_subpath) == 1
        assert result.output.count(script_subpath) == 1
        assert result.output.count("Created node!") == 1
        cwd = Path.cwd()
        config_path = (
            cwd / "src" / node_subdir / "configs" / node_type / f"{node_name}.yml"
        )
        script_path = cwd / "src" / node_subdir / node_type / f"{node_name}.py"
        assert config_path.exists()
        assert script_path.exists()
        # The generated config must be a byte-for-byte copy of the template.
        with open(config_path) as actual_file, open(
            PKD_CONFIG_DIR / "node_template.yml"
        ) as expected_file:
            assert actual_file.read() == expected_file.read()
        # The generated script must match the template from its first
        # docstring line onward (lines before it are dropped from the copy).
        with open(script_path) as actual_file, open(
            PKD_NODES_DIR / "node_template.py"
        ) as expected_file:
            lines = expected_file.readlines()
            for i, line in enumerate(lines):
                if line.startswith('"'):
                    start = i
                    break
            assert actual_file.readlines() == lines[start:]
def test_poorly_formatted_config_file(self, cwd):
    """A pipeline file whose top level is not a ``nodes`` mapping (either a
    bare list or a wrong key) should raise ``ValueError``.
    """
    no_top_level_key = cwd / "pipeline_no_top_level_key.yml"
    wrong_top_level_key = cwd / "pipeline_wrong_top_level_key.yml"
    with open(no_top_level_key, "w") as outfile:
        yaml.dump(DEFAULT_NODES, outfile)
    with open(wrong_top_level_key, "w") as outfile:
        yaml.dump({"asdf": DEFAULT_NODES}, outfile)
    for path in (no_top_level_key, wrong_top_level_key):
        # This error originates from DeclarativeLoader
        with pytest.raises(ValueError) as excinfo:
            CliRunner().invoke(
                cli,
                ["create-node", "--config_path", path.name],
                catch_exceptions=False,
            )
        assert "has an invalid structure. Missing top-level 'nodes' key." in str(
            excinfo.value
        )
def test_no_nodes_pipeline_file(self, cwd):
    """A pipeline file with an empty ``nodes:`` entry should raise
    ``ValueError``.
    """
    no_nodes = cwd / "pipeline_no_nodes.yml"
    with open(no_nodes, "w") as outfile:
        data = {"nodes": None}
        # ``yaml`` will create 'nodes: null' by default. Manually replace
        # it to just 'nodes: '. Surprisingly ``yaml`` will load this
        # without error
        outfile.write(yaml.safe_dump(data).replace("null", ""))
    # This error originates from DeclarativeLoader
    with pytest.raises(ValueError) as excinfo:
        CliRunner().invoke(
            cli,
            ["create-node", "--config_path", no_nodes.name],
            catch_exceptions=False,
        )
    assert "does not contain any nodes!" in str(excinfo.value)
@pytest.mark.parametrize(
    "extra_options",
    [
        ["node_subdir"],
        ["node_type"],
        ["node_name"],
        ["node_subdir", "node_type", "node_name"],
    ],
)
def test_invalid_cli_options(self, extra_options):
    """Tests cases when at least one `node_` related option is used with
    `config_path` and when all three `node_` related options are used with
    `config_path`.
    """
    # Flatten [["--opt", "value"], ...] into a single argv list.
    extra_args = [[f"--{option}", "value"] for option in extra_options]
    with pytest.raises(ValueError) as excinfo:
        CliRunner().invoke(
            cli,
            ["create-node", "--config_path", "value"]
            + [arg for arg_pair in extra_args for arg in arg_pair],
            catch_exceptions=False,
        )
    # NOTE(review): "cannot be use" matches the CLI's actual (ungrammatical)
    # message; fix it in the CLI first if ever corrected.
    assert (
        "--config_path cannot be use with --node_subdir, --node_type, or "
        "--node_name!"
    ) in str(excinfo.value)
def test_missing_config_file(self, cwd):
    """``--config_path`` pointing at a nonexistent file raises
    ``FileNotFoundError``.
    """
    config_file = "missing_file.yml"
    with pytest.raises(FileNotFoundError) as excinfo:
        CliRunner().invoke(
            cli,
            ["create-node", "--config_path", config_file],
            catch_exceptions=False,
        )
    assert f"Config file '{config_file}' is not found at" in str(excinfo.value)
def test_no_custom_nodes(self, cwd):
    """Tests when the pipeline file doesn't contain any custom nodes, so
    there's nothing to create.
    """
    pipeline_file = "pipeline_default_nodes_only.yml"
    default_nodes_only = cwd / pipeline_file
    with open(default_nodes_only, "w") as outfile:
        yaml.dump({"nodes": DEFAULT_NODES}, outfile)
    # A pipeline of built-in nodes only is a user error for create-node.
    with pytest.raises(ValueError) as excinfo:
        CliRunner().invoke(
            cli,
            ["create-node", "--config_path", pipeline_file],
            catch_exceptions=False,
        )
    assert (
        f"Config file '{pipeline_file}' does not contain custom nodes!"
        == str(excinfo.value)
    )
def test_invalid_custom_node_string(self, cwd):
    """The custom nodes declared in the pipeline file only contains poor
    formatting. So all will be skipped.
    """
    # Windows has a different absolute-path format, hence the key switch.
    bad_paths = [
        f"{node_subdir}.{GOOD_TYPE}.{GOOD_NAME}"
        for node_subdir in CREATE_NODE_CONFIG[
            "bad_config_paths_win"
            if sys.platform == "win32"
            else "bad_config_paths"
        ]
    ]
    bad_types = [
        f"{GOOD_SUBDIR}.{node_type}.{GOOD_NAME}"
        for node_type in CREATE_NODE_CONFIG["bad_config_types"]
    ]
    bad_names = [
        f"{GOOD_SUBDIR}.{GOOD_TYPE}.{node_name}"
        for node_name in CREATE_NODE_CONFIG["bad_config_names"]
    ]
    pipeline_file = "pipeline_invalid_custom_node_string.yml"
    default_nodes_only = cwd / pipeline_file
    with open(default_nodes_only, "w") as outfile:
        # Create a "challenging" file, with some config overrides
        data = {
            "nodes": [
                {"input.visual": {"source": 0}},
                {"model.yolo": {"model_type": "v4"}},
                "draw.bbox",
            ]
            + bad_paths
            + bad_types
            + bad_names
            + ["output.screen"]
        }
        yaml.dump(data, outfile)
    with TestCase.assertLogs("peekingduck.cli.logger") as captured:
        CliRunner().invoke(cli, ["create-node", "--config_path", pipeline_file])
    offset = 2  # First 2 message is about info about loading config
    counter = 0
    # One "Skipping..." warning is expected per bad node string, in order.
    for node_string in bad_paths:
        assert (
            f"{node_string} contains invalid formatting: 'Path cannot be "
            "absolute!'. Skipping..."
        ) == captured.records[offset + counter].getMessage()
        counter += 1
    for node_string in bad_types:
        # Invalid type error message begins with a 'user_input' so we
        # just check for the presence of the double single quote
        assert (
            f"{node_string} contains invalid formatting: ''"
        ) in captured.records[offset + counter].getMessage()
        counter += 1
    for node_string in bad_names:
        assert (
            f"{node_string} contains invalid formatting: 'Invalid node "
            "name!'. Skipping..."
        ) == captured.records[offset + counter].getMessage()
        counter += 1
def test_create_nodes_from_config_success(self, cwd):
    """A valid custom node string declared in the pipeline file gets its
    config and script files created; verify the log message reports both
    created paths.
    """
    node_string = f"{GOOD_SUBDIR}.{GOOD_TYPE}.{GOOD_NAME}"
    created_config_path = (
        cwd / "src" / GOOD_SUBDIR / "configs" / GOOD_TYPE / f"{GOOD_NAME}.yml"
    )
    created_script_path = cwd / "src" / GOOD_SUBDIR / GOOD_TYPE / f"{GOOD_NAME}.py"
    pipeline_file = "pipeline_invalid_custom_node_string.yml"
    default_nodes_only = cwd / pipeline_file
    with open(default_nodes_only, "w") as outfile:
        # Create a "challenging" file, with some config overrides
        data = {
            "nodes": [
                {"input.visual": {"source": 0}},
                {"model.yolo": {"model_type": "v4"}},
                "draw.bbox",
                node_string,
                "output.screen",
            ]
        }
        yaml.dump(data, outfile)
    with TestCase.assertLogs("peekingduck.cli.logger") as captured:
        CliRunner().invoke(cli, ["create-node", "--config_path", pipeline_file])
    # First 2 message is about info about loading config
    assert (
        f"Creating files for {node_string}:\n\t"
        f"Config file: {created_config_path}\n\t"
        f"Script file: {created_script_path}"
    ) == captured.records[2].getMessage()
def test_create_nodes_from_config_duplicate_node_name(self, cwd):
    """Re-running `create-node --config_path` on a pipeline whose custom
    node already exists should skip it with a duplicate-name warning.
    """
    node_string = f"{GOOD_SUBDIR}.{GOOD_TYPE}.{GOOD_NAME}"
    pipeline_file = "pipeline_invalid_custom_node_string.yml"
    pipeline_path = cwd / pipeline_file
    with open(pipeline_path, "w") as outfile:
        # Create a "challenging" file, with some config overrides
        data = {
            "nodes": [
                {"input.visual": {"source": 0}},
                {"model.yolo": {"model_type": "v4"}},
                "draw.bbox",
                node_string,
                "output.screen",
            ]
        }
        yaml.dump(data, outfile)
    # Create the node first so we trigger the duplicate name warning
    CliRunner().invoke(cli, ["create-node", "--config_path", pipeline_file])
    with TestCase.assertLogs("peekingduck.cli.logger") as captured:
        CliRunner().invoke(cli, ["create-node", "--config_path", pipeline_file])
    # Records 0-1 are info messages about loading the config.
    assert (
        f"{node_string} contains invalid formatting: 'Node name already "
        "exists!'. Skipping..."
    ) == captured.records[2].getMessage()
import sys
from pathlib import Path
from unittest import TestCase
import pytest
import yaml
from click.testing import CliRunner
from peekingduck.cli import cli
# Built-in pipeline nodes used to build valid "default only" pipelines.
DEFAULT_NODES = ["input.visual", "model.yolo", "draw.bbox", "output.screen"]
# Known-good answers for the create-node prompts.
GOOD_SUBDIR = "custom_nodes"
GOOD_TYPE = "dabble"
GOOD_NAME = "name"
PROJECT_DIR = Path("tmp_dir")
# Repository layout: <repo>/peekingduck with its bundled configs and nodes.
PKD_DIR = Path(__file__).resolve().parents[2] / "peekingduck"
PKD_CONFIG_DIR = PKD_DIR / "configs"
PKD_NODES_DIR = PKD_DIR / "pipeline" / "nodes"
# Canned interactive answers (good/bad paths, types, names, confirmations).
with open(
    PKD_DIR.parent / "tests" / "data" / "user_inputs" / "create_node.yml"
) as infile:
    CREATE_NODE_INPUT = yaml.safe_load(infile.read())
# Canned node-string fragments for the --config_path based tests.
with open(
    PKD_DIR.parent / "tests" / "data" / "user_configs" / "create_node.yml"
) as infile:
    CREATE_NODE_CONFIG = yaml.safe_load(infile.read())
@pytest.fixture(params=[0, 1])
def create_node_input_abort(request):
    """Interactive input sets whose final confirmation rejects creation."""
    # Windows has a different absolute path format
    path_key = "bad_paths_win" if sys.platform == "win32" else "bad_paths"
    idx = request.param
    yield (
        CREATE_NODE_INPUT[path_key],
        CREATE_NODE_INPUT["bad_types"],
        CREATE_NODE_INPUT["bad_names"],
        CREATE_NODE_INPUT["good_paths"][idx],
        CREATE_NODE_INPUT["good_types"][idx],
        CREATE_NODE_INPUT["good_names"][idx],
        CREATE_NODE_INPUT["proceed"]["reject"],
    )
@pytest.fixture(params=[0, 1])
def create_node_input_accept(request):
    """Interactive input sets whose final confirmation accepts creation."""
    path_key = "bad_paths_win" if sys.platform == "win32" else "bad_paths"
    idx = request.param
    yield (
        CREATE_NODE_INPUT[path_key],
        CREATE_NODE_INPUT["bad_types"],
        CREATE_NODE_INPUT["bad_names"],
        CREATE_NODE_INPUT["good_paths"][idx],
        CREATE_NODE_INPUT["good_types"][idx],
        CREATE_NODE_INPUT["good_names"][idx],
        CREATE_NODE_INPUT["proceed"]["accept"][idx],
    )
@pytest.fixture
def cwd():
    # Resolved inside the throwaway project dir set up by the class fixtures.
    return Path.cwd()
def get_custom_node_subpaths(node_subdir, node_type, node_name):
    """Return the (config, script) sub-paths of a custom node as strings."""
    base = Path(node_subdir)
    config_subpath = base / "configs" / node_type / f"{node_name}.yml"
    script_subpath = base / node_type / f"{node_name}.py"
    return str(config_subpath), str(script_subpath)
def setup_custom_node(node_subdir, node_type, node_name):
    """Pre-create empty config/script files for a custom node under ./src."""
    src_root = Path.cwd() / "src" / node_subdir
    config_dir = src_root / "configs" / node_type
    script_dir = src_root / node_type
    for directory in (config_dir, script_dir):
        directory.mkdir(parents=True, exist_ok=True)
    (config_dir / f"{node_name}.yml").touch()
    (script_dir / f"{node_name}.py").touch()
# Every test runs inside a throwaway temp dir / temp project dir.
@pytest.mark.usefixtures("tmp_dir", "tmp_project_dir")
class TestCliCreateNode:
    """Tests for the `peekingduck create-node` CLI command."""
def test_abort(self, create_node_input_abort):
    """Walk every prompt with bad then good values, then reject the final
    confirmation; the node sub-paths are shown but nothing is created.
    """
    (
        bad_paths,
        bad_types,
        bad_names,
        good_path,
        good_type,
        good_name,
        proceed,
    ) = create_node_input_abort
    result = CliRunner().invoke(
        cli,
        ["create-node"],
        input=bad_paths
        + good_path
        + bad_types
        + good_type
        + bad_names
        + good_name
        + proceed,
    )
    # Count only substring we create so we are unaffected by click changes
    config_subpath, script_subpath = get_custom_node_subpaths(
        good_path.strip(), good_type.strip(), good_name.strip()
    )
    # One error message is expected per (newline-terminated) bad input.
    assert result.output.count("Path cannot") == bad_paths.count("\n")
    assert result.output.count("is not one of") == bad_types.count("\n")
    assert result.output.count("Invalid node name") == bad_names.count("\n")
    assert result.output.count(config_subpath) == 1
    assert result.output.count(script_subpath) == 1
    assert result.output.count("Aborted!") == 1
def test_accept(self, create_node_input_accept):
    """Walk every prompt with bad then good values, accept the final
    confirmation, and verify the created files match the templates.
    """
    (
        bad_paths,
        bad_types,
        bad_names,
        good_path,
        good_type,
        good_name,
        proceed,
    ) = create_node_input_accept
    result = CliRunner().invoke(
        cli,
        ["create-node"],
        input=bad_paths
        + good_path
        + bad_types
        + good_type
        + bad_names
        + good_name
        + proceed,
    )
    # Count only substring we create so we are unaffected by click changes
    config_subpath, script_subpath = get_custom_node_subpaths(
        good_path.strip(), good_type.strip(), good_name.strip()
    )
    assert result.output.count("Path cannot") == bad_paths.count("\n")
    assert result.output.count("is not one of") == bad_types.count("\n")
    assert result.output.count("Invalid node name") == bad_names.count("\n")
    assert result.output.count(config_subpath) == 1
    assert result.output.count(script_subpath) == 1
    assert result.output.count("Created node!") == 1
    node_subdir = good_path.strip()
    node_type = good_type.strip()
    node_name = good_name.strip()
    cwd = Path.cwd()
    config_path = (
        cwd / "src" / node_subdir / "configs" / node_type / f"{node_name}.yml"
    )
    script_path = cwd / "src" / node_subdir / node_type / f"{node_name}.py"
    assert config_path.exists()
    assert script_path.exists()
    # Created config must be an exact copy of the bundled template.
    with open(config_path) as actual_file, open(
        PKD_CONFIG_DIR / "node_template.yml"
    ) as expected_file:
        assert actual_file.read() == expected_file.read()
    # Created script must match the template from its first docstring line.
    with open(script_path) as actual_file, open(
        PKD_NODES_DIR / "node_template.py"
    ) as expected_file:
        lines = expected_file.readlines()
        # Ensuring start exists/is valid is not done here since we expect
        # it to always be valid
        for i, line in enumerate(lines):
            if line.startswith('"'):
                start = i
                break
        assert actual_file.readlines() == lines[start:]
def test_duplicate_node_name(self, create_node_input_accept):
    """Entering a node name that already exists should be rejected; the
    second (available) name is then used to create the node.
    """
    _, _, _, good_path, good_type, good_name, proceed = create_node_input_accept
    node_subdir = good_path.strip()
    node_type = good_type.strip()
    node_name = good_name.strip()
    node_name_2 = "available_node_name"
    # Two consecutive name answers: the taken one, then a free one.
    name_input = f"{good_name}{node_name_2}\n"
    setup_custom_node(node_subdir, node_type, node_name)
    result = CliRunner().invoke(
        cli,
        ["create-node"],
        input=good_path + good_type + name_input + proceed,
    )
    # Only check the "Node name exists" message, others are checked by
    # previous tests.
    assert result.output.count("Node name already exists!") == 1
    cwd = Path.cwd()
    config_path = (
        cwd / "src" / node_subdir / "configs" / node_type / f"{node_name_2}.yml"
    )
    script_path = cwd / "src" / node_subdir / node_type / f"{node_name_2}.py"
    assert config_path.exists()
    assert script_path.exists()
    with open(config_path) as actual_file, open(
        PKD_CONFIG_DIR / "node_template.yml"
    ) as expected_file:
        assert actual_file.read() == expected_file.read()
    with open(script_path) as actual_file, open(
        PKD_NODES_DIR / "node_template.py"
    ) as expected_file:
        lines = expected_file.readlines()
        for i, line in enumerate(lines):
            if line.startswith('"'):
                start = i
                break
        assert actual_file.readlines() == lines[start:]
def test_cli_options_abort(self, create_node_input_abort):
    """Invalid values passed via CLI options fall back to interactive
    prompting (hence the ``+ 1`` on each error count) before aborting.
    """
    (
        bad_paths,
        bad_types,
        bad_names,
        good_path,
        good_type,
        good_name,
        proceed,
    ) = create_node_input_abort
    result = CliRunner().invoke(
        cli,
        [
            "create-node",
            "--node_subdir",
            "../some/path",
            "--node_type",
            "some type",
            "--node_name",
            "some name",
        ],
        input=bad_paths
        + good_path
        + bad_types
        + good_type
        + bad_names
        + good_name
        + proceed,
    )
    # Count only substring we create so we are unaffected by click changes
    config_subpath, script_subpath = get_custom_node_subpaths(
        good_path.strip(), good_type.strip(), good_name.strip()
    )
    # +1: each bad CLI option value produces one extra error message.
    assert result.output.count("Path cannot") == bad_paths.count("\n") + 1
    assert result.output.count("is not one of") == bad_types.count("\n") + 1
    assert result.output.count("Invalid node name") == bad_names.count("\n") + 1
    assert result.output.count(config_subpath) == 1
    assert result.output.count(script_subpath) == 1
    assert result.output.count("Aborted!") == 1
def test_cli_options_accept(self, create_node_input_accept):
    """Create a custom node non-interactively: all three ``node_*`` values
    are passed as CLI options, so the only user input is the confirmation.
    Verifies the reported sub-paths, the created files, and that they match
    the bundled templates.
    """
    _, _, _, good_path, good_type, good_name, proceed = create_node_input_accept
    node_subdir = good_path.strip()
    node_type = good_type.strip()
    node_name = good_name.strip()
    # Creates only using CLI options with minimal user input
    result = CliRunner().invoke(
        cli,
        [
            "create-node",
            "--node_subdir",
            node_subdir,
            "--node_type",
            node_type,
            "--node_name",
            node_name,
        ],
        input=proceed,
    )
    config_subpath, script_subpath = get_custom_node_subpaths(
        node_subdir, node_type, node_name
    )
    # Count only substrings we create so we are unaffected by click changes.
    assert result.output.count(config_subpath) == 1
    assert result.output.count(script_subpath) == 1
    assert result.output.count("Created node!") == 1
    cwd = Path.cwd()
    config_path = (
        cwd / "src" / node_subdir / "configs" / node_type / f"{node_name}.yml"
    )
    script_path = cwd / "src" / node_subdir / node_type / f"{node_name}.py"
    assert config_path.exists()
    assert script_path.exists()
    # The created config must be an exact copy of the template.
    with open(config_path) as actual_file, open(
        PKD_CONFIG_DIR / "node_template.yml"
    ) as expected_file:
        assert actual_file.read() == expected_file.read()
    # The created script must match the template from its first docstring
    # line onwards (the header above it is stripped on creation).
    with open(script_path) as actual_file, open(
        PKD_NODES_DIR / "node_template.py"
    ) as expected_file:
        lines = expected_file.readlines()
        for i, line in enumerate(lines):
            if line.startswith('"'):
                start = i
                break
        assert actual_file.readlines() == lines[start:]
def test_poorly_formatted_config_file(self, cwd):
    """A pipeline file whose top level is not a ``nodes`` mapping (either a
    bare list or a wrong key) should raise ``ValueError``.
    """
    no_top_level_key = cwd / "pipeline_no_top_level_key.yml"
    wrong_top_level_key = cwd / "pipeline_wrong_top_level_key.yml"
    with open(no_top_level_key, "w") as outfile:
        yaml.dump(DEFAULT_NODES, outfile)
    with open(wrong_top_level_key, "w") as outfile:
        yaml.dump({"asdf": DEFAULT_NODES}, outfile)
    for path in (no_top_level_key, wrong_top_level_key):
        # This error originates from DeclarativeLoader
        with pytest.raises(ValueError) as excinfo:
            CliRunner().invoke(
                cli,
                ["create-node", "--config_path", path.name],
                catch_exceptions=False,
            )
        assert "has an invalid structure. Missing top-level 'nodes' key." in str(
            excinfo.value
        )
def test_no_nodes_pipeline_file(self, cwd):
    """A pipeline file with an empty ``nodes:`` entry should raise
    ``ValueError``.
    """
    no_nodes = cwd / "pipeline_no_nodes.yml"
    with open(no_nodes, "w") as outfile:
        data = {"nodes": None}
        # ``yaml`` will create 'nodes: null' by default. Manually replace
        # it to just 'nodes: '. Surprisingly ``yaml`` will load this
        # without error
        outfile.write(yaml.safe_dump(data).replace("null", ""))
    # This error originates from DeclarativeLoader
    with pytest.raises(ValueError) as excinfo:
        CliRunner().invoke(
            cli,
            ["create-node", "--config_path", no_nodes.name],
            catch_exceptions=False,
        )
    assert "does not contain any nodes!" in str(excinfo.value)
@pytest.mark.parametrize(
    "extra_options",
    [
        ["node_subdir"],
        ["node_type"],
        ["node_name"],
        ["node_subdir", "node_type", "node_name"],
    ],
)
def test_invalid_cli_options(self, extra_options):
    """Tests cases when at least one `node_` related option is used with
    `config_path` and when all three `node_` related options are used with
    `config_path`.
    """
    # Flatten [["--opt", "value"], ...] into a single argv list.
    extra_args = [[f"--{option}", "value"] for option in extra_options]
    with pytest.raises(ValueError) as excinfo:
        CliRunner().invoke(
            cli,
            ["create-node", "--config_path", "value"]
            + [arg for arg_pair in extra_args for arg in arg_pair],
            catch_exceptions=False,
        )
    # NOTE(review): "cannot be use" matches the CLI's actual (ungrammatical)
    # message; fix it in the CLI first if ever corrected.
    assert (
        "--config_path cannot be use with --node_subdir, --node_type, or "
        "--node_name!"
    ) in str(excinfo.value)
def test_missing_config_file(self, cwd):
    """``--config_path`` pointing at a nonexistent file raises
    ``FileNotFoundError``.
    """
    config_file = "missing_file.yml"
    with pytest.raises(FileNotFoundError) as excinfo:
        CliRunner().invoke(
            cli,
            ["create-node", "--config_path", config_file],
            catch_exceptions=False,
        )
    assert f"Config file '{config_file}' is not found at" in str(excinfo.value)
def test_no_custom_nodes(self, cwd):
    """Tests when the pipeline file doesn't contain any custom nodes, so
    there's nothing to create.
    """
    pipeline_file = "pipeline_default_nodes_only.yml"
    default_nodes_only = cwd / pipeline_file
    with open(default_nodes_only, "w") as outfile:
        yaml.dump({"nodes": DEFAULT_NODES}, outfile)
    # A pipeline of built-in nodes only is a user error for create-node.
    with pytest.raises(ValueError) as excinfo:
        CliRunner().invoke(
            cli,
            ["create-node", "--config_path", pipeline_file],
            catch_exceptions=False,
        )
    assert (
        f"Config file '{pipeline_file}' does not contain custom nodes!"
        == str(excinfo.value)
    )
def test_invalid_custom_node_string(self, cwd):
    """The custom nodes declared in the pipeline file only contains poor
    formatting. So all will be skipped.
    """
    # Windows has a different absolute-path format, hence the key switch.
    bad_paths = [
        f"{node_subdir}.{GOOD_TYPE}.{GOOD_NAME}"
        for node_subdir in CREATE_NODE_CONFIG[
            "bad_config_paths_win"
            if sys.platform == "win32"
            else "bad_config_paths"
        ]
    ]
    bad_types = [
        f"{GOOD_SUBDIR}.{node_type}.{GOOD_NAME}"
        for node_type in CREATE_NODE_CONFIG["bad_config_types"]
    ]
    bad_names = [
        f"{GOOD_SUBDIR}.{GOOD_TYPE}.{node_name}"
        for node_name in CREATE_NODE_CONFIG["bad_config_names"]
    ]
    pipeline_file = "pipeline_invalid_custom_node_string.yml"
    default_nodes_only = cwd / pipeline_file
    with open(default_nodes_only, "w") as outfile:
        # Create a "challenging" file, with some config overrides
        data = {
            "nodes": [
                {"input.visual": {"source": 0}},
                {"model.yolo": {"model_type": "v4"}},
                "draw.bbox",
            ]
            + bad_paths
            + bad_types
            + bad_names
            + ["output.screen"]
        }
        yaml.dump(data, outfile)
    with TestCase.assertLogs("peekingduck.cli.logger") as captured:
        CliRunner().invoke(cli, ["create-node", "--config_path", pipeline_file])
    offset = 2  # First 2 message is about info about loading config
    counter = 0
    # One "Skipping..." warning is expected per bad node string, in order.
    for node_string in bad_paths:
        assert (
            f"{node_string} contains invalid formatting: 'Path cannot be "
            "absolute!'. Skipping..."
        ) == captured.records[offset + counter].getMessage()
        counter += 1
    for node_string in bad_types:
        # Invalid type error message begins with a 'user_input' so we
        # just check for the presence of the double single quote
        assert (
            f"{node_string} contains invalid formatting: ''"
        ) in captured.records[offset + counter].getMessage()
        counter += 1
    for node_string in bad_names:
        assert (
            f"{node_string} contains invalid formatting: 'Invalid node "
            "name!'. Skipping..."
        ) == captured.records[offset + counter].getMessage()
        counter += 1
def test_create_nodes_from_config_success(self, cwd):
    """A valid custom node string declared in the pipeline file gets its
    config and script files created; verify the log message reports both
    created paths.
    """
    node_string = f"{GOOD_SUBDIR}.{GOOD_TYPE}.{GOOD_NAME}"
    created_config_path = (
        cwd / "src" / GOOD_SUBDIR / "configs" / GOOD_TYPE / f"{GOOD_NAME}.yml"
    )
    created_script_path = cwd / "src" / GOOD_SUBDIR / GOOD_TYPE / f"{GOOD_NAME}.py"
    pipeline_file = "pipeline_invalid_custom_node_string.yml"
    default_nodes_only = cwd / pipeline_file
    with open(default_nodes_only, "w") as outfile:
        # Create a "challenging" file, with some config overrides
        data = {
            "nodes": [
                {"input.visual": {"source": 0}},
                {"model.yolo": {"model_type": "v4"}},
                "draw.bbox",
                node_string,
                "output.screen",
            ]
        }
        yaml.dump(data, outfile)
    with TestCase.assertLogs("peekingduck.cli.logger") as captured:
        CliRunner().invoke(cli, ["create-node", "--config_path", pipeline_file])
    # First 2 message is about info about loading config
    assert (
        f"Creating files for {node_string}:\n\t"
        f"Config file: {created_config_path}\n\t"
        f"Script file: {created_script_path}"
    ) == captured.records[2].getMessage()
def test_create_nodes_from_config_duplicate_node_name(self, cwd):
    """Re-running `create-node --config_path` on a pipeline whose custom
    node already exists should skip it with a duplicate-name warning.
    """
    node_string = f"{GOOD_SUBDIR}.{GOOD_TYPE}.{GOOD_NAME}"
    pipeline_file = "pipeline_invalid_custom_node_string.yml"
    pipeline_path = cwd / pipeline_file
    with open(pipeline_path, "w") as outfile:
        # Create a "challenging" file, with some config overrides
        data = {
            "nodes": [
                {"input.visual": {"source": 0}},
                {"model.yolo": {"model_type": "v4"}},
                "draw.bbox",
                node_string,
                "output.screen",
            ]
        }
        yaml.dump(data, outfile)
    # Create the node first so we trigger the duplicate name warning
    CliRunner().invoke(cli, ["create-node", "--config_path", pipeline_file])
    with TestCase.assertLogs("peekingduck.cli.logger") as captured:
        CliRunner().invoke(cli, ["create-node", "--config_path", pipeline_file])
    # Records 0-1 are info messages about loading the config.
    assert (
        f"{node_string} contains invalid formatting: 'Node name already "
        "exists!'. Skipping..."
    ) == captured.records[2].getMessage()
import urllib3
import json
import nb_log
import decorator_libs
# https://www.cnblogs.com/YrRoom/p/14054282.html
"""
-p 18083 服务器启动端口
-p 1882 TCP端口
-p 8083 WS端口
-p 8084 WSS端口
-p 8883 SSL端口
"""
"""
非常适合 前端订阅唯一uuid的topic 然后表单中带上这个topic名字请求python接口 -> 接口中发布任务到rabbitmq或redis消息队列 ->
后台消费进程执行任务消费,并将结果发布到mqtt的那个唯一uuid的topic -> mqtt 把结果推送到前端。
使用ajax轮训或者后台导入websocket相关的包来做和前端的长耗时任务的交互都是伪命题,没有mqtt好。
"""
class MqttHttpHelper(nb_log.LoggerMixin, nb_log.LoggerLevelSetterMixin):
    """Publish MQTT messages via the broker's built-in HTTP publish API.

    Only ``urllib3`` is required -- no paho.mqtt client.
    """
    def __init__(self, mqtt_publish_url='http://1172.16.58.3:18083/api/v2/mqtt/publish', user='admin', passwd='<PASSWORD>', display_full_msg=False):
        """
        :param mqtt_publish_url: the MQTT middleware's own HTTP publish
            endpoint (built into the broker, not a custom service).
        :param user: HTTP basic-auth user for the broker API.
        :param passwd: HTTP basic-auth password for the broker API.
        :param display_full_msg: whether to log the full published payload
            (otherwise only the first 200 characters are logged).
        """
        self._mqtt_publish_url = mqtt_publish_url
        self.http = urllib3.PoolManager()
        self._headers = urllib3.util.make_headers(basic_auth=f'{user}:{passwd}')
        self._headers['Content-Type'] = 'application/json'
        self._display_full_msg = display_full_msg
    # @decorator_libs.tomorrow_threads(10)
    def pub_message(self, topic, msg):
        """Publish ``msg`` (str, dict or list) to ``topic`` at QoS 1."""
        msg = json.dumps(msg) if isinstance(msg, (dict, list)) else msg
        if not isinstance(msg, str):
            raise Exception('推送的不是字符串')  # "pushed value is not a string"
        post_data = {"qos": 1, "retain": False, "topic": topic, "payload": msg}
        try:  # urllib3 encodes the body as latin-1; non-latin-1 payloads raise UnicodeEncodeError and are re-encoded below.
            resp_dict = json.loads(self.http.request('post', self._mqtt_publish_url, body=json.dumps(post_data),
                                                     headers=self._headers).data)
        except UnicodeEncodeError as e:
            self.logger.warning(e)
            post_data['payload'] = post_data['payload'].encode().decode('latin-1')
            resp_dict = json.loads(self.http.request('post', self._mqtt_publish_url, body=json.dumps(post_data),
                                                     headers=self._headers).data)
        # code == 0 is the broker API's success indicator.
        if resp_dict['code'] == 0:
            self.logger.debug(f' 推送mqtt成功 ,主题名称是:{topic} ,长度是 {len(msg)}, 消息是 {msg if self._display_full_msg else msg[:200]} ')
        else:
            self.logger.debug(f' 推送mqtt失败,主题名称是:{topic},mqtt返回响应是 {json.dumps(resp_dict)} , 消息是 {msg if self._display_full_msg else msg[:200]}')
if __name__ == '__main__':
    # Ad-hoc benchmark: time publishing 2000 messages to a test topic.
    with decorator_libs.TimerContextManager():
        mp = MqttHttpHelper('http://192.168.6.130:18083/api/v2/mqtt/publish')
        for i in range(2000):
            mp.pub_message('/topic_test_uuid123456', 'msg_test3')
import json
import nb_log
import decorator_libs
# https://www.cnblogs.com/YrRoom/p/14054282.html
"""
-p 18083 服务器启动端口
-p 1882 TCP端口
-p 8083 WS端口
-p 8084 WSS端口
-p 8883 SSL端口
"""
"""
非常适合 前端订阅唯一uuid的topic 然后表单中带上这个topic名字请求python接口 -> 接口中发布任务到rabbitmq或redis消息队列 ->
后台消费进程执行任务消费,并将结果发布到mqtt的那个唯一uuid的topic -> mqtt 把结果推送到前端。
使用ajax轮训或者后台导入websocket相关的包来做和前端的长耗时任务的交互都是伪命题,没有mqtt好。
"""
class MqttHttpHelper(nb_log.LoggerMixin, nb_log.LoggerLevelSetterMixin):
    """Publish MQTT messages through the broker's built-in HTTP publish API.

    Only ``urllib3`` is required -- no paho.mqtt client.
    """
    def __init__(self, mqtt_publish_url='http://1172.16.58.3:18083/api/v2/mqtt/publish', user='admin', passwd='<PASSWORD>', display_full_msg=False):
        """
        :param mqtt_publish_url: the broker's own HTTP publish endpoint.
        :param user: HTTP basic-auth user for the broker API.
        :param passwd: HTTP basic-auth password for the broker API.
        :param display_full_msg: log the full payload instead of 200 chars.
        """
        self._mqtt_publish_url = mqtt_publish_url
        self._display_full_msg = display_full_msg
        self.http = urllib3.PoolManager()
        auth_headers = urllib3.util.make_headers(basic_auth=f'{user}:{passwd}')
        auth_headers['Content-Type'] = 'application/json'
        self._headers = auth_headers
    # @decorator_libs.tomorrow_threads(10)
    def pub_message(self, topic, msg):
        """Publish ``msg`` (str, dict or list) to ``topic`` at QoS 1."""
        if isinstance(msg, (dict, list)):
            msg = json.dumps(msg)
        if not isinstance(msg, str):
            raise Exception('推送的不是字符串')
        post_data = {"qos": 1, "retain": False, "topic": topic, "payload": msg}
        try:
            # urllib3 encodes the request body as latin-1; payloads outside
            # latin-1 raise UnicodeEncodeError and are re-encoded below.
            response = self.http.request('post', self._mqtt_publish_url,
                                         body=json.dumps(post_data), headers=self._headers)
            resp_dict = json.loads(response.data)
        except UnicodeEncodeError as e:
            self.logger.warning(e)
            post_data['payload'] = post_data['payload'].encode().decode('latin-1')
            response = self.http.request('post', self._mqtt_publish_url,
                                         body=json.dumps(post_data), headers=self._headers)
            resp_dict = json.loads(response.data)
        shown = msg if self._display_full_msg else msg[:200]
        if resp_dict['code'] == 0:
            self.logger.debug(f' 推送mqtt成功 ,主题名称是:{topic} ,长度是 {len(msg)}, 消息是 {shown} ')
        else:
            self.logger.debug(f' 推送mqtt失败,主题名称是:{topic},mqtt返回响应是 {json.dumps(resp_dict)} , 消息是 {shown}')
if __name__ == '__main__':
    # Ad-hoc benchmark: time publishing 2000 messages to a test topic.
    with decorator_libs.TimerContextManager():
        mp = MqttHttpHelper('http://192.168.6.130:18083/api/v2/mqtt/publish')
        for i in range(2000):
            mp.pub_message('/topic_test_uuid123456', 'msg_test3')
import subprocess
from unittest.mock import MagicMock
import pytest
from briefcase.exceptions import BriefcaseCommandError, InvalidDeviceError
from briefcase.integrations.android_sdk import ADB
def test_force_stop_app(mock_sdk, capsys):
    """Invoking `force_stop_app()` calls `run()` with the appropriate
    parameters."""
    # Mock out the run command on an adb instance
    adb = ADB(mock_sdk, "exampleDevice")
    adb.run = MagicMock(return_value="example normal adb output")
    # Invoke force_stop_app
    adb.force_stop_app("com.example.sample.package")
    # Validate call parameters.
    adb.run.assert_called_once_with(
        "shell", "am", "force-stop", "com.example.sample.package"
    )
    # Validate that the normal output of the command was not printed (since there
    # was no error).
    assert "normal adb output" not in capsys.readouterr()
# NOTE(review): function name has a typo ("force_top"); likely intended
# "test_force_stop_fail". Renaming would change the collected test id, so
# it is only flagged here.
def test_force_top_fail(mock_sdk, capsys):
    """If `force_stop_app()` fails, an error is raised."""
    # Mock out the run command on an adb instance
    adb = ADB(mock_sdk, "exampleDevice")
    adb.run = MagicMock(
        side_effect=subprocess.CalledProcessError(returncode=69, cmd="force-stop")
    )
    # Invoke force_stop_app; the CalledProcessError is translated into a
    # BriefcaseCommandError.
    with pytest.raises(BriefcaseCommandError):
        adb.force_stop_app("com.example.sample.package")
    # Validate call parameters.
    adb.run.assert_called_once_with(
        "shell", "am", "force-stop", "com.example.sample.package"
    )
def test_invalid_device(mock_sdk, capsys):
    """Invoking `force_stop_app()` on an invalid device raises an error."""
    # Mock out the run command on an adb instance
    adb = ADB(mock_sdk, "exampleDevice")
    adb.run = MagicMock(side_effect=InvalidDeviceError("device", "exampleDevice"))
    # InvalidDeviceError should propagate unchanged to the caller.
    with pytest.raises(InvalidDeviceError):
        adb.force_stop_app("com.example.sample.package")
    # Validate call parameters.
    adb.run.assert_called_once_with(
        "shell", "am", "force-stop", "com.example.sample.package"
    )
from unittest.mock import MagicMock
import pytest
from briefcase.exceptions import BriefcaseCommandError, InvalidDeviceError
from briefcase.integrations.android_sdk import ADB
def test_force_stop_app(mock_sdk, capsys):
    """Invoking `force_stop_app()` calls `run()` with the appropriate
    parameters."""
    package = "com.example.sample.package"
    # Replace run() on an adb instance so no real command executes.
    adb = ADB(mock_sdk, "exampleDevice")
    adb.run = MagicMock(return_value="example normal adb output")
    adb.force_stop_app(package)
    # run() must be invoked exactly once with the force-stop shell command.
    adb.run.assert_called_once_with("shell", "am", "force-stop", package)
    # On success the normal adb output is swallowed, not printed.
    assert "normal adb output" not in capsys.readouterr()
def test_force_top_fail(mock_sdk, capsys):
    """If `force_stop_app()` fails, an error is raised."""
    package = "com.example.sample.package"
    # run() fails with a CalledProcessError, as a real adb failure would.
    adb = ADB(mock_sdk, "exampleDevice")
    failure = subprocess.CalledProcessError(returncode=69, cmd="force-stop")
    adb.run = MagicMock(side_effect=failure)
    # The failure is surfaced as a BriefcaseCommandError.
    with pytest.raises(BriefcaseCommandError):
        adb.force_stop_app(package)
    adb.run.assert_called_once_with("shell", "am", "force-stop", package)
def test_invalid_device(mock_sdk, capsys):
    """Invoking `force_stop_app()` on an invalid device raises an error."""
    # Mock out the run command on an adb instance
    adb = ADB(mock_sdk, "exampleDevice")
    adb.run = MagicMock(side_effect=InvalidDeviceError("device", "exampleDevice"))
    # InvalidDeviceError should propagate unchanged to the caller.
    with pytest.raises(InvalidDeviceError):
        adb.force_stop_app("com.example.sample.package")
    # Validate call parameters.
    adb.run.assert_called_once_with(
        "shell", "am", "force-stop", "com.example.sample.package"
    )
import argparse
import sys
import Cerbrutus
banner = """
\t================================================================
\t __ ___ ____ ____ ____ __ __ ______ __ __ _____
\t / ] / _]| \ | \ | \| | || || | |/ ___/
\t / / / [_ | D )| o )| D ) | || || | ( \_
\t / / | _]| / | || /| | ||_| |_|| | |\__ |
\t/ \_ | [_ | \ | O || \| : | | | | : |/ \ |
\t\ || || . \| || . \ | | | | |\ |
\t \____||_____||__|\_||_____||__|\_|\__,_| |__| \__,_| \___|
\t
\tNetwork Brute Force Tool
\thttps://github.com/Cerbrutus-BruteForcer/cerbrutus
\t================================================================
"""
def _load_candidates(value, kind):
    """Return the list of candidate values derived from *value*.

    Keeps the original heuristic: a value containing '/', '\\' or '.' is
    treated as a wordlist file path, anything else as a single literal
    candidate.  Exits with status 1 when the wordlist file is missing.

    NOTE(review): the '.' check means a literal credential containing a dot
    (e.g. "pass.123") is treated as a file path -- behavior preserved as-is.
    """
    if '/' not in value and '.' not in value and '\\' not in value:
        return [value]
    try:
        wordlist = Cerbrutus.Wordlist(value)
        if kind == "password":
            print("[*] - Initializing password list...")
        return wordlist.read()
    except FileNotFoundError as e:
        print(e)
        # Was sys.exit() (status 0): exit non-zero so scripted callers can
        # detect the failure.
        sys.exit(1)


def main():
    """Parse command line arguments and launch the brute force run."""
    arg_parser = argparse.ArgumentParser(description="Python based network brute forcing tool!")
    # (f-prefixes removed from the help strings below: they contained no
    # placeholders.)
    arg_parser.add_argument("Host", help="The host to connect to - in IP or VHOST/Domain Name form")
    arg_parser.add_argument("Service", help="The service to brute force (currently implemented 'SSH')")
    arg_parser.add_argument("-U", "--users", required=True,
                            help="Either a single user, or the path to the file of users you wish to use")
    arg_parser.add_argument("-P", "--passwords", required=True,
                            help="Either a single password, or the path to the password list you wish to use")
    # type=int: the port was previously forwarded to BruteUtil as a string,
    # while the service-table default is used as stored.
    arg_parser.add_argument("-p", "--port", type=int,
                            help="The port you wish to target (only required if running on a non standard port)")
    # type=int replaces the manual int() conversion; argparse now reports a
    # proper usage error (exit status 2) on a non-numeric value instead of
    # a custom message followed by exit status 0.
    arg_parser.add_argument("-t", "--threads", type=int,
                            help="Number of threads to use")
    arg_parser.add_argument("-q", "--quiet", nargs='*',
                            help="Do not print banner")
    args = arg_parser.parse_args()

    # args.quiet is None only when -q was not given at all (a bare -q
    # yields [] because of nargs='*').
    if args.quiet is None:
        print(banner)

    host = args.Host
    service = args.Service.upper()
    if service not in Cerbrutus.services.valid_services:
        print(f"Service named {service} does not exist yet...")
        sys.exit(1)

    # Defaults come from the per-service table; explicit flags win.
    # ("reccomendedThreads" spelling matches the project's services table.)
    service_info = Cerbrutus.services.valid_services[service]
    port = args.port if args.port is not None else service_info["port"]
    threads = args.threads if args.threads is not None else service_info["reccomendedThreads"]

    users = _load_candidates(args.users, "user")
    passwords = _load_candidates(args.passwords, "password")

    Cerbrutus.BruteUtil(host, port, service, users, passwords, threads=threads).brute()
# Standard script entry point. (Removed non-Python dataset residue that was
# fused onto this line: "| cerbrutus.py | import argparse".)
if __name__ == '__main__':
    main()
import sys
import Cerbrutus
# NOTE(review): everything from here down appears to be a second, duplicated
# copy of the same module (extraction/dataset artifact); at import time these
# definitions simply rebind the same names.
# As above: non-raw string, so "\_" and "\ " are invalid escape sequences
# (SyntaxWarning on Python 3.12+); the leading "\t" are real tab escapes.
banner = """
\t================================================================
\t __ ___ ____ ____ ____ __ __ ______ __ __ _____
\t / ] / _]| \ | \ | \| | || || | |/ ___/
\t / / / [_ | D )| o )| D ) | || || | ( \_
\t / / | _]| / | || /| | ||_| |_|| | |\__ |
\t/ \_ | [_ | \ | O || \| : | | | | : |/ \ |
\t\ || || . \| || . \ | | | | |\ |
\t \____||_____||__|\_||_____||__|\_|\__,_| |__| \__,_| \___|
\t
\tNetwork Brute Force Tool
\thttps://github.com/Cerbrutus-BruteForcer/cerbrutus
\t================================================================
"""
def _load_candidates(value, kind):
    """Return the list of candidate values derived from *value*.

    Keeps the original heuristic: a value containing '/', '\\' or '.' is
    treated as a wordlist file path, anything else as a single literal
    candidate.  Exits with status 1 when the wordlist file is missing.

    NOTE(review): the '.' check means a literal credential containing a dot
    (e.g. "pass.123") is treated as a file path -- behavior preserved as-is.
    """
    if '/' not in value and '.' not in value and '\\' not in value:
        return [value]
    try:
        wordlist = Cerbrutus.Wordlist(value)
        if kind == "password":
            print("[*] - Initializing password list...")
        return wordlist.read()
    except FileNotFoundError as e:
        print(e)
        # Was sys.exit() (status 0): exit non-zero so scripted callers can
        # detect the failure.
        sys.exit(1)


def main():
    """Parse command line arguments and launch the brute force run."""
    arg_parser = argparse.ArgumentParser(description="Python based network brute forcing tool!")
    # (f-prefixes removed from the help strings below: they contained no
    # placeholders.)
    arg_parser.add_argument("Host", help="The host to connect to - in IP or VHOST/Domain Name form")
    arg_parser.add_argument("Service", help="The service to brute force (currently implemented 'SSH')")
    arg_parser.add_argument("-U", "--users", required=True,
                            help="Either a single user, or the path to the file of users you wish to use")
    arg_parser.add_argument("-P", "--passwords", required=True,
                            help="Either a single password, or the path to the password list you wish to use")
    # type=int: the port was previously forwarded to BruteUtil as a string,
    # while the service-table default is used as stored.
    arg_parser.add_argument("-p", "--port", type=int,
                            help="The port you wish to target (only required if running on a non standard port)")
    # type=int replaces the manual int() conversion; argparse now reports a
    # proper usage error (exit status 2) on a non-numeric value instead of
    # a custom message followed by exit status 0.
    arg_parser.add_argument("-t", "--threads", type=int,
                            help="Number of threads to use")
    arg_parser.add_argument("-q", "--quiet", nargs='*',
                            help="Do not print banner")
    args = arg_parser.parse_args()

    # args.quiet is None only when -q was not given at all (a bare -q
    # yields [] because of nargs='*').
    if args.quiet is None:
        print(banner)

    host = args.Host
    service = args.Service.upper()
    if service not in Cerbrutus.services.valid_services:
        print(f"Service named {service} does not exist yet...")
        sys.exit(1)

    # Defaults come from the per-service table; explicit flags win.
    # ("reccomendedThreads" spelling matches the project's services table.)
    service_info = Cerbrutus.services.valid_services[service]
    port = args.port if args.port is not None else service_info["port"]
    threads = args.threads if args.threads is not None else service_info["reccomendedThreads"]

    users = _load_candidates(args.users, "user")
    passwords = _load_candidates(args.passwords, "password")

    Cerbrutus.BruteUtil(host, port, service, users, passwords, threads=threads).brute()
# Standard script entry point. (Removed non-Python dataset residue that was
# fused onto this line: "| 0.13569 | 0.150809 |".)
if __name__ == '__main__':
    main()