hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7109ecc06eaa292a20f8a9fb4dbb062ffca45f0 | 610 | py | Python | scripts/emnist_viz_tf.py | vipavlovic/pyprobml | 59a2edc682d0163955db5e2f27491ad772b60141 | [
"MIT"
] | 4,895 | 2016-08-17T22:28:34.000Z | 2022-03-31T17:07:15.000Z | scripts/emnist_viz_tf.py | vipavlovic/pyprobml | 59a2edc682d0163955db5e2f27491ad772b60141 | [
"MIT"
] | 446 | 2016-09-17T14:35:29.000Z | 2022-03-31T19:59:33.000Z | scripts/emnist_viz_tf.py | vipavlovic/pyprobml | 59a2edc682d0163955db5e2f27491ad772b60141 | [
"MIT"
] | 1,160 | 2016-08-18T23:19:27.000Z | 2022-03-31T12:44:07.000Z |
# Visualize the first 25 images of the EMNIST test split in a 5x5 grid
# and save the figure as a PDF.
import superimport
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
import tensorflow as tf
import tensorflow_datasets as tfds
# Seed numpy's RNG for reproducibility (the dataset itself is loaded unshuffled).
np.random.seed(0)
ds, info = tfds.load('emnist', split='test', shuffle_files=False, with_info=True) # horribly slow
print(info)
plt.figure(figsize=(10, 10))
i = 0
# Draw the first 25 examples, one per subplot cell, titled with their label.
for example in ds:
    image = example["image"]
    label = example["label"]
    plt.subplot(5, 5, i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(image)
    plt.title(label)
    i += 1
    if i >= 25: break
pml.savefig("emnist-data.pdf")
plt.show() | 17.941176 | 97 | 0.67541 |
import superimport
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
import tensorflow as tf
import tensorflow_datasets as tfds
np.random.seed(0)
ds, info = tfds.load('emnist', split='test', shuffle_files=False, with_info=True)
print(info)
plt.figure(figsize=(10, 10))
i = 0
for example in ds:
image = example["image"]
label = example["label"]
plt.subplot(5, 5, i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(image)
plt.title(label)
i += 1
if i >= 25: break
pml.savefig("emnist-data.pdf")
plt.show() | true | true |
f7109fd83a9537b7def6232027ba5416b1f71200 | 1,156 | py | Python | scraper/storage_spiders/lingovn.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | null | null | null | scraper/storage_spiders/lingovn.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | 10 | 2020-02-11T23:34:28.000Z | 2022-03-11T23:16:12.000Z | scraper/storage_spiders/lingovn.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | 3 | 2018-08-05T14:54:25.000Z | 2021-06-07T01:49:59.000Z | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor

# XPath selectors for extracting product fields from lingo.vn pages;
# empty strings mean the field is not scraped for this site.
XPATH = {
    'name' : "//h1[@itemprop='name']",
    'price' : "//div[@class='div-new-price']/span[@class='new-price']",
    'category' : "//span[@class='item']/a[@itemprop='url']/span[@itemprop='title']",
    'description' : "//div[@class='block-template-content']/div[@class='clearfix mt2x']",
    'images' : "//div[@class='dsi-img full-cover ']/@data-image-hoverattribute",
    'canonical' : "//link[@rel='canonical']/@href",
    'base_url' : "",
    'brand' : "//div[@class='infos prod-detail-brand']/a[@class='font-semibold brand-name']",
    'in_stock' : "",
    'guarantee' : "",
    'promotion' : ""
}
# Spider identity and crawl scope.
name = 'lingo.vn'
allowed_domains = ['lingo.vn']
start_urls = ['http://lingo.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = ['']
# Product-detail URLs (...-p<id>.html) go to parse_item; category/listing
# URLs (...-c<id>/ with optional pagination) are followed via parse.
rules = [
    Rule(LinkExtractor(allow=['/[\w-]+-p\d+\.html$']), 'parse_item'),
    Rule(LinkExtractor(allow=['/[\w-]+-c\d+/($|\?page=\d+$)']), 'parse'),
    #Rule(LinkExtractor(), 'parse_item_and_links'),
]
| 38.533333 | 93 | 0.614187 |
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//h1[@itemprop='name']",
'price' : "//div[@class='div-new-price']/span[@class='new-price']",
'category' : "//span[@class='item']/a[@itemprop='url']/span[@itemprop='title']",
'description' : "//div[@class='block-template-content']/div[@class='clearfix mt2x']",
'images' : "//div[@class='dsi-img full-cover ']/@data-image-hoverattribute",
'canonical' : "//link[@rel='canonical']/@href",
'base_url' : "",
'brand' : "//div[@class='infos prod-detail-brand']/a[@class='font-semibold brand-name']",
'in_stock' : "",
'guarantee' : "",
'promotion' : ""
}
name = 'lingo.vn'
allowed_domains = ['lingo.vn']
start_urls = ['http://lingo.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = ['']
rules = [
Rule(LinkExtractor(allow=['/[\w-]+-p\d+\.html$']), 'parse_item'),
Rule(LinkExtractor(allow=['/[\w-]+-c\d+/($|\?page=\d+$)']), 'parse'),
]
| true | true |
f710a02aacb223fde4921f89cbd938a26a27feb5 | 24,506 | py | Python | src/Interpolator.py | MatthiasDR96/industrial_robotics_simulator | 9039e7a581ce97c583c73294e9937664de90530b | [
"MIT"
] | 1 | 2020-10-21T15:32:41.000Z | 2020-10-21T15:32:41.000Z | src/Interpolator.py | MatthiasDR96/industrial_robotics_simulator | 9039e7a581ce97c583c73294e9937664de90530b | [
"MIT"
] | null | null | null | src/Interpolator.py | MatthiasDR96/industrial_robotics_simulator | 9039e7a581ce97c583c73294e9937664de90530b | [
"MIT"
] | null | null | null | import numpy as np
from sympy import *
def interpolate_cubic(p1, p2, k_traj, t):
    '''
    Interpolate a point-to-point trajectory using the normalized cubic
    (third-degree) time profile.

    Input:
        p1: Nx1 numpy array, the start point
        p2: Nx1 numpy array, the end point
        k_traj: int, number of samples along the trajectory
        t: float or int, total traversal time in seconds
    Returns:
        traj, dtraj, ddtraj, dddtraj: (N+1) x k_traj arrays holding the
        per-axis positions / velocities / accelerations / jerks, with
        the sampled time vector appended as the last row.
    '''
    assert type(p1) == np.ndarray and type(p2) == np.ndarray
    assert type(k_traj) == int and (type(t) == float or type(t) == int)
    # Normalized profile s(tau) and its derivatives with respect to tau.
    s, ds, dds, ddds = get_normalized_third_degree_polynomial(k_traj)
    pos_rows, vel_rows, acc_rows, jerk_rows = [], [], [], []
    for axis in range(len(p1)):
        span = p2[axis] - p1[axis]
        # Scale the normalized profile onto this axis; each derivative
        # order is additionally divided by t once more for time scaling.
        pos_rows.append([span * s_k + p1[axis] for s_k in s])
        vel_rows.append(np.divide([span * d_k for d_k in ds], t))
        acc_rows.append(np.divide([span * d_k for d_k in dds], t ** 2))
        jerk_rows.append(np.divide([span * d_k for d_k in ddds], t ** 3))
    # Append the sampled time stamps as the final row of every output.
    tv = np.linspace(0, t, k_traj)
    for rows in (pos_rows, vel_rows, acc_rows, jerk_rows):
        rows.append(tv)
    return (np.asarray(pos_rows), np.asarray(vel_rows),
            np.asarray(acc_rows), np.asarray(jerk_rows))
def interpolate_quintic(p1, p2, k_traj, t):
    '''
    Interpolate a point-to-point trajectory using the normalized quintic
    (fifth-degree) time profile.

    Input:
        p1: Nx1 numpy array, the start point
        p2: Nx1 numpy array, the end point
        k_traj: int, number of samples along the trajectory
        t: float or int, total traversal time in seconds
    Returns:
        traj, dtraj, ddtraj, dddtraj: (N+1) x k_traj arrays holding the
        per-axis positions / velocities / accelerations / jerks, with
        the sampled time vector appended as the last row.
    '''
    assert type(p1) == np.ndarray and type(p2) == np.ndarray
    assert type(k_traj) == int and (type(t) == float or type(t) == int)
    # Normalized profile s(tau) and its derivatives with respect to tau.
    s, ds, dds, ddds = get_normalized_fifth_degree_polynomial(k_traj)
    pos_rows, vel_rows, acc_rows, jerk_rows = [], [], [], []
    for axis in range(len(p1)):
        span = p2[axis] - p1[axis]
        # Scale the normalized profile onto this axis; each derivative
        # order is additionally divided by t once more for time scaling.
        pos_rows.append([span * s_k + p1[axis] for s_k in s])
        vel_rows.append(np.divide([span * d_k for d_k in ds], t))
        acc_rows.append(np.divide([span * d_k for d_k in dds], t ** 2))
        jerk_rows.append(np.divide([span * d_k for d_k in ddds], t ** 3))
    # Append the sampled time stamps as the final row of every output.
    tv = np.linspace(0, t, k_traj)
    for rows in (pos_rows, vel_rows, acc_rows, jerk_rows):
        rows.append(tv)
    return (np.asarray(pos_rows), np.asarray(vel_rows),
            np.asarray(acc_rows), np.asarray(jerk_rows))
def interpolate_septic(p1, p2, k_traj, t):
    '''
    Interpolate a point-to-point trajectory using the normalized septic
    (seventh-degree) time profile.

    Input:
        p1: Nx1 numpy array, the start point
        p2: Nx1 numpy array, the end point
        k_traj: int, number of samples along the trajectory
        t: float or int, total traversal time in seconds
    Returns:
        traj, dtraj, ddtraj, dddtraj: (N+1) x k_traj arrays holding the
        per-axis positions / velocities / accelerations / jerks, with
        the sampled time vector appended as the last row.
    '''
    assert type(p1) == np.ndarray and type(p2) == np.ndarray
    assert type(k_traj) == int and (type(t) == float or type(t) == int)
    # Normalized profile s(tau) and its derivatives with respect to tau.
    s, ds, dds, ddds = get_normalized_seventh_degree_polynomial(k_traj)
    pos_rows, vel_rows, acc_rows, jerk_rows = [], [], [], []
    for axis in range(len(p1)):
        span = p2[axis] - p1[axis]
        # Scale the normalized profile onto this axis; each derivative
        # order is additionally divided by t once more for time scaling.
        pos_rows.append([span * s_k + p1[axis] for s_k in s])
        vel_rows.append(np.divide([span * d_k for d_k in ds], t))
        acc_rows.append(np.divide([span * d_k for d_k in dds], t ** 2))
        jerk_rows.append(np.divide([span * d_k for d_k in ddds], t ** 3))
    # Append the sampled time stamps as the final row of every output.
    tv = np.linspace(0, t, k_traj)
    for rows in (pos_rows, vel_rows, acc_rows, jerk_rows):
        rows.append(tv)
    return (np.asarray(pos_rows), np.asarray(vel_rows),
            np.asarray(acc_rows), np.asarray(jerk_rows))
def interpolate_nonic(p1, p2, k_traj, t):
    '''
    Interpolate a point-to-point trajectory using the normalized nonic
    (ninth-degree) time profile.

    Input:
        p1: Nx1 numpy array, the start point
        p2: Nx1 numpy array, the end point
        k_traj: int, number of samples along the trajectory
        t: float or int, total traversal time in seconds
    Returns:
        traj, dtraj, ddtraj, dddtraj: (N+1) x k_traj arrays holding the
        per-axis positions / velocities / accelerations / jerks, with
        the sampled time vector appended as the last row.
    '''
    assert type(p1) == np.ndarray and type(p2) == np.ndarray
    assert type(k_traj) == int and (type(t) == float or type(t) == int)
    # Normalized profile s(tau) and its derivatives with respect to tau.
    s, ds, dds, ddds = get_normalized_ninth_degree_polynomial(k_traj)
    pos_rows, vel_rows, acc_rows, jerk_rows = [], [], [], []
    for axis in range(len(p1)):
        span = p2[axis] - p1[axis]
        # Scale the normalized profile onto this axis; each derivative
        # order is additionally divided by t once more for time scaling.
        pos_rows.append([span * s_k + p1[axis] for s_k in s])
        vel_rows.append(np.divide([span * d_k for d_k in ds], t))
        acc_rows.append(np.divide([span * d_k for d_k in dds], t ** 2))
        jerk_rows.append(np.divide([span * d_k for d_k in ddds], t ** 3))
    # Append the sampled time stamps as the final row of every output.
    tv = np.linspace(0, t, k_traj)
    for rows in (pos_rows, vel_rows, acc_rows, jerk_rows):
        rows.append(tv)
    return (np.asarray(pos_rows), np.asarray(vel_rows),
            np.asarray(acc_rows), np.asarray(jerk_rows))
def interpolate_trapezoid(p1, p2, k_traj, t):
    '''
    Interpolate a point-to-point trajectory using the normalized
    trapezoidal-velocity time profile.

    Input:
        p1: Nx1 numpy array, the start point
        p2: Nx1 numpy array, the end point
        k_traj: int, number of samples along the trajectory
        t: float or int, total traversal time in seconds
    Returns:
        traj, dtraj, ddtraj, dddtraj: (N+1) x k_traj arrays holding the
        per-axis positions / velocities / accelerations / jerks, with
        the sampled time vector appended as the last row.
    '''
    assert type(p1) == np.ndarray and type(p2) == np.ndarray
    assert type(k_traj) == int and (type(t) == float or type(t) == int)
    # Normalized profile s(tau) and its derivatives with respect to tau.
    s, ds, dds, ddds = get_normalized_trapezoid_polynomial(k_traj)
    pos_rows, vel_rows, acc_rows, jerk_rows = [], [], [], []
    for axis in range(len(p1)):
        span = p2[axis] - p1[axis]
        # Scale the normalized profile onto this axis; each derivative
        # order is additionally divided by t once more for time scaling.
        pos_rows.append([span * s_k + p1[axis] for s_k in s])
        vel_rows.append(np.divide([span * d_k for d_k in ds], t))
        acc_rows.append(np.divide([span * d_k for d_k in dds], t ** 2))
        jerk_rows.append(np.divide([span * d_k for d_k in ddds], t ** 3))
    # Append the sampled time stamps as the final row of every output.
    tv = np.linspace(0, t, k_traj)
    for rows in (pos_rows, vel_rows, acc_rows, jerk_rows):
        rows.append(tv)
    return (np.asarray(pos_rows), np.asarray(vel_rows),
            np.asarray(acc_rows), np.asarray(jerk_rows))
def interpolate_minimum_jerk_derivative(p1, p2, k_traj, t):
    '''
    Interpolate a point-to-point trajectory using the normalized
    minimum-jerk-derivative time profile.

    Input:
        p1: Nx1 numpy array, the start point
        p2: Nx1 numpy array, the end point
        k_traj: int, number of samples along the trajectory
        t: float or int, total traversal time in seconds
    Returns:
        traj, dtraj, ddtraj, dddtraj: (N+1) x k_traj arrays holding the
        per-axis positions / velocities / accelerations / jerks, with
        the sampled time vector appended as the last row.
    '''
    assert type(p1) == np.ndarray and type(p2) == np.ndarray
    assert type(k_traj) == int and (type(t) == float or type(t) == int)
    # Normalized profile s(tau) and its derivatives with respect to tau.
    s, ds, dds, ddds = get_normalized_minimum_jerk_derivative_polynomial(k_traj)
    pos_rows, vel_rows, acc_rows, jerk_rows = [], [], [], []
    for axis in range(len(p1)):
        span = p2[axis] - p1[axis]
        # Scale the normalized profile onto this axis; each derivative
        # order is additionally divided by t once more for time scaling.
        pos_rows.append([span * s_k + p1[axis] for s_k in s])
        vel_rows.append(np.divide([span * d_k for d_k in ds], t))
        acc_rows.append(np.divide([span * d_k for d_k in dds], t ** 2))
        jerk_rows.append(np.divide([span * d_k for d_k in ddds], t ** 3))
    # Append the sampled time stamps as the final row of every output.
    tv = np.linspace(0, t, k_traj)
    for rows in (pos_rows, vel_rows, acc_rows, jerk_rows):
        rows.append(tv)
    return (np.asarray(pos_rows), np.asarray(vel_rows),
            np.asarray(acc_rows), np.asarray(jerk_rows))
def get_normalized_first_degree_polynomial(k_traj):
    '''
    Sample the normalized linear (constant-velocity) profile s(tau) = tau
    and its first three derivatives at k_traj equally spaced points of
    tau in [0, 1].

    Returns:
        stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau: 1-D numpy arrays
        of length k_traj (derivatives taken w.r.t. tau, not yet
        time-scaled).
    '''
    tau = np.linspace(0, 1, k_traj)
    stau = np.linspace(0, 1, k_traj)
    dstau_dtau = np.linspace(0, 0, k_traj)
    ddstau_ddtau = np.linspace(0, 0, k_traj)
    dddstau_dddtau = np.linspace(0, 0, k_traj)
    for idx, u in enumerate(tau):
        stau[idx] = u          # s(tau) = tau
        dstau_dtau[idx] = 1    # constant unit velocity
        ddstau_ddtau[idx] = 0  # no acceleration
        dddstau_dddtau[idx] = 0
    return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_third_degree_polynomial(k_traj):
    '''
    Sample the normalized cubic profile s(tau) = -2 tau^3 + 3 tau^2 and
    its first three derivatives at k_traj equally spaced points of tau in
    [0, 1].  The profile has zero velocity at both endpoints.

    Returns:
        stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau: 1-D numpy arrays
        of length k_traj (derivatives taken w.r.t. tau, not yet
        time-scaled).
    '''
    tau = np.linspace(0, 1, k_traj)
    stau = np.linspace(0, 1, k_traj)
    dstau_dtau = np.linspace(0, 0, k_traj)
    ddstau_ddtau = np.linspace(0, 0, k_traj)
    dddstau_dddtau = np.linspace(0, 0, k_traj)
    for idx, u in enumerate(tau):
        stau[idx] = -2 * (u ** 3) + 3 * (u ** 2)
        dstau_dtau[idx] = -6 * (u ** 2) + 6 * u
        ddstau_ddtau[idx] = -12 * u + 6
        dddstau_dddtau[idx] = -12  # jerk of a cubic is constant
    return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_fifth_degree_polynomial(k_traj):
    '''
    Sample the normalized quintic profile
    s(tau) = 6 tau^5 - 15 tau^4 + 10 tau^3 and its first three
    derivatives at k_traj equally spaced points of tau in [0, 1].  The
    profile has zero velocity and acceleration at both endpoints.

    Returns:
        stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau: 1-D numpy arrays
        of length k_traj (derivatives taken w.r.t. tau, not yet
        time-scaled).
    '''
    tau = np.linspace(0, 1, k_traj)
    stau = np.linspace(0, 1, k_traj)
    dstau_dtau = np.linspace(0, 0, k_traj)
    ddstau_ddtau = np.linspace(0, 0, k_traj)
    dddstau_dddtau = np.linspace(0, 0, k_traj)
    for idx, u in enumerate(tau):
        stau[idx] = 6 * (u ** 5) - 15 * (u ** 4) + 10 * (u ** 3)
        dstau_dtau[idx] = 30 * (u ** 4) - 60 * (u ** 3) + 30 * (u ** 2)
        ddstau_ddtau[idx] = 120 * (u ** 3) - 180 * (u ** 2) + 60 * u
        dddstau_dddtau[idx] = 360 * (u ** 2) - 360 * u + 60
    return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_seventh_degree_polynomial(k_traj):
    '''
    Sample the normalized septic profile
    s(tau) = -20 tau^7 + 70 tau^6 - 84 tau^5 + 35 tau^4 and its first
    three derivatives at k_traj equally spaced points of tau in [0, 1].
    The profile has zero velocity, acceleration and jerk at both
    endpoints.

    Returns:
        stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau: 1-D numpy arrays
        of length k_traj (derivatives taken w.r.t. tau, not yet
        time-scaled).
    '''
    tau = np.linspace(0, 1, k_traj)
    stau = np.linspace(0, 1, k_traj)
    dstau_dtau = np.linspace(0, 0, k_traj)
    ddstau_ddtau = np.linspace(0, 0, k_traj)
    dddstau_dddtau = np.linspace(0, 0, k_traj)
    for idx, u in enumerate(tau):
        stau[idx] = -20 * (u ** 7) + 70 * (u ** 6) - 84 * (u ** 5) + 35 * (u ** 4)
        dstau_dtau[idx] = -140 * (u ** 6) + 420 * (u ** 5) - 420 * (u ** 4) + 140 * (u ** 3)
        ddstau_ddtau[idx] = -840 * (u ** 5) + 2100 * (u ** 4) - 1680 * (u ** 3) + 420 * (u ** 2)
        dddstau_dddtau[idx] = -4200 * (u ** 4) + 8400 * (u ** 3) - 5040 * (u ** 2) + 840 * u
    return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_ninth_degree_polynomial(k_traj):
    '''
    Sample the normalized nonic profile
    s(tau) = 70 tau^9 - 315 tau^8 + 540 tau^7 - 420 tau^6 + 126 tau^5
    and its first three derivatives at k_traj equally spaced points of
    tau in [0, 1].  The profile has zero velocity, acceleration, jerk
    and snap at both endpoints.

    Consistency fix: the original loop started at index 1 (unlike every
    sibling generator) and relied on the array initializations for the
    tau = 0 sample.  The polynomial and its first three derivatives are
    all exactly zero at tau = 0, so evaluating index 0 explicitly is
    equivalent and matches the other generators.

    Returns:
        stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau: 1-D numpy arrays
        of length k_traj (derivatives taken w.r.t. tau, not yet
        time-scaled).
    '''
    tau = np.linspace(0, 1, k_traj)
    stau = np.linspace(0, 1, k_traj)
    dstau_dtau = np.linspace(0, 0, k_traj)
    ddstau_ddtau = np.linspace(0, 0, k_traj)
    dddstau_dddtau = np.linspace(0, 0, k_traj)
    for i in range(k_traj):  # evaluate every sample, including tau = 0
        t = tau[i]
        stau[i] = 70 * (t ** 9) - 315 * (t ** 8) + 540 * (t ** 7) - 420 * (t ** 6) + 126 * (t ** 5)
        dstau_dtau[i] = 630 * (t ** 8) - 2520 * (t ** 7) + 3780 * (t ** 6) - 2520 * (t ** 5) + 630 * (t ** 4)
        ddstau_ddtau[i] = 5040 * (t ** 7) - 17640 * (t ** 6) + 22680 * (t ** 5) - 12600 * (t ** 4) + 2520 * (t ** 3)
        dddstau_dddtau[i] = 35280 * (t ** 6) - 105840 * (t ** 5) + 113400 * (t ** 4) - 50400 * (t ** 3) + 7560 * (
                t ** 2)
    return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_trapezoid_polynomial(k_traj):
    '''
    Sample a normalized trapezoidal-velocity profile on tau in [0, 1]:
    accelerate over the first 10% of the interval, cruise at constant
    velocity, and decelerate over the last 10%.  Positions are computed
    analytically per segment; the returned derivatives are forward
    finite differences of the sampled position.

    Returns:
        stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau: 1-D numpy arrays
        of length k_traj.
        NOTE(review): the finite-difference loops stop two samples
        early, so the last entries of the derivative arrays keep their
        previously assigned values (the analytic velocity for
        dstau_dtau, zeros otherwise) — confirm this is intended.
    '''
    t_acc = 1 / 10.  # normalized acceleration (and deceleration) duration
    t_ct = 1 - 2 * t_acc  # constant-velocity (cruise) duration
    v_m = 1.0 / (t_acc + t_ct)  # peak velocity so the total displacement is 1
    x = t_acc
    tau = np.linspace(0, 1, k_traj)
    stau = np.linspace(0, 1, k_traj)
    dstau_dtau = np.linspace(0, 0, k_traj)
    ddstau_ddtau = np.linspace(0, 0, k_traj)
    dddstau_dddtau = np.linspace(0, 0, k_traj)
    for i in range(k_traj):
        t = tau[i]
        if 0 <= t <= x:
            # Acceleration phase: constant acceleration v_m / t_acc.
            res = 0.5 * v_m * (t ** 2) / t_acc
            vel = v_m * t / t_acc
        elif x < t <= 1 - x:
            # Cruise phase: constant velocity v_m.
            res = 0.5 * v_m * (t_acc ** 2) / t_acc + v_m * (t - t_acc)
            vel = v_m
        elif t > 1 - x:
            # Deceleration phase: mirror image of the acceleration phase.
            res = 0.5 * v_m * (t_acc ** 2) / t_acc + v_m * t_ct + v_m * (t - t_acc - t_ct) - 0.5 * v_m / t_acc * (
                t - t_acc - t_ct) ** 2
            vel = v_m - v_m / t_acc * (t - t_acc - t_ct)
        else:
            res = None
            vel = None
        stau[i] = res
        dstau_dtau[i] = vel
    # Replace the analytic velocity with forward differences of the
    # sampled position, then difference again for the higher derivatives.
    for i in range(tau.size - 2):
        dstau_dtau[i] = (stau[i + 1] - stau[i]) / (tau[i + 1] - tau[i])
    for i in range(tau.size - 2):
        ddstau_ddtau[i] = (dstau_dtau[i + 1] - dstau_dtau[i]) / (tau[i + 1] - tau[i])
    for i in range(tau.size - 2):
        dddstau_dddtau[i] = (ddstau_ddtau[i + 1] - ddstau_ddtau[i]) / (tau[i + 1] - tau[i])
    return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_minimum_jerk_derivative_polynomial(k_traj):
    '''
    Sample a normalized piecewise-quartic profile on tau in [0, 1] built
    from four segments joined at tau = x, 0.5 and 1 - x, with
    x = (1 - sqrt(0.5)) / 2.  Derivatives are forward finite differences
    of the sampled position.

    Returns:
        stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau: 1-D numpy arrays
        of length k_traj.
        NOTE(review): the evaluation loop stops at k_traj - 1, so
        stau[-1] keeps the value 1.0 from its linspace initialization;
        the finite-difference loops stop two samples early and leave the
        trailing derivative entries at zero — confirm this is intended.
    '''
    x = (1 - np.sqrt(0.5)) / 2  # first segment breakpoint
    tau = np.linspace(0, 1, k_traj)
    stau = np.linspace(0, 1, k_traj)
    dstau_dtau = np.linspace(0, 0, k_traj)
    ddstau_ddtau = np.linspace(0, 0, k_traj)
    dddstau_dddtau = np.linspace(0, 0, k_traj)
    res = None
    for i in range(k_traj - 1):
        t = tau[i]
        if 0 <= t <= x:
            # First quartic segment rising from zero.
            res = 16 * (t ** 4)
        elif x < t <= 0.5:
            res = -16 * (t ** 4) + 128 * x * (t ** 3) - 192 * (x ** 2) * (t ** 2) + 128 * (x ** 3) * t - 32 * (x ** 4)
        elif 0.5 < t <= 1 - x:
            # Mirror of the second segment about the midpoint (tau = 0.5).
            res = 1 + 16 * ((1 - t) ** 4) - 128 * x * ((1 - t) ** 3) + 192 * (x ** 2) * ((1 - t) ** 2) - 128 * (
                    x ** 3) * (1 - t) + 32 * (x ** 4)
        elif 1 - x < t <= 1:
            # Mirror of the first segment, settling to one.
            res = 1 - 16 * (1 - t) ** 4
        stau[i] = res
    # Forward finite differences for velocity, acceleration and jerk.
    for i in range(tau.size - 2):
        dstau_dtau[i] = (stau[i + 1] - stau[i]) / (tau[i + 1] - tau[i])
    for i in range(tau.size - 2):
        ddstau_ddtau[i] = (dstau_dtau[i + 1] - dstau_dtau[i]) / (tau[i + 1] - tau[i])
    for i in range(tau.size - 2):
        dddstau_dddtau[i] = (ddstau_ddtau[i + 1] - ddstau_ddtau[i]) / (tau[i + 1] - tau[i])
    return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_cubic_polynomial_coefficients():
    '''
    Solve for the coefficients [a0, a1, a2, a3] of the normalized cubic
    s(tau) on [0, 1] with boundary conditions s(0) = 0, s(1) = 1 and
    zero velocity at both endpoints.

    Returns:
        4x1 numpy array of coefficients (expected [0, 0, 3, -2]).
    '''
    # Constraint rows: position and velocity at tau = 0 and tau = 1.
    constraints = np.array([
        [1, 0, 0, 0],  # s(0)  = 0
        [1, 1, 1, 1],  # s(1)  = 1
        [0, 1, 0, 0],  # s'(0) = 0
        [0, 1, 2, 3],  # s'(1) = 0
    ], dtype='float')
    boundary_values = np.array([[0], [1], [0], [0]], dtype='float')
    return np.linalg.solve(constraints, boundary_values)
def get_normalized_quintic_polynomial_coefficients():
    '''
    Solve for the coefficients [a0, ..., a5] of the normalized quintic
    s(tau) on [0, 1] with boundary conditions s(0) = 0, s(1) = 1 and
    zero velocity and acceleration at both endpoints.

    Returns:
        6x1 numpy array of coefficients (expected [0, 0, 0, 10, -15, 6]).
    '''
    # Constraint rows: position, velocity and acceleration at tau = 0, 1.
    constraints = np.array([
        [1, 0, 0, 0, 0, 0],    # s(0)   = 0
        [1, 1, 1, 1, 1, 1],    # s(1)   = 1
        [0, 1, 0, 0, 0, 0],    # s'(0)  = 0
        [0, 1, 2, 3, 4, 5],    # s'(1)  = 0
        [0, 0, 2, 0, 0, 0],    # s''(0) = 0
        [0, 0, 2, 6, 12, 20],  # s''(1) = 0
    ], dtype='float')
    boundary_values = np.array([[0], [1], [0], [0], [0], [0]], dtype='float')
    return np.linalg.solve(constraints, boundary_values)
def get_normalized_septic_polynomial_coefficients():
    '''
    Solve for the coefficients [a0, ..., a7] of the normalized septic
    s(tau) on [0, 1] with boundary conditions s(0) = 0, s(1) = 1 and
    zero velocity, acceleration and jerk at both endpoints.

    Returns:
        8x1 numpy array of coefficients
        (expected [0, 0, 0, 0, 35, -84, 70, -20]).
    '''
    # Constraint rows: derivatives of order 0..3 at tau = 0 and tau = 1.
    constraints = np.array([
        [1, 0, 0, 0, 0, 0, 0, 0],          # s(0)    = 0
        [1, 1, 1, 1, 1, 1, 1, 1],          # s(1)    = 1
        [0, 1, 0, 0, 0, 0, 0, 0],          # s'(0)   = 0
        [0, 1, 2, 3, 4, 5, 6, 7],          # s'(1)   = 0
        [0, 0, 2, 0, 0, 0, 0, 0],          # s''(0)  = 0
        [0, 0, 2, 6, 12, 20, 30, 42],      # s''(1)  = 0
        [0, 0, 0, 6, 0, 0, 0, 0],          # s'''(0) = 0
        [0, 0, 0, 6, 24, 60, 120, 210],    # s'''(1) = 0
    ], dtype='float')
    boundary_values = np.array([[0], [1], [0], [0], [0], [0], [0], [0]], dtype='float')
    return np.linalg.solve(constraints, boundary_values)
def get_normalized_nonic_polynomial_coefficients():
    '''
    Solve for the ten coefficients [a0, ..., a9] of the normalized nonic
    s(tau) on [0, 1] with boundary conditions s(0) = 0, s(1) = 1 and
    zero velocity, acceleration, jerk and snap at both endpoints
    (10 equations, 10 unknowns).

    Bug fix: the original mixed quintic-sized constraint rows (6 entries)
    with septic-sized jerk rows (8 entries), producing a ragged,
    non-square system that np.array / np.linalg.solve cannot handle.

    Returns:
        10x1 numpy array of coefficients; the expected solution matches
        get_normalized_ninth_degree_polynomial:
        [0, 0, 0, 0, 0, 126, -420, 540, -315, 70].
    '''
    n = 10
    a = np.zeros((n, n))
    b = np.zeros((n, 1))
    b[1, 0] = 1.0  # s(1) = 1; every other boundary value is zero
    # Derivative orders 0..4: position, velocity, acceleration, jerk, snap.
    for d in range(5):
        for k in range(d, n):
            # Falling factorial k * (k-1) * ... * (k-d+1): the coefficient
            # of the k-th monomial after differentiating d times.
            coeff = 1.0
            for m in range(d):
                coeff *= (k - m)
            if k == d:
                a[2 * d, k] = coeff  # d-th derivative evaluated at tau = 0
            a[2 * d + 1, k] = coeff  # d-th derivative evaluated at tau = 1
    return np.linalg.solve(a, b)
def interpolate_quint_2(p1, p2, dp1, dp2, ddp1, ddp2, k_traj, T):
    '''
    Computes a smooth quintic polynomial between 2 N-dimensional points
    with prescribed boundary velocities and accelerations, solving the
    6x6 kinematic system per axis and sampling the resulting symbolic
    polynomials.

    Input:
        p1: Nx1 numpy array, the first point
        p2: Nx1 numpy array, the second point
        dp1: Nx1 numpy array of the required velocities at the first point
        dp2: Nx1 numpy array of the required velocities at the second point
        ddp1: Nx1 numpy array of the required accelerations at the first point
        ddp2: Nx1 numpy array of the required accelerations at the second point
        k_traj: int, number of samples along the trajectory
        T: scalar, time needed to traverse the polynomial from point 1 to point 2
    Returns:
        traj: (N+1) x k_traj matrix with interpolated positions per axis + timesteps
        dtraj: (N+1) x k_traj matrix with interpolated velocities per axis + timesteps
        ddtraj: (N+1) x k_traj matrix with interpolated accelerations per axis + timesteps
        NOTE(review): entries come from sympy .subs() and are sympy
        numbers, so the returned arrays may have object dtype.
    '''
    assert type(p1) == np.ndarray and type(p2) == np.ndarray
    assert type(dp1) == np.ndarray and type(dp2) == np.ndarray
    assert type(ddp1) == np.ndarray and type(ddp2) == np.ndarray
    assert type(k_traj) == int and (type(T) == float or type(T) == int)
    # Kinematic equations for a quintic polynomial
    # (rows: position / velocity / acceleration at t = 0 and t = T).
    x0 = [1, 0, 0, 0, 0, 0]
    xT = [1, T, pow(T, 2), pow(T, 3), pow(T, 4), pow(T, 5)]
    v0 = [0, 1, 0, 0, 0, 0]
    vT = [0, 1, 2 * T, 3 * pow(T, 2), 4 * pow(T, 3), 5 * pow(T, 4)]
    a0 = [0, 0, 2, 0, 0, 0]
    aT = [0, 0, 2, 6 * T, 12 * pow(T, 2), 20 * pow(T, 3)]
    # Kinematic matrix
    A = np.array([x0, xT, v0, vT, a0, aT], dtype='float')
    # Interpolate
    traj_list = []
    dtraj_list = []
    ddtraj_list = []
    t = Symbol('t')
    tv = np.linspace(0, T, k_traj)
    for i in range(len(p1)):
        # Solve for the quintic coefficients of axis i given its boundary values.
        B = np.array([[p1[i]], [p2[i]], [dp1[i]], [dp2[i]], [ddp1[i]], [ddp2[i]]], dtype='float')
        x = np.linalg.solve(A, B)
        # Build the symbolic position polynomial and its derivatives.
        traj = x[0, 0] + x[1, 0] * t + x[2, 0] * pow(t, 2) + x[3, 0] * pow(t, 3) + x[4, 0] * pow(t, 4) + x[
            5, 0] * pow(t, 5)
        dtraj = x[1, 0] + 2 * x[2, 0] * t + 3 * x[3, 0] * pow(t, 2) + 4 * x[4, 0] * pow(t, 3) + 5 * x[
            5, 0] * pow(t, 4)
        ddtraj = 2 * x[2, 0] + 6 * x[3, 0] * t + 12 * x[4, 0] * pow(t, 2) + 20 * x[5, 0] * pow(t, 3)
        # Sample the symbolic polynomials at the time stamps.
        traj_ = [traj.subs(t, tv_) for tv_ in tv]
        dtraj_ = [dtraj.subs(t, tv_) for tv_ in tv]
        ddtraj_ = [ddtraj.subs(t, tv_) for tv_ in tv]
        traj_list.append(traj_)
        dtraj_list.append(dtraj_)
        ddtraj_list.append(ddtraj_)
    # Append the time stamps as the final row of every output.
    traj_list.append(tv)
    dtraj_list.append(tv)
    ddtraj_list.append(tv)
    traj = np.asarray(traj_list)
    dtraj = np.asarray(dtraj_list)
    ddtraj = np.asarray(ddtraj_list)
    return traj, dtraj, ddtraj
def interpolate_cubic_2(p1, p2, k_traj, T, dp1=None, dp2=None):
    '''
    Computes a smooth cubic polynomial between 2 N-dimensional points
    with prescribed boundary velocities, solving the 4x4 kinematic
    system per axis and sampling the resulting symbolic polynomials.

    Input:
        p1: Nx1 numpy array, the first point
        p2: Nx1 numpy array, the second point
        k_traj: int, number of samples along the trajectory
        T: scalar, time needed to traverse the polynomial from point 1 to point 2
        dp1: Nx1 numpy array of the required velocities at the first point
             (defaults to zeros of matching dimension)
        dp2: Nx1 numpy array of the required velocities at the second point
             (defaults to zeros of matching dimension)
    Returns:
        traj: (N+1) x k_traj matrix with interpolated positions per axis + timesteps
        dtraj: (N+1) x k_traj matrix with interpolated velocities per axis + timesteps
        ddtraj: (N+1) x k_traj matrix with interpolated accelerations per axis + timesteps
    '''
    # Fix: the original used mutable numpy-array default arguments, hard-coded
    # to exactly 6 axes.  Build zero defaults matching the point dimension
    # instead (backward compatible for the documented Nx1 inputs).
    if dp1 is None:
        dp1 = np.zeros((len(p1), 1))
    if dp2 is None:
        dp2 = np.zeros((len(p2), 1))
    assert type(p1) == np.ndarray and type(p2) == np.ndarray
    assert type(dp1) == np.ndarray and type(dp2) == np.ndarray
    assert type(k_traj) == int and (type(T) == float or type(T) == int)
    # Kinematic equations for a cubic polynomial
    # (rows: position / velocity at t = 0 and t = T).
    x0 = [1, 0, 0, 0]
    xT = [1, T, pow(T, 2), pow(T, 3)]
    v0 = [0, 1, 0, 0]
    vT = [0, 1, 2 * T, 3 * pow(T, 2)]
    # Kinematic matrix
    A = np.array([x0, xT, v0, vT], dtype='float')
    traj_list = []
    dtraj_list = []
    ddtraj_list = []
    t = Symbol('t')
    tv = np.linspace(0, T, k_traj)
    for i in range(len(p1)):
        # Solve for the cubic coefficients of axis i given its boundary values.
        B = np.array([[p1[i]], [p2[i]], [dp1[i]], [dp2[i]]], dtype='float')
        x = np.linalg.solve(A, B)
        traj = x[0, 0] + x[1, 0] * t + x[2, 0] * pow(t, 2) + x[3, 0] * pow(t, 3)
        dtraj = x[1, 0] + 2 * x[2, 0] * t + 3 * x[3, 0] * pow(t, 2)
        ddtraj = 2 * x[2, 0] + 6 * x[3, 0] * t
        # Sample the symbolic polynomials at the time stamps.
        traj_ = [traj.subs(t, tv_) for tv_ in tv]
        dtraj_ = [dtraj.subs(t, tv_) for tv_ in tv]
        ddtraj_ = [ddtraj.subs(t, tv_) for tv_ in tv]
        traj_list.append(traj_)
        dtraj_list.append(dtraj_)
        ddtraj_list.append(ddtraj_)
    # Append the time stamps as the final row of every output.
    traj_list.append(tv)
    dtraj_list.append(tv)
    ddtraj_list.append(tv)
    traj = np.array(traj_list)
    dtraj = np.array(dtraj_list)
    ddtraj = np.array(ddtraj_list)
    return traj, dtraj, ddtraj
def interpolate_viapoints(p, v1, vn, k_traj, t):
    '''
    Computes a smooth cubic spline through M N-dimensional via-points by
    solving for the velocities at the interior knots and stitching
    per-segment cubic polynomials.

    Input:
        p: MxN numpy array containing all points
        v1: Nx1 numpy array of the required velocities at the first point
        vn: Nx1 numpy array of the required velocities at the last point
        k_traj: int, number of samples per segment
        t: Mx1 array of the timesteps at which the points should be reached
    Returns:
        traj: (N+1) x (M-1)*k_traj matrix with interpolated positions per axis + timesteps
        dtraj: (N+1) x (M-1)*k_traj matrix with interpolated velocities per axis + timesteps
        ddtraj: (N+1) x (M-1)*k_traj matrix with interpolated accelerations per axis + timesteps
    '''
    assert type(p) == np.ndarray and type(k_traj) == int
    # Time intervals between consecutive knots.
    h = list(np.zeros((len(t) - 1, 1)))
    for i in range(len(t) - 1):
        h[i] = t[i + 1] - t[i]
    # Tridiagonal A(h) matrix of the acceleration-continuity equations.
    A = np.zeros((len(h) - 1, len(h) - 1))
    for i in range(len(h) - 1):
        for j in range(len(h) - 1):
            if i == j:
                A[i][j] = 2 * (h[i] + h[i + 1])
            if i == j + 1:
                A[i][j] = h[i + 1]
            if j == i + 1:
                A[i][j] = h[i]
    # Known right-hand side B(p, h, v1, vn).
    B = np.zeros((len(h) - 1, len(p[0])))
    for i in range(len(h) - 1):
        B[i] = (3 / (h[i] * h[i + 1])) * (
                pow(h[i], 2) * (np.subtract(p[i + 2], p[i + 1])) + pow(h[i + 1], 2) * (np.subtract(p[i + 1], p[i])))
    B[0] = B[0] - np.dot(h[1], v1)
    B[-1] = B[-1] - np.dot(h[-2], vn)
    # Solve for the unknown velocities at the interior knots.
    x = np.linalg.solve(A, B)
    vel = [v1.copy()]
    [vel.append(x[i]) for i in range(len(x))]
    vel.append(vn.copy())
    # Stitch M-1 cubic segments; one extra row holds the time stamps.
    # Generalized: the original hard-coded 7 rows (6 axes + time).
    n_rows = len(p[0]) + 1
    traj = [[0] for _ in range(n_rows)]
    dtraj = [[0] for _ in range(n_rows)]
    ddtraj = [[0] for _ in range(n_rows)]
    for i in range(len(p) - 1):
        traj_, dtraj_, ddtraj_ = interpolate_cubic_2(p[i], p[i + 1], k_traj, float(h[i]), vel[i], vel[i + 1])
        for j in range(len(traj) - 1):
            traj[j].extend(traj_[j])
            dtraj[j].extend(dtraj_[j])
            ddtraj[j].extend(ddtraj_[j])
        # Offset each segment's local time vector by the elapsed time so far.
        traj[-1].extend(traj_[-1] + traj[-1][-1])
        dtraj[-1].extend(dtraj_[-1] + dtraj[-1][-1])
        ddtraj[-1].extend(ddtraj_[-1] + ddtraj[-1][-1])
    # Drop the leading [0] placeholder column.  BUG FIX: the original
    # deleted from `traj` in all three statements, so dtraj and ddtraj
    # silently returned positions instead of velocities / accelerations.
    traj = np.asarray(np.delete(traj, 0, 1))
    dtraj = np.asarray(np.delete(dtraj, 0, 1))
    ddtraj = np.asarray(np.delete(ddtraj, 0, 1))
    return traj, dtraj, ddtraj
| 39.08453 | 118 | 0.548437 | import numpy as np
from sympy import *
def interpolate_cubic(p1, p2, k_traj, t):
assert type(p1) == np.ndarray and type(p2) == np.ndarray
assert type(k_traj) == int and (type(t) == float or type(t) == int)
traj_list = []
dtraj_list = []
ddtraj_list = []
dddtraj_list = []
s, ds, dds, ddds = get_normalized_third_degree_polynomial(k_traj)
for i in range(len(p1)):
traj_ = [((p2[i] - p1[i]) * s[j] + p1[i]) for j in range(len(s))]
dtraj_ = np.divide([((p2[i] - p1[i]) * ds[j]) for j in range(len(ds))], t)
ddtraj_ = np.divide([((p2[i] - p1[i]) * dds[j]) for j in range(len(dds))], t ** 2)
dddtraj_ = np.divide([((p2[i] - p1[i]) * ddds[j]) for j in range(len(ddds))], t ** 3)
traj_list.append(traj_)
dtraj_list.append(dtraj_)
ddtraj_list.append(ddtraj_)
dddtraj_list.append(dddtraj_)
tv = np.linspace(0, t, k_traj)
traj_list.append(tv)
dtraj_list.append(tv)
ddtraj_list.append(tv)
dddtraj_list.append(tv)
traj = np.asarray(traj_list)
dtraj = np.asarray(dtraj_list)
ddtraj = np.asarray(ddtraj_list)
dddtraj = np.asarray(dddtraj_list)
return traj, dtraj, ddtraj, dddtraj
def interpolate_quintic(p1, p2, k_traj, t):
assert type(p1) == np.ndarray and type(p2) == np.ndarray
assert type(k_traj) == int and (type(t) == float or type(t) == int)
traj_list = []
dtraj_list = []
ddtraj_list = []
dddtraj_list = []
s, ds, dds, ddds = get_normalized_fifth_degree_polynomial(k_traj)
for i in range(len(p1)):
traj_ = [((p2[i] - p1[i]) * s[j] + p1[i]) for j in range(len(s))]
dtraj_ = np.divide([((p2[i] - p1[i]) * ds[j]) for j in range(len(ds))], t)
ddtraj_ = np.divide([((p2[i] - p1[i]) * dds[j]) for j in range(len(dds))], t ** 2)
dddtraj_ = np.divide([((p2[i] - p1[i]) * ddds[j]) for j in range(len(ddds))], t ** 3)
traj_list.append(traj_)
dtraj_list.append(dtraj_)
ddtraj_list.append(ddtraj_)
dddtraj_list.append(dddtraj_)
tv = np.linspace(0, t, k_traj)
traj_list.append(tv)
dtraj_list.append(tv)
ddtraj_list.append(tv)
dddtraj_list.append(tv)
traj = np.asarray(traj_list)
dtraj = np.asarray(dtraj_list)
ddtraj = np.asarray(ddtraj_list)
dddtraj = np.asarray(dddtraj_list)
return traj, dtraj, ddtraj, dddtraj
def interpolate_septic(p1, p2, k_traj, t):
assert type(p1) == np.ndarray and type(p2) == np.ndarray
assert type(k_traj) == int and (type(t) == float or type(t) == int)
traj_list = []
dtraj_list = []
ddtraj_list = []
dddtraj_list = []
s, ds, dds, ddds = get_normalized_seventh_degree_polynomial(k_traj)
for i in range(len(p1)):
traj_ = [((p2[i] - p1[i]) * s[j] + p1[i]) for j in range(len(s))]
dtraj_ = np.divide([((p2[i] - p1[i]) * ds[j]) for j in range(len(ds))], t)
ddtraj_ = np.divide([((p2[i] - p1[i]) * dds[j]) for j in range(len(dds))], t ** 2)
dddtraj_ = np.divide([((p2[i] - p1[i]) * ddds[j]) for j in range(len(ddds))], t ** 3)
traj_list.append(traj_)
dtraj_list.append(dtraj_)
ddtraj_list.append(ddtraj_)
dddtraj_list.append(dddtraj_)
tv = np.linspace(0, t, k_traj)
traj_list.append(tv)
dtraj_list.append(tv)
ddtraj_list.append(tv)
dddtraj_list.append(tv)
traj = np.asarray(traj_list)
dtraj = np.asarray(dtraj_list)
ddtraj = np.asarray(ddtraj_list)
dddtraj = np.asarray(dddtraj_list)
return traj, dtraj, ddtraj, dddtraj
def interpolate_nonic(p1, p2, k_traj, t):
assert type(p1) == np.ndarray and type(p2) == np.ndarray
assert type(k_traj) == int and (type(t) == float or type(t) == int)
traj_list = []
dtraj_list = []
ddtraj_list = []
dddtraj_list = []
s, ds, dds, ddds = get_normalized_ninth_degree_polynomial(k_traj)
for i in range(len(p1)):
traj_ = [((p2[i] - p1[i]) * s[j] + p1[i]) for j in range(len(s))]
dtraj_ = np.divide([((p2[i] - p1[i]) * ds[j]) for j in range(len(ds))], t)
ddtraj_ = np.divide([((p2[i] - p1[i]) * dds[j]) for j in range(len(dds))], t ** 2)
dddtraj_ = np.divide([((p2[i] - p1[i]) * ddds[j]) for j in range(len(ddds))], t ** 3)
traj_list.append(traj_)
dtraj_list.append(dtraj_)
ddtraj_list.append(ddtraj_)
dddtraj_list.append(dddtraj_)
tv = np.linspace(0, t, k_traj)
traj_list.append(tv)
dtraj_list.append(tv)
ddtraj_list.append(tv)
dddtraj_list.append(tv)
traj = np.asarray(traj_list)
dtraj = np.asarray(dtraj_list)
ddtraj = np.asarray(ddtraj_list)
dddtraj = np.asarray(dddtraj_list)
return traj, dtraj, ddtraj, dddtraj
def interpolate_trapezoid(p1, p2, k_traj, t):
    """Point-to-point interpolation using the normalized trapezoidal velocity law.

    :param p1: start configuration (1-D numpy array).
    :param p2: goal configuration, same length as p1.
    :param k_traj: number of samples along the trajectory.
    :param t: total motion time.
    :return: (traj, dtraj, ddtraj, dddtraj); each has one row per coordinate
        plus a final row carrying the time axis.
    """
    assert type(p1) == np.ndarray and type(p2) == np.ndarray
    assert type(k_traj) == int and (type(t) == float or type(t) == int)
    s, ds, dds, ddds = get_normalized_trapezoid_polynomial(k_traj)
    time_axis = np.linspace(0, t, k_traj)
    positions = []
    velocities = []
    accelerations = []
    jerks = []
    for i in range(len(p1)):
        delta = p2[i] - p1[i]
        # Scale the normalized law by the displacement; time-scale derivatives.
        positions.append(delta * s + p1[i])
        velocities.append(delta * ds / t)
        accelerations.append(delta * dds / (t ** 2))
        jerks.append(delta * ddds / (t ** 3))
    positions.append(time_axis)
    velocities.append(time_axis)
    accelerations.append(time_axis)
    jerks.append(time_axis)
    return (np.asarray(positions), np.asarray(velocities),
            np.asarray(accelerations), np.asarray(jerks))
def interpolate_minimum_jerk_derivative(p1, p2, k_traj, t):
    """Point-to-point interpolation using the minimum-jerk-derivative timing law.

    :param p1: start configuration (1-D numpy array).
    :param p2: goal configuration, same length as p1.
    :param k_traj: number of samples along the trajectory.
    :param t: total motion time.
    :return: (traj, dtraj, ddtraj, dddtraj); each has one row per coordinate
        plus a final row carrying the time axis.
    """
    assert type(p1) == np.ndarray and type(p2) == np.ndarray
    assert type(k_traj) == int and (type(t) == float or type(t) == int)
    s, ds, dds, ddds = get_normalized_minimum_jerk_derivative_polynomial(k_traj)
    time_axis = np.linspace(0, t, k_traj)
    positions = []
    velocities = []
    accelerations = []
    jerks = []
    for i in range(len(p1)):
        delta = p2[i] - p1[i]
        # Scale the normalized law by the displacement; time-scale derivatives.
        positions.append(delta * s + p1[i])
        velocities.append(delta * ds / t)
        accelerations.append(delta * dds / (t ** 2))
        jerks.append(delta * ddds / (t ** 3))
    positions.append(time_axis)
    velocities.append(time_axis)
    accelerations.append(time_axis)
    jerks.append(time_axis)
    return (np.asarray(positions), np.asarray(velocities),
            np.asarray(accelerations), np.asarray(jerks))
def get_normalized_first_degree_polynomial(k_traj):
    """Linear (constant-velocity) normalized timing law s(tau) = tau on [0, 1].

    :param k_traj: number of evenly spaced samples.
    :return: (stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau) sampled arrays.
    """
    stau = np.linspace(0, 1, k_traj)
    # s'(tau) = 1 everywhere; all higher derivatives vanish.
    dstau_dtau = np.ones(k_traj)
    ddstau_ddtau = np.zeros(k_traj)
    dddstau_dddtau = np.zeros(k_traj)
    return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_third_degree_polynomial(k_traj):
    """Cubic smoothstep timing law s(tau) = -2 tau^3 + 3 tau^2 on [0, 1].

    Velocity vanishes at both ends; acceleration is linear; jerk is constant.

    :param k_traj: number of evenly spaced samples.
    :return: (stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau) sampled arrays.
    """
    tau = np.linspace(0, 1, k_traj)
    stau = -2 * (tau ** 3) + 3 * (tau ** 2)
    dstau_dtau = -6 * (tau ** 2) + 6 * tau
    ddstau_ddtau = -12 * tau + 6
    dddstau_dddtau = np.full(k_traj, -12.0)
    return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_fifth_degree_polynomial(k_traj):
    """Quintic timing law s(tau) = 6 tau^5 - 15 tau^4 + 10 tau^3 on [0, 1].

    Velocity and acceleration vanish at both ends (minimum-jerk profile).

    :param k_traj: number of evenly spaced samples.
    :return: (stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau) sampled arrays.
    """
    tau = np.linspace(0, 1, k_traj)
    stau = 6 * (tau ** 5) - 15 * (tau ** 4) + 10 * (tau ** 3)
    dstau_dtau = 30 * (tau ** 4) - 60 * (tau ** 3) + 30 * (tau ** 2)
    ddstau_ddtau = 120 * (tau ** 3) - 180 * (tau ** 2) + 60 * tau
    dddstau_dddtau = 360 * (tau ** 2) - 360 * tau + 60
    return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_seventh_degree_polynomial(k_traj):
    """Septic timing law s(tau) = -20 tau^7 + 70 tau^6 - 84 tau^5 + 35 tau^4.

    Velocity, acceleration and jerk vanish at both ends of [0, 1].

    :param k_traj: number of evenly spaced samples.
    :return: (stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau) sampled arrays.
    """
    tau = np.linspace(0, 1, k_traj)
    stau = -20 * (tau ** 7) + 70 * (tau ** 6) - 84 * (tau ** 5) + 35 * (tau ** 4)
    dstau_dtau = -140 * (tau ** 6) + 420 * (tau ** 5) - 420 * (tau ** 4) + 140 * (tau ** 3)
    ddstau_ddtau = -840 * (tau ** 5) + 2100 * (tau ** 4) - 1680 * (tau ** 3) + 420 * (tau ** 2)
    dddstau_dddtau = -4200 * (tau ** 4) + 8400 * (tau ** 3) - 5040 * (tau ** 2) + 840 * tau
    return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_ninth_degree_polynomial(k_traj):
    """Nonic timing law s(tau) = 70 tau^9 - 315 tau^8 + 540 tau^7 - 420 tau^6 + 126 tau^5.

    Derivatives up to snap vanish at both ends of [0, 1] (minimum-snap profile).
    All samples evaluate to the polynomial exactly, including tau = 0 where
    every quantity is zero.

    :param k_traj: number of evenly spaced samples.
    :return: (stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau) sampled arrays.
    """
    tau = np.linspace(0, 1, k_traj)
    stau = 70 * (tau ** 9) - 315 * (tau ** 8) + 540 * (tau ** 7) - 420 * (tau ** 6) + 126 * (tau ** 5)
    dstau_dtau = 630 * (tau ** 8) - 2520 * (tau ** 7) + 3780 * (tau ** 6) - 2520 * (tau ** 5) + 630 * (tau ** 4)
    ddstau_ddtau = 5040 * (tau ** 7) - 17640 * (tau ** 6) + 22680 * (tau ** 5) - 12600 * (tau ** 4) + 2520 * (tau ** 3)
    dddstau_dddtau = 35280 * (tau ** 6) - 105840 * (tau ** 5) + 113400 * (tau ** 4) - 50400 * (tau ** 3) + 7560 * (
            tau ** 2)
    return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_trapezoid_polynomial(k_traj):
    """Normalized trapezoidal velocity profile on tau in [0, 1].

    Acceleration ramp, constant-velocity cruise and deceleration ramp build
    the position samples; the derivative arrays are then re-estimated with
    forward finite differences.

    :param k_traj: number of evenly spaced samples.
    :return: (stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau) sampled arrays.
    """
    t_acc = 1 / 10.  # duration of the acceleration (and deceleration) ramp
    t_ct = 1 - 2 * t_acc  # duration of the constant-velocity cruise phase
    v_m = 1.0 / (t_acc + t_ct)  # peak velocity chosen so total displacement is 1
    x = t_acc  # shorthand for the phase boundary
    tau = np.linspace(0, 1, k_traj)
    stau = np.linspace(0, 1, k_traj)
    dstau_dtau = np.linspace(0, 0, k_traj)
    ddstau_ddtau = np.linspace(0, 0, k_traj)
    dddstau_dddtau = np.linspace(0, 0, k_traj)
    for i in range(k_traj):
        t = tau[i]
        if 0 <= t <= x:
            # Acceleration ramp: quadratic position, linear velocity.
            res = 0.5 * v_m * (t ** 2) / t_acc
            vel = v_m * t / t_acc
        elif x < t <= 1 - x:
            # Cruise phase at peak velocity.
            res = 0.5 * v_m * (t_acc ** 2) / t_acc + v_m * (t - t_acc)
            vel = v_m
        elif t > 1 - x:
            # Deceleration ramp, mirror image of the acceleration phase.
            res = 0.5 * v_m * (t_acc ** 2) / t_acc + v_m * t_ct + v_m * (t - t_acc - t_ct) - 0.5 * v_m / t_acc * (
                    t - t_acc - t_ct) ** 2
            vel = v_m - v_m / t_acc * (t - t_acc - t_ct)
        else:
            # Unreachable for tau in [0, 1]; assigning None would raise.
            res = None
            vel = None
        stau[i] = res
        dstau_dtau[i] = vel
    # NOTE(review): the analytic velocities above are overwritten by forward
    # finite differences below, and the last two samples of each derivative
    # array keep their previous values (loops stop at size - 2) -- confirm
    # this boundary behavior is intended.
    for i in range(tau.size - 2):
        dstau_dtau[i] = (stau[i + 1] - stau[i]) / (tau[i + 1] - tau[i])
    for i in range(tau.size - 2):
        ddstau_ddtau[i] = (dstau_dtau[i + 1] - dstau_dtau[i]) / (tau[i + 1] - tau[i])
    for i in range(tau.size - 2):
        dddstau_dddtau[i] = (ddstau_ddtau[i + 1] - ddstau_ddtau[i]) / (tau[i + 1] - tau[i])
    return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_minimum_jerk_derivative_polynomial(k_traj):
    """Piecewise-quartic minimum-jerk-derivative timing law on tau in [0, 1].

    Position samples come from a four-segment piecewise polynomial that is
    symmetric about tau = 0.5; the derivative arrays are estimated with
    forward finite differences.

    :param k_traj: number of evenly spaced samples.
    :return: (stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau) sampled arrays.
    """
    x = (1 - np.sqrt(0.5)) / 2  # phase-boundary parameter of the piecewise law
    tau = np.linspace(0, 1, k_traj)
    stau = np.linspace(0, 1, k_traj)
    dstau_dtau = np.linspace(0, 0, k_traj)
    ddstau_ddtau = np.linspace(0, 0, k_traj)
    dddstau_dddtau = np.linspace(0, 0, k_traj)
    res = None
    # NOTE(review): the loop stops at k_traj - 1, so the final sample keeps
    # its linspace value of 1.0 (the profile's end point) -- confirm intended.
    for i in range(k_traj - 1):
        t = tau[i]
        if 0 <= t <= x:
            res = 16 * (t ** 4)
        elif x < t <= 0.5:
            res = -16 * (t ** 4) + 128 * x * (t ** 3) - 192 * (x ** 2) * (t ** 2) + 128 * (x ** 3) * t - 32 * (x ** 4)
        elif 0.5 < t <= 1 - x:
            # Mirror of the second segment, expressed in (1 - t).
            res = 1 + 16 * ((1 - t) ** 4) - 128 * x * ((1 - t) ** 3) + 192 * (x ** 2) * ((1 - t) ** 2) - 128 * (
                    x ** 3) * (1 - t) + 32 * (x ** 4)
        elif 1 - x < t <= 1:
            res = 1 - 16 * (1 - t) ** 4
        stau[i] = res
    # Forward finite differences; the last two samples of each derivative
    # array keep their initial (zero) values.
    for i in range(tau.size - 2):
        dstau_dtau[i] = (stau[i + 1] - stau[i]) / (tau[i + 1] - tau[i])
    for i in range(tau.size - 2):
        ddstau_ddtau[i] = (dstau_dtau[i + 1] - dstau_dtau[i]) / (tau[i + 1] - tau[i])
    for i in range(tau.size - 2):
        dddstau_dddtau[i] = (ddstau_ddtau[i + 1] - ddstau_ddtau[i]) / (tau[i + 1] - tau[i])
    return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau
def get_normalized_cubic_polynomial_coefficients():
    """Coefficients [a0..a3] of the normalized cubic timing law.

    Solves the 4x4 system enforcing s(0)=0, s(1)=1, s'(0)=s'(1)=0;
    the result is the smoothstep cubic 3 t^2 - 2 t^3.

    :return: (4, 1) column vector of polynomial coefficients.
    """
    constraints = np.array(
        [[1, 0, 0, 0],   # position at t = 0
         [1, 1, 1, 1],   # position at t = 1
         [0, 1, 0, 0],   # velocity at t = 0
         [0, 1, 2, 3]],  # velocity at t = 1
        dtype='float')
    boundary = np.array([[0], [1], [0], [0]], dtype='float')
    return np.linalg.solve(constraints, boundary)
def get_normalized_quintic_polynomial_coefficients():
    """Coefficients [a0..a5] of the normalized quintic timing law.

    Solves the 6x6 system enforcing s(0)=0, s(1)=1 and zero velocity and
    acceleration at both ends; the result is 10 t^3 - 15 t^4 + 6 t^5.

    :return: (6, 1) column vector of polynomial coefficients.
    """
    constraints = np.array(
        [[1, 0, 0, 0, 0, 0],    # position at t = 0
         [1, 1, 1, 1, 1, 1],    # position at t = 1
         [0, 1, 0, 0, 0, 0],    # velocity at t = 0
         [0, 1, 2, 3, 4, 5],    # velocity at t = 1
         [0, 0, 2, 0, 0, 0],    # acceleration at t = 0
         [0, 0, 2, 6, 12, 20]], # acceleration at t = 1
        dtype='float')
    boundary = np.array([[0], [1], [0], [0], [0], [0]], dtype='float')
    return np.linalg.solve(constraints, boundary)
def get_normalized_septic_polynomial_coefficients():
    """Coefficients [a0..a7] of the normalized septic timing law.

    Solves the 8x8 system enforcing s(0)=0, s(1)=1 and zero velocity,
    acceleration and jerk at both ends; the result is
    35 t^4 - 84 t^5 + 70 t^6 - 20 t^7.

    :return: (8, 1) column vector of polynomial coefficients.
    """
    constraints = np.array(
        [[1, 0, 0, 0, 0, 0, 0, 0],        # position at t = 0
         [1, 1, 1, 1, 1, 1, 1, 1],        # position at t = 1
         [0, 1, 0, 0, 0, 0, 0, 0],        # velocity at t = 0
         [0, 1, 2, 3, 4, 5, 6, 7],        # velocity at t = 1
         [0, 0, 2, 0, 0, 0, 0, 0],        # acceleration at t = 0
         [0, 0, 2, 6, 12, 20, 30, 42],    # acceleration at t = 1
         [0, 0, 0, 6, 0, 0, 0, 0],        # jerk at t = 0
         [0, 0, 0, 6, 24, 60, 120, 210]], # jerk at t = 1
        dtype='float')
    boundary = np.array([[0], [1], [0], [0], [0], [0], [0], [0]], dtype='float')
    return np.linalg.solve(constraints, boundary)
def get_normalized_nonic_polynomial_coefficients():
    """Coefficients [a0..a9] of the normalized ninth-degree timing law.

    Bug fix: the previous version reused the quintic position/velocity/
    acceleration rows (6 columns each) together with 8-column jerk rows,
    producing a ragged, unsolvable system.  A ninth-degree polynomial has
    ten coefficients and needs ten boundary conditions: position, velocity,
    acceleration, jerk and snap at both t=0 and t=1.

    The solution is the minimum-snap profile
    126 t^5 - 420 t^6 + 540 t^7 - 315 t^8 + 70 t^9.

    :return: (10, 1) column vector of polynomial coefficients.
    """
    n = 10  # a 9th-degree polynomial has ten coefficients

    def _derivative_row(order, t):
        # Row of d^order/dt^order [1, t, t^2, ..., t^9] evaluated at t.
        row = []
        for k in range(n):
            if k < order:
                row.append(0.0)
            else:
                coeff = 1.0
                for j in range(order):
                    coeff *= (k - j)  # falling factorial k (k-1) ... (k-order+1)
                row.append(coeff * t ** (k - order))
        return row

    a = []
    b = []
    # Derivative orders 0..4 (position .. snap), each pinned at t=0 and t=1.
    for order in range(5):
        a.append(_derivative_row(order, 0.0))
        a.append(_derivative_row(order, 1.0))
        b.append([0.0])
        # Only the end position is non-zero: s(1) = 1.
        b.append([1.0] if order == 0 else [0.0])
    polynomial = np.linalg.solve(np.array(a, dtype='float'), np.array(b, dtype='float'))
    return polynomial
def interpolate_quint_2(p1, p2, dp1, dp2, ddp1, ddp2, k_traj, T):
    """Quintic point-to-point interpolation with explicit boundary
    velocities and accelerations.

    Solves the 6x6 boundary-condition system per coordinate and samples the
    resulting symbolic polynomial at k_traj evenly spaced times.

    :param p1: start positions (numpy array).
    :param p2: goal positions.
    :param dp1: start velocities.
    :param dp2: goal velocities.
    :param ddp1: start accelerations.
    :param ddp2: goal accelerations.
    :param k_traj: number of samples.
    :param T: total motion time.
    :return: (traj, dtraj, ddtraj); the last row of each carries the time axis.
    """
    assert type(p1) == np.ndarray and type(p2) == np.ndarray
    assert type(dp1) == np.ndarray and type(dp2) == np.ndarray
    assert type(ddp1) == np.ndarray and type(ddp2) == np.ndarray
    assert type(k_traj) == int and (type(T) == float or type(T) == int)
    # Constraint rows: position, velocity and acceleration at t=0 and t=T.
    x0 = [1, 0, 0, 0, 0, 0]
    xT = [1, T, pow(T, 2), pow(T, 3), pow(T, 4), pow(T, 5)]
    v0 = [0, 1, 0, 0, 0, 0]
    vT = [0, 1, 2 * T, 3 * pow(T, 2), 4 * pow(T, 3), 5 * pow(T, 4)]
    a0 = [0, 0, 2, 0, 0, 0]
    aT = [0, 0, 2, 6 * T, 12 * pow(T, 2), 20 * pow(T, 3)]
    A = np.array([x0, xT, v0, vT, a0, aT], dtype='float')
    traj_list = []
    dtraj_list = []
    ddtraj_list = []
    # Symbolic time variable -- Symbol presumably comes from sympy, imported
    # earlier in the file (confirm).
    t = Symbol('t')
    tv = np.linspace(0, T, k_traj)
    for i in range(len(p1)):
        # Boundary conditions of coordinate i.
        B = np.array([[p1[i]], [p2[i]], [dp1[i]], [dp2[i]], [ddp1[i]], [ddp2[i]]], dtype='float')
        x = np.linalg.solve(A, B)
        # Symbolic quintic and its first two derivatives.
        traj = x[0, 0] + x[1, 0] * t + x[2, 0] * pow(t, 2) + x[3, 0] * pow(t, 3) + x[4, 0] * pow(t, 4) + x[
            5, 0] * pow(t, 5)
        dtraj = x[1, 0] + 2 * x[2, 0] * t + 3 * x[3, 0] * pow(t, 2) + 4 * x[4, 0] * pow(t, 3) + 5 * x[
            5, 0] * pow(t, 4)
        ddtraj = 2 * x[2, 0] + 6 * x[3, 0] * t + 12 * x[4, 0] * pow(t, 2) + 20 * x[5, 0] * pow(t, 3)
        # Evaluate at each sample instant by symbolic substitution.
        traj_ = [traj.subs(t, tv_) for tv_ in tv]
        dtraj_ = [dtraj.subs(t, tv_) for tv_ in tv]
        ddtraj_ = [ddtraj.subs(t, tv_) for tv_ in tv]
        traj_list.append(traj_)
        dtraj_list.append(dtraj_)
        ddtraj_list.append(ddtraj_)
    # Time axis as the final row of each result.
    traj_list.append(tv)
    dtraj_list.append(tv)
    ddtraj_list.append(tv)
    traj = np.asarray(traj_list)
    dtraj = np.asarray(dtraj_list)
    ddtraj = np.asarray(ddtraj_list)
    return traj, dtraj, ddtraj
def interpolate_cubic_2(p1, p2, k_traj, T, dp1=np.zeros((6, 1)), dp2=np.zeros((6, 1))):
    """Cubic point-to-point interpolation with explicit boundary velocities.

    Solves the 4x4 boundary-condition system per coordinate and samples the
    resulting symbolic polynomial at k_traj evenly spaced times.

    :param p1: start positions (numpy array).
    :param p2: goal positions.
    :param k_traj: number of samples.
    :param T: total motion time.
    :param dp1: start velocities (defaults to zeros for a 6-DOF system).
    :param dp2: goal velocities (defaults to zeros for a 6-DOF system).
    :return: (traj, dtraj, ddtraj); the last row of each carries the time axis.

    NOTE(review): the defaults are mutable numpy arrays evaluated once at
    definition time; they are not mutated here, but a None sentinel would be
    safer, and the hard-coded (6, 1) shape assumes six coordinates -- confirm.
    """
    assert type(p1) == np.ndarray and type(p2) == np.ndarray
    assert type(dp1) == np.ndarray and type(dp2) == np.ndarray
    assert type(k_traj) == int and (type(T) == float or type(T) == int)
    # Constraint rows: position and velocity at t=0 and t=T.
    x0 = [1, 0, 0, 0]
    xT = [1, T, pow(T, 2), pow(T, 3)]
    v0 = [0, 1, 0, 0]
    vT = [0, 1, 2 * T, 3 * pow(T, 2)]
    A = np.array([x0, xT, v0, vT], dtype='float')
    traj_list = []
    dtraj_list = []
    ddtraj_list = []
    # Symbolic time variable -- Symbol presumably comes from sympy, imported
    # earlier in the file (confirm).
    t = Symbol('t')
    tv = np.linspace(0, T, k_traj)
    for i in range(len(p1)):
        # Boundary conditions of coordinate i.
        B = np.array([[p1[i]], [p2[i]], [dp1[i]], [dp2[i]]], dtype='float')
        x = np.linalg.solve(A, B)
        # Symbolic cubic and its first two derivatives.
        traj = x[0, 0] + x[1, 0] * t + x[2, 0] * pow(t, 2) + x[3, 0] * pow(t, 3)
        dtraj = x[1, 0] + 2 * x[2, 0] * t + 3 * x[3, 0] * pow(t, 2)
        ddtraj = 2 * x[2, 0] + 6 * x[3, 0] * t
        # Evaluate at each sample instant by symbolic substitution.
        traj_ = [traj.subs(t, tv_) for tv_ in tv]
        dtraj_ = [dtraj.subs(t, tv_) for tv_ in tv]
        ddtraj_ = [ddtraj.subs(t, tv_) for tv_ in tv]
        traj_list.append(traj_)
        dtraj_list.append(dtraj_)
        ddtraj_list.append(ddtraj_)
    # Time axis as the final row of each result.
    traj_list.append(tv)
    dtraj_list.append(tv)
    ddtraj_list.append(tv)
    traj = np.array(traj_list)
    dtraj = np.array(dtraj_list)
    ddtraj = np.array(ddtraj_list)
    return traj, dtraj, ddtraj
def interpolate_viapoints(p, v1, vn, k_traj, t):
    """Piecewise-cubic interpolation through a sequence of via points.

    Solves the tridiagonal system for the intermediate via-point velocities
    (velocity continuity across segments), then concatenates cubic segments
    produced by interpolate_cubic_2.

    :param p: (n, dof) array of via points.
    :param v1: start velocity (dof vector).
    :param vn: end velocity (dof vector).
    :param k_traj: number of samples per segment.
    :param t: sequence of length n with the time stamp of each via point.
    :return: (traj, dtraj, ddtraj); the last row of each carries the time axis.
    """
    assert type(p) == np.ndarray and type(k_traj) == int
    # Segment durations h[i] = t[i+1] - t[i].
    h = list(np.zeros((len(t) - 1, 1)))
    for i in range(len(t) - 1):
        h[i] = t[i + 1] - t[i]
    # Tridiagonal coefficient matrix of the velocity-continuity equations.
    A = np.zeros((len(h) - 1, len(h) - 1))
    for i in range(len(h) - 1):
        for j in range(len(h) - 1):
            if i == j:
                A[i][j] = 2 * (h[i] + h[i + 1])
            if i == j + 1:
                A[i][j] = h[i + 1]
            if j == i + 1:
                A[i][j] = h[i]
    B = np.zeros((len(h) - 1, len(p[0])))
    for i in range(len(h) - 1):
        B[i] = (3 / (h[i] * h[i + 1])) * (
                pow(h[i], 2) * (np.subtract(p[i + 2], p[i + 1])) + pow(h[i + 1], 2) * (np.subtract(p[i + 1], p[i])))
    # Known boundary velocities move to the right-hand side.
    B[0] = B[0] - np.dot(h[1], v1)
    B[-1] = B[-1] - np.dot(h[-2], vn)
    x = np.linalg.solve(A, B)
    # Full velocity profile: start, solved intermediates, end.
    vel = [v1.copy()]
    vel.extend(x)
    vel.append(vn.copy())
    # Seed each row with [0] so the first extend has something to append to;
    # the seed column is stripped below.
    traj = [[0], [0], [0], [0], [0], [0], [0]]
    dtraj = [[0], [0], [0], [0], [0], [0], [0]]
    ddtraj = [[0], [0], [0], [0], [0], [0], [0]]
    for i in range(len(p) - 1):
        traj_, dtraj_, ddtraj_ = interpolate_cubic_2(p[i], p[i + 1], k_traj, float(h[i]), vel[i], vel[i + 1])
        for j in range(len(traj) - 1):
            traj[j].extend(traj_[j])
            dtraj[j].extend(dtraj_[j])
            ddtraj[j].extend(ddtraj_[j])
        # Shift each segment's time axis so segments line up consecutively.
        traj[-1].extend(traj_[-1] + traj[-1][-1])
        dtraj[-1].extend(dtraj_[-1] + dtraj[-1][-1])
        ddtraj[-1].extend(ddtraj_[-1] + ddtraj[-1][-1])
    # Drop the [0] seed column.  Bug fix: dtraj and ddtraj were previously
    # rebuilt from the already-trimmed position array, silently discarding
    # the velocity and acceleration data.
    traj = np.asarray(np.delete(traj, 0, 1))
    dtraj = np.asarray(np.delete(dtraj, 0, 1))
    ddtraj = np.asarray(np.delete(ddtraj, 0, 1))
    return traj, dtraj, ddtraj
| true | true |
f710a0470414947a43cf7b958d2cdc1f201c32b2 | 1,346 | py | Python | model/NoobSender.py | adhocmaster/netmad | fe6c115d71ebeb8c689cdd1b8bed80ac35757681 | [
"MIT"
] | null | null | null | model/NoobSender.py | adhocmaster/netmad | fe6c115d71ebeb8c689cdd1b8bed80ac35757681 | [
"MIT"
] | null | null | null | model/NoobSender.py | adhocmaster/netmad | fe6c115d71ebeb8c689cdd1b8bed80ac35757681 | [
"MIT"
] | null | null | null | from model.Sender import Sender
from model.SenderType import SenderType
import logging
import math
import numpy as np
class NoobSender(Sender):
    """Baseline sender: emits packets at a fixed delivery rate and does not
    react to ACKs or losses (no congestion control)."""

    def __init__(self, id, deliveryRate, debug=True):
        """
        :param id: unique sender identifier.
        :param deliveryRate: packets to create per time step (may be fractional).
        :param debug: enables debug logging.
        """
        super().__init__(id, SenderType.Noob, deliveryRate=deliveryRate, debug=debug)

    def getNumberOfPacketsToCreateForTimeStep(self, timeStep):
        """Number of packets to create at ``timeStep``.

        The floor-difference keeps the cumulative packet count at
        floor(timeStep * deliveryRate), so fractional rates average out
        correctly over time.
        """
        num = math.floor(timeStep * self.deliveryRate) - math.floor((timeStep - 1) * self.deliveryRate)
        # print(num)
        # randomness
        # if self.debug:
        #     logging.info(f"Sender #{self.id} creating {numberOfPackets} packets at {timeStep}")
        # return math.floor( num * np.random.uniform(0.5, 1.1))
        return num

    def onTimeStepStart(self, timeStep):
        """To be called at the beginning of a timeStep (no-op for this sender).

        Args:
            timeStep ([type]): [description]
        """
        pass

    def onTimeStepEnd(self, timeStep):
        """To be called at the end of a timeStep (no-op for this sender).

        Args:
            timeStep ([type]): [description]
        """
        pass

    def onACK(self, packet):
        """Delegates ACK accounting to the base Sender; no rate adaptation."""
        super().onACK(packet)
        # packet loss conditions:
        # 1. ACK out of order.
        # 2.
        # if self.debug:
        #     logging.info(f"{self.getName()}: got ack for packet {packet.getPacketNumber()}")
pass | 28.041667 | 104 | 0.601783 | from model.Sender import Sender
from model.SenderType import SenderType
import logging
import math
import numpy as np
class NoobSender(Sender):
def __init__(self, id, deliveryRate, debug=True):
super().__init__(id, SenderType.Noob, deliveryRate=deliveryRate, debug=debug)
def getNumberOfPacketsToCreateForTimeStep(self, timeStep):
num = math.floor(timeStep * self.deliveryRate) - math.floor((timeStep - 1) * self.deliveryRate)
return num
def onTimeStepStart(self, timeStep):
pass
def onTimeStepEnd(self, timeStep):
pass
def onACK(self, packet):
super().onACK(packet)
pass | true | true |
f710a0c8b136ab0cad55d6b46cb18b57d9494789 | 3,849 | py | Python | customer_selection_line.py | pgmoka/checkout-simulator | bce7e68ba47b9309f19514a9199d43bdbbbc4ffc | [
"MIT"
] | null | null | null | customer_selection_line.py | pgmoka/checkout-simulator | bce7e68ba47b9309f19514a9199d43bdbbbc4ffc | [
"MIT"
] | null | null | null | customer_selection_line.py | pgmoka/checkout-simulator | bce7e68ba47b9309f19514a9199d43bdbbbc4ffc | [
"MIT"
] | null | null | null | '''
-----------------------------------------------------------------------
Additional Documentation
Made by Zachary A Brader, Kieran Coito, Pedro Goncalves Mokarzel
while attending University of Washington Bothell
Made in 03/09/2020
Based on instruction in CSS 458,
taught by professor Johnny Lin
Notes:
- Written for Python 3.7.3.
- No executable
- Modules necessary: numpy, random, and matplotlib.pyplot
- External necessities: variables.py, cashier.py, customer.py, and
equal_distribution_line
- Creates line environment for the use of mode
- Holds lists with relevant to the line
- Holds cashiers and customers
- Used equal_distribution_line as a base for other lines
- Line will give a customer to cashier that looks like it will go the
fastest
=======================================================================
'''
# =======================================================================
# ============================= Imports==================================
# =======================================================================
import numpy as np
import random as r
import matplotlib.pyplot as plt
import variables as v
from cashier import cashier
from customer import customer
from equal_distribution_line import equal_distribution_line
# =======================================================================
# ================================= Class ===============================
# =======================================================================
class customer_selection_line(equal_distribution_line):
    '''
    Inherits equal_distribution_line.
    Line acts such that each customer chooses the queue that currently
    compares as the "best" (smallest) cashier.
    '''
    # List of customers waiting to be assigned to a cashier queue.
    customer_list = 0
    # Array to keep track of automated cashiers.
    automated_cashier_tracker = 0
    # Maintains cost of maintenance for all lines.
    cost_for_maintenance = 0
    # Not implemented.
    time_step = 0
    # Number of cashiers in the system.
    number_of_cashiers = 0
    # Total number of customers processed by the line.
    total_number_of_customers = 0
    # Customers currently being served.
    customers_being_served = 0
    # Total number of customers currently in line.
    customers_waiting_to_queue = 0
    # Customers that have left the system at this point of the simulation.
    customers_that_left = 0
    total_number_of_checked_items = 0
    total_number_of_items_in_system = 0

    def rotate_customers(self):
        ''' Rotate customers between the cashiers' queues from the lines.

        Each waiting customer goes to the cashier that compares as smallest
        (the cashier class presumably orders cashiers by expected service
        speed via its comparison operators -- confirm there).

        Precondition:
        - Customers and cashier related lists created
        Postcondition:
        - Removal of customers in the environment list, and then the addition to queues
        '''
        # number_of_customers_entering_queue = int(np.random.rand()*(self.number_of_cashiers-1)) +1
        # test = []
        # for i in range(1000):
        #     test.append(int(rej()*self.number_of_cashiers))
        # plt.hist(test)
        # plt.show()
        # At most one customer is assigned per cashier per call.
        for individual_cashier_iterator in range(len(self.cashier_list)):
            if (len(self.customer_list) > 0):
                # Linear scan for the cashier that compares as smallest.
                smallest_cashier = self.cashier_list[0]
                for cashier_iterator in self.cashier_list:
                    if(smallest_cashier > cashier_iterator):
                        smallest_cashier = cashier_iterator
                smallest_cashier.add_customer_to_queue(self.customer_list.pop())
                self.customers_waiting_to_queue = self.customers_waiting_to_queue - 1
        # self.cashier_list.sort()
| 32.897436 | 99 | 0.590543 |
import numpy as np
import random as r
import matplotlib.pyplot as plt
import variables as v
from cashier import cashier
from customer import customer
from equal_distribution_line import equal_distribution_line
class customer_selection_line(equal_distribution_line):
    '''Comment-stripped duplicate: customers pick the cashier that currently
    compares as smallest.'''
    customer_list = 0
    automated_cashier_tracker = 0
    cost_for_maintenance = 0
    time_step = 0
    number_of_cashiers = 0
    total_number_of_customers = 0
    customers_being_served = 0
    customers_waiting_to_queue = 0
    customers_that_left = 0
    total_number_of_checked_items = 0
    total_number_of_items_in_system = 0

    def rotate_customers(self):
        # Assign at most one waiting customer per cashier per call, always to
        # the cashier that currently compares as smallest.
        for individual_cashier_iterator in range(len(self.cashier_list)):
            if (len(self.customer_list) > 0):
                smallest_cashier = self.cashier_list[0]
                for cashier_iterator in self.cashier_list:
                    if(smallest_cashier > cashier_iterator):
                        smallest_cashier = cashier_iterator
                smallest_cashier.add_customer_to_queue(self.customer_list.pop())
                self.customers_waiting_to_queue = self.customers_waiting_to_queue - 1
| true | true |
f710a1a8fb11a894a1f5202c9c336a54b665cf25 | 475 | py | Python | commons_util/logger/logger_factory.py | zhangdanyangg/commons-util-py | 65514ac1f5002b6156a31c09aeb38538a4d88cba | [
"Apache-2.0"
] | 7 | 2015-04-17T02:12:32.000Z | 2018-08-08T01:29:24.000Z | commons_util/logger/logger_factory.py | zhangdanyangg/commons-util-py | 65514ac1f5002b6156a31c09aeb38538a4d88cba | [
"Apache-2.0"
] | 3 | 2015-05-10T12:18:59.000Z | 2016-05-27T06:56:40.000Z | commons_util/logger/logger_factory.py | zhangdanyangg/commons-util-py | 65514ac1f5002b6156a31c09aeb38538a4d88cba | [
"Apache-2.0"
] | 4 | 2017-08-26T11:44:20.000Z | 2021-06-13T11:50:11.000Z | __author__ = 'Danyang'
import logging
import sys
class LoggerFactory(object):
def getConsoleLogger(self, cls_name, level=logging.DEBUG):
lgr = logging.getLogger(cls_name)
lgr.setLevel(level)
if not lgr.handlers:
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(level)
ch.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
lgr.addHandler(ch)
return lgr | 33.928571 | 102 | 0.633684 | __author__ = 'Danyang'
import logging
import sys
class LoggerFactory(object):
def getConsoleLogger(self, cls_name, level=logging.DEBUG):
lgr = logging.getLogger(cls_name)
lgr.setLevel(level)
if not lgr.handlers:
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(level)
ch.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
lgr.addHandler(ch)
return lgr | true | true |
f710a1c9df18743b2b56aa63b97b0a1180919b20 | 381 | py | Python | src/etl/common/timehelpers.py | vatdaell/spotify-analysis | 030239ba16cfc4a80d4f870686450c1ababc62c2 | [
"MIT"
] | 1 | 2020-10-14T10:01:37.000Z | 2020-10-14T10:01:37.000Z | src/etl/common/timehelpers.py | vatdaell/spotify-analysis | 030239ba16cfc4a80d4f870686450c1ababc62c2 | [
"MIT"
] | null | null | null | src/etl/common/timehelpers.py | vatdaell/spotify-analysis | 030239ba16cfc4a80d4f870686450c1ababc62c2 | [
"MIT"
] | null | null | null | from datetime import datetime
import datetime
def yesterday(today=datetime.datetime.now()):
yesterday = today - datetime.timedelta(days=1)
yesterday_timestamp = int(yesterday.timestamp()) * 1000
return yesterday_timestamp
def extractDate(name, prefix, fileType):
prefixLen = len(prefix)
fileTypeLen = len(fileType)
return name[prefixLen+1:-fileTypeLen]
| 25.4 | 59 | 0.745407 | from datetime import datetime
import datetime
def yesterday(today=datetime.datetime.now()):
yesterday = today - datetime.timedelta(days=1)
yesterday_timestamp = int(yesterday.timestamp()) * 1000
return yesterday_timestamp
def extractDate(name, prefix, fileType):
prefixLen = len(prefix)
fileTypeLen = len(fileType)
return name[prefixLen+1:-fileTypeLen]
| true | true |
f710a1f1b101dea4375a6417f55a26a8ac830bb1 | 11,358 | py | Python | steampak/libsteam/resources/apps.py | idlesign/steampak | cb3f2c737e272b0360802d947e388df7e34f50f3 | [
"BSD-3-Clause"
] | 24 | 2015-08-15T18:41:58.000Z | 2021-06-13T13:52:50.000Z | steampak/libsteam/resources/apps.py | idlesign/steampak | cb3f2c737e272b0360802d947e388df7e34f50f3 | [
"BSD-3-Clause"
] | 3 | 2016-10-12T13:46:07.000Z | 2017-03-05T02:54:22.000Z | steampak/libsteam/resources/apps.py | idlesign/steampak | cb3f2c737e272b0360802d947e388df7e34f50f3 | [
"BSD-3-Clause"
] | 3 | 2016-10-12T12:09:43.000Z | 2017-03-04T15:49:53.000Z | from datetime import datetime
from ctyped.types import CRef
from .base import _ApiResourceBase
from .stats import CurrentApplicationAchievements
from .user import User
class Application(_ApiResourceBase):
    """Accessor for Steam application (game) data.

    Aliased as ``steampak.SteamApplication``.

    .. code-block:: python

        from steampak import SteamApplication

        # We use `Spacewar` app ID. (This game is provided with SDK).
        my_app = SteamApplication(480)

    """

    def __init__(self, app_id, *args, **kwargs):
        """
        :param int|None app_id: Application (game) ID. ``None`` is reserved
            for the current-application subclass, which resolves the ID itself.
        """
        client = self.get_client()
        self._iface = client.apps
        self._iface_list = client.app_list

        super().__init__(*args, **kwargs)

        if app_id is not None:
            self.app_id = app_id

    def __str__(self):
        return self.name

    @property
    def owned(self):
        """``True`` if the user owns (is subscribed to) this app.

        .. warning::

            Only use this member if you need to check ownership of a game related to yours, a demo for example.

        :rtype: bool
        """
        return self._iface.get_is_subscribed(self.app_id)

    @property
    def installed(self):
        """``True`` if the app is installed (ownership not implied).

        :rtype: bool
        """
        return self._iface.get_is_installed(self.app_id)

    @property
    def name(self):
        """Application name, or None on error.

        .. warning::

            Restricted interface can only be used by approved apps.

        :rtype: str
        """
        return self._get_str(self._iface_list.get_name, [self.app_id])

    @property
    def install_dir(self):
        """Application installation path.

        .. note::

            If fails this falls back to a restricted interface, which can only be used by approved apps.

        :rtype: str
        """
        max_len = 500
        path = self._get_str(self._iface.get_install_dir, [self.app_id], max_len=max_len)
        # Empty result -> retry through the restricted app-list interface.
        return path or self._get_str(self._iface_list.get_install_dir, [self.app_id], max_len=max_len)

    @property
    def purchase_time(self):
        """Date and time of app purchase.

        :rtype: datetime
        """
        timestamp = self._iface.get_purchase_time(self.app_id)
        return datetime.utcfromtimestamp(timestamp)

    @property
    def build_id(self):
        """Application Build ID; may change at any time based on backend updates.

        .. warning::

            Restricted interface can only be used by approved apps.

        :rtype: int
        """
        return self._iface_list.get_build_id(self.app_id)
class InstalledApplications(_ApiResourceBase):
    """Exposes methods to get data on installed applications.

    Interface can be accessed through ``api.apps.installed``.

    .. warning::

        Restricted interface can only be used by approved apps.

    """
    def __init__(self, *args, **kwargs):
        self._iface = self.get_client().app_list
        super().__init__(*args, **kwargs)

    def __len__(self):
        """Returns a number of currently installed applications.

        :rtype: int
        """
        return self._iface.get_installed_count()

    def __call__(self):
        """Generator. Returns ``(app_id, Application)`` tuples for the
        currently installed applications.

        :rtype: tuple(int, Application)
        :return:
        """
        max_count = len(self)
        apps_ids = CRef.carray(int, size=max_count)
        total = self._iface.get_installed(apps_ids, max_count)
        # Bug fix: honour the count actually reported by the native call
        # instead of iterating the whole (possibly partially filled) buffer.
        for filled, app_id in enumerate(apps_ids):
            if filled >= total:
                break
            yield app_id, Application(app_id)

    def __iter__(self):
        return iter(self())
class Dlc(Application):
    """Accessor for downloadable content (DLC) data.

    Aliased as ``steampak.SteamDlc``.

    .. code-block:: python

        from steampak import SeamDlc

        # We use `Spacewar` DLC app ID. (Spacewar game is provided with SDK).
        my_dlc = SeamDlc(110902)

    Current application DLCs are available through ``CurrentApplication.dlcs``.

    """
    def __init__(self, app_id):
        self._iface = self.get_client().apps
        super().__init__(app_id)

        # Cached name/availability; populated when DLCs are enumerated.
        self._name = None
        self._available = None

    @property
    def installed(self):
        """``True`` if the user owns the DLC and the DLC is installed.

        :rtype: bool
        """
        return self._iface.get_is_dlc_installed(self.app_id)

    def install(self):
        """Installs DLC (for optional DLCs)."""
        self._iface.dlc_install(self.app_id)

    def uninstall(self):
        """Uninstalls DLC (for optional DLCs)."""
        self._iface.dlc_uninstall(self.app_id)

    def get_download_progress(self):
        """Download progress for an optional DLC as a
        ``(bytes_downloaded, bytes_total)`` tuple.

        :rtype: tuple
        """
        downloaded = CRef.cint()
        total = CRef.cint()

        if not self._iface.get_dlc_download_progress(self.app_id, downloaded, total):
            return 0, 0

        return int(downloaded), int(total)

    @property
    def name(self):
        """DLC name.

        :rtype: str
        """
        # Prefer the cached name; fall back to the parent's lookup.
        return self._name or super().name

    @property
    def available(self):
        """True if DLC is available.

        :rtype: bool
        """
        return self._available
class CurrentApplicationDlcs(_ApiResourceBase):
    """Exposes methods to get downloadable content (DLC) data
    for the current application.

    """
    def __init__(self, *args, **kwargs):
        self._iface = self.get_client().apps
        super().__init__(*args, **kwargs)

    def __len__(self):
        """Number of DLCs of the current application.

        :rtype: int
        :return:
        """
        return self._iface.get_dlc_count()

    def __call__(self):
        """Generator. Returns ``(app_id, Dlc)`` tuples.

        :rtype: tuple(int, Dlc)
        :return:
        """
        max_len = 300

        for index in range(len(self)):
            app_id = CRef.cint()
            available = CRef.cbool()
            name = CRef.carray(str, size=max_len)

            success = self._iface.get_dlc_by_index(index, app_id, available, name, max_len)
            if not success:
                # Skip entries the native call could not resolve.
                continue

            dlc_app_id = int(app_id)
            dlc = Dlc(dlc_app_id)

            # Cache data already fetched from the native call.
            dlc._name = str(name)
            dlc._available = bool(available)

            yield dlc_app_id, dlc

    def __iter__(self):
        return iter(self())
class CurrentApplication(Application):
"""Exposes methods to get current application data.
Interface can be accessed through ``api.apps.current``.
.. code-block:: python
from steampak import SteamApi
api = SteamApi(LIBRARY_PATH, app_id=APP_ID)
print(api.apps.current.language_current)
"""
dlcs: CurrentApplicationDlcs = None
"""Interface to DLCs of current application.
.. code-block:: python
for dlc_id, dlc in api.apps.current.dlcs():
print('%s: %s' % (dlc_id, dlc.name))
"""
achievements: CurrentApplicationAchievements = None
"""Current application (game) achievements.
.. code-block:: python
for ach_name, ach in api.apps.current.achievements():
print('%s: %s' % (ach_name, ach.title))
"""
def __init__(self, *args, **kwargs):
self._iface = self.get_client().apps
self._iface_utils = self.get_client().utils
super().__init__(None, *args, **kwargs)
self.dlcs = CurrentApplicationDlcs()
self.achievements = CurrentApplicationAchievements()
@property
def app_id(self):
# Overrode to support parent class methods.
return self._iface_utils.get_app_id()
@property
def beta_name(self):
"""Current beta branch name, 'public' is the default branch.
:rtype: str
"""
return self._get_str(self._iface.get_name_beta, [])
@property
def build_id(self):
"""Current application Build ID.
This may change at any time based on backend updates.
.. warning::
Restricted interface can only be used by approved apps.
:rtype: int
"""
return self._iface.get_current_build_id()
@property
def language_current(self):
"""Current game language.
E.g.: english
:rtype: str
"""
return self._iface.get_current_language()
@property
def language_available(self):
"""List of available game languages.
E.g.: ['english', 'russian']
:rtype: list[str]
"""
return self._iface.get_available_languages().split(',')
@property
def vac_banned(self):
"""``True`` if the current app is banned by BIsVACBanned.
:rtype: bool
"""
return self._iface.get_is_vac_banned()
@property
def mode_cybercafe(self):
"""``True`` if the current app supports Valve Cybercafe Program.
:rtype: bool
"""
return self._iface.get_is_cybercafe()
@property
def mode_free_weekend(self):
"""``True`` if the user is subscribed to the current app through a free weekend.
Will return ``False`` for users who have a retail or other type of license.
.. note::
Before using, please ask your Valve technical contact how to package and secure your free weekened.
:rtype: bool
"""
return self._iface.get_is_free_weekend()
@property
def low_violence(self):
"""``True`` if the current app is low violence.
:rtype: bool
"""
return self._iface.get_is_low_violence()
@property
def owned(self):
"""``True`` if user owns the current app.
:rtype: bool
"""
return self._iface.get_is_owned()
@property
def owner(self):
"""Owner user. If different from current user, app is borrowed.
:rtype: User
"""
return User(self._iface.get_owner())
def mark_corrupt(self, only_files_missing=False):
    """Signal Steam that game files seem corrupt or missing.

    :param bool only_files_missing: set to ``True`` if only files are missing
    :rtype: bool
    """
    return self._iface.mark_corrupt(only_files_missing)
class Applications(_ApiResourceBase):
    """Top-level access point for application-related data.

    Attributes:
        installed: interface to locally installed applications; iterate it
            to get ``(app_id, Application)`` pairs, e.g.::

                for app_id, app in api.apps.installed:
                    print('%s: %s' % (app_id, app.name))

        current: interface to the currently running application, e.g.::

            print(api.apps.current.language_current)
    """

    installed: InstalledApplications = None
    current: CurrentApplication = None

    def __init__(self, *args, **kwargs):
        self._iface = self.get_client().apps
        super().__init__(*args, **kwargs)
        self.installed = InstalledApplications()
        self.current = CurrentApplication()
| 24.799127 | 111 | 0.606797 | from datetime import datetime
from ctyped.types import CRef
from .base import _ApiResourceBase
from .stats import CurrentApplicationAchievements
from .user import User
class Application(_ApiResourceBase):
    """A Steam application, identified by its app ID."""

    def __init__(self, app_id, *args, **kwargs):
        steam_client = self.get_client()
        self._iface = steam_client.apps
        self._iface_list = steam_client.app_list
        super().__init__(*args, **kwargs)
        if app_id is not None:
            self.app_id = app_id

    def __str__(self):
        return self.name

    @property
    def owned(self):
        """``True`` when the user is subscribed to this app."""
        return self._iface.get_is_subscribed(self.app_id)

    @property
    def installed(self):
        """``True`` when this app is installed locally."""
        return self._iface.get_is_installed(self.app_id)

    @property
    def name(self):
        """Application name, read from the app-list interface."""
        return self._get_str(self._iface_list.get_name, [self.app_id])

    @property
    def install_dir(self):
        """Installation directory; falls back to the app-list interface."""
        buf_len = 500
        # `or` short-circuits exactly like the explicit fallback check:
        # the app-list lookup only runs when the first answer is empty.
        return self._get_str(
            self._iface.get_install_dir, [self.app_id], max_len=buf_len
        ) or self._get_str(
            self._iface_list.get_install_dir, [self.app_id], max_len=buf_len
        )

    @property
    def purchase_time(self):
        """Purchase time as a naive UTC ``datetime``."""
        timestamp = self._iface.get_purchase_time(self.app_id)
        return datetime.utcfromtimestamp(timestamp)

    @property
    def build_id(self):
        """Build ID of this application."""
        return self._iface_list.get_build_id(self.app_id)
class InstalledApplications(_ApiResourceBase):
    """Iterable interface to the applications installed on this machine."""

    def __init__(self, *args, **kwargs):
        self._iface = self.get_client().app_list
        super().__init__(*args, **kwargs)

    def __len__(self):
        """Number of installed applications."""
        return self._iface.get_installed_count()

    def __call__(self):
        """Generate ``(app_id, Application)`` tuples for installed apps."""
        max_count = len(self)
        apps_ids = CRef.carray(int, size=max_count)
        # The returned count is not needed: the buffer is sized from
        # get_installed_count(), so the whole array is iterated.
        # (Previously the count was bound to an unused local.)
        self._iface.get_installed(apps_ids, max_count)
        for app_id in apps_ids:
            yield app_id, Application(app_id)

    def __iter__(self):
        return iter(self())
class Dlc(Application):
    """A DLC (downloadable content) of a Steam application."""

    def __init__(self, app_id):
        # Application.__init__ already binds self._iface to client.apps,
        # so the previous duplicate assignment here was removed; also
        # use the Python 3 zero-argument super(), consistent with the
        # other classes in this module.
        super().__init__(app_id)
        self._name = None
        self._available = None

    @property
    def installed(self):
        """``True`` when this DLC is installed."""
        return self._iface.get_is_dlc_installed(self.app_id)

    def install(self):
        """Ask Steam to install this DLC."""
        self._iface.dlc_install(self.app_id)

    def uninstall(self):
        """Ask Steam to uninstall this DLC."""
        self._iface.dlc_uninstall(self.app_id)

    def get_download_progress(self):
        """Return the ``(downloaded, total)`` download progress.

        Returns ``(0, 0)`` when no progress information is available.

        :rtype: tuple[int, int]
        """
        downloaded = CRef.cint()
        total = CRef.cint()
        result = self._iface.get_dlc_download_progress(self.app_id, downloaded, total)
        if not result:
            return 0, 0
        return int(downloaded), int(total)

    @property
    def name(self):
        """DLC name; uses the cached value when the DLC came from a listing."""
        return self._name or super().name

    @property
    def available(self):
        """Availability flag cached by the DLC listing; ``None`` if unknown."""
        return self._available
class CurrentApplicationDlcs(_ApiResourceBase):
    """Iterable interface to the DLCs of the currently running application."""

    def __init__(self, *args, **kwargs):
        self._iface = self.get_client().apps
        super().__init__(*args, **kwargs)

    def __len__(self):
        # Number of DLCs the current app declares.
        return self._iface.get_dlc_count()

    def __call__(self):
        """Generate ``(app_id, Dlc)`` tuples for the current app's DLCs."""
        max_len = 300  # buffer size for the DLC name
        for idx in range(len(self)):
            # Out-parameters filled in by the API call below.
            app_id = CRef.cint()
            available = CRef.cbool()
            name = CRef.carray(str, size=max_len)
            if not self._iface.get_dlc_by_index(idx, app_id, available, name, max_len):
                # Skip entries the API could not resolve.
                continue
            app_id = int(app_id)
            dlc = Dlc(app_id)
            # Cache the name/availability gathered here so the Dlc object
            # does not need to issue extra API calls for them.
            dlc._name = str(name)
            dlc._available = bool(available)
            yield app_id, dlc

    def __iter__(self):
        return iter(self())
class CurrentApplication(Application):
    """Interface to the currently running application."""

    # Sub-interfaces, populated in __init__.
    dlcs: CurrentApplicationDlcs = None
    achievements: CurrentApplicationAchievements = None

    def __init__(self, *args, **kwargs):
        self._iface = self.get_client().apps
        self._iface_utils = self.get_client().utils
        super().__init__(None, *args, **kwargs)
        self.dlcs = CurrentApplicationDlcs()
        self.achievements = CurrentApplicationAchievements()

    @property
    def app_id(self):
        # Overrides the attribute set by Application.__init__ so that the
        # inherited helpers use the running application's ID.
        return self._iface_utils.get_app_id()

    @property
    def beta_name(self):
        """Current beta branch name; 'public' is the default branch."""
        return self._get_str(self._iface.get_name_beta, [])

    @property
    def build_id(self):
        """Build ID of the running application."""
        return self._iface.get_current_build_id()

    @property
    def language_current(self):
        """Language the game currently runs in, e.g. 'english'."""
        return self._iface.get_current_language()

    @property
    def language_available(self):
        """Available game languages, e.g. ['english', 'russian']."""
        return self._iface.get_available_languages().split(',')

    @property
    def vac_banned(self):
        """``True`` if the current app is VAC banned."""
        return self._iface.get_is_vac_banned()

    @property
    def mode_cybercafe(self):
        """``True`` if the current app supports the Valve Cybercafe Program."""
        return self._iface.get_is_cybercafe()

    @property
    def mode_free_weekend(self):
        """``True`` if the user runs the app through a free weekend subscription."""
        return self._iface.get_is_free_weekend()

    @property
    def low_violence(self):
        """``True`` if the current app is the low-violence edition."""
        return self._iface.get_is_low_violence()

    @property
    def owned(self):
        """``True`` if the user owns the current app."""
        return self._iface.get_is_owned()

    @property
    def owner(self):
        """Owning user; differs from the current user when the app is borrowed."""
        return User(self._iface.get_owner())

    def mark_corrupt(self, only_files_missing=False):
        """Signal Steam that game files seem corrupt or missing.

        :param bool only_files_missing: True if only files are missing
        :rtype: bool
        """
        return self._iface.mark_corrupt(only_files_missing)
class Applications(_ApiResourceBase):
    """Exposes methods to get applications data."""

    # Interface to installed applications; iterate for (app_id, Application).
    installed: InstalledApplications = None
    # Interface to the currently running application.
    current: CurrentApplication = None

    def __init__(self, *args, **kwargs):
        self._iface = self.get_client().apps
        super().__init__(*args, **kwargs)
        self.installed = InstalledApplications()
        self.current = CurrentApplication()
| true | true |
f710a363b900ea04622cbff2e29a0c3ee6a7036c | 44,133 | py | Python | jupytext/cli.py | sdrees/jupytext | 3b1eaa21d3d139444bdc278a0b696c363838e085 | [
"MIT"
] | 11 | 2018-06-15T12:12:11.000Z | 2018-08-25T14:01:52.000Z | jupytext/cli.py | sdrees/jupytext | 3b1eaa21d3d139444bdc278a0b696c363838e085 | [
"MIT"
] | 33 | 2018-06-17T01:16:10.000Z | 2018-08-30T16:09:02.000Z | jupytext/cli.py | sdrees/jupytext | 3b1eaa21d3d139444bdc278a0b696c363838e085 | [
"MIT"
] | 1 | 2018-07-20T06:52:12.000Z | 2018-07-20T06:52:12.000Z | """`jupytext` as a command line tool"""
import argparse
import glob
import json
import os
import re
import shlex
import subprocess
import sys
import warnings
from copy import copy
from tempfile import NamedTemporaryFile
from .combine import combine_inputs_with_outputs
from .compare import NotebookDifference, compare, test_round_trip_conversion
from .config import load_jupytext_config, notebook_formats
from .formats import (
_BINARY_FORMAT_OPTIONS,
_VALID_FORMAT_OPTIONS,
JUPYTEXT_FORMATS,
check_auto_ext,
check_file_version,
long_form_multiple_formats,
long_form_one_format,
short_form_one_format,
)
from .header import recursive_update
from .jupytext import create_prefix_dir, read, reads, write, writes
from .kernels import find_kernel_specs, get_kernel_spec, kernelspec_from_language
from .languages import _SCRIPT_EXTENSIONS
from .paired_paths import (
InconsistentPath,
base_path,
find_base_path_and_format,
full_path,
paired_paths,
)
from .pairs import latest_inputs_and_outputs, read_pair, write_pair
from .version import __version__
def system(*args, **kwargs):
    """Run the given command and return its captured stdout, decoded as UTF-8.

    Raises ``SystemExit`` with the command's return code when it fails.
    """
    kwargs.setdefault("stdout", subprocess.PIPE)
    process = subprocess.Popen(args, **kwargs)
    stdout, _ = process.communicate()
    if process.returncode:
        raise SystemExit(process.returncode)
    return stdout.decode("utf-8")
def str2bool(value):
    """Map a Yes/No/Default answer to ``True``/``False``/``None``.

    Case-insensitive; accepts 'yes', 'true', 't', 'y', '1' (True),
    'no', 'false', 'f', 'n', '0' (False) and 'd', 'default', '' (None).
    https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
    """
    normalized = value.lower()
    if normalized in {"yes", "true", "t", "y", "1"}:
        return True
    if normalized in {"no", "false", "f", "n", "0"}:
        return False
    if normalized in {"d", "default", ""}:
        return None
    raise argparse.ArgumentTypeError("Expected: (Y)es/(T)rue/(N)o/(F)alse/(D)efault")
def parse_jupytext_args(args=None):
    """Build the jupytext command line parser and parse ``args``.

    :param args: explicit list of arguments; ``None`` means ``sys.argv[1:]``
    :return: the parsed ``argparse.Namespace``
    """
    parser = argparse.ArgumentParser(
        description="Jupyter Notebooks as Markdown Documents, Julia, Python or R Scripts",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    # Input
    parser.add_argument(
        "notebooks",
        help="One or more notebook(s). "
        "Notebook is read from stdin when this argument is empty.",
        nargs="*",
    )
    parser.add_argument(
        "--from",
        dest="input_format",
        help="Jupytext format for the input(s). Inferred from the "
        "file extension and content when missing.",
    )
    # Destination format & act on metadata
    parser.add_argument(
        "--to",
        dest="output_format",
        help=(
            "The destination format: 'ipynb', 'markdown' or 'script', or a file extension: "
            "'md', 'Rmd', 'jl', 'py', 'R', ..., 'auto' (script extension matching the notebook language), "
            "or a combination of an extension and a format name, e.g. {} ".format(
                ", ".join(
                    {
                        f"md:{fmt.format_name}"
                        for fmt in JUPYTEXT_FORMATS
                        if fmt.extension == ".md"
                    }
                )
            )
            + " or {}. ".format(
                ", ".join(
                    {
                        f"py:{fmt.format_name}"
                        for fmt in JUPYTEXT_FORMATS
                        if fmt.extension == ".py"
                    }
                )
            )
            + "The default format for scripts is the 'light' format, "
            "which uses few cell markers (none when possible). "
            "Alternatively, a format compatible with many editors is the "
            "'percent' format, which uses '# %%%%' as cell markers. "
            "The main formats (markdown, light, percent) preserve "
            "notebooks and text documents in a roundtrip. Use the "
            "--test and and --test-strict commands to test the roundtrip on your files. "
            "Read more about the available formats at "
            "https://jupytext.readthedocs.io/en/latest/formats.html"
        ),
    )

    # Destination file
    parser.add_argument(
        "-o",
        "--output",
        help="Destination file. Defaults to the original file, "
        "with prefix/suffix/extension changed according to "
        "the destination format. "
        "Use '-' to print the notebook on stdout.",
    )
    parser.add_argument(
        "--update",
        action="store_true",
        help="Preserve the output cells when the destination "
        "notebook is an .ipynb file that already exists",
    )

    parser.add_argument(
        "--set-formats",
        type=str,
        help="Turn the notebook or text document to one or more alternative representations "
        "with e.g. '--set-formats ipynb,py:light'. "
        "The --set-formats option also triggers the creation/update of all paired files",
    )

    # Action: convert(default)/version/list paired paths/sync/apply/test
    # Mutually exclusive: only one action may be selected per invocation.
    action = parser.add_mutually_exclusive_group()
    action.add_argument(
        "--sync",
        "-s",
        help="Synchronize the content of the paired representations of "
        "the given notebook. Input cells are taken from the file that "
        "was last modified, and outputs are read from the ipynb file, "
        "if present.",
        action="store_true",
    )
    action.add_argument(
        "--paired-paths",
        "-p",
        help="List the locations of the alternative representations for this notebook.",
        action="store_true",
    )
    parser.add_argument(
        "--format-options",
        "--opt",
        action="append",
        help="Set format options with e.g. "
        "'--opt comment_magics=true' or '--opt notebook_metadata_filter=-kernelspec'.",
    )
    parser.add_argument(
        "--update-metadata",
        default={},
        type=json.loads,
        help="Update the notebook metadata with the desired dictionary. "
        "Argument must be given in JSON format. For instance, if you "
        "want to activate a pairing in the generated file, use e.g. "
        """--update-metadata '{"jupytext":{"formats":"ipynb,py:light"}}' """
        "See also the --opt and --set-formats options for other ways "
        "to operate on the Jupytext metadata.",
    )
    parser.add_argument(
        "--use-source-timestamp",
        help="Set the modification timestamp of the output file(s) equal"
        "to that of the source file, and keep the source file and "
        "its timestamp unchanged.",
        action="store_true",
    )
    parser.add_argument(
        "--warn-only",
        "-w",
        action="store_true",
        help="Only issue a warning and continue processing other notebooks "
        "when the conversion of a given notebook fails",
    )
    action.add_argument(
        "--test",
        action="store_true",
        help="Test that the notebook is stable under a round trip conversion, "
        "up to the expected changes",
    )
    action.add_argument(
        "--test-strict",
        action="store_true",
        help="Test that the notebook is strictly stable under a round trip conversion",
    )
    parser.add_argument(
        "--stop",
        "-x",
        dest="stop_on_first_error",
        action="store_true",
        help="In --test mode, stop on first round trip conversion error, and report stack traceback",
    )

    # Pipe notebook inputs into other commands
    parser.add_argument(
        "--pipe",
        action="append",
        help="Pipe the text representation (in format --pipe-fmt) of the notebook into "
        "another program, and read the notebook back. For instance, reformat "
        "your notebook with: "
        "'jupytext notebook.ipynb --pipe black' "
        "If you want to reformat it and sync the paired representation, execute: "
        "'jupytext notebook.ipynb --sync --pipe black' "
        "In case the program that you want to execute does not accept pipes, use {} "
        "as a placeholder for a temporary file name into which jupytext will "
        "write the text representation of the notebook, e.g.: "
        "jupytext notebook.ipynb --pipe 'black {}'",
    )
    parser.add_argument(
        "--diff",
        "-d",
        action="store_true",
        help="Show the differences between (the inputs) of two notebooks",
    )
    parser.add_argument(
        "--diff-format",
        help="The text format used to show differences in --diff",
    )
    parser.add_argument(
        "--check",
        action="append",
        help="Pipe the text representation (in format --pipe-fmt) of the notebook into "
        "another program, and test that the returned value is non zero. For "
        "instance, test that your notebook is pep8 compliant with: "
        "'jupytext notebook.ipynb --check flake8' "
        "or run pytest on your notebook with: "
        "'jupytext notebook.ipynb --check pytest' "
        "In case the program that you want to execute does not accept pipes, use {} "
        "as a placeholder for a temporary file name into which jupytext will "
        "write the text representation of the notebook, e.g.: "
        "jupytext notebook.ipynb --check 'pytest {}'",
    )
    parser.add_argument(
        "--pipe-fmt",
        default="auto:percent",
        help="The format in which the notebook should be piped to other programs, "
        "when using the --pipe and/or --check commands.",
    )

    # Execute the notebook
    parser.add_argument(
        "--set-kernel",
        "-k",
        type=str,
        help="Set the kernel with the given name on the notebook. "
        "Use '--set-kernel -' to set a kernel matching the current "
        "environment on Python notebooks, and matching the notebook "
        "language otherwise (get the list of available kernels with "
        "'jupyter kernelspec list')",
    )
    parser.add_argument(
        "--execute",
        action="store_true",
        help="Execute the notebook with the given kernel. In the "
        "--pre-commit-mode, the notebook is executed only if a code "
        "cell changed, or if some execution outputs are missing "
        "or not ordered.",
    )
    parser.add_argument(
        "--run-path",
        type=str,
        help="Execute the notebook at the given path (defaults to the notebook parent directory)",
    )

    parser.add_argument(
        "--quiet",
        "-q",
        action="store_true",
        help="Quiet mode: do not comment about files being updated or created",
    )
    parser.add_argument(
        "--show-changes",
        action="store_true",
        help="Display the diff for each output file",
    )

    action.add_argument(
        "--version",
        "-v",
        action="store_true",
        help="Show jupytext's version number and exit",
    )

    parser.add_argument(
        "--pre-commit",
        action="store_true",
        help="Ignore the notebook argument, and instead apply Jupytext "
        "on the notebooks found in the git index, which have an "
        "extension that matches the (optional) --from argument.",
    )
    parser.add_argument(
        "--pre-commit-mode",
        action="store_true",
        help="This is a mode that is compatible with the pre-commit framework. "
        "In this mode, --sync won't use timestamp but instead will "
        "determines the source notebook as the element of the pair "
        "that is added to the git index. An alert is raised if multiple inconsistent representations are "
        "in the index. It also raises an alert after updating the paired files or outputs if those "
        "files need to be added to the index. Finally, filepaths that aren't in the source format "
        "you are trying to convert from are ignored.",
    )

    return parser.parse_args(args)
def jupytext(args=None):
    """Entry point for the jupytext script.

    Parses the command line, validates the combination of options, handles
    the standalone actions (--version, --paired-paths, --diff), and then
    applies ``jupytext_single_file`` to every notebook argument.

    :param args: explicit list of arguments; ``None`` means ``sys.argv[1:]``
    :return: accumulated per-file exit code (0 means success)
    """
    args = parse_jupytext_args(args)

    def log(text):
        # Progress messages go to stdout unless --quiet was given.
        if not args.quiet:
            sys.stdout.write(text + "\n")

    if args.version:
        log(__version__)
        return 0

    if args.pre_commit:
        warnings.warn(
            "The --pre-commit argument is deprecated. "
            "Please consider switching to the pre-commit.com framework "
            "(let us know at https://github.com/mwouts/jupytext/issues "
            "if that is an issue for you)",
            DeprecationWarning,
        )
        if args.notebooks:
            raise ValueError(
                "--pre-commit takes notebooks from the git index. Do not pass any notebook here."
            )

        args.notebooks = notebooks_in_git_index(args.input_format)
        log("[jupytext] Notebooks in git index are:")
        for nb_file in args.notebooks:
            log(nb_file)

    # Read notebook from stdin
    if not args.notebooks:
        if not args.pre_commit:
            args.notebooks = ["-"]

    if args.set_formats is not None:
        # Replace empty string with None
        args.update_metadata = recursive_update(
            args.update_metadata, {"jupytext": {"formats": args.set_formats or None}}
        )
        # --set-formats implies synchronizing all paired representations.
        args.sync = True

    if args.paired_paths:
        if len(args.notebooks) != 1:
            raise ValueError("--paired-paths applies to a single notebook")
        print_paired_paths(args.notebooks[0], args.input_format)
        return 1

    if args.run_path:
        args.execute = True

    if (
        (args.test or args.test_strict)
        and not args.output_format
        and not args.output
        and not args.sync
    ):
        raise ValueError("Please provide one of --to, --output or --sync")

    if (
        not args.output_format
        and not args.output
        and not args.sync
        and not args.pipe
        and not args.diff
        and not args.check
        and not args.update_metadata
        and not args.format_options
        and not args.set_kernel
        and not args.execute
    ):
        raise ValueError(
            "Please provide one of --to, --output, --set-formats, --sync, --pipe, --diff, "
            "--check, --update-metadata, --format-options, --set-kernel or --execute"
        )

    if args.diff:
        # --diff is exclusive of every conversion option and needs
        # exactly two notebooks to compare.
        if (
            len(args.notebooks) != 2
            or args.output_format
            or args.output
            or args.sync
            or args.pipe
            or args.check
            or args.update_metadata
            or args.format_options
            or args.set_kernel
            or args.execute
        ):
            raise ValueError(
                "Please provide two notebooks after 'jupytext --diff'.\n"
                "NB: Use --show-changes if you wish to see the changes in "
                "a notebook being updated by Jupytext."
            )

        nb_file1, nb_file2 = args.notebooks
        nb1 = read(nb_file1)
        nb2 = read(nb_file2)

        def fmt_if_not_ipynb(nb):
            # Short format of a notebook's text representation, or None
            # when that representation is an ipynb document.
            fmt = nb.metadata["jupytext"]["text_representation"]
            if fmt["extension"] == ".ipynb":
                return None
            return short_form_one_format(fmt)

        diff_fmt = (
            args.diff_format or fmt_if_not_ipynb(nb1) or fmt_if_not_ipynb(nb2) or "md"
        )

        diff = compare(
            writes(nb2, diff_fmt),
            writes(nb1, diff_fmt),
            nb_file2,
            nb_file1,
            return_diff=True,
        )
        sys.stdout.write(diff)
        return

    if args.output and len(args.notebooks) != 1:
        raise ValueError("Please input a single notebook when using --output")

    # Warn if '--to' is used in place of '--output'
    if (
        not args.output
        and args.output_format
        and "." in args.output_format
        # a suffix is expected to start with one of these characters #901
        and not args.output_format.startswith((".", "-", "_"))
        and "//" not in args.output_format
    ):

        def single_line(msg, *args, **kwargs):
            return f"[warning] {msg}\n"

        warnings.formatwarning = single_line
        warnings.warn(
            "You might have passed a file name to the '--to' option, "
            "when a format description was expected. Maybe you want to use the '-o' option instead?"
        )

    if args.input_format:
        args.input_format = long_form_one_format(args.input_format)

    if args.output_format:
        args.output_format = long_form_one_format(args.output_format)
        set_format_options(args.output_format, args.format_options)

    # Wildcard extension on Windows #202
    notebooks = []
    for pattern in args.notebooks:
        if "*" in pattern or "?" in pattern:
            # Exclude the .jupytext.py configuration file
            notebooks.extend(glob.glob(pattern, recursive=True))
        else:
            notebooks.append(pattern)

    # Count how many file have round-trip issues when testing
    exit_code = 0
    for nb_file in notebooks:
        if not args.warn_only:
            exit_code += jupytext_single_file(nb_file, args, log)
        else:
            try:
                exit_code += jupytext_single_file(nb_file, args, log)
            except Exception as err:
                sys.stderr.write(f"[jupytext] Error: {str(err)}\n")

    return exit_code
def jupytext_single_file(nb_file, args, log):
    """Apply the jupytext command, with given arguments, to a single file.

    :param nb_file: path of the notebook, or '-' for stdin
    :param args: parsed command line arguments (``argparse.Namespace``)
    :param log: a callable used to report progress messages
    :return: non-zero on error: 1 when a round-trip test failed, otherwise
        the number of updated files that still need to be added to the git
        index (--pre-commit-mode); 0 on success
    """
    if nb_file == "-" and args.sync:
        msg = "Missing notebook path."
        if args.set_formats is not None and os.path.isfile(args.set_formats):
            msg += f" Maybe you mean 'jupytext --sync {args.set_formats}' ?"
        raise ValueError(msg)

    nb_dest = None
    if args.output:
        nb_dest = args.output
    elif nb_file == "-":
        nb_dest = "-"
    else:
        try:
            bp = base_path(nb_file, args.input_format)
        except InconsistentPath:
            if args.pre_commit_mode:
                # In pre-commit mode, files that don't match --from are skipped.
                log(
                    "[jupytext] Ignoring unmatched input path {}{}".format(
                        nb_file,
                        f" for format {args.input_format}" if args.input_format else "",
                    )
                )
                return 0
            raise
        if args.output_format:
            nb_dest = full_path(bp, args.output_format)

    config = load_jupytext_config(os.path.abspath(nb_file))

    # Just acting on metadata / pipe => save in place
    save_in_place = not nb_dest and not args.sync
    if save_in_place:
        nb_dest = nb_file

    if nb_dest == "-":
        # Don't mix progress messages with the notebook printed on stdout.
        args.quiet = True

    # I. ### Read the notebook ###
    fmt = copy(args.input_format) or {}
    if not fmt:
        ext = os.path.splitext(nb_file)[1]
        if ext:
            fmt = {"extension": ext}
    if fmt:
        set_format_options(fmt, args.format_options)
    log(
        "[jupytext] Reading {}{}".format(
            nb_file if nb_file != "-" else "stdin",
            f" in format {short_form_one_format(fmt)}" if "extension" in fmt else "",
        )
    )

    notebook = read(nb_file, fmt=fmt, config=config)
    if "extension" in fmt and "format_name" not in fmt:
        # Complete fmt with the format name recorded in the notebook metadata.
        text_representation = notebook.metadata.get("jupytext", {}).get(
            "text_representation", {}
        )
        if text_representation.get("extension") == fmt["extension"]:
            fmt["format_name"] = text_representation["format_name"]

    # Compute actual extension when using script/auto, and update nb_dest if necessary
    dest_fmt = args.output_format
    if dest_fmt and dest_fmt["extension"] == ".auto":
        dest_fmt = check_auto_ext(dest_fmt, notebook.metadata, "--to")
        if not args.output and nb_file != "-":
            nb_dest = full_path(base_path(nb_file, args.input_format), dest_fmt)

    # Set the kernel
    set_kernel = args.set_kernel
    if (
        (not set_kernel)
        and args.execute
        and notebook.metadata.get("kernelspec", {}).get("name") is None
    ):
        # Execution requires a kernel; infer one from the notebook language.
        set_kernel = "-"

    if set_kernel:
        if set_kernel == "-":
            language = (
                notebook.metadata.get("jupytext", {}).get("main_language")
                or notebook.metadata["kernelspec"]["language"]
            )

            if not language:
                raise ValueError(
                    "Cannot infer a kernel as notebook language is not defined"
                )

            kernelspec = kernelspec_from_language(language)
        else:
            try:
                kernelspec = get_kernel_spec(set_kernel)
            except KeyError as err:
                raise KeyError(
                    "Please choose a kernel name among {}".format(
                        find_kernel_specs().keys()
                    )
                ) from err

            kernelspec = {
                "name": args.set_kernel,
                "language": kernelspec.language,
                "display_name": kernelspec.display_name,
            }

        log("[jupytext] Setting kernel {}".format(kernelspec.get("name")))
        args.update_metadata["kernelspec"] = kernelspec

    # Are we updating a text file that has a metadata filter? #212
    if args.update_metadata or args.format_options:
        if (
            notebook.metadata.get("jupytext", {}).get("notebook_metadata_filter")
            == "-all"
        ):
            notebook.metadata.get("jupytext", {}).pop("notebook_metadata_filter")

    # Update the metadata
    if args.update_metadata:
        log(
            "[jupytext] Updating notebook metadata with '{}'".format(
                json.dumps(args.update_metadata)
            )
        )

        if (
            "kernelspec" in args.update_metadata
            and "main_language" in notebook.metadata.get("jupytext", {})
        ):
            # A kernelspec makes the recorded main language redundant.
            notebook.metadata["jupytext"].pop("main_language")

        recursive_update(notebook.metadata, args.update_metadata)

    # Read paired notebooks, except if the pair is being created
    nb_files = [nb_file, nb_dest]
    if args.sync:
        formats = notebook_formats(
            notebook, config, nb_file, fallback_on_current_fmt=False
        )
        set_prefix_and_suffix(fmt, formats, nb_file)
        if args.set_formats is None:
            try:
                notebook, inputs_nb_file, outputs_nb_file = load_paired_notebook(
                    notebook, fmt, config, formats, nb_file, log, args.pre_commit_mode
                )
                nb_files = [inputs_nb_file, outputs_nb_file]
            except NotAPairedNotebook as err:
                sys.stderr.write("[jupytext] Warning: " + str(err) + "\n")
                return 0
            except InconsistentVersions as err:
                sys.stderr.write("[jupytext] Error: " + str(err) + "\n")
                return 1
        else:
            nb_files = [nb_file]

    # II. ### Apply commands onto the notebook ###
    # Pipe the notebook into the desired commands
    prefix = None if nb_file == "-" else os.path.splitext(os.path.basename(nb_file))[0]
    for cmd in args.pipe or []:
        notebook = pipe_notebook(
            notebook, cmd, args.pipe_fmt, prefix=prefix, warn_only=args.warn_only
        )

    # and/or test the desired commands onto the notebook
    for cmd in args.check or []:
        pipe_notebook(
            notebook,
            cmd,
            args.pipe_fmt,
            update=False,
            prefix=prefix,
            warn_only=args.warn_only,
        )

    if (
        args.execute
        and args.pre_commit_mode
        and execution_counts_are_in_order(notebook)
        and not code_cells_have_changed(notebook, nb_files)
    ):
        log(
            f"[jupytext] Execution of {shlex.quote(nb_file)} "
            f"skipped as code cells have not changed and outputs are present."
        )
        args.execute = False

    # Execute the notebook
    if args.execute:
        kernel_name = notebook.metadata.get("kernelspec", {}).get("name")
        log(f"[jupytext] Executing notebook with kernel {kernel_name}")

        if nb_dest is not None and nb_dest != "-":
            nb_path = os.path.dirname(nb_dest)
        elif nb_file != "-":
            nb_path = os.path.dirname(nb_file)
        else:
            nb_path = None

        run_path = args.run_path or nb_path
        if args.run_path and not os.path.isdir(run_path):
            # is this a relative directory?
            for base_dir in [nb_path, os.getcwd()]:
                try_path = os.path.join(base_dir, run_path)
                if os.path.isdir(try_path):
                    run_path = try_path
                    break
            if not os.path.isdir(run_path):
                raise ValueError(f"--run-path={args.run_path} is not a valid path")

        if run_path:
            resources = {"metadata": {"path": run_path}}
        else:
            resources = {}

        try:
            from nbconvert.preprocessors import ExecutePreprocessor

            exec_proc = ExecutePreprocessor(timeout=None, kernel_name=kernel_name)
            exec_proc.preprocess(notebook, resources=resources)
        except (ImportError, RuntimeError) as err:
            if args.pre_commit_mode:
                raise RuntimeError(
                    "An error occurred while executing the notebook. Please "
                    "make sure that you have listed 'nbconvert' and 'ipykernel' "
                    "under 'additional_dependencies' in the jupytext hook."
                ) from err
            raise RuntimeError(
                "An error occurred while executing the notebook. Please "
                "make sure that 'nbconvert' and 'ipykernel' are installed."
            ) from err

    # III. ### Possible actions ###
    # a. Test round trip conversion
    if args.test or args.test_strict:
        try:
            # Round trip from an ipynb document
            if fmt["extension"] == ".ipynb":
                test_round_trip_conversion(
                    notebook,
                    dest_fmt,
                    update=args.update,
                    allow_expected_differences=not args.test_strict,
                    stop_on_first_error=args.stop_on_first_error,
                )

            # Round trip from a text file
            else:
                with open(nb_file, encoding="utf-8") as fp:
                    org_text = fp.read()

                # If the destination is not ipynb, we convert to/back that format
                if dest_fmt["extension"] != ".ipynb":
                    dest_text = writes(notebook, fmt=dest_fmt)
                    notebook = reads(dest_text, fmt=dest_fmt)

                text = writes(notebook, fmt=fmt, config=config)

                if args.test_strict:
                    compare(text, org_text)
                else:
                    # we ignore the YAML header in the comparison #414
                    comment = _SCRIPT_EXTENSIONS.get(fmt["extension"], {}).get(
                        "comment", ""
                    )
                    # white spaces between the comment char and the YAML delimiters are allowed
                    if comment:
                        comment = comment + r"\s*"
                    yaml_header = re.compile(
                        r"^{comment}---\s*\n.*\n{comment}---\s*\n".format(
                            comment=comment
                        ),
                        re.MULTILINE | re.DOTALL,
                    )
                    compare(
                        re.sub(yaml_header, "", text), re.sub(yaml_header, "", org_text)
                    )

        except (NotebookDifference, AssertionError) as err:
            sys.stdout.write(f"{nb_file}: {str(err)}")
            return 1
        return 0

    # b. Output to the desired file or format
    untracked_files = 0

    def lazy_write(path, fmt=None, action=None, update_timestamp_only=False):
        """Write the notebook only if it has changed"""
        if path == "-":
            write(notebook, "-", fmt=fmt)
            return

        nonlocal untracked_files
        if update_timestamp_only:
            modified = False
        else:
            _, ext = os.path.splitext(path)
            fmt = copy(fmt or {})
            fmt = long_form_one_format(fmt, update={"extension": ext})
            new_content = writes(notebook, fmt=fmt, config=config)
            diff = None
            if not new_content.endswith("\n"):
                new_content += "\n"
            if not os.path.isfile(path):
                modified = True
            else:
                with open(path, encoding="utf-8") as fp:
                    current_content = fp.read()
                modified = new_content != current_content
                if modified and args.show_changes:
                    diff = compare(
                        new_content,
                        current_content,
                        "",
                        "",
                        return_diff=True,
                    )

        if modified:
            # The text representation of the notebook has changed, we write it on disk
            if action is None:
                message = f"[jupytext] Updating {shlex.quote(path)}"
            else:
                message = "[jupytext] Writing {path}{format}{action}".format(
                    path=shlex.quote(path),
                    format=" in format " + short_form_one_format(fmt)
                    if fmt and "format_name" in fmt
                    else "",
                    action=action,
                )
            if diff is not None:
                message += " with this change:\n" + diff

            log(message)
            create_prefix_dir(path, fmt)
            with open(path, "w", encoding="utf-8") as fp:
                fp.write(new_content)

        # Otherwise, we only update the timestamp of the text file to make sure
        # they remain more recent than the ipynb file, for compatibility with the
        # Jupytext contents manager for Jupyter
        if args.use_source_timestamp:
            log(
                f"[jupytext] Setting the timestamp of {shlex.quote(path)} equal to that of {shlex.quote(nb_file)}"
            )
            os.utime(path, (os.stat(path).st_atime, os.stat(nb_file).st_mtime))
        elif not modified and not path.endswith(".ipynb"):
            log(f"[jupytext] Updating the timestamp of {shlex.quote(path)}")
            os.utime(path, None)

        if args.pre_commit:
            system("git", "add", path)

        if args.pre_commit_mode and is_untracked(path):
            log(
                f"[jupytext] Error: the git index is outdated.\n"
                f"Please add the paired notebook with:\n"
                f" git add {shlex.quote(path)}"
            )
            untracked_files += 1

        return

    if nb_dest:
        if nb_dest == nb_file and not dest_fmt:
            dest_fmt = fmt

        # Test consistency between dest name and output format
        if dest_fmt and nb_dest != "-":
            base_path(nb_dest, dest_fmt)

        # Describe what jupytext is doing
        if save_in_place:
            action = ""
        elif os.path.isfile(nb_dest) and args.update:
            if not nb_dest.endswith(".ipynb"):
                raise ValueError("--update is only for ipynb files")
            action = " (destination file updated)"
            check_file_version(notebook, nb_file, nb_dest)
            notebook = combine_inputs_with_outputs(notebook, read(nb_dest), fmt=fmt)
        elif os.path.isfile(nb_dest):
            suggest_update = (
                " [use --update to preserve cell outputs and ids]"
                if nb_dest.endswith(".ipynb")
                else ""
            )
            action = f" (destination file replaced{suggest_update})"
        else:
            action = ""

        formats = notebook.metadata.get("jupytext", {}).get("formats")
        formats = long_form_multiple_formats(formats)
        if formats:
            try:
                base_path_out, _ = find_base_path_and_format(nb_dest, formats)
            except InconsistentPath:
                # Drop 'formats' if the destination is not part of the paired notebooks
                formats = {}
                notebook.metadata.get("jupytext", {}).pop("formats")

        lazy_write(nb_dest, fmt=dest_fmt, action=action)

        nb_dest_in_pair = formats and any(
            os.path.exists(alt_path) and os.path.samefile(nb_dest, alt_path)
            for alt_path, _ in paired_paths(nb_file, fmt, formats)
        )

        if (
            nb_dest_in_pair
            and os.path.isfile(nb_file)
            and not nb_file.endswith(".ipynb")
            and os.path.isfile(nb_dest)
            and nb_dest.endswith(".ipynb")
        ):
            # If the destination is an ipynb file and is in the pair, then we
            # update the original text file timestamp, as required by our Content Manager
            # Otherwise Jupyter will refuse to open the paired notebook #335
            # NB: An alternative is --use-source-timestamp
            lazy_write(nb_file, update_timestamp_only=True)

    # c. Synchronize paired notebooks
    elif args.sync:
        write_pair(nb_file, formats, lazy_write)

    return untracked_files
def notebooks_in_git_index(fmt):
    """Return the added/modified files in the git index that match the given format.

    Note: despite the historical wording, the porcelain pattern below matches
    added ('A') and modified ('M') entries, not deleted ones.

    :param fmt: a (possibly None) Jupytext format description used as a filter
    :return: list of matching file paths
    """
    git_status = system("git", "status", "--porcelain")
    staged_pattern = re.compile(r"^[AM]+\s+(?P<name>.*)", re.MULTILINE)
    files = []
    for nb_file in staged_pattern.findall(git_status):
        # git quotes paths that contain special characters
        if nb_file.startswith('"') and nb_file.endswith('"'):
            nb_file = nb_file[1:-1]
        try:
            # base_path raises InconsistentPath when the file does not
            # match the requested format
            base_path(nb_file, fmt)
        except InconsistentPath:
            continue
        files.append(nb_file)
    return files
def is_untracked(filepath):
    """Check whether a file was created or modified and needs to be added to the git index"""
    if not filepath:
        return False
    # Unknown to git entirely?
    if not system("git", "ls-files", filepath).strip():
        return True
    # Known to git, but carrying unstaged modifications?
    return bool(system("git", "diff", filepath).strip())
def print_paired_paths(nb_file, fmt):
    """Display the paired paths for this notebook"""
    nb = read(nb_file, fmt=fmt)
    fmts = nb.metadata.get("jupytext", {}).get("formats")
    if not fmts:
        return
    for alt_path, _ in paired_paths(nb_file, fmt, fmts):
        # The notebook itself is not listed, only its companions
        if alt_path != nb_file:
            sys.stdout.write(alt_path + "\n")
def set_format_options(fmt, format_options):
    """Apply the desired format options to the format description fmt.

    format_options is an iterable of 'key=value' strings; each key must be a
    valid jupytext format option. Binary options are parsed with str2bool.
    Raises ValueError on a malformed option or an unknown key.
    """
    if not format_options:
        return
    for opt in format_options:
        try:
            # Split on the first '=' only, so that option values may
            # themselves contain '=' (e.g. metadata filter expressions)
            key, value = opt.split("=", 1)
        except ValueError as err:
            raise ValueError(
                "Format options are expected to be of the form key=value, not '{}'".format(
                    opt
                )
            ) from err
        if key not in _VALID_FORMAT_OPTIONS:
            raise ValueError(
                "'{}' is not a valid format option. Expected one of '{}'".format(
                    key, "', '".join(_VALID_FORMAT_OPTIONS)
                )
            )
        # Boolean options accept yes/no/true/false/default spellings
        if key in _BINARY_FORMAT_OPTIONS:
            value = str2bool(value)
        fmt[key] = value
def set_prefix_and_suffix(fmt, formats, nb_file):
    """Add prefix and suffix information from jupytext.formats if format and path matches"""
    target_ext = fmt["extension"]
    target_name = fmt.get("format_name")
    for candidate in long_form_multiple_formats(formats):
        # Only consider pair entries with the same extension and format name
        if candidate["extension"] != target_ext:
            continue
        if candidate.get("format_name") != target_name:
            continue
        try:
            base_path(nb_file, candidate)
        except InconsistentPath:
            # The prefix/suffix of this entry does not match the file path
            continue
        fmt.update(candidate)
        return
class NotAPairedNotebook(ValueError):
    """Raised when an operation that requires paired formats is applied to an unpaired notebook"""
class InconsistentVersions(ValueError):
    """Raised when two paired files in the git index hold diverging notebook contents"""
def file_in_git_index(path):
    """Return True when the file is staged (added or modified) in the git index"""
    # A path that does not exist on disk cannot be staged
    if not os.path.isfile(path):
        return False
    status = system("git", "status", "--porcelain", path).strip()
    # Porcelain lines start with 'A' (added) or 'M' (modified) when staged
    return status.startswith(("M", "A"))
def git_timestamp(path):
    """Return a sort key for the file's recency as seen by git.

    Staged files compare as infinitely recent; committed files use the last
    commit timestamp; untracked files fall back to the filesystem mtime.
    Returns None when path is not a regular file.
    """
    if not os.path.isfile(path):
        return None
    # Files in the git index always win the recency comparison
    if file_in_git_index(path):
        return float("inf")
    try:
        last_commit_ts = system("git", "log", "-1", "--pretty=%ct", path).strip()
    except SystemExit as err:
        if err.code != 128:
            raise
        # Exit code 128: not inside a git repository
        last_commit_ts = ""
    if last_commit_ts:
        return float(last_commit_ts)
    # The file is not tracked by git
    return get_timestamp(path)
def get_timestamp(path):
    """Return the modification time of path (not following symlinks), or None if not a file"""
    return os.lstat(path).st_mtime if os.path.isfile(path) else None
def load_paired_notebook(notebook, fmt, config, formats, nb_file, log, pre_commit_mode):
    """Update the notebook with the inputs and outputs of the most recent paired files"""
    if not formats:
        raise NotAPairedNotebook(f"{shlex.quote(nb_file)} is not a paired notebook")
    formats = long_form_multiple_formats(formats)
    _, fmt_with_prefix_suffix = find_base_path_and_format(nb_file, formats)
    fmt.update(fmt_with_prefix_suffix)
    def read_one_file(path, fmt):
        # Reuse the notebook that the caller already loaded rather than re-reading it
        if path == nb_file:
            return notebook
        log(f"[jupytext] Loading {shlex.quote(path)}")
        return read(path, fmt=fmt, config=config)
    if pre_commit_mode and file_in_git_index(nb_file):
        # We raise an error if two representations of this notebook in the git index are inconsistent
        nb_files_in_git_index = sorted(
            (
                (alt_path, alt_fmt)
                for alt_path, alt_fmt in paired_paths(nb_file, fmt, formats)
                if file_in_git_index(alt_path)
            ),
            # Text representations sort before the .ipynb file, so the text file
            # becomes the reference (path0) that the others are compared against
            key=lambda x: 0 if x[1]["extension"] != ".ipynb" else 1,
        )
        if len(nb_files_in_git_index) > 1:
            path0, fmt0 = nb_files_in_git_index[0]
            with open(path0, encoding="utf-8") as fp:
                text0 = fp.read()
            # Render every other staged representation in the reference format
            # and require a byte-identical match
            for alt_path, alt_fmt in nb_files_in_git_index[1:]:
                nb = read(alt_path, fmt=alt_fmt, config=config)
                alt_text = writes(nb, fmt=fmt0, config=config)
                if alt_text != text0:
                    diff = compare(alt_text, text0, alt_path, path0, return_diff=True)
                    raise InconsistentVersions(
                        f"{shlex.quote(alt_path)} and {shlex.quote(path0)} are inconsistent.\n"
                        + diff
                        + f"\nPlease revert JUST ONE of the files with EITHER\n"
                        f"  git reset {shlex.quote(alt_path)} && git checkout -- {shlex.quote(alt_path)}\nOR\n"
                        f"  git reset {shlex.quote(path0)} && git checkout -- {shlex.quote(path0)}\n"
                    )
    # In pre-commit mode the git index decides which pair member is most recent;
    # otherwise the filesystem timestamps do
    inputs, outputs = latest_inputs_and_outputs(
        nb_file, fmt, formats, git_timestamp if pre_commit_mode else get_timestamp
    )
    notebook = read_pair(inputs, outputs, read_one_file)
    return notebook, inputs.path, outputs.path
def exec_command(command, input=None, capture=False, warn_only=False):
    """Execute the desired command, and pipe the given input into it.

    Returns the captured stdout bytes when pipes were created, else None.
    Raises SystemExit with the command's return code on failure, unless
    warn_only is True (in which case only a warning is printed).
    """
    assert isinstance(command, list)
    sys.stdout.write("[jupytext] Executing {}\n".format(" ".join(command)))
    popen_kwargs = {}
    if input is not None:
        # Create pipes only when we feed stdin; otherwise the child's output
        # goes straight to the console
        popen_kwargs = dict(stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    process = subprocess.Popen(command, **popen_kwargs)
    out, err = process.communicate(input=input)
    if out and not capture:
        sys.stdout.write(out.decode("utf-8"))
    if err:
        sys.stderr.write(err.decode("utf-8"))
    if process.returncode:
        msg = f"The command '{' '.join(command)}' exited with code {process.returncode}"
        hint = (
            "" if warn_only else " (use --warn-only to turn this error into a warning)"
        )
        sys.stderr.write(
            f"[jupytext] {'Warning' if warn_only else 'Error'}: {msg}{hint}\n"
        )
        if not warn_only:
            raise SystemExit(process.returncode)
    return out
def pipe_notebook(
    notebook, command, fmt="py:percent", update=True, prefix=None, warn_only=False
):
    """Pipe the notebook, in the desired representation, to the given command. Update the notebook
    with the returned content if desired."""
    # Well-known tools get their conventional invocation: formatters read stdin
    # ('-'), test runners take a file path ('{}')
    if command in ["black", "flake8", "autopep8"]:
        command = command + " -"
    elif command in ["pytest", "unittest"]:
        command = command + " {}"
    fmt = long_form_one_format(
        fmt, notebook.metadata, auto_ext_requires_language_info=False
    )
    fmt = check_auto_ext(fmt, notebook.metadata, "--pipe-fmt")
    text = writes(notebook, fmt)
    command = shlex.split(command)
    if "{}" in command:
        # The command expects a file path: write the text representation to a
        # temporary file and substitute its name for the '{}' placeholder
        if prefix is not None:
            prefix = prefix + (" " if " " in prefix else "_")
        tmp_file_args = dict(
            mode="w+",
            encoding="utf8",
            prefix=prefix,
            suffix=fmt["extension"],
            delete=False,
        )
        try:
            tmp = NamedTemporaryFile(**tmp_file_args)
        except TypeError:
            # NamedTemporaryFile does not have an 'encoding' argument on pypy
            tmp_file_args.pop("encoding")
            tmp = NamedTemporaryFile(**tmp_file_args)
        try:
            tmp.write(text)
            tmp.close()
            exec_command(
                [cmd if cmd != "{}" else tmp.name for cmd in command],
                capture=update,
                warn_only=warn_only,
            )
            if not update:
                return notebook
            # Read back the (possibly modified) temporary file
            piped_notebook = read(tmp.name, fmt=fmt)
        finally:
            os.remove(tmp.name)
    else:
        # The command reads stdin and writes the transformed text to stdout
        cmd_output = exec_command(
            command, text.encode("utf-8"), capture=update, warn_only=warn_only
        )
        if not update:
            return notebook
        if not cmd_output:
            sys.stderr.write(
                "[jupytext] The command '{}' had no output. As a result, the notebook is empty. "
                "Is this expected? If not, use --check rather than --pipe for this command.".format(
                    command
                )
            )
        piped_notebook = reads(cmd_output.decode("utf-8"), fmt)
    # Text representations carry no outputs: merge the original outputs back in
    if fmt["extension"] != ".ipynb":
        piped_notebook = combine_inputs_with_outputs(piped_notebook, notebook, fmt)
    # Remove jupytext / text_representation entry
    if "jupytext" in notebook.metadata:
        piped_notebook.metadata["jupytext"] = notebook.metadata["jupytext"]
    else:
        piped_notebook.metadata.pop("jupytext", None)
    return piped_notebook
def execution_counts_are_in_order(notebook):
    """Return True when the code cells carry execution counts 1..N with no gap"""
    next_count = 1
    for cell in notebook.cells:
        if cell.cell_type != "code":
            continue
        if cell.execution_count != next_count:
            return False
        next_count += 1
    return True
def code_cells_have_changed(notebook, nb_files):
    """Return True if the notebook's code cell sources differ from those stored in nb_files"""
    for path in nb_files:
        # A missing paired file counts as a change
        if not os.path.exists(path):
            return True
        on_disk = read(path)
        disk_sources = [c.source for c in on_disk.cells if c.cell_type == "code"]
        mem_sources = [c.source for c in notebook.cells if c.cell_type == "code"]
        if disk_sources != mem_sources:
            return True
    return False
| 35.448193 | 114 | 0.580246 |
import argparse
import glob
import json
import os
import re
import shlex
import subprocess
import sys
import warnings
from copy import copy
from tempfile import NamedTemporaryFile
from .combine import combine_inputs_with_outputs
from .compare import NotebookDifference, compare, test_round_trip_conversion
from .config import load_jupytext_config, notebook_formats
from .formats import (
_BINARY_FORMAT_OPTIONS,
_VALID_FORMAT_OPTIONS,
JUPYTEXT_FORMATS,
check_auto_ext,
check_file_version,
long_form_multiple_formats,
long_form_one_format,
short_form_one_format,
)
from .header import recursive_update
from .jupytext import create_prefix_dir, read, reads, write, writes
from .kernels import find_kernel_specs, get_kernel_spec, kernelspec_from_language
from .languages import _SCRIPT_EXTENSIONS
from .paired_paths import (
InconsistentPath,
base_path,
find_base_path_and_format,
full_path,
paired_paths,
)
from .pairs import latest_inputs_and_outputs, read_pair, write_pair
from .version import __version__
def system(*args, **kwargs):
    """Run a command, returning its captured stdout decoded as UTF-8.

    Raises SystemExit with the command's return code when it fails.
    """
    kwargs.setdefault("stdout", subprocess.PIPE)
    process = subprocess.Popen(args, **kwargs)
    captured, _ = process.communicate()
    returncode = process.returncode
    if returncode:
        raise SystemExit(returncode)
    return captured.decode("utf-8")
def str2bool(value):
    """Parse a yes/no/default command-line string into True, False or None."""
    lowered = value.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    # Empty string and 'd'/'default' mean: keep the default behavior
    if lowered in ("d", "default", ""):
        return None
    raise argparse.ArgumentTypeError("Expected: (Y)es/(T)rue/(N)o/(F)alse/(D)efault")
def parse_jupytext_args(args=None):
parser = argparse.ArgumentParser(
description="Jupyter Notebooks as Markdown Documents, Julia, Python or R Scripts",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"notebooks",
help="One or more notebook(s). "
"Notebook is read from stdin when this argument is empty.",
nargs="*",
)
parser.add_argument(
"--from",
dest="input_format",
help="Jupytext format for the input(s). Inferred from the "
"file extension and content when missing.",
)
parser.add_argument(
"--to",
dest="output_format",
help=(
"The destination format: 'ipynb', 'markdown' or 'script', or a file extension: "
"'md', 'Rmd', 'jl', 'py', 'R', ..., 'auto' (script extension matching the notebook language), "
"or a combination of an extension and a format name, e.g. {} ".format(
", ".join(
{
f"md:{fmt.format_name}"
for fmt in JUPYTEXT_FORMATS
if fmt.extension == ".md"
}
)
)
+ " or {}. ".format(
", ".join(
{
f"py:{fmt.format_name}"
for fmt in JUPYTEXT_FORMATS
if fmt.extension == ".py"
}
)
)
+ "The default format for scripts is the 'light' format, "
"which uses few cell markers (none when possible). "
"Alternatively, a format compatible with many editors is the "
"'percent' format, which uses '# %%%%' as cell markers. "
"The main formats (markdown, light, percent) preserve "
"notebooks and text documents in a roundtrip. Use the "
"--test and and --test-strict commands to test the roundtrip on your files. "
"Read more about the available formats at "
"https://jupytext.readthedocs.io/en/latest/formats.html"
),
)
parser.add_argument(
"-o",
"--output",
help="Destination file. Defaults to the original file, "
"with prefix/suffix/extension changed according to "
"the destination format. "
"Use '-' to print the notebook on stdout.",
)
parser.add_argument(
"--update",
action="store_true",
help="Preserve the output cells when the destination "
"notebook is an .ipynb file that already exists",
)
parser.add_argument(
"--set-formats",
type=str,
help="Turn the notebook or text document to one or more alternative representations "
"with e.g. '--set-formats ipynb,py:light'. "
"The --set-formats option also triggers the creation/update of all paired files",
)
action = parser.add_mutually_exclusive_group()
action.add_argument(
"--sync",
"-s",
help="Synchronize the content of the paired representations of "
"the given notebook. Input cells are taken from the file that "
"was last modified, and outputs are read from the ipynb file, "
"if present.",
action="store_true",
)
action.add_argument(
"--paired-paths",
"-p",
help="List the locations of the alternative representations for this notebook.",
action="store_true",
)
parser.add_argument(
"--format-options",
"--opt",
action="append",
help="Set format options with e.g. "
"'--opt comment_magics=true' or '--opt notebook_metadata_filter=-kernelspec'.",
)
parser.add_argument(
"--update-metadata",
default={},
type=json.loads,
help="Update the notebook metadata with the desired dictionary. "
"Argument must be given in JSON format. For instance, if you "
"want to activate a pairing in the generated file, use e.g. "
"""--update-metadata '{"jupytext":{"formats":"ipynb,py:light"}}' """
"See also the --opt and --set-formats options for other ways "
"to operate on the Jupytext metadata.",
)
parser.add_argument(
"--use-source-timestamp",
help="Set the modification timestamp of the output file(s) equal"
"to that of the source file, and keep the source file and "
"its timestamp unchanged.",
action="store_true",
)
parser.add_argument(
"--warn-only",
"-w",
action="store_true",
help="Only issue a warning and continue processing other notebooks "
"when the conversion of a given notebook fails",
)
action.add_argument(
"--test",
action="store_true",
help="Test that the notebook is stable under a round trip conversion, "
"up to the expected changes",
)
action.add_argument(
"--test-strict",
action="store_true",
help="Test that the notebook is strictly stable under a round trip conversion",
)
parser.add_argument(
"--stop",
"-x",
dest="stop_on_first_error",
action="store_true",
help="In --test mode, stop on first round trip conversion error, and report stack traceback",
)
parser.add_argument(
"--pipe",
action="append",
help="Pipe the text representation (in format --pipe-fmt) of the notebook into "
"another program, and read the notebook back. For instance, reformat "
"your notebook with: "
"'jupytext notebook.ipynb --pipe black' "
"If you want to reformat it and sync the paired representation, execute: "
"'jupytext notebook.ipynb --sync --pipe black' "
"In case the program that you want to execute does not accept pipes, use {} "
"as a placeholder for a temporary file name into which jupytext will "
"write the text representation of the notebook, e.g.: "
"jupytext notebook.ipynb --pipe 'black {}'",
)
parser.add_argument(
"--diff",
"-d",
action="store_true",
help="Show the differences between (the inputs) of two notebooks",
)
parser.add_argument(
"--diff-format",
help="The text format used to show differences in --diff",
)
parser.add_argument(
"--check",
action="append",
help="Pipe the text representation (in format --pipe-fmt) of the notebook into "
"another program, and test that the returned value is non zero. For "
"instance, test that your notebook is pep8 compliant with: "
"'jupytext notebook.ipynb --check flake8' "
"or run pytest on your notebook with: "
"'jupytext notebook.ipynb --check pytest' "
"In case the program that you want to execute does not accept pipes, use {} "
"as a placeholder for a temporary file name into which jupytext will "
"write the text representation of the notebook, e.g.: "
"jupytext notebook.ipynb --check 'pytest {}'",
)
parser.add_argument(
"--pipe-fmt",
default="auto:percent",
help="The format in which the notebook should be piped to other programs, "
"when using the --pipe and/or --check commands.",
)
parser.add_argument(
"--set-kernel",
"-k",
type=str,
help="Set the kernel with the given name on the notebook. "
"Use '--set-kernel -' to set a kernel matching the current "
"environment on Python notebooks, and matching the notebook "
"language otherwise (get the list of available kernels with "
"'jupyter kernelspec list')",
)
parser.add_argument(
"--execute",
action="store_true",
help="Execute the notebook with the given kernel. In the "
"--pre-commit-mode, the notebook is executed only if a code "
"cell changed, or if some execution outputs are missing "
"or not ordered.",
)
parser.add_argument(
"--run-path",
type=str,
help="Execute the notebook at the given path (defaults to the notebook parent directory)",
)
parser.add_argument(
"--quiet",
"-q",
action="store_true",
help="Quiet mode: do not comment about files being updated or created",
)
parser.add_argument(
"--show-changes",
action="store_true",
help="Display the diff for each output file",
)
action.add_argument(
"--version",
"-v",
action="store_true",
help="Show jupytext's version number and exit",
)
parser.add_argument(
"--pre-commit",
action="store_true",
help="Ignore the notebook argument, and instead apply Jupytext "
"on the notebooks found in the git index, which have an "
"extension that matches the (optional) --from argument.",
)
parser.add_argument(
"--pre-commit-mode",
action="store_true",
help="This is a mode that is compatible with the pre-commit framework. "
"In this mode, --sync won't use timestamp but instead will "
"determines the source notebook as the element of the pair "
"that is added to the git index. An alert is raised if multiple inconsistent representations are "
"in the index. It also raises an alert after updating the paired files or outputs if those "
"files need to be added to the index. Finally, filepaths that aren't in the source format "
"you are trying to convert from are ignored.",
)
return parser.parse_args(args)
def jupytext(args=None):
args = parse_jupytext_args(args)
def log(text):
if not args.quiet:
sys.stdout.write(text + "\n")
if args.version:
log(__version__)
return 0
if args.pre_commit:
warnings.warn(
"The --pre-commit argument is deprecated. "
"Please consider switching to the pre-commit.com framework "
"(let us know at https://github.com/mwouts/jupytext/issues "
"if that is an issue for you)",
DeprecationWarning,
)
if args.notebooks:
raise ValueError(
"--pre-commit takes notebooks from the git index. Do not pass any notebook here."
)
args.notebooks = notebooks_in_git_index(args.input_format)
log("[jupytext] Notebooks in git index are:")
for nb_file in args.notebooks:
log(nb_file)
# Read notebook from stdin
if not args.notebooks:
if not args.pre_commit:
args.notebooks = ["-"]
if args.set_formats is not None:
# Replace empty string with None
args.update_metadata = recursive_update(
args.update_metadata, {"jupytext": {"formats": args.set_formats or None}}
)
args.sync = True
if args.paired_paths:
if len(args.notebooks) != 1:
raise ValueError("--paired-paths applies to a single notebook")
print_paired_paths(args.notebooks[0], args.input_format)
return 1
if args.run_path:
args.execute = True
if (
(args.test or args.test_strict)
and not args.output_format
and not args.output
and not args.sync
):
raise ValueError("Please provide one of --to, --output or --sync")
if (
not args.output_format
and not args.output
and not args.sync
and not args.pipe
and not args.diff
and not args.check
and not args.update_metadata
and not args.format_options
and not args.set_kernel
and not args.execute
):
raise ValueError(
"Please provide one of --to, --output, --set-formats, --sync, --pipe, --diff, "
"--check, --update-metadata, --format-options, --set-kernel or --execute"
)
if args.diff:
if (
len(args.notebooks) != 2
or args.output_format
or args.output
or args.sync
or args.pipe
or args.check
or args.update_metadata
or args.format_options
or args.set_kernel
or args.execute
):
raise ValueError(
"Please provide two notebooks after 'jupytext --diff'.\n"
"NB: Use --show-changes if you wish to see the changes in "
"a notebook being updated by Jupytext."
)
nb_file1, nb_file2 = args.notebooks
nb1 = read(nb_file1)
nb2 = read(nb_file2)
def fmt_if_not_ipynb(nb):
fmt = nb.metadata["jupytext"]["text_representation"]
if fmt["extension"] == ".ipynb":
return None
return short_form_one_format(fmt)
diff_fmt = (
args.diff_format or fmt_if_not_ipynb(nb1) or fmt_if_not_ipynb(nb2) or "md"
)
diff = compare(
writes(nb2, diff_fmt),
writes(nb1, diff_fmt),
nb_file2,
nb_file1,
return_diff=True,
)
sys.stdout.write(diff)
return
if args.output and len(args.notebooks) != 1:
raise ValueError("Please input a single notebook when using --output")
# Warn if '--to' is used in place of '--output'
if (
not args.output
and args.output_format
and "." in args.output_format
# a suffix is expected to start with one of these characters #901
and not args.output_format.startswith((".", "-", "_"))
and "//" not in args.output_format
):
def single_line(msg, *args, **kwargs):
return f"[warning] {msg}\n"
warnings.formatwarning = single_line
warnings.warn(
"You might have passed a file name to the '--to' option, "
"when a format description was expected. Maybe you want to use the '-o' option instead?"
)
if args.input_format:
args.input_format = long_form_one_format(args.input_format)
if args.output_format:
args.output_format = long_form_one_format(args.output_format)
set_format_options(args.output_format, args.format_options)
# Wildcard extension on Windows #202
notebooks = []
for pattern in args.notebooks:
if "*" in pattern or "?" in pattern:
# Exclude the .jupytext.py configuration file
notebooks.extend(glob.glob(pattern, recursive=True))
else:
notebooks.append(pattern)
# Count how many file have round-trip issues when testing
exit_code = 0
for nb_file in notebooks:
if not args.warn_only:
exit_code += jupytext_single_file(nb_file, args, log)
else:
try:
exit_code += jupytext_single_file(nb_file, args, log)
except Exception as err:
sys.stderr.write(f"[jupytext] Error: {str(err)}\n")
return exit_code
def jupytext_single_file(nb_file, args, log):
if nb_file == "-" and args.sync:
msg = "Missing notebook path."
if args.set_formats is not None and os.path.isfile(args.set_formats):
msg += f" Maybe you mean 'jupytext --sync {args.set_formats}' ?"
raise ValueError(msg)
nb_dest = None
if args.output:
nb_dest = args.output
elif nb_file == "-":
nb_dest = "-"
else:
try:
bp = base_path(nb_file, args.input_format)
except InconsistentPath:
if args.pre_commit_mode:
log(
"[jupytext] Ignoring unmatched input path {}{}".format(
nb_file,
f" for format {args.input_format}" if args.input_format else "",
)
)
return 0
raise
if args.output_format:
nb_dest = full_path(bp, args.output_format)
config = load_jupytext_config(os.path.abspath(nb_file))
# Just acting on metadata / pipe => save in place
save_in_place = not nb_dest and not args.sync
if save_in_place:
nb_dest = nb_file
if nb_dest == "-":
args.quiet = True
# I. ### Read the notebook ###
fmt = copy(args.input_format) or {}
if not fmt:
ext = os.path.splitext(nb_file)[1]
if ext:
fmt = {"extension": ext}
if fmt:
set_format_options(fmt, args.format_options)
log(
"[jupytext] Reading {}{}".format(
nb_file if nb_file != "-" else "stdin",
f" in format {short_form_one_format(fmt)}" if "extension" in fmt else "",
)
)
notebook = read(nb_file, fmt=fmt, config=config)
if "extension" in fmt and "format_name" not in fmt:
text_representation = notebook.metadata.get("jupytext", {}).get(
"text_representation", {}
)
if text_representation.get("extension") == fmt["extension"]:
fmt["format_name"] = text_representation["format_name"]
# Compute actual extension when using script/auto, and update nb_dest if necessary
dest_fmt = args.output_format
if dest_fmt and dest_fmt["extension"] == ".auto":
dest_fmt = check_auto_ext(dest_fmt, notebook.metadata, "--to")
if not args.output and nb_file != "-":
nb_dest = full_path(base_path(nb_file, args.input_format), dest_fmt)
# Set the kernel
set_kernel = args.set_kernel
if (
(not set_kernel)
and args.execute
and notebook.metadata.get("kernelspec", {}).get("name") is None
):
set_kernel = "-"
if set_kernel:
if set_kernel == "-":
language = (
notebook.metadata.get("jupytext", {}).get("main_language")
or notebook.metadata["kernelspec"]["language"]
)
if not language:
raise ValueError(
"Cannot infer a kernel as notebook language is not defined"
)
kernelspec = kernelspec_from_language(language)
else:
try:
kernelspec = get_kernel_spec(set_kernel)
except KeyError as err:
raise KeyError(
"Please choose a kernel name among {}".format(
find_kernel_specs().keys()
)
) from err
kernelspec = {
"name": args.set_kernel,
"language": kernelspec.language,
"display_name": kernelspec.display_name,
}
log("[jupytext] Setting kernel {}".format(kernelspec.get("name")))
args.update_metadata["kernelspec"] = kernelspec
# Are we updating a text file that has a metadata filter? #212
if args.update_metadata or args.format_options:
if (
notebook.metadata.get("jupytext", {}).get("notebook_metadata_filter")
== "-all"
):
notebook.metadata.get("jupytext", {}).pop("notebook_metadata_filter")
# Update the metadata
if args.update_metadata:
log(
"[jupytext] Updating notebook metadata with '{}'".format(
json.dumps(args.update_metadata)
)
)
if (
"kernelspec" in args.update_metadata
and "main_language" in notebook.metadata.get("jupytext", {})
):
notebook.metadata["jupytext"].pop("main_language")
recursive_update(notebook.metadata, args.update_metadata)
# Read paired notebooks, except if the pair is being created
nb_files = [nb_file, nb_dest]
if args.sync:
formats = notebook_formats(
notebook, config, nb_file, fallback_on_current_fmt=False
)
set_prefix_and_suffix(fmt, formats, nb_file)
if args.set_formats is None:
try:
notebook, inputs_nb_file, outputs_nb_file = load_paired_notebook(
notebook, fmt, config, formats, nb_file, log, args.pre_commit_mode
)
nb_files = [inputs_nb_file, outputs_nb_file]
except NotAPairedNotebook as err:
sys.stderr.write("[jupytext] Warning: " + str(err) + "\n")
return 0
except InconsistentVersions as err:
sys.stderr.write("[jupytext] Error: " + str(err) + "\n")
return 1
else:
nb_files = [nb_file]
# II. ### Apply commands onto the notebook ###
# Pipe the notebook into the desired commands
prefix = None if nb_file == "-" else os.path.splitext(os.path.basename(nb_file))[0]
for cmd in args.pipe or []:
notebook = pipe_notebook(
notebook, cmd, args.pipe_fmt, prefix=prefix, warn_only=args.warn_only
)
# and/or test the desired commands onto the notebook
for cmd in args.check or []:
pipe_notebook(
notebook,
cmd,
args.pipe_fmt,
update=False,
prefix=prefix,
warn_only=args.warn_only,
)
if (
args.execute
and args.pre_commit_mode
and execution_counts_are_in_order(notebook)
and not code_cells_have_changed(notebook, nb_files)
):
log(
f"[jupytext] Execution of {shlex.quote(nb_file)} "
f"skipped as code cells have not changed and outputs are present."
)
args.execute = False
# Execute the notebook
if args.execute:
kernel_name = notebook.metadata.get("kernelspec", {}).get("name")
log(f"[jupytext] Executing notebook with kernel {kernel_name}")
if nb_dest is not None and nb_dest != "-":
nb_path = os.path.dirname(nb_dest)
elif nb_file != "-":
nb_path = os.path.dirname(nb_file)
else:
nb_path = None
run_path = args.run_path or nb_path
if args.run_path and not os.path.isdir(run_path):
# is this a relative directory?
for base_dir in [nb_path, os.getcwd()]:
try_path = os.path.join(base_dir, run_path)
if os.path.isdir(try_path):
run_path = try_path
break
if not os.path.isdir(run_path):
raise ValueError(f"--run-path={args.run_path} is not a valid path")
if run_path:
resources = {"metadata": {"path": run_path}}
else:
resources = {}
try:
from nbconvert.preprocessors import ExecutePreprocessor
exec_proc = ExecutePreprocessor(timeout=None, kernel_name=kernel_name)
exec_proc.preprocess(notebook, resources=resources)
except (ImportError, RuntimeError) as err:
if args.pre_commit_mode:
raise RuntimeError(
"An error occurred while executing the notebook. Please "
"make sure that you have listed 'nbconvert' and 'ipykernel' "
"under 'additional_dependencies' in the jupytext hook."
) from err
raise RuntimeError(
"An error occurred while executing the notebook. Please "
"make sure that 'nbconvert' and 'ipykernel' are installed."
) from err
# III. ### Possible actions ###
# a. Test round trip conversion
if args.test or args.test_strict:
try:
# Round trip from an ipynb document
if fmt["extension"] == ".ipynb":
test_round_trip_conversion(
notebook,
dest_fmt,
update=args.update,
allow_expected_differences=not args.test_strict,
stop_on_first_error=args.stop_on_first_error,
)
# Round trip from a text file
else:
with open(nb_file, encoding="utf-8") as fp:
org_text = fp.read()
# If the destination is not ipynb, we convert to/back that format
if dest_fmt["extension"] != ".ipynb":
dest_text = writes(notebook, fmt=dest_fmt)
notebook = reads(dest_text, fmt=dest_fmt)
text = writes(notebook, fmt=fmt, config=config)
if args.test_strict:
compare(text, org_text)
else:
# we ignore the YAML header in the comparison #414
comment = _SCRIPT_EXTENSIONS.get(fmt["extension"], {}).get(
"comment", ""
)
# white spaces between the comment char and the YAML delimiters are allowed
if comment:
comment = comment + r"\s*"
yaml_header = re.compile(
r"^{comment}---\s*\n.*\n{comment}---\s*\n".format(
comment=comment
),
re.MULTILINE | re.DOTALL,
)
compare(
re.sub(yaml_header, "", text), re.sub(yaml_header, "", org_text)
)
except (NotebookDifference, AssertionError) as err:
sys.stdout.write(f"{nb_file}: {str(err)}")
return 1
return 0
# b. Output to the desired file or format
untracked_files = 0
def lazy_write(path, fmt=None, action=None, update_timestamp_only=False):
if path == "-":
write(notebook, "-", fmt=fmt)
return
nonlocal untracked_files
if update_timestamp_only:
modified = False
else:
_, ext = os.path.splitext(path)
fmt = copy(fmt or {})
fmt = long_form_one_format(fmt, update={"extension": ext})
new_content = writes(notebook, fmt=fmt, config=config)
diff = None
if not new_content.endswith("\n"):
new_content += "\n"
if not os.path.isfile(path):
modified = True
else:
with open(path, encoding="utf-8") as fp:
current_content = fp.read()
modified = new_content != current_content
if modified and args.show_changes:
diff = compare(
new_content,
current_content,
"",
"",
return_diff=True,
)
if modified:
# The text representation of the notebook has changed, we write it on disk
if action is None:
message = f"[jupytext] Updating {shlex.quote(path)}"
else:
message = "[jupytext] Writing {path}{format}{action}".format(
path=shlex.quote(path),
format=" in format " + short_form_one_format(fmt)
if fmt and "format_name" in fmt
else "",
action=action,
)
if diff is not None:
message += " with this change:\n" + diff
log(message)
create_prefix_dir(path, fmt)
with open(path, "w", encoding="utf-8") as fp:
fp.write(new_content)
# Otherwise, we only update the timestamp of the text file to make sure
# they remain more recent than the ipynb file, for compatibility with the
# Jupytext contents manager for Jupyter
if args.use_source_timestamp:
log(
f"[jupytext] Setting the timestamp of {shlex.quote(path)} equal to that of {shlex.quote(nb_file)}"
)
os.utime(path, (os.stat(path).st_atime, os.stat(nb_file).st_mtime))
elif not modified and not path.endswith(".ipynb"):
log(f"[jupytext] Updating the timestamp of {shlex.quote(path)}")
os.utime(path, None)
if args.pre_commit:
system("git", "add", path)
if args.pre_commit_mode and is_untracked(path):
log(
f"[jupytext] Error: the git index is outdated.\n"
f"Please add the paired notebook with:\n"
f" git add {shlex.quote(path)}"
)
untracked_files += 1
return
if nb_dest:
if nb_dest == nb_file and not dest_fmt:
dest_fmt = fmt
# Test consistency between dest name and output format
if dest_fmt and nb_dest != "-":
base_path(nb_dest, dest_fmt)
# Describe what jupytext is doing
if save_in_place:
action = ""
elif os.path.isfile(nb_dest) and args.update:
if not nb_dest.endswith(".ipynb"):
raise ValueError("--update is only for ipynb files")
action = " (destination file updated)"
check_file_version(notebook, nb_file, nb_dest)
notebook = combine_inputs_with_outputs(notebook, read(nb_dest), fmt=fmt)
elif os.path.isfile(nb_dest):
suggest_update = (
" [use --update to preserve cell outputs and ids]"
if nb_dest.endswith(".ipynb")
else ""
)
action = f" (destination file replaced{suggest_update})"
else:
action = ""
formats = notebook.metadata.get("jupytext", {}).get("formats")
formats = long_form_multiple_formats(formats)
if formats:
try:
base_path_out, _ = find_base_path_and_format(nb_dest, formats)
except InconsistentPath:
# Drop 'formats' if the destination is not part of the paired notebooks
formats = {}
notebook.metadata.get("jupytext", {}).pop("formats")
lazy_write(nb_dest, fmt=dest_fmt, action=action)
nb_dest_in_pair = formats and any(
os.path.exists(alt_path) and os.path.samefile(nb_dest, alt_path)
for alt_path, _ in paired_paths(nb_file, fmt, formats)
)
if (
nb_dest_in_pair
and os.path.isfile(nb_file)
and not nb_file.endswith(".ipynb")
and os.path.isfile(nb_dest)
and nb_dest.endswith(".ipynb")
):
# If the destination is an ipynb file and is in the pair, then we
# update the original text file timestamp, as required by our Content Manager
# Otherwise Jupyter will refuse to open the paired notebook #335
# NB: An alternative is --use-source-timestamp
lazy_write(nb_file, update_timestamp_only=True)
# c. Synchronize paired notebooks
elif args.sync:
write_pair(nb_file, formats, lazy_write)
return untracked_files
def notebooks_in_git_index(fmt):
    """Return the staged git files that match the given jupytext format."""
    git_status = system("git", "status", "--porcelain")
    re_modified = re.compile(r"^[AM]+\s+(?P<name>.*)", re.MULTILINE)
    files = []
    for name in re_modified.findall(git_status):
        # git quotes paths that contain special characters
        if name.startswith('"') and name.endswith('"'):
            name = name[1:-1]
        try:
            base_path(name, fmt)
        except InconsistentPath:
            continue
        files.append(name)
    return files
def is_untracked(filepath):
    """Return True when *filepath* is not tracked by git, or has unstaged changes."""
    if not filepath:
        return False
    # Not listed by `git ls-files` => not tracked at all
    if not system("git", "ls-files", filepath).strip():
        return True
    # Tracked, but with local modifications not yet staged
    return bool(system("git", "diff", filepath).strip())
def print_paired_paths(nb_file, fmt):
    """Write the paths paired with *nb_file* to stdout, one per line."""
    notebook = read(nb_file, fmt=fmt)
    formats = notebook.metadata.get("jupytext", {}).get("formats")
    if not formats:
        return
    for alt_path, _ in paired_paths(nb_file, fmt, formats):
        if alt_path != nb_file:
            sys.stdout.write(alt_path + "\n")
def set_format_options(fmt, format_options):
    """Apply command line 'key=value' format options to *fmt*, in place.

    Raises ValueError when an option is not of the form key=value, or when
    the key is not a recognized format option. Boolean options are converted
    with str2bool.
    """
    if not format_options:
        return
    for opt in format_options:
        try:
            # Split on the first '=' only, so that option values may
            # themselves contain an '=' character.
            key, value = opt.split("=", 1)
        except ValueError as err:
            raise ValueError(
                "Format options are expected to be of the form key=value, not '{}'".format(
                    opt
                )
            ) from err
        if key not in _VALID_FORMAT_OPTIONS:
            raise ValueError(
                "'{}' is not a valid format option. Expected one of '{}'".format(
                    key, "', '".join(_VALID_FORMAT_OPTIONS)
                )
            )
        if key in _BINARY_FORMAT_OPTIONS:
            value = str2bool(value)
        fmt[key] = value
def set_prefix_and_suffix(fmt, formats, nb_file):
    """Complete *fmt* with the prefix/suffix of the matching entry in *formats*."""
    for alt_fmt in long_form_multiple_formats(formats):
        same_ext = alt_fmt["extension"] == fmt["extension"]
        same_name = fmt.get("format_name") == alt_fmt.get("format_name")
        if not (same_ext and same_name):
            continue
        try:
            base_path(nb_file, alt_fmt)
        except InconsistentPath:
            continue
        fmt.update(alt_fmt)
        return
class NotAPairedNotebook(ValueError):
    """Raised when the notebook is not paired to any other representation."""
class InconsistentVersions(ValueError):
    """Raised when two paired representations of a notebook differ."""
def file_in_git_index(path):
    """Return True when *path* is staged (added or modified) in the git index."""
    if not os.path.isfile(path):
        return False
    status = system("git", "status", "--porcelain", path).strip()
    return status.startswith(("M", "A"))
def git_timestamp(path):
    """Return a git-aware timestamp for *path*.

    Staged files get float('inf') so they always rank as most recent;
    committed files get their last commit time; files unknown to git fall
    back to the filesystem timestamp. Returns None for non-files.
    """
    if not os.path.isfile(path):
        return None
    if file_in_git_index(path):
        # Files in the git index are considered most recent
        return float("inf")
    try:
        git_ts_str = system("git", "log", "-1", "--pretty=%ct", path).strip()
    except SystemExit as err:
        if err.code != 128:
            raise
        # exit code 128: git is not initialized here
        git_ts_str = ""
    if git_ts_str:
        return float(git_ts_str)
    # Not committed: use the plain filesystem timestamp
    return get_timestamp(path)
def get_timestamp(path):
    """Return the modification time of *path*, or None if it is not a regular file."""
    if os.path.isfile(path):
        return os.lstat(path).st_mtime
    return None
def load_paired_notebook(notebook, fmt, config, formats, nb_file, log, pre_commit_mode):
    """Update *notebook* with the most recent inputs and outputs of its pair.

    Returns (notebook, input_path, output_path). Raises NotAPairedNotebook
    when *formats* is empty, and InconsistentVersions when, in pre-commit
    mode, two staged representations of the notebook differ.
    """
    if not formats:
        raise NotAPairedNotebook(f"{shlex.quote(nb_file)} is not a paired notebook")
    formats = long_form_multiple_formats(formats)
    _, fmt_with_prefix_suffix = find_base_path_and_format(nb_file, formats)
    fmt.update(fmt_with_prefix_suffix)

    def read_one_file(path, fmt):
        # The current file is already loaded; only read the paired ones
        if path == nb_file:
            return notebook

        log(f"[jupytext] Loading {shlex.quote(path)}")
        return read(path, fmt=fmt, config=config)

    if pre_commit_mode and file_in_git_index(nb_file):
        # We raise an error if two representations of this notebook in the git index are inconsistent
        nb_files_in_git_index = sorted(
            (
                (alt_path, alt_fmt)
                for alt_path, alt_fmt in paired_paths(nb_file, fmt, formats)
                if file_in_git_index(alt_path)
            ),
            # Text representations first, .ipynb last
            key=lambda x: 0 if x[1]["extension"] != ".ipynb" else 1,
        )

        if len(nb_files_in_git_index) > 1:
            path0, fmt0 = nb_files_in_git_index[0]
            with open(path0, encoding="utf-8") as fp:
                text0 = fp.read()
            for alt_path, alt_fmt in nb_files_in_git_index[1:]:
                # Compare each staged representation against the first one,
                # rendered in the same text format
                nb = read(alt_path, fmt=alt_fmt, config=config)
                alt_text = writes(nb, fmt=fmt0, config=config)
                if alt_text != text0:
                    diff = compare(alt_text, text0, alt_path, path0, return_diff=True)
                    raise InconsistentVersions(
                        f"{shlex.quote(alt_path)} and {shlex.quote(path0)} are inconsistent.\n"
                        + diff
                        + f"\nPlease revert JUST ONE of the files with EITHER\n"
                        f"    git reset {shlex.quote(alt_path)} && git checkout -- {shlex.quote(alt_path)}\nOR\n"
                        f"    git reset {shlex.quote(path0)} && git checkout -- {shlex.quote(path0)}\n"
                    )

    # In pre-commit mode the timestamps come from git, otherwise from the filesystem
    inputs, outputs = latest_inputs_and_outputs(
        nb_file, fmt, formats, git_timestamp if pre_commit_mode else get_timestamp
    )
    notebook = read_pair(inputs, outputs, read_one_file)

    return notebook, inputs.path, outputs.path
def exec_command(command, input=None, capture=False, warn_only=False):
    """Execute *command* (a list of arguments) and return its captured stdout.

    When *input* is not None it is written to the process stdin and stdout is
    piped. With capture=True the output is not echoed to our stdout. On a
    non-zero exit code an error (or, with warn_only=True, a warning) is
    printed, and SystemExit is raised unless warn_only is set.
    """
    assert isinstance(command, list)
    sys.stdout.write("[jupytext] Executing {}\n".format(" ".join(command)))
    process = subprocess.Popen(
        command,
        # Only pipe stdin/stdout when we have something to feed the process
        **(
            dict(stdout=subprocess.PIPE, stdin=subprocess.PIPE)
            if input is not None
            else {}
        ),
    )
    out, err = process.communicate(input=input)
    if out and not capture:
        sys.stdout.write(out.decode("utf-8"))
    # NOTE(review): stderr is never piped above, so `err` is normally None
    if err:
        sys.stderr.write(err.decode("utf-8"))
    if process.returncode:
        msg = f"The command '{' '.join(command)}' exited with code {process.returncode}"
        hint = (
            "" if warn_only else " (use --warn-only to turn this error into a warning)"
        )
        sys.stderr.write(
            f"[jupytext] {'Warning' if warn_only else 'Error'}: {msg}{hint}\n"
        )
        if not warn_only:
            raise SystemExit(process.returncode)
    return out
def pipe_notebook(
    notebook, command, fmt="py:percent", update=True, prefix=None, warn_only=False
):
    """Pipe the text representation of *notebook* through *command*.

    Commands containing '{}' receive a temporary file name; the others read
    the text on stdin. With update=True the (possibly modified) text is read
    back and merged into the notebook, which is returned.
    """
    # Known linters read from stdin ('-'); test runners get a file name
    if command in ["black", "flake8", "autopep8"]:
        command = command + " -"
    elif command in ["pytest", "unittest"]:
        command = command + " {}"
    fmt = long_form_one_format(
        fmt, notebook.metadata, auto_ext_requires_language_info=False
    )
    fmt = check_auto_ext(fmt, notebook.metadata, "--pipe-fmt")
    text = writes(notebook, fmt)

    command = shlex.split(command)
    if "{}" in command:
        # File-based tool: write the text to a temporary file
        if prefix is not None:
            prefix = prefix + (" " if " " in prefix else "_")
        tmp_file_args = dict(
            mode="w+",
            encoding="utf8",
            prefix=prefix,
            suffix=fmt["extension"],
            delete=False,
        )
        try:
            tmp = NamedTemporaryFile(**tmp_file_args)
        except TypeError:
            # NamedTemporaryFile does not have an 'encoding' argument on pypy
            tmp_file_args.pop("encoding")
            tmp = NamedTemporaryFile(**tmp_file_args)
        try:
            tmp.write(text)
            tmp.close()

            exec_command(
                [cmd if cmd != "{}" else tmp.name for cmd in command],
                capture=update,
                warn_only=warn_only,
            )

            if not update:
                return notebook

            piped_notebook = read(tmp.name, fmt=fmt)
        finally:
            # Always clean up the temporary file, delete=False was used above
            os.remove(tmp.name)
    else:
        # Stdin-based tool: feed the text and capture the result
        cmd_output = exec_command(
            command, text.encode("utf-8"), capture=update, warn_only=warn_only
        )

        if not update:
            return notebook

        if not cmd_output:
            sys.stderr.write(
                "[jupytext] The command '{}' had no output. As a result, the notebook is empty. "
                "Is this expected? If not, use --check rather than --pipe for this command.".format(
                    command
                )
            )

        piped_notebook = reads(cmd_output.decode("utf-8"), fmt)

    if fmt["extension"] != ".ipynb":
        # Text formats do not carry outputs: restore them from the original
        piped_notebook = combine_inputs_with_outputs(piped_notebook, notebook, fmt)

    # Remove jupytext / text_representation entry
    if "jupytext" in notebook.metadata:
        piped_notebook.metadata["jupytext"] = notebook.metadata["jupytext"]
    else:
        piped_notebook.metadata.pop("jupytext", None)

    return piped_notebook
def execution_counts_are_in_order(notebook):
    """Return True when the code cells are numbered 1, 2, 3, ... with no gap."""
    code_cells = (cell for cell in notebook.cells if cell.cell_type == "code")
    return all(
        cell.execution_count == expected
        for expected, cell in enumerate(code_cells, start=1)
    )
def code_cells_have_changed(notebook, nb_files):
    """Return True when the code cells of *notebook* differ from any file in *nb_files*."""
    # The sources of the new notebook do not change within the loop
    new_sources = [cell.source for cell in notebook.cells if cell.cell_type == "code"]
    for nb_file in nb_files:
        if not os.path.exists(nb_file):
            return True
        nb_ref = read(nb_file)
        ref_sources = [cell.source for cell in nb_ref.cells if cell.cell_type == "code"]
        if ref_sources != new_sources:
            return True
    return False
| true | true |
f710a3e5b600a82151ff00b430ce32b511a15cd8 | 3,363 | py | Python | spencer/settings.py | AJMansfield/Spencer-Bot | 6955e2dec78ebde4c01ed9f637040c4226ec14d0 | [
"Apache-2.0"
] | null | null | null | spencer/settings.py | AJMansfield/Spencer-Bot | 6955e2dec78ebde4c01ed9f637040c4226ec14d0 | [
"Apache-2.0"
] | null | null | null | spencer/settings.py | AJMansfield/Spencer-Bot | 6955e2dec78ebde4c01ed9f637040c4226ec14d0 | [
"Apache-2.0"
] | null | null | null | """
Django settings for spencer project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository -- rotate it and load
# it from an environment variable before any deployment.
SECRET_KEY = 'django-insecure-#-js))k7nx&)biw-=pso3u*o%&w@_wngqw0kq1l3ckhh5(52s@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'roles',  # project app
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'spencer.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'spencer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# NOTE(review): database credentials are hard-coded here -- move them to
# environment variables / a secrets manager and rotate this password.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'spencer',
        'USER': 'spencer_django',
        'PASSWORD': '9Ag91LaQjR$n',
        'HOST': '',
        'PORT': '',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.671756 | 91 | 0.691347 |
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = 'django-insecure-#-js))k7nx&)biw-=pso3u*o%&w@_wngqw0kq1l3ckhh5(52s@'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'roles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'spencer.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'spencer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'spencer',
'USER': 'spencer_django',
'PASSWORD': '9Ag91LaQjR$n',
'HOST': '',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| true | true |
f710a3ecafc960d8f7fd50c71912c87d2588db52 | 411 | py | Python | exercises/structures/test/test_treasure_map.py | bmazey/summer2020 | 0e943c356677f1d0ec55da5fe4b30a54b37507cd | [
"MIT"
] | null | null | null | exercises/structures/test/test_treasure_map.py | bmazey/summer2020 | 0e943c356677f1d0ec55da5fe4b30a54b37507cd | [
"MIT"
] | null | null | null | exercises/structures/test/test_treasure_map.py | bmazey/summer2020 | 0e943c356677f1d0ec55da5fe4b30a54b37507cd | [
"MIT"
] | null | null | null | from exercises.structures.src.treasure_map import TreasureMap
tm = TreasureMap()
tm.populate_map()
def test_beach_key():
assert tm.map['beach'] == 'sandy shore'.casefold()
def test_coast_key():
assert tm.map['coast'] == 'ocean reef'.casefold()
def test_volcano_key():
assert tm.map['volcano'] == 'hot lava'.casefold()
def test_x_key():
assert tm.map['x'] == 'marks the spot'.casefold()
| 19.571429 | 61 | 0.681265 | from exercises.structures.src.treasure_map import TreasureMap
tm = TreasureMap()
tm.populate_map()
def test_beach_key():
assert tm.map['beach'] == 'sandy shore'.casefold()
def test_coast_key():
assert tm.map['coast'] == 'ocean reef'.casefold()
def test_volcano_key():
assert tm.map['volcano'] == 'hot lava'.casefold()
def test_x_key():
assert tm.map['x'] == 'marks the spot'.casefold()
| true | true |
f710a51bd1266dc4b0e1f75441f19122c01ede92 | 16,497 | py | Python | .vscode-server/data/User/History/-1f47d17c/Kqqg.py | UNIZAR-30226-2022-09/back-end | 7f20e141e34bf0ae7cce70515a1e4bb0cd85b173 | [
"MIT"
] | null | null | null | .vscode-server/data/User/History/-1f47d17c/Kqqg.py | UNIZAR-30226-2022-09/back-end | 7f20e141e34bf0ae7cce70515a1e4bb0cd85b173 | [
"MIT"
] | 1 | 2022-02-16T12:12:43.000Z | 2022-02-16T12:15:03.000Z | .vscode-server/data/User/History/-1f47d17c/Kqqg.py | UNIZAR-30226-2022-09/back-end | 7f20e141e34bf0ae7cce70515a1e4bb0cd85b173 | [
"MIT"
] | null | null | null | # from flask import Flask, Blueprint
# from flask_sqlalchemy import SQLAlchemy
# from flask_login import LoginManager
# import os
import datetime
import os
import re
import uuid
from functools import wraps

import jwt
from flask import Flask, jsonify, request, make_response, redirect, url_for
from flask_cors import CORS
from flask_migrate import Migrate, migrate
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import select
from sqlalchemy import inspect
from sqlalchemy import Table, Column, MetaData, Integer, Computed
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import secure_filename
app = Flask(__name__)
# NOTE(review): the signing key is hard-coded; anyone with this value can
# forge auth tokens. Load it from an environment variable before deploying.
app.config['SECRET_KEY'] = 'secretollave'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///todo.db'
# Absolute upload folders for profile pictures and article PDFs
ABSOLUTE_PATH_TO_YOUR_FOLDER ='/home/dani/flask/static/fotosPerfil'
ABSOLUTE_PATH_TO_YOUR_PDF_FOLDER ='/home/dani/flask/static/pdf'
CORS(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
# Models
class Usuario(db.Model):
    """User account: login credentials plus public profile fields."""
    nick = db.Column(db.String(20), primary_key=True)
    Nombre_de_usuario = db.Column(db.String(50))
    password = db.Column(db.String(50))  # werkzeug password hash (see /register)
    e_mail = db.Column(db.String(50), unique=True, nullable=False)
    descripcion = db.Column(db.String(1000))
    link = db.Column(db.String(200))
    foto_de_perfil = db.Column(db.String(400))  # filename under static/fotosPerfil
class Sigue(db.Model):
    """Follow relation: Usuario_Nicka follows Usuario_Nickb."""
    #id = db.Column(db.Integer, primary_key=True )
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
    Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Chat(db.Model):
    """A direct message between two users.

    NOTE(review): the primary key is only the pair of nicks, so the schema
    allows a single row (message) per user pair -- confirm whether timestamp
    should be part of the key.
    """
    #Column('timestamp', TIMESTAMP(timezone=False), nullable=False, default=datetime.now())
    timestamp = db.Column(db.TIMESTAMP, nullable=False,
                        server_default=db.func.now(),
                        onupdate=db.func.now())
    mensaje = db.Column(db.String(1000))
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
    Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Publicacion(db.Model):
    """Base row of a post; extended by Propia (own article) and Recomendacion."""
    id = db.Column(Integer,primary_key=True)
    #id = db.Sequence('id', start=1, increment=1)
    descripcion = db.Column(db.String(1000))
    #Column('timestamp', TIMESTAMP(timezone=False), nullable=False, default=datetime.now())
    timestamp = db.Column(db.TIMESTAMP, nullable=False,
                        server_default=db.func.now(),
                        onupdate=db.func.now())
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'))
class Propia(db.Model):
    """Own article: stores the PDF filename attached to a publication.

    NOTE(review): `id` is declared String(20) but references the Integer
    publicacion.id -- type mismatch, confirm and align the column types.
    """
    pdf = db.Column(db.String(400))  # filename under static/pdf
    id = db.Column(db.String(20), db.ForeignKey('publicacion.id'),primary_key=True)
class Recomendacion(db.Model):
    """Recommendation post: an external link with title and author.

    NOTE(review): `id` is String(20) but references the Integer
    publicacion.id -- type mismatch, confirm and align the column types.
    """
    link = db.Column(db.String(200),nullable=False)
    titulo = db.Column(db.String(200),nullable=False)
    autor = db.Column(db.String(200),nullable=False)
    id = db.Column(db.String(20), db.ForeignKey('publicacion.id'),primary_key=True)
class Tematica(db.Model):
    """Catalog of available topics (one row per topic name)."""
    tema = db.Column(db.String(50), primary_key=True )
class Notificaciones(db.Model):
    """Notification addressed to a user."""
    id = db.Column(db.Integer, primary_key=True )
    fecha = db.Column(db.Date)
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Prefiere(db.Model):
    """User's preferred topics (user <-> topic association)."""
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
    tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'),primary_key=True)
class Trata_pub_del_tema(db.Model):
    """Publication <-> topic association."""
    id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
    tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'),primary_key=True)
class Gusta(db.Model):
    """Like: a user liked a publication."""
    id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Comenta(db.Model):
    """Comment left by a user on a publication.

    NOTE(review): the (id, user) primary key allows only one comment per
    user per publication -- confirm this is intended.
    """
    id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
    comentario = db.Column(db.String(1000))
class Guarda(db.Model):
    """Bookmark: a user saved a publication."""
    id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Trata(db.Model):
    """Notification <-> publication association.

    NOTE(review): id_notif is String(20) but references the Integer
    notificaciones.id -- type mismatch, confirm and align.
    """
    id_publi = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
    id_notif = db.Column(db.String(20), db.ForeignKey('notificaciones.id'),primary_key=True)
class Genera(db.Model):
    """User <-> publication association (publication generated by a user)."""
    id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
def token_required(f):
    """Decorator that requires a valid JWT in the 'token' request header.

    On success the authenticated nick is passed to the wrapped view as its
    first positional argument; otherwise a 403 JSON error is returned.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        # .get() instead of [] so a missing header reaches the explicit
        # 'Token no existe' branch rather than raising a 500 KeyError.
        token = request.headers.get('token')
        if not token:
            return jsonify({'error': 'Token no existe'}), 403
        try:
            # Pin the algorithm to HS256 (the one jwt.encode uses by default
            # elsewhere in this file): required by PyJWT >= 2 and prevents
            # algorithm-confusion attacks.
            data = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])
            current_user = data['nick']
        except (jwt.InvalidTokenError, KeyError):
            return jsonify({'error': 'Token no valido'}), 403
        return f(current_user, *args, **kwargs)
    return decorated
@app.route('/unprotected')
def unprotected():
    """Public endpoint: reachable without any token."""
    payload = {'message': 'Puede entrar tol mundo'}
    return jsonify(payload)
@app.route('/protected')
@token_required
def protected(current_user):
    """Endpoint reachable only with a valid JWT (demo of token_required)."""
    print(current_user)  # debug trace of the authenticated nick
    return jsonify({'message': 'Puedes entrar si puedes'})
# Ruta para el login
@app.route('/register', methods=['POST'])
def add_data():
    """Register a new user from JSON {nick, password, e_mail}.

    Returns a 30-minute JWT on success, or a JSON error when the e-mail or
    the nick is already taken.
    """
    data= request.get_json()
    #nick = request.form.get("nick")
    #password = request.form.get("password")
    #e_mail = request.form.get("e_mail")
    user = Usuario.query.filter_by(e_mail=data['e_mail']).first()
    nick = Usuario.query.filter_by(nick=data['nick']).first()
    if user: # a result here means the e-mail is already registered
        return jsonify({'error': 'Existe correo'}) # error: e-mail exists
    if nick:
        return jsonify({'error': 'Existe nick'})
    # NOTE(review): e-mail/password validation is commented out -- the
    # check_email/check_password helpers below are never called.
    #if (check_email(e_mail) == True and check_password(data['password']) == True ):
    register = Usuario(nick=data['nick'],password=generate_password_hash(data['password']), e_mail=data['e_mail'],foto_de_perfil="platon.jpg")
    db.session.add(register)
    db.session.commit()
    token = jwt.encode({'nick' : data['nick'], 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
    return jsonify({'token' : token.decode('UTF-8')})
@app.route('/login', methods=['POST'])
def login():
    """Log a user in from JSON {nickOcorreo, password}; return a JWT."""
    # auth = request.authorization  # alternative if using HTTP basic auth
    data= request.get_json()
    # 'nickOcorreo' may be either the nick or the e-mail address
    if '@' in data['nickOcorreo']:
        user = Usuario.query.filter_by(e_mail=data['nickOcorreo']).first()
    else:
        user = Usuario.query.filter_by(nick=data['nickOcorreo']).first()
    if not user:
        return jsonify({'error': 'No existe ese usuario'})# error: unknown user
    if not check_password_hash(user.password, data['password']):
        return jsonify({'error': 'Mal contraseña'}) # error: wrong password
    # NOTE(review): the token stores the raw 'nickOcorreo' value, so a login
    # by e-mail puts the e-mail (not the nick) in the token, while
    # token_required treats it as a nick -- confirm this is intended.
    token = jwt.encode({'nick' : data['nickOcorreo'], 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=9999999)}, app.config['SECRET_KEY'])
    return jsonify({'token' : token.decode('UTF-8')})
@app.route('/editarPerfil', methods=['GET'])
@token_required
def editarPerfilget(current_user):
    """Return the authenticated user's profile data as JSON."""
    s = select([Usuario.Nombre_de_usuario, Usuario.descripcion,Usuario.link, Usuario.foto_de_perfil]).where((Usuario.nick == current_user))
    result = db.session.execute(s)
    # Following / follower / post counters
    seguidos= db.session.query(Sigue).filter(Sigue.Usuario_Nicka == current_user ).count()
    seguidores= db.session.query(Sigue).filter(Sigue.Usuario_Nickb == current_user ).count()
    nposts= db.session.query(Publicacion).filter(Publicacion.Usuario_Nicka == current_user ).count()
    tema = select([Prefiere.tema]).where((Prefiere.Usuario_Nicka == current_user))
    temas = db.session.execute(tema)
    vector = []
    for row in temas:
        vector += row
    # NOTE(review): the return inside the loop answers with the first row
    # only, and the function returns None (a 500) when no row matches.
    for row in result:
        fila = {
            "nick": current_user,
            "nombre_de_usuario":row[0],
            "descripcion":row[1],
            "link":row[2],
            "foto_de_perfil": 'http://51.255.50.207:5000/display/' + row[3],
            "nsiguiendo": seguidos,
            "nseguidores": seguidores,
            "nposts": nposts,
            "tematicas": vector
            #"foto_de_perfil" :url_for('static', filename='fotosPerfil/' + row[3])
        }
        return fila
@app.route('/display/<filename>')
def foto(filename):
    """Serve a profile picture via a permanent redirect to the static folder."""
    target = url_for('static', filename='fotosPerfil/' + filename)
    return redirect(target, code=301)
@app.route('/editarPerfil', methods=['POST'])
@token_required
def editarPerfilpost(current_user):
    """Update the authenticated user's profile from JSON and return a fresh JWT."""
    data= request.get_json()
    user = Usuario.query.filter_by(nick=current_user).first()
    user.Nombre_de_usuario = data['nombre_de_usuario']
    print(data['nombre_de_usuario'])
    print(data['descripcion'])
    print(data['link'])
    print(data['tematicas'])
    user.descripcion = data['descripcion']
    user.link = data['link']
    tematicas = data['tematicas']
    for temas in tematicas:
        # NOTE(review): this filters Prefiere by topic only, not by user, so
        # a topic already chosen by *any* user is skipped -- probably should
        # filter by (Usuario_Nicka, tema).
        tema = Prefiere.query.filter_by(tema=temas).first()
        if not tema:
            tema = Prefiere(Usuario_Nicka=current_user, tema = temas)
            db.session.add(tema)
            #db.session.commit()
    #cambia_foto
    db.session.commit()
    token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
    return jsonify({'token' : token.decode('UTF-8')})
@app.route('/actualizarImagen', methods=['POST'])
@token_required
def actualizarImagen(current_user):
    """Replace the authenticated user's profile picture with the uploaded file.

    NOTE(review): no file-type/extension whitelist is applied to the upload
    beyond secure_filename -- consider validating the content type.
    """
    user = Usuario.query.filter_by(nick=current_user).first()
    if request.files['nueva_foto'] is not None: #data['cambia_foto']:
        file = request.files['nueva_foto']
        print(request.files['nueva_foto'])
        filename = secure_filename(file.filename)
        file.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_FOLDER, filename))
        user.foto_de_perfil = filename
        db.session.commit()
    token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
    return jsonify({'token' : token.decode('UTF-8')})
@app.route('/subirPost', methods=['POST'])
@token_required
def subirPost(current_user):
    """Create a publication (tipo 1 = own article with PDF, tipo 2 = recommendation)."""
    data= request.get_json()
    publicacion = Publicacion(descripcion=data['descripcion'],Usuario_Nicka=current_user) # commit below assigns the id
    db.session.add(publicacion)
    db.session.commit()
    tematicas = data['tematicas']
    for temas in tematicas:
        # Only link topics that exist in the Tematica catalog
        temita = Tematica.query.filter_by(tema=temas).first()
        if temita:
            nuevo = Trata_pub_del_tema(id=publicacion.id, tema = temita.tema)
            db.session.add(nuevo)
            db.session.commit()
    if (data['tipo']=="1"): # own article
        print("xd")
        # NOTE(review): this mixes request.get_json() with request.files in
        # the same request -- a JSON body and a multipart upload normally
        # cannot coexist; confirm how the client sends the PDF.
        guardarPDF(request.files['pdf'], publicacion.id)
    elif(data['tipo']=="2"): # recommendation
        recomendacion = Recomendacion(link=data['link'],titulo=data['titulo'], autor = data['autor'], id = publicacion.id)
        db.session.add(recomendacion)
    db.session.commit()
    token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
    return jsonify({'token' : token.decode('UTF-8')})
def guardarPDF(pdf, _id):
    """Attach the uploaded *pdf* to the Propia row of publication *_id*.

    Creates the Propia row when it does not exist yet (a freshly created
    publication has none, and the previous code crashed with
    AttributeError on None in that case). Does nothing when *pdf* is None.
    """
    if pdf is None:
        return
    propia = Propia.query.filter_by(id=_id).first()
    if propia is None:
        # First PDF for this publication: the Propia row must be created
        propia = Propia(id=_id)
    filename = secure_filename(pdf.filename)
    pdf.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_PDF_FOLDER, filename))
    propia.pdf = filename
    db.session.add(propia)
@app.route('/getPostsPropios', methods=['GET'])
@token_required
def getPostsPropios(current_user):
    """Return the authenticated user's own posts (work in progress).

    NOTE(review): this handler is broken as written: `row` is never defined
    (NameError on the first iteration), `a` is a select object so `a.pdf` is
    not the stored filename, and the `return fila` answers after the first
    post only. Needs a rewrite before use.
    """
    data= request.get_json()
    a = select([Usuario.Nombre_de_usuario]).where((Usuario.nick == current_user))
    resulta = db.session.execute(a)
    #s = select([Publicacion.Usuario_Nicka, Publicacion.descripcion,Publicacion.timestamp]).where((Publicacion.Usuario_Nicka == current_user and Publicacion.id>data['id']-8 and and Publicacion.id<=data['id'])).order_by(Publicacion.id)
    s=select(Publicacion).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())
    results = db.session.execute(s)
    for r in results:
        for i in range(data['id']-8,data['id']):
            a = select([Propia.id, Propia.pdf]).where((Propia.id == r.id))
            resulta = db.session.execute(a)
            Gustas= db.session.query(Gusta).filter(Gusta.Usuario_Nicka == current_user, Gusta.id == row[1] ).count()
            Comentarios= db.session.query(Comenta).filter(Comenta.Usuario_Nicka == current_user, Comenta.id == row[1] ).count()
            Guardados= db.session.query(Guarda).filter(Guarda.Usuario_Nicka == current_user, Guarda.id == row[1] ).count()
            fila = {
                "id": r.id,
                "nick": current_user,
                "descripcion":r.descripcion,
                "timestamp":r.timestamp,
                "pdf": 'http://51.255.50.207:5000/display2/' + a.pdf,
                "nlikes": Gustas,
                "ncomentarios": Comentarios,
                "nguardados": Guardados,
                "usuario": resulta.nombre_de_usuario
                }
    return fila
@app.route('/display2/<filename>')
def pdf(filename):
    """Serve an article PDF via a permanent redirect to the static folder."""
    target = url_for('static', filename='pdf/' + filename)
    return redirect(target, code=301)
@app.route('/getPostsRecomendados', methods=['GET'])
@token_required
def getPostsRecomendados(current_user):
    """Return the user's recommendation posts (work in progress).

    NOTE(review): `vector1 += str(...)` extends the list with individual
    characters, not whole strings (use .append instead), and most of the
    per-post fields are still commented out. Needs a rewrite before use.
    """
    #data= request.get_json()
    a = select([Usuario.Nombre_de_usuario]).where((Usuario.nick == current_user))
    resultb = db.session.execute(a)
    Nombre_de_usuario = ""
    for b in resultb:
        Nombre_de_usuario=b.Nombre_de_usuario
    #s = select([Publicacion.Usuario_Nicka, Publicacion.descripcion,Publicacion.timestamp]).where((Publicacion.Usuario_Nicka == current_user and Publicacion.id>data['id']-8 and and Publicacion.id<=data['id'])).order_by(Publicacion.id)
    s = select([Publicacion]).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())
    results = db.session.execute(s)
    # for record in results:
    #     print("\n", record)
    vector0 =""
    vector1 = []
    vector2 = []
    for r in results:
        print(str(r[0]))
        vector0 = vector0 + ","+ str(r[0])
        vector1 += str(r.descripcion)
        vector2 += str(r.timestamp)
    # for r in results:
    #     for b in resultb:
    #         a = select([Recomendacion.id, Recomendacion.link,Recomendacion.titulo,Recomendacion.autor]).where((Recomendacion.id == r.id))
    #         resulta = db.session.execute(a)
    #         for a in resultaa:
    #             Gustas= db.session.query(Gusta).filter(Gusta.Usuario_Nicka == current_user, Gusta.id == r.id ).count()
    #             Comentarios= db.session.query(Comenta).filter(Comenta.Usuario_Nicka == current_user, Comenta.id == r.id ).count()
    #             Guardados= db.session.query(Guarda).filter(Guarda.Usuario_Nicka == current_user, Guarda.id == r.id ).count()
    print(vector0)
    fila = {
        "id": vector0,
        #"link": a.link,
        #"titulo": a.titulo,
        #"autor": a.autor,
        "nick": current_user,
        "descripcion": vector1,
        "timestamp": vector2,
        #"nlikes": Gustas,
        #"ncomentarios": Comentarios,
        #"nguardados": Guardados,
        "usuario": Nombre_de_usuario
    }
    return fila
def check_email(email):
    """Return True when *email* matches a basic lowercase e-mail pattern.

    Fix: the module never imported `re`, so this function raised NameError
    when called (the `import re` is added to the module imports).
    """
    regex = r'^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$'
    return re.search(regex, email) is not None
# Passwords of 8 to 32 characters, with at least one digit, one lowercase,
# one uppercase and one special character.
def check_password(password):
    """Return True when *password* satisfies the complexity policy above.

    Fix: the original character class contained an unescaped `[]`, which
    closed the class early and made the regex never match any password;
    the class is now properly escaped. Also requires the module-level
    `import re` that was missing.
    """
    regex = (r'^(?=.*[0-9])(?=.*[a-z])(?=.*[A-Z])'
             r'(?=.*[*.!@$%^&(){}\[\]:;<>,.?/~_+\-=|]).{8,32}$')
    return re.search(regex, password) is not None
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger (arbitrary code
    # execution) -- never use it on a publicly reachable server.
    app.run(debug=True)
| 33.874743 | 235 | 0.655695 |
from flask import Flask, jsonify, request, make_response, redirect, url_for
import jwt
import datetime
import os
from functools import wraps
from flask_sqlalchemy import SQLAlchemy
import uuid
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import secure_filename
from sqlalchemy import select
from flask_migrate import Migrate, migrate
from flask_cors import CORS
from sqlalchemy import inspect
from sqlalchemy import Table, Column, MetaData, Integer, Computed
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secretollave'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///todo.db'
ABSOLUTE_PATH_TO_YOUR_FOLDER ='/home/dani/flask/static/fotosPerfil'
ABSOLUTE_PATH_TO_YOUR_PDF_FOLDER ='/home/dani/flask/static/pdf'
CORS(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
class Usuario(db.Model):
nick = db.Column(db.String(20), primary_key=True)
Nombre_de_usuario = db.Column(db.String(50))
password = db.Column(db.String(50))
e_mail = db.Column(db.String(50), unique=True, nullable=False)
descripcion = db.Column(db.String(1000))
link = db.Column(db.String(200))
foto_de_perfil = db.Column(db.String(400))
class Sigue(db.Model):
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Chat(db.Model):
timestamp = db.Column(db.TIMESTAMP, nullable=False,
server_default=db.func.now(),
onupdate=db.func.now())
mensaje = db.Column(db.String(1000))
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Publicacion(db.Model):
id = db.Column(Integer,primary_key=True)
descripcion = db.Column(db.String(1000))
timestamp = db.Column(db.TIMESTAMP, nullable=False,
server_default=db.func.now(),
onupdate=db.func.now())
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'))
class Propia(db.Model):
pdf = db.Column(db.String(400))
id = db.Column(db.String(20), db.ForeignKey('publicacion.id'),primary_key=True)
class Recomendacion(db.Model):
    """Recommendation specialization of Publicacion (external work)."""
    link = db.Column(db.String(200), nullable=False)    # URL of the recommended work
    titulo = db.Column(db.String(200), nullable=False)  # title
    autor = db.Column(db.String(200), nullable=False)   # author
    # NOTE(review): String(20) FK against Publicacion's Integer PK — type mismatch.
    id = db.Column(db.String(20), db.ForeignKey('publicacion.id'), primary_key=True)
class Tematica(db.Model):
    """Catalog of available topics, keyed by name."""
    tema = db.Column(db.String(50), primary_key=True)
class Notificaciones(db.Model):
    """Notification addressed to a user (composite key: id + recipient)."""
    id = db.Column(db.Integer, primary_key=True)
    fecha = db.Column(db.Date)  # notification date
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'), primary_key=True)
class Prefiere(db.Model):
    """Association: a user prefers a topic."""
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'), primary_key=True)
    tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'), primary_key=True)
class Trata_pub_del_tema(db.Model):
    """Association: a publication is about a topic."""
    id = db.Column(db.Integer, db.ForeignKey('publicacion.id'), primary_key=True)
    tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'), primary_key=True)
class Gusta(db.Model):
    """Association: a user likes a publication."""
    id = db.Column(db.Integer, db.ForeignKey('publicacion.id'), primary_key=True)
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'), primary_key=True)
class Comenta(db.Model):
    """Association: a user comments on a publication.

    NOTE(review): the (id, user) composite key allows only one comment per
    user per publication — confirm this is intended.
    """
    id = db.Column(db.Integer, db.ForeignKey('publicacion.id'), primary_key=True)
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'), primary_key=True)
    comentario = db.Column(db.String(1000))  # comment body
class Guarda(db.Model):
    """Association: a user bookmarks (saves) a publication."""
    id = db.Column(db.Integer, db.ForeignKey('publicacion.id'), primary_key=True)
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'), primary_key=True)
class Trata(db.Model):
    """Association: a notification refers to a publication."""
    id_publi = db.Column(db.Integer, db.ForeignKey('publicacion.id'), primary_key=True)
    # NOTE(review): String(20) FK against Notificaciones' Integer id — type mismatch.
    id_notif = db.Column(db.String(20), db.ForeignKey('notificaciones.id'), primary_key=True)
class Genera(db.Model):
    """Association between a publication and a user.

    NOTE(review): semantics unclear from this file — presumably "user generated
    this publication event"; confirm against the callers.
    """
    id = db.Column(db.Integer, db.ForeignKey('publicacion.id'), primary_key=True)
    Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'), primary_key=True)
def token_required(f):
    """Decorator that requires a valid JWT on the request.

    On success the nick decoded from the token is passed to the wrapped view
    as its first positional argument; otherwise a 403 JSON error is returned.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        # Bug fix: the original body was truncated (a stray ']' remained where
        # the header lookup used to be), so 'token' was never defined.
        # TODO(review): confirm the header name the front end actually sends.
        token = request.headers.get('x-access-token')
        if not token:
            return jsonify({'error': 'Token no existe'}), 403
        try:
            data = jwt.decode(token, app.config['SECRET_KEY'])
            current_user = data['nick']
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt pass through.
            return jsonify({'error': 'Token no valido'}), 403
        return f(current_user, *args, **kwargs)
    return decorated
@app.route('/unprotected')
def unprotected():
    """Open endpoint reachable without any token."""
    payload = {'message': 'Puede entrar tol mundo'}
    return jsonify(payload)
@app.route('/protected')
@token_required
def protected(current_user):
    """Endpoint reachable only with a valid JWT (see token_required)."""
    print(current_user)
    payload = {'message': 'Puedes entrar si puedes'}
    return jsonify(payload)
@app.route('/register', methods=['POST'])
def add_data():
    """Create a new account and return a session JWT.

    Rejects the request when the e-mail or the nick is already taken.
    """
    data = request.get_json()
    if Usuario.query.filter_by(e_mail=data['e_mail']).first():
        return jsonify({'error': 'Existe correo'})
    if Usuario.query.filter_by(nick=data['nick']).first():
        return jsonify({'error': 'Existe nick'})
    new_user = Usuario(
        nick=data['nick'],
        password=generate_password_hash(data['password']),
        e_mail=data['e_mail'],
        foto_de_perfil="platon.jpg",  # default avatar
    )
    db.session.add(new_user)
    db.session.commit()
    claims = {'nick': data['nick'],
              'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}
    token = jwt.encode(claims, app.config['SECRET_KEY'])
    return jsonify({'token': token.decode('UTF-8')})
@app.route('/login', methods=['POST'])
def login():
    """Authenticate by nick or e-mail and return a long-lived JWT."""
    # Bug fix: 'data' was referenced below but never assigned (the
    # request-body parse was missing), causing a NameError on every call.
    data = request.get_json()
    if '@' in data['nickOcorreo']:
        user = Usuario.query.filter_by(e_mail=data['nickOcorreo']).first()
    else:
        user = Usuario.query.filter_by(nick=data['nickOcorreo']).first()
    if not user:
        return jsonify({'error': 'No existe ese usuario'})
    if not check_password_hash(user.password, data['password']):
        return jsonify({'error': 'Mal contraseña'})
    token = jwt.encode({'nick': data['nickOcorreo'], 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=9999999)}, app.config['SECRET_KEY'])
    return jsonify({'token': token.decode('UTF-8')})
@app.route('/editarPerfil', methods=['GET'])
@token_required
def editarPerfilget(current_user):
    """Return the authenticated user's profile plus counters and topics."""
    # Profile fields; nick is the primary key so at most one row comes back.
    s = select([Usuario.Nombre_de_usuario, Usuario.descripcion, Usuario.link, Usuario.foto_de_perfil]).where((Usuario.nick == current_user))
    result = db.session.execute(s)
    # Relationship counters: following, followers, own posts.
    seguidos = db.session.query(Sigue).filter(Sigue.Usuario_Nicka == current_user).count()
    seguidores = db.session.query(Sigue).filter(Sigue.Usuario_Nickb == current_user).count()
    nposts = db.session.query(Publicacion).filter(Publicacion.Usuario_Nicka == current_user).count()
    # Preferred topics, flattened into a plain list of names.
    tema = select([Prefiere.tema]).where((Prefiere.Usuario_Nicka == current_user))
    temas = db.session.execute(tema)
    vector = []
    for row in temas:
        vector += row
    for row in result:
        fila = {
            "nick": current_user,
            "nombre_de_usuario": row[0],
            "descripcion": row[1],
            "link": row[2],
            # Absolute URL served by the /display/<filename> route below.
            "foto_de_perfil": 'http://51.255.50.207:5000/display/' + row[3],
            "nsiguiendo": seguidos,
            "nseguidores": seguidores,
            "nposts": nposts,
            "tematicas": vector
        }
    # NOTE(review): if no row matched (unknown nick), 'fila' is unbound here.
    return fila
@app.route('/display/<filename>')
def foto(filename):
    """Serve a profile picture from the static fotosPerfil directory."""
    target = url_for('static', filename='fotosPerfil/' + filename)
    return redirect(target, code=301)
@app.route('/editarPerfil', methods=['POST'])
@token_required
def editarPerfilpost(current_user):
    """Update the authenticated user's profile fields and topic preferences.

    Returns a fresh JWT on success.
    """
    data = request.get_json()
    user = Usuario.query.filter_by(nick=current_user).first()
    user.Nombre_de_usuario = data['nombre_de_usuario']
    user.descripcion = data['descripcion']
    user.link = data['link']
    for temas in data['tematicas']:
        # Bug fix: the lookup previously filtered only by topic, so a topic
        # already preferred by *any* user could never be added for this one.
        existente = Prefiere.query.filter_by(Usuario_Nicka=current_user, tema=temas).first()
        if not existente:
            db.session.add(Prefiere(Usuario_Nicka=current_user, tema=temas))
    db.session.commit()
    token = jwt.encode({'nick': current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
    return jsonify({'token': token.decode('UTF-8')})
@app.route('/actualizarImagen', methods=['POST'])
@token_required
def actualizarImagen(current_user):
    """Replace the authenticated user's profile picture with an uploaded file."""
    user = Usuario.query.filter_by(nick=current_user).first()
    # Bug fix: request.files['nueva_foto'] raises KeyError when the field is
    # missing; .get() returns None instead so the comparison below is sound.
    file = request.files.get('nueva_foto')
    if file is not None:
        filename = secure_filename(file.filename)
        file.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_FOLDER, filename))
        user.foto_de_perfil = filename
        db.session.commit()
    token = jwt.encode({'nick': current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
    return jsonify({'token': token.decode('UTF-8')})
@app.route('/subirPost', methods=['POST'])
@token_required
def subirPost(current_user):
    """Create a publication, attach its topics and its type-specific payload.

    tipo == "1": own work — an uploaded PDF is stored (see guardarPDF).
    tipo == "2": recommendation — link/title/author are recorded.
    """
    data = request.get_json()
    publicacion = Publicacion(descripcion=data['descripcion'], Usuario_Nicka=current_user)
    db.session.add(publicacion)
    db.session.commit()  # commit now so publicacion.id gets assigned
    tematicas = data['tematicas']
    for temas in tematicas:
        # Unknown topic names are silently ignored.
        temita = Tematica.query.filter_by(tema=temas).first()
        if temita:
            nuevo = Trata_pub_del_tema(id=publicacion.id, tema=temita.tema)
            db.session.add(nuevo)
            db.session.commit()
    if (data['tipo'] == "1"):
        print("xd")  # NOTE(review): leftover debug print
        # NOTE(review): a JSON body and request.files normally cannot arrive
        # in the same request — confirm how the client uploads the PDF.
        guardarPDF(request.files['pdf'], publicacion.id)
    elif (data['tipo'] == "2"):
        recomendacion = Recomendacion(link=data['link'], titulo=data['titulo'], autor=data['autor'], id=publicacion.id)
        db.session.add(recomendacion)
        db.session.commit()
    token = jwt.encode({'nick': current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
    return jsonify({'token': token.decode('UTF-8')})
def guardarPDF(pdf, _id):
    """Persist an uploaded PDF for publication `_id` and record its filename.

    Bug fixes: the previous version assumed a Propia row already existed
    (AttributeError on None when called right after creating the publication)
    and never committed the session, so the filename was silently lost.
    """
    if pdf is None:
        return
    propia = Propia.query.filter_by(id=_id).first()
    if propia is None:
        propia = Propia(id=_id)
    filename = secure_filename(pdf.filename)
    pdf.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_PDF_FOLDER, filename))
    propia.pdf = filename
    db.session.add(propia)
    db.session.commit()
@app.route('/getPostsPropios', methods=['GET'])
@token_required
def getPostsPropios(current_user):
    """Return the authenticated user's own posts (window driven by data['id']).

    NOTE(review): this view is broken as written — see inline notes. The code
    is preserved verbatim so the intent is documented before it is repaired.
    """
    data = request.get_json()  # NOTE(review): GET requests normally carry no JSON body
    a = select([Usuario.Nombre_de_usuario]).where((Usuario.nick == current_user))
    resulta = db.session.execute(a)
    s = select(Publicacion).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())
    results = db.session.execute(s)
    for r in results:
        for i in range(data['id'] - 8, data['id']):
            # NOTE(review): 'a'/'resulta' are clobbered here, and 'row' below
            # is never defined — these filters raise NameError at runtime.
            a = select([Propia.id, Propia.pdf]).where((Propia.id == r.id))
            resulta = db.session.execute(a)
            Gustas = db.session.query(Gusta).filter(Gusta.Usuario_Nicka == current_user, Gusta.id == row[1]).count()
            Comentarios = db.session.query(Comenta).filter(Comenta.Usuario_Nicka == current_user, Comenta.id == row[1]).count()
            Guardados = db.session.query(Guarda).filter(Guarda.Usuario_Nicka == current_user, Guarda.id == row[1]).count()
            fila = {
                "id": r.id,
                "nick": current_user,
                "descripcion": r.descripcion,
                "timestamp": r.timestamp,
                # NOTE(review): 'a' is a Select object here; it has no '.pdf'.
                "pdf": 'http://51.255.50.207:5000/display2/' + a.pdf,
                "nlikes": Gustas,
                "ncomentarios": Comentarios,
                "nguardados": Guardados,
                # NOTE(review): result proxies expose no 'nombre_de_usuario'.
                "usuario": resulta.nombre_de_usuario
            }
    return fila
@app.route('/display2/<filename>')
def pdf(filename):
    """Serve an uploaded PDF from the static pdf directory."""
    target = url_for('static', filename='pdf/' + filename)
    return redirect(target, code=301)
@app.route('/getPostsRecomendados', methods=['GET'])
@token_required
def getPostsRecomendados(current_user):
    """Return the authenticated user's publications as aggregated lists."""
    a = select([Usuario.Nombre_de_usuario]).where((Usuario.nick == current_user))
    resultb = db.session.execute(a)
    Nombre_de_usuario = ""
    for b in resultb:
        Nombre_de_usuario = b.Nombre_de_usuario
    s = select([Publicacion]).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())
    results = db.session.execute(s)
    vector0 = ""   # comma-prefixed string of publication ids
    vector1 = []   # one description per publication
    vector2 = []   # one timestamp per publication
    for r in results:
        vector0 = vector0 + "," + str(r[0])
        # Bug fix: 'vector1 += str(...)' extended the lists with *individual
        # characters*; append keeps one entry per publication.
        vector1.append(str(r.descripcion))
        vector2.append(str(r.timestamp))
    fila = {
        "id": vector0,
        "nick": current_user,
        "descripcion": vector1,
        "timestamp": vector2,
        "usuario": Nombre_de_usuario
    }
    return fila
def check_email(email):
    """Return True when `email` matches a simple lower-case address pattern."""
    pattern = '^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$'
    return re.search(pattern, email) is not None
def check_password(password):
    """Return True when `password` is 8-32 characters long and contains at
    least one digit, one lowercase letter, one uppercase letter and one
    special character.

    Bug fix: the original character class contained an unescaped '[]', which
    terminated the class early and turned the rest into a required literal
    suffix ':;<>,.?/~_+-=|]' — virtually no password could ever pass.
    """
    regex = r'^(?=.*[0-9])(?=.*[a-z])(?=.*[A-Z])(?=.*[*.!@$%^&(){}\[\]:;<>,.?/~_+\-=|\\]).{8,32}$'
    return re.search(regex, password) is not None
# Run the development server when executed directly (not under a WSGI server).
if __name__ == '__main__':
    app.run(debug=True)  # NOTE(review): disable debug mode in production
| true | true |
f710a56d3627ff3cc484543b10918a7e02d8f710 | 348 | py | Python | IPython/config/profile/python3/ipython_config.py | dchichkov/ipython | 8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4 | [
"BSD-3-Clause-Clear"
] | 1 | 2015-01-09T21:10:58.000Z | 2015-01-09T21:10:58.000Z | IPython/config/profile/python3/ipython_config.py | dchichkov/ipython | 8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4 | [
"BSD-3-Clause-Clear"
] | 3 | 2015-04-01T13:14:57.000Z | 2015-05-26T16:01:37.000Z | IPython/config/profile/python3/ipython_config.py | dchichkov/ipython | 8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4 | [
"BSD-3-Clause-Clear"
] | 1 | 2015-05-17T14:14:26.000Z | 2015-05-17T14:14:26.000Z | c = get_config()
# If the master config file uses syntax that's invalid in Python 3, we'll skip
# it and just use the factory defaults.
try:
load_subconfig('ipython_config.py', profile='default')
except Exception:
pass
else:
# We reset exec_lines in case they're not compatible with Python 3.
c.InteractiveShellApp.exec_lines = []
| 29 | 78 | 0.729885 | c = get_config()
try:
load_subconfig('ipython_config.py', profile='default')
except Exception:
pass
else:
c.InteractiveShellApp.exec_lines = []
| true | true |
f710a66be19e70b5868e552408578511804999cb | 5,061 | py | Python | fastestimator/trace/metric/mcc.py | DwijayDS/fastestimator | 9b288cb2bd870f971ec4cee09d0b3205e1316a94 | [
"Apache-2.0"
] | null | null | null | fastestimator/trace/metric/mcc.py | DwijayDS/fastestimator | 9b288cb2bd870f971ec4cee09d0b3205e1316a94 | [
"Apache-2.0"
] | null | null | null | fastestimator/trace/metric/mcc.py | DwijayDS/fastestimator | 9b288cb2bd870f971ec4cee09d0b3205e1316a94 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Union, Iterable
import numpy as np
from sklearn.metrics import matthews_corrcoef
from fastestimator.trace.meta._per_ds import per_ds
from fastestimator.trace.trace import Trace
from fastestimator.util.data import Any, Data, Dict
from fastestimator.util.traceability_util import traceable
from fastestimator.util.util import to_number
@per_ds
@traceable()
class MCC(Trace):
    """Computes the Matthews Correlation Coefficient over an epoch.

    MCC corrects automatically for class imbalance and is symmetric in the
    choice of target class (https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6941312/).
    A score of 1 is ideal, 0 means predictions are uncorrelated with the
    labels, and negative values indicate anti-correlation (inverting the
    classifier would do better).

    Args:
        true_key: Key of the ground-truth entry in the batch dictionary.
        pred_key: Key of the predicted-score entry in the batch dictionary.
        mode: Mode(s) in which to execute ("train", "eval", "test", "infer",
            None for all, or "!mode" to exclude a particular one).
        ds_id: Dataset id(s) in which to execute (None for all, "!ds1" to
            exclude one).
        output_name: Name under which the metric appears in the logs.
        per_ds: Whether to also compute the metric per ds_id in addition to
            the aggregate (automatically False if `output_name` contains "|").
        **kwargs: Forwarded to sklearn.metrics.matthews_corrcoef().

    Raises:
        ValueError: If "y_true" or "y_pred" appears in `kwargs`.
    """
    def __init__(self,
                 true_key: str,
                 pred_key: str,
                 mode: Union[None, str, Iterable[str]] = ("eval", "test"),
                 ds_id: Union[None, str, Iterable[str]] = None,
                 output_name: str = "mcc",
                 per_ds: bool = True,
                 **kwargs) -> None:
        MCC.check_kwargs(kwargs)
        super().__init__(inputs=(true_key, pred_key), mode=mode, outputs=output_name, ds_id=ds_id)
        self.kwargs = kwargs
        self.y_true = []
        self.y_pred = []
        self.per_ds = per_ds

    @property
    def true_key(self) -> str:
        return self.inputs[0]

    @property
    def pred_key(self) -> str:
        return self.inputs[1]

    def on_epoch_begin(self, data: Data) -> None:
        # Start each epoch with empty accumulators.
        self.y_true = []
        self.y_pred = []

    def on_batch_end(self, data: Data) -> None:
        labels = to_number(data[self.true_key])
        preds = to_number(data[self.pred_key])
        # Collapse one-hot / per-class arrays down to class indices.
        if labels.shape[-1] > 1 and labels.ndim > 1:
            labels = np.argmax(labels, axis=-1)
        if preds.shape[-1] > 1 and preds.ndim > 1:
            preds = np.argmax(preds, axis=-1)
        else:
            preds = np.round(preds)
        assert preds.size == labels.size
        self.y_true.extend(labels)
        self.y_pred.extend(preds)

    def on_epoch_end(self, data: Data) -> None:
        score = matthews_corrcoef(y_true=self.y_true, y_pred=self.y_pred, **self.kwargs)
        data.write_with_log(self.outputs[0], score)

    @staticmethod
    def check_kwargs(kwargs: Dict[str, Any]) -> None:
        """Raise ValueError if `kwargs` tries to override y_true/y_pred.

        Args:
            kwargs: Keyword arguments to be examined.

        Raises:
            ValueError: One of ["y_true", "y_pred"] argument exists in `kwargs`.
        """
        illegal_kwarg = [name for name in ("y_true", "y_pred") if name in kwargs]
        if illegal_kwarg:
            raise ValueError(
                f"Arguments {illegal_kwarg} cannot exist in kwargs, since FastEstimator will later directly use them in"
                " sklearn.metrics.matthews_corrcoef()")
| 45.1875 | 120 | 0.657182 |
from typing import Union, Iterable
import numpy as np
from sklearn.metrics import matthews_corrcoef
from fastestimator.trace.meta._per_ds import per_ds
from fastestimator.trace.trace import Trace
from fastestimator.util.data import Any, Data, Dict
from fastestimator.util.traceability_util import traceable
from fastestimator.util.util import to_number
@per_ds
@traceable()
class MCC(Trace):
    """Computes the Matthews Correlation Coefficient for a set of predictions.

    Ideal value is 1; 0 means predictions are uncorrelated with the labels;
    negative values indicate anti-correlation.
    """
    def __init__(self,
                 true_key: str,
                 pred_key: str,
                 mode: Union[None, str, Iterable[str]] = ("eval", "test"),
                 ds_id: Union[None, str, Iterable[str]] = None,
                 output_name: str = "mcc",
                 per_ds: bool = True,
                 **kwargs) -> None:
        # Reject kwargs that would collide with matthews_corrcoef's own args.
        MCC.check_kwargs(kwargs)
        super().__init__(inputs=(true_key, pred_key), mode=mode, outputs=output_name, ds_id=ds_id)
        self.kwargs = kwargs       # forwarded to sklearn.metrics.matthews_corrcoef
        self.y_true = []           # per-epoch ground-truth accumulator
        self.y_pred = []           # per-epoch prediction accumulator
        self.per_ds = per_ds

    @property
    def true_key(self) -> str:
        # First input key: ground truth.
        return self.inputs[0]

    @property
    def pred_key(self) -> str:
        # Second input key: predictions.
        return self.inputs[1]

    def on_epoch_begin(self, data: Data) -> None:
        # Reset accumulators at the start of every epoch.
        self.y_true = []
        self.y_pred = []

    def on_batch_end(self, data: Data) -> None:
        y_true, y_pred = to_number(data[self.true_key]), to_number(data[self.pred_key])
        # Collapse one-hot / per-class arrays down to class indices; round
        # single-score predictions to the nearest class.
        if y_true.shape[-1] > 1 and y_true.ndim > 1:
            y_true = np.argmax(y_true, axis=-1)
        if y_pred.shape[-1] > 1 and y_pred.ndim > 1:
            y_pred = np.argmax(y_pred, axis=-1)
        else:
            y_pred = np.round(y_pred)
        assert y_pred.size == y_true.size
        self.y_true.extend(y_true)
        self.y_pred.extend(y_pred)

    def on_epoch_end(self, data: Data) -> None:
        # Compute the epoch-level MCC and write it to the logs.
        data.write_with_log(self.outputs[0], matthews_corrcoef(y_true=self.y_true, y_pred=self.y_pred, **self.kwargs))

    @staticmethod
    def check_kwargs(kwargs: Dict[str, Any]) -> None:
        """Raise ValueError if `kwargs` contains "y_true" or "y_pred"."""
        blacklist = ["y_true", "y_pred"]
        illegal_kwarg = [x for x in blacklist if x in kwargs]
        if illegal_kwarg:
            raise ValueError(
                f"Arguments {illegal_kwarg} cannot exist in kwargs, since FastEstimator will later directly use them in"
                " sklearn.metrics.matthews_corrcoef()")
| true | true |
f710a6a1dbb11d687aae20d29ee76bd20dcd3030 | 89 | py | Python | blackdog/admin.py | UncleGoogle/dafipost | 5e19d6a69dde9b7e5267bbdba680906bdb5e56eb | [
"MIT"
] | null | null | null | blackdog/admin.py | UncleGoogle/dafipost | 5e19d6a69dde9b7e5267bbdba680906bdb5e56eb | [
"MIT"
] | 1 | 2021-02-08T01:44:32.000Z | 2021-02-08T01:44:32.000Z | blackdog/admin.py | UncleGoogle/dafipost | 5e19d6a69dde9b7e5267bbdba680906bdb5e56eb | [
"MIT"
] | null | null | null | from django.contrib import admin
from . import models
# Make the Bark model manageable through the Django admin site.
admin.site.register(models.Bark)
| 14.833333 | 32 | 0.797753 | from django.contrib import admin
from . import models
# Make the Bark model manageable through the Django admin site.
admin.site.register(models.Bark)
| true | true |
f710a6c24308bd6ba7693092f6d121cecdb9b7b8 | 1,607 | py | Python | inaccel/keras/applications/imagenet_utils.py | inaccel/keras | bebd0ca930b9e2c2aee320e2e40b3d00cd15e46a | [
"Apache-2.0"
] | 1 | 2021-01-27T12:20:35.000Z | 2021-01-27T12:20:35.000Z | inaccel/keras/applications/imagenet_utils.py | inaccel/keras | bebd0ca930b9e2c2aee320e2e40b3d00cd15e46a | [
"Apache-2.0"
] | null | null | null | inaccel/keras/applications/imagenet_utils.py | inaccel/keras | bebd0ca930b9e2c2aee320e2e40b3d00cd15e46a | [
"Apache-2.0"
] | null | null | null | """Utilities for ImageNet data preprocessing & prediction decoding.
"""
import json
import keras.utils.data_utils as data_utils
# Class-index mapping, populated lazily on first decode, and the URL it is
# downloaded from.
CLASS_INDEX = None
CLASS_INDEX_PATH = ('https://storage.googleapis.com/download.tensorflow.org/'
                    'data/imagenet_class_index.json')
def decode_predictions(preds, top=5):
    """Decodes the prediction of an ImageNet model.

    Each row of `preds` carries the indices of the top-5 predicted classes;
    the class-index mapping is downloaded (and cached) on first use.

    # Arguments
        preds: Numpy array encoding a batch of predictions.
        top: Integer, how many top-guesses to return.

    # Returns
        A list of lists of top class prediction tuples
        `(class_name, class_description)`, one list per sample in the batch.

    # Raises
        ValueError: In case of invalid shape of the `preds` array
            (must be 2D).
    """
    global CLASS_INDEX
    if len(preds.shape) != 2 or preds.shape[1] != 5:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 5)). '
                         'Found array with shape: ' + str(preds.shape))
    if CLASS_INDEX is None:
        # Fetch the index file once and cache it under ~/.keras/models.
        fpath = data_utils.get_file(
            'imagenet_class_index.json',
            CLASS_INDEX_PATH,
            cache_subdir='models',
            file_hash='c2c37ea517e94d9795004a39431a14cb')
        with open(fpath) as f:
            CLASS_INDEX = json.load(f)
    return [
        [tuple(CLASS_INDEX[str(i)]) for i in pred[:min(top, 5)]]
        for pred in preds
    ]
| 33.479167 | 77 | 0.613566 |
import json
import keras.utils.data_utils as data_utils
# Class-index mapping, populated lazily on first decode, and the URL it is
# downloaded from.
CLASS_INDEX = None
CLASS_INDEX_PATH = ('https://storage.googleapis.com/download.tensorflow.org/'
                    'data/imagenet_class_index.json')
def decode_predictions(preds, top=5):
    """Decodes the prediction of an ImageNet model.

    # Arguments
        preds: Numpy array of shape (samples, 5) holding top-5 class indices.
        top: Integer, how many top-guesses to return (capped at 5).

    # Returns
        A list of lists of `(class_name, class_description)` tuples, one list
        per sample in the batch.

    # Raises
        ValueError: In case of invalid shape of the `preds` array (must be 2D).
    """
    global CLASS_INDEX
    if len(preds.shape) != 2 or preds.shape[1] != 5:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 5)). '
                         'Found array with shape: ' + str(preds.shape))
    if CLASS_INDEX is None:
        # Download the index file once and cache it under ~/.keras/models.
        fpath = data_utils.get_file(
            'imagenet_class_index.json',
            CLASS_INDEX_PATH,
            cache_subdir='models',
            file_hash='c2c37ea517e94d9795004a39431a14cb')
        with open(fpath) as f:
            CLASS_INDEX = json.load(f)
    results = []
    for pred in preds:
        top_indices = pred[:min(top, 5)]
        result = [tuple(CLASS_INDEX[str(i)]) for i in top_indices]
        results.append(result)
    return results
| true | true |
f710a7f55ec93a0f5804d75f8bd5493b3a4d1321 | 3,798 | py | Python | tests/accounts/ecdsa_test.py | mustafa-travisci/lto-api.python | 0493a46b69575e94d09a038dadf472b46f88d036 | [
"MIT"
] | null | null | null | tests/accounts/ecdsa_test.py | mustafa-travisci/lto-api.python | 0493a46b69575e94d09a038dadf472b46f88d036 | [
"MIT"
] | null | null | null | tests/accounts/ecdsa_test.py | mustafa-travisci/lto-api.python | 0493a46b69575e94d09a038dadf472b46f88d036 | [
"MIT"
] | null | null | null | import copy
from lto.accounts.ecdsa.account_factory_ecdsa import AccountFactoryECDSA
import base58
import pytest
from lto.transactions.anchor import Anchor
class TestAccountECDSA():
    """Tests for the secp256k1 (ECDSA) account factory."""
    factory = AccountFactoryECDSA('L')  # 'L' = mainnet chain id
    seed = 'divert manage prefer child kind maximum october hand manual connect fitness small symptom range sleep'
    account = factory.create()

    def test_make_key(self):
        # Key derivation from the fixed seed must be deterministic.
        assert self.factory._MakeKey(self.seed).to_string() == (b'\xa7\x90:j\x80\xdb\x00}|~\x9e\x8cq]S\x97\x92\x97W\xfe\x17h>\xd5\xc1b\xa8\x1c|\x80\xc6%')

    #@pytest.mark.skip(reason="Secp256k1 under construction")
    def test_create_address(self):
        # The address derived from the account's public key must round-trip.
        assert self.factory.create_address(self.account.public_key) == self.account.address

    @pytest.mark.skip(reason="Secp256k1 under construction")
    def test_create_sign_keys(self):
        private_key, public_key, key_type = self.factory.create_sign_keys(self.seed)
        assert self.account.public_key == public_key
        assert self.account.private_key == private_key
        assert key_type == 'secp256k1'

    @pytest.mark.skip(reason="Secp256k1 under construction")
    def test_create_from_public(self):
        # NOTE(review): local 'seed' shadows the identical class attribute.
        seed = 'divert manage prefer child kind maximum october hand manual connect fitness small symptom range sleep'
        account = AccountFactoryECDSA('T').create_from_seed(seed)
        account2 = AccountFactoryECDSA('T').create_from_public_key(account.public_key)
        # object
        assert account.address == account2.address
        assert account.public_key == account2.public_key
        # bytes
        public_key = b"5\xcf4\xeb\xe0\xd5,s\x00t\xc6to\x8b\xd0\x0e\xf8N\xe6\xa1\x1d\x13\x18s+\x11\x82\x7fR\x8d='\x03!a\x13H\xca=]\x8aV\xf71\x16C\x0c\x9ad{\x14z\x8e1\x9dg\x8b\xb2\xf2\x9e\x0fo\xa7\x9d"
        account3 = AccountFactoryECDSA('T').create_from_public_key(public_key)
        assert account.address == account3.address
        assert account.public_key == account3.public_key
        # b58 str
        account4 = AccountFactoryECDSA('T').create_from_public_key(base58.b58encode(public_key))
        assert account.address == account4.address
        assert account.public_key == account4.public_key

    @pytest.mark.skip(reason="Secp256k1 under construction")
    def test_create_from_private_key(self):
        seed = 'divert manage prefer child kind maximum october hand manual connect fitness small symptom range sleep'
        account = AccountFactoryECDSA('T').create_from_seed(seed)
        account2 = AccountFactoryECDSA('T').create_from_private_key(account.private_key)
        # object
        assert account.address == account2.address
        assert account.private_key == account2.private_key
        assert account.public_key == account2.public_key
        # bytes
        private_key = b'\xa7\x90:j\x80\xdb\x00}|~\x9e\x8cq]S\x97\x92\x97W\xfe\x17h>\xd5\xc1b\xa8\x1c|\x80\xc6%'
        account3 = AccountFactoryECDSA('T').create_from_private_key(private_key)
        assert account.address == account3.address
        assert account.private_key == account3.private_key
        assert account.public_key == account3.public_key
        # b58 str
        account4 = AccountFactoryECDSA('T').create_from_private_key(base58.b58encode(private_key))
        assert account.address == account4.address
        assert account.private_key == account4.private_key
        assert account.public_key == account4.public_key

    def test_verify_random_account_signed_transaction(self):
        # Signing a transaction and verifying the signature against the
        # unsigned binary form must succeed.
        account = self.factory.create()
        transaction = Anchor('rtrtrtr')
        transaction.sign_with(account)
        cloned_tx = copy.copy(transaction)
        cloned_tx.proofs = []  # verify against the unsigned payload
        message = cloned_tx.to_binary()
        assert account.verify_signature(message, transaction.proofs[0]) is True
| 50.64 | 199 | 0.718273 | import copy
from lto.accounts.ecdsa.account_factory_ecdsa import AccountFactoryECDSA
import base58
import pytest
from lto.transactions.anchor import Anchor
class TestAccountECDSA():
    """Tests for the secp256k1 (ECDSA) account factory."""
    factory = AccountFactoryECDSA('L')  # 'L' = mainnet chain id
    seed = 'divert manage prefer child kind maximum october hand manual connect fitness small symptom range sleep'
    account = factory.create()

    def test_make_key(self):
        # Key derivation from the fixed seed must be deterministic.
        assert self.factory._MakeKey(self.seed).to_string() == (b'\xa7\x90:j\x80\xdb\x00}|~\x9e\x8cq]S\x97\x92\x97W\xfe\x17h>\xd5\xc1b\xa8\x1c|\x80\xc6%')

    def test_create_address(self):
        # The address derived from the account's public key must round-trip.
        assert self.factory.create_address(self.account.public_key) == self.account.address

    @pytest.mark.skip(reason="Secp256k1 under construction")
    def test_create_sign_keys(self):
        private_key, public_key, key_type = self.factory.create_sign_keys(self.seed)
        assert self.account.public_key == public_key
        assert self.account.private_key == private_key
        assert key_type == 'secp256k1'

    @pytest.mark.skip(reason="Secp256k1 under construction")
    def test_create_from_public(self):
        # NOTE(review): local 'seed' shadows the identical class attribute.
        seed = 'divert manage prefer child kind maximum october hand manual connect fitness small symptom range sleep'
        account = AccountFactoryECDSA('T').create_from_seed(seed)
        account2 = AccountFactoryECDSA('T').create_from_public_key(account.public_key)
        # object round-trip
        assert account.address == account2.address
        assert account.public_key == account2.public_key
        # raw-bytes round-trip
        public_key = b"5\xcf4\xeb\xe0\xd5,s\x00t\xc6to\x8b\xd0\x0e\xf8N\xe6\xa1\x1d\x13\x18s+\x11\x82\x7fR\x8d='\x03!a\x13H\xca=]\x8aV\xf71\x16C\x0c\x9ad{\x14z\x8e1\x9dg\x8b\xb2\xf2\x9e\x0fo\xa7\x9d"
        account3 = AccountFactoryECDSA('T').create_from_public_key(public_key)
        assert account.address == account3.address
        assert account.public_key == account3.public_key
        # b58 str
        account4 = AccountFactoryECDSA('T').create_from_public_key(base58.b58encode(public_key))
        assert account.address == account4.address
        assert account.public_key == account4.public_key

    @pytest.mark.skip(reason="Secp256k1 under construction")
    def test_create_from_private_key(self):
        seed = 'divert manage prefer child kind maximum october hand manual connect fitness small symptom range sleep'
        account = AccountFactoryECDSA('T').create_from_seed(seed)
        account2 = AccountFactoryECDSA('T').create_from_private_key(account.private_key)
        # object
        assert account.address == account2.address
        assert account.private_key == account2.private_key
        assert account.public_key == account2.public_key
        # bytes
        private_key = b'\xa7\x90:j\x80\xdb\x00}|~\x9e\x8cq]S\x97\x92\x97W\xfe\x17h>\xd5\xc1b\xa8\x1c|\x80\xc6%'
        account3 = AccountFactoryECDSA('T').create_from_private_key(private_key)
        assert account.address == account3.address
        assert account.private_key == account3.private_key
        assert account.public_key == account3.public_key
        # b58 str
        account4 = AccountFactoryECDSA('T').create_from_private_key(base58.b58encode(private_key))
        assert account.address == account4.address
        assert account.private_key == account4.private_key
        assert account.public_key == account4.public_key

    def test_verify_random_account_signed_transaction(self):
        # Signing a transaction and verifying the signature against the
        # unsigned binary form must succeed.
        account = self.factory.create()
        transaction = Anchor('rtrtrtr')
        transaction.sign_with(account)
        cloned_tx = copy.copy(transaction)
        cloned_tx.proofs = []  # verify against the unsigned payload
        message = cloned_tx.to_binary()
        assert account.verify_signature(message, transaction.proofs[0]) is True
| true | true |
f710a859d9f52e08d86ed3ddb3b5b3af0b18ffd1 | 1,351 | py | Python | consolemenu/items/command_item.py | Gimli76/console-menu | febd66a49c199fb349a54499ff267c15e0e04bd9 | [
"MIT"
] | 1 | 2021-02-17T21:18:32.000Z | 2021-02-17T21:18:32.000Z | consolemenu/items/command_item.py | Gimli76/console-menu | febd66a49c199fb349a54499ff267c15e0e04bd9 | [
"MIT"
] | 10 | 2020-06-05T23:30:34.000Z | 2021-09-22T18:56:54.000Z | consolemenu/items/command_item.py | Gimli76/console-menu | febd66a49c199fb349a54499ff267c15e0e04bd9 | [
"MIT"
] | null | null | null | import subprocess
from consolemenu.items import ExternalItem
class CommandItem(ExternalItem):
    """
    A menu item to execute a console command
    """
    def __init__(self, text, command, arguments=None, menu=None, should_exit=False):
        """
        :ivar str command: The console command to be executed
        :ivar list[str] arguments: An optional list of string arguments to be passed to the command
        :ivar int exit_status: the exit status of the command, None if it hasn't been run yet
        """
        super(CommandItem, self).__init__(text=text, menu=menu, should_exit=should_exit)
        self.command = command
        self.arguments = arguments if arguments else []
        self.exit_status = None

    def action(self):
        """
        Run the command through the shell and record its exit status.
        """
        commandline = "{0} {1}".format(self.command, " ".join(self.arguments))
        try:
            # Python 3.5+: subprocess.run is available.
            self.exit_status = subprocess.run(commandline, shell=True).returncode
        except AttributeError:
            # Older interpreters fall back to subprocess.call.
            self.exit_status = subprocess.call(commandline, shell=True)

    def get_return(self):
        """
        :return: the exit status of the command, or None if it has not run yet
        :rtype: int
        """
        return self.exit_status
| 30.704545 | 99 | 0.623242 | import subprocess
from consolemenu.items import ExternalItem
class CommandItem(ExternalItem):
    """A menu item that executes a console command when selected."""
    def __init__(self, text, command, arguments=None, menu=None, should_exit=False):
        """
        :ivar str command: The console command to be executed
        :ivar list[str] arguments: An optional list of string arguments to be passed to the command
        :ivar int exit_status: the exit status of the command, None if it hasn't been run yet
        """
        super(CommandItem, self).__init__(text=text, menu=menu, should_exit=should_exit)
        self.command = command
        if arguments:
            self.arguments = arguments
        else:
            self.arguments = []
        self.exit_status = None

    def action(self):
        """Run the command through the shell and record its exit status."""
        commandline = "{0} {1}".format(self.command, " ".join(self.arguments))
        try:
            # Python 3.5+ path: subprocess.run exists.
            completed_process = subprocess.run(commandline, shell=True)
            self.exit_status = completed_process.returncode
        except AttributeError:
            # Fallback for interpreters without subprocess.run.
            self.exit_status = subprocess.call(commandline, shell=True)

    def get_return(self):
        """
        :return: the exit status of the command, or None if it has not run yet
        :rtype: int
        """
        return self.exit_status
| true | true |
f710a98119943a2f7fadb0a04b71a3e85f1d84f5 | 722 | py | Python | tests/binpacking/solver/test_statistics.py | Jxtopher/binpacking | 6ce2a1cd071a0660c32f17f05298dde42942a2d9 | [
"MIT"
] | 1 | 2021-12-27T12:37:58.000Z | 2021-12-27T12:37:58.000Z | tests/binpacking/solver/test_statistics.py | Jxtopher/binpacking | 6ce2a1cd071a0660c32f17f05298dde42942a2d9 | [
"MIT"
] | null | null | null | tests/binpacking/solver/test_statistics.py | Jxtopher/binpacking | 6ce2a1cd071a0660c32f17f05298dde42942a2d9 | [
"MIT"
] | null | null | null | from tests.base import BaseTestCase
from binpacking.solver.data_structure.solution import Solution
from binpacking.solver.statistics import Statistics, StatisticIteration, StatisticFitness
class StatisticsTest(BaseTestCase):
    """Checks that Statistics aggregates its registered collectors."""

    def test_statistics(self) -> None:
        # Register an iteration counter and a fitness reporter.
        stats = Statistics()
        stats.add_statistic(StatisticIteration())
        stats.add_statistic(StatisticFitness())

        solution = Solution(4)
        solution.set_fitness(42.0)

        # The iteration counter must advance on every run() call.
        for expected_iteration in (0, 1):
            report = stats.run(solution)
            self.assertTrue(report['iteration'] == expected_iteration)
| 28.88 | 89 | 0.691136 | from tests.base import BaseTestCase
from binpacking.solver.data_structure.solution import Solution
from binpacking.solver.statistics import Statistics, StatisticIteration, StatisticFitness
class StatisticsTest(BaseTestCase):
def test_statistics(self) -> None:
iteration = StatisticIteration()
fitness = StatisticFitness()
statistics = Statistics()
statistics.add_statistic(iteration)
statistics.add_statistic(fitness)
expected_size = 4
sol = Solution(expected_size)
sol.set_fitness(float(42))
r = statistics.run(sol)
self.assertTrue(r['iteration'] == 0)
r = statistics.run(sol)
self.assertTrue(r['iteration'] == 1)
| true | true |
f710aa5ecac09bdab7ddb4892fe162790bf8b77d | 6,807 | py | Python | sdk/python/pulumi_aws/athena/database.py | Otanikotani/pulumi-aws | 00e2b352da42c5b1b0ec7b4760eec5ad2b23ff21 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/athena/database.py | Otanikotani/pulumi-aws | 00e2b352da42c5b1b0ec7b4760eec5ad2b23ff21 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/athena/database.py | Otanikotani/pulumi-aws | 00e2b352da42c5b1b0ec7b4760eec5ad2b23ff21 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Database']
class Database(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
bucket: Optional[pulumi.Input[str]] = None,
encryption_configuration: Optional[pulumi.Input[pulumi.InputType['DatabaseEncryptionConfigurationArgs']]] = None,
force_destroy: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides an Athena database.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
hoge_bucket = aws.s3.Bucket("hogeBucket")
hoge_database = aws.athena.Database("hogeDatabase",
name="database_name",
bucket=hoge_bucket.bucket)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] bucket: Name of s3 bucket to save the results of the query execution.
:param pulumi.Input[pulumi.InputType['DatabaseEncryptionConfigurationArgs']] encryption_configuration: The encryption key block AWS Athena uses to decrypt the data in S3, such as an AWS Key Management Service (AWS KMS) key. An `encryption_configuration` block is documented below.
:param pulumi.Input[bool] force_destroy: A boolean that indicates all tables should be deleted from the database so that the database can be destroyed without error. The tables are *not* recoverable.
:param pulumi.Input[str] name: Name of the database to create.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if bucket is None:
raise TypeError("Missing required property 'bucket'")
__props__['bucket'] = bucket
__props__['encryption_configuration'] = encryption_configuration
__props__['force_destroy'] = force_destroy
__props__['name'] = name
super(Database, __self__).__init__(
'aws:athena/database:Database',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
bucket: Optional[pulumi.Input[str]] = None,
encryption_configuration: Optional[pulumi.Input[pulumi.InputType['DatabaseEncryptionConfigurationArgs']]] = None,
force_destroy: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None) -> 'Database':
"""
Get an existing Database resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] bucket: Name of s3 bucket to save the results of the query execution.
:param pulumi.Input[pulumi.InputType['DatabaseEncryptionConfigurationArgs']] encryption_configuration: The encryption key block AWS Athena uses to decrypt the data in S3, such as an AWS Key Management Service (AWS KMS) key. An `encryption_configuration` block is documented below.
:param pulumi.Input[bool] force_destroy: A boolean that indicates all tables should be deleted from the database so that the database can be destroyed without error. The tables are *not* recoverable.
:param pulumi.Input[str] name: Name of the database to create.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["bucket"] = bucket
__props__["encryption_configuration"] = encryption_configuration
__props__["force_destroy"] = force_destroy
__props__["name"] = name
return Database(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def bucket(self) -> pulumi.Output[str]:
"""
Name of s3 bucket to save the results of the query execution.
"""
return pulumi.get(self, "bucket")
@property
@pulumi.getter(name="encryptionConfiguration")
def encryption_configuration(self) -> pulumi.Output[Optional['outputs.DatabaseEncryptionConfiguration']]:
"""
The encryption key block AWS Athena uses to decrypt the data in S3, such as an AWS Key Management Service (AWS KMS) key. An `encryption_configuration` block is documented below.
"""
return pulumi.get(self, "encryption_configuration")
@property
@pulumi.getter(name="forceDestroy")
def force_destroy(self) -> pulumi.Output[Optional[bool]]:
"""
A boolean that indicates all tables should be deleted from the database so that the database can be destroyed without error. The tables are *not* recoverable.
"""
return pulumi.get(self, "force_destroy")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the database to create.
"""
return pulumi.get(self, "name")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 46.623288 | 288 | 0.665785 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Database']
class Database(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
bucket: Optional[pulumi.Input[str]] = None,
encryption_configuration: Optional[pulumi.Input[pulumi.InputType['DatabaseEncryptionConfigurationArgs']]] = None,
force_destroy: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if bucket is None:
raise TypeError("Missing required property 'bucket'")
__props__['bucket'] = bucket
__props__['encryption_configuration'] = encryption_configuration
__props__['force_destroy'] = force_destroy
__props__['name'] = name
super(Database, __self__).__init__(
'aws:athena/database:Database',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
bucket: Optional[pulumi.Input[str]] = None,
encryption_configuration: Optional[pulumi.Input[pulumi.InputType['DatabaseEncryptionConfigurationArgs']]] = None,
force_destroy: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None) -> 'Database':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["bucket"] = bucket
__props__["encryption_configuration"] = encryption_configuration
__props__["force_destroy"] = force_destroy
__props__["name"] = name
return Database(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def bucket(self) -> pulumi.Output[str]:
return pulumi.get(self, "bucket")
@property
@pulumi.getter(name="encryptionConfiguration")
def encryption_configuration(self) -> pulumi.Output[Optional['outputs.DatabaseEncryptionConfiguration']]:
return pulumi.get(self, "encryption_configuration")
@property
@pulumi.getter(name="forceDestroy")
def force_destroy(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "force_destroy")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
f710aa676b7ed87b52497df6e971ab5a80b028fe | 1,281 | py | Python | typos.py | Ulzahk/Practica-Selenium-Python | f2d0f215afb8ebba019544b3eb60cf2f7f23ddbf | [
"MIT"
] | null | null | null | typos.py | Ulzahk/Practica-Selenium-Python | f2d0f215afb8ebba019544b3eb60cf2f7f23ddbf | [
"MIT"
] | null | null | null | typos.py | Ulzahk/Practica-Selenium-Python | f2d0f215afb8ebba019544b3eb60cf2f7f23ddbf | [
"MIT"
] | null | null | null | import unittest
from selenium import webdriver
class Typos(unittest.TestCase):
    # Smoke test against the-internet.herokuapp.com "Typos" page: the page
    # randomly injects a typo, so the test refreshes until the clean sentence
    # appears, counting attempts.
    def setUp(self):
        # Requires a chromedriver binary next to the test file.
        self.driver = webdriver.Chrome(executable_path = r'./chromedriver.exe')
        driver = self.driver
        driver.get('http://the-internet.herokuapp.com/')
        driver.find_element_by_link_text('Typos').click()
    def test_find_typo(self):
        """Refresh the Typos page until the typo-free sentence shows, counting tries."""
        driver = self.driver
        paragraph_to_check = driver.find_element_by_css_selector('#content > div > p:nth-child(3)')
        text_to_check = paragraph_to_check.text
        print(text_to_check)
        tries = 1
        found = False
        correct_text = 'Sometimes you\'ll see a typo, other times you won\'t.'
        # NOTE(review): the paragraph is re-read *before* driver.refresh(), so
        # each comparison appears to see the page state from the previous
        # iteration — confirm whether read-then-refresh ordering is intended.
        while text_to_check != correct_text:
            paragraph_to_check = driver.find_element_by_css_selector('#content > div > p:nth-child(3)')
            text_to_check = paragraph_to_check.text
            driver.refresh()
            tries += 1
        # text_to_check == correct_text here, so this loop runs once: it
        # refreshes the page again and sets found. NOTE(review): the extra
        # refresh and the loop itself look redundant — consider removing.
        while not found:
            if text_to_check == correct_text:
                driver.refresh()
                found = True
        self.assertEqual(found, True)
        print(f'it took {tries} to find the typo')
    def tearDown(self):
        self.driver.close()
if __name__ == '__main__':
unittest.main() | 29.113636 | 103 | 0.615925 | import unittest
from selenium import webdriver
class Typos(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome(executable_path = r'./chromedriver.exe')
driver = self.driver
driver.get('http://the-internet.herokuapp.com/')
driver.find_element_by_link_text('Typos').click()
def test_find_typo(self):
driver = self.driver
paragraph_to_check = driver.find_element_by_css_selector('#content > div > p:nth-child(3)')
text_to_check = paragraph_to_check.text
print(text_to_check)
tries = 1
found = False
correct_text = 'Sometimes you\'ll see a typo, other times you won\'t.'
while text_to_check != correct_text:
paragraph_to_check = driver.find_element_by_css_selector('#content > div > p:nth-child(3)')
text_to_check = paragraph_to_check.text
driver.refresh()
tries += 1
while not found:
if text_to_check == correct_text:
driver.refresh()
found = True
self.assertEqual(found, True)
print(f'it took {tries} to find the typo')
def tearDown(self):
self.driver.close()
if __name__ == '__main__':
unittest.main() | true | true |
f710aa9ee8bb044fb5cf58191f744088af8709bd | 1,030 | py | Python | sherlock-and-the-valid-string.py | gauravkanoongo/cp | f33cec95c121876a737b0a90faa2a51238be52a3 | [
"MIT"
] | null | null | null | sherlock-and-the-valid-string.py | gauravkanoongo/cp | f33cec95c121876a737b0a90faa2a51238be52a3 | [
"MIT"
] | null | null | null | sherlock-and-the-valid-string.py | gauravkanoongo/cp | f33cec95c121876a737b0a90faa2a51238be52a3 | [
"MIT"
] | 1 | 2021-09-19T13:04:41.000Z | 2021-09-19T13:04:41.000Z | #!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'isValid' function below.
#
# The function is expected to return a STRING.
# The function accepts STRING s as parameter.
#
def isValid(s):
    """Sherlock's "valid string" check.

    A string is valid when every distinct character occurs the same number
    of times, or when removing exactly one occurrence of exactly one
    character makes that true.

    :param s: the string to test (may be empty).
    :return: "YES" if the string is valid, otherwise "NO".
    """
    if not s:
        # Vacuously valid (matches the previous behaviour for empty input).
        return "YES"
    # Occurrences per distinct character, e.g. "aabbbc" -> {a: 2, b: 3, c: 1}.
    char_counts = {ch: s.count(ch) for ch in set(s)}
    counts = list(char_counts.values())
    # How many characters share each occurrence count, e.g. {2: 1, 3: 1, 1: 1}.
    count_freq = {c: counts.count(c) for c in set(counts)}

    if len(count_freq) == 1:
        # Every character already occurs equally often.
        return "YES"
    if len(count_freq) > 2:
        # A single removal can reconcile at most two distinct counts.
        return "NO"

    hi, lo = max(count_freq), min(count_freq)
    if lo == 1 and count_freq[lo] == 1:
        # Exactly one character occurs once: delete it entirely.
        return "YES"
    if hi - lo == 1 and count_freq[hi] == 1:
        # Exactly one character sits one occurrence above all others:
        # delete one occurrence of it.  (Bug fix: the old code accepted any
        # counts differing by 1 as long as *either* class held a single
        # character, wrongly returning "YES" for e.g. "aabbbccc".)
        return "YES"
    return "NO"
if __name__ == '__main__':
    # NOTE(review): 'CON' is the Windows console device, so the result is
    # echoed to the terminal; HackerRank harnesses normally write to
    # os.environ['OUTPUT_PATH'] — confirm the intended output target.
    fptr = open('CON', 'w')
    s = input()
    result = isValid(s)
    fptr.write(result + '\n')
    fptr.close()
| 20.6 | 66 | 0.526214 |
import math
import os
import random
import re
import sys
def isValid(s):
freq = {i : s.count(i) for i in set(s)}
fv = list(freq.values())
ffreq = {v : list(fv).count(v) for v in set(fv)}
print("s:",s, "\nfreq:", freq, "\nfv:", fv, "\nffreq:", ffreq)
if len(ffreq)>2:
return "NO"
elif len(ffreq)<=1:
return "YES"
else:
mx = max(ffreq)
mn = min(ffreq)
print("mx:", mx, " mn:", mn)
if (mn==1) and ffreq.get(mn, 0)<=1:
return "YES"
if abs(mx - mn)>1:
return "NO"
if min(ffreq.values()) > 1:
return "NO"
else:
return "YES"
if __name__ == '__main__':
fptr = open('CON', 'w')
s = input()
result = isValid(s)
fptr.write(result + '\n')
fptr.close()
| true | true |
f710aac2afd303f05b5049f4348f7aafb94efd9a | 546 | py | Python | account/account_sample.py | appenz/minebot | e1bd18053873c4d686de57e014a2cd8f27d4dd4c | [
"Apache-2.0"
] | 11 | 2021-08-28T18:21:43.000Z | 2022-03-08T16:08:55.000Z | account/account_sample.py | appenz/minebot | e1bd18053873c4d686de57e014a2cd8f27d4dd4c | [
"Apache-2.0"
] | 3 | 2022-02-05T17:47:53.000Z | 2022-03-10T17:36:48.000Z | account/account_sample.py | appenz/minebot | e1bd18053873c4d686de57e014a2cd8f27d4dd4c | [
"Apache-2.0"
] | 5 | 2022-02-04T19:12:50.000Z | 2022-03-18T20:54:00.000Z | #
# Account information
#
# Copy this file to account.py and fill in the real values for the Minecraft account.
#
#
#
#
account = {
"user" : 'your@login.com',
"password" : 'your_password',
"master" : 'minecraft_name_who_the_bot_will_listen_to',
"host" : 'exampleserver.whatever.com',
"version" : '1.16.5',
}
#
# List of world locations you can use in commands
#
locations = {
"minedrop": [29,13,-19],
"farmdrop": [42.5,89,-15.5],
"minecenter": [20.5,12,-23.5],
} | 21 | 85 | 0.569597 |
account = {
"user" : 'your@login.com',
"password" : 'your_password',
"master" : 'minecraft_name_who_the_bot_will_listen_to',
"host" : 'exampleserver.whatever.com',
"version" : '1.16.5',
}
locations = {
"minedrop": [29,13,-19],
"farmdrop": [42.5,89,-15.5],
"minecenter": [20.5,12,-23.5],
} | true | true |
f710aad9fae96e7df461ea9dc6b3959777fae07a | 3,074 | py | Python | apps/courts/views.py | gooseswan2/rent-a-court | 2bba4b94e2b1a3deae6f6e0e15f35aef1e8aa963 | [
"MIT"
] | null | null | null | apps/courts/views.py | gooseswan2/rent-a-court | 2bba4b94e2b1a3deae6f6e0e15f35aef1e8aa963 | [
"MIT"
] | null | null | null | apps/courts/views.py | gooseswan2/rent-a-court | 2bba4b94e2b1a3deae6f6e0e15f35aef1e8aa963 | [
"MIT"
] | null | null | null | from django.shortcuts import render,redirect
from django.contrib import messages
from django.template import Context
from django.http import HttpResponse
from .models import Court, CourtManager, SelectedCourt
from apps.users.models import User
from datetime import datetime
from decimal import Decimal
from django.contrib.auth.decorators import login_required
# Create your views here.
def index(request):
    # Landing page; other views also render this template with a login prompt.
    return render(request, "courts/index.html")
def main(request):
    # Main page listing every court.
    context = {
        'court' : Court.objects.all()
    }
    return render(request, "courts/main.html", context)
def court(request, courtid):
    # Detail page for one court.  NOTE(review): Court.objects.get raises
    # Court.DoesNotExist (HTTP 500) for unknown ids — consider
    # get_object_or_404 instead.
    context = {
        'one_court' : Court.objects.get(id=courtid)
    }
    return render(request, "courts/courts.html", context)
def select(request):
    """Render the court-selection page; prompt anonymous users to log in."""
    if 'user_id' not in request.session:
        return render(request, "courts/index.html", {'message': "Please login"})
    available_courts = Court.objects.all()
    return render(request, "courts/select.html", {'courts': available_courts})
"""
This is logic that checks the times that a court has been reserved.
"""
def schedule(request):
    """Create a court reservation from POSTed begin/end times.

    Expects POST fields ``courtid``, ``timein`` and ``timeout`` in the
    format ``YYYY-MM-DD HH:MM``.  Invalid requests re-render the booking
    page with an explanatory ``message``; a valid future window of less
    than 4 hours creates a ``SelectedCourt`` row.

    Raises ``ValueError`` (unhandled, as before) if the POSTed times do
    not match the expected format.
    """
    if 'user_id' not in request.session:
        # Bug fix: the template path was truncated to "courts/", which could
        # never resolve; anonymous users now get the login page like the
        # sibling views.
        return render(request, "courts/index.html", {'message': "Please login"})

    usr = User(id=request.session['user_id'])
    crt = Court.objects.get(id=request.POST['courtid'])
    intime = request.POST['timein']
    outtime = request.POST['timeout']
    dform = "%Y-%m-%d %H:%M"
    start = datetime.strptime(intime, dform)
    end = datetime.strptime(outtime, dform)
    # Bug fix: use total_seconds() -- timedelta.seconds ignores whole days,
    # so e.g. a 25-hour booking used to be accepted and priced as 1 hour.
    hours = (end - start).total_seconds() / 3600

    context = {'courts': Court.objects.all()}
    if end < start:
        context['message'] = "End date/time is earlier than begin date/time."
    elif start <= datetime.now():
        context['message'] = "Begin date/time is in the past."
    elif not 0 < hours < 4:
        context['message'] = "Scheduled time is too long."
    else:
        total_price = Decimal(hours) * crt.price
        SelectedCourt.objects.create(
            user=usr, court=crt, timein=intime, timeout=outtime,
            total_price=total_price)
    return render(request, "courts/select.html", context)
"""
This presents a dashboard which shows court reservations.
"""
def dashboard(request):
    """Show the logged-in user's reservations; anonymous users see the login page."""
    if 'user_id' not in request.session:
        return render(request, "courts/index.html", {'message': "Please login"})
    current_user = User(id=request.session['user_id'])
    reservations = SelectedCourt.objects.filter(user=current_user)
    return render(request, "courts/dashboard.html", {'court_times': reservations})
def search(request):
    # Render the court search form.
    return render(request, "courts/search.html")
def searchzip(request):
    """Placeholder for searching courts by ZIP code (not implemented yet).

    Bug fix: a Django view must return an HttpResponse; returning a bare
    string raises "The view ... didn't return an HttpResponse object" at
    request time.
    """
    return HttpResponse("HELLO WORLD")
from django.contrib import messages
from django.template import Context
from .models import Court, CourtManager, SelectedCourt
from apps.users.models import User
from datetime import datetime
from decimal import Decimal
from django.contrib.auth.decorators import login_required
def index(request):
return render(request, "courts/index.html")
def main(request):
context = {
'court' : Court.objects.all()
}
return render(request, "courts/main.html", context)
def court(request, courtid):
context = {
'one_court' : Court.objects.get(id=courtid)
}
return render(request, "courts/courts.html", context)
def select(request):
if 'user_id' not in request.session:
context = {
'message' : "Please login"
}
return render(request, "courts/index.html", context)
context = {
'courts' : Court.objects.all()
}
return render(request, "courts/select.html", context)
def schedule(request):
if 'user_id' not in request.session:
context = {
'message' : "Please login"
}
return render(request, "courts/", context)
usr = User(id=request.session['user_id'])
crt = Court.objects.get(id=request.POST['courtid'])
intime = request.POST['timein']
outtime = request.POST['timeout']
dform = "%Y-%m-%d %H:%M"
diff = datetime.strptime(outtime, dform) - datetime.strptime(intime, dform)
hours = diff.seconds/3600
if hours < 4 and hours > 0:
total_price = Decimal(hours) * crt.price
if intime > outtime:
context = {
'courts' : Court.objects.all(),
'message': "End date/time is earlier than begin date/time."
}
elif intime <= datetime.now().strftime(dform):
context = {
'courts' : Court.objects.all(),
'message': "Begin date/time is in the past."
}
else:
SelectedCourt.objects.create(user=usr, court=crt, timein=intime, timeout=outtime, total_price=total_price)
context = {
'courts' : Court.objects.all()
}
else:
context = {
'courts' : Court.objects.all(),
'message': "Scheduled time is too long."
}
return render(request, "courts/select.html", context)
def dashboard(request):
if 'user_id' not in request.session:
context = {
'message' : "Please login"
}
return render(request, "courts/index.html", context)
usr = User(id=request.session['user_id'])
context = {
'court_times' : SelectedCourt.objects.filter(user=usr)
}
return render(request, "courts/dashboard.html", context)
def search(request):
return render(request, "courts/search.html")
def searchzip(request):
return "HELLO WORLD" | true | true |
f710ab8364bbdcbe4c3b37527988de78e77269bb | 5,653 | py | Python | test/test_mpc.py | AwhLorraine/mshoot | d6981fa37c55da0457ac0371f9850743858a3543 | [
"BSD-3-Clause"
] | 14 | 2019-01-15T14:30:43.000Z | 2022-02-06T08:36:36.000Z | test/test_mpc.py | AwhLorraine/mshoot | d6981fa37c55da0457ac0371f9850743858a3543 | [
"BSD-3-Clause"
] | 4 | 2019-02-01T10:32:48.000Z | 2021-02-21T08:53:53.000Z | test/test_mpc.py | AwhLorraine/mshoot | d6981fa37c55da0457ac0371f9850743858a3543 | [
"BSD-3-Clause"
] | 5 | 2019-02-08T09:20:52.000Z | 2021-04-25T02:17:54.000Z | import unittest
import os
import numpy as np
import pandas as pd
from scipy.signal import StateSpace
import matplotlib.pyplot as plt
import mshoot
def cfun(xdf, ydf):
    """Cost function: mean squared heating power over the horizon.

    :param xdf: DataFrame, model states (unused by this cost function)
    :param ydf: DataFrame, model outputs; must contain a 'qout' column
    :return: float, the mean of qout squared
    """
    q = ydf['qout'].values
    return np.sum(q * q) / q.size
class TestMPC(unittest.TestCase):
    """Integration tests for ``mshoot.MPCEmulation`` using the R1C1 FMU.

    Requires ``resources/fmus/R1C1/R1C1.fmu`` relative to the working
    directory, so these tests only run inside the project checkout.
    (Cleanup: removed the long-dead commented-out ``test_2_inputs`` and
    stale debug-plot toggles.)
    """

    def setUp(self):
        """Load the R1C1 FMU used as both the control and the emulation model."""
        fmupath = os.path.join('resources', 'fmus', 'R1C1', 'R1C1.fmu')
        parameters = {'C': 1e6, 'R': 0.01}
        self.model = mshoot.SimFMU(
            fmupath,
            outputs=['qout', 'Tr'],
            states=['heatCapacitor.T'],
            parameters=parameters,
            verbose=False)

    def tearDown(self):
        pass

    def test_mpc(self):
        """Optimize heating over 10 hours and validate the emulated trajectory."""
        # Inputs: 10 hourly steps, no baseline heating, constant 273.15 K outdoors.
        t = np.arange(0, 3600 * 10, 3600)
        inp = pd.DataFrame(index=pd.Index(t, name='time'), columns=['q', 'Tout'])
        inp['q'] = np.full(t.size, 0)
        inp['Tout'] = np.full(t.size, 273.15)
        # Bounds on the free input (heating power) and the state (temperature).
        ubounds = [(0., 4000.)]
        xbounds = [(293.15, 296.15)]
        # Initial state
        x0 = [293.65]
        # Optimization
        mpc = mshoot.MPCEmulation(emumod=self.model, cfun=cfun)
        u, xctr, xemu, yemu, uhist = mpc.optimize(
            model=self.model,
            inp_ctr=inp.copy(),
            inp_emu=inp.copy(),
            free=['q'],
            ubounds=ubounds,
            xbounds=xbounds,
            x0=x0,
            ynominal=[4000., 293.15],
            step=1,
            horizon=3
        )
        # The optimum should settle near the lower temperature bound
        # (ideally even closer than 0.3 K).
        self.assertLess(abs(xemu['heatCapacitor.T'].iloc[-1] - 293.15), 0.3)
        # Validate the emulation by re-simulating with the optimized control.
        inp['q'] = u['q']
        yvld, xvld = self.model.simulate(inp, x0)
        # Outputs (yvld vs yemu) are intentionally not compared: FMU results
        # may be shifted in time by one step (FMU- or pyFMI-specific), which
        # breaks an element-wise comparison.
        self.assertTrue(((xvld - xemu).abs() < 1e-3).all().all())

    def test_mpc_inp_clb(self):
        """Same scenario as test_mpc, but inputs are supplied via a callback."""
        # Inputs
        t = np.arange(0, 3600 * 10, 3600)
        inp = pd.DataFrame(index=pd.Index(t, name='time'), columns=['q', 'Tout'])
        inp['q'] = np.full(t.size, 0)
        inp['Tout'] = np.full(t.size, 273.15)
        # Bounds
        ubounds = [(0., 4000.)]
        xbounds = [(293.15, 296.15)]
        # Initial state
        x0 = [293.65]

        # Input callback: returns the input row for a given time index.
        def inp_clb(index):
            return inp.loc[index]

        # Optimization
        mpc = mshoot.MPCEmulation(emumod=self.model, cfun=cfun)
        u, xctr, xemu, yemu, uhist = mpc.optimize(
            model=self.model,
            inp_ctr=None,
            inp_clb=inp_clb,
            inp_emu=inp.copy(),
            free=['q'],
            ubounds=ubounds,
            xbounds=xbounds,
            x0=x0,
            ynominal=[4000., 293.15],
            step=1,
            horizon=3
        )
        # Same acceptance criteria as test_mpc.
        self.assertLess(abs(xemu['heatCapacitor.T'].iloc[-1] - 293.15), 0.3)
        inp['q'] = u['q']
        yvld, xvld = self.model.simulate(inp, x0)
        # See test_mpc for why outputs are not compared element-wise.
        self.assertTrue(((xvld - xemu).abs() < 1e-3).all().all())
if __name__ == '__main__':
unittest.main()
| 30.556757 | 110 | 0.523262 | import unittest
import os
import numpy as np
import pandas as pd
from scipy.signal import StateSpace
import matplotlib.pyplot as plt
import mshoot
def cfun(xdf, ydf):
qout = ydf['qout'].values
c = np.sum(qout ** 2) / qout.size
return c
class TestMPC(unittest.TestCase):
def setUp(self):
fmupath = os.path.join('resources', 'fmus', 'R1C1', 'R1C1.fmu')
parameters = {'C': 1e6, 'R': 0.01}
self.model = mshoot.SimFMU(
fmupath,
outputs=['qout', 'Tr'],
states=['heatCapacitor.T'],
parameters=parameters,
verbose=False)
def tearDown(self):
pass
def test_mpc(self):
t = np.arange(0, 3600 * 10, 3600)
inp = pd.DataFrame(index=pd.Index(t, name='time'), columns=['q', 'Tout'])
inp['q'] = np.full(t.size, 0)
inp['Tout'] = np.full(t.size, 273.15)
ubounds = [(0., 4000.)]
xbounds = [(293.15, 296.15)]
x0 = [293.65]
mpc = mshoot.MPCEmulation(emumod=self.model, cfun=cfun)
u, xctr, xemu, yemu, uhist = mpc.optimize(
model=self.model,
inp_ctr=inp.copy(),
inp_emu=inp.copy(),
free=['q'],
ubounds=ubounds,
xbounds=xbounds,
x0=x0,
ynominal=[4000., 293.15],
step=1,
horizon=3
)
self.assertLess(abs(xemu['heatCapacitor.T'].iloc[-1] - 293.15), 0.3)
inp['q'] = u['q']
yvld, xvld = self.model.simulate(inp, x0)
d - xemu).abs() < 1e-3).all().all())
def test_mpc_inp_clb(self):
t = np.arange(0, 3600 * 10, 3600)
inp = pd.DataFrame(index=pd.Index(t, name='time'), columns=['q', 'Tout'])
inp['q'] = np.full(t.size, 0)
inp['Tout'] = np.full(t.size, 273.15)
ubounds = [(0., 4000.)]
xbounds = [(293.15, 296.15)]
x0 = [293.65]
def inp_clb(index):
return inp.loc[index]
mpc = mshoot.MPCEmulation(emumod=self.model, cfun=cfun)
u, xctr, xemu, yemu, uhist = mpc.optimize(
model=self.model,
inp_ctr=None,
inp_clb=inp_clb,
inp_emu=inp.copy(),
free=['q'],
ubounds=ubounds,
xbounds=xbounds,
x0=x0,
ynominal=[4000., 293.15],
step=1,
horizon=3
)
self.assertLess(abs(xemu['heatCapacitor.T'].iloc[-1] - 293.15), 0.3)
inp['q'] = u['q']
yvld, xvld = self.model.simulate(inp, x0)
d - xemu).abs() < 1e-3).all().all())
| true | true |
f710abb49b22c3947a49393e8d333e11f696684b | 90,076 | py | Python | src/edges_cal/cal_coefficients.py | edges-collab/edges-cal | 9b7b28f71e1aa5347f901af38ef3bc0d28766e21 | [
"MIT"
] | null | null | null | src/edges_cal/cal_coefficients.py | edges-collab/edges-cal | 9b7b28f71e1aa5347f901af38ef3bc0d28766e21 | [
"MIT"
] | 86 | 2020-02-07T23:00:23.000Z | 2022-03-31T22:08:19.000Z | src/edges_cal/cal_coefficients.py | edges-collab/edges-cal | 9b7b28f71e1aa5347f901af38ef3bc0d28766e21 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The main user-facing module of ``edges-cal``.
This module contains wrappers around lower-level functions in other modules, providing
a one-stop interface for everything related to calibration.
"""
from __future__ import annotations
import attr
import h5py
import numpy as np
import tempfile
import warnings
import yaml
from abc import ABCMeta, abstractmethod
from astropy.convolution import Gaussian1DKernel, convolve
from copy import copy
from edges_io import io
from edges_io.logging import logger
from functools import lru_cache
from hashlib import md5
from matplotlib import pyplot as plt
from pathlib import Path
from scipy.interpolate import InterpolatedUnivariateSpline as Spline
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from . import DATA_PATH
from . import modelling as mdl
from . import receiver_calibration_func as rcf
from . import reflection_coefficient as rc
from . import s11_correction as s11
from . import tools
from . import types as tp
from . import xrfi
from .cached_property import cached_property
from .tools import EdgesFrequencyRange, FrequencyRange
class S1P:
    def __init__(
        self,
        s1p: tp.PathLike | io.S1P,
        f_low: float | None = None,
        f_high: float | None = None,
        switchval: int | None = None,
    ):
        """
        An object representing the measurements of a VNA.

        The measurements are read in via a .s1p file

        Parameters
        ----------
        s1p : str, Path or :class:`io.S1P`
            The path to a valid .s1p file containing VNA measurements, or an S1P
            object of such a type.
        f_low, f_high : float
            The minimum/maximum frequency to keep.
        switchval : int
            The standard value of the switch for the component.
        """
        # EAFP dispatch: try to treat ``s1p`` as a path first.  ``Path()``
        # raises TypeError for non-path-like input (e.g. an already-built
        # io.S1P instance), which is then accepted as-is.
        try:
            s1p = Path(s1p)
            self.s1p = io.S1P(s1p)
        except TypeError:
            if isinstance(s1p, io.S1P):
                self.s1p = s1p
            else:
                raise TypeError(
                    "s1p must be a path to an s1p file, or an io.S1P object"
                )
        self.load_name = self.s1p.kind
        self.repeat_num = self.s1p.repeat_num
        spec = self.s1p.s11
        f = self.s1p.freq
        # Restrict the frequency axis to [f_low, f_high]; the same mask is
        # applied to the S11 spectrum so the two stay aligned.
        self.freq = FrequencyRange(f, f_low, f_high)
        self.s11 = spec[self.freq.mask]
        self._switchval = switchval

    @cached_property
    def switchval(self):
        """The standard value of the switch, broadcast over the frequency axis.

        Returns ``None`` when no switch value was supplied at construction.
        """
        if self._switchval is not None:
            return self._switchval * np.ones_like(self.freq.freq)
        else:
            return None
# For backwards compatibility
VNA = S1P
class _S11Base(metaclass=ABCMeta):
    """Abstract base for S11 (reflection coefficient) measurements of a load.

    Subclasses must implement :meth:`from_path` and
    :attr:`measured_load_s11_raw`.
    """

    # Default number of model terms per known load name (must be odd).
    default_nterms = {
        "ambient": 37,
        "hot_load": 37,
        "open": 105,
        "short": 105,
        "AntSim2": 55,
        "AntSim3": 55,
        "AntSim4": 55,
        "lna": 37,
    }
    def __init__(
        self,
        *,
        load_s11: Union[io._S11SubDir, io.ReceiverReading],
        f_low: Optional[float] = None,
        f_high: Optional[float] = None,
        n_terms: Optional[int] = None,
        model_type: tp.Modelable = "fourier",
    ):
        """
        A class representing relevant switch corrections for a load.
        Parameters
        ----------
        load_s11 : :class:`io._S11SubDir`
            An instance of the basic ``io`` S11 folder.
        f_low : float
            Minimum frequency to use. Default is all frequencies.
        f_high : float
            Maximum frequency to use. Default is all frequencies.
        n_terms : int
            The number of terms to use in fitting a model to the S11 (used to both
            smooth and interpolate the data). Must be odd.
        model_type : str or model class
            The kind of model used to fit the S11 (resolved via ``mdl.get_mdl``).
        """
        self.load_s11 = load_s11
        self.base_path = self.load_s11.path
        # ReceiverReading objects have no load_name; fall back to None.
        self.load_name = getattr(self.load_s11, "load_name", None)
        self.run_num = self.load_s11.run_num
        # Nominal reflection values of the calibration standards.
        switchvals = {"open": 1, "short": -1, "match": 0}
        # Attach one S1P attribute per measured standard (e.g. self.open).
        for name in self.load_s11.STANDARD_NAMES:
            setattr(
                self,
                name.lower(),
                S1P(
                    s1p=self.load_s11.children[name.lower()],
                    f_low=f_low,
                    f_high=f_high,
                    switchval=switchvals.get(name.lower()),
                ),
            )
        # Expose one of the frequency objects
        self.freq = self.open.freq
        self._nterms = int(n_terms) if n_terms is not None else None
        self.model_type = model_type
    @cached_property
    def n_terms(self):
        """Number of terms to use (by default) in modelling the S11.
        Raises
        ------
        ValueError
            If n_terms is even or not an integer.
        """
        res = self._nterms or self.default_nterms.get(self.load_name, None)
        if not (isinstance(res, int) and res % 2):
            raise ValueError(
                f"n_terms must be odd for S11 models. For {self.load_name} got "
                f"n_terms={res}."
            )
        return res
    @classmethod
    @abstractmethod
    def from_path(cls, **kwargs):
        """Construct an instance from an on-disk observation path."""
        pass  # pragma: no cover
    @cached_property
    @abstractmethod
    def measured_load_s11_raw(self):
        """The raw measured S11 of the load (before switch correction)."""
        pass  # pragma: no cover
    @cached_property
    def corrected_load_s11(self) -> np.ndarray:
        """The measured S11 of the load, corrected for internal switch."""
        return self.measured_load_s11_raw
    # NOTE: lru_cache on an instance method keeps the instance alive for the
    # cache's lifetime; acceptable here since few instances are created.
    @lru_cache()
    def get_corrected_s11_model(
        self,
        n_terms: int | None = None,
        model_type: tp.Modelable | None = None,
    ):
        """Generate a callable model for the S11 correction.
        This should closely match :method:`s11_correction`.
        Parameters
        ----------
        n_terms : int
            Number of terms used in the fourier-based model. Not necessary if
            `load_name` is specified in the class.
        model_type : str or model class
            The kind of model to fit. Defaults to ``self.model_type``.
        Returns
        -------
        callable :
            A function of one argument, f, which should be a frequency in the same units
            as `self.freq.freq`.
        Raises
        ------
        ValueError
            If n_terms is not an integer, or not odd.
        """
        n_terms = n_terms or self.n_terms
        model_type = mdl.get_mdl(model_type or self.model_type)
        model = model_type(
            n_terms=n_terms,
            transform=mdl.UnitTransform(range=[self.freq.min, self.freq.max]),
        )
        emodel = model.at(x=self.freq.freq)
        # Magnitude and phase are fit separately with the same functional form.
        cmodel = mdl.ComplexMagPhaseModel(mag=emodel, phs=emodel)
        s11_correction = self.corrected_load_s11
        return cmodel.fit(ydata=s11_correction)
    @cached_property
    def s11_model(self) -> callable:
        """The fitted S11 model (callable at arbitrary frequency)."""
        return self.get_corrected_s11_model()
    def plot_residuals(
        self,
        fig=None,
        ax=None,
        color_abs="C0",
        color_diff="g",
        label=None,
        title=None,
        decade_ticks=True,
        ylabels=True,
    ) -> plt.Figure:
        """
        Make a plot of the residuals of the S11 model and the correction data.
        Residuals obtained via :func:`get_corrected_s11_model`
        Returns
        -------
        fig :
            Matplotlib Figure handle.
        """
        if fig is None or ax is None or len(ax) != 4:
            fig, ax = plt.subplots(
                4, 1, sharex=True, gridspec_kw={"hspace": 0.05}, facecolor="w"
            )
        if decade_ticks:
            for axx in ax:
                axx.xaxis.set_ticks(
                    [50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180],
                    minor=[],
                )
                axx.grid(True)
        ax[-1].set_xlabel("Frequency [MHz]")
        corr = self.corrected_load_s11
        model = self.s11_model(self.freq.freq)
        ax[0].plot(
            self.freq.freq, 20 * np.log10(np.abs(model)), color=color_abs, label=label
        )
        if ylabels:
            ax[0].set_ylabel(r"$|S_{11}|$")
        ax[1].plot(self.freq.freq, np.abs(model) - np.abs(corr), color_diff)
        if ylabels:
            ax[1].set_ylabel(r"$\Delta |S_{11}|$")
        ax[2].plot(
            self.freq.freq, np.unwrap(np.angle(model)) * 180 / np.pi, color=color_abs
        )
        if ylabels:
            ax[2].set_ylabel(r"$\angle S_{11}$")
        ax[3].plot(
            self.freq.freq,
            np.unwrap(np.angle(model)) - np.unwrap(np.angle(corr)),
            color_diff,
        )
        if ylabels:
            ax[3].set_ylabel(r"$\Delta \angle S_{11}$")
        if title is None:
            title = f"{self.load_name} Reflection Coefficient Models"
        if title:
            # BUGFIX: previously the default f-string was always used here,
            # silently discarding a caller-supplied ``title``.
            fig.suptitle(title, fontsize=14)
        if label:
            ax[0].legend()
        return fig
class LoadS11(_S11Base):
    """S11 measurements of a lab calibration load, corrected for the switch."""

    def __init__(self, *, internal_switch: s11.InternalSwitch, **kwargs):
        """S11 for a lab calibration load.

        Parameters
        ----------
        internal_switch : :class:`s11.InternalSwitch`
            The internal switch state corresponding to the load.

        Other Parameters
        ----------------
        Passed through to :class:`_S11Base`.
        """
        assert isinstance(internal_switch, s11.InternalSwitch)
        self.internal_switch = internal_switch
        super().__init__(**kwargs)

    @classmethod
    def from_path(
        cls,
        load_name: str,
        path: tp.PathLike,
        run_num_load: int = 1,
        run_num_switch: int = 1,
        repeat_num_load: int = None,
        repeat_num_switch: int = None,
        resistance: float = 50.166,
        model_internal_switch: mdl.Model = attr.NOTHING,
        **kwargs,
    ):
        """
        Create a new :class:`LoadS11` from a calibration-observation path.

        Parameters
        ----------
        load_name : str
            The name of the load to create.
        path : str or Path
            The path to the overall calibration observation.
        run_num_load : int
            The run to use for the load S11.
        run_num_switch : int
            The run to use for the switching-state S11.
        repeat_num_load, repeat_num_switch : int, optional
            Repeat numbers for load and switching-state measurements.
        resistance : float
            Resistance (Ohms) of the internal-switch match standard.
        model_internal_switch
            Model passed through to :class:`s11.InternalSwitch`.
        kwargs
            All other arguments are passed through to the constructor.

        Returns
        -------
        s11 : :class:`LoadS11`
            The S11 of the load.
        """
        path = Path(path)
        is_antsim = load_name.startswith("AntSim")
        if not is_antsim:
            load_name = io.LOAD_ALIASES[load_name]
        # Antenna simulators live in their own kind of S11 directory.
        dir_cls = io.AntSimS11 if is_antsim else io.LoadS11
        load_dir = dir_cls(
            path / "S11" / f"{load_name}{run_num_load:02}",
            repeat_num=repeat_num_load,
        )
        switch = s11.InternalSwitch(
            data=io.SwitchingState(
                path / "S11" / f"SwitchingState{run_num_switch:02}",
                repeat_num=repeat_num_switch,
            ),
            resistance=resistance,
            model=model_internal_switch,
        )
        return cls(load_s11=load_dir, internal_switch=switch, **kwargs)

    @cached_property
    def measured_load_s11_raw(self):
        """The measured S11 of the load, calculated from raw internal standards."""
        gamma, *_ = rc.de_embed(
            self.open.switchval,
            self.short.switchval,
            self.match.switchval,
            self.open.s11,
            self.short.s11,
            self.match.s11,
            self.external.s11,
        )
        return gamma

    @cached_property
    def corrected_load_s11(self) -> np.ndarray:
        """The measured S11 of the load, corrected for the internal switch."""
        fhz = self.freq.freq
        sw = self.internal_switch
        return rc.gamma_de_embed(
            sw.s11_model(fhz),
            sw.s12_model(fhz),
            sw.s22_model(fhz),
            self.measured_load_s11_raw,
        )
class LNA(_S11Base):
    """S11 of the receiver (LNA), de-embedded with modelled calkit standards."""

    def __init__(
        self, load_s11: io.ReceiverReading, resistance: float = 50.009, **kwargs
    ):
        """A special case of :class:`_S11Base` for the LNA.

        Parameters
        ----------
        load_s11 : :class:`io.ReceiverReading`
            The Receiver Reading S11 measurements.
        resistance : float
            The resistance of the receiver.
        kwargs :
            All other arguments passed to :class:`_S11Base`.
        """
        super().__init__(load_s11=load_s11, **kwargs)
        self.resistance = resistance
        self.load_name = "lna"
        self.repeat_num = self.load_s11.repeat_num

    @classmethod
    def from_path(
        cls,
        path: Union[str, Path],
        repeat_num: Optional[int] = None,
        run_num: int = 1,
        **kwargs,
    ):
        """
        Create an instance from a given path.

        Parameters
        ----------
        path : str or Path
            Path to overall Calibration Observation.
        repeat_num : int, optional
            The repeat number of the receiver reading to use.
        run_num : int
            The run number of the receiver reading to use.
        kwargs
            All other arguments passed through to the constructor.

        Returns
        -------
        lna : :class:`LNA`
            The LNA object.
        """
        receiver = io.ReceiverReading(
            path=Path(path) / "S11" / f"ReceiverReading{run_num:02}",
            repeat_num=repeat_num,
            fix=False,
        )
        return cls(load_s11=receiver, **kwargs)

    @cached_property
    def external(self):
        """VNA S11 measurements for the load."""
        band = self.freq.freq
        return S1P(
            self.load_s11.children["receiverreading"],
            f_low=band.min(),
            f_high=band.max(),
        )

    @cached_property
    def measured_load_s11_raw(self):
        """Measured S11 of of the LNA."""
        # Modelled reflection coefficients of the calkit standards.
        open_std, short_std, load_std = rc.agilent_85033E(
            self.freq.freq, self.resistance, match_delay=True
        )
        # De-embed the measurement taken at the switch.
        gamma, *_ = rc.de_embed(
            open_std,
            short_std,
            load_std,
            self.open.s11,
            self.short.s11,
            self.match.s11,
            self.external.s11,
        )
        return gamma
class LoadSpectrum:
    """A measured, time-averaged spectrum (and thermistor temperature) of a load."""

    def __init__(
        self,
        spec_obj: List[io.Spectrum],
        resistance_obj: io.Resistance,
        switch_correction: Optional[LoadS11] = None,
        f_low: float = 40.0,
        f_high: Optional[float] = None,
        ignore_times_percent: float = 5.0,
        rfi_removal: str = "1D2D",
        rfi_kernel_width_time: int = 16,
        rfi_kernel_width_freq: int = 16,
        rfi_threshold: float = 6,
        cache_dir: Optional[Union[str, Path]] = None,
        t_load: float = 300.0,
        t_load_ns: float = 400.0,
    ):
        """A class representing a measured spectrum from some Load.
        Parameters
        ----------
        spec_obj : :class:`io.Spectrum`
            The base Spectrum object defining the on-disk spectra.
        resistance_obj : :class:`io.Resistance`
            The base Resistance object defining the on-disk resistance measurements.
        switch_correction : :class:`SwitchCorrection`
            A `SwitchCorrection` for this particular load. If not given, will be
            constructed automatically.
        f_low : float
            Minimum frequency to keep.
        f_high : float
            Maximum frequency to keep.
        ignore_times_percent : float
            Must be between 0 and 100. Number of time-samples in a file to reject
            from the start of the file.
        rfi_removal : str
            Either '1D', '2D' or '1D2D'. If given, will perform median and mean-filtered
            xRFI over either the
            2D waterfall, or integrated 1D spectrum. The latter is usually reasonable
            for calibration sources, while the former is good for field data. "1D2D"
            is a hybrid approach in which the variance per-frequency is determined
            from the 2D data, but filtering occurs only over frequency.
        rfi_kernel_width_time : int
            The kernel width for the detrending of data for
            RFI removal in the time dimension (only used if `rfi_removal` is "2D").
        rfi_kernel_width_freq : int
            The kernel width for the detrending of data for
            RFI removal in the frequency dimension.
        rfi_threshold : float
            The threshold (in equivalent standard deviation units) above which to
            flag data as RFI.
        cache_dir : str or Path
            An alternative directory in which to load/save cached reduced files. By
            default, the same as the path to the .mat files. If you don't have
            write permission there, it may be useful to use an alternative path.
        t_load
            Fiducial guess for the temperature of the internal load.
        t_load_ns
            Fiducial guess for the temperature of the internal load + noise source.
        """
        self.spec_obj = spec_obj
        self.resistance_obj = resistance_obj
        self.load_name = self.spec_obj[0].load_name
        assert (
            self.load_name == self.resistance_obj.load_name
        ), "spec and resistance load_name must be the same"
        # NOTE(review): this is a generator expression, so it can be iterated
        # only once (consumed in _get_integrated_filename).
        self.spec_files = (spec_obj.path for spec_obj in self.spec_obj)
        self.resistance_file = self.resistance_obj.path
        self.run_num = self.spec_obj[0].run_num
        self.cache_dir = Path(cache_dir or ".")
        self.rfi_kernel_width_time = rfi_kernel_width_time
        self.rfi_kernel_width_freq = rfi_kernel_width_freq
        self.rfi_threshold = rfi_threshold
        assert rfi_removal in [
            "1D",
            "2D",
            "1D2D",
            False,
            None,
        ], "rfi_removal must be either '1D', '2D', '1D2D, or False/None"
        self.rfi_removal = rfi_removal
        self.switch_correction = switch_correction
        self.ignore_times_percent = ignore_times_percent
        self.freq = EdgesFrequencyRange(f_low=f_low, f_high=f_high)
        self.t_load = t_load
        self.t_load_ns = t_load_ns
    @classmethod
    def from_load_name(
        cls,
        load_name: str,
        direc: Union[str, Path],
        run_num: Optional[int] = None,
        filetype: Optional[str] = None,
        **kwargs,
    ):
        """Instantiate the class from a given load name and directory.
        Parameters
        ----------
        load_name : str
            The load name (one of 'ambient', 'hot_load', 'open' or 'short').
        direc : str or Path
            The top-level calibration observation directory.
        run_num : int
            The run number to use for the spectra.
        filetype : str
            The filetype to look for (acq or h5).
        kwargs :
            All other arguments to :class:`LoadSpectrum`.
        Returns
        -------
        :class:`LoadSpectrum`.
        """
        direc = Path(direc)
        spec = io.Spectrum.from_load(
            load=load_name, direc=direc / "Spectra", run_num=run_num, filetype=filetype
        )
        res = io.Resistance.from_load(
            load=load_name,
            direc=direc / "Resistance",
            run_num=run_num,
            filetype=filetype,
        )
        return cls(spec_obj=spec, resistance_obj=res, **kwargs)
    @cached_property
    def averaged_Q(self) -> np.ndarray:
        """Ratio of powers averaged over time.
        Notes
        -----
        The formula is
        .. math:: Q = (P_source - P_load)/(P_noise - P_load)
        """
        # TODO: should also get weights!
        spec = self._ave_and_var_spec[0]["Q"]
        if self.rfi_removal == "1D":
            flags, _ = xrfi.xrfi_medfilt(
                spec, threshold=self.rfi_threshold, kf=self.rfi_kernel_width_freq
            )
            # NOTE(review): this mutates the cached array in place, so flagged
            # channels remain NaN for all later accesses.
            spec[flags] = np.nan
        return spec
    @property
    def variance_Q(self) -> np.ndarray:
        """Variance of Q across time (see averaged_Q)."""
        return self._ave_and_var_spec[1]["Q"]
    @property
    def averaged_spectrum(self) -> np.ndarray:
        """T* = T_noise * Q + T_load."""
        return self.averaged_Q * self.t_load_ns + self.t_load
    @property
    def variance_spectrum(self) -> np.ndarray:
        """Variance of uncalibrated spectrum across time (see averaged_spectrum)."""
        return self.variance_Q * self.t_load_ns ** 2
    @property
    def ancillary(self) -> list:
        """Ancillary measurement data (one metadata dict per spectrum file)."""
        return [d.data["meta"] for d in self.spec_obj]
    @property
    def averaged_p0(self) -> np.ndarray:
        """Power of the load, averaged over time."""
        return self._ave_and_var_spec[0]["p0"]
    @property
    def averaged_p1(self) -> np.ndarray:
        """Power of the noise-source, averaged over time."""
        return self._ave_and_var_spec[0]["p1"]
    @property
    def averaged_p2(self) -> np.ndarray:
        """Power of the load plus noise-source, averaged over time."""
        return self._ave_and_var_spec[0]["p2"]
    @property
    def variance_p0(self) -> np.ndarray:
        """Variance of the load, averaged over time."""
        return self._ave_and_var_spec[1]["p0"]
    @property
    def variance_p1(self) -> np.ndarray:
        """Variance of the noise-source, averaged over time."""
        return self._ave_and_var_spec[1]["p1"]
    @property
    def variance_p2(self) -> np.ndarray:
        """Variance of the load plus noise-source, averaged over time."""
        return self._ave_and_var_spec[1]["p2"]
    @property
    def n_integrations(self) -> int:
        """The number of integrations recorded for the spectrum (after ignoring)."""
        return self._ave_and_var_spec[2]
    def _get_integrated_filename(self):
        """Determine a unique filename for the reduced data of this instance."""
        # Hash every parameter that affects the reduction, so a change in any
        # of them produces a distinct cache file.
        params = (
            self.rfi_threshold,
            self.rfi_kernel_width_time,
            self.rfi_kernel_width_freq,
            self.rfi_removal,
            self.ignore_times_percent,
            self.freq.min,
            self.freq.max,
            self.t_load,
            self.t_load_ns,
            tuple(path.name for path in self.spec_files),
        )
        hsh = md5(str(params).encode()).hexdigest()
        return self.cache_dir / f"{self.load_name}_{hsh}.h5"
    @cached_property
    def _ave_and_var_spec(self) -> Tuple[Dict, Dict, int]:
        """Get the mean and variance of the spectra."""
        fname = self._get_integrated_filename()
        kinds = ["p0", "p1", "p2", "Q"]
        # Fast path: a cached reduction matching all parameters exists on disk.
        if fname.exists():
            logger.info(
                f"Reading in previously-created integrated {self.load_name} spectra..."
            )
            means = {}
            variances = {}
            with h5py.File(fname, "r") as fl:
                for kind in kinds:
                    means[kind] = fl[kind + "_mean"][...]
                    variances[kind] = fl[kind + "_var"][...]
                n_integrations = fl.attrs.get("n_integrations", 0)
            return means, variances, n_integrations
        logger.info(f"Reducing {self.load_name} spectra...")
        spectra = self.get_spectra()
        means = {}
        variances = {}
        for key, spec in spectra.items():
            # Weird thing where there are zeros in the spectra.
            spec[spec == 0] = np.nan
            mean = np.nanmean(spec, axis=1)
            var = np.nanvar(spec, axis=1)
            n_intg = spec.shape[1]
            if self.rfi_removal == "1D2D":
                # Estimate per-frequency noise from the 2D data, then flag
                # only along frequency (hybrid scheme; see class docstring).
                nsample = np.sum(~np.isnan(spec), axis=1)
                varfilt = xrfi.flagged_filter(
                    var, size=2 * self.rfi_kernel_width_freq + 1
                )
                resid = mean - xrfi.flagged_filter(
                    mean, size=2 * self.rfi_kernel_width_freq + 1
                )
                flags = np.logical_or(
                    resid > self.rfi_threshold * np.sqrt(varfilt / nsample),
                    var - varfilt
                    > self.rfi_threshold * np.sqrt(2 * varfilt ** 2 / (nsample - 1)),
                )
                mean[flags] = np.nan
                var[flags] = np.nan
            means[key] = mean
            variances[key] = var
        if not self.cache_dir.exists():
            self.cache_dir.mkdir()
        with h5py.File(fname, "w") as fl:
            logger.info(f"Saving reduced spectra to cache at {fname}")
            for kind in kinds:
                fl[kind + "_mean"] = means[kind]
                fl[kind + "_var"] = variances[kind]
            # NOTE(review): n_intg is the value from the *last* loop iteration;
            # all kinds share the same time axis, so this is the same for each.
            fl.attrs["n_integrations"] = n_intg
        return means, variances, n_intg
    def get_spectra(self) -> dict:
        """Read all spectra and remove RFI.
        Returns
        -------
        dict :
            A dictionary with keys being different powers (p1, p2, p3, Q), and values
            being ndarrays.
        """
        spec = self._read_spectrum()
        if self.rfi_removal == "2D":
            for key, val in spec.items():
                # Need to set nans and zeros to inf so that median/mean detrending
                # can work.
                val[np.isnan(val)] = np.inf
                if key != "Q":
                    val[val == 0] = np.inf
                flags, _ = xrfi.xrfi_medfilt(
                    val,
                    threshold=self.rfi_threshold,
                    kt=self.rfi_kernel_width_time,
                    kf=self.rfi_kernel_width_freq,
                )
                val[flags] = np.nan
                spec[key] = val
        return spec
    def _read_spectrum(self) -> dict:
        """
        Read the contents of the spectrum files into memory.
        Removes a starting percentage of times, and masks out certain frequencies.
        Returns
        -------
        dict :
            A dictionary of the contents of the file. Usually p0, p1, p2 (un-normalised
            powers of source, load, and load+noise respectively), and ant_temp (the
            uncalibrated, but normalised antenna temperature).
        """
        data = [spec_obj.data for spec_obj in self.spec_obj]
        n_times = sum(len(d["time_ancillary"]["times"]) for d in data)
        out = {
            "p0": np.empty((len(self.freq.freq), n_times)),
            "p1": np.empty((len(self.freq.freq), n_times)),
            "p2": np.empty((len(self.freq.freq), n_times)),
            "Q": np.empty((len(self.freq.freq), n_times)),
        }
        index_start_spectra = int((self.ignore_times_percent / 100) * n_times)
        for key, val in out.items():
            # Concatenate all files along the time axis before trimming.
            nn = 0
            for d in data:
                n = len(d["time_ancillary"]["times"])
                val[:, nn : (nn + n)] = d["spectra"][key][self.freq.mask]
                nn += n
            out[key] = val[:, index_start_spectra:]
        return out
    @cached_property
    def thermistor(self) -> np.ndarray:
        """The thermistor readings."""
        ary = self.resistance_obj.read()[0]
        # Drop the same leading fraction of time samples as for the spectra.
        return ary[int((self.ignore_times_percent / 100) * len(ary)) :]
    @cached_property
    def thermistor_temp(self):
        """The associated thermistor temperature in K."""
        return rcf.temperature_thermistor(self.thermistor["load_resistance"])
    @cached_property
    def temp_ave(self):
        """Average thermistor temperature (over time and frequency)."""
        return np.nanmean(self.thermistor_temp)
    def write(self, path=None):
        """
        Write a HDF5 file containing the contents of the LoadSpectrum.
        Parameters
        ----------
        path : str
            Directory into which to save the file, or full path to file.
            If a directory, filename will be <load_name>_averaged_spectrum.h5.
            Default is current directory.
        """
        path = Path(path or ".")
        # Allow to pass in a directory name *or* full path.
        if path.is_dir():
            path /= f"{self.load_name}_averaged_spectrum.h5"
        with h5py.File(path, "w") as fl:
            fl.attrs["load_name"] = self.load_name
            fl["freq"] = self.freq.freq
            fl["averaged_raw_spectrum"] = self.averaged_spectrum
            fl["temperature"] = self.thermistor_temp
    def plot(
        self, thermistor=False, fig=None, ax=None, xlabel=True, ylabel=True, **kwargs
    ):
        """
        Make a plot of the averaged uncalibrated spectrum associated with this load.
        Parameters
        ----------
        thermistor : bool
            Whether to plot the thermistor temperature on the same axis.
        fig : Figure
            Optionally, pass a matplotlib figure handle which will be used to plot.
        ax : Axis
            Optional, pass a matplotlib Axis handle which will be added to.
        xlabel : bool
            Whether to make an x-axis label.
        ylabel : bool
            Whether to plot the y-axis label
        kwargs :
            All other arguments are passed to `plt.subplots()`.
        """
        if fig is None:
            fig, ax = plt.subplots(
                1, 1, facecolor=kwargs.pop("facecolor", "white"), **kwargs
            )
        if thermistor:
            ax.plot(self.freq.freq, self.thermistor_temp)
            if ylabel:
                ax.set_ylabel("Temperature [K]")
        else:
            ax.plot(self.freq.freq, self.averaged_spectrum)
            if ylabel:
                ax.set_ylabel("$T^*$ [K]")
        ax.grid(True)
        if xlabel:
            ax.set_xlabel("Frequency [MHz]")
class HotLoadCorrection:
    """Corrections for the hot load.

    Wraps measured S-parameters of the semi-rigid cable between the hot load
    and the receiver, and models them as smooth functions of frequency, as
    required to define the HotLoad temperature (Monsalve et al. 2017, Eq. 8+9).
    """

    # Column index of each S-parameter in the parsed data array.
    _kinds = {"s11": 0, "s12": 1, "s22": 2}

    def __init__(
        self,
        path: Union[str, Path] = ":semi_rigid_s_parameters_WITH_HEADER.txt",
        f_low: Optional[float] = None,
        f_high: Optional[float] = None,
        n_terms: int = 21,
    ):
        """
        Parameters
        ----------
        path : str or Path, optional
            Path to a file containing measurements of the semi-rigid cable reflection
            parameters. A preceding colon (:) indicates to prefix with DATA_PATH.
            The default file was measured in 2015, but there is also a file included
            that can be used from 2017: ":semi_rigid_s_parameters_2017.txt".
        f_low, f_high : float
            Lowest/highest frequency to retain from measurements.
        n_terms : int
            Number of polynomial terms used to model each S-parameter.
        """
        # Resolve a leading ":" as "relative to the package DATA_PATH".
        if not isinstance(path, Path):
            path = DATA_PATH / path[1:] if path[0] == ":" else Path(path)
        self.path = path
        data = np.genfromtxt(self.path)
        self.freq = FrequencyRange(data[:, 0], f_low, f_high)
        mask = self.freq.mask
        n_cols = data.shape[1]
        if n_cols == 7:  # Original file from 2015
            # Real/imag parts are interleaved in columns 1..6.
            self.data = data[mask, 1::2] + 1j * data[mask, 2::2]
        elif n_cols == 6:  # File from 2017
            # s11 and s22 are complex; s12 is given as a single real column.
            self.data = np.array(
                [
                    data[mask, 1] + 1j * data[mask, 2],
                    data[mask, 3],
                    data[mask, 4] + 1j * data[mask, 5],
                ]
            ).T
        else:
            raise IOError("Semi-Rigid Cable file has wrong data format.")
        self.n_terms = int(n_terms)

    def _get_model_kind(self, kind):
        # One polynomial each for magnitude and phase, over the unit-scaled band.
        poly = mdl.Polynomial(
            n_terms=self.n_terms,
            transform=mdl.UnitTransform(range=(self.freq.min, self.freq.max)),
        )
        cmplx = mdl.ComplexMagPhaseModel(mag=poly, phs=poly)
        return cmplx.fit(xdata=self.freq.freq, ydata=self.data[:, self._kinds[kind]])

    @cached_property
    def s11_model(self):
        """The reflection coefficient."""
        return self._get_model_kind("s11")

    @cached_property
    def s12_model(self):
        """The transmission coefficient."""
        return self._get_model_kind("s12")

    @cached_property
    def s22_model(self):
        """The reflection coefficient from the other side."""
        return self._get_model_kind("s22")

    def power_gain(self, freq: np.ndarray, hot_load_s11: LoadS11) -> np.ndarray:
        """
        Calculate the power gain of the semi-rigid cable.

        Parameters
        ----------
        freq : np.ndarray
            The frequencies.
        hot_load_s11 : :class:`LoadS11`
            The S11 of the hot load.

        Returns
        -------
        gain : np.ndarray
            The power gain as a function of frequency.
        """
        assert isinstance(
            hot_load_s11, LoadS11
        ), "hot_load_s11 must be a switch correction"
        assert (
            hot_load_s11.load_name == "hot_load"
        ), "hot_load_s11 must be a hot_load s11"
        sparams = {
            "s11": self.s11_model(freq),
            "s12s21": self.s12_model(freq),
            "s22": self.s22_model(freq),
        }
        return self.get_power_gain(sparams, hot_load_s11.s11_model(freq))

    @staticmethod
    def get_power_gain(
        semi_rigid_sparams: dict, hot_load_s11: np.ndarray
    ) -> np.ndarray:
        """Define Eq. 9 from M17.

        Parameters
        ----------
        semi_rigid_sparams : dict
            A dictionary of reflection coefficient measurements as a function of
            frequency for the semi-rigid cable.
        hot_load_s11 : array-like
            The S11 measurement of the hot_load.

        Returns
        -------
        gain : np.ndarray
            The power gain.
        """
        s11 = semi_rigid_sparams["s11"]
        s12s21 = semi_rigid_sparams["s12s21"]
        # Reflection coefficient at the hot load, de-embedded through the cable.
        rht = rc.gamma_de_embed(
            s11, s12s21, semi_rigid_sparams["s22"], hot_load_s11
        )
        numerator = np.abs(s12s21) * (1 - np.abs(rht) ** 2)
        denominator = np.abs(1 - s11 * rht) ** 2 * (1 - np.abs(hot_load_s11) ** 2)
        return numerator / denominator
class Load:
    """Wrapper bundling the spectrum and S11 of a single calibration load."""

    def __init__(
        self,
        spectrum: LoadSpectrum,
        reflections: LoadS11,
        hot_load_correction: Optional[HotLoadCorrection] = None,
        ambient: Optional[LoadSpectrum] = None,
    ):
        """Wrapper class containing all relevant information for a given load.

        Parameters
        ----------
        spectrum : :class:`LoadSpectrum`
            The spectrum for this particular load.
        reflections : :class:`LoadS11`
            The S11 measurements for this particular load.
        hot_load_correction : :class:`HotLoadCorrection`
            If this is a hot load, provide a hot load correction.
        ambient : :class:`LoadSpectrum`
            If this is a hot load, need to provide an ambient spectrum to correct it.
        """
        assert isinstance(spectrum, LoadSpectrum), "spectrum must be a LoadSpectrum"
        # BUGFIX: this message previously (incorrectly) referred to 'spectrum'.
        assert isinstance(reflections, LoadS11), "reflections must be a LoadS11"
        assert spectrum.load_name == reflections.load_name
        self.spectrum = spectrum
        self.reflections = reflections
        self.load_name = spectrum.load_name
        self.t_load = self.spectrum.t_load
        self.t_load_ns = self.spectrum.t_load_ns
        if self.load_name == "hot_load":
            # These are only meaningful (and only set) for the hot load.
            self._correction = hot_load_correction
            self._ambient = ambient

    @classmethod
    def from_path(
        cls,
        path: Union[str, Path],
        load_name: str,
        f_low: Optional[float] = None,
        f_high: Optional[float] = None,
        reflection_kwargs: Optional[dict] = None,
        spec_kwargs: Optional[dict] = None,
    ):
        """
        Define a full :class:`Load` from a path and name.

        Note that this constructor does not supply ``hot_load_correction`` or
        ``ambient``, so a hot load built this way cannot compute its corrected
        average temperature.

        Parameters
        ----------
        path : str or Path
            Path to the top-level calibration observation.
        load_name : str
            Name of a load to define.
        f_low, f_high : float
            Min/max frequencies to keep in measurements.
        reflection_kwargs : dict
            Extra arguments to pass through to :class:`LoadS11`.
        spec_kwargs : dict
            Extra arguments to pass through to :class:`LoadSpectrum`.

        Returns
        -------
        load : :class:`Load`
            The load object, containing all info about spectra and S11's for that load.
        """
        spec_kwargs = spec_kwargs or {}
        reflection_kwargs = reflection_kwargs or {}
        spec = LoadSpectrum.from_load_name(
            load_name,
            path,
            f_low=f_low,
            f_high=f_high,
            **spec_kwargs,
        )
        refl = LoadS11.from_path(
            load_name,
            path,
            f_low=f_low,
            f_high=f_high,
            **reflection_kwargs,
        )
        return cls(spec, refl)

    @property
    def s11_model(self):
        """The S11 model."""
        return self.reflections.s11_model

    @cached_property
    def temp_ave(self):
        """The average temperature of the thermistor (over frequency and time).

        For the hot load, the temperature is corrected for loss through the
        semi-rigid cable using the ambient load's temperature.
        """
        if self.load_name != "hot_load":
            return self.spectrum.temp_ave
        # Robustness fix: fail with a clear message rather than an opaque
        # AttributeError when the hot-load inputs were not supplied.
        if self._correction is None or self._ambient is None:
            raise ValueError(
                "hot_load requires both hot_load_correction and ambient to "
                "compute its average temperature."
            )
        gain = self._correction.power_gain(self.freq.freq, self.reflections)
        # Loss-corrected temperature (cf. HotLoadCorrection).
        return gain * self.spectrum.temp_ave + (1 - gain) * self._ambient.temp_ave

    @property
    def averaged_Q(self):
        """Averaged power ratio."""
        return self.spectrum.averaged_Q

    @property
    def averaged_spectrum(self):
        """Averaged uncalibrated temperature."""
        return self.spectrum.averaged_spectrum

    @property
    def freq(self):
        """A :class:`FrequencyRange` object corresponding to this measurement."""
        return self.spectrum.freq
class CalibrationObservation:
_sources = ("ambient", "hot_load", "open", "short")
def __init__(
self,
path: Union[str, Path],
semi_rigid_path: Union[str, Path] = ":semi_rigid_s_parameters_WITH_HEADER.txt",
f_low: Optional[float] = 40,
f_high: Optional[float] = None,
run_num: Union[None, int, dict] = None,
repeat_num: Union[None, int, dict] = None,
resistance_f: Optional[float] = None,
cterms: int = 5,
wterms: int = 7,
load_kwargs: Optional[dict] = None,
s11_kwargs: Optional[dict] = None,
load_spectra: Optional[dict] = None,
load_s11s: Optional[dict] = None,
compile_from_def: bool = True,
include_previous: bool = False,
internal_switch_kwargs: Optional[Dict[str, Any]] = None,
):
"""
A composite object representing a full Calibration Observation.
This includes spectra of all calibrators, and methods to find the calibration
parameters. It strictly follows Monsalve et al. (2017) in its formalism.
While by default the class uses the calibrator sources ("ambient", "hot_load",
"open", "short"), it can be modified to take other sources by setting
``CalibrationObservation._sources`` to a new tuple of strings.
Parameters
----------
path : str or Path
Path to the directory containing all relevant measurements. It is assumed
that in this directory is an `S11`, `Resistance` and `Spectra` directory.
semi_rigid_path : str or Path, optional
Path to a file containing S11 measurements for the semi rigid cable. Used to
correct the hot load S11. Found automatically if not given.
ambient_temp : int
Ambient temperature (C) at which measurements were taken.
f_low : float
Minimum frequency to keep for all loads (and their S11's). If for some
reason different frequency bounds are desired per-load, one can pass in
full load objects through ``load_spectra``.
f_high : float
Maximum frequency to keep for all loads (and their S11's). If for some
reason different frequency bounds are desired per-load, one can pass in
full load objects through ``load_spectra``.
run_num : int or dict
Which run number to use for the calibrators. Default is to use the last run
for each. Passing an int will attempt to use that run for each source. Pass
a dict mapping sources to numbers to use different combinations.
repeat_num : int or dict
Which repeat number to use for the calibrators. Default is to use the last
repeat for each. Passing an int will attempt to use that repeat for each
source. Pass a dict mapping sources to numbers to use different
combinations.
resistance_f : float
Female resistance (Ohms). Used for the LNA S11.
cterms : int
The number of terms to use for the polynomial fits to the calibration
functions.
wterms : int
The number of terms to use for the polynomial fits to the noise-wave
calibration functions.
load_kwargs : dict
Keyword arguments used to instantiate the calibrator :class:`LoadSpectrum`
objects. See its documentation for relevant parameters. Parameters specified
here are used for _all_ calibrator sources.
s11_kwargs : dict
Keyword arguments used to instantiate the calibrator :class:`LoadS11`
objects. See its documentation for relevant parameters. Parameters specified
here are used for _all_ calibrator sources.
load_spectra : dict
A dictionary mapping load names of calibration sources (eg. ambient, short)
to either :class:`LoadSpectrum` instances or dictionaries of keywords to
instantiate those objects. Useful for individually specifying
properties of each load separately. Values in these dictionaries (if
supplied) over-ride those given in ``load_kwargs`` (but values in
``load_kwargs`` are still used if not over-ridden).
load_s11s : dict
A dictionary mapping load names of calibration sources (eg. ambient, short)
to :class:`LoadS11` instances or dictionaries of keywords to instantiate
those objects. Useful for individually specifying properties of each load
separately. Values in these dictionaries (if supplied) over-ride those
given in ``s11_kwargs`` (but values in ``s11_kwargs`` are still used if not
over-ridden).
compile_from_def : bool
Whether to attempt compiling a virtual observation from a
``definition.yaml`` inside the observation directory. This is the default
behaviour, but can be turned off to enforce that the current directory
should be used directly.
include_previous : bool
Whether to include the previous observation by default to supplement this
one if required files are missing.
Examples
--------
This will setup an observation with all default options applied:
>>> path = '/CalibrationObservations/Receiver01_25C_2019_11_26_040_to_200MHz'
>>> calobs = CalibrationObservation(path)
To specify some options for constructing the various calibrator load spectra:
>>> calobs = CalibrationObservation(
>>> path,
>>> load_kwargs={"cache_dir":".", "ignore_times_percent": 50}
>>> )
But if we typically wanted 50% of times ignored, but in one special case we'd
like 80%:
>>> calobs = CalibrationObservation(
>>> path,
>>> load_kwargs={"cache_dir":".", "ignore_times_percent": 50},
>>> load_spectra={"short": {"ignore_times_percent": 80}}
>>> )
"""
load_spectra = load_spectra or {}
load_s11s = load_s11s or {}
load_kwargs = load_kwargs or {}
s11_kwargs = s11_kwargs or {}
internal_switch_kwargs = internal_switch_kwargs or {}
assert all(name in self._sources for name in load_spectra)
assert all(name in self._sources + ("lna",) for name in load_s11s)
self.io = io.CalibrationObservation(
path,
run_num=run_num,
repeat_num=repeat_num,
fix=False,
compile_from_def=compile_from_def,
include_previous=include_previous,
)
self.compiled_from_def = compile_from_def
self.previous_included = include_previous
self.path = Path(self.io.path)
hot_load_correction = HotLoadCorrection(semi_rigid_path, f_low, f_high)
self.internal_switch = s11.InternalSwitch(
data=self.io.s11.switching_state,
resistance=self.io.definition["measurements"]["resistance_m"][
self.io.s11.switching_state.run_num
],
**internal_switch_kwargs,
)
self._loads = {}
for source in self._sources:
load = load_spectra.get(source, {})
if isinstance(load, dict):
load = LoadSpectrum(
spec_obj=getattr(self.io.spectra, source),
resistance_obj=getattr(self.io.resistance, source),
f_low=f_low,
f_high=f_high,
**{**load_kwargs, **load},
)
# Ensure that we finally have a LoadSpectrum
if not isinstance(load, LoadSpectrum):
raise TypeError("load_spectra must be a dict of LoadSpectrum or dicts.")
refl = load_s11s.get(source, {})
if isinstance(refl, dict):
refl = LoadS11(
load_s11=getattr(self.io.s11, source),
internal_switch=self.internal_switch,
f_low=f_low,
f_high=f_high,
**{**s11_kwargs, **refl},
)
if source == "hot_load":
self._loads[source] = Load(
load,
refl,
hot_load_correction=hot_load_correction,
ambient=self._loads["ambient"].spectrum,
)
else:
self._loads[source] = Load(load, refl)
for name, load in self._loads.items():
setattr(self, name, load)
refl = load_s11s.get("lna", {})
self.lna = LNA(
load_s11=self.io.s11.receiver_reading,
f_low=f_low,
f_high=f_high,
resistance=resistance_f
or self.io.definition["measurements"]["resistance_f"][
self.io.s11.receiver_reading.run_num
],
**{**s11_kwargs, **refl},
)
# We must use the most restricted frequency range available from all available
# sources as well as the LNA.
fmin = max(
sum(
(
[load.spectrum.freq.min, load.reflections.freq.min]
for load in self._loads.values()
),
[],
)
+ [self.lna.freq.min]
)
fmax = min(
sum(
(
[load.spectrum.freq.max, load.reflections.freq.max]
for load in self._loads.values()
),
[],
)
+ [self.lna.freq.max]
)
if fmax <= fmin:
raise ValueError(
"The inputs loads and S11s have non-overlapping frequency ranges!"
)
self.freq = EdgesFrequencyRange(f_low=fmin, f_high=fmax)
# Now make everything actually consistent in its frequency range.
for load in self._loads.values():
load.spectrum.freq = self.freq
self.cterms = cterms
self.wterms = wterms
self.t_load = self.ambient.t_load
self.t_load_ns = self.ambient.t_load_ns
@property
def load_names(self) -> Tuple[str]:
"""Names of the loads."""
return tuple(self._loads.keys())
    def new_load(
        self,
        load_name: str,
        run_num: int = 1,
        reflection_kwargs: Optional[dict] = None,
        spec_kwargs: Optional[dict] = None,
    ):
        """Create a new load with the given load name.

        Uses files inside the current observation.

        Parameters
        ----------
        load_name : str
            The name of the load ('ambient', 'hot_load', 'open', 'short').
        run_num : int
            Run number to use for both the load's spectrum and its S11.
        reflection_kwargs : dict
            Keyword arguments to construct the :class:`SwitchCorrection`.
        spec_kwargs : dict
            Keyword arguments to construct the :class:`LoadSpectrum`.
        """
        reflection_kwargs = reflection_kwargs or {}
        spec_kwargs = spec_kwargs or {}

        # Fill up kwargs with keywords from this instance, so the new load is
        # processed consistently with the existing loads of this observation.
        if "resistance" not in reflection_kwargs:
            reflection_kwargs[
                "resistance"
            ] = self.open.reflections.internal_switch.resistance
        for key in [
            "ignore_times_percent",
            "rfi_removal",
            "rfi_kernel_width_freq",
            "rfi_kernel_width_time",
            "rfi_threshold",
            "cache_dir",
            "t_load",
            "t_load_ns",
        ]:
            if key not in spec_kwargs:
                # Default each spectrum-processing option to the 'open' load's value.
                spec_kwargs[key] = getattr(self.open.spectrum, key)

        # The switch repeat/run numbers must match those used by this observation.
        reflection_kwargs["run_num_load"] = run_num
        reflection_kwargs["repeat_num_switch"] = self.io.s11.switching_state.repeat_num
        reflection_kwargs["run_num_switch"] = self.io.s11.switching_state.run_num
        spec_kwargs["run_num"] = run_num

        return Load.from_path(
            path=self.io.path,
            load_name=load_name,
            f_low=self.freq.min,
            f_high=self.freq.max,
            reflection_kwargs=reflection_kwargs,
            spec_kwargs=spec_kwargs,
        )
    def plot_raw_spectra(self, fig=None, ax=None) -> plt.Figure:
        """
        Plot raw uncalibrated spectra for all calibrator sources.

        Parameters
        ----------
        fig : :class:`plt.Figure`
            A matplotlib figure on which to make the plot. By default creates a new one.
        ax : :class:`plt.Axes`
            A matplotlib Axes on which to make the plot. By default creates a new one.

        Returns
        -------
        fig : :class:`plt.Figure`
            The figure on which the plot was made.
        """
        if fig is None and ax is None:
            # One stacked panel per source, sharing the frequency axis.
            fig, ax = plt.subplots(
                len(self._sources), 1, sharex=True, gridspec_kw={"hspace": 0.05}
            )

        for i, (name, load) in enumerate(self._loads.items()):
            # Only the bottom-most panel gets the x-axis label.
            load.spectrum.plot(
                fig=fig, ax=ax[i], xlabel=(i == (len(self._sources) - 1))
            )
            ax[i].set_title(name)

        return fig
def plot_s11_models(self, **kwargs):
"""
Plot residuals of S11 models for all sources.
Returns
-------
dict:
Each entry has a key of the source name, and the value is a matplotlib fig.
"""
out = {
name: source.reflections.plot_residuals(**kwargs)
for name, source in self._loads.items()
}
out.update({"lna": self.lna.plot_residuals(**kwargs)})
return out
@cached_property
def s11_correction_models(self):
"""Dictionary of S11 correction models, one for each source."""
try:
return dict(self._injected_source_s11s)
except (TypeError, AttributeError):
return {
name: source.s11_model(self.freq.freq)
for name, source in self._loads.items()
}
@cached_property
def source_thermistor_temps(self) -> Dict[str, Union[float, np.ndarray]]:
"""Dictionary of input source thermistor temperatures."""
if (
hasattr(self, "_injected_source_temps")
and self._injected_source_temps is not None
):
return self._injected_source_temps
return {k: source.temp_ave for k, source in self._loads.items()}
    @cached_property
    def _calibration_coefficients(self):
        """The calibration polynomials, evaluated at `freq.freq`.

        Returns a 5-tuple (scale, offset, Tunc, Tcos, Tsin) as produced by
        :func:`rcf.get_calibration_quantities_iterative`.
        """
        # Injected averaged spectra (used for testing) take precedence over
        # the measured ones.
        if (
            hasattr(self, "_injected_averaged_spectra")
            and self._injected_averaged_spectra is not None
        ):
            ave_spec = self._injected_averaged_spectra
        else:
            ave_spec = {
                k: source.averaged_spectrum for k, source in self._loads.items()
            }
        # Iteratively solve for the five calibration quantities using all
        # sources simultaneously, in normalized (recentred) frequency.
        scale, off, Tu, TC, TS = rcf.get_calibration_quantities_iterative(
            self.freq.freq_recentred,
            temp_raw=ave_spec,
            gamma_rec=self.lna_s11,
            gamma_ant=self.s11_correction_models,
            temp_ant=self.source_thermistor_temps,
            cterms=self.cterms,
            wterms=self.wterms,
            temp_amb_internal=self.t_load,
        )
        return scale, off, Tu, TC, TS
@cached_property
def C1_poly(self): # noqa: N802
"""`np.poly1d` object describing the Scaling calibration coefficient C1.
The polynomial is defined to act on normalized frequencies such that `freq.min`
and `freq.max` map to -1 and 1 respectively. Use :func:`~C1` as a direct
function on frequency.
"""
return self._calibration_coefficients[0]
@cached_property
def C2_poly(self): # noqa: N802
"""`np.poly1d` object describing the offset calibration coefficient C2.
The polynomial is defined to act on normalized frequencies such that `freq.min`
and `freq.max` map to -1 and 1 respectively. Use :func:`~C2` as a direct
function on frequency.
"""
return self._calibration_coefficients[1]
@cached_property
def Tunc_poly(self): # noqa: N802
"""`np.poly1d` object describing the uncorrelated noise-wave parameter, Tunc.
The polynomial is defined to act on normalized frequencies such that `freq.min`
and `freq.max` map to -1 and 1 respectively. Use :func:`~Tunc` as a direct
function on frequency.
"""
return self._calibration_coefficients[2]
@cached_property
def Tcos_poly(self): # noqa: N802
"""`np.poly1d` object describing the cosine noise-wave parameter, Tcos.
The polynomial is defined to act on normalized frequencies such that `freq.min`
and `freq.max` map to -1 and 1 respectively. Use :func:`~Tcos` as a direct
function on frequency.
"""
return self._calibration_coefficients[3]
@cached_property
def Tsin_poly(self): # noqa: N802
"""`np.poly1d` object describing the sine noise-wave parameter, Tsin.
The polynomial is defined to act on normalized frequencies such that `freq.min`
and `freq.max` map to -1 and 1 respectively. Use :func:`~Tsin` as a direct
function on frequency.
"""
return self._calibration_coefficients[4]
def C1(self, f: Optional[Union[float, np.ndarray]] = None): # noqa: N802
"""
Scaling calibration parameter.
Parameters
----------
f : array-like
The frequencies at which to evaluate C1. By default, the frequencies of this
instance.
"""
if hasattr(self, "_injected_c1") and self._injected_c1 is not None:
return np.array(self._injected_c1)
fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)
return self.C1_poly(fnorm)
def C2(self, f: Optional[Union[float, np.ndarray]] = None): # noqa: N802
"""
Offset calibration parameter.
Parameters
----------
f : array-like
The frequencies at which to evaluate C2. By default, the frequencies of this
instance.
"""
if hasattr(self, "_injected_c2") and self._injected_c2 is not None:
return np.array(self._injected_c2)
fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)
return self.C2_poly(fnorm)
def Tunc(self, f: Optional[Union[float, np.ndarray]] = None): # noqa: N802
"""
Uncorrelated noise-wave parameter.
Parameters
----------
f : array-like
The frequencies at which to evaluate Tunc. By default, the frequencies of
thisinstance.
"""
if hasattr(self, "_injected_t_unc") and self._injected_t_unc is not None:
return np.array(self._injected_t_unc)
fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)
return self.Tunc_poly(fnorm)
def Tcos(self, f: Optional[Union[float, np.ndarray]] = None): # noqa: N802
"""
Cosine noise-wave parameter.
Parameters
----------
f : array-like
The frequencies at which to evaluate Tcos. By default, the frequencies of
this instance.
"""
if hasattr(self, "_injected_t_cos") and self._injected_t_cos is not None:
return np.array(self._injected_t_cos)
fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)
return self.Tcos_poly(fnorm)
def Tsin(self, f: Optional[Union[float, np.ndarray]] = None): # noqa: N802
"""
Sine noise-wave parameter.
Parameters
----------
f : array-like
The frequencies at which to evaluate Tsin. By default, the frequencies of
this instance.
"""
if hasattr(self, "_injected_t_sin") and self._injected_t_sin is not None:
return np.array(self._injected_t_sin)
fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)
return self.Tsin_poly(fnorm)
@cached_property
def lna_s11(self):
"""The corrected S11 of the LNA evaluated at the data frequencies."""
if hasattr(self, "_injected_lna_s11") and self._injected_lna_s11 is not None:
return self._injected_lna_s11
else:
return self.lna.s11_model(self.freq.freq)
def get_linear_coefficients(self, load: Union[Load, str]):
"""
Calibration coefficients a,b such that T = aT* + b (derived from Eq. 7).
Parameters
----------
load : str or :class:`Load`
The load for which to get the linear coefficients.
"""
if isinstance(load, str):
load_s11 = self.s11_correction_models[load]
elif load.load_name in self.s11_correction_models:
load_s11 = self.s11_correction_models[load.load_name]
else:
load_s11 = load.s11_model(self.freq.freq)
return rcf.get_linear_coefficients(
load_s11,
self.lna_s11,
self.C1(self.freq.freq),
self.C2(self.freq.freq),
self.Tunc(self.freq.freq),
self.Tcos(self.freq.freq),
self.Tsin(self.freq.freq),
t_load=self.t_load,
)
def calibrate(self, load: Union[Load, str], q=None, temp=None):
"""
Calibrate the temperature of a given load.
Parameters
----------
load : :class:`Load` or str
The load to calibrate.
Returns
-------
array : calibrated antenna temperature in K, len(f).
"""
load = self._load_str_to_load(load)
a, b = self.get_linear_coefficients(load)
if q is not None:
temp = self.t_load_ns * q + self.t_load
elif temp is None:
temp = load.averaged_spectrum
return a * temp + b
def _load_str_to_load(self, load: Union[Load, str]):
if isinstance(load, str):
try:
load = self._loads[load]
except AttributeError:
raise AttributeError(
"load must be a Load object or a string (one of "
"{ambient,hot_load,open,short})"
)
else:
assert isinstance(
load, Load
), "load must be a Load instance, got the {} {}".format(load, type(Load))
return load
def decalibrate(
self, temp: np.ndarray, load: Union[Load, str], freq: np.ndarray = None
):
"""
Decalibrate a temperature spectrum, yielding uncalibrated T*.
Parameters
----------
temp : array_like
A temperature spectrum, with the same length as `freq.freq`.
load : str or :class:`Load`
The load to calibrate.
freq : array-like
The frequencies at which to decalibrate. By default, the frequencies of the
instance.
Returns
-------
array_like : T*, the normalised uncalibrated temperature.
"""
if freq is None:
freq = self.freq.freq
if freq.min() < self.freq.freq.min():
warnings.warn(
"The minimum frequency is outside the calibrated range "
f"({self.freq.freq.min()} - {self.freq.freq.max()} MHz)"
)
if freq.min() > self.freq.freq.max():
warnings.warn("The maximum frequency is outside the calibrated range ")
a, b = self.get_linear_coefficients(load)
return (temp - b) / a
def get_K(
self, freq: np.ndarray | None = None
) -> Dict[str, Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]]:
"""Get the source-S11-dependent factors of Monsalve (2017) Eq. 7."""
if freq is None:
freq = self.freq.freq
gamma_ants = self.s11_correction_models
else:
gamma_ants = {
name: source.s11_model(freq) for name, source in self._loads.items()
}
lna_s11 = self.lna.s11_model(freq)
return {
name: rcf.get_K(gamma_rec=lna_s11, gamma_ant=gamma_ant)
for name, gamma_ant in gamma_ants.items()
}
    def plot_calibrated_temp(
        self,
        load: Union[Load, str],
        bins: int = 2,
        fig=None,
        ax=None,
        xlabel=True,
        ylabel=True,
    ):
        """
        Make a plot of calibrated temperature for a given source.

        Parameters
        ----------
        load : :class:`~LoadSpectrum` instance
            Source to plot.
        bins : int
            Number of bins to smooth over (std of Gaussian kernel)
        fig : Figure
            Optionally provide a matplotlib figure to add to.
        ax : Axis
            Optionally provide a matplotlib Axis to add to.
        xlabel : bool
            Whether to write the x-axis label
        ylabel : bool
            Whether to write the y-axis label

        Returns
        -------
        fig :
            The matplotlib figure that was created.
        """
        load = self._load_str_to_load(load)

        if fig is None and ax is None:
            fig, ax = plt.subplots(1, 1, facecolor="w")

        # binning: smooth over frequency with a Gaussian kernel of std `bins`.
        temp_calibrated = self.calibrate(load)
        if bins > 0:
            freq_ave_cal = convolve(
                temp_calibrated, Gaussian1DKernel(stddev=bins), boundary="extend"
            )
        else:
            freq_ave_cal = temp_calibrated

        # Infinities would corrupt the RMS/limits below; convert them to NaN.
        freq_ave_cal[np.isinf(freq_ave_cal)] = np.nan

        rms = np.sqrt(np.mean((freq_ave_cal - np.mean(freq_ave_cal)) ** 2))

        ax.plot(
            self.freq.freq,
            freq_ave_cal,
            label=f"Calibrated {load.spectrum.load_name} [RMS = {rms:.3f}]",
        )

        # Injected temperatures (if any) take precedence over the measured average.
        temp_ave = self.source_thermistor_temps.get(load.load_name, load.temp_ave)

        if not hasattr(temp_ave, "__len__"):
            # Scalar temperature: draw a horizontal reference line.
            ax.axhline(temp_ave, color="C2", label="Average thermistor temp")
        else:
            ax.plot(
                self.freq.freq,
                temp_ave,
                color="C2",
                label="Average thermistor temp",
            )

        ax.set_ylim([np.nanmin(freq_ave_cal), np.nanmax(freq_ave_cal)])
        if xlabel:
            ax.set_xlabel("Frequency [MHz]")

        if ylabel:
            ax.set_ylabel("Temperature [K]")

        plt.ticklabel_format(useOffset=False)
        ax.grid()
        ax.legend()

        return plt.gcf()
def get_load_residuals(self):
"""Get residuals of the calibrated temperature for a each load."""
out = {}
for source in self._sources:
load = self._load_str_to_load(source)
cal = self.calibrate(load)
true = self.source_thermistor_temps[source]
out[source] = cal - true
return out
def get_rms(self, smooth: int = 4):
"""Return a dict of RMS values for each source.
Parameters
----------
smooth : int
The number of bins over which to smooth residuals before taking the RMS.
"""
resids = self.get_load_residuals()
out = {}
for name, res in resids.items():
if smooth > 1:
res = convolve(res, Gaussian1DKernel(stddev=smooth), boundary="extend")
out[name] = np.sqrt(np.nanmean(res ** 2))
return out
    def plot_calibrated_temps(self, bins=64, fig=None, ax=None):
        """
        Plot all calibrated temperatures in a single figure.

        Parameters
        ----------
        bins : int
            Number of bins in the smoothed spectrum
        fig : Figure
            Optionally provide a matplotlib figure to add to.
        ax : sequence of Axes
            Optionally provide matplotlib Axes (one per source) to add to.

        Returns
        -------
        fig :
            Matplotlib figure that was created.
        """
        # Re-create the figure unless one with the right number of panels was given.
        if fig is None or ax is None or len(ax) != len(self._sources):
            fig, ax = plt.subplots(
                len(self._sources),
                1,
                sharex=True,
                gridspec_kw={"hspace": 0.05},
                figsize=(10, 12),
            )

        for i, source in enumerate(self._sources):
            # Only the bottom-most panel gets the x-axis label.
            self.plot_calibrated_temp(
                source,
                bins=bins,
                fig=fig,
                ax=ax[i],
                xlabel=i == (len(self._sources) - 1),
            )

        fig.suptitle("Calibrated Temperatures for Calibration Sources", fontsize=15)
        return fig
def write_coefficients(self, path: Optional[str] = None):
"""
Save a text file with the derived calibration co-efficients.
Parameters
----------
path : str
Directory in which to write the file. The filename starts with
`All_cal-params` and includes parameters of the class in the filename.
By default, current directory.
"""
path = Path(path or ".")
if path.is_dir():
path /= (
f"calibration_parameters_fmin{self.freq.freq.min()}_"
f"fmax{self.freq.freq.max()}_C{self.cterms}_W{self.wterms}.txt"
)
np.savetxt(
path,
[
self.freq.freq,
self.C1(),
self.C2(),
self.Tunc(),
self.Tcos(),
self.Tsin(),
],
)
    def plot_coefficients(self, fig=None, ax=None):
        """
        Make a plot of the calibration models, C1, C2, Tunc, Tcos and Tsin.

        Parameters
        ----------
        fig : Figure
            Optionally pass a matplotlib figure to add to.
        ax : Axis
            Optionally pass a matplotlib axis to pass to. Must have 5 axes.

        Returns
        -------
        fig :
            The matplotlib figure containing the five panels.
        """
        if fig is None or ax is None:
            fig, ax = plt.subplots(
                5, 1, facecolor="w", gridspec_kw={"hspace": 0.05}, figsize=(10, 9)
            )

        labels = [
            "Scale ($C_1$)",
            "Offset ($C_2$) [K]",
            r"$T_{\rm unc}$ [K]",
            r"$T_{\rm cos}$ [K]",
            r"$T_{\rm sin}$ [K]",
        ]
        # One panel per calibration quantity, evaluated at the data frequencies.
        for i, (kind, label) in enumerate(
            zip(["C1", "C2", "Tunc", "Tcos", "Tsin"], labels)
        ):
            ax[i].plot(self.freq.freq, getattr(self, kind)())
            ax[i].set_ylabel(label, fontsize=13)
            ax[i].grid()
            plt.ticklabel_format(useOffset=False)

            # Only the bottom panel gets the x-axis label.
            if i == 4:
                ax[i].set_xlabel("Frequency [MHz]", fontsize=13)

        fig.suptitle("Calibration Parameters", fontsize=15)
        return fig
def invalidate_cache(self):
"""Invalidate all cached attributes so they must be recalculated."""
if not hasattr(self, "_cached_"):
return
for cache in self._cached_:
del self.__dict__[cache]
def update(self, **kwargs):
"""Update the class in-place, invalidating the cache as well.
Parameters
----------
kwargs :
All parameters to be updated.
"""
self.invalidate_cache()
for k, v in kwargs.items():
setattr(self, k, v)
    def write(self, filename: Union[str, Path]):
        """
        Write all information required to calibrate a new spectrum to file.

        The output is a HDF5 file readable by :class:`Calibration`.

        Parameters
        ----------
        filename : path
            The filename to write to.
        """
        with h5py.File(filename, "w") as fl:
            # Write attributes (observation metadata and internal-switch info).
            fl.attrs["path"] = str(self.io.original_path)
            fl.attrs["cterms"] = self.cterms
            fl.attrs["wterms"] = self.wterms
            fl.attrs["switch_path"] = str(self.internal_switch.data.path)
            fl.attrs["switch_repeat_num"] = self.internal_switch.data.repeat_num
            fl.attrs["switch_resistance"] = self.internal_switch.resistance
            fl.attrs["switch_nterms"] = self.internal_switch.n_terms[0]
            fl.attrs["switch_model"] = str(self.internal_switch.model)
            fl.attrs["t_load"] = self.open.spectrum.t_load
            fl.attrs["t_load_ns"] = self.open.spectrum.t_load_ns

            # Calibration polynomial coefficients (np.poly1d ordering).
            fl["C1"] = self.C1_poly.coefficients
            fl["C2"] = self.C2_poly.coefficients
            fl["Tunc"] = self.Tunc_poly.coefficients
            fl["Tcos"] = self.Tcos_poly.coefficients
            fl["Tsin"] = self.Tsin_poly.coefficients
            fl["frequencies"] = self.freq.freq

            # Complex S11s are stored as separate real/imaginary datasets.
            fl["lna_s11_real"] = self.lna.s11_model(self.freq.freq).real
            fl["lna_s11_imag"] = self.lna.s11_model(self.freq.freq).imag

            fl["internal_switch_s11_real"] = np.real(
                self.internal_switch.s11_model(self.freq.freq)
            )
            fl["internal_switch_s11_imag"] = np.imag(
                self.internal_switch.s11_model(self.freq.freq)
            )
            fl["internal_switch_s12_real"] = np.real(
                self.internal_switch.s12_model(self.freq.freq)
            )
            fl["internal_switch_s12_imag"] = np.imag(
                self.internal_switch.s12_model(self.freq.freq)
            )
            fl["internal_switch_s22_real"] = np.real(
                self.internal_switch.s22_model(self.freq.freq)
            )
            fl["internal_switch_s22_imag"] = np.imag(
                self.internal_switch.s22_model(self.freq.freq)
            )

            # Per-load data: S11 model (as YAML), averaged Q and its variance,
            # average thermistor temperature and number of integrations.
            load_grp = fl.create_group("loads")
            for name, load in self._loads.items():
                grp = load_grp.create_group(name)
                grp.attrs["s11_model"] = yaml.dump(load.s11_model)
                grp["averaged_Q"] = load.spectrum.averaged_Q
                grp["variance_Q"] = load.spectrum.variance_Q
                grp["temp_ave"] = load.temp_ave
                grp.attrs["n_integrations"] = load.spectrum.n_integrations
def to_calfile(self):
"""Directly create a :class:`Calibration` object without writing to file."""
return Calibration.from_calobs(self)
def inject(
self,
lna_s11: np.ndarray = None,
source_s11s: Dict[str, np.ndarray] = None,
c1: np.ndarray = None,
c2: np.ndarray = None,
t_unc: np.ndarray = None,
t_cos: np.ndarray = None,
t_sin: np.ndarray = None,
averaged_spectra: Dict[str, np.ndarray] = None,
thermistor_temp_ave: Dict[str, np.ndarray] = None,
) -> CalibrationObservation:
"""Make a new :class:`CalibrationObservation` based on this, with injections.
Parameters
----------
lna_s11
The LNA S11 as a function of frequency to inject.
source_s11s
Dictionary of ``{source: S11}`` for each source to inject.
c1
Scaling parameter as a function of frequency to inject.
c2 : [type], optional
Offset parameter to inject as a function of frequency.
t_unc
Uncorrelated temperature to inject (as function of frequency)
t_cos
Correlated temperature to inject (as function of frequency)
t_sin
Correlated temperature to inject (as function of frequency)
averaged_spectra
Dictionary of ``{source: spectrum}`` for each source to inject.
Returns
-------
:class:`CalibrationObservation`
A new observation object with the injected models.
"""
new = copy(self)
new.invalidate_cache()
new._injected_lna_s11 = lna_s11
new._injected_source_s11s = source_s11s
new._injected_c1 = c1
new._injected_c2 = c2
new._injected_t_unc = t_unc
new._injected_t_cos = t_cos
new._injected_t_sin = t_sin
new._injected_averaged_spectra = averaged_spectra
new._injected_source_temps = thermistor_temp_ave
return new
@attr.s
class _LittleS11:
    """Lightweight stand-in for a LoadS11, as reconstructed from a calibration file."""

    # The S11 model evaluated (or evaluatable) at the calibration frequencies.
    s11_model: Callable = attr.ib()
@attr.s
class _LittleSpectrum:
    """Lightweight stand-in for a LoadSpectrum, as reconstructed from a calibration file."""

    # Averaged power-ratio spectrum Q and its variance, plus the number of
    # integrations that went into the average.
    averaged_Q: np.ndarray = attr.ib()
    variance_Q: np.ndarray = attr.ib()
    n_integrations: int = attr.ib()
@attr.s
class _LittleLoad:
    """Lightweight stand-in for a Load, as reconstructed from a calibration file."""

    reflections: _LittleS11 = attr.ib()
    spectrum: _LittleSpectrum = attr.ib()
    temp_ave: np.ndarray = attr.ib()
class Calibration:
    """An interface to a HDF5 file containing calibration information."""

    def __init__(self, filename: Union[str, Path]):
        """
        A class defining an interface to a HDF5 file containing calibration information.

        Parameters
        ----------
        filename : str or Path
            The path to the calibration file.
        """
        self.calfile = Path(filename)

        with h5py.File(filename, "r") as fl:
            self.calobs_path = fl.attrs["path"]
            self.cterms = int(fl.attrs["cterms"])
            self.wterms = int(fl.attrs["wterms"])
            # Older calibration files may lack these attributes; use defaults.
            self.t_load = fl.attrs.get("t_load", 300)
            self.t_load_ns = fl.attrs.get("t_load_ns", 400)

            # np.poly1d objects acting on *normalized* frequency — see C1()/C2()/etc.
            self.C1_poly = np.poly1d(fl["C1"][...])
            self.C2_poly = np.poly1d(fl["C2"][...])
            self.Tcos_poly = np.poly1d(fl["Tcos"][...])
            self.Tsin_poly = np.poly1d(fl["Tsin"][...])
            self.Tunc_poly = np.poly1d(fl["Tunc"][...])

            self.freq = FrequencyRange(fl["frequencies"][...])

            self._loads = {}
            # ROBUSTNESS FIX: always define load_names, even when the file has no
            # "loads" group (previously the attribute was left undefined).
            self.load_names = []
            if "loads" in fl:
                lg = fl["loads"]
                self.load_names = list(lg.keys())
                for name, grp in lg.items():
                    self._loads[name] = _LittleLoad(
                        reflections=_LittleS11(
                            s11_model=yaml.load(
                                grp.attrs["s11_model"], Loader=yaml.FullLoader
                            ).at(x=self.freq.freq)
                        ),
                        spectrum=_LittleSpectrum(
                            averaged_Q=grp["averaged_Q"][...],
                            variance_Q=grp["variance_Q"][...],
                            n_integrations=grp.attrs["n_integrations"],
                        ),
                        temp_ave=grp["temp_ave"][...],
                    )

            # Complex S-parameters are stored as separate real/imaginary arrays;
            # build splines for each so they can be evaluated at any frequency.
            self._lna_s11_rl = Spline(self.freq.freq, fl["lna_s11_real"][...])
            self._lna_s11_im = Spline(self.freq.freq, fl["lna_s11_imag"][...])

            self._intsw_s11_rl = Spline(
                self.freq.freq, fl["internal_switch_s11_real"][...]
            )
            self._intsw_s11_im = Spline(
                self.freq.freq, fl["internal_switch_s11_imag"][...]
            )
            self._intsw_s12_rl = Spline(
                self.freq.freq, fl["internal_switch_s12_real"][...]
            )
            self._intsw_s12_im = Spline(
                self.freq.freq, fl["internal_switch_s12_imag"][...]
            )
            self._intsw_s22_rl = Spline(
                self.freq.freq, fl["internal_switch_s22_real"][...]
            )
            self._intsw_s22_im = Spline(
                self.freq.freq, fl["internal_switch_s22_imag"][...]
            )

    @classmethod
    def from_calobs(cls, calobs: CalibrationObservation) -> Calibration:
        """Generate a :class:`Calibration` from an in-memory observation."""
        # SECURITY/ROBUSTNESS FIX: tempfile.mktemp is deprecated and race-prone;
        # create the temporary file safely and write the calibration into it.
        with tempfile.NamedTemporaryFile(suffix=".h5", delete=False) as tf:
            tmp = tf.name
        calobs.write(tmp)
        return cls(tmp)

    def lna_s11(self, freq=None):
        """Get the LNA S11 at given frequencies (default: the file's frequencies)."""
        if freq is None:
            freq = self.freq.freq
        return self._lna_s11_rl(freq) + 1j * self._lna_s11_im(freq)

    def internal_switch_s11(self, freq=None):
        """Get the S11 of the internal switch at given frequencies."""
        if freq is None:
            freq = self.freq.freq
        return self._intsw_s11_rl(freq) + 1j * self._intsw_s11_im(freq)

    def internal_switch_s12(self, freq=None):
        """Get the S12 of the internal switch at given frequencies."""
        if freq is None:
            freq = self.freq.freq
        return self._intsw_s12_rl(freq) + 1j * self._intsw_s12_im(freq)

    def internal_switch_s22(self, freq=None):
        """Get the S22 of the internal switch at given frequencies."""
        if freq is None:
            freq = self.freq.freq
        return self._intsw_s22_rl(freq) + 1j * self._intsw_s22_im(freq)

    def C1(self, freq=None):
        """Evaluate the Scale polynomial at given frequencies."""
        if freq is None:
            freq = self.freq.freq
        return self.C1_poly(self.freq.normalize(freq))

    def C2(self, freq=None):
        """Evaluate the Offset polynomial at given frequencies."""
        if freq is None:
            freq = self.freq.freq
        return self.C2_poly(self.freq.normalize(freq))

    def Tcos(self, freq=None):
        """Evaluate the cos temperature polynomial at given frequencies."""
        if freq is None:
            freq = self.freq.freq
        return self.Tcos_poly(self.freq.normalize(freq))

    def Tsin(self, freq=None):
        """Evaluate the sin temperature polynomial at given frequencies."""
        if freq is None:
            freq = self.freq.freq
        return self.Tsin_poly(self.freq.normalize(freq))

    def Tunc(self, freq=None):
        """Evaluate the uncorrelated temperature polynomial at given frequencies."""
        if freq is None:
            freq = self.freq.freq
        return self.Tunc_poly(self.freq.normalize(freq))

    def _linear_coefficients(self, freq, ant_s11):
        # Coefficients a, b such that T_cal = a * T_uncal + b.
        return rcf.get_linear_coefficients(
            ant_s11,
            self.lna_s11(freq),
            self.C1(freq),
            self.C2(freq),
            self.Tunc(freq),
            self.Tcos(freq),
            self.Tsin(freq),
            self.t_load,
        )

    def calibrate_temp(self, freq: np.ndarray, temp: np.ndarray, ant_s11: np.ndarray):
        """
        Calibrate given uncalibrated spectrum.

        Parameters
        ----------
        freq : np.ndarray
            The frequencies at which to calibrate
        temp :  np.ndarray
            The temperatures to calibrate (in K).
        ant_s11 : np.ndarray
            The antenna S11 for the load.

        Returns
        -------
        temp : np.ndarray
            The calibrated temperature.
        """
        a, b = self._linear_coefficients(freq, ant_s11)
        return temp * a + b

    def decalibrate_temp(self, freq, temp, ant_s11):
        """
        De-calibrate given calibrated spectrum.

        Parameters
        ----------
        freq : np.ndarray
            The frequencies at which to calibrate
        temp :  np.ndarray
            The temperatures to calibrate (in K).
        ant_s11 : np.ndarray
            The antenna S11 for the load.

        Returns
        -------
        temp : np.ndarray
            The calibrated temperature.

        Notes
        -----
        Using this and then :method:`calibrate_temp` immediately should be an identity
        operation.
        """
        a, b = self._linear_coefficients(freq, ant_s11)
        return (temp - b) / a

    def calibrate_Q(
        self, freq: np.ndarray, q: np.ndarray, ant_s11: np.ndarray
    ) -> np.ndarray:
        """
        Calibrate given power ratio spectrum.

        Parameters
        ----------
        freq : np.ndarray
            The frequencies at which to calibrate
        q :  np.ndarray
            The power ratio to calibrate.
        ant_s11 : np.ndarray
            The antenna S11 for the load.

        Returns
        -------
        temp : np.ndarray
            The calibrated temperature.
        """
        # Convert the power ratio to an uncalibrated temperature first.
        uncal_temp = self.t_load_ns * q + self.t_load
        return self.calibrate_temp(freq, uncal_temp, ant_s11)
def perform_term_sweep(
    calobs: CalibrationObservation,
    delta_rms_thresh: float = 0,
    max_cterms: int = 15,
    max_wterms: int = 15,
    explore_run_nums: bool = False,
    explore_repeat_nums: bool = False,
    direc=".",
    verbose=False,
) -> CalibrationObservation:
    """For a given calibration definition, perform a sweep over number of terms.

    There are options to save _every_ calibration solution, or just the "best" one.

    Parameters
    ----------
    calobs: :class:`CalibrationObservation` instance
        The definition calibration class. The `cterms` and `wterms` in this instance
        should define the *lowest* values of the parameters to sweep over.
    delta_rms_thresh : float
        The threshold in change in RMS between one set of parameters and the next that
        will define where to cut off. If zero, will run all sets of parameters up to
        the maximum terms specified.
    max_cterms : int
        The maximum number of cterms to trial.
    max_wterms : int
        The maximum number of wterms to trial.
    explore_run_nums : bool
        Whether to iterate over S11 run numbers to find the best residuals.
    explore_repeat_nums : bool
        Whether to iterate over S11 repeat numbers to find the best residuals.
    direc : str
        Directory to write resultant :class:`Calibration` file to.
    verbose : bool
        Whether to write out the RMS values derived throughout the sweep.

    Returns
    -------
    :class:`CalibrationObservation`
        The same instance, updated in-place to the best parameters found, and
        written to `direc` (unless `direc` is None).

    Notes
    -----
    When exploring run/repeat nums, run nums are kept constant within a load (i.e. the
    match/short/open etc. all have either run_num=1 or run_num=2 for the same load.
    This is physically motivated.
    """
    cterms = range(calobs.cterms, max_cterms)
    wterms = range(calobs.wterms, max_wterms)

    # winner[i] holds the index (into wterms) of the best wterms for cterms[i].
    winner = np.zeros(len(cterms), dtype=int)

    s11_keys = ["switching_state", "receiver_reading"] + list(io.LOAD_ALIASES.keys())
    if explore_repeat_nums:
        # Note that we don't explore run_nums for spectra/resistance, because it's rare
        # to have those, and they'll only exist if one got completely botched (and that
        # should be set by the user).
        rep_num = {
            k: range(1, getattr(calobs.io.s11, k).max_repeat_num + 1) for k in s11_keys
        }
    else:
        rep_num = {k: [getattr(calobs.io.s11, k).repeat_num] for k in s11_keys}

    rep_num = tools.dct_of_list_to_list_of_dct(rep_num)

    if explore_run_nums:
        run_num = {
            "switching_state": range(
                1, calobs.io.s11.get_highest_run_num("SwitchingState") + 1
            ),
            "receiver_reading": range(
                1, calobs.io.s11.get_highest_run_num("ReceiverReading") + 1
            ),
        }
    else:
        run_num = {
            "switching_state": [calobs.io.s11.switching_state.run_num],
            "receiver_reading": [calobs.io.s11.receiver_reading.run_num],
        }

    run_num = tools.dct_of_list_to_list_of_dct(run_num)

    best_rms = np.inf
    for this_rep_num in rep_num:
        for this_run_num in run_num:
            tmp_run_num = copy(calobs.io.run_num)
            tmp_run_num.update(this_run_num)

            # Change the base io.CalObs because it will change with rep/run.
            calobs.io = io.CalibrationObservation(
                path=calobs.io.path,
                run_num=tmp_run_num,
                repeat_num=this_rep_num,
                fix=False,
                compile_from_def=calobs.compiled_from_def,
                include_previous=calobs.previous_included,
            )

            calobs.lna = LNA(
                calobs.io.s11.receiver_reading,
                f_low=calobs.freq.min,
                f_high=calobs.freq.max,
                resistance=calobs.lna.resistance,
            )

            # If we're changing anything else, we need to change each load.
            for name, load in calobs._loads.items():
                load.reflections = LoadS11.from_path(
                    load_name=name,
                    path=calobs.io.path,
                    repeat_num_load=this_rep_num[name],
                    run_num_switch=this_run_num["switching_state"],
                    repeat_num_switch=this_rep_num["switching_state"],
                )

            if verbose:
                print(
                    f"SWEEPING SwSt={calobs.io.s11.switching_state.repeat_num}, "
                    f"RcvRd={calobs.io.s11.receiver_reading.repeat_num} "
                    f"[Sw={calobs.io.s11.switching_state.run_num}, "
                    f"RR={calobs.io.s11.receiver_reading.run_num}, "
                    f"open={calobs.io.s11.open.run_num}, "
                    f"short={calobs.io.s11.short.run_num}, "
                    f"ambient={calobs.io.s11.ambient.run_num}, "
                    f"hot={calobs.io.s11.hot_load.run_num}]"
                )
                print("-" * 30)

            rms = np.zeros((len(cterms), len(wterms)))
            for i, c in enumerate(cterms):
                for j, w in enumerate(wterms):
                    calobs.update(cterms=c, wterms=w)
                    res = calobs.get_load_residuals()
                    # Reduced RMS over all loads: dof = n_points - n_params.
                    dof = sum(len(r) for r in res.values()) - c - w

                    rms[i, j] = np.sqrt(
                        sum(np.nansum(np.square(x)) for x in res.values()) / dof
                    )

                    if verbose:
                        print(f"Nc = {c:02}, Nw = {w:02}; RMS/dof = {rms[i, j]:1.3e}")

                    # If we've decreased by more than the threshold, this wterms becomes
                    # the new winner (for this number of cterms)
                    if j > 0 and rms[i, j] >= rms[i, j - 1] - delta_rms_thresh:
                        winner[i] = j - 1
                        break

                # Stop increasing cterms once the best RMS no longer improves enough.
                if (
                    i > 0
                    and rms[i, winner[i]]
                    >= rms[i - 1, winner[i - 1]] - delta_rms_thresh
                ):
                    break

            if verbose:
                print(
                    f"Best parameters found for Nc={cterms[i-1]}, "
                    f"Nw={wterms[winner[i-1]]}, "
                    f"with RMS = {rms[i-1, winner[i-1]]}."
                )
                print()

            if rms[i - 1, winner[i - 1]] < best_rms:
                # BUG FIX: best_rms was never updated, so every rep/run combo
                # passed the comparison against np.inf and the *last* combo tried
                # (not the best one) was kept.
                best_rms = rms[i - 1, winner[i - 1]]
                best_run_combo = (
                    calobs.io.run_num,
                    calobs.io.s11.receiver_reading.repeat_num,
                    calobs.io.s11.switching_state.repeat_num,
                )
                best_cterms = cterms[i - 1]
                best_wterms = wterms[winner[i - 1]]

    if verbose and (explore_repeat_nums or explore_run_nums):
        print("The very best parameters were found were for:")
        print(f"\tSwitchingState Repeat = {best_run_combo[2]}")
        print(f"\tReceiverReading Repeat = {best_run_combo[1]}")
        print(f"\tRun Numbers = {best_run_combo[0]}")
        print(f"\t# C-terms = {best_cterms}")
        print(f"\t# W-terms = {best_wterms}")

    # Re-configure the observation to the overall best combination found.
    calobs.update(cterms=best_cterms, wterms=best_wterms)
    calobs.io = io.CalibrationObservation(
        path=calobs.io.path,
        run_num=best_run_combo[0],
        repeat_num={
            "switching_state": best_run_combo[2],
            "receiver_reading": best_run_combo[1],
        },
        fix=False,
        compile_from_def=calobs.compiled_from_def,
        include_previous=calobs.previous_included,
    )

    calobs.lna = LNA(
        calobs.io.s11.receiver_reading,
        f_low=calobs.freq.min,
        f_high=calobs.freq.max,
        resistance=calobs.lna.resistance,
    )

    if direc is not None:
        direc = Path(direc)
        if not direc.exists():
            direc.mkdir(parents=True)

        pth = Path(calobs.path).parent.name
        pth = str(pth) + f"_c{calobs.cterms}_w{calobs.wterms}.h5"
        calobs.write(direc / pth)

    return calobs
| 34.197418 | 88 | 0.566877 |
from __future__ import annotations
import attr
import h5py
import numpy as np
import tempfile
import warnings
import yaml
from abc import ABCMeta, abstractmethod
from astropy.convolution import Gaussian1DKernel, convolve
from copy import copy
from edges_io import io
from edges_io.logging import logger
from functools import lru_cache
from hashlib import md5
from matplotlib import pyplot as plt
from pathlib import Path
from scipy.interpolate import InterpolatedUnivariateSpline as Spline
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from . import DATA_PATH
from . import modelling as mdl
from . import receiver_calibration_func as rcf
from . import reflection_coefficient as rc
from . import s11_correction as s11
from . import tools
from . import types as tp
from . import xrfi
from .cached_property import cached_property
from .tools import EdgesFrequencyRange, FrequencyRange
class S1P:
    """A single reflection-coefficient measurement read from an ``.s1p`` file.

    Wraps an :class:`edges_io.io.S1P` object, restricts it to a frequency
    band, and optionally attaches the ideal reflection coefficient of the
    calibration standard it represents (+1 open, -1 short, 0 match).
    """

    def __init__(
        self,
        s1p: tp.PathLike | io.S1P,
        f_low: float | None = None,
        f_high: float | None = None,
        switchval: int | None = None,
    ):
        # Accept either a ready-made io.S1P, or anything Path() can consume.
        if isinstance(s1p, io.S1P):
            self.s1p = s1p
        else:
            try:
                self.s1p = io.S1P(Path(s1p))
            except TypeError:
                raise TypeError(
                    "s1p must be a path to an s1p file, or an io.S1P object"
                )
        self.load_name = self.s1p.kind
        self.repeat_num = self.s1p.repeat_num
        # Restrict both frequency and S11 to the requested band.
        self.freq = FrequencyRange(self.s1p.freq, f_low, f_high)
        self.s11 = self.s1p.s11[self.freq.mask]
        self._switchval = switchval

    @cached_property
    def switchval(self):
        """The ideal S11 of this standard broadcast over frequency, or None."""
        if self._switchval is None:
            return None
        return self._switchval * np.ones_like(self.freq.freq)
VNA = S1P  # Alias for S1P — presumably kept for backwards compatibility; verify before removing.
class _S11Base(metaclass=ABCMeta):
    """Abstract base for measured S11s (calibration loads and the receiver).

    Reads the open/short/match standards from an ``edges_io`` S11
    sub-directory, and fits a smooth complex model to the corrected
    reflection coefficient of the device. Subclasses define how the raw
    measurement is de-embedded (``measured_load_s11_raw``).
    """

    # Default number of model terms per load name; must be odd (see n_terms).
    default_nterms = {
        "ambient": 37,
        "hot_load": 37,
        "open": 105,
        "short": 105,
        "AntSim2": 55,
        "AntSim3": 55,
        "AntSim4": 55,
        "lna": 37,
    }
    def __init__(
        self,
        *,
        load_s11: Union[io._S11SubDir, io.ReceiverReading],
        f_low: Optional[float] = None,
        f_high: Optional[float] = None,
        n_terms: Optional[int] = None,
        model_type: tp.Modelable = "fourier",
    ):
        """
        Parameters
        ----------
        load_s11
            The edges-io object holding the raw standard measurements.
        f_low, f_high
            Optional frequency-band edges to restrict the data to.
        n_terms
            Number of terms in the S11 model (must be odd); defaults are
            looked up per load in ``default_nterms``.
        model_type
            Model family used to fit the S11 (default "fourier").
        """
        self.load_s11 = load_s11
        self.base_path = self.load_s11.path
        try:
            self.load_name = getattr(self.load_s11, "load_name")
        except AttributeError:
            # ReceiverReading objects have no load_name; subclasses set it.
            self.load_name = None
        self.run_num = self.load_s11.run_num
        # Ideal reflection coefficients of the calibration standards.
        switchvals = {"open": 1, "short": -1, "match": 0}
        for name in self.load_s11.STANDARD_NAMES:
            setattr(
                self,
                name.lower(),
                S1P(
                    s1p=self.load_s11.children[name.lower()],
                    f_low=f_low,
                    f_high=f_high,
                    switchval=switchvals.get(name.lower()),
                ),
            )
        # Use the frequency axis of the "open" standard for the whole object.
        self.freq = self.open.freq
        self._nterms = int(n_terms) if n_terms is not None else None
        self.model_type = model_type
    @cached_property
    def n_terms(self) -> int:
        """Number of terms used in the S11 model fit (required to be odd)."""
        res = self._nterms or self.default_nterms.get(self.load_name, None)
        if not (isinstance(res, int) and res % 2):
            raise ValueError(
                f"n_terms must be odd for S11 models. For {self.load_name} got "
                f"n_terms={res}."
            )
        return res
    @classmethod
    @abstractmethod
    def from_path(cls, **kwargs):
        """Construct an instance from an observation path (subclass-specific)."""
        pass
    @cached_property
    @abstractmethod
    def measured_load_s11_raw(self):
        """The raw de-embedded S11 of the device (subclass-specific)."""
        pass
    @cached_property
    def corrected_load_s11(self) -> np.ndarray:
        """The S11 after any correction; by default just the raw measurement."""
        return self.measured_load_s11_raw
    # NOTE(review): lru_cache on an instance method keeps `self` alive for the
    # lifetime of the cache — acceptable only if instances are long-lived.
    @lru_cache()
    def get_corrected_s11_model(
        self,
        n_terms: int | None = None,
        model_type: tp.Modelable | None = None,
    ):
        """Fit and return a complex magnitude/phase model of the corrected S11."""
        n_terms = n_terms or self.n_terms
        model_type = mdl.get_mdl(model_type or self.model_type)
        model = model_type(
            n_terms=n_terms,
            transform=mdl.UnitTransform(range=[self.freq.min, self.freq.max]),
        )
        emodel = model.at(x=self.freq.freq)
        cmodel = mdl.ComplexMagPhaseModel(mag=emodel, phs=emodel)
        s11_correction = self.corrected_load_s11
        return cmodel.fit(ydata=s11_correction)
    @cached_property
    def s11_model(self) -> callable:
        """The default fitted S11 model (a callable of frequency)."""
        return self.get_corrected_s11_model()
    def plot_residuals(
        self,
        fig=None,
        ax=None,
        color_abs="C0",
        color_diff="g",
        label=None,
        title=None,
        decade_ticks=True,
        ylabels=True,
    ) -> plt.Figure:
        """Plot the fitted S11 model against the data in four stacked panels.

        Panels: |S11| model, |S11| residual, unwrapped phase, phase residual.
        Returns the matplotlib figure.
        """
        if fig is None or ax is None or len(ax) != 4:
            fig, ax = plt.subplots(
                4, 1, sharex=True, gridspec_kw={"hspace": 0.05}, facecolor="w"
            )
        if decade_ticks:
            for axx in ax:
                axx.xaxis.set_ticks(
                    [50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180],
                    minor=[],
                )
                axx.grid(True)
        ax[-1].set_xlabel("Frequency [MHz]")
        corr = self.corrected_load_s11
        model = self.s11_model(self.freq.freq)
        ax[0].plot(
            self.freq.freq, 20 * np.log10(np.abs(model)), color=color_abs, label=label
        )
        if ylabels:
            ax[0].set_ylabel(r"$|S_{11}|$")
        ax[1].plot(self.freq.freq, np.abs(model) - np.abs(corr), color_diff)
        if ylabels:
            ax[1].set_ylabel(r"$\Delta |S_{11}|$")
        ax[2].plot(
            self.freq.freq, np.unwrap(np.angle(model)) * 180 / np.pi, color=color_abs
        )
        if ylabels:
            ax[2].set_ylabel(r"$\angle S_{11}$")
        ax[3].plot(
            self.freq.freq,
            np.unwrap(np.angle(model)) - np.unwrap(np.angle(corr)),
            color_diff,
        )
        if ylabels:
            ax[3].set_ylabel(r"$\Delta \angle S_{11}$")
        if title is None:
            title = f"{self.load_name} Reflection Coefficient Models"
        # NOTE(review): a caller-supplied `title` is only checked for
        # truthiness — the default text is always what gets displayed.
        # Looks like a bug; confirm intent before relying on `title`.
        if title:
            fig.suptitle(f"{self.load_name} Reflection Coefficient Models", fontsize=14)
        if label:
            ax[0].legend()
        return fig
class LoadS11(_S11Base):
    """S11 of a calibration load, de-embedded through the internal switch."""

    def __init__(self, *, internal_switch: s11.InternalSwitch, **kwargs):
        """
        Parameters
        ----------
        internal_switch
            The internal-switch S-parameter models used to correct the
            measured load S11.
        """
        assert isinstance(internal_switch, s11.InternalSwitch)
        self.internal_switch = internal_switch
        super().__init__(**kwargs)
    @classmethod
    def from_path(
        cls,
        load_name: str,
        path: tp.PathLike,
        run_num_load: int = 1,
        run_num_switch: int = 1,
        repeat_num_load: Optional[int] = None,
        repeat_num_switch: Optional[int] = None,
        resistance: float = 50.166,
        model_internal_switch: mdl.Model = attr.NOTHING,
        **kwargs,
    ):
        """Create a LoadS11 from an observation directory.

        Builds the load's S11 directory object and the internal-switch model
        from the ``S11`` sub-directory of ``path``.
        """
        antsim = load_name.startswith("AntSim")
        path = Path(path)
        if not antsim:
            load_name = io.LOAD_ALIASES[load_name]
        s11_load_dir = (io.AntSimS11 if antsim else io.LoadS11)(
            path / "S11" / f"{load_name}{run_num_load:02}", repeat_num=repeat_num_load
        )
        internal_switch = s11.InternalSwitch(
            data=io.SwitchingState(
                path / "S11" / f"SwitchingState{run_num_switch:02}",
                repeat_num=repeat_num_switch,
            ),
            resistance=resistance,
            model=model_internal_switch,
        )
        return cls(load_s11=s11_load_dir, internal_switch=internal_switch, **kwargs)
    @cached_property
    def measured_load_s11_raw(self):
        """Load S11 de-embedded from the open/short/match standards."""
        return rc.de_embed(
            self.open.switchval,
            self.short.switchval,
            self.match.switchval,
            self.open.s11,
            self.short.s11,
            self.match.s11,
            self.external.s11,
        )[0]
    @cached_property
    def corrected_load_s11(self) -> np.ndarray:
        """Load S11 further corrected for the internal switch S-parameters."""
        return rc.gamma_de_embed(
            self.internal_switch.s11_model(self.freq.freq),
            self.internal_switch.s12_model(self.freq.freq),
            self.internal_switch.s22_model(self.freq.freq),
            self.measured_load_s11_raw,
        )
class LNA(_S11Base):
    """S11 of the receiver (LNA), de-embedded with a calibration kit model."""

    def __init__(
        self, load_s11: io.ReceiverReading, resistance: float = 50.009, **kwargs
    ):
        """
        Parameters
        ----------
        load_s11
            The ReceiverReading directory object with the raw measurements.
        resistance
            Resistance of the male calibration standard [ohms].
        """
        super().__init__(load_s11=load_s11, **kwargs)
        self.resistance = resistance
        self.load_name = "lna"
        self.repeat_num = self.load_s11.repeat_num
    @classmethod
    def from_path(
        cls,
        path: Union[str, Path],
        repeat_num: Optional[int] = None,
        run_num: int = 1,
        **kwargs,
    ):
        """Create an LNA from the ``S11/ReceiverReadingXX`` sub-directory of path."""
        path = Path(path)
        load_s11 = io.ReceiverReading(
            path=path / "S11" / f"ReceiverReading{run_num:02}",
            repeat_num=repeat_num,
            fix=False,
        )
        return cls(load_s11=load_s11, **kwargs)
    @cached_property
    def external(self):
        """The external (receiver reading) measurement, band-limited."""
        return S1P(
            self.load_s11.children["receiverreading"],
            f_low=self.freq.freq.min(),
            f_high=self.freq.freq.max(),
        )
    @cached_property
    def measured_load_s11_raw(self):
        """Receiver S11 de-embedded using the Agilent 85033E standard model."""
        oa, sa, la = rc.agilent_85033E(
            self.freq.freq, self.resistance, match_delay=True
        )
        return rc.de_embed(
            oa, sa, la, self.open.s11, self.short.s11, self.match.s11, self.external.s11
        )[0]
class LoadSpectrum:
    """A measured calibration-source spectrum together with its thermistor data.

    Averages the raw switch-power spectra (p0/p1/p2) and the three-position
    ratio Q over time, optionally removing RFI, and caches the reduced
    quantities in an HDF5 file keyed by a hash of the reduction parameters.
    """

    def __init__(
        self,
        spec_obj: List[io.Spectrum],
        resistance_obj: io.Resistance,
        switch_correction: Optional[LoadS11] = None,
        f_low: float = 40.0,
        f_high: Optional[float] = None,
        ignore_times_percent: float = 5.0,
        rfi_removal: str = "1D2D",
        rfi_kernel_width_time: int = 16,
        rfi_kernel_width_freq: int = 16,
        rfi_threshold: float = 6,
        cache_dir: Optional[Union[str, Path]] = None,
        t_load: float = 300.0,
        t_load_ns: float = 400.0,
    ):
        """
        Parameters
        ----------
        spec_obj
            The io.Spectrum objects (possibly several files) for this load.
        resistance_obj
            The io.Resistance object holding the thermistor readings.
        switch_correction
            Optional S11 correction (kept for interface compatibility).
        f_low, f_high
            Frequency band edges [MHz].
        ignore_times_percent
            Percentage of initial integrations to discard (warm-up).
        rfi_removal
            One of '1D', '2D', '1D2D' or falsy — which RFI excision to apply.
        rfi_kernel_width_time, rfi_kernel_width_freq, rfi_threshold
            Parameters of the RFI median-filter excision.
        cache_dir
            Directory for the reduced-spectrum cache (default: cwd).
        t_load, t_load_ns
            Assumed load and load+noise-source temperatures [K] used to scale Q.
        """
        self.spec_obj = spec_obj
        self.resistance_obj = resistance_obj
        self.load_name = self.spec_obj[0].load_name
        assert (
            self.load_name == self.resistance_obj.load_name
        ), "spec and resistance load_name must be the same"
        # BUG FIX: this was a generator expression, which is exhausted after a
        # single iteration — a second call to _get_integrated_filename() would
        # silently hash an empty tuple and produce a wrong cache filename.
        self.spec_files = tuple(spec_obj.path for spec_obj in self.spec_obj)
        self.resistance_file = self.resistance_obj.path
        self.run_num = self.spec_obj[0].run_num
        self.cache_dir = Path(cache_dir or ".")
        self.rfi_kernel_width_time = rfi_kernel_width_time
        self.rfi_kernel_width_freq = rfi_kernel_width_freq
        self.rfi_threshold = rfi_threshold
        assert rfi_removal in [
            "1D",
            "2D",
            "1D2D",
            False,
            None,
        ], "rfi_removal must be either '1D', '2D', '1D2D', or False/None"
        self.rfi_removal = rfi_removal
        self.switch_correction = switch_correction
        self.ignore_times_percent = ignore_times_percent
        self.freq = EdgesFrequencyRange(f_low=f_low, f_high=f_high)
        self.t_load = t_load
        self.t_load_ns = t_load_ns

    @classmethod
    def from_load_name(
        cls,
        load_name: str,
        direc: Union[str, Path],
        run_num: Optional[int] = None,
        filetype: Optional[str] = None,
        **kwargs,
    ):
        """Instantiate from a load name and observation directory."""
        direc = Path(direc)
        spec = io.Spectrum.from_load(
            load=load_name, direc=direc / "Spectra", run_num=run_num, filetype=filetype
        )
        res = io.Resistance.from_load(
            load=load_name,
            direc=direc / "Resistance",
            run_num=run_num,
            filetype=filetype,
        )
        return cls(spec_obj=spec, resistance_obj=res, **kwargs)

    @cached_property
    def averaged_Q(self) -> np.ndarray:
        """Time-averaged three-position switch ratio Q, optionally 1D-RFI-flagged."""
        # TODO: should also get weights!
        spec = self._ave_and_var_spec[0]["Q"]
        if self.rfi_removal == "1D":
            flags, _ = xrfi.xrfi_medfilt(
                spec, threshold=self.rfi_threshold, kf=self.rfi_kernel_width_freq
            )
            spec[flags] = np.nan
        return spec

    @property
    def variance_Q(self) -> np.ndarray:
        """Per-frequency variance of Q over time."""
        return self._ave_and_var_spec[1]["Q"]

    @property
    def averaged_spectrum(self) -> np.ndarray:
        """Uncalibrated temperature spectrum: Q scaled by the assumed load temps."""
        return self.averaged_Q * self.t_load_ns + self.t_load

    @property
    def variance_spectrum(self) -> np.ndarray:
        """Variance of the uncalibrated temperature spectrum."""
        return self.variance_Q * self.t_load_ns ** 2

    @property
    def ancillary(self) -> list:
        """Metadata dicts, one per input spectrum file."""
        # NOTE: this returns a list (the original annotation said dict).
        return [d.data["meta"] for d in self.spec_obj]

    @property
    def averaged_p0(self) -> np.ndarray:
        """Time-averaged power in switch position 0."""
        return self._ave_and_var_spec[0]["p0"]

    @property
    def averaged_p1(self) -> np.ndarray:
        """Time-averaged power in switch position 1."""
        return self._ave_and_var_spec[0]["p1"]

    @property
    def averaged_p2(self) -> np.ndarray:
        """Time-averaged power in switch position 2."""
        return self._ave_and_var_spec[0]["p2"]

    @property
    def variance_p0(self) -> np.ndarray:
        """Variance over time of switch-position-0 power."""
        return self._ave_and_var_spec[1]["p0"]

    @property
    def variance_p1(self) -> np.ndarray:
        """Variance over time of switch-position-1 power."""
        return self._ave_and_var_spec[1]["p1"]

    @property
    def variance_p2(self) -> np.ndarray:
        """Variance over time of switch-position-2 power."""
        return self._ave_and_var_spec[1]["p2"]

    @property
    def n_integrations(self) -> int:
        """Number of time integrations that went into the averages."""
        return self._ave_and_var_spec[2]

    def _get_integrated_filename(self):
        """Cache filename: load name + md5 hash of all reduction parameters."""
        params = (
            self.rfi_threshold,
            self.rfi_kernel_width_time,
            self.rfi_kernel_width_freq,
            self.rfi_removal,
            self.ignore_times_percent,
            self.freq.min,
            self.freq.max,
            self.t_load,
            self.t_load_ns,
            tuple(path.name for path in self.spec_files),
        )
        hsh = md5(str(params).encode()).hexdigest()
        return self.cache_dir / f"{self.load_name}_{hsh}.h5"

    @cached_property
    def _ave_and_var_spec(self) -> Tuple[Dict, Dict, int]:
        """Reduce (or read from cache) the mean/variance of each spectrum kind."""
        fname = self._get_integrated_filename()
        kinds = ["p0", "p1", "p2", "Q"]
        if fname.exists():
            logger.info(
                f"Reading in previously-created integrated {self.load_name} spectra..."
            )
            means = {}
            variances = {}
            with h5py.File(fname, "r") as fl:
                for kind in kinds:
                    means[kind] = fl[kind + "_mean"][...]
                    variances[kind] = fl[kind + "_var"][...]
                n_integrations = fl.attrs.get("n_integrations", 0)
            return means, variances, n_integrations
        logger.info(f"Reducing {self.load_name} spectra...")
        spectra = self.get_spectra()
        means = {}
        variances = {}
        n_intg = 0  # robust default in case `spectra` is empty
        for key, spec in spectra.items():
            # Weird thing where there are zeros in the spectra.
            spec[spec == 0] = np.nan
            mean = np.nanmean(spec, axis=1)
            var = np.nanvar(spec, axis=1)
            n_intg = spec.shape[1]
            if self.rfi_removal == "1D2D":
                # Flag channels whose mean or variance deviates strongly from
                # a median-filtered baseline.
                nsample = np.sum(~np.isnan(spec), axis=1)
                varfilt = xrfi.flagged_filter(
                    var, size=2 * self.rfi_kernel_width_freq + 1
                )
                resid = mean - xrfi.flagged_filter(
                    mean, size=2 * self.rfi_kernel_width_freq + 1
                )
                flags = np.logical_or(
                    resid > self.rfi_threshold * np.sqrt(varfilt / nsample),
                    var - varfilt
                    > self.rfi_threshold * np.sqrt(2 * varfilt ** 2 / (nsample - 1)),
                )
                mean[flags] = np.nan
                var[flags] = np.nan
            means[key] = mean
            variances[key] = var
        if not self.cache_dir.exists():
            self.cache_dir.mkdir()
        with h5py.File(fname, "w") as fl:
            logger.info(f"Saving reduced spectra to cache at {fname}")
            for kind in kinds:
                fl[kind + "_mean"] = means[kind]
                fl[kind + "_var"] = variances[kind]
            fl.attrs["n_integrations"] = n_intg
        return means, variances, n_intg

    def get_spectra(self) -> dict:
        """Read the raw spectra, optionally applying 2D RFI excision."""
        spec = self._read_spectrum()
        if self.rfi_removal == "2D":
            for key, val in spec.items():
                # Need to set nans and zeros to inf so that median/mean detrending
                # can work.
                val[np.isnan(val)] = np.inf
                if key != "Q":
                    val[val == 0] = np.inf
                flags, _ = xrfi.xrfi_medfilt(
                    val,
                    threshold=self.rfi_threshold,
                    kt=self.rfi_kernel_width_time,
                    kf=self.rfi_kernel_width_freq,
                )
                val[flags] = np.nan
                spec[key] = val
        return spec

    def _read_spectrum(self) -> dict:
        """Concatenate all input files into (nfreq, ntime) arrays per kind,
        dropping the first `ignore_times_percent` of integrations."""
        data = [spec_obj.data for spec_obj in self.spec_obj]
        n_times = sum(len(d["time_ancillary"]["times"]) for d in data)
        out = {
            "p0": np.empty((len(self.freq.freq), n_times)),
            "p1": np.empty((len(self.freq.freq), n_times)),
            "p2": np.empty((len(self.freq.freq), n_times)),
            "Q": np.empty((len(self.freq.freq), n_times)),
        }
        index_start_spectra = int((self.ignore_times_percent / 100) * n_times)
        for key, val in out.items():
            nn = 0
            for d in data:
                n = len(d["time_ancillary"]["times"])
                val[:, nn : (nn + n)] = d["spectra"][key][self.freq.mask]
                nn += n
            out[key] = val[:, index_start_spectra:]
        return out

    @cached_property
    def thermistor(self) -> np.ndarray:
        """Thermistor readings with the warm-up fraction removed."""
        ary = self.resistance_obj.read()[0]
        return ary[int((self.ignore_times_percent / 100) * len(ary)) :]

    @cached_property
    def thermistor_temp(self):
        """Thermistor resistance converted to physical temperature [K]."""
        return rcf.temperature_thermistor(self.thermistor["load_resistance"])

    @cached_property
    def temp_ave(self):
        """Mean thermistor temperature over the retained integrations [K]."""
        return np.nanmean(self.thermistor_temp)

    def write(self, path=None):
        """Write the averaged spectrum and temperatures to an HDF5 file."""
        path = Path(path or ".")
        # Allow to pass in a directory name *or* full path.
        if path.is_dir():
            path /= f"{self.load_name}_averaged_spectrum.h5"
        with h5py.File(path, "w") as fl:
            fl.attrs["load_name"] = self.load_name
            fl["freq"] = self.freq.freq
            fl["averaged_raw_spectrum"] = self.averaged_spectrum
            fl["temperature"] = self.thermistor_temp
    def plot(
        self, thermistor=False, fig=None, ax=None, xlabel=True, ylabel=True, **kwargs
    ):
        """Plot the averaged (uncalibrated) spectrum or the thermistor temperature."""
        if fig is None:
            fig, ax = plt.subplots(
                1, 1, facecolor=kwargs.pop("facecolor", "white"), **kwargs
            )
        if thermistor:
            ax.plot(self.freq.freq, self.thermistor_temp)
            if ylabel:
                ax.set_ylabel("Temperature [K]")
        else:
            ax.plot(self.freq.freq, self.averaged_spectrum)
            if ylabel:
                ax.set_ylabel("$T^*$ [K]")
        ax.grid(True)
        if xlabel:
            ax.set_xlabel("Frequency [MHz]")
class HotLoadCorrection:
    """S-parameter model of the semi-rigid cable between hot load and receiver.

    Reads the tabulated S11/S12S21/S22 of the cable from a text file and
    fits polynomial models, used to compute the power gain that corrects
    the hot-load temperature.
    """

    # Column index of each S-parameter in the parsed data array.
    _kinds = {"s11": 0, "s12": 1, "s22": 2}
    def __init__(
        self,
        path: Union[str, Path] = ":semi_rigid_s_parameters_WITH_HEADER.txt",
        f_low: Optional[float] = None,
        f_high: Optional[float] = None,
        n_terms: int = 21,
    ):
        """
        Parameters
        ----------
        path
            Path to the S-parameter file; a leading ':' means "relative to
            the package DATA_PATH".
        f_low, f_high
            Optional band edges [MHz].
        n_terms
            Number of polynomial terms for each S-parameter model.
        """
        # Get the path to the S11 file.
        if not isinstance(path, Path):
            path = DATA_PATH / path[1:] if path[0] == ":" else Path(path)
        self.path = path
        data = np.genfromtxt(self.path)
        f = data[:, 0]
        self.freq = FrequencyRange(f, f_low, f_high)
        # Two historical file layouts: interleaved real/imag columns (2015)
        # or (complex s11, real s12s21, complex s22) columns (2017).
        if data.shape[1] == 7:  # Original file from 2015
            self.data = data[self.freq.mask, 1::2] + 1j * data[self.freq.mask, 2::2]
        elif data.shape[1] == 6:  # File from 2017
            self.data = np.array(
                [
                    data[self.freq.mask, 1] + 1j * data[self.freq.mask, 2],
                    data[self.freq.mask, 3],
                    data[self.freq.mask, 4] + 1j * data[self.freq.mask, 5],
                ]
            ).T
        else:
            raise IOError("Semi-Rigid Cable file has wrong data format.")
        self.n_terms = int(n_terms)
    def _get_model_kind(self, kind):
        # Fit a complex mag/phase polynomial to the requested S-parameter.
        model = mdl.Polynomial(
            n_terms=self.n_terms,
            transform=mdl.UnitTransform(range=(self.freq.min, self.freq.max)),
        )
        model = mdl.ComplexMagPhaseModel(mag=model, phs=model)
        return model.fit(xdata=self.freq.freq, ydata=self.data[:, self._kinds[kind]])
    @cached_property
    def s11_model(self):
        """Fitted model of the cable S11."""
        return self._get_model_kind("s11")
    @cached_property
    def s12_model(self):
        """Fitted model of the cable S12*S21."""
        return self._get_model_kind("s12")
    @cached_property
    def s22_model(self):
        """Fitted model of the cable S22."""
        return self._get_model_kind("s22")
    def power_gain(self, freq: np.ndarray, hot_load_s11: LoadS11) -> np.ndarray:
        """Power gain of the cable given the hot load's reflection coefficient."""
        assert isinstance(
            hot_load_s11, LoadS11
        ), "hot_load_s11 must be a switch correction"
        assert (
            hot_load_s11.load_name == "hot_load"
        ), "hot_load_s11 must be a hot_load s11"
        return self.get_power_gain(
            {
                "s11": self.s11_model(freq),
                "s12s21": self.s12_model(freq),
                "s22": self.s22_model(freq),
            },
            hot_load_s11.s11_model(freq),
        )
    @staticmethod
    def get_power_gain(
        semi_rigid_sparams: dict, hot_load_s11: np.ndarray
    ) -> np.ndarray:
        """Compute the cable power gain from S-parameters and the load S11."""
        rht = rc.gamma_de_embed(
            semi_rigid_sparams["s11"],
            semi_rigid_sparams["s12s21"],
            semi_rigid_sparams["s22"],
            hot_load_s11,
        )
        return (
            np.abs(semi_rigid_sparams["s12s21"])
            * (1 - np.abs(rht) ** 2)
            / (
                (np.abs(1 - semi_rigid_sparams["s11"] * rht)) ** 2
                * (1 - np.abs(hot_load_s11) ** 2)
            )
        )
class Load:
    """A calibration load: its measured spectrum paired with its S11 model.

    For the hot load, a semi-rigid-cable correction and the ambient load's
    spectrum are additionally used to compute the effective temperature.
    """

    def __init__(
        self,
        spectrum: LoadSpectrum,
        reflections: LoadS11,
        hot_load_correction: Optional[HotLoadCorrection] = None,
        ambient: Optional[LoadSpectrum] = None,
    ):
        """
        Parameters
        ----------
        spectrum
            The load's averaged spectrum.
        reflections
            The load's corrected S11.
        hot_load_correction, ambient
            Only used when this is the hot load: the cable S-parameter model
            and the ambient load's spectrum for the temperature correction.
        """
        assert isinstance(spectrum, LoadSpectrum), "spectrum must be a LoadSpectrum"
        # BUG FIX: the message previously read "spectrum must be a
        # SwitchCorrection" although this assertion checks `reflections`.
        assert isinstance(reflections, LoadS11), "reflections must be a LoadS11"
        assert spectrum.load_name == reflections.load_name
        self.spectrum = spectrum
        self.reflections = reflections
        self.load_name = spectrum.load_name
        self.t_load = self.spectrum.t_load
        self.t_load_ns = self.spectrum.t_load_ns
        if self.load_name == "hot_load":
            self._correction = hot_load_correction
            self._ambient = ambient

    @classmethod
    def from_path(
        cls,
        path: Union[str, Path],
        load_name: str,
        f_low: Optional[float] = None,
        f_high: Optional[float] = None,
        reflection_kwargs: Optional[dict] = None,
        spec_kwargs: Optional[dict] = None,
    ):
        """Build a Load directly from an observation path.

        NOTE(review): this constructor does not pass `hot_load_correction`
        or `ambient`, so a hot load built this way cannot compute its
        corrected temperature — confirm whether that is intended.
        """
        if not spec_kwargs:
            spec_kwargs = {}
        if not reflection_kwargs:
            reflection_kwargs = {}
        spec = LoadSpectrum.from_load_name(
            load_name,
            path,
            f_low=f_low,
            f_high=f_high,
            **spec_kwargs,
        )
        refl = LoadS11.from_path(
            load_name,
            path,
            f_low=f_low,
            f_high=f_high,
            **reflection_kwargs,
        )
        return cls(spec, refl)

    @property
    def s11_model(self):
        """The load's fitted S11 model (callable of frequency)."""
        return self.reflections.s11_model

    @cached_property
    def temp_ave(self):
        """Effective average temperature; cable-corrected for the hot load."""
        if self.load_name != "hot_load":
            return self.spectrum.temp_ave
        gain = self._correction.power_gain(self.freq.freq, self.reflections)
        # temperature
        return gain * self.spectrum.temp_ave + (1 - gain) * self._ambient.temp_ave

    @property
    def averaged_Q(self):
        """Time-averaged three-position switch ratio of the spectrum."""
        return self.spectrum.averaged_Q

    @property
    def averaged_spectrum(self):
        """Time-averaged uncalibrated temperature spectrum."""
        return self.spectrum.averaged_spectrum

    @property
    def freq(self):
        """Frequency range of the spectrum."""
        return self.spectrum.freq
class CalibrationObservation:
    """A complete EDGES calibration observation.

    Combines the four calibration sources (spectra + S11s) and the receiver
    (LNA) S11 over a common frequency band, and derives the calibration
    coefficients C1, C2, Tunc, Tcos and Tsin.
    """

    # Order matters: "ambient" must precede "hot_load", whose construction
    # in __init__ reads the already-built ambient load's spectrum.
    _sources = ("ambient", "hot_load", "open", "short")
    def __init__(
        self,
        path: Union[str, Path],
        semi_rigid_path: Union[str, Path] = ":semi_rigid_s_parameters_WITH_HEADER.txt",
        f_low: Optional[float] = 40,
        f_high: Optional[float] = None,
        run_num: Union[None, int, dict] = None,
        repeat_num: Union[None, int, dict] = None,
        resistance_f: Optional[float] = None,
        cterms: int = 5,
        wterms: int = 7,
        load_kwargs: Optional[dict] = None,
        s11_kwargs: Optional[dict] = None,
        load_spectra: Optional[dict] = None,
        load_s11s: Optional[dict] = None,
        compile_from_def: bool = True,
        include_previous: bool = False,
        internal_switch_kwargs: Optional[Dict[str, Any]] = None,
    ):
        """
        Parameters
        ----------
        path
            Path to the observation directory.
        semi_rigid_path
            Path to the semi-rigid cable S-parameter file (':' prefix means
            package data).
        f_low, f_high
            Requested band edges [MHz]; the final band is further restricted
            to the overlap of all loads and the LNA.
        run_num, repeat_num
            Run/repeat selection, passed to `io.CalibrationObservation`.
        resistance_f
            Female-standard resistance for the LNA; defaults to the value in
            the observation definition.
        cterms, wterms
            Number of terms in the C (scale/offset) and w (noise-wave)
            calibration polynomials.
        load_kwargs, s11_kwargs
            Extra keyword arguments applied to every LoadSpectrum / S11.
        load_spectra, load_s11s
            Per-source overrides: either kwargs dicts or fully constructed
            LoadSpectrum / S11 objects.
        compile_from_def, include_previous
            Passed to `io.CalibrationObservation`.
        internal_switch_kwargs
            Extra keyword arguments for the internal-switch model.
        """
        load_spectra = load_spectra or {}
        load_s11s = load_s11s or {}
        load_kwargs = load_kwargs or {}
        s11_kwargs = s11_kwargs or {}
        internal_switch_kwargs = internal_switch_kwargs or {}
        assert all(name in self._sources for name in load_spectra)
        assert all(name in self._sources + ("lna",) for name in load_s11s)
        self.io = io.CalibrationObservation(
            path,
            run_num=run_num,
            repeat_num=repeat_num,
            fix=False,
            compile_from_def=compile_from_def,
            include_previous=include_previous,
        )
        self.compiled_from_def = compile_from_def
        self.previous_included = include_previous
        self.path = Path(self.io.path)
        hot_load_correction = HotLoadCorrection(semi_rigid_path, f_low, f_high)
        self.internal_switch = s11.InternalSwitch(
            data=self.io.s11.switching_state,
            resistance=self.io.definition["measurements"]["resistance_m"][
                self.io.s11.switching_state.run_num
            ],
            **internal_switch_kwargs,
        )
        # Build each source's spectrum + S11; per-source overrides may be
        # dicts of kwargs or pre-built objects.
        self._loads = {}
        for source in self._sources:
            load = load_spectra.get(source, {})
            if isinstance(load, dict):
                load = LoadSpectrum(
                    spec_obj=getattr(self.io.spectra, source),
                    resistance_obj=getattr(self.io.resistance, source),
                    f_low=f_low,
                    f_high=f_high,
                    **{**load_kwargs, **load},
                )
            # Ensure that we finally have a LoadSpectrum
            if not isinstance(load, LoadSpectrum):
                raise TypeError("load_spectra must be a dict of LoadSpectrum or dicts.")
            refl = load_s11s.get(source, {})
            if isinstance(refl, dict):
                refl = LoadS11(
                    load_s11=getattr(self.io.s11, source),
                    internal_switch=self.internal_switch,
                    f_low=f_low,
                    f_high=f_high,
                    **{**s11_kwargs, **refl},
                )
            if source == "hot_load":
                # Relies on "ambient" appearing before "hot_load" in _sources.
                self._loads[source] = Load(
                    load,
                    refl,
                    hot_load_correction=hot_load_correction,
                    ambient=self._loads["ambient"].spectrum,
                )
            else:
                self._loads[source] = Load(load, refl)
        for name, load in self._loads.items():
            setattr(self, name, load)
        refl = load_s11s.get("lna", {})
        self.lna = LNA(
            load_s11=self.io.s11.receiver_reading,
            f_low=f_low,
            f_high=f_high,
            resistance=resistance_f
            or self.io.definition["measurements"]["resistance_f"][
                self.io.s11.receiver_reading.run_num
            ],
            **{**s11_kwargs, **refl},
        )
        # We must use the most restricted frequency range available from all available
        # sources as well as the LNA.
        fmin = max(
            sum(
                (
                    [load.spectrum.freq.min, load.reflections.freq.min]
                    for load in self._loads.values()
                ),
                [],
            )
            + [self.lna.freq.min]
        )
        fmax = min(
            sum(
                (
                    [load.spectrum.freq.max, load.reflections.freq.max]
                    for load in self._loads.values()
                ),
                [],
            )
            + [self.lna.freq.max]
        )
        if fmax <= fmin:
            raise ValueError(
                "The inputs loads and S11s have non-overlapping frequency ranges!"
            )
        self.freq = EdgesFrequencyRange(f_low=fmin, f_high=fmax)
        # Now make everything actually consistent in its frequency range.
        for load in self._loads.values():
            load.spectrum.freq = self.freq
        self.cterms = cterms
        self.wterms = wterms
        self.t_load = self.ambient.t_load
        self.t_load_ns = self.ambient.t_load_ns
    @property
    def load_names(self) -> Tuple[str, ...]:
        """Names of the calibration sources (keys of the internal load dict)."""
        return tuple(self._loads.keys())
    def new_load(
        self,
        load_name: str,
        run_num: int = 1,
        reflection_kwargs: Optional[dict] = None,
        spec_kwargs: Optional[dict] = None,
    ):
        """Create a new Load (e.g. an antenna simulator) consistent with this
        observation: same band, switch state, and reduction parameters.
        """
        reflection_kwargs = reflection_kwargs or {}
        spec_kwargs = spec_kwargs or {}
        # Fill up kwargs with keywords from this instance
        if "resistance" not in reflection_kwargs:
            reflection_kwargs[
                "resistance"
            ] = self.open.reflections.internal_switch.resistance
        for key in [
            "ignore_times_percent",
            "rfi_removal",
            "rfi_kernel_width_freq",
            "rfi_kernel_width_time",
            "rfi_threshold",
            "cache_dir",
            "t_load",
            "t_load_ns",
        ]:
            if key not in spec_kwargs:
                spec_kwargs[key] = getattr(self.open.spectrum, key)
        reflection_kwargs["run_num_load"] = run_num
        reflection_kwargs["repeat_num_switch"] = self.io.s11.switching_state.repeat_num
        reflection_kwargs["run_num_switch"] = self.io.s11.switching_state.run_num
        spec_kwargs["run_num"] = run_num
        return Load.from_path(
            path=self.io.path,
            load_name=load_name,
            f_low=self.freq.min,
            f_high=self.freq.max,
            reflection_kwargs=reflection_kwargs,
            spec_kwargs=spec_kwargs,
        )
    def plot_raw_spectra(self, fig=None, ax=None) -> plt.Figure:
        """Plot each source's raw averaged spectrum in stacked panels."""
        if fig is None and ax is None:
            fig, ax = plt.subplots(
                len(self._sources), 1, sharex=True, gridspec_kw={"hspace": 0.05}
            )
        for i, (name, load) in enumerate(self._loads.items()):
            # Only the bottom panel gets an x-label.
            load.spectrum.plot(
                fig=fig, ax=ax[i], xlabel=(i == (len(self._sources) - 1))
            )
            ax[i].set_title(name)
        return fig
def plot_s11_models(self, **kwargs):
out = {
name: source.reflections.plot_residuals(**kwargs)
for name, source in self._loads.items()
}
out.update({"lna": self.lna.plot_residuals(**kwargs)})
return out
    @cached_property
    def s11_correction_models(self):
        """Corrected S11 of each source, evaluated over the band (dict by name)."""
        try:
            return dict(self._injected_source_s11s)
        except (TypeError, AttributeError):
            # Nothing injected (testing hook): evaluate each fitted model.
            return {
                name: source.s11_model(self.freq.freq)
                for name, source in self._loads.items()
            }
@cached_property
def source_thermistor_temps(self) -> Dict[str, Union[float, np.ndarray]]:
if (
hasattr(self, "_injected_source_temps")
and self._injected_source_temps is not None
):
return self._injected_source_temps
return {k: source.temp_ave for k, source in self._loads.items()}
    @cached_property
    def _calibration_coefficients(self):
        """Iteratively solve for the (scale, offset, Tunc, Tcos, Tsin) models.

        Uses injected averaged spectra if present (testing hook), otherwise
        the measured averaged spectra of each source.
        """
        if (
            hasattr(self, "_injected_averaged_spectra")
            and self._injected_averaged_spectra is not None
        ):
            ave_spec = self._injected_averaged_spectra
        else:
            ave_spec = {
                k: source.averaged_spectrum for k, source in self._loads.items()
            }
        scale, off, Tu, TC, TS = rcf.get_calibration_quantities_iterative(
            self.freq.freq_recentred,
            temp_raw=ave_spec,
            gamma_rec=self.lna_s11,
            gamma_ant=self.s11_correction_models,
            temp_ant=self.source_thermistor_temps,
            cterms=self.cterms,
            wterms=self.wterms,
            temp_amb_internal=self.t_load,
        )
        return scale, off, Tu, TC, TS
    @cached_property
    def C1_poly(self):  # noqa: N802
        """Fitted polynomial for the scale parameter C1."""
        return self._calibration_coefficients[0]
    @cached_property
    def C2_poly(self):  # noqa: N802
        """Fitted polynomial for the offset parameter C2 [K]."""
        return self._calibration_coefficients[1]
    @cached_property
    def Tunc_poly(self):  # noqa: N802
        """Fitted polynomial for the uncorrelated noise-wave temperature [K]."""
        return self._calibration_coefficients[2]
    @cached_property
    def Tcos_poly(self):  # noqa: N802
        """Fitted polynomial for the cosine noise-wave temperature [K]."""
        return self._calibration_coefficients[3]
    @cached_property
    def Tsin_poly(self):  # noqa: N802
        """Fitted polynomial for the sine noise-wave temperature [K]."""
        return self._calibration_coefficients[4]
    def C1(self, f: Optional[Union[float, np.ndarray]] = None):  # noqa: N802
        """Evaluate the scale parameter C1 at frequency `f` (default: full band)."""
        if hasattr(self, "_injected_c1") and self._injected_c1 is not None:
            return np.array(self._injected_c1)
        fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)
        return self.C1_poly(fnorm)
    def C2(self, f: Optional[Union[float, np.ndarray]] = None):  # noqa: N802
        """Evaluate the offset parameter C2 [K] at frequency `f`."""
        if hasattr(self, "_injected_c2") and self._injected_c2 is not None:
            return np.array(self._injected_c2)
        fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)
        return self.C2_poly(fnorm)
    def Tunc(self, f: Optional[Union[float, np.ndarray]] = None):  # noqa: N802
        """Evaluate the uncorrelated noise-wave temperature [K] at `f`."""
        if hasattr(self, "_injected_t_unc") and self._injected_t_unc is not None:
            return np.array(self._injected_t_unc)
        fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)
        return self.Tunc_poly(fnorm)
    def Tcos(self, f: Optional[Union[float, np.ndarray]] = None):  # noqa: N802
        """Evaluate the cosine noise-wave temperature [K] at `f`."""
        if hasattr(self, "_injected_t_cos") and self._injected_t_cos is not None:
            return np.array(self._injected_t_cos)
        fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)
        return self.Tcos_poly(fnorm)
    def Tsin(self, f: Optional[Union[float, np.ndarray]] = None):  # noqa: N802
        """Evaluate the sine noise-wave temperature [K] at `f`."""
        if hasattr(self, "_injected_t_sin") and self._injected_t_sin is not None:
            return np.array(self._injected_t_sin)
        fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)
        return self.Tsin_poly(fnorm)
@cached_property
def lna_s11(self):
if hasattr(self, "_injected_lna_s11") and self._injected_lna_s11 is not None:
return self._injected_lna_s11
else:
return self.lna.s11_model(self.freq.freq)
    def get_linear_coefficients(self, load: Union[Load, str]):
        """Return (a, b) so that calibrated_temp = a * uncal_temp + b for `load`."""
        if isinstance(load, str):
            load_s11 = self.s11_correction_models[load]
        elif load.load_name in self.s11_correction_models:
            load_s11 = self.s11_correction_models[load.load_name]
        else:
            # An unknown Load (e.g. an antenna simulator): evaluate its model.
            load_s11 = load.s11_model(self.freq.freq)
        return rcf.get_linear_coefficients(
            load_s11,
            self.lna_s11,
            self.C1(self.freq.freq),
            self.C2(self.freq.freq),
            self.Tunc(self.freq.freq),
            self.Tcos(self.freq.freq),
            self.Tsin(self.freq.freq),
            t_load=self.t_load,
        )
def calibrate(self, load: Union[Load, str], q=None, temp=None):
load = self._load_str_to_load(load)
a, b = self.get_linear_coefficients(load)
if q is not None:
temp = self.t_load_ns * q + self.t_load
elif temp is None:
temp = load.averaged_spectrum
return a * temp + b
def _load_str_to_load(self, load: Union[Load, str]):
if isinstance(load, str):
try:
load = self._loads[load]
except AttributeError:
raise AttributeError(
"load must be a Load object or a string (one of "
"{ambient,hot_load,open,short})"
)
else:
assert isinstance(
load, Load
), "load must be a Load instance, got the {} {}".format(load, type(Load))
return load
def decalibrate(
self, temp: np.ndarray, load: Union[Load, str], freq: np.ndarray = None
):
if freq is None:
freq = self.freq.freq
if freq.min() < self.freq.freq.min():
warnings.warn(
"The minimum frequency is outside the calibrated range "
f"({self.freq.freq.min()} - {self.freq.freq.max()} MHz)"
)
if freq.min() > self.freq.freq.max():
warnings.warn("The maximum frequency is outside the calibrated range ")
a, b = self.get_linear_coefficients(load)
return (temp - b) / a
    def get_K(
        self, freq: np.ndarray | None = None
    ) -> Dict[str, Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]]:
        """Return the K noise-wave coefficient vectors for each source.

        If `freq` is given, the S11 models are re-evaluated at those
        frequencies; otherwise the cached band evaluations are used.
        """
        if freq is None:
            freq = self.freq.freq
            gamma_ants = self.s11_correction_models
        else:
            gamma_ants = {
                name: source.s11_model(freq) for name, source in self._loads.items()
            }
        lna_s11 = self.lna.s11_model(freq)
        return {
            name: rcf.get_K(gamma_rec=lna_s11, gamma_ant=gamma_ant)
            for name, gamma_ant in gamma_ants.items()
        }
    def plot_calibrated_temp(
        self,
        load: Union[Load, str],
        bins: int = 2,
        fig=None,
        ax=None,
        xlabel=True,
        ylabel=True,
    ):
        """Plot a load's calibrated spectrum against its thermistor temperature.

        `bins` sets the stddev of a Gaussian smoothing kernel (0 disables
        smoothing). Returns the current matplotlib figure.
        """
        load = self._load_str_to_load(load)
        if fig is None and ax is None:
            fig, ax = plt.subplots(1, 1, facecolor="w")
        # binning
        temp_calibrated = self.calibrate(load)
        if bins > 0:
            freq_ave_cal = convolve(
                temp_calibrated, Gaussian1DKernel(stddev=bins), boundary="extend"
            )
        else:
            freq_ave_cal = temp_calibrated
        freq_ave_cal[np.isinf(freq_ave_cal)] = np.nan
        rms = np.sqrt(np.mean((freq_ave_cal - np.mean(freq_ave_cal)) ** 2))
        ax.plot(
            self.freq.freq,
            freq_ave_cal,
            label=f"Calibrated {load.spectrum.load_name} [RMS = {rms:.3f}]",
        )
        temp_ave = self.source_thermistor_temps.get(load.load_name, load.temp_ave)
        # A scalar temperature gets a horizontal line; an array gets a curve.
        if not hasattr(temp_ave, "__len__"):
            ax.axhline(temp_ave, color="C2", label="Average thermistor temp")
        else:
            ax.plot(
                self.freq.freq,
                temp_ave,
                color="C2",
                label="Average thermistor temp",
            )
        ax.set_ylim([np.nanmin(freq_ave_cal), np.nanmax(freq_ave_cal)])
        if xlabel:
            ax.set_xlabel("Frequency [MHz]")
        if ylabel:
            ax.set_ylabel("Temperature [K]")
        plt.ticklabel_format(useOffset=False)
        ax.grid()
        ax.legend()
        return plt.gcf()
def get_load_residuals(self):
out = {}
for source in self._sources:
load = self._load_str_to_load(source)
cal = self.calibrate(load)
true = self.source_thermistor_temps[source]
out[source] = cal - true
return out
def get_rms(self, smooth: int = 4):
resids = self.get_load_residuals()
out = {}
for name, res in resids.items():
if smooth > 1:
res = convolve(res, Gaussian1DKernel(stddev=smooth), boundary="extend")
out[name] = np.sqrt(np.nanmean(res ** 2))
return out
    def plot_calibrated_temps(self, bins=64, fig=None, ax=None):
        """Plot calibrated spectra of all sources in stacked panels."""
        if fig is None or ax is None or len(ax) != len(self._sources):
            fig, ax = plt.subplots(
                len(self._sources),
                1,
                sharex=True,
                gridspec_kw={"hspace": 0.05},
                figsize=(10, 12),
            )
        for i, source in enumerate(self._sources):
            # Only the bottom panel gets an x-label.
            self.plot_calibrated_temp(
                source,
                bins=bins,
                fig=fig,
                ax=ax[i],
                xlabel=i == (len(self._sources) - 1),
            )
        fig.suptitle("Calibrated Temperatures for Calibration Sources", fontsize=15)
        return fig
    def write_coefficients(self, path: Optional[str] = None):
        """Save the evaluated calibration coefficients to a text file.

        `path` may be a directory (a descriptive filename is generated) or a
        full file path.
        """
        path = Path(path or ".")
        if path.is_dir():
            path /= (
                f"calibration_parameters_fmin{self.freq.freq.min()}_"
                f"fmax{self.freq.freq.max()}_C{self.cterms}_W{self.wterms}.txt"
            )
        # Rows: frequency, C1, C2, Tunc, Tcos, Tsin.
        np.savetxt(
            path,
            [
                self.freq.freq,
                self.C1(),
                self.C2(),
                self.Tunc(),
                self.Tcos(),
                self.Tsin(),
            ],
        )
    def plot_coefficients(self, fig=None, ax=None):
        """Plot the five calibration coefficients vs frequency; return the figure."""
        if fig is None or ax is None:
            fig, ax = plt.subplots(
                5, 1, facecolor="w", gridspec_kw={"hspace": 0.05}, figsize=(10, 9)
            )
        labels = [
            "Scale ($C_1$)",
            "Offset ($C_2$) [K]",
            r"$T_{\rm unc}$ [K]",
            r"$T_{\rm cos}$ [K]",
            r"$T_{\rm sin}$ [K]",
        ]
        for i, (kind, label) in enumerate(
            zip(["C1", "C2", "Tunc", "Tcos", "Tsin"], labels)
        ):
            ax[i].plot(self.freq.freq, getattr(self, kind)())
            ax[i].set_ylabel(label, fontsize=13)
            ax[i].grid()
            plt.ticklabel_format(useOffset=False)
            if i == 4:
                ax[i].set_xlabel("Frequency [MHz]", fontsize=13)
        fig.suptitle("Calibration Parameters", fontsize=15)
        return fig
def invalidate_cache(self):
if not hasattr(self, "_cached_"):
return
for cache in self._cached_:
del self.__dict__[cache]
def update(self, **kwargs):
self.invalidate_cache()
for k, v in kwargs.items():
setattr(self, k, v)
    def write(self, filename: Union[str, Path]):
        """Serialize the calibration solution and per-load inputs to HDF5.

        The layout written here is the one ``Calibration.__init__`` reads
        back, so the attribute/dataset names below form a de-facto schema —
        change them in both places or not at all.
        """
        with h5py.File(filename, "w") as fl:
            # Write attributes
            fl.attrs["path"] = str(self.io.original_path)
            fl.attrs["cterms"] = self.cterms
            fl.attrs["wterms"] = self.wterms
            fl.attrs["switch_path"] = str(self.internal_switch.data.path)
            fl.attrs["switch_repeat_num"] = self.internal_switch.data.repeat_num
            fl.attrs["switch_resistance"] = self.internal_switch.resistance
            fl.attrs["switch_nterms"] = self.internal_switch.n_terms[0]
            fl.attrs["switch_model"] = str(self.internal_switch.model)
            fl.attrs["t_load"] = self.open.spectrum.t_load
            fl.attrs["t_load_ns"] = self.open.spectrum.t_load_ns
            # Calibration polynomial coefficients and the frequency axis.
            fl["C1"] = self.C1_poly.coefficients
            fl["C2"] = self.C2_poly.coefficients
            fl["Tunc"] = self.Tunc_poly.coefficients
            fl["Tcos"] = self.Tcos_poly.coefficients
            fl["Tsin"] = self.Tsin_poly.coefficients
            fl["frequencies"] = self.freq.freq
            # S11 models are stored as evaluated real/imag pairs (HDF5 has no
            # complex-with-metadata convention used elsewhere in this file).
            fl["lna_s11_real"] = self.lna.s11_model(self.freq.freq).real
            fl["lna_s11_imag"] = self.lna.s11_model(self.freq.freq).imag
            fl["internal_switch_s11_real"] = np.real(
                self.internal_switch.s11_model(self.freq.freq)
            )
            fl["internal_switch_s11_imag"] = np.imag(
                self.internal_switch.s11_model(self.freq.freq)
            )
            fl["internal_switch_s12_real"] = np.real(
                self.internal_switch.s12_model(self.freq.freq)
            )
            fl["internal_switch_s12_imag"] = np.imag(
                self.internal_switch.s12_model(self.freq.freq)
            )
            fl["internal_switch_s22_real"] = np.real(
                self.internal_switch.s22_model(self.freq.freq)
            )
            fl["internal_switch_s22_imag"] = np.imag(
                self.internal_switch.s22_model(self.freq.freq)
            )
            # One sub-group per calibration load, with its spectrum summary.
            load_grp = fl.create_group("loads")
            for name, load in self._loads.items():
                grp = load_grp.create_group(name)
                grp.attrs["s11_model"] = yaml.dump(load.s11_model)
                grp["averaged_Q"] = load.spectrum.averaged_Q
                grp["variance_Q"] = load.spectrum.variance_Q
                grp["temp_ave"] = load.temp_ave
                grp.attrs["n_integrations"] = load.spectrum.n_integrations
def to_calfile(self):
return Calibration.from_calobs(self)
def inject(
self,
lna_s11: np.ndarray = None,
source_s11s: Dict[str, np.ndarray] = None,
c1: np.ndarray = None,
c2: np.ndarray = None,
t_unc: np.ndarray = None,
t_cos: np.ndarray = None,
t_sin: np.ndarray = None,
averaged_spectra: Dict[str, np.ndarray] = None,
thermistor_temp_ave: Dict[str, np.ndarray] = None,
) -> CalibrationObservation:
new = copy(self)
new.invalidate_cache()
new._injected_lna_s11 = lna_s11
new._injected_source_s11s = source_s11s
new._injected_c1 = c1
new._injected_c2 = c2
new._injected_t_unc = t_unc
new._injected_t_cos = t_cos
new._injected_t_sin = t_sin
new._injected_averaged_spectra = averaged_spectra
new._injected_source_temps = thermistor_temp_ave
return new
@attr.s
class _LittleS11:
    """Minimal stand-in for a load's reflection data: just the S11 model."""
    s11_model: Callable = attr.ib()
@attr.s
class _LittleSpectrum:
    """Minimal stand-in for a load spectrum: averaged Q, its variance, and count."""
    averaged_Q: np.ndarray = attr.ib()
    variance_Q: np.ndarray = attr.ib()
    n_integrations: int = attr.ib()
@attr.s
class _LittleLoad:
    """Minimal stand-in for a calibration load, rebuilt from a calibration file."""
    reflections: _LittleS11 = attr.ib()
    spectrum: _LittleSpectrum = attr.ib()
    temp_ave: np.ndarray = attr.ib()
class Calibration:
    """File-backed view of a calibration solution.

    Reads back the HDF5 layout produced by
    :meth:`CalibrationObservation.write` and exposes the calibration
    polynomials, S11 splines and (de)calibration helpers without needing
    the raw observation data.
    """

    def __init__(self, filename: Union[str, Path]):
        """Load a calibration file written by ``CalibrationObservation.write``."""
        self.calfile = Path(filename)
        with h5py.File(filename, "r") as fl:
            self.calobs_path = fl.attrs["path"]
            self.cterms = int(fl.attrs["cterms"])
            self.wterms = int(fl.attrs["wterms"])
            # Older files may lack these attributes; fall back to defaults.
            self.t_load = fl.attrs.get("t_load", 300)
            self.t_load_ns = fl.attrs.get("t_load_ns", 400)
            self.C1_poly = np.poly1d(fl["C1"][...])
            self.C2_poly = np.poly1d(fl["C2"][...])
            self.Tcos_poly = np.poly1d(fl["Tcos"][...])
            self.Tsin_poly = np.poly1d(fl["Tsin"][...])
            self.Tunc_poly = np.poly1d(fl["Tunc"][...])
            self.freq = FrequencyRange(fl["frequencies"][...])
            self._loads = {}
            if "loads" in fl:
                lg = fl["loads"]
                self.load_names = list(lg.keys())
                for name, grp in lg.items():
                    self._loads[name] = _LittleLoad(
                        reflections=_LittleS11(
                            # The S11 model is stored as YAML; evaluate it on
                            # this file's frequency axis immediately.
                            s11_model=yaml.load(
                                grp.attrs["s11_model"], Loader=yaml.FullLoader
                            ).at(x=self.freq.freq)
                        ),
                        spectrum=_LittleSpectrum(
                            averaged_Q=grp["averaged_Q"][...],
                            variance_Q=grp["variance_Q"][...],
                            n_integrations=grp.attrs["n_integrations"],
                        ),
                        temp_ave=grp["temp_ave"][...],
                    )
            # Real/imag parts are stored separately; rebuild them as splines
            # so callers can evaluate at arbitrary frequencies.
            self._lna_s11_rl = Spline(self.freq.freq, fl["lna_s11_real"][...])
            self._lna_s11_im = Spline(self.freq.freq, fl["lna_s11_imag"][...])
            self._intsw_s11_rl = Spline(
                self.freq.freq, fl["internal_switch_s11_real"][...]
            )
            self._intsw_s11_im = Spline(
                self.freq.freq, fl["internal_switch_s11_imag"][...]
            )
            self._intsw_s12_rl = Spline(
                self.freq.freq, fl["internal_switch_s12_real"][...]
            )
            self._intsw_s12_im = Spline(
                self.freq.freq, fl["internal_switch_s12_imag"][...]
            )
            self._intsw_s22_rl = Spline(
                self.freq.freq, fl["internal_switch_s22_real"][...]
            )
            self._intsw_s22_im = Spline(
                self.freq.freq, fl["internal_switch_s22_imag"][...]
            )

    @classmethod
    def from_calobs(cls, calobs: CalibrationObservation) -> Calibration:
        """Create a Calibration by round-tripping *calobs* through a temp file.

        Uses ``NamedTemporaryFile`` instead of the deprecated, race-prone
        ``tempfile.mktemp``.  The file is intentionally not deleted: the
        returned object keeps reading from it (``self.calfile``).
        """
        with tempfile.NamedTemporaryFile(suffix=".h5", delete=False) as tf:
            tmp = tf.name
        calobs.write(tmp)
        return cls(tmp)

    def lna_s11(self, freq=None):
        """Complex LNA S11 at *freq* (defaults to the file's frequency axis)."""
        if freq is None:
            freq = self.freq.freq
        return self._lna_s11_rl(freq) + 1j * self._lna_s11_im(freq)

    def internal_switch_s11(self, freq=None):
        """Complex internal-switch S11 at *freq* (defaults to file axis)."""
        if freq is None:
            freq = self.freq.freq
        return self._intsw_s11_rl(freq) + 1j * self._intsw_s11_im(freq)

    def internal_switch_s12(self, freq=None):
        """Complex internal-switch S12 at *freq* (defaults to file axis)."""
        if freq is None:
            freq = self.freq.freq
        return self._intsw_s12_rl(freq) + 1j * self._intsw_s12_im(freq)

    def internal_switch_s22(self, freq=None):
        """Complex internal-switch S22 at *freq* (defaults to file axis)."""
        if freq is None:
            freq = self.freq.freq
        return self._intsw_s22_rl(freq) + 1j * self._intsw_s22_im(freq)

    def C1(self, freq=None):
        """Scale coefficient C1 evaluated at *freq* (normalized internally)."""
        if freq is None:
            freq = self.freq.freq
        return self.C1_poly(self.freq.normalize(freq))

    def C2(self, freq=None):
        """Offset coefficient C2 evaluated at *freq*."""
        if freq is None:
            freq = self.freq.freq
        return self.C2_poly(self.freq.normalize(freq))

    def Tcos(self, freq=None):
        """Cosine noise-wave temperature evaluated at *freq*."""
        if freq is None:
            freq = self.freq.freq
        return self.Tcos_poly(self.freq.normalize(freq))

    def Tsin(self, freq=None):
        """Sine noise-wave temperature evaluated at *freq*."""
        if freq is None:
            freq = self.freq.freq
        return self.Tsin_poly(self.freq.normalize(freq))

    def Tunc(self, freq=None):
        """Uncorrelated noise-wave temperature evaluated at *freq*."""
        if freq is None:
            freq = self.freq.freq
        return self.Tunc_poly(self.freq.normalize(freq))

    def _linear_coefficients(self, freq, ant_s11):
        # Delegate to the shared helper: returns (a, b) with T_cal = a*T + b.
        return rcf.get_linear_coefficients(
            ant_s11,
            self.lna_s11(freq),
            self.C1(freq),
            self.C2(freq),
            self.Tunc(freq),
            self.Tcos(freq),
            self.Tsin(freq),
            self.t_load,
        )

    def calibrate_temp(self, freq: np.ndarray, temp: np.ndarray, ant_s11: np.ndarray):
        """Apply the linear calibration to an uncalibrated temperature."""
        a, b = self._linear_coefficients(freq, ant_s11)
        return temp * a + b

    def decalibrate_temp(self, freq, temp, ant_s11):
        """Inverse of :meth:`calibrate_temp`."""
        a, b = self._linear_coefficients(freq, ant_s11)
        return (temp - b) / a

    def calibrate_Q(
        self, freq: np.ndarray, q: np.ndarray, ant_s11: np.ndarray
    ) -> np.ndarray:
        """Calibrate a raw Q-ratio spectrum into temperature."""
        uncal_temp = self.t_load_ns * q + self.t_load
        return self.calibrate_temp(freq, uncal_temp, ant_s11)
def perform_term_sweep(
    calobs: CalibrationObservation,
    delta_rms_thresh: float = 0,
    max_cterms: int = 15,
    max_wterms: int = 15,
    explore_run_nums: bool = False,
    explore_repeat_nums: bool = False,
    direc=".",
    verbose=False,
) -> CalibrationObservation:
    """Sweep cterms/wterms (and optionally S11 run/repeat numbers).

    For each candidate repeat/run combination, ``cterms`` and ``wterms`` are
    increased from the observation's current values; a sweep stops as soon
    as the RMS-per-dof stops improving by more than *delta_rms_thresh*.
    The observation is mutated in place to the best configuration found and,
    if *direc* is not None, written to disk there.  Returns *calobs*.
    """
    cterms = range(calobs.cterms, max_cterms)
    wterms = range(calobs.wterms, max_wterms)
    # winner[i] = index of the best wterms for the i-th cterms candidate.
    winner = np.zeros(len(cterms), dtype=int)
    s11_keys = ["switching_state", "receiver_reading"] + list(io.LOAD_ALIASES.keys())
    if explore_repeat_nums:
        # Note that we don't explore run_nums for spectra/resistance, because it's rare
        # to have those, and they'll only exist if one got completely botched (and that
        rep_num = {
            k: range(1, getattr(calobs.io.s11, k).max_repeat_num + 1) for k in s11_keys
        }
    else:
        rep_num = {k: [getattr(calobs.io.s11, k).repeat_num] for k in s11_keys}
    rep_num = tools.dct_of_list_to_list_of_dct(rep_num)
    if explore_run_nums:
        run_num = {
            "switching_state": range(
                1, calobs.io.s11.get_highest_run_num("SwitchingState") + 1
            ),
            "receiver_reading": range(
                1, calobs.io.s11.get_highest_run_num("ReceiverReading") + 1
            ),
        }
    else:
        run_num = {
            "switching_state": [calobs.io.s11.switching_state.run_num],
            "receiver_reading": [calobs.io.s11.receiver_reading.run_num],
        }
    run_num = tools.dct_of_list_to_list_of_dct(run_num)
    best_rms = np.inf
    for this_rep_num in rep_num:
        for this_run_num in run_num:
            # Rebuild the observation's IO layer for this repeat/run combo.
            tmp_run_num = copy(calobs.io.run_num)
            tmp_run_num.update(this_run_num)
            calobs.io = io.CalibrationObservation(
                path=calobs.io.path,
                run_num=tmp_run_num,
                repeat_num=this_rep_num,
                fix=False,
                compile_from_def=calobs.compiled_from_def,
                include_previous=calobs.previous_included,
            )
            calobs.lna = LNA(
                calobs.io.s11.receiver_reading,
                f_low=calobs.freq.min,
                f_high=calobs.freq.max,
                resistance=calobs.lna.resistance,
            )
            # Re-derive every load's reflection data for this combo as well.
            for name, load in calobs._loads.items():
                load.reflections = LoadS11.from_path(
                    load_name=name,
                    path=calobs.io.path,
                    repeat_num_load=this_rep_num[name],
                    run_num_switch=this_run_num["switching_state"],
                    repeat_num_switch=this_rep_num["switching_state"],
                )
            if verbose:
                print(
                    f"SWEEPING SwSt={calobs.io.s11.switching_state.repeat_num}, "
                    f"RcvRd={calobs.io.s11.receiver_reading.repeat_num} "
                    f"[Sw={calobs.io.s11.switching_state.run_num}, "
                    f"RR={calobs.io.s11.receiver_reading.run_num}, "
                    f"open={calobs.io.s11.open.run_num}, "
                    f"short={calobs.io.s11.short.run_num}, "
                    f"ambient={calobs.io.s11.ambient.run_num}, "
                    f"hot={calobs.io.s11.hot_load.run_num}]"
                )
                print("-" * 30)
            rms = np.zeros((len(cterms), len(wterms)))
            for i, c in enumerate(cterms):
                for j, w in enumerate(wterms):
                    calobs.update(cterms=c, wterms=w)
                    res = calobs.get_load_residuals()
                    # RMS per degree of freedom: residual count minus the
                    # number of fitted terms.
                    dof = sum(len(r) for r in res.values()) - c - w
                    rms[i, j] = np.sqrt(
                        sum(np.nansum(np.square(x)) for x in res.values()) / dof
                    )
                    if verbose:
                        print(f"Nc = {c:02}, Nw = {w:02}; RMS/dof = {rms[i, j]:1.3e}")
                    # If we've decreased by more than the threshold, this wterms becomes
                    # the winner for this cterms and the inner sweep stops.
                    if j > 0 and rms[i, j] >= rms[i, j - 1] - delta_rms_thresh:
                        winner[i] = j - 1
                        break
                # Stop increasing cterms once it no longer helps either.
                if (
                    i > 0
                    and rms[i, winner[i]]
                    >= rms[i - 1, winner[i - 1]] - delta_rms_thresh
                ):
                    break
            # NOTE(review): the code below uses the leaked loop variable `i`
            # and indexes with i-1; if the i-loop never advanced past i == 0
            # this reads rms[-1, ...] — confirm the sweep ranges guarantee
            # at least one improvement step.
            if verbose:
                print(
                    f"Best parameters found for Nc={cterms[i-1]}, "
                    f"Nw={wterms[winner[i-1]]}, "
                    f"with RMS = {rms[i-1, winner[i-1]]}."
                )
                print()
            # NOTE(review): best_rms is initialized to inf but never
            # reassigned, so this condition is always true and the *last*
            # repeat/run combo always wins — presumably `best_rms` should be
            # updated here; confirm intent.
            if rms[i - 1, winner[i - 1]] < best_rms:
                best_run_combo = (
                    calobs.io.run_num,
                    calobs.io.s11.receiver_reading.repeat_num,
                    calobs.io.s11.switching_state.repeat_num,
                )
                best_cterms = cterms[i - 1]
                best_wterms = wterms[winner[i - 1]]
    if verbose and (explore_repeat_nums or explore_run_nums):
        print("The very best parameters were found were for:")
        print(f"\tSwitchingState Repeat = {best_run_combo[2]}")
        print(f"\tReceiverReading Repeat = {best_run_combo[1]}")
        print(f"\tRun Numbers = {best_run_combo[0]}")
        print(f"\t# C-terms = {best_cterms}")
        print(f"\t# W-terms = {best_wterms}")
    # Re-apply the winning configuration to the observation (in place).
    calobs.update(cterms=best_cterms, wterms=best_wterms)
    calobs.io = io.CalibrationObservation(
        path=calobs.io.path,
        run_num=best_run_combo[0],
        repeat_num={
            "switching_state": best_run_combo[2],
            "receiver_reading": best_run_combo[1],
        },
        fix=False,
        compile_from_def=calobs.compiled_from_def,
        include_previous=calobs.previous_included,
    )
    calobs.lna = LNA(
        calobs.io.s11.receiver_reading,
        f_low=calobs.freq.min,
        f_high=calobs.freq.max,
        resistance=calobs.lna.resistance,
    )
    if direc is not None:
        direc = Path(direc)
        if not direc.exists():
            direc.mkdir(parents=True)
        pth = Path(calobs.path).parent.name
        pth = str(pth) + f"_c{calobs.cterms}_w{calobs.wterms}.h5"
        calobs.write(direc / pth)
    return calobs
| true | true |
f710ac528885b1b93f31c632c55a3507e9b7fd6d | 3,475 | py | Python | pipe-cli/mount/pipefuse/fslock.py | cmbkoko1989/cloud-pipeline | 9af1218151ef02f87915726eb92c0cc626f7ab11 | [
"Apache-2.0"
] | null | null | null | pipe-cli/mount/pipefuse/fslock.py | cmbkoko1989/cloud-pipeline | 9af1218151ef02f87915726eb92c0cc626f7ab11 | [
"Apache-2.0"
] | null | null | null | pipe-cli/mount/pipefuse/fslock.py | cmbkoko1989/cloud-pipeline | 9af1218151ef02f87915726eb92c0cc626f7ab11 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from abc import ABCMeta, abstractmethod
from threading import RLock, Thread
from fuse import fuse_get_context
def get_lock(threads, monitoring_delay):
    """Return a per-path lock for multi-threaded mounts, else a no-op lock."""
    if threads:
        return PathLock(monitoring_delay=monitoring_delay)
    return DummyLock()
def monitor_locks(monitor_lock, locks, timeout):
    """Periodically garbage-collect path locks that nobody holds.

    Intended to run forever on a daemon thread: every *timeout* seconds it
    tries a non-blocking acquire on each lock in *locks* and drops the ones
    it obtains (i.e. the currently-free paths). *monitor_lock* serializes
    this pass against ``PathLock.lock``.
    """
    while True:
        try:
            monitor_lock.acquire()
            logging.debug('Updating path lock status')
            # dict.items() instead of the Python-2-only iteritems(), so this
            # also runs on Python 3. The list is fully built before any
            # deletion below, so the dict is not mutated while iterating.
            free_paths = [path for path, lock in locks.items() if lock.acquire(blocking=False)]
            logging.debug('Releasing %d locks' % len(free_paths))
            for path in free_paths:
                del locks[path]
            logging.debug('Finished path lock status update')
        finally:
            monitor_lock.release()
        time.sleep(timeout)
class FileSystemLock:
    """Abstract per-path lock interface for the FUSE layer."""
    # NOTE(review): __metaclass__ is the Python-2 ABC mechanism; under
    # Python 3 this line has no effect, so the abstract methods are not
    # actually enforced — confirm the target interpreter.
    __metaclass__ = ABCMeta
    @abstractmethod
    def lock(self, path):
        """Acquire the lock protecting *path* (blocking)."""
        pass
    @abstractmethod
    def unlock(self, path):
        """Release the lock protecting *path*."""
        pass
class DummyLock(FileSystemLock):
    """No-op lock used when the mount is single-threaded."""
    def lock(self, path):
        """Nothing to acquire."""
        return None
    def unlock(self, path):
        """Nothing to release."""
        return None
class PathLock(FileSystemLock):
    """Blocking per-path RLocks, garbage-collected by a daemon thread.

    A fresh RLock is created lazily per path; ``monitor_locks`` periodically
    removes locks nobody holds. NOTE(review): ``lock()`` blocks on the path
    lock while still holding ``_monitor_lock``, so one contended path stalls
    every other ``lock()`` call and the monitor pass until it is granted —
    confirm this serialization is intended.
    """
    def __init__(self, monitoring_delay=600):
        # _mutex guards the lock dictionary; _monitor_lock serializes
        # lock() against the background GC pass.
        self._mutex = RLock()
        self._monitor_lock = RLock()
        self._locks = {}
        self._monitor = Thread(target=monitor_locks, args=(self._monitor_lock, self._locks, monitoring_delay,))
        self._monitor.daemon = True
        self._monitor.start()
    def lock(self, path):
        """Acquire (blocking) the lock for *path*, creating it if needed."""
        try:
            self._monitor_lock.acquire()
            logging.debug('Locking path %s for %s' % (path, str(fuse_get_context())))
            path_lock = self._get_path_lock(path)
            self._lock_path(path_lock)
            logging.debug('Acquired lock for %s' % path)
        finally:
            self._monitor_lock.release()
    def unlock(self, path):
        """Release the lock for *path* (a warning is logged if unknown)."""
        logging.debug('Unlocking path %s for %s' % (path, str(fuse_get_context())))
        self._release_path(path)
    def _release_path(self, path):
        # Release under _mutex; a missing entry (e.g. already GC'd by the
        # monitor) is logged and ignored rather than raising.
        try:
            self._mutex.acquire()
            if path not in self._locks:
                logging.debug('Cannot release non-existing lock.')
            else:
                self._locks[path].release()
                logging.debug('Released lock for %s' % path)
        finally:
            self._mutex.release()
        logging.debug('Finished unlocking for %s' % path)
    def _get_path_lock(self, path):
        # Create-on-first-use: one RLock per path, guarded by _mutex.
        try:
            self._mutex.acquire()
            if path not in self._locks:
                self._locks[path] = RLock()
                logging.debug('Created new lock for %s' % path)
            return self._locks[path]
        finally:
            self._mutex.release()
    def _lock_path(self, path_lock):
        # Blocking acquire; on an unexpected error, undo and propagate.
        try:
            path_lock.acquire()
        except:
            path_lock.release()
            raise
| 30.217391 | 111 | 0.624748 |
import logging
import time
from abc import ABCMeta, abstractmethod
from threading import RLock, Thread
from fuse import fuse_get_context
def get_lock(threads, monitoring_delay):
return PathLock(monitoring_delay=monitoring_delay) if threads else DummyLock()
def monitor_locks(monitor_lock, locks, timeout):
while True:
try:
monitor_lock.acquire()
logging.debug('Updating path lock status')
free_paths = [path for path, lock in locks.iteritems() if lock.acquire(blocking=False)]
logging.debug('Releasing %d locks' % len(free_paths))
for path in free_paths:
del locks[path]
logging.debug('Finished path lock status update')
finally:
monitor_lock.release()
time.sleep(timeout)
class FileSystemLock:
__metaclass__ = ABCMeta
@abstractmethod
def lock(self, path):
pass
@abstractmethod
def unlock(self, path):
pass
class DummyLock(FileSystemLock):
def lock(self, path):
pass
def unlock(self, path):
pass
class PathLock(FileSystemLock):
def __init__(self, monitoring_delay=600):
self._mutex = RLock()
self._monitor_lock = RLock()
self._locks = {}
self._monitor = Thread(target=monitor_locks, args=(self._monitor_lock, self._locks, monitoring_delay,))
self._monitor.daemon = True
self._monitor.start()
def lock(self, path):
try:
self._monitor_lock.acquire()
logging.debug('Locking path %s for %s' % (path, str(fuse_get_context())))
path_lock = self._get_path_lock(path)
self._lock_path(path_lock)
logging.debug('Acquired lock for %s' % path)
finally:
self._monitor_lock.release()
def unlock(self, path):
logging.debug('Unlocking path %s for %s' % (path, str(fuse_get_context())))
self._release_path(path)
def _release_path(self, path):
try:
self._mutex.acquire()
if path not in self._locks:
logging.debug('Cannot release non-existing lock.')
else:
self._locks[path].release()
logging.debug('Released lock for %s' % path)
finally:
self._mutex.release()
logging.debug('Finished unlocking for %s' % path)
def _get_path_lock(self, path):
try:
self._mutex.acquire()
if path not in self._locks:
self._locks[path] = RLock()
logging.debug('Created new lock for %s' % path)
return self._locks[path]
finally:
self._mutex.release()
def _lock_path(self, path_lock):
try:
path_lock.acquire()
except:
path_lock.release()
raise
| true | true |
f710ad5b4f762a06f4cdaff930cc88bfc18dba57 | 1,778 | py | Python | tutorials/ngsi_v2/e2_healthcheck/e2_healthcheck_solution.py | N5GEH/FiLiP | d24f47daa272a65ccf9c92522374bc5228b9a3d1 | [
"BSD-3-Clause"
] | null | null | null | tutorials/ngsi_v2/e2_healthcheck/e2_healthcheck_solution.py | N5GEH/FiLiP | d24f47daa272a65ccf9c92522374bc5228b9a3d1 | [
"BSD-3-Clause"
] | null | null | null | tutorials/ngsi_v2/e2_healthcheck/e2_healthcheck_solution.py | N5GEH/FiLiP | d24f47daa272a65ccf9c92522374bc5228b9a3d1 | [
"BSD-3-Clause"
] | null | null | null | """
# # Exercise 2: Service Health Check
# Create one or multiple filip clients and check if the corresponding services
# are up and running by accessing their version information.
# The input sections are marked with 'ToDo'
# #### Steps to complete:
# 1. Set up the missing parameters in the parameter section
# 2. Create filip ngsi_v2 clients for the individual services and check for
# their version
# 3. Create a config object for the ngsi_v2 multi client (HttpClient),
# create the multi client and again check for services' versions
"""
# ## Import packages
from filip.clients.ngsi_v2 import \
HttpClient, \
HttpClientConfig, \
ContextBrokerClient, \
IoTAClient, \
QuantumLeapClient
# ## Parameters
# Service endpoints for the FIWARE stack started by the tutorial's
# docker-compose file; adjust host/port if your deployment differs.
# ToDo: Enter your context broker url and port, e.g. http://localhost:1026
CB_URL = "http://localhost:1026"
# ToDo: Enter your IoT-Agent url and port, e.g. http://localhost:4041
IOTA_URL = "http://localhost:4041"
# ToDo: Enter your QuantumLeap url and port, e.g. http://localhost:8668
QL_URL = "http://localhost:8668"
# ## Main script
if __name__ == "__main__":
    # Step 2: one dedicated client per service; each is probed for its
    # version information as a health check.
    cb_client = ContextBrokerClient(url=CB_URL)
    print(cb_client.get_version())
    iota_client = IoTAClient(url=IOTA_URL)
    print(iota_client.get_version())
    ql_client = QuantumLeapClient(url=QL_URL)
    print(ql_client.get_version())
    # Step 3: a single multi-service client driven by one config object,
    # checking the same three services again.
    config = HttpClientConfig(cb_url=CB_URL, iota_url=IOTA_URL, ql_url=QL_URL)
    multi_client = HttpClient(config=config)
    print(multi_client.cb.get_version())
    print(multi_client.iota.get_version())
    print(multi_client.timeseries.get_version())
\
HttpClient, \
HttpClientConfig, \
ContextBrokerClient, \
IoTAClient, \
QuantumLeapClient
ost:1026"
IOTA_URL = "http://localhost:4041"
QL_URL = "http://localhost:8668"
cbc = ContextBrokerClient(url=CB_URL)
print(cbc.get_version())
iotac = IoTAClient(url=IOTA_URL)
print(iotac.get_version())
qlc = QuantumLeapClient(url=QL_URL)
print(qlc.get_version())
config = HttpClientConfig(cb_url=CB_URL, iota_url=IOTA_URL, ql_url=QL_URL)
multic = HttpClient(config=config)
print(multic.cb.get_version())
print(multic.iota.get_version())
print(multic.timeseries.get_version())
| true | true |
f710adad5bc915650b1798112ca08af0d8455670 | 87 | py | Python | urban_dictionary/__init__.py | accessware/urban_dictionary | 8ebe477dc477850c3e2ce3c0fbb6a32b2ffb3e80 | [
"MIT"
] | null | null | null | urban_dictionary/__init__.py | accessware/urban_dictionary | 8ebe477dc477850c3e2ce3c0fbb6a32b2ffb3e80 | [
"MIT"
] | null | null | null | urban_dictionary/__init__.py | accessware/urban_dictionary | 8ebe477dc477850c3e2ce3c0fbb6a32b2ffb3e80 | [
"MIT"
] | null | null | null | from .base import AsyncUrbanClient, UrbanClient, UrbanDefinition, UrbanDictionaryError
| 43.5 | 86 | 0.873563 | from .base import AsyncUrbanClient, UrbanClient, UrbanDefinition, UrbanDictionaryError
| true | true |
f710adfbd40b4b969e51b988eebe67de9aac564e | 976 | py | Python | cstock/model.py | dwarf-miner/midas | 68ff19da4a1f1a095b9c37e2fd53b77a2e27e562 | [
"MIT"
] | null | null | null | cstock/model.py | dwarf-miner/midas | 68ff19da4a1f1a095b9c37e2fd53b77a2e27e562 | [
"MIT"
] | null | null | null | cstock/model.py | dwarf-miner/midas | 68ff19da4a1f1a095b9c37e2fd53b77a2e27e562 | [
"MIT"
] | null | null | null | # Copyright (c) 2015 Walt Chen
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
class ParserException(Exception):
    """Raised when stock market data cannot be parsed."""
    pass
class Stock(object):
    """One daily OHLCV bar for a single stock.

    Attributes: code (instrument id), date (stored as str), open/high/low/
    close (prices), volume (units transacted).
    """
    # Notes from the original author (a turnover/"total transaction money"
    # field and yesterday's close were described but are not stored here):
    # close is today close price
    # volume: unit of stock transacted
    def __init__(self, code, date, open, high, low, close, volume):
        self.code = code
        # `date` may be any object with a sensible str(); it is stored as text.
        self.date = str(date)
        self.open = open
        self.high = high
        self.low = low
        self.close = close
        self.volume = volume
    def __str__(self):
        # Tab-separated one-line summary, convenient for console dumps.
        return "%s\tvol: %s\topen: %s\tHI: %s\t LO: %s\tclose: %s" %\
            (self.date, self.volume, self.open, self.high, self.low, self.close)
__all__ = ['ParserException', 'Stock']
| 27.111111 | 80 | 0.623975 |
class ParserException(Exception):
pass
class Stock(object):
def __init__(self, code, date, open, high, low, close, volume):
self.code = code
self.date = str(date)
self.open = open
self.high = high
self.low = low
self.close = close
self.volume = volume
def __str__(self):
return "%s\tvol: %s\topen: %s\tHI: %s\t LO: %s\tclose: %s" %\
(self.date, self.volume, self.open, self.high, self.low, self.close)
__all__ = ['ParserException', 'Stock']
| true | true |
f710ae170b6af5321a2a68a244660e923a688a59 | 17,480 | py | Python | test_module.py | aivision2020/OctSceneScan | 3b22ecb4f701270f457a7c2d2702f758b8d584cf | [
"MIT"
] | 2 | 2019-01-18T04:10:00.000Z | 2019-12-03T13:03:03.000Z | test_module.py | aivision2020/OctSceneScan | 3b22ecb4f701270f457a7c2d2702f758b8d584cf | [
"MIT"
] | null | null | null | test_module.py | aivision2020/OctSceneScan | 3b22ecb4f701270f457a7c2d2702f758b8d584cf | [
"MIT"
] | 1 | 2019-12-03T13:03:04.000Z | 2019-12-03T13:03:04.000Z | from pathlib import Path
import copy
import time
import torch.optim as optim
import numpy as np
import torch
from torch.autograd import Variable
from model import *
from data_utils import *
import torch.nn as nn
from loguru import logger
# Shared octree-network hyper-parameters used throughout these tests.
feature_dim = 8  # channels in the feature tensors passed between levels
block_size = 16  # voxel resolution of a single octree leaf block
pad=2  # extra voxels per side added around a block (see test_bottom_io sizing)
n_conv=3  # presumably one voxel per side is consumed per conv — TODO confirm
thresh=0.5  # probability threshold used when building mid-level modules
debug = False  # NOTE(review): unused in the visible tests — confirm purpose
def test_bottom_io():
    """BottomLevel must emit a single dense (1, 2, B, B, B) prediction block."""
    padded = block_size + 2 * pad + 2 * n_conv
    half = block_size // 2 + 2 * pad
    tsdf = [torch.from_numpy(
        np.random.rand(1, 1, padded, padded, padded)).float().to(device)]
    prev = {(0, 0, 0): torch.from_numpy(
        np.random.rand(1, feature_dim, half, half, half)).float().to(device)}
    mod = BottomLevel(feature_dim, block_size=block_size)
    if device == 'cuda':
        mod.cuda()
    out = mod(tsdf, prev)
    assert type(out) == list
    assert len(out) == 1
    leaf = out[0]
    assert len(leaf) == 1
    for X in leaf.keys():
        assert leaf[X].shape == (1, 2, block_size, block_size, block_size), leaf[X].shape
def test_convtrans(block_size=16):
    """Sanity-check ConvTranspose3d output sizes for the two configs used here.

    `block_size` was previously read from the module-level constant (16); it
    is now a parameter with the same default, so existing call sites behave
    identically. Also removes an unused `nn.ReplicationPad3d` local that
    shadowed the module-level `pad` constant.
    """
    # kernel 4, stride 2, no padding: out = 2*in + 2 voxels per axis.
    conv_up = nn.ConvTranspose3d(10, 10, kernel_size=4, stride=2, output_padding=0, padding=0, bias=False)
    dat = torch.ones(1, 10, block_size, block_size, block_size)
    y = conv_up(dat)
    assert y.shape[-1] == block_size * 2 + 2, (y.shape, dat.shape)
    # kernel 3, stride 2, padding 1, output_padding 1: exact 2x upsampling.
    conv_up = nn.ConvTranspose3d(1, 1, kernel_size=3, stride=2,
                                 output_padding=1, padding=1, bias=False)
    dat = Variable(torch.ones(1, 1, 4, 4, 4))
    y = conv_up(dat)
    assert y.shape[-1] == 8, y.shape
def test_data():
    """TsdfGenerator output must be a truncated SDF bounded by the volume size.

    The unused `visdom.Visdom()` instantiation was removed: it opened a
    network client that this test never used.
    """
    data = TsdfGenerator(64)
    gt, tsdf_in = data.__getitem__(0)
    assert np.abs(tsdf_in).max() < 33
def test_ellipsoid():
    """The ellipsoid level-set should behave like a (scaled) distance field."""
    # The levelset output is ~normalized, so scale by the radius (10) to get
    # approximate voxel distances.
    arr = ellipsoid(10, 10, 10, levelset=True) * 10
    assert arr.shape == (23, 23, 23), arr.shape
    # The corner voxel is at least sqrt(3)*11 - 10 voxels from the surface.
    dist = np.sqrt(11 ** 2 * 3) - 10
    assert np.abs(arr[0, 0, 0]) > dist, (arr[0, 0, 0], dist)
    print(arr[0, 0, 0], dist)
    arr = ellipsoid(10, 15, 25, levelset=True)
    # An SDF is 1-Lipschitz: moving one voxel changes it by at most one
    # (compare against 1.01 to leave slack for floating point).
    for axis in range(3):
        step = np.abs(np.diff(arr, axis=axis))
        assert np.all(step <= 1.01), step.max()
def test_criteria_trivial():
    """A 'perfect' mock prediction must yield (near) zero octree loss."""
    gen = TsdfGenerator(block_size, sigma=0.)
    gt, tsdf_in = gen.__getitem_split__()
    gt = gt[None, :]  # prepend the batch dimension
    assert np.abs(tsdf_in).max() < 33
    # Binary occupancy labels: 1 where the SDF is non-negative (outside).
    labels = np.zeros_like(gt)
    labels[gt >= 0] = 1
    criteria = OctreeCrossEntropyLoss(torch.from_numpy(labels.astype(int)), block_size)
    assert len(criteria.gt_octree) == 1
    # Build 2-channel logits that agree with the SDF sign everywhere,
    # scaled to +-500 so the softmax saturates.
    logits = np.concatenate((tsdf_in[None, :] < 0, tsdf_in[None, :] >= 0),
                            axis=1).astype(float)
    logits = 1000 * (logits - 0.5)
    octree = [{(0, 0, 0): torch.from_numpy(logits).float()}]
    loss = criteria(octree)
    assert loss.dim() == 0
    assert loss < 0.01, loss
def test_gt():
    """Placeholder (not implemented): render the GT volume and its octree
    form and compare them."""
    pass
    #get gt,
    #get gt_octree
    #render gt
    #render gt_octree
def test_criteria(levels=2):
    """Check the octree GT labeling produced by OctreeCrossEntropyLoss.

    Verifies cell counts per level, symmetry of INSIDE/OUTSIDE under SDF
    negation, and that every stored GT tensor is non-scalar.
    """
    res=2**(levels-1)*block_size
    data = TsdfGenerator(res, sigma=0.9)
    gt, tsdf_in = data.__getitem_split__()
    gt = gt[None, :]  # add dim for batch
    assert np.abs(tsdf_in).max() < res
    # Labels should be symmetric: negating the SDF swaps inside/outside.
    def count_label(gt, label, level=1):
        # Count octree cells at `level` that carry the given label.
        gt_label = np.zeros_like(gt)
        gt_label[gt >= 0] = 1
        gt_label = torch.from_numpy(gt_label.astype(int))
        criteria = OctreeCrossEntropyLoss(gt_label, block_size)
        gt=criteria.gt_octree[level]
        return np.count_nonzero(np.array(list(gt.values()))==label)
    n_outside = count_label(gt, OUTSIDE)
    n_inside = count_label(gt, INSIDE)
    n_mixed = count_label(gt, MIXED)
    # Every level-1 cell is exactly one of the three categories.
    assert n_outside+n_inside+n_mixed==(2**(levels-2))**3
    rev_inside = count_label(-gt, OUTSIDE)
    assert n_inside==rev_inside, (n_inside, rev_inside)
    gt_label = np.zeros_like(gt)
    gt_label[gt >= 0] = 1
    gt_label = torch.from_numpy(gt_label.astype(int))
    criteria = OctreeCrossEntropyLoss(gt_label, block_size)
    # One dict per level: finest level has (2**(levels-1))**3 blocks, the
    # coarsest exactly one.
    assert len(criteria.gt_octree) == levels
    assert len(criteria.gt_octree[0]) == (2**(levels-1))**3, len(criteria.gt_octree[0])
    assert len(criteria.gt_octree[-1]) == 1, len(criteria.gt_octree[-1])
    for l, level in enumerate(criteria.gt_octree):
        for k, v in level.items():
            assert v.dim() > 0, (l, k, v)
def test_basic_debug():
    """Two valid-padding 3x3x3 convs shrink a 36^3 volume to 32^3 (16 ch)."""
    out_channels = 16
    net = nn.Sequential(
        nn.Conv3d(1, out_channels, kernel_size=3, stride=1, padding=0, bias=False),
        nn.BatchNorm3d(out_channels),
        nn.ReLU(inplace=True),
        nn.Conv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=0, bias=False),
        nn.BatchNorm3d(out_channels),
    )
    x = net(torch.zeros(1, 1, 36, 36, 36))
    assert x.shape == (1, 16, 32, 32, 32)
def test_simple_net_single_data():
    """Overfit BottomLevel on one generated volume; final error must be < 2.

    Requires a running visdom server for the periodic visualization.
    """
    data = TsdfGenerator(block_size, sigma=0.9)
    vis = visdom.Visdom()
    gt, tsdf_in = data.__getitem__(0)
    gt = gt[None, :]  # add dim for batch
    assert np.abs(tsdf_in).max() < block_size
    # Occupancy labels: 1 where the SDF is non-negative.
    gt_label = np.zeros_like(gt)
    gt_label[gt >= 0] = 1
    gt_label = torch.from_numpy(gt_label.astype(int)).to(device)
    # Pad the input the way the full pipeline would before the bottom level.
    rep_pad = nn.ReplicationPad3d(pad+n_conv)
    tsdf = [rep_pad(torch.from_numpy(copy.copy(tsdf_in)[None, :]).float().to(device))]
    #prev = {(0, 0, 0): torch.rand(1, feature_dim, block_size//2, block_size//2,
    #                              block_size//2).float().to(device)}
    # Random stand-in for the coarser level's feature block.
    prev = {(0, 0, 0): torch.from_numpy(np.random.rand(1, feature_dim,
        block_size//2+2*pad, block_size//2+2*pad, block_size//2+2*pad)
        ).float().to(device)}
    #assert tsdf[0].shape == (1, 1, block_size, block_size, block_size)
    assert gt_label.shape == (1, block_size, block_size, block_size)
    criteria = OctreeCrossEntropyLoss(gt_label, block_size)
    mod = BottomLevel(feature_dim, block_size)
    if device=='cuda':
        mod.cuda()
        criteria.cuda()
    optimizer = optim.Adam(mod.parameters(), lr=0.001) # , momentum=0.9)
    for it in range(1, 100):
        out = mod(tsdf, prev)
        assert len(out) == 1
        # NOTE(review): the assert message references out.shape, but out is
        # a list — this would raise AttributeError if the assert ever fails.
        assert out[0][(0,0,0)].shape[1] == 2, out.shape
        loss = criteria(out)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (it+1) % 10 == 0:
            # Periodically render the current prediction and measure error.
            sdf_ = octree_to_sdf(out, block_size)
            print('level ', np.count_nonzero(sdf_ == 1))
            err = plotVoxelVisdom(gt[0], sdf_, tsdf_in[0], vis)
            assert np.abs(tsdf_in).max() < 33
            print(err)
        print(it, loss)
    assert err < 2
def test_bottom_layer(block_size=32):
    """Train BottomLevel on streamed single-ellipsoid volumes.

    Every 100 iterations the prediction is rendered via visdom and its
    error asserted. The leftover ``import ipdb; ipdb.set_trace()`` debugger
    breakpoint (an undeclared dependency) was replaced by a plain assert.
    """
    dataset = TsdfGenerator(block_size, n_elips=1, sigma=0.9, epoch_size=1000)
    train_loader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                               num_workers=4)
    vis = visdom.Visdom()
    mod = BottomLevel(feature_dim, block_size)
    if device=='cuda':
        mod.cuda()
    optimizer = optim.SGD(mod.parameters(), lr=0.0001, momentum=0.9)
    m = nn.ReplicationPad3d(mod.pad+mod.n_conv)
    # Random stand-in for the coarser level's feature block (kept fixed).
    prev = {(0, 0, 0): torch.rand(1, feature_dim,
        block_size//2+2*pad, block_size//2+2*pad, block_size//2+2*pad
        ).float().to(device)}
    for it, (gt, tsdf_in) in enumerate(train_loader):
        assert np.abs(tsdf_in).max() < 33
        assert gt.max() > 1 and gt.min() < -1
        # Two-class labels: INSIDE everywhere, OUTSIDE where SDF >= 0.
        gt_label = torch.ones_like(gt)*INSIDE
        gt_label[gt >= 0] = OUTSIDE
        gt_label = gt_label.long().to(device)
        tsdf = [m(tsdf_in).float().to(device)]
        for T in prev.values():
            assert torch.all(torch.isfinite(T))
        for T in tsdf:
            assert torch.all(torch.isfinite(T))
        out = mod(tsdf, prev)
        assert out[0][(0,0,0)].max()>out[0][(0,0,0)].min()
        for level in out:
            # Fail loudly instead of dropping into a debugger.
            assert all(torch.all(torch.isfinite(o)) for o in level.values()), \
                'non-finite values in network output'
        criteria = OctreeCrossEntropyLoss(gt_label, block_size)
        if device=='cuda':
            criteria.cuda()
        loss = criteria(out)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print(it, loss)
        if it>1 and it%100 == 0:
            sdf_ = octree_to_sdf(out, block_size)
            err = plotVoxelVisdom(gt[0].numpy(), sdf_, tsdf_in[0][0].numpy(), vis)
            print(it, err)
            assert err < 2, err
def test_2tier_net_single_data():
    """Overfit a 2-level (Top+Bottom) network on one generated volume.

    Requires a running visdom server for the periodic visualization.
    """
    res = block_size*2
    dataset = TsdfGenerator(res, n_elips=3, sigma=0.9, epoch_size=100)
    vis = visdom.Visdom()
    mod = TopLevel(feature_dim, BottomLevel(feature_dim, block_size), block_size=block_size)
    if device == 'cuda':
        mod.cuda()
    optimizer = optim.Adam(mod.parameters(), lr=0.01)#, momentum=0.9)
    gt, tsdf_in = dataset.__getitem__(0)
    assert np.abs(tsdf_in).max() < 33
    assert gt.max() > 1 and gt.min() < -1
    gt = torch.from_numpy(gt[None, :])
    # Occupancy labels: 1 where the SDF is non-negative.
    gt_label = torch.zeros_like(gt)
    gt_label[gt >= 0] = 1
    gt_label = gt_label.long().to(device)
    criteria = OctreeCrossEntropyLoss(gt_label, block_size)
    if device == 'cuda':
        criteria.cuda()
    tsdf = torch.from_numpy(copy.copy(tsdf_in)[None, :]).float().to(device)
    for it in range(1000):
        out = mod(tsdf)
        assert len(out) == 2
        for l in out[1:]:
            for v in l.values():
                # only level 0 can have a full block
                assert v.shape[-1] < block_size, (v.shape)
        loss = criteria(out)
        assert len(out) == 2
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print(it, loss)
        if (it+1) % 10 == 0:
            # Periodically render the current prediction and measure error.
            #mod.eval()
            sdf_ = octree_to_sdf(out, block_size)
            err = plotVoxelVisdom(gt[0].numpy(), sdf_, tsdf_in[0], vis)
            #mod.train()
            print(it, err)
            assert err < 2,err
def test_4tier_data(block_size=block_size):
    """Smoke-test a 4-level octree network on one generated volume."""
    res = block_size * (2 ** 3)
    dataset = TsdfGenerator(res, n_elips=3, sigma=0.9, epoch_size=1000)
    gt, tsdf = dataset.__getitem__(0)
    # Stack: one bottom level, two mid levels, capped by the top level.
    net = BottomLevel(feature_dim, block_size)
    for _ in range(2):
        print('adding mid layer')
        net = MidLevel(feature_dim, feature_dim, net, block_size,
                       thresh=thresh, budget=4)
    net = TopLevel(feature_dim, net, block_size=block_size)
    out = net(torch.from_numpy(tsdf[None, :]).float())
def test_2tier_net(res=64, block_size=block_size):
    """Train a multi-tier octree network on streamed TSDF samples until the
    reconstruction error drops below 2, checkpointing to 'model_2tier.pth'.

    Resumes from an existing 'model_2tier.pth' unless Force is flipped.
    Per-iteration timing for forward/loss/backward/step is printed.
    """
    dataset = TsdfGenerator(res, n_elips=1, sigma=0.9, epoch_size=10000, debug=False)
    train_loader = torch.utils.data.DataLoader(dataset, batch_size=1,
            num_workers=2)
    vis = visdom.Visdom()
    Force = False  # set True to ignore any saved checkpoint and train fresh
    if not Force and Path('model_2tier.pth').exists():
        mod = torch.load('model_2tier.pth')
    else:
        # build bottom -> mid... -> top, adding mid levels until the
        # receptive field covers half the volume resolution
        layers = []
        layers.append(BottomLevel(feature_dim, block_size))
        while block_size*2**len(layers) <= res/2:
            print('adding mid layer', len(layers))
            layers.append(MidLevel(feature_dim, feature_dim, layers[-1],
                block_size, thresh=0.5, budget=4))
        mod = TopLevel(feature_dim, layers[-1], block_size=block_size)
    if device == 'cuda':
        mod.cuda()
    optimizer = optim.SGD(mod.parameters(), lr=0.0001, momentum=0.95)
    for it, (gt, tsdf_in) in enumerate(train_loader):
        # sanity-check each sample: TSDF bounded by volume size, GT crosses 0
        assert np.abs(tsdf_in).max() < res
        assert gt.max() > 1 and gt.min() < -1
        gt_label = torch.zeros_like(gt, device=device)
        gt_label[gt >= 0] = 1
        gt_label = gt_label.long().to(device)
        # loss object is rebuilt per sample: it precomputes the GT octree
        criteria = OctreeCrossEntropyLoss(gt_label, block_size)
        if device == 'cuda':
            criteria.cuda()
        #tsdf = tsdf_in.float().cuda()
        t_start = time.time()
        tsdf = tsdf_in.float().to(device)
        pred = mod(tsdf)
        forward_t = time.time()-t_start
        t = time.time()
        loss = criteria(pred)
        loss_t = time.time()-t
        t = time.time()
        optimizer.zero_grad()
        loss.backward()
        back_t = time.time()-t
        t = time.time()
        optimizer.step()
        step_t = time.time()-t
        t = time.time()
        print(it, loss.data)
        print('valuated ', [len(o) for o in pred])
        # NOTE(review): elsewhere (test_criteria) criteria.gt_octree levels are
        # dicts, which have no .numel(); confirm this line can actually run.
        print('GT voxels ', np.count_nonzero([o.numel()>3 for o in criteria.gt_octree]))
        print('timing:{total:.3f}. forward {forward_t:.3f}, loss {loss_t:.3f}, back {back_t:.3f}, step {step_t:.3f}'.format(
            total=t-t_start, forward_t=forward_t, loss_t=loss_t, back_t=back_t, step_t=step_t))
        if (it+1) % 100 == 0:
            # periodic eval: decode octree, visualize, checkpoint, and stop
            # once the error target is reached
            mod.eval()
            out = mod(tsdf)
            loss = criteria(out)
            for i in range(len(out)):
                resample = (2**i)
                print('Eval: level %d, %d/%d evaluated' % (i, len(out[i]),
                    (res/block_size/resample)**3))
            sdf_ = octree_to_sdf(out, block_size)
            err = plotVoxelVisdom(gt[0].numpy(), sdf_, tsdf_in[0][0].numpy(), vis)
            # NOTE(review): intentional debugger drop-in when loss < 1 — this
            # will hang any unattended run; remove before CI use.
            if loss.data<1:
                import ipdb; ipdb.set_trace()
            mod.train()
            print(it, err)
            torch.save(mod, 'model_2tier.pth')
            if err < 2 :
                break
    #assert err < 2
def create_model(block_size, feature_dim, res):
    """Assemble an octree model deep enough to cover a volume of size `res`:
    a BottomLevel, as many MidLevels as needed, capped by a TopLevel."""
    stack = [BottomLevel(feature_dim, block_size)]
    while block_size * 2 ** len(stack) <= res / 2:
        print('adding mid layer', len(stack))
        stack.append(MidLevel(feature_dim, feature_dim, stack[-1],
                              block_size, thresh=0.1))
    return TopLevel(feature_dim, stack[-1], block_size=block_size)
def test_simple_split(res=64, block_size=block_size):
    """Load a trained model and verify its split decisions on a purpose-built
    scene: every evaluated leaf block must have been flagged for splitting by
    its parent, and only the expected region of GT space is mixed.

    Fix: removed a leftover ``import ipdb; ipdb.set_trace()`` interactive
    breakpoint that made the test hang under any automated runner.
    """
    dataset = TsdfGenerator(res, n_elips=3, sigma=0.9, epoch_size=1000, debug=True)
    vis = visdom.Visdom()
    # NOTE(review): the trainer in this file saves 'model_2tier.pth'; confirm
    # that 'model.pth' is produced elsewhere before relying on this test.
    mod = torch.load('model.pth')
    if device == 'cuda':
        mod.cuda()
    mod.eval()
    gt, tsdf_in = dataset.__getitem_split__()
    gt = torch.from_numpy(gt[None, :])
    tsdf_in = torch.from_numpy(tsdf_in[None, :])
    gt_label = torch.zeros_like(gt, device=device)
    gt_label[gt >= 0] = 1
    gt_label = gt_label.long().to(device)
    criteria = OctreeCrossEntropyLoss(gt_label, block_size)
    if device == 'cuda':
        criteria.cuda()
    tsdf = tsdf_in.float().to(device)
    pred = mod(tsdf)
    loss = criteria(pred)
    print(loss.data)
    print('evaluated ', [len(o) for o in pred])
    for X in pred[0]:
        # each level-0 block must map to a parent that voted "split" (>0.5)
        X_ = tuple(np.array(X)//2)
        print (X, pred[1][X_])
        assert pred[1][X_][0,2]>0.5
    sdf_ = octree_to_sdf(pred, block_size)
    err = plotVoxelVisdom(gt[0].numpy(), sdf_, tsdf_in[0][0].numpy(), vis)
    for X,v in criteria.gt_octree[0].items():
        if v.numel()>1:
            assert X[2]==1 #that's how we built the space
def test_split_subtree(padding=0):
    """split_tree must yield 8 octants whose interior voxels alias the
    corresponding corners of the source feature tensor."""
    side = block_size + 2 * padding
    feat = torch.rand(1, feature_dim, side, side, side).float()
    octants = split_tree(feat, padding=padding)
    assert len(octants) == 8, len(octants)
    # octant (0,0,0) starts at the source origin (offset by the padding)
    assert torch.all(octants[(0, 0, 0)][0, :, padding, padding, padding] ==
                     feat[0, :, padding, padding, padding])
    # octant (1,0,0) starts halfway along the first spatial axis
    assert torch.all(octants[(1, 0, 0)][0, :, padding, padding, padding] ==
                     feat[0, :, block_size//2+padding, padding, padding])
    octants[(1, 0, 0)][0, 0, padding, padding, padding] = 12.13
    # this is no longer true, I don't know how to do this inplace
    #assert feat[0, 0, block_size//2, 0, 0] == 12.13
def test_split_subtree_with_padding():
    """Check split_tree with zero padding: 8 octants, zeroed borders, and
    interior values matching the source tensor.

    Fixes: the shape assertion previously evaluated ``feat.shape[-3:]//2``,
    which floor-divides a torch.Size (a tuple) by an int and raises TypeError;
    it is rewritten as a per-dimension comparison. An exactly duplicated
    assert line was also removed.
    """
    padding=2
    feat = torch.rand(1, feature_dim, block_size, block_size,
            block_size).float()
    split = split_tree(feat, padding=2)
    assert len(split) == 8, len(split)
    octant = split[(0,0,0)]
    # NOTE(review): the slices below index dim 1 (channels), yet the padding
    # added by split_tree is spatial per the shape check — confirm these
    # borders are really zero on the channel axis, not dims 2-4.
    assert torch.all(octant[0, :padding, 0, 0, 0] == 0)
    assert torch.all(octant[0, -padding:, 0, 0, 0] == 0)
    # each spatial dimension halves, then grows by padding on both sides
    assert all(o == f // 2 + 2 * padding
               for o, f in zip(octant.shape[-3:], feat.shape[-3:]))
    assert torch.all(octant[0, padding:-padding, 0, 0, 0] == feat[0, :, 0, 0, 0])
    assert torch.all(split[(1, 0, 0)][0, :, padding, padding, padding] ==
            feat[0, :, block_size//2, 0, 0])
    # writing into the padded region must alias back into the source tensor
    split[(1, 0, 0)][0, 0, 0, 0, 0] = 12.13
    assert feat[0, 0, block_size//2+padding, 0, 0] == 12.13
if __name__ == '__main__':
    # Manual test driver: configure logging, then run the currently-active
    # subset of tests. Earlier tests are kept commented out for quick re-runs.
    import sys
    logger.remove()
    logger.add(sys.stderr , format="{time} {level} {message}", level="INFO")
    #test_4tier_data()
    #test_criteria_trivial()
    #test_criteria()
    #test_criteria(4)
    #test_data()
    #test_ellipsoid()
    #test_convtrans()
    #test_split_subtree()
    #test_split_subtree(padding=2)
    #test_basic_debug()
    #test_bottom_io()
    #test_simple_net_single_data()
    #test_bottom_layer()
    # TODO why does this not converge? interesting
    #test_2tier_net_single_data()
    #test_2tier_net(res=32, block_size=block_size)
    test_2tier_net(res=64, block_size=block_size)
    test_simple_split(res=64, block_size=block_size)
    # deliberate breakpoint before the expensive 128^3 run — interactive use only
    import ipdb; ipdb.set_trace()
    test_2tier_net(res=128, block_size=block_size)
| 36.877637 | 124 | 0.602918 | from pathlib import Path
import copy
import time
import torch.optim as optim
import numpy as np
import torch
from torch.autograd import Variable
from model import *
from data_utils import *
import torch.nn as nn
from loguru import logger
feature_dim = 8
block_size = 16
pad=2
n_conv=3
thresh=0.5
debug = False
def test_bottom_io():
tsdf = [torch.from_numpy(np.random.rand(1, 1, block_size+2*pad+2*n_conv,
block_size+2*pad+2*n_conv,
block_size+2*pad+2*n_conv)).float().to(device)]
prev = {(0, 0, 0): torch.from_numpy(np.random.rand(1, feature_dim,
block_size//2+2*pad, block_size//2+2*pad, block_size//2+2*pad)
).float().to(device)}
mod = BottomLevel(feature_dim, block_size=block_size)
if device == 'cuda':
mod.cuda()
out = mod(tsdf, prev)
assert type(out) == list
assert len(out) == 1
out = out[0]
assert len(out) == 1
for X in out.keys():
assert out[X].shape == (1, 2, block_size, block_size, block_size), out[X].shape
def test_convtrans():
conv1 = nn.ConvTranspose3d(10, 10, kernel_size=4, stride=2, output_padding=0, padding=0, bias=False)
dat = torch.ones(1, 10, block_size, block_size, block_size)
y = conv1(dat)
assert y.shape[-1] == block_size*2+2 , (y.shape, dat.shape)
pad = nn.ReplicationPad3d(1)
conv1 = nn.ConvTranspose3d(1, 1, kernel_size=3, stride=2,
output_padding=1, padding=1, bias=False)
dat = Variable(torch.ones(1, 1, 4, 4, 4))
y = conv1(dat)
assert y.shape[-1] == 8, y.shape
def test_data():
data = TsdfGenerator(64)
vis = visdom.Visdom()
gt, tsdf_in = data.__getitem__(0)
assert np.abs(tsdf_in).max() < 33
def test_ellipsoid():
arr = ellipsoid(10, 10, 10, levelset=True)*10
assert arr.shape == (23, 23, 23), arr.shape
dist = np.sqrt(11**2*3)-10
assert np.abs(arr[0, 0, 0]) > dist, (arr[0, 0, 0], dist)
print(arr[0, 0, 0], dist)
a, b, c = 10, 15, 25
arr = ellipsoid(a, b, c, levelset=True)
assert np.all(np.abs(np.diff(arr, axis=0)) <= 1.01), np.abs(np.diff(arr, axis=0)).max()
assert np.all(np.abs(np.diff(arr, axis=1)) <= 1.01)
assert np.all(np.abs(np.diff(arr, axis=2)) <= 1.01)
def test_criteria_trivial():
data = TsdfGenerator(block_size, sigma=0.)
gt, tsdf_in = data.__getitem_split__()
gt = gt[None, :]
assert np.abs(tsdf_in).max() < 33
gt_label = np.zeros_like(gt)
gt_label[gt >= 0] = 1
gt_label = torch.from_numpy(gt_label.astype(int))
criteria = OctreeCrossEntropyLoss(gt_label, block_size)
assert len(criteria.gt_octree) == 1
mock_out = np.concatenate((tsdf_in[None,:]<0, tsdf_in[None,:]>=0),
axis=1).astype(float)
mock_out=1000*(mock_out-0.5)
mock_out = [{(0,0,0):torch.from_numpy(mock_out).float()}]
loss = criteria(mock_out)
assert loss.dim()==0
assert loss < 0.01, loss
def test_gt():
pass
def test_criteria(levels=2):
res=2**(levels-1)*block_size
data = TsdfGenerator(res, sigma=0.9)
gt, tsdf_in = data.__getitem_split__()
gt = gt[None, :]
assert np.abs(tsdf_in).max() < res
def count_label(gt, label, level=1):
gt_label = np.zeros_like(gt)
gt_label[gt >= 0] = 1
gt_label = torch.from_numpy(gt_label.astype(int))
criteria = OctreeCrossEntropyLoss(gt_label, block_size)
gt=criteria.gt_octree[level]
return np.count_nonzero(np.array(list(gt.values()))==label)
n_outside = count_label(gt, OUTSIDE)
n_inside = count_label(gt, INSIDE)
n_mixed = count_label(gt, MIXED)
assert n_outside+n_inside+n_mixed==(2**(levels-2))**3
rev_inside = count_label(-gt, OUTSIDE)
assert n_inside==rev_inside, (n_inside, rev_inside)
gt_label = np.zeros_like(gt)
gt_label[gt >= 0] = 1
gt_label = torch.from_numpy(gt_label.astype(int))
criteria = OctreeCrossEntropyLoss(gt_label, block_size)
assert len(criteria.gt_octree) == levels
assert len(criteria.gt_octree[0]) == (2**(levels-1))**3, len(criteria.gt_octree[0])
assert len(criteria.gt_octree[-1]) == 1, len(criteria.gt_octree[-1])
for l, level in enumerate(criteria.gt_octree):
for k, v in level.items():
assert v.dim() > 0, (l, k, v)
def test_basic_debug():
T = torch.zeros(1,1,36,36,36)
outplane = 16
mod = nn.Conv3d(1, outplane, kernel_size=3, stride=1,
padding=0, bias=False)
T = mod(T)
mod = nn.BatchNorm3d(outplane)
T = mod(T)
mod = nn.ReLU(inplace=True)
T = mod(T)
mod = nn.Conv3d(outplane, outplane, kernel_size=3, stride=1,
padding=0, bias=False)
T = mod(T)
mod = nn.BatchNorm3d(outplane)
T = mod(T)
assert T.shape == (1,16,32,32,32)
def test_simple_net_single_data():
data = TsdfGenerator(block_size, sigma=0.9)
vis = visdom.Visdom()
gt, tsdf_in = data.__getitem__(0)
gt = gt[None, :]
assert np.abs(tsdf_in).max() < block_size
gt_label = np.zeros_like(gt)
gt_label[gt >= 0] = 1
gt_label = torch.from_numpy(gt_label.astype(int)).to(device)
rep_pad = nn.ReplicationPad3d(pad+n_conv)
tsdf = [rep_pad(torch.from_numpy(copy.copy(tsdf_in)[None, :]).float().to(device))]
prev = {(0, 0, 0): torch.from_numpy(np.random.rand(1, feature_dim,
block_size//2+2*pad, block_size//2+2*pad, block_size//2+2*pad)
).float().to(device)}
assert gt_label.shape == (1, block_size, block_size, block_size)
criteria = OctreeCrossEntropyLoss(gt_label, block_size)
mod = BottomLevel(feature_dim, block_size)
if device=='cuda':
mod.cuda()
criteria.cuda()
optimizer = optim.Adam(mod.parameters(), lr=0.001)
for it in range(1, 100):
out = mod(tsdf, prev)
assert len(out) == 1
assert out[0][(0,0,0)].shape[1] == 2, out.shape
loss = criteria(out)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (it+1) % 10 == 0:
sdf_ = octree_to_sdf(out, block_size)
print('level ', np.count_nonzero(sdf_ == 1))
err = plotVoxelVisdom(gt[0], sdf_, tsdf_in[0], vis)
assert np.abs(tsdf_in).max() < 33
print(err)
print(it, loss)
assert err < 2
def test_bottom_layer( block_size = 32):
dataset = TsdfGenerator(block_size, n_elips=1, sigma=0.9, epoch_size=1000)
train_loader = torch.utils.data.DataLoader(dataset, batch_size=1,
num_workers=4)
vis = visdom.Visdom()
mod = BottomLevel(feature_dim, block_size)
if device=='cuda':
mod.cuda()
optimizer = optim.SGD(mod.parameters(), lr=0.0001, momentum=0.9)
m = nn.ReplicationPad3d(mod.pad+mod.n_conv)
prev = {(0, 0, 0): torch.rand(1, feature_dim,
block_size//2+2*pad, block_size//2+2*pad, block_size//2+2*pad
).float().to(device)}
gt_label = None
for it, (gt, tsdf_in) in enumerate(train_loader):
assert np.abs(tsdf_in).max() < 33
assert gt.max() > 1 and gt.min() < -1
gt_label = torch.ones_like(gt)*INSIDE
gt_label[gt >= 0] = OUTSIDE
gt_label = gt_label.long().to(device)
tsdf = [m(tsdf_in).float().to(device)]
for T in prev.values():
assert torch.all(torch.isfinite(T))
for T in tsdf:
assert torch.all(torch.isfinite(T))
out = mod(tsdf, prev)
assert out[0][(0,0,0)].max()>out[0][(0,0,0)].min()
for oct in out:
if not np.all([torch.all(torch.isfinite(o)) for o in oct.values()]):
import ipdb; ipdb.set_trace()
criteria = OctreeCrossEntropyLoss(gt_label, block_size)
if device=='cuda':
criteria.cuda()
loss = criteria(out)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(it, loss)
if it>1 and it%100 == 0:
sdf_ = octree_to_sdf(out, block_size)
err = plotVoxelVisdom(gt[0].numpy(), sdf_, tsdf_in[0][0].numpy(), vis)
print(it, err)
assert err < 2, err
def test_2tier_net_single_data():
res = block_size*2
dataset = TsdfGenerator(res, n_elips=3, sigma=0.9, epoch_size=100)
vis = visdom.Visdom()
mod = TopLevel(feature_dim, BottomLevel(feature_dim, block_size), block_size=block_size)
if device == 'cuda':
mod.cuda()
optimizer = optim.Adam(mod.parameters(), lr=0.01)
gt, tsdf_in = dataset.__getitem__(0)
assert np.abs(tsdf_in).max() < 33
assert gt.max() > 1 and gt.min() < -1
gt = torch.from_numpy(gt[None, :])
gt_label = torch.zeros_like(gt)
gt_label[gt >= 0] = 1
gt_label = gt_label.long().to(device)
criteria = OctreeCrossEntropyLoss(gt_label, block_size)
if device == 'cuda':
criteria.cuda()
tsdf = torch.from_numpy(copy.copy(tsdf_in)[None, :]).float().to(device)
for it in range(1000):
out = mod(tsdf)
assert len(out) == 2
for l in out[1:]:
for v in l.values():
assert v.shape[-1] < block_size, (v.shape)
loss = criteria(out)
assert len(out) == 2
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(it, loss)
if (it+1) % 10 == 0:
sdf_ = octree_to_sdf(out, block_size)
err = plotVoxelVisdom(gt[0].numpy(), sdf_, tsdf_in[0], vis)
print(it, err)
assert err < 2,err
def test_4tier_data(block_size=block_size):
res=block_size*(2**3)
dataset = TsdfGenerator(res, n_elips=3, sigma=0.9, epoch_size=1000)
gt, tsdf = dataset.__getitem__(0)
mod = BottomLevel(feature_dim, block_size)
for i in range(2):
print('adding mid layer')
mod = MidLevel(feature_dim, feature_dim, mod, block_size,
thresh=thresh, budget=4)
mod = TopLevel(feature_dim, mod, block_size=block_size)
out = mod(torch.from_numpy(tsdf[None,:]).float())
def test_2tier_net(res=64, block_size=block_size):
dataset = TsdfGenerator(res, n_elips=1, sigma=0.9, epoch_size=10000, debug=False)
train_loader = torch.utils.data.DataLoader(dataset, batch_size=1,
num_workers=2)
vis = visdom.Visdom()
Force = False
if not Force and Path('model_2tier.pth').exists():
mod = torch.load('model_2tier.pth')
else:
layers = []
layers.append(BottomLevel(feature_dim, block_size))
while block_size*2**len(layers) <= res/2:
print('adding mid layer', len(layers))
layers.append(MidLevel(feature_dim, feature_dim, layers[-1],
block_size, thresh=0.5, budget=4))
mod = TopLevel(feature_dim, layers[-1], block_size=block_size)
if device == 'cuda':
mod.cuda()
optimizer = optim.SGD(mod.parameters(), lr=0.0001, momentum=0.95)
for it, (gt, tsdf_in) in enumerate(train_loader):
assert np.abs(tsdf_in).max() < res
assert gt.max() > 1 and gt.min() < -1
gt_label = torch.zeros_like(gt, device=device)
gt_label[gt >= 0] = 1
gt_label = gt_label.long().to(device)
criteria = OctreeCrossEntropyLoss(gt_label, block_size)
if device == 'cuda':
criteria.cuda()
t_start = time.time()
tsdf = tsdf_in.float().to(device)
pred = mod(tsdf)
forward_t = time.time()-t_start
t = time.time()
loss = criteria(pred)
loss_t = time.time()-t
t = time.time()
optimizer.zero_grad()
loss.backward()
back_t = time.time()-t
t = time.time()
optimizer.step()
step_t = time.time()-t
t = time.time()
print(it, loss.data)
print('valuated ', [len(o) for o in pred])
print('GT voxels ', np.count_nonzero([o.numel()>3 for o in criteria.gt_octree]))
print('timing:{total:.3f}. forward {forward_t:.3f}, loss {loss_t:.3f}, back {back_t:.3f}, step {step_t:.3f}'.format(
total=t-t_start, forward_t=forward_t, loss_t=loss_t, back_t=back_t, step_t=step_t))
if (it+1) % 100 == 0:
mod.eval()
out = mod(tsdf)
loss = criteria(out)
for i in range(len(out)):
resample = (2**i)
print('Eval: level %d, %d/%d evaluated' % (i, len(out[i]),
(res/block_size/resample)**3))
sdf_ = octree_to_sdf(out, block_size)
err = plotVoxelVisdom(gt[0].numpy(), sdf_, tsdf_in[0][0].numpy(), vis)
if loss.data<1:
import ipdb; ipdb.set_trace()
mod.train()
print(it, err)
torch.save(mod, 'model_2tier.pth')
if err < 2 :
break
def create_model(block_size, feature_dim, res):
layers = []
layers.append(BottomLevel(feature_dim, block_size))
while block_size*2**len(layers) <= res/2:
print('adding mid layer', len(layers))
layers.append(MidLevel(feature_dim, feature_dim, layers[-1],
block_size, thresh=0.1))
mod = TopLevel(feature_dim, layers[-1], block_size=block_size)
return mod
def test_simple_split(res=64, block_size=block_size):
dataset = TsdfGenerator(res, n_elips=3, sigma=0.9, epoch_size=1000, debug=True)
vis = visdom.Visdom()
mod = torch.load('model.pth')
if device == 'cuda':
mod.cuda()
mod.eval()
gt, tsdf_in = dataset.__getitem_split__()
gt = torch.from_numpy(gt[None, :])
tsdf_in = torch.from_numpy(tsdf_in[None, :])
gt_label = torch.zeros_like(gt, device=device)
gt_label[gt >= 0] = 1
gt_label = gt_label.long().to(device)
criteria = OctreeCrossEntropyLoss(gt_label, block_size)
if device == 'cuda':
criteria.cuda()
tsdf = tsdf_in.float().to(device)
pred = mod(tsdf)
loss = criteria(pred)
print(loss.data)
print('evaluated ', [len(o) for o in pred])
for X in pred[0]:
X_ = tuple(np.array(X)//2)
print (X, pred[1][X_])
assert pred[1][X_][0,2]>0.5
sdf_ = octree_to_sdf(pred, block_size)
err = plotVoxelVisdom(gt[0].numpy(), sdf_, tsdf_in[0][0].numpy(), vis)
import ipdb; ipdb.set_trace()
for X,v in criteria.gt_octree[0].items():
if v.numel()>1:
assert X[2]==1
def test_split_subtree(padding=0):
feat = torch.rand(1, feature_dim, block_size+2*padding,
block_size+2*padding,
block_size+2*padding
).float()
split = split_tree(feat,padding=padding)
assert len(split) == 8, len(split)
assert torch.all(split[(0, 0, 0)][0, :, padding, padding, padding] ==
feat[0, :, padding, padding, padding])
assert torch.all(split[(1, 0, 0)][0, :, padding, padding, padding] ==
feat[0, :, block_size//2+padding, padding, padding])
split[(1, 0, 0)][0, 0, padding, padding, padding] = 12.13
#this is no longer true, I don't know how to do this inplace
def test_split_subtree_with_padding():
padding=2
feat = torch.rand(1, feature_dim, block_size, block_size,
block_size).float()
split = split_tree(feat, padding=2)
assert len(split) == 8, len(split)
octant = split[(0,0,0)]
assert torch.all(octant[0, :padding, 0, 0, 0] == 0)
assert torch.all(octant[0, -padding:, 0, 0, 0] == 0)
assert octant.shape[-3:]==feat.shape[-3:]//2+padding*2
assert torch.all(octant[0, padding:-padding, 0, 0, 0] == feat[0, :, 0, 0, 0])
assert torch.all(octant[0, padding:-padding, 0, 0, 0] == feat[0, :, 0, 0, 0])
assert torch.all(split[(1, 0, 0)][0, :, padding, padding, padding] ==
feat[0, :, block_size//2, 0, 0])
split[(1, 0, 0)][0, 0, 0, 0, 0] = 12.13
assert feat[0, 0, block_size//2+padding, 0, 0] == 12.13
if __name__ == '__main__':
import sys
logger.remove()
logger.add(sys.stderr , format="{time} {level} {message}", level="INFO")
test_2tier_net(res=64, block_size=block_size)
test_simple_split(res=64, block_size=block_size)
import ipdb; ipdb.set_trace()
test_2tier_net(res=128, block_size=block_size)
| true | true |
f710aff6c2d00b414cf21367b621f613665ccf10 | 14,123 | py | Python | sdk/search/azure-search-documents/azure/search/documents/_internal/aio/_search_indexing_buffered_sender_async.py | adewaleo/azure-sdk-for-python | 169457edbea5e3c5557246cfcf8bd635d528bae4 | [
"MIT"
] | 1 | 2020-03-05T18:10:35.000Z | 2020-03-05T18:10:35.000Z | sdk/search/azure-search-documents/azure/search/documents/_internal/aio/_search_indexing_buffered_sender_async.py | adewaleo/azure-sdk-for-python | 169457edbea5e3c5557246cfcf8bd635d528bae4 | [
"MIT"
] | 2 | 2020-03-03T23:11:13.000Z | 2020-03-30T18:50:55.000Z | sdk/search/azure-search-documents/azure/search/documents/_internal/aio/_search_indexing_buffered_sender_async.py | adewaleo/azure-sdk-for-python | 169457edbea5e3c5557246cfcf8bd635d528bae4 | [
"MIT"
] | 1 | 2021-05-19T02:55:10.000Z | 2021-05-19T02:55:10.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import cast, List, TYPE_CHECKING
import time
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.exceptions import ServiceResponseTimeoutError
from ._timer import Timer
from .._utils import is_retryable_status_code
from .._search_indexing_buffered_sender_base import SearchIndexingBufferedSenderBase
from ...indexes.aio import SearchIndexClient as SearchServiceClient
from .._generated.aio import SearchIndexClient
from .._generated.models import IndexBatch, IndexingResult
from .._search_documents_error import RequestEntityTooLargeError
from ._index_documents_batch_async import IndexDocumentsBatch
from ..._headers_mixin import HeadersMixin
from ..._version import SDK_MONIKER
if TYPE_CHECKING:
# pylint:disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import AzureKeyCredential
class SearchIndexingBufferedSender(SearchIndexingBufferedSenderBase, HeadersMixin):
    """A buffered sender for document indexing actions.

    :param endpoint: The URL endpoint of an Azure search service
    :type endpoint: str
    :param index_name: The name of the index to connect to
    :type index_name: str
    :param credential: A credential to authorize search client requests
    :type credential: ~azure.core.credentials.AzureKeyCredential
    :keyword bool auto_flush: if the auto flush mode is on. Default to True.
    :keyword int auto_flush_interval: how many max seconds if between 2 flushes. This only takes effect
        when auto_flush is on. Default to 60 seconds. If a non-positive number is set, it will be default
        to 86400s (1 day)
    :keyword int initial_batch_action_count: The initial number of actions to group into a batch when
        tuning the behavior of the sender. The default value is 512.
    :keyword int max_retries: The number of times to retry a failed document. The default value is 3.
    :keyword callable on_new: If it is set, the client will call corresponding methods when there
        is a new IndexAction added.
    :keyword callable on_progress: If it is set, the client will call corresponding methods when there
        is a IndexAction succeeds.
    :keyword callable on_error: If it is set, the client will call corresponding methods when there
        is a IndexAction fails.
    :keyword callable on_remove: If it is set, the client will call corresponding methods when there
        is a IndexAction removed from the queue (succeeds or fails).
    :keyword str api_version: The Search API version to use for requests.
    """
    # pylint: disable=too-many-instance-attributes

    def __init__(self, endpoint, index_name, credential, **kwargs):
        # type: (str, str, AzureKeyCredential, **Any) -> None
        super(SearchIndexingBufferedSender, self).__init__(
            endpoint=endpoint,
            index_name=index_name,
            credential=credential,
            **kwargs)
        self._index_documents_batch = IndexDocumentsBatch()
        self._client = SearchIndexClient(
            endpoint=endpoint, index_name=index_name, sdk_moniker=SDK_MONIKER, **kwargs
        )  # type: SearchIndexClient
        self._reset_timer()

    async def _cleanup(self, flush=True):
        # type: (bool) -> None
        """Clean up the client.

        :param bool flush: flush the actions queue before shutdown the client
            Default to True.
        """
        if flush:
            await self.flush()
        if self._auto_flush:
            self._timer.cancel()

    def __repr__(self):
        # type: () -> str
        return "<SearchIndexingBufferedSender [endpoint={}, index={}]>".format(
            repr(self._endpoint), repr(self._index_name)
        )[:1024]

    @property
    def actions(self):
        # type: () -> List[IndexAction]
        """The list of currently index actions in queue to index.

        :rtype: List[IndexAction]
        """
        return self._index_documents_batch.actions

    @distributed_trace_async
    async def close(self, **kwargs):  # pylint: disable=unused-argument
        # type: (**Any) -> None
        """Close the :class:`~azure.search.documents.aio.SearchClient` session."""
        await self._cleanup(flush=True)
        return await self._client.close()

    @distributed_trace_async
    async def flush(self, timeout=86400, **kwargs):  # pylint:disable=unused-argument
        # type: (int, **Any) -> bool
        """Flush the batch.

        :param int timeout: time out setting. Default is 86400s (one day)
        :return: True if there are errors. Else False
        :rtype: bool
        :raises ~azure.core.exceptions.ServiceResponseTimeoutError: if the
            queue cannot be drained within the timeout
        """
        has_error = False
        begin_time = int(time.time())
        # keep draining until the queue is empty or the deadline passes
        while len(self.actions) > 0:
            now = int(time.time())
            remaining = timeout - (now - begin_time)
            if remaining < 0:
                raise ServiceResponseTimeoutError("Service response time out")
            result = await self._process(timeout=remaining, raise_error=False)
            if result:
                has_error = True
        return has_error

    async def _process(self, timeout=86400, **kwargs):
        # type: (int, **Any) -> bool
        """Send the queued actions to the service once; re-queue retryable
        failures and fire the user callbacks. Returns True if any action failed.
        """
        raise_error = kwargs.pop("raise_error", True)
        actions = await self._index_documents_batch.dequeue_actions()
        has_error = False
        # lazily discover the index key field so results can be matched
        # back to their originating actions; best-effort by design
        if not self._index_key:
            try:
                client = SearchServiceClient(self._endpoint, self._credential)
                result = await client.get_index(self._index_name)
                if result:
                    for field in result.fields:
                        if field.key:
                            self._index_key = field.name
                            break
            except Exception:  # pylint: disable=broad-except
                pass

        self._reset_timer()

        try:
            results = await self._index_documents_actions(actions=actions, timeout=timeout)
            for result in results:
                try:
                    action = next(x for x in actions if x.additional_properties.get(self._index_key) == result.key)
                    if result.succeeded:
                        await self._callback_succeed(action)
                    elif is_retryable_status_code(result.status_code):
                        await self._retry_action(action)
                        has_error = True
                    else:
                        await self._callback_fail(action)
                        has_error = True
                except StopIteration:
                    # no matching queued action for this result key; skip it
                    pass
            return has_error
        except Exception:  # pylint: disable=broad-except
            # the whole batch failed: put every action back on the retry path
            for action in actions:
                await self._retry_action(action)
            if raise_error:
                raise
            return True

    async def _process_if_needed(self):
        # type: () -> None
        """ Every time when a new action is queued, this method
        will be triggered. It checks the actions already queued and flushes them if:
        1. Auto_flush is on
        2. There are self._batch_action_count actions queued
        """
        if not self._auto_flush:
            return

        if len(self._index_documents_batch.actions) < self._batch_action_count:
            return

        await self._process(raise_error=False)

    def _reset_timer(self):
        # (Re)arm the periodic auto-flush timer.
        # pylint: disable=access-member-before-definition
        try:
            self._timer.cancel()
        except AttributeError:
            pass
        if self._auto_flush:
            self._timer = Timer(self._auto_flush_interval, self._process)

    @distributed_trace_async
    async def upload_documents(self, documents, **kwargs):  # pylint: disable=unused-argument
        # type: (List[dict], **Any) -> None
        """Queue upload documents actions.

        :param documents: A list of documents to upload.
        :type documents: List[dict]
        """
        actions = await self._index_documents_batch.add_upload_actions(documents)
        await self._callback_new(actions)
        await self._process_if_needed()

    @distributed_trace_async
    async def delete_documents(self, documents, **kwargs):  # pylint: disable=unused-argument
        # type: (List[dict], **Any) -> None
        """Queue delete documents actions

        :param documents: A list of documents to delete.
        :type documents: List[dict]
        """
        actions = await self._index_documents_batch.add_delete_actions(documents)
        await self._callback_new(actions)
        await self._process_if_needed()

    @distributed_trace_async
    async def merge_documents(self, documents, **kwargs):  # pylint: disable=unused-argument
        # type: (List[dict], **Any) -> None
        """Queue merge documents actions

        :param documents: A list of documents to merge.
        :type documents: List[dict]
        """
        actions = await self._index_documents_batch.add_merge_actions(documents)
        await self._callback_new(actions)
        await self._process_if_needed()

    @distributed_trace_async
    async def merge_or_upload_documents(self, documents, **kwargs):  # pylint: disable=unused-argument
        # type: (List[dict], **Any) -> None
        """Queue merge documents or upload documents actions

        :param documents: A list of documents to merge or upload.
        :type documents: List[dict]
        """
        actions = await self._index_documents_batch.add_merge_or_upload_actions(documents)
        await self._callback_new(actions)
        await self._process_if_needed()

    @distributed_trace_async
    async def index_documents(self, batch, **kwargs):
        # type: (IndexDocumentsBatch, **Any) -> List[IndexingResult]
        """Specify a document operations to perform as a batch.

        :param batch: A batch of document operations to perform.
        :type batch: IndexDocumentsBatch
        :rtype: List[IndexingResult]
        :raises :class:`~azure.search.documents.RequestEntityTooLargeError`
        """
        return await self._index_documents_actions(actions=batch.actions, **kwargs)

    async def _index_documents_actions(self, actions, **kwargs):
        # type: (List[IndexAction], **Any) -> List[IndexingResult]
        """Send actions to the service, splitting the batch in half and
        recursing when the payload is too large (HTTP 413).

        Fixes over the previous revision:
        - the recursive calls passed ``error_map=error_map`` through **kwargs,
          which collided with the explicit ``error_map=`` at the inner
          ``documents.index`` call and raised a duplicate-keyword TypeError;
        - the recursion already returns List[IndexingResult], so dereferencing
          ``.results`` on it raised AttributeError;
        - ``list.extend`` returns None, so the method returned None instead of
          the combined results;
        - the remaining timeout is now forwarded into the recursion so the
          overall deadline is honored.
        """
        error_map = {413: RequestEntityTooLargeError}
        timeout = kwargs.pop('timeout', 86400)
        begin_time = int(time.time())
        kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
        try:
            index_documents = IndexBatch(actions=actions)
            batch_response = await self._client.documents.index(batch=index_documents, error_map=error_map, **kwargs)
            return cast(List[IndexingResult], batch_response.results)
        except RequestEntityTooLargeError:
            if len(actions) == 1:
                # cannot split further; surface the error to the caller
                raise
            pos = round(len(actions) / 2)

            now = int(time.time())
            remaining = timeout - (now - begin_time)
            if remaining < 0:
                raise ServiceResponseTimeoutError("Service response time out")
            result_first_half = await self._index_documents_actions(
                actions=actions[:pos],
                timeout=remaining,
                **kwargs
            ) or []

            now = int(time.time())
            remaining = timeout - (now - begin_time)
            if remaining < 0:
                raise ServiceResponseTimeoutError("Service response time out")
            result_second_half = await self._index_documents_actions(
                actions=actions[pos:],
                timeout=remaining,
                **kwargs
            ) or []

            return result_first_half + result_second_half

    async def __aenter__(self):
        # type: () -> SearchIndexingBufferedSender
        await self._client.__aenter__()  # pylint: disable=no-member
        return self

    async def __aexit__(self, *args):
        # type: (*Any) -> None
        await self.close()
        await self._client.__aexit__(*args)  # pylint: disable=no-member

    async def _retry_action(self, action):
        # type: (IndexAction) -> None
        """Re-queue a failed action until its per-key retry budget is spent,
        then report it as failed. Without an index key the action cannot be
        tracked and is failed immediately."""
        if not self._index_key:
            await self._callback_fail(action)
            return
        key = action.additional_properties.get(self._index_key)
        counter = self._retry_counter.get(key)
        if not counter:
            # first time that fails
            self._retry_counter[key] = 1
            await self._index_documents_batch.enqueue_action(action)
        elif counter < self._max_retries - 1:
            # not reach retry limit yet
            self._retry_counter[key] = counter + 1
            await self._index_documents_batch.enqueue_action(action)
        else:
            await self._callback_fail(action)

    async def _callback_succeed(self, action):
        # type: (IndexAction) -> None
        # fire user callbacks for a successfully indexed action
        if self._on_remove:
            await self._on_remove(action)
        if self._on_progress:
            await self._on_progress(action)

    async def _callback_fail(self, action):
        # type: (IndexAction) -> None
        # fire user callbacks for an action that is permanently failed
        if self._on_remove:
            await self._on_remove(action)
        if self._on_error:
            await self._on_error(action)

    async def _callback_new(self, actions):
        # type: (List[IndexAction]) -> None
        # notify the user callback for each newly queued action
        if self._on_new:
            for action in actions:
                await self._on_new(action)
| 42.032738 | 117 | 0.631523 |
from typing import cast, List, TYPE_CHECKING
import time
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.exceptions import ServiceResponseTimeoutError
from ._timer import Timer
from .._utils import is_retryable_status_code
from .._search_indexing_buffered_sender_base import SearchIndexingBufferedSenderBase
from ...indexes.aio import SearchIndexClient as SearchServiceClient
from .._generated.aio import SearchIndexClient
from .._generated.models import IndexBatch, IndexingResult
from .._search_documents_error import RequestEntityTooLargeError
from ._index_documents_batch_async import IndexDocumentsBatch
from ..._headers_mixin import HeadersMixin
from ..._version import SDK_MONIKER
if TYPE_CHECKING:
from typing import Any
from azure.core.credentials import AzureKeyCredential
class SearchIndexingBufferedSender(SearchIndexingBufferedSenderBase, HeadersMixin):
    """Asynchronous sender that buffers index actions and pushes them in batches.

    Actions queued via ``upload_documents`` / ``delete_documents`` /
    ``merge_documents`` / ``merge_or_upload_documents`` accumulate in an
    :class:`IndexDocumentsBatch` and are submitted either when the buffer
    reaches the configured batch size or when the auto-flush timer fires.
    Optional ``_on_new`` / ``_on_progress`` / ``_on_error`` / ``_on_remove``
    hooks (set up by the base class) are awaited as actions move through
    the pipeline.
    """

    def __init__(self, endpoint, index_name, credential, **kwargs):
        """Create the sender and arm the auto-flush timer (if enabled)."""
        super(SearchIndexingBufferedSender, self).__init__(
            endpoint=endpoint,
            index_name=index_name,
            credential=credential,
            **kwargs)
        self._index_documents_batch = IndexDocumentsBatch()
        self._client = SearchIndexClient(
            endpoint=endpoint, index_name=index_name, sdk_moniker=SDK_MONIKER, **kwargs
        )
        self._reset_timer()

    async def _cleanup(self, flush=True):
        """Optionally flush pending actions, then stop the auto-flush timer."""
        if flush:
            await self.flush()
        if self._auto_flush:
            self._timer.cancel()

    def __repr__(self):
        return "<SearchIndexingBufferedSender [endpoint={}, index={}]>".format(
            repr(self._endpoint), repr(self._index_name)
        )[:1024]

    @property
    def actions(self):
        """Index actions currently buffered and awaiting submission."""
        return self._index_documents_batch.actions

    @distributed_trace_async
    async def close(self, **kwargs):
        """Flush outstanding work and close the underlying client session."""
        await self._cleanup(flush=True)
        return await self._client.close()

    @distributed_trace_async
    async def flush(self, timeout=86400, **kwargs):
        """Submit all buffered actions.

        :param timeout: overall budget in seconds for draining the buffer.
        :return: True if any action ultimately failed, False otherwise.
        :raises ServiceResponseTimeoutError: when *timeout* elapses first.
        """
        has_error = False
        begin_time = int(time.time())
        while len(self.actions) > 0:
            now = int(time.time())
            remaining = timeout - (now - begin_time)
            if remaining < 0:
                raise ServiceResponseTimeoutError("Service response time out")
            result = await self._process(timeout=remaining, raise_error=False)
            if result:
                has_error = True
        return has_error

    async def _process(self, timeout=86400, **kwargs):
        """Submit one batch of dequeued actions; return True if any failed."""
        raise_error = kwargs.pop("raise_error", True)
        actions = await self._index_documents_batch.dequeue_actions()
        has_error = False
        if not self._index_key:
            # Lazily discover the index's key field so per-document results
            # can be correlated back to the originating actions.
            try:
                client = SearchServiceClient(self._endpoint, self._credential)
                result = await client.get_index(self._index_name)
                if result:
                    for field in result.fields:
                        if field.key:
                            self._index_key = field.name
                            break
            except Exception:
                # Best effort only: without a key, failures are not retried.
                pass
        self._reset_timer()
        try:
            results = await self._index_documents_actions(actions=actions, timeout=timeout)
            for result in results:
                try:
                    action = next(x for x in actions if x.additional_properties.get(self._index_key) == result.key)
                    if result.succeeded:
                        await self._callback_succeed(action)
                    elif is_retryable_status_code(result.status_code):
                        await self._retry_action(action)
                        has_error = True
                    else:
                        await self._callback_fail(action)
                        has_error = True
                except StopIteration:
                    # No queued action matched this result's key; skip it.
                    pass
            return has_error
        except Exception:
            # The whole submission failed; re-queue every action for retry.
            for action in actions:
                await self._retry_action(action)
            if raise_error:
                raise
            return True

    async def _process_if_needed(self):
        """Flush automatically once the buffer reaches the batch size."""
        if not self._auto_flush:
            return
        if len(self._index_documents_batch.actions) < self._batch_action_count:
            return
        await self._process(raise_error=False)

    def _reset_timer(self):
        # Cancel any previous timer before arming a new one (first call has
        # no _timer attribute yet, hence the AttributeError guard).
        try:
            self._timer.cancel()
        except AttributeError:
            pass
        if self._auto_flush:
            self._timer = Timer(self._auto_flush_interval, self._process)

    @distributed_trace_async
    async def upload_documents(self, documents, **kwargs):
        """Queue *documents* for upload."""
        actions = await self._index_documents_batch.add_upload_actions(documents)
        await self._callback_new(actions)
        await self._process_if_needed()

    @distributed_trace_async
    async def delete_documents(self, documents, **kwargs):
        """Queue *documents* for deletion."""
        actions = await self._index_documents_batch.add_delete_actions(documents)
        await self._callback_new(actions)
        await self._process_if_needed()

    @distributed_trace_async
    async def merge_documents(self, documents, **kwargs):
        """Queue *documents* to be merged into existing documents."""
        actions = await self._index_documents_batch.add_merge_actions(documents)
        await self._callback_new(actions)
        await self._process_if_needed()

    @distributed_trace_async
    async def merge_or_upload_documents(self, documents, **kwargs):
        """Queue *documents* to be merged if present, uploaded otherwise."""
        actions = await self._index_documents_batch.add_merge_or_upload_actions(documents)
        await self._callback_new(actions)
        await self._process_if_needed()

    @distributed_trace_async
    async def index_documents(self, batch, **kwargs):
        """Submit an explicit IndexDocumentsBatch immediately (no buffering)."""
        return await self._index_documents_actions(actions=batch.actions, **kwargs)

    async def _index_documents_actions(self, actions, **kwargs):
        """Send *actions* to the service, splitting the batch on HTTP 413.

        :return: list of IndexingResult for all submitted actions.
        :raises ServiceResponseTimeoutError: when recursive splitting
            exceeds the overall timeout budget.
        """
        error_map = {413: RequestEntityTooLargeError}
        timeout = kwargs.pop('timeout', 86400)
        begin_time = int(time.time())
        kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
        try:
            index_documents = IndexBatch(actions=actions)
            batch_response = await self._client.documents.index(batch=index_documents, error_map=error_map, **kwargs)
            return cast(List[IndexingResult], batch_response.results)
        except RequestEntityTooLargeError:
            if len(actions) == 1:
                raise
            pos = round(len(actions) / 2)
            now = int(time.time())
            remaining = timeout - (now - begin_time)
            if remaining < 0:
                raise ServiceResponseTimeoutError("Service response time out")
            # BUG FIX: the recursive call already returns a plain list of
            # IndexingResult; the original code read a ``.results`` attribute
            # off it, which would raise AttributeError on any split.
            result_first_half = await self._index_documents_actions(
                actions=actions[:pos],
                error_map=error_map,
                **kwargs
            ) or []
            now = int(time.time())
            remaining = timeout - (now - begin_time)
            if remaining < 0:
                raise ServiceResponseTimeoutError("Service response time out")
            result_second_half = await self._index_documents_actions(
                actions=actions[pos:],
                error_map=error_map,
                **kwargs
            ) or []
            # BUG FIX: list.extend() mutates in place and returns None; the
            # original ``return first.extend(second)`` handed None to callers.
            return result_first_half + result_second_half

    async def __aenter__(self):
        """Open the underlying client session and return this sender."""
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *args):
        """Drain and close this sender, then close the underlying client."""
        await self.close()
        await self._client.__aexit__(*args)

    async def _retry_action(self, action):
        """Re-queue a failed action, or fail it once retries are exhausted."""
        if not self._index_key:
            # No key field: retries cannot be tracked per document.
            await self._callback_fail(action)
            return
        key = action.additional_properties.get(self._index_key)
        counter = self._retry_counter.get(key)
        if not counter:
            # First failure for this document.
            self._retry_counter[key] = 1
            await self._index_documents_batch.enqueue_action(action)
        elif counter < self._max_retries - 1:
            # Still under the retry limit.
            self._retry_counter[key] = counter + 1
            await self._index_documents_batch.enqueue_action(action)
        else:
            await self._callback_fail(action)

    async def _callback_succeed(self, action):
        """Fire removal then progress hooks for a successfully indexed action."""
        if self._on_remove:
            await self._on_remove(action)
        if self._on_progress:
            await self._on_progress(action)

    async def _callback_fail(self, action):
        """Fire removal then error hooks for a permanently failed action."""
        if self._on_remove:
            await self._on_remove(action)
        if self._on_error:
            await self._on_error(action)

    async def _callback_new(self, actions):
        """Fire the on-new hook once per freshly queued action (if set)."""
        if self._on_new:
            for action in actions:
                await self._on_new(action)
| true | true |
f710b041e326ba3e1326272da4562dee10691198 | 22,120 | py | Python | reports/migrations/0002_populate_weights.py | digideskio/gmmp | d82a4be0787c3a3a9e27dc590d7974f9f884fbb6 | [
"Apache-2.0"
] | 2 | 2015-04-02T23:09:03.000Z | 2015-12-03T00:19:06.000Z | reports/migrations/0002_populate_weights.py | digideskio/gmmp | d82a4be0787c3a3a9e27dc590d7974f9f884fbb6 | [
"Apache-2.0"
] | 13 | 2015-04-01T07:39:43.000Z | 2015-08-26T06:24:07.000Z | reports/migrations/0002_populate_weights.py | OpenUpSA/gmmp | d82a4be0787c3a3a9e27dc590d7974f9f884fbb6 | [
"Apache-2.0"
] | 2 | 2019-07-25T11:53:10.000Z | 2020-06-22T02:07:40.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django_countries import countries
def populate_weights(apps, schema_editor):
    """Create one Weights row per (country, media type) pair from COUNTRY_WEIGHTS.

    Each entry in COUNTRY_WEIGHTS maps a country code plus a region label to
    per-media-type weights; the Country/Region keys are popped so the
    remaining keys are exactly the media types to persist.
    """
    Weights = apps.get_model("reports", "Weights")
    db_alias = schema_editor.connection.alias
    for item in COUNTRY_WEIGHTS:
        country = item.pop('Country')
        item.pop('Region')
        # BUG FIX: dict.iteritems() is Python-2-only; items() works on both.
        # The original guard (media_type != 'Country' or media_type != 'Region')
        # was a tautology and dead anyway since both keys are popped above.
        for media_type, weight in item.items():
            # create() already saves the row; no extra save() needed.
            Weights.objects.using(db_alias).create(
                country=country,
                media_type=media_type,
                weight=weight)
def backwards(apps, schema_editor):
    """Reverse no-op: weights inserted by populate_weights are left in place."""
    pass
class Migration(migrations.Migration):
    """Data migration that seeds the reports.Weights table from COUNTRY_WEIGHTS."""

    # Must run after the initial reports schema so the Weights table exists.
    dependencies = [
        ('reports', '0001_initial'),
    ]
    # RunPython pairs the forward seeding with a no-op reverse.
    operations = [
        migrations.RunPython(
            populate_weights,
            backwards,
        ),
    ]
COUNTRY_WEIGHTS= [{'Country': 'AF',
'Internet': '0.37',
'Print': '0.33',
'Radio': '0.93',
'Region': 'Asia',
'Television': '0.93',
'Twitter': 1},
{'Country': 'AL',
'Internet': '0.36',
'Print': '1.02',
'Radio': '0.30',
'Region': 'Europe',
'Television': '0.30',
'Twitter': 1},
{'Country': 'AG',
'Internet': '0.08',
'Print': '0.68',
'Radio': '0.05',
'Region': 'Caribbean',
'Television': '0.05',
'Twitter': 1},
{'Country': 'AR',
'Internet': '1.34',
'Print': '0.74',
'Radio': '1.07',
'Region': 'Latin America',
'Television': '1.07',
'Twitter': 1},
{'Country': 'AM',
'Internet': '0.31',
'Print': '1.02',
'Radio': '0.29',
'Region': 'Europe',
'Television': '0.29',
'Twitter': 1},
{'Country': 'AU',
'Internet': '1.23',
'Print': '0.98',
'Radio': '0.81',
'Region': 'Pacific Islands',
'Television': '0.81',
'Twitter': 1},
{'Country': 'AT',
'Internet': '0.72',
'Print': '0.58',
'Radio': '0.48',
'Region': 'Europe',
'Television': '0.48',
'Twitter': 1},
{'Country': 'BS',
'Internet': '0.15',
'Print': '0.18',
'Radio': '0.10',
'Region': 'Caribbean',
'Television': '0.10',
'Twitter': 1},
{'Country': 'BD',
'Internet': '0.88',
'Print': '3.63',
'Radio': '2.09',
'Region': 'Asia',
'Television': '2.09',
'Twitter': 1},
{'Country': 'BB',
'Internet': '0.13',
'Print': '0.13',
'Radio': '0.09',
'Region': 'Caribbean',
'Television': '0.09',
'Twitter': 1},
{'Country': 'BY',
'Internet': '0.59',
'Print': '0.47',
'Radio': '0.51',
'Region': 'Europe',
'Television': '0.51',
'Twitter': 1},
{'Country': 'BE',
'Internet': '0.82',
'Print': '0.70',
'Radio': '0.55',
'Region': 'Europe',
'Television': '0.55',
'Twitter': 1},
{'Country': 'BZ',
'Internet': '0.08',
'Print': '0.68',
'Radio': '0.10',
'Region': 'Caribbean',
'Television': '0.10',
'Twitter': 1},
{'Country': 'BJ',
'Internet': '0.18',
'Print': '0.03',
'Radio': '0.54',
'Region': 'Africa',
'Television': '0.54',
'Twitter': 1},
{'Country': 'BT',
'Internet': '0.12',
'Print': '0.68',
'Radio': '0.14',
'Region': 'Asia',
'Television': '0.14',
'Twitter': 1},
{'Country': 'BO',
'Internet': '0.53',
'Print': '0.42',
'Radio': '0.55',
'Region': 'Latin America',
'Television': '0.55',
'Twitter': 1},
{'Country': 'BA',
'Internet': '0.43',
'Print': '0.68',
'Radio': '0.32',
'Region': 'Europe',
'Television': '0.32',
'Twitter': 1},
{'Country': 'BW',
'Internet': '0.14',
'Print': '0.18',
'Radio': '0.24',
'Region': 'Africa',
'Television': '0.24',
'Twitter': 1},
{'Country': 'BR',
'Internet': '2.78',
'Print': '1.64',
'Radio': '2.35',
'Region': 'Latin America',
'Television': '2.35',
'Twitter': 1},
{'Country': 'BG',
'Internet': '0.54',
'Print': '0.41',
'Radio': '0.44',
'Region': 'Europe',
'Television': '0.44',
'Twitter': 1},
{'Country': 'BF',
'Internet': '0.23',
'Print': '0.10',
'Radio': '0.69',
'Region': 'Africa',
'Television': '0.69',
'Twitter': 1},
{'Country': 'BI',
'Internet': '0.10',
'Print': '0.10',
'Radio': '0.54',
'Region': 'Africa',
'Television': '0.54',
'Twitter': 1},
{'Country': 'CM',
'Internet': '0.33',
'Print': '0.17',
'Radio': '0.79',
'Region': 'Africa',
'Television': '0.79',
'Twitter': 1},
{'Country': 'CA',
'Internet': '1.54',
'Print': '1.31',
'Radio': '0.99',
'Region': 'North America',
'Television': '0.99',
'Twitter': 1},
{'Country': 'CV',
'Internet': '0.12',
'Print': '0.18',
'Radio': '0.12',
'Region': 'Africa',
'Television': '0.12',
'Twitter': 1},
{'Country': 'CF',
'Internet': '0.11',
'Print': '0.68',
'Radio': '0.36',
'Region': 'Africa',
'Television': '0.36',
'Twitter': 1},
{'Country': 'TD',
'Internet': '0.15',
'Print': '0.00',
'Radio': '0.60',
'Region': 'Africa',
'Television': '0.60',
'Twitter': 1},
{'Country': 'CL',
'Internet': '0.92',
'Print': '0.37',
'Radio': '0.70',
'Region': 'Latin America',
'Television': '0.70',
'Twitter': 1},
{'Country': 'CN',
'Internet': '6.79',
'Print': '6.23',
'Radio': '6.18',
'Region': 'Asia',
'Television': '6.18',
'Twitter': 1},
{'Country': 'CO',
'Internet': '1.36',
'Print': '0.66',
'Radio': '1.16',
'Region': 'Latin America',
'Television': '1.16',
'Twitter': 1},
{'Country': 'KM',
'Internet': '0.06',
'Print': '0.68',
'Radio': '0.14',
'Region': 'Africa',
'Television': '0.14',
'Twitter': 1},
{'Country': 'CD',
'Internet': '0.08',
'Print': '0.28',
'Radio': '0.35',
'Region': 'Africa',
'Television': '0.35',
'Twitter': 1},
{'Country': 'CG',
'Internet': '0.33',
'Print': '0.11',
'Radio': '0.36',
'Region': 'Africa',
'Television': '0.36',
'Twitter': 1},
{'Country': 'CR',
'Internet': '0.42',
'Print': '0.34',
'Radio': '0.37',
'Region': 'Latin America',
'Television': '0.37',
'Twitter': 1},
{'Country': 'HR',
'Internet': '0.45',
'Print': '0.41',
'Radio': '0.34',
'Region': 'Europe',
'Television': '0.34',
'Twitter': 1},
{'Country': 'CU',
'Internet': '0.47',
'Print': '0.12',
'Radio': '0.56',
'Region': 'Caribbean',
'Television': '0.56',
'Twitter': 1},
{'Country': 'CY',
'Internet': '0.23',
'Print': '0.13',
'Radio': '0.18',
'Region': 'Middle East',
'Television': '0.18',
'Twitter': 1},
{'Country': 'DK',
'Internet': '0.50',
'Print': '0.74',
'Radio': '0.39',
'Region': 'Europe',
'Television': '0.39',
'Twitter': 1},
{'Country': 'DO',
'Internet': '0.60',
'Print': '0.68',
'Radio': '0.54',
'Region': 'Caribbean',
'Television': '0.54',
'Twitter': 1},
{'Country': 'EC',
'Internet': '0.66',
'Print': '0.72',
'Radio': '0.66',
'Region': 'Latin America',
'Television': '0.66',
'Twitter': 1},
{'Country': 'EG',
'Internet': '1.70',
'Print': '1.43',
'Radio': '1.51',
'Region': 'Middle East',
'Television': '1.51',
'Twitter': 1},
{'Country': 'SV',
'Internet': '0.35',
'Print': '0.32',
'Radio': '0.42',
'Region': 'Latin America',
'Television': '0.42',
'Twitter': 1},
{'Country': 'GQ',
'Internet': '0.09',
'Print': '0.68',
'Radio': '0.15',
'Region': 'Africa',
'Television': '0.15',
'Twitter': 1},
{'Country': 'EE',
'Internet': '0.27',
'Print': '0.27',
'Radio': '0.19',
'Region': 'Europe',
'Television': '0.19',
'Twitter': 1},
{'Country': 'ET',
'Internet': '0.34',
'Print': '0.39',
'Radio': '1.63',
'Region': 'Africa',
'Television': '1.63',
'Twitter': 1},
{'Country': 'FJ',
'Internet': '0.15',
'Print': '0.12',
'Radio': '0.16',
'Region': 'Pacific Islands',
'Television': '0.16',
'Twitter': 1},
{'Country': 'FI',
'Internet': '0.61',
'Print': '0.03',
'Radio': '0.39',
'Region': 'Europe',
'Television': '0.39',
'Twitter': 1},
{'Country': 'FR',
'Internet': '1.99',
'Print': '1.69',
'Radio': '1.33',
'Region': 'Europe',
'Television': '1.33',
'Twitter': 1},
{'Country': 'GA',
'Internet': '0.11',
'Print': '0.58',
'Radio': '0.22',
'Region': 'Africa',
'Television': '0.22',
'Twitter': 1},
{'Country': 'GM',
'Internet': '0.14',
'Print': '0.04',
'Radio': '0.23',
'Region': 'Africa',
'Television': '0.23',
'Twitter': 1},
{'Country': 'GE',
'Internet': '0.40',
'Print': '1.02',
'Radio': '0.34',
'Region': 'Europe',
'Television': '0.34',
'Twitter': 1},
{'Country': 'DE',
'Internet': '2.27',
'Print': '2.50',
'Radio': '1.51',
'Region': 'Europe',
'Television': '1.51',
'Twitter': 1},
{'Country': 'GH',
'Internet': '0.61',
'Print': '0.39',
'Radio': '0.85',
'Region': 'Africa',
'Television': '0.85',
'Twitter': 1},
{'Country': 'GR',
'Internet': '0.68',
'Print': '0.44',
'Radio': '0.55',
'Region': 'Europe',
'Television': '0.55',
'Twitter': 1},
{'Country': 'GD',
'Internet': '0.06',
'Print': '0.68',
'Radio': '0.05',
'Region': 'Caribbean',
'Television': '0.05',
'Twitter': 1},
{'Country': 'GT',
'Internet': '0.44',
'Print': '0.38',
'Radio': '0.66',
'Region': 'Latin America',
'Television': '0.66',
'Twitter': 1},
{'Country': 'GW',
'Internet': '0.06',
'Print': '0.68',
'Radio': '0.22',
'Region': 'Africa',
'Television': '0.22',
'Twitter': 1},
{'Country': 'GN',
'Internet': '0.68',
'Print': '1.67',
'Radio': '0.56',
'Region': 'Africa',
'Television': '0.56',
'Twitter': 1},
{'Country': 'GY',
'Internet': '0.15',
'Print': '0.15',
'Radio': '0.15',
'Region': 'Caribbean',
'Television': '0.15',
'Twitter': 1},
{'Country': 'HT',
'Internet': '0.30',
'Print': '0.17',
'Radio': '0.54',
'Region': 'Caribbean',
'Television': '0.54',
'Twitter': 1},
{'Country': 'HU',
'Internet': '0.73',
'Print': '0.68',
'Radio': '0.52',
'Region': 'Europe',
'Television': '0.52',
'Twitter': 1},
{'Country': 'IS',
'Internet': '0.15',
'Print': '0.18',
'Radio': '0.10',
'Region': 'Europe',
'Television': '0.10',
'Twitter': 1},
{'Country': 'IN',
'Internet': '4.18',
'Print': '5.72',
'Radio': '5.90',
'Region': 'Asia',
'Television': '5.90',
'Twitter': 1},
{'Country': 'IE',
'Internet': '0.52',
'Print': '0.18',
'Radio': '0.36',
'Region': 'Europe',
'Television': '0.36',
'Twitter': 1},
{'Country': 'IL',
'Internet': '0.65',
'Print': '0.89',
'Radio': '0.46',
'Region': 'Middle East',
'Television': '0.46',
'Twitter': 1},
{'Country': 'IT',
'Internet': '1.62',
'Print': '1.51',
'Radio': '1.29',
'Region': 'Europe',
'Television': '1.29',
'Twitter': 1},
{'Country': 'CI',
'Internet': '0.73',
'Print': '1.02',
'Radio': '0.79',
'Region': 'Africa',
'Television': '0.79',
'Twitter': 1},
{'Country': 'JM',
'Internet': '0.32',
'Print': '0.27',
'Radio': '0.28',
'Region': 'Caribbean',
'Television': '0.28',
'Twitter': 1},
{'Country': 'JP',
'Internet': '2.80',
'Print': '5.27',
'Radio': '1.87',
'Region': 'Asia',
'Television': '1.87',
'Twitter': 1},
{'Country': 'KZ',
'Internet': '0.84',
'Print': '0.58',
'Radio': '0.68',
'Region': 'Europe',
'Television': '0.68',
'Twitter': 1},
{'Country': 'KE',
'Internet': '1.10',
'Print': '0.44',
'Radio': '1.12',
'Region': 'Africa',
'Television': '1.12',
'Twitter': 1},
{'Country': 'KG',
'Internet': '0.31',
'Print': '0.05',
'Radio': '0.39',
'Region': 'Asia',
'Television': '0.39',
'Twitter': 1},
{'Country': 'LB',
'Internet': '0.49',
'Print': '0.30',
'Radio': '0.37',
'Region': 'Middle East',
'Television': '0.37',
'Twitter': 1},
{'Country': 'LS',
'Internet': '0.09',
'Print': '0.08',
'Radio': '0.24',
'Region': 'Africa',
'Television': '0.24',
'Twitter': 1},
{'Country': 'LR',
'Internet': '0.12',
'Print': '0.13',
'Radio': '0.35',
'Region': 'Africa',
'Television': '0.35',
'Twitter': 1},
{'Country': 'LU',
'Internet': '0.19',
'Print': '0.18',
'Radio': '0.12',
'Region': 'Europe',
'Television': '0.12',
'Twitter': 1},
{'Country': 'MK',
'Internet': '0.22',
'Print': '0.58',
'Radio': '0.24',
'Region': 'Europe',
'Television': '0.24',
'Twitter': 1},
{'Country': 'MG',
'Internet': '1.11',
'Print': '0.19',
'Radio': '0.80',
'Region': 'Africa',
'Television': '0.80',
'Twitter': 1},
{'Country': 'MW',
'Internet': '0.93',
'Print': '0.11',
'Radio': '0.68',
'Region': 'Africa',
'Television': '0.68',
'Twitter': 1},
{'Country': 'MY',
'Internet': '0.22',
'Print': '1.07',
'Radio': '0.91',
'Region': 'Asia',
'Television': '0.91',
'Twitter': 1},
{'Country': 'ML',
'Internet': '0.92',
'Print': '0.68',
'Radio': '0.66',
'Region': 'Africa',
'Television': '0.66',
'Twitter': 1},
{'Country': 'MT',
'Internet': '0.11',
'Print': '0.13',
'Radio': '0.11',
'Region': 'Europe',
'Television': '0.11',
'Twitter': 1},
{'Country': 'MR',
'Internet': '0.18',
'Print': '0.68',
'Radio': '0.33',
'Region': 'Africa',
'Television': '0.33',
'Twitter': 1},
{'Country': 'MU',
'Internet': '0.07',
'Print': '0.62',
'Radio': '0.19',
'Region': 'Africa',
'Television': '0.19',
'Twitter': 1},
{'Country': 'MX',
'Internet': '1.91',
'Print': '0.06',
'Radio': '1.84',
'Region': 'Latin America',
'Television': '1.84',
'Twitter': 1},
{'Country': 'MD',
'Internet': '0.33',
'Print': '0.16',
'Radio': '0.31',
'Region': 'Europe',
'Television': '0.31',
'Twitter': 1},
{'Country': 'MN',
'Internet': '0.19',
'Print': '0.14',
'Radio': '0.28',
'Region': 'Asia',
'Television': '0.28',
'Twitter': 1},
{'Country': 'ME',
'Internet': '0.16',
'Print': '0.00',
'Radio': '0.13',
'Region': 'Europe',
'Television': '0.13',
'Twitter': 1},
{'Country': 'MA',
'Internet': '1.20',
'Print': '0.38',
'Radio': '0.96',
'Region': 'Middle East',
'Television': '0.96',
'Twitter': 1},
{'Country': 'NA',
'Internet': '0.16',
'Print': '0.15',
'Radio': '0.25',
'Region': 'Africa',
'Television': '0.25',
'Twitter': 1},
{'Country': 'NP',
'Internet': '0.49',
'Print': '0.30',
'Radio': '0.88',
'Region': 'Asia',
'Television': '0.88',
'Twitter': 1},
{'Country': 'NL',
'Internet': '1.08',
'Print': '1.19',
'Radio': '0.68',
'Region': 'Europe',
'Television': '0.68',
'Twitter': 1},
{'Country': 'NZ',
'Internet': '0.55',
'Print': '0.68',
'Radio': '0.35',
'Region': 'Pacific Islands',
'Television': '0.35',
'Twitter': 1},
{'Country': 'NI',
'Internet': '0.25',
'Print': '0.26',
'Radio': '0.41',
'Region': 'Latin America',
'Television': '0.41',
'Twitter': 1},
{'Country': 'NE',
'Internet': '0.15',
'Print': '0.08',
'Radio': '0.71',
'Region': 'Africa',
'Television': '0.71',
'Twitter': 1},
{'Country': 'NG',
'Internet': '2.19',
'Print': '1.19',
'Radio': '2.21',
'Region': 'Africa',
'Television': '2.21',
'Twitter': 1},
{'Country': 'NO',
'Internet': '0.59',
'Print': '0.83',
'Radio': '0.37',
'Region': 'Europe',
'Television': '0.37',
'Twitter': 1},
{'Country': 'PK',
'Internet': '1.20',
'Print': '0.06',
'Radio': '2.25',
'Region': 'Asia',
'Television': '2.25',
'Twitter': 1},
{'Country': 'PS',
'Internet': '0.54',
'Print': '0.00',
'Radio': '0.59',
'Region': 'Middle East',
'Television': '0.59',
'Twitter': 1},
{'Country': 'PY',
'Internet': '0.38',
'Print': '0.31',
'Radio': '0.44',
'Region': 'Latin America',
'Television': '0.44',
'Twitter': 1},
{'Country': 'PE',
'Internet': '0.95',
'Print': '1.92',
'Radio': '0.92',
'Region': 'Latin America',
'Television': '0.92',
'Twitter': 1},
{'Country': 'PH',
'Internet': '1.68',
'Print': '1.65',
'Radio': '1.66',
'Region': 'Asia',
'Television': '1.66',
'Twitter': 1},
{'Country': 'PL',
'Internet': '1.36',
'Print': '1.11',
'Radio': '1.02',
'Region': 'Europe',
'Television': '1.02',
'Twitter': 1},
{'Country': 'PT',
'Internet': '0.71',
'Print': '0.63',
'Radio': '0.54',
'Region': 'Europe',
'Television': '0.54',
'Twitter': 1},
{'Country': 'PR',
'Internet': '0.38',
'Print': '0.53',
'Radio': '0.32',
'Region': 'Latin America',
'Television': '0.32',
'Twitter': 1},
{'Country': 'RO',
'Internet': '0.90',
'Print': '0.65',
'Radio': '0.77',
'Region': 'Europe',
'Television': '0.77',
'Twitter': 1},
{'Country': 'WS',
'Internet': '0.04',
'Print': '0.68',
'Radio': '0.07',
'Region': 'Pacific Islands',
'Television': '0.07',
'Twitter': 1},
{'Country': 'SN',
'Internet': '0.48',
'Print': '0.21',
'Radio': '0.63',
'Region': 'Africa',
'Television': '0.63',
'Twitter': 1},
{'Country': 'RS',
'Internet': '0.58',
'Print': '0.58',
'Radio': '0.51',
'Region': 'Europe',
'Television': '0.51',
'Twitter': 1},
{'Country': 'SL',
'Internet': '0.08',
'Print': '0.07',
'Radio': '0.41',
'Region': 'Africa',
'Television': '0.41',
'Twitter': 1},
{'Country': 'SK',
'Internet': '0.57',
'Print': '0.68',
'Radio': '0.39',
'Region': 'Europe',
'Television': '0.39',
'Twitter': 1},
{'Country': 'SI',
'Internet': '0.33',
'Print': '0.31',
'Radio': '0.24',
'Region': 'Europe',
'Television': '0.24',
'Twitter': 1},
{'Country': 'SB',
'Internet': '0.06',
'Print': '0.04',
'Radio': '0.13',
'Region': 'Pacific Islands',
'Television': '0.13',
'Twitter': 1},
{'Country': 'SO',
'Internet': '0.11',
'Print': '0.68',
'Radio': '0.54',
'Region': 'Africa',
'Television': '0.54',
'Twitter': 1},
{'Country': 'ZA',
'Internet': '1.34',
'Print': '0.76',
'Radio': '1.21',
'Region': 'Africa',
'Television': '1.21',
'Twitter': 1},
{'Country': 'KR',
'Internet': '1.80',
'Print': '1.67',
'Radio': '1.17',
'Region': 'Asia',
'Television': '1.17',
'Twitter': 1},
{'Country': 'ES',
'Internet': '1.59',
'Print': '1.35',
'Radio': '1.14',
'Region': 'Europe',
'Television': '1.14',
'Twitter': 1},
{'Country': 'LC',
'Internet': '0.06',
'Print': '0.18',
'Radio': '0.07',
'Region': 'Caribbean',
'Television': '0.07',
'Twitter': 1},
{'Country': 'VC',
'Internet': '0.05',
'Print': '0.68',
'Radio': '0.05',
'Region': 'Caribbean',
'Television': '0.05',
'Twitter': 1},
{'Country': 'SD',
'Internet': '0.82',
'Print': '0.60',
'Radio': '1.03',
'Region': 'Africa',
'Television': '1.03',
'Twitter': 1},
{'Country': 'SS',
'Internet': '0.15',
'Print': '0.18',
'Radio': '0.48',
'Region': 'Africa',
'Television': '0.48',
'Twitter': 1},
{'Country': 'SR',
'Internet': '0.12',
'Print': '0.12',
'Radio': '0.13',
'Region': 'Caribbean',
'Television': '0.13',
'Twitter': 1},
{'Country': 'SZ',
'Internet': '0.15',
'Print': '0.10',
'Radio': '0.19',
'Region': 'Africa',
'Television': '0.19',
'Twitter': 1},
{'Country': 'SE',
'Internet': '0.78',
'Print': '1.11',
'Radio': '0.51',
'Region': 'Europe',
'Television': '0.51',
'Twitter': 1},
{'Country': 'CH',
'Internet': '0.72',
'Print': '0.94',
'Radio': '0.47',
'Region': 'Europe',
'Television': '0.47',
'Twitter': 1},
{'Country': 'TW',
'Internet': '1.00',
'Print': '0.68',
'Radio': '0.80',
'Region': 'Asia',
'Television': '0.80',
'Twitter': 1},
{'Country': 'TZ',
'Internet': '0.74',
'Print': '0.35',
'Radio': '1.18',
'Region': 'Africa',
'Television': '1.18',
'Twitter': 1},
{'Country': 'TG',
'Internet': '0.15',
'Print': '0.07',
'Radio': '0.44',
'Region': 'Africa',
'Television': '0.44',
'Twitter': 1},
{'Country': 'TO',
'Internet': '0.05',
'Print': '0.05',
'Radio': '0.05',
'Region': 'Pacific Islands',
'Television': '0.05',
'Twitter': 1},
{'Country': 'TT',
'Internet': '0.25',
'Print': '0.18',
'Radio': '0.19',
'Region': 'Caribbean',
'Television': '0.19',
'Twitter': 1},
{'Country': 'TN',
'Internet': '0.60',
'Print': '0.31',
'Radio': '0.55',
'Region': 'Middle East',
'Television': '0.55',
'Twitter': 1},
{'Country': 'TR',
'Internet': '1.59',
'Print': '0.94',
'Radio': '1.44',
'Region': 'Europe',
'Television': '1.44',
'Twitter': 1},
{'Country': 'UG',
'Internet': '0.68',
'Print': '0.16',
'Radio': '1.03',
'Region': 'Africa',
'Television': '1.03',
'Twitter': 1},
{'Country': 'GB',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'US',
'Internet': '4.48',
'Print': '4.43',
'Radio': '2.98',
'Region': 'North America',
'Television': '2.98',
'Twitter': 1},
{'Country': 'UY',
'Internet': '0.38',
'Print': '0.56',
'Radio': '0.31',
'Region': 'Latin America',
'Television': '0.31',
'Twitter': 1},
{'Country': 'VU',
'Internet': '0.05',
'Print': '0.58',
'Radio': '0.08',
'Region': 'Asia',
'Television': '0.08',
'Twitter': 1},
{'Country': 'VE',
'Internet': '1.02',
'Print': '1.01',
'Radio': '0.92',
'Region': 'Latin America',
'Television': '0.92',
'Twitter': 1},
{'Country': 'VN',
'Internet': '1.69',
'Print': '0.52',
'Radio': '1.59',
'Region': 'Asia',
'Television': '1.59',
'Twitter': 1},
{'Country': 'ZM',
'Internet': '0.41',
'Print': '0.15',
'Radio': '0.64',
'Region': 'Africa',
'Television': '0.64',
'Twitter': 1},
{'Country': 'ZW',
'Internet': '0.45',
'Print': '0.30',
'Radio': '0.63',
'Region': 'Africa',
'Television': '0.63',
'Twitter': 1},
{'Country': 'EN',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'WL',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'SQ',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'EN',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'B1',
'Internet': '0.82',
'Print': '0.70',
'Radio': '0.55',
'Region': 'Europe',
'Television': '0.55',
'Twitter': 1},
{'Country': 'B2',
'Internet': '0.82',
'Print': '0.70',
'Radio': '0.55',
'Region': 'Europe',
'Television': '0.55',
'Twitter': 1}]
| 20.692236 | 65 | 0.494711 |
from __future__ import unicode_literals
from django.db import migrations
from django_countries import countries
def populate_weights(apps, schema_editor):
    """Create one Weights row per (country, media type) pair from COUNTRY_WEIGHTS.

    Each entry in COUNTRY_WEIGHTS maps a country code plus a region label to
    per-media-type weights; the Country/Region keys are popped so the
    remaining keys are exactly the media types to persist.
    """
    Weights = apps.get_model("reports", "Weights")
    db_alias = schema_editor.connection.alias
    for item in COUNTRY_WEIGHTS:
        country = item.pop('Country')
        item.pop('Region')
        # BUG FIX: dict.iteritems() is Python-2-only; items() works on both.
        # The original guard (media_type != 'Country' or media_type != 'Region')
        # was a tautology and dead anyway since both keys are popped above.
        for media_type, weight in item.items():
            # create() already saves the row; no extra save() needed.
            Weights.objects.using(db_alias).create(
                country=country,
                media_type=media_type,
                weight=weight)
def backwards(apps, schema_editor):
    """Reverse no-op: weights inserted by populate_weights are left in place."""
    pass
class Migration(migrations.Migration):
    """Data migration that seeds the reports.Weights table from COUNTRY_WEIGHTS."""

    # Must run after the initial reports schema so the Weights table exists.
    dependencies = [
        ('reports', '0001_initial'),
    ]
    # RunPython pairs the forward seeding with a no-op reverse.
    operations = [
        migrations.RunPython(
            populate_weights,
            backwards,
        ),
    ]
COUNTRY_WEIGHTS= [{'Country': 'AF',
'Internet': '0.37',
'Print': '0.33',
'Radio': '0.93',
'Region': 'Asia',
'Television': '0.93',
'Twitter': 1},
{'Country': 'AL',
'Internet': '0.36',
'Print': '1.02',
'Radio': '0.30',
'Region': 'Europe',
'Television': '0.30',
'Twitter': 1},
{'Country': 'AG',
'Internet': '0.08',
'Print': '0.68',
'Radio': '0.05',
'Region': 'Caribbean',
'Television': '0.05',
'Twitter': 1},
{'Country': 'AR',
'Internet': '1.34',
'Print': '0.74',
'Radio': '1.07',
'Region': 'Latin America',
'Television': '1.07',
'Twitter': 1},
{'Country': 'AM',
'Internet': '0.31',
'Print': '1.02',
'Radio': '0.29',
'Region': 'Europe',
'Television': '0.29',
'Twitter': 1},
{'Country': 'AU',
'Internet': '1.23',
'Print': '0.98',
'Radio': '0.81',
'Region': 'Pacific Islands',
'Television': '0.81',
'Twitter': 1},
{'Country': 'AT',
'Internet': '0.72',
'Print': '0.58',
'Radio': '0.48',
'Region': 'Europe',
'Television': '0.48',
'Twitter': 1},
{'Country': 'BS',
'Internet': '0.15',
'Print': '0.18',
'Radio': '0.10',
'Region': 'Caribbean',
'Television': '0.10',
'Twitter': 1},
{'Country': 'BD',
'Internet': '0.88',
'Print': '3.63',
'Radio': '2.09',
'Region': 'Asia',
'Television': '2.09',
'Twitter': 1},
{'Country': 'BB',
'Internet': '0.13',
'Print': '0.13',
'Radio': '0.09',
'Region': 'Caribbean',
'Television': '0.09',
'Twitter': 1},
{'Country': 'BY',
'Internet': '0.59',
'Print': '0.47',
'Radio': '0.51',
'Region': 'Europe',
'Television': '0.51',
'Twitter': 1},
{'Country': 'BE',
'Internet': '0.82',
'Print': '0.70',
'Radio': '0.55',
'Region': 'Europe',
'Television': '0.55',
'Twitter': 1},
{'Country': 'BZ',
'Internet': '0.08',
'Print': '0.68',
'Radio': '0.10',
'Region': 'Caribbean',
'Television': '0.10',
'Twitter': 1},
{'Country': 'BJ',
'Internet': '0.18',
'Print': '0.03',
'Radio': '0.54',
'Region': 'Africa',
'Television': '0.54',
'Twitter': 1},
{'Country': 'BT',
'Internet': '0.12',
'Print': '0.68',
'Radio': '0.14',
'Region': 'Asia',
'Television': '0.14',
'Twitter': 1},
{'Country': 'BO',
'Internet': '0.53',
'Print': '0.42',
'Radio': '0.55',
'Region': 'Latin America',
'Television': '0.55',
'Twitter': 1},
{'Country': 'BA',
'Internet': '0.43',
'Print': '0.68',
'Radio': '0.32',
'Region': 'Europe',
'Television': '0.32',
'Twitter': 1},
{'Country': 'BW',
'Internet': '0.14',
'Print': '0.18',
'Radio': '0.24',
'Region': 'Africa',
'Television': '0.24',
'Twitter': 1},
{'Country': 'BR',
'Internet': '2.78',
'Print': '1.64',
'Radio': '2.35',
'Region': 'Latin America',
'Television': '2.35',
'Twitter': 1},
{'Country': 'BG',
'Internet': '0.54',
'Print': '0.41',
'Radio': '0.44',
'Region': 'Europe',
'Television': '0.44',
'Twitter': 1},
{'Country': 'BF',
'Internet': '0.23',
'Print': '0.10',
'Radio': '0.69',
'Region': 'Africa',
'Television': '0.69',
'Twitter': 1},
{'Country': 'BI',
'Internet': '0.10',
'Print': '0.10',
'Radio': '0.54',
'Region': 'Africa',
'Television': '0.54',
'Twitter': 1},
{'Country': 'CM',
'Internet': '0.33',
'Print': '0.17',
'Radio': '0.79',
'Region': 'Africa',
'Television': '0.79',
'Twitter': 1},
{'Country': 'CA',
'Internet': '1.54',
'Print': '1.31',
'Radio': '0.99',
'Region': 'North America',
'Television': '0.99',
'Twitter': 1},
{'Country': 'CV',
'Internet': '0.12',
'Print': '0.18',
'Radio': '0.12',
'Region': 'Africa',
'Television': '0.12',
'Twitter': 1},
{'Country': 'CF',
'Internet': '0.11',
'Print': '0.68',
'Radio': '0.36',
'Region': 'Africa',
'Television': '0.36',
'Twitter': 1},
{'Country': 'TD',
'Internet': '0.15',
'Print': '0.00',
'Radio': '0.60',
'Region': 'Africa',
'Television': '0.60',
'Twitter': 1},
{'Country': 'CL',
'Internet': '0.92',
'Print': '0.37',
'Radio': '0.70',
'Region': 'Latin America',
'Television': '0.70',
'Twitter': 1},
{'Country': 'CN',
'Internet': '6.79',
'Print': '6.23',
'Radio': '6.18',
'Region': 'Asia',
'Television': '6.18',
'Twitter': 1},
{'Country': 'CO',
'Internet': '1.36',
'Print': '0.66',
'Radio': '1.16',
'Region': 'Latin America',
'Television': '1.16',
'Twitter': 1},
{'Country': 'KM',
'Internet': '0.06',
'Print': '0.68',
'Radio': '0.14',
'Region': 'Africa',
'Television': '0.14',
'Twitter': 1},
{'Country': 'CD',
'Internet': '0.08',
'Print': '0.28',
'Radio': '0.35',
'Region': 'Africa',
'Television': '0.35',
'Twitter': 1},
{'Country': 'CG',
'Internet': '0.33',
'Print': '0.11',
'Radio': '0.36',
'Region': 'Africa',
'Television': '0.36',
'Twitter': 1},
{'Country': 'CR',
'Internet': '0.42',
'Print': '0.34',
'Radio': '0.37',
'Region': 'Latin America',
'Television': '0.37',
'Twitter': 1},
{'Country': 'HR',
'Internet': '0.45',
'Print': '0.41',
'Radio': '0.34',
'Region': 'Europe',
'Television': '0.34',
'Twitter': 1},
{'Country': 'CU',
'Internet': '0.47',
'Print': '0.12',
'Radio': '0.56',
'Region': 'Caribbean',
'Television': '0.56',
'Twitter': 1},
{'Country': 'CY',
'Internet': '0.23',
'Print': '0.13',
'Radio': '0.18',
'Region': 'Middle East',
'Television': '0.18',
'Twitter': 1},
{'Country': 'DK',
'Internet': '0.50',
'Print': '0.74',
'Radio': '0.39',
'Region': 'Europe',
'Television': '0.39',
'Twitter': 1},
{'Country': 'DO',
'Internet': '0.60',
'Print': '0.68',
'Radio': '0.54',
'Region': 'Caribbean',
'Television': '0.54',
'Twitter': 1},
{'Country': 'EC',
'Internet': '0.66',
'Print': '0.72',
'Radio': '0.66',
'Region': 'Latin America',
'Television': '0.66',
'Twitter': 1},
{'Country': 'EG',
'Internet': '1.70',
'Print': '1.43',
'Radio': '1.51',
'Region': 'Middle East',
'Television': '1.51',
'Twitter': 1},
{'Country': 'SV',
'Internet': '0.35',
'Print': '0.32',
'Radio': '0.42',
'Region': 'Latin America',
'Television': '0.42',
'Twitter': 1},
{'Country': 'GQ',
'Internet': '0.09',
'Print': '0.68',
'Radio': '0.15',
'Region': 'Africa',
'Television': '0.15',
'Twitter': 1},
{'Country': 'EE',
'Internet': '0.27',
'Print': '0.27',
'Radio': '0.19',
'Region': 'Europe',
'Television': '0.19',
'Twitter': 1},
{'Country': 'ET',
'Internet': '0.34',
'Print': '0.39',
'Radio': '1.63',
'Region': 'Africa',
'Television': '1.63',
'Twitter': 1},
{'Country': 'FJ',
'Internet': '0.15',
'Print': '0.12',
'Radio': '0.16',
'Region': 'Pacific Islands',
'Television': '0.16',
'Twitter': 1},
{'Country': 'FI',
'Internet': '0.61',
'Print': '0.03',
'Radio': '0.39',
'Region': 'Europe',
'Television': '0.39',
'Twitter': 1},
{'Country': 'FR',
'Internet': '1.99',
'Print': '1.69',
'Radio': '1.33',
'Region': 'Europe',
'Television': '1.33',
'Twitter': 1},
{'Country': 'GA',
'Internet': '0.11',
'Print': '0.58',
'Radio': '0.22',
'Region': 'Africa',
'Television': '0.22',
'Twitter': 1},
{'Country': 'GM',
'Internet': '0.14',
'Print': '0.04',
'Radio': '0.23',
'Region': 'Africa',
'Television': '0.23',
'Twitter': 1},
{'Country': 'GE',
'Internet': '0.40',
'Print': '1.02',
'Radio': '0.34',
'Region': 'Europe',
'Television': '0.34',
'Twitter': 1},
{'Country': 'DE',
'Internet': '2.27',
'Print': '2.50',
'Radio': '1.51',
'Region': 'Europe',
'Television': '1.51',
'Twitter': 1},
{'Country': 'GH',
'Internet': '0.61',
'Print': '0.39',
'Radio': '0.85',
'Region': 'Africa',
'Television': '0.85',
'Twitter': 1},
{'Country': 'GR',
'Internet': '0.68',
'Print': '0.44',
'Radio': '0.55',
'Region': 'Europe',
'Television': '0.55',
'Twitter': 1},
{'Country': 'GD',
'Internet': '0.06',
'Print': '0.68',
'Radio': '0.05',
'Region': 'Caribbean',
'Television': '0.05',
'Twitter': 1},
{'Country': 'GT',
'Internet': '0.44',
'Print': '0.38',
'Radio': '0.66',
'Region': 'Latin America',
'Television': '0.66',
'Twitter': 1},
{'Country': 'GW',
'Internet': '0.06',
'Print': '0.68',
'Radio': '0.22',
'Region': 'Africa',
'Television': '0.22',
'Twitter': 1},
{'Country': 'GN',
'Internet': '0.68',
'Print': '1.67',
'Radio': '0.56',
'Region': 'Africa',
'Television': '0.56',
'Twitter': 1},
{'Country': 'GY',
'Internet': '0.15',
'Print': '0.15',
'Radio': '0.15',
'Region': 'Caribbean',
'Television': '0.15',
'Twitter': 1},
{'Country': 'HT',
'Internet': '0.30',
'Print': '0.17',
'Radio': '0.54',
'Region': 'Caribbean',
'Television': '0.54',
'Twitter': 1},
{'Country': 'HU',
'Internet': '0.73',
'Print': '0.68',
'Radio': '0.52',
'Region': 'Europe',
'Television': '0.52',
'Twitter': 1},
{'Country': 'IS',
'Internet': '0.15',
'Print': '0.18',
'Radio': '0.10',
'Region': 'Europe',
'Television': '0.10',
'Twitter': 1},
{'Country': 'IN',
'Internet': '4.18',
'Print': '5.72',
'Radio': '5.90',
'Region': 'Asia',
'Television': '5.90',
'Twitter': 1},
{'Country': 'IE',
'Internet': '0.52',
'Print': '0.18',
'Radio': '0.36',
'Region': 'Europe',
'Television': '0.36',
'Twitter': 1},
{'Country': 'IL',
'Internet': '0.65',
'Print': '0.89',
'Radio': '0.46',
'Region': 'Middle East',
'Television': '0.46',
'Twitter': 1},
{'Country': 'IT',
'Internet': '1.62',
'Print': '1.51',
'Radio': '1.29',
'Region': 'Europe',
'Television': '1.29',
'Twitter': 1},
{'Country': 'CI',
'Internet': '0.73',
'Print': '1.02',
'Radio': '0.79',
'Region': 'Africa',
'Television': '0.79',
'Twitter': 1},
{'Country': 'JM',
'Internet': '0.32',
'Print': '0.27',
'Radio': '0.28',
'Region': 'Caribbean',
'Television': '0.28',
'Twitter': 1},
{'Country': 'JP',
'Internet': '2.80',
'Print': '5.27',
'Radio': '1.87',
'Region': 'Asia',
'Television': '1.87',
'Twitter': 1},
{'Country': 'KZ',
'Internet': '0.84',
'Print': '0.58',
'Radio': '0.68',
'Region': 'Europe',
'Television': '0.68',
'Twitter': 1},
{'Country': 'KE',
'Internet': '1.10',
'Print': '0.44',
'Radio': '1.12',
'Region': 'Africa',
'Television': '1.12',
'Twitter': 1},
{'Country': 'KG',
'Internet': '0.31',
'Print': '0.05',
'Radio': '0.39',
'Region': 'Asia',
'Television': '0.39',
'Twitter': 1},
{'Country': 'LB',
'Internet': '0.49',
'Print': '0.30',
'Radio': '0.37',
'Region': 'Middle East',
'Television': '0.37',
'Twitter': 1},
{'Country': 'LS',
'Internet': '0.09',
'Print': '0.08',
'Radio': '0.24',
'Region': 'Africa',
'Television': '0.24',
'Twitter': 1},
{'Country': 'LR',
'Internet': '0.12',
'Print': '0.13',
'Radio': '0.35',
'Region': 'Africa',
'Television': '0.35',
'Twitter': 1},
{'Country': 'LU',
'Internet': '0.19',
'Print': '0.18',
'Radio': '0.12',
'Region': 'Europe',
'Television': '0.12',
'Twitter': 1},
{'Country': 'MK',
'Internet': '0.22',
'Print': '0.58',
'Radio': '0.24',
'Region': 'Europe',
'Television': '0.24',
'Twitter': 1},
{'Country': 'MG',
'Internet': '1.11',
'Print': '0.19',
'Radio': '0.80',
'Region': 'Africa',
'Television': '0.80',
'Twitter': 1},
{'Country': 'MW',
'Internet': '0.93',
'Print': '0.11',
'Radio': '0.68',
'Region': 'Africa',
'Television': '0.68',
'Twitter': 1},
{'Country': 'MY',
'Internet': '0.22',
'Print': '1.07',
'Radio': '0.91',
'Region': 'Asia',
'Television': '0.91',
'Twitter': 1},
{'Country': 'ML',
'Internet': '0.92',
'Print': '0.68',
'Radio': '0.66',
'Region': 'Africa',
'Television': '0.66',
'Twitter': 1},
{'Country': 'MT',
'Internet': '0.11',
'Print': '0.13',
'Radio': '0.11',
'Region': 'Europe',
'Television': '0.11',
'Twitter': 1},
{'Country': 'MR',
'Internet': '0.18',
'Print': '0.68',
'Radio': '0.33',
'Region': 'Africa',
'Television': '0.33',
'Twitter': 1},
{'Country': 'MU',
'Internet': '0.07',
'Print': '0.62',
'Radio': '0.19',
'Region': 'Africa',
'Television': '0.19',
'Twitter': 1},
{'Country': 'MX',
'Internet': '1.91',
'Print': '0.06',
'Radio': '1.84',
'Region': 'Latin America',
'Television': '1.84',
'Twitter': 1},
{'Country': 'MD',
'Internet': '0.33',
'Print': '0.16',
'Radio': '0.31',
'Region': 'Europe',
'Television': '0.31',
'Twitter': 1},
{'Country': 'MN',
'Internet': '0.19',
'Print': '0.14',
'Radio': '0.28',
'Region': 'Asia',
'Television': '0.28',
'Twitter': 1},
{'Country': 'ME',
'Internet': '0.16',
'Print': '0.00',
'Radio': '0.13',
'Region': 'Europe',
'Television': '0.13',
'Twitter': 1},
{'Country': 'MA',
'Internet': '1.20',
'Print': '0.38',
'Radio': '0.96',
'Region': 'Middle East',
'Television': '0.96',
'Twitter': 1},
{'Country': 'NA',
'Internet': '0.16',
'Print': '0.15',
'Radio': '0.25',
'Region': 'Africa',
'Television': '0.25',
'Twitter': 1},
{'Country': 'NP',
'Internet': '0.49',
'Print': '0.30',
'Radio': '0.88',
'Region': 'Asia',
'Television': '0.88',
'Twitter': 1},
{'Country': 'NL',
'Internet': '1.08',
'Print': '1.19',
'Radio': '0.68',
'Region': 'Europe',
'Television': '0.68',
'Twitter': 1},
{'Country': 'NZ',
'Internet': '0.55',
'Print': '0.68',
'Radio': '0.35',
'Region': 'Pacific Islands',
'Television': '0.35',
'Twitter': 1},
{'Country': 'NI',
'Internet': '0.25',
'Print': '0.26',
'Radio': '0.41',
'Region': 'Latin America',
'Television': '0.41',
'Twitter': 1},
{'Country': 'NE',
'Internet': '0.15',
'Print': '0.08',
'Radio': '0.71',
'Region': 'Africa',
'Television': '0.71',
'Twitter': 1},
{'Country': 'NG',
'Internet': '2.19',
'Print': '1.19',
'Radio': '2.21',
'Region': 'Africa',
'Television': '2.21',
'Twitter': 1},
{'Country': 'NO',
'Internet': '0.59',
'Print': '0.83',
'Radio': '0.37',
'Region': 'Europe',
'Television': '0.37',
'Twitter': 1},
{'Country': 'PK',
'Internet': '1.20',
'Print': '0.06',
'Radio': '2.25',
'Region': 'Asia',
'Television': '2.25',
'Twitter': 1},
{'Country': 'PS',
'Internet': '0.54',
'Print': '0.00',
'Radio': '0.59',
'Region': 'Middle East',
'Television': '0.59',
'Twitter': 1},
{'Country': 'PY',
'Internet': '0.38',
'Print': '0.31',
'Radio': '0.44',
'Region': 'Latin America',
'Television': '0.44',
'Twitter': 1},
{'Country': 'PE',
'Internet': '0.95',
'Print': '1.92',
'Radio': '0.92',
'Region': 'Latin America',
'Television': '0.92',
'Twitter': 1},
{'Country': 'PH',
'Internet': '1.68',
'Print': '1.65',
'Radio': '1.66',
'Region': 'Asia',
'Television': '1.66',
'Twitter': 1},
{'Country': 'PL',
'Internet': '1.36',
'Print': '1.11',
'Radio': '1.02',
'Region': 'Europe',
'Television': '1.02',
'Twitter': 1},
{'Country': 'PT',
'Internet': '0.71',
'Print': '0.63',
'Radio': '0.54',
'Region': 'Europe',
'Television': '0.54',
'Twitter': 1},
{'Country': 'PR',
'Internet': '0.38',
'Print': '0.53',
'Radio': '0.32',
'Region': 'Latin America',
'Television': '0.32',
'Twitter': 1},
{'Country': 'RO',
'Internet': '0.90',
'Print': '0.65',
'Radio': '0.77',
'Region': 'Europe',
'Television': '0.77',
'Twitter': 1},
{'Country': 'WS',
'Internet': '0.04',
'Print': '0.68',
'Radio': '0.07',
'Region': 'Pacific Islands',
'Television': '0.07',
'Twitter': 1},
{'Country': 'SN',
'Internet': '0.48',
'Print': '0.21',
'Radio': '0.63',
'Region': 'Africa',
'Television': '0.63',
'Twitter': 1},
{'Country': 'RS',
'Internet': '0.58',
'Print': '0.58',
'Radio': '0.51',
'Region': 'Europe',
'Television': '0.51',
'Twitter': 1},
{'Country': 'SL',
'Internet': '0.08',
'Print': '0.07',
'Radio': '0.41',
'Region': 'Africa',
'Television': '0.41',
'Twitter': 1},
{'Country': 'SK',
'Internet': '0.57',
'Print': '0.68',
'Radio': '0.39',
'Region': 'Europe',
'Television': '0.39',
'Twitter': 1},
{'Country': 'SI',
'Internet': '0.33',
'Print': '0.31',
'Radio': '0.24',
'Region': 'Europe',
'Television': '0.24',
'Twitter': 1},
{'Country': 'SB',
'Internet': '0.06',
'Print': '0.04',
'Radio': '0.13',
'Region': 'Pacific Islands',
'Television': '0.13',
'Twitter': 1},
{'Country': 'SO',
'Internet': '0.11',
'Print': '0.68',
'Radio': '0.54',
'Region': 'Africa',
'Television': '0.54',
'Twitter': 1},
{'Country': 'ZA',
'Internet': '1.34',
'Print': '0.76',
'Radio': '1.21',
'Region': 'Africa',
'Television': '1.21',
'Twitter': 1},
{'Country': 'KR',
'Internet': '1.80',
'Print': '1.67',
'Radio': '1.17',
'Region': 'Asia',
'Television': '1.17',
'Twitter': 1},
{'Country': 'ES',
'Internet': '1.59',
'Print': '1.35',
'Radio': '1.14',
'Region': 'Europe',
'Television': '1.14',
'Twitter': 1},
{'Country': 'LC',
'Internet': '0.06',
'Print': '0.18',
'Radio': '0.07',
'Region': 'Caribbean',
'Television': '0.07',
'Twitter': 1},
{'Country': 'VC',
'Internet': '0.05',
'Print': '0.68',
'Radio': '0.05',
'Region': 'Caribbean',
'Television': '0.05',
'Twitter': 1},
{'Country': 'SD',
'Internet': '0.82',
'Print': '0.60',
'Radio': '1.03',
'Region': 'Africa',
'Television': '1.03',
'Twitter': 1},
{'Country': 'SS',
'Internet': '0.15',
'Print': '0.18',
'Radio': '0.48',
'Region': 'Africa',
'Television': '0.48',
'Twitter': 1},
{'Country': 'SR',
'Internet': '0.12',
'Print': '0.12',
'Radio': '0.13',
'Region': 'Caribbean',
'Television': '0.13',
'Twitter': 1},
{'Country': 'SZ',
'Internet': '0.15',
'Print': '0.10',
'Radio': '0.19',
'Region': 'Africa',
'Television': '0.19',
'Twitter': 1},
{'Country': 'SE',
'Internet': '0.78',
'Print': '1.11',
'Radio': '0.51',
'Region': 'Europe',
'Television': '0.51',
'Twitter': 1},
{'Country': 'CH',
'Internet': '0.72',
'Print': '0.94',
'Radio': '0.47',
'Region': 'Europe',
'Television': '0.47',
'Twitter': 1},
{'Country': 'TW',
'Internet': '1.00',
'Print': '0.68',
'Radio': '0.80',
'Region': 'Asia',
'Television': '0.80',
'Twitter': 1},
{'Country': 'TZ',
'Internet': '0.74',
'Print': '0.35',
'Radio': '1.18',
'Region': 'Africa',
'Television': '1.18',
'Twitter': 1},
{'Country': 'TG',
'Internet': '0.15',
'Print': '0.07',
'Radio': '0.44',
'Region': 'Africa',
'Television': '0.44',
'Twitter': 1},
{'Country': 'TO',
'Internet': '0.05',
'Print': '0.05',
'Radio': '0.05',
'Region': 'Pacific Islands',
'Television': '0.05',
'Twitter': 1},
{'Country': 'TT',
'Internet': '0.25',
'Print': '0.18',
'Radio': '0.19',
'Region': 'Caribbean',
'Television': '0.19',
'Twitter': 1},
{'Country': 'TN',
'Internet': '0.60',
'Print': '0.31',
'Radio': '0.55',
'Region': 'Middle East',
'Television': '0.55',
'Twitter': 1},
{'Country': 'TR',
'Internet': '1.59',
'Print': '0.94',
'Radio': '1.44',
'Region': 'Europe',
'Television': '1.44',
'Twitter': 1},
{'Country': 'UG',
'Internet': '0.68',
'Print': '0.16',
'Radio': '1.03',
'Region': 'Africa',
'Television': '1.03',
'Twitter': 1},
{'Country': 'GB',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'US',
'Internet': '4.48',
'Print': '4.43',
'Radio': '2.98',
'Region': 'North America',
'Television': '2.98',
'Twitter': 1},
{'Country': 'UY',
'Internet': '0.38',
'Print': '0.56',
'Radio': '0.31',
'Region': 'Latin America',
'Television': '0.31',
'Twitter': 1},
{'Country': 'VU',
'Internet': '0.05',
'Print': '0.58',
'Radio': '0.08',
'Region': 'Asia',
'Television': '0.08',
'Twitter': 1},
{'Country': 'VE',
'Internet': '1.02',
'Print': '1.01',
'Radio': '0.92',
'Region': 'Latin America',
'Television': '0.92',
'Twitter': 1},
{'Country': 'VN',
'Internet': '1.69',
'Print': '0.52',
'Radio': '1.59',
'Region': 'Asia',
'Television': '1.59',
'Twitter': 1},
{'Country': 'ZM',
'Internet': '0.41',
'Print': '0.15',
'Radio': '0.64',
'Region': 'Africa',
'Television': '0.64',
'Twitter': 1},
{'Country': 'ZW',
'Internet': '0.45',
'Print': '0.30',
'Radio': '0.63',
'Region': 'Africa',
'Television': '0.63',
'Twitter': 1},
{'Country': 'EN',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'WL',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'SQ',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'EN',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'B1',
'Internet': '0.82',
'Print': '0.70',
'Radio': '0.55',
'Region': 'Europe',
'Television': '0.55',
'Twitter': 1},
{'Country': 'B2',
'Internet': '0.82',
'Print': '0.70',
'Radio': '0.55',
'Region': 'Europe',
'Television': '0.55',
'Twitter': 1}]
| true | true |
f710b17920f4b27ff5f612fbae4bb033721cb956 | 5,980 | py | Python | Flask-Web/flasky/app/auth/views.py | fengzse/Feng_Repository | a0c64cbdff09e536be23eeccf45bdf6cab62d78b | [
"Apache-2.0"
] | 1 | 2021-01-02T22:03:13.000Z | 2021-01-02T22:03:13.000Z | Flask-Web/flasky/app/auth/views.py | fengzse/Feng_Repository | a0c64cbdff09e536be23eeccf45bdf6cab62d78b | [
"Apache-2.0"
] | null | null | null | Flask-Web/flasky/app/auth/views.py | fengzse/Feng_Repository | a0c64cbdff09e536be23eeccf45bdf6cab62d78b | [
"Apache-2.0"
] | null | null | null | from flask import render_template, redirect, request, url_for, flash
from flask_login import login_user, logout_user, login_required, current_user
from . import auth
from .. import db
from ..models import User
from .forms import LoginForm, RegistrationForm, ChangePasswordForm, ResetPassword, ResetPasswordRequest, \
ChangeEmailForm
from ..email import send_email
@auth.before_app_request
def before_request():
    """Run before every app request: refresh the user's last-seen timestamp
    and force unconfirmed accounts to the confirmation page for any request
    outside the auth blueprint and static files."""
    if current_user.is_authenticated:
        current_user.ping()  # update last_seen on every authenticated request
        if not current_user.confirmed \
                and request.endpoint \
                and request.blueprint != 'auth' \
                and request.endpoint != 'static':
            return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
    """Show the 'please confirm your account' page; confirmed or anonymous
    users are sent back to the home page instead."""
    if current_user.is_anonymous or current_user.confirmed:
        return redirect(url_for('main.index'))
    return render_template('auth/unconfirmed.html')
@auth.route('/login', methods=['GET', 'POST'])
def login():
    """Render and process the login form; on success redirect to the `next`
    query argument (only if it is a local path) or the home page."""
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user is not None and user.verify_password(form.password.data):
            login_user(user, form.remember_me.data)  # second argument: remember-me boolean
            next = request.args.get('next')
            # Only honor relative redirect targets to avoid open redirects.
            if next is None or not next.startswith('/'):
                next = url_for('main.index')
            return redirect(next)
        flash('Invalid email or password.')
    return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
    """End the current session and return the user to the home page."""
    logout_user()
    flash('You have been logged out.')
    return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account from the registration form and send a
    confirmation email with a one-hour token."""
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(email=form.email.data, username=form.username.data, password=form.password.data)
        db.session.add(user)
        # Commit before generating the token: the token needs the user's id.
        db.session.commit()
        token = user.generate_confirmation_token(3600)
        send_email(user.email, 'Confirm your account', 'auth/email/confirm', user=user, token=token)
        flash('A confirmation email has been sent to you by email.')
        return redirect(url_for('auth.login'))
    return render_template('auth/register.html', form=form)
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
    """Validate an account-confirmation token for the logged-in user."""
    if current_user.confirmed:
        # Already confirmed: clicking the link again is a no-op.
        return redirect(url_for('main.index'))
    if current_user.confirm(token):
        db.session.commit()
        flash('Account has been confirmed.Thanks')
    else:
        flash('The confirmation link is invalid or has expired.')
    return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
    """Send a fresh confirmation email (one-hour token) to the current user."""
    token = current_user.generate_confirmation_token(3600)
    send_email(current_user.email, 'Confirm your account', 'auth/email/confirm', user=current_user, token=token)
    flash('A new confirmation email has been sent to you by email.')
    return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
    """Let a logged-in user change their password after re-entering the old one."""
    form = ChangePasswordForm()
    if form.validate_on_submit():
        if current_user.verify_password(form.old_password.data):
            current_user.password = form.new_password.data
            db.session.add(current_user)
            db.session.commit()
            flash('New password has been updated.')
            return redirect(url_for('main.index'))
        else:
            flash('Invalid password')
    return render_template('auth/change_password.html', form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def reset_password_request():
    """Start the anonymous password-reset flow: email a signed reset token
    to the address entered in the form (if an account exists for it)."""
    if not current_user.is_anonymous:
        # Logged-in users should change their password instead.
        return redirect(url_for('main.index'))
    form = ResetPasswordRequest()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user:
            token = user.generate_reset_token()
            send_email(user.email, 'Reset your password', 'auth/email/reset_password', user=user, token=token)
        # Flash regardless of whether the account exists, so the form does
        # not reveal which email addresses are registered.
        flash('An email with instructions to reset your password has been sent to you.')
        # Bug fix: url_for() takes an endpoint name ('auth.login'); the
        # original path-style 'auth/login' raises a routing BuildError.
        return redirect(url_for('auth.login'))
    return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
    """Complete the password reset using the signed token from the email link."""
    if not current_user.is_anonymous:
        return redirect(url_for('main.index'))  # keep logged-in users out of the reset flow
    form = ResetPassword()
    if form.validate_on_submit():
        if User.reset_password(token, form.reset_password.data):
            db.session.commit()
            flash('Password had been updated')
            return redirect(url_for('auth.login'))
        else:
            flash('Error Please reset your password again')
            return redirect(url_for('auth.login'))
    return render_template('auth/reset_password.html', form=form)
@auth.route('/change_email', methods=['GET', 'POST'])
@login_required
def change_email_request():
    """Ask for a new email address (password re-check required) and send a
    confirmation link to that new address."""
    form = ChangeEmailForm()
    if form.validate_on_submit():
        if current_user.verify_password(form.password.data):
            new_email = form.newemail.data
            # The token embeds the new address so email_change() can apply it.
            token = current_user.generate_email_change_token(new_email)
            send_email(new_email, 'Update your Email Address', 'auth/email/change_email', user=current_user,
                       token=token)
            flash('A confirmation link has been sent to your new Email address')
            return redirect(url_for('auth.login'))
        else:
            flash('Invalid email or password')
    return render_template('auth/change_email.html', form=form)
@auth.route('/change_email/<token>')
@login_required
def email_change(token):
    """Apply a pending email-address change if the signed token is valid."""
    if current_user.change_email(token):
        db.session.commit()
        flash('New Email address has been updated')
    else:
        flash('Invalid request')
    return redirect(url_for('main.index'))
| 37.142857 | 112 | 0.673746 | from flask import render_template, redirect, request, url_for, flash
from flask_login import login_user, logout_user, login_required, current_user
from . import auth
from .. import db
from ..models import User
from .forms import LoginForm, RegistrationForm, ChangePasswordForm, ResetPassword, ResetPasswordRequest, \
ChangeEmailForm
from ..email import send_email
@auth.before_app_request
def before_request():
    """App-wide request hook: bump the user's last-seen timestamp and
    redirect unconfirmed accounts to the confirmation page (auth blueprint
    and static assets excepted)."""
    if current_user.is_authenticated:
        current_user.ping()  # refresh last_seen
        if not current_user.confirmed \
                and request.endpoint \
                and request.blueprint != 'auth' \
                and request.endpoint != 'static':
            return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
    """Render the 'confirm your account' page for unconfirmed users only."""
    needs_confirmation_page = not (current_user.is_anonymous or current_user.confirmed)
    if needs_confirmation_page:
        return render_template('auth/unconfirmed.html')
    return redirect(url_for('main.index'))
@auth.route('/login', methods=['GET', 'POST'])
def login():
    """Render and process the login form; on success follow a safe local
    `next` target or fall back to the home page."""
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user is not None and user.verify_password(form.password.data):
            login_user(user, form.remember_me.data)  # second argument: remember-me flag
            next = request.args.get('next')
            # Reject absolute URLs so the redirect cannot leave this site.
            if next is None or not next.startswith('/'):
                next = url_for('main.index')
            return redirect(next)
        flash('Invalid email or password.')
    return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
    """Terminate the session and bounce the user back to the home page."""
    destination = url_for('main.index')
    logout_user()
    flash('You have been logged out.')
    return redirect(destination)
@auth.route('/register', methods=['GET', 'POST'])
def register():
    """Create an account from the registration form, then email a one-hour
    confirmation token to the new user."""
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(email=form.email.data, username=form.username.data, password=form.password.data)
        db.session.add(user)
        # Commit first: token generation needs the persisted user id.
        db.session.commit()
        token = user.generate_confirmation_token(3600)
        send_email(user.email, 'Confirm your account', 'auth/email/confirm', user=user, token=token)
        flash('A confirmation email has been sent to you by email.')
        return redirect(url_for('auth.login'))
    return render_template('auth/register.html', form=form)
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
    """Check the confirmation token for the logged-in user and mark the
    account confirmed on success."""
    if current_user.confirmed:
        return redirect(url_for('main.index'))
    if current_user.confirm(token):
        db.session.commit()
        flash('Account has been confirmed.Thanks')
    else:
        flash('The confirmation link is invalid or has expired.')
    return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
    """Email a freshly generated one-hour confirmation token to the user."""
    token = current_user.generate_confirmation_token(3600)
    send_email(current_user.email, 'Confirm your account', 'auth/email/confirm', user=current_user, token=token)
    flash('A new confirmation email has been sent to you by email.')
    return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
    """Update the logged-in user's password after verifying the old one."""
    form = ChangePasswordForm()
    if form.validate_on_submit():
        if not current_user.verify_password(form.old_password.data):
            flash('Invalid password')
        else:
            current_user.password = form.new_password.data
            db.session.add(current_user)
            db.session.commit()
            flash('New password has been updated.')
            return redirect(url_for('main.index'))
    return render_template('auth/change_password.html', form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def reset_password_request():
    """Anonymous password-reset entry point: email a signed reset token to
    the submitted address when a matching account exists."""
    if not current_user.is_anonymous:
        # Logged-in users have the change-password view instead.
        return redirect(url_for('main.index'))
    form = ResetPasswordRequest()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user:
            token = user.generate_reset_token()
            send_email(user.email, 'Reset your password', 'auth/email/reset_password', user=user, token=token)
        # Same message whether or not the account exists, to avoid leaking
        # which addresses are registered.
        flash('An email with instructions to reset your password has been sent to you.')
        # Bug fix: url_for() expects an endpoint name ('auth.login'), not a
        # URL path ('auth/login'), which raises a routing BuildError.
        return redirect(url_for('auth.login'))
    return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
    """Finish a password reset using the signed token from the reset email."""
    if not current_user.is_anonymous:
        # Logged-in users who click an old link are sent home.
        return redirect(url_for('main.index'))
    form = ResetPassword()
    if form.validate_on_submit():
        if User.reset_password(token, form.reset_password.data):
            db.session.commit()
            flash('Password had been updated')
            return redirect(url_for('auth.login'))
        else:
            flash('Error Please reset your password again')
            return redirect(url_for('auth.login'))
    return render_template('auth/reset_password.html', form=form)
@auth.route('/change_email', methods=['GET', 'POST'])
@login_required
def change_email_request():
    """Collect a new email address (with password re-check) and email a
    confirmation link to that address."""
    form = ChangeEmailForm()
    if form.validate_on_submit():
        if current_user.verify_password(form.password.data):
            new_email = form.newemail.data
            # The token carries the new address for email_change() to apply.
            token = current_user.generate_email_change_token(new_email)
            send_email(new_email, 'Update your Email Address', 'auth/email/change_email', user=current_user,
                       token=token)
            flash('A confirmation link has been sent to your new Email address')
            return redirect(url_for('auth.login'))
        else:
            flash('Invalid email or password')
    return render_template('auth/change_email.html', form=form)
@auth.route('/change_email/<token>')
@login_required
def email_change(token):
    """Apply a pending email-address change when the signed token checks out."""
    token_accepted = current_user.change_email(token)
    if not token_accepted:
        flash('Invalid request')
    else:
        db.session.commit()
        flash('New Email address has been updated')
    return redirect(url_for('main.index'))
| true | true |
f710b29b9753ff4ea7a019d0d600cff9936b42f5 | 6,366 | py | Python | examples/table.py | gungnir888/transitfeed3 | 406e7ca3fe274521ef5dbf9277c729182b5183cb | [
"Apache-2.0"
] | null | null | null | examples/table.py | gungnir888/transitfeed3 | 406e7ca3fe274521ef5dbf9277c729182b5183cb | [
"Apache-2.0"
] | null | null | null | examples/table.py | gungnir888/transitfeed3 | 406e7ca3fe274521ef5dbf9277c729182b5183cb | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# An example script that demonstrates converting a proprietary format to a
# Google Transit Feed Specification file.
#
# You can load table.txt, the example input, in Excel. It contains three
# sections:
# 1) A list of global options, starting with a line containing the word
# 'options'. Each option has an name in the first column and most options
# have a value in the second column.
# 2) A table of stops, starting with a line containing the word 'stops'. Each
# row of the table has 3 columns: name, latitude, longitude
# 3) A list of routes. There is an empty row between each route. The first row
# for a route lists the short_name and long_name. After the first row the
# left-most column lists the stop names visited by the route. Each column
# contains the times a single trip visits the stops.
#
# This is very simple example which you could use as a base for your own
# transit feed builder.
import transitfeed
from optparse import OptionParser
import re
stops = {}
# table is a list of lists in this form
# [ ['Short Name', 'Long Name'],
# ['Stop 1', 'Stop 2', ...]
# [time_at_1, time_at_2, ...] # times for trip 1
# [time_at_1, time_at_2, ...] # times for trip 2
# ... ]
def add_route_to_schedule(schedule, table):
    """Add one bus route and its trips to `schedule`.

    `table` is already transposed: row 0 is (short_name, long_name), row 1
    the ordered stop names, and each later row the times of one trip.
    Stops are looked up in the module-level `stops` dict by lower-cased name.
    """
    if len(table) >= 2:
        r = schedule.add_route(short_name=table[0][0], long_name=table[0][1], route_type='Bus')
        for trip in table[2:]:
            if len(trip) > len(table[1]):
                # More time cells than stop names: drop the extras.
                print("ignoring %s" % trip[len(table[1]):])
                trip = trip[0:len(table[1])]
            t = r.add_trip(schedule, headsign='My headsign')
            trip_stops = []  # Build a list of (time, stopname) tuples
            for i in range(0, len(trip)):
                if re.search(r'\S', trip[i]):
                    # Blank cells mean the trip skips that stop.
                    trip_stops.append( (transitfeed.time_to_seconds_since_midnight(trip[i]), table[1][i]) )
            trip_stops.sort()  # Sort by time
            for (time, stopname) in trip_stops:
                t.add_stop_time(stop=stops[stopname.lower()], arrival_secs=time, departure_secs=time)
def transpose_table(table):
    """Transpose a list of lists, using None to extend all input lists to the
    same length.

    For example:
    >>> transpose_table(
    [ [11, 12, 13],
      [21, 22],
      [31, 32, 33, 34]])
    [ [11, 21, 31],
      [12, 22, 32],
      [13, None, 33],
      [None, None, 34]]
    """
    # Pad every row with None up to the longest row, then let zip(*...) do
    # the transpose; each resulting column tuple becomes an output list.
    width = max(len(row) for row in table)
    padded = [list(row) + [None] * (width - len(row)) for row in table]
    return [list(column) for column in zip(*padded)]
def process_options(schedule, table):
    """Apply the 'options' section of the input file to `schedule`.

    Each row after the header is (option_name, value); service-period rows
    configure the default calendar, agency_* rows set the default agency.
    """
    service_period = schedule.get_default_service_period()
    agency_name, agency_url, agency_timezone = (None, None, None)
    for row in table[1:]:
        command = row[0].lower()
        if command == 'weekday':
            service_period.set_weekday_service()
        elif command == 'start_date':
            service_period.set_start_date(row[1])
        elif command == 'end_date':
            service_period.set_end_date(row[1])
        elif command == 'add_date':
            service_period.set_date_has_service(date=row[1])
        elif command == 'remove_date':
            service_period.set_date_has_service(date=row[1], has_service=False)
        elif command == 'agency_name':
            agency_name = row[1]
        elif command == 'agency_url':
            agency_url = row[1]
        elif command == 'agency_timezone':
            agency_timezone = row[1]
    # Warn (but still proceed) when any agency field is missing.
    if not (agency_name and agency_url and agency_timezone):
        print("You must provide agency information")
    schedule.new_default_agency(agency_name=agency_name, agency_url=agency_url, agency_timezone=agency_timezone)
def add_stops(schedule, table):
    """Add each (name, lat, lng) row to `schedule` and cache the stop in the
    module-level `stops` dict keyed by lower-cased name."""
    for name, lat_str, lng_str in table[1:]:
        stop = schedule.add_stop(lat=float(lat_str), lng=float(lng_str), name=name)
        stops[name.lower()] = stop
def process_table(schedule, table):
    """Dispatch one parsed input table to the right handler.

    The first cell selects the section: 'options', 'stops', or (otherwise)
    a route table whose trip columns are transposed before being added.
    """
    if table[0][0].lower() == 'options':
        process_options(schedule, table)
    elif table[0][0].lower() == 'stops':
        add_stops(schedule, table)
    else:
        transposed = [table[0]]  # Keep route_short_name and route_long_name on first row
        # Transpose rest of table. Input contains the stop names in table[x][0], x
        # >= 1 with trips found in columns, so we need to transpose table[1:].
        # As a diagram Transpose from
        # [['stop 1', '10:00', '11:00', '12:00'],
        #  ['stop 2', '10:10', '11:10', '12:10'],
        #  ['stop 3', '10:20', '11:20', '12:20']]
        # to
        # [['stop 1', 'stop 2', 'stop 3'],
        #  ['10:00', '10:10', '10:20'],
        #  ['11:00', '11:11', '11:20'],
        #  ['12:00', '12:12', '12:20']]
        transposed.extend(transpose_table(table[1:]))
        add_route_to_schedule(schedule, transposed)
def main():
    """Parse command-line options, read the tab-separated input file and
    write the resulting GTFS feed.

    Tables in the input are separated by blank lines; each table is buffered
    and handed to process_table() when complete.
    """
    parser = OptionParser()
    parser.add_option('--input', dest='input',
                      help='Path of input file')
    parser.add_option('--output', dest='output',
                      help='Path of output file, should end in .zip')
    parser.set_defaults(output='feed.zip')
    (options, args) = parser.parse_args()
    schedule = transitfeed.Schedule()
    table = []
    # Fix: use a context manager so the input file handle is always closed;
    # the original `for line in open(...)` leaked it until interpreter exit.
    with open(options.input) as input_file:
        for line in input_file:
            line = line.rstrip()
            if not line:
                process_table(schedule, table)
                table = []
            else:
                table.append(line.split('\t'))
    # The file usually does not end with a blank line, so flush the last table.
    process_table(schedule, table)
    schedule.write_google_transit_feed(options.output)
# Run the converter only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
| 35.966102 | 112 | 0.617499 |
import transitfeed
from optparse import OptionParser
import re
stops = {}
ule, table):
if len(table) >= 2:
r = schedule.add_route(short_name=table[0][0], long_name=table[0][1], route_type='Bus')
for trip in table[2:]:
if len(trip) > len(table[1]):
print("ignoring %s" % trip[len(table[1]):])
trip = trip[0:len(table[1])]
t = r.add_trip(schedule, headsign='My headsign')
trip_stops = []
for i in range(0, len(trip)):
if re.search(r'\S', trip[i]):
trip_stops.append( (transitfeed.time_to_seconds_since_midnight(trip[i]), table[1][i]) )
trip_stops.sort()
for (time, stopname) in trip_stops:
t.add_stop_time(stop=stops[stopname.lower()], arrival_secs=time, departure_secs=time)
def transpose_table(table):
  """Transpose *table* (a list of rows), padding ragged rows with None.

  The output has one row per input column (up to the longest input row) and
  one column per input row; positions beyond the end of a short input row
  are filled with None.

  Fix over the original: an empty *table* now returns [] instead of raising
  ValueError from ``max()`` on an empty sequence (process_table can pass an
  empty body when a route table has only its name row).
  """
  if not table:
    return []
  # zip_longest pads exhausted rows with None, matching the original
  # hand-rolled padding exactly.
  from itertools import zip_longest  # local import keeps this edit self-contained
  return [list(column) for column in zip_longest(*table)]
def process_options(schedule, table):
  """Apply an 'options' table: service-period commands plus agency info.

  Each data row is (command, value).  Unrecognized commands are silently
  ignored.
  """
  service_period = schedule.get_default_service_period()
  agency_name, agency_url, agency_timezone = (None, None, None)
  for row in table[1:]:
    command = row[0].lower()
    if command == 'weekday':
      service_period.set_weekday_service()
    elif command == 'start_date':
      service_period.set_start_date(row[1])
    elif command == 'end_date':
      service_period.set_end_date(row[1])
    elif command == 'add_date':
      service_period.set_date_has_service(date=row[1])
    elif command == 'remove_date':
      service_period.set_date_has_service(date=row[1], has_service=False)
    elif command == 'agency_name':
      agency_name = row[1]
    elif command == 'agency_url':
      agency_url = row[1]
    elif command == 'agency_timezone':
      agency_timezone = row[1]
  # NOTE(review): this only *warns* when agency info is incomplete and then
  # still creates the default agency with None fields -- confirm intended.
  if not (agency_name and agency_url and agency_timezone):
    print("You must provide agency information")
  schedule.new_default_agency(agency_name=agency_name, agency_url=agency_url, agency_timezone=agency_timezone)
def add_stops(schedule, table):
  """Add each (name, lat, lng) data row as a stop; cache in global ``stops``."""
  for name, lat_str, lng_str in table[1:]:
    stop = schedule.add_stop(lat=float(lat_str), lng=float(lng_str), name=name)
    stops[name.lower()] = stop
def process_table(schedule, table):
  """Dispatch a parsed table to the options/stops/route handler."""
  if table[0][0].lower() == 'options':
    process_options(schedule, table)
  elif table[0][0].lower() == 'stops':
    add_stops(schedule, table)
  else:
    # Route table: keep the name row, transpose the stop-major body so each
    # subsequent row holds one trip's times.
    transposed = [table[0]]
    transposed.extend(transpose_table(table[1:]))
    add_route_to_schedule(schedule, transposed)
def main():
  """Read the tab-separated input file, build a Schedule, write the feed.

  Tables within the input file are separated by blank lines.
  """
  parser = OptionParser()
  parser.add_option('--input', dest='input',
                    help='Path of input file')
  parser.add_option('--output', dest='output',
                    help='Path of output file, should end in .zip')
  parser.set_defaults(output='feed.zip')
  (options, args) = parser.parse_args()
  schedule = transitfeed.Schedule()
  table = []
  # NOTE(review): the file handle is never closed, and an empty table (from
  # consecutive or trailing blank lines) reaches process_table, which would
  # raise IndexError on table[0].
  for line in open(options.input):
    line = line.rstrip()
    if not line:
      process_table(schedule, table)
      table = []
    else:
      table.append(line.split('\t'))
  process_table(schedule, table)
  schedule.write_google_transit_feed(options.output)
if __name__ == '__main__':
main()
| true | true |
f710b3d9f778c9716dcab7db75b7a4bc66a1cc43 | 1,565 | py | Python | fairseq/data/fairseq_dataset.py | nadongguri/fairseq | b651b000033fd8ff51d1c3bea76f4fd1897bdf9c | [
"MIT"
] | 50 | 2021-11-15T02:34:43.000Z | 2021-11-18T07:24:46.000Z | codes_src/fairseq/data/fairseq_dataset.py | yujun531/WeTS | bba33ad64e10efd7d3d95b5a0b6ad125216542cf | [
"Unlicense"
] | null | null | null | codes_src/fairseq/data/fairseq_dataset.py | yujun531/WeTS | bba33ad64e10efd7d3d95b5a0b6ad125216542cf | [
"Unlicense"
] | 26 | 2021-11-15T02:35:14.000Z | 2021-11-15T08:25:42.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch.utils.data
class FairseqDataset(torch.utils.data.Dataset):
    """Abstract dataset base class exposing the hooks fairseq needs for batching."""

    def __getitem__(self, index):
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError

    def collater(self, samples):
        """Merge a list of samples into a mini-batch.

        Args:
            samples (List[dict]): samples to collate

        Returns:
            dict: a mini-batch suitable for forwarding with a Model
        """
        raise NotImplementedError

    def num_tokens(self, index):
        """Number of tokens in example *index*; used to enforce ``--max-tokens``
        during batching."""
        raise NotImplementedError

    def size(self, index):
        """Example size (float or tuple) used when filtering with
        ``--max-positions``."""
        raise NotImplementedError

    def ordered_indices(self):
        """Ordered list of indices; batches are constructed in this order."""
        return np.arange(len(self))

    @property
    def supports_prefetch(self):
        """Whether this dataset supports prefetching."""
        return False

    def prefetch(self, indices):
        """Prefetch the data required for this epoch."""
        raise NotImplementedError
| 29.528302 | 80 | 0.654313 |
import numpy as np
import torch.utils.data
class FairseqDataset(torch.utils.data.Dataset):
    """A dataset that provides helpers for batching."""
    def __getitem__(self, index):
        raise NotImplementedError
    def __len__(self):
        raise NotImplementedError
    def collater(self, samples):
        """Merge a list of sample dicts into one mini-batch dict."""
        raise NotImplementedError
    def num_tokens(self, index):
        # Used to enforce --max-tokens during batching.
        raise NotImplementedError
    def size(self, index):
        # Used when filtering a dataset with --max-positions.
        raise NotImplementedError
    def ordered_indices(self):
        # Batches are constructed following this index order.
        return np.arange(len(self))
    @property
    def supports_prefetch(self):
        # Subclasses that implement prefetch() should override to True.
        return False
    def prefetch(self, indices):
        """Prefetch the data required for this epoch."""
        raise NotImplementedError
| true | true |
f710b53acc9fd1364a5a8782c79a63384e6720e2 | 2,158 | py | Python | aptronics/bundling.py | agritheory/aptronics | 0a40ae3bf787fc3a1525ae3556ea6dca0ca31408 | [
"MIT"
] | null | null | null | aptronics/bundling.py | agritheory/aptronics | 0a40ae3bf787fc3a1525ae3556ea6dca0ca31408 | [
"MIT"
] | 34 | 2019-09-28T15:04:32.000Z | 2020-02-26T11:11:20.000Z | aptronics/bundling.py | agritheory/aptronics | 0a40ae3bf787fc3a1525ae3556ea6dca0ca31408 | [
"MIT"
] | 2 | 2016-02-17T16:39:55.000Z | 2019-10-15T21:11:51.000Z | import frappe
from frappe.utils import flt
def merge_bundled_items(self, method):
	"""Frappe doc_events hook: collapse each bundle of items into one line.

	Items between rows marked bsbt == 'Bundle Start' and 'Bundle Terminate'
	are aggregated onto the bundle's first row; the constituent rows are then
	removed from self.items and the remaining rows re-indexed.  *method* is
	the hook event name (unused here).
	"""
	bundles = {}
	item_meta = frappe.get_meta(self.doctype + " Item")
	count = 0
	# qty/stock_qty are copied from the bundle's first row; the others summed.
	copy_fields = ['qty', 'stock_qty']
	sum_fields = ['total_weight', 'amount', 'net_amount']
	rate_fields = [('rate', 'amount'), ('net_rate', 'net_amount'), ('weight_per_unit', 'total_weight')]
	# Only include base_* fields that actually exist on this child doctype.
	base_fields = [('base_' + f, f) for f in sum_fields if item_meta.has_field('base_' + f)]
	base_fields += [('base_' + f, f) for f in copy_fields if item_meta.has_field('base_' + f)]
	base_fields += [('base_' + t, t) for t, s in rate_fields if item_meta.has_field('base_' + t)]
	# Pass 1: accumulate sums and serial numbers per bundle.  Rows outside a
	# bundle become single-row "bundles" keyed by their own idx.
	in_bundle = 0
	for item in self.items:
		if item.bsbt == 'Bundle Start':
			in_bundle = item.idx
		if not in_bundle or item.bsbt == 'Bundle Start':
			new_bundle = frappe._dict()
			for f in copy_fields:
				new_bundle[f] = item.get(f)
			bundles[item.idx] = new_bundle
		group_item = bundles[in_bundle or item.idx]
		if item.bsbt == 'Bundle Terminate':
			in_bundle = 0
		for f in sum_fields:
			group_item[f] = group_item.get(f, 0) + flt(item.get(f))
		group_item_serial_nos = group_item.setdefault('serial_no', [])
		if item.get('serial_no'):
			# Drop blank lines from the newline-separated serial list.
			group_item_serial_nos += filter(lambda s: s, item.serial_no.split('\n'))
	# Pass 2: derive average rates from the summed amounts and join serials.
	for group_item in bundles.values():
		if group_item.qty:
			for target, source in rate_fields:
				group_item[target] = flt(group_item[source]) / flt(group_item.qty)
		else:
			for target, source in rate_fields:
				group_item[target] = 0
		group_item.serial_no = '\n'.join(group_item.serial_no)
	# Pass 3: convert aggregates to company currency.
	for group_item in bundles.values():
		for target, source in base_fields:
			group_item[target] = group_item.get(source, 0) * self.conversion_rate
	# Pass 4: write aggregates onto each bundle's lead row, collect the other
	# rows for removal, and renumber the survivors sequentially.
	to_remove = []
	for item in self.items:
		if item.idx in bundles.keys():
			count += 1
			item.update(bundles[item.idx])
			del bundles[item.idx]
			item.idx = count
		else:
			to_remove.append(item)
	for item in to_remove:
		self.remove(item)
self.total_qty = sum([d.qty for d in self.items]) | 30.394366 | 100 | 0.694161 | import frappe
from frappe.utils import flt
def merge_bundled_items(self, method):
bundles = {}
item_meta = frappe.get_meta(self.doctype + " Item")
count = 0
copy_fields = ['qty', 'stock_qty']
sum_fields = ['total_weight', 'amount', 'net_amount']
rate_fields = [('rate', 'amount'), ('net_rate', 'net_amount'), ('weight_per_unit', 'total_weight')]
base_fields = [('base_' + f, f) for f in sum_fields if item_meta.has_field('base_' + f)]
base_fields += [('base_' + f, f) for f in copy_fields if item_meta.has_field('base_' + f)]
base_fields += [('base_' + t, t) for t, s in rate_fields if item_meta.has_field('base_' + t)]
in_bundle = 0
for item in self.items:
if item.bsbt == 'Bundle Start':
in_bundle = item.idx
if not in_bundle or item.bsbt == 'Bundle Start':
new_bundle = frappe._dict()
for f in copy_fields:
new_bundle[f] = item.get(f)
bundles[item.idx] = new_bundle
group_item = bundles[in_bundle or item.idx]
if item.bsbt == 'Bundle Terminate':
in_bundle = 0
for f in sum_fields:
group_item[f] = group_item.get(f, 0) + flt(item.get(f))
group_item_serial_nos = group_item.setdefault('serial_no', [])
if item.get('serial_no'):
group_item_serial_nos += filter(lambda s: s, item.serial_no.split('\n'))
for group_item in bundles.values():
if group_item.qty:
for target, source in rate_fields:
group_item[target] = flt(group_item[source]) / flt(group_item.qty)
else:
for target, source in rate_fields:
group_item[target] = 0
group_item.serial_no = '\n'.join(group_item.serial_no)
for group_item in bundles.values():
for target, source in base_fields:
group_item[target] = group_item.get(source, 0) * self.conversion_rate
to_remove = []
for item in self.items:
if item.idx in bundles.keys():
count += 1
item.update(bundles[item.idx])
del bundles[item.idx]
item.idx = count
else:
to_remove.append(item)
for item in to_remove:
self.remove(item)
self.total_qty = sum([d.qty for d in self.items]) | true | true |
f710b5892d3ceb61675ec956d63092918bac41e6 | 4,736 | py | Python | src/sentry/interfaces/contexts.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | 1 | 2019-10-17T17:46:16.000Z | 2019-10-17T17:46:16.000Z | src/sentry/interfaces/contexts.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/interfaces/contexts.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import six
import string
from django.utils.encoding import force_text
from sentry.interfaces.base import Interface
from sentry.utils.json import prune_empty_keys
from sentry.utils.safe import get_path, trim
__all__ = ("Contexts",)
context_types = {}
class _IndexFormatter(string.Formatter):
def format_field(self, value, format_spec):
if not format_spec and isinstance(value, bool):
return value and "yes" or "no"
return string.Formatter.format_field(self, value, format_spec)
def format_index_expr(format_string, data):
    """Render *format_string* against the mapping *data*, stripped of
    surrounding whitespace.

    Uses _IndexFormatter so boolean fields render as 'yes'/'no'.  Raises
    KeyError when a referenced field is missing from *data*.
    """
    formatter = _IndexFormatter()
    rendered = formatter.vformat(six.text_type(format_string), (), data)
    return six.text_type(rendered.strip())
def contexttype(cls):
    """Class decorator: register *cls* in ``context_types`` under its ``type`` key."""
    context_types.update({cls.type: cls})
    return cls
class ContextType(object):
    """Base class for one normalized event context (device, os, browser, ...).

    Subclasses set ``type`` (the registry key used by the @contexttype
    decorator) and optionally ``indexed_fields`` (tag suffix -> format
    string, used by iter_tags to derive searchable tags).
    """
    indexed_fields = None
    type = None
    def __init__(self, alias, data):
        """Store trimmed *data* under *alias*, dropping None/empty values."""
        self.alias = alias
        ctx_data = {}
        for key, value in six.iteritems(trim(data)):
            # we use simple checks here, rather than ' in set()' to avoid
            # issues with maps/lists
            if value is not None and value != "":
                ctx_data[force_text(key)] = value
        self.data = ctx_data
    def to_json(self):
        """Serialize the context data, re-attaching the ``type`` discriminator."""
        rv = dict(self.data)
        rv["type"] = self.type
        return prune_empty_keys(rv)
    @classmethod
    def values_for_data(cls, data):
        """Return every context dict in *data* whose ``type`` matches this class."""
        rv = []
        for context in six.itervalues(data.get("contexts") or {}):
            if context and context.get("type") == cls.type:
                rv.append(context)
        return rv
    @classmethod
    def primary_value_for_data(cls, data):
        """Return the canonical context of this type from *data*, or None.

        Prefers the context stored under the alias equal to ``cls.type``;
        otherwise a matching context is returned only when unambiguous
        (exactly one present).
        """
        val = get_path(data, "contexts", cls.type)
        if val and val.get("type") == cls.type:
            return val
        rv = cls.values_for_data(data)
        if len(rv) == 1:
            return rv[0]
    def iter_tags(self):
        """Yield (tag_key, value) pairs derived from ``indexed_fields``.

        Format strings referencing keys missing from the data are skipped
        (format_index_expr raises KeyError).  An empty field name tags under
        the bare alias, otherwise under ``alias.field``.
        """
        if self.indexed_fields:
            for field, f_string in six.iteritems(self.indexed_fields):
                try:
                    value = format_index_expr(f_string, self.data)
                except KeyError:
                    continue
                if value:
                    if not field:
                        yield (self.alias, value)
                    else:
                        yield ("%s.%s" % (self.alias, field), value)
# TODO(dcramer): contexts need to document/describe expected (optional) fields
# Concrete registered context types.  Each only declares its registry key and
# which data fields get promoted to searchable tags via iter_tags().
@contexttype
class DefaultContextType(ContextType):
    # Fallback for contexts whose "type" is not otherwise registered.
    type = "default"
@contexttype
class AppContextType(ContextType):
    type = "app"
    indexed_fields = {"device": u"{device_app_hash}"}
@contexttype
class DeviceContextType(ContextType):
    type = "device"
    indexed_fields = {"": u"{model}", "family": u"{family}"}
    # model_id, arch
@contexttype
class RuntimeContextType(ContextType):
    type = "runtime"
    indexed_fields = {"": u"{name} {version}", "name": u"{name}"}
@contexttype
class BrowserContextType(ContextType):
    type = "browser"
    indexed_fields = {"": u"{name} {version}", "name": u"{name}"}
    # viewport
@contexttype
class OsContextType(ContextType):
    type = "os"
    indexed_fields = {"": u"{name} {version}", "name": u"{name}", "rooted": u"{rooted}"}
    # build, rooted
@contexttype
class GpuContextType(ContextType):
    type = "gpu"
    indexed_fields = {"name": u"{name}", "vendor": u"{vendor_name}"}
@contexttype
class MonitorContextType(ContextType):
    type = "monitor"
    indexed_fields = {"id": u"{id}"}
@contexttype
class TraceContextType(ContextType):
    type = "trace"
    indexed_fields = {"": u"{trace_id}", "span": u"{span_id}", "ctx": u"{trace_id}-{span_id}"}
class Contexts(Interface):
    """
    This interface stores context specific information.
    """
    display_score = 1100
    score = 800
    @classmethod
    def to_python(cls, data):
        """Build a Contexts interface from raw event data, normalizing each
        alias -> context dict into a registered ContextType instance."""
        rv = {}
        for alias, value in six.iteritems(data):
            # XXX(markus): The `None`-case should be handled in the UI and
            # other consumers of this interface
            if value is not None:
                rv[alias] = cls.normalize_context(alias, value)
        return cls(**rv)
    @classmethod
    def normalize_context(cls, alias, data):
        """Wrap *data* in the ContextType registered for its "type" key.

        The type defaults to the alias itself; unregistered types fall back
        to DefaultContextType.
        """
        ctx_type = data.get("type", alias)
        ctx_cls = context_types.get(ctx_type, DefaultContextType)
        return ctx_cls(alias, data)
    def iter_contexts(self):
        # ContextType instances keyed by alias.
        return six.itervalues(self._data)
    def to_json(self):
        """Serialize each context back to a plain dict keyed by alias."""
        rv = {}
        for alias, inst in six.iteritems(self._data):
            rv[alias] = inst.to_json()
        return rv
    def iter_tags(self):
        """Yield searchable (key, value) tags contributed by every context."""
        for inst in self.iter_contexts():
            for tag in inst.iter_tags():
                yield tag
| 26.606742 | 99 | 0.611275 | from __future__ import absolute_import
import six
import string
from django.utils.encoding import force_text
from sentry.interfaces.base import Interface
from sentry.utils.json import prune_empty_keys
from sentry.utils.safe import get_path, trim
__all__ = ("Contexts",)
context_types = {}
class _IndexFormatter(string.Formatter):
def format_field(self, value, format_spec):
if not format_spec and isinstance(value, bool):
return value and "yes" or "no"
return string.Formatter.format_field(self, value, format_spec)
def format_index_expr(format_string, data):
return six.text_type(_IndexFormatter().vformat(six.text_type(format_string), (), data).strip())
def contexttype(cls):
context_types[cls.type] = cls
return cls
class ContextType(object):
indexed_fields = None
type = None
def __init__(self, alias, data):
self.alias = alias
ctx_data = {}
for key, value in six.iteritems(trim(data)):
if value is not None and value != "":
ctx_data[force_text(key)] = value
self.data = ctx_data
def to_json(self):
rv = dict(self.data)
rv["type"] = self.type
return prune_empty_keys(rv)
@classmethod
def values_for_data(cls, data):
rv = []
for context in six.itervalues(data.get("contexts") or {}):
if context and context.get("type") == cls.type:
rv.append(context)
return rv
@classmethod
def primary_value_for_data(cls, data):
val = get_path(data, "contexts", cls.type)
if val and val.get("type") == cls.type:
return val
rv = cls.values_for_data(data)
if len(rv) == 1:
return rv[0]
def iter_tags(self):
if self.indexed_fields:
for field, f_string in six.iteritems(self.indexed_fields):
try:
value = format_index_expr(f_string, self.data)
except KeyError:
continue
if value:
if not field:
yield (self.alias, value)
else:
yield ("%s.%s" % (self.alias, field), value)
@contexttype
class DefaultContextType(ContextType):
type = "default"
@contexttype
class AppContextType(ContextType):
type = "app"
indexed_fields = {"device": u"{device_app_hash}"}
@contexttype
class DeviceContextType(ContextType):
type = "device"
indexed_fields = {"": u"{model}", "family": u"{family}"}
@contexttype
class RuntimeContextType(ContextType):
type = "runtime"
indexed_fields = {"": u"{name} {version}", "name": u"{name}"}
@contexttype
class BrowserContextType(ContextType):
type = "browser"
indexed_fields = {"": u"{name} {version}", "name": u"{name}"}
@contexttype
class OsContextType(ContextType):
type = "os"
indexed_fields = {"": u"{name} {version}", "name": u"{name}", "rooted": u"{rooted}"}
@contexttype
class GpuContextType(ContextType):
type = "gpu"
indexed_fields = {"name": u"{name}", "vendor": u"{vendor_name}"}
@contexttype
class MonitorContextType(ContextType):
type = "monitor"
indexed_fields = {"id": u"{id}"}
@contexttype
class TraceContextType(ContextType):
type = "trace"
indexed_fields = {"": u"{trace_id}", "span": u"{span_id}", "ctx": u"{trace_id}-{span_id}"}
class Contexts(Interface):
display_score = 1100
score = 800
@classmethod
def to_python(cls, data):
rv = {}
for alias, value in six.iteritems(data):
if value is not None:
rv[alias] = cls.normalize_context(alias, value)
return cls(**rv)
@classmethod
def normalize_context(cls, alias, data):
ctx_type = data.get("type", alias)
ctx_cls = context_types.get(ctx_type, DefaultContextType)
return ctx_cls(alias, data)
def iter_contexts(self):
return six.itervalues(self._data)
def to_json(self):
rv = {}
for alias, inst in six.iteritems(self._data):
rv[alias] = inst.to_json()
return rv
def iter_tags(self):
for inst in self.iter_contexts():
for tag in inst.iter_tags():
yield tag
| true | true |
f710b5ffbebe49e837f19c94522a3272a6027073 | 1,784 | py | Python | Python3/79.word-search.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | Python3/79.word-search.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | Python3/79.word-search.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=79 lang=python3
#
# [79] Word Search
#
# @lc code=start
class Solution:
    def exist(self, board, word):
        """Return True if *word* can be traced through orthogonally adjacent,
        non-repeating cells of *board* (LeetCode 79)."""
        rows, cols = len(board), len(board[0])
        visited = [[0] * cols for _ in range(rows)]
        for r in range(rows):
            for c in range(cols):
                if board[r][c] != word[0]:
                    continue
                visited[r][c] = 1
                if self.helper(word[1:], board, visited, [r, c]):
                    return True
                visited[r][c] = 0  # backtrack: free the starting cell
        return False

    def helper(self, rest, board, walked, current_pos):
        """Backtracking DFS: try to match *rest* from the neighbours of
        current_pos, using *walked* to forbid revisiting cells."""
        if not rest:
            return True
        r, c = current_pos
        # Same neighbour order as the unrolled up/down/left/right checks.
        for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
            if (0 <= nr < len(board) and 0 <= nc < len(board[0])
                    and walked[nr][nc] == 0 and board[nr][nc] == rest[0]):
                walked[nr][nc] = 1
                if self.helper(rest[1:], board, walked, [nr, nc]):
                    return True
                walked[nr][nc] = 0  # backtrack
        return False
# @lc code=end
| 30.758621 | 90 | 0.421525 |
class Solution:
    # LeetCode 79 Word Search: DFS with backtracking over a 2-D letter grid.
    def exist(self, board, word):
        """Return True if *word* can be traced through orthogonally adjacent,
        non-repeating cells of *board*."""
        start = [None, None]
        h = len(board)
        l = len(board[0])
        # walked[i][j] == 1 marks a cell already used on the current path.
        walked = [[0] * l for _ in range(h)]
        for i in range(h):
            for j in range(l):
                if board[i][j] == word[0]:
                    start = [i, j]
                    walked[i][j] = 1
                    if self.helper(word[1:], board, walked, start):
                        return True
                    walked[i][j] = 0  # backtrack: free the starting cell
        return False
    def helper(self, rest, board, walked, current_pos):
        """Try to match *rest* starting from the four neighbours (up, down,
        left, right) of current_pos; undo markings on failure."""
        if len(rest) == 0:
            return True
        i = current_pos[0]
        j = current_pos[1]
        # Up
        if i > 0 and board[i - 1][j] == rest[0] and walked[i - 1][j] == 0:
            walked[i - 1][j] = 1
            if self.helper(rest[1:], board, walked, [i - 1, j]):
                return True
            walked[i - 1][j] = 0
        # Down
        if i < len(board) - 1 and board[i + 1][j] == rest[0] and walked[i + 1][j] == 0:
            walked[i + 1][j] = 1
            if self.helper(rest[1:], board, walked, [i + 1, j]):
                return True
            walked[i + 1][j] = 0
        # Left
        if j > 0 and board[i][j - 1] == rest[0] and walked[i][j - 1] == 0:
            walked[i][j - 1] = 1
            if self.helper(rest[1:], board, walked, [i, j - 1]):
                return True
            walked[i][j - 1] = 0
        # Right
        if j < len(board[0]) - 1 and board[i][j + 1] == rest[0] and walked[i][j + 1] == 0:
            walked[i][j + 1] = 1
            if self.helper(rest[1:], board, walked, [i, j + 1]):
                return True
            walked[i][j + 1] = 0
        return False
| true | true |
f710b63ba31a89c01f4bf06cfb94875dfffd398e | 6,376 | py | Python | fragment.py | soumitrasamanta/FragGenie | 9ce493d88e3479a286ce88dc0c5b199ea7c7e441 | [
"MIT"
] | 1 | 2021-07-08T15:29:53.000Z | 2021-07-08T15:29:53.000Z | fragment.py | soumitrasamanta/FragGenie | 9ce493d88e3479a286ce88dc0c5b199ea7c7e441 | [
"MIT"
] | null | null | null | fragment.py | soumitrasamanta/FragGenie | 9ce493d88e3479a286ce88dc0c5b199ea7c7e441 | [
"MIT"
] | null | null | null | """
-----------------------------------------------------------------------------
AUTHOR: Soumitra Samanta (soumitramath39@gmail.com)
-----------------------------------------------------------------------------
"""
import subprocess
import os
import numpy as np
from datetime import datetime
import pandas as pd
from rdkit import Chem
from rdkit.Chem import Descriptors
__all__ = [
'FragGenie'
]
class FragGenie():
    """Thin Python wrapper around the FragGenie ``fragment.sh`` shell tool.

    Writes SMILES to a CSV, shells out to the Java fragmenter, and reads the
    predicted fragment masses back from the output CSV.
    """
    def __init__(self, dir_fraggenie=''):
        # Directory containing fragment.sh; '' means the current directory.
        self.dir_fraggenie = dir_fraggenie
    def to_numpy(self, array_str, sep=','):
        """Parse a bracketed '[m1,m2,...]' string into a 1-D float array."""
        return np.fromstring(array_str[1:-1], sep=sep)
    def create_folder(self, folder_name):
        """Create *folder_name* (and parents) if non-empty and missing."""
        if len(folder_name):
            if not os.path.isdir(folder_name):
                os.makedirs(folder_name)
        return folder_name
    def mol_prop_mass(self, smiles):
        """
        Exact molecular mass (via RDKit) for each SMILES in *smiles*.
        """
        return [Descriptors.ExactMolWt(Chem.MolFromSmiles(sm)) for sm in smiles]
    def smiles2fraggenie_csv(
        self,
        input_path='',
        input_filename='test_input.csv',
        smiles_col='smiles',
        output_path='',
        output_filename='',
        num_bonds_to_break=3,
        min_fragment_mass=50,
        max_smiles_len=250,
        max_num_smiles=1000000000,
        flag_display='true',
        masses_option='METFRAG_MZ'
    ):
        """Run FragGenie on a CSV of SMILES; returns (output_path,
        output_filename, bash_cmd).  The heavy lifting happens in
        fragment.sh, invoked via the shell."""
        if(len(output_path)==0):
            output_path = input_path
        if(len(output_filename)==0):
            # Timestamp + random suffix keeps concurrent runs from colliding.
            output_filename = ''.join([
                'fraggenie_', datetime.today().strftime('%d%m%Y%H%M%S'),
                '_', str(np.random.random(1)[0])[2:],
                '_nbonds_', str(num_bonds_to_break),
                '_frgms_', str(min_fragment_mass),
                '_smlen_', str(max_smiles_len),
                '_', input_filename
            ])
        # NOTE(review): arguments are space-joined and run with shell=True, so
        # paths containing spaces or shell metacharacters will break/misbehave.
        bash_cmd = ''.join([
            'bash ', self.dir_fraggenie,
            'fragment.sh ',
            input_path,
            input_filename,
            ' ', output_path,
            output_filename,
            ' ', smiles_col,
            ' ', str(num_bonds_to_break),
            ' ', str(min_fragment_mass),
            ' ', str(max_smiles_len),
            ' ', str(max_num_smiles),
            ' ', flag_display,
            ' ', masses_option
        ])
        subprocess.call(bash_cmd, shell=True)
        return output_path, output_filename, bash_cmd
    def smiles2fraggenie(
        self,
        smiles,
        num_bonds_to_break=3,
        min_fragment_mass=50,
        max_smiles_len=250,
        max_num_smiles=1000000000,
        flag_display='true',
        masses_option='METFRAG_MZ',
        input_path='dump/',
        input_filename='',
        massspec_sep=',',
        fill_non_break_mol=1,
        flag_del_temp_file=1,
        verbose=0
    ):
        """Fragment a list of SMILES strings; returns one mass array per
        input SMILES (in input order).  Temporary CSVs are written under
        *input_path* and deleted afterwards unless flag_del_temp_file is 0."""
        input_path = self.create_folder(input_path)
        if len(input_filename)==0:
            input_filename = ''.join(['smiles_', datetime.today().strftime('%d%m%Y%H%M%S'),
                                      '_', str(np.random.random(1)[0])[2:],
                                      '.csv'
                                     ])
        pd.DataFrame.from_dict({'smiles':smiles}).to_csv(''.join([input_path, input_filename]), index=False)
        output_path, output_filename, bash_cmd = self.smiles2fraggenie_csv(
            input_path=input_path,
            input_filename=input_filename,
            num_bonds_to_break=num_bonds_to_break,
            min_fragment_mass=min_fragment_mass,
            max_smiles_len=max_smiles_len,
            max_num_smiles=max_num_smiles,
            flag_display=flag_display,
            masses_option=masses_option
        )
        df_smiles = pd.read_csv(output_path+output_filename)
        # handle very small molecules which is unable to break into fraggenie
        # (fill with mol mass) or unbreakable molecules
        if fill_non_break_mol:
            fraggenie = [None]*len(smiles)
            fraggenie_smiles = df_smiles['smiles'].tolist()
            count1 = 0
            count2 = 0
            for i, sm in enumerate(smiles):
                try:
                    # NOTE(review): list.index() inside this loop is O(n^2)
                    # over the dataset, and the bare except also hides real
                    # errors -- it presumably targets sm missing from the
                    # FragGenie output.
                    fraggenie[i] = self.to_numpy(df_smiles[masses_option][fraggenie_smiles.index(sm)], sep=massspec_sep)
                    if len(fraggenie[i])==0:
                        if verbose:
                            print('Unable to break molecules: {}-{}' .format(i, smiles[i]))
                        # Fall back to the whole-molecule exact mass.
                        fraggenie[i] = np.asarray([self.mol_prop_mass([smiles[i]])[0]])
                        count1 += 1
                except:
                    if verbose:
                        print('Unable to break molecules: {}-{}' .format(i, smiles[i]))
                    fraggenie[i] = np.asarray([self.mol_prop_mass([smiles[i]])[0]])
                    count2 += 1
            print('Total number of unbreakable molecules: {} (empty-{}, not all-{})' .format(count1+count2, count1, count2))
        else:
            fraggenie = df_smiles[masses_option].apply(self.to_numpy, sep=massspec_sep).tolist()
        if flag_del_temp_file:
            # Clean up both the temporary input and output CSVs.
            filename = ''.join([input_path, input_filename])
            if os.path.isfile(filename):
                if verbose:
                    print('Removing "{}"' .format(filename))
                os.remove(filename)
            filename = ''.join([output_path, output_filename])
            if os.path.isfile(filename):
                if verbose:
                    print('Removing "{}"' .format(filename))
                os.remove(filename)
        return fraggenie
if __name__ == '__main__':
    # Smoke test: run the CSV entry point, then fragment a small SMILES list
    # (several entries presumably exercise the unbreakable-molecule fallback).
    fraggenie = FragGenie()
    output_path, output_filename, bash_cmd = fraggenie.smiles2fraggenie_csv(output_filename='fraggenie_test_input.csv')
    smiles = ['Cn1cnc2n(C)c(=O)n(C)c(=O)c12',
              'BrC1CCCCc1CC',
              'C#1C#CC1',
              'C#1C#CCcCCCc1',
              'C#1CCCCCCC=1',
              'C#1CCcNccccccccc1',
              'Cn1cnc2n(C)c(=O)n(C)c(=O)c12']
    fragment = fraggenie.smiles2fraggenie(smiles, fill_non_break_mol=1)
    for i in range(len(smiles)):
        print('smiles: {}\nfragment: {}' .format(smiles[i], fragment[i]))
| 33.382199 | 124 | 0.528545 |
import subprocess
import os
import numpy as np
from datetime import datetime
import pandas as pd
from rdkit import Chem
from rdkit.Chem import Descriptors
__all__ = [
'FragGenie'
]
class FragGenie():
def __init__(self, dir_fraggenie=''):
self.dir_fraggenie = dir_fraggenie
def to_numpy(self, array_str, sep=','):
return np.fromstring(array_str[1:-1], sep=sep)
def create_folder(self, folder_name):
if len(folder_name):
if not os.path.isdir(folder_name):
os.makedirs(folder_name)
return folder_name
def mol_prop_mass(self, smiles):
return [Descriptors.ExactMolWt(Chem.MolFromSmiles(sm)) for sm in smiles]
def smiles2fraggenie_csv(
self,
input_path='',
input_filename='test_input.csv',
smiles_col='smiles',
output_path='',
output_filename='',
num_bonds_to_break=3,
min_fragment_mass=50,
max_smiles_len=250,
max_num_smiles=1000000000,
flag_display='true',
masses_option='METFRAG_MZ'
):
if(len(output_path)==0):
output_path = input_path
if(len(output_filename)==0):
output_filename = ''.join([
'fraggenie_', datetime.today().strftime('%d%m%Y%H%M%S'),
'_', str(np.random.random(1)[0])[2:],
'_nbonds_', str(num_bonds_to_break),
'_frgms_', str(min_fragment_mass),
'_smlen_', str(max_smiles_len),
'_', input_filename
])
bash_cmd = ''.join([
'bash ', self.dir_fraggenie,
'fragment.sh ',
input_path,
input_filename,
' ', output_path,
output_filename,
' ', smiles_col,
' ', str(num_bonds_to_break),
' ', str(min_fragment_mass),
' ', str(max_smiles_len),
' ', str(max_num_smiles),
' ', flag_display,
' ', masses_option
])
subprocess.call(bash_cmd, shell=True)
return output_path, output_filename, bash_cmd
def smiles2fraggenie(
self,
smiles,
num_bonds_to_break=3,
min_fragment_mass=50,
max_smiles_len=250,
max_num_smiles=1000000000,
flag_display='true',
masses_option='METFRAG_MZ',
input_path='dump/',
input_filename='',
massspec_sep=',',
fill_non_break_mol=1,
flag_del_temp_file=1,
verbose=0
):
input_path = self.create_folder(input_path)
if len(input_filename)==0:
input_filename = ''.join(['smiles_', datetime.today().strftime('%d%m%Y%H%M%S'),
'_', str(np.random.random(1)[0])[2:],
'.csv'
])
pd.DataFrame.from_dict({'smiles':smiles}).to_csv(''.join([input_path, input_filename]), index=False)
output_path, output_filename, bash_cmd = self.smiles2fraggenie_csv(
input_path=input_path,
input_filename=input_filename,
num_bonds_to_break=num_bonds_to_break,
min_fragment_mass=min_fragment_mass,
max_smiles_len=max_smiles_len,
max_num_smiles=max_num_smiles,
flag_display=flag_display,
masses_option=masses_option
)
df_smiles = pd.read_csv(output_path+output_filename)
if fill_non_break_mol:
fraggenie = [None]*len(smiles)
fraggenie_smiles = df_smiles['smiles'].tolist()
count1 = 0
count2 = 0
for i, sm in enumerate(smiles):
try:
fraggenie[i] = self.to_numpy(df_smiles[masses_option][fraggenie_smiles.index(sm)], sep=massspec_sep)
if len(fraggenie[i])==0:
if verbose:
print('Unable to break molecules: {}-{}' .format(i, smiles[i]))
fraggenie[i] = np.asarray([self.mol_prop_mass([smiles[i]])[0]])
count1 += 1
except:
if verbose:
print('Unable to break molecules: {}-{}' .format(i, smiles[i]))
fraggenie[i] = np.asarray([self.mol_prop_mass([smiles[i]])[0]])
count2 += 1
print('Total number of unbreakable molecules: {} (empty-{}, not all-{})' .format(count1+count2, count1, count2))
else:
fraggenie = df_smiles[masses_option].apply(self.to_numpy, sep=massspec_sep).tolist()
if flag_del_temp_file:
filename = ''.join([input_path, input_filename])
if os.path.isfile(filename):
if verbose:
print('Removing "{}"' .format(filename))
os.remove(filename)
filename = ''.join([output_path, output_filename])
if os.path.isfile(filename):
if verbose:
print('Removing "{}"' .format(filename))
os.remove(filename)
return fraggenie
if __name__ == '__main__':
fraggenie = FragGenie()
output_path, output_filename, bash_cmd = fraggenie.smiles2fraggenie_csv(output_filename='fraggenie_test_input.csv')
smiles = ['Cn1cnc2n(C)c(=O)n(C)c(=O)c12',
'BrC1CCCCc1CC',
'C#1C#CC1',
'C#1C#CCcCCCc1',
'C#1CCCCCCC=1',
'C#1CCcNccccccccc1',
'Cn1cnc2n(C)c(=O)n(C)c(=O)c12']
fragment = fraggenie.smiles2fraggenie(smiles, fill_non_break_mol=1)
for i in range(len(smiles)):
print('smiles: {}\nfragment: {}' .format(smiles[i], fragment[i]))
| true | true |
f710b66b0ac6b8256d7003a72fab84b564edbb14 | 6,541 | py | Python | cirq/optimizers/expand_composite_test.py | jlmayfield/Cirq | dc1294f54118a9a4f92546ca13780b91615dd675 | [
"Apache-2.0"
] | 1 | 2019-05-10T10:26:42.000Z | 2019-05-10T10:26:42.000Z | cirq/optimizers/expand_composite_test.py | jlmayfield/Cirq | dc1294f54118a9a4f92546ca13780b91615dd675 | [
"Apache-2.0"
] | null | null | null | cirq/optimizers/expand_composite_test.py | jlmayfield/Cirq | dc1294f54118a9a4f92546ca13780b91615dd675 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the expand composite optimization pass."""
import cirq
def assert_equal_mod_empty(expected, actual):
    """Assert the circuits match after empty moments are dropped from *actual*.

    Mutates *actual* in place; on mismatch both circuits are printed before
    the assertion fires so failures are easy to read.
    """
    cirq.DropEmptyMoments().optimize_circuit(actual)
    if expected != actual:
        # coverage: ignore
        print('EXPECTED')
        print(expected)
        print('ACTUAL')
        print(actual)
    assert expected == actual
def test_empty_circuit():
    # Expanding an empty circuit is a no-op.
    circuit = cirq.Circuit()
    opt = cirq.ExpandComposite()
    opt.optimize_circuit(circuit)
    assert_equal_mod_empty(cirq.Circuit(), circuit)
def test_empty_moment():
    # NOTE(review): cirq.Circuit([]) looks identical to cirq.Circuit() --
    # confirm this was meant to build a circuit containing one empty Moment.
    circuit = cirq.Circuit([])
    opt = cirq.ExpandComposite()
    opt.optimize_circuit(circuit)
    assert_equal_mod_empty(cirq.Circuit([]), circuit)
def test_ignore_non_composite():
    """Gates without a decomposition must pass through the optimizer untouched."""
    a, b = cirq.LineQubit.range(2)
    circuit = cirq.Circuit()
    circuit.append([cirq.X(a), cirq.Y(b), cirq.CZ(a, b), cirq.Z(a)])
    expected = circuit.copy()
    cirq.ExpandComposite().optimize_circuit(circuit)
    assert_equal_mod_empty(expected, circuit)
def test_composite_default():
    """A lone CNOT expands to its Y**-0.5, CZ, Y**0.5 decomposition."""
    q0, q1 = cirq.LineQubit.range(2)
    circuit = cirq.Circuit()
    circuit.append(cirq.CNOT(q0, q1))
    cirq.ExpandComposite().optimize_circuit(circuit)
    want = cirq.Circuit()
    want.append([cirq.Y(q1) ** -0.5, cirq.CZ(q0, q1), cirq.Y(q1) ** 0.5])
    assert_equal_mod_empty(want, circuit)
def test_multiple_composite_default():
    """Each of several composite gates is expanded independently."""
    q0, q1 = cirq.LineQubit.range(2)
    cnot = cirq.CNOT(q0, q1)
    circuit = cirq.Circuit()
    circuit.append([cnot, cnot])
    cirq.ExpandComposite().optimize_circuit(circuit)
    want = cirq.Circuit()
    step = [cirq.Y(q1) ** -0.5, cirq.CZ(q0, q1), cirq.Y(q1) ** 0.5]
    want.append([step, step])
    assert_equal_mod_empty(want, circuit)
def test_mix_composite_non_composite():
    """Composite gates expand while neighboring primitive gates stay put."""
    q0, q1 = cirq.LineQubit.range(2)
    actual = cirq.Circuit.from_ops(cirq.X(q0), cirq.CNOT(q0, q1), cirq.X(q1))
    cirq.ExpandComposite().optimize_circuit(actual)
    expected = cirq.Circuit.from_ops(cirq.X(q0),
                                     cirq.Y(q1) ** -0.5,
                                     cirq.CZ(q0, q1),
                                     cirq.Y(q1) ** 0.5,
                                     cirq.X(q1),
                                     strategy=cirq.InsertStrategy.NEW)
    assert_equal_mod_empty(expected, actual)
def test_recursive_composite():
    """A SWAP decomposes recursively: three CNOTs, each expanded to Y/CZ/Y."""
    q0, q1 = cirq.LineQubit.range(2)
    swap = cirq.SWAP(q0, q1)
    circuit = cirq.Circuit()
    circuit.append(swap)
    opt = cirq.ExpandComposite()
    opt.optimize_circuit(circuit)
    # Use the class-level constructor instead of calling from_ops on a
    # throwaway Circuit() instance, matching the rest of this file.
    expected = cirq.Circuit.from_ops(cirq.Y(q1) ** -0.5,
                                     cirq.CZ(q0, q1),
                                     cirq.Y(q1) ** 0.5,
                                     cirq.Y(q0) ** -0.5,
                                     cirq.CZ(q1, q0),
                                     cirq.Y(q0) ** 0.5,
                                     cirq.Y(q1) ** -0.5,
                                     cirq.CZ(q0, q1),
                                     cirq.Y(q1) ** 0.5)
    assert_equal_mod_empty(expected, circuit)
def test_decompose_returns_not_flat_op_tree():
    """A decomposition yielding a tuple of ops (not a flat tree) still expands."""
    class DummyGate(cirq.SingleQubitGate):
        def _decompose_(self, qubits):
            q0, = qubits
            # Yield a tuple of gates instead of yielding a gate
            yield cirq.X(q0),

    q0 = cirq.NamedQubit('q0')
    circuit = cirq.Circuit.from_ops(DummyGate()(q0))
    opt = cirq.ExpandComposite()
    opt.optimize_circuit(circuit)
    # Use the class-level constructor instead of Circuit().from_ops on a
    # throwaway instance, matching the rest of this file.
    expected = cirq.Circuit.from_ops(cirq.X(q0))
    assert_equal_mod_empty(expected, circuit)
def test_decompose_returns_deep_op_tree():
    """Nested tuples, lists and generators in a decomposition are flattened."""
    class DummyGate(cirq.TwoQubitGate):
        def _decompose_(self, qubits):
            q0, q1 = qubits
            # Yield a tuple
            yield ((cirq.X(q0), cirq.Y(q0)), cirq.Z(q0))
            # Yield nested lists
            yield [cirq.X(q0), [cirq.Y(q0), cirq.Z(q0)]]
            def generator(depth):
                if depth <= 0:
                    yield cirq.CZ(q0, q1), cirq.Y(q0)
                else:
                    yield cirq.X(q0), generator(depth - 1)
                    yield cirq.Z(q0)
            # Yield nested generators
            yield generator(2)

    q0, q1 = cirq.LineQubit.range(2)
    circuit = cirq.Circuit.from_ops(DummyGate()(q0, q1))
    opt = cirq.ExpandComposite()
    opt.optimize_circuit(circuit)
    # Use the class-level constructor instead of Circuit().from_ops on a
    # throwaway instance, matching the rest of this file.
    expected = cirq.Circuit.from_ops(
        cirq.X(q0), cirq.Y(q0), cirq.Z(q0),  # From tuple
        cirq.X(q0), cirq.Y(q0), cirq.Z(q0),  # From nested lists
        # From nested generators
        cirq.X(q0), cirq.X(q0),
        cirq.CZ(q0, q1), cirq.Y(q0),
        cirq.Z(q0), cirq.Z(q0))
    assert_equal_mod_empty(expected, circuit)
def test_nonrecursive_expansion():
    """The no_decomp predicate stops expansion at the requested gate level."""
    qubits = [cirq.NamedQubit(s) for s in 'xy']
    # First: refuse to decompose ISWAP itself -- the circuit is unchanged.
    no_decomp = lambda op: (isinstance(op, cirq.GateOperation) and
                            op.gate == cirq.ISWAP)
    expander = cirq.ExpandComposite(no_decomp=no_decomp)
    unexpanded_circuit = cirq.Circuit.from_ops(cirq.ISWAP(*qubits))

    circuit = unexpanded_circuit.__copy__()
    expander.optimize_circuit(circuit)
    assert circuit == unexpanded_circuit

    # Second: stop at the CNOT/H level -- exactly one level of decomposition.
    no_decomp = lambda op: (isinstance(op, cirq.GateOperation) and
                            isinstance(op.gate, (cirq.CNotPowGate,
                                                 cirq.HPowGate)))
    expander = cirq.ExpandComposite(no_decomp=no_decomp)
    circuit = unexpanded_circuit.__copy__()
    expander.optimize_circuit(circuit)
    actual_text_diagram = circuit.to_text_diagram().strip()
    expected_text_diagram = """
x: ───@───H───X───S───X───S^-1───H───@───
      │       │       │              │
y: ───X───────@───────@──────────────X───
    """.strip()
    assert actual_text_diagram == expected_text_diagram
| 34.792553 | 77 | 0.591041 |
import cirq
def assert_equal_mod_empty(expected, actual):
drop_empty = cirq.DropEmptyMoments()
drop_empty.optimize_circuit(actual)
if expected != actual:
print('EXPECTED')
print(expected)
print('ACTUAL')
print(actual)
assert expected == actual
def test_empty_circuit():
circuit = cirq.Circuit()
opt = cirq.ExpandComposite()
opt.optimize_circuit(circuit)
assert_equal_mod_empty(cirq.Circuit(), circuit)
def test_empty_moment():
circuit = cirq.Circuit([])
opt = cirq.ExpandComposite()
opt.optimize_circuit(circuit)
assert_equal_mod_empty(cirq.Circuit([]), circuit)
def test_ignore_non_composite():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit()
circuit.append([cirq.X(q0), cirq.Y(q1), cirq.CZ(q0, q1), cirq.Z(q0)])
expected = circuit.copy()
opt = cirq.ExpandComposite()
opt.optimize_circuit(circuit)
assert_equal_mod_empty(expected, circuit)
def test_composite_default():
q0, q1 = cirq.LineQubit.range(2)
cnot = cirq.CNOT(q0, q1)
circuit = cirq.Circuit()
circuit.append(cnot)
opt = cirq.ExpandComposite()
opt.optimize_circuit(circuit)
expected = cirq.Circuit()
expected.append([cirq.Y(q1) ** -0.5, cirq.CZ(q0, q1), cirq.Y(q1) ** 0.5])
assert_equal_mod_empty(expected, circuit)
def test_multiple_composite_default():
q0, q1 = cirq.LineQubit.range(2)
cnot = cirq.CNOT(q0, q1)
circuit = cirq.Circuit()
circuit.append([cnot, cnot])
opt = cirq.ExpandComposite()
opt.optimize_circuit(circuit)
expected = cirq.Circuit()
decomp = [cirq.Y(q1) ** -0.5, cirq.CZ(q0, q1), cirq.Y(q1) ** 0.5]
expected.append([decomp, decomp])
assert_equal_mod_empty(expected, circuit)
def test_mix_composite_non_composite():
q0, q1 = cirq.LineQubit.range(2)
actual = cirq.Circuit.from_ops(cirq.X(q0), cirq.CNOT(q0, q1), cirq.X(q1))
opt = cirq.ExpandComposite()
opt.optimize_circuit(actual)
expected = cirq.Circuit.from_ops(cirq.X(q0),
cirq.Y(q1) ** -0.5,
cirq.CZ(q0, q1),
cirq.Y(q1) ** 0.5,
cirq.X(q1),
strategy=cirq.InsertStrategy.NEW)
assert_equal_mod_empty(expected, actual)
def test_recursive_composite():
q0, q1 = cirq.LineQubit.range(2)
swap = cirq.SWAP(q0, q1)
circuit = cirq.Circuit()
circuit.append(swap)
opt = cirq.ExpandComposite()
opt.optimize_circuit(circuit)
expected = cirq.Circuit().from_ops(cirq.Y(q1) ** -0.5,
cirq.CZ(q0, q1),
cirq.Y(q1) ** 0.5,
cirq.Y(q0) ** -0.5,
cirq.CZ(q1, q0),
cirq.Y(q0) ** 0.5,
cirq.Y(q1) ** -0.5,
cirq.CZ(q0, q1),
cirq.Y(q1) ** 0.5)
assert_equal_mod_empty(expected, circuit)
def test_decompose_returns_not_flat_op_tree():
class DummyGate(cirq.SingleQubitGate):
def _decompose_(self, qubits):
q0, = qubits
yield cirq.X(q0),
q0 = cirq.NamedQubit('q0')
circuit = cirq.Circuit.from_ops(DummyGate()(q0))
opt = cirq.ExpandComposite()
opt.optimize_circuit(circuit)
expected = cirq.Circuit().from_ops(cirq.X(q0))
assert_equal_mod_empty(expected, circuit)
def test_decompose_returns_deep_op_tree():
class DummyGate(cirq.TwoQubitGate):
def _decompose_(self, qubits):
q0, q1 = qubits
yield ((cirq.X(q0), cirq.Y(q0)), cirq.Z(q0))
yield [cirq.X(q0), [cirq.Y(q0), cirq.Z(q0)]]
def generator(depth):
if depth <= 0:
yield cirq.CZ(q0, q1), cirq.Y(q0)
else:
yield cirq.X(q0), generator(depth - 1)
yield cirq.Z(q0)
yield generator(2)
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit.from_ops(DummyGate()(q0, q1))
opt = cirq.ExpandComposite()
opt.optimize_circuit(circuit)
expected = cirq.Circuit().from_ops(
cirq.X(q0), cirq.Y(q0), cirq.Z(q0),
cirq.X(q0), cirq.Y(q0), cirq.Z(q0),
cirq.X(q0), cirq.X(q0),
cirq.CZ(q0, q1), cirq.Y(q0),
cirq.Z(q0), cirq.Z(q0))
assert_equal_mod_empty(expected, circuit)
def test_nonrecursive_expansion():
qubits = [cirq.NamedQubit(s) for s in 'xy']
no_decomp = lambda op: (isinstance(op, cirq.GateOperation) and
op.gate == cirq.ISWAP)
expander = cirq.ExpandComposite(no_decomp=no_decomp)
unexpanded_circuit = cirq.Circuit.from_ops(cirq.ISWAP(*qubits))
circuit = unexpanded_circuit.__copy__()
expander.optimize_circuit(circuit)
assert circuit == unexpanded_circuit
no_decomp = lambda op: (isinstance(op, cirq.GateOperation) and
isinstance(op.gate, (cirq.CNotPowGate,
cirq.HPowGate)))
expander = cirq.ExpandComposite(no_decomp=no_decomp)
circuit = unexpanded_circuit.__copy__()
expander.optimize_circuit(circuit)
actual_text_diagram = circuit.to_text_diagram().strip()
expected_text_diagram = """
x: ───@───H───X───S───X───S^-1───H───@───
│ │ │ │
y: ───X───────@───────@──────────────X───
""".strip()
assert actual_text_diagram == expected_text_diagram
| true | true |
f710b6dee76ed44e8b32fb3065ab9d427703ea7d | 329 | py | Python | fs_warehouser/fs_warehouser.py | JesseAldridge/fs_warehouser | ad8c6794313729cff07b964b91fa0335154fee3c | [
"MIT"
] | null | null | null | fs_warehouser/fs_warehouser.py | JesseAldridge/fs_warehouser | ad8c6794313729cff07b964b91fa0335154fee3c | [
"MIT"
] | null | null | null | fs_warehouser/fs_warehouser.py | JesseAldridge/fs_warehouser | ad8c6794313729cff07b964b91fa0335154fee3c | [
"MIT"
] | null | null | null | import os, glob
def get_last_timestamped_dir_path(data_dir_path):
    """Return the lexicographically last '2*'-named entry under *data_dir_path*.

    Entries are assumed to be named with a timestamp starting with '2'
    (e.g. 2024-01-02), so lexicographic order matches chronological order.
    ~ is expanded. Returns None when nothing matches.
    """
    pattern = os.path.join(os.path.expanduser(data_dir_path), '2*')
    matches = sorted(glob.glob(pattern))
    return matches[-1] if matches else None
if __name__ == '__main__':
    # Smoke test: print the newest timestamped directory under a sample path.
    print(get_last_timestamped_dir_path('~/fake_scraper_data'))
| 29.909091 | 67 | 0.768997 | import os, glob
def get_last_timestamped_dir_path(data_dir_path):
glob_path = os.path.join(os.path.expanduser(data_dir_path), '2*')
date_paths = glob.glob(glob_path)
date_paths.sort()
return date_paths[-1] if date_paths else None
if __name__ == '__main__':
print(get_last_timestamped_dir_path('~/fake_scraper_data'))
| true | true |
f710b77221f9ccb42a7760e5cf57e07915eb7a7e | 42,626 | py | Python | test/functional/test_framework/messages.py | paymastercoinproject/paymastercoin | 8b1807fbff947bf67378376aa3c522db637157ba | [
"MIT"
] | 1 | 2022-03-05T14:50:58.000Z | 2022-03-05T14:50:58.000Z | test/functional/test_framework/messages.py | paymastercoinproject/paymastercoin | 8b1807fbff947bf67378376aa3c522db637157ba | [
"MIT"
] | null | null | null | test/functional/test_framework/messages.py | paymastercoinproject/paymastercoin | 8b1807fbff947bf67378376aa3c522db637157ba | [
"MIT"
] | 2 | 2021-12-25T12:39:07.000Z | 2022-02-14T03:03:36.000Z | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2019 The Bitcoin Core Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin test framework primitive and message structures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
paymastercoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization.
Classes use __slots__ to ensure extraneous attributes aren't accidentally added
by tests, compromising their intended effect.
"""
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, assert_equal
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70014  # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1  # from version 70001 onwards, fRelay should be appended to version messages (BIP37)

MAX_LOCATOR_SZ = 101  # maximum number of hashes accepted in a block locator
MAX_BLOCK_BASE_SIZE = 1000000

COIN = 100000000  # 1 btc in satoshis
MAX_MONEY = 21000000 * COIN

BIP125_SEQUENCE_NUMBER = 0xfffffffd  # Sequence number that is BIP 125 opt-in and BIP 68-opt-out

# Service flag bits advertised in version/addr messages.
NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_NETWORK_LIMITED = (1 << 10)

# Inventory object types used in inv/getdata messages.
MSG_TX = 1
MSG_BLOCK = 2
MSG_FILTERED_BLOCK = 3
MSG_WITNESS_FLAG = 1 << 30  # OR-ed into a type to request witness serialization
MSG_TYPE_MASK = 0xffffffff >> 2  # strips the witness flag from an inv type
# Serialization/deserialization tools
def sha256(s):
    """Return the single SHA256 digest (bytes) of byte string *s*."""
    return hashlib.sha256(s).digest()
def hash256(s):
    """Return SHA256d (double SHA256) of *s* -- Bitcoin's standard hash."""
    first_round = sha256(s)
    return sha256(first_round)
def ser_compact_size(l):
    """Serialize integer *l* as a Bitcoin CompactSize (varint) byte string.

    Values < 253 are one byte; larger values get a 0xfd/0xfe/0xff marker
    followed by a 2-, 4-, or 8-byte little-endian payload.
    """
    if l < 253:
        return struct.pack("B", l)
    if l < 0x10000:
        return struct.pack("<BH", 253, l)
    if l < 0x100000000:
        return struct.pack("<BI", 254, l)
    return struct.pack("<BQ", 255, l)
def deser_compact_size(f):
    """Read a CompactSize-encoded integer from file-like object *f*."""
    marker = struct.unpack("<B", f.read(1))[0]
    if marker < 253:
        return marker
    # 253/254/255 markers prefix a 2-, 4-, or 8-byte little-endian payload.
    fmt, width = {253: ("<H", 2), 254: ("<I", 4), 255: ("<Q", 8)}[marker]
    return struct.unpack(fmt, f.read(width))[0]
def deser_string(f):
    """Read a CompactSize-length-prefixed byte string from *f*."""
    length = deser_compact_size(f)
    return f.read(length)
def ser_string(s):
    """Serialize byte string *s* with a CompactSize length prefix."""
    prefix = ser_compact_size(len(s))
    return prefix + s
def deser_uint256(f):
    """Read a 256-bit little-endian unsigned integer from *f*.

    Reads eight 32-bit little-endian words so a short stream raises
    struct.error, matching strict deserialization behavior.
    """
    result = 0
    for word_index in range(8):
        word = struct.unpack("<I", f.read(4))[0]
        result |= word << (32 * word_index)
    return result
def ser_uint256(u):
    """Serialize integer *u* as 32 little-endian bytes (uint256).

    Only the low 256 bits of *u* are emitted; higher bits are discarded.
    """
    out = b""
    for _ in range(8):
        out += struct.pack("<I", u & 0xFFFFFFFF)
        u >>= 32
    return out
def uint256_from_str(s):
    """Interpret the first 32 bytes of *s* as a little-endian uint256."""
    words = struct.unpack("<IIIIIIII", s[:32])
    return sum(word << (32 * i) for i, word in enumerate(words))
def uint256_from_compact(c):
    """Expand Bitcoin's 'compact' (nBits) target encoding to a full integer.

    The high byte is a base-256 exponent and the low three bytes are the
    mantissa: value = mantissa * 256**(exponent - 3).
    """
    exponent = (c >> 24) & 0xFF
    mantissa = c & 0xFFFFFF
    return mantissa << (8 * (exponent - 3))
def deser_vector(f, c):
    """Read a CompactSize-prefixed vector of objects from *f*.

    *c* is a zero-argument factory; each created instance is populated via
    its deserialize() method.
    """
    count = deser_compact_size(f)
    items = []
    for _ in range(count):
        obj = c()
        obj.deserialize(f)
        items.append(obj)
    return items
def ser_vector(l, ser_function_name=None):
    """Serialize list *l* with a CompactSize count prefix.

    If *ser_function_name* is given, that method is called on each element
    (used to pick witness vs non-witness transaction serialization);
    otherwise the element's plain serialize() is used.
    """
    out = ser_compact_size(len(l))
    for item in l:
        serializer = (getattr(item, ser_function_name) if ser_function_name
                      else item.serialize)
        out += serializer()
    return out
def deser_uint256_vector(f):
    """Read a CompactSize-prefixed vector of uint256 values from *f*."""
    count = deser_compact_size(f)
    return [deser_uint256(f) for _ in range(count)]
def ser_uint256_vector(l):
    """Serialize a list of uint256 integers with a CompactSize count prefix."""
    return ser_compact_size(len(l)) + b"".join(ser_uint256(u) for u in l)
def deser_string_vector(f):
    """Read a CompactSize-prefixed vector of byte strings from *f*."""
    count = deser_compact_size(f)
    return [deser_string(f) for _ in range(count)]
def ser_string_vector(l):
    """Serialize a list of byte strings with a CompactSize count prefix."""
    return ser_compact_size(len(l)) + b"".join(ser_string(sv) for sv in l)
def FromHex(obj, hex_string):
    """Populate *obj* by deserializing a hex string (e.g. from RPC); return it."""
    stream = BytesIO(hex_str_to_bytes(hex_string))
    obj.deserialize(stream)
    return obj
def ToHex(obj):
    """Serialize *obj* and return the bytes as a hex string (for RPC submission)."""
    raw = obj.serialize()
    return raw.hex()
# Objects that map to paymastercoind objects, which can be serialized/deserialized
class CAddress:
    """A network address record as carried in addr and version messages."""
    __slots__ = ("ip", "nServices", "pchReserved", "port", "time")

    def __init__(self):
        self.time = 0
        self.nServices = 1
        # 12-byte IPv4-mapped IPv6 prefix (::ffff:0:0/96).
        self.pchReserved = b"\x00" * 10 + b"\xff" * 2
        self.ip = "0.0.0.0"
        self.port = 0

    def deserialize(self, f, with_time=True):
        """Read the address from *f*; version messages omit the timestamp."""
        if with_time:
            self.time = struct.unpack("<i", f.read(4))[0]
        self.nServices = struct.unpack("<Q", f.read(8))[0]
        self.pchReserved = f.read(12)
        self.ip = socket.inet_ntoa(f.read(4))
        self.port = struct.unpack(">H", f.read(2))[0]

    def serialize(self, with_time=True):
        """Serialize; note the port is big-endian, unlike the other fields."""
        parts = []
        if with_time:
            parts.append(struct.pack("<i", self.time))
        parts.append(struct.pack("<Q", self.nServices))
        parts.append(self.pchReserved)
        parts.append(socket.inet_aton(self.ip))
        parts.append(struct.pack(">H", self.port))
        return b"".join(parts)

    def __repr__(self):
        return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
                                                         self.ip, self.port)
class CInv:
    """An inventory vector entry: a (type, hash) pair used in inv/getdata."""
    __slots__ = ("hash", "type")

    # Human-readable names for the known inventory types.
    typemap = {
        0: "Error",
        MSG_TX: "TX",
        MSG_BLOCK: "Block",
        MSG_TX | MSG_WITNESS_FLAG: "WitnessTx",
        MSG_BLOCK | MSG_WITNESS_FLAG: "WitnessBlock",
        MSG_FILTERED_BLOCK: "filtered Block",
        4: "CompactBlock"
    }

    def __init__(self, t=0, h=0):
        self.type = t
        self.hash = h

    def deserialize(self, f):
        self.type = struct.unpack("<i", f.read(4))[0]
        self.hash = deser_uint256(f)

    def serialize(self):
        return struct.pack("<i", self.type) + ser_uint256(self.hash)

    def __repr__(self):
        return "CInv(type=%s hash=%064x)" \
            % (self.typemap[self.type], self.hash)
class CBlockLocator:
    """A list of block hashes used to locate a chain fork point
    (getblocks/getheaders)."""
    __slots__ = ("nVersion", "vHave")

    def __init__(self):
        self.nVersion = MY_VERSION
        self.vHave = []

    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.vHave = deser_uint256_vector(f)

    def serialize(self):
        return struct.pack("<i", self.nVersion) + ser_uint256_vector(self.vHave)

    def __repr__(self):
        return "CBlockLocator(nVersion=%i vHave=%s)" \
            % (self.nVersion, repr(self.vHave))
class COutPoint:
    """A reference to a transaction output: (txid, output index)."""
    __slots__ = ("hash", "n")

    def __init__(self, hash=0, n=0):
        self.hash = hash
        self.n = n

    def deserialize(self, f):
        self.hash = deser_uint256(f)
        self.n = struct.unpack("<I", f.read(4))[0]

    def serialize(self):
        return ser_uint256(self.hash) + struct.pack("<I", self.n)

    def __repr__(self):
        return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn:
    """A transaction input: previous outpoint, scriptSig and sequence number."""
    __slots__ = ("nSequence", "prevout", "scriptSig")

    def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
        self.prevout = COutPoint() if outpoint is None else outpoint
        self.scriptSig = scriptSig
        self.nSequence = nSequence

    def deserialize(self, f):
        self.prevout = COutPoint()
        self.prevout.deserialize(f)
        self.scriptSig = deser_string(f)
        self.nSequence = struct.unpack("<I", f.read(4))[0]

    def serialize(self):
        return (self.prevout.serialize()
                + ser_string(self.scriptSig)
                + struct.pack("<I", self.nSequence))

    def __repr__(self):
        return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
            % (repr(self.prevout), self.scriptSig.hex(),
               self.nSequence)
class CTxOut:
    """A transaction output: value in satoshis plus the locking script."""
    __slots__ = ("nValue", "scriptPubKey")

    def __init__(self, nValue=0, scriptPubKey=b""):
        self.nValue = nValue
        self.scriptPubKey = scriptPubKey

    def deserialize(self, f):
        self.nValue = struct.unpack("<q", f.read(8))[0]
        self.scriptPubKey = deser_string(f)

    def serialize(self):
        return struct.pack("<q", self.nValue) + ser_string(self.scriptPubKey)

    def __repr__(self):
        return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
            % (self.nValue // COIN, self.nValue % COIN,
               self.scriptPubKey.hex())
class CScriptWitness:
    """The witness stack attached to a single transaction input."""
    __slots__ = ("stack",)

    def __init__(self):
        # Each stack element is a byte string.
        self.stack = []

    def __repr__(self):
        return "CScriptWitness(%s)" % \
               (",".join([x.hex() for x in self.stack]))

    def is_null(self):
        """Return True when the witness stack is empty."""
        return not self.stack
class CTxInWitness:
    """Witness data for one transaction input: a thin wrapper around a
    CScriptWitness that handles wire (de)serialization."""
    __slots__ = ("scriptWitness",)

    def __init__(self):
        self.scriptWitness = CScriptWitness()

    def deserialize(self, f):
        # The witness is encoded as a vector of byte strings.
        self.scriptWitness.stack = deser_string_vector(f)

    def serialize(self):
        return ser_string_vector(self.scriptWitness.stack)

    def __repr__(self):
        return repr(self.scriptWitness)

    def is_null(self):
        return self.scriptWitness.is_null()
class CTxWitness:
    """Witness data for a whole transaction: one CTxInWitness per input."""
    __slots__ = ("vtxinwit",)

    def __init__(self):
        self.vtxinwit = []

    def deserialize(self, f):
        # The caller must pre-size vtxinwit to len(vin): no count is encoded
        # on the wire, so we deserialize into whatever entries already exist.
        for witness in self.vtxinwit:
            witness.deserialize(f)

    def serialize(self):
        # Unlike other vectors this one carries no length prefix -- its
        # length is required to equal the transaction's vin vector length.
        return b"".join(witness.serialize() for witness in self.vtxinwit)

    def __repr__(self):
        return "CTxWitness(%s)" % \
               (';'.join([repr(x) for x in self.vtxinwit]))

    def is_null(self):
        """True when every per-input witness is empty (or there are none)."""
        return all(witness.is_null() for witness in self.vtxinwit)
class CTransaction:
    """A transaction, with optional segwit witness data.

    Serialization follows BIP 144: when witness data is present, an empty
    vin marker plus a flags byte precede the real vin vector. The cached
    sha256/hash attributes always hold the txid (the non-witness hash).
    """
    __slots__ = ("hash", "nLockTime", "nVersion", "sha256", "vin", "vout",
                 "wit")

    def __init__(self, tx=None):
        # Copy-construct (deep) from *tx* when given, otherwise start empty.
        if tx is None:
            self.nVersion = 1
            self.vin = []
            self.vout = []
            self.wit = CTxWitness()
            self.nLockTime = 0
            self.sha256 = None
            self.hash = None
        else:
            self.nVersion = tx.nVersion
            self.vin = copy.deepcopy(tx.vin)
            self.vout = copy.deepcopy(tx.vout)
            self.nLockTime = tx.nLockTime
            self.sha256 = tx.sha256
            self.hash = tx.hash
            self.wit = copy.deepcopy(tx.wit)

    def deserialize(self, f):
        """Deserialize from *f*, auto-detecting BIP 144 witness framing."""
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.vin = deser_vector(f, CTxIn)
        flags = 0
        # An empty vin is the segwit marker: the next byte holds the flags.
        if len(self.vin) == 0:
            flags = struct.unpack("<B", f.read(1))[0]
            # Not sure why flags can't be zero, but this
            # matches the implementation in paymastercoind
            if (flags != 0):
                self.vin = deser_vector(f, CTxIn)
                self.vout = deser_vector(f, CTxOut)
        else:
            self.vout = deser_vector(f, CTxOut)
        if flags != 0:
            self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
            self.wit.deserialize(f)
        else:
            self.wit = CTxWitness()
        self.nLockTime = struct.unpack("<I", f.read(4))[0]
        # Invalidate any cached txid.
        self.sha256 = None
        self.hash = None

    def serialize_without_witness(self):
        """Serialize in the pre-segwit format (this is what the txid hashes)."""
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_vector(self.vin)
        r += ser_vector(self.vout)
        r += struct.pack("<I", self.nLockTime)
        return r

    # Only serialize with witness when explicitly called for
    def serialize_with_witness(self):
        """Serialize with BIP 144 marker/flags and witness data (if any)."""
        flags = 0
        if not self.wit.is_null():
            flags |= 1
        r = b""
        r += struct.pack("<i", self.nVersion)
        if flags:
            # Empty-vin marker followed by the flags byte (BIP 144 framing).
            dummy = []
            r += ser_vector(dummy)
            r += struct.pack("<B", flags)
        r += ser_vector(self.vin)
        r += ser_vector(self.vout)
        if flags & 1:
            if (len(self.wit.vtxinwit) != len(self.vin)):
                # vtxinwit must have the same length as vin
                self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
                for i in range(len(self.wit.vtxinwit), len(self.vin)):
                    self.wit.vtxinwit.append(CTxInWitness())
            r += self.wit.serialize()
        r += struct.pack("<I", self.nLockTime)
        return r

    # Regular serialization is with witness -- must explicitly
    # call serialize_without_witness to exclude witness data.
    def serialize(self):
        return self.serialize_with_witness()

    # Recalculate the txid (transaction hash without witness)
    def rehash(self):
        self.sha256 = None
        self.calc_sha256()
        return self.hash

    # We will only cache the serialization without witness in
    # self.sha256 and self.hash -- those are expected to be the txid.
    def calc_sha256(self, with_witness=False):
        if with_witness:
            # Don't cache the result, just return it
            return uint256_from_str(hash256(self.serialize_with_witness()))
        if self.sha256 is None:
            self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
        self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')

    def is_valid(self):
        """Basic sanity check: every output value is within [0, 21M * COIN]."""
        self.calc_sha256()
        for tout in self.vout:
            if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
                return False
        return True

    def __repr__(self):
        return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
            % (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader:
    """An 80-byte block header; sha256/hash cache the block hash."""
    __slots__ = ("hash", "hashMerkleRoot", "hashPrevBlock", "nBits", "nNonce",
                 "nTime", "nVersion", "sha256")

    def __init__(self, header=None):
        # Copy-construct from *header* when given, otherwise zero all fields.
        if header is None:
            self.set_null()
        else:
            self.nVersion = header.nVersion
            self.hashPrevBlock = header.hashPrevBlock
            self.hashMerkleRoot = header.hashMerkleRoot
            self.nTime = header.nTime
            self.nBits = header.nBits
            self.nNonce = header.nNonce
            self.sha256 = header.sha256
            self.hash = header.hash
            self.calc_sha256()

    def set_null(self):
        """Reset every field (and the hash cache) to its zero value."""
        self.nVersion = 1
        self.hashPrevBlock = 0
        self.hashMerkleRoot = 0
        self.nTime = 0
        self.nBits = 0
        self.nNonce = 0
        self.sha256 = None
        self.hash = None

    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.hashPrevBlock = deser_uint256(f)
        self.hashMerkleRoot = deser_uint256(f)
        self.nTime = struct.unpack("<I", f.read(4))[0]
        self.nBits = struct.unpack("<I", f.read(4))[0]
        self.nNonce = struct.unpack("<I", f.read(4))[0]
        # Invalidate the cached hash -- the fields just changed.
        self.sha256 = None
        self.hash = None

    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_uint256(self.hashPrevBlock)
        r += ser_uint256(self.hashMerkleRoot)
        r += struct.pack("<I", self.nTime)
        r += struct.pack("<I", self.nBits)
        r += struct.pack("<I", self.nNonce)
        return r

    def calc_sha256(self):
        """Compute and cache the block hash from the 80-byte header.

        The fields are re-packed here rather than calling self.serialize():
        the CBlock subclass overrides serialize() to append transactions,
        while the block hash must cover only the header.
        """
        if self.sha256 is None:
            r = b""
            r += struct.pack("<i", self.nVersion)
            r += ser_uint256(self.hashPrevBlock)
            r += ser_uint256(self.hashMerkleRoot)
            r += struct.pack("<I", self.nTime)
            r += struct.pack("<I", self.nBits)
            r += struct.pack("<I", self.nNonce)
            self.sha256 = uint256_from_str(hash256(r))
            self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')

    def rehash(self):
        """Drop the cached hash, recompute it, and return the new sha256."""
        self.sha256 = None
        self.calc_sha256()
        return self.sha256

    def __repr__(self):
        return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce)
# The serialized block header is always 80 bytes (4 + 32 + 32 + 4 + 4 + 4).
BLOCK_HEADER_SIZE = len(CBlockHeader().serialize())
assert_equal(BLOCK_HEADER_SIZE, 80)
class CBlock(CBlockHeader):
    """A full block: the header plus its transaction vector."""
    __slots__ = ("vtx",)

    def __init__(self, header=None):
        super(CBlock, self).__init__(header)
        self.vtx = []

    def deserialize(self, f):
        super(CBlock, self).deserialize(f)
        self.vtx = deser_vector(f, CTransaction)

    def serialize(self, with_witness=True):
        """Serialize header then transactions (witness format by default)."""
        r = b""
        r += super(CBlock, self).serialize()
        if with_witness:
            r += ser_vector(self.vtx, "serialize_with_witness")
        else:
            r += ser_vector(self.vtx, "serialize_without_witness")
        return r

    # Calculate the merkle root given a vector of transaction hashes
    @classmethod
    def get_merkle_root(cls, hashes):
        """Reduce a list of serialized hashes pairwise down to the root.

        An odd final entry is paired with itself (i2 clamps to the last
        index), matching Bitcoin's merkle tree construction.
        """
        while len(hashes) > 1:
            newhashes = []
            for i in range(0, len(hashes), 2):
                i2 = min(i+1, len(hashes)-1)
                newhashes.append(hash256(hashes[i] + hashes[i2]))
            hashes = newhashes
        return uint256_from_str(hashes[0])

    def calc_merkle_root(self):
        """Merkle root over txids (non-witness transaction hashes)."""
        hashes = []
        for tx in self.vtx:
            tx.calc_sha256()
            hashes.append(ser_uint256(tx.sha256))
        return self.get_merkle_root(hashes)

    def calc_witness_merkle_root(self):
        # For witness root purposes, the hash of the
        # coinbase, with witness, is defined to be 0...0
        hashes = [ser_uint256(0)]

        for tx in self.vtx[1:]:
            # Calculate the hashes with witness data
            hashes.append(ser_uint256(tx.calc_sha256(True)))

        return self.get_merkle_root(hashes)

    def is_valid(self):
        """Check proof of work, per-transaction sanity, and the merkle root."""
        self.calc_sha256()
        target = uint256_from_compact(self.nBits)
        if self.sha256 > target:
            return False
        for tx in self.vtx:
            if not tx.is_valid():
                return False
        if self.calc_merkle_root() != self.hashMerkleRoot:
            return False
        return True

    def solve(self):
        """Grind the nonce until the block hash meets the nBits target."""
        self.rehash()
        target = uint256_from_compact(self.nBits)
        while self.sha256 > target:
            self.nNonce += 1
            self.rehash()

    def __repr__(self):
        return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class PrefilledTransaction:
    """A (differentially encoded index, transaction) pair for BIP 152
    compact blocks."""
    __slots__ = ("index", "tx")

    def __init__(self, index=0, tx=None):
        self.index = index
        self.tx = tx

    def deserialize(self, f):
        self.index = deser_compact_size(f)
        self.tx = CTransaction()
        self.tx.deserialize(f)

    def serialize(self, with_witness=True):
        body = (self.tx.serialize_with_witness() if with_witness
                else self.tx.serialize_without_witness())
        return ser_compact_size(self.index) + body

    def serialize_without_witness(self):
        return self.serialize(with_witness=False)

    def serialize_with_witness(self):
        return self.serialize(with_witness=True)

    def __repr__(self):
        return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs:
    """Wire representation of a BIP 152 compact block (cmpctblock payload)."""
    __slots__ = ("header", "nonce", "prefilled_txn", "prefilled_txn_length",
                 "shortids", "shortids_length")

    def __init__(self):
        self.header = CBlockHeader()
        self.nonce = 0
        self.shortids_length = 0
        self.shortids = []
        self.prefilled_txn_length = 0
        self.prefilled_txn = []

    def deserialize(self, f):
        self.header.deserialize(f)
        self.nonce = struct.unpack("<Q", f.read(8))[0]
        self.shortids_length = deser_compact_size(f)
        for i in range(self.shortids_length):
            # shortids are defined to be 6 bytes in the spec, so append
            # two zero bytes and read it in as an 8-byte number
            self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
        self.prefilled_txn = deser_vector(f, PrefilledTransaction)
        self.prefilled_txn_length = len(self.prefilled_txn)

    # When using version 2 compact blocks, we must serialize with_witness.
    def serialize(self, with_witness=False):
        r = b""
        r += self.header.serialize()
        r += struct.pack("<Q", self.nonce)
        r += ser_compact_size(self.shortids_length)
        for x in self.shortids:
            # We only want the first 6 bytes
            r += struct.pack("<Q", x)[0:6]
        if with_witness:
            r += ser_vector(self.prefilled_txn, "serialize_with_witness")
        else:
            r += ser_vector(self.prefilled_txn, "serialize_without_witness")
        return r

    def __repr__(self):
        return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
    """Wire format for version-2 compact blocks: identical to the base
    class but always serializes prefilled transactions with witness data."""
    __slots__ = ()

    def serialize(self):
        return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
def calculate_shortid(k0, k1, tx_hash):
    """Compute the BIP 152 short transaction id: the low 48 bits of
    SipHash-2-4 of *tx_hash* keyed with (*k0*, *k1*)."""
    return siphash256(k0, k1, tx_hash) & 0x0000ffffffffffff
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs:
    """In-memory view of a compact block with absolute prefilled-tx indexes
    (the wire format uses differential encoding; see to_p2p)."""
    __slots__ = ("header", "nonce", "prefilled_txn", "shortids", "use_witness")

    def __init__(self, p2pheaders_and_shortids = None):
        self.header = CBlockHeader()
        self.nonce = 0
        self.shortids = []
        self.prefilled_txn = []
        self.use_witness = False

        if p2pheaders_and_shortids is not None:
            self.header = p2pheaders_and_shortids.header
            self.nonce = p2pheaders_and_shortids.nonce
            self.shortids = p2pheaders_and_shortids.shortids
            # Decode differential indexes into absolute ones.
            last_index = -1
            for x in p2pheaders_and_shortids.prefilled_txn:
                self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
                last_index = self.prefilled_txn[-1].index

    def to_p2p(self):
        """Convert back to the wire object, re-encoding indexes differentially."""
        if self.use_witness:
            ret = P2PHeaderAndShortWitnessIDs()
        else:
            ret = P2PHeaderAndShortIDs()
        ret.header = self.header
        ret.nonce = self.nonce
        ret.shortids_length = len(self.shortids)
        ret.shortids = self.shortids
        ret.prefilled_txn_length = len(self.prefilled_txn)
        ret.prefilled_txn = []
        last_index = -1
        for x in self.prefilled_txn:
            ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
            last_index = x.index
        return ret

    def get_siphash_keys(self):
        """Derive the two 64-bit siphash keys from sha256(header || nonce)."""
        header_nonce = self.header.serialize()
        header_nonce += struct.pack("<Q", self.nonce)
        hash_header_nonce_as_str = sha256(header_nonce)
        key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
        key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
        return [ key0, key1 ]

    # Version 2 compact blocks use wtxid in shortids (rather than txid)
    def initialize_from_block(self, block, nonce=0, prefill_list=None, use_witness=False):
        """Populate from *block*: prefill the listed tx indexes (default just
        the coinbase) and compute shortids for everything else."""
        if prefill_list is None:
            prefill_list = [0]
        self.header = CBlockHeader(block)
        self.nonce = nonce
        self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
        self.shortids = []
        self.use_witness = use_witness
        [k0, k1] = self.get_siphash_keys()
        for i in range(len(block.vtx)):
            if i not in prefill_list:
                tx_hash = block.vtx[i].sha256
                if use_witness:
                    tx_hash = block.vtx[i].calc_sha256(with_witness=True)
                self.shortids.append(calculate_shortid(k0, k1, tx_hash))

    def __repr__(self):
        return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest:
    """BIP 152 getblocktxn payload: a block hash plus differentially
    encoded transaction indexes."""
    __slots__ = ("blockhash", "indexes")

    def __init__(self, blockhash=0, indexes = None):
        self.blockhash = blockhash
        self.indexes = indexes if indexes is not None else []

    def deserialize(self, f):
        self.blockhash = deser_uint256(f)
        count = deser_compact_size(f)
        for _ in range(count):
            self.indexes.append(deser_compact_size(f))

    def serialize(self):
        out = ser_uint256(self.blockhash)
        out += ser_compact_size(len(self.indexes))
        for delta in self.indexes:
            out += ser_compact_size(delta)
        return out

    # helper to set the differentially encoded indexes from absolute ones
    def from_absolute(self, absolute_indexes):
        """Store *absolute_indexes* as gaps-minus-one from the previous index."""
        self.indexes = []
        prev = -1
        for idx in absolute_indexes:
            self.indexes.append(idx - prev - 1)
            prev = idx

    def to_absolute(self):
        """Decode the stored differential indexes back to absolute positions."""
        absolute = []
        prev = -1
        for delta in self.indexes:
            prev = delta + prev + 1
            absolute.append(prev)
        return absolute

    def __repr__(self):
        return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions:
    """BIP 152 blocktxn payload: a block hash plus the requested transactions."""
    __slots__ = ("blockhash", "transactions")

    def __init__(self, blockhash=0, transactions = None):
        self.blockhash = blockhash
        self.transactions = transactions if transactions is not None else []

    def deserialize(self, f):
        self.blockhash = deser_uint256(f)
        self.transactions = deser_vector(f, CTransaction)

    def serialize(self, with_witness=True):
        method = ("serialize_with_witness" if with_witness
                  else "serialize_without_witness")
        return ser_uint256(self.blockhash) + ser_vector(self.transactions, method)

    def __repr__(self):
        return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree:
    """Partial merkle tree: node hashes plus a traversal flag bitfield.

    Used inside CMerkleBlock; vBits holds one boolean per traversed node and
    vHash the hashes needed to reconstruct the matched subtree.
    """
    __slots__ = ("nTransactions", "vBits", "vHash")
    def __init__(self):
        self.nTransactions = 0
        self.vHash = []
        self.vBits = []
    def deserialize(self, f):
        self.nTransactions = struct.unpack("<i", f.read(4))[0]
        self.vHash = deser_uint256_vector(f)
        # Unpack the flag bytes into booleans, least-significant bit first.
        vBytes = deser_string(f)
        self.vBits = []
        for i in range(len(vBytes) * 8):
            self.vBits.append(vBytes[i//8] & (1 << (i % 8)) != 0)
    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nTransactions)
        r += ser_uint256_vector(self.vHash)
        # Re-pack the booleans into bytes, LSB first, zero-padded to a byte.
        vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7)//8))
        for i in range(len(self.vBits)):
            vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
        r += ser_string(bytes(vBytesArray))
        return r
    def __repr__(self):
        return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock:
    """merkleblock payload: a block header plus a partial merkle tree."""
    __slots__ = ("header", "txn")
    def __init__(self):
        self.header = CBlockHeader()
        self.txn = CPartialMerkleTree()
    def deserialize(self, f):
        self.header.deserialize(f)
        self.txn.deserialize(f)
    def serialize(self):
        r = b""
        r += self.header.serialize()
        r += self.txn.serialize()
        return r
    def __repr__(self):
        return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version:
    """version message: advertises our protocol version, services and addresses."""
    __slots__ = ("addrFrom", "addrTo", "nNonce", "nRelay", "nServices",
                 "nStartingHeight", "nTime", "nVersion", "strSubVer")
    command = b"version"
    def __init__(self):
        self.nVersion = MY_VERSION
        self.nServices = NODE_NETWORK | NODE_WITNESS
        self.nTime = int(time.time())
        self.addrTo = CAddress()
        self.addrFrom = CAddress()
        self.nNonce = random.getrandbits(64)
        self.strSubVer = MY_SUBVERSION
        self.nStartingHeight = -1
        self.nRelay = MY_RELAY
    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.nServices = struct.unpack("<Q", f.read(8))[0]
        self.nTime = struct.unpack("<q", f.read(8))[0]
        # Addresses inside a version message carry no timestamp field.
        self.addrTo = CAddress()
        self.addrTo.deserialize(f, False)
        self.addrFrom = CAddress()
        self.addrFrom.deserialize(f, False)
        self.nNonce = struct.unpack("<Q", f.read(8))[0]
        self.strSubVer = deser_string(f)
        self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
        if self.nVersion >= 70001:
            # Relay field is optional for version 70001 onwards
            try:
                self.nRelay = struct.unpack("<b", f.read(1))[0]
            except struct.error:
                # Narrowed from a bare "except:": a missing relay byte makes
                # struct.unpack raise struct.error on the short read; anything
                # else (e.g. KeyboardInterrupt) should propagate.
                self.nRelay = 0
        else:
            self.nRelay = 0
    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += struct.pack("<Q", self.nServices)
        r += struct.pack("<q", self.nTime)
        r += self.addrTo.serialize(False)
        r += self.addrFrom.serialize(False)
        r += struct.pack("<Q", self.nNonce)
        r += ser_string(self.strSubVer)
        r += struct.pack("<i", self.nStartingHeight)
        r += struct.pack("<b", self.nRelay)
        return r
    def __repr__(self):
        return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
            % (self.nVersion, self.nServices, time.ctime(self.nTime),
               repr(self.addrTo), repr(self.addrFrom), self.nNonce,
               self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack:
    """verack message (no payload)."""
    __slots__ = ()
    command = b"verack"
    def __init__(self):
        pass
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_verack()"
class msg_addr:
    """addr message: a vector of known peer addresses."""
    __slots__ = ("addrs",)
    command = b"addr"
    def __init__(self):
        self.addrs = []
    def deserialize(self, f):
        self.addrs = deser_vector(f, CAddress)
    def serialize(self):
        return ser_vector(self.addrs)
    def __repr__(self):
        return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_inv:
    """inv message: announces a vector of inventory items."""
    __slots__ = ("inv",)
    command = b"inv"
    def __init__(self, inv=None):
        self.inv = [] if inv is None else inv
    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)
    def serialize(self):
        return ser_vector(self.inv)
    def __repr__(self):
        return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata:
    """getdata message: requests the objects named in an inventory vector."""
    __slots__ = ("inv",)
    command = b"getdata"
    def __init__(self, inv=None):
        self.inv = [] if inv is None else inv
    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)
    def serialize(self):
        return ser_vector(self.inv)
    def __repr__(self):
        return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks:
    """getblocks message: request block invs after the locator's best match."""
    __slots__ = ("locator", "hashstop")
    command = b"getblocks"
    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0  # 0 requests as many blocks as possible
    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)
    def serialize(self):
        r = b""
        r += self.locator.serialize()
        r += ser_uint256(self.hashstop)
        return r
    def __repr__(self):
        return "msg_getblocks(locator=%s hashstop=%064x)" \
            % (repr(self.locator), self.hashstop)
class msg_tx:
    """tx message: relays a single transaction (witness serialization)."""
    __slots__ = ("tx",)
    command = b"tx"
    def __init__(self, tx=None):
        # Fixed a mutable-default-argument bug: the old signature
        # (tx=CTransaction()) created ONE shared transaction at class
        # definition time, so every default-constructed msg_tx aliased the
        # same object and deserialize() mutated it for all of them.
        self.tx = tx if tx is not None else CTransaction()
    def deserialize(self, f):
        self.tx.deserialize(f)
    def serialize(self):
        return self.tx.serialize_with_witness()
    def __repr__(self):
        return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_no_witness_tx(msg_tx):
    """tx message serialized without witness data (legacy encoding)."""
    __slots__ = ()
    def serialize(self):
        return self.tx.serialize_without_witness()
class msg_block:
    """block message: relays a full block."""
    __slots__ = ("block",)
    command = b"block"
    def __init__(self, block=None):
        if block is None:
            self.block = CBlock()
        else:
            self.block = block
    def deserialize(self, f):
        self.block.deserialize(f)
    def serialize(self):
        return self.block.serialize()
    def __repr__(self):
        return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic:
    """Raw message: caller supplies the command name and the payload bytes."""
    __slots__ = ("command", "data")
    def __init__(self, command, data=None):
        self.command = command
        self.data = data
    def serialize(self):
        # The payload is sent exactly as supplied.
        return self.data
    def __repr__(self):
        return "msg_generic()"
class msg_no_witness_block(msg_block):
    """block message serialized without witness data."""
    __slots__ = ()
    def serialize(self):
        return self.block.serialize(with_witness=False)
class msg_getaddr:
    """getaddr message (no payload)."""
    __slots__ = ()
    command = b"getaddr"
    def __init__(self):
        pass
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_getaddr()"
class msg_ping:
    """ping message carrying a 64-bit nonce."""
    __slots__ = ("nonce",)
    command = b"ping"
    def __init__(self, nonce=0):
        self.nonce = nonce
    def deserialize(self, f):
        (self.nonce,) = struct.unpack("<Q", f.read(8))
    def serialize(self):
        return struct.pack("<Q", self.nonce)
    def __repr__(self):
        return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong:
    """pong message echoing the nonce of a ping."""
    __slots__ = ("nonce",)
    command = b"pong"
    def __init__(self, nonce=0):
        self.nonce = nonce
    def deserialize(self, f):
        (self.nonce,) = struct.unpack("<Q", f.read(8))
    def serialize(self):
        return struct.pack("<Q", self.nonce)
    def __repr__(self):
        return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool:
    """mempool message (no payload)."""
    __slots__ = ()
    command = b"mempool"
    def __init__(self):
        pass
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_mempool()"
class msg_notfound:
    """notfound message: inventory items we could not supply."""
    __slots__ = ("vec", )
    command = b"notfound"
    def __init__(self, vec=None):
        # Matches the original "vec or []": any falsy value becomes [].
        self.vec = vec if vec else []
    def deserialize(self, f):
        self.vec = deser_vector(f, CInv)
    def serialize(self):
        return ser_vector(self.vec)
    def __repr__(self):
        return "msg_notfound(vec=%s)" % (repr(self.vec))
class msg_sendheaders:
    """sendheaders message (no payload)."""
    __slots__ = ()
    command = b"sendheaders"
    def __init__(self):
        pass
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders:
    """getheaders message: request headers after the locator's best match."""
    __slots__ = ("hashstop", "locator",)
    command = b"getheaders"
    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0  # 0 requests as many headers as possible
    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)
    def serialize(self):
        r = b""
        r += self.locator.serialize()
        r += ser_uint256(self.hashstop)
        return r
    def __repr__(self):
        return "msg_getheaders(locator=%s, stop=%064x)" \
            % (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers:
    """headers message: a vector of block headers."""
    __slots__ = ("headers",)
    command = b"headers"
    def __init__(self, headers=None):
        self.headers = headers if headers is not None else []
    def deserialize(self, f):
        # comment in paymastercoind indicates these should be deserialized as blocks
        blocks = deser_vector(f, CBlock)
        # NOTE(review): this appends to any existing self.headers rather than
        # replacing it, so repeated deserialize() calls accumulate entries.
        for x in blocks:
            self.headers.append(CBlockHeader(x))
    def serialize(self):
        # Headers are serialized as (empty) blocks for the same reason.
        blocks = [CBlock(x) for x in self.headers]
        return ser_vector(blocks)
    def __repr__(self):
        return "msg_headers(headers=%s)" % repr(self.headers)
class msg_merkleblock:
    """merkleblock message (deserialization not implemented yet)."""
    command = b"merkleblock"
    def deserialize(self, f):
        pass  # Placeholder for now
class msg_filterload:
    """filterload message carrying bloom-filter parameters."""
    __slots__ = ("data", "nHashFuncs", "nTweak", "nFlags")
    command = b"filterload"
    def __init__(self, data=b'00', nHashFuncs=0, nTweak=0, nFlags=0):
        self.data = data
        self.nHashFuncs = nHashFuncs
        self.nTweak = nTweak
        self.nFlags = nFlags
    def deserialize(self, f):
        self.data = deser_string(f)
        (self.nHashFuncs,) = struct.unpack("<I", f.read(4))
        (self.nTweak,) = struct.unpack("<I", f.read(4))
        (self.nFlags,) = struct.unpack("<B", f.read(1))
    def serialize(self):
        return (ser_string(self.data)
                + struct.pack("<I", self.nHashFuncs)
                + struct.pack("<I", self.nTweak)
                + struct.pack("<B", self.nFlags))
    def __repr__(self):
        return "msg_filterload(data={}, nHashFuncs={}, nTweak={}, nFlags={})".format(
            self.data, self.nHashFuncs, self.nTweak, self.nFlags)
class msg_filteradd:
    """filteradd message: adds one element to the peer's loaded filter."""
    # Fixed __slots__: ("data") is a parenthesized bare string, not a tuple.
    # CPython tolerates a single string, but it is inconsistent with every
    # other message class here and breaks if a second slot is ever appended.
    __slots__ = ("data",)
    command = b"filteradd"
    def __init__(self, data):
        self.data = data
    def deserialize(self, f):
        self.data = deser_string(f)
    def serialize(self):
        r = b""
        r += ser_string(self.data)
        return r
    def __repr__(self):
        return "msg_filteradd(data={})".format(self.data)
class msg_filterclear:
    """filterclear message (no payload)."""
    __slots__ = ()
    command = b"filterclear"
    def __init__(self):
        pass
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_filterclear()"
class msg_feefilter:
    """feefilter message: minimum feerate for relayed transactions."""
    __slots__ = ("feerate",)
    command = b"feefilter"
    def __init__(self, feerate=0):
        self.feerate = feerate
    def deserialize(self, f):
        (self.feerate,) = struct.unpack("<Q", f.read(8))
    def serialize(self):
        return struct.pack("<Q", self.feerate)
    def __repr__(self):
        return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct:
    """sendcmpct message: announce flag plus compact-block version."""
    __slots__ = ("announce", "version")
    command = b"sendcmpct"
    def __init__(self):
        self.announce = False
        self.version = 1
    def deserialize(self, f):
        (self.announce,) = struct.unpack("<?", f.read(1))
        (self.version,) = struct.unpack("<Q", f.read(8))
    def serialize(self):
        return struct.pack("<?", self.announce) + struct.pack("<Q", self.version)
    def __repr__(self):
        return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock:
    """cmpctblock message: a compact block in P2P wire form."""
    __slots__ = ("header_and_shortids",)
    command = b"cmpctblock"
    def __init__(self, header_and_shortids = None):
        self.header_and_shortids = header_and_shortids
    def deserialize(self, f):
        self.header_and_shortids = P2PHeaderAndShortIDs()
        self.header_and_shortids.deserialize(f)
    def serialize(self):
        r = b""
        r += self.header_and_shortids.serialize()
        return r
    def __repr__(self):
        return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn:
    """getblocktxn message: request missing transactions of a compact block."""
    __slots__ = ("block_txn_request",)
    command = b"getblocktxn"
    def __init__(self):
        self.block_txn_request = None
    def deserialize(self, f):
        self.block_txn_request = BlockTransactionsRequest()
        self.block_txn_request.deserialize(f)
    def serialize(self):
        r = b""
        r += self.block_txn_request.serialize()
        return r
    def __repr__(self):
        return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn:
    """blocktxn message: the transactions answering a getblocktxn."""
    __slots__ = ("block_transactions",)
    command = b"blocktxn"
    def __init__(self):
        self.block_transactions = BlockTransactions()
    def deserialize(self, f):
        self.block_transactions.deserialize(f)
    def serialize(self):
        r = b""
        r += self.block_transactions.serialize()
        return r
    def __repr__(self):
        return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_no_witness_blocktxn(msg_blocktxn):
    """blocktxn message serialized without witness data."""
    __slots__ = ()
    def serialize(self):
        return self.block_transactions.serialize(with_witness=False)
| 28.417333 | 262 | 0.599446 |
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, assert_equal
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70014  # protocol version advertised in our version message
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1  # relay flag appended to version messages (optional from 70001)
MAX_LOCATOR_SZ = 101
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000  # base monetary units per coin
MAX_MONEY = 21000000 * COIN
BIP125_SEQUENCE_NUMBER = 0xfffffffd  # sequence number signalling BIP 125 replaceability
# Service flag bits (nServices).
NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_NETWORK_LIMITED = (1 << 10)
# Inventory object types (CInv.type).
MSG_TX = 1
MSG_BLOCK = 2
MSG_FILTERED_BLOCK = 3
MSG_WITNESS_FLAG = 1 << 30  # ORed into a type to request witness serialization
MSG_TYPE_MASK = 0xffffffff >> 2
def sha256(s):
    """Return the single SHA256 digest of *s* as bytes."""
    return hashlib.sha256(s).digest()
def hash256(s):
    """Return the double-SHA256 digest of *s* (SHA256 applied twice)."""
    return hashlib.sha256(hashlib.sha256(s).digest()).digest()
def ser_compact_size(l):
    """Serialize integer *l* in the CompactSize (varint) format."""
    if l < 253:
        return struct.pack("B", l)
    if l < 0x10000:
        return struct.pack("<BH", 253, l)
    if l < 0x100000000:
        return struct.pack("<BI", 254, l)
    return struct.pack("<BQ", 255, l)
def deser_compact_size(f):
    """Read a CompactSize-encoded integer from file-like object *f*."""
    marker = struct.unpack("<B", f.read(1))[0]
    if marker == 253:
        return struct.unpack("<H", f.read(2))[0]
    if marker == 254:
        return struct.unpack("<I", f.read(4))[0]
    if marker == 255:
        return struct.unpack("<Q", f.read(8))[0]
    return marker
def deser_string(f):
    """Read a CompactSize-length-prefixed byte string from *f*."""
    length = deser_compact_size(f)
    return f.read(length)
def ser_string(s):
    """Serialize bytes *s* with a CompactSize length prefix."""
    prefix = ser_compact_size(len(s))
    return prefix + s
def deser_uint256(f):
    """Read a 256-bit integer as eight little-endian 32-bit words from *f*."""
    result = 0
    for word in range(8):
        chunk = struct.unpack("<I", f.read(4))[0]
        result |= chunk << (word * 32)
    return result
def ser_uint256(u):
    """Serialize integer *u* as 32 bytes: eight little-endian 32-bit words."""
    out = b""
    for _ in range(8):
        out += struct.pack("<I", u & 0xFFFFFFFF)
        u >>= 32
    return out
def uint256_from_str(s):
    """Interpret the first 32 bytes of *s* as a little-endian 256-bit integer."""
    words = struct.unpack("<IIIIIIII", s[:32])
    total = 0
    for pos, word in enumerate(words):
        total |= word << (pos * 32)
    return total
def uint256_from_compact(c):
    """Expand a compact-format (nBits) target into a full 256-bit integer."""
    exponent = (c >> 24) & 0xFF
    mantissa = c & 0xFFFFFF
    return mantissa << (8 * (exponent - 3))
def deser_vector(f, c):
    """Read a CompactSize-prefixed vector of objects of class *c* from *f*."""
    count = deser_compact_size(f)
    items = []
    for _ in range(count):
        obj = c()
        obj.deserialize(f)
        items.append(obj)
    return items
def ser_vector(l, ser_function_name=None):
    """Serialize list *l* with a CompactSize count prefix.

    Each element is serialized via the method named *ser_function_name*
    when given, otherwise via its serialize() method.
    """
    out = ser_compact_size(len(l))
    for item in l:
        serializer = getattr(item, ser_function_name) if ser_function_name else item.serialize
        out += serializer()
    return out
def deser_uint256_vector(f):
    """Read a CompactSize-prefixed list of 256-bit integers from *f*."""
    count = deser_compact_size(f)
    return [deser_uint256(f) for _ in range(count)]
def ser_uint256_vector(l):
    """Serialize a list of 256-bit integers with a CompactSize count prefix."""
    out = ser_compact_size(len(l))
    for value in l:
        out += ser_uint256(value)
    return out
def deser_string_vector(f):
    """Read a CompactSize-prefixed list of byte strings from *f*."""
    count = deser_compact_size(f)
    return [deser_string(f) for _ in range(count)]
def ser_string_vector(l):
    """Serialize a list of byte strings, each with its own length prefix."""
    out = ser_compact_size(len(l))
    for item in l:
        out += ser_string(item)
    return out
def FromHex(obj, hex_string):
    """Deserialize *obj* in place from a hex string and return *obj*."""
    stream = BytesIO(hex_str_to_bytes(hex_string))
    obj.deserialize(stream)
    return obj
def ToHex(obj):
    """Return the hex-string serialization of any object with serialize()."""
    return obj.serialize().hex()
class CAddress:
    """A network address record: services, IPv4 address and port."""
    __slots__ = ("ip", "nServices", "pchReserved", "port", "time")
    def __init__(self):
        self.time = 0
        self.nServices = 1
        # 12-byte prefix of an IPv4-mapped IPv6 address.
        self.pchReserved = b"\x00" * 10 + b"\xff" * 2
        self.ip = "0.0.0.0"
        self.port = 0
    def deserialize(self, f, with_time=True):
        # Addresses embedded in a version message carry no timestamp.
        if with_time:
            self.time = struct.unpack("<i", f.read(4))[0]
        self.nServices = struct.unpack("<Q", f.read(8))[0]
        self.pchReserved = f.read(12)
        self.ip = socket.inet_ntoa(f.read(4))
        self.port = struct.unpack(">H", f.read(2))[0]
    def serialize(self, with_time=True):
        parts = []
        if with_time:
            parts.append(struct.pack("<i", self.time))
        parts.append(struct.pack("<Q", self.nServices))
        parts.append(self.pchReserved)
        parts.append(socket.inet_aton(self.ip))
        parts.append(struct.pack(">H", self.port))  # port is big-endian
        return b"".join(parts)
    def __repr__(self):
        return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
                                                         self.ip, self.port)
class CInv:
    """An inventory entry: a (type, hash) pair used in inv/getdata messages."""
    __slots__ = ("hash", "type")
    # Human-readable names for the known inventory types, used by __repr__.
    typemap = {
        0: "Error",
        MSG_TX: "TX",
        MSG_BLOCK: "Block",
        MSG_TX | MSG_WITNESS_FLAG: "WitnessTx",
        MSG_BLOCK | MSG_WITNESS_FLAG: "WitnessBlock",
        MSG_FILTERED_BLOCK: "filtered Block",
        4: "CompactBlock"
    }
    def __init__(self, t=0, h=0):
        self.type = t
        self.hash = h
    def deserialize(self, f):
        self.type = struct.unpack("<i", f.read(4))[0]
        self.hash = deser_uint256(f)
    def serialize(self):
        return struct.pack("<i", self.type) + ser_uint256(self.hash)
    def __repr__(self):
        return "CInv(type=%s hash=%064x)" \
            % (self.typemap[self.type], self.hash)
class CBlockLocator:
    """A block locator: protocol version plus a list of block hashes."""
    __slots__ = ("nVersion", "vHave")
    def __init__(self):
        self.nVersion = MY_VERSION
        self.vHave = []
    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.vHave = deser_uint256_vector(f)
    def serialize(self):
        return struct.pack("<i", self.nVersion) + ser_uint256_vector(self.vHave)
    def __repr__(self):
        return "CBlockLocator(nVersion=%i vHave=%s)" \
            % (self.nVersion, repr(self.vHave))
class COutPoint:
    """A reference to one output of a transaction: (tx hash, output index)."""
    __slots__ = ("hash", "n")
    def __init__(self, hash=0, n=0):
        self.hash = hash
        self.n = n
    def deserialize(self, f):
        self.hash = deser_uint256(f)
        self.n = struct.unpack("<I", f.read(4))[0]
    def serialize(self):
        return ser_uint256(self.hash) + struct.pack("<I", self.n)
    def __repr__(self):
        return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn:
    """A transaction input: previous outpoint, scriptSig and sequence number."""
    __slots__ = ("nSequence", "prevout", "scriptSig")
    def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
        self.prevout = COutPoint() if outpoint is None else outpoint
        self.scriptSig = scriptSig
        self.nSequence = nSequence
    def deserialize(self, f):
        self.prevout = COutPoint()
        self.prevout.deserialize(f)
        self.scriptSig = deser_string(f)
        self.nSequence = struct.unpack("<I", f.read(4))[0]
    def serialize(self):
        return (self.prevout.serialize()
                + ser_string(self.scriptSig)
                + struct.pack("<I", self.nSequence))
    def __repr__(self):
        return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
            % (repr(self.prevout), self.scriptSig.hex(),
               self.nSequence)
class CTxOut:
    """A transaction output: value (base units) plus scriptPubKey."""
    __slots__ = ("nValue", "scriptPubKey")
    def __init__(self, nValue=0, scriptPubKey=b""):
        self.nValue = nValue
        self.scriptPubKey = scriptPubKey
    def deserialize(self, f):
        self.nValue = struct.unpack("<q", f.read(8))[0]
        self.scriptPubKey = deser_string(f)
    def serialize(self):
        return struct.pack("<q", self.nValue) + ser_string(self.scriptPubKey)
    def __repr__(self):
        return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
            % (self.nValue // COIN, self.nValue % COIN,
               self.scriptPubKey.hex())
class CScriptWitness:
    """The witness stack for a single input: a list of byte strings."""
    __slots__ = ("stack",)
    def __init__(self):
        self.stack = []
    def __repr__(self):
        return "CScriptWitness(%s)" % \
               (",".join([x.hex() for x in self.stack]))
    def is_null(self):
        # A witness is null when its stack is empty.
        return not self.stack
class CTxInWitness:
    """Wrapper pairing one transaction input with its witness stack."""
    __slots__ = ("scriptWitness",)
    def __init__(self):
        self.scriptWitness = CScriptWitness()
    def deserialize(self, f):
        self.scriptWitness.stack = deser_string_vector(f)
    def serialize(self):
        return ser_string_vector(self.scriptWitness.stack)
    def __repr__(self):
        return repr(self.scriptWitness)
    def is_null(self):
        return self.scriptWitness.is_null()
class CTxWitness:
    """All witness data of a transaction: one CTxInWitness per input."""
    __slots__ = ("vtxinwit",)
    def __init__(self):
        self.vtxinwit = []
    def deserialize(self, f):
        # The entry count is fixed by vin; callers pre-populate vtxinwit.
        for witness in self.vtxinwit:
            witness.deserialize(f)
    def serialize(self):
        return b"".join(w.serialize() for w in self.vtxinwit)
    def __repr__(self):
        return "CTxWitness(%s)" % \
               (';'.join([repr(x) for x in self.vtxinwit]))
    def is_null(self):
        # Null only when every per-input witness is null (vacuously true
        # for an empty list).
        return all(w.is_null() for w in self.vtxinwit)
class CTransaction:
    """A transaction supporting both legacy and extended (witness) encoding."""
    __slots__ = ("hash", "nLockTime", "nVersion", "sha256", "vin", "vout",
                 "wit")
    def __init__(self, tx=None):
        # Copy-construct from *tx* when given, otherwise build an empty tx.
        if tx is None:
            self.nVersion = 1
            self.vin = []
            self.vout = []
            self.wit = CTxWitness()
            self.nLockTime = 0
            self.sha256 = None  # cached txid as int; None until calc_sha256()
            self.hash = None  # cached txid as big-endian hex string
        else:
            self.nVersion = tx.nVersion
            self.vin = copy.deepcopy(tx.vin)
            self.vout = copy.deepcopy(tx.vout)
            self.nLockTime = tx.nLockTime
            self.sha256 = tx.sha256
            self.hash = tx.hash
            self.wit = copy.deepcopy(tx.wit)
    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.vin = deser_vector(f, CTxIn)
        flags = 0
        if len(self.vin) == 0:
            # An empty vin acts as the extended-format marker: the next byte
            # is the flags field, after which the real vin/vout follow.
            flags = struct.unpack("<B", f.read(1))[0]
            # A non-zero flags byte indicates witness data is present.
            if (flags != 0):
                self.vin = deser_vector(f, CTxIn)
                self.vout = deser_vector(f, CTxOut)
        else:
            self.vout = deser_vector(f, CTxOut)
        if flags != 0:
            # One witness entry per input.
            self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
            self.wit.deserialize(f)
        else:
            self.wit = CTxWitness()
        self.nLockTime = struct.unpack("<I", f.read(4))[0]
        # Invalidate cached hashes; they no longer match the new contents.
        self.sha256 = None
        self.hash = None
    def serialize_without_witness(self):
        """Serialize in the legacy format (no marker, flags or witness)."""
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_vector(self.vin)
        r += ser_vector(self.vout)
        r += struct.pack("<I", self.nLockTime)
        return r
    def serialize_with_witness(self):
        """Serialize in the extended format when witness data is present."""
        flags = 0
        if not self.wit.is_null():
            flags |= 1
        r = b""
        r += struct.pack("<i", self.nVersion)
        if flags:
            # Extended format: empty vin vector as marker, then the flags byte.
            dummy = []
            r += ser_vector(dummy)
            r += struct.pack("<B", flags)
        r += ser_vector(self.vin)
        r += ser_vector(self.vout)
        if flags & 1:
            # Truncate/pad the witness list to exactly one entry per input.
            if (len(self.wit.vtxinwit) != len(self.vin)):
                self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
                for i in range(len(self.wit.vtxinwit), len(self.vin)):
                    self.wit.vtxinwit.append(CTxInWitness())
            r += self.wit.serialize()
        r += struct.pack("<I", self.nLockTime)
        return r
    # Default serialization includes witness data; callers wanting the
    # legacy encoding must use serialize_without_witness() explicitly.
    def serialize(self):
        return self.serialize_with_witness()
    def rehash(self):
        """Invalidate the cache, recompute, and return the hex txid."""
        self.sha256 = None
        self.calc_sha256()
        return self.hash
    def calc_sha256(self, with_witness=False):
        """Populate the cached txid (self.sha256 / self.hash).

        With with_witness=True, instead return the witness hash as an int
        without touching the cached txid.
        """
        if with_witness:
            # Don't cache the result, just return it
            return uint256_from_str(hash256(self.serialize_with_witness()))
        if self.sha256 is None:
            self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
        self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
    def is_valid(self):
        # Only checks that output values are within [0, 21M * COIN];
        # scripts are not evaluated.
        self.calc_sha256()
        for tout in self.vout:
            if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
                return False
        return True
    def __repr__(self):
        return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
            % (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader:
    """An 80-byte block header with a lazily cached hash."""
    __slots__ = ("hash", "hashMerkleRoot", "hashPrevBlock", "nBits", "nNonce",
                 "nTime", "nVersion", "sha256")
    def __init__(self, header=None):
        if header is None:
            self.set_null()
        else:
            # Copy-construct and immediately compute the hash of the copy.
            self.nVersion = header.nVersion
            self.hashPrevBlock = header.hashPrevBlock
            self.hashMerkleRoot = header.hashMerkleRoot
            self.nTime = header.nTime
            self.nBits = header.nBits
            self.nNonce = header.nNonce
            self.sha256 = header.sha256
            self.hash = header.hash
            self.calc_sha256()
    def set_null(self):
        """Reset all fields to defaults and clear the cached hash."""
        self.nVersion = 1
        self.hashPrevBlock = 0
        self.hashMerkleRoot = 0
        self.nTime = 0
        self.nBits = 0
        self.nNonce = 0
        self.sha256 = None
        self.hash = None
    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.hashPrevBlock = deser_uint256(f)
        self.hashMerkleRoot = deser_uint256(f)
        self.nTime = struct.unpack("<I", f.read(4))[0]
        self.nBits = struct.unpack("<I", f.read(4))[0]
        self.nNonce = struct.unpack("<I", f.read(4))[0]
        # New contents invalidate any cached hash.
        self.sha256 = None
        self.hash = None
    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_uint256(self.hashPrevBlock)
        r += ser_uint256(self.hashMerkleRoot)
        r += struct.pack("<I", self.nTime)
        r += struct.pack("<I", self.nBits)
        r += struct.pack("<I", self.nNonce)
        return r
    def calc_sha256(self):
        # Cached: only recomputed when self.sha256 is None.  Hashes the same
        # 80-byte layout that serialize() produces.
        if self.sha256 is None:
            r = b""
            r += struct.pack("<i", self.nVersion)
            r += ser_uint256(self.hashPrevBlock)
            r += ser_uint256(self.hashMerkleRoot)
            r += struct.pack("<I", self.nTime)
            r += struct.pack("<I", self.nBits)
            r += struct.pack("<I", self.nNonce)
            self.sha256 = uint256_from_str(hash256(r))
            self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
    def rehash(self):
        """Force recomputation of the header hash; return it as an int."""
        self.sha256 = None
        self.calc_sha256()
        return self.sha256
    def __repr__(self):
        return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce)
# Sanity check: a serialized block header is always exactly 80 bytes.
BLOCK_HEADER_SIZE = len(CBlockHeader().serialize())
assert_equal(BLOCK_HEADER_SIZE, 80)
class CBlock(CBlockHeader):
    """A full block: the header plus its transaction vector."""
    __slots__ = ("vtx",)
    def __init__(self, header=None):
        super(CBlock, self).__init__(header)
        self.vtx = []
    def deserialize(self, f):
        super(CBlock, self).deserialize(f)
        self.vtx = deser_vector(f, CTransaction)
    def serialize(self, with_witness=True):
        r = b""
        r += super(CBlock, self).serialize()
        if with_witness:
            r += ser_vector(self.vtx, "serialize_with_witness")
        else:
            r += ser_vector(self.vtx, "serialize_without_witness")
        return r
    # Calculate the merkle root given a vector of transaction hashes
    @classmethod
    def get_merkle_root(cls, hashes):
        # Pairwise hash up the tree; an odd element is paired with itself.
        while len(hashes) > 1:
            newhashes = []
            for i in range(0, len(hashes), 2):
                i2 = min(i+1, len(hashes)-1)
                newhashes.append(hash256(hashes[i] + hashes[i2]))
            hashes = newhashes
        return uint256_from_str(hashes[0])
    def calc_merkle_root(self):
        """Merkle root over the txids of self.vtx."""
        hashes = []
        for tx in self.vtx:
            tx.calc_sha256()
            hashes.append(ser_uint256(tx.sha256))
        return self.get_merkle_root(hashes)
    def calc_witness_merkle_root(self):
        # For witness root purposes, the hash of the
        # coinbase, with witness, is defined to be 0...0
        hashes = [ser_uint256(0)]
        for tx in self.vtx[1:]:
            # Calculate the hashes with witness data
            hashes.append(ser_uint256(tx.calc_sha256(True)))
        return self.get_merkle_root(hashes)
    def is_valid(self):
        # Checks proof of work, per-tx output ranges and the merkle root.
        self.calc_sha256()
        target = uint256_from_compact(self.nBits)
        if self.sha256 > target:
            return False
        for tx in self.vtx:
            if not tx.is_valid():
                return False
        if self.calc_merkle_root() != self.hashMerkleRoot:
            return False
        return True
    def solve(self):
        """Brute-force nNonce until the header hash meets the nBits target."""
        self.rehash()
        target = uint256_from_compact(self.nBits)
        while self.sha256 > target:
            self.nNonce += 1
            self.rehash()
    def __repr__(self):
        return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class PrefilledTransaction:
    """A full transaction carried inside a compact block, plus its index."""
    __slots__ = ("index", "tx")
    def __init__(self, index=0, tx = None):
        self.index = index
        self.tx = tx
    def deserialize(self, f):
        self.index = deser_compact_size(f)
        self.tx = CTransaction()
        self.tx.deserialize(f)
    def serialize(self, with_witness=True):
        body = self.tx.serialize_with_witness() if with_witness else self.tx.serialize_without_witness()
        return ser_compact_size(self.index) + body
    def serialize_without_witness(self):
        return self.serialize(with_witness=False)
    def serialize_with_witness(self):
        return self.serialize(with_witness=True)
    def __repr__(self):
        return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs:
    """Wire form of a compact block: header, nonce, shortids, prefilled txs."""
    __slots__ = ("header", "nonce", "prefilled_txn", "prefilled_txn_length",
                 "shortids", "shortids_length")
    def __init__(self):
        self.header = CBlockHeader()
        self.nonce = 0
        self.shortids_length = 0
        self.shortids = []
        self.prefilled_txn_length = 0
        self.prefilled_txn = []
    def deserialize(self, f):
        self.header.deserialize(f)
        self.nonce = struct.unpack("<Q", f.read(8))[0]
        self.shortids_length = deser_compact_size(f)
        for i in range(self.shortids_length):
            # shortids are defined to be 6 bytes in the spec, so append
            # two zero bytes and read it in as an 8-byte number
            self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
        self.prefilled_txn = deser_vector(f, PrefilledTransaction)
        self.prefilled_txn_length = len(self.prefilled_txn)
    # When using version 2 compact blocks, we must serialize with_witness.
    def serialize(self, with_witness=False):
        r = b""
        r += self.header.serialize()
        r += struct.pack("<Q", self.nonce)
        r += ser_compact_size(self.shortids_length)
        for x in self.shortids:
            # We only want the first 6 bytes
            r += struct.pack("<Q", x)[0:6]
        if with_witness:
            r += ser_vector(self.prefilled_txn, "serialize_with_witness")
        else:
            r += ser_vector(self.prefilled_txn, "serialize_without_witness")
        return r
    def __repr__(self):
        # Fixed: the format string was missing its closing parenthesis.
        return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s)" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
    """Compact block wire form using witness serialization (version 2)."""
    __slots__ = ()
    def serialize(self):
        return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
    """Return the 6-byte compact-block shortid of *tx_hash* under siphash keys k0/k1."""
    return siphash256(k0, k1, tx_hash) & 0x0000ffffffffffff
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs:
    """Convenience form of a compact block with absolute prefilled indices."""
    __slots__ = ("header", "nonce", "prefilled_txn", "shortids", "use_witness")
    def __init__(self, p2pheaders_and_shortids = None):
        self.header = CBlockHeader()
        self.nonce = 0
        self.shortids = []
        self.prefilled_txn = []
        self.use_witness = False
        if p2pheaders_and_shortids is not None:
            self.header = p2pheaders_and_shortids.header
            self.nonce = p2pheaders_and_shortids.nonce
            self.shortids = p2pheaders_and_shortids.shortids
            # Convert differential prefilled indexes into absolute ones.
            last_index = -1
            for x in p2pheaders_and_shortids.prefilled_txn:
                self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
                last_index = self.prefilled_txn[-1].index
    def to_p2p(self):
        """Convert back to the wire form, re-encoding indexes differentially."""
        if self.use_witness:
            ret = P2PHeaderAndShortWitnessIDs()
        else:
            ret = P2PHeaderAndShortIDs()
        ret.header = self.header
        ret.nonce = self.nonce
        ret.shortids_length = len(self.shortids)
        ret.shortids = self.shortids
        ret.prefilled_txn_length = len(self.prefilled_txn)
        ret.prefilled_txn = []
        last_index = -1
        for x in self.prefilled_txn:
            ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
            last_index = x.index
        return ret
    def get_siphash_keys(self):
        """Derive the two siphash keys from the header and nonce."""
        header_nonce = self.header.serialize()
        header_nonce += struct.pack("<Q", self.nonce)
        hash_header_nonce_as_str = sha256(header_nonce)
        key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
        key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
        return [ key0, key1 ]
    # Version 2 compact blocks use wtxid in shortids (rather than txid)
    def initialize_from_block(self, block, nonce=0, prefill_list=None, use_witness=False):
        if prefill_list is None:
            prefill_list = [0]
        self.header = CBlockHeader(block)
        self.nonce = nonce
        self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
        self.shortids = []
        self.use_witness = use_witness
        [k0, k1] = self.get_siphash_keys()
        for i in range(len(block.vtx)):
            if i not in prefill_list:
                tx_hash = block.vtx[i].sha256
                if use_witness:
                    tx_hash = block.vtx[i].calc_sha256(with_witness=True)
                self.shortids.append(calculate_shortid(k0, k1, tx_hash))
    def __repr__(self):
        # Fixed: the format string was missing its closing parenthesis.
        return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s)" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest:
    """getblocktxn payload: a block hash plus differentially encoded tx indexes."""
    __slots__ = ("blockhash", "indexes")
    def __init__(self, blockhash=0, indexes = None):
        self.blockhash = blockhash
        self.indexes = [] if indexes is None else indexes
    def deserialize(self, f):
        self.blockhash = deser_uint256(f)
        count = deser_compact_size(f)
        for _ in range(count):
            self.indexes.append(deser_compact_size(f))
    def serialize(self):
        out = ser_uint256(self.blockhash)
        out += ser_compact_size(len(self.indexes))
        for idx in self.indexes:
            out += ser_compact_size(idx)
        return out
    # helper to set the differentially encoded indexes from absolute ones
    def from_absolute(self, absolute_indexes):
        self.indexes = []
        prev = -1
        for idx in absolute_indexes:
            self.indexes.append(idx - prev - 1)
            prev = idx
    def to_absolute(self):
        """Decode the differential indexes back into absolute positions."""
        out = []
        prev = -1
        for delta in self.indexes:
            prev = prev + delta + 1
            out.append(prev)
        return out
    def __repr__(self):
        return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions:
    """blocktxn payload: the transactions requested via getblocktxn."""
    __slots__ = ("blockhash", "transactions")

    def __init__(self, blockhash=0, transactions = None):
        self.blockhash = blockhash
        self.transactions = [] if transactions is None else transactions

    def deserialize(self, f):
        self.blockhash = deser_uint256(f)
        self.transactions = deser_vector(f, CTransaction)

    def serialize(self, with_witness=True):
        serializer = "serialize_with_witness" if with_witness else "serialize_without_witness"
        return ser_uint256(self.blockhash) + ser_vector(self.transactions, serializer)

    def __repr__(self):
        return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree:
    """Partial merkle tree used inside merkleblock (BIP 37 filtered blocks)."""
    __slots__ = ("nTransactions", "vBits", "vHash")

    def __init__(self):
        self.nTransactions = 0
        self.vHash = []
        self.vBits = []

    def deserialize(self, f):
        self.nTransactions = struct.unpack("<i", f.read(4))[0]
        self.vHash = deser_uint256_vector(f)
        flag_bytes = deser_string(f)
        # Unpack flag bits, least-significant bit of each byte first.
        self.vBits = [bool(flag_bytes[i // 8] & (1 << (i % 8)))
                      for i in range(len(flag_bytes) * 8)]

    def serialize(self):
        packed = bytearray((len(self.vBits) + 7) // 8)
        for i, bit in enumerate(self.vBits):
            packed[i // 8] |= bit << (i % 8)
        return (struct.pack("<i", self.nTransactions)
                + ser_uint256_vector(self.vHash)
                + ser_string(bytes(packed)))

    def __repr__(self):
        return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock:
    """merkleblock message body: a block header plus a partial merkle tree."""
    __slots__ = ("header", "txn")

    def __init__(self):
        self.header = CBlockHeader()
        self.txn = CPartialMerkleTree()

    def deserialize(self, f):
        self.header.deserialize(f)
        self.txn.deserialize(f)

    def serialize(self):
        return self.header.serialize() + self.txn.serialize()

    def __repr__(self):
        return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version:
    """P2P ``version`` handshake message (the first message sent on connect)."""
    __slots__ = ("addrFrom", "addrTo", "nNonce", "nRelay", "nServices",
                 "nStartingHeight", "nTime", "nVersion", "strSubVer")
    command = b"version"

    def __init__(self):
        self.nVersion = MY_VERSION
        self.nServices = NODE_NETWORK | NODE_WITNESS
        self.nTime = int(time.time())
        self.addrTo = CAddress()
        self.addrFrom = CAddress()
        self.nNonce = random.getrandbits(64)
        self.strSubVer = MY_SUBVERSION
        self.nStartingHeight = -1
        self.nRelay = MY_RELAY

    def deserialize(self, f):
        """Parse a version payload from file-like object *f*."""
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.nServices = struct.unpack("<Q", f.read(8))[0]
        self.nTime = struct.unpack("<q", f.read(8))[0]
        self.addrTo = CAddress()
        self.addrTo.deserialize(f, False)
        self.addrFrom = CAddress()
        self.addrFrom.deserialize(f, False)
        self.nNonce = struct.unpack("<Q", f.read(8))[0]
        self.strSubVer = deser_string(f)
        self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
        if self.nVersion >= 70001:
            # Relay field is optional for version 70001 onwards; a peer may
            # omit it, in which case the stream is exhausted and unpack of
            # the short read raises struct.error. Narrowed from a bare
            # ``except:`` so unrelated errors are no longer swallowed.
            try:
                self.nRelay = struct.unpack("<b", f.read(1))[0]
            except struct.error:
                self.nRelay = 0
        else:
            self.nRelay = 0

    def serialize(self):
        """Serialize the version payload to bytes."""
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += struct.pack("<Q", self.nServices)
        r += struct.pack("<q", self.nTime)
        r += self.addrTo.serialize(False)
        r += self.addrFrom.serialize(False)
        r += struct.pack("<Q", self.nNonce)
        r += ser_string(self.strSubVer)
        r += struct.pack("<i", self.nStartingHeight)
        r += struct.pack("<b", self.nRelay)
        return r

    def __repr__(self):
        return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
            % (self.nVersion, self.nServices, time.ctime(self.nTime),
               repr(self.addrTo), repr(self.addrFrom), self.nNonce,
               self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack:
    """P2P ``verack`` message: empty acknowledgement of ``version``."""
    __slots__ = ()
    command = b"verack"

    def __init__(self):
        pass

    def deserialize(self, f):
        # No payload to read.
        pass

    def serialize(self):
        # Empty payload.
        return bytes()

    def __repr__(self):
        return "msg_verack()"
class msg_addr:
    """P2P ``addr`` message: gossip a list of known peer addresses."""
    __slots__ = ("addrs",)
    command = b"addr"

    def __init__(self):
        self.addrs = []

    def deserialize(self, f):
        self.addrs = deser_vector(f, CAddress)

    def serialize(self):
        return ser_vector(self.addrs)

    def __repr__(self):
        return "msg_addr(addrs=%s)" % repr(self.addrs)
class msg_inv:
    """P2P ``inv`` message: advertise inventory (block/tx hashes)."""
    __slots__ = ("inv",)
    command = b"inv"

    def __init__(self, inv=None):
        self.inv = [] if inv is None else inv

    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.inv)

    def __repr__(self):
        return "msg_inv(inv=%s)" % repr(self.inv)
class msg_getdata:
    """P2P ``getdata`` message: request the objects named in an inventory."""
    __slots__ = ("inv",)
    command = b"getdata"

    def __init__(self, inv=None):
        if inv is None:
            inv = []
        self.inv = inv

    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.inv)

    def __repr__(self):
        return "msg_getdata(inv=%s)" % repr(self.inv)
class msg_getblocks:
    """P2P ``getblocks`` message: request block inventories after a locator."""
    __slots__ = ("locator", "hashstop")
    command = b"getblocks"

    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0

    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)

    def serialize(self):
        return self.locator.serialize() + ser_uint256(self.hashstop)

    def __repr__(self):
        return "msg_getblocks(locator=%s hashstop=%064x)" \
            % (repr(self.locator), self.hashstop)
class msg_tx:
    """P2P ``tx`` message carrying a single transaction (with witness)."""
    __slots__ = ("tx",)
    command = b"tx"

    def __init__(self, tx=None):
        # Fixed mutable-default-argument bug: ``tx=CTransaction()`` was
        # evaluated once at class-definition time, so every
        # default-constructed msg_tx shared — and deserialize() mutated —
        # the same CTransaction instance.
        self.tx = CTransaction() if tx is None else tx

    def deserialize(self, f):
        self.tx.deserialize(f)

    def serialize(self):
        return self.tx.serialize_with_witness()

    def __repr__(self):
        return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_no_witness_tx(msg_tx):
    """``tx`` message variant serialized without witness data."""
    __slots__ = ()

    def serialize(self):
        return self.tx.serialize_without_witness()
class msg_block:
    """P2P ``block`` message carrying a full block."""
    __slots__ = ("block",)
    command = b"block"

    def __init__(self, block=None):
        self.block = CBlock() if block is None else block

    def deserialize(self, f):
        self.block.deserialize(f)

    def serialize(self):
        return self.block.serialize()

    def __repr__(self):
        return "msg_block(block=%s)" % repr(self.block)
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic:
    """Escape hatch: send an arbitrary command name with raw payload bytes."""
    __slots__ = ("command", "data")

    def __init__(self, command, data=None):
        self.command = command
        self.data = data

    def serialize(self):
        # The payload is already raw bytes supplied by the caller.
        return self.data

    def __repr__(self):
        return "msg_generic()"
class msg_no_witness_block(msg_block):
    """``block`` message variant serialized without witness data."""
    __slots__ = ()

    def serialize(self):
        return self.block.serialize(with_witness=False)
class msg_getaddr:
    """P2P ``getaddr`` message: request known peer addresses (no payload)."""
    __slots__ = ()
    command = b"getaddr"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return bytes()

    def __repr__(self):
        return "msg_getaddr()"
class msg_ping:
    """P2P ``ping`` message with a 64-bit nonce."""
    __slots__ = ("nonce",)
    command = b"ping"

    def __init__(self, nonce=0):
        self.nonce = nonce

    def deserialize(self, f):
        (self.nonce,) = struct.unpack("<Q", f.read(8))

    def serialize(self):
        return struct.pack("<Q", self.nonce)

    def __repr__(self):
        return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong:
    """P2P ``pong`` message echoing a ping's 64-bit nonce."""
    __slots__ = ("nonce",)
    command = b"pong"

    def __init__(self, nonce=0):
        self.nonce = nonce

    def deserialize(self, f):
        (self.nonce,) = struct.unpack("<Q", f.read(8))

    def serialize(self):
        return struct.pack("<Q", self.nonce)

    def __repr__(self):
        return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool:
    """P2P ``mempool`` message: request the peer's mempool (no payload)."""
    __slots__ = ()
    command = b"mempool"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return bytes()

    def __repr__(self):
        return "msg_mempool()"
class msg_notfound:
    """P2P ``notfound`` message: inventory items the peer could not provide."""
    __slots__ = ("vec", )
    command = b"notfound"

    def __init__(self, vec=None):
        # Falsy (None or empty) -> fresh list, matching the original semantics.
        self.vec = vec if vec else []

    def deserialize(self, f):
        self.vec = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.vec)

    def __repr__(self):
        return "msg_notfound(vec=%s)" % repr(self.vec)
class msg_sendheaders:
    """P2P ``sendheaders`` (BIP 130): prefer headers announcements (no payload)."""
    __slots__ = ()
    command = b"sendheaders"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return bytes()

    def __repr__(self):
        return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders:
    """P2P ``getheaders`` message: a block locator plus a stop hash
    (0 requests as many headers as the peer will send)."""
    __slots__ = ("hashstop", "locator",)
    command = b"getheaders"

    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0

    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)

    def serialize(self):
        return self.locator.serialize() + ser_uint256(self.hashstop)

    def __repr__(self):
        return "msg_getheaders(locator=%s, stop=%064x)" \
            % (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers:
    """P2P ``headers`` message: a list of block headers."""
    __slots__ = ("headers",)
    command = b"headers"

    def __init__(self, headers=None):
        self.headers = [] if headers is None else headers

    def deserialize(self, f):
        # comment in paymastercoind indicates these should be deserialized as blocks
        for block in deser_vector(f, CBlock):
            self.headers.append(CBlockHeader(block))

    def serialize(self):
        return ser_vector([CBlock(h) for h in self.headers])

    def __repr__(self):
        return "msg_headers(headers=%s)" % repr(self.headers)
class msg_merkleblock:
    """P2P ``merkleblock`` message (BIP 37 filtered block).

    Placeholder: the payload is not parsed. NOTE(review): unlike the sibling
    message classes this one defines no __slots__ or serialize(); confirm it
    is only ever received, never sent.
    """
    command = b"merkleblock"
    def deserialize(self, f):
        pass # Placeholder for now
class msg_filterload:
    """P2P ``filterload`` message (BIP 37): install a bloom filter."""
    __slots__ = ("data", "nHashFuncs", "nTweak", "nFlags")
    command = b"filterload"

    def __init__(self, data=b'00', nHashFuncs=0, nTweak=0, nFlags=0):
        self.data = data
        self.nHashFuncs = nHashFuncs
        self.nTweak = nTweak
        self.nFlags = nFlags

    def deserialize(self, f):
        self.data = deser_string(f)
        (self.nHashFuncs,) = struct.unpack("<I", f.read(4))
        (self.nTweak,) = struct.unpack("<I", f.read(4))
        (self.nFlags,) = struct.unpack("<B", f.read(1))

    def serialize(self):
        return (ser_string(self.data)
                + struct.pack("<I", self.nHashFuncs)
                + struct.pack("<I", self.nTweak)
                + struct.pack("<B", self.nFlags))

    def __repr__(self):
        return "msg_filterload(data={}, nHashFuncs={}, nTweak={}, nFlags={})".format(
            self.data, self.nHashFuncs, self.nTweak, self.nFlags)
class msg_filteradd:
    """P2P ``filteradd`` message (BIP 37): add one element to the filter."""
    # Fixed: ("data") is just the string "data" — a one-element tuple needs a
    # trailing comma. The old form only worked because __slots__ also accepts
    # a single string; made explicit for consistency with the other messages.
    __slots__ = ("data",)
    command = b"filteradd"

    def __init__(self, data):
        self.data = data

    def deserialize(self, f):
        self.data = deser_string(f)

    def serialize(self):
        r = b""
        r += ser_string(self.data)
        return r

    def __repr__(self):
        return "msg_filteradd(data={})".format(self.data)
class msg_filterclear:
    """P2P ``filterclear`` message (BIP 37): drop the bloom filter (no payload)."""
    __slots__ = ()
    command = b"filterclear"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return bytes()

    def __repr__(self):
        return "msg_filterclear()"
class msg_feefilter:
    """P2P ``feefilter`` message (BIP 133): minimum relay feerate."""
    __slots__ = ("feerate",)
    command = b"feefilter"

    def __init__(self, feerate=0):
        self.feerate = feerate

    def deserialize(self, f):
        (self.feerate,) = struct.unpack("<Q", f.read(8))

    def serialize(self):
        return struct.pack("<Q", self.feerate)

    def __repr__(self):
        return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct:
    """P2P ``sendcmpct`` message (BIP 152): negotiate compact-block relay."""
    __slots__ = ("announce", "version")
    command = b"sendcmpct"

    def __init__(self):
        self.announce = False
        self.version = 1

    def deserialize(self, f):
        (self.announce,) = struct.unpack("<?", f.read(1))
        (self.version,) = struct.unpack("<Q", f.read(8))

    def serialize(self):
        return struct.pack("<?", self.announce) + struct.pack("<Q", self.version)

    def __repr__(self):
        return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock:
    """P2P ``cmpctblock`` message (BIP 152): header plus short txids."""
    __slots__ = ("header_and_shortids",)
    command = b"cmpctblock"

    def __init__(self, header_and_shortids = None):
        self.header_and_shortids = header_and_shortids

    def deserialize(self, f):
        self.header_and_shortids = P2PHeaderAndShortIDs()
        self.header_and_shortids.deserialize(f)

    def serialize(self):
        return self.header_and_shortids.serialize()

    def __repr__(self):
        return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn:
    """P2P ``getblocktxn`` message (BIP 152): request missing transactions."""
    __slots__ = ("block_txn_request",)
    command = b"getblocktxn"

    def __init__(self):
        self.block_txn_request = None

    def deserialize(self, f):
        self.block_txn_request = BlockTransactionsRequest()
        self.block_txn_request.deserialize(f)

    def serialize(self):
        return self.block_txn_request.serialize()

    def __repr__(self):
        return "msg_getblocktxn(block_txn_request=%s)" % repr(self.block_txn_request)
class msg_blocktxn:
    """P2P ``blocktxn`` message (BIP 152): transactions answering getblocktxn."""
    __slots__ = ("block_transactions",)
    command = b"blocktxn"

    def __init__(self):
        self.block_transactions = BlockTransactions()

    def deserialize(self, f):
        self.block_transactions.deserialize(f)

    def serialize(self):
        return self.block_transactions.serialize()

    def __repr__(self):
        return "msg_blocktxn(block_transactions=%s)" % repr(self.block_transactions)
class msg_no_witness_blocktxn(msg_blocktxn):
    """``blocktxn`` message variant serialized without witness data."""
    __slots__ = ()

    def serialize(self):
        return self.block_transactions.serialize(with_witness=False)
| true | true |
f710b7ec16f6c2d873e98254f0217de121369296 | 24,902 | py | Python | official/recommend/ncf/src/dataset.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 1 | 2021-11-18T08:17:44.000Z | 2021-11-18T08:17:44.000Z | official/recommend/ncf/src/dataset.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | null | null | null | official/recommend/ncf/src/dataset.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 2 | 2019-09-01T06:17:04.000Z | 2019-10-04T08:39:45.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Dataset loading, creation and processing"""
import logging
import math
import os
import time
import timeit
import pickle
import numpy as np
import pandas as pd
from mindspore.dataset import GeneratorDataset, Sampler
import src.constants as rconst
import src.movielens as movielens
import src.stat_utils as stat_utils
# Expected (num_users, num_items) per supported MovieLens dataset; used to
# sanity-check the counts produced by load_data().
DATASET_TO_NUM_USERS_AND_ITEMS = {
    "ml-1m": (6040, 3706),
    "ml-20m": (138493, 26744)
}
# Keys that must all be present for a raw-data cache file to be usable.
_EXPECTED_CACHE_KEYS = (
    rconst.TRAIN_USER_KEY, rconst.TRAIN_ITEM_KEY, rconst.EVAL_USER_KEY,
    rconst.EVAL_ITEM_KEY, rconst.USER_MAP, rconst.ITEM_MAP)
def load_data(data_dir, dataset):
    """
    Load data in .csv format and output structured data.

    This function reads in the raw CSV of positive items, and performs three
    preprocessing transformations:

    1) Filter out all users who have not rated at least a certain number
       of items. (Typically 20 items)

    2) Zero index the users and items such that the largest user_id is
       `num_users - 1` and the largest item_id is `num_items - 1`

    3) Sort the dataframe by user_id, with timestamp as a secondary sort key.
       This allows the dataframe to be sliced by user in-place, and for the last
       item to be selected simply by calling the `-1` index of a user's slice.

    While all of these transformations are performed by Pandas (and are therefore
    single-threaded), they only take ~2 minutes, and the overhead to apply a
    MapReduce pattern to parallel process the dataset adds significant complexity
    for no computational gain. For a larger dataset parallelizing this
    preprocessing could yield speedups. (Also, this preprocessing step is only
    performed once for an entire run.

    Returns:
        (data, num_users, num_items) where data is a dict keyed by the
        rconst.* cache keys.
    """
    logging.info("Beginning loading data...")
    raw_rating_path = os.path.join(data_dir, dataset, movielens.RATINGS_FILE)
    cache_path = os.path.join(data_dir, dataset, rconst.RAW_CACHE_FILE)
    valid_cache = os.path.exists(cache_path)
    if valid_cache:
        with open(cache_path, 'rb') as f:
            cached_data = pickle.load(f)
        # Validate the cache once, then remove it at most once. Previously
        # os.remove() sat inside the per-key loop, so a cache missing two or
        # more keys attempted a second removal and raised FileNotFoundError.
        if any(key not in cached_data for key in _EXPECTED_CACHE_KEYS):
            valid_cache = False
            logging.info("Removing stale raw data cache file.")
            os.remove(cache_path)
    if valid_cache:
        data = cached_data
    else:
        # process data and save to .csv
        with open(raw_rating_path) as f:
            df = pd.read_csv(f)
        # Get the info of users who have more than 20 ratings on items
        grouped = df.groupby(movielens.USER_COLUMN)
        df = grouped.filter(lambda x: len(x) >= rconst.MIN_NUM_RATINGS)
        original_users = df[movielens.USER_COLUMN].unique()
        original_items = df[movielens.ITEM_COLUMN].unique()
        # Map the ids of user and item to 0 based index for following processing
        logging.info("Generating user_map and item_map...")
        user_map = {user: index for index, user in enumerate(original_users)}
        item_map = {item: index for index, item in enumerate(original_items)}
        df[movielens.USER_COLUMN] = df[movielens.USER_COLUMN].apply(
            lambda user: user_map[user])
        df[movielens.ITEM_COLUMN] = df[movielens.ITEM_COLUMN].apply(
            lambda item: item_map[item])
        num_users = len(original_users)
        num_items = len(original_items)
        assert num_users <= np.iinfo(rconst.USER_DTYPE).max
        assert num_items <= np.iinfo(rconst.ITEM_DTYPE).max
        assert df[movielens.USER_COLUMN].max() == num_users - 1
        assert df[movielens.ITEM_COLUMN].max() == num_items - 1
        # This sort is used to shard the dataframe by user, and later to select
        # the last item for a user to be used in validation.
        logging.info("Sorting by user, timestamp...")
        # This sort is equivalent to
        #   df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],
        #   inplace=True)
        # except that the order of items with the same user and timestamp are
        # sometimes different. For some reason, this sort results in a better
        # hit-rate during evaluation, matching the performance of the MLPerf
        # reference implementation.
        df.sort_values(by=movielens.TIMESTAMP_COLUMN, inplace=True)
        df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],
                       inplace=True, kind="mergesort")
        # The dataframe does not reconstruct indices in the sort or filter steps.
        df = df.reset_index()
        grouped = df.groupby(movielens.USER_COLUMN, group_keys=False)
        # Last interaction per user is held out for evaluation.
        eval_df, train_df = grouped.tail(1), grouped.apply(lambda x: x.iloc[:-1])
        data = {
            rconst.TRAIN_USER_KEY:
                train_df[movielens.USER_COLUMN].values.astype(rconst.USER_DTYPE),
            rconst.TRAIN_ITEM_KEY:
                train_df[movielens.ITEM_COLUMN].values.astype(rconst.ITEM_DTYPE),
            rconst.EVAL_USER_KEY:
                eval_df[movielens.USER_COLUMN].values.astype(rconst.USER_DTYPE),
            rconst.EVAL_ITEM_KEY:
                eval_df[movielens.ITEM_COLUMN].values.astype(rconst.ITEM_DTYPE),
            rconst.USER_MAP: user_map,
            rconst.ITEM_MAP: item_map,
            "create_time": time.time(),
        }
        logging.info("Writing raw data cache.")
        with open(cache_path, "wb") as f:
            pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
    # Cross-check against the known dataset dimensions.
    num_users, num_items = DATASET_TO_NUM_USERS_AND_ITEMS[dataset]
    if num_users != len(data[rconst.USER_MAP]):
        raise ValueError("Expected to find {} users, but found {}".format(
            num_users, len(data[rconst.USER_MAP])))
    if num_items != len(data[rconst.ITEM_MAP]):
        raise ValueError("Expected to find {} items, but found {}".format(
            num_items, len(data[rconst.ITEM_MAP])))
    return data, num_users, num_items
def construct_lookup_variables(train_pos_users, train_pos_items, num_users):
    """Build per-user lookup tables used for fast negative-item sampling.

    Args:
        train_pos_users: user ids of positive interactions, grouped by user
            in ascending order.
        train_pos_items: item ids aligned with `train_pos_users`.
        num_users: total number of users; user ids are assumed to be 0..num_users-1.

    Returns:
        total_negatives: for each (sorted) positive item, the cumulative count
            of negative item ids preceding it, concatenated over all users.
        index_bounds: length num_users + 1; positives of user u occupy the
            half-open slice [index_bounds[u], index_bounds[u + 1]).
        sorted_train_pos_items: train_pos_items with each user's slice sorted.
    """
    index_bounds = None
    sorted_train_pos_items = None
    def index_segment(user):
        # Cumulative tally of negative ids before each of this user's positives.
        lower, upper = index_bounds[user:user + 2]
        items = sorted_train_pos_items[lower:upper]
        negatives_since_last_positive = np.concatenate(
            [items[0][np.newaxis], items[1:] - items[:-1] - 1])
        return np.cumsum(negatives_since_last_positive)
    start_time = timeit.default_timer()
    # Positions where the (grouped) user column changes value.
    inner_bounds = np.argwhere(train_pos_users[1:] -
                               train_pos_users[:-1])[:, 0] + 1
    (upper_bound,) = train_pos_users.shape
    index_bounds = np.array([0] + inner_bounds.tolist() + [upper_bound])
    # Later logic will assume that the users are in sequential ascending order.
    assert np.array_equal(train_pos_users[index_bounds[:-1]], np.arange(num_users))
    sorted_train_pos_items = train_pos_items.copy()
    for i in range(num_users):
        lower, upper = index_bounds[i:i + 2]
        # In-place sort of each user's slice of positive items.
        sorted_train_pos_items[lower:upper].sort()
    total_negatives = np.concatenate([
        index_segment(i) for i in range(num_users)])
    logging.info("Negative total vector built. Time: {:.1f} seconds".format(
        timeit.default_timer() - start_time))
    return total_negatives, index_bounds, sorted_train_pos_items
class NCFDataset:
    """
    A dataset for NCF network.

    Produces (users, items, labels, valid_pt_mask) training batches with
    sampled negatives, or (users, items, duplicate_mask) evaluation batches,
    depending on `is_training`.
    """
    def __init__(self,
                 pos_users,
                 pos_items,
                 num_users,
                 num_items,
                 batch_size,
                 total_negatives,
                 index_bounds,
                 sorted_train_pos_items,
                 num_neg,
                 is_training=True):
        # Aligned arrays of positive (user, item) interactions.
        self._pos_users = pos_users
        self._pos_items = pos_items
        self._num_users = num_users
        self._num_items = num_items
        self._batch_size = batch_size
        # Lookup tables produced by construct_lookup_variables().
        self._total_negatives = total_negatives
        self._index_bounds = index_bounds
        self._sorted_train_pos_items = sorted_train_pos_items
        self._is_training = is_training
        if self._is_training:
            self._train_pos_count = self._pos_users.shape[0]
        else:
            # Each eval "user row" is 1 positive + NUM_EVAL_NEGATIVES negatives.
            self._eval_users_per_batch = int(
                batch_size // (1 + rconst.NUM_EVAL_NEGATIVES))
        _pos_count = pos_users.shape[0]
        _num_samples = (1 + num_neg) * _pos_count
        # Number of batches per epoch (last batch may be padded).
        self.dataset_len = math.ceil(_num_samples / batch_size)
    def lookup_negative_items(self, negative_users):
        """Map each user to a uniformly sampled item id it has NOT interacted
        with, using the precomputed cumulative-negative tallies."""
        output = np.zeros(shape=negative_users.shape, dtype=rconst.ITEM_DTYPE) - 1
        left_index = self._index_bounds[negative_users]
        right_index = self._index_bounds[negative_users + 1] - 1
        num_positives = right_index - left_index + 1
        num_negatives = self._num_items - num_positives
        neg_item_choice = stat_utils.very_slightly_biased_randint(num_negatives)
        # Shortcuts:
        # For points where the negative is greater than or equal to the tally before
        # the last positive point there is no need to bisect. Instead the item id
        # corresponding to the negative item choice is simply:
        #   last_postive_index + 1 + (neg_choice - last_negative_tally)
        # Similarly, if the selection is less than the tally at the first positive
        # then the item_id is simply the selection.
        #
        # Because MovieLens organizes popular movies into low integers (which is
        # preserved through the preprocessing), the first shortcut is very
        # efficient, allowing ~60% of samples to bypass the bisection. For the same
        # reason, the second shortcut is rarely triggered (<0.02%) and is therefore
        # not worth implementing.
        use_shortcut = neg_item_choice >= self._total_negatives[right_index]
        output[use_shortcut] = (
            self._sorted_train_pos_items[right_index] + 1 +
            (neg_item_choice - self._total_negatives[right_index])
        )[use_shortcut]
        if np.all(use_shortcut):
            # The bisection code is ill-posed when there are no elements.
            return output
        not_use_shortcut = np.logical_not(use_shortcut)
        left_index = left_index[not_use_shortcut]
        right_index = right_index[not_use_shortcut]
        neg_item_choice = neg_item_choice[not_use_shortcut]
        # Vectorized binary search over each user's positive-item slice.
        num_loops = np.max(
            np.ceil(np.log2(num_positives[not_use_shortcut])).astype(np.int32))
        for _ in range(num_loops):
            mid_index = (left_index + right_index) // 2
            right_criteria = self._total_negatives[mid_index] > neg_item_choice
            left_criteria = np.logical_not(right_criteria)
            right_index[right_criteria] = mid_index[right_criteria]
            left_index[left_criteria] = mid_index[left_criteria]
        # Expected state after bisection pass:
        #   The right index is the smallest index whose tally is greater than the
        #   negative item choice index.
        assert np.all((right_index - left_index) <= 1)
        output[not_use_shortcut] = (
            self._sorted_train_pos_items[right_index] - (self._total_negatives[right_index] - neg_item_choice)
        )
        assert np.all(output >= 0)
        return output
    def _get_train_item(self, index):
        """Assemble one training batch from a flat array of sample indices.

        Indices < _train_pos_count select positives; larger indices (taken
        modulo the positive count) select users for which a negative item is
        sampled."""
        (mask_start_index,) = index.shape
        index_mod = np.mod(index, self._train_pos_count)
        # get batch of users
        users = self._pos_users[index_mod]
        # get batch of items
        negative_indices = np.greater_equal(index, self._train_pos_count)
        negative_users = users[negative_indices]
        negative_items = self.lookup_negative_items(negative_users=negative_users)
        items = self._pos_items[index_mod]
        items[negative_indices] = negative_items
        # get batch of labels (1 for positives, 0 for sampled negatives)
        labels = np.logical_not(negative_indices)
        # pad last partial batch; padded points are masked out by valid_pt_mask
        pad_length = self._batch_size - index.shape[0]
        if pad_length:
            user_pad = np.arange(pad_length, dtype=users.dtype) % self._num_users
            item_pad = np.arange(pad_length, dtype=items.dtype) % self._num_items
            label_pad = np.zeros(shape=(pad_length,), dtype=labels.dtype)
            users = np.concatenate([users, user_pad])
            items = np.concatenate([items, item_pad])
            labels = np.concatenate([labels, label_pad])
        users = np.reshape(users, (self._batch_size, 1))  # (_batch_size, 1), int32
        items = np.reshape(items, (self._batch_size, 1))  # (_batch_size, 1), int32
        mask_start_index = np.array(mask_start_index, dtype=np.int32)  # scalar: count of real (unpadded) samples
        valid_pt_mask = np.expand_dims(
            np.less(np.arange(self._batch_size), mask_start_index), -1).astype(np.float32)  # (_batch_size, 1), float32
        labels = np.reshape(labels, (self._batch_size, 1)).astype(np.int32)  # (_batch_size, 1), int32
        return users, items, labels, valid_pt_mask
    @staticmethod
    def _assemble_eval_batch(users, positive_items, negative_items,
                             users_per_batch):
        """Construct duplicate_mask and structure data accordingly.

        The positive items should be last so that they lose ties. However, they
        should not be masked out if the true eval positive happens to be
        selected as a negative. So instead, the positive is placed in the first
        position, and then switched with the last element after the duplicate
        mask has been computed.

        Args:
          users: An array of users in a batch. (should be identical along axis 1)
          positive_items: An array (batch_size x 1) of positive item indices.
          negative_items: An array of negative item indices.
          users_per_batch: How many users should be in the batch. This is passed
            as an argument so that ncf_test.py can use this method.

        Returns:
          User, item, and duplicate_mask arrays.
        """
        items = np.concatenate([positive_items, negative_items], axis=1)
        # We pad the users and items here so that the duplicate mask calculation
        # will include padding. The metric function relies on all padded elements
        # except the positive being marked as duplicate to mask out padded points.
        if users.shape[0] < users_per_batch:
            pad_rows = users_per_batch - users.shape[0]
            padding = np.zeros(shape=(pad_rows, users.shape[1]), dtype=np.int32)
            users = np.concatenate([users, padding.astype(users.dtype)], axis=0)
            items = np.concatenate([items, padding.astype(items.dtype)], axis=0)
        duplicate_mask = stat_utils.mask_duplicates(items, axis=1).astype(np.float32)
        # Swap the positive (column 0) with the last column, post-mask.
        items[:, (0, -1)] = items[:, (-1, 0)]
        duplicate_mask[:, (0, -1)] = duplicate_mask[:, (-1, 0)]
        assert users.shape == items.shape == duplicate_mask.shape
        return users, items, duplicate_mask
    def _get_eval_item(self, index):
        """Assemble one evaluation batch for the user range (low, high)."""
        low_index, high_index = index
        users = np.repeat(self._pos_users[low_index:high_index, np.newaxis],
                          1 + rconst.NUM_EVAL_NEGATIVES, axis=1)
        positive_items = self._pos_items[low_index:high_index, np.newaxis]
        negative_items = (self.lookup_negative_items(negative_users=users[:, :-1])
                          .reshape(-1, rconst.NUM_EVAL_NEGATIVES))
        users, items, duplicate_mask = self._assemble_eval_batch(
            users, positive_items, negative_items, self._eval_users_per_batch)
        users = np.reshape(users.flatten(), (self._batch_size, 1))  # (self._batch_size, 1), int32
        items = np.reshape(items.flatten(), (self._batch_size, 1))  # (self._batch_size, 1), int32
        duplicate_mask = np.reshape(duplicate_mask.flatten(), (self._batch_size, 1))  # (self._batch_size, 1), float32
        return users, items, duplicate_mask
    def __getitem__(self, index):
        """
        Get a batch of samples.

        `index` is a flat index array in training mode, or a (low, high)
        user-range tuple in evaluation mode.
        """
        if self._is_training:
            return self._get_train_item(index)
        return self._get_eval_item(index)
    def __len__(self):
        """
        Return length of the dataset, i.e., the number of batches for an epoch
        """
        return self.dataset_len
class RandomSampler(Sampler):
    """
    A random sampler for dataset.

    Yields one index array per batch, covering the shuffled
    (1 + num_train_negatives) * pos_count samples of an epoch.
    """
    def __init__(self, pos_count, num_train_negatives, batch_size):
        self.pos_count = pos_count
        self._num_samples = (1 + num_train_negatives) * self.pos_count
        self._batch_size = batch_size
        self._num_batches = math.ceil(self._num_samples / self._batch_size)
        super().__init__(self._num_batches)
    def __iter__(self):
        """
        Return indices of all batches within an epoch.
        """
        indices = stat_utils.permutation((self._num_samples, stat_utils.random_int32()))
        batch_indices = [indices[x * self._batch_size:(x + 1) * self._batch_size] for x in range(self._num_batches)]
        # padding last batch indices if necessary
        # NOTE(review): padding draws from np.random rather than the seeded
        # stat_utils generator, so the padded tail is not reproducible.
        if len(batch_indices) > 2 and len(batch_indices[-2]) != len(batch_indices[-1]):
            pad_nums = len(batch_indices[-2]) - len(batch_indices[-1])
            pad_indices = np.random.randint(0, self._num_samples, pad_nums)
            batch_indices[-1] = np.hstack((batch_indices[-1], pad_indices))
        return iter(batch_indices)
class DistributedSamplerOfTrain:
    """
    A distributed sampler for dataset.

    Shards the shuffled training sample indices across `rank_size` workers;
    each worker iterates only its own interleaved shard of batches.
    """
    def __init__(self, pos_count, num_train_negatives, batch_size, rank_id, rank_size):
        """
        Distributed sampler of training dataset.
        """
        self._num_samples = (1 + num_train_negatives) * pos_count
        self._rank_id = rank_id
        self._rank_size = rank_size
        self._batch_size = batch_size
        self._batchs_per_rank = int(math.ceil(self._num_samples / self._batch_size / rank_size))
        self._samples_per_rank = int(math.ceil(self._batchs_per_rank * self._batch_size))
        self._total_num_samples = self._samples_per_rank * self._rank_size
    def __iter__(self):
        """
        Returns the data after each sampling.
        """
        indices = stat_utils.permutation((self._num_samples, stat_utils.random_int32()))
        indices = indices.tolist()
        # Wrap around so every rank receives the same number of samples.
        indices.extend(indices[:self._total_num_samples - len(indices)])
        # Interleaved shard: rank r takes positions r, r + rank_size, ...
        indices = indices[self._rank_id:self._total_num_samples:self._rank_size]
        batch_indices = [indices[x * self._batch_size:(x + 1) * self._batch_size] for x in range(self._batchs_per_rank)]
        return iter(np.array(batch_indices))
    def __len__(self):
        """
        Returns the length after each sampling.
        """
        return self._batchs_per_rank
class SequenceSampler(Sampler):
    """
    A sequence sampler for dataset.

    Yields (start_user, stop_user) tuples that slice the evaluation users
    into consecutive batches.
    """
    def __init__(self, eval_batch_size, num_users):
        self._eval_users_per_batch = int(
            eval_batch_size // (1 + rconst.NUM_EVAL_NEGATIVES))
        self._eval_elements_in_epoch = num_users * (1 + rconst.NUM_EVAL_NEGATIVES)
        self._eval_batches_per_epoch = self.count_batches(
            self._eval_elements_in_epoch, eval_batch_size)
        super().__init__(self._eval_batches_per_epoch)
    def __iter__(self):
        indices = [(x * self._eval_users_per_batch, (x + 1) * self._eval_users_per_batch)
                   for x in range(self._eval_batches_per_epoch)]
        # padding last batch indices if necessary
        # NOTE(review): entries are (start, stop) 2-tuples, so both len()
        # calls always return 2 and this branch can never execute — confirm
        # whether it was meant to compare range widths instead.
        if len(indices) > 2 and len(indices[-2]) != len(indices[-1]):
            pad_nums = len(indices[-2]) - len(indices[-1])
            pad_indices = np.random.randint(0, self._eval_elements_in_epoch, pad_nums)
            indices[-1] = np.hstack((indices[-1], pad_indices))
        return iter(indices)
    @staticmethod
    def count_batches(example_count, batch_size, batches_per_step=1):
        """Determine the number of batches, rounding up to fill all devices."""
        x = (example_count + batch_size - 1) // batch_size
        return (x + batches_per_step - 1) // batches_per_step * batches_per_step
class DistributedSamplerOfEval:
    """
    A distributed sampler for eval dataset.

    Yields (start_user, stop_user) tuples for this rank's share of the
    evaluation batches.
    """
    def __init__(self, eval_batch_size, num_users, rank_id, rank_size):
        self._eval_users_per_batch = int(
            eval_batch_size // (1 + rconst.NUM_EVAL_NEGATIVES))
        self._eval_elements_in_epoch = num_users * (1 + rconst.NUM_EVAL_NEGATIVES)
        self._eval_batches_per_epoch = self.count_batches(
            self._eval_elements_in_epoch, eval_batch_size)
        self._rank_id = rank_id
        self._rank_size = rank_size
        self._eval_batch_size = eval_batch_size
        self._batchs_per_rank = int(math.ceil(self._eval_batches_per_epoch / rank_size))
    def __iter__(self):
        # NOTE(review): the stop index adds self._rank_id, so for rank_id > 0
        # each slice spans more than one batch worth of users — verify this is
        # intended rather than a per-rank offset on the start index.
        indices = [(x * self._eval_users_per_batch, (x + self._rank_id + 1) * self._eval_users_per_batch)
                   for x in range(self._batchs_per_rank)]
        return iter(np.array(indices))
    @staticmethod
    def count_batches(example_count, batch_size, batches_per_step=1):
        """Determine the number of batches, rounding up to fill all devices."""
        x = (example_count + batch_size - 1) // batch_size
        return (x + batches_per_step - 1) // batches_per_step * batches_per_step
    def __len__(self):
        return self._batchs_per_rank
def parse_eval_batch_size(eval_batch_size):
    """
    Validate the evaluation batch size and return it unchanged.

    The batch must hold whole users, i.e. be divisible by
    ``1 + rconst.NUM_EVAL_NEGATIVES`` slots per user.
    """
    slots_per_user = 1 + rconst.NUM_EVAL_NEGATIVES
    if eval_batch_size % slots_per_user != 0:
        raise ValueError("Eval batch size {} is not divisible by {}".format(
            eval_batch_size, slots_per_user))
    return eval_batch_size
def create_dataset(test_train=True, data_dir='./dataset/', dataset='ml-1m', train_epochs=14, batch_size=256,
                   eval_batch_size=160000, num_neg=4, rank_id=None, rank_size=None):
    """
    Create NCF dataset.

    Builds the training pipeline when ``test_train`` is True, otherwise the
    evaluation pipeline.  Returns ``(ds, num_users, num_items)`` where ``ds``
    is a repeated ``GeneratorDataset``.
    """
    data, num_users, num_items = load_data(data_dir, dataset)
    train_pos_users = data[rconst.TRAIN_USER_KEY]
    train_pos_items = data[rconst.TRAIN_ITEM_KEY]
    eval_pos_users = data[rconst.EVAL_USER_KEY]
    eval_pos_items = data[rconst.EVAL_ITEM_KEY]
    # Precomputed lookup tables used for fast negative-item sampling.
    total_negatives, index_bounds, sorted_train_pos_items = \
        construct_lookup_variables(train_pos_users, train_pos_items, num_users)
    if test_train:
        # NOTE(review): leftover debug print of the full arrays — consider
        # removing or demoting to logging.debug.
        print(train_pos_users, train_pos_items, num_users, num_items, batch_size, total_negatives, index_bounds,
              sorted_train_pos_items)
        dataset = NCFDataset(train_pos_users, train_pos_items, num_users, num_items, batch_size, total_negatives,
                             index_bounds, sorted_train_pos_items, num_neg)
        sampler = RandomSampler(train_pos_users.shape[0], num_neg, batch_size)
        if rank_id is not None and rank_size is not None:
            # Distributed training: shard the shuffled batches across ranks.
            sampler = DistributedSamplerOfTrain(train_pos_users.shape[0], num_neg, batch_size, rank_id, rank_size)
        ds = GeneratorDataset(dataset,
                              column_names=[movielens.USER_COLUMN,
                                            movielens.ITEM_COLUMN,
                                            "labels",
                                            rconst.VALID_POINT_MASK],
                              sampler=sampler)
    else:
        eval_batch_size = parse_eval_batch_size(eval_batch_size=eval_batch_size)
        dataset = NCFDataset(eval_pos_users, eval_pos_items, num_users, num_items,
                             eval_batch_size, total_negatives, index_bounds,
                             sorted_train_pos_items, num_neg, is_training=False)
        sampler = SequenceSampler(eval_batch_size, num_users)
        ds = GeneratorDataset(dataset,
                              column_names=[movielens.USER_COLUMN,
                                            movielens.ITEM_COLUMN,
                                            rconst.DUPLICATE_MASK],
                              sampler=sampler)
    # Evaluation repeats one extra epoch beyond train_epochs.
    repeat_count = train_epochs if test_train else train_epochs + 1
    ds = ds.repeat(repeat_count)
    return ds, num_users, num_items
| 41.024712 | 120 | 0.656132 |
import logging
import math
import os
import time
import timeit
import pickle
import numpy as np
import pandas as pd
from mindspore.dataset import GeneratorDataset, Sampler
import src.constants as rconst
import src.movielens as movielens
import src.stat_utils as stat_utils
# Expected (num_users, num_items) for each supported MovieLens variant;
# used to sanity-check the data loaded by load_data().
DATASET_TO_NUM_USERS_AND_ITEMS = {
    "ml-1m": (6040, 3706),
    "ml-20m": (138493, 26744)
}
# Keys a valid raw-data cache file must contain to be considered fresh.
_EXPECTED_CACHE_KEYS = (
    rconst.TRAIN_USER_KEY, rconst.TRAIN_ITEM_KEY, rconst.EVAL_USER_KEY,
    rconst.EVAL_ITEM_KEY, rconst.USER_MAP, rconst.ITEM_MAP)
def load_data(data_dir, dataset):
    """Load ratings, remap ids, and split into train/eval; cached via pickle.

    Returns ``(data, num_users, num_items)`` where ``data`` maps the rconst
    keys to user/item arrays and the id-remapping dicts.
    """
    logging.info("Beginning loading data...")
    raw_rating_path = os.path.join(data_dir, dataset, movielens.RATINGS_FILE)
    cache_path = os.path.join(data_dir, dataset, rconst.RAW_CACHE_FILE)
    valid_cache = os.path.exists(cache_path)
    if valid_cache:
        with open(cache_path, 'rb') as f:
            cached_data = pickle.load(f)
        # A cache missing any expected key is stale and gets rebuilt.
        for key in _EXPECTED_CACHE_KEYS:
            if key not in cached_data:
                valid_cache = False
        if not valid_cache:
            logging.info("Removing stale raw data cache file.")
            os.remove(cache_path)
    if valid_cache:
        data = cached_data
    else:
        with open(raw_rating_path) as f:
            df = pd.read_csv(f)
        # Drop users with too few ratings to be useful.
        grouped = df.groupby(movielens.USER_COLUMN)
        df = grouped.filter(lambda x: len(x) >= rconst.MIN_NUM_RATINGS)
        original_users = df[movielens.USER_COLUMN].unique()
        original_items = df[movielens.ITEM_COLUMN].unique()
        logging.info("Generating user_map and item_map...")
        # Remap raw ids to dense 0..n-1 indices.
        user_map = {user: index for index, user in enumerate(original_users)}
        item_map = {item: index for index, item in enumerate(original_items)}
        df[movielens.USER_COLUMN] = df[movielens.USER_COLUMN].apply(
            lambda user: user_map[user])
        df[movielens.ITEM_COLUMN] = df[movielens.ITEM_COLUMN].apply(
            lambda item: item_map[item])
        num_users = len(original_users)
        num_items = len(original_items)
        # Ids must fit in the compact dtypes used downstream.
        assert num_users <= np.iinfo(rconst.USER_DTYPE).max
        assert num_items <= np.iinfo(rconst.ITEM_DTYPE).max
        assert df[movielens.USER_COLUMN].max() == num_users - 1
        assert df[movielens.ITEM_COLUMN].max() == num_items - 1
        logging.info("Sorting by user, timestamp...")
        # Stable mergesort keeps the timestamp order within each user.
        df.sort_values(by=movielens.TIMESTAMP_COLUMN, inplace=True)
        df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],
                       inplace=True, kind="mergesort")
        df = df.reset_index()
        grouped = df.groupby(movielens.USER_COLUMN, group_keys=False)
        # Leave-one-out split: last rating per user is held out for eval.
        eval_df, train_df = grouped.tail(1), grouped.apply(lambda x: x.iloc[:-1])
        data = {
            rconst.TRAIN_USER_KEY:
                train_df[movielens.USER_COLUMN].values.astype(rconst.USER_DTYPE),
            rconst.TRAIN_ITEM_KEY:
                train_df[movielens.ITEM_COLUMN].values.astype(rconst.ITEM_DTYPE),
            rconst.EVAL_USER_KEY:
                eval_df[movielens.USER_COLUMN].values.astype(rconst.USER_DTYPE),
            rconst.EVAL_ITEM_KEY:
                eval_df[movielens.ITEM_COLUMN].values.astype(rconst.ITEM_DTYPE),
            rconst.USER_MAP: user_map,
            rconst.ITEM_MAP: item_map,
            "create_time": time.time(),
        }
        logging.info("Writing raw data cache.")
        with open(cache_path, "wb") as f:
            pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
    # Cross-check against the known dataset sizes.
    num_users, num_items = DATASET_TO_NUM_USERS_AND_ITEMS[dataset]
    if num_users != len(data[rconst.USER_MAP]):
        raise ValueError("Expected to find {} users, but found {}".format(
            num_users, len(data[rconst.USER_MAP])))
    if num_items != len(data[rconst.ITEM_MAP]):
        raise ValueError("Expected to find {} items, but found {}".format(
            num_items, len(data[rconst.ITEM_MAP])))
    return data, num_users, num_items
def construct_lookup_variables(train_pos_users, train_pos_items, num_users):
    """Build lookup tables for fast negative-item sampling.

    Assumes ``train_pos_users`` is sorted by user.  Returns
    ``(total_negatives, index_bounds, sorted_train_pos_items)``:
    per-positive cumulative negative counts, per-user slice boundaries,
    and the positive items sorted within each user's slice.
    """
    index_bounds = None
    sorted_train_pos_items = None
    def index_segment(user):
        # Cumulative count of negative item ids at or below each sorted
        # positive item of this user.
        lower, upper = index_bounds[user:user + 2]
        items = sorted_train_pos_items[lower:upper]
        negatives_since_last_positive = np.concatenate(
            [items[0][np.newaxis], items[1:] - items[:-1] - 1])
        return np.cumsum(negatives_since_last_positive)
    start_time = timeit.default_timer()
    # Positions where the user id changes mark per-user segment boundaries.
    inner_bounds = np.argwhere(train_pos_users[1:] -
                               train_pos_users[:-1])[:, 0] + 1
    (upper_bound,) = train_pos_users.shape
    index_bounds = np.array([0] + inner_bounds.tolist() + [upper_bound])
    # Every user 0..num_users-1 must appear, in order.
    assert np.array_equal(train_pos_users[index_bounds[:-1]], np.arange(num_users))
    sorted_train_pos_items = train_pos_items.copy()
    # In-place sort of each user's positive items.
    for i in range(num_users):
        lower, upper = index_bounds[i:i + 2]
        sorted_train_pos_items[lower:upper].sort()
    total_negatives = np.concatenate([
        index_segment(i) for i in range(num_users)])
    logging.info("Negative total vector built. Time: {:.1f} seconds".format(
        timeit.default_timer() - start_time))
    return total_negatives, index_bounds, sorted_train_pos_items
class NCFDataset:
    """Map-style dataset producing NCF training or evaluation batches.

    Training items are ``(users, items, labels, valid_point_mask)``;
    evaluation items are ``(users, items, duplicate_mask)``.
    """
    def __init__(self,
                 pos_users,
                 pos_items,
                 num_users,
                 num_items,
                 batch_size,
                 total_negatives,
                 index_bounds,
                 sorted_train_pos_items,
                 num_neg,
                 is_training=True):
        self._pos_users = pos_users
        self._pos_items = pos_items
        self._num_users = num_users
        self._num_items = num_items
        self._batch_size = batch_size
        # Lookup tables from construct_lookup_variables().
        self._total_negatives = total_negatives
        self._index_bounds = index_bounds
        self._sorted_train_pos_items = sorted_train_pos_items
        self._is_training = is_training
        if self._is_training:
            self._train_pos_count = self._pos_users.shape[0]
        else:
            # Users per eval batch: 1 positive + NUM_EVAL_NEGATIVES per user.
            self._eval_users_per_batch = int(
                batch_size // (1 + rconst.NUM_EVAL_NEGATIVES))
        # Total samples = positives plus num_neg negatives per positive.
        _pos_count = pos_users.shape[0]
        _num_samples = (1 + num_neg) * _pos_count
        self.dataset_len = math.ceil(_num_samples / batch_size)
    def lookup_negative_items(self, negative_users):
        """Vectorized sampling of one negative item per entry of negative_users.

        Maps a random rank among each user's negatives to an actual item id
        via a shortcut for ids above the user's largest positive, falling back
        to a vectorized binary search over the cumulative-negative table.
        """
        output = np.zeros(shape=negative_users.shape, dtype=rconst.ITEM_DTYPE) - 1
        left_index = self._index_bounds[negative_users]
        right_index = self._index_bounds[negative_users + 1] - 1
        num_positives = right_index - left_index + 1
        num_negatives = self._num_items - num_positives
        neg_item_choice = stat_utils.very_slightly_biased_randint(num_negatives)
        # Shortcut: choices beyond the last positive map past it directly.
        use_shortcut = neg_item_choice >= self._total_negatives[right_index]
        output[use_shortcut] = (
            self._sorted_train_pos_items[right_index] + 1 +
            (neg_item_choice - self._total_negatives[right_index])
        )[use_shortcut]
        if np.all(use_shortcut):
            return output
        not_use_shortcut = np.logical_not(use_shortcut)
        left_index = left_index[not_use_shortcut]
        right_index = right_index[not_use_shortcut]
        neg_item_choice = neg_item_choice[not_use_shortcut]
        # Enough bisection steps to converge for the widest user segment.
        num_loops = np.max(
            np.ceil(np.log2(num_positives[not_use_shortcut])).astype(np.int32))
        for _ in range(num_loops):
            mid_index = (left_index + right_index) // 2
            right_criteria = self._total_negatives[mid_index] > neg_item_choice
            left_criteria = np.logical_not(right_criteria)
            right_index[right_criteria] = mid_index[right_criteria]
            left_index[left_criteria] = mid_index[left_criteria]
        assert np.all((right_index - left_index) <= 1)
        output[not_use_shortcut] = (
            self._sorted_train_pos_items[right_index] - (self._total_negatives[right_index] - neg_item_choice)
        )
        assert np.all(output >= 0)
        return output
    def _get_train_item(self, index):
        """Assemble one training batch from a flat array of sample indices."""
        (mask_start_index,) = index.shape
        # Indices >= train_pos_count denote negative samples for the same user.
        index_mod = np.mod(index, self._train_pos_count)
        users = self._pos_users[index_mod]
        negative_indices = np.greater_equal(index, self._train_pos_count)
        negative_users = users[negative_indices]
        negative_items = self.lookup_negative_items(negative_users=negative_users)
        items = self._pos_items[index_mod]
        items[negative_indices] = negative_items
        labels = np.logical_not(negative_indices)
        # Pad a short final batch up to the fixed batch size; the padding is
        # masked out via valid_pt_mask below.
        pad_length = self._batch_size - index.shape[0]
        if pad_length:
            user_pad = np.arange(pad_length, dtype=users.dtype) % self._num_users
            item_pad = np.arange(pad_length, dtype=items.dtype) % self._num_items
            label_pad = np.zeros(shape=(pad_length,), dtype=labels.dtype)
            users = np.concatenate([users, user_pad])
            items = np.concatenate([items, item_pad])
            labels = np.concatenate([labels, label_pad])
        users = np.reshape(users, (self._batch_size, 1))
        items = np.reshape(items, (self._batch_size, 1))
        mask_start_index = np.array(mask_start_index, dtype=np.int32)
        valid_pt_mask = np.expand_dims(
            np.less(np.arange(self._batch_size), mask_start_index), -1).astype(np.float32)
        labels = np.reshape(labels, (self._batch_size, 1)).astype(np.int32)
        return users, items, labels, valid_pt_mask
    @staticmethod
    def _assemble_eval_batch(users, positive_items, negative_items,
                             users_per_batch):
        """Combine positives and negatives into one eval batch, padding short rows."""
        items = np.concatenate([positive_items, negative_items], axis=1)
        if users.shape[0] < users_per_batch:
            pad_rows = users_per_batch - users.shape[0]
            padding = np.zeros(shape=(pad_rows, users.shape[1]), dtype=np.int32)
            users = np.concatenate([users, padding.astype(users.dtype)], axis=0)
            items = np.concatenate([items, padding.astype(items.dtype)], axis=0)
        duplicate_mask = stat_utils.mask_duplicates(items, axis=1).astype(np.float32)
        # Swap so the positive item ends up in the last column.
        items[:, (0, -1)] = items[:, (-1, 0)]
        duplicate_mask[:, (0, -1)] = duplicate_mask[:, (-1, 0)]
        assert users.shape == items.shape == duplicate_mask.shape
        return users, items, duplicate_mask
    def _get_eval_item(self, index):
        """Assemble one evaluation batch from a (low, high) user-index range."""
        low_index, high_index = index
        users = np.repeat(self._pos_users[low_index:high_index, np.newaxis],
                          1 + rconst.NUM_EVAL_NEGATIVES, axis=1)
        positive_items = self._pos_items[low_index:high_index, np.newaxis]
        negative_items = (self.lookup_negative_items(negative_users=users[:, :-1])
                          .reshape(-1, rconst.NUM_EVAL_NEGATIVES))
        users, items, duplicate_mask = self._assemble_eval_batch(
            users, positive_items, negative_items, self._eval_users_per_batch)
        users = np.reshape(users.flatten(), (self._batch_size, 1))
        items = np.reshape(items.flatten(), (self._batch_size, 1))
        duplicate_mask = np.reshape(duplicate_mask.flatten(), (self._batch_size, 1))
        return users, items, duplicate_mask
    def __getitem__(self, index):
        # Dispatch on mode: index is a flat array (train) or a range tuple (eval).
        if self._is_training:
            return self._get_train_item(index)
        return self._get_eval_item(index)
    def __len__(self):
        return self.dataset_len
class RandomSampler(Sampler):
    """Shuffles all (positive + negative) sample indices into fixed-size batches."""
    def __init__(self, pos_count, num_train_negatives, batch_size):
        self.pos_count = pos_count
        # One positive plus num_train_negatives negatives per positive.
        self._num_samples = (1 + num_train_negatives) * self.pos_count
        self._batch_size = batch_size
        self._num_batches = math.ceil(self._num_samples / self._batch_size)
        super().__init__(self._num_batches)
    def __iter__(self):
        # Freshly seeded permutation of all sample indices each epoch.
        indices = stat_utils.permutation((self._num_samples, stat_utils.random_int32()))
        batch_indices = [indices[x * self._batch_size:(x + 1) * self._batch_size] for x in range(self._num_batches)]
        # Pad a short final batch with random indices so all batches are equal length.
        if len(batch_indices) > 2 and len(batch_indices[-2]) != len(batch_indices[-1]):
            pad_nums = len(batch_indices[-2]) - len(batch_indices[-1])
            pad_indices = np.random.randint(0, self._num_samples, pad_nums)
            batch_indices[-1] = np.hstack((batch_indices[-1], pad_indices))
        return iter(batch_indices)
class DistributedSamplerOfTrain:
    """Strided distributed train sampler: each rank takes every rank_size-th index."""
    def __init__(self, pos_count, num_train_negatives, batch_size, rank_id, rank_size):
        self._num_samples = (1 + num_train_negatives) * pos_count
        self._rank_id = rank_id
        self._rank_size = rank_size
        self._batch_size = batch_size
        # Batches per rank, rounded up so every rank yields the same count.
        self._batchs_per_rank = int(math.ceil(self._num_samples / self._batch_size / rank_size))
        self._samples_per_rank = int(math.ceil(self._batchs_per_rank * self._batch_size))
        self._total_num_samples = self._samples_per_rank * self._rank_size
    def __iter__(self):
        # Seeded permutation, wrapped around to reach the padded total, then
        # strided by rank so every rank sees a disjoint subset.
        indices = stat_utils.permutation((self._num_samples, stat_utils.random_int32()))
        indices = indices.tolist()
        indices.extend(indices[:self._total_num_samples - len(indices)])
        indices = indices[self._rank_id:self._total_num_samples:self._rank_size]
        batch_indices = [indices[x * self._batch_size:(x + 1) * self._batch_size] for x in range(self._batchs_per_rank)]
        return iter(np.array(batch_indices))
    def __len__(self):
        # Number of batches this rank will yield.
        return self._batchs_per_rank
class SequenceSampler(Sampler):
    """Sequential eval sampler yielding (low, high) user-index ranges per batch."""
    def __init__(self, eval_batch_size, num_users):
        # Each user occupies 1 positive + NUM_EVAL_NEGATIVES candidate slots.
        self._eval_users_per_batch = int(
            eval_batch_size // (1 + rconst.NUM_EVAL_NEGATIVES))
        self._eval_elements_in_epoch = num_users * (1 + rconst.NUM_EVAL_NEGATIVES)
        self._eval_batches_per_epoch = self.count_batches(
            self._eval_elements_in_epoch, eval_batch_size)
        super().__init__(self._eval_batches_per_epoch)
    def __iter__(self):
        # Each element is a (start, end) slice of the user axis.  The dataset
        # pads a short final batch itself, so no index padding is needed here.
        # The previous "pad last batch" branch compared len() of 2-tuples
        # (always equal) and could never trigger; the dead code was removed.
        indices = [(x * self._eval_users_per_batch, (x + 1) * self._eval_users_per_batch)
                   for x in range(self._eval_batches_per_epoch)]
        return iter(indices)
    @staticmethod
    def count_batches(example_count, batch_size, batches_per_step=1):
        """Determine the number of batches, rounding up to fill all devices."""
        x = (example_count + batch_size - 1) // batch_size
        return (x + batches_per_step - 1) // batches_per_step * batches_per_step
class DistributedSamplerOfEval:
    """Distributed eval sampler yielding (start, end) user-index ranges per rank."""
    def __init__(self, eval_batch_size, num_users, rank_id, rank_size):
        # Each user occupies 1 positive + NUM_EVAL_NEGATIVES candidate slots.
        self._eval_users_per_batch = int(
            eval_batch_size // (1 + rconst.NUM_EVAL_NEGATIVES))
        self._eval_elements_in_epoch = num_users * (1 + rconst.NUM_EVAL_NEGATIVES)
        self._eval_batches_per_epoch = self.count_batches(
            self._eval_elements_in_epoch, eval_batch_size)
        self._rank_id = rank_id
        self._rank_size = rank_size
        self._eval_batch_size = eval_batch_size
        self._batchs_per_rank = int(math.ceil(self._eval_batches_per_epoch / rank_size))
    def __iter__(self):
        # NOTE(review): the end bound uses (x + self._rank_id + 1), so spans
        # widen with rank_id and overlap across ranks; a strided shard would
        # normally be expected here — confirm intent.
        indices = [(x * self._eval_users_per_batch, (x + self._rank_id + 1) * self._eval_users_per_batch)
                   for x in range(self._batchs_per_rank)]
        return iter(np.array(indices))
    @staticmethod
    def count_batches(example_count, batch_size, batches_per_step=1):
        """Determine the number of batches, rounding up to fill all devices."""
        x = (example_count + batch_size - 1) // batch_size
        return (x + batches_per_step - 1) // batches_per_step * batches_per_step
    def __len__(self):
        # Number of batches this rank will yield.
        return self._batchs_per_rank
def parse_eval_batch_size(eval_batch_size):
    """Validate that the eval batch size holds whole users; return it unchanged."""
    slots_per_user = 1 + rconst.NUM_EVAL_NEGATIVES
    if eval_batch_size % slots_per_user != 0:
        raise ValueError("Eval batch size {} is not divisible by {}".format(
            eval_batch_size, slots_per_user))
    return eval_batch_size
def create_dataset(test_train=True, data_dir='./dataset/', dataset='ml-1m', train_epochs=14, batch_size=256,
                   eval_batch_size=160000, num_neg=4, rank_id=None, rank_size=None):
    """Create the NCF GeneratorDataset (training or evaluation pipeline).

    Returns ``(ds, num_users, num_items)``.
    """
    data, num_users, num_items = load_data(data_dir, dataset)
    train_pos_users = data[rconst.TRAIN_USER_KEY]
    train_pos_items = data[rconst.TRAIN_ITEM_KEY]
    eval_pos_users = data[rconst.EVAL_USER_KEY]
    eval_pos_items = data[rconst.EVAL_ITEM_KEY]
    # Lookup tables used for fast vectorized negative sampling.
    total_negatives, index_bounds, sorted_train_pos_items = \
        construct_lookup_variables(train_pos_users, train_pos_items, num_users)
    if test_train:
        # NOTE(review): leftover debug print of the full arrays.
        print(train_pos_users, train_pos_items, num_users, num_items, batch_size, total_negatives, index_bounds,
              sorted_train_pos_items)
        dataset = NCFDataset(train_pos_users, train_pos_items, num_users, num_items, batch_size, total_negatives,
                             index_bounds, sorted_train_pos_items, num_neg)
        sampler = RandomSampler(train_pos_users.shape[0], num_neg, batch_size)
        if rank_id is not None and rank_size is not None:
            # Distributed training: shard batches across ranks.
            sampler = DistributedSamplerOfTrain(train_pos_users.shape[0], num_neg, batch_size, rank_id, rank_size)
        ds = GeneratorDataset(dataset,
                              column_names=[movielens.USER_COLUMN,
                                            movielens.ITEM_COLUMN,
                                            "labels",
                                            rconst.VALID_POINT_MASK],
                              sampler=sampler)
    else:
        eval_batch_size = parse_eval_batch_size(eval_batch_size=eval_batch_size)
        dataset = NCFDataset(eval_pos_users, eval_pos_items, num_users, num_items,
                             eval_batch_size, total_negatives, index_bounds,
                             sorted_train_pos_items, num_neg, is_training=False)
        sampler = SequenceSampler(eval_batch_size, num_users)
        ds = GeneratorDataset(dataset,
                              column_names=[movielens.USER_COLUMN,
                                            movielens.ITEM_COLUMN,
                                            rconst.DUPLICATE_MASK],
                              sampler=sampler)
    # Evaluation repeats one extra epoch beyond train_epochs.
    repeat_count = train_epochs if test_train else train_epochs + 1
    ds = ds.repeat(repeat_count)
    return ds, num_users, num_items
| true | true |
f710b8147a553fabf38ebede8c94806bd534a143 | 2,307 | py | Python | labml_nn/normalization/group_norm/experiment.py | BioGeek/annotated_deep_learning_paper_implementations | e2516cc3063cdfdf11cda05f22a10082297aa33e | [
"MIT"
] | 3,714 | 2021-05-26T03:42:15.000Z | 2022-03-31T16:45:20.000Z | labml_nn/normalization/group_norm/experiment.py | BioGeek/annotated_deep_learning_paper_implementations | e2516cc3063cdfdf11cda05f22a10082297aa33e | [
"MIT"
] | 43 | 2021-05-26T05:26:42.000Z | 2022-03-23T11:50:56.000Z | labml_nn/normalization/group_norm/experiment.py | BioGeek/annotated_deep_learning_paper_implementations | e2516cc3063cdfdf11cda05f22a10082297aa33e | [
"MIT"
] | 349 | 2021-05-26T21:07:09.000Z | 2022-03-31T07:52:00.000Z | """
---
title: CIFAR10 Experiment to try Group Normalization
summary: >
This trains is a simple convolutional neural network that uses group normalization
to classify CIFAR10 images.
---
# CIFAR10 Experiment for Group Normalization
"""
import torch.nn as nn
from labml import experiment
from labml.configs import option
from labml_helpers.module import Module
from labml_nn.experiments.cifar10 import CIFAR10Configs
from labml_nn.normalization.group_norm import GroupNorm
class Model(Module):
    """
    ### VGG model for CIFAR-10 classification

    A stack of VGG-style convolutional blocks with group normalization
    after every convolution, followed by a single linear classifier.
    """
    def __init__(self, groups: int = 32):
        super().__init__()
        # Channel configuration of each VGG block
        block_channels = [[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]]
        modules = []
        # RGB input
        prev_channels = 3
        for block in block_channels:
            # Convolution, group normalization and activation for each layer
            for out_channels in block:
                modules.append(nn.Conv2d(prev_channels, out_channels, kernel_size=3, padding=1))
                modules.append(GroupNorm(groups, out_channels))
                modules.append(nn.ReLU(inplace=True))
                prev_channels = out_channels
            # Down-sample at the end of each block
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
        self.layers = nn.Sequential(*modules)
        # Final logits layer
        self.fc = nn.Linear(512, 10)
    def forward(self, x):
        # Extract features, flatten, and classify
        features = self.layers(x)
        flattened = features.view(features.shape[0], -1)
        return self.fc(flattened)
class Configs(CIFAR10Configs):
    # Number of normalization groups used by every GroupNorm layer in the model
    groups: int = 16
@option(Configs.model)
def model(c: Configs):
    """
    ### Create model

    Builds the VGG model with ``c.groups`` normalization groups and moves
    it to the configured device.
    """
    return Model(c.groups).to(c.device)
def main():
    """Set up and run the CIFAR-10 group-normalization experiment."""
    # Register the experiment run
    experiment.create(name='cifar10', comment='group norm')
    configs = Configs()
    # Override the default optimizer settings
    overrides = {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
    }
    experiment.configs(configs, overrides)
    # Run the training loop inside the experiment context
    with experiment.start():
        configs.run()
# Run the experiment when executed as a script
if __name__ == '__main__':
    main()
| 26.517241 | 95 | 0.618986 |
import torch.nn as nn
from labml import experiment
from labml.configs import option
from labml_helpers.module import Module
from labml_nn.experiments.cifar10 import CIFAR10Configs
from labml_nn.normalization.group_norm import GroupNorm
class Model(Module):
    """VGG-style CIFAR-10 classifier using GroupNorm after every convolution."""
    def __init__(self, groups: int = 32):
        super().__init__()
        layers = []
        # RGB input
        in_channels = 3
        # Channel counts per VGG block; each block ends with a max-pool.
        for block in [[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]]:
            for channels in block:
                layers += [nn.Conv2d(in_channels, channels, kernel_size=3, padding=1),
                           GroupNorm(groups, channels),
                           nn.ReLU(inplace=True)]
                in_channels = channels
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        self.layers = nn.Sequential(*layers)
        # Final logits layer
        self.fc = nn.Linear(512, 10)
    def forward(self, x):
        # Extract features, flatten, classify
        x = self.layers(x)
        x = x.view(x.shape[0], -1)
        return self.fc(x)
class Configs(CIFAR10Configs):
    # Number of normalization groups used by every GroupNorm layer
    groups: int = 16
@option(Configs.model)
def model(c: Configs):
    """Build the model with the configured group count on the configured device."""
    return Model(c.groups).to(c.device)
def main():
    """Set up and run the CIFAR-10 group-norm experiment."""
    experiment.create(name='cifar10', comment='group norm')
    conf = Configs()
    # Override default optimizer settings
    experiment.configs(conf, {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
    })
    # Run the training loop inside the experiment context
    with experiment.start():
        conf.run()
# Entry point when run as a script
if __name__ == '__main__':
    main()
| true | true |
f710b83bd8594a1934a5829a1392dae4395ef186 | 305 | py | Python | 2017/07/obamacare-popularity-20170707/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | 14 | 2015-05-08T13:41:51.000Z | 2021-02-24T12:34:55.000Z | 2017/07/obamacare-popularity-20170707/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | null | null | null | 2017/07/obamacare-popularity-20170707/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | 7 | 2015-04-04T04:45:54.000Z | 2021-02-18T11:12:48.000Z | #!/usr/bin/env python
import base_filters
# Google Doc that holds the copy text for this graphic
COPY_GOOGLE_DOC_KEY = '1QOOhihZdUwdAJcUgkeokbx7YaDSkSGWtlXHvKXhHW3E'
# Whether this graphic bundles local asset files
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| 21.785714 | 77 | 0.819672 |
import base_filters
# Google Doc that holds the copy text for this graphic
COPY_GOOGLE_DOC_KEY = '1QOOhihZdUwdAJcUgkeokbx7YaDSkSGWtlXHvKXhHW3E'
# Whether this graphic bundles local asset files
USE_ASSETS = False
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| true | true |
f710b90a4060a39f957fd2a19f35fac1f130b7b9 | 2,324 | py | Python | nca47/objects/dns/sp_policy_info.py | WosunOO/nca_xianshu | bbb548cb67b755a57528796d4c5a66ee68df2678 | [
"Apache-2.0"
] | null | null | null | nca47/objects/dns/sp_policy_info.py | WosunOO/nca_xianshu | bbb548cb67b755a57528796d4c5a66ee68df2678 | [
"Apache-2.0"
] | null | null | null | nca47/objects/dns/sp_policy_info.py | WosunOO/nca_xianshu | bbb548cb67b755a57528796d4c5a66ee68df2678 | [
"Apache-2.0"
] | null | null | null | from nca47.db import api as db_api
from nca47.db.sqlalchemy.models import Proximity as ProximityModel
from nca47.objects import base
from nca47.objects import fields as object_fields
class ProximityInfo(base.Nca47Object):
    """Persistence object for DNS proximity (sp_policy) records."""
    VERSION = '1.0'
    fields = {
        'tenant_id': object_fields.StringField(),
        'sp_policy_id': object_fields.StringField(),
        'src_type': object_fields.StringField(),
        'src_logic': object_fields.StringField(),
        'src_data1': object_fields.StringField(),
        'src_data2': object_fields.StringField(),
        'src_data3': object_fields.StringField(),
        'src_data4': object_fields.StringField(),
        'dst_type': object_fields.StringField(),
        'dst_logic': object_fields.StringField(),
        'dst_data1': object_fields.StringField(),
        'dst_data2': object_fields.StringField(),
    }
    def __init__(self, context=None, **kwarg):
        self.db_api = db_api.get_instance()
        # Bug fix: the caller-supplied context was previously discarded by
        # passing a hard-coded ``context=None`` to the parent constructor.
        super(ProximityInfo, self).__init__(context=context, **kwarg)
    @staticmethod
    def _from_db_object(dns_proximity, db_dns_proximity):
        """Converts a database entity to a formal :class:`Proximity` object.
        :param dns_proximity: An object of :class:`Proximity`.
        :param db_dns_proximity: A DB model of a Proximity.
        :return: a :class:`Proximity` object.
        """
        for field in dns_proximity.fields:
            dns_proximity[field] = db_dns_proximity[field]
        dns_proximity.obj_reset_changes()
        return dns_proximity
    def create(self, context, values):
        """Insert a new Proximity row built from ``values``."""
        region = self.db_api.create(ProximityModel, values)
        return region
    def update(self, context, id, values):
        """Update the Proximity row identified by ``id`` with ``values``."""
        region = self.db_api.update_object(ProximityModel, id, values)
        return region
    def get_object(self, context, **values):
        """Fetch a single Proximity row matching the given filters."""
        region = self.db_api.get_object(ProximityModel, **values)
        return region
    def delete(self, context, id):
        """Delete the Proximity row identified by ``id``."""
        region = self.db_api.delete_object(ProximityModel, id)
        return region
    def get_objects(self, context, **values):
        """Fetch all Proximity rows matching the given filters."""
        region = self.db_api.get_objects(ProximityModel, **values)
        return region
    def get_all_object(self, context, values):
        """Fetch all Proximity rows matching the given filter dict."""
        region = self.db_api.get_all_object(ProximityModel, values)
        return region
| 35.212121 | 76 | 0.674269 | from nca47.db import api as db_api
from nca47.db.sqlalchemy.models import Proximity as ProximityModel
from nca47.objects import base
from nca47.objects import fields as object_fields
class ProximityInfo(base.Nca47Object):
    """Persistence object for DNS proximity (sp_policy) records."""
    VERSION = '1.0'
    fields = {
        'tenant_id': object_fields.StringField(),
        'sp_policy_id': object_fields.StringField(),
        'src_type': object_fields.StringField(),
        'src_logic': object_fields.StringField(),
        'src_data1': object_fields.StringField(),
        'src_data2': object_fields.StringField(),
        'src_data3': object_fields.StringField(),
        'src_data4': object_fields.StringField(),
        'dst_type': object_fields.StringField(),
        'dst_logic': object_fields.StringField(),
        'dst_data1': object_fields.StringField(),
        'dst_data2': object_fields.StringField(),
    }
    def __init__(self, context=None, **kwarg):
        self.db_api = db_api.get_instance()
        # NOTE(review): the caller-supplied ``context`` is discarded here
        # (hard-coded None); likely should be ``context=context`` — confirm.
        super(ProximityInfo, self).__init__(context=None, **kwarg)
    @staticmethod
    def _from_db_object(dns_proximity, db_dns_proximity):
        """Copy each declared field from the DB entity onto the object."""
        for field in dns_proximity.fields:
            dns_proximity[field] = db_dns_proximity[field]
        dns_proximity.obj_reset_changes()
        return dns_proximity
    def create(self, context, values):
        """Insert a new Proximity row built from ``values``."""
        region = self.db_api.create(ProximityModel, values)
        return region
    def update(self, context, id, values):
        """Update the Proximity row identified by ``id`` with ``values``."""
        region = self.db_api.update_object(ProximityModel, id, values)
        return region
    def get_object(self, context, **values):
        """Fetch a single Proximity row matching the given filters."""
        region = self.db_api.get_object(ProximityModel, **values)
        return region
    def delete(self, context, id):
        """Delete the Proximity row identified by ``id``."""
        region = self.db_api.delete_object(ProximityModel, id)
        return region
    def get_objects(self, context, **values):
        """Fetch all Proximity rows matching the given filters."""
        region = self.db_api.get_objects(ProximityModel, **values)
        return region
    def get_all_object(self, context, values):
        """Fetch all Proximity rows matching the given filter dict."""
        region = self.db_api.get_all_object(ProximityModel, values)
        return region
| true | true |
f710b99e0cb3fb44f98ec0ea8aa3312cc37d3fa8 | 719 | py | Python | drkcode/python/kktmat.py | kdeweese/DualRandomizedKaczmarz | 3d339e893fe1dcb91677f3240047801ca3c43162 | [
"BSD-3-Clause"
] | 2 | 2016-03-09T08:05:42.000Z | 2020-05-30T02:33:51.000Z | drkcode/python/kktmat.py | kdeweese/DualRandomizedKaczmarz | 3d339e893fe1dcb91677f3240047801ca3c43162 | [
"BSD-3-Clause"
] | null | null | null | drkcode/python/kktmat.py | kdeweese/DualRandomizedKaczmarz | 3d339e893fe1dcb91677f3240047801ca3c43162 | [
"BSD-3-Clause"
] | 2 | 2016-03-09T08:07:03.000Z | 2020-10-01T16:37:28.000Z | #!/usr/bin/env python
# kktmat.py -- KKT matrix from Laplacian matrix
#
# Copyright (C) <2016> <Kevin Deweese>
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import numpy as np
import scipy
import scipy.sparse
def kktmat(L):
    """Build KKT-system factors from a graph Laplacian.

    :param L: (n, n) graph Laplacian (sparse or dense); off-diagonal entry
        (r, c) is -w for an edge of weight w.
    :return: dict with
        'R' -- 1-D array of edge resistances 1/w, one per strictly-lower-
               triangular entry of L, and
        'B' -- (m, n) sparse edge-vertex incidence matrix in COO format.
    """
    # One strictly-lower-triangular entry per graph edge.
    mat = scipy.sparse.coo_matrix(scipy.sparse.tril(L, -1))
    row = mat.row
    col = mat.col
    val = mat.data
    m = len(row)
    # Off-diagonals are -w, so the resistance is R = 1/w = -1/val.
    # (scipy.array/arange/ones/concatenate were deprecated NumPy aliases,
    # removed from modern SciPy; numpy is used directly.)
    R = np.asarray(-1 / val)
    # Incidence matrix: +1 at (edge, row endpoint), -1 at (edge, col endpoint).
    i = np.concatenate([np.arange(m), np.arange(m)])
    j = np.concatenate([row, col])
    data = np.concatenate([np.ones(m), -np.ones(m)])
    B = scipy.sparse.coo_matrix((data, (i, j)))
    return {'R': R, 'B': B}
| 26.62963 | 63 | 0.659249 |
import scipy
def kktmat(L):
    """Split a graph Laplacian into edge resistances R and incidence matrix B.

    Returns a dict with 'R' (1-D array, -1/w for each strictly-lower-
    triangular entry w of L) and 'B' (m x n sparse incidence matrix).
    """
    # One strictly-lower-triangular entry per graph edge.
    mat=scipy.sparse.coo_matrix(scipy.sparse.tril(L,-1))
    row=mat.row
    m=len(row)
    n=L.shape[0]  # NOTE(review): unused
    col=mat.col
    val=mat.data
    # NOTE(review): scipy.array/arange/ones/concatenate are deprecated NumPy
    # aliases removed from modern SciPy; use numpy directly when updating.
    R=scipy.array(-1/val)
    # Incidence: +1 at (edge, row endpoint), -1 at (edge, col endpoint).
    i=scipy.concatenate([scipy.arange(0,m),scipy.arange(0,m)])
    j=scipy.concatenate([row,col])
    data=scipy.concatenate([scipy.ones(m),-scipy.ones(m)])
    B=scipy.sparse.coo_matrix((data,(i,j)))
    return {'R':R,'B':B}
| true | true |
f710bb121dc39fe025e869b3c95d8b40ae0689d1 | 21,508 | py | Python | readthedocs/settings/base.py | santos22/readthedocs.org | 9802ad0d8677b9c4f2eea317a9574812e4e8ff8a | [
"MIT"
] | null | null | null | readthedocs/settings/base.py | santos22/readthedocs.org | 9802ad0d8677b9c4f2eea317a9574812e4e8ff8a | [
"MIT"
] | null | null | null | readthedocs/settings/base.py | santos22/readthedocs.org | 9802ad0d8677b9c4f2eea317a9574812e4e8ff8a | [
"MIT"
] | null | null | null | # pylint: disable=missing-docstring
import getpass
import os
from celery.schedules import crontab
from readthedocs.core.settings import Settings
from readthedocs.projects.constants import CELERY_LOW, CELERY_MEDIUM, CELERY_HIGH
# Commercial/extension features ship in the optional readthedocsext package;
# ``ext`` records whether it is importable.
try:
    import readthedocsext  # noqa
    ext = True
except ImportError:
    ext = False
# Identity stand-ins for gettext translation markers.
_ = gettext = lambda s: s
class CommunityBaseSettings(Settings):

    """Community base settings, don't use this directly."""

    # Django settings
    SITE_ID = 1
    ROOT_URLCONF = 'readthedocs.urls'
    SUBDOMAIN_URLCONF = 'readthedocs.core.urls.subdomain'
    SINGLE_VERSION_URLCONF = 'readthedocs.core.urls.single_version'
    LOGIN_REDIRECT_URL = '/dashboard/'
    FORCE_WWW = False
    SECRET_KEY = 'replace-this-please'  # noqa: must be overridden in deployments
    ATOMIC_REQUESTS = True

    # Debug settings
    DEBUG = True

    # Domains and URLs
    PRODUCTION_DOMAIN = 'readthedocs.org'
    PUBLIC_DOMAIN = None
    PUBLIC_DOMAIN_USES_HTTPS = False
    USE_SUBDOMAIN = False
    PUBLIC_API_URL = 'https://{}'.format(PRODUCTION_DOMAIN)
    # Domain used to serve builds of pull-request/external versions
    RTD_EXTERNAL_VERSION_DOMAIN = 'external-builds.readthedocs.io'

    # Doc Builder Backends
    MKDOCS_BACKEND = 'readthedocs.doc_builder.backends.mkdocs'
    SPHINX_BACKEND = 'readthedocs.doc_builder.backends.sphinx'

    # slumber settings (REST client used by build workers to call the API)
    SLUMBER_API_HOST = 'https://readthedocs.org'
    SLUMBER_USERNAME = None
    SLUMBER_PASSWORD = None

    # Email
    DEFAULT_FROM_EMAIL = 'no-reply@readthedocs.org'
    SERVER_EMAIL = DEFAULT_FROM_EMAIL
    SUPPORT_EMAIL = None

    # Sessions
    SESSION_COOKIE_DOMAIN = 'readthedocs.org'
    SESSION_COOKIE_HTTPONLY = True
    SESSION_COOKIE_AGE = 30 * 24 * 60 * 60  # 30 days
    SESSION_SAVE_EVERY_REQUEST = True
    # This cookie is used in cross-origin API requests from *.readthedocs.io to readthedocs.org
    SESSION_COOKIE_SAMESITE = None

    # CSRF
    CSRF_COOKIE_HTTPONLY = True
    CSRF_COOKIE_AGE = 30 * 24 * 60 * 60

    # Security & X-Frame-Options Middleware
    # https://docs.djangoproject.com/en/1.11/ref/middleware/#django.middleware.security.SecurityMiddleware
    SECURE_BROWSER_XSS_FILTER = True
    SECURE_CONTENT_TYPE_NOSNIFF = True
    X_FRAME_OPTIONS = 'DENY'

    # Content Security Policy
    # https://django-csp.readthedocs.io/
    CSP_BLOCK_ALL_MIXED_CONTENT = True
    CSP_DEFAULT_SRC = None  # This could be improved
    CSP_FRAME_ANCESTORS = ("'none'",)
    CSP_OBJECT_SRC = ("'none'",)
    CSP_REPORT_URI = None
    CSP_REPORT_ONLY = True  # Set to false to enable CSP in blocking mode
    CSP_EXCLUDE_URL_PREFIXES = (
        "/admin/",
    )

    # Read the Docs
    READ_THE_DOCS_EXTENSIONS = ext
    RTD_LATEST = 'latest'
    RTD_LATEST_VERBOSE_NAME = 'latest'
    RTD_STABLE = 'stable'
    RTD_STABLE_VERBOSE_NAME = 'stable'
    RTD_CLEAN_AFTER_BUILD = False
    RTD_MAX_CONCURRENT_BUILDS = 4
    RTD_BUILD_STATUS_API_NAME = 'docs/readthedocs'

    # Database and API hitting settings
    DONT_HIT_API = False
    DONT_HIT_DB = True
    SYNC_USER = getpass.getuser()
    USER_MATURITY_DAYS = 7

    # override classes
    CLASS_OVERRIDES = {}

    # URL prefix under which proxied doc-serving endpoints live
    DOC_PATH_PREFIX = '_/'

    # Application classes
@property
def INSTALLED_APPS(self): # noqa
apps = [
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'django.contrib.messages',
'django.contrib.humanize',
# third party apps
'dj_pagination',
'taggit',
'django_gravatar',
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
'textclassifier',
'annoying',
'django_extensions',
'crispy_forms',
'messages_extends',
'django_elasticsearch_dsl',
'django_filters',
'polymorphic',
# our apps
'readthedocs.projects',
'readthedocs.builds',
'readthedocs.core',
'readthedocs.doc_builder',
'readthedocs.oauth',
'readthedocs.redirects',
'readthedocs.rtd_tests',
'readthedocs.api.v2',
'readthedocs.api.v3',
'readthedocs.gold',
'readthedocs.payments',
'readthedocs.notifications',
'readthedocs.integrations',
'readthedocs.analytics',
'readthedocs.sphinx_domains',
'readthedocs.search',
# allauth
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.github',
'allauth.socialaccount.providers.gitlab',
'allauth.socialaccount.providers.bitbucket',
'allauth.socialaccount.providers.bitbucket_oauth2',
]
if ext:
apps.append('django_countries')
apps.append('readthedocsext.donate')
apps.append('readthedocsext.embed')
apps.append('readthedocsext.spamfighting')
return apps
@property
def USE_PROMOS(self): # noqa
return 'readthedocsext.donate' in self.INSTALLED_APPS
MIDDLEWARE = (
'readthedocs.core.middleware.ReadTheDocsSessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'dj_pagination.middleware.PaginationMiddleware',
'readthedocs.core.middleware.SubdomainMiddleware',
'readthedocs.core.middleware.SingleVersionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'csp.middleware.CSPMiddleware',
)
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 9,
}
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
MESSAGE_STORAGE = 'readthedocs.notifications.storages.FallbackUniqueStorage'
NOTIFICATION_BACKENDS = [
'readthedocs.notifications.backends.EmailBackend',
'readthedocs.notifications.backends.SiteBackend',
]
# Paths
SITE_ROOT = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
TEMPLATE_ROOT = os.path.join(SITE_ROOT, 'readthedocs', 'templates')
DOCROOT = os.path.join(SITE_ROOT, 'user_builds')
UPLOAD_ROOT = os.path.join(SITE_ROOT, 'user_uploads')
CNAME_ROOT = os.path.join(SITE_ROOT, 'cnames')
LOGS_ROOT = os.path.join(SITE_ROOT, 'logs')
PRODUCTION_ROOT = os.path.join(SITE_ROOT, 'prod_artifacts')
PRODUCTION_MEDIA_ARTIFACTS = os.path.join(PRODUCTION_ROOT, 'media')
# Assets and media
STATIC_ROOT = os.path.join(SITE_ROOT, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(SITE_ROOT, 'media/')
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/media/admin/'
STATICFILES_DIRS = [
os.path.join(SITE_ROOT, 'readthedocs', 'static'),
os.path.join(SITE_ROOT, 'media'),
]
STATICFILES_FINDERS = [
'readthedocs.core.static.SelectiveFileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
PYTHON_MEDIA = False
# Django Storage subclass used to write build artifacts to cloud or local storage
# https://docs.readthedocs.io/page/development/settings.html#rtd-build-media-storage
RTD_BUILD_MEDIA_STORAGE = 'readthedocs.builds.storage.BuildMediaFileSystemStorage'
RTD_BUILD_ENVIRONMENT_STORAGE = 'readthedocs.builds.storage.BuildMediaFileSystemStorage'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_ROOT],
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
# Read the Docs processor
'readthedocs.core.context_processors.readthedocs_processor',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
},
},
]
# Cache
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'PREFIX': 'docs',
}
}
CACHE_MIDDLEWARE_SECONDS = 60
# I18n
TIME_ZONE = 'UTC'
USE_TZ = True
LANGUAGE_CODE = 'en-us'
LANGUAGES = (
('ca', gettext('Catalan')),
('en', gettext('English')),
('es', gettext('Spanish')),
('pt-br', gettext('Brazilian Portuguese')),
('nb', gettext('Norwegian Bokmål')),
('fr', gettext('French')),
('ru', gettext('Russian')),
('de', gettext('German')),
('gl', gettext('Galician')),
('vi', gettext('Vietnamese')),
('zh-cn', gettext('Simplified Chinese')),
('zh-tw', gettext('Traditional Chinese')),
('ja', gettext('Japanese')),
('uk', gettext('Ukrainian')),
('it', gettext('Italian')),
('ko', gettext('Korean')),
)
LOCALE_PATHS = [
os.path.join(SITE_ROOT, 'readthedocs', 'locale'),
]
USE_I18N = True
USE_L10N = True
# Celery
CELERY_APP_NAME = 'readthedocs'
CELERY_ALWAYS_EAGER = True
CELERYD_TASK_TIME_LIMIT = 60 * 60 # 60 minutes
CELERY_SEND_TASK_ERROR_EMAILS = False
CELERYD_HIJACK_ROOT_LOGGER = False
# This stops us from pre-fetching a task that then sits around on the builder
CELERY_ACKS_LATE = True
# Don't queue a bunch of tasks in the workers
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_CREATE_MISSING_QUEUES = True
BROKER_TRANSPORT_OPTIONS = {
'queue_order_strategy': 'priority',
'priority_steps': [CELERY_LOW, CELERY_MEDIUM, CELERY_HIGH],
}
CELERY_DEFAULT_QUEUE = 'celery'
CELERYBEAT_SCHEDULE = {
# Ran every hour on minute 30
'hourly-remove-orphan-symlinks': {
'task': 'readthedocs.projects.tasks.broadcast_remove_orphan_symlinks',
'schedule': crontab(minute=30),
'options': {'queue': 'web'},
},
'quarter-finish-inactive-builds': {
'task': 'readthedocs.projects.tasks.finish_inactive_builds',
'schedule': crontab(minute='*/15'),
'options': {'queue': 'web'},
},
'every-three-hour-clear-persistent-messages': {
'task': 'readthedocs.core.tasks.clear_persistent_messages',
'schedule': crontab(minute=0, hour='*/3'),
'options': {'queue': 'web'},
},
'every-day-delete-old-search-queries': {
'task': 'readthedocs.search.tasks.delete_old_search_queries_from_db',
'schedule': crontab(minute=0, hour=0),
'options': {'queue': 'web'},
}
}
MULTIPLE_APP_SERVERS = [CELERY_DEFAULT_QUEUE]
MULTIPLE_BUILD_SERVERS = [CELERY_DEFAULT_QUEUE]
# Sentry
SENTRY_CELERY_IGNORE_EXPECTED = True
# Docker
DOCKER_ENABLE = False
DOCKER_SOCKET = 'unix:///var/run/docker.sock'
# This settings has been deprecated in favor of DOCKER_IMAGE_SETTINGS
DOCKER_BUILD_IMAGES = None
# User used to create the container.
# In production we use the same user than the one defined by the
# ``USER docs`` instruction inside the Dockerfile.
# In development, we can use the "UID:GID" of the current user running the
# instance to avoid file permissions issues.
# https://docs.docker.com/engine/reference/run/#user
RTD_DOCKER_USER = 'docs:docs'
RTD_DOCKER_COMPOSE = False
DOCKER_DEFAULT_IMAGE = 'readthedocs/build'
DOCKER_VERSION = 'auto'
DOCKER_DEFAULT_VERSION = 'latest'
DOCKER_IMAGE = '{}:{}'.format(DOCKER_DEFAULT_IMAGE, DOCKER_DEFAULT_VERSION)
DOCKER_IMAGE_SETTINGS = {
# A large number of users still have this pinned in their config file.
# We must have documented it at some point.
'readthedocs/build:2.0': {
'python': {
'supported_versions': [2, 2.7, 3, 3.5],
'default_version': {
2: 2.7,
3: 3.5,
},
},
},
'readthedocs/build:4.0': {
'python': {
'supported_versions': [2, 2.7, 3, 3.5, 3.6, 3.7],
'default_version': {
2: 2.7,
3: 3.7,
},
},
},
'readthedocs/build:5.0': {
'python': {
'supported_versions': [2, 2.7, 3, 3.5, 3.6, 3.7, 'pypy3.5'],
'default_version': {
2: 2.7,
3: 3.7,
},
},
},
'readthedocs/build:6.0': {
'python': {
'supported_versions': [2, 2.7, 3, 3.5, 3.6, 3.7, 3.8, 'pypy3.5'],
'default_version': {
2: 2.7,
3: 3.7,
},
},
},
'readthedocs/build:7.0': {
'python': {
'supported_versions': [2, 2.7, 3, 3.5, 3.6, 3.7, 3.8, 'pypy3.5'],
'default_version': {
2: 2.7,
3: 3.7,
},
},
},
}
# Alias tagged via ``docker tag`` on the build servers
DOCKER_IMAGE_SETTINGS.update({
'readthedocs/build:stable': DOCKER_IMAGE_SETTINGS.get('readthedocs/build:5.0'),
'readthedocs/build:latest': DOCKER_IMAGE_SETTINGS.get('readthedocs/build:6.0'),
'readthedocs/build:testing': DOCKER_IMAGE_SETTINGS.get('readthedocs/build:7.0'),
})
# All auth
ACCOUNT_ADAPTER = 'readthedocs.core.adapters.AccountAdapter'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_ACTIVATION_DAYS = 7
SOCIALACCOUNT_AUTO_SIGNUP = False
SOCIALACCOUNT_PROVIDERS = {
'github': {
'SCOPE': [
'user:email',
'read:org',
'admin:repo_hook',
'repo:status',
],
},
'gitlab': {
'SCOPE': [
'api',
'read_user',
],
},
# Bitbucket scope/permissions are determined by the Oauth consumer setup on bitbucket.org
}
# CORS
CORS_ORIGIN_REGEX_WHITELIST = (
r'^http://(.+)\.readthedocs\.io$',
r'^https://(.+)\.readthedocs\.io$',
)
# So people can post to their accounts
CORS_ALLOW_CREDENTIALS = True
CORS_ALLOW_HEADERS = (
'x-requested-with',
'content-type',
'accept',
'origin',
'authorization',
'x-csrftoken'
)
# RTD Settings
REPO_LOCK_SECONDS = 30
ALLOW_PRIVATE_REPOS = False
DEFAULT_PRIVACY_LEVEL = 'public'
DEFAULT_VERSION_PRIVACY_LEVEL = 'public'
GROK_API_HOST = 'https://api.grokthedocs.com'
SERVE_DOCS = ['public']
ALLOW_ADMIN = True
# Elasticsearch settings.
ES_HOSTS = ['search:9200']
ELASTICSEARCH_DSL = {
'default': {
'hosts': 'search:9200'
},
}
# Chunk size for elasticsearch reindex celery tasks
ES_TASK_CHUNK_SIZE = 100
# Info from Honza about this:
# The key to determine shard number is actually usually not the node count,
# but the size of your data.
# There are advantages to just having a single shard in an index since
# you don't have to do the distribute/collect steps when executing a search.
# If your data will allow it (not significantly larger than 40GB)
# I would recommend going to a single shard and one replica meaning
# any of the two nodes will be able to serve any search without talking to the other one.
# Scaling to more searches will then just mean adding a third node
# and a second replica resulting in immediate 50% bump in max search throughput.
ES_INDEXES = {
'project': {
'name': 'project_index',
'settings': {'number_of_shards': 1,
'number_of_replicas': 1
}
},
'page': {
'name': 'page_index',
'settings': {
'number_of_shards': 1,
'number_of_replicas': 1,
}
},
}
# ANALYZER = 'analysis': {
# 'analyzer': {
# 'default_icu': {
# 'type': 'custom',
# 'tokenizer': 'icu_tokenizer',
# 'filter': ['word_delimiter', 'icu_folding', 'icu_normalizer'],
# }
# }
# }
# Disable auto refresh for increasing index performance
ELASTICSEARCH_DSL_AUTO_REFRESH = False
ALLOWED_HOSTS = ['*']
ABSOLUTE_URL_OVERRIDES = {
'auth.user': lambda o: '/profiles/{}/'.format(o.username)
}
INTERNAL_IPS = ('127.0.0.1',)
# Taggit
# https://django-taggit.readthedocs.io
TAGGIT_TAGS_FROM_STRING = 'readthedocs.projects.tag_utils.rtd_parse_tags'
# Stripe
STRIPE_SECRET = None
STRIPE_PUBLISHABLE = None
# Do Not Track support
DO_NOT_TRACK_ENABLED = False
# Advertising configuration defaults
ADSERVER_API_BASE = None
ADSERVER_API_KEY = None
ADSERVER_API_TIMEOUT = 0.35 # seconds
# Misc application settings
GLOBAL_ANALYTICS_CODE = None
DASHBOARD_ANALYTICS_CODE = None # For the dashboard, not docs
GRAVATAR_DEFAULT_IMAGE = 'https://assets.readthedocs.org/static/images/silhouette.png' # NOQA
OAUTH_AVATAR_USER_DEFAULT_URL = GRAVATAR_DEFAULT_IMAGE
OAUTH_AVATAR_ORG_DEFAULT_URL = GRAVATAR_DEFAULT_IMAGE
RESTRICTEDSESSIONS_AUTHED_ONLY = True
RESTRUCTUREDTEXT_FILTER_SETTINGS = {
'cloak_email_addresses': True,
'file_insertion_enabled': False,
'raw_enabled': False,
'strip_comments': True,
'doctitle_xform': True,
'sectsubtitle_xform': True,
'initial_header_level': 2,
'report_level': 5,
'syntax_highlight': 'none',
'math_output': 'latex',
'field_name_limit': 50,
}
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination', # NOQA
'DEFAULT_THROTTLE_RATES': {
'anon': '5/minute',
'user': '60/minute',
},
'PAGE_SIZE': 10,
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
}
SILENCED_SYSTEM_CHECKS = ['fields.W342']
# Logging
LOG_FORMAT = '%(name)s:%(lineno)s[%(process)d]: %(levelname)s %(message)s'
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'default': {
'format': LOG_FORMAT,
'datefmt': '%d/%b/%Y %H:%M:%S',
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'default'
},
'debug': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(LOGS_ROOT, 'debug.log'),
'formatter': 'default',
},
'null': {
'class': 'logging.NullHandler',
},
},
'loggers': {
'': { # root logger
'handlers': ['debug', 'console'],
# Always send from the root, handlers can filter levels
'level': 'INFO',
},
'readthedocs': {
'handlers': ['debug', 'console'],
'level': 'DEBUG',
# Don't double log at the root logger for these.
'propagate': False,
},
'django.security.DisallowedHost': {
'handlers': ['null'],
'propagate': False,
},
},
}
| 33.038402 | 106 | 0.589455 |
import getpass
import os
from celery.schedules import crontab
from readthedocs.core.settings import Settings
from readthedocs.projects.constants import CELERY_LOW, CELERY_MEDIUM, CELERY_HIGH
try:
import readthedocsext
ext = True
except ImportError:
ext = False
_ = gettext = lambda s: s
class CommunityBaseSettings(Settings):
SITE_ID = 1
ROOT_URLCONF = 'readthedocs.urls'
SUBDOMAIN_URLCONF = 'readthedocs.core.urls.subdomain'
SINGLE_VERSION_URLCONF = 'readthedocs.core.urls.single_version'
LOGIN_REDIRECT_URL = '/dashboard/'
FORCE_WWW = False
SECRET_KEY = 'replace-this-please'
ATOMIC_REQUESTS = True
DEBUG = True
PRODUCTION_DOMAIN = 'readthedocs.org'
PUBLIC_DOMAIN = None
PUBLIC_DOMAIN_USES_HTTPS = False
USE_SUBDOMAIN = False
PUBLIC_API_URL = 'https://{}'.format(PRODUCTION_DOMAIN)
RTD_EXTERNAL_VERSION_DOMAIN = 'external-builds.readthedocs.io'
MKDOCS_BACKEND = 'readthedocs.doc_builder.backends.mkdocs'
SPHINX_BACKEND = 'readthedocs.doc_builder.backends.sphinx'
SLUMBER_API_HOST = 'https://readthedocs.org'
SLUMBER_USERNAME = None
SLUMBER_PASSWORD = None
DEFAULT_FROM_EMAIL = 'no-reply@readthedocs.org'
SERVER_EMAIL = DEFAULT_FROM_EMAIL
SUPPORT_EMAIL = None
SESSION_COOKIE_DOMAIN = 'readthedocs.org'
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_AGE = 30 * 24 * 60 * 60
SESSION_SAVE_EVERY_REQUEST = True
SESSION_COOKIE_SAMESITE = None
CSRF_COOKIE_HTTPONLY = True
CSRF_COOKIE_AGE = 30 * 24 * 60 * 60
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
X_FRAME_OPTIONS = 'DENY'
CSP_BLOCK_ALL_MIXED_CONTENT = True
CSP_DEFAULT_SRC = None
CSP_FRAME_ANCESTORS = ("'none'",)
CSP_OBJECT_SRC = ("'none'",)
CSP_REPORT_URI = None
CSP_REPORT_ONLY = True
CSP_EXCLUDE_URL_PREFIXES = (
"/admin/",
)
READ_THE_DOCS_EXTENSIONS = ext
RTD_LATEST = 'latest'
RTD_LATEST_VERBOSE_NAME = 'latest'
RTD_STABLE = 'stable'
RTD_STABLE_VERBOSE_NAME = 'stable'
RTD_CLEAN_AFTER_BUILD = False
RTD_MAX_CONCURRENT_BUILDS = 4
RTD_BUILD_STATUS_API_NAME = 'docs/readthedocs'
DONT_HIT_API = False
DONT_HIT_DB = True
SYNC_USER = getpass.getuser()
USER_MATURITY_DAYS = 7
CLASS_OVERRIDES = {}
DOC_PATH_PREFIX = '_/'
@property
def INSTALLED_APPS(self):
apps = [
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'django.contrib.messages',
'django.contrib.humanize',
'dj_pagination',
'taggit',
'django_gravatar',
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
'textclassifier',
'annoying',
'django_extensions',
'crispy_forms',
'messages_extends',
'django_elasticsearch_dsl',
'django_filters',
'polymorphic',
'readthedocs.projects',
'readthedocs.builds',
'readthedocs.core',
'readthedocs.doc_builder',
'readthedocs.oauth',
'readthedocs.redirects',
'readthedocs.rtd_tests',
'readthedocs.api.v2',
'readthedocs.api.v3',
'readthedocs.gold',
'readthedocs.payments',
'readthedocs.notifications',
'readthedocs.integrations',
'readthedocs.analytics',
'readthedocs.sphinx_domains',
'readthedocs.search',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.github',
'allauth.socialaccount.providers.gitlab',
'allauth.socialaccount.providers.bitbucket',
'allauth.socialaccount.providers.bitbucket_oauth2',
]
if ext:
apps.append('django_countries')
apps.append('readthedocsext.donate')
apps.append('readthedocsext.embed')
apps.append('readthedocsext.spamfighting')
return apps
@property
def USE_PROMOS(self):
return 'readthedocsext.donate' in self.INSTALLED_APPS
MIDDLEWARE = (
'readthedocs.core.middleware.ReadTheDocsSessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'dj_pagination.middleware.PaginationMiddleware',
'readthedocs.core.middleware.SubdomainMiddleware',
'readthedocs.core.middleware.SingleVersionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'csp.middleware.CSPMiddleware',
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 9,
}
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
MESSAGE_STORAGE = 'readthedocs.notifications.storages.FallbackUniqueStorage'
NOTIFICATION_BACKENDS = [
'readthedocs.notifications.backends.EmailBackend',
'readthedocs.notifications.backends.SiteBackend',
]
SITE_ROOT = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
TEMPLATE_ROOT = os.path.join(SITE_ROOT, 'readthedocs', 'templates')
DOCROOT = os.path.join(SITE_ROOT, 'user_builds')
UPLOAD_ROOT = os.path.join(SITE_ROOT, 'user_uploads')
CNAME_ROOT = os.path.join(SITE_ROOT, 'cnames')
LOGS_ROOT = os.path.join(SITE_ROOT, 'logs')
PRODUCTION_ROOT = os.path.join(SITE_ROOT, 'prod_artifacts')
PRODUCTION_MEDIA_ARTIFACTS = os.path.join(PRODUCTION_ROOT, 'media')
STATIC_ROOT = os.path.join(SITE_ROOT, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(SITE_ROOT, 'media/')
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/media/admin/'
STATICFILES_DIRS = [
os.path.join(SITE_ROOT, 'readthedocs', 'static'),
os.path.join(SITE_ROOT, 'media'),
]
STATICFILES_FINDERS = [
'readthedocs.core.static.SelectiveFileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
PYTHON_MEDIA = False
RTD_BUILD_MEDIA_STORAGE = 'readthedocs.builds.storage.BuildMediaFileSystemStorage'
RTD_BUILD_ENVIRONMENT_STORAGE = 'readthedocs.builds.storage.BuildMediaFileSystemStorage'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_ROOT],
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
'readthedocs.core.context_processors.readthedocs_processor',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
},
},
]
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'PREFIX': 'docs',
}
}
CACHE_MIDDLEWARE_SECONDS = 60
TIME_ZONE = 'UTC'
USE_TZ = True
LANGUAGE_CODE = 'en-us'
LANGUAGES = (
('ca', gettext('Catalan')),
('en', gettext('English')),
('es', gettext('Spanish')),
('pt-br', gettext('Brazilian Portuguese')),
('nb', gettext('Norwegian Bokmål')),
('fr', gettext('French')),
('ru', gettext('Russian')),
('de', gettext('German')),
('gl', gettext('Galician')),
('vi', gettext('Vietnamese')),
('zh-cn', gettext('Simplified Chinese')),
('zh-tw', gettext('Traditional Chinese')),
('ja', gettext('Japanese')),
('uk', gettext('Ukrainian')),
('it', gettext('Italian')),
('ko', gettext('Korean')),
)
LOCALE_PATHS = [
os.path.join(SITE_ROOT, 'readthedocs', 'locale'),
]
USE_I18N = True
USE_L10N = True
CELERY_APP_NAME = 'readthedocs'
CELERY_ALWAYS_EAGER = True
CELERYD_TASK_TIME_LIMIT = 60 * 60
CELERY_SEND_TASK_ERROR_EMAILS = False
CELERYD_HIJACK_ROOT_LOGGER = False
CELERY_ACKS_LATE = True
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_CREATE_MISSING_QUEUES = True
BROKER_TRANSPORT_OPTIONS = {
'queue_order_strategy': 'priority',
'priority_steps': [CELERY_LOW, CELERY_MEDIUM, CELERY_HIGH],
}
CELERY_DEFAULT_QUEUE = 'celery'
CELERYBEAT_SCHEDULE = {
# Ran every hour on minute 30
'hourly-remove-orphan-symlinks': {
'task': 'readthedocs.projects.tasks.broadcast_remove_orphan_symlinks',
'schedule': crontab(minute=30),
'options': {'queue': 'web'},
},
'quarter-finish-inactive-builds': {
'task': 'readthedocs.projects.tasks.finish_inactive_builds',
'schedule': crontab(minute='*/15'),
'options': {'queue': 'web'},
},
'every-three-hour-clear-persistent-messages': {
'task': 'readthedocs.core.tasks.clear_persistent_messages',
'schedule': crontab(minute=0, hour='*/3'),
'options': {'queue': 'web'},
},
'every-day-delete-old-search-queries': {
'task': 'readthedocs.search.tasks.delete_old_search_queries_from_db',
'schedule': crontab(minute=0, hour=0),
'options': {'queue': 'web'},
}
}
MULTIPLE_APP_SERVERS = [CELERY_DEFAULT_QUEUE]
MULTIPLE_BUILD_SERVERS = [CELERY_DEFAULT_QUEUE]
# Sentry
SENTRY_CELERY_IGNORE_EXPECTED = True
# Docker
DOCKER_ENABLE = False
DOCKER_SOCKET = 'unix:///var/run/docker.sock'
# This settings has been deprecated in favor of DOCKER_IMAGE_SETTINGS
DOCKER_BUILD_IMAGES = None
# User used to create the container.
# In production we use the same user than the one defined by the
# ``USER docs`` instruction inside the Dockerfile.
# In development, we can use the "UID:GID" of the current user running the
# instance to avoid file permissions issues.
# https://docs.docker.com/engine/reference/run/#user
RTD_DOCKER_USER = 'docs:docs'
RTD_DOCKER_COMPOSE = False
DOCKER_DEFAULT_IMAGE = 'readthedocs/build'
DOCKER_VERSION = 'auto'
DOCKER_DEFAULT_VERSION = 'latest'
DOCKER_IMAGE = '{}:{}'.format(DOCKER_DEFAULT_IMAGE, DOCKER_DEFAULT_VERSION)
DOCKER_IMAGE_SETTINGS = {
# A large number of users still have this pinned in their config file.
# We must have documented it at some point.
'readthedocs/build:2.0': {
'python': {
'supported_versions': [2, 2.7, 3, 3.5],
'default_version': {
2: 2.7,
3: 3.5,
},
},
},
'readthedocs/build:4.0': {
'python': {
'supported_versions': [2, 2.7, 3, 3.5, 3.6, 3.7],
'default_version': {
2: 2.7,
3: 3.7,
},
},
},
'readthedocs/build:5.0': {
'python': {
'supported_versions': [2, 2.7, 3, 3.5, 3.6, 3.7, 'pypy3.5'],
'default_version': {
2: 2.7,
3: 3.7,
},
},
},
'readthedocs/build:6.0': {
'python': {
'supported_versions': [2, 2.7, 3, 3.5, 3.6, 3.7, 3.8, 'pypy3.5'],
'default_version': {
2: 2.7,
3: 3.7,
},
},
},
'readthedocs/build:7.0': {
'python': {
'supported_versions': [2, 2.7, 3, 3.5, 3.6, 3.7, 3.8, 'pypy3.5'],
'default_version': {
2: 2.7,
3: 3.7,
},
},
},
}
# Alias tagged via ``docker tag`` on the build servers
DOCKER_IMAGE_SETTINGS.update({
'readthedocs/build:stable': DOCKER_IMAGE_SETTINGS.get('readthedocs/build:5.0'),
'readthedocs/build:latest': DOCKER_IMAGE_SETTINGS.get('readthedocs/build:6.0'),
'readthedocs/build:testing': DOCKER_IMAGE_SETTINGS.get('readthedocs/build:7.0'),
})
# All auth
ACCOUNT_ADAPTER = 'readthedocs.core.adapters.AccountAdapter'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_ACTIVATION_DAYS = 7
SOCIALACCOUNT_AUTO_SIGNUP = False
SOCIALACCOUNT_PROVIDERS = {
'github': {
'SCOPE': [
'user:email',
'read:org',
'admin:repo_hook',
'repo:status',
],
},
'gitlab': {
'SCOPE': [
'api',
'read_user',
],
},
# Bitbucket scope/permissions are determined by the Oauth consumer setup on bitbucket.org
}
# CORS
CORS_ORIGIN_REGEX_WHITELIST = (
r'^http://(.+)\.readthedocs\.io$',
r'^https://(.+)\.readthedocs\.io$',
)
# So people can post to their accounts
CORS_ALLOW_CREDENTIALS = True
CORS_ALLOW_HEADERS = (
'x-requested-with',
'content-type',
'accept',
'origin',
'authorization',
'x-csrftoken'
)
# RTD Settings
REPO_LOCK_SECONDS = 30
ALLOW_PRIVATE_REPOS = False
DEFAULT_PRIVACY_LEVEL = 'public'
DEFAULT_VERSION_PRIVACY_LEVEL = 'public'
GROK_API_HOST = 'https://api.grokthedocs.com'
SERVE_DOCS = ['public']
ALLOW_ADMIN = True
# Elasticsearch settings.
ES_HOSTS = ['search:9200']
ELASTICSEARCH_DSL = {
'default': {
'hosts': 'search:9200'
},
}
# Chunk size for elasticsearch reindex celery tasks
ES_TASK_CHUNK_SIZE = 100
# Info from Honza about this:
# The key to determine shard number is actually usually not the node count,
# but the size of your data.
# There are advantages to just having a single shard in an index since
# you don't have to do the distribute/collect steps when executing a search.
ES_INDEXES = {
'project': {
'name': 'project_index',
'settings': {'number_of_shards': 1,
'number_of_replicas': 1
}
},
'page': {
'name': 'page_index',
'settings': {
'number_of_shards': 1,
'number_of_replicas': 1,
}
},
}
ELASTICSEARCH_DSL_AUTO_REFRESH = False
ALLOWED_HOSTS = ['*']
ABSOLUTE_URL_OVERRIDES = {
'auth.user': lambda o: '/profiles/{}/'.format(o.username)
}
INTERNAL_IPS = ('127.0.0.1',)
TAGGIT_TAGS_FROM_STRING = 'readthedocs.projects.tag_utils.rtd_parse_tags'
STRIPE_SECRET = None
STRIPE_PUBLISHABLE = None
DO_NOT_TRACK_ENABLED = False
ADSERVER_API_BASE = None
ADSERVER_API_KEY = None
ADSERVER_API_TIMEOUT = 0.35
GLOBAL_ANALYTICS_CODE = None
DASHBOARD_ANALYTICS_CODE = None
GRAVATAR_DEFAULT_IMAGE = 'https://assets.readthedocs.org/static/images/silhouette.png'
OAUTH_AVATAR_USER_DEFAULT_URL = GRAVATAR_DEFAULT_IMAGE
OAUTH_AVATAR_ORG_DEFAULT_URL = GRAVATAR_DEFAULT_IMAGE
RESTRICTEDSESSIONS_AUTHED_ONLY = True
RESTRUCTUREDTEXT_FILTER_SETTINGS = {
'cloak_email_addresses': True,
'file_insertion_enabled': False,
'raw_enabled': False,
'strip_comments': True,
'doctitle_xform': True,
'sectsubtitle_xform': True,
'initial_header_level': 2,
'report_level': 5,
'syntax_highlight': 'none',
'math_output': 'latex',
'field_name_limit': 50,
}
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'DEFAULT_THROTTLE_RATES': {
'anon': '5/minute',
'user': '60/minute',
},
'PAGE_SIZE': 10,
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
}
SILENCED_SYSTEM_CHECKS = ['fields.W342']
LOG_FORMAT = '%(name)s:%(lineno)s[%(process)d]: %(levelname)s %(message)s'
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'default': {
'format': LOG_FORMAT,
'datefmt': '%d/%b/%Y %H:%M:%S',
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'default'
},
'debug': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(LOGS_ROOT, 'debug.log'),
'formatter': 'default',
},
'null': {
'class': 'logging.NullHandler',
},
},
'loggers': {
'': {
'handlers': ['debug', 'console'],
'level': 'INFO',
},
'readthedocs': {
'handlers': ['debug', 'console'],
'level': 'DEBUG',
'propagate': False,
},
'django.security.DisallowedHost': {
'handlers': ['null'],
'propagate': False,
},
},
}
| true | true |
f710bc0c7a452a8d63c48e69d4a6a414fc921a2e | 2,793 | py | Python | pdc/apps/contact/filters.py | hluk/product-definition-center | af79f73c30fa5f5709ba03d584b7a49b83166b81 | [
"MIT"
] | 18 | 2015-12-15T17:56:18.000Z | 2021-04-10T13:49:48.000Z | pdc/apps/contact/filters.py | hluk/product-definition-center | af79f73c30fa5f5709ba03d584b7a49b83166b81 | [
"MIT"
] | 303 | 2015-11-18T07:37:06.000Z | 2021-05-26T12:34:01.000Z | pdc/apps/contact/filters.py | hluk/product-definition-center | af79f73c30fa5f5709ba03d584b7a49b83166b81 | [
"MIT"
] | 27 | 2015-11-19T20:33:54.000Z | 2021-03-25T08:15:28.000Z | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import django_filters
from django.db.models import Q
from django_filters import FilterSet
from pdc.apps.common.filters import MultiValueFilter, MultiValueRegexFilter, value_is_not_empty
from . import models
from .models import (Person,
Maillist,
GlobalComponentContact,
ReleaseComponentContact)
class PersonFilterSet(django_filters.FilterSet):
    """Filter ``Person`` records by one or more usernames and/or emails."""
    username = MultiValueFilter()
    email = MultiValueFilter()

    class Meta:
        model = models.Person
        fields = ('username', 'email')
class MaillistFilterSet(django_filters.FilterSet):
    """Filter ``Maillist`` records by one or more list names and/or emails."""
    mail_name = MultiValueFilter()
    email = MultiValueFilter()

    class Meta:
        model = models.Maillist
        fields = ('mail_name', 'email')
class ContactRoleFilterSet(django_filters.FilterSet):
    """Filter ``ContactRole`` records by one or more role names."""
    name = MultiValueFilter()

    class Meta:
        model = models.ContactRole
        fields = ('name',)
def _filter_contacts(people_filter, maillist_filter, qs, values):
    """Helper for filtering based on subclassed contacts.

    The same ``values`` are matched against the ``Person`` field named by
    *people_filter* and the ``Maillist`` field named by *maillist_filter*;
    ``qs`` is then narrowed to rows whose contact is in either match set.
    """
    person_lookup = {'{}__in'.format(people_filter): values}
    maillist_lookup = {'{}__in'.format(maillist_filter): values}
    matched_people = Person.objects.filter(**person_lookup)
    matched_lists = Maillist.objects.filter(**maillist_lookup)
    return qs.filter(Q(contact__in=matched_people) | Q(contact__in=matched_lists))
class _BaseComponentContactFilter(FilterSet):
    """Shared filters for component-contact endpoints.

    ``contact`` and ``email`` are resolved against both ``Person`` and
    ``Maillist`` contact subclasses via ``_filter_contacts``.
    """
    # NOTE(review): the ``name=`` kwarg was renamed to ``field_name=`` in
    # django-filter 2.0 -- confirm the pinned django-filter version before
    # upgrading.
    contact = MultiValueFilter(method='filter_by_contact')
    email = MultiValueFilter(method='filter_by_email')
    role = MultiValueFilter(name='role__name')
    component = MultiValueRegexFilter(name='component__name')

    @value_is_not_empty
    def filter_by_contact(self, qs, name, value):
        """Match contacts whose username or mailing-list name is in *value*."""
        return _filter_contacts('username', 'mail_name', qs, value)

    @value_is_not_empty
    def filter_by_email(self, qs, name, value):
        """Match contacts (person or mailing list) whose email is in *value*."""
        return _filter_contacts('email', 'email', qs, value)
class GlobalComponentContactFilter(_BaseComponentContactFilter):
    """Filters for global-component contacts."""

    class Meta:
        model = GlobalComponentContact
        fields = ('role', 'email', 'contact', 'component')
class ReleaseComponentContactFilter(_BaseComponentContactFilter):
    """Filters for release-component contacts, adding release-scoped fields."""
    dist_git_branch = MultiValueFilter(name='component__dist_git_branch')
    release = MultiValueFilter(name='component__release__release_id')
    global_component = MultiValueFilter(name='component__global_component__name')

    class Meta:
        model = ReleaseComponentContact
        fields = ('role', 'email', 'contact', 'component', 'dist_git_branch', 'release',
                  'global_component')
| 32.103448 | 95 | 0.712496 |
import django_filters
from django.db.models import Q
from django_filters import FilterSet
from pdc.apps.common.filters import MultiValueFilter, MultiValueRegexFilter, value_is_not_empty
from . import models
from .models import (Person,
Maillist,
GlobalComponentContact,
ReleaseComponentContact)
class PersonFilterSet(django_filters.FilterSet):
username = MultiValueFilter()
email = MultiValueFilter()
class Meta:
model = models.Person
fields = ('username', 'email')
class MaillistFilterSet(django_filters.FilterSet):
    """Filter Maillist records by mailing-list name and/or email."""
    mail_name = MultiValueFilter()
    email = MultiValueFilter()
    class Meta:
        model = models.Maillist
        fields = ('mail_name', 'email')
class ContactRoleFilterSet(django_filters.FilterSet):
    """Filter contact roles by name."""
    name = MultiValueFilter()
    class Meta:
        model = models.ContactRole
        fields = ('name',)
def _filter_contacts(people_filter, maillist_filter, qs, values):
    """Keep only items of *qs* whose contact matches one of *values*.

    A contact is either a Person or a Maillist; the same values are matched
    against the Person field named by *people_filter* and the Maillist field
    named by *maillist_filter*, and the results are OR-ed together.
    """
    matching_people = Person.objects.filter(**{'%s__in' % people_filter: values})
    matching_lists = Maillist.objects.filter(**{'%s__in' % maillist_filter: values})
    condition = Q(contact__in=matching_people) | Q(contact__in=matching_lists)
    return qs.filter(condition)
class _BaseComponentContactFilter(FilterSet):
    """Shared filters for component-contact querysets.

    Filters by role name, component name (regex) and by the contact itself,
    where a contact may be either a Person or a Maillist (both subclass
    branches are handled by ``_filter_contacts``).
    """
    contact = MultiValueFilter(method='filter_by_contact')
    email = MultiValueFilter(method='filter_by_email')
    role = MultiValueFilter(name='role__name')
    component = MultiValueRegexFilter(name='component__name')
    @value_is_not_empty
    def filter_by_contact(self, qs, name, value):
        # Person contacts are matched on ``username``, Maillist contacts on
        # ``mail_name``; the same filter values are applied to both.
        return _filter_contacts('username', 'mail_name', qs, value)
    @value_is_not_empty
    def filter_by_email(self, qs, name, value):
        # Both contact subclasses expose an ``email`` field.
        return _filter_contacts('email', 'email', qs, value)
class GlobalComponentContactFilter(_BaseComponentContactFilter):
    """Filter global component contacts by role, email, contact or component."""
    class Meta:
        model = GlobalComponentContact
        fields = ('role', 'email', 'contact', 'component')
class ReleaseComponentContactFilter(_BaseComponentContactFilter):
    """Release component contact filters.

    Extends the base filters with dist-git branch, release id and global
    component name, all resolved through the related release component.
    """
    dist_git_branch = MultiValueFilter(name='component__dist_git_branch')
    release = MultiValueFilter(name='component__release__release_id')
    global_component = MultiValueFilter(name='component__global_component__name')
    class Meta:
        model = ReleaseComponentContact
        fields = ('role', 'email', 'contact', 'component', 'dist_git_branch', 'release',
                  'global_component')
| true | true |
f710bc6f51d67c9c28e1e8d61df7113edf1ef689 | 191 | py | Python | jp.atcoder/abc156/abc156_a/10265687.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc156/abc156_a/10265687.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc156/abc156_a/10265687.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | import sys
n, r = map(int, sys.stdin.readline().split())
def main(contests=None, displayed_rating=None):
    """Solve AtCoder ABC 156 A: Takahashi's inner rating.

    The inner rating is the displayed rating plus 100 points for each
    contest short of ten; with ten or more contests the two are equal.

    Both arguments default to the module-level ``n`` and ``r`` read from
    stdin, so the original zero-argument call keeps working while the
    function is now directly testable.
    """
    num = n if contests is None else contests
    rating = r if displayed_rating is None else displayed_rating
    return rating + 100 * max(10 - num, 0)
if __name__ == '__main__':
    # Solve the single test case (input was read at module import) and
    # print the result.
    ans = main()
    print(ans)
| 15.916667 | 46 | 0.528796 | import sys
n, r = map(int, sys.stdin.readline().split())
def main():
    """Return r plus a 100-point bonus for every unit n falls short of 10."""
    shortfall = max(0, 10 - n)
    return r + 100 * shortfall
if __name__ == '__main__':
    # Entry point: compute and print the answer for the input read above.
    ans = main()
    print(ans)
| true | true |
f710bcbd79d2d6ab9a9da2e8a815f6df2e583197 | 21,262 | py | Python | sdk/python/pulumi_azure_nextgen/healthcareapis/v20200330/outputs.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/healthcareapis/v20200330/outputs.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/healthcareapis/v20200330/outputs.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'PrivateEndpointConnectionResponse',
'PrivateEndpointResponse',
'PrivateLinkServiceConnectionStateResponse',
'ServiceAccessPolicyEntryResponse',
'ServiceAuthenticationConfigurationInfoResponse',
'ServiceCorsConfigurationInfoResponse',
'ServiceCosmosDbConfigurationInfoResponse',
'ServiceExportConfigurationInfoResponse',
'ServicesPropertiesResponse',
'ServicesResourceResponseIdentity',
]
@pulumi.output_type
class PrivateEndpointConnectionResponse(dict):
    """
    The Private Endpoint Connection resource.
    """
    def __init__(__self__, *,
                 id: str,
                 name: str,
                 private_link_service_connection_state: 'outputs.PrivateLinkServiceConnectionStateResponse',
                 provisioning_state: str,
                 type: str,
                 private_endpoint: Optional['outputs.PrivateEndpointResponse'] = None):
        """
        The Private Endpoint Connection resource.
        :param str id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        :param str name: The name of the resource
        :param 'PrivateLinkServiceConnectionStateResponseArgs' private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider.
        :param str provisioning_state: The provisioning state of the private endpoint connection resource.
        :param str type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        :param 'PrivateEndpointResponseArgs' private_endpoint: The resource of private end point.
        """
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "type", type)
        # The optional field is only stored when explicitly provided.
        if private_endpoint is not None:
            pulumi.set(__self__, "private_endpoint", private_endpoint)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
        """
        A collection of information about the state of the connection between service consumer and provider.
        """
        return pulumi.get(self, "private_link_service_connection_state")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the private endpoint connection resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
        """
        The resource of private end point.
        """
        return pulumi.get(self, "private_endpoint")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to the snake_case
        # attribute names used above; unknown keys pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PrivateEndpointResponse(dict):
    """
    The Private Endpoint resource.
    """
    def __init__(__self__, *,
                 id: str):
        """
        The Private Endpoint resource.
        :param str id: The ARM identifier for Private Endpoint
        """
        pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The ARM identifier for Private Endpoint
        """
        return pulumi.get(self, "id")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PrivateLinkServiceConnectionStateResponse(dict):
    """
    A collection of information about the state of the connection between service consumer and provider.
    """
    def __init__(__self__, *,
                 actions_required: Optional[str] = None,
                 description: Optional[str] = None,
                 status: Optional[str] = None):
        """
        A collection of information about the state of the connection between service consumer and provider.
        :param str actions_required: A message indicating if changes on the service provider require any updates on the consumer.
        :param str description: The reason for approval/rejection of the connection.
        :param str status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
        """
        # Optional fields are only stored when explicitly provided.
        if actions_required is not None:
            pulumi.set(__self__, "actions_required", actions_required)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if status is not None:
            pulumi.set(__self__, "status", status)
    @property
    @pulumi.getter(name="actionsRequired")
    def actions_required(self) -> Optional[str]:
        """
        A message indicating if changes on the service provider require any updates on the consumer.
        """
        return pulumi.get(self, "actions_required")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        The reason for approval/rejection of the connection.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """
        Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
        """
        return pulumi.get(self, "status")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceAccessPolicyEntryResponse(dict):
    """
    An access policy entry.
    """
    def __init__(__self__, *,
                 object_id: str):
        """
        An access policy entry.
        :param str object_id: An Azure AD object ID (User or Apps) that is allowed access to the FHIR service.
        """
        pulumi.set(__self__, "object_id", object_id)
    @property
    @pulumi.getter(name="objectId")
    def object_id(self) -> str:
        """
        An Azure AD object ID (User or Apps) that is allowed access to the FHIR service.
        """
        return pulumi.get(self, "object_id")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceAuthenticationConfigurationInfoResponse(dict):
    """
    Authentication configuration information
    """
    def __init__(__self__, *,
                 audience: Optional[str] = None,
                 authority: Optional[str] = None,
                 smart_proxy_enabled: Optional[bool] = None):
        """
        Authentication configuration information
        :param str audience: The audience url for the service
        :param str authority: The authority url for the service
        :param bool smart_proxy_enabled: If the SMART on FHIR proxy is enabled
        """
        # Optional fields are only stored when explicitly provided.
        if audience is not None:
            pulumi.set(__self__, "audience", audience)
        if authority is not None:
            pulumi.set(__self__, "authority", authority)
        if smart_proxy_enabled is not None:
            pulumi.set(__self__, "smart_proxy_enabled", smart_proxy_enabled)
    @property
    @pulumi.getter
    def audience(self) -> Optional[str]:
        """
        The audience url for the service
        """
        return pulumi.get(self, "audience")
    @property
    @pulumi.getter
    def authority(self) -> Optional[str]:
        """
        The authority url for the service
        """
        return pulumi.get(self, "authority")
    @property
    @pulumi.getter(name="smartProxyEnabled")
    def smart_proxy_enabled(self) -> Optional[bool]:
        """
        If the SMART on FHIR proxy is enabled
        """
        return pulumi.get(self, "smart_proxy_enabled")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceCorsConfigurationInfoResponse(dict):
    """
    The settings for the CORS configuration of the service instance.
    """
    def __init__(__self__, *,
                 allow_credentials: Optional[bool] = None,
                 headers: Optional[Sequence[str]] = None,
                 max_age: Optional[int] = None,
                 methods: Optional[Sequence[str]] = None,
                 origins: Optional[Sequence[str]] = None):
        """
        The settings for the CORS configuration of the service instance.
        :param bool allow_credentials: If credentials are allowed via CORS.
        :param Sequence[str] headers: The headers to be allowed via CORS.
        :param int max_age: The max age to be allowed via CORS.
        :param Sequence[str] methods: The methods to be allowed via CORS.
        :param Sequence[str] origins: The origins to be allowed via CORS.
        """
        # Optional fields are only stored when explicitly provided.
        if allow_credentials is not None:
            pulumi.set(__self__, "allow_credentials", allow_credentials)
        if headers is not None:
            pulumi.set(__self__, "headers", headers)
        if max_age is not None:
            pulumi.set(__self__, "max_age", max_age)
        if methods is not None:
            pulumi.set(__self__, "methods", methods)
        if origins is not None:
            pulumi.set(__self__, "origins", origins)
    @property
    @pulumi.getter(name="allowCredentials")
    def allow_credentials(self) -> Optional[bool]:
        """
        If credentials are allowed via CORS.
        """
        return pulumi.get(self, "allow_credentials")
    @property
    @pulumi.getter
    def headers(self) -> Optional[Sequence[str]]:
        """
        The headers to be allowed via CORS.
        """
        return pulumi.get(self, "headers")
    @property
    @pulumi.getter(name="maxAge")
    def max_age(self) -> Optional[int]:
        """
        The max age to be allowed via CORS.
        """
        return pulumi.get(self, "max_age")
    @property
    @pulumi.getter
    def methods(self) -> Optional[Sequence[str]]:
        """
        The methods to be allowed via CORS.
        """
        return pulumi.get(self, "methods")
    @property
    @pulumi.getter
    def origins(self) -> Optional[Sequence[str]]:
        """
        The origins to be allowed via CORS.
        """
        return pulumi.get(self, "origins")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceCosmosDbConfigurationInfoResponse(dict):
    """
    The settings for the Cosmos DB database backing the service.
    """
    def __init__(__self__, *,
                 key_vault_key_uri: Optional[str] = None,
                 offer_throughput: Optional[int] = None):
        """
        The settings for the Cosmos DB database backing the service.
        :param str key_vault_key_uri: The URI of the customer-managed key for the backing database.
        :param int offer_throughput: The provisioned throughput for the backing database.
        """
        # Optional fields are only stored when explicitly provided.
        if key_vault_key_uri is not None:
            pulumi.set(__self__, "key_vault_key_uri", key_vault_key_uri)
        if offer_throughput is not None:
            pulumi.set(__self__, "offer_throughput", offer_throughput)
    @property
    @pulumi.getter(name="keyVaultKeyUri")
    def key_vault_key_uri(self) -> Optional[str]:
        """
        The URI of the customer-managed key for the backing database.
        """
        return pulumi.get(self, "key_vault_key_uri")
    @property
    @pulumi.getter(name="offerThroughput")
    def offer_throughput(self) -> Optional[int]:
        """
        The provisioned throughput for the backing database.
        """
        return pulumi.get(self, "offer_throughput")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceExportConfigurationInfoResponse(dict):
    """
    Export operation configuration information
    """
    def __init__(__self__, *,
                 storage_account_name: Optional[str] = None):
        """
        Export operation configuration information
        :param str storage_account_name: The name of the default export storage account.
        """
        # The optional field is only stored when explicitly provided.
        if storage_account_name is not None:
            pulumi.set(__self__, "storage_account_name", storage_account_name)
    @property
    @pulumi.getter(name="storageAccountName")
    def storage_account_name(self) -> Optional[str]:
        """
        The name of the default export storage account.
        """
        return pulumi.get(self, "storage_account_name")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServicesPropertiesResponse(dict):
    """
    The properties of a service instance.
    """
    def __init__(__self__, *,
                 provisioning_state: str,
                 access_policies: Optional[Sequence['outputs.ServiceAccessPolicyEntryResponse']] = None,
                 authentication_configuration: Optional['outputs.ServiceAuthenticationConfigurationInfoResponse'] = None,
                 cors_configuration: Optional['outputs.ServiceCorsConfigurationInfoResponse'] = None,
                 cosmos_db_configuration: Optional['outputs.ServiceCosmosDbConfigurationInfoResponse'] = None,
                 export_configuration: Optional['outputs.ServiceExportConfigurationInfoResponse'] = None,
                 private_endpoint_connections: Optional[Sequence['outputs.PrivateEndpointConnectionResponse']] = None,
                 public_network_access: Optional[str] = None):
        """
        The properties of a service instance.
        :param str provisioning_state: The provisioning state.
        :param Sequence['ServiceAccessPolicyEntryResponseArgs'] access_policies: The access policies of the service instance.
        :param 'ServiceAuthenticationConfigurationInfoResponseArgs' authentication_configuration: The authentication configuration for the service instance.
        :param 'ServiceCorsConfigurationInfoResponseArgs' cors_configuration: The settings for the CORS configuration of the service instance.
        :param 'ServiceCosmosDbConfigurationInfoResponseArgs' cosmos_db_configuration: The settings for the Cosmos DB database backing the service.
        :param 'ServiceExportConfigurationInfoResponseArgs' export_configuration: The settings for the export operation of the service instance.
        :param Sequence['PrivateEndpointConnectionResponseArgs'] private_endpoint_connections: The list of private endpoint connections that are set up for this resource.
        :param str public_network_access: Control permission for data plane traffic coming from public networks while private endpoint is enabled.
        """
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        # Optional fields are only stored when explicitly provided.
        if access_policies is not None:
            pulumi.set(__self__, "access_policies", access_policies)
        if authentication_configuration is not None:
            pulumi.set(__self__, "authentication_configuration", authentication_configuration)
        if cors_configuration is not None:
            pulumi.set(__self__, "cors_configuration", cors_configuration)
        if cosmos_db_configuration is not None:
            pulumi.set(__self__, "cosmos_db_configuration", cosmos_db_configuration)
        if export_configuration is not None:
            pulumi.set(__self__, "export_configuration", export_configuration)
        if private_endpoint_connections is not None:
            pulumi.set(__self__, "private_endpoint_connections", private_endpoint_connections)
        if public_network_access is not None:
            pulumi.set(__self__, "public_network_access", public_network_access)
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="accessPolicies")
    def access_policies(self) -> Optional[Sequence['outputs.ServiceAccessPolicyEntryResponse']]:
        """
        The access policies of the service instance.
        """
        return pulumi.get(self, "access_policies")
    @property
    @pulumi.getter(name="authenticationConfiguration")
    def authentication_configuration(self) -> Optional['outputs.ServiceAuthenticationConfigurationInfoResponse']:
        """
        The authentication configuration for the service instance.
        """
        return pulumi.get(self, "authentication_configuration")
    @property
    @pulumi.getter(name="corsConfiguration")
    def cors_configuration(self) -> Optional['outputs.ServiceCorsConfigurationInfoResponse']:
        """
        The settings for the CORS configuration of the service instance.
        """
        return pulumi.get(self, "cors_configuration")
    @property
    @pulumi.getter(name="cosmosDbConfiguration")
    def cosmos_db_configuration(self) -> Optional['outputs.ServiceCosmosDbConfigurationInfoResponse']:
        """
        The settings for the Cosmos DB database backing the service.
        """
        return pulumi.get(self, "cosmos_db_configuration")
    @property
    @pulumi.getter(name="exportConfiguration")
    def export_configuration(self) -> Optional['outputs.ServiceExportConfigurationInfoResponse']:
        """
        The settings for the export operation of the service instance.
        """
        return pulumi.get(self, "export_configuration")
    @property
    @pulumi.getter(name="privateEndpointConnections")
    def private_endpoint_connections(self) -> Optional[Sequence['outputs.PrivateEndpointConnectionResponse']]:
        """
        The list of private endpoint connections that are set up for this resource.
        """
        return pulumi.get(self, "private_endpoint_connections")
    @property
    @pulumi.getter(name="publicNetworkAccess")
    def public_network_access(self) -> Optional[str]:
        """
        Control permission for data plane traffic coming from public networks while private endpoint is enabled.
        """
        return pulumi.get(self, "public_network_access")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServicesResourceResponseIdentity(dict):
    """
    Setting indicating whether the service has a managed identity associated with it.
    """
    def __init__(__self__, *,
                 principal_id: str,
                 tenant_id: str,
                 type: Optional[str] = None):
        """
        Setting indicating whether the service has a managed identity associated with it.
        :param str principal_id: The principal ID of the resource identity.
        :param str tenant_id: The tenant ID of the resource.
        :param str type: Type of identity being specified, currently SystemAssigned and None are allowed.
        """
        pulumi.set(__self__, "principal_id", principal_id)
        pulumi.set(__self__, "tenant_id", tenant_id)
        # The optional field is only stored when explicitly provided.
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> str:
        """
        The principal ID of the resource identity.
        """
        return pulumi.get(self, "principal_id")
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> str:
        """
        The tenant ID of the resource.
        """
        return pulumi.get(self, "tenant_id")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        Type of identity being specified, currently SystemAssigned and None are allowed.
        """
        return pulumi.get(self, "type")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| 38.588022 | 208 | 0.667764 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'PrivateEndpointConnectionResponse',
'PrivateEndpointResponse',
'PrivateLinkServiceConnectionStateResponse',
'ServiceAccessPolicyEntryResponse',
'ServiceAuthenticationConfigurationInfoResponse',
'ServiceCorsConfigurationInfoResponse',
'ServiceCosmosDbConfigurationInfoResponse',
'ServiceExportConfigurationInfoResponse',
'ServicesPropertiesResponse',
'ServicesResourceResponseIdentity',
]
@pulumi.output_type
class PrivateEndpointConnectionResponse(dict):
    """The Private Endpoint Connection resource."""
    def __init__(__self__, *,
                 id: str,
                 name: str,
                 private_link_service_connection_state: 'outputs.PrivateLinkServiceConnectionStateResponse',
                 provisioning_state: str,
                 type: str,
                 private_endpoint: Optional['outputs.PrivateEndpointResponse'] = None):
        """Store the connection fields; the optional private endpoint is
        only stored when explicitly provided."""
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "type", type)
        if private_endpoint is not None:
            pulumi.set(__self__, "private_endpoint", private_endpoint)
    @property
    @pulumi.getter
    def id(self) -> str:
        """Fully qualified ARM resource ID."""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the resource."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
        """State of the connection between service consumer and provider."""
        return pulumi.get(self, "private_link_service_connection_state")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """Provisioning state of the private endpoint connection."""
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def type(self) -> str:
        """The ARM resource type."""
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
        """The private endpoint resource, if any."""
        return pulumi.get(self, "private_endpoint")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PrivateEndpointResponse(dict):
    """The Private Endpoint resource."""
    def __init__(__self__, *,
                 id: str):
        """:param str id: The ARM identifier for the private endpoint."""
        pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter
    def id(self) -> str:
        """The ARM identifier for the private endpoint."""
        return pulumi.get(self, "id")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PrivateLinkServiceConnectionStateResponse(dict):
    """State of the connection between service consumer and provider."""
    def __init__(__self__, *,
                 actions_required: Optional[str] = None,
                 description: Optional[str] = None,
                 status: Optional[str] = None):
        """Store connection-state fields; each optional field is only
        stored when explicitly provided."""
        if actions_required is not None:
            pulumi.set(__self__, "actions_required", actions_required)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if status is not None:
            pulumi.set(__self__, "status", status)
    @property
    @pulumi.getter(name="actionsRequired")
    def actions_required(self) -> Optional[str]:
        """Message about consumer updates required by provider changes."""
        return pulumi.get(self, "actions_required")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """Reason for approval/rejection of the connection."""
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """Whether the connection was Approved/Rejected/Removed by the owner."""
        return pulumi.get(self, "status")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceAccessPolicyEntryResponse(dict):
    """An access policy entry."""
    def __init__(__self__, *,
                 object_id: str):
        """:param str object_id: Azure AD object ID allowed access to the FHIR service."""
        pulumi.set(__self__, "object_id", object_id)
    @property
    @pulumi.getter(name="objectId")
    def object_id(self) -> str:
        """Azure AD object ID (User or App) allowed access to the FHIR service."""
        return pulumi.get(self, "object_id")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceAuthenticationConfigurationInfoResponse(dict):
    """Authentication configuration information."""
    def __init__(__self__, *,
                 audience: Optional[str] = None,
                 authority: Optional[str] = None,
                 smart_proxy_enabled: Optional[bool] = None):
        """Store authentication settings; each optional field is only
        stored when explicitly provided."""
        if audience is not None:
            pulumi.set(__self__, "audience", audience)
        if authority is not None:
            pulumi.set(__self__, "authority", authority)
        if smart_proxy_enabled is not None:
            pulumi.set(__self__, "smart_proxy_enabled", smart_proxy_enabled)
    @property
    @pulumi.getter
    def audience(self) -> Optional[str]:
        """The audience url for the service."""
        return pulumi.get(self, "audience")
    @property
    @pulumi.getter
    def authority(self) -> Optional[str]:
        """The authority url for the service."""
        return pulumi.get(self, "authority")
    @property
    @pulumi.getter(name="smartProxyEnabled")
    def smart_proxy_enabled(self) -> Optional[bool]:
        """Whether the SMART on FHIR proxy is enabled."""
        return pulumi.get(self, "smart_proxy_enabled")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceCorsConfigurationInfoResponse(dict):
    """The settings for the CORS configuration of the service instance."""
    def __init__(__self__, *,
                 allow_credentials: Optional[bool] = None,
                 headers: Optional[Sequence[str]] = None,
                 max_age: Optional[int] = None,
                 methods: Optional[Sequence[str]] = None,
                 origins: Optional[Sequence[str]] = None):
        """Store CORS settings; each optional field is only stored when
        explicitly provided."""
        if allow_credentials is not None:
            pulumi.set(__self__, "allow_credentials", allow_credentials)
        if headers is not None:
            pulumi.set(__self__, "headers", headers)
        if max_age is not None:
            pulumi.set(__self__, "max_age", max_age)
        if methods is not None:
            pulumi.set(__self__, "methods", methods)
        if origins is not None:
            pulumi.set(__self__, "origins", origins)
    @property
    @pulumi.getter(name="allowCredentials")
    def allow_credentials(self) -> Optional[bool]:
        """If credentials are allowed via CORS."""
        return pulumi.get(self, "allow_credentials")
    @property
    @pulumi.getter
    def headers(self) -> Optional[Sequence[str]]:
        """The headers to be allowed via CORS."""
        return pulumi.get(self, "headers")
    @property
    @pulumi.getter(name="maxAge")
    def max_age(self) -> Optional[int]:
        """The max age to be allowed via CORS."""
        return pulumi.get(self, "max_age")
    @property
    @pulumi.getter
    def methods(self) -> Optional[Sequence[str]]:
        """The methods to be allowed via CORS."""
        return pulumi.get(self, "methods")
    @property
    @pulumi.getter
    def origins(self) -> Optional[Sequence[str]]:
        """The origins to be allowed via CORS."""
        return pulumi.get(self, "origins")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceCosmosDbConfigurationInfoResponse(dict):
    """The settings for the Cosmos DB database backing the service."""
    def __init__(__self__, *,
                 key_vault_key_uri: Optional[str] = None,
                 offer_throughput: Optional[int] = None):
        """Store Cosmos DB settings; each optional field is only stored
        when explicitly provided."""
        if key_vault_key_uri is not None:
            pulumi.set(__self__, "key_vault_key_uri", key_vault_key_uri)
        if offer_throughput is not None:
            pulumi.set(__self__, "offer_throughput", offer_throughput)
    @property
    @pulumi.getter(name="keyVaultKeyUri")
    def key_vault_key_uri(self) -> Optional[str]:
        """URI of the customer-managed key for the backing database."""
        return pulumi.get(self, "key_vault_key_uri")
    @property
    @pulumi.getter(name="offerThroughput")
    def offer_throughput(self) -> Optional[int]:
        """Provisioned throughput for the backing database."""
        return pulumi.get(self, "offer_throughput")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceExportConfigurationInfoResponse(dict):
    """Export operation configuration information."""
    def __init__(__self__, *,
                 storage_account_name: Optional[str] = None):
        """The optional storage account name is only stored when provided."""
        if storage_account_name is not None:
            pulumi.set(__self__, "storage_account_name", storage_account_name)
    @property
    @pulumi.getter(name="storageAccountName")
    def storage_account_name(self) -> Optional[str]:
        """Name of the default export storage account."""
        return pulumi.get(self, "storage_account_name")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServicesPropertiesResponse(dict):
    """The properties of a service instance."""
    def __init__(__self__, *,
                 provisioning_state: str,
                 access_policies: Optional[Sequence['outputs.ServiceAccessPolicyEntryResponse']] = None,
                 authentication_configuration: Optional['outputs.ServiceAuthenticationConfigurationInfoResponse'] = None,
                 cors_configuration: Optional['outputs.ServiceCorsConfigurationInfoResponse'] = None,
                 cosmos_db_configuration: Optional['outputs.ServiceCosmosDbConfigurationInfoResponse'] = None,
                 export_configuration: Optional['outputs.ServiceExportConfigurationInfoResponse'] = None,
                 private_endpoint_connections: Optional[Sequence['outputs.PrivateEndpointConnectionResponse']] = None,
                 public_network_access: Optional[str] = None):
        """Store service properties; each optional field is only stored
        when explicitly provided."""
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if access_policies is not None:
            pulumi.set(__self__, "access_policies", access_policies)
        if authentication_configuration is not None:
            pulumi.set(__self__, "authentication_configuration", authentication_configuration)
        if cors_configuration is not None:
            pulumi.set(__self__, "cors_configuration", cors_configuration)
        if cosmos_db_configuration is not None:
            pulumi.set(__self__, "cosmos_db_configuration", cosmos_db_configuration)
        if export_configuration is not None:
            pulumi.set(__self__, "export_configuration", export_configuration)
        if private_endpoint_connections is not None:
            pulumi.set(__self__, "private_endpoint_connections", private_endpoint_connections)
        if public_network_access is not None:
            pulumi.set(__self__, "public_network_access", public_network_access)
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """The provisioning state."""
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="accessPolicies")
    def access_policies(self) -> Optional[Sequence['outputs.ServiceAccessPolicyEntryResponse']]:
        """The access policies of the service instance."""
        return pulumi.get(self, "access_policies")
    @property
    @pulumi.getter(name="authenticationConfiguration")
    def authentication_configuration(self) -> Optional['outputs.ServiceAuthenticationConfigurationInfoResponse']:
        """The authentication configuration for the service instance."""
        return pulumi.get(self, "authentication_configuration")
    @property
    @pulumi.getter(name="corsConfiguration")
    def cors_configuration(self) -> Optional['outputs.ServiceCorsConfigurationInfoResponse']:
        """The CORS configuration of the service instance."""
        return pulumi.get(self, "cors_configuration")
    @property
    @pulumi.getter(name="cosmosDbConfiguration")
    def cosmos_db_configuration(self) -> Optional['outputs.ServiceCosmosDbConfigurationInfoResponse']:
        """The settings for the Cosmos DB database backing the service."""
        return pulumi.get(self, "cosmos_db_configuration")
    @property
    @pulumi.getter(name="exportConfiguration")
    def export_configuration(self) -> Optional['outputs.ServiceExportConfigurationInfoResponse']:
        """The settings for the export operation of the service instance."""
        return pulumi.get(self, "export_configuration")
    @property
    @pulumi.getter(name="privateEndpointConnections")
    def private_endpoint_connections(self) -> Optional[Sequence['outputs.PrivateEndpointConnectionResponse']]:
        """Private endpoint connections set up for this resource."""
        return pulumi.get(self, "private_endpoint_connections")
    @property
    @pulumi.getter(name="publicNetworkAccess")
    def public_network_access(self) -> Optional[str]:
        """Permission control for public data-plane traffic while a private endpoint is enabled."""
        return pulumi.get(self, "public_network_access")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServicesResourceResponseIdentity(dict):
    """Managed identity associated with the service, if any."""
    def __init__(__self__, *,
                 principal_id: str,
                 tenant_id: str,
                 type: Optional[str] = None):
        """Store identity fields; the optional type is only stored when
        explicitly provided."""
        pulumi.set(__self__, "principal_id", principal_id)
        pulumi.set(__self__, "tenant_id", tenant_id)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> str:
        """The principal ID of the resource identity."""
        return pulumi.get(self, "principal_id")
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> str:
        """The tenant ID of the resource."""
        return pulumi.get(self, "tenant_id")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """Identity type; SystemAssigned and None are allowed."""
        return pulumi.get(self, "type")
    def _translate_property(self, prop):
        # Translate camelCase dict keys (API shape) to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| true | true |
f710bcdd6aae55098faa5368f8cdcae5b8cac447 | 408 | py | Python | educa/courses/api/urls.py | kformanowicz/educa | 290883dc973345c6d3784c6bb0cb784cec80fa60 | [
"MIT"
] | null | null | null | educa/courses/api/urls.py | kformanowicz/educa | 290883dc973345c6d3784c6bb0cb784cec80fa60 | [
"MIT"
] | 9 | 2020-06-05T20:29:39.000Z | 2022-03-12T00:10:48.000Z | educa/courses/api/urls.py | kformanowicz/educa | 290883dc973345c6d3784c6bb0cb784cec80fa60 | [
"MIT"
] | null | null | null | from django.conf.urls import include, url
from rest_framework import routers
from . import views
# DRF router: generates the standard route set for the course viewset.
router = routers.DefaultRouter()
router.register('courses', views.CourseViewSet)
urlpatterns = [
    # Subject list and detail endpoints.
    url(r'^subjects/$', views.SubjectListView.as_view(), name='subject_list'),
    url(r'^subjects/(?P<pk>\d+)/$', views.SubjectDetailView.as_view(), name='subject_detail'),
    # Remaining URLs are handled by the router-generated course routes.
    url(r'^', include(router.urls))
] | 31.384615 | 94 | 0.723039 | from django.conf.urls import include, url
from rest_framework import routers
from . import views
router = routers.DefaultRouter()
router.register('courses', views.CourseViewSet)
urlpatterns = [
url(r'^subjects/$', views.SubjectListView.as_view(), name='subject_list'),
url(r'^subjects/(?P<pk>\d+)/$', views.SubjectDetailView.as_view(), name='subject_detail'),
url(r'^', include(router.urls))
] | true | true |
f710be72959fb0be0368f06abdc06ed4a9466c4c | 1,401 | py | Python | zygoat/components/backend/docker_compose.py | Ian-MacLeod/zygoat | 83773fdebf8cddf06903c2d32bd575e33e23e252 | [
"MIT"
] | null | null | null | zygoat/components/backend/docker_compose.py | Ian-MacLeod/zygoat | 83773fdebf8cddf06903c2d32bd575e33e23e252 | [
"MIT"
] | null | null | null | zygoat/components/backend/docker_compose.py | Ian-MacLeod/zygoat | 83773fdebf8cddf06903c2d32bd575e33e23e252 | [
"MIT"
] | null | null | null | import importlib
import importlib.resources
import logging

from zygoat.constants import Phases, Projects
from zygoat.components import Component
from zygoat.config import yaml

from . import resources
# Module-level logger (root logger -- no name is passed).
log = logging.getLogger()
# Compose file this component reads and rewrites, relative to the CWD.
file_name = 'docker-compose.yml'
class DockerCompose(Component):
    """Component that manages this package's service entries in docker-compose.yml.

    ``create`` merges the service definitions bundled with this package into
    the repo's compose file, ``delete`` removes them again, and ``installed``
    reports whether they are currently present.

    Fix: this class uses ``importlib.resources.read_text`` but the file only
    did ``import importlib``, which does not guarantee the ``resources``
    submodule is loaded; ``import importlib.resources`` is now explicit in
    the module imports.
    """
    def _dump_config(self, data):
        # Serialize the compose config back to disk.
        with open(file_name, 'w') as fp:
            yaml.dump(data, fp)
    def _load_config(self):
        # Parse the repo's docker-compose.yml.
        with open(file_name) as fp:
            return yaml.load(fp.read())
    def create(self):
        log.info(f'Reading {file_name} from the repo')
        config = self._load_config()
        # Merge the compose services bundled with this package into the repo config.
        bundled = yaml.load(importlib.resources.read_text(resources, file_name))
        config['services'].update(bundled)
        log.info('Dumping updated docker-compose config')
        self._dump_config(config)
    def update(self):
        # Updating is implemented as a forced re-create.
        self.call_phase(Phases.CREATE, force_create=True)
    def delete(self):
        config = self._load_config()
        log.info('Removing backend and DB services from config')
        for service in (Projects.BACKEND, 'db'):
            del config['services'][service]
        log.info('Dumping updated docker-compose config')
        self._dump_config(config)
    @property
    def installed(self):
        services = self._load_config()['services']
        return all(name in services for name in (Projects.BACKEND, 'db'))
# Module-level singleton instance of the component.
docker_compose = DockerCompose()
| 26.942308 | 97 | 0.681656 | import importlib
import logging
from zygoat.constants import Phases, Projects
from zygoat.components import Component
from zygoat.config import yaml
from . import resources
log = logging.getLogger()
file_name = 'docker-compose.yml'
class DockerCompose(Component):
def _dump_config(self, data):
with open(file_name, 'w') as root_config:
yaml.dump(data, root_config)
def _load_config(self):
with open(file_name) as root_config:
return yaml.load(root_config.read())
def create(self):
log.info(f'Reading {file_name} from the repo')
config = self._load_config()
config['services'].update(yaml.load(importlib.resources.read_text(resources, file_name)))
log.info('Dumping updated docker-compose config')
self._dump_config(config)
def update(self):
self.call_phase(Phases.CREATE, force_create=True)
def delete(self):
config = self._load_config()
log.info('Removing backend and DB services from config')
del config['services'][Projects.BACKEND]
del config['services']['db']
log.info('Dumping updated docker-compose config')
self._dump_config(config)
@property
def installed(self):
services = self._load_config()['services']
return Projects.BACKEND in services and 'db' in services
docker_compose = DockerCompose()
| true | true |
f710be848a7796c69f453a60a5b769bb077868cc | 11,427 | py | Python | main.py | ikuroNoriiwa/shellcode_transform | fac7d04168d9f3888a63c7ce76cc93bc8bef0058 | [
"Apache-2.0"
] | 2 | 2021-09-13T09:40:12.000Z | 2021-09-13T11:37:54.000Z | main.py | ikuroNoriiwa/shellcode_transform | fac7d04168d9f3888a63c7ce76cc93bc8bef0058 | [
"Apache-2.0"
] | null | null | null | main.py | ikuroNoriiwa/shellcode_transform | fac7d04168d9f3888a63c7ce76cc93bc8bef0058 | [
"Apache-2.0"
] | 1 | 2021-07-09T17:06:46.000Z | 2021-07-09T17:06:46.000Z | #!/usr/bin/python3
import argparse
from os.path import isfile
from pathlib import Path
from re import compile, findall, split as re_split, sub, search, match
from utils import error
def parse_buffer(encode_detail_buffer, shellcode, numberbefore=0, numberafter=0):
    """Parse one encoding directive into an instruction tuple.

    Accepted directive shapes (examples):

    * ``"X"``      -> whole range, implicit parameter 1
    * ``"r32"``    -> whole range, explicit parameter ``"32"``
    * ``"12l"``    -> single offset ``"12"``, implicit parameter 1
    * ``"422X5"``  -> single offset ``"422"``, explicit parameter ``"5"``
    * ``"9:13X"``  -> explicit range, implicit parameter 1
    * ``"9:13X4"`` -> explicit range, explicit parameter ``"4"``

    :param encode_detail_buffer: directive to parse; ``")"`` or malformed
        input yields ``None``.
    :param shellcode: shellcode bytes; only its length is used, to default
        the end of the range to the last byte index.
    :param numberbefore: fallback start offset (0 means "start of shellcode").
    :param numberafter: fallback end offset (0 means "end of shellcode").
    :return: ``(encode_type, start, end, parameter)`` or ``None``.

    NOTE: ``start``/``end``/``parameter`` deliberately keep the type they
    were derived from (ints for computed defaults, strings for values parsed
    out of the directive), matching the original behavior.

    Fixes vs. the original: debug ``print`` calls removed, and the regex
    fragments are defined locally instead of relying on globals that only
    exist once the ``__main__`` block has run (importing this module and
    calling the function used to raise NameError for ``"9:13X4"``-style
    input).
    """
    # Patterns formerly pulled from globals defined in the __main__ block.
    detail_pattern = r"X|x|L|l|R|r|\+|-"
    integer_pattern = r"([0-9]+)"
    to_ret = None
    try:
        if encode_detail_buffer == ")":
            # Stray closing parenthesis flushed by the tokenizer: nothing to do.
            return None
        if len(encode_detail_buffer) == 1:
            # Bare encoder letter: apply over the whole (or preset) range.
            begin = numberbefore if numberbefore != 0 else 0
            end = numberafter if numberafter != 0 else len(shellcode) - 1
            to_ret = (encode_detail_buffer, begin, end, 1)
        elif ":" in encode_detail_buffer:
            # Explicit "start:end" range.
            bounds = encode_detail_buffer[:-1].split(":")
            if not encode_detail_buffer[-1].isdigit():
                # e.g. "9:13X": trailing encoder letter, implicit parameter.
                to_ret = (encode_detail_buffer[-1], bounds[0], bounds[1], 1)
            else:
                # e.g. "9:13X4": encoder letter followed by its parameter.
                matches = findall("(" + detail_pattern + ")" + integer_pattern,
                                  encode_detail_buffer)
                letter, parameter = matches[0]
                to_ret = (letter, bounds[0], bounds[1].split(letter)[0], parameter)
        elif encode_detail_buffer[0].isdigit() and not encode_detail_buffer[-1].isdigit():
            # e.g. "12l": one offset followed by the encoder letter.
            offset = encode_detail_buffer[:-1]
            to_ret = (encode_detail_buffer[-1], offset, offset, 1)
        elif not encode_detail_buffer[0].isdigit() and encode_detail_buffer[-1].isdigit():
            # e.g. "r32": encoder letter plus parameter over the whole range.
            begin = numberbefore if numberbefore != 0 else 0
            end = numberafter if numberafter != 0 else len(shellcode) - 1
            to_ret = (encode_detail_buffer[0], begin, end, encode_detail_buffer[1:])
        elif encode_detail_buffer[0].isdigit() and encode_detail_buffer[-1].isdigit():
            # e.g. "422X5": offset digits, encoder letter, then parameter digits.
            before = ""
            after = ""
            letter = ""
            passed_letter = False
            for char in encode_detail_buffer:
                if char.isdigit() and not passed_letter:
                    before += char
                elif not char.isdigit():
                    letter = char
                    passed_letter = True
                else:
                    after += char
            to_ret = (letter, before, before, after)
    except IndexError:
        # Empty or malformed directive: report "nothing to schedule".
        to_ret = None
    return to_ret
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--file', '--infile', dest='infile',
                        help='file to encode, expects filetype of data i.e. msfvenom ... -f raw or echo -ne "........"',
                        required=True)
    parser.add_argument('-e', '--enocde', '--encode-list', dest='encodelist',
                        help='list of encodage to use in suite X=xor +=+1 -=-1 add payload behind to get it inside, X,-,2,X,+3,+')
    parser.add_argument('-o', '--out', '--outfile', dest='outfile', help='write assembly to file (default: STDOUT)')
    # parser.add_argument('-d', dest='decode', default=False, action='store_true', help='Decode what is passed via -f or -s')
    args = parser.parse_args()
    if not isfile(args.infile):
        error("No such file: {}".format(args.infile))
    # NOTE(review): bare except swallows every error while reading the file.
    try:
        shellcode = Path(args.infile).read_bytes()
    except:
        error("While reading input shellcode file")
    if args.encodelist:
        # Grammar building blocks: integers, optional "start:end" sections,
        # simple encoders (* < >) and detailed encoders (X x L l R r + -)
        # that may carry a numeric parameter.
        regex_entier = r"([0-9]+)"
        regex_shellcode_section = r"(" + regex_entier + r"?:)?" + regex_entier + r"?"
        regex_shellcode_encodage_simple = r"\*|<|>"
        regex_shellcode_encodage_detail = r"X|x|L|l|R|r|\+|-"
        regex_shellcode_encodage_list = r"(" + regex_shellcode_encodage_detail + r"|" + regex_shellcode_encodage_simple + r")"
        for sub_encode_list in args.encodelist.split('|'):
            # Reject ambiguous input: a detailed encoder followed by digits
            # and then another token needs an explicit separator.
            if search(
                    r"(" + regex_shellcode_encodage_detail + r")" + regex_entier + r"(\(|:|" + regex_shellcode_encodage_list + r")",
                    sub_encode_list):
                print(search(
                    r"(" + regex_shellcode_encodage_detail + r")" + regex_entier + r"(\(|:|" + regex_shellcode_encodage_list + r")",
                    sub_encode_list))
                error(
                    "invalid encode list add ; between encodage that need details and all encode sort " + regex_shellcode_encodage_detail + " and of course before : if need")
            if search(r"\([^\)]*:[^\)]*\)", sub_encode_list):
                error("invalid encode list, you cant put positionnal detail inside brackets")
            if search(r"(\([^\)]*\()|(\)[^\(]*\))", sub_encode_list):
                error("invalid choice, you can't get a encode list with imbrick parenthesis")
            # sub_encode_list = sub( r"(" + regex_shellcode_encodage_detail + r"|" + regex_shellcode_encodage_simple + r"|\))(" + regex_shellcode_encodage_detail + r"|" + regex_shellcode_encodage_simple + r"|\()", r"\1;\2", sub_encode_list)
            # Insert "," between adjacent encoder tokens so the
            # character-by-character scanner below can split on it; applied
            # twice to catch overlapping adjacent pairs.
            sub_encode_list = sub(
                regex_shellcode_encodage_list + r"(" + regex_shellcode_encodage_detail + r"|" + regex_shellcode_encodage_simple + r"|:|\(|\))",
                r"\1,\2", sub_encode_list)
            sub_encode_list = sub(
                regex_shellcode_encodage_list + r"(" + regex_shellcode_encodage_detail + r"|" + regex_shellcode_encodage_simple + r"|:|\(|\))",
                r"\1,\2", sub_encode_list)
            sub_encode_list = sub(r"\);", r")", sub_encode_list)
            encode_detail_buffer = ""
            tab_tupl = []
            # Sentinel so the final buffered token is also flushed below.
            sub_encode_list += ",a"
            for encode_detail in sub_encode_list:
                # print('schema all : {}'.format(repr(encode_detail)))
                if encode_detail == "," and "(" not in encode_detail_buffer or encode_detail == ")":
                    encode_detail = encode_detail.replace(',', '')
                    print(encode_detail_buffer)
                    # tab_tupl.append((param1, param2, param3 , param4))
                    if encode_detail_buffer == ")":
                        pass
                    elif "(" in encode_detail_buffer or ")" in encode_detail_buffer:
                        # Parenthesised group: "start:end(enc1,enc2,...)".
                        start_number = findall("(.*)\((.*)", encode_detail_buffer)
                        start = start_number[0][0]
                        if ":" in start:
                            # Explicit range given before the parentheses.
                            tmp = start.split(":")
                            print(len(start.split(":")))
                            start = tmp[0]
                            end = tmp[1]
                        else:
                            # No range given before the parentheses.
                            end = 0
                        param = start_number[0][1]
                        for spl in param.split(','):
                            ret = parse_buffer(spl, shellcode, start, end)
                            if ret != None:
                                tab_tupl.append(ret)
                    else:
                        ret = parse_buffer(encode_detail_buffer, shellcode)
                        if ret != None:
                            tab_tupl.append(ret)
                    encode_detail_buffer = ""
                encode_detail_buffer += encode_detail
            print(tab_tupl)
        # print(encode_detail_buffer)
    # NOTE(review): a block of commented-out prototype code was condensed
    # away here.  It sketched an alternative regex-driven parser and the
    # encoder dispatch itself (rolling_xor / right_left_rotation_bit /
    # add_sub plus nasm stub templates); none of those helpers exist in this
    # file -- see project history if the encoders need to be restored.
    if 0 in shellcode:
        print("\033[31mIt looks like your shellcode will not be valid, there is a 00 byte\033[0m")
    # print_shellcode(shellcode)
| 45.891566 | 240 | 0.543362 |
import argparse
from os.path import isfile
from pathlib import Path
from re import compile, findall, split as re_split, sub, search, match
from utils import error
def parse_buffer(encode_detail_buffer, shellcode, numberbefore=0, numberafter=0):
print(encode_detail_buffer)
print(shellcode)
print(numberafter)
print(numberbefore)
to_ret = None
try:
if encode_detail_buffer == ")":
to_ret = None
pass
elif len(encode_detail_buffer) == 1:
begin = numberbefore
else:
begin = 0
if numberafter != 0:
end = numberafter
else:
end = len(shellcode) - 1
to_ret = (encode_detail_buffer, begin, end, 1)
elif ":" in encode_detail_buffer:
l_buffer[:-1].split(":")
if not encode_detail_buffer[-1].isdigit():
[1], 1)
elif encode_detail_buffer[-1].isdigit():
codage_detail + ")" + regex_entier,
encode_detail_buffer)
to_ret = (letter[0][0], tmp[0], tmp[1].split(letter[0][0])[0],
letter[0][1])
elif encode_detail_buffer[0].isdigit() and not encode_detail_buffer[-1].isdigit():
fer[:-1],
encode_detail_buffer[:-1], 1)
elif not encode_detail_buffer[0].isdigit() and encode_detail_buffer[-1].isdigit():
efore
else:
begin = 0
if numberafter != 0:
end = numberafter
else:
end = len(shellcode) - 1
to_ret = (encode_detail_buffer[0], begin, end, encode_detail_buffer[1:])
elif encode_detail_buffer[0].isdigit() and encode_detail_buffer[-1].isdigit():
"
letter = ""
passed_letter = False
for i in encode_detail_buffer:
if i.isdigit() and passed_letter == False:
before += i
elif not i.isdigit():
letter = i
passed_letter = True
elif i.isdigit() and passed_letter == True:
after += i
to_ret = (letter, before, before, after)
print("({},{},{},{})".format(letter, before, before, after))
except IndexError as er:
print(er)
to_ret = None
pass
finally:
return to_ret
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', '--infile', dest='infile',
help='file to encode, expects filetype of data i.e. msfvenom ... -f raw or echo -ne "........"',
required=True)
parser.add_argument('-e', '--enocde', '--encode-list', dest='encodelist',
help='list of encodage to use in suite X=xor +=+1 -=-1 add payload behind to get it inside, X,-,2,X,+3,+')
parser.add_argument('-o', '--out', '--outfile', dest='outfile', help='write assembly to file (default: STDOUT)')
args = parser.parse_args()
if not isfile(args.infile):
error("No such file: {}".format(args.infile))
try:
shellcode = Path(args.infile).read_bytes()
except:
error("While reading input shellcode file")
if args.encodelist:
regex_entier = r"([0-9]+)"
regex_shellcode_section = r"(" + regex_entier + r"?:)?" + regex_entier + r"?"
regex_shellcode_encodage_simple = r"\*|<|>"
regex_shellcode_encodage_detail = r"X|x|L|l|R|r|\+|-"
regex_shellcode_encodage_list = r"(" + regex_shellcode_encodage_detail + r"|" + regex_shellcode_encodage_simple + r")"
for sub_encode_list in args.encodelist.split('|'):
if search(
r"(" + regex_shellcode_encodage_detail + r")" + regex_entier + r"(\(|:|" + regex_shellcode_encodage_list + r")",
sub_encode_list):
print(search(
r"(" + regex_shellcode_encodage_detail + r")" + regex_entier + r"(\(|:|" + regex_shellcode_encodage_list + r")",
sub_encode_list))
error(
"invalid encode list add ; between encodage that need details and all encode sort " + regex_shellcode_encodage_detail + " and of course before : if need")
if search(r"\([^\)]*:[^\)]*\)", sub_encode_list):
error("invalid encode list, you cant put positionnal detail inside brackets")
if search(r"(\([^\)]*\()|(\)[^\(]*\))", sub_encode_list):
error("invalid choice, you can't get a encode list with imbrick parenthesis")
# sub_encode_list = sub( r"(" + regex_shellcode_encodage_detail + r"|" + regex_shellcode_encodage_simple + r"|\))(" + regex_shellcode_encodage_detail + r"|" + regex_shellcode_encodage_simple + r"|\()", r"\1;\2", sub_encode_list)
sub_encode_list = sub(
regex_shellcode_encodage_list + r"(" + regex_shellcode_encodage_detail + r"|" + regex_shellcode_encodage_simple + r"|:|\(|\))",
r"\1,\2", sub_encode_list)
sub_encode_list = sub(
regex_shellcode_encodage_list + r"(" + regex_shellcode_encodage_detail + r"|" + regex_shellcode_encodage_simple + r"|:|\(|\))",
r"\1,\2", sub_encode_list)
sub_encode_list = sub(r"\);", r")", sub_encode_list)
encode_detail_buffer = ""
tab_tupl = []
sub_encode_list += ",a"
for encode_detail in sub_encode_list:
# print('schema all : {}'.format(repr(encode_detail)))
if encode_detail == "," and "(" not in encode_detail_buffer or encode_detail == ")":
encode_detail = encode_detail.replace(',', '')
print(encode_detail_buffer)
# tab_tupl.append((param1, param2, param3 , param4))
if encode_detail_buffer == ")":
pass
elif "(" in encode_detail_buffer or ")" in encode_detail_buffer:
## Gestion des parenthèses
start_number = findall("(.*)\((.*)", encode_detail_buffer)
start = start_number[0][0]
if ":" in start:
## Range dans parentheses
tmp = start.split(":")
print(len(start.split(":")))
start = tmp[0]
end = tmp[1]
else:
## no range in parenthese
end = 0
param = start_number[0][1]
for spl in param.split(','):
ret = parse_buffer(spl, shellcode, start, end)
if ret != None:
tab_tupl.append(ret)
else:
ret = parse_buffer(encode_detail_buffer, shellcode)
if ret != None:
tab_tupl.append(ret)
encode_detail_buffer = ""
encode_detail_buffer += encode_detail
print(tab_tupl)
# print(encode_detail_buffer)
# regex_encode_type_ba = r"((([0-9]*):)?([0-9]*))?\((((X|x|L|l|R|r|\+|-)([0-9]*))
# regex_encode_type_base = r"((([0-9]*):([0-9]*))?((\*|<|>)|((X|x|L|l|R|r|\+|-)([0-9]*)));)"
# regex_split = compile(r"\(|\)")
# regex_sub_encode_type = compile(regex_encode_type_base)
# for sub_encode_list in args.encodelist.split('|'):
# regex_encode_type=compile( regex_encode_type_base + r"\(" + regex_encode_type_base + r"\)?" + regex_encode_type_base + r"?" )
# for sub_encode_list in args.encodelist.split('|'):
# sub_encode_list_parsed = []
# for encode in findall(regex_encode_type, sub_encode_list) :
# offset = 1 if encode[4]=='' else int(encode[4])
# encode_type=encode[1]+encode[3]
# sub_encode_list_parsed.append((offset, encode_type))
# for encode in sub_encode_list_parsed:
# print(encode)
# for encode in findall(regex_encode_type, args.encodelist) :
# encode_type=encode[1]+encode[3]
# offset = 1 if encode[4]=='' else int(encode[4])
# if encode_type == "X" or encode_type == "x":
# print("Running XOR encoder")
# shellcode = rolling_xor(shellcode)
# shellcode = nasm( template_XOR.format(ecx_len(len(shellcode) - 1))) + shellcode
# # ','.join(hex(x) for x in shellcode)
# elif encode_type == "L" or encode_type == "l" or encode_type == "R" or encode_type == "r":
# print("Running right or left bit shifting encoder")
# shellcode = right_left_rotation_bit(shellcode, encode_type == "R" or encode_type == "r", offset)
# shellcode=nasm( template_rotation.format( ecx_len(len(shellcode)), 'rol' if encode_type == "R" or encode_type == "r" else 'ror', offset)) + shellcode
# elif encode_type == "+" or encode_type == "-":
# print("Running + or - encoder")
# shellcode = add_sub(shellcode, add_or_sub=(encode_type=='+'), to_num=offset)
# shellcode = nasm( template_sub_add.format(ecx_len(len(shellcode)), 'sub' if encode_type=='+' else 'add', offset)) + shellcode
# else:
# error("The input encoding action {} is not valid".format(encode_type))
if 0 in shellcode:
print("\033[31mIt looks like your shellcode will not be valid, there is a 00 byte\033[0m")
# print_shellcode(shellcode)
| true | true |
f710bec06f616273ff86b3dba79e21d3bc0e6645 | 6,784 | py | Python | examples/basic_example_v1/basic_example_data.py | yandex-cloud/ydb-python-sdk | 0df2dce2d77fc41ad3020072740f51dd91630177 | [
"Apache-2.0"
] | 19 | 2019-07-01T08:25:29.000Z | 2022-01-26T14:46:51.000Z | examples/basic_example_v1/basic_example_data.py | yandex-cloud/ydb-python-sdk | 0df2dce2d77fc41ad3020072740f51dd91630177 | [
"Apache-2.0"
] | 5 | 2019-07-02T13:36:42.000Z | 2021-09-14T06:46:48.000Z | examples/basic_example_v1/basic_example_data.py | yandex-cloud/ydb-python-sdk | 0df2dce2d77fc41ad3020072740f51dd91630177 | [
"Apache-2.0"
] | 10 | 2019-06-07T10:36:19.000Z | 2021-10-15T08:58:11.000Z | # -*- coding: utf-8 -*-
import iso8601
def to_days(date):
    """Return *date* (an ISO-8601 date string) as whole days since 1970-01-01."""
    elapsed = iso8601.parse_date(date) - iso8601.parse_date("1970-1-1")
    return elapsed.days
class Series(object):
    """Plain data holder for one series; release_date is stored as a day offset."""
    __slots__ = ('series_id', 'title', 'release_date', 'series_info')
    def __init__(self, series_id, title, release_date, series_info):
        self.series_id = series_id
        self.title = title
        # Converted from an ISO date string to days since the Unix epoch.
        self.release_date = to_days(release_date)
        self.series_info = series_info
class Season(object):
    """Plain data holder for one season; both air dates are stored as day offsets."""
    __slots__ = ('series_id', 'season_id', 'title', 'first_aired', 'last_aired')
    def __init__(self, series_id, season_id, title, first_aired, last_aired):
        self.series_id = series_id
        self.season_id = season_id
        self.title = title
        # Converted from ISO date strings to days since the Unix epoch.
        self.first_aired = to_days(first_aired)
        self.last_aired = to_days(last_aired)
class Episode(object):
    """Plain data holder for one episode; air_date is stored as a day offset."""
    __slots__ = ('series_id', 'season_id', 'episode_id', 'title', 'air_date')
    def __init__(self, series_id, season_id, episode_id, title, air_date):
        self.series_id = series_id
        self.season_id = season_id
        self.episode_id = episode_id
        self.title = title
        # Converted from an ISO date string to days since the Unix epoch.
        self.air_date = to_days(air_date)
def get_series_data():
    """Return the hard-coded sample Series rows (two entries)."""
    return [
        Series(1, "IT Crowd", "2006-02-03",
               "The IT Crowd is a British sitcom produced by Channel 4, written by Graham Linehan, produced by "
               "Ash Atalla and starring Chris O'Dowd, Richard Ayoade, Katherine Parkinson, and Matt Berry."),
        Series(2, "Silicon Valley", "2014-04-06",
               "Silicon Valley is an American comedy television series created by Mike Judge, John Altschuler and "
               "Dave Krinsky. The series focuses on five young men who founded a startup company in Silicon Valley.")
    ]
def get_seasons_data():
    """Return the hard-coded sample Season rows for both series."""
    return [
        Season(1, 1, "Season 1", "2006-02-03", "2006-03-03"),
        Season(1, 2, "Season 2", "2007-08-24", "2007-09-28"),
        Season(1, 3, "Season 3", "2008-11-21", "2008-12-26"),
        Season(1, 4, "Season 4", "2010-06-25", "2010-07-30"),
        Season(2, 1, "Season 1", "2014-04-06", "2014-06-01"),
        Season(2, 2, "Season 2", "2015-04-12", "2015-06-14"),
        Season(2, 3, "Season 3", "2016-04-24", "2016-06-26"),
        Season(2, 4, "Season 4", "2017-04-23", "2017-06-25"),
        Season(2, 5, "Season 5", "2018-03-25", "2018-05-13")
    ]
def get_episodes_data():
    """Return the hard-coded Episode rows for series 1 (four seasons)."""
    return [
        Episode(1, 1, 1, "Yesterday's Jam", "2006-02-03"),
        Episode(1, 1, 2, "Calamity Jen", "2006-02-03"),
        Episode(1, 1, 3, "Fifty-Fifty", "2006-02-10"),
        Episode(1, 1, 4, "The Red Door", "2006-02-17"),
        Episode(1, 1, 5, "The Haunting of Bill Crouse", "2006-02-24"),
        Episode(1, 1, 6, "Aunt Irma Visits", "2006-03-03"),
        # NOTE(review): "2006-08-24" predates Season(1, 2)'s first_aired
        # ("2007-08-24" in get_seasons_data) -- probable typo, confirm.
        Episode(1, 2, 1, "The Work Outing", "2006-08-24"),
        Episode(1, 2, 2, "Return of the Golden Child", "2007-08-31"),
        Episode(1, 2, 3, "Moss and the German", "2007-09-07"),
        Episode(1, 2, 4, "The Dinner Party", "2007-09-14"),
        Episode(1, 2, 5, "Smoke and Mirrors", "2007-09-21"),
        Episode(1, 2, 6, "Men Without Women", "2007-09-28"),
        Episode(1, 3, 1, "From Hell", "2008-11-21"),
        Episode(1, 3, 2, "Are We Not Men?", "2008-11-28"),
        Episode(1, 3, 3, "Tramps Like Us", "2008-12-05"),
        Episode(1, 3, 4, "The Speech", "2008-12-12"),
        Episode(1, 3, 5, "Friendface", "2008-12-19"),
        Episode(1, 3, 6, "Calendar Geeks", "2008-12-26"),
        Episode(1, 4, 1, "Jen The Fredo", "2010-06-25"),
        Episode(1, 4, 2, "The Final Countdown", "2010-07-02"),
        Episode(1, 4, 3, "Something Happened", "2010-07-09"),
        Episode(1, 4, 4, "Italian For Beginners", "2010-07-16"),
        Episode(1, 4, 5, "Bad Boys", "2010-07-23"),
        Episode(1, 4, 6, "Reynholm vs Reynholm", "2010-07-30"),
    ]
def get_episodes_data_for_bulk_upsert():
    """Return the hard-coded Episode rows for series 2 (five seasons)."""
    return [
        Episode(2, 1, 1, "Minimum Viable Product", "2014-04-06"),
        Episode(2, 1, 2, "The Cap Table", "2014-04-13"),
        Episode(2, 1, 3, "Articles of Incorporation", "2014-04-20"),
        Episode(2, 1, 4, "Fiduciary Duties", "2014-04-27"),
        Episode(2, 1, 5, "Signaling Risk", "2014-05-04"),
        Episode(2, 1, 6, "Third Party Insourcing", "2014-05-11"),
        Episode(2, 1, 7, "Proof of Concept", "2014-05-18"),
        Episode(2, 1, 8, "Optimal Tip-to-Tip Efficiency", "2014-06-01"),
        Episode(2, 2, 1, "Sand Hill Shuffle", "2015-04-12"),
        Episode(2, 2, 2, "Runaway Devaluation", "2015-04-19"),
        Episode(2, 2, 3, "Bad Money", "2015-04-26"),
        Episode(2, 2, 4, "The Lady", "2015-05-03"),
        Episode(2, 2, 5, "Server Space", "2015-05-10"),
        Episode(2, 2, 6, "Homicide", "2015-05-17"),
        Episode(2, 2, 7, "Adult Content", "2015-05-24"),
        Episode(2, 2, 8, "White Hat/Black Hat", "2015-05-31"),
        Episode(2, 2, 9, "Binding Arbitration", "2015-06-07"),
        Episode(2, 2, 10, "Two Days of the Condor", "2015-06-14"),
        Episode(2, 3, 1, "Founder Friendly", "2016-04-24"),
        Episode(2, 3, 2, "Two in the Box", "2016-05-01"),
        Episode(2, 3, 3, "Meinertzhagen's Haversack", "2016-05-08"),
        Episode(2, 3, 4, "Maleant Data Systems Solutions", "2016-05-15"),
        Episode(2, 3, 5, "The Empty Chair", "2016-05-22"),
        Episode(2, 3, 6, "Bachmanity Insanity", "2016-05-29"),
        Episode(2, 3, 7, "To Build a Better Beta", "2016-06-05"),
        Episode(2, 3, 8, "Bachman's Earnings Over-Ride", "2016-06-12"),
        Episode(2, 3, 9, "Daily Active Users", "2016-06-19"),
        Episode(2, 3, 10, "The Uptick", "2016-06-26"),
        Episode(2, 4, 1, "Success Failure", "2017-04-23"),
        Episode(2, 4, 2, "Terms of Service", "2017-04-30"),
        Episode(2, 4, 3, "Intellectual Property", "2017-05-07"),
        Episode(2, 4, 4, "Teambuilding Exercise", "2017-05-14"),
        Episode(2, 4, 5, "The Blood Boy", "2017-05-21"),
        Episode(2, 4, 6, "Customer Service", "2017-05-28"),
        Episode(2, 4, 7, "The Patent Troll", "2017-06-04"),
        Episode(2, 4, 8, "The Keenan Vortex", "2017-06-11"),
        Episode(2, 4, 9, "Hooli-Con", "2017-06-18"),
        Episode(2, 4, 10, "Server Error", "2017-06-25"),
        Episode(2, 5, 1, "Grow Fast or Die Slow", "2018-03-25"),
        Episode(2, 5, 2, "Reorientation", "2018-04-01"),
        Episode(2, 5, 3, "Chief Operating Officer", "2018-04-08"),
        Episode(2, 5, 4, "Tech Evangelist", "2018-04-15"),
        Episode(2, 5, 5, "Facial Recognition", "2018-04-22"),
        Episode(2, 5, 6, "Artificial Emotional Intelligence", "2018-04-29"),
        Episode(2, 5, 7, "Initial Coin Offering", "2018-05-06"),
        Episode(2, 5, 8, "Fifty-One Percent", "2018-05-13"),
    ]
| 46.786207 | 117 | 0.573261 |
import iso8601
def to_days(date):
timedelta = iso8601.parse_date(date) - iso8601.parse_date("1970-1-1")
return timedelta.days
class Series(object):
__slots__ = ('series_id', 'title', 'release_date', 'series_info')
def __init__(self, series_id, title, release_date, series_info):
self.series_id = series_id
self.title = title
self.release_date = to_days(release_date)
self.series_info = series_info
class Season(object):
__slots__ = ('series_id', 'season_id', 'title', 'first_aired', 'last_aired')
def __init__(self, series_id, season_id, title, first_aired, last_aired):
self.series_id = series_id
self.season_id = season_id
self.title = title
self.first_aired = to_days(first_aired)
self.last_aired = to_days(last_aired)
class Episode(object):
__slots__ = ('series_id', 'season_id', 'episode_id', 'title', 'air_date')
def __init__(self, series_id, season_id, episode_id, title, air_date):
self.series_id = series_id
self.season_id = season_id
self.episode_id = episode_id
self.title = title
self.air_date = to_days(air_date)
def get_series_data():
return [
Series(1, "IT Crowd", "2006-02-03",
"The IT Crowd is a British sitcom produced by Channel 4, written by Graham Linehan, produced by "
"Ash Atalla and starring Chris O'Dowd, Richard Ayoade, Katherine Parkinson, and Matt Berry."),
Series(2, "Silicon Valley", "2014-04-06",
"Silicon Valley is an American comedy television series created by Mike Judge, John Altschuler and "
"Dave Krinsky. The series focuses on five young men who founded a startup company in Silicon Valley.")
]
def get_seasons_data():
return [
Season(1, 1, "Season 1", "2006-02-03", "2006-03-03"),
Season(1, 2, "Season 2", "2007-08-24", "2007-09-28"),
Season(1, 3, "Season 3", "2008-11-21", "2008-12-26"),
Season(1, 4, "Season 4", "2010-06-25", "2010-07-30"),
Season(2, 1, "Season 1", "2014-04-06", "2014-06-01"),
Season(2, 2, "Season 2", "2015-04-12", "2015-06-14"),
Season(2, 3, "Season 3", "2016-04-24", "2016-06-26"),
Season(2, 4, "Season 4", "2017-04-23", "2017-06-25"),
Season(2, 5, "Season 5", "2018-03-25", "2018-05-13")
]
def get_episodes_data():
return [
Episode(1, 1, 1, "Yesterday's Jam", "2006-02-03"),
Episode(1, 1, 2, "Calamity Jen", "2006-02-03"),
Episode(1, 1, 3, "Fifty-Fifty", "2006-02-10"),
Episode(1, 1, 4, "The Red Door", "2006-02-17"),
Episode(1, 1, 5, "The Haunting of Bill Crouse", "2006-02-24"),
Episode(1, 1, 6, "Aunt Irma Visits", "2006-03-03"),
Episode(1, 2, 1, "The Work Outing", "2006-08-24"),
Episode(1, 2, 2, "Return of the Golden Child", "2007-08-31"),
Episode(1, 2, 3, "Moss and the German", "2007-09-07"),
Episode(1, 2, 4, "The Dinner Party", "2007-09-14"),
Episode(1, 2, 5, "Smoke and Mirrors", "2007-09-21"),
Episode(1, 2, 6, "Men Without Women", "2007-09-28"),
Episode(1, 3, 1, "From Hell", "2008-11-21"),
Episode(1, 3, 2, "Are We Not Men?", "2008-11-28"),
Episode(1, 3, 3, "Tramps Like Us", "2008-12-05"),
Episode(1, 3, 4, "The Speech", "2008-12-12"),
Episode(1, 3, 5, "Friendface", "2008-12-19"),
Episode(1, 3, 6, "Calendar Geeks", "2008-12-26"),
Episode(1, 4, 1, "Jen The Fredo", "2010-06-25"),
Episode(1, 4, 2, "The Final Countdown", "2010-07-02"),
Episode(1, 4, 3, "Something Happened", "2010-07-09"),
Episode(1, 4, 4, "Italian For Beginners", "2010-07-16"),
Episode(1, 4, 5, "Bad Boys", "2010-07-23"),
Episode(1, 4, 6, "Reynholm vs Reynholm", "2010-07-30"),
]
def get_episodes_data_for_bulk_upsert():
    """Return the fixture Episode rows for show 2 (seasons 1-5), used by the
    bulk-upsert tests.  Fields mirror get_episodes_data().
    """
    rows = [
        (2, 1, 1, "Minimum Viable Product", "2014-04-06"),
        (2, 1, 2, "The Cap Table", "2014-04-13"),
        (2, 1, 3, "Articles of Incorporation", "2014-04-20"),
        (2, 1, 4, "Fiduciary Duties", "2014-04-27"),
        (2, 1, 5, "Signaling Risk", "2014-05-04"),
        (2, 1, 6, "Third Party Insourcing", "2014-05-11"),
        (2, 1, 7, "Proof of Concept", "2014-05-18"),
        (2, 1, 8, "Optimal Tip-to-Tip Efficiency", "2014-06-01"),
        (2, 2, 1, "Sand Hill Shuffle", "2015-04-12"),
        (2, 2, 2, "Runaway Devaluation", "2015-04-19"),
        (2, 2, 3, "Bad Money", "2015-04-26"),
        (2, 2, 4, "The Lady", "2015-05-03"),
        (2, 2, 5, "Server Space", "2015-05-10"),
        (2, 2, 6, "Homicide", "2015-05-17"),
        (2, 2, 7, "Adult Content", "2015-05-24"),
        (2, 2, 8, "White Hat/Black Hat", "2015-05-31"),
        (2, 2, 9, "Binding Arbitration", "2015-06-07"),
        (2, 2, 10, "Two Days of the Condor", "2015-06-14"),
        (2, 3, 1, "Founder Friendly", "2016-04-24"),
        (2, 3, 2, "Two in the Box", "2016-05-01"),
        (2, 3, 3, "Meinertzhagen's Haversack", "2016-05-08"),
        (2, 3, 4, "Maleant Data Systems Solutions", "2016-05-15"),
        (2, 3, 5, "The Empty Chair", "2016-05-22"),
        (2, 3, 6, "Bachmanity Insanity", "2016-05-29"),
        (2, 3, 7, "To Build a Better Beta", "2016-06-05"),
        (2, 3, 8, "Bachman's Earnings Over-Ride", "2016-06-12"),
        (2, 3, 9, "Daily Active Users", "2016-06-19"),
        (2, 3, 10, "The Uptick", "2016-06-26"),
        (2, 4, 1, "Success Failure", "2017-04-23"),
        (2, 4, 2, "Terms of Service", "2017-04-30"),
        (2, 4, 3, "Intellectual Property", "2017-05-07"),
        (2, 4, 4, "Teambuilding Exercise", "2017-05-14"),
        (2, 4, 5, "The Blood Boy", "2017-05-21"),
        (2, 4, 6, "Customer Service", "2017-05-28"),
        (2, 4, 7, "The Patent Troll", "2017-06-04"),
        (2, 4, 8, "The Keenan Vortex", "2017-06-11"),
        (2, 4, 9, "Hooli-Con", "2017-06-18"),
        (2, 4, 10, "Server Error", "2017-06-25"),
        (2, 5, 1, "Grow Fast or Die Slow", "2018-03-25"),
        (2, 5, 2, "Reorientation", "2018-04-01"),
        (2, 5, 3, "Chief Operating Officer", "2018-04-08"),
        (2, 5, 4, "Tech Evangelist", "2018-04-15"),
        (2, 5, 5, "Facial Recognition", "2018-04-22"),
        (2, 5, 6, "Artificial Emotional Intelligence", "2018-04-29"),
        (2, 5, 7, "Initial Coin Offering", "2018-05-06"),
        (2, 5, 8, "Fifty-One Percent", "2018-05-13"),
    ]
    return [Episode(*row) for row in rows]
| true | true |
f710c21983c49887ee85a144d3507038da6ab576 | 1,698 | py | Python | matrix/The_Vector_problems.py | tjctw/PythonNote | e93cebbc6bf9748966f761eff6a9ad7b12e9ece5 | [
"CC0-1.0"
] | null | null | null | matrix/The_Vector_problems.py | tjctw/PythonNote | e93cebbc6bf9748966f761eff6a9ad7b12e9ece5 | [
"CC0-1.0"
] | null | null | null | matrix/The_Vector_problems.py | tjctw/PythonNote | e93cebbc6bf9748966f761eff6a9ad7b12e9ece5 | [
"CC0-1.0"
] | null | null | null | # version code 80e56511a793+
# Please fill out this stencil and submit using the provided submission script.
# Some of the GF2 problems require use of the value GF2.one so the stencil imports it.
from GF2 import one
## 1: (Problem 2.14.1) Vector Addition Practice 1
#Please express each answer as a list of numbers
p1_v = [-1, 3]
p1_u = [0, 4]
p1_v_plus_u = [...]
p1_v_minus_u = [...]
p1_three_v_minus_two_u = [...]
## 2: (Problem 2.14.2) Vector Addition Practice 2
p2_u = [-1, 1, 1]
p2_v = [ 2, -1, 5]
p2_v_plus_u = [...]
p2_v_minus_u = [...]
p2_two_v_minus_u = [...]
p2_v_plus_two_u = [...]
## 3: (Problem 2.14.3) Vector Addition Practice 3
# Write your answer using GF2's one instead of the number 1
p3_vector_sum_1 = [...]
p3_vector_sum_2 = [...]
## 4: (Problem 2.14.4) GF2 Vector Addition A
# Please express your solution as a subset of the letters {'a','b','c','d','e','f'}.
# For example, {'a','b','c'} is the subset consisting of:
# a (1100000), b (0110000), and c (0011000).
# The answer should be an empty set, written set(), if the given vector u cannot
# be written as the sum of any subset of the vectors a, b, c, d, e, and f.
u_0010010 = ...
u_0100010 = ...
## 5: (Problem 2.14.5) GF2 Vector Addition B
# Use the same format as the previous problem
v_0010010 = ...
v_0100010 = ...
## 6: (Problem 2.14.6) Solving Linear Equations over GF(2)
#You should be able to solve this without using a computer.
x_gf2 = [...]
## 7: (Problem 2.14.7) Formulating Equations using Dot-Product
#Please provide each answer as a list of numbers
v1 = [...]
v2 = [...]
v3 = [...]
## 8: (Problem 2.14.9) Practice with Dot-Product
uv_a = ...
uv_b = ...
uv_c = ...
uv_d = ...
| 22.051948 | 86 | 0.651943 |
from GF2 import one
.]
p1_v_minus_u = [...]
p1_three_v_minus_two_u = [...]
_u = [...]
p2_v_minus_u = [...]
p2_two_v_minus_u = [...]
p2_v_plus_two_u = [...]
]
## 4: (Problem 2.14.4) GF2 Vector Addition A
# Please express your solution as a subset of the letters {'a','b','c','d','e','f'}.
# For example, {'a','b','c'} is the subset consisting of:
# a (1100000), b (0110000), and c (0011000).
# The answer should be an empty set, written set(), if the given vector u cannot
# be written as the sum of any subset of the vectors a, b, c, d, e, and f.
u_0010010 = ...
u_0100010 = ...
## 5: (Problem 2.14.5) GF2 Vector Addition B
# Use the same format as the previous problem
v_0010010 = ...
v_0100010 = ...
## 6: (Problem 2.14.6) Solving Linear Equations over GF(2)
#You should be able to solve this without using a computer.
x_gf2 = [...]
## 7: (Problem 2.14.7) Formulating Equations using Dot-Product
#Please provide each answer as a list of numbers
v1 = [...]
v2 = [...]
v3 = [...]
## 8: (Problem 2.14.9) Practice with Dot-Product
uv_a = ...
uv_b = ...
uv_c = ...
uv_d = ...
| true | true |
f710c21a88409fcff3b7068fee986b0767faa304 | 2,658 | py | Python | huggingface_ner/preprocess.py | dertilo/sequence-tagging | c7a264ee32fb4b9d2337c466d4c12552e7ddb799 | [
"MIT"
] | 1 | 2020-05-08T09:05:47.000Z | 2020-05-08T09:05:47.000Z | huggingface_ner/preprocess.py | TUB-NLP-OpenData/sequence-tagging | c7a264ee32fb4b9d2337c466d4c12552e7ddb799 | [
"MIT"
] | null | null | null | huggingface_ner/preprocess.py | TUB-NLP-OpenData/sequence-tagging | c7a264ee32fb4b9d2337c466d4c12552e7ddb799 | [
"MIT"
] | null | null | null | import argparse
import sys
from collections import Counter
from tqdm import tqdm
from transformers import AutoTokenizer
def read_and_preprocess(file: str):
    """Yield the lines of a CoNLL-style file, inserting empty lines so that
    no sentence exceeds ``max_len`` sub-word tokens.

    Relies on the module-level ``tokenizer`` and ``max_len`` set in the
    ``__main__`` block.  Lines whose first token tokenizes to zero sub-words
    (stray control characters such as \\x96/\\x95) are dropped entirely.
    """
    running_len = 0
    with open(file, "rt") as handle:
        for raw in handle:
            stripped = raw.rstrip()
            if not stripped:
                # Sentence boundary: reset the running count and pass it on.
                running_len = 0
                yield stripped
                continue
            n_subwords = len(tokenizer.tokenize(stripped.split()[0]))
            if n_subwords == 0:
                # Token is only odd control characters -> skip the line.
                continue
            if running_len + n_subwords > max_len:
                # Emit an artificial sentence break so the model input fits.
                yield ""
                running_len = 0
            running_len += n_subwords
            yield stripped
def build_args():
    """Parse command-line options for the preprocessing script.

    --model_name_or_path (required): model id or path for the tokenizer.
    --max_seq_length (default 128): budget for sub-word tokens per sentence.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    return parser.parse_args()
def get_label(s: str):
    """Return the tag of a "token TAG" CoNLL line, or None for any other shape."""
    fields = s.split(' ')
    return fields[1] if len(fields) == 2 else None
if __name__ == '__main__':
    args = build_args()
    # The tokenizer decides how many sub-word pieces each CoNLL token becomes.
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
    max_len = args.max_seq_length
    # Reserve room for the special tokens the model prepends/appends.
    max_len -= tokenizer.num_special_tokens_to_add()
    label_counter = Counter()
    def count_and_return(l:str):
        # Tally the tag of every "token TAG" line while passing the line through.
        label = get_label(l)
        if label is not None:
            label_counter.update({label:1})
        return l
    # Rewrite each <split>.txt.tmp as <split>.txt with over-long sentences split.
    for split_name in ['train','dev','test']:
        dataset = "%s.txt.tmp"%split_name
        with open("%s.txt"%split_name,'w') as f:
            f.writelines("%s\n"%count_and_return(l) for l in tqdm(read_and_preprocess(dataset)))
    # Emit one label per line, in first-seen order across all splits.
    with open('labels.txt','w') as f:
        f.writelines("%s\n"%l for l in label_counter.keys())
| 28.276596 | 100 | 0.598947 | import argparse
import sys
from collections import Counter
from tqdm import tqdm
from transformers import AutoTokenizer
def read_and_preprocess(file:str):
subword_len_counter = 0
with open(file, "rt") as f_p:
for line in f_p:
line = line.rstrip()
if not line:
yield line
subword_len_counter = 0
continue
token = line.split()[0]
current_subwords_len = len(tokenizer.tokenize(token))
if current_subwords_len == 0:
continue
if (subword_len_counter + current_subwords_len) > max_len:
yield ""
yield line
subword_len_counter = current_subwords_len
continue
subword_len_counter += current_subwords_len
yield line
def build_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
args = parser.parse_args()
return args
def get_label(s:str):
x = s.split(' ')
if len(x)==2:
label = x[1]
else:
label = None
return label
if __name__ == '__main__':
args = build_args()
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
max_len = args.max_seq_length
max_len -= tokenizer.num_special_tokens_to_add()
label_counter = Counter()
def count_and_return(l:str):
label = get_label(l)
if label is not None:
label_counter.update({label:1})
return l
for split_name in ['train','dev','test']:
dataset = "%s.txt.tmp"%split_name
with open("%s.txt"%split_name,'w') as f:
f.writelines("%s\n"%count_and_return(l) for l in tqdm(read_and_preprocess(dataset)))
with open('labels.txt','w') as f:
f.writelines("%s\n"%l for l in label_counter.keys())
| true | true |
f710c2c88539af8f36a8ca8272678561a8d6d0ba | 1,075 | py | Python | scripts/embeddings.py | serre-lab/brownUnconference | c51758f0bf695648832448c5c166e2a8dea14268 | [
"MIT"
] | null | null | null | scripts/embeddings.py | serre-lab/brownUnconference | c51758f0bf695648832448c5c166e2a8dea14268 | [
"MIT"
] | null | null | null | scripts/embeddings.py | serre-lab/brownUnconference | c51758f0bf695648832448c5c166e2a8dea14268 | [
"MIT"
] | null | null | null | import argparse
import csv
import torch
import transformers
def parse_arguments():
    """Parse command-line arguments; expects one positional ``papers`` CSV path."""
    cli = argparse.ArgumentParser(description="MiniConf Portal Command Line")
    cli.add_argument("papers", default=False, help="papers file to parse")
    return cli.parse_args()
if __name__ == "__main__":
    args = parse_arguments()
    tokenizer = transformers.AutoTokenizer.from_pretrained("deepset/sentence_bert")
    model = transformers.AutoModel.from_pretrained("deepset/sentence_bert")
    model.eval()
    with open(args.papers, "r", encoding='utf-8') as f:
        abstracts = list(csv.DictReader(f))
    # One 768-dimensional sentence-BERT embedding per paper abstract.
    all_abstracts = torch.zeros(len(abstracts), 768)
    with torch.no_grad():
        for i, row in enumerate(abstracts):
            # Fixed: the 512-token truncation must apply to the token list,
            # not to the singleton batch list wrapped around it.  The old
            # "[tokenizer.encode(...)][:512]" sliced a one-element list (a
            # no-op), so long abstracts overflowed the model's max length.
            input_ids = torch.tensor([tokenizer.encode(row["abstract"])[:512]])
            all_hidden_states, _ = model(input_ids)[-2:]
            # Pool to a single fixed-size vector (mean over the first two
            # dimensions of the hidden states — presumably batch then tokens;
            # confirm against the model's output shape).
            all_abstracts[i] = all_hidden_states.mean(0).mean(0)
            print(i)
            print(row['author'])
    torch.save(all_abstracts, "embeddings.torch")
| 31.617647 | 83 | 0.661395 | import argparse
import csv
import torch
import transformers
def parse_arguments():
parser = argparse.ArgumentParser(description="MiniConf Portal Command Line")
parser.add_argument("papers", default=False, help="papers file to parse")
return parser.parse_args()
if __name__ == "__main__":
args = parse_arguments()
tokenizer = transformers.AutoTokenizer.from_pretrained("deepset/sentence_bert")
model = transformers.AutoModel.from_pretrained("deepset/sentence_bert")
model.eval()
with open(args.papers, "r",encoding='utf-8') as f:
abstracts = list(csv.DictReader(f))
all_abstracts = torch.zeros(len(abstracts), 768)
with torch.no_grad():
for i, row in enumerate(abstracts):
input_ids = torch.tensor([tokenizer.encode(row["abstract"])][:512])
all_hidden_states, _ = model(input_ids)[-2:]
all_abstracts[i] = all_hidden_states.mean(0).mean(0)
print(i)
print(row['author'])
torch.save(all_abstracts, "embeddings.torch")
| true | true |
f710c311e6b5c5805939067fdc6c52f6401f15f1 | 1,319 | py | Python | python-module/setup.py | SSICLOPS/cppl | 265514bc461352b7b5bc58fd7482328601029e4a | [
"Apache-2.0"
] | 1 | 2018-06-02T11:50:06.000Z | 2018-06-02T11:50:06.000Z | python-module/setup.py | SSICLOPS/cppl | 265514bc461352b7b5bc58fd7482328601029e4a | [
"Apache-2.0"
] | 1 | 2018-01-17T04:16:29.000Z | 2018-01-30T09:01:44.000Z | python-module/setup.py | SSICLOPS/cppl | 265514bc461352b7b5bc58fd7482328601029e4a | [
"Apache-2.0"
] | 1 | 2018-11-18T20:31:54.000Z | 2018-11-18T20:31:54.000Z | from distutils.core import setup, Extension
import sys

# Version of the cppl C++/Python bridge package.
major_version = '4'
minor_version = '0'

# Native extension wrapping libcppl, which is built in ../policy-decision-point.
cpplmodule = Extension(
    'cppl_cpp_python_bridge',
    define_macros=[('MAJOR_VERSION', major_version),
                   ('MINOR_VERSION', minor_version)],
    include_dirs=[],
    libraries=['cppl'],
    library_dirs=['../policy-decision-point'],
    sources=['cpplmodule.cc'],
    extra_compile_args=['-std=c++11', '-Wall'],
    # extra_objects are included _before_ library_dirs and libraries
    extra_objects=[],
    # extra_link_args are included _after_ library_dirs and libraries
    extra_link_args=[],
)

setup(
    name='cppl',
    version='.'.join((major_version, minor_version)),
    description='A C++ - Python bridge for CPPL',
    author='Jens Hiller',
    author_email='jens.hiller@comsys.rwth-aachen.de',
    url='',
    long_description='''This package provides a C++-Python bridge for cppl (compact privacy policy language) functionality''',
    py_modules=['cppl'],
    ext_modules=[cpplmodule],
)
import sys
major_version = '4'
minor_version = '0'
cpplmodule = Extension('cppl_cpp_python_bridge',
define_macros = [('MAJOR_VERSION', major_version),
('MINOR_VERSION', minor_version)],
include_dirs = [],
libraries = ['cppl'],
library_dirs = ['../policy-decision-point'],
sources = ['cpplmodule.cc'],
extra_compile_args = ['-std=c++11', '-Wall',],
extra_objects = [],
extra_link_args = [])
setup (name = 'cppl',
version = major_version + '.' + minor_version,
description = 'A C++ - Python bridge for CPPL',
author = 'Jens Hiller',
author_email = 'jens.hiller@comsys.rwth-aachen.de',
url = '',
long_description = '''This package provides a C++-Python bridge for cppl (compact privacy policy language) functionality''',
py_modules = ['cppl'],
ext_modules = [cpplmodule])
| true | true |
f710c366338ee89946b9f99f27d37a342ac49eca | 893 | py | Python | config.py | devseme/Blogs-App | 06e4aed7cfa7b4985e1d11e48c500305d69ef9cc | [
"MIT"
] | null | null | null | config.py | devseme/Blogs-App | 06e4aed7cfa7b4985e1d11e48c500305d69ef9cc | [
"MIT"
] | null | null | null | config.py | devseme/Blogs-App | 06e4aed7cfa7b4985e1d11e48c500305d69ef9cc | [
"MIT"
] | null | null | null | import os
class Config:
    """Base application configuration shared by all environments.

    Key names follow common Flask-extension conventions (SQLAlchemy, Mail,
    Uploads, SimpleMDE).  Secrets and mail credentials are read from
    environment variables so they never live in source control.
    """
    SECRET_KEY = os.environ.get('SECRET_KEY')
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Destination directory for uploaded photos.
    UPLOADED_PHOTOS_DEST = 'app/static/photos'
    # email configurations (Gmail SMTP over STARTTLS)
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
    MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
    # simple mde configurations
    SIMPLEMDE_JS_IIFE = True
    SIMPLEMDE_USE_CDN = True
    @staticmethod
    def init_app(app):
        """Hook for environment-specific app initialisation; base class is a no-op."""
        pass
class TestConfig(Config):
    """Test-environment configuration; currently identical to the base Config."""
    pass
class ProdConfig(Config):
    """Production configuration.

    The database URL is supplied via the DATABASE_URL environment variable
    (typically set by the hosting platform).  The redundant trailing ``pass``
    after the assignment was removed.
    """
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class DevConfig(Config):
    """Development configuration: local Postgres database, debug mode on."""
    # NOTE(review): credentials are hard-coded here; acceptable for a local
    # dev database, but confirm this never points at a shared server.
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:moringa@localhost/db'
    DEBUG = True
# Map of environment name -> configuration class, used to select settings
# at application start-up.
config_options = {
'development': DevConfig,
'production': ProdConfig,
'test': TestConfig
}
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_TRACK_MODIFICATIONS = False
UPLOADED_PHOTOS_DEST = 'app/static/photos'
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
SIMPLEMDE_JS_IIFE = True
SIMPLEMDE_USE_CDN = True
@staticmethod
def init_app(app):
pass
class TestConfig(Config):
pass
class ProdConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
pass
class DevConfig(Config):
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:moringa@localhost/db'
DEBUG = True
config_options = {
'development': DevConfig,
'production': ProdConfig,
'test': TestConfig
} | true | true |
f710c3c42e825b9a3c844e48547f214cc48ab052 | 5,249 | py | Python | lambda/py/lambda_upload/ask_sdk_model/events/skillevents/skill_enabled_request.py | frivas/alexa-mixed-polly | bf0fde9005a66f3d6f0193799eacef934d166de7 | [
"W3C"
] | null | null | null | lambda/py/lambda_upload/ask_sdk_model/events/skillevents/skill_enabled_request.py | frivas/alexa-mixed-polly | bf0fde9005a66f3d6f0193799eacef934d166de7 | [
"W3C"
] | null | null | null | lambda/py/lambda_upload/ask_sdk_model/events/skillevents/skill_enabled_request.py | frivas/alexa-mixed-polly | bf0fde9005a66f3d6f0193799eacef934d166de7 | [
"W3C"
] | null | null | null | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.request import Request
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
# NOTE(review): generated Alexa SDK model class ("AlexaSkillEvent.SkillEnabled"
# request); layout kept as-is so diffs against regenerated code stay small.
class SkillEnabledRequest(Request):
    """
    :param request_id: Represents the unique identifier for the specific request.
    :type request_id: (optional) str
    :param timestamp: Provides the date and time when Alexa sent the request as an ISO 8601 formatted string. Used to verify the request when hosting your skill as a web service.
    :type timestamp: (optional) datetime
    :param locale: A string indicating the user’s locale. For example: en-US. This value is only provided with certain request types.
    :type locale: (optional) str
    :param event_creation_time:
    :type event_creation_time: (optional) datetime
    :param event_publishing_time:
    :type event_publishing_time: (optional) datetime
    """
    # Attribute name -> type string consumed by the SDK's generic deserializer.
    deserialized_types = {
        'object_type': 'str',
        'request_id': 'str',
        'timestamp': 'datetime',
        'locale': 'str',
        'event_creation_time': 'datetime',
        'event_publishing_time': 'datetime'
    }  # type: Dict
    # Python attribute -> JSON property name used during (de)serialization.
    attribute_map = {
        'object_type': 'type',
        'request_id': 'requestId',
        'timestamp': 'timestamp',
        'locale': 'locale',
        'event_creation_time': 'eventCreationTime',
        'event_publishing_time': 'eventPublishingTime'
    }  # type: Dict
    def __init__(self, request_id=None, timestamp=None, locale=None, event_creation_time=None, event_publishing_time=None):
        # type: (Optional[str], Optional[datetime], Optional[str], Optional[datetime], Optional[datetime]) -> None
        """
        :param request_id: Represents the unique identifier for the specific request.
        :type request_id: (optional) str
        :param timestamp: Provides the date and time when Alexa sent the request as an ISO 8601 formatted string. Used to verify the request when hosting your skill as a web service.
        :type timestamp: (optional) datetime
        :param locale: A string indicating the user’s locale. For example: en-US. This value is only provided with certain request types.
        :type locale: (optional) str
        :param event_creation_time:
        :type event_creation_time: (optional) datetime
        :param event_publishing_time:
        :type event_publishing_time: (optional) datetime
        """
        # Discriminator tells the polymorphic deserializer which Request
        # subtype this payload represents.
        self.__discriminator_value = "AlexaSkillEvent.SkillEnabled"  # type: str
        self.object_type = self.__discriminator_value
        super(SkillEnabledRequest, self).__init__(object_type=self.__discriminator_value, request_id=request_id, timestamp=timestamp, locale=locale)
        self.event_creation_time = event_creation_time
        self.event_publishing_time = event_publishing_time
    def to_dict(self):
        # type: () -> Dict[str, object]
        """Returns the model properties as a dict"""
        result = {}  # type: Dict
        # Recursively convert nested models, Enums, lists and dicts so the
        # result contains only plain JSON-serializable values.
        for attr, _ in six.iteritems(self.deserialized_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else
                    x.value if isinstance(x, Enum) else x,
                    value
                ))
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else
                    (item[0], item[1].value)
                    if isinstance(item[1], Enum) else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        # type: () -> str
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are equal"""
        if not isinstance(other, SkillEnabledRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are not equal"""
        return not self == other
| 38.595588 | 182 | 0.637074 |
import pprint
import re
import six
import typing
from enum import Enum
from ask_sdk_model.request import Request
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
class SkillEnabledRequest(Request):
deserialized_types = {
'object_type': 'str',
'request_id': 'str',
'timestamp': 'datetime',
'locale': 'str',
'event_creation_time': 'datetime',
'event_publishing_time': 'datetime'
}
attribute_map = {
'object_type': 'type',
'request_id': 'requestId',
'timestamp': 'timestamp',
'locale': 'locale',
'event_creation_time': 'eventCreationTime',
'event_publishing_time': 'eventPublishingTime'
}
def __init__(self, request_id=None, timestamp=None, locale=None, event_creation_time=None, event_publishing_time=None):
self.__discriminator_value = "AlexaSkillEvent.SkillEnabled"
self.object_type = self.__discriminator_value
super(SkillEnabledRequest, self).__init__(object_type=self.__discriminator_value, request_id=request_id, timestamp=timestamp, locale=locale)
self.event_creation_time = event_creation_time
self.event_publishing_time = event_publishing_time
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, SkillEnabledRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f710c4fea0ba46f981807f2668cecb5daab0e12a | 4,128 | py | Python | examples/algorithms/groupDRO.py | KeAWang/wilds | 3b808a84bd477d7877b77675eec2953128a87033 | [
"MIT"
] | 355 | 2020-12-12T03:29:28.000Z | 2022-03-31T22:47:29.000Z | examples/algorithms/groupDRO.py | KeAWang/wilds | 3b808a84bd477d7877b77675eec2953128a87033 | [
"MIT"
] | 34 | 2020-12-24T11:50:51.000Z | 2022-03-18T00:06:38.000Z | examples/algorithms/groupDRO.py | KeAWang/wilds | 3b808a84bd477d7877b77675eec2953128a87033 | [
"MIT"
] | 87 | 2020-12-16T08:13:21.000Z | 2022-03-24T17:00:17.000Z | import torch
from algorithms.single_model_algorithm import SingleModelAlgorithm
from models.initializer import initialize_model
class GroupDRO(SingleModelAlgorithm):
    """
    Group distributionally robust optimization.
    Original paper:
        @inproceedings{sagawa2019distributionally,
          title={Distributionally robust neural networks for group shifts: On the importance of regularization for worst-case generalization},
          author={Sagawa, Shiori and Koh, Pang Wei and Hashimoto, Tatsunori B and Liang, Percy},
          booktitle={International Conference on Learning Representations},
          year={2019}
        }
    """
    def __init__(self, config, d_out, grouper, loss, metric, n_train_steps, is_group_in_train):
        # check config: group DRO needs the loader to sample groups uniformly
        assert config.uniform_over_groups
        # initialize model
        model = initialize_model(config, d_out).to(config.device)
        # initialize module
        super().__init__(
            config=config,
            model=model,
            grouper=grouper,
            loss=loss,
            metric=metric,
            n_train_steps=n_train_steps,
        )
        # additional logging
        self.logged_fields.append('group_weight')
        # step size for the exponentiated-gradient update on group weights
        self.group_weights_step_size = config.group_dro_step_size
        # initialize adversarial weights: uniform over groups present in
        # training, zero for groups that never appear in train
        self.group_weights = torch.zeros(grouper.n_groups)
        self.group_weights[is_group_in_train] = 1
        self.group_weights = self.group_weights/self.group_weights.sum()
        # NOTE(review): self.device is presumably set by SingleModelAlgorithm
        # from config.device — confirm in the parent class.
        self.group_weights = self.group_weights.to(self.device)
    def process_batch(self, batch):
        """
        A helper function for update() and evaluate() that processes the batch
        Args:
            - batch (tuple of Tensors): a batch of data yielded by data loaders
        Output:
            - results (dictionary): information about the batch
                - g (Tensor)
                - y_true (Tensor)
                - metadata (Tensor)
                - loss (Tensor)
                - metrics (Tensor)
              all Tensors are of size (batch_size,)
        """
        results = super().process_batch(batch)
        # expose the current adversarial weights so they get logged
        results['group_weight'] = self.group_weights
        return results
    def objective(self, results):
        """
        Takes an output of SingleModelAlgorithm.process_batch() and computes the
        optimized objective. For group DRO, the objective is the weighted average
        of losses, where groups have weights groupDRO.group_weights.
        Args:
            - results (dictionary): output of SingleModelAlgorithm.process_batch()
        Output:
            - objective (Tensor): optimized objective; size (1,).
        """
        group_losses, _, _ = self.loss.compute_group_wise(
            results['y_pred'],
            results['y_true'],
            results['g'],
            self.grouper.n_groups,
            return_dict=False)
        # weighted average of per-group losses under the adversarial weights
        return group_losses @ self.group_weights
    def _update(self, results):
        """
        Process the batch, update the log, and update the model, group weights, and scheduler.
        Args:
            - batch (tuple of Tensors): a batch of data yielded by data loaders
        Output:
            - results (dictionary): information about the batch, such as:
                - g (Tensor)
                - y_true (Tensor)
                - metadata (Tensor)
                - loss (Tensor)
                - metrics (Tensor)
                - objective (float)
        """
        # compute group losses
        group_losses, _, _ = self.loss.compute_group_wise(
            results['y_pred'],
            results['y_true'],
            results['g'],
            self.grouper.n_groups,
            return_dict=False)
        # update group weights: exponentiated-gradient ascent — groups with
        # larger loss are exponentially up-weighted, then renormalized to a
        # probability distribution (.data detaches losses from autograd)
        self.group_weights = self.group_weights * torch.exp(self.group_weights_step_size*group_losses.data)
        self.group_weights = (self.group_weights/(self.group_weights.sum()))
        # save updated group weights
        results['group_weight'] = self.group_weights
        # update model
        super()._update(results)
| 39.314286 | 142 | 0.610707 | import torch
from algorithms.single_model_algorithm import SingleModelAlgorithm
from models.initializer import initialize_model
class GroupDRO(SingleModelAlgorithm):
def __init__(self, config, d_out, grouper, loss, metric, n_train_steps, is_group_in_train):
assert config.uniform_over_groups
model = initialize_model(config, d_out).to(config.device)
super().__init__(
config=config,
model=model,
grouper=grouper,
loss=loss,
metric=metric,
n_train_steps=n_train_steps,
)
self.logged_fields.append('group_weight')
self.group_weights_step_size = config.group_dro_step_size
self.group_weights = torch.zeros(grouper.n_groups)
self.group_weights[is_group_in_train] = 1
self.group_weights = self.group_weights/self.group_weights.sum()
self.group_weights = self.group_weights.to(self.device)
def process_batch(self, batch):
results = super().process_batch(batch)
results['group_weight'] = self.group_weights
return results
def objective(self, results):
group_losses, _, _ = self.loss.compute_group_wise(
results['y_pred'],
results['y_true'],
results['g'],
self.grouper.n_groups,
return_dict=False)
return group_losses @ self.group_weights
def _update(self, results):
group_losses, _, _ = self.loss.compute_group_wise(
results['y_pred'],
results['y_true'],
results['g'],
self.grouper.n_groups,
return_dict=False)
self.group_weights = self.group_weights * torch.exp(self.group_weights_step_size*group_losses.data)
self.group_weights = (self.group_weights/(self.group_weights.sum()))
results['group_weight'] = self.group_weights
super()._update(results)
| true | true |
f710c6110376f8c01aecd9ca8aebf4d7950f3199 | 8,006 | py | Python | DATA/Labeling.py | IewNixIl/graduation_project_under | 67d0345208511bb06c35c3453227b2fa4ebef4a3 | [
"MIT"
] | null | null | null | DATA/Labeling.py | IewNixIl/graduation_project_under | 67d0345208511bb06c35c3453227b2fa4ebef4a3 | [
"MIT"
] | null | null | null | DATA/Labeling.py | IewNixIl/graduation_project_under | 67d0345208511bb06c35c3453227b2fa4ebef4a3 | [
"MIT"
] | null | null | null | import numpy
from matplotlib import pyplot
import gdal
from skimage import io,exposure
from skimage.segmentation import slic,mark_boundaries
import os
from PIL import Image
import shelve
import sys
sys.path.append('..')
from Config import config
def seg(path, n_segments=500, compactness=20):
    """Read a multi-band tif and compute a SLIC over-segmentation.

    Returns (segment label map, boundary overlay image, 8-bit RGB preview,
    water-index image scaled to 0..255).  Channels [3, 2, 1, 7] are pulled
    from the tif; the first three form the RGB preview and the index uses
    channels 7 and 2 (presumably NIR vs. green — confirm the band order).
    """
    bands = io.imread(path)[:, :, [3, 2, 1, 7]]
    rgb = bands[:, :, :3]
    # Min-max normalise to 0..255, then gamma-brighten for display.
    rgb = (rgb - rgb.min()) / (rgb.max() - rgb.min())
    rgb = (rgb * 255).astype(numpy.uint8)
    rgb = exposure.adjust_gamma(rgb, 0.5)
    segment = slic(rgb, n_segments=n_segments, compactness=compactness,
                   enforce_connectivity=True)
    out = mark_boundaries(rgb, segment, color=[0, 0, 0.2])
    # Normalised-difference index from the two extra channels, rescaled.
    a, b = bands[:, :, 3], bands[:, :, 1]
    wdi = (a - b) / (a + b)
    wdi = (wdi / wdi.max()) * 255
    return segment, out, rgb, wdi
def getname(path, namelist):
    """Build the path of an s2 (Sentinel-2) patch tif from an index triple.

    namelist is [season_index, scene_id, patch_id] with season_index
    0=spring, 1=summer, 2=fall, 3=winter (the inverse of transform()).

    Fixed: an unknown season index used to leave ``season`` unbound and
    crash with a confusing NameError; it now raises ValueError.
    """
    seasons = {
        0: 'ROIs1158_spring',
        1: 'ROIs1868_summer',
        2: 'ROIs1970_fall',
        3: 'ROIs2017_winter',
    }
    try:
        season = seasons[namelist[0]]
    except KeyError:
        raise ValueError('unknown season index: %r' % (namelist[0],))
    scene = str(namelist[1])
    patch = str(namelist[2])
    # Windows-style separators, matching the rest of this module.
    return (path + '\\' + season + '\\s2_' + scene + '\\'
            + season + '_s2_' + scene + '_p' + patch + '.tif')
def transform(name):
    """Inverse of getname(): parse a patch filename into [season, scene, patch].

    e.g. 'ROIs1868_summer_s2_12_p34.tif' -> [1, 12, 34].

    Fixed: a filename containing no season keyword used to crash with a
    NameError on the unbound ``season`` variable; it now raises ValueError.
    """
    keywords = {'spring': 0, 'summer': 1, 'fall': 2, 'winter': 3}
    for keyword, season in keywords.items():
        if keyword in name:
            break
    else:
        raise ValueError('no season keyword in file name: %r' % (name,))
    parts = name.split('_')
    scene = int(parts[3])
    patch = int(parts[4].split('.')[0][1:])  # 'p34.tif' -> 34
    return [season, scene, patch]
class UI:
    def __init__(self,mode='normal',init=0):
        '''mode='normal': label images listed in the saved 'test' division.
        mode='review': only show images that already have a saved label file.
        init: index of the image to start from.
        '''
        self.mode=mode
        self.path_label=config.path_labels
        if self.mode=='normal':
            # Image list comes from the 'test' entry of the division shelf.
            with shelve.open(config.path_devision) as f:
                self.imglist=f['test']
        else:
            # Review mode: iterate over the images that already have labels.
            self.imglist=os.listdir(config.path_labels)
        self.n=init
        self.ifpress=False
        self.ifloadlabel=False
        fig=pyplot.figure()
        # Drop matplotlib's default key bindings so they don't clash with ours.
        fig.canvas.mpl_disconnect(fig.canvas.manager.key_press_handler_id)
        fig.canvas.mpl_connect('key_press_event',self.on_key_press)
        fig.canvas.mpl_connect('button_press_event',self.on_button_press)
        fig.canvas.mpl_connect('motion_notify_event',self.on_button_move)
        fig.canvas.mpl_connect('button_release_event',self.on_button_release)
        self.fig=fig
        # Three stacked panels on the left (3x2 grid slots 1, 3, 5) and one
        # large panel filling the right half.
        self.ax1=fig.add_subplot(3,2,1)
        self.ax2=fig.add_subplot(3,2,3)
        self.ax4=fig.add_subplot(3,2,5)
        self.ax3=fig.add_subplot(1,2,2)
        # Maximise the window ('zoomed' is a Tk window state — assumes the
        # TkAgg backend; confirm if another backend is used).
        pyplot.get_current_fig_manager().window.state('zoomed')
        #self.ax2=fig.add_subplot(1,2,2)
        self.valuelist=[]
        self.label=numpy.zeros((256,256))
        self.ifloadlabel=True
        self.draw()
        pyplot.show()
def on_key_press(self,event):
if event.key=='a' or event.key=='left':
self.n-=1
print(self.n)
self.valuelist=[]
self.label=numpy.zeros(self.segment.shape)
self.ifloadlabel=True
self.draw()
if event.key=='d' or event.key=='right':
if self.n+1>=len(self.imglist):
return
self.n+=1
print(self.n)
self.valuelist=[]
self.label=numpy.zeros(self.segment.shape)
self.ifloadlabel=True
self.draw()
if event.key=='e' or event.key=='enter':
self.save_label()
if event.key=='Q':
f=numpy.unique(self.segment).tolist()
for i in f:
if i not in self.valuelist:
self.valuelist.append(i)
for i in range(len(self.valuelist)):
if i==0:
flag=(self.segment==self.valuelist[i])
else:
flag=flag+(self.segment==self.valuelist[i])
self.label=numpy.where(flag,1.0,0)
self.draw()
def on_button_press(self,event):
try:
r=int(event.ydata)
c=int(event.xdata)
except TypeError:
return
value=self.segment[r,c]
if event.button==1:
if value not in self.valuelist:
self.ifpress=True
self.valuelist.append(value)
elif event.button==3:
if value in self.valuelist:
self.ifpress=True
self.valuelist.remove(value)
def on_button_move(self,event):
if not self.ifpress:
return
try:
r=int(event.ydata)
c=int(event.xdata)
except TypeError:
return
value=self.segment[r,c]
if event.button==1:
if value not in self.valuelist:
self.valuelist.append(value)
elif event.button==3:
if value in self.valuelist:
self.valuelist.remove(value)
def on_button_release(self,event):
if not self.ifpress:
return
self.ifpress=False
for i in range(len(self.valuelist)):
if i==0:
flag=(self.segment==self.valuelist[i])
else:
flag=flag+(self.segment==self.valuelist[i])
self.label=numpy.where(flag,1,0).astype(int)
self.draw()
def draw(self):
if self.mode=='normal':
segment,out,img,wdi=seg(getname(config.path,self.imglist[self.n]))
else:
segment,out,img,wdi=seg(getname(config.path,transform(self.imglist[self.n])))
self.segment=segment
if self.ifloadlabel:
self.read_label()
self.ifloadlabel=False
#self.ax1.imshow(out)
t=numpy.where(self.label==1,0.5,out[:,:,2])
out[:,:,2]=t
self.ax1.cla()
self.ax2.cla()
self.ax3.cla()
self.ax4.cla()
self.ax1.imshow(img)
self.ax2.imshow(wdi,cmap='gray')
self.ax3.imshow(out)
self.ax4.imshow(self.label,cmap='gray')
d=os.listdir(config.path_labels)
self.ax3.set_title(str(len(d))+'/'+str(self.n+1))
self.fig.canvas.draw_idle()
def save_label(self):
label=self.label*255
label=label.astype(numpy.uint8)
label=Image.fromarray(label)
if self.mode=='normal':
name=getname(config.path,self.imglist[self.n]).split('\\')[-1]
name=name.split('_')
name[2]='label'
name='_'.join(name)
else:
name=self.imglist[self.n]
label.save(self.path_label+'\\'+name)
def read_label(self):
dirlist=os.listdir(self.path_label)
if self.mode=='normal':
name=getname(config.path,self.imglist[self.n]).split('\\')[-1]
name=name.split('_')
name[2]='label'
name='_'.join(name)
else:
name=self.imglist[self.n]
if name in dirlist:
self.label=numpy.array(Image.open(self.path_label+'\\'+name))/255
self.label=self.label.astype(int)
self.valuelist=list(numpy.unique(numpy.where(self.label==1,self.segment,-2)))
self.valuelist.remove(-2)
def statistic():
    """Print per-season label counts, then their fractions of all labels."""
    filenames = os.listdir(config.path_labels)
    seasons = ('spring', 'summer', 'fall', 'winter')
    counts = numpy.array(
        [sum(1 for filename in filenames if season in filename)
         for season in seasons])
    print(counts)
    # Fraction of labelled patches belonging to each season.
    counts = counts / len(filenames)
    print(counts)
if __name__=='__main__':
    # Launch the labelling UI on the 'normal' (test) split, starting at
    # image index 100; swap in statistic() to print per-season label counts.
    test=UI(mode='normal',init=100)
    #statistic()
| 28.695341 | 118 | 0.533475 | import numpy
from matplotlib import pyplot
import gdal
from skimage import io,exposure
from skimage.segmentation import slic,mark_boundaries
import os
from PIL import Image
import shelve
import sys
sys.path.append('..')
from Config import config
def seg(path,n_segments=500, compactness=20):
i=io.imread(path)[:,:,[3,2,1,7]]
img=i[:,:,:3]
img=(img-img.min())/(img.max()-img.min())
img=img*255
img=img.astype(numpy.uint8)
img=exposure.adjust_gamma(img,0.5)
segment=slic(img,n_segments=n_segments, compactness=compactness,enforce_connectivity=True)
out=mark_boundaries(img,segment,color=[0,0,0.2])
wdi=(i[:,:,3]-i[:,:,1])/(i[:,:,3]+i[:,:,1])
wdi=(wdi/wdi.max())*255
return segment,out,img,wdi
def getname(path,namelist):
if namelist[0]==0:
season='ROIs1158_spring'
elif namelist[0]==1:
season='ROIs1868_summer'
elif namelist[0]==2:
season='ROIs1970_fall'
elif namelist[0]==3:
season='ROIs2017_winter'
path_s2=path+'\\'+season+'\\s2_'+str(namelist[1])+'\\'+season+'_s2_'+str(namelist[1])+'_p'+str(namelist[2])+'.tif'
return path_s2
def transform(name):
if 'spring' in name:
season=0
elif 'summer' in name:
season=1
elif 'fall' in name:
season=2
elif 'winter' in name:
season=3
l=[]
l.append(season)
l.append(int(name.split('_')[3]))
l.append(int(name.split('_')[4].split('.')[0][1:]))
return l
class UI:
def __init__(self,mode='normal',init=0):
self.mode=mode
self.path_label=config.path_labels
if self.mode=='normal':
with shelve.open(config.path_devision) as f:
self.imglist=f['test']
else:
self.imglist=os.listdir(config.path_labels)
self.n=init
self.ifpress=False
self.ifloadlabel=False
fig=pyplot.figure()
fig.canvas.mpl_disconnect(fig.canvas.manager.key_press_handler_id)
fig.canvas.mpl_connect('key_press_event',self.on_key_press)
fig.canvas.mpl_connect('button_press_event',self.on_button_press)
fig.canvas.mpl_connect('motion_notify_event',self.on_button_move)
fig.canvas.mpl_connect('button_release_event',self.on_button_release)
self.fig=fig
self.ax1=fig.add_subplot(3,2,1)
self.ax2=fig.add_subplot(3,2,3)
self.ax4=fig.add_subplot(3,2,5)
self.ax3=fig.add_subplot(1,2,2)
pyplot.get_current_fig_manager().window.state('zoomed')
self.valuelist=[]
self.label=numpy.zeros((256,256))
self.ifloadlabel=True
self.draw()
pyplot.show()
def on_key_press(self,event):
if event.key=='a' or event.key=='left':
self.n-=1
print(self.n)
self.valuelist=[]
self.label=numpy.zeros(self.segment.shape)
self.ifloadlabel=True
self.draw()
if event.key=='d' or event.key=='right':
if self.n+1>=len(self.imglist):
return
self.n+=1
print(self.n)
self.valuelist=[]
self.label=numpy.zeros(self.segment.shape)
self.ifloadlabel=True
self.draw()
if event.key=='e' or event.key=='enter':
self.save_label()
if event.key=='Q':
f=numpy.unique(self.segment).tolist()
for i in f:
if i not in self.valuelist:
self.valuelist.append(i)
for i in range(len(self.valuelist)):
if i==0:
flag=(self.segment==self.valuelist[i])
else:
flag=flag+(self.segment==self.valuelist[i])
self.label=numpy.where(flag,1.0,0)
self.draw()
def on_button_press(self,event):
try:
r=int(event.ydata)
c=int(event.xdata)
except TypeError:
return
value=self.segment[r,c]
if event.button==1:
if value not in self.valuelist:
self.ifpress=True
self.valuelist.append(value)
elif event.button==3:
if value in self.valuelist:
self.ifpress=True
self.valuelist.remove(value)
def on_button_move(self,event):
if not self.ifpress:
return
try:
r=int(event.ydata)
c=int(event.xdata)
except TypeError:
return
value=self.segment[r,c]
if event.button==1:
if value not in self.valuelist:
self.valuelist.append(value)
elif event.button==3:
if value in self.valuelist:
self.valuelist.remove(value)
def on_button_release(self,event):
if not self.ifpress:
return
self.ifpress=False
for i in range(len(self.valuelist)):
if i==0:
flag=(self.segment==self.valuelist[i])
else:
flag=flag+(self.segment==self.valuelist[i])
self.label=numpy.where(flag,1,0).astype(int)
self.draw()
def draw(self):
if self.mode=='normal':
segment,out,img,wdi=seg(getname(config.path,self.imglist[self.n]))
else:
segment,out,img,wdi=seg(getname(config.path,transform(self.imglist[self.n])))
self.segment=segment
if self.ifloadlabel:
self.read_label()
self.ifloadlabel=False
t=numpy.where(self.label==1,0.5,out[:,:,2])
out[:,:,2]=t
self.ax1.cla()
self.ax2.cla()
self.ax3.cla()
self.ax4.cla()
self.ax1.imshow(img)
self.ax2.imshow(wdi,cmap='gray')
self.ax3.imshow(out)
self.ax4.imshow(self.label,cmap='gray')
d=os.listdir(config.path_labels)
self.ax3.set_title(str(len(d))+'/'+str(self.n+1))
self.fig.canvas.draw_idle()
def save_label(self):
label=self.label*255
label=label.astype(numpy.uint8)
label=Image.fromarray(label)
if self.mode=='normal':
name=getname(config.path,self.imglist[self.n]).split('\\')[-1]
name=name.split('_')
name[2]='label'
name='_'.join(name)
else:
name=self.imglist[self.n]
label.save(self.path_label+'\\'+name)
def read_label(self):
dirlist=os.listdir(self.path_label)
if self.mode=='normal':
name=getname(config.path,self.imglist[self.n]).split('\\')[-1]
name=name.split('_')
name[2]='label'
name='_'.join(name)
else:
name=self.imglist[self.n]
if name in dirlist:
self.label=numpy.array(Image.open(self.path_label+'\\'+name))/255
self.label=self.label.astype(int)
self.valuelist=list(numpy.unique(numpy.where(self.label==1,self.segment,-2)))
self.valuelist.remove(-2)
def statistic():
d=os.listdir(config.path_labels)
n=numpy.array([0,0,0,0])
for i in d:
if 'spring' in i:
n[0]=n[0]+1
if 'summer' in i:
n[1]=n[1]+1
if 'fall' in i:
n[2]=n[2]+1
if 'winter' in i:
n[3]=n[3]+1
print(n)
n=n/len(d)
print(n)
if __name__=='__main__':
test=UI(mode='normal',init=100)
| true | true |
f710c7c26ca691b1517407f9a1238bc3759d8852 | 312 | py | Python | tests/urls.py | zonnepanelendelen/django-fsm-log | 28e2469693425efbeaf604f40db836977fbb68ff | [
"MIT"
] | 140 | 2015-01-07T19:12:49.000Z | 2021-08-14T14:17:12.000Z | tests/urls.py | zonnepanelendelen/django-fsm-log | 28e2469693425efbeaf604f40db836977fbb68ff | [
"MIT"
] | 81 | 2015-02-27T13:07:29.000Z | 2022-01-14T11:26:58.000Z | tests/urls.py | kcrebound/django-fsm-log | b2acc23d2a3398f07deacaf0911a763fbc6f4a75 | [
"MIT"
] | 73 | 2015-01-07T17:07:21.000Z | 2021-12-10T07:34:54.000Z | try:
    # Django >= 2.0: path()-based routing.
    from django.urls import path
    from django.contrib import admin
    # NOTE(review): this matches '/admin' (no trailing slash) while the
    # legacy branch below matches '/admin/' — confirm the tests rely on
    # neither exact form.
    urlpatterns = [path('admin', admin.site.urls)]
except ImportError:
    # django < 2.0: fall back to regex-based url() routing.
    from django.conf.urls import include, url
    from django.contrib import admin
    urlpatterns = [url(r'^admin/', include(admin.site.urls))]
| 26 | 61 | 0.689103 | try:
from django.urls import path
from django.contrib import admin
urlpatterns = [path('admin', admin.site.urls)]
except ImportError:
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [url(r'^admin/', include(admin.site.urls))]
| true | true |
f710c807738df97a99495e8269c1fb300b203df9 | 2,026 | py | Python | tensorflow/python/ipu/keras/layers/recomputation.py | chenzhengda/tensorflow | 8debb698097670458b5f21d728bc6f734a7b5a53 | [
"Apache-2.0"
] | 74 | 2020-07-06T17:11:39.000Z | 2022-01-28T06:31:28.000Z | tensorflow/python/ipu/keras/layers/recomputation.py | chenzhengda/tensorflow | 8debb698097670458b5f21d728bc6f734a7b5a53 | [
"Apache-2.0"
] | 9 | 2020-10-13T23:25:29.000Z | 2022-02-10T06:54:48.000Z | tensorflow/python/ipu/keras/layers/recomputation.py | chenzhengda/tensorflow | 8debb698097670458b5f21d728bc6f734a7b5a53 | [
"Apache-2.0"
] | 12 | 2020-07-08T07:27:17.000Z | 2021-12-27T08:54:27.000Z | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Recomputation IPU Keras layers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.ipu.ops import pipelining_ops
class RecomputationCheckpoint(Layer):
  """Keras layer that marks its inputs as recomputation checkpoints.

  When recomputation is enabled, values passing through this layer are kept
  in memory instead of being recomputed, which can lower peak liveness when
  too many activations would otherwise need recomputing before the
  backpropagation operations can run.

  Intended for use with the
  `RecomputationMode.RecomputeAndBackpropagateInterleaved` pipelining
  recomputation mode; it has no effect under
  `RecomputationMode.RecomputeThenBackpropagate`.
  """
  def __init__(self, **kwargs):
    super().__init__(**kwargs)

  def call(self, inputs, **kwargs):
    """Checkpoint the input tensors.

    Args:
      inputs: A tensor or a structure of tensors which should be checkpointed.

    Returns:
      A tensor or a structure of tensors matching the shape and type of
      `inputs`.
    """
    checkpointed = pipelining_ops.recomputation_checkpoint(
        inputs, name=self.name)
    return checkpointed

  def get_config(self):
    # This layer has no configurable state of its own.
    return dict()
| 33.766667 | 80 | 0.71619 |
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.ipu.ops import pipelining_ops
class RecomputationCheckpoint(Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def call(self, inputs, **kwargs):
return pipelining_ops.recomputation_checkpoint(inputs, name=self.name)
def get_config(self):
return {}
| true | true |
f710c8dc0ae9c607360da95e07304279627fd52e | 3,944 | py | Python | pysnmp/ZYXEL-BRIDGE-CONTROL-PROTOCOL-TRANSPARENCY-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/ZYXEL-BRIDGE-CONTROL-PROTOCOL-TRANSPARENCY-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/ZYXEL-BRIDGE-CONTROL-PROTOCOL-TRANSPARENCY-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module ZYXEL-BRIDGE-CONTROL-PROTOCOL-TRANSPARENCY-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ZYXEL-BRIDGE-CONTROL-PROTOCOL-TRANSPARENCY-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:43:05 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint")
dot1dBasePort, = mibBuilder.importSymbols("BRIDGE-MIB", "dot1dBasePort")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Bits, ObjectIdentity, MibIdentifier, NotificationType, iso, Unsigned32, Counter32, TimeTicks, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, IpAddress, Counter64, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "ObjectIdentity", "MibIdentifier", "NotificationType", "iso", "Unsigned32", "Counter32", "TimeTicks", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "IpAddress", "Counter64", "ModuleIdentity")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
esMgmt, = mibBuilder.importSymbols("ZYXEL-ES-SMI", "esMgmt")
zyxelBridgeControlProtocolTransparency = ModuleIdentity((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 15))
if mibBuilder.loadTexts: zyxelBridgeControlProtocolTransparency.setLastUpdated('201207010000Z')
if mibBuilder.loadTexts: zyxelBridgeControlProtocolTransparency.setOrganization('Enterprise Solution ZyXEL')
zyxelBridgeControlProtocolTransparencySetup = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 15, 1))
zyBridgeControlProtocolTransparencyState = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 15, 1, 1), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyBridgeControlProtocolTransparencyState.setStatus('current')
zyxelBridgeControlProtocolTransparencyPortTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 15, 1, 2), )
if mibBuilder.loadTexts: zyxelBridgeControlProtocolTransparencyPortTable.setStatus('current')
zyxelBridgeControlProtocolTransparencyPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 15, 1, 2, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dBasePort"))
if mibBuilder.loadTexts: zyxelBridgeControlProtocolTransparencyPortEntry.setStatus('current')
zyBridgeControlProtocolTransparencyPortMode = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 15, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("peer", 0), ("tunnel", 1), ("discard", 2), ("network", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyBridgeControlProtocolTransparencyPortMode.setStatus('current')
mibBuilder.exportSymbols("ZYXEL-BRIDGE-CONTROL-PROTOCOL-TRANSPARENCY-MIB", zyxelBridgeControlProtocolTransparencySetup=zyxelBridgeControlProtocolTransparencySetup, zyxelBridgeControlProtocolTransparency=zyxelBridgeControlProtocolTransparency, PYSNMP_MODULE_ID=zyxelBridgeControlProtocolTransparency, zyxelBridgeControlProtocolTransparencyPortTable=zyxelBridgeControlProtocolTransparencyPortTable, zyxelBridgeControlProtocolTransparencyPortEntry=zyxelBridgeControlProtocolTransparencyPortEntry, zyBridgeControlProtocolTransparencyPortMode=zyBridgeControlProtocolTransparencyPortMode, zyBridgeControlProtocolTransparencyState=zyBridgeControlProtocolTransparencyState)
| 131.466667 | 665 | 0.811359 |
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint")
dot1dBasePort, = mibBuilder.importSymbols("BRIDGE-MIB", "dot1dBasePort")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Bits, ObjectIdentity, MibIdentifier, NotificationType, iso, Unsigned32, Counter32, TimeTicks, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, IpAddress, Counter64, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "ObjectIdentity", "MibIdentifier", "NotificationType", "iso", "Unsigned32", "Counter32", "TimeTicks", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "IpAddress", "Counter64", "ModuleIdentity")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
esMgmt, = mibBuilder.importSymbols("ZYXEL-ES-SMI", "esMgmt")
zyxelBridgeControlProtocolTransparency = ModuleIdentity((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 15))
if mibBuilder.loadTexts: zyxelBridgeControlProtocolTransparency.setLastUpdated('201207010000Z')
if mibBuilder.loadTexts: zyxelBridgeControlProtocolTransparency.setOrganization('Enterprise Solution ZyXEL')
zyxelBridgeControlProtocolTransparencySetup = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 15, 1))
zyBridgeControlProtocolTransparencyState = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 15, 1, 1), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyBridgeControlProtocolTransparencyState.setStatus('current')
zyxelBridgeControlProtocolTransparencyPortTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 15, 1, 2), )
if mibBuilder.loadTexts: zyxelBridgeControlProtocolTransparencyPortTable.setStatus('current')
zyxelBridgeControlProtocolTransparencyPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 15, 1, 2, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dBasePort"))
if mibBuilder.loadTexts: zyxelBridgeControlProtocolTransparencyPortEntry.setStatus('current')
zyBridgeControlProtocolTransparencyPortMode = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 15, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("peer", 0), ("tunnel", 1), ("discard", 2), ("network", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyBridgeControlProtocolTransparencyPortMode.setStatus('current')
mibBuilder.exportSymbols("ZYXEL-BRIDGE-CONTROL-PROTOCOL-TRANSPARENCY-MIB", zyxelBridgeControlProtocolTransparencySetup=zyxelBridgeControlProtocolTransparencySetup, zyxelBridgeControlProtocolTransparency=zyxelBridgeControlProtocolTransparency, PYSNMP_MODULE_ID=zyxelBridgeControlProtocolTransparency, zyxelBridgeControlProtocolTransparencyPortTable=zyxelBridgeControlProtocolTransparencyPortTable, zyxelBridgeControlProtocolTransparencyPortEntry=zyxelBridgeControlProtocolTransparencyPortEntry, zyBridgeControlProtocolTransparencyPortMode=zyBridgeControlProtocolTransparencyPortMode, zyBridgeControlProtocolTransparencyState=zyBridgeControlProtocolTransparencyState)
| true | true |
f710cd85c0f4915e384055a6b54566ba32288ac7 | 5,447 | py | Python | library/bitcash-master/docs/source/conf.py | Devel484/CryptoPay-Crypto | 76ae0486ea86b5fa121af42c6d0b9efa279b97ee | [
"MIT"
] | 2 | 2020-09-08T22:59:33.000Z | 2020-12-30T06:28:29.000Z | docs/source/conf.py | AlphaGriffin/bitcash | 793e632733b4ea8988b23c7804c00034f9fc0427 | [
"MIT"
] | null | null | null | docs/source/conf.py | AlphaGriffin/bitcash | 793e632733b4ea8988b23c7804c00034f9fc0427 | [
"MIT"
] | 1 | 2020-12-30T06:28:41.000Z | 2020-12-30T06:28:41.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Bitcash documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 20 15:41:44 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# Version is imported from the installed bitcash package (used below) so the
# built documentation always matches the code.
from bitcash import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.githubpages',
    'sphinxcontrib.fulltoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Bitcash'
copyright = '2017, Ofek Lev'
author = 'Ofek Lev'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# Custom sidebar templates, maps document names to template names.
# NOTE(review): 'sidebarintro.html', 'sidebarlogo.html' and 'hacks.html' are
# custom templates — confirm they exist under templates_path ('_templates').
html_sidebars = {
    'index': ['sidebarintro.html', 'sourcelink.html', 'searchbox.html',
              'hacks.html'],
    '**': ['sidebarlogo.html', 'localtoc.html', 'relations.html',
           'sourcelink.html', 'searchbox.html', 'hacks.html']
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'show_powered_by': False,
    'github_user': 'ofek',
    'github_repo': 'bitcash',
    'github_banner': True,
    'show_related': False
}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# (The effective html_theme_options are assigned in the General configuration
# section above.)
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bitcashdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Bitcash.tex', 'Bitcash Documentation',
     'Ofek Lev', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'bitcash', 'Bitcash Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): the description below is still the sphinx-quickstart
# placeholder ('One line description of project.') — fill it in.
texinfo_documents = [
    (master_doc, 'Bitcash', 'Bitcash Documentation',
     author, 'Bitcash', 'One line description of project.',
     'Miscellaneous'),
]
from bitcash import __version__
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.githubpages',
'sphinxcontrib.fulltoc'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'Bitcash'
copyright = '2017, Ofek Lev'
author = 'Ofek Lev'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['sidebarintro.html', 'sourcelink.html', 'searchbox.html',
'hacks.html'],
'**': ['sidebarlogo.html', 'localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html', 'hacks.html']
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'show_powered_by': False,
'github_user': 'ofek',
'github_repo': 'bitcash',
'github_banner': True,
'show_related': False
}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bitcashdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Bitcash.tex', 'Bitcash Documentation',
'Ofek Lev', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'bitcash', 'Bitcash Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Bitcash', 'Bitcash Documentation',
author, 'Bitcash', 'One line description of project.',
'Miscellaneous'),
]
| true | true |
f710cde440c8546e165803127d3fe665e30c7217 | 136 | py | Python | geo_files/geo_networkx.py | floristevito/SEN9120_Advanced_Agent_Based_Modelling | fc45d02f3add05fb5db23c920d6702f1e704ef21 | [
"MIT"
] | null | null | null | geo_files/geo_networkx.py | floristevito/SEN9120_Advanced_Agent_Based_Modelling | fc45d02f3add05fb5db23c920d6702f1e704ef21 | [
"MIT"
] | null | null | null | geo_files/geo_networkx.py | floristevito/SEN9120_Advanced_Agent_Based_Modelling | fc45d02f3add05fb5db23c920d6702f1e704ef21 | [
"MIT"
] | null | null | null | import geopandas as gpd
# NetworkX was too slow here (original note: "Networkx werkt erg traag",
# Dutch for "networkx is very slow"), so the provincial highway network is
# loaded with GeoPandas instead.
gdf = gpd.read_file(r"C:\Users\bruno\Downloads\snelwegen_provincie.geojson")
# Bare expression: displays the GeoDataFrame when executed as a notebook cell.
gdf
| 15.111111 | 76 | 0.779412 | import geopandas as gpd
gdf = gpd.read_file(r"C:\Users\bruno\Downloads\snelwegen_provincie.geojson")
gdf
| true | true |
f710ce94cd50e7263e3201853259575b309288ba | 260 | py | Python | examples/host/status_receiver.py | ci4rail/esp_test_status_report | a54ffc81adb6cd6ffa22f7dc913010154f7ffca0 | [
"Apache-2.0"
] | null | null | null | examples/host/status_receiver.py | ci4rail/esp_test_status_report | a54ffc81adb6cd6ffa22f7dc913010154f7ffca0 | [
"Apache-2.0"
] | 1 | 2021-11-16T14:36:23.000Z | 2021-11-16T14:36:23.000Z | examples/host/status_receiver.py | ci4rail/esp_test_status_report | a54ffc81adb6cd6ffa22f7dc913010154f7ffca0 | [
"Apache-2.0"
] | null | null | null | import socket
import sys
ESP_IP = '192.168.7.1'
PORT = 10000
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('try to connect')
sock.connect((ESP_IP, PORT))
print('connected...')
data = sock.recv(255)
print('msg: ', data.decode())
sock.close()
| 18.571429 | 56 | 0.707692 | import socket
import sys
ESP_IP = '192.168.7.1'
PORT = 10000
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('try to connect')
sock.connect((ESP_IP, PORT))
print('connected...')
data = sock.recv(255)
print('msg: ', data.decode())
sock.close()
| true | true |
f710cf1996c86a9c50d9c2272c951efd092ad2b2 | 2,288 | py | Python | monasca_api/conf/types.py | MheniMerz/monasca-api | 9c0892a58622082ed8baf81ee2f621cc68f5b42c | [
"Apache-2.0"
] | 50 | 2015-10-18T02:54:52.000Z | 2021-12-05T07:54:08.000Z | monasca_api/conf/types.py | MheniMerz/monasca-api | 9c0892a58622082ed8baf81ee2f621cc68f5b42c | [
"Apache-2.0"
] | 13 | 2015-10-29T12:54:07.000Z | 2021-09-02T06:17:42.000Z | monasca_api/conf/types.py | MheniMerz/monasca-api | 9c0892a58622082ed8baf81ee2f621cc68f5b42c | [
"Apache-2.0"
] | 81 | 2015-10-21T07:43:30.000Z | 2022-01-07T03:35:05.000Z | # Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_config import types
from oslo_utils import netutils
class HostAddressPortOpt(cfg.Opt):
    """Config option whose value is a host (name or IP) plus a TCP/IP port.

    Thin wrapper around ``cfg.Opt`` that pins the option type to
    :class:`HostAddressPortType`; all other keyword arguments are passed
    through to ``cfg.Opt`` unchanged.
    """

    def __init__(self, name, **kwargs):
        # Fix the option type; callers cannot override it.
        super(HostAddressPortOpt, self).__init__(
            name, type=HostAddressPortType(), **kwargs)
class HostAddressPortType(types.HostAddress):
    """HostAddress extended to also parse and validate a port number.

    Accepts values of the form ``<host>:<port>`` where ``<host>`` is an IP
    address or hostname, and returns the value normalized as ``host:port``.
    Raises ValueError when either part fails validation.
    """

    def __init__(self, version=None):
        type_name = 'ip and port value'
        super(HostAddressPortType, self).__init__(version, type_name=type_name)

    def __call__(self, value):
        """Validate *value* and return it normalized as ``'host:port'``."""
        addr, port = netutils.parse_host_port(value)
        # NOTE(gmann): parse_host_port() returns port as None if no port is
        # supplied in value, so set port to the string 'None' for correct
        # parsing and error reporting; otherwise NoneType is not parsed.
        port = 'None' if port is None else port
        addr = self.validate_addr(addr)
        port = self._validate_port(port)
        if not addr and not port:
            # BUG FIX: the format string previously had no argument applied,
            # so the raised message contained a literal '%s'.
            raise ValueError('%s is not valid ip with optional port' % value)
        return '%s:%d' % (addr, port)

    @staticmethod
    def _validate_port(port):
        """Return *port* validated as a TCP/IP port via oslo's Port type."""
        return types.Port()(port)

    def validate_addr(self, addr):
        """Return *addr* validated as an IP address or, failing that, a hostname."""
        try:
            addr = self.ip_address(addr)
        except ValueError:
            try:
                addr = self.hostname(addr)
            except ValueError:
                # BUG FIX: interpolate addr into the message instead of
                # passing it as a second exception argument (ValueError does
                # no %-formatting on its arguments).
                raise ValueError("%s is not a valid host address" % addr)
        return addr
| 35.75 | 79 | 0.65035 |
from oslo_config import cfg
from oslo_config import types
from oslo_utils import netutils
class HostAddressPortOpt(cfg.Opt):
def __init__(self, name, **kwargs):
ip_port_type = HostAddressPortType()
super(HostAddressPortOpt, self).__init__(name,
type=ip_port_type,
**kwargs)
class HostAddressPortType(types.HostAddress):
def __init__(self, version=None):
type_name = 'ip and port value'
super(HostAddressPortType, self).__init__(version, type_name=type_name)
def __call__(self, value):
addr, port = netutils.parse_host_port(value)
port = 'None' if port is None else port
addr = self.validate_addr(addr)
port = self._validate_port(port)
if not addr and not port:
raise ValueError('%s is not valid ip with optional port')
return '%s:%d' % (addr, port)
@staticmethod
def _validate_port(port):
return types.Port()(port)
def validate_addr(self, addr):
try:
addr = self.ip_address(addr)
except ValueError:
try:
addr = self.hostname(addr)
except ValueError:
raise ValueError("%s is not a valid host address", addr)
return addr
| true | true |
f710cf7c42a883633b27ede44b5fceb415cbf5e8 | 5,101 | py | Python | scripts/Tennis Ball Detection/ball_detection_taskphase.py | leander-dsouza/Gazebo | 4e4c92115c9132b096f9b5a7fc9a9c0f5ed9e598 | [
"MIT"
] | 17 | 2020-03-27T10:33:16.000Z | 2021-06-07T10:29:13.000Z | scripts/Tennis_Ball_Detection/ball_detection_taskphase.py | leander-dsouza/Gazebo | 4e4c92115c9132b096f9b5a7fc9a9c0f5ed9e598 | [
"MIT"
] | null | null | null | scripts/Tennis_Ball_Detection/ball_detection_taskphase.py | leander-dsouza/Gazebo | 4e4c92115c9132b096f9b5a7fc9a9c0f5ed9e598 | [
"MIT"
] | 7 | 2020-03-06T03:53:57.000Z | 2021-01-15T14:31:31.000Z | #!/usr/bin/env python3
import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import numpy as np
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5, 5))
kernel1= cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))
aratio = 1.0
def nothing(x):
    """No-op callback required by cv2.createTrackbar; ignores the new value."""
    pass
# *********************************************************************************************************************
def adjust_gamma(image, gamma=1.0):
    """Apply gamma correction to *image* via a 256-entry lookup table."""
    # Guard against division by zero in the exponent below.
    if gamma == 0:
        gamma = 0.01
    inv_gamma = 1.0 / gamma
    # Map every possible 8-bit intensity through the power curve in one
    # vectorized pass, then truncate back to uint8 for the LUT.
    levels = np.arange(0, 256) / 255.0
    table = ((levels ** inv_gamma) * 255).astype("uint8")
    return cv2.LUT(image, table)
# *********************************************************************************************************************
img1= np.zeros((300, 512, 3), np.uint8)
cv2.namedWindow('GAMMA')
cv2.createTrackbar('g', 'GAMMA', 1, 10, nothing)
def callback(data):
    """Per-frame ROS image callback: detect a tennis ball and show a 2x2 debug view.

    Pipeline: gamma-correct the frame (trackbar-controlled), threshold yellow
    in HSV, clean the mask morphologically, then cross-check candidates with a
    near-square min-area rectangle (aspect ratio 0.9..1.1, stored in the
    module-level global ``aratio``) and a Hough circle fit before declaring
    "BALL DETECTED". Displays original/annotated/mask/result stacked together.
    """
    global aratio
    # Convert the ROS Image message into an OpenCV BGR frame.
    br = CvBridge()
    frame1 = br.imgmsg_to_cv2(data)
    frame1 = cv2.cvtColor(frame1, cv2.COLOR_RGB2BGR)
    frame = frame1
    # Trackbar position 0..10 maps to gamma 0.0..1.0.
    gamma = (cv2.getTrackbarPos('g', 'GAMMA')) * 0.1
    cv2.imshow('GAMMA', img1)
    frame = adjust_gamma(frame, gamma=gamma)
    cv2.putText(frame, "g={}".format(gamma), (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 3)
    #cv2.imshow("camera", frame)
    hsv = frame
    hsv = cv2.cvtColor(hsv, cv2.COLOR_BGR2HSV)  # convert BGR frame to HSV
    hsv = cv2.GaussianBlur(hsv, (5, 5), 0)
    # define range of yellow color in HSV
    lower_yellow = np.array([29, 86, 6])
    upper_yellow = np.array([64, 255, 255])
    # Threshold the HSV image to keep only the yellow (tennis-ball) range.
    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
    # Morphological cleanup: open/close remove speckle, then erode + heavy
    # dilate to consolidate the ball blob.
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel1)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel1)
    mask = cv2.erode(mask, kernel, iterations=2)
    mask = cv2.dilate(mask, kernel1, iterations=13)
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame, frame, mask=mask)
    # BOUNDING RECTANGLE .............................................................................................
    conts, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    conts = np.array(conts)
    if len(conts) > 0:
        for i, contour in enumerate(conts):
            rect = cv2.minAreaRect(contour)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            # Width/height of the rotated rect; a ball should be near-square.
            # NOTE(review): aratio keeps the value of the LAST contour only,
            # and is a module-level global read again in the circle loop.
            aratio = (rect[1][0] / rect[1][1])
            if (aratio > 0.9) and (aratio < 1.1):
                cv2.drawContours(frame, [box], 0, (0, 0, 255), 2)
                #print("Aspect Ratio", aratio)
    # HOUGH CIRCLES........................................................................................................
    gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 200, param1=255, param2=20, minRadius=0, maxRadius=0)
    # # print circles
    # ensure at least some circles were found
    if circles is not None:
        # convert the (x, y) coordinates and radius of the circles to integers
        circles = np.round(circles[0, :]).astype("int")
        # loop over the (x, y) coordinates and radius of the circles
        for (x, y, r) in circles:
            # draw the circle in the output image, then draw a rectangle in the image
            # corresponding to the center of the circle
            if (aratio > 0.9) and (aratio < 1.1):
                cv2.circle(res, (x, y), r, (0, 255, 0), 4)
                cv2.rectangle(res, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
                cv2.putText(frame, "BALL DETECTED", (430, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                            (255, 0, 0),
                            3)
    # DISPLAY................................................................................................................
    # Label each quadrant, then stack: top row original|annotated, bottom row mask|result.
    cv2.putText(frame1, "ORIGINAL FRAME", (10, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 3)
    cv2.putText(frame, "OUTPUT FRAME", (10, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 3)
    cv2.putText(res, "RESULTANT", (10, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 3)
    # Mask must be 3-channel to hstack with the BGR images.
    mask = cv2.cvtColor(mask,cv2.COLOR_GRAY2BGR)
    horizontal1 = np.hstack([frame1,frame])
    horizontal2 = np.hstack((mask,res))
    vertical = np.vstack((horizontal1,horizontal2))
    '''cv2.imshow('GAMMA CORRECTED', frame)
    cv2.imshow('MASK', mask)
    cv2.imshow('RESULT', res)
    cv2.imshow('ORIGINAL FRAME', frame1)'''
    cv2.putText(vertical, "MASK", (10, 940), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 3)
    cv2.imshow('RESULT', vertical)
    # .....................................................................................................................
    # ESC quits the whole program from inside the callback.
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        quit()
def listener():
    """Start the ROS node, subscribe to the color image topic, and spin.

    Blocks in rospy.spin() until shutdown, then destroys all OpenCV windows.
    disable_signals=True leaves signal handling to this process (needed so
    quit() in the callback can terminate it).
    """
    rospy.init_node('listener', anonymous=True,disable_signals=True)
    # Every incoming RealSense color frame is handed to callback().
    rospy.Subscriber('/d435/camera/color/image_raw', Image, callback)
    rospy.spin()
    cv2.destroyAllWindows()
if __name__ == '__main__':
listener()
| 34.006667 | 125 | 0.53068 |
import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import numpy as np
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5, 5))
kernel1= cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))
aratio = 1.0
def nothing(x):
pass
def adjust_gamma(image, gamma=1.0):
if gamma == 0:
gamma = 0.01
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
return cv2.LUT(image, table)
img1= np.zeros((300, 512, 3), np.uint8)
cv2.namedWindow('GAMMA')
cv2.createTrackbar('g', 'GAMMA', 1, 10, nothing)
def callback(data):
global aratio
br = CvBridge()
frame1 = br.imgmsg_to_cv2(data)
frame1 = cv2.cvtColor(frame1, cv2.COLOR_RGB2BGR)
frame = frame1
gamma = (cv2.getTrackbarPos('g', 'GAMMA')) * 0.1
cv2.imshow('GAMMA', img1)
frame = adjust_gamma(frame, gamma=gamma)
cv2.putText(frame, "g={}".format(gamma), (10, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 3)
hsv = frame
hsv = cv2.cvtColor(hsv, cv2.COLOR_BGR2HSV)
hsv = cv2.GaussianBlur(hsv, (5, 5), 0)
lower_yellow = np.array([29, 86, 6])
upper_yellow = np.array([64, 255, 255])
mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel1)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel1)
mask = cv2.erode(mask, kernel, iterations=2)
mask = cv2.dilate(mask, kernel1, iterations=13)
res = cv2.bitwise_and(frame, frame, mask=mask)
conts, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
conts = np.array(conts)
if len(conts) > 0:
for i, contour in enumerate(conts):
rect = cv2.minAreaRect(contour)
box = cv2.boxPoints(rect)
box = np.int0(box)
aratio = (rect[1][0] / rect[1][1])
if (aratio > 0.9) and (aratio < 1.1):
cv2.drawContours(frame, [box], 0, (0, 0, 255), 2)
gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 200, param1=255, param2=20, minRadius=0, maxRadius=0)
    if circles is not None:
circles = np.round(circles[0, :]).astype("int")
for (x, y, r) in circles:
if (aratio > 0.9) and (aratio < 1.1):
cv2.circle(res, (x, y), r, (0, 255, 0), 4)
cv2.rectangle(res, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
cv2.putText(frame, "BALL DETECTED", (430, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
(255, 0, 0),
3)
cv2.putText(frame1, "ORIGINAL FRAME", (10, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 3)
cv2.putText(frame, "OUTPUT FRAME", (10, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 3)
cv2.putText(res, "RESULTANT", (10, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 3)
mask = cv2.cvtColor(mask,cv2.COLOR_GRAY2BGR)
horizontal1 = np.hstack([frame1,frame])
horizontal2 = np.hstack((mask,res))
vertical = np.vstack((horizontal1,horizontal2))
cv2.putText(vertical, "MASK", (10, 940), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 3)
cv2.imshow('RESULT', vertical)
k = cv2.waitKey(5) & 0xFF
if k == 27:
quit()
def listener():
rospy.init_node('listener', anonymous=True,disable_signals=True)
rospy.Subscriber('/d435/camera/color/image_raw', Image, callback)
rospy.spin()
cv2.destroyAllWindows()
if __name__ == '__main__':
listener()
| true | true |
f710cff3f98e1377242473c9bcc5e0534ef79cd9 | 2,449 | py | Python | glowing/data/glowing/functions/generate_adjust_tags.py | dv-extrarius/dv-datapacks | b03b7f0a02a62ff7d66a60c3c8b7517fac4bc09b | [
"MIT"
] | 3 | 2018-08-30T16:17:09.000Z | 2020-01-13T05:13:08.000Z | glowing/data/glowing/functions/generate_adjust_tags.py | dv-extrarius/dv-datapacks | b03b7f0a02a62ff7d66a60c3c8b7517fac4bc09b | [
"MIT"
] | null | null | null | glowing/data/glowing/functions/generate_adjust_tags.py | dv-extrarius/dv-datapacks | b03b7f0a02a62ff7d66a60c3c8b7517fac4bc09b | [
"MIT"
] | null | null | null | #line = r'''execute if score waveGlowTimer glowTimer matches %s run tag @e[type=!player,type=!dolphin,distance=%s,nbt={Attributes:[{Name:"generic.attackDamage"}]},nbt=!{Glowing: 1b}] add madeGlowing'''
#type=!player,type=!dolphin,distance=16..20,nbt={Attributes:[{Name:"generic.attackDamage"}]},nbt=!{Glowing: 1b}
# Minecraft command template: fires only on a given waveGlowTimer tick span
# (first %s) and only for entities with a player within a distance span
# (second %s).
line = r'''execute if score waveGlowTimer glowTimer matches %s if entity @a[distance=%s] run tag @s add madeGlowing'''
# Width (blocks) of each glow band and how many extra ticks a band stays lit.
bandDistance = 4
bandDuration = 0
# Radii (blocks) between which the outward-moving wave operates.
minDistance = 16
maxDistance = 64
# Timer wraps every timeMod ticks; distances wrap over a distMod-block ring.
timeMod = (3 * bandDistance)
distMod = maxDistance - minDistance
def dotdotspan(start, end):
    """Format a Minecraft range selector: 'start..end', or just 'start' when equal."""
    if start == end:
        return str(start)
    return f"{start}..{end}"
# Round maxDistance down so the distance ring divides evenly by the timer period.
maxDistance += (minDistance - maxDistance) % timeMod
print("#NOTE: The conditions for waveGlowTimer wrapping in 'dotick' must be made to match the maximum count in this file (%r)" % (timeMod - 1,))
#print(r'''tag @e[type=!player,type=!dolphin,distance=..%s,nbt={Attributes:[{Name:"generic.attackDamage"}]},nbt=!{Glowing: 1b}] add madeGlowing''' % (minDistance-1,))
# Everything closer than the wave's inner radius is always tagged.
print(r'''execute if entity @a[distance=%s] run tag @s add madeGlowing''' % (minDistance-1,))
# For each distance step, emit command(s) tagging the moving band. Both the
# time window and the distance window live on modular rings (timeMod ticks,
# distMod blocks), so each may wrap past its upper bound; a wrapped window is
# split into two spans, giving up to four command permutations per step.
for ii, dd in enumerate(range(minDistance, maxDistance)):
    startTime = ii % timeMod
    endTime = (startTime + bandDuration) % timeMod
    startDist = (dd - minDistance) % distMod + minDistance
    endDist = (startDist + bandDistance - minDistance) % distMod + minDistance
    if endTime != startTime + bandDuration:
        # Time window wraps around the timer period.
        if endDist != startDist + bandDistance:
            # Distance window wraps too: 2x2 = four span combinations.
            print(line % (dotdotspan(startTime, timeMod-1), dotdotspan(startDist, distMod-1+minDistance)))
            print(line % (dotdotspan(0, endTime), dotdotspan(startDist, distMod-1+minDistance)))
            print(line % (dotdotspan(startTime, timeMod-1), dotdotspan(minDistance, endDist)))
            print(line % (dotdotspan(0, endTime), dotdotspan(minDistance, endDist)))
        else:
            print(line % (dotdotspan(startTime, timeMod-1), dotdotspan(dd, dd + bandDistance)))
            print(line % (dotdotspan(0, endTime), dotdotspan(dd, dd + bandDistance)))
    else:
        if endDist != startDist + bandDistance:
            # Only the distance window wraps.
            print(line % (dotdotspan(startTime, endTime), dotdotspan(startDist, distMod-1+minDistance)))
            print(line % (dotdotspan(startTime, endTime), dotdotspan(minDistance, endDist)))
        else:
            # Neither window wraps: single command.
            print(line % (dotdotspan(startTime, endTime), dotdotspan(dd, dd + bandDistance)))
line = r'''execute if score waveGlowTimer glowTimer matches %s if entity @a[distance=%s] run tag @s add madeGlowing'''
bandDistance = 4
bandDuration = 0
minDistance = 16
maxDistance = 64
timeMod = (3 * bandDistance)
distMod = maxDistance - minDistance
def dotdotspan(start, end):
if start != end:
return "%s..%s" % (start, end)
return str(start)
maxDistance += (minDistance - maxDistance) % timeMod
print("#NOTE: The conditions for waveGlowTimer wrapping in 'dotick' must be made to match the maximum count in this file (%r)" % (timeMod - 1,))
print(r'''execute if entity @a[distance=%s] run tag @s add madeGlowing''' % (minDistance-1,))
for ii, dd in enumerate(range(minDistance, maxDistance)):
startTime = ii % timeMod
endTime = (startTime + bandDuration) % timeMod
startDist = (dd - minDistance) % distMod + minDistance
endDist = (startDist + bandDistance - minDistance) % distMod + minDistance
if endTime != startTime + bandDuration:
if endDist != startDist + bandDistance:
print(line % (dotdotspan(startTime, timeMod-1), dotdotspan(startDist, distMod-1+minDistance)))
print(line % (dotdotspan(0, endTime), dotdotspan(startDist, distMod-1+minDistance)))
print(line % (dotdotspan(startTime, timeMod-1), dotdotspan(minDistance, endDist)))
print(line % (dotdotspan(0, endTime), dotdotspan(minDistance, endDist)))
else:
print(line % (dotdotspan(startTime, timeMod-1), dotdotspan(dd, dd + bandDistance)))
print(line % (dotdotspan(0, endTime), dotdotspan(dd, dd + bandDistance)))
else:
if endDist != startDist + bandDistance:
print(line % (dotdotspan(startTime, endTime), dotdotspan(startDist, distMod-1+minDistance)))
print(line % (dotdotspan(startTime, endTime), dotdotspan(minDistance, endDist)))
else:
print(line % (dotdotspan(startTime, endTime), dotdotspan(dd, dd + bandDistance)))
| true | true |
f710d02252626211a863cd5b3d96abcfce335204 | 6,435 | py | Python | wip/pulumi/helpers.py | 4c74356b41/IaC | 3938519c33c72fc5c0552a5f4dfd894a5952c527 | [
"MIT"
] | 1 | 2020-08-18T06:05:20.000Z | 2020-08-18T06:05:20.000Z | wip/pulumi/helpers.py | 4c74356b41/IaC | 3938519c33c72fc5c0552a5f4dfd894a5952c527 | [
"MIT"
] | null | null | null | wip/pulumi/helpers.py | 4c74356b41/IaC | 3938519c33c72fc5c0552a5f4dfd894a5952c527 | [
"MIT"
] | null | null | null | import os
import re
import secrets
import string
import pulumi
from pulumi import ResourceOptions
from pulumi_kubernetes.apps.v1 import Deployment
from pulumi_kubernetes.core.v1 import Service
from azure.keyvault import KeyVaultClient, KeyVaultAuthentication, KeyVaultId
from azure.common.credentials import ServicePrincipalCredentials
def normalize_name(name):
    """Strip every non-alphanumeric character from *name* and cap it at 23 chars.

    Useful for deriving resource-safe names from arbitrary strings.
    """
    # Slicing past the end of a short string is a no-op, so no explicit
    # length check is needed for the 23-character cap.
    return re.sub('[^a-zA-Z0-9]', '', name)[:23]
def _get_kvclient():
    """Build an Azure Key Vault client authenticated via a service principal.

    Credentials come from the ARM_CLIENT_ID / ARM_CLIENT_SECRET /
    ARM_TENANT_ID environment variables; if any is unset, os.getenv returns
    None and authentication will fail at token time — TODO confirm desired
    failure mode.
    """
    def auth_callback(server, resource, scope):
        # Called by the SDK whenever a token is needed; exchanges the service
        # principal for a vault-scoped bearer token.
        credentials = ServicePrincipalCredentials(
            client_id = os.getenv('ARM_CLIENT_ID'),
            secret = os.getenv('ARM_CLIENT_SECRET'),
            tenant = os.getenv('ARM_TENANT_ID'),
            resource = "https://vault.azure.net"
        )
        token = credentials.token
        return token['token_type'], token['access_token']
    kv_client = KeyVaultClient(KeyVaultAuthentication(auth_callback))
    return kv_client
def get_kv_secret(name):
    """Return the latest value of secret *name* from the Key Vault.

    NOTE(review): the vault URL is a hard-coded placeholder
    ("placeholder.vault.azure.net") — must be replaced with the real vault
    before this function can work.
    """
    kv_client = _get_kvclient()
    # version_none fetches the current (latest) version of the secret.
    secret = kv_client.get_secret("https://placeholder.vault.azure.net/", name, KeyVaultId.version_none).value
    return secret
def _get_password():
    """Return a random 20-character alphanumeric password."""
    chars = string.ascii_letters + string.digits
    # secrets (not random) so the password is cryptographically strong.
    return ''.join(secrets.choice(chars) for _ in range(20))
config = pulumi.Config('aks')
PREFIX = pulumi.get_stack()
PASSWORD = config.get('password') or _get_password()
SSHKEY = config.get('sshkey') or 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCxinIAIDDCradZPAgX5GzBLv00u4rigOLUbU00E44FrfMTqu5wXiejJ4ycSb1bI+//ZNgaB2UYRbPL7A9OUKY+K4sX5O84Q6DPMjo/90IANHVTLf3xTaSc7hpvXOtIjJTJeiamxClgnTAcR55RV/j9/Wptxa8GGcRmRCcSmJUkx5AZTFI+s8aF0W3aeHHRw7TxNKBuwrX7FDcHyGKvdkFg4OP863Xe5hp5ql1C3XibmCOp1CMPIU2hCmGOy1LGbOf/Pa+QKAdtUSrPNK/jBWvPWo0k02Ii0JtMAdlpVqnJc3czNIp5gEqZCRCGEdkb/kZnJiMRZhmLBYnC8tiMxvZj core@k8s'
LOCATION = config.get('location') or 'westeurope'
NAMESPACE = config.get('namespace') or 'flux'
args_flux = [
"--ssh-keygen-dir=/var/fluxd/keygen",
"--k8s-secret-name=flux-ssh",
"--memcached-hostname=memcached",
"--memcached-service=",
"--git-url=git@ssh.dev.azure.com:v3/xxxxxx",
"--git-branch=master",
"--git-path=flux/cluster-setup,flux/{}".format(PREFIX),
"--git-user=Weave Flux",
"--git-email=support@weave.works",
"--git-set-author=false",
"--git-poll-interval=5m",
"--git-label={}".format(PREFIX),
"--git-timeout=20s",
"--sync-interval=5m",
"--git-ci-skip=false",
"--registry-exclude-image=*",
"--registry-poll-interval=5m",
"--registry-rps=200",
"--registry-burst=125",
"--registry-trace=false"
]
args_memcached = ["-m 64","-p 11211","-I 1m"]
volumeMounts_flux = [
{
"name": "kubedir",
"mountPath": "/root/.kubectl"
},
{
"name": "git-key",
"mountPath": "/etc/fluxd/ssh",
"readOnly": True
},
{
"name": "git-keygen",
"mountPath": "/var/fluxd/keygen"
}
]
volumes_flux = [
{
"name": "kubedir",
"configmap": {
"name": "flux-configmap"
}
},
{
"name": "git-key",
"secret": {
"secretName": "flux-ssh",
"defaultMode": 0o400 # has to be in octal
}
},
{
"name": "git-keygen",
"emptyDir": {
"medium": "Memory"
}
}
]
def _gen_service(name, ports, custom_provider, dependencies=[], service_type="ClusterIP"):
    """Create a Kubernetes Service named *name* exposing *ports* in NAMESPACE.

    Each port in *ports* becomes a same-numbered port/targetPort pair. The
    service selects pods labeled app=<name>, purpose=flux and uses ClientIP
    session affinity. NOTE(review): mutable default for ``dependencies`` is
    never mutated here, so it is benign.
    """
    # Expand plain port numbers into Kubernetes port specs.
    ports = [{"port": port, "target_port": port,
              "name": str(port)} for port in ports]
    labels = {
        "app": name,
        "purpose": "flux"
    }
    Service(name,
            metadata={
                "name": name,
                "labels": labels,
                "namespace": NAMESPACE
            },
            spec={
                "ports": ports,
                "selector": labels,
                "type": service_type,
                "sessionAffinity": "ClientIP"
            },
            # Route creation through the cluster's provider and wait on deps.
            __opts__=ResourceOptions(
                provider=custom_provider, depends_on=dependencies)
            )
def _gen_deployment(name, ports, image, custom_provider, serviceAccount, args=[], dependencies=[],
                    replicas=1, resources={}, env={}, volumes=[], volume_mounts=[]):
    """Create a Kubernetes Deployment named *name* running *image* in NAMESPACE.

    The single container exposes *ports*, runs with *args*, and always gets a
    KUBECONFIG environment variable pointing at /root/.kubectl/config.
    NOTE(review): the ``env`` parameter is accepted but never used — the env
    list below is hard-coded. Mutable defaults here are never mutated, so
    they are benign.
    """
    # Expand plain port numbers into {"container_port": n} specs.
    keys = ['container_port']
    ports = [dict.fromkeys(keys, port) for port in ports]
    labels = {
        "app": name,
        "purpose": "flux"
    }
    container = {
        "name": name,
        "image": image,
        "imagePullPolicy": "Always",
        "resources": resources,
        "ports": ports,
        "args": args,
        "env": [
            {
                "name": "KUBECONFIG",
                "value": "/root/.kubectl/config"
            }
        ],
        "volumeMounts": volume_mounts
    }
    Deployment(name,
               metadata={
                   "name": name,
                   "labels": labels,
                   "namespace": NAMESPACE
               },
               spec={
                   "selector": {
                       "match_labels": labels
                   },
                   "replicas": replicas,
                   "template": {
                       "metadata": {
                           "labels": labels
                       },
                       "spec": {
                           "containers": [
                               container
                           ],
                           "serviceAccount": serviceAccount,
                           "volumes": volumes
                       }
                   }
               },
               # Route creation through the cluster's provider and wait on deps.
               __opts__=ResourceOptions(
                   provider=custom_provider, depends_on=dependencies)
               )
def gen_application(name, ports, image, customProvider, dependencies=[], serviceAccount="default", volumes=False, volumeMounts=False):
    """Create the Service + Deployment pair for application *name*.

    Container args (and, when the flags are set, volumes/volumeMounts) are
    looked up by naming convention from module-level globals:
    ``args_<name>``, ``volumes_<name>``, ``volumeMounts_<name>`` — e.g.
    args_flux / volumes_flux above. A missing global raises KeyError.
    """
    args = globals()["args_{}".format(name)]
    # volumes/volumeMounts are booleans on input; when truthy they are
    # replaced by the convention-named global lists.
    if volumes:
        volumes = globals()["volumes_{}".format(name)]
    else:
        volumes = []
    if volumeMounts:
        volumeMounts = globals()["volumeMounts_{}".format(name)]
    else:
        volumeMounts = []
    _gen_service(name, ports, customProvider)
    _gen_deployment(name, ports, image, customProvider, serviceAccount, args=args, dependencies=dependencies, volumes=volumes, volume_mounts=volumeMounts)
| 30.070093 | 424 | 0.56519 | import os
import re
import secrets
import string
import pulumi
from pulumi import ResourceOptions
from pulumi_kubernetes.apps.v1 import Deployment
from pulumi_kubernetes.core.v1 import Service
from azure.keyvault import KeyVaultClient, KeyVaultAuthentication, KeyVaultId
from azure.common.credentials import ServicePrincipalCredentials
def normalize_name(name):
regex = re.compile('[^a-zA-Z0-9]')
replaced = regex.sub('', name)
normalized = replaced[:23] if len(replaced) > 23 else replaced
return normalized
def _get_kvclient():
def auth_callback(server, resource, scope):
credentials = ServicePrincipalCredentials(
client_id = os.getenv('ARM_CLIENT_ID'),
secret = os.getenv('ARM_CLIENT_SECRET'),
tenant = os.getenv('ARM_TENANT_ID'),
resource = "https://vault.azure.net"
)
token = credentials.token
return token['token_type'], token['access_token']
kv_client = KeyVaultClient(KeyVaultAuthentication(auth_callback))
return kv_client
def get_kv_secret(name):
kv_client = _get_kvclient()
secret = kv_client.get_secret("https://placeholder.vault.azure.net/", name, KeyVaultId.version_none).value
return secret
def _get_password():
alphabet = string.ascii_letters + string.digits
password = ''.join(secrets.choice(alphabet) for i in range(20))
return password
config = pulumi.Config('aks')
PREFIX = pulumi.get_stack()
PASSWORD = config.get('password') or _get_password()
SSHKEY = config.get('sshkey') or 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCxinIAIDDCradZPAgX5GzBLv00u4rigOLUbU00E44FrfMTqu5wXiejJ4ycSb1bI+//ZNgaB2UYRbPL7A9OUKY+K4sX5O84Q6DPMjo/90IANHVTLf3xTaSc7hpvXOtIjJTJeiamxClgnTAcR55RV/j9/Wptxa8GGcRmRCcSmJUkx5AZTFI+s8aF0W3aeHHRw7TxNKBuwrX7FDcHyGKvdkFg4OP863Xe5hp5ql1C3XibmCOp1CMPIU2hCmGOy1LGbOf/Pa+QKAdtUSrPNK/jBWvPWo0k02Ii0JtMAdlpVqnJc3czNIp5gEqZCRCGEdkb/kZnJiMRZhmLBYnC8tiMxvZj core@k8s'
LOCATION = config.get('location') or 'westeurope'
NAMESPACE = config.get('namespace') or 'flux'
args_flux = [
"--ssh-keygen-dir=/var/fluxd/keygen",
"--k8s-secret-name=flux-ssh",
"--memcached-hostname=memcached",
"--memcached-service=",
"--git-url=git@ssh.dev.azure.com:v3/xxxxxx",
"--git-branch=master",
"--git-path=flux/cluster-setup,flux/{}".format(PREFIX),
"--git-user=Weave Flux",
"--git-email=support@weave.works",
"--git-set-author=false",
"--git-poll-interval=5m",
"--git-label={}".format(PREFIX),
"--git-timeout=20s",
"--sync-interval=5m",
"--git-ci-skip=false",
"--registry-exclude-image=*",
"--registry-poll-interval=5m",
"--registry-rps=200",
"--registry-burst=125",
"--registry-trace=false"
]
args_memcached = ["-m 64","-p 11211","-I 1m"]
volumeMounts_flux = [
{
"name": "kubedir",
"mountPath": "/root/.kubectl"
},
{
"name": "git-key",
"mountPath": "/etc/fluxd/ssh",
"readOnly": True
},
{
"name": "git-keygen",
"mountPath": "/var/fluxd/keygen"
}
]
volumes_flux = [
{
"name": "kubedir",
"configmap": {
"name": "flux-configmap"
}
},
{
"name": "git-key",
"secret": {
"secretName": "flux-ssh",
"defaultMode": 0o400
}
},
{
"name": "git-keygen",
"emptyDir": {
"medium": "Memory"
}
}
]
def _gen_service(name, ports, custom_provider, dependencies=[], service_type="ClusterIP"):
ports = [{"port": port, "target_port": port,
"name": str(port)} for port in ports]
labels = {
"app": name,
"purpose": "flux"
}
Service(name,
metadata={
"name": name,
"labels": labels,
"namespace": NAMESPACE
},
spec={
"ports": ports,
"selector": labels,
"type": service_type,
"sessionAffinity": "ClientIP"
},
__opts__=ResourceOptions(
provider=custom_provider, depends_on=dependencies)
)
def _gen_deployment(name, ports, image, custom_provider, serviceAccount, args=[], dependencies=[],
replicas=1, resources={}, env={}, volumes=[], volume_mounts=[]):
keys = ['container_port']
ports = [dict.fromkeys(keys, port) for port in ports]
labels = {
"app": name,
"purpose": "flux"
}
container = {
"name": name,
"image": image,
"imagePullPolicy": "Always",
"resources": resources,
"ports": ports,
"args": args,
"env": [
{
"name": "KUBECONFIG",
"value": "/root/.kubectl/config"
}
],
"volumeMounts": volume_mounts
}
Deployment(name,
metadata={
"name": name,
"labels": labels,
"namespace": NAMESPACE
},
spec={
"selector": {
"match_labels": labels
},
"replicas": replicas,
"template": {
"metadata": {
"labels": labels
},
"spec": {
"containers": [
container
],
"serviceAccount": serviceAccount,
"volumes": volumes
}
}
},
__opts__=ResourceOptions(
provider=custom_provider, depends_on=dependencies)
)
def gen_application(name, ports, image, customProvider, dependencies=[], serviceAccount="default", volumes=False, volumeMounts=False):
args = globals()["args_{}".format(name)]
if volumes:
volumes = globals()["volumes_{}".format(name)]
else:
volumes = []
if volumeMounts:
volumeMounts = globals()["volumeMounts_{}".format(name)]
else:
volumeMounts = []
_gen_service(name, ports, customProvider)
_gen_deployment(name, ports, image, customProvider, serviceAccount, args=args, dependencies=dependencies, volumes=volumes, volume_mounts=volumeMounts)
| true | true |
f710d0ba4867c668f8feb836bbb131e08fa75b00 | 938 | py | Python | sysflags/cli.py | JosiahKerley/flags | e39a46060efeab778d2e5f15550ad3801cce1a55 | [
"MIT"
] | null | null | null | sysflags/cli.py | JosiahKerley/flags | e39a46060efeab778d2e5f15550ad3801cce1a55 | [
"MIT"
] | null | null | null | sysflags/cli.py | JosiahKerley/flags | e39a46060efeab778d2e5f15550ad3801cce1a55 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse
from .database import YamlDatabase as DB
from . import utils
def cli():
parser = argparse.ArgumentParser()
parser.add_argument('-S', '--scope', default='directory', help="flag scope")
parser.add_argument('-F', '--output-format', default='yaml', dest='format', help="output format")
parser.add_argument('-g', '--get', help="get a value")
parser.add_argument('-s', '--set', help="set a value")
parser.add_argument('-v', '--value', help="set a value")
parser.add_argument('-d', '--dump', action="store_true", help="dump the database")
args = parser.parse_args()
db = DB(scope=args.scope)
if args.get:
utils.print_formatted_message(db.get(query=args.get), format=args.format)
elif args.set:
utils.print_formatted_message(db.set(query=args.set, value=args.value), format=args.format)
elif args.dump:
utils.print_formatted_message(db.dump(), format=args.format)
| 39.083333 | 99 | 0.697228 |
import argparse
from .database import YamlDatabase as DB
from . import utils
def cli():
parser = argparse.ArgumentParser()
parser.add_argument('-S', '--scope', default='directory', help="flag scope")
parser.add_argument('-F', '--output-format', default='yaml', dest='format', help="output format")
parser.add_argument('-g', '--get', help="get a value")
parser.add_argument('-s', '--set', help="set a value")
parser.add_argument('-v', '--value', help="set a value")
parser.add_argument('-d', '--dump', action="store_true", help="dump the database")
args = parser.parse_args()
db = DB(scope=args.scope)
if args.get:
utils.print_formatted_message(db.get(query=args.get), format=args.format)
elif args.set:
utils.print_formatted_message(db.set(query=args.set, value=args.value), format=args.format)
elif args.dump:
utils.print_formatted_message(db.dump(), format=args.format)
| true | true |
f710d21e432038416b298e6b3a84477228b6564c | 759 | py | Python | spacy_lookups_data/tests/test_da.py | CajuM/spacy-lookups-data | 52d996165f2de57731dbd088493592b1d5dfaaf9 | [
"MIT"
] | null | null | null | spacy_lookups_data/tests/test_da.py | CajuM/spacy-lookups-data | 52d996165f2de57731dbd088493592b1d5dfaaf9 | [
"MIT"
] | null | null | null | spacy_lookups_data/tests/test_da.py | CajuM/spacy-lookups-data | 52d996165f2de57731dbd088493592b1d5dfaaf9 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import unicode_literals
from spacy.lang.da import Danish
import pytest
@pytest.fixture(scope="session")
def da_nlp():
return Danish()
@pytest.mark.parametrize(
"string,lemma",
[
("affaldsgruppernes", "affaldsgruppe"),
("detailhandelsstrukturernes", "detailhandelsstruktur"),
("kolesterols", "kolesterol"),
("åsyns", "åsyn"),
],
)
def test_da_lemmatizer_lookup_assigns(da_nlp, string, lemma):
tokens = da_nlp(string)
assert tokens[0].lemma_ == lemma
@pytest.mark.parametrize(
"text,norm", [("akvarium", "akvarie"), ("bedstemoder", "bedstemor")]
)
def test_da_nlp_norm_exceptions(da_nlp, text, norm):
tokens = da_nlp(text)
assert tokens[0].norm_ == norm
| 23 | 72 | 0.673254 |
from __future__ import unicode_literals
from spacy.lang.da import Danish
import pytest
@pytest.fixture(scope="session")
def da_nlp():
return Danish()
@pytest.mark.parametrize(
"string,lemma",
[
("affaldsgruppernes", "affaldsgruppe"),
("detailhandelsstrukturernes", "detailhandelsstruktur"),
("kolesterols", "kolesterol"),
("åsyns", "åsyn"),
],
)
def test_da_lemmatizer_lookup_assigns(da_nlp, string, lemma):
tokens = da_nlp(string)
assert tokens[0].lemma_ == lemma
@pytest.mark.parametrize(
"text,norm", [("akvarium", "akvarie"), ("bedstemoder", "bedstemor")]
)
def test_da_nlp_norm_exceptions(da_nlp, text, norm):
tokens = da_nlp(text)
assert tokens[0].norm_ == norm
| true | true |
f710d25f24ddec548a668960e642e38c0c271832 | 1,156 | py | Python | var/spack/repos/builtin/packages/libcircle/package.py | alkino/spack | b87ff60c7e23d7b50fac620ad60c8e2537312ebd | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-06-25T15:25:29.000Z | 2020-06-25T15:25:29.000Z | var/spack/repos/builtin/packages/libcircle/package.py | alkino/spack | b87ff60c7e23d7b50fac620ad60c8e2537312ebd | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/libcircle/package.py | alkino/spack | b87ff60c7e23d7b50fac620ad60c8e2537312ebd | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libcircle(AutotoolsPackage):
    """libcircle provides an efficient distributed queue on a cluster,
    using self-stabilizing work stealing."""
    homepage = "https://github.com/hpc/libcircle"
    git = "https://github.com/hpc/libcircle.git"
    url = "https://github.com/hpc/libcircle/releases/download/0.2.1-rc.1/libcircle-0.2.1-rc.1.tar.gz"
    # Known versions; 'master' tracks the development branch and needs the
    # autotools bootstrap below, the release tarballs ship a configure script.
    version('master', branch='master')
    version('0.3.0', sha256='5ce38eb5b3c2b394bca1316310758f276c893dd3f4c15d7bc14ea05d3110ce58', url='https://github.com/hpc/libcircle/releases/download/v0.3/libcircle-0.3.0.tar.gz')
    version('0.2.1-rc.1', sha256='5747f91cf4417023304dcc92fd07e3617ac712ca1eeb698880979bbca3f54865')
    depends_on('mpi')
    @when('@master')
    def autoreconf(self, spec, prefix):
        # Only the git checkout lacks a generated configure script, hence
        # the @master guard.
        with working_dir(self.configure_directory):
            # Bootstrap with autotools
            bash = which('bash')
            bash('./autogen.sh')
| 39.862069 | 186 | 0.701557 |
from spack import *
class Libcircle(AutotoolsPackage):
    """Spack package for libcircle, a distributed work-stealing queue
    library for clusters."""
    homepage = "https://github.com/hpc/libcircle"
    git = "https://github.com/hpc/libcircle.git"
    url = "https://github.com/hpc/libcircle/releases/download/0.2.1-rc.1/libcircle-0.2.1-rc.1.tar.gz"
    version('master', branch='master')
    version('0.3.0', sha256='5ce38eb5b3c2b394bca1316310758f276c893dd3f4c15d7bc14ea05d3110ce58', url='https://github.com/hpc/libcircle/releases/download/v0.3/libcircle-0.3.0.tar.gz')
    version('0.2.1-rc.1', sha256='5747f91cf4417023304dcc92fd07e3617ac712ca1eeb698880979bbca3f54865')
    depends_on('mpi')
    @when('@master')
    def autoreconf(self, spec, prefix):
        """Regenerate the configure script when building from the git branch."""
        with working_dir(self.configure_directory):
            # The git checkout has no configure script; run autogen.sh.
            bash = which('bash')
            bash('./autogen.sh')
| true | true |
f710d28b750d8653844e380b43142853926905f5 | 6,960 | py | Python | macro_benchmark/Mask_RCNN_PyTorch/maskrcnn_benchmark/config/paths_catalog_dbcluster.py | songhappy/ai-matrix | 901078e480c094235c721c49f8141aec7a84e70e | [
"Apache-2.0"
] | 180 | 2018-09-20T07:27:40.000Z | 2022-03-19T07:55:42.000Z | macro_benchmark/Mask_RCNN_PyTorch/maskrcnn_benchmark/config/paths_catalog_dbcluster.py | songhappy/ai-matrix | 901078e480c094235c721c49f8141aec7a84e70e | [
"Apache-2.0"
] | 80 | 2018-09-26T18:55:56.000Z | 2022-02-10T02:03:26.000Z | macro_benchmark/Mask_RCNN_PyTorch/maskrcnn_benchmark/config/paths_catalog_dbcluster.py | songhappy/ai-matrix | 901078e480c094235c721c49f8141aec7a84e70e | [
"Apache-2.0"
] | 72 | 2018-08-30T00:49:15.000Z | 2022-02-15T23:22:40.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2018-2019 NVIDIA CORPORATION. All rights reserved.
"""Centralized catalog of paths."""
import os
class DatasetCatalog(object):
    """Maps dataset names to their on-disk layout under $DATA_DIR.

    Each entry is either COCO-style (img_dir + ann_file) or
    PascalVOC-style (data_dir + split).
    """
    # Read at import time; raises KeyError if DATA_DIR is not set.
    DATA_DIR = os.environ['DATA_DIR']
    DATASETS = {
        "coco_2017_train": {
            "img_dir": "train2017",
            "ann_file": "annotations/instances_train2017.json"
        },
        "coco_2017_val": {
            "img_dir": "val2017",
            "ann_file": "annotations/instances_val2017.json"
        },
        "coco_2014_train": {
            "img_dir": "coco_train2014",
            "ann_file": "annotations/instances_train2014.json"
        },
        "coco_2014_val": {
            "img_dir": "coco_val2014",
            "ann_file": "annotations/instances_val2014.json"
        },
        "coco_2014_minival": {
            "img_dir": "coco_val2014",
            "ann_file": "annotations/instances_minival2014.json"
        },
        "coco_2014_valminusminival": {
            "img_dir": "coco_val2014",
            "ann_file": "annotations/instances_valminusminival2014.json"
        },
        "voc_2007_train": {
            "data_dir": "voc/VOC2007",
            "split": "train"
        },
        "voc_2007_train_cocostyle": {
            "img_dir": "voc/VOC2007/JPEGImages",
            "ann_file": "voc/VOC2007/Annotations/pascal_train2007.json"
        },
        "voc_2007_val": {
            "data_dir": "voc/VOC2007",
            "split": "val"
        },
        "voc_2007_val_cocostyle": {
            "img_dir": "voc/VOC2007/JPEGImages",
            "ann_file": "voc/VOC2007/Annotations/pascal_val2007.json"
        },
        "voc_2007_test": {
            "data_dir": "voc/VOC2007",
            "split": "test"
        },
        "voc_2007_test_cocostyle": {
            "img_dir": "voc/VOC2007/JPEGImages",
            "ann_file": "voc/VOC2007/Annotations/pascal_test2007.json"
        },
        "voc_2012_train": {
            "data_dir": "voc/VOC2012",
            "split": "train"
        },
        "voc_2012_train_cocostyle": {
            "img_dir": "voc/VOC2012/JPEGImages",
            "ann_file": "voc/VOC2012/Annotations/pascal_train2012.json"
        },
        "voc_2012_val": {
            "data_dir": "voc/VOC2012",
            "split": "val"
        },
        "voc_2012_val_cocostyle": {
            "img_dir": "voc/VOC2012/JPEGImages",
            "ann_file": "voc/VOC2012/Annotations/pascal_val2012.json"
        },
        "voc_2012_test": {
            "data_dir": "voc/VOC2012",
            "split": "test"
            # PASCAL VOC2012 doesn't make the test annotations available, so there's no json annotation
        },
        "cityscapes_fine_instanceonly_seg_train_cocostyle": {
            "img_dir": "cityscapes/images",
            "ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_train.json"
        },
        "cityscapes_fine_instanceonly_seg_val_cocostyle": {
            "img_dir": "cityscapes/images",
            "ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_val.json"
        },
        "cityscapes_fine_instanceonly_seg_test_cocostyle": {
            "img_dir": "cityscapes/images",
            "ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_test.json"
        }
    }
    @staticmethod
    def get(name):
        """Return a dict with the dataset factory name and its constructor
        args (paths rooted at DATA_DIR); raises RuntimeError for unknown
        names."""
        if "coco" in name:
            data_dir = DatasetCatalog.DATA_DIR
            attrs = DatasetCatalog.DATASETS[name]
            args = dict(
                root=os.path.join(data_dir, attrs["img_dir"]),
                ann_file=os.path.join(data_dir, attrs["ann_file"]),
            )
            return dict(
                factory="COCODataset",
                args=args,
            )
        elif "voc" in name:
            data_dir = DatasetCatalog.DATA_DIR
            attrs = DatasetCatalog.DATASETS[name]
            args = dict(
                data_dir=os.path.join(data_dir, attrs["data_dir"]),
                split=attrs["split"],
            )
            return dict(
                factory="PascalVOCDataset",
                args=args,
            )
        raise RuntimeError("Dataset not available: {}".format(name))
class ModelCatalog(object):
    """Maps symbolic model identifiers to Detectron S3 download URLs."""

    S3_C2_DETECTRON_URL = "https://dl.fbaipublicfiles.com/detectron"
    C2_IMAGENET_MODELS = {
        "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
        "MSRA/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
        "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
        "MSRA/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
        "FAIR/20171220/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
    }
    C2_DETECTRON_SUFFIX = "output/train/coco_2014_train%3Acoco_2014_valminusminival/generalized_rcnn/model_final.pkl"
    C2_DETECTRON_MODELS = {
        "35857197/e2e_faster_rcnn_R-50-C4_1x": "01_33_49.iAX0mXvW",
        "35857345/e2e_faster_rcnn_R-50-FPN_1x": "01_36_30.cUF7QR7I",
        "35857890/e2e_faster_rcnn_R-101-FPN_1x": "01_38_50.sNxI7sX7",
        "36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "06_31_39.5MIHi1fZ",
        "35858791/e2e_mask_rcnn_R-50-C4_1x": "01_45_57.ZgkA7hPB",
        "35858933/e2e_mask_rcnn_R-50-FPN_1x": "01_48_14.DzEQe4wC",
        "35861795/e2e_mask_rcnn_R-101-FPN_1x": "02_31_37.KqyEK4tT",
        "36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "06_35_59.RZotkLKI",
    }

    @staticmethod
    def get(name):
        """Resolve *name* to a download URL; raise RuntimeError if unknown."""
        if name.startswith("Caffe2Detectron/COCO"):
            return ModelCatalog.get_c2_detectron_12_2017_baselines(name)
        if name.startswith("ImageNetPretrained"):
            return ModelCatalog.get_c2_imagenet_pretrained(name)
        raise RuntimeError("model not present in the catalog {}".format(name))

    @staticmethod
    def get_c2_imagenet_pretrained(name):
        """URL of an ImageNet-pretrained backbone checkpoint."""
        key = name[len("ImageNetPretrained/"):]
        relative_path = ModelCatalog.C2_IMAGENET_MODELS[key]
        return "{}/{}".format(ModelCatalog.S3_C2_DETECTRON_URL, relative_path)

    @staticmethod
    def get_c2_detectron_12_2017_baselines(name):
        """URL of a 12_2017 Detectron baseline checkpoint.

        Catalog keys look like Caffe2Detectron/COCO/<model_id>/<model_name>;
        the hosted path inserts a per-model signature plus a fixed suffix.
        """
        key = name[len("Caffe2Detectron/COCO/"):]
        model_id, model_name = key.split("/")
        signature = ModelCatalog.C2_DETECTRON_MODELS[key]
        unique_name = "{}.yaml.{}".format(model_name, signature)
        return "/".join([
            ModelCatalog.S3_C2_DETECTRON_URL,
            model_id,
            "12_2017_baselines",
            unique_name,
            ModelCatalog.C2_DETECTRON_SUFFIX,
        ])
| 39.545455 | 117 | 0.604167 |
import os
class DatasetCatalog(object):
    """Maps dataset names to their on-disk layout under $DATA_DIR.

    COCO-style entries carry img_dir + ann_file; PascalVOC-style entries
    carry data_dir + split.
    """
    # Read at import time; raises KeyError if DATA_DIR is not set.
    DATA_DIR = os.environ['DATA_DIR']
    DATASETS = {
        "coco_2017_train": {
            "img_dir": "train2017",
            "ann_file": "annotations/instances_train2017.json"
        },
        "coco_2017_val": {
            "img_dir": "val2017",
            "ann_file": "annotations/instances_val2017.json"
        },
        "coco_2014_train": {
            "img_dir": "coco_train2014",
            "ann_file": "annotations/instances_train2014.json"
        },
        "coco_2014_val": {
            "img_dir": "coco_val2014",
            "ann_file": "annotations/instances_val2014.json"
        },
        "coco_2014_minival": {
            "img_dir": "coco_val2014",
            "ann_file": "annotations/instances_minival2014.json"
        },
        "coco_2014_valminusminival": {
            "img_dir": "coco_val2014",
            "ann_file": "annotations/instances_valminusminival2014.json"
        },
        "voc_2007_train": {
            "data_dir": "voc/VOC2007",
            "split": "train"
        },
        "voc_2007_train_cocostyle": {
            "img_dir": "voc/VOC2007/JPEGImages",
            "ann_file": "voc/VOC2007/Annotations/pascal_train2007.json"
        },
        "voc_2007_val": {
            "data_dir": "voc/VOC2007",
            "split": "val"
        },
        "voc_2007_val_cocostyle": {
            "img_dir": "voc/VOC2007/JPEGImages",
            "ann_file": "voc/VOC2007/Annotations/pascal_val2007.json"
        },
        "voc_2007_test": {
            "data_dir": "voc/VOC2007",
            "split": "test"
        },
        "voc_2007_test_cocostyle": {
            "img_dir": "voc/VOC2007/JPEGImages",
            "ann_file": "voc/VOC2007/Annotations/pascal_test2007.json"
        },
        "voc_2012_train": {
            "data_dir": "voc/VOC2012",
            "split": "train"
        },
        "voc_2012_train_cocostyle": {
            "img_dir": "voc/VOC2012/JPEGImages",
            "ann_file": "voc/VOC2012/Annotations/pascal_train2012.json"
        },
        "voc_2012_val": {
            "data_dir": "voc/VOC2012",
            "split": "val"
        },
        "voc_2012_val_cocostyle": {
            "img_dir": "voc/VOC2012/JPEGImages",
            "ann_file": "voc/VOC2012/Annotations/pascal_val2012.json"
        },
        "voc_2012_test": {
            "data_dir": "voc/VOC2012",
            "split": "test"
            # VOC2012 test annotations are not public, so no json annotation
        },
        "cityscapes_fine_instanceonly_seg_train_cocostyle": {
            "img_dir": "cityscapes/images",
            "ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_train.json"
        },
        "cityscapes_fine_instanceonly_seg_val_cocostyle": {
            "img_dir": "cityscapes/images",
            "ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_val.json"
        },
        "cityscapes_fine_instanceonly_seg_test_cocostyle": {
            "img_dir": "cityscapes/images",
            "ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_test.json"
        }
    }
    @staticmethod
    def get(name):
        """Return a dict of dataset factory name plus constructor args
        (paths rooted at DATA_DIR); raises RuntimeError for unknown names."""
        if "coco" in name:
            data_dir = DatasetCatalog.DATA_DIR
            attrs = DatasetCatalog.DATASETS[name]
            args = dict(
                root=os.path.join(data_dir, attrs["img_dir"]),
                ann_file=os.path.join(data_dir, attrs["ann_file"]),
            )
            return dict(
                factory="COCODataset",
                args=args,
            )
        elif "voc" in name:
            data_dir = DatasetCatalog.DATA_DIR
            attrs = DatasetCatalog.DATASETS[name]
            args = dict(
                data_dir=os.path.join(data_dir, attrs["data_dir"]),
                split=attrs["split"],
            )
            return dict(
                factory="PascalVOCDataset",
                args=args,
            )
        raise RuntimeError("Dataset not available: {}".format(name))
class ModelCatalog(object):
    """Maps symbolic model identifiers to Detectron S3 download URLs."""
    S3_C2_DETECTRON_URL = "https://dl.fbaipublicfiles.com/detectron"
    # ImageNet-pretrained backbone checkpoints, keyed by short name.
    C2_IMAGENET_MODELS = {
        "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
        "MSRA/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
        "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
        "MSRA/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
        "FAIR/20171220/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
    }
    C2_DETECTRON_SUFFIX = "output/train/coco_2014_train%3Acoco_2014_valminusminival/generalized_rcnn/model_final.pkl"
    # Per-model upload signatures used in the hosted path.
    C2_DETECTRON_MODELS = {
        "35857197/e2e_faster_rcnn_R-50-C4_1x": "01_33_49.iAX0mXvW",
        "35857345/e2e_faster_rcnn_R-50-FPN_1x": "01_36_30.cUF7QR7I",
        "35857890/e2e_faster_rcnn_R-101-FPN_1x": "01_38_50.sNxI7sX7",
        "36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "06_31_39.5MIHi1fZ",
        "35858791/e2e_mask_rcnn_R-50-C4_1x": "01_45_57.ZgkA7hPB",
        "35858933/e2e_mask_rcnn_R-50-FPN_1x": "01_48_14.DzEQe4wC",
        "35861795/e2e_mask_rcnn_R-101-FPN_1x": "02_31_37.KqyEK4tT",
        "36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "06_35_59.RZotkLKI",
    }
    @staticmethod
    def get(name):
        """Resolve *name* to a download URL; raise RuntimeError if unknown."""
        if name.startswith("Caffe2Detectron/COCO"):
            return ModelCatalog.get_c2_detectron_12_2017_baselines(name)
        if name.startswith("ImageNetPretrained"):
            return ModelCatalog.get_c2_imagenet_pretrained(name)
        raise RuntimeError("model not present in the catalog {}".format(name))
    @staticmethod
    def get_c2_imagenet_pretrained(name):
        """URL of an ImageNet-pretrained backbone checkpoint."""
        prefix = ModelCatalog.S3_C2_DETECTRON_URL
        name = name[len("ImageNetPretrained/"):]
        name = ModelCatalog.C2_IMAGENET_MODELS[name]
        url = "/".join([prefix, name])
        return url
    @staticmethod
    def get_c2_detectron_12_2017_baselines(name):
        """URL of a 12_2017 Detectron baseline checkpoint.

        Keys look like Caffe2Detectron/COCO/<model_id>/<model_name>; the
        hosted path is prefix/<model_id>/12_2017_baselines/
        <model_name>.yaml.<signature>/suffix.
        """
        prefix = ModelCatalog.S3_C2_DETECTRON_URL
        suffix = ModelCatalog.C2_DETECTRON_SUFFIX
        # Strip the identification prefix, leaving "<model_id>/<model_name>".
        name = name[len("Caffe2Detectron/COCO/"):]
        model_id, model_name = name.split("/")
        model_name = "{}.yaml".format(model_name)
        signature = ModelCatalog.C2_DETECTRON_MODELS[name]
        unique_name = ".".join([model_name, signature])
        url = "/".join([prefix, model_id, "12_2017_baselines", unique_name, suffix])
        return url
| true | true |
f710d41f3956bdcf4108ffe790cab0b83a899be7 | 6,249 | py | Python | avod/core/avod_fc_layers/basic_fc_layers.py | AhmedYousriSobhi/avod | 04a8c1edd87811139cbb2318796f0eb226e7c039 | [
"MIT"
] | null | null | null | avod/core/avod_fc_layers/basic_fc_layers.py | AhmedYousriSobhi/avod | 04a8c1edd87811139cbb2318796f0eb226e7c039 | [
"MIT"
] | null | null | null | avod/core/avod_fc_layers/basic_fc_layers.py | AhmedYousriSobhi/avod | 04a8c1edd87811139cbb2318796f0eb226e7c039 | [
"MIT"
] | null | null | null | import tensorflow.compat.v1 as tf
#from tensorflow.contrib import slim
import tf_slim as slim
from avod.core.avod_fc_layers import avod_fc_layer_utils
def build(fc_layers_config,
          input_rois, input_weights,
          num_final_classes, box_rep,
          is_training,
          end_points_collection):
    """Builds the basic fully connected layers.

    Args:
        fc_layers_config: Fully connected layers config object
        input_rois: List of input roi feature maps
        input_weights: List of weights for each input e.g. [1.0, 1.0]
        num_final_classes: Final number of output classes, including
            'Background'
        box_rep: Box representation (e.g. 'box_3d', 'box_8c', 'box_4c')
        is_training: Whether the network is training or evaluating
        end_points_collection: End points collection to add entries to

    Returns:
        cls_logits: Output classification logits
        offsets: Output offsets
        angle_vectors: Output angle vectors (or None)
        end_points: End points dict
    """
    cls_logits, offsets, angle_vectors = _basic_fc_layers(
        num_layers=fc_layers_config.num_layers,
        layer_sizes=fc_layers_config.layer_sizes,
        input_rois=input_rois,
        input_weights=input_weights,
        fusion_method=fc_layers_config.fusion_method,
        l2_weight_decay=fc_layers_config.l2_weight_decay,
        keep_prob=fc_layers_config.keep_prob,
        num_final_classes=num_final_classes,
        box_rep=box_rep,
        is_training=is_training)

    end_points = slim.utils.convert_collection_to_dict(end_points_collection)
    return cls_logits, offsets, angle_vectors, end_points
def build_output_layers(tensor_in,
                        num_final_classes,
                        box_rep,
                        output):
    """Builds one flattened output head.

    Args:
        tensor_in: Input tensor
        num_final_classes: Final number of output classes, including
            'Background'
        box_rep: Box representation (e.g. 'box_3d', 'box_8c', 'box_4c')
        output: Which head to build: 'cls', 'off' or 'ang'

    Returns:
        The output layer, or None when the representation has no such head.
    """
    if output == 'cls':
        # Classification head (includes 'Background').
        return slim.fully_connected(tensor_in,
                                    num_final_classes,
                                    activation_fn=None,
                                    scope='cls_out')
    if output == 'off':
        # Offsets head; size depends on the box representation.
        out_size = avod_fc_layer_utils.OFFSETS_OUTPUT_SIZE[box_rep]
        if out_size > 0:
            return slim.fully_connected(tensor_in,
                                        out_size,
                                        activation_fn=None,
                                        scope='off_out')
        return None
    if output == 'ang':
        # Angle unit vector head; some representations have none.
        out_size = avod_fc_layer_utils.ANG_VECS_OUTPUT_SIZE[box_rep]
        if out_size > 0:
            return slim.fully_connected(tensor_in,
                                        out_size,
                                        activation_fn=None,
                                        scope='ang_out')
        return None
    return None
def _basic_fc_layers(num_layers, layer_sizes,
                     input_rois, input_weights, fusion_method,
                     l2_weight_decay, keep_prob,
                     num_final_classes, box_rep,
                     is_training):
    """Builds the fused feature trunk plus the three output heads.

    Args:
        num_layers: Number of fully connected layers per head
        layer_sizes: List of layer sizes, one entry per layer
        input_rois: List of input roi feature maps
        input_weights: List of weights for each input e.g. [1.0, 1.0]
        fusion_method: Feature fusion method
        l2_weight_decay: L2 weight decay; values <= 0 disable regularization
        keep_prob: Dropout keep probability
        num_final_classes: Final number of output classes, including
            'Background'
        box_rep: Box representation (e.g. 'box_3d', 'box_8c', 'box_4c')
        is_training: Whether the network is training or evaluating

    Returns:
        Tuple (cls_logits, offsets, angles); offsets/angles may be None
        when the box representation has no such output.

    Raises:
        ValueError: If num_layers does not match len(layer_sizes).
    """
    if not num_layers == len(layer_sizes):
        raise ValueError('num_layers does not match length of layer_sizes')

    if l2_weight_decay > 0:
        weights_regularizer = slim.l2_regularizer(l2_weight_decay)
    else:
        weights_regularizer = None

    # Feature fusion
    fused_features = avod_fc_layer_utils.feature_fusion(fusion_method,
                                                        input_rois,
                                                        input_weights)

    output_names = ['cls', 'off', 'ang']

    cls_logits = None
    offsets = None
    angles = None

    with slim.arg_scope(
            [slim.fully_connected],
            weights_regularizer=weights_regularizer):

        # Each head gets its own flatten -> (fc -> dropout)* -> output stack.
        for output in output_names:
            # Flatten
            fc_drop = slim.flatten(fused_features,
                                   scope=output + '_flatten')

            for layer_idx in range(num_layers):
                # Scope numbering starts at 6 so the first FC layer is
                # '<head>_fc6'. (A dead 'fc_name_idx += 1' that was
                # overwritten on every iteration has been removed.)
                fc_name_idx = 6 + layer_idx

                fc_layer = slim.fully_connected(
                    fc_drop, layer_sizes[layer_idx],
                    scope=output + '_fc{}'.format(fc_name_idx))

                fc_drop = slim.dropout(
                    fc_layer,
                    keep_prob=keep_prob,
                    is_training=is_training,
                    scope=output + '_fc{}_drop'.format(fc_name_idx))

            if output == 'cls':
                cls_logits = build_output_layers(fc_drop,
                                                 num_final_classes,
                                                 box_rep,
                                                 output)
            elif output == 'off':
                offsets = build_output_layers(fc_drop,
                                              num_final_classes,
                                              box_rep,
                                              output)
            elif output == 'ang':
                angles = build_output_layers(fc_drop,
                                             num_final_classes,
                                             box_rep,
                                             output)

    return cls_logits, offsets, angles
| 37.872727 | 91 | 0.517843 | import tensorflow.compat.v1 as tf
import tf_slim as slim
from avod.core.avod_fc_layers import avod_fc_layer_utils
def build(fc_layers_config,
          input_rois, input_weights,
          num_final_classes, box_rep,
          is_training,
          end_points_collection):
    """Builds the basic FC layers and collects the network end points.

    Returns a tuple (cls_logits, offsets, angle_vectors, end_points).
    """
    fusion_method = fc_layers_config.fusion_method
    num_layers = fc_layers_config.num_layers
    layer_sizes = fc_layers_config.layer_sizes
    l2_weight_decay = fc_layers_config.l2_weight_decay
    keep_prob = fc_layers_config.keep_prob
    cls_logits, offsets, angle_vectors = \
        _basic_fc_layers(num_layers=num_layers,
                         layer_sizes=layer_sizes,
                         input_rois=input_rois,
                         input_weights=input_weights,
                         fusion_method=fusion_method,
                         l2_weight_decay=l2_weight_decay,
                         keep_prob=keep_prob,
                         num_final_classes=num_final_classes,
                         box_rep=box_rep,
                         is_training=is_training)
    end_points = slim.utils.convert_collection_to_dict(end_points_collection)
    return cls_logits, offsets, angle_vectors, end_points
def build_output_layers(tensor_in,
                        num_final_classes,
                        box_rep,
                        output):
    """Builds one output head ('cls', 'off' or 'ang'); returns None when
    the box representation has no such head."""
    layer_out = None
    if output == 'cls':
        # Classification head (includes 'Background').
        layer_out = slim.fully_connected(tensor_in,
                                         num_final_classes,
                                         activation_fn=None,
                                         scope='cls_out')
    elif output == 'off':
        # Offsets head; size depends on the box representation.
        off_out_size = avod_fc_layer_utils.OFFSETS_OUTPUT_SIZE[box_rep]
        if off_out_size > 0:
            layer_out = slim.fully_connected(tensor_in,
                                             off_out_size,
                                             activation_fn=None,
                                             scope='off_out')
        else:
            layer_out = None
    elif output == 'ang':
        # Angle unit vector head; some representations have none.
        ang_out_size = avod_fc_layer_utils.ANG_VECS_OUTPUT_SIZE[box_rep]
        if ang_out_size > 0:
            layer_out = slim.fully_connected(tensor_in,
                                             ang_out_size,
                                             activation_fn=None,
                                             scope='ang_out')
        else:
            layer_out = None
    return layer_out
def _basic_fc_layers(num_layers, layer_sizes,
                     input_rois, input_weights, fusion_method,
                     l2_weight_decay, keep_prob,
                     num_final_classes, box_rep,
                     is_training):
    """Builds the fused feature trunk plus the three output heads.

    Returns a tuple (cls_logits, offsets, angles); offsets/angles may be
    None when the box representation has no such output.
    """
    if not num_layers == len(layer_sizes):
        raise ValueError('num_layers does not match length of layer_sizes')
    if l2_weight_decay > 0:
        weights_regularizer = slim.l2_regularizer(l2_weight_decay)
    else:
        weights_regularizer = None
    # Fuse the roi feature maps into a single feature map.
    fused_features = avod_fc_layer_utils.feature_fusion(fusion_method,
                                                        input_rois,
                                                        input_weights)
    output_names = ['cls', 'off', 'ang']
    cls_logits = None
    offsets = None
    angles = None
    with slim.arg_scope(
            [slim.fully_connected],
            weights_regularizer=weights_regularizer):
        # Each head gets its own flatten -> (fc -> dropout)* -> output stack.
        for output in output_names:
            fc_drop = slim.flatten(fused_features,
                                   scope=output + '_flatten')
            for layer_idx in range(num_layers):
                # Scope numbering starts at 6, so the first layer is '<head>_fc6'.
                fc_name_idx = 6 + layer_idx
                fc_layer = slim.fully_connected(fc_drop, layer_sizes[layer_idx],
                                                scope=output + '_fc{}'.format(fc_name_idx))
                fc_drop = slim.dropout(fc_layer,
                                       keep_prob=keep_prob,
                                       is_training=is_training,
                                       scope=output + '_fc{}_drop'.format(fc_name_idx))
                # NOTE(review): this increment is dead code; fc_name_idx is
                # reassigned at the top of every iteration.
                fc_name_idx += 1
            if output == 'cls':
                cls_logits= build_output_layers(fc_drop,
                                                num_final_classes,
                                                box_rep,
                                                output)
            elif output == 'off':
                offsets = build_output_layers(fc_drop,
                                              num_final_classes,
                                              box_rep,
                                              output)
            elif output == 'ang':
                angles = build_output_layers(fc_drop,
                                             num_final_classes,
                                             box_rep,
                                             output)
    return cls_logits, offsets, angles
| true | true |
f710d496dd5fcb018569353131c4258483deb47c | 5,937 | py | Python | joints_detectors/Alphapose/yolo/video_demo_half.py | rcourivaud/video-to-pose3D | b908014fe2c531c075c11cee72bb798120f970c2 | [
"MIT"
] | 574 | 2019-07-12T08:35:18.000Z | 2022-03-28T06:37:44.000Z | joints_detectors/Alphapose/yolo/video_demo_half.py | rcourivaud/video-to-pose3D | b908014fe2c531c075c11cee72bb798120f970c2 | [
"MIT"
] | 55 | 2019-07-11T11:31:16.000Z | 2022-03-11T23:54:54.000Z | joints_detectors/Alphapose/yolo/video_demo_half.py | rcourivaud/video-to-pose3D | b908014fe2c531c075c11cee72bb798120f970c2 | [
"MIT"
] | 123 | 2019-09-06T07:08:40.000Z | 2022-03-26T21:50:28.000Z | from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import cv2
from .util import *
from .darknet import Darknet
from .preprocess import prep_image, inp_to_image, letterbox_image
import pandas as pd
import random
import pickle as pkl
import argparse
def get_test_input(input_dim, CUDA):
    """Load the bundled test image and return it as a network-ready tensor.

    Args:
        input_dim: Square input resolution expected by the network.
        CUDA: If True, move the tensor to the GPU.

    Returns:
        A (1, 3, input_dim, input_dim) float Variable with values in [0, 1].

    Raises:
        FileNotFoundError: If the test image cannot be read.
    """
    img = cv2.imread("dog-cycle-car.png")
    if img is None:
        # cv2.imread returns None (no exception) for a missing/unreadable
        # file; fail loudly here instead of crashing later in cv2.resize.
        raise FileNotFoundError("Could not read test image dog-cycle-car.png")
    img = cv2.resize(img, (input_dim, input_dim))
    # BGR -> RGB, HWC -> CHW, then add the batch axis and scale to [0, 1].
    img_ = img[:,:,::-1].transpose((2,0,1))
    img_ = img_[np.newaxis,:,:,:]/255.0
    img_ = torch.from_numpy(img_).float()
    img_ = Variable(img_)
    if CUDA:
        img_ = img_.cuda()
    return img_
def prep_image(img, inp_dim):
    """Prepare an OpenCV image for input to the neural network.

    Returns a tuple (tensor, original image, (width, height)).
    """
    orig_im = img
    dim = orig_im.shape[1], orig_im.shape[0]
    resized = letterbox_image(orig_im, (inp_dim, inp_dim))
    # BGR -> RGB, HWC -> CHW, scale to [0, 1], then add the batch axis.
    chw = resized[:, :, ::-1].transpose((2, 0, 1)).copy()
    tensor = torch.from_numpy(chw).float().div(255.0).unsqueeze(0)
    return tensor, orig_im, dim
def write(x, img):
    """Draw one detection (bounding box plus class label) from *x* onto *img*.

    *x* is a detection row whose columns 1:5 hold the box corners and whose
    last column is the class index; relies on the module-level ``classes``
    and ``colors`` globals set up by the caller.
    """
    top_left = tuple(x[1:3].int())
    bottom_right = tuple(x[3:5].int())
    label = "{0}".format(classes[int(x[-1])])
    color = random.choice(colors)
    cv2.rectangle(img, top_left, bottom_right, color, 1)
    text_w, text_h = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
    label_corner = top_left[0] + text_w + 3, top_left[1] + text_h + 4
    cv2.rectangle(img, top_left, label_corner, color, -1)
    cv2.putText(img, label, (top_left[0], top_left[1] + text_h + 4),
                cv2.FONT_HERSHEY_PLAIN, 1, [225, 255, 255], 1)
    return img
def arg_parse():
    """Parse the command line arguments of the video detection module."""
    parser = argparse.ArgumentParser(description='YOLO v2 Video Detection Module')
    parser.add_argument("--video", dest='video', type=str, default="video.avi",
                        help="Video to run detection upon")
    parser.add_argument("--dataset", dest="dataset", default="pascal",
                        help="Dataset on which the network has been trained")
    parser.add_argument("--confidence", dest="confidence", default=0.5,
                        help="Object Confidence to filter predictions")
    parser.add_argument("--nms_thresh", dest="nms_thresh", default=0.4,
                        help="NMS Threshhold")
    parser.add_argument("--cfg", dest='cfgfile', type=str,
                        default="cfg/yolov3-spp.cfg",
                        help="Config file")
    parser.add_argument("--weights", dest='weightsfile', type=str,
                        default="yolov3-spp.weights",
                        help="weightsfile")
    parser.add_argument("--reso", dest='reso', type=str, default="416",
                        help="Input resolution of the network. Increase to increase accuracy. "
                             "Decrease to increase speed")
    return parser.parse_args()
if __name__ == '__main__':
    # Command-line entry point: run half-precision YOLO detection over a
    # video stream and display annotated frames until 'q' is pressed.
    args = arg_parse()
    confidence = float(args.confidence)
    nms_thesh = float(args.nms_thresh)
    start = 0
    CUDA = torch.cuda.is_available()
    # NOTE(review): duplicate assignment; also 'start' is overwritten below.
    CUDA = torch.cuda.is_available()

    num_classes = 80
    bbox_attrs = 5 + num_classes

    print("Loading network.....")
    model = Darknet(args.cfgfile)
    model.load_weights(args.weightsfile)
    print("Network successfully loaded")

    model.net_info["height"] = args.reso
    inp_dim = int(model.net_info["height"])
    # The network downsamples by 32, so the input must be a multiple of 32.
    assert inp_dim % 32 == 0
    assert inp_dim > 32

    if CUDA:
        # Half precision (fp16) inference on GPU; warm up with a test image.
        model.cuda().half()
        model(get_test_input(inp_dim, CUDA), CUDA)

    model.eval()

    # NOTE(review): hard-coded path; args.video is parsed but unused here.
    videofile = 'video.avi'

    cap = cv2.VideoCapture(videofile)

    assert cap.isOpened(), 'Cannot capture source'

    frames = 0
    start = time.time()
    while cap.isOpened():

        ret, frame = cap.read()
        if ret:

            img, orig_im, dim = prep_image(frame, inp_dim)
            im_dim = torch.FloatTensor(dim).repeat(1,2)

            if CUDA:
                img = img.cuda().half()
                im_dim = im_dim.half().cuda()
                # NOTE(review): these half-precision variants are only bound
                # on the CUDA path; on CPU the names presumably resolve to
                # the full-precision functions from 'from .util import *' —
                # verify.
                write_results = write_results_half
                predict_transform = predict_transform_half

            output = model(Variable(img, volatile = True), CUDA)
            output = write_results(output, confidence, num_classes, nms = True, nms_conf = nms_thesh)

            # write_results returns an int when no detections survive NMS.
            if type(output) == int:
                frames += 1
                print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
                cv2.imshow("frame", orig_im)
                key = cv2.waitKey(1)
                if key & 0xFF == ord('q'):
                    break
                continue

            # Undo the letterbox transform: rescale box coordinates from
            # network input space back to the original frame.
            im_dim = im_dim.repeat(output.size(0), 1)
            scaling_factor = torch.min(inp_dim/im_dim,1)[0].view(-1,1)

            output[:,[1,3]] -= (inp_dim - scaling_factor*im_dim[:,0].view(-1,1))/2
            output[:,[2,4]] -= (inp_dim - scaling_factor*im_dim[:,1].view(-1,1))/2

            output[:,1:5] /= scaling_factor

            # Clamp boxes to the frame bounds.
            for i in range(output.shape[0]):
                output[i, [1,3]] = torch.clamp(output[i, [1,3]], 0.0, im_dim[i,0])
                output[i, [2,4]] = torch.clamp(output[i, [2,4]], 0.0, im_dim[i,1])

            # NOTE(review): reloaded from disk on every frame; could be
            # hoisted out of the loop.
            classes = load_classes('data/coco.names')
            colors = pkl.load(open("pallete", "rb"))

            list(map(lambda x: write(x, orig_im), output))

            cv2.imshow("frame", orig_im)
            key = cv2.waitKey(1)
            if key & 0xFF == ord('q'):
                break
            frames += 1
            print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))

        else:
            break
| 31.247368 | 130 | 0.544551 | from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import cv2
from .util import *
from .darknet import Darknet
from .preprocess import prep_image, inp_to_image, letterbox_image
import pandas as pd
import random
import pickle as pkl
import argparse
def get_test_input(input_dim, CUDA):
    """Load the bundled test image and return it as a (1, 3, input_dim,
    input_dim) float Variable in [0, 1], on GPU when CUDA is True."""
    img = cv2.imread("dog-cycle-car.png")
    img = cv2.resize(img, (input_dim, input_dim))
    # BGR -> RGB and HWC -> CHW, then add the batch axis.
    img_ = img[:,:,::-1].transpose((2,0,1))
    img_ = img_[np.newaxis,:,:,:]/255.0
    img_ = torch.from_numpy(img_).float()
    img_ = Variable(img_)
    if CUDA:
        img_ = img_.cuda()
    return img_
def prep_image(img, inp_dim):
    """Prepare an OpenCV image for the network.

    Returns a tuple (tensor, original image, (width, height)).
    """
    orig_im = img
    dim = orig_im.shape[1], orig_im.shape[0]
    img = (letterbox_image(orig_im, (inp_dim, inp_dim)))
    # BGR -> RGB, HWC -> CHW, scale to [0, 1], add the batch axis.
    img_ = img[:,:,::-1].transpose((2,0,1)).copy()
    img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)
    return img_, orig_im, dim
def write(x, img):
    """Draw one detection (box + class label) from *x* onto *img*.

    Relies on the module-level ``classes`` and ``colors`` globals.
    """
    c1 = tuple(x[1:3].int())
    c2 = tuple(x[3:5].int())
    cls = int(x[-1])
    label = "{0}".format(classes[cls])
    color = random.choice(colors)
    cv2.rectangle(img, c1, c2,color, 1)
    t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]
    # Filled background rectangle for the label text.
    c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
    cv2.rectangle(img, c1, c2,color, -1)
    cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1);
    return img
def arg_parse():
    """Parse the command line arguments of the video detection module."""
    parser = argparse.ArgumentParser(description='YOLO v2 Video Detection Module')
    parser.add_argument("--video", dest = 'video', help =
                        "Video to run detection upon",
                        default = "video.avi", type = str)
    parser.add_argument("--dataset", dest = "dataset", help = "Dataset on which the network has been trained", default = "pascal")
    parser.add_argument("--confidence", dest = "confidence", help = "Object Confidence to filter predictions", default = 0.5)
    parser.add_argument("--nms_thresh", dest = "nms_thresh", help = "NMS Threshhold", default = 0.4)
    parser.add_argument("--cfg", dest = 'cfgfile', help =
                        "Config file",
                        default = "cfg/yolov3-spp.cfg", type = str)
    parser.add_argument("--weights", dest = 'weightsfile', help =
                        "weightsfile",
                        default = "yolov3-spp.weights", type = str)
    parser.add_argument("--reso", dest = 'reso', help =
                        "Input resolution of the network. Increase to increase accuracy. Decrease to increase speed",
                        default = "416", type = str)
    return parser.parse_args()
if __name__ == '__main__':
    # Entry point: half-precision YOLO detection over a video, annotated
    # frames displayed until 'q' is pressed.
    args = arg_parse()
    confidence = float(args.confidence)
    nms_thesh = float(args.nms_thresh)
    start = 0
    CUDA = torch.cuda.is_available()
    # NOTE(review): duplicate assignment; 'start' is also overwritten below.
    CUDA = torch.cuda.is_available()
    num_classes = 80
    bbox_attrs = 5 + num_classes
    print("Loading network.....")
    model = Darknet(args.cfgfile)
    model.load_weights(args.weightsfile)
    print("Network successfully loaded")
    model.net_info["height"] = args.reso
    inp_dim = int(model.net_info["height"])
    # The network downsamples by 32, so the input must be a multiple of 32.
    assert inp_dim % 32 == 0
    assert inp_dim > 32
    if CUDA:
        # fp16 inference on GPU; warm up with a test image.
        model.cuda().half()
        model(get_test_input(inp_dim, CUDA), CUDA)
    model.eval()
    # NOTE(review): hard-coded path; args.video is parsed but unused here.
    videofile = 'video.avi'
    cap = cv2.VideoCapture(videofile)
    assert cap.isOpened(), 'Cannot capture source'
    frames = 0
    start = time.time()
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            img, orig_im, dim = prep_image(frame, inp_dim)
            im_dim = torch.FloatTensor(dim).repeat(1,2)
            if CUDA:
                img = img.cuda().half()
                im_dim = im_dim.half().cuda()
                # NOTE(review): half-precision variants bound only on the
                # CUDA path; on CPU these names presumably come from
                # 'from .util import *' — verify.
                write_results = write_results_half
                predict_transform = predict_transform_half
            output = model(Variable(img, volatile = True), CUDA)
            output = write_results(output, confidence, num_classes, nms = True, nms_conf = nms_thesh)
            # write_results returns an int when no detections survive NMS.
            if type(output) == int:
                frames += 1
                print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
                cv2.imshow("frame", orig_im)
                key = cv2.waitKey(1)
                if key & 0xFF == ord('q'):
                    break
                continue
            # Undo the letterbox transform: rescale boxes to frame space.
            im_dim = im_dim.repeat(output.size(0), 1)
            scaling_factor = torch.min(inp_dim/im_dim,1)[0].view(-1,1)
            output[:,[1,3]] -= (inp_dim - scaling_factor*im_dim[:,0].view(-1,1))/2
            output[:,[2,4]] -= (inp_dim - scaling_factor*im_dim[:,1].view(-1,1))/2
            output[:,1:5] /= scaling_factor
            # Clamp boxes to the frame bounds.
            for i in range(output.shape[0]):
                output[i, [1,3]] = torch.clamp(output[i, [1,3]], 0.0, im_dim[i,0])
                output[i, [2,4]] = torch.clamp(output[i, [2,4]], 0.0, im_dim[i,1])
            # NOTE(review): reloaded from disk every frame; could be hoisted.
            classes = load_classes('data/coco.names')
            colors = pkl.load(open("pallete", "rb"))
            list(map(lambda x: write(x, orig_im), output))
            cv2.imshow("frame", orig_im)
            key = cv2.waitKey(1)
            if key & 0xFF == ord('q'):
                break
            frames += 1
            print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
        else:
            break
| true | true |
f710d4d4c51c7045bd1d12faab81c40a48ed0b78 | 5,382 | py | Python | httpclient.py | YeeSkywalker/CMPUT404-assignment-web-client | 0d1a3d8a3aaaeb30320ed156b085ce5e6f6aaf1e | [
"Apache-2.0"
] | null | null | null | httpclient.py | YeeSkywalker/CMPUT404-assignment-web-client | 0d1a3d8a3aaaeb30320ed156b085ce5e6f6aaf1e | [
"Apache-2.0"
] | null | null | null | httpclient.py | YeeSkywalker/CMPUT404-assignment-web-client | 0d1a3d8a3aaaeb30320ed156b085ce5e6f6aaf1e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
# Copyright 2016 Abram Hindle, https://github.com/tywtyw2002, and https://github.com/treedust
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Do not use urllib's HTTP GET and POST mechanisms.
# Write your own HTTP GET and POST
# The point is to understand what you have to send and get experience with it
import sys
import socket
import re
# you may use urllib to encode data appropriately
from urllib.parse import urlparse
def help():
    """Print usage information for the command line interface."""
    print("httpclient.py [GET/POST] [URL]\n")
class HTTPResponse(object):
    """Container for a parsed HTTP response: status code and body text."""

    def __init__(self, code=200, body=""):
        # Defaults model a successful, empty response.
        self.body = body
        self.code = code
class HTTPClient(object):
    """Minimal hand-rolled HTTP/1.1 client over a raw TCP socket.

    Supports GET and POST.  NOTE(review): for non-http schemes port 443 is
    used but no TLS handshake is performed, so https URLs are not actually
    encrypted -- confirm whether https support is required.
    """

    def connect(self, host, port):
        """Open a TCP connection to (host, port) and keep it on self.socket."""
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((host, port))
        return None

    def get_code(self, data):
        """Return the integer status code parsed from the status line."""
        return int(data.splitlines()[0].split()[1])

    def get_headers(self, data):
        """Return the status text plus header lines of raw response *data*."""
        header = data.split("\r\n\r\n")[0].splitlines()
        # Drop the "HTTP/1.x" token from the status line, keep the rest.
        return " ".join(header[0].split()[1:]) + "\r\n" + "\r\n".join(header[1:]) + "\r\n"

    def get_body(self, data):
        """Return everything after the first CRLFCRLF separator ("" if none).

        Uses partition so bodies that themselves contain a blank line are not
        truncated (split()[1] cut them at the second CRLFCRLF) and so a
        header-only response no longer raises IndexError.
        """
        return data.partition("\r\n\r\n")[2]

    def sendall(self, data):
        """UTF-8 encode *data* and write all of it to the socket."""
        self.socket.sendall(data.encode('utf-8'))

    def close(self):
        """Close the underlying socket."""
        self.socket.close()

    # read everything from the socket
    def recvall(self, sock):
        """Read from *sock* until EOF; return the bytes decoded as UTF-8.

        Undecodable bytes are replaced rather than raising, since servers may
        send payloads that are not valid UTF-8.
        """
        buffer = bytearray()
        while True:
            part = sock.recv(4096)
            if not part:
                break
            buffer.extend(part)
        return buffer.decode('utf-8', errors='replace')

    def _parse_target(self, url):
        """Return (host, port, path) extracted from *url*.

        Port defaults to 80 for http and 443 for anything else; path
        defaults to "/".
        """
        parsed_url = urlparse(url)
        host = parsed_url.hostname
        port = parsed_url.port
        if not port:
            port = 80 if parsed_url.scheme.lower() == 'http' else 443
        path = parsed_url.path if parsed_url.path else "/"
        return host, port, path

    def _report(self, header, body):
        """Echo the response header and body to stdout (debug aid)."""
        print("\n#####Response Header#####")
        print(header)
        print("#######################\n")
        print("\n*****Response Body*****")
        print(body)
        print("***********************\n")

    def GET(self, url, args=None):
        """Send an HTTP GET to *url* and return an HTTPResponse.

        *args* is accepted for interface symmetry with POST but is unused.
        """
        host, port, path = self._parse_target(url)
        query = urlparse(url).query
        if query:
            path += "?" + query
        self.connect(host, port)
        request = "GET {} HTTP/1.1\r\n".format(path)
        request += "Host: {}\r\n".format(host)
        request += "Accept: */*\r\n"
        request += "Connection: close\r\n\r\n"
        self.sendall(request)
        response = self.recvall(self.socket)
        self.close()
        code = self.get_code(response)
        body = self.get_body(response)
        self._report(self.get_headers(response), body)
        return HTTPResponse(code, body)

    def POST(self, url, args=None):
        """Send a form-encoded HTTP POST to *url* and return an HTTPResponse.

        *args* (dict or None) is percent-encoded as
        application/x-www-form-urlencoded, so keys and values containing
        '=', '&' or spaces are now escaped correctly.
        """
        from urllib.parse import urlencode  # local import: only POST needs it
        host, port, path = self._parse_target(url)
        content = urlencode(args) if args else ""
        self.connect(host, port)
        request = "POST {} HTTP/1.1\r\n".format(path)
        request += "Host: {}\r\n".format(host)
        request += "Content-Type: application/x-www-form-urlencoded\r\n"
        request += "Content-Length: {}\r\n".format(len(content))
        # Connection: close so recvall() sees EOF instead of hanging on a
        # keep-alive connection (the original request omitted this header).
        request += "Connection: close\r\n\r\n"
        # Send exactly Content-Length bytes of body -- the original appended
        # an extra trailing CRLFCRLF beyond the declared length.
        request += content
        self.sendall(request)
        response = self.recvall(self.socket)
        self.close()
        code = self.get_code(response)
        body = self.get_body(response)
        self._report(self.get_headers(response), body)
        return HTTPResponse(code, body)

    def command(self, url, command="GET", args=None):
        """Dispatch to POST when *command* == "POST", otherwise GET."""
        if command == "POST":
            return self.POST(url, args)
        else:
            return self.GET(url, args)
if __name__ == "__main__":
    # Command-line entry point:
    #   httpclient.py URL            -> GET URL
    #   httpclient.py GET|POST URL   -> run the given verb against URL
    client = HTTPClient()
    command = "GET"
    if (len(sys.argv) <= 1):
        # No arguments: show usage and exit with a failure status.
        help()
        sys.exit(1)
    elif (len(sys.argv) == 3):
        # Two args: argv[1] is the HTTP verb, argv[2] is the target URL.
        print(client.command( sys.argv[2], sys.argv[1] ))
    else:
        # Single argument: treat it as the URL and default to GET.
        print(client.command( sys.argv[1] ))
| 28.17801 | 93 | 0.551839 |
# Write your own HTTP GET and POST
# The point is to understand what you have to send and get experience with it
import sys
import socket
import re
# you may use urllib to encode data appropriately
from urllib.parse import urlparse
def help():
    """Print command-line usage for httpclient.py."""
    print("httpclient.py [GET/POST] [URL]\n")
class HTTPResponse(object):
    """Value object pairing an HTTP status code with the response body."""
    def __init__(self, code=200, body=""):
        # code: integer status from the status line; body: decoded payload.
        self.code = code
        self.body = body
class HTTPClient(object):
    """Hand-written HTTP/1.1 client; builds GET/POST requests over a raw socket."""
    #def get_host_port(self,url):
    def connect(self, host, port):
        """Open a TCP connection to (host, port) and store it on self.socket."""
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((host, port))
        return None
    def get_code(self, data):
        """Parse the integer status code from the response status line."""
        return int(data.splitlines()[0].split()[1])
    def get_headers(self,data):
        """Return the header section of *data*: status text plus header lines."""
        header = data.split("\r\n\r\n")[0].splitlines()
        # First line minus the HTTP-version token, then the remaining headers.
        return " ".join(header[0].split()[1:]) + "\r\n" + "\r\n".join(header[1:]) + "\r\n"
    def get_body(self, data):
        """Return everything after the first blank line (CRLFCRLF) separator."""
        # NOTE(review): split() without maxsplit truncates bodies that
        # themselves contain "\r\n\r\n", and raises IndexError when no
        # separator exists -- confirm whether that matters for callers.
        return data.split("\r\n\r\n")[1]
    def sendall(self, data):
        """Encode *data* as UTF-8 and write all of it to the socket."""
        self.socket.sendall(data.encode('utf-8'))
    def close(self):
        """Close the underlying socket."""
        self.socket.close()
    # read everything from the socket
    def recvall(self, sock):
        """Read from *sock* until EOF and return the payload decoded as UTF-8."""
        buffer = bytearray()
        done = False
        while not done:
            part = sock.recv(1024)
            if (part):
                buffer.extend(part)
            else:
                # Empty recv() means the peer closed the connection.
                done = not part
        return buffer.decode('utf-8')
    def GET(self, url, args=None):
        """Issue an HTTP GET for *url* and return an HTTPResponse.

        *args* is accepted for interface symmetry with POST but is unused.
        """
        code = 500
        body = ""
        parsed_url = urlparse(url)
        host = parsed_url.hostname
        port = parsed_url.port
        # Default the port from the scheme when the URL does not carry one.
        # NOTE(review): port 443 is used for non-http schemes but no TLS
        # handshake is performed, so https URLs will not actually work.
        if not port:
            if parsed_url.scheme.lower() == 'http':
                port = 80
            else:
                port = 443
        path = parsed_url.path if parsed_url.path else "/"
        if parsed_url.query:
            path += "?"
            path += parsed_url.query
        self.connect(host, port)
        # Build the request by hand; Connection: close makes recvall() see EOF.
        request = "GET {} HTTP/1.1\r\n".format(path)
        request += "Host: {}\r\n".format(host)
        request += "Accept: */*\r\n"
        request += "Connection: close\r\n\r\n"
        #print(request)
        self.sendall(request)
        # print("Request Sent")
        response = self.recvall(self.socket)
        # print("Response Recieved")
        self.close()
        code = self.get_code(response)
        body = self.get_body(response)
        header = self.get_headers(response)
        print("\n#####Response Header#####")
        print(header)
        print("#######################\n")
        print("\n*****Response Body*****")
        print(body)
        print("***********************\n")
        return HTTPResponse(code, body)
    def POST(self, url, args=None):
        """Issue a form-encoded HTTP POST for *url*; return an HTTPResponse.

        *args* is a dict of form fields (or None for an empty body).
        NOTE(review): values are joined as key=value&... without percent
        encoding, and no Connection: close header is sent, so recvall()
        may block on keep-alive servers -- confirm.
        """
        code = 500
        body = ""
        content = ""
        parsed_url = urlparse(url)
        host = parsed_url.hostname
        port = parsed_url.port
        # Default the port from the scheme when the URL does not carry one.
        if not port:
            if parsed_url.scheme.lower() == 'http':
                port = 80
            else:
                port = 443
        path = parsed_url.path if parsed_url.path else "/"
        if args:
            # Serialize the form fields as key=value pairs joined by '&'.
            content = ""
            for key, value in args.items():
                content += "{}={}&".format(key, value)
            content = content[:-1]
        content_len = len(content)
        self.connect(host, port)
        request = "POST {} HTTP/1.1\r\n".format(path)
        request += "Host: {}\r\n".format(host)
        request += "Content-Type: {}\r\n".format("application/x-www-form-urlencoded")
        request += "Content-Length: {}\r\n\r\n".format(content_len)
        request += "{}\r\n\r\n".format(content)
        self.sendall(request)
        response = self.recvall(self.socket)
        self.close()
        code = self.get_code(response)
        body = self.get_body(response)
        header = self.get_headers(response)
        print("\n#####Response Header#####")
        print(header)
        print("#######################\n")
        print("\n*****Response Body*****")
        print(body)
        print("***********************\n")
        return HTTPResponse(code, body)
    def command(self, url, command="GET", args=None):
        """Dispatch *command* ("GET" or "POST") against *url*."""
        if (command == "POST"):
            return self.POST( url, args )
        else:
            return self.GET( url, args )
if __name__ == "__main__":
    # CLI entry point: one arg -> GET <url>; two args -> <verb> <url>.
    client = HTTPClient()
    command = "GET"
    if (len(sys.argv) <= 1):
        # Nothing to do: print usage and signal failure.
        help()
        sys.exit(1)
    elif (len(sys.argv) == 3):
        # argv[1] is the HTTP verb, argv[2] is the target URL.
        print(client.command( sys.argv[2], sys.argv[1] ))
    else:
        # Single argument: treat it as the URL; default verb is GET.
        print(client.command( sys.argv[1] ))
| true | true |
f710d56e22dcbfc23572787429824b9582db461d | 43,545 | py | Python | laygo/generators/serdes/des_layout_generator_woM5.py | tinapiao/Software-IC-Automation | 74b23cd94aa6e4658b110e93b5deb635e014f3a6 | [
"BSD-3-Clause"
] | 26 | 2017-07-07T08:06:31.000Z | 2021-11-25T06:41:24.000Z | laygo/generators/serdes/des_layout_generator_woM5.py | tinapiao/Software-IC-Automation | 74b23cd94aa6e4658b110e93b5deb635e014f3a6 | [
"BSD-3-Clause"
] | 9 | 2016-12-28T03:08:29.000Z | 2019-01-30T16:00:28.000Z | laygo/generators/serdes/des_layout_generator_woM5.py | tinapiao/Software-IC-Automation | 74b23cd94aa6e4658b110e93b5deb635e014f3a6 | [
"BSD-3-Clause"
] | 10 | 2018-07-14T01:31:28.000Z | 2021-08-21T10:18:30.000Z | #!/usr/bin/python
########################################################################################################################
#
# Copyright (c) 2014, Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
########################################################################################################################
"""DES library
"""
import laygo
import numpy as np
#from logic_layout_generator import *
from math import log
import yaml
import os
#import logging;logging.basicConfig(level=logging.DEBUG)
def generate_boundary(laygen, objectname_pfix, placement_grid,
                      devname_bottom, devname_top, devname_left, devname_right,
                      shape_bottom=None, shape_top=None, shape_left=None, shape_right=None,
                      transform_bottom=None, transform_top=None, transform_left=None, transform_right=None,
                      origin=np.array([0, 0])):
    """Place a ring of boundary cells to resolve boundary design rules.

    Parameters
    ----------
    laygen : laygo generator object used for place/relplace calls.
    objectname_pfix : str prefix for generated instance names.
    placement_grid : placement grid name.
    devname_bottom/top/left/right : list of template names for each edge.
    shape_* : optional list of np.array([x, y]) mosaic shapes, one per
        template; defaults to 1x1 for every cell.
    transform_* : optional list of orientation strings; defaults to 'R0'.
    origin : placement origin of the first bottom cell (read-only here,
        so the mutable np.array default is safe).

    Returns
    -------
    [dev_bottom, dev_top, dev_left, dev_right] : lists of placed boundary
    instances for each edge.
    """
    pg = placement_grid
    # Fill in default shapes (1x1) and transforms ('R0') where not supplied.
    if shape_bottom is None:
        shape_bottom = [np.array([1, 1]) for d in devname_bottom]
    if shape_top is None:
        shape_top = [np.array([1, 1]) for d in devname_top]
    if shape_left is None:
        shape_left = [np.array([1, 1]) for d in devname_left]
    if shape_right is None:
        shape_right = [np.array([1, 1]) for d in devname_right]
    if transform_bottom is None:
        transform_bottom = ['R0' for d in devname_bottom]
    if transform_top is None:
        transform_top = ['R0' for d in devname_top]
    if transform_left is None:
        transform_left = ['R0' for d in devname_left]
    if transform_right is None:
        transform_right = ['R0' for d in devname_right]
    # Bottom edge: absolute placement for the first cell, then chain rightward.
    dev_bottom = []
    dev_bottom.append(laygen.place("I" + objectname_pfix + 'BNDBTM0', devname_bottom[0], pg, xy=origin,
                                   shape=shape_bottom[0], transform=transform_bottom[0]))
    for i, d in enumerate(devname_bottom[1:]):
        dev_bottom.append(laygen.relplace(name="I" + objectname_pfix + 'BNDBTM' + str(i + 1), templatename=d,
                                          gridname=pg, refinstname=dev_bottom[-1].name,
                                          shape=shape_bottom[i + 1], transform=transform_bottom[i + 1]))
    # Left edge: stack upward starting from the bottom-left cell.
    dev_left = []
    dev_left.append(laygen.relplace(name="I" + objectname_pfix + 'BNDLFT0', templatename=devname_left[0],
                                    gridname=pg, refinstname=dev_bottom[0].name, direction='top',
                                    shape=shape_left[0], transform=transform_left[0]))
    for i, d in enumerate(devname_left[1:]):
        dev_left.append(laygen.relplace(name="I" + objectname_pfix + 'BNDLFT' + str(i + 1), templatename=d,
                                        gridname=pg, refinstname=dev_left[-1].name, direction='top',
                                        shape=shape_left[i + 1], transform=transform_left[i + 1]))
    # Right edge: stack upward starting from the bottom-right cell.
    dev_right = []
    dev_right.append(laygen.relplace(name="I" + objectname_pfix + 'BNDRHT0', templatename=devname_right[0],
                                     gridname=pg, refinstname=dev_bottom[-1].name, direction='top',
                                     shape=shape_right[0], transform=transform_right[0]))
    for i, d in enumerate(devname_right[1:]):
        dev_right.append(laygen.relplace(name="I" + objectname_pfix + 'BNDRHT' + str(i + 1), templatename=d,
                                         gridname=pg, refinstname=dev_right[-1].name, direction='top',
                                         shape=shape_right[i + 1], transform=transform_right[i + 1]))
    # Top edge: chain rightward starting above the top of the left edge.
    dev_top = []
    dev_top.append(laygen.relplace(name="I" + objectname_pfix + 'BNDTOP0', templatename=devname_top[0],
                                   gridname=pg, refinstname=dev_left[-1].name, direction='top',
                                   shape=shape_top[0], transform=transform_top[0]))
    for i, d in enumerate(devname_top[1:]):
        dev_top.append(laygen.relplace(name="I" + objectname_pfix + 'BNDTOP' + str(i + 1), templatename=d,
                                       gridname=pg, refinstname=dev_top[-1].name,
                                       shape=shape_top[i + 1], transform=transform_top[i + 1]))
    # Bug fix: the original reset dev_right to [] here, so callers always
    # received an empty right-edge list; the placed instances are now returned.
    return [dev_bottom, dev_top, dev_left, dev_right]
def generate_deserializer(laygen, objectname_pfix, templib_logic, placement_grid, routing_grid_m2m3,
routing_grid_m4m5, num_des=8, num_flop=1, m_des_dff=1, origin=np.array([0, 0])):
"""generate deserializer """
pg = placement_grid
rg_m2m3 = routing_grid_m2m3
rg_m4m5 = routing_grid_m4m5
tap_name='tap'
#ff_name = 'dff_1x'
#ff_rst_name = 'dff_strsth_1x'
ff_name = 'dff_'+str(m_des_dff)+'x'
ff_rst_name = 'dff_strsth_'+str(m_des_dff)+'x'
#Calculate layout size
x0=num_flop * (2*laygen.templates.get_template(ff_name, templib_logic).xy[1][0] + laygen.templates.get_template(ff_rst_name, templib_logic).xy[1][0]) \
+ 2*laygen.templates.get_template(tap_name, templib_logic).xy[1][0]
num_row=int((num_des/num_flop + 0.99))+1
#boundaries
m_bnd = int(x0 / laygen.templates.get_template('boundary_bottom').xy[1][0])
devname_bnd_left = []
devname_bnd_right = []
transform_bnd_left = []
transform_bnd_right = []
for i in range(num_row):
if i%2==0:
devname_bnd_left += ['nmos4_fast_left', 'pmos4_fast_left']
devname_bnd_right += ['nmos4_fast_right', 'pmos4_fast_right']
transform_bnd_left += ['R0', 'MX']
transform_bnd_right += ['R0', 'MX']
else:
devname_bnd_left += ['pmos4_fast_left', 'nmos4_fast_left']
devname_bnd_right += ['pmos4_fast_right', 'nmos4_fast_right']
transform_bnd_left += ['R0', 'MX']
transform_bnd_right += ['R0', 'MX']
[bnd_bottom, bnd_top, bnd_left, bnd_right] = generate_boundary(laygen, objectname_pfix='BND0',
placement_grid=pg,
devname_bottom=['boundary_bottomleft',
'boundary_bottom',
'boundary_bottomright'],
shape_bottom=[np.array([1, 1]), np.array([m_bnd, 1]),
np.array([1, 1])],
devname_top=['boundary_topleft', 'boundary_top',
'boundary_topright'],
shape_top=[np.array([1, 1]), np.array([m_bnd, 1]),
np.array([1, 1])],
devname_left=devname_bnd_left,
transform_left=transform_bnd_left,
devname_right=devname_bnd_right,
transform_right=transform_bnd_right,
origin=np.array([0, 0]))
#Calculate origins for placement
tap_origin = origin + laygen.get_xy(obj = bnd_bottom[0], gridname = pg) \
+ laygen.get_xy(obj = bnd_bottom[0].template, gridname = pg)
array_origin = origin + laygen.get_xy(obj = bnd_bottom[0], gridname = pg) \
+ laygen.get_xy(obj = bnd_bottom[0].template, gridname = pg) \
+ np.array([laygen.get_xy(obj=laygen.get_template(name = tap_name, libname = templib_logic), gridname = pg)[0], 0])
tapr_origin = tap_origin + m_bnd*np.array([laygen.get_xy(obj=laygen.get_template(name = 'boundary_bottom'), gridname = pg)[0], 0]) \
- np.array([laygen.get_xy(obj=laygen.get_template(name = tap_name, libname = templib_logic), gridname = pg)[0], 0])
FF0_origin = array_origin + np.array([0, laygen.get_xy(obj=laygen.get_template(name = 'inv_1x', libname = templib_logic), gridname = pg)[1]]) + \
np.array([0, laygen.get_xy(obj=laygen.get_template(name = ff_name, libname = templib_logic), gridname = pg)[1]])
# placement
iffout=[]
iffin=[]
iffdiv=[]
iclkbuf=[]
idivbuf=[]
isp1x=[]
itapl=[]
itapr=[]
tf='R0'
if num_flop == 1: #Layout height reduction factor, no reduction
for i in range(num_row):
if i%2==0: tf='R0'
else: tf='MX'
if i==0: #Row for clock buffers
itapl.append(laygen.place(name = "I" + objectname_pfix + 'TAPL0', templatename = tap_name,
gridname = pg, xy=tap_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))
itapr.append(laygen.place(name = "I" + objectname_pfix + 'TAPR0', templatename = tap_name,
gridname = pg, xy=tapr_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))
idivbuf.append(laygen.place(name = "I" + objectname_pfix + 'DIVBUF32x', templatename = 'inv_32x',
gridname = pg, xy=array_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))
idivbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'DIVBUF8x', templatename = 'inv_8x',
gridname = pg, refinstname = idivbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
idivbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'DIVBUF2x', templatename = 'inv_2x',
gridname = pg, refinstname = idivbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
idivbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'DIVBUF1x', templatename = 'inv_1x',
gridname = pg, refinstname = idivbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF1x', templatename = 'inv_1x',
gridname = pg, refinstname = idivbuf[3].name, transform=tf, shape=np.array([1,1]), xy=np.array([0,0]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF2x', templatename = 'inv_2x',
gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF8x', templatename = 'inv_8x',
gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF32x', templatename = 'inv_32x',
gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
else:
itapl.append(laygen.relplace(name = "I" + objectname_pfix + 'TAPL'+str(i), templatename = tap_name,
gridname = pg, refinstname = itapl[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'top', template_libname=templib_logic))
itapr.append(laygen.relplace(name = "I" + objectname_pfix + 'TAPR'+str(i), templatename = tap_name,
gridname = pg, refinstname = itapr[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'top', template_libname=templib_logic))
if i==1: #Reference FF: FFOUT1
iffout.append(laygen.place(name = "I" + objectname_pfix + 'FFOUT1', templatename = ff_name,
gridname = pg, xy=FF0_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))
else:
iffout.append(laygen.relplace(name = "I" + objectname_pfix + 'FFOUT'+str(i), templatename = ff_name,
gridname = pg, refinstname = iffout[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'top', template_libname=templib_logic))
refi = iffout[-1].name
iffin.append(laygen.relplace(name = "I" + objectname_pfix + 'FFIN'+str(i), templatename = ff_name,
gridname = pg, refinstname = refi, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
refi2 = iffin[-1].name
iffdiv.append(laygen.relplace(name = "I" + objectname_pfix + 'FFDIV'+str(i), templatename = ff_rst_name,
gridname = pg, refinstname = refi2, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
if num_flop == 2: #Layout height reduced by half
for i in range(num_row):
if i%2==0: tf='R0'
else: tf='MX'
if i==0: #Low for clock buffers
itapl.append(laygen.place(name = "I" + objectname_pfix + 'TAPL0', templatename = tap_name,
gridname = pg, xy=tap_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))
itapr.append(laygen.place(name = "I" + objectname_pfix + 'TAPR0', templatename = tap_name,
gridname = pg, xy=tapr_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))
idivbuf.append(laygen.place(name = "I" + objectname_pfix + 'DIVBUF32x', templatename = 'inv_32x',
gridname = pg, xy=array_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))
idivbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'DIVBUF8x', templatename = 'inv_8x',
gridname = pg, refinstname = idivbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
idivbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'DIVBUF2x', templatename = 'inv_2x',
gridname = pg, refinstname = idivbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
idivbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'DIVBUF1x', templatename = 'inv_1x',
gridname = pg, refinstname = idivbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF1x', templatename = 'inv_1x',
gridname = pg, refinstname = idivbuf[3].name, transform=tf, shape=np.array([1,1]), xy=np.array([0,0]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF2x', templatename = 'inv_2x',
gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF8x', templatename = 'inv_8x',
gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF32x', templatename = 'inv_32x',
gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
else:
itapl.append(laygen.relplace(name = "I" + objectname_pfix + 'TAPL'+str(i), templatename = tap_name,
gridname = pg, refinstname = itapl[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'top', template_libname=templib_logic))
itapr.append(laygen.relplace(name = "I" + objectname_pfix + 'TAPR'+str(i), templatename = tap_name,
gridname = pg, refinstname = itapr[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'top', template_libname=templib_logic))
if i==1: #Reference FF: FFOUT1 and FFOUT2
iffout.append(laygen.place(name = "I" + objectname_pfix + 'FFOUT1', templatename = ff_name,
gridname = pg, xy=FF0_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))
iffout.append(laygen.relplace(name = "I" + objectname_pfix + 'FFOUT2', templatename = ff_name,
gridname = pg, refinstname = iffout[0].name, transform=tf, shape=np.array([1,1]),
direction = 'right', template_libname=templib_logic))
elif i==(num_row-1): #The last low depending on num_des: even or odd
iffout.append(laygen.relplace(name = "I" + objectname_pfix + 'FFOUT'+str(2*i-1), templatename = ff_name,
gridname = pg, refinstname = iffout[-2].name, transform=tf, shape=np.array([1,1]),
direction = 'top', template_libname=templib_logic))
if num_des%2==0: #If not, space should be placed rather than FF
iffout.append(laygen.relplace(name = "I" + objectname_pfix + 'FFOUT'+str(2*i), templatename = ff_name,
gridname = pg, refinstname = iffout[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'right', template_libname=templib_logic))
else: #FFOUTs will be the reference for FFIN and FFDIV
iffout.append(laygen.relplace(name = "I" + objectname_pfix + 'FFOUT'+str(2*i-1), templatename = ff_name,
gridname = pg, refinstname = iffout[-2].name, transform=tf, shape=np.array([1,1]),
direction = 'top', template_libname=templib_logic))
iffout.append(laygen.relplace(name = "I" + objectname_pfix + 'FFOUT'+str(2*i), templatename = ff_name,
gridname = pg, refinstname = iffout[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'right', template_libname=templib_logic))
for j in range(num_des): #Relplace of FFIN and the left side of FFDIV
if iffout[j].transform=='MX': tf='MX'
else: tf='R0'
iffin.append(laygen.relplace(name = "I" + objectname_pfix + 'FFIN'+str(j+1), templatename = ff_name,
gridname = pg, refinstname = iffout[j].name, transform=tf, shape=np.array([1,1]),
xy=np.array([laygen.get_xy(obj=laygen.get_template(name = ff_name, libname = templib_logic), gridname = pg)[0], 0]), template_libname=templib_logic))
if j%2==0:
iffdiv.append(laygen.relplace(name = "I" + objectname_pfix + 'FFDIV'+str(int(j/2+1)), templatename = ff_rst_name,
gridname = pg, refinstname = iffin[j].name, transform=tf, shape=np.array([1,1]),
xy=np.array([laygen.get_xy(obj=laygen.get_template(name = ff_name, libname = templib_logic), gridname = pg)[0], 0]), template_libname=templib_logic))
for i in range(num_row, num_des+1): #Right side of FFDIV
if num_des%2==1:
if i%2==0: tf='R0'
else: tf='MX'
if num_des%2==0:
if i%2==0: tf='MX'
else: tf='R0'
if i==num_row: #Even: relplaced by top FFDIV, odd: relplaced by second FFDIV from top
iffdiv.append(laygen.relplace(name = "I" + objectname_pfix + 'FFDIV'+str(i), templatename = ff_rst_name,
gridname = pg, refinstname = iffdiv[int(num_des/2)-1].name, transform=tf, shape=np.array([1,1]),
direction = 'right', template_libname=templib_logic))
else:
iffdiv.append(laygen.relplace(name = "I" + objectname_pfix + 'FFDIV'+str(i), templatename = ff_rst_name,
gridname = pg, refinstname = iffdiv[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'bottom', template_libname=templib_logic))
#Space placement at the first row
space_name = 'space_1x'
space4x_name = 'space_4x'
space_width = laygen.get_xy(obj=laygen.get_template(name = space_name, libname = templib_logic), gridname = pg)[0]
space4_width = laygen.get_xy(obj=laygen.get_template(name = space4x_name, libname = templib_logic), gridname = pg)[0]
inv_width=[]
for i in (1,2,8,32):
inv_width.append(laygen.get_xy(obj=laygen.get_template(name = 'inv_' + str(i) + 'x', libname = templib_logic), gridname = pg)[0])
blank_width = tapr_origin[0] - array_origin[0] - 2 * (inv_width[0]+inv_width[1]+inv_width[2]+inv_width[3])
m_space4 = int(blank_width / space4_width)
m_space1 = int((blank_width-m_space4*space4_width)/space_width)
ispace4=laygen.relplace(name = "I" + objectname_pfix + 'SPACE4', templatename = space4x_name,
gridname = pg, refinstname = iclkbuf[3].name, transform='R0', shape=np.array([m_space4-1,1]),
template_libname=templib_logic)
ispace1=laygen.relplace(name = "I" + objectname_pfix + 'SPACE1', templatename = space_name,
gridname = pg, refinstname = ispace4.name, transform='R0', shape=np.array([m_space1+4,1]),
template_libname=templib_logic)
#Space placement at the last row for odd num_des
m_ff_space = int(laygen.get_xy(obj=laygen.get_template(name = ff_name, libname = templib_logic), gridname = pg)[0] / space_width)
m_ffrst_space = int(laygen.get_xy(obj=laygen.get_template(name = ff_rst_name, libname = templib_logic), gridname = pg)[0] / space_width)
if (num_des%2)==1:
if num_flop==2:
ispace_out=laygen.relplace(name = "I" + objectname_pfix + 'SPACEOUT', templatename = space_name,
gridname = pg, refinstname = iffout[num_des-1].name, transform=iffout[num_des-1].transform, shape=np.array([m_ff_space,1]),
template_libname=templib_logic)
ispace_in=laygen.relplace(name = "I" + objectname_pfix + 'SPACEIN', templatename = space_name,
gridname = pg, refinstname = iffin[num_des-1].name, transform=iffin[num_des-1].transform, shape=np.array([m_ff_space,1]),
template_libname=templib_logic)
ispace_div=laygen.relplace(name = "I" + objectname_pfix + 'SPACEDIV', templatename = space_name,
gridname = pg, refinstname = iffdiv[int(num_des/2)].name, transform=iffdiv[int(num_des/2)].transform, shape=np.array([m_ffrst_space,1]),
template_libname=templib_logic)
#Internal Pins
ffin_in_xy=[]
ffin_in_xy45=[]
ffin_out_xy=[]
ffout_in_xy=[]
ffout_out_xy=[]
ffdiv_in_xy=[]
ffdiv_in_xy45=[]
ffdiv_out_xy=[]
ffdiv_rst_xy=[]
ffdiv_st_xy=[]
for i in range(num_des):
ffin_in_xy.append(laygen.get_inst_pin_xy(iffin[i].name, 'I', rg_m3m4))
ffin_out_xy.append(laygen.get_inst_pin_xy(iffin[i].name, 'O', rg_m3m4))
ffout_in_xy.append(laygen.get_inst_pin_xy(iffout[i].name, 'I', rg_m3m4))
ffout_out_xy.append(laygen.get_inst_pin_xy(iffout[i].name, 'O', rg_m3m4))
ffdiv_in_xy.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'I', rg_m3m4))
ffdiv_out_xy.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'O', rg_m3m4))
ffdiv_rst_xy.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'RST', rg_m3m4))
ffdiv_st_xy.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'ST', rg_m3m4))
ffin_in_xy45.append(laygen.get_inst_pin_xy(iffin[i].name, 'I', rg_m4m5))
ffdiv_in_xy45.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'I', rg_m4m5))
# Route
for i in range(num_des):
if num_flop==1: #Routing offset selection for rows in R0 and MX
if iffin[i].transform=='MX': offset=1
if iffin[i].transform=='R0': offset=4
if iffdiv[i].transform=='MX': offset_div=1
if iffdiv[i].transform=='R0': offset_div=3
if num_flop==2: #Offset_div would be different because of different placement
if i in range(int((num_des+1)/2)):
if iffin[i].transform=='MX':
if i%2==1:
offset=1
else:
offset=8
if iffin[i].transform=='R0': offset=3+i%2
if iffdiv[i].transform=='MX': offset_div=1
if iffdiv[i].transform=='R0': offset_div=3
else:
if iffin[i].transform=='MX':
if i%2==1:
offset=1
else:
offset=8
if iffin[i].transform=='R0': offset=3+i%2
if iffdiv[i].transform=='MX': offset_div=10
if iffdiv[i].transform=='R0': offset_div=13
if i in range(num_des-1):
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #in-to-in
ffin_out_xy[i][0], ffin_in_xy[i+1][0], ffin_out_xy[i][1][1]+7-offset, rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #div-to-div
ffdiv_out_xy[i][0], ffdiv_in_xy[i+1][0]-np.array([0,0]), ffdiv_out_xy[i][1][1]+7-offset_div, rg_m3m4)
#[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
# ffdiv_in_xy[i+1][0], ffdiv_in_xy[i+1][0]-np.array([0,0]), ffdiv_in_xy[i+1][0][1], rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #in-to-out
ffin_out_xy[i][0], ffout_in_xy[i][0], ffin_out_xy[i][1][1]+7-offset, rg_m3m4)
if m_des_dff==1:
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #div feedback
ffdiv_out_xy[num_des-1][0], ffdiv_in_xy[0][0]+np.array([-2,0]), ffdiv_out_xy[num_des-1][1][1]+7-offset_div,
rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #M3-to-M5
ffdiv_in_xy[0][0], ffdiv_in_xy[0][1]+np.array([-2,0]), ffdiv_in_xy[0][0][1], rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)
else:
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #div feedback
ffdiv_out_xy[num_des-1][0], ffdiv_in_xy[0][0]+np.array([-2,0]), ffdiv_out_xy[num_des-1][1][1]+7-offset_div,
rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #M3-to-M5
ffdiv_in_xy[0][0], ffdiv_in_xy[0][1]+np.array([-2,0]), ffdiv_in_xy[0][0][1], rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)
#CLK Buffer
for i in range(3):
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
laygen.get_inst_pin_xy(iclkbuf[i].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(iclkbuf[i + 1].name, 'I', rg_m3m4)[0],
laygen.get_inst_pin_xy(iclkbuf[i].name, 'O', rg_m3m4)[0][1] + i % 2, rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
laygen.get_inst_pin_xy(idivbuf[3 - i].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(idivbuf[2 - i].name, 'I', rg_m3m4)[0],
laygen.get_inst_pin_xy(idivbuf[3 - i].name, 'O', rg_m3m4)[0][1] + i % 2, rg_m3m4)
#DIVCLK Route
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
laygen.get_inst_pin_xy(idivbuf[3].name, 'I', rg_m3m4)[0], laygen.get_inst_pin_xy(iffdiv[0].name, 'I', rg_m3m4)[0],
laygen.get_inst_pin_xy(idivbuf[3].name, 'I', rg_m3m4)[0][1] + 3, rg_m3m4)
for i in range(num_des):
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
laygen.get_inst_pin_xy(idivbuf[0].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(iffout[i].name, 'CLK', rg_m3m4)[0],
laygen.get_inst_pin_xy(idivbuf[0].name, 'O', rg_m3m4)[0][1] + 5, rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
laygen.get_inst_pin_xy(iclkbuf[3].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(iffin[i].name, 'CLK', rg_m3m4)[0],
laygen.get_inst_pin_xy(iclkbuf[3].name, 'O', rg_m3m4)[0][1] + 6, rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
laygen.get_inst_pin_xy(iclkbuf[3].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(iffdiv[i].name, 'CLK', rg_m3m4)[0],
laygen.get_inst_pin_xy(iclkbuf[3].name, 'O', rg_m3m4)[0][1] + 6, rg_m3m4)
#RST Route
for i in range(num_des):
if i in range(int((num_des+1)/2)): #First half of FFDIVs
if not i==int((num_des+1)/2)-1:
rrst=laygen.route(None, laygen.layers['metal'][3], xy0=ffdiv_rst_xy[i][0], xy1=ffdiv_rst_xy[i+1][0], gridname0=rg_m3m4)
rst=laygen.route(None, laygen.layers['metal'][3], xy0=ffdiv_st_xy[i][0], xy1=ffdiv_st_xy[i+1][0], gridname0=rg_m3m4)
#[rrstv, rrsth] = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4],
# ffdiv_rst_xy[i][0], ffdiv_rst_xy[i+1][0], rg_m3m4)
#[rstv, rsth] = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4],
# ffdiv_st_xy[i][0], ffdiv_st_xy[i+1][0], rg_m3m4)
else:
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
ffdiv_rst_xy[i][0], ffdiv_st_xy[i+1][0], ffdiv_rst_xy[i][1][1]+5, rg_m3m4)
else: #Second half of FFDIVs
if not i==num_des-1:
rst=laygen.route(None, laygen.layers['metal'][3], xy0=ffdiv_st_xy[i][0], xy1=ffdiv_st_xy[i+1][0], gridname0=rg_m3m4)
rrst=laygen.route(None, laygen.layers['metal'][3], xy0=ffdiv_rst_xy[i][0], xy1=ffdiv_rst_xy[i+1][0], gridname0=rg_m3m4)
#[rrstv, rrsth] = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4],
# ffdiv_rst_xy[i][0], ffdiv_rst_xy[i+1][0], rg_m3m4)
#[rstv, rsth] = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4],
# ffdiv_st_xy[i][0], ffdiv_st_xy[i+1][0], rg_m3m4)
[rh0, rv0] = laygen.route_hv(laygen.layers['metal'][2], laygen.layers['metal'][3],
laygen.get_inst_pin_xy(iffdiv[0].name, 'VSS', rg_m2m3)[0], laygen.get_inst_pin_xy(iffdiv[0].name, 'ST', rg_m2m3)[0], rg_m2m3)
[rh0, rv0] = laygen.route_hv(laygen.layers['metal'][2], laygen.layers['metal'][3],
laygen.get_inst_pin_xy(iffdiv[num_des - 1].name, 'VSS', rg_m2m3)[0], laygen.get_inst_pin_xy(iffdiv[num_des - 1].name, 'RST', rg_m2m3)[0], rg_m2m3)
#Pin
clkin_xy=laygen.get_inst_pin_xy(iclkbuf[0].name, 'I', rg_m3m4)
rclkin=laygen.route(None, laygen.layers['metal'][3], xy0=clkin_xy[0], xy1=np.array([clkin_xy[0][0],0]), gridname0=rg_m3m4)
laygen.boundary_pin_from_rect(rclkin, rg_m3m4, "clk", laygen.layers['pin'][3], size=0, direction='left')
divin_xy=laygen.get_inst_pin_xy(idivbuf[len(divbuf_list)-1].name, 'I', rg_m3m4)
rdivin=laygen.route(None, laygen.layers['metal'][3], xy0=divin_xy[0], xy1=np.array([divin_xy[0][0],0]), gridname0=rg_m3m4)
laygen.boundary_pin_from_rect(rdivin, rg_m3m4, "div<0>", laygen.layers['pin'][3], size=0, direction='left')
din_xy34=laygen.get_inst_pin_xy(iffin[0].name, 'I', rg_m3m4)
din_xy45=laygen.get_inst_pin_xy(iffin[0].name, 'I', rg_m4m5)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
din_xy34[0], np.array([din_xy34[0][0]-1,0]), din_xy34[0][1],
rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)
rdummy = laygen.route(None, laygen.layers['metal'][4], xy0=din_xy34[0], xy1=din_xy34[0]+np.array([-4,0]), gridname0=rg_m3m4)
laygen.boundary_pin_from_rect(rv1, rg_m3m4, "in", laygen.layers['pin'][3], size=4, direction='bottom')
for i in range(num_des):
datao_xy = laygen.get_inst_pin_xy(iffout[i].name, 'O', rg_m3m4)
laygen.pin(name='dout<'+str(i)+'>', layer=laygen.layers['pin'][3], xy=datao_xy, gridname=rg_m3m4)
clkdiv_xy = laygen.get_inst_pin_xy(iffout[-1].name, 'CLK', rg_m3m4)
laygen.pin(name='clk_div', layer=laygen.layers['pin'][3], xy=clkdiv_xy, gridname=rg_m3m4)
rst_xy34=laygen.get_inst_pin_xy(iffdiv[0].name, 'RST', rg_m3m4)
rst_xy45=laygen.get_inst_pin_xy(iffdiv[0].name, 'RST', rg_m4m5)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
rst_xy34[0], np.array([rst_xy34[0][0]-2,0]), rst_xy34[0][1],
rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)
rdummy = laygen.route(None, laygen.layers['metal'][4], xy0=rst_xy34[0], xy1=rst_xy34[0]+np.array([-4,0]), gridname0=rg_m3m4)
laygen.boundary_pin_from_rect(rv1, rg_m3m4, "RST", laygen.layers['pin'][3], size=4, direction='bottom')
# power pin
pwr_dim=laygen.get_xy(obj =itapl[-1].template, gridname=rg_m2m3)
rvdd = []
rvss = []
if num_row%2==0: rp1='VSS'
else: rp1='VDD'
print(int(pwr_dim[0]/2))
for i in range(0, int(pwr_dim[0]/2)):
rvdd.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i, 0]), xy1=np.array([2*i, 0]), gridname0=rg_m2m3,
refinstname0=itapl[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),
refinstname1=itapl[-1].name, refpinname1=rp1, refinstindex1=np.array([0, 0])))
rvss.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+1, 0]), xy1=np.array([2*i+1, 0]), gridname0=rg_m2m3,
refinstname0=itapl[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),
refinstname1=itapl[-1].name, refpinname1=rp1, refinstindex1=np.array([0, 0])))
laygen.pin(name = 'VDD'+str(2*i-2), layer = laygen.layers['pin'][3], refobj = rvdd[-1], gridname=rg_m2m3, netname='VDD')
laygen.pin(name = 'VSS'+str(2*i-2), layer = laygen.layers['pin'][3], refobj = rvss[-1], gridname=rg_m2m3, netname='VSS')
rvdd.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+2+1, 0]), xy1=np.array([2*i+2+1, 0]), gridname0=rg_m2m3,
refinstname0=itapr[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),
refinstname1=itapr[-1].name, refpinname1=rp1, refinstindex1=np.array([0, 0])))
rvss.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+2, 0]), xy1=np.array([2*i+2, 0]), gridname0=rg_m2m3,
refinstname0=itapr[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),
refinstname1=itapr[-1].name, refpinname1=rp1, refinstindex1=np.array([0, 0])))
laygen.pin(name = 'VDD'+str(2*i-1), layer = laygen.layers['pin'][3], refobj = rvdd[-1], gridname=rg_m2m3, netname='VDD')
laygen.pin(name = 'VSS'+str(2*i-1), layer = laygen.layers['pin'][3], refobj = rvss[-1], gridname=rg_m2m3, netname='VSS')
for i in range(num_row):
for j in range(0, int(pwr_dim[0]/2)):
rvdd.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*j, 0]), xy1=np.array([2*j, 0]), gridname0=rg_m2m3,
refinstname0=itapl[i].name, refpinname0='VDD', refinstindex0=np.array([0, 0]), via0=[[0, 0]],
refinstname1=itapl[i].name, refpinname1='VSS', refinstindex1=np.array([0, 0])))
rvss.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*j+1, 0]), xy1=np.array([2*j+1, 0]), gridname0=rg_m2m3,
refinstname0=itapl[i].name, refpinname0='VDD', refinstindex0=np.array([0, 0]),
refinstname1=itapl[i].name, refpinname1='VSS', refinstindex1=np.array([0, 0]), via1=[[0, 0]]))
rvdd.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*j+2+1, 0]), xy1=np.array([2*j+2+1, 0]), gridname0=rg_m2m3,
refinstname0=itapr[i].name, refpinname0='VDD', refinstindex0=np.array([0, 0]), via0=[[0, 0]],
refinstname1=itapr[i].name, refpinname1='VSS', refinstindex1=np.array([0, 0])))
rvss.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*j+2, 0]), xy1=np.array([2*j+2, 0]), gridname0=rg_m2m3,
refinstname0=itapr[i].name, refpinname0='VDD', refinstindex0=np.array([0, 0]),
refinstname1=itapr[i].name, refpinname1='VSS', refinstindex1=np.array([0, 0]), via1=[[0, 0]]))
if __name__ == '__main__':
laygen = laygo.GridLayoutGenerator(config_file="laygo_config.yaml")
import imp
try:
imp.find_module('bag')
laygen.use_phantom = False
except ImportError:
laygen.use_phantom = True
tech=laygen.tech
utemplib = tech+'_microtemplates_dense'
logictemplib = tech+'_logic_templates'
laygen.load_template(filename=tech+'_microtemplates_dense_templates.yaml', libname=utemplib)
laygen.load_grid(filename=tech+'_microtemplates_dense_grids.yaml', libname=utemplib)
laygen.load_template(filename=logictemplib+'.yaml', libname=logictemplib)
laygen.templates.sel_library(utemplib)
laygen.grids.sel_library(utemplib)
#library load or generation
workinglib = 'serdes_generated'
laygen.add_library(workinglib)
laygen.sel_library(workinglib)
if os.path.exists(workinglib+'.yaml'): #generated layout file exists
laygen.load_template(filename=workinglib+'.yaml', libname=workinglib)
laygen.templates.sel_library(utemplib)
#grid
pg = 'placement_basic' #placement grid
rg_m1m2 = 'route_M1_M2_cmos'
rg_m1m2_thick = 'route_M1_M2_thick'
rg_m2m3 = 'route_M2_M3_cmos'
rg_m3m4 = 'route_M3_M4_basic'
rg_m4m5 = 'route_M4_M5_basic'
rg_m5m6 = 'route_M5_M6_basic'
rg_m1m2_pin = 'route_M1_M2_basic'
rg_m2m3_pin = 'route_M2_M3_basic'
#display
#laygen.display()
#laygen.templates.display()
#laygen.save_template(filename=workinglib+'_templates.yaml', libname=workinglib)
mycell_list = []
#load from preset
load_from_file=True
yamlfile_spec="serdes_spec.yaml"
yamlfile_size="serdes_size.yaml"
if load_from_file==True:
with open(yamlfile_spec, 'r') as stream:
specdict = yaml.load(stream)
with open(yamlfile_size, 'r') as stream:
sizedict = yaml.load(stream)
cell_name='des_1to'+str(specdict['num_des'])
num_des=specdict['num_des']
num_flop=specdict['num_flop']
m_des_dff=sizedict['m_des_dff']
clkbuf_list=sizedict['des_clkbuf_list']
divbuf_list=sizedict['des_divbuf_list']
print(cell_name+" generating")
mycell_list.append(cell_name)
laygen.add_cell(cell_name)
laygen.sel_cell(cell_name)
generate_deserializer(laygen, objectname_pfix='DES', templib_logic=logictemplib,
placement_grid=pg, routing_grid_m2m3=rg_m2m3, routing_grid_m4m5=rg_m4m5, num_des=num_des,
num_flop=num_flop, m_des_dff=m_des_dff, origin=np.array([0, 0]))
laygen.add_template_from_cell()
laygen.save_template(filename=workinglib+'.yaml', libname=workinglib)
#bag export, if bag does not exist, gds export
import imp
try:
imp.find_module('bag')
import bag
prj = bag.BagProject()
for mycell in mycell_list:
laygen.sel_cell(mycell)
laygen.export_BAG(prj, array_delimiter=['[', ']'])
except ImportError:
laygen.export_GDS('output.gds', cellname=mycell_list, layermapfile=tech+".layermap") # change layermapfile
| 72.69616 | 195 | 0.578321 |
f, shape=np.array([1,1]),
template_libname=templib_logic))
idivbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'DIVBUF1x', templatename = 'inv_1x',
gridname = pg, refinstname = idivbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF1x', templatename = 'inv_1x',
gridname = pg, refinstname = idivbuf[3].name, transform=tf, shape=np.array([1,1]), xy=np.array([0,0]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF2x', templatename = 'inv_2x',
gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF8x', templatename = 'inv_8x',
gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
iclkbuf.append(laygen.relplace(name = "I" + objectname_pfix + 'CLKBUF32x', templatename = 'inv_32x',
gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),
template_libname=templib_logic))
else:
itapl.append(laygen.relplace(name = "I" + objectname_pfix + 'TAPL'+str(i), templatename = tap_name,
gridname = pg, refinstname = itapl[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'top', template_libname=templib_logic))
itapr.append(laygen.relplace(name = "I" + objectname_pfix + 'TAPR'+str(i), templatename = tap_name,
gridname = pg, refinstname = itapr[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'top', template_libname=templib_logic))
if i==1:
iffout.append(laygen.place(name = "I" + objectname_pfix + 'FFOUT1', templatename = ff_name,
gridname = pg, xy=FF0_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))
iffout.append(laygen.relplace(name = "I" + objectname_pfix + 'FFOUT2', templatename = ff_name,
gridname = pg, refinstname = iffout[0].name, transform=tf, shape=np.array([1,1]),
direction = 'right', template_libname=templib_logic))
elif i==(num_row-1):
iffout.append(laygen.relplace(name = "I" + objectname_pfix + 'FFOUT'+str(2*i-1), templatename = ff_name,
gridname = pg, refinstname = iffout[-2].name, transform=tf, shape=np.array([1,1]),
direction = 'top', template_libname=templib_logic))
if num_des%2==0:
iffout.append(laygen.relplace(name = "I" + objectname_pfix + 'FFOUT'+str(2*i), templatename = ff_name,
gridname = pg, refinstname = iffout[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'right', template_libname=templib_logic))
else:
iffout.append(laygen.relplace(name = "I" + objectname_pfix + 'FFOUT'+str(2*i-1), templatename = ff_name,
gridname = pg, refinstname = iffout[-2].name, transform=tf, shape=np.array([1,1]),
direction = 'top', template_libname=templib_logic))
iffout.append(laygen.relplace(name = "I" + objectname_pfix + 'FFOUT'+str(2*i), templatename = ff_name,
gridname = pg, refinstname = iffout[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'right', template_libname=templib_logic))
for j in range(num_des):
if iffout[j].transform=='MX': tf='MX'
else: tf='R0'
iffin.append(laygen.relplace(name = "I" + objectname_pfix + 'FFIN'+str(j+1), templatename = ff_name,
gridname = pg, refinstname = iffout[j].name, transform=tf, shape=np.array([1,1]),
xy=np.array([laygen.get_xy(obj=laygen.get_template(name = ff_name, libname = templib_logic), gridname = pg)[0], 0]), template_libname=templib_logic))
if j%2==0:
iffdiv.append(laygen.relplace(name = "I" + objectname_pfix + 'FFDIV'+str(int(j/2+1)), templatename = ff_rst_name,
gridname = pg, refinstname = iffin[j].name, transform=tf, shape=np.array([1,1]),
xy=np.array([laygen.get_xy(obj=laygen.get_template(name = ff_name, libname = templib_logic), gridname = pg)[0], 0]), template_libname=templib_logic))
for i in range(num_row, num_des+1):
if num_des%2==1:
if i%2==0: tf='R0'
else: tf='MX'
if num_des%2==0:
if i%2==0: tf='MX'
else: tf='R0'
if i==num_row:
iffdiv.append(laygen.relplace(name = "I" + objectname_pfix + 'FFDIV'+str(i), templatename = ff_rst_name,
gridname = pg, refinstname = iffdiv[int(num_des/2)-1].name, transform=tf, shape=np.array([1,1]),
direction = 'right', template_libname=templib_logic))
else:
iffdiv.append(laygen.relplace(name = "I" + objectname_pfix + 'FFDIV'+str(i), templatename = ff_rst_name,
gridname = pg, refinstname = iffdiv[-1].name, transform=tf, shape=np.array([1,1]),
direction = 'bottom', template_libname=templib_logic))
space_name = 'space_1x'
space4x_name = 'space_4x'
space_width = laygen.get_xy(obj=laygen.get_template(name = space_name, libname = templib_logic), gridname = pg)[0]
space4_width = laygen.get_xy(obj=laygen.get_template(name = space4x_name, libname = templib_logic), gridname = pg)[0]
inv_width=[]
for i in (1,2,8,32):
inv_width.append(laygen.get_xy(obj=laygen.get_template(name = 'inv_' + str(i) + 'x', libname = templib_logic), gridname = pg)[0])
blank_width = tapr_origin[0] - array_origin[0] - 2 * (inv_width[0]+inv_width[1]+inv_width[2]+inv_width[3])
m_space4 = int(blank_width / space4_width)
m_space1 = int((blank_width-m_space4*space4_width)/space_width)
ispace4=laygen.relplace(name = "I" + objectname_pfix + 'SPACE4', templatename = space4x_name,
gridname = pg, refinstname = iclkbuf[3].name, transform='R0', shape=np.array([m_space4-1,1]),
template_libname=templib_logic)
ispace1=laygen.relplace(name = "I" + objectname_pfix + 'SPACE1', templatename = space_name,
gridname = pg, refinstname = ispace4.name, transform='R0', shape=np.array([m_space1+4,1]),
template_libname=templib_logic)
m_ff_space = int(laygen.get_xy(obj=laygen.get_template(name = ff_name, libname = templib_logic), gridname = pg)[0] / space_width)
m_ffrst_space = int(laygen.get_xy(obj=laygen.get_template(name = ff_rst_name, libname = templib_logic), gridname = pg)[0] / space_width)
if (num_des%2)==1:
if num_flop==2:
ispace_out=laygen.relplace(name = "I" + objectname_pfix + 'SPACEOUT', templatename = space_name,
gridname = pg, refinstname = iffout[num_des-1].name, transform=iffout[num_des-1].transform, shape=np.array([m_ff_space,1]),
template_libname=templib_logic)
ispace_in=laygen.relplace(name = "I" + objectname_pfix + 'SPACEIN', templatename = space_name,
gridname = pg, refinstname = iffin[num_des-1].name, transform=iffin[num_des-1].transform, shape=np.array([m_ff_space,1]),
template_libname=templib_logic)
ispace_div=laygen.relplace(name = "I" + objectname_pfix + 'SPACEDIV', templatename = space_name,
gridname = pg, refinstname = iffdiv[int(num_des/2)].name, transform=iffdiv[int(num_des/2)].transform, shape=np.array([m_ffrst_space,1]),
template_libname=templib_logic)
ffin_in_xy=[]
ffin_in_xy45=[]
ffin_out_xy=[]
ffout_in_xy=[]
ffout_out_xy=[]
ffdiv_in_xy=[]
ffdiv_in_xy45=[]
ffdiv_out_xy=[]
ffdiv_rst_xy=[]
ffdiv_st_xy=[]
for i in range(num_des):
ffin_in_xy.append(laygen.get_inst_pin_xy(iffin[i].name, 'I', rg_m3m4))
ffin_out_xy.append(laygen.get_inst_pin_xy(iffin[i].name, 'O', rg_m3m4))
ffout_in_xy.append(laygen.get_inst_pin_xy(iffout[i].name, 'I', rg_m3m4))
ffout_out_xy.append(laygen.get_inst_pin_xy(iffout[i].name, 'O', rg_m3m4))
ffdiv_in_xy.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'I', rg_m3m4))
ffdiv_out_xy.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'O', rg_m3m4))
ffdiv_rst_xy.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'RST', rg_m3m4))
ffdiv_st_xy.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'ST', rg_m3m4))
ffin_in_xy45.append(laygen.get_inst_pin_xy(iffin[i].name, 'I', rg_m4m5))
ffdiv_in_xy45.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'I', rg_m4m5))
for i in range(num_des):
if num_flop==1:
if iffin[i].transform=='MX': offset=1
if iffin[i].transform=='R0': offset=4
if iffdiv[i].transform=='MX': offset_div=1
if iffdiv[i].transform=='R0': offset_div=3
if num_flop==2:
if i in range(int((num_des+1)/2)):
if iffin[i].transform=='MX':
if i%2==1:
offset=1
else:
offset=8
if iffin[i].transform=='R0': offset=3+i%2
if iffdiv[i].transform=='MX': offset_div=1
if iffdiv[i].transform=='R0': offset_div=3
else:
if iffin[i].transform=='MX':
if i%2==1:
offset=1
else:
offset=8
if iffin[i].transform=='R0': offset=3+i%2
if iffdiv[i].transform=='MX': offset_div=10
if iffdiv[i].transform=='R0': offset_div=13
if i in range(num_des-1):
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
ffin_out_xy[i][0], ffin_in_xy[i+1][0], ffin_out_xy[i][1][1]+7-offset, rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
ffdiv_out_xy[i][0], ffdiv_in_xy[i+1][0]-np.array([0,0]), ffdiv_out_xy[i][1][1]+7-offset_div, rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
ffin_out_xy[i][0], ffout_in_xy[i][0], ffin_out_xy[i][1][1]+7-offset, rg_m3m4)
if m_des_dff==1:
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
ffdiv_out_xy[num_des-1][0], ffdiv_in_xy[0][0]+np.array([-2,0]), ffdiv_out_xy[num_des-1][1][1]+7-offset_div,
rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
ffdiv_in_xy[0][0], ffdiv_in_xy[0][1]+np.array([-2,0]), ffdiv_in_xy[0][0][1], rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)
else:
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
ffdiv_out_xy[num_des-1][0], ffdiv_in_xy[0][0]+np.array([-2,0]), ffdiv_out_xy[num_des-1][1][1]+7-offset_div,
rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
ffdiv_in_xy[0][0], ffdiv_in_xy[0][1]+np.array([-2,0]), ffdiv_in_xy[0][0][1], rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)
for i in range(3):
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
laygen.get_inst_pin_xy(iclkbuf[i].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(iclkbuf[i + 1].name, 'I', rg_m3m4)[0],
laygen.get_inst_pin_xy(iclkbuf[i].name, 'O', rg_m3m4)[0][1] + i % 2, rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
laygen.get_inst_pin_xy(idivbuf[3 - i].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(idivbuf[2 - i].name, 'I', rg_m3m4)[0],
laygen.get_inst_pin_xy(idivbuf[3 - i].name, 'O', rg_m3m4)[0][1] + i % 2, rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
laygen.get_inst_pin_xy(idivbuf[3].name, 'I', rg_m3m4)[0], laygen.get_inst_pin_xy(iffdiv[0].name, 'I', rg_m3m4)[0],
laygen.get_inst_pin_xy(idivbuf[3].name, 'I', rg_m3m4)[0][1] + 3, rg_m3m4)
for i in range(num_des):
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
laygen.get_inst_pin_xy(idivbuf[0].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(iffout[i].name, 'CLK', rg_m3m4)[0],
laygen.get_inst_pin_xy(idivbuf[0].name, 'O', rg_m3m4)[0][1] + 5, rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
laygen.get_inst_pin_xy(iclkbuf[3].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(iffin[i].name, 'CLK', rg_m3m4)[0],
laygen.get_inst_pin_xy(iclkbuf[3].name, 'O', rg_m3m4)[0][1] + 6, rg_m3m4)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
laygen.get_inst_pin_xy(iclkbuf[3].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(iffdiv[i].name, 'CLK', rg_m3m4)[0],
laygen.get_inst_pin_xy(iclkbuf[3].name, 'O', rg_m3m4)[0][1] + 6, rg_m3m4)
for i in range(num_des):
if i in range(int((num_des+1)/2)):
if not i==int((num_des+1)/2)-1:
rrst=laygen.route(None, laygen.layers['metal'][3], xy0=ffdiv_rst_xy[i][0], xy1=ffdiv_rst_xy[i+1][0], gridname0=rg_m3m4)
rst=laygen.route(None, laygen.layers['metal'][3], xy0=ffdiv_st_xy[i][0], xy1=ffdiv_st_xy[i+1][0], gridname0=rg_m3m4)
else:
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
ffdiv_rst_xy[i][0], ffdiv_st_xy[i+1][0], ffdiv_rst_xy[i][1][1]+5, rg_m3m4)
else:
if not i==num_des-1:
rst=laygen.route(None, laygen.layers['metal'][3], xy0=ffdiv_st_xy[i][0], xy1=ffdiv_st_xy[i+1][0], gridname0=rg_m3m4)
rrst=laygen.route(None, laygen.layers['metal'][3], xy0=ffdiv_rst_xy[i][0], xy1=ffdiv_rst_xy[i+1][0], gridname0=rg_m3m4)
[rh0, rv0] = laygen.route_hv(laygen.layers['metal'][2], laygen.layers['metal'][3],
laygen.get_inst_pin_xy(iffdiv[0].name, 'VSS', rg_m2m3)[0], laygen.get_inst_pin_xy(iffdiv[0].name, 'ST', rg_m2m3)[0], rg_m2m3)
[rh0, rv0] = laygen.route_hv(laygen.layers['metal'][2], laygen.layers['metal'][3],
laygen.get_inst_pin_xy(iffdiv[num_des - 1].name, 'VSS', rg_m2m3)[0], laygen.get_inst_pin_xy(iffdiv[num_des - 1].name, 'RST', rg_m2m3)[0], rg_m2m3)
clkin_xy=laygen.get_inst_pin_xy(iclkbuf[0].name, 'I', rg_m3m4)
rclkin=laygen.route(None, laygen.layers['metal'][3], xy0=clkin_xy[0], xy1=np.array([clkin_xy[0][0],0]), gridname0=rg_m3m4)
laygen.boundary_pin_from_rect(rclkin, rg_m3m4, "clk", laygen.layers['pin'][3], size=0, direction='left')
divin_xy=laygen.get_inst_pin_xy(idivbuf[len(divbuf_list)-1].name, 'I', rg_m3m4)
rdivin=laygen.route(None, laygen.layers['metal'][3], xy0=divin_xy[0], xy1=np.array([divin_xy[0][0],0]), gridname0=rg_m3m4)
laygen.boundary_pin_from_rect(rdivin, rg_m3m4, "div<0>", laygen.layers['pin'][3], size=0, direction='left')
din_xy34=laygen.get_inst_pin_xy(iffin[0].name, 'I', rg_m3m4)
din_xy45=laygen.get_inst_pin_xy(iffin[0].name, 'I', rg_m4m5)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
din_xy34[0], np.array([din_xy34[0][0]-1,0]), din_xy34[0][1],
rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)
rdummy = laygen.route(None, laygen.layers['metal'][4], xy0=din_xy34[0], xy1=din_xy34[0]+np.array([-4,0]), gridname0=rg_m3m4)
laygen.boundary_pin_from_rect(rv1, rg_m3m4, "in", laygen.layers['pin'][3], size=4, direction='bottom')
for i in range(num_des):
datao_xy = laygen.get_inst_pin_xy(iffout[i].name, 'O', rg_m3m4)
laygen.pin(name='dout<'+str(i)+'>', layer=laygen.layers['pin'][3], xy=datao_xy, gridname=rg_m3m4)
clkdiv_xy = laygen.get_inst_pin_xy(iffout[-1].name, 'CLK', rg_m3m4)
laygen.pin(name='clk_div', layer=laygen.layers['pin'][3], xy=clkdiv_xy, gridname=rg_m3m4)
rst_xy34=laygen.get_inst_pin_xy(iffdiv[0].name, 'RST', rg_m3m4)
rst_xy45=laygen.get_inst_pin_xy(iffdiv[0].name, 'RST', rg_m4m5)
[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
rst_xy34[0], np.array([rst_xy34[0][0]-2,0]), rst_xy34[0][1],
rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)
rdummy = laygen.route(None, laygen.layers['metal'][4], xy0=rst_xy34[0], xy1=rst_xy34[0]+np.array([-4,0]), gridname0=rg_m3m4)
laygen.boundary_pin_from_rect(rv1, rg_m3m4, "RST", laygen.layers['pin'][3], size=4, direction='bottom')
pwr_dim=laygen.get_xy(obj =itapl[-1].template, gridname=rg_m2m3)
rvdd = []
rvss = []
if num_row%2==0: rp1='VSS'
else: rp1='VDD'
print(int(pwr_dim[0]/2))
for i in range(0, int(pwr_dim[0]/2)):
rvdd.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i, 0]), xy1=np.array([2*i, 0]), gridname0=rg_m2m3,
refinstname0=itapl[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),
refinstname1=itapl[-1].name, refpinname1=rp1, refinstindex1=np.array([0, 0])))
rvss.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+1, 0]), xy1=np.array([2*i+1, 0]), gridname0=rg_m2m3,
refinstname0=itapl[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),
refinstname1=itapl[-1].name, refpinname1=rp1, refinstindex1=np.array([0, 0])))
laygen.pin(name = 'VDD'+str(2*i-2), layer = laygen.layers['pin'][3], refobj = rvdd[-1], gridname=rg_m2m3, netname='VDD')
laygen.pin(name = 'VSS'+str(2*i-2), layer = laygen.layers['pin'][3], refobj = rvss[-1], gridname=rg_m2m3, netname='VSS')
rvdd.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+2+1, 0]), xy1=np.array([2*i+2+1, 0]), gridname0=rg_m2m3,
refinstname0=itapr[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),
refinstname1=itapr[-1].name, refpinname1=rp1, refinstindex1=np.array([0, 0])))
rvss.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+2, 0]), xy1=np.array([2*i+2, 0]), gridname0=rg_m2m3,
refinstname0=itapr[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),
refinstname1=itapr[-1].name, refpinname1=rp1, refinstindex1=np.array([0, 0])))
laygen.pin(name = 'VDD'+str(2*i-1), layer = laygen.layers['pin'][3], refobj = rvdd[-1], gridname=rg_m2m3, netname='VDD')
laygen.pin(name = 'VSS'+str(2*i-1), layer = laygen.layers['pin'][3], refobj = rvss[-1], gridname=rg_m2m3, netname='VSS')
for i in range(num_row):
for j in range(0, int(pwr_dim[0]/2)):
rvdd.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*j, 0]), xy1=np.array([2*j, 0]), gridname0=rg_m2m3,
refinstname0=itapl[i].name, refpinname0='VDD', refinstindex0=np.array([0, 0]), via0=[[0, 0]],
refinstname1=itapl[i].name, refpinname1='VSS', refinstindex1=np.array([0, 0])))
rvss.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*j+1, 0]), xy1=np.array([2*j+1, 0]), gridname0=rg_m2m3,
refinstname0=itapl[i].name, refpinname0='VDD', refinstindex0=np.array([0, 0]),
refinstname1=itapl[i].name, refpinname1='VSS', refinstindex1=np.array([0, 0]), via1=[[0, 0]]))
rvdd.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*j+2+1, 0]), xy1=np.array([2*j+2+1, 0]), gridname0=rg_m2m3,
refinstname0=itapr[i].name, refpinname0='VDD', refinstindex0=np.array([0, 0]), via0=[[0, 0]],
refinstname1=itapr[i].name, refpinname1='VSS', refinstindex1=np.array([0, 0])))
rvss.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*j+2, 0]), xy1=np.array([2*j+2, 0]), gridname0=rg_m2m3,
refinstname0=itapr[i].name, refpinname0='VDD', refinstindex0=np.array([0, 0]),
refinstname1=itapr[i].name, refpinname1='VSS', refinstindex1=np.array([0, 0]), via1=[[0, 0]]))
if __name__ == '__main__':
laygen = laygo.GridLayoutGenerator(config_file="laygo_config.yaml")
import imp
try:
imp.find_module('bag')
laygen.use_phantom = False
except ImportError:
laygen.use_phantom = True
tech=laygen.tech
utemplib = tech+'_microtemplates_dense'
logictemplib = tech+'_logic_templates'
laygen.load_template(filename=tech+'_microtemplates_dense_templates.yaml', libname=utemplib)
laygen.load_grid(filename=tech+'_microtemplates_dense_grids.yaml', libname=utemplib)
laygen.load_template(filename=logictemplib+'.yaml', libname=logictemplib)
laygen.templates.sel_library(utemplib)
laygen.grids.sel_library(utemplib)
workinglib = 'serdes_generated'
laygen.add_library(workinglib)
laygen.sel_library(workinglib)
if os.path.exists(workinglib+'.yaml'):
laygen.load_template(filename=workinglib+'.yaml', libname=workinglib)
laygen.templates.sel_library(utemplib)
pg = 'placement_basic'
rg_m1m2 = 'route_M1_M2_cmos'
rg_m1m2_thick = 'route_M1_M2_thick'
rg_m2m3 = 'route_M2_M3_cmos'
rg_m3m4 = 'route_M3_M4_basic'
rg_m4m5 = 'route_M4_M5_basic'
rg_m5m6 = 'route_M5_M6_basic'
rg_m1m2_pin = 'route_M1_M2_basic'
rg_m2m3_pin = 'route_M2_M3_basic'
mycell_list = []
load_from_file=True
yamlfile_spec="serdes_spec.yaml"
yamlfile_size="serdes_size.yaml"
if load_from_file==True:
with open(yamlfile_spec, 'r') as stream:
specdict = yaml.load(stream)
with open(yamlfile_size, 'r') as stream:
sizedict = yaml.load(stream)
cell_name='des_1to'+str(specdict['num_des'])
num_des=specdict['num_des']
num_flop=specdict['num_flop']
m_des_dff=sizedict['m_des_dff']
clkbuf_list=sizedict['des_clkbuf_list']
divbuf_list=sizedict['des_divbuf_list']
print(cell_name+" generating")
mycell_list.append(cell_name)
laygen.add_cell(cell_name)
laygen.sel_cell(cell_name)
generate_deserializer(laygen, objectname_pfix='DES', templib_logic=logictemplib,
placement_grid=pg, routing_grid_m2m3=rg_m2m3, routing_grid_m4m5=rg_m4m5, num_des=num_des,
num_flop=num_flop, m_des_dff=m_des_dff, origin=np.array([0, 0]))
laygen.add_template_from_cell()
laygen.save_template(filename=workinglib+'.yaml', libname=workinglib)
import imp
try:
imp.find_module('bag')
import bag
prj = bag.BagProject()
for mycell in mycell_list:
laygen.sel_cell(mycell)
laygen.export_BAG(prj, array_delimiter=['[', ']'])
except ImportError:
laygen.export_GDS('output.gds', cellname=mycell_list, layermapfile=tech+".layermap")
| true | true |
f710d5a412901836e4796796cab79d895bf657b5 | 8,929 | py | Python | healthbuddy_backend/fake_news/tests.py | Asfak06/health-buddy | 1a40a35a95bc4179a44445ed0c0b9dc32360e0bc | [
"MIT"
] | null | null | null | healthbuddy_backend/fake_news/tests.py | Asfak06/health-buddy | 1a40a35a95bc4179a44445ed0c0b9dc32360e0bc | [
"MIT"
] | null | null | null | healthbuddy_backend/fake_news/tests.py | Asfak06/health-buddy | 1a40a35a95bc4179a44445ed0c0b9dc32360e0bc | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from django.urls import reverse_lazy
from .models import FakeNews
from ..utils.base_test import AuthenticationTestTemplate
class FakeNewsListTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.get
def _get_basename_url(self):
return "fakenews-list"
def _get_kwargs_url(self):
return {}
def test_action_user_without_permission(self):
"""all logged user has permission."""
pass
def test_list_10_obj_paginated_token(self):
fakenews = []
user = self.create_normal_user("author")
for i in range(0, 11):
fakenews.append(
FakeNews(
author=user,
title=f"test create fakenews title{i}",
subtitle=f"test create fakenews subtitle{i}",
body=f"test create fakenews body{i}",
)
)
FakeNews.objects.bulk_create(fakenews)
tokens = self.get_token_valid_normal_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
resp = self._make_request()
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.data.get("results")), 10)
self.assertEqual(resp.data.get("count"), 11)
self.assertIsNotNone(resp.data.get("next"))
class FakeNewsDetailTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.get
def _get_basename_url(self):
return "fakenews-detail"
def _get_kwargs_url(self):
return {"slug": "test-create-fakenews-title"}
def test_action_user_without_permission(self):
"""all logged user has permission."""
pass
def test_detail_obj_token(self):
tokens = self.get_token_valid_normal_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
fakenews = FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.get(reverse_lazy("fakenews-detail", kwargs={"slug": fakenews.slug}))
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data.get("title"), fakenews.title)
def test_detail_not_found(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
resp = self._client.get(reverse_lazy("fakenews-detail", kwargs={"slug": "slug-not-found"}))
self.assertEqual(resp.status_code, 404)
class FakeNewsCreateTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.post
def _get_basename_url(self):
return "fakenews-list"
def _get_kwargs_url(self):
return {}
def test_action_user_without_permission(self):
"""all logged user has permission."""
pass
def test_create_successful(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
resp = self._client.post(
reverse_lazy("fakenews-list"),
data={
"title": "test create fakenews title",
"subtitle": "test create fakenews subtitle",
"body": "test create fakenews body",
},
)
self.assertEqual(resp.status_code, 201)
self.assertEqual(FakeNews.objects.last().slug, resp.data.get("slug"))
def test_create_already_exists(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.post(
reverse_lazy("fakenews-list"),
data={
"title": "test create fakenews title",
"subtitle": "test create fakenews subtitle",
"body": "test create fakenews body",
},
)
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.data.get("title").pop(0), "fake news with this title already exists.")
def test_create_without_fields_required(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
resp = self._make_request()
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.data.get("title").pop(0), "This field is required.")
class FakeNewsDeleteTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.delete
def _get_basename_url(self):
return "fakenews-detail"
def _get_kwargs_url(self):
return {"slug": "test-create-fakenews-title"}
def test_action_user_without_permission(self):
"""all logged user has permission."""
pass
def test_delete_successful(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
fakenews = FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.delete(reverse_lazy("fakenews-detail", kwargs={"slug": fakenews.slug}))
self.assertEqual(resp.status_code, 204)
class FakeNewsPatchTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.patch
def _get_basename_url(self):
return "fakenews-detail"
def _get_kwargs_url(self):
return {"slug": "test-create-fakenews-title"}
def test_action_user_without_permission(self):
"""all logged user has permission."""
pass
def test_patch_normal_user(self):
tokens = self.get_token_valid_normal_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
fakenews = FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.patch(
reverse_lazy("fakenews-detail", kwargs={"slug": fakenews.slug}),
data={"title": "title updated", "subtitle": "subtitle updated", "body": "body updated"},
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data.get("title"), "title updated")
self.assertEqual(resp.data.get("slug"), "title-updated")
self.assertEqual(FakeNews.objects.last().slug, "title-updated")
class FakeNewsUpdateTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.put
def _get_basename_url(self):
return "fakenews-detail"
def _get_kwargs_url(self):
return {"slug": "test-create-fakenews-title"}
def test_action_user_without_permission(self):
"""all logged user has permission."""
pass
def test_update_normal_user(self):
tokens = self.get_token_valid_normal_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
fakenews = FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.put(
reverse_lazy("fakenews-detail", kwargs={"slug": fakenews.slug}),
data={"title": "title updated", "subtitle": "subtitle updated", "body": "body updated"},
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data.get("title"), "title updated")
self.assertEqual(resp.data.get("slug"), "title-updated")
self.assertEqual(FakeNews.objects.last().slug, "title-updated")
# way to turn a test case class into an abstract
del AuthenticationTestTemplate
| 36.594262 | 100 | 0.648897 | from django.contrib.auth.models import User
from django.urls import reverse_lazy
from .models import FakeNews
from ..utils.base_test import AuthenticationTestTemplate
class FakeNewsListTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.get
def _get_basename_url(self):
return "fakenews-list"
def _get_kwargs_url(self):
return {}
def test_action_user_without_permission(self):
pass
def test_list_10_obj_paginated_token(self):
fakenews = []
user = self.create_normal_user("author")
for i in range(0, 11):
fakenews.append(
FakeNews(
author=user,
title=f"test create fakenews title{i}",
subtitle=f"test create fakenews subtitle{i}",
body=f"test create fakenews body{i}",
)
)
FakeNews.objects.bulk_create(fakenews)
tokens = self.get_token_valid_normal_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
resp = self._make_request()
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.data.get("results")), 10)
self.assertEqual(resp.data.get("count"), 11)
self.assertIsNotNone(resp.data.get("next"))
class FakeNewsDetailTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.get
def _get_basename_url(self):
return "fakenews-detail"
def _get_kwargs_url(self):
return {"slug": "test-create-fakenews-title"}
def test_action_user_without_permission(self):
pass
def test_detail_obj_token(self):
tokens = self.get_token_valid_normal_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
fakenews = FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.get(reverse_lazy("fakenews-detail", kwargs={"slug": fakenews.slug}))
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data.get("title"), fakenews.title)
def test_detail_not_found(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
resp = self._client.get(reverse_lazy("fakenews-detail", kwargs={"slug": "slug-not-found"}))
self.assertEqual(resp.status_code, 404)
class FakeNewsCreateTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.post
def _get_basename_url(self):
return "fakenews-list"
def _get_kwargs_url(self):
return {}
def test_action_user_without_permission(self):
pass
def test_create_successful(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
resp = self._client.post(
reverse_lazy("fakenews-list"),
data={
"title": "test create fakenews title",
"subtitle": "test create fakenews subtitle",
"body": "test create fakenews body",
},
)
self.assertEqual(resp.status_code, 201)
self.assertEqual(FakeNews.objects.last().slug, resp.data.get("slug"))
def test_create_already_exists(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.post(
reverse_lazy("fakenews-list"),
data={
"title": "test create fakenews title",
"subtitle": "test create fakenews subtitle",
"body": "test create fakenews body",
},
)
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.data.get("title").pop(0), "fake news with this title already exists.")
def test_create_without_fields_required(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
resp = self._make_request()
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.data.get("title").pop(0), "This field is required.")
class FakeNewsDeleteTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.delete
def _get_basename_url(self):
return "fakenews-detail"
def _get_kwargs_url(self):
return {"slug": "test-create-fakenews-title"}
def test_action_user_without_permission(self):
pass
def test_delete_successful(self):
tokens = self.get_token_valid_admin_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
fakenews = FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.delete(reverse_lazy("fakenews-detail", kwargs={"slug": fakenews.slug}))
self.assertEqual(resp.status_code, 204)
class FakeNewsPatchTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.patch
def _get_basename_url(self):
return "fakenews-detail"
def _get_kwargs_url(self):
return {"slug": "test-create-fakenews-title"}
def test_action_user_without_permission(self):
pass
def test_patch_normal_user(self):
tokens = self.get_token_valid_normal_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
fakenews = FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.patch(
reverse_lazy("fakenews-detail", kwargs={"slug": fakenews.slug}),
data={"title": "title updated", "subtitle": "subtitle updated", "body": "body updated"},
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data.get("title"), "title updated")
self.assertEqual(resp.data.get("slug"), "title-updated")
self.assertEqual(FakeNews.objects.last().slug, "title-updated")
class FakeNewsUpdateTestCase(AuthenticationTestTemplate):
def _get_callable_client_method_http(self):
return self._client.put
def _get_basename_url(self):
return "fakenews-detail"
def _get_kwargs_url(self):
return {"slug": "test-create-fakenews-title"}
def test_action_user_without_permission(self):
pass
def test_update_normal_user(self):
tokens = self.get_token_valid_normal_user()
token_access = tokens.get("access")
self._client.credentials(HTTP_AUTHORIZATION=f" Bearer {token_access}")
fakenews = FakeNews.objects.create(
author=User.objects.last(),
title="test create fakenews title",
subtitle="test create fakenews subtitle",
body="test create fakenews body",
)
resp = self._client.put(
reverse_lazy("fakenews-detail", kwargs={"slug": fakenews.slug}),
data={"title": "title updated", "subtitle": "subtitle updated", "body": "body updated"},
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data.get("title"), "title updated")
self.assertEqual(resp.data.get("slug"), "title-updated")
self.assertEqual(FakeNews.objects.last().slug, "title-updated")
del AuthenticationTestTemplate
| true | true |
f710d5e5f3aa834fea1af550af11e0df2f6a9ed5 | 1,957 | py | Python | Ronnakornschool/Ronschool.py | plug8955/Ronnakornschool | 94ea8216c83a974a1e904cad612378d970f50e7b | [
"MIT"
] | null | null | null | Ronnakornschool/Ronschool.py | plug8955/Ronnakornschool | 94ea8216c83a974a1e904cad612378d970f50e7b | [
"MIT"
] | null | null | null | Ronnakornschool/Ronschool.py | plug8955/Ronnakornschool | 94ea8216c83a974a1e904cad612378d970f50e7b | [
"MIT"
] | null | null | null | # Ronschool.py
class Student:
def __init__(self,name): # self คือคำพิเศษเพื่อใช้แทนตัวมันเอง / ต้องใส่ทุกฟังชั่นของ class
self.name = name
# student1.name
# self = student1
self.exp = 0
self.lesson = 0
def Hello(self):
print('สวัสดีจ้าาาา ผมชื่อ{}'.format(self.name))
def Coding(self):
print('{}: กำลังเขียนโปรแกรม..'.format(self.name))
self.exp += 5
self.lesson += 1
def ShowEXP(self):
print('- {} มีประสบการณ์ {} EXP'.format(self.name,self.exp))
print('- เรียนไป {} ครั้งแล้ว'.format(self.lesson))
def AddEXP(self,score):
self.exp += score # self.exp = self.exp + score
self.lesson += 1
class SpecialStudent(Student):
def __init__(self,name,father):
super().__init__(name)
self.father = father
mafia = ['Bill Gates','Thomas Edison']
if father in mafia:
self.exp += 100
def AddEXP(self,score):
self.exp += (score * 3)
self.lesson += 1
def AskEXP(self,score=10):
print('ครู!!! ขอคะแนนพิเศษให้ผมหน่อยสิ ซัก {} EXP'.format(score))
self.AddEXP(score)
if __name__ == '__main__':
print('========1 Jan 2021===============')
student0 = SpecialStudent('Mark Zuckerberg','Bill Gates')
student0.AskEXP()
student0.ShowEXP()
student1 = Student('Albert')
print(student1.name)
student1.Hello()
print('--------------')
student2 = Student('Steve')
print(student2.name)
student2.Hello()
print('========2 Jan 2021===============')
print('---------ใครอยากเรียนโค้ดดิ้ง?----(10 exp)------------')
student1.AddEXP(10)
print('========3 Jan 2021===============')
student1.name = 'Albert Einstein' # สามารถเปลี่ยนแปลงชื่อได้ แล้วเชื่อมต่อในฟังชั่นต่างๆเลย
print('ตอนนี้ exp ของแต่ละคนได้เท่าไหร่แล้ว')
print(student1.name,student1.exp)
print(student2.name,student2.exp)
print('========4 Jan 2021===============')
for i in range(5):
student2.Coding()
student1.ShowEXP()
student2.ShowEXP() | 23.865854 | 93 | 0.591211 |
class Student:
def __init__(self,name):
self.name = name
self.exp = 0
self.lesson = 0
def Hello(self):
print('สวัสดีจ้าาาา ผมชื่อ{}'.format(self.name))
def Coding(self):
print('{}: กำลังเขียนโปรแกรม..'.format(self.name))
self.exp += 5
self.lesson += 1
def ShowEXP(self):
print('- {} มีประสบการณ์ {} EXP'.format(self.name,self.exp))
print('- เรียนไป {} ครั้งแล้ว'.format(self.lesson))
def AddEXP(self,score):
self.exp += score
self.lesson += 1
class SpecialStudent(Student):
def __init__(self,name,father):
super().__init__(name)
self.father = father
mafia = ['Bill Gates','Thomas Edison']
if father in mafia:
self.exp += 100
def AddEXP(self,score):
self.exp += (score * 3)
self.lesson += 1
def AskEXP(self,score=10):
print('ครู!!! ขอคะแนนพิเศษให้ผมหน่อยสิ ซัก {} EXP'.format(score))
self.AddEXP(score)
if __name__ == '__main__':
print('========1 Jan 2021===============')
student0 = SpecialStudent('Mark Zuckerberg','Bill Gates')
student0.AskEXP()
student0.ShowEXP()
student1 = Student('Albert')
print(student1.name)
student1.Hello()
print('--------------')
student2 = Student('Steve')
print(student2.name)
student2.Hello()
print('========2 Jan 2021===============')
print('---------ใครอยากเรียนโค้ดดิ้ง?----(10 exp)------------')
student1.AddEXP(10)
print('========3 Jan 2021===============')
student1.name = 'Albert Einstein'
print('ตอนนี้ exp ของแต่ละคนได้เท่าไหร่แล้ว')
print(student1.name,student1.exp)
print(student2.name,student2.exp)
print('========4 Jan 2021===============')
for i in range(5):
student2.Coding()
student1.ShowEXP()
student2.ShowEXP() | true | true |
f710d60b379f14a690eba23786aa9f232b64b970 | 423 | py | Python | mspsmc/cli.py | terhorst/mspsmc | e583e196f9ca633bf783023433eed3cff58831b1 | [
"MIT"
] | null | null | null | mspsmc/cli.py | terhorst/mspsmc | e583e196f9ca633bf783023433eed3cff58831b1 | [
"MIT"
] | null | null | null | mspsmc/cli.py | terhorst/mspsmc | e583e196f9ca633bf783023433eed3cff58831b1 | [
"MIT"
] | null | null | null | """Console script for mspsmc."""
import argparse
import sys
def main():
"""Console script for mspsmc."""
parser = argparse.ArgumentParser()
parser.add_argument("_", nargs="*")
args = parser.parse_args()
print("Arguments: " + str(args._))
print("Replace this message by putting your code into " "mspsmc.cli.main")
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| 22.263158 | 78 | 0.647754 | import argparse
import sys
def main():
parser = argparse.ArgumentParser()
parser.add_argument("_", nargs="*")
args = parser.parse_args()
print("Arguments: " + str(args._))
print("Replace this message by putting your code into " "mspsmc.cli.main")
return 0
if __name__ == "__main__":
sys.exit(main())
| true | true |
f710d60d223dd89617b941d4998103be022a0f2f | 2,689 | py | Python | centroids/challenge/ImageGen.py | cypher-me/HAS-Qualifier-Challenges | bb795303716155dad4a930880a58fecb5d9b50c5 | [
"MIT"
] | 75 | 2020-07-20T20:54:00.000Z | 2022-03-09T09:18:37.000Z | centroids/challenge/ImageGen.py | cypher-me/HAS-Qualifier-Challenges | bb795303716155dad4a930880a58fecb5d9b50c5 | [
"MIT"
] | 3 | 2020-09-13T00:46:49.000Z | 2021-07-06T16:18:22.000Z | centroids/challenge/ImageGen.py | cypher-me/HAS-Qualifier-Challenges | bb795303716155dad4a930880a58fecb5d9b50c5 | [
"MIT"
] | 14 | 2020-07-22T16:34:51.000Z | 2021-09-13T12:19:59.000Z | from scipy import signal
from scipy import misc
from scipy import stats as st
import numpy as np
W = 128
L = 128
Body_Width = 3
Border = Body_Width+1
Points = 10
Noise_Max = 10
Body_Separation = 15
Body_Scale = 30
OvScale = 3
def gkern(kernlen=21, nsig=3):
''' 2D Gaussian Kernel. '''
x = np.linspace(-nsig, nsig, kernlen+1)
kern1d = np.diff(st.norm.cdf(x))
kern2d = np.outer(kern1d, kern1d)
return kern2d/kern2d.sum()
def genBackground():
return np.random.rand(W,L)*(Noise_Max)
def genStarCoords():
while True:
star_cords = np.random.rand(Points,3) # N x [x,y,m]
star_cords = star_cords * np.array([[ W-2*Border , L-2*Border , Body_Scale ]])
star_cords = star_cords + np.ones((Points,3)) * np.array([[ Border, Border, Body_Separation ]])
bad = False
for ii in range(0, Points-1):
x0, y0, m0 = star_cords[ii,:]
for jj in range(ii+1, Points):
x1, y1, m1 = star_cords[jj,:]
if np.abs(x0 - x1) < 4*Border and np.abs(y0 - y1) < 4*Border:
'''
x = np.random.random() * (W-2*Border) + Border
y = np.random.random() * (W-2*Border) + Border
star_cords[jj,0] = x
star_cords[jj,1] = y
'''
bad = True
break
if np.abs(m0 - m1) < 5:
star_cords[jj,2] = m1 + 5
if not bad:
break
return star_cords
def starGauss(OvScale):
gausKern = gkern(Body_Width*OvScale, Body_Width/(OvScale/3))
gausKern = gausKern * (Body_Scale/np.max(np.max(gausKern)))
return gausKern
def genImage(star_cords):
# Overscale it
spots_O = np.zeros((W*OvScale, L*OvScale))
for (x,y,m) in star_cords:
x = OvScale * (x+0.5)
y = OvScale * (y+0.5)
x_0, y_0 = map(int, np.floor([x,y]))
x_1, y_1 = map(int, np.ceil([x,y]))
spots_O[x_0:x_1, y_0:y_1] = m
gausKern = starGauss(OvScale)
spots_B = signal.convolve2d(spots_O, gausKern, boundary='symm', mode='same')
spots = np.zeros((W,L))
for (x,y,m) in star_cords:
x = int(x)
y = int(y)
x0 = max(0, x-Body_Width-1)
x1 = min(W, x+Body_Width+1)
y0 = max(0, y-Body_Width-1)
y1 = min(L, y+Body_Width+1)
for ii in range(x0,x1+1):
for jj in range(y0, y1+1):
spots[ii,jj] = np.mean(spots_B[ii*OvScale:(ii+1)*OvScale, jj*OvScale:(jj+1)*OvScale])
final = np.trunc( np.clip(genBackground() + spots, 0, 255) )
return final
| 30.908046 | 103 | 0.533284 | from scipy import signal
from scipy import misc
from scipy import stats as st
import numpy as np
W = 128
L = 128
Body_Width = 3
Border = Body_Width+1
Points = 10
Noise_Max = 10
Body_Separation = 15
Body_Scale = 30
OvScale = 3
def gkern(kernlen=21, nsig=3):
x = np.linspace(-nsig, nsig, kernlen+1)
kern1d = np.diff(st.norm.cdf(x))
kern2d = np.outer(kern1d, kern1d)
return kern2d/kern2d.sum()
def genBackground():
return np.random.rand(W,L)*(Noise_Max)
def genStarCoords():
while True:
star_cords = np.random.rand(Points,3)
star_cords = star_cords * np.array([[ W-2*Border , L-2*Border , Body_Scale ]])
star_cords = star_cords + np.ones((Points,3)) * np.array([[ Border, Border, Body_Separation ]])
bad = False
for ii in range(0, Points-1):
x0, y0, m0 = star_cords[ii,:]
for jj in range(ii+1, Points):
x1, y1, m1 = star_cords[jj,:]
if np.abs(x0 - x1) < 4*Border and np.abs(y0 - y1) < 4*Border:
bad = True
break
if np.abs(m0 - m1) < 5:
star_cords[jj,2] = m1 + 5
if not bad:
break
return star_cords
def starGauss(OvScale):
gausKern = gkern(Body_Width*OvScale, Body_Width/(OvScale/3))
gausKern = gausKern * (Body_Scale/np.max(np.max(gausKern)))
return gausKern
def genImage(star_cords):
spots_O = np.zeros((W*OvScale, L*OvScale))
for (x,y,m) in star_cords:
x = OvScale * (x+0.5)
y = OvScale * (y+0.5)
x_0, y_0 = map(int, np.floor([x,y]))
x_1, y_1 = map(int, np.ceil([x,y]))
spots_O[x_0:x_1, y_0:y_1] = m
gausKern = starGauss(OvScale)
spots_B = signal.convolve2d(spots_O, gausKern, boundary='symm', mode='same')
spots = np.zeros((W,L))
for (x,y,m) in star_cords:
x = int(x)
y = int(y)
x0 = max(0, x-Body_Width-1)
x1 = min(W, x+Body_Width+1)
y0 = max(0, y-Body_Width-1)
y1 = min(L, y+Body_Width+1)
for ii in range(x0,x1+1):
for jj in range(y0, y1+1):
spots[ii,jj] = np.mean(spots_B[ii*OvScale:(ii+1)*OvScale, jj*OvScale:(jj+1)*OvScale])
final = np.trunc( np.clip(genBackground() + spots, 0, 255) )
return final
| true | true |
f710d6aeca2fcb946784031b7aef37d3f0c06494 | 6,234 | py | Python | encord/configs.py | encord-team/cord-client-python | fe7833f1d51db7cc8a2a362e632fc7dcf4ba6e81 | [
"Apache-2.0"
] | null | null | null | encord/configs.py | encord-team/cord-client-python | fe7833f1d51db7cc8a2a362e632fc7dcf4ba6e81 | [
"Apache-2.0"
] | null | null | null | encord/configs.py | encord-team/cord-client-python | fe7833f1d51db7cc8a2a362e632fc7dcf4ba6e81 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2020 Cord Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import logging
import os
from abc import ABC, abstractmethod
from pickle import NONE
from typing import Dict, Optional
import cryptography
from cryptography.hazmat.primitives.asymmetric.ed25519 import (
Ed25519PrivateKey,
Ed25519PublicKey,
)
from cryptography.hazmat.primitives.serialization import (
Encoding,
PublicFormat,
load_ssh_private_key,
)
import encord.exceptions
ENCORD_DOMAIN = "https://api.cord.tech"
ENCORD_PUBLIC_PATH = "/public"
ENCORD_PUBLIC_USER_PATH = "/public/user"
ENCORD_ENDPOINT = ENCORD_DOMAIN + ENCORD_PUBLIC_PATH
ENCORD_USER_ENDPOINT = ENCORD_DOMAIN + ENCORD_PUBLIC_USER_PATH
WEBSOCKET_PATH = "/websocket"
WEBSOCKET_DOMAIN = "wss://message-api.cord.tech"
WEBSOCKET_ENDPOINT = WEBSOCKET_DOMAIN + WEBSOCKET_PATH
_CORD_PROJECT_ID = "CORD_PROJECT_ID"
_ENCORD_PROJECT_ID = "ENCORD_PROJECT_ID"
_CORD_DATASET_ID = "CORD_DATASET_ID"
_ENCORD_DATASET_ID = "ENCORD_DATASET_ID"
_CORD_API_KEY = "CORD_API_KEY"
_ENCORD_API_KEY = "ENCORD_API_KEY"
READ_TIMEOUT = 180 # In seconds
WRITE_TIMEOUT = 180 # In seconds
CONNECT_TIMEOUT = 180 # In seconds
logger = logging.getLogger(__name__)
class BaseConfig(ABC):
def __init__(self, endpoint: str):
self.read_timeout: int = READ_TIMEOUT
self.write_timeout: int = WRITE_TIMEOUT
self.connect_timeout: int = CONNECT_TIMEOUT
self.endpoint: str = endpoint
@abstractmethod
def define_headers(self, data: str) -> Dict:
pass
class Config(BaseConfig):
"""
Config defining endpoint, project id, API key, and timeouts.
"""
def define_headers(self, data) -> Dict:
return self._headers
def __init__(
self,
resource_id: Optional[str] = None,
api_key: Optional[str] = None,
web_file_path: Optional[str] = None,
domain: Optional[str] = None,
websocket_endpoint: str = WEBSOCKET_ENDPOINT,
):
if resource_id is None:
resource_id = get_env_resource_id()
if api_key is None:
api_key = get_env_api_key()
self.resource_id = resource_id
self.api_key = api_key
self.websocket_endpoint = websocket_endpoint
self._headers = {
"Accept": "application/json",
"Content-Type": "application/json",
"ResourceID": resource_id,
"Authorization": self.api_key,
}
if web_file_path is None:
raise RuntimeError("`web_file_path` must be specified")
if domain is None:
raise RuntimeError("`domain` must be specified")
self.domain = domain
endpoint = domain + web_file_path
super().__init__(endpoint)
logger.info("Initialising Encord client with endpoint: %s and resource_id: %s", endpoint, resource_id)
def get_env_resource_id() -> str:
project_id = os.environ.get(_ENCORD_PROJECT_ID) or os.environ.get(_CORD_PROJECT_ID)
dataset_id = os.environ.get(_ENCORD_DATASET_ID) or os.environ.get(_CORD_DATASET_ID)
if (project_id is not None) and (dataset_id is not None):
raise encord.exceptions.InitialisationError(
message=(
"Found both Project EntityId and Dataset EntityId in os.environ. "
"Please initialise EncordClient by passing resource_id."
)
)
elif project_id is not None:
resource_id = project_id
elif dataset_id is not None:
resource_id = dataset_id
else:
raise encord.exceptions.AuthenticationError(message="Project EntityId or dataset EntityId not provided")
return resource_id
def get_env_api_key() -> str:
api_key = os.environ.get(_ENCORD_API_KEY) or os.environ.get(_CORD_API_KEY)
if api_key is None:
raise encord.exceptions.AuthenticationError(message="API key not provided")
return api_key
class EncordConfig(Config):
def __init__(
self,
resource_id: Optional[str] = None,
api_key: Optional[str] = None,
domain: Optional[str] = None,
):
web_file_path = ENCORD_PUBLIC_PATH
super().__init__(resource_id, api_key, web_file_path=web_file_path, domain=domain)
CordConfig = EncordConfig
class UserConfig(BaseConfig):
def __init__(self, private_key: Ed25519PrivateKey, domain: str = ENCORD_DOMAIN):
self.private_key: Ed25519PrivateKey = private_key
self.public_key: Ed25519PublicKey = private_key.public_key()
self._public_key_hex: str = self.public_key.public_bytes(Encoding.Raw, PublicFormat.Raw).hex()
self.domain = domain
endpoint = domain + ENCORD_PUBLIC_USER_PATH
super().__init__(endpoint)
def define_headers(self, data: str) -> Dict:
hash_builder = hashlib.sha256()
hash_builder.update(data.encode())
contents_hash = hash_builder.digest()
signature = self.private_key.sign(contents_hash)
return {
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": f"{self._public_key_hex}:{signature.hex()}",
}
@staticmethod
def from_ssh_private_key(ssh_private_key: str, password: Optional[str], **kwargs):
key_bytes = ssh_private_key.encode()
password_bytes = password and password.encode()
private_key = cryptography.hazmat.primitives.serialization.load_ssh_private_key(key_bytes, password_bytes)
if isinstance(private_key, Ed25519PrivateKey):
return UserConfig(private_key, **kwargs)
else:
raise ValueError(f"Provided key [{ssh_private_key}] is not an Ed25519 private key")
| 32.300518 | 114 | 0.694097 |
import hashlib
import logging
import os
from abc import ABC, abstractmethod
from pickle import NONE
from typing import Dict, Optional
import cryptography
from cryptography.hazmat.primitives.asymmetric.ed25519 import (
Ed25519PrivateKey,
Ed25519PublicKey,
)
from cryptography.hazmat.primitives.serialization import (
Encoding,
PublicFormat,
load_ssh_private_key,
)
import encord.exceptions
ENCORD_DOMAIN = "https://api.cord.tech"
ENCORD_PUBLIC_PATH = "/public"
ENCORD_PUBLIC_USER_PATH = "/public/user"
ENCORD_ENDPOINT = ENCORD_DOMAIN + ENCORD_PUBLIC_PATH
ENCORD_USER_ENDPOINT = ENCORD_DOMAIN + ENCORD_PUBLIC_USER_PATH
WEBSOCKET_PATH = "/websocket"
WEBSOCKET_DOMAIN = "wss://message-api.cord.tech"
WEBSOCKET_ENDPOINT = WEBSOCKET_DOMAIN + WEBSOCKET_PATH
_CORD_PROJECT_ID = "CORD_PROJECT_ID"
_ENCORD_PROJECT_ID = "ENCORD_PROJECT_ID"
_CORD_DATASET_ID = "CORD_DATASET_ID"
_ENCORD_DATASET_ID = "ENCORD_DATASET_ID"
_CORD_API_KEY = "CORD_API_KEY"
_ENCORD_API_KEY = "ENCORD_API_KEY"
READ_TIMEOUT = 180
WRITE_TIMEOUT = 180
CONNECT_TIMEOUT = 180
logger = logging.getLogger(__name__)
class BaseConfig(ABC):
def __init__(self, endpoint: str):
self.read_timeout: int = READ_TIMEOUT
self.write_timeout: int = WRITE_TIMEOUT
self.connect_timeout: int = CONNECT_TIMEOUT
self.endpoint: str = endpoint
@abstractmethod
def define_headers(self, data: str) -> Dict:
pass
class Config(BaseConfig):
def define_headers(self, data) -> Dict:
return self._headers
def __init__(
self,
resource_id: Optional[str] = None,
api_key: Optional[str] = None,
web_file_path: Optional[str] = None,
domain: Optional[str] = None,
websocket_endpoint: str = WEBSOCKET_ENDPOINT,
):
if resource_id is None:
resource_id = get_env_resource_id()
if api_key is None:
api_key = get_env_api_key()
self.resource_id = resource_id
self.api_key = api_key
self.websocket_endpoint = websocket_endpoint
self._headers = {
"Accept": "application/json",
"Content-Type": "application/json",
"ResourceID": resource_id,
"Authorization": self.api_key,
}
if web_file_path is None:
raise RuntimeError("`web_file_path` must be specified")
if domain is None:
raise RuntimeError("`domain` must be specified")
self.domain = domain
endpoint = domain + web_file_path
super().__init__(endpoint)
logger.info("Initialising Encord client with endpoint: %s and resource_id: %s", endpoint, resource_id)
def get_env_resource_id() -> str:
project_id = os.environ.get(_ENCORD_PROJECT_ID) or os.environ.get(_CORD_PROJECT_ID)
dataset_id = os.environ.get(_ENCORD_DATASET_ID) or os.environ.get(_CORD_DATASET_ID)
if (project_id is not None) and (dataset_id is not None):
raise encord.exceptions.InitialisationError(
message=(
"Found both Project EntityId and Dataset EntityId in os.environ. "
"Please initialise EncordClient by passing resource_id."
)
)
elif project_id is not None:
resource_id = project_id
elif dataset_id is not None:
resource_id = dataset_id
else:
raise encord.exceptions.AuthenticationError(message="Project EntityId or dataset EntityId not provided")
return resource_id
def get_env_api_key() -> str:
api_key = os.environ.get(_ENCORD_API_KEY) or os.environ.get(_CORD_API_KEY)
if api_key is None:
raise encord.exceptions.AuthenticationError(message="API key not provided")
return api_key
class EncordConfig(Config):
def __init__(
self,
resource_id: Optional[str] = None,
api_key: Optional[str] = None,
domain: Optional[str] = None,
):
web_file_path = ENCORD_PUBLIC_PATH
super().__init__(resource_id, api_key, web_file_path=web_file_path, domain=domain)
CordConfig = EncordConfig
class UserConfig(BaseConfig):
    """Request configuration authenticated with a user's Ed25519 key pair.

    Each request is signed with the private key; the hex-encoded raw public
    key is sent next to the signature so the server can verify the caller.
    """

    def __init__(self, private_key: Ed25519PrivateKey, domain: str = ENCORD_DOMAIN):
        self.private_key: Ed25519PrivateKey = private_key
        self.public_key: Ed25519PublicKey = private_key.public_key()
        # Hex of the raw 32-byte public key; forms the identity half of the
        # Authorization header produced by define_headers().
        raw_public_key = self.public_key.public_bytes(Encoding.Raw, PublicFormat.Raw)
        self._public_key_hex: str = raw_public_key.hex()
        self.domain = domain
        super().__init__(domain + ENCORD_PUBLIC_USER_PATH)

    def define_headers(self, data: str) -> Dict:
        """Build request headers, signing the SHA-256 digest of ``data``."""
        contents_hash = hashlib.sha256(data.encode()).digest()
        signature = self.private_key.sign(contents_hash)
        return {
            "Accept": "application/json",
            "Content-Type": "application/json",
            "Authorization": f"{self._public_key_hex}:{signature.hex()}",
        }

    @staticmethod
    def from_ssh_private_key(ssh_private_key: str, password: Optional[str], **kwargs):
        """Create a :class:`UserConfig` from an OpenSSH-formatted private key.

        :raises ValueError: if the key is not an Ed25519 key.
        """
        key_bytes = ssh_private_key.encode()
        password_bytes = password and password.encode()
        private_key = cryptography.hazmat.primitives.serialization.load_ssh_private_key(key_bytes, password_bytes)
        if not isinstance(private_key, Ed25519PrivateKey):
            raise ValueError(f"Provided key [{ssh_private_key}] is not an Ed25519 private key")
        return UserConfig(private_key, **kwargs)
| true | true |
f710d6ff7602b53a29430a9106346782ca0b25c2 | 6,973 | py | Python | lib/spack/spack/schema/modules.py | Nabil-AL/spack | 442d0725fe9726597c7c88274d379c0c994d926b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | lib/spack/spack/schema/modules.py | Nabil-AL/spack | 442d0725fe9726597c7c88274d379c0c994d926b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2022-03-01T02:26:40.000Z | 2022-03-15T02:33:38.000Z | lib/spack/spack/schema/modules.py | Nabil-AL/spack | 442d0725fe9726597c7c88274d379c0c994d926b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Schema for modules.yaml configuration file.
.. literalinclude:: _spack_root/lib/spack/spack/schema/modules.py
:lines: 13-
"""
import spack.schema.environment
import spack.schema.projections
#: Matches a spec or a multi-valued variant but not another
#: valid keyword.
#:
#: THIS NEEDS TO BE UPDATED FOR EVERY NEW KEYWORD THAT
#: IS ADDED IMMEDIATELY BELOW THE MODULE TYPE ATTRIBUTE
spec_regex = r'(?!hierarchy|core_specs|verbose|hash_length|whitelist|' \
r'blacklist|projections|naming_scheme|core_compilers|all|' \
r'defaults)(^\w[\w-]*)'
#: Matches a valid name for a module set.
#: Banned names are valid entries at that level in the previous
#: (single-set) schema, so they are excluded to keep the two formats
#: distinguishable.
set_regex = r'(?!enable|lmod|tcl|dotkit|prefix_inspections)^\w[\w-]*'
#: Matches an anonymous spec, i.e. a spec without a root name
#: (one that starts with a version, variant, compiler or dependency sigil).
anonymous_spec_regex = r'^[\^@%+~]'
#: Definitions for parts of module schema
#: Schema fragment for a list of strings (whitelists, blacklists, ...).
array_of_strings = {
    'type': 'array', 'default': [], 'items': {'type': 'string'}
}
#: Schema fragment for a mapping from identifiers to strings.
dictionary_of_strings = {
    'type': 'object', 'patternProperties': {r'\w[\w-]*': {'type': 'string'}}
}
#: Which dependencies a module-file property applies to.
dependency_selection = {'type': 'string', 'enum': ['none', 'direct', 'all']}
module_file_configuration = {
'type': 'object',
'default': {},
'additionalProperties': False,
'properties': {
'filter': {
'type': 'object',
'default': {},
'additionalProperties': False,
'properties': {
'environment_blacklist': {
'type': 'array',
'default': [],
'items': {
'type': 'string'
}
}
}
},
'template': {
'type': 'string'
},
'autoload': dependency_selection,
'prerequisites': dependency_selection,
'load_only_generated': {
'type': 'boolean',
'default': False
},
'conflict': array_of_strings,
'load': array_of_strings,
'suffixes': {
'type': 'object',
'validate_spec': True,
'patternProperties': {
r'\w[\w-]*': { # key
'type': 'string'
}
}
},
'environment': spack.schema.environment.definition
}
}
projections_scheme = spack.schema.projections.properties['projections']
module_type_configuration = {
'type': 'object',
'default': {},
'allOf': [
{'properties': {
'verbose': {
'type': 'boolean',
'default': False
},
'hash_length': {
'type': 'integer',
'minimum': 0,
'default': 7
},
'whitelist': array_of_strings,
'blacklist': array_of_strings,
'blacklist_implicits': {
'type': 'boolean',
'default': False
},
'defaults': array_of_strings,
'naming_scheme': {
'type': 'string' # Can we be more specific here?
},
'projections': projections_scheme,
'all': module_file_configuration,
}
},
{'validate_spec': True,
'patternProperties': {
spec_regex: module_file_configuration,
anonymous_spec_regex: module_file_configuration,
}
}
]
}
#: The "real" module properties -- the actual configuration parameters.
#: They are separate from ``properties`` because they can appear both
#: at the top level of a Spack ``modules:`` config (old, deprecated format),
#: and within a named module set (new format with multiple module sets).
module_config_properties = {
'use_view': {'anyOf': [
{'type': 'string'},
{'type': 'boolean'}
]},
'arch_folder': {'type': 'boolean'},
'prefix_inspections': {
'type': 'object',
'additionalProperties': False,
'patternProperties': {
# prefix-relative path to be inspected for existence
r'^[\w-]*': array_of_strings
}
},
'roots': {
'type': 'object',
'properties': {
'tcl': {'type': 'string'},
'lmod': {'type': 'string'},
},
},
'enable': {
'type': 'array',
'default': [],
'items': {
'type': 'string',
'enum': ['tcl', 'dotkit', 'lmod']
},
'deprecatedProperties': {
'properties': ['dotkit'],
'message': 'cannot enable "dotkit" in modules.yaml '
'[support for "dotkit" has been dropped '
'in v0.13.0]',
'error': False
},
},
'lmod': {
'allOf': [
# Base configuration
module_type_configuration,
{
'type': 'object',
'properties': {
'core_compilers': array_of_strings,
'hierarchy': array_of_strings,
'core_specs': array_of_strings,
},
} # Specific lmod extensions
]
},
'tcl': {
'allOf': [
# Base configuration
module_type_configuration,
{} # Specific tcl extensions
]
},
'dotkit': {
'allOf': [
# Base configuration
module_type_configuration,
{} # Specific dotkit extensions
]
},
}
# Properties for inclusion into other schemas (requires definitions)
properties = {
'modules': {
'type': 'object',
'patternProperties': {
set_regex: {
'type': 'object',
'default': {},
'additionalProperties': False,
'properties': module_config_properties,
'deprecatedProperties': {
'properties': ['dotkit'],
'message': 'the "dotkit" section in modules.yaml has no effect'
' [support for "dotkit" has been dropped in v0.13.0]',
'error': False
}
},
},
# Available here for backwards compatibility
'properties': module_config_properties,
'deprecatedProperties': {
'properties': ['dotkit'],
'message': 'the "dotkit" section in modules.yaml has no effect'
' [support for "dotkit" has been dropped in v0.13.0]',
'error': False
}
}
}
#: Full schema with metadata
schema = {
'$schema': 'http://json-schema.org/schema#',
'title': 'Spack module file configuration file schema',
'type': 'object',
'additionalProperties': False,
'properties': properties,
}
| 30.056034 | 83 | 0.514986 |
import spack.schema.environment
import spack.schema.projections
spec_regex = r'(?!hierarchy|core_specs|verbose|hash_length|whitelist|' \
r'blacklist|projections|naming_scheme|core_compilers|all|' \
r'defaults)(^\w[\w-]*)'
set_regex = r'(?!enable|lmod|tcl|dotkit|prefix_inspections)^\w[\w-]*'
anonymous_spec_regex = r'^[\^@%+~]'
array_of_strings = {
'type': 'array', 'default': [], 'items': {'type': 'string'}
}
dictionary_of_strings = {
'type': 'object', 'patternProperties': {r'\w[\w-]*': {'type': 'string'}}
}
dependency_selection = {'type': 'string', 'enum': ['none', 'direct', 'all']}
module_file_configuration = {
'type': 'object',
'default': {},
'additionalProperties': False,
'properties': {
'filter': {
'type': 'object',
'default': {},
'additionalProperties': False,
'properties': {
'environment_blacklist': {
'type': 'array',
'default': [],
'items': {
'type': 'string'
}
}
}
},
'template': {
'type': 'string'
},
'autoload': dependency_selection,
'prerequisites': dependency_selection,
'load_only_generated': {
'type': 'boolean',
'default': False
},
'conflict': array_of_strings,
'load': array_of_strings,
'suffixes': {
'type': 'object',
'validate_spec': True,
'patternProperties': {
r'\w[\w-]*': {
'type': 'string'
}
}
},
'environment': spack.schema.environment.definition
}
}
projections_scheme = spack.schema.projections.properties['projections']
module_type_configuration = {
'type': 'object',
'default': {},
'allOf': [
{'properties': {
'verbose': {
'type': 'boolean',
'default': False
},
'hash_length': {
'type': 'integer',
'minimum': 0,
'default': 7
},
'whitelist': array_of_strings,
'blacklist': array_of_strings,
'blacklist_implicits': {
'type': 'boolean',
'default': False
},
'defaults': array_of_strings,
'naming_scheme': {
'type': 'string'
},
'projections': projections_scheme,
'all': module_file_configuration,
}
},
{'validate_spec': True,
'patternProperties': {
spec_regex: module_file_configuration,
anonymous_spec_regex: module_file_configuration,
}
}
]
}
module_config_properties = {
'use_view': {'anyOf': [
{'type': 'string'},
{'type': 'boolean'}
]},
'arch_folder': {'type': 'boolean'},
'prefix_inspections': {
'type': 'object',
'additionalProperties': False,
'patternProperties': {
r'^[\w-]*': array_of_strings
}
},
'roots': {
'type': 'object',
'properties': {
'tcl': {'type': 'string'},
'lmod': {'type': 'string'},
},
},
'enable': {
'type': 'array',
'default': [],
'items': {
'type': 'string',
'enum': ['tcl', 'dotkit', 'lmod']
},
'deprecatedProperties': {
'properties': ['dotkit'],
'message': 'cannot enable "dotkit" in modules.yaml '
'[support for "dotkit" has been dropped '
'in v0.13.0]',
'error': False
},
},
'lmod': {
'allOf': [
module_type_configuration,
{
'type': 'object',
'properties': {
'core_compilers': array_of_strings,
'hierarchy': array_of_strings,
'core_specs': array_of_strings,
},
}
]
},
'tcl': {
'allOf': [
module_type_configuration,
{}
]
},
'dotkit': {
'allOf': [
module_type_configuration,
{}
]
},
}
properties = {
'modules': {
'type': 'object',
'patternProperties': {
set_regex: {
'type': 'object',
'default': {},
'additionalProperties': False,
'properties': module_config_properties,
'deprecatedProperties': {
'properties': ['dotkit'],
'message': 'the "dotkit" section in modules.yaml has no effect'
' [support for "dotkit" has been dropped in v0.13.0]',
'error': False
}
},
},
'properties': module_config_properties,
'deprecatedProperties': {
'properties': ['dotkit'],
'message': 'the "dotkit" section in modules.yaml has no effect'
' [support for "dotkit" has been dropped in v0.13.0]',
'error': False
}
}
}
schema = {
'$schema': 'http://json-schema.org/schema#',
'title': 'Spack module file configuration file schema',
'type': 'object',
'additionalProperties': False,
'properties': properties,
}
| true | true |
f710d79c09a9b0679214160018290771969fcba6 | 8,930 | py | Python | src/watchdog/observers/inotify.py | lukassup/watchdog | db45bb7923e1e0226b741e521890832e216270e2 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-02-20T21:22:07.000Z | 2021-02-20T21:22:07.000Z | src/watchdog/observers/inotify.py | lukassup/watchdog | db45bb7923e1e0226b741e521890832e216270e2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/watchdog/observers/inotify.py | lukassup/watchdog | db45bb7923e1e0226b741e521890832e216270e2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers.inotify
:synopsis: ``inotify(7)`` based emitter implementation.
:author: Sebastien Martini <seb@dbzteam.org>
:author: Luke McCarthy <luke@iogopro.co.uk>
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:author: Tim Cuthbertson <tim+github@gfxmonk.net>
:platforms: Linux 2.6.13+.
.. ADMONITION:: About system requirements
Recommended minimum kernel version: 2.6.25.
Quote from the inotify(7) man page:
"Inotify was merged into the 2.6.13 Linux kernel. The required library
interfaces were added to glibc in version 2.4. (IN_DONT_FOLLOW,
IN_MASK_ADD, and IN_ONLYDIR were only added in version 2.5.)"
Therefore, you must ensure the system is running at least these versions
appropriate libraries and the kernel.
.. ADMONITION:: About recursiveness, event order, and event coalescing
Quote from the inotify(7) man page:
If successive output inotify events produced on the inotify file
descriptor are identical (same wd, mask, cookie, and name) then they
are coalesced into a single event if the older event has not yet been
read (but see BUGS).
The events returned by reading from an inotify file descriptor form
an ordered queue. Thus, for example, it is guaranteed that when
renaming from one directory to another, events will be produced in
the correct order on the inotify file descriptor.
...
Inotify monitoring of directories is not recursive: to monitor
subdirectories under a directory, additional watches must be created.
This emitter implementation therefore automatically adds watches for
sub-directories if running in recursive mode.
Some extremely useful articles and documentation:
.. _inotify FAQ: http://inotify.aiken.cz/?section=inotify&page=faq&lang=en
.. _intro to inotify: http://www.linuxjournal.com/article/8478
"""
from __future__ import with_statement
import os
import threading
from .inotify_buffer import InotifyBuffer
from watchdog.observers.api import (
EventEmitter,
BaseObserver,
DEFAULT_EMITTER_TIMEOUT,
DEFAULT_OBSERVER_TIMEOUT
)
from watchdog.events import (
DirDeletedEvent,
DirModifiedEvent,
DirMovedEvent,
DirCreatedEvent,
FileDeletedEvent,
FileModifiedEvent,
FileMovedEvent,
FileCreatedEvent,
FileClosedEvent,
generate_sub_moved_events,
generate_sub_created_events,
)
from watchdog.utils import unicode_paths
class InotifyEmitter(EventEmitter):
    """
    inotify(7)-based event emitter.
    :param event_queue:
        The event queue to fill with events.
    :param watch:
        A watch object representing the directory to monitor.
    :type watch:
        :class:`watchdog.observers.api.ObservedWatch`
    :param timeout:
        Read events blocking timeout (in seconds).
    :type timeout:
        ``float``
    """
    def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
        EventEmitter.__init__(self, event_queue, watch, timeout)
        # Serialises queue_events() calls so only one reader drains the buffer.
        self._lock = threading.Lock()
        self._inotify = None
    def on_thread_start(self):
        # InotifyBuffer works with byte paths.
        path = unicode_paths.encode(self.watch.path)
        self._inotify = InotifyBuffer(path, self.watch.is_recursive)
    def on_thread_stop(self):
        if self._inotify:
            self._inotify.close()
    def queue_events(self, timeout, full_events=False):
        # If "full_events" is true, then the method will report unmatched move events as separate events
        # This behavior is by default only called by a InotifyFullEmitter
        with self._lock:
            event = self._inotify.read_event()
            if event is None:
                return
            if isinstance(event, tuple):
                # A matched (IN_MOVED_FROM, IN_MOVED_TO) pair: emit one move
                # event, then "modified" for both parent directories.
                move_from, move_to = event
                src_path = self._decode_path(move_from.src_path)
                dest_path = self._decode_path(move_to.src_path)
                cls = DirMovedEvent if move_from.is_directory else FileMovedEvent
                self.queue_event(cls(src_path, dest_path))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
                self.queue_event(DirModifiedEvent(os.path.dirname(dest_path)))
                if move_from.is_directory and self.watch.is_recursive:
                    # Moving a directory moves everything beneath it too.
                    for sub_event in generate_sub_moved_events(src_path, dest_path):
                        self.queue_event(sub_event)
                return
            src_path = self._decode_path(event.src_path)
            if event.is_moved_to:
                if full_events:
                    # Unmatched IN_MOVED_TO: the source path is unknown.
                    cls = DirMovedEvent if event.is_directory else FileMovedEvent
                    self.queue_event(cls(None, src_path))
                else:
                    # Without full events an unmatched move-in is reported as a creation.
                    cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
                    self.queue_event(cls(src_path))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
                if event.is_directory and self.watch.is_recursive:
                    for sub_event in generate_sub_created_events(src_path):
                        self.queue_event(sub_event)
            elif event.is_attrib:
                # Metadata changes (permissions, timestamps, ...) map to "modified".
                cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
                self.queue_event(cls(src_path))
            elif event.is_modify:
                cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
                self.queue_event(cls(src_path))
            elif event.is_delete or (event.is_moved_from and not full_events):
                # Without full events an unmatched move-out is reported as a deletion.
                cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
                self.queue_event(cls(src_path))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
            elif event.is_moved_from and full_events:
                # Unmatched IN_MOVED_FROM: the destination path is unknown.
                cls = DirMovedEvent if event.is_directory else FileMovedEvent
                self.queue_event(cls(src_path, None))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
            elif event.is_create:
                cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
                self.queue_event(cls(src_path))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
            elif event.is_close_write and not event.is_directory:
                cls = FileClosedEvent
                self.queue_event(cls(src_path))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
            elif event.is_close_nowrite and not event.is_directory:
                # Closed without writing: no parent-directory modification implied.
                cls = FileClosedEvent
                self.queue_event(cls(src_path))
    def _decode_path(self, path):
        """ Decode path only if unicode string was passed to this emitter. """
        if isinstance(self.watch.path, bytes):
            return path
        return unicode_paths.decode(path)
class InotifyFullEmitter(InotifyEmitter):
    """
    inotify(7)-based event emitter that always reports moves, even unmatched
    ones. An unmatched move event carries ``None`` for the missing source or
    destination path.
    :param event_queue:
        The event queue to fill with events.
    :param watch:
        A watch object representing the directory to monitor.
    :type watch:
        :class:`watchdog.observers.api.ObservedWatch`
    :param timeout:
        Read events blocking timeout (in seconds).
    :type timeout:
        ``float``
    """
    def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
        super(InotifyFullEmitter, self).__init__(event_queue, watch, timeout)
    def queue_events(self, timeout, events=True):
        # Delegate to the base implementation with full move reporting enabled.
        InotifyEmitter.queue_events(self, timeout, full_events=events)
class InotifyObserver(BaseObserver):
    """
    Observer thread that schedules watching directories and dispatches
    calls to event handlers.
    :param timeout:
        Emitter polling timeout (in seconds).
    :param generate_full_events:
        When ``True``, use :class:`InotifyFullEmitter` so unmatched halves of
        move events are reported as move events with a ``None`` path instead
        of being converted into create/delete events.
    """
    def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT, generate_full_events=False):
        # Select the emitter class once instead of duplicating the
        # base-class initialisation in both branches.
        emitter_class = InotifyFullEmitter if generate_full_events else InotifyEmitter
        BaseObserver.__init__(self, emitter_class=emitter_class, timeout=timeout)
| 39.166667 | 107 | 0.676708 |
from __future__ import with_statement
import os
import threading
from .inotify_buffer import InotifyBuffer
from watchdog.observers.api import (
EventEmitter,
BaseObserver,
DEFAULT_EMITTER_TIMEOUT,
DEFAULT_OBSERVER_TIMEOUT
)
from watchdog.events import (
DirDeletedEvent,
DirModifiedEvent,
DirMovedEvent,
DirCreatedEvent,
FileDeletedEvent,
FileModifiedEvent,
FileMovedEvent,
FileCreatedEvent,
FileClosedEvent,
generate_sub_moved_events,
generate_sub_created_events,
)
from watchdog.utils import unicode_paths
class InotifyEmitter(EventEmitter):
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._lock = threading.Lock()
self._inotify = None
def on_thread_start(self):
path = unicode_paths.encode(self.watch.path)
self._inotify = InotifyBuffer(path, self.watch.is_recursive)
def on_thread_stop(self):
if self._inotify:
self._inotify.close()
def queue_events(self, timeout, full_events=False):
with self._lock:
event = self._inotify.read_event()
if event is None:
return
if isinstance(event, tuple):
move_from, move_to = event
src_path = self._decode_path(move_from.src_path)
dest_path = self._decode_path(move_to.src_path)
cls = DirMovedEvent if move_from.is_directory else FileMovedEvent
self.queue_event(cls(src_path, dest_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
self.queue_event(DirModifiedEvent(os.path.dirname(dest_path)))
if move_from.is_directory and self.watch.is_recursive:
for sub_event in generate_sub_moved_events(src_path, dest_path):
self.queue_event(sub_event)
return
src_path = self._decode_path(event.src_path)
if event.is_moved_to:
if full_events:
cls = DirMovedEvent if event.is_directory else FileMovedEvent
self.queue_event(cls(None, src_path))
else:
cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
self.queue_event(cls(src_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
if event.is_directory and self.watch.is_recursive:
for sub_event in generate_sub_created_events(src_path):
self.queue_event(sub_event)
elif event.is_attrib:
cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
self.queue_event(cls(src_path))
elif event.is_modify:
cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
self.queue_event(cls(src_path))
elif event.is_delete or (event.is_moved_from and not full_events):
cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
self.queue_event(cls(src_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
elif event.is_moved_from and full_events:
cls = DirMovedEvent if event.is_directory else FileMovedEvent
self.queue_event(cls(src_path, None))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
elif event.is_create:
cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
self.queue_event(cls(src_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
elif event.is_close_write and not event.is_directory:
cls = FileClosedEvent
self.queue_event(cls(src_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
elif event.is_close_nowrite and not event.is_directory:
cls = FileClosedEvent
self.queue_event(cls(src_path))
def _decode_path(self, path):
if isinstance(self.watch.path, bytes):
return path
return unicode_paths.decode(path)
class InotifyFullEmitter(InotifyEmitter):
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
InotifyEmitter.__init__(self, event_queue, watch, timeout)
def queue_events(self, timeout, events=True):
InotifyEmitter.queue_events(self, timeout, full_events=events)
class InotifyObserver(BaseObserver):
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT, generate_full_events=False):
if (generate_full_events):
BaseObserver.__init__(self, emitter_class=InotifyFullEmitter, timeout=timeout)
else:
BaseObserver.__init__(self, emitter_class=InotifyEmitter,
timeout=timeout)
| true | true |
f710d8153f1f5aeb27c355a1c1823ae88d30208e | 497 | py | Python | packages/python/plotly/plotly/validators/isosurface/lightposition/_z.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/isosurface/lightposition/_z.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/isosurface/lightposition/_z.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class ZValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``z`` coordinate of ``isosurface.lightposition``."""

    def __init__(
        self, plotly_name="z", parent_name="isosurface.lightposition", **kwargs
    ):
        # Pop known options first so they are not forwarded twice via kwargs.
        options = {
            "edit_type": kwargs.pop("edit_type", "calc"),
            "max": kwargs.pop("max", 100000),
            "min": kwargs.pop("min", -100000),
        }
        super(ZValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **options,
            **kwargs,
        )
| 31.0625 | 79 | 0.617706 | import _plotly_utils.basevalidators
class ZValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="z", parent_name="isosurface.lightposition", **kwargs
):
super(ZValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 100000),
min=kwargs.pop("min", -100000),
**kwargs,
)
| true | true |
f710d8a784792856aa8454aaa89c6b339c6e43ed | 4,369 | py | Python | datadog_checks_base/tests/base/checks/openmetrics/test_interface.py | tdimnet/integrations-core | a78133a3b71a1b8377fa214d121a98647031ab06 | [
"BSD-3-Clause"
] | 1 | 2021-12-15T22:45:14.000Z | 2021-12-15T22:45:14.000Z | datadog_checks_base/tests/base/checks/openmetrics/test_interface.py | tdimnet/integrations-core | a78133a3b71a1b8377fa214d121a98647031ab06 | [
"BSD-3-Clause"
] | null | null | null | datadog_checks_base/tests/base/checks/openmetrics/test_interface.py | tdimnet/integrations-core | a78133a3b71a1b8377fa214d121a98647031ab06 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.base import OpenMetricsBaseCheckV2
from datadog_checks.base.constants import ServiceCheck
from datadog_checks.dev.testing import requires_py3
from .utils import get_check
pytestmark = [requires_py3, pytest.mark.openmetrics, pytest.mark.openmetrics_interface]
def test_default_config(aggregator, dd_run_check, mock_http_response):
class Check(OpenMetricsBaseCheckV2):
__NAMESPACE__ = 'test'
def get_default_config(self):
return {'metrics': ['.+'], 'rename_labels': {'foo': 'bar'}}
mock_http_response(
"""
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes{foo="baz"} 6.396288e+06
"""
)
check = Check('test', {}, [{'openmetrics_endpoint': 'test'}])
dd_run_check(check)
aggregator.assert_metric(
'test.go_memstats_alloc_bytes', 6396288, metric_type=aggregator.GAUGE, tags=['endpoint:test', 'bar:baz']
)
aggregator.assert_all_metrics_covered()
def test_service_check_dynamic_tags(aggregator, dd_run_check, mock_http_response):
    # Dynamic tags set via `set_dynamic_tags` must be applied to metrics on
    # later runs, while service checks keep only the static/config tags.
    mock_http_response(
        """
        # HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
        # TYPE go_memstats_alloc_bytes gauge
        go_memstats_alloc_bytes{foo="baz"} 6.396288e+06
        # HELP state Node state
        # TYPE state gauge
        state{bar="baz"} 3
        """
    )
    # `state` is mapped to a service check; raw value `3` translates to OK.
    check = get_check(
        {'metrics': ['.+', {'state': {'type': 'service_check', 'status_map': {'3': 'ok'}}}], 'tags': ['foo:bar']}
    )
    dd_run_check(check)
    aggregator.assert_metric(
        'test.go_memstats_alloc_bytes',
        6396288,
        metric_type=aggregator.GAUGE,
        tags=['endpoint:test', 'foo:bar', 'foo:baz'],
    )
    aggregator.assert_service_check('test.state', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
    aggregator.assert_service_check('test.openmetrics.health', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
    aggregator.assert_all_metrics_covered()
    assert len(aggregator.service_check_names) == 2
    # Second run: after registering a dynamic tag it must show up on the
    # metric but NOT on the service checks.
    aggregator.reset()
    check.set_dynamic_tags('baz:foo')
    dd_run_check(check)
    aggregator.assert_metric(
        'test.go_memstats_alloc_bytes',
        6396288,
        metric_type=aggregator.GAUGE,
        tags=['endpoint:test', 'foo:bar', 'foo:baz', 'baz:foo'],
    )
    aggregator.assert_service_check('test.state', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
    aggregator.assert_service_check('test.openmetrics.health', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
    aggregator.assert_all_metrics_covered()
    assert len(aggregator.service_check_names) == 2
def test_custom_transformer(aggregator, dd_run_check, mock_http_response):
    # A check can register a pattern-based custom transformer on the scraper;
    # every sample whose metric name matches the pattern is routed through it.
    class Check(OpenMetricsBaseCheckV2):
        __NAMESPACE__ = 'test'
        def __init__(self, name, init_config, instances):
            super().__init__(name, init_config, instances)
            # Transformers can only be attached once scrapers exist, so defer
            # registration to the check-initialization phase.
            self.check_initializations.append(self.configure_additional_transformers)
        def configure_transformer_watchdog_mega_miss(self):
            method = self.gauge
            def transform(metric, sample_data, runtime_data):
                # Collapse every per-worker series into one metric name.
                for sample, tags, hostname in sample_data:
                    method('server.watchdog_mega_miss', sample.value, tags=tags, hostname=hostname)
            return transform
        def configure_additional_transformers(self):
            metric = r"^envoy_server_(.+)_watchdog_mega_miss$"
            self.scrapers[self.instance['openmetrics_endpoint']].metric_transformer.add_custom_transformer(
                metric, self.configure_transformer_watchdog_mega_miss(), pattern=True
            )
    mock_http_response(
        """
        # TYPE envoy_server_worker_0_watchdog_mega_miss counter
        envoy_server_worker_0_watchdog_mega_miss{} 1
        # TYPE envoy_server_worker_1_watchdog_mega_miss counter
        envoy_server_worker_1_watchdog_mega_miss{} 0
        """
    )
    check = Check('test', {}, [{'openmetrics_endpoint': 'test'}])
    dd_run_check(check)
    # Both worker series were transformed: two gauge submissions, one name.
    aggregator.assert_metric('test.server.watchdog_mega_miss', metric_type=aggregator.GAUGE, count=2)
| 36.714286 | 114 | 0.688487 |
import pytest
from datadog_checks.base import OpenMetricsBaseCheckV2
from datadog_checks.base.constants import ServiceCheck
from datadog_checks.dev.testing import requires_py3
from .utils import get_check
pytestmark = [requires_py3, pytest.mark.openmetrics, pytest.mark.openmetrics_interface]
def test_default_config(aggregator, dd_run_check, mock_http_response):
class Check(OpenMetricsBaseCheckV2):
__NAMESPACE__ = 'test'
def get_default_config(self):
return {'metrics': ['.+'], 'rename_labels': {'foo': 'bar'}}
mock_http_response(
"""
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes{foo="baz"} 6.396288e+06
"""
)
check = Check('test', {}, [{'openmetrics_endpoint': 'test'}])
dd_run_check(check)
aggregator.assert_metric(
'test.go_memstats_alloc_bytes', 6396288, metric_type=aggregator.GAUGE, tags=['endpoint:test', 'bar:baz']
)
aggregator.assert_all_metrics_covered()
def test_service_check_dynamic_tags(aggregator, dd_run_check, mock_http_response):
mock_http_response(
"""
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes{foo="baz"} 6.396288e+06
# HELP state Node state
# TYPE state gauge
state{bar="baz"} 3
"""
)
check = get_check(
{'metrics': ['.+', {'state': {'type': 'service_check', 'status_map': {'3': 'ok'}}}], 'tags': ['foo:bar']}
)
dd_run_check(check)
aggregator.assert_metric(
'test.go_memstats_alloc_bytes',
6396288,
metric_type=aggregator.GAUGE,
tags=['endpoint:test', 'foo:bar', 'foo:baz'],
)
aggregator.assert_service_check('test.state', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_service_check('test.openmetrics.health', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_all_metrics_covered()
assert len(aggregator.service_check_names) == 2
aggregator.reset()
check.set_dynamic_tags('baz:foo')
dd_run_check(check)
aggregator.assert_metric(
'test.go_memstats_alloc_bytes',
6396288,
metric_type=aggregator.GAUGE,
tags=['endpoint:test', 'foo:bar', 'foo:baz', 'baz:foo'],
)
aggregator.assert_service_check('test.state', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_service_check('test.openmetrics.health', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_all_metrics_covered()
assert len(aggregator.service_check_names) == 2
def test_custom_transformer(aggregator, dd_run_check, mock_http_response):
    # Verify that a regex-based custom transformer can take over the handling of
    # matching raw metrics (here: per-worker Envoy watchdog counters).
    class Check(OpenMetricsBaseCheckV2):
        __NAMESPACE__ = 'test'

        def __init__(self, name, init_config, instances):
            super().__init__(name, init_config, instances)
            # register the transformer only after the scraper has been created
            self.check_initializations.append(self.configure_additional_transformers)

        def configure_transformer_watchdog_mega_miss(self):
            method = self.gauge

            def transform(metric, sample_data, runtime_data):
                # submit every matching sample under a single normalized name
                for sample, tags, hostname in sample_data:
                    method('server.watchdog_mega_miss', sample.value, tags=tags, hostname=hostname)

            return transform

        def configure_additional_transformers(self):
            # pattern=True makes the transformer apply to every metric matching
            # this regex rather than to one exact metric name
            metric = r"^envoy_server_(.+)_watchdog_mega_miss$"
            self.scrapers[self.instance['openmetrics_endpoint']].metric_transformer.add_custom_transformer(
                metric, self.configure_transformer_watchdog_mega_miss(), pattern=True
            )

    mock_http_response(
        """
        # TYPE envoy_server_worker_0_watchdog_mega_miss counter
        envoy_server_worker_0_watchdog_mega_miss{} 1
        # TYPE envoy_server_worker_1_watchdog_mega_miss counter
        envoy_server_worker_1_watchdog_mega_miss{} 0
        """
    )
    check = Check('test', {}, [{'openmetrics_endpoint': 'test'}])
    dd_run_check(check)

    # both worker counters collapse into one gauge name, submitted twice
    aggregator.assert_metric('test.server.watchdog_mega_miss', metric_type=aggregator.GAUGE, count=2)
| true | true |
f710d923ceb89999ceccd02025d68644894d3817 | 16,027 | py | Python | models/Strategy.py | bsda/pycryptobot | 8d0738cc06bef165d335b08ad8597777a229ed81 | [
"Apache-2.0"
] | null | null | null | models/Strategy.py | bsda/pycryptobot | 8d0738cc06bef165d335b08ad8597777a229ed81 | [
"Apache-2.0"
] | null | null | null | models/Strategy.py | bsda/pycryptobot | 8d0738cc06bef165d335b08ad8597777a229ed81 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from pandas import DataFrame
from models.PyCryptoBot import PyCryptoBot
from models.AppState import AppState
from models.helper.LogHelper import Logger
import sys
class Strategy:
    """Trading strategy: decides whether to buy, sell, or wait.

    The decision is based on the technical indicators of the most recent
    candle (``self._df_last``), the bot configuration (``self.app``), and
    the mutable runtime state (``self.state``).
    """

    def __init__(
        self,
        app: PyCryptoBot = None,
        state: AppState = AppState,
        df: DataFrame = DataFrame,
        iterations: int = 0,
    ) -> None:
        """
        Args:
            app: Bot configuration/application object.
            state: Runtime state (last action, fibonacci low, ...).
            df: Candle dataframe with pre-computed technical indicators.
            iterations: Index of the current trading/simulation iteration.

        Raises:
            TypeError: If `df` is not a Pandas dataframe.
            ValueError: If `df` is empty.
        """
        if not isinstance(df, DataFrame):
            raise TypeError("'df' not a Pandas dataframe")

        if len(df) == 0:
            raise ValueError("'df' is empty")

        self._action = "WAIT"
        self.app = app
        self.state = state
        self._df = df
        # view of the candle row for the current iteration (not a copy)
        self._df_last = app.getInterval(df, iterations)

    def _warn_and_notify(self, log_text: str) -> None:
        """Log `log_text` as a warning and forward it to Telegram,
        prefixed with the market and granularity."""
        Logger.warning(log_text)
        self.app.notifyTelegram(
            f"{self.app.getMarket()} ({self.app.printGranularity()}) {log_text}"
        )

    def isBuySignal(self, price, now: str = None) -> bool:
        """Determine whether the last candle produces a buy signal.

        Args:
            price: Current market price.
            now: Timestamp string used in log messages; defaults to the
                current time.  (Previously the default was evaluated once at
                import time, so every log line carried the process start
                time -- computing it per call fixes that.)

        Returns:
            bool: True if a buy should be triggered.

        Raises:
            AttributeError: If a required indicator column is missing.
        """
        if now is None:
            now = datetime.today().strftime("%Y-%m-%d %H:%M:%S")

        # required technical indicators or candle sticks for the buy strategies
        required_indicators = [
            "ema12gtema26co",
            "macdgtsignal",
            "goldencross",
            "obv_pc",
            "eri_buy",
        ]

        for indicator in required_indicators:
            if indicator not in self._df_last:
                raise AttributeError(f"'{indicator}' not in Pandas dataframe")

        # buy signal exclusion: if the "disable buy near high" option is on,
        # refuse to buy within 3% of the dataframe close high
        if (
            self.state.last_action == "SELL"
            and self.app.disableBuyNearHigh() is True
            and (price > (self._df["close"].max() * 0.97))
        ):
            Logger.warning(
                f"{now} | {self.app.getMarket()} | {self.app.printGranularity()}"
                f" | Ignoring Buy Signal (price {price} within 3% of high"
                f" {self._df['close'].max()})"
            )
            return False

        # with both EMA and MACD checks disabled there is nothing to signal on
        if self.app.disableBuyEMA() and self.app.disableBuyMACD():
            Logger.warning(
                f"{now} | {self.app.getMarket()} | {self.app.printGranularity()}"
                " | EMA, MACD indicators are disabled "
            )
            return False

        # preconditions shared by both buy strategies (each can be disabled)
        ema_ok = (
            bool(self._df_last["ema12gtema26co"].values[0]) is True
            or self.app.disableBuyEMA()
        )
        bull_ok = (
            bool(self._df_last["goldencross"].values[0]) is True
            or self.app.disableBullOnly()
        )
        obv_ok = (
            float(self._df_last["obv_pc"].values[0]) > -5
            or self.app.disableBuyOBV()
        )
        eri_ok = (
            bool(self._df_last["eri_buy"].values[0]) is True
            or self.app.disableBuyElderRay()
        )

        if ema_ok and bull_ok and obv_ok and eri_ok and self.state.last_action != "BUY":
            # strategy 1: MACD above its signal line (or MACD check disabled);
            # strategy 2: MACD crossed above its signal line on this candle.
            # Short-circuit keeps "macdgtsignalco" untouched unless needed.
            if (
                bool(self._df_last["macdgtsignal"].values[0]) is True
                or self.app.disableBuyMACD()
                or bool(self._df_last["macdgtsignalco"].values[0]) is True
            ):
                Logger.debug("*** Buy Signal ***")
                for indicator in required_indicators:
                    Logger.debug(f"{indicator}: {self._df_last[indicator].values[0]}")
                Logger.debug(f"last_action: {self.state.last_action}")
                return True

        return False

    def isSellSignal(self) -> bool:
        """Determine whether the last candle produces a sell signal.

        Returns:
            bool: True if a sell should be triggered.

        Raises:
            AttributeError: If a required indicator column is missing.
        """
        # required technical indicators or candle sticks for the sell strategy
        required_indicators = ["ema12ltema26co", "macdltsignal"]

        for indicator in required_indicators:
            if indicator not in self._df_last:
                raise AttributeError(f"'{indicator}' not in Pandas dataframe")

        # EMA12 crossed below EMA26 and MACD below signal (unless disabled)
        if (
            bool(self._df_last["ema12ltema26co"].values[0]) is True
            and (
                bool(self._df_last["macdltsignal"].values[0]) is True
                or self.app.disableBuyMACD()
            )
            and self.state.last_action not in ["", "SELL"]
        ):
            Logger.debug("*** Sell Signal ***")
            for indicator in required_indicators:
                Logger.debug(f"{indicator}: {self._df_last[indicator].values[0]}")
            Logger.debug(f"last_action: {self.state.last_action}")
            return True

        return False

    def isSellTrigger(
        self,
        price: float = 0.0,
        price_exit: float = 0.0,
        margin: float = 0.0,
        change_pcnt_high: float = 0.0,
        obv_pc: float = 0.0,
        macdltsignal: bool = False,
    ) -> bool:
        """Determine whether one of the hard sell triggers fires.

        Args:
            price: Current market price.
            price_exit: Resistance/exit price used for profit banking.
            margin: Current margin (percent) of the open position.
            change_pcnt_high: Percentage change from the position's high.
            obv_pc: On-balance volume percent change (kept for interface
                compatibility; currently unused).
            macdltsignal: Whether MACD is below its signal line (kept for
                interface compatibility; currently unused).

        Returns:
            bool: True if a sell should be executed immediately.
        """
        # set to True for verbose debugging
        debug = False

        if debug:
            Logger.warning("\n*** isSellTrigger ***\n")
            Logger.warning(f"price: {price} price_exit: {price_exit} margin: {margin}")
            Logger.warning(f"change_pcnt_high: {change_pcnt_high} obv_pc: {obv_pc}")
            Logger.warning(f"allow_sell_at_loss: {self.app.allowSellAtLoss()}")

        allow_loss = self.app.allowSellAtLoss()

        # loss failsafe sell at fibonacci band
        # (requires allow_loss anyway, so the former extra
        # "allowSellAtLoss() or margin > 0" clause was redundant)
        if (
            self.app.disableFailsafeFibonacciLow() is False
            and allow_loss
            and self.app.sellLowerPcnt() is None
            and self.state.fib_low > 0
            and self.state.fib_low >= float(price)
        ):
            self._warn_and_notify(
                f"! Loss Failsafe Triggered (Fibonacci Band: {self.state.fib_low})"
            )
            return True

        # loss failsafe sell at trailing_stop_loss
        if (
            self.app.trailingStopLoss() is not None
            and change_pcnt_high < self.app.trailingStopLoss()
            and margin > self.app.trailingStopLossTrigger()
            and (allow_loss or margin > 0)
        ):
            self._warn_and_notify(
                f"! Trailing Stop Loss Triggered (< {self.app.trailingStopLoss()}%)"
            )
            return True

        # loss failsafe sell at sell_lower_pcnt
        # NOTE: this used to be an `elif` chained onto the preceding
        # `if debug:` block, which silently skipped the check whenever
        # debugging was enabled -- it is an independent `if` now.
        if (
            self.app.disableFailsafeLowerPcnt() is False
            and allow_loss
            and self.app.sellLowerPcnt() is not None
            and margin < self.app.sellLowerPcnt()
        ):
            self._warn_and_notify(
                f"! Loss Failsafe Triggered (< {self.app.sellLowerPcnt()}%)"
            )
            return True

        # profit bank at sell_upper_pcnt
        if (
            self.app.disableProfitbankUpperPcnt() is False
            and self.app.sellUpperPcnt() is not None
            and margin > self.app.sellUpperPcnt()
            and (allow_loss or margin > 0)
        ):
            self._warn_and_notify(
                f"! Profit Bank Triggered (> {self.app.sellUpperPcnt()}%)"
            )
            return True

        # profit bank when a strong reversal is detected at resistance
        if (
            self.app.sellAtResistance() is True
            and margin >= 2
            and price > 0
            and price >= price_exit
            and (allow_loss or margin > 0)
        ):
            # the guard above already guarantees allow_loss or margin > 0,
            # so the notification is always sent here
            self._warn_and_notify("! Profit Bank Triggered (Selling At Resistance)")
            return True

        return False

    def isWaitTrigger(self, margin: float = 0.0, goldencross: bool = False):
        """Determine whether the pending action should be suppressed.

        Args:
            margin: Current margin (percent) of the open position.
            goldencross: Whether the market is in a golden cross (bull market).

        Returns:
            bool: True if the bot should wait instead of acting.
        """
        # set to True for verbose debugging
        debug = False

        if debug and self.state.action != 'WAIT':
            Logger.warning("\n*** isWaitTrigger ***\n")
            Logger.warning(f"action: {self.state.action} margin: {margin} goldencross: {goldencross}")

        # bear market while in bull-only mode: abort the buy
        if (
            self.state.action == "BUY"
            and not self.app.disableBullOnly()
            and not goldencross
        ):
            Logger.warning("! Ignore Buy Signal (Bear Buy In Bull Only)")
            return True

        # configuration forbids selling at a loss
        if (
            self.state.action == "SELL"
            and not self.app.allowSellAtLoss()
            and margin <= 0
        ):
            Logger.warning("! Ignore Sell Signal (No Sell At Loss)")
            return True

        # configuration forbids selling while the margin lies within the
        # [nosellminpcnt, nosellmaxpcnt] band
        if (
            self.state.action == "SELL"
            and (self.app.nosellminpcnt is not None and margin >= self.app.nosellminpcnt)
            and (self.app.nosellmaxpcnt is not None and margin <= self.app.nosellmaxpcnt)
        ):
            Logger.warning("! Ignore Sell Signal (Within No-Sell Bounds)")
            return True

        return False

    def getAction(self, price):
        """Return the next action for the given price: 'BUY', 'SELL' or 'WAIT'."""
        if self.isBuySignal(price):
            return "BUY"
        elif self.isSellSignal():
            return "SELL"
        else:
            return "WAIT"
| 39.670792 | 179 | 0.540525 | from datetime import datetime
from pandas import DataFrame
from models.PyCryptoBot import PyCryptoBot
from models.AppState import AppState
from models.helper.LogHelper import Logger
import sys
class Strategy:
def __init__(
self,
app: PyCryptoBot = None,
state: AppState = AppState,
df: DataFrame = DataFrame,
iterations: int = 0,
) -> None:
if not isinstance(df, DataFrame):
raise TypeError("'df' not a Pandas dataframe")
if len(df) == 0:
raise ValueError("'df' is empty")
self._action = "WAIT"
self.app = app
self.state = state
self._df = df
self._df_last = app.getInterval(df, iterations)
def isBuySignal(
self, price, now: datetime = datetime.today().strftime("%Y-%m-%d %H:%M:%S")
) -> bool:
required_indicators = [
"ema12gtema26co",
"macdgtsignal",
"goldencross",
"obv_pc",
"eri_buy",
]
for indicator in required_indicators:
if indicator not in self._df_last:
raise AttributeError(f"'{indicator}' not in Pandas dataframe")
if (
self.state.last_action == "SELL"
and self.app.disableBuyNearHigh() is True
and (price > (self._df["close"].max() * 0.97))
):
log_text = (
str(now)
+ " | "
+ self.app.getMarket()
+ " | "
+ self.app.printGranularity()
+ " | Ignoring Buy Signal (price "
+ str(price)
+ " within 3% of high "
+ str(self._df["close"].max())
+ ")"
)
Logger.warning(log_text)
return False
if self.app.disableBuyEMA() and self.app.disableBuyMACD():
log_text = (
str(now)
+ " | "
+ self.app.getMarket()
+ " | "
+ self.app.printGranularity()
+ " | EMA, MACD indicators are disabled "
)
Logger.warning(log_text)
return False
if (
(
bool(self._df_last["ema12gtema26co"].values[0]) is True
or self.app.disableBuyEMA()
)
and (
bool(self._df_last["macdgtsignal"].values[0]) is True
or self.app.disableBuyMACD()
)
and (
bool(self._df_last["goldencross"].values[0]) is True
or self.app.disableBullOnly()
)
and (
float(self._df_last["obv_pc"].values[0]) > -5
or self.app.disableBuyOBV()
)
and (
bool(self._df_last["eri_buy"].values[0]) is True
or self.app.disableBuyElderRay()
)
and self.state.last_action != "BUY"
):
Logger.debug("*** Buy Signal ***")
for indicator in required_indicators:
Logger.debug(f"{indicator}: {self._df_last[indicator].values[0]}")
Logger.debug(f"last_action: {self.state.last_action}")
return True
elif (
(
bool(self._df_last["ema12gtema26co"].values[0]) is True
or self.app.disableBuyEMA()
)
and bool(self._df_last["macdgtsignalco"].values[0]) is True
and (
bool(self._df_last["goldencross"].values[0]) is True
or self.app.disableBullOnly()
)
and (
float(self._df_last["obv_pc"].values[0]) > -5
or self.app.disableBuyOBV()
)
and (
bool(self._df_last["eri_buy"].values[0]) is True
or self.app.disableBuyElderRay()
)
and self.state.last_action != "BUY"
):
Logger.debug("*** Buy Signal ***")
for indicator in required_indicators:
Logger.debug(f"{indicator}: {self._df_last[indicator].values[0]}")
Logger.debug(f"last_action: {self.state.last_action}")
return True
return False
def isSellSignal(self) -> bool:
required_indicators = ["ema12ltema26co", "macdltsignal"]
for indicator in required_indicators:
if indicator not in self._df_last:
raise AttributeError(f"'{indicator}' not in Pandas dataframe")
if (
bool(self._df_last["ema12ltema26co"].values[0]) is True
and (
bool(self._df_last["macdltsignal"].values[0]) is True
or self.app.disableBuyMACD()
)
and self.state.last_action not in ["", "SELL"]
):
Logger.debug("*** Sell Signal ***")
for indicator in required_indicators:
Logger.debug(f"{indicator}: {self._df_last[indicator].values[0]}")
Logger.debug(f"last_action: {self.state.last_action}")
return True
return False
def isSellTrigger(
self,
price: float = 0.0,
price_exit: float = 0.0,
margin: float = 0.0,
change_pcnt_high: float = 0.0,
obv_pc: float = 0.0,
macdltsignal: bool = False,
) -> bool:
debug = False
if debug:
Logger.warning("\n*** isSellTrigger ***\n")
Logger.warning("-- loss failsafe sell at fibonacci band --")
Logger.warning(f"self.app.disableFailsafeFibonacciLow() is False (actual: {self.app.disableFailsafeFibonacciLow()})")
Logger.warning(f"self.app.allowSellAtLoss() is True (actual: {self.app.allowSellAtLoss()})")
Logger.warning(f"self.app.sellLowerPcnt() is None (actual: {self.app.sellLowerPcnt()})")
Logger.warning(f"self.state.fib_low {self.state.fib_low} > 0")
Logger.warning(f"self.state.fib_low {self.state.fib_low} >= {float(price)}")
Logger.warning(f"(self.app.allowSellAtLoss() is True (actual: {self.app.allowSellAtLoss()}) or margin ({margin}) > 0)")
Logger.warning("\n")
if (
self.app.disableFailsafeFibonacciLow() is False
and self.app.allowSellAtLoss()
and self.app.sellLowerPcnt() is None
and self.state.fib_low > 0
and self.state.fib_low >= float(price)
and (self.app.allowSellAtLoss() or margin > 0)
):
log_text = (
"! Loss Failsafe Triggered (Fibonacci Band: "
+ str(self.state.fib_low)
+ ")"
)
Logger.warning(log_text)
self.app.notifyTelegram(
self.app.getMarket()
+ " ("
+ self.app.printGranularity()
+ ") "
+ log_text
)
return True
if debug:
Logger.warning("-- loss failsafe sell at trailing_stop_loss --")
Logger.warning(f"self.app.trailingStopLoss() != None (actual: {self.app.trailingStopLoss()})")
Logger.warning(f"change_pcnt_high ({change_pcnt_high}) < self.app.trailingStopLoss() ({self.app.trailingStopLoss()})")
Logger.warning(f"margin ({margin}) > self.app.trailingStopLossTrigger() ({self.app.trailingStopLossTrigger()})")
Logger.warning(f"(self.app.allowSellAtLoss() is True (actual: {self.app.allowSellAtLoss()}) or margin ({margin}) > 0)")
Logger.warning("\n")
if (
self.app.trailingStopLoss() != None
and change_pcnt_high < self.app.trailingStopLoss()
and margin > self.app.trailingStopLossTrigger()
and (self.app.allowSellAtLoss() or margin > 0)
):
log_text = (
"! Trailing Stop Loss Triggered (< "
+ str(self.app.trailingStopLoss())
+ "%)"
)
Logger.warning(log_text)
self.app.notifyTelegram(
self.app.getMarket()
+ " ("
+ self.app.printGranularity()
+ ") "
+ log_text
)
return True
if debug:
Logger.warning("-- loss failsafe sell at sell_lower_pcnt --")
Logger.warning(f"self.app.disableFailsafeLowerPcnt() is False (actual: {self.app.disableFailsafeLowerPcnt()})")
Logger.warning(f"and self.app.allowSellAtLoss() is True (actual: {self.app.allowSellAtLoss()})")
Logger.warning(f"and self.app.sellLowerPcnt() != None (actual: {self.app.sellLowerPcnt()})")
Logger.warning(f"and margin ({margin}) < self.app.sellLowerPcnt() ({self.app.sellLowerPcnt()})")
Logger.warning(f"(self.app.allowSellAtLoss() is True (actual: {self.app.allowSellAtLoss()}) or margin ({margin}) > 0)")
Logger.warning("\n")
elif (
self.app.disableFailsafeLowerPcnt() is False
and self.app.allowSellAtLoss()
and self.app.sellLowerPcnt() != None
and margin < self.app.sellLowerPcnt()
and (self.app.allowSellAtLoss() or margin > 0)
):
log_text = (
"! Loss Failsafe Triggered (< " + str(self.app.sellLowerPcnt()) + "%)"
)
Logger.warning(log_text)
self.app.notifyTelegram(
self.app.getMarket()
+ " ("
+ self.app.printGranularity()
+ ") "
+ log_text
)
return True
if debug:
Logger.warning("-- profit bank at sell_upper_pcnt --")
Logger.warning(f"self.app.disableProfitbankUpperPcnt() is False (actual: {self.app.disableProfitbankUpperPcnt()})")
Logger.warning(f"and self.app.sellUpperPcnt() != None (actual: {self.app.sellUpperPcnt()})")
Logger.warning(f"and margin ({margin}) > self.app.sellUpperPcnt() ({self.app.sellUpperPcnt()})")
Logger.warning(f"(self.app.allowSellAtLoss() is True (actual: {self.app.allowSellAtLoss()}) or margin ({margin}) > 0)")
Logger.warning("\n")
if (
self.app.disableProfitbankUpperPcnt() is False
and self.app.sellUpperPcnt() != None
and margin > self.app.sellUpperPcnt()
and (self.app.allowSellAtLoss() or margin > 0)
):
log_text = (
"! Profit Bank Triggered (> " + str(self.app.sellUpperPcnt()) + "%)"
)
Logger.warning(log_text)
self.app.notifyTelegram(
self.app.getMarket()
+ " ("
+ self.app.printGranularity()
+ ") "
+ log_text
)
return True
if debug:
Logger.warning("-- profit bank when strong reversal detected --")
Logger.warning(f"self.app.sellAtResistance() is True (actual {self.app.sellAtResistance()})")
Logger.warning(f"and price ({price}) > 0")
Logger.warning(f"and price ({price}) >= price_exit ({price_exit})")
Logger.warning(f"(self.app.allowSellAtLoss() is True (actual: {self.app.allowSellAtLoss()}) or margin ({margin}) > 0)")
Logger.warning("\n")
if (
self.app.sellAtResistance() is True
and margin >= 2
and price > 0
and price >= price_exit
and (self.app.allowSellAtLoss() or margin > 0)
):
log_text = "! Profit Bank Triggered (Selling At Resistance)"
Logger.warning(log_text)
if not (not self.app.allowSellAtLoss() and margin <= 0):
self.app.notifyTelegram(
self.app.getMarket()
+ " ("
+ self.app.printGranularity()
+ ") "
+ log_text
)
return True
return False
def isWaitTrigger(self, margin: float = 0.0, goldencross: bool = False):
debug = False
if debug and self.state.action != 'WAIT':
Logger.warning("\n*** isWaitTrigger ***\n")
if debug and self.state.action == 'BUY':
Logger.warning("-- if bear market and bull only return true to abort buy --")
Logger.warning(f"self.state.action == 'BUY' (actual: {self.state.action})")
Logger.warning(f"and self.app.disableBullOnly() is True (actual: {self.app.disableBullOnly()})")
Logger.warning(f"and goldencross is False (actual: {goldencross})")
Logger.warning("\n")
if (
self.state.action == "BUY"
and not self.app.disableBullOnly()
and not goldencross
):
log_text = "! Ignore Buy Signal (Bear Buy In Bull Only)"
Logger.warning(log_text)
return True
if debug and self.state.action == 'SELL':
Logger.warning("-- configuration specifies to not sell at a loss --")
Logger.warning(f"self.state.action == 'SELL' (actual: {self.state.action})")
Logger.warning(f"and self.app.allowSellAtLoss() is False (actual: {self.app.allowSellAtLoss()})")
Logger.warning(f"and margin ({margin}) <= 0")
Logger.warning("\n")
if (
self.state.action == "SELL"
and not self.app.allowSellAtLoss()
and margin <= 0
):
log_text = "! Ignore Sell Signal (No Sell At Loss)"
Logger.warning(log_text)
return True
if debug and self.state.action == 'SELL':
Logger.warning("-- configuration specifies not to sell within min and max margin percent bounds --")
Logger.warning(f"self.state.action == 'SELL' (actual: {self.state.action})")
Logger.warning(f"(self.app.nosellminpcnt is not None (actual: {self.app.nosellminpcnt})) and (margin ({margin}) >= self.app.nosellminpcnt ({self.app.nosellminpcnt}))")
Logger.warning(f"(self.app.nosellmaxpcnt is not None (actual: {self.app.nosellmaxpcnt})) and (margin ({margin}) <= self.app.nosellmaxpcnt ({self.app.nosellmaxpcnt}))")
Logger.warning("\n")
if self.state.action == "SELL" and (
(self.app.nosellminpcnt is not None) and (margin >= self.app.nosellminpcnt)
) and (
(self.app.nosellmaxpcnt is not None) and (margin <= self.app.nosellmaxpcnt)
):
log_text = "! Ignore Sell Signal (Within No-Sell Bounds)"
Logger.warning(log_text)
return True
return False
def getAction(self, price):
if self.isBuySignal(price):
return "BUY"
elif self.isSellSignal():
return "SELL"
else:
return "WAIT"
| true | true |
f710d9d3bac610cb12378ee562e63296d1c01fe2 | 3,167 | py | Python | byceps/services/shop/order/actions/ticket.py | homeworkprod/byceps | cd0f9f37f7b5eb517106ec761acc7e0bdf75e22e | [
"BSD-3-Clause"
] | 23 | 2015-08-03T23:28:54.000Z | 2018-12-12T20:11:45.000Z | byceps/services/shop/order/actions/ticket.py | homeworkprod/byceps | cd0f9f37f7b5eb517106ec761acc7e0bdf75e22e | [
"BSD-3-Clause"
] | 1 | 2018-09-30T18:18:24.000Z | 2018-09-30T18:18:24.000Z | byceps/services/shop/order/actions/ticket.py | homeworkprod/byceps | cd0f9f37f7b5eb517106ec761acc7e0bdf75e22e | [
"BSD-3-Clause"
] | 9 | 2015-08-06T16:41:36.000Z | 2018-09-25T11:17:31.000Z | """
byceps.services.shop.order.actions.ticket
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from typing import Any, Sequence
from uuid import UUID
from .....typing import UserID
from ....ticketing.dbmodels.ticket import Ticket
from ....ticketing import (
category_service as ticket_category_service,
ticket_creation_service,
ticket_revocation_service,
ticket_service,
)
from ....ticketing.transfer.models import TicketCategoryID, TicketID
from .. import log_service, service as order_service
from ..transfer.order import LineItem, Order, OrderID
from ._ticketing import create_tickets_sold_event, send_tickets_sold_event
def create_tickets(
    order: Order,
    line_item: LineItem,
    ticket_category_id: TicketCategoryID,
    initiator_id: UserID,
) -> None:
    """Create the tickets sold through the given line item.

    The tickets are created for (and assigned to) the order's buyer, a log
    entry is written for each created ticket, the line item's processing
    result is updated with the ticket IDs, and a "tickets sold" event is
    emitted.
    """
    buyer_id = order.placed_by_id
    quantity = line_item.quantity

    category = ticket_category_service.get_category(ticket_category_id)

    created_tickets = ticket_creation_service.create_tickets(
        category.party_id,
        ticket_category_id,
        buyer_id,
        quantity,
        order_number=order.order_number,
        used_by_id=buyer_id,
    )

    _create_creation_order_log_entries(order.id, created_tickets)

    # record the created ticket IDs on the line item (sorted for stability)
    processing_result: dict[str, Any] = {
        'ticket_ids': sorted(str(ticket.id) for ticket in created_tickets)
    }
    order_service.update_line_item_processing_result(
        line_item.id, processing_result
    )

    event = create_tickets_sold_event(
        order.id, initiator_id, ticket_category_id, buyer_id, quantity
    )
    send_tickets_sold_event(event)
def _create_creation_order_log_entries(
    order_id: OrderID, tickets: Sequence[Ticket]
) -> None:
    """Write a 'ticket-created' log entry for each created ticket."""
    entries = []
    for ticket in tickets:
        entries.append(
            {
                'ticket_id': str(ticket.id),
                'ticket_code': ticket.code,
                'ticket_category_id': str(ticket.category_id),
                'ticket_owner_id': str(ticket.owned_by_id),
            }
        )

    log_service.create_entries('ticket-created', order_id, entries)
def revoke_tickets(
    order: Order, line_item: LineItem, initiator_id: UserID
) -> None:
    """Revoke all tickets related to the line item.

    The ticket IDs are read back from the line item's processing result
    (where `create_tickets` stored them), and a log entry is written for
    each revoked ticket.
    """
    ticket_ids = {
        TicketID(UUID(id_str))
        for id_str in line_item.processing_result['ticket_ids']
    }
    tickets = ticket_service.find_tickets(ticket_ids)

    ticket_revocation_service.revoke_tickets(ticket_ids, initiator_id)

    _create_revocation_order_log_entries(order.id, tickets, initiator_id)
def _create_revocation_order_log_entries(
    order_id: OrderID, tickets: Sequence[Ticket], initiator_id: UserID
) -> None:
    """Write a 'ticket-revoked' log entry for each revoked ticket."""
    initiator_id_str = str(initiator_id)

    entries = [
        {
            'ticket_id': str(ticket.id),
            'ticket_code': ticket.code,
            'initiator_id': initiator_id_str,
        }
        for ticket in tickets
    ]

    log_service.create_entries('ticket-revoked', order_id, entries)
| 28.276786 | 80 | 0.703189 |
from typing import Any, Sequence
from uuid import UUID
from .....typing import UserID
from ....ticketing.dbmodels.ticket import Ticket
from ....ticketing import (
category_service as ticket_category_service,
ticket_creation_service,
ticket_revocation_service,
ticket_service,
)
from ....ticketing.transfer.models import TicketCategoryID, TicketID
from .. import log_service, service as order_service
from ..transfer.order import LineItem, Order, OrderID
from ._ticketing import create_tickets_sold_event, send_tickets_sold_event
def create_tickets(
order: Order,
line_item: LineItem,
ticket_category_id: TicketCategoryID,
initiator_id: UserID,
) -> None:
owned_by_id = order.placed_by_id
order_number = order.order_number
ticket_quantity = line_item.quantity
ticket_category = ticket_category_service.get_category(ticket_category_id)
tickets = ticket_creation_service.create_tickets(
ticket_category.party_id,
ticket_category_id,
owned_by_id,
ticket_quantity,
order_number=order_number,
used_by_id=owned_by_id,
)
_create_creation_order_log_entries(order.id, tickets)
data: dict[str, Any] = {
'ticket_ids': list(sorted(str(ticket.id) for ticket in tickets))
}
order_service.update_line_item_processing_result(line_item.id, data)
tickets_sold_event = create_tickets_sold_event(
order.id, initiator_id, ticket_category_id, owned_by_id, ticket_quantity
)
send_tickets_sold_event(tickets_sold_event)
def _create_creation_order_log_entries(
order_id: OrderID, tickets: Sequence[Ticket]
) -> None:
event_type = 'ticket-created'
datas = [
{
'ticket_id': str(ticket.id),
'ticket_code': ticket.code,
'ticket_category_id': str(ticket.category_id),
'ticket_owner_id': str(ticket.owned_by_id),
}
for ticket in tickets
]
log_service.create_entries(event_type, order_id, datas)
def revoke_tickets(
order: Order, line_item: LineItem, initiator_id: UserID
) -> None:
ticket_id_strs = line_item.processing_result['ticket_ids']
ticket_ids = {
TicketID(UUID(ticket_id_str)) for ticket_id_str in ticket_id_strs
}
tickets = ticket_service.find_tickets(ticket_ids)
ticket_revocation_service.revoke_tickets(ticket_ids, initiator_id)
_create_revocation_order_log_entries(order.id, tickets, initiator_id)
def _create_revocation_order_log_entries(
order_id: OrderID, tickets: Sequence[Ticket], initiator_id: UserID
) -> None:
event_type = 'ticket-revoked'
datas = [
{
'ticket_id': str(ticket.id),
'ticket_code': ticket.code,
'initiator_id': str(initiator_id),
}
for ticket in tickets
]
log_service.create_entries(event_type, order_id, datas)
| true | true |
f710da033824699bdb11c30cb57fc8866b1aa4a0 | 19,600 | py | Python | pde/trackers/trackers.py | xuanxu/py-pde | de33d938aea8680eff872ae1b64569895662a248 | [
"MIT"
] | null | null | null | pde/trackers/trackers.py | xuanxu/py-pde | de33d938aea8680eff872ae1b64569895662a248 | [
"MIT"
] | null | null | null | pde/trackers/trackers.py | xuanxu/py-pde | de33d938aea8680eff872ae1b64569895662a248 | [
"MIT"
] | null | null | null | """
Module defining classes for tracking results from simulations.
The trackers defined in this module are:
.. autosummary::
:nosignatures:
CallbackTracker
ProgressTracker
PrintTracker
PlotTracker
DataTracker
SteadyStateTracker
RuntimeTracker
ConsistencyTracker
MaterialConservationTracker
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
"""
from datetime import timedelta
import inspect
import sys
import time
from typing import Callable, Optional, Union, IO, List, Any # @UnusedImport
import numpy as np
from .base import TrackerBase, InfoDict, FinishedSimulation, Real
from .intervals import IntervalData, RealtimeIntervals
from ..fields.base import FieldBase
from ..fields import FieldCollection
from ..tools.parse_duration import parse_duration
from ..tools.misc import get_progress_bar_class
class CallbackTracker(TrackerBase):
    """ Tracker that calls a function periodically """

    def __init__(self, func: Callable,
                 interval: IntervalData = 1):
        """
        Args:
            func: The function to call periodically. The function signature
                should be `(state)` or `(state, time)`, where `state` contains
                the current state as an instance of
                :class:`~pde.fields.FieldBase` and `time` is a
                float value indicating the current time. Note that only a view
                of the state is supplied, implying that a copy needs to be made
                if the data should be stored.
            interval: |Arg_tracker_interval|
        """
        super().__init__(interval=interval)
        self._callback = func
        # how many positional arguments the callback expects (1 or 2)
        self._num_args = len(inspect.signature(func).parameters)
        if self._num_args not in (1, 2):
            raise ValueError('`func` must be a function accepting one or two '
                             f'arguments, not {self._num_args}')

    def handle(self, field: FieldBase, t: float) -> None:
        """ handle data supplied to this tracker

        Args:
            field (:class:`~pde.fields.FieldBase`):
                The current state of the simulation
            t (float): The associated time
        """
        # pass the time only to callbacks that declared a second parameter
        args = (field,) if self._num_args == 1 else (field, t)
        self._callback(*args)
class ProgressTracker(TrackerBase):
    """ Tracker that shows the progress of the simulation """
    name = 'progress'
    def __init__(self, interval: IntervalData = None,
                 ndigits: int = 5, leave: bool = True):
        """
        Args:
            interval: |Arg_tracker_interval|
                The default value `None` updates the progress bar approximately
                every (real) second.
            ndigits (int): The number of digits after the decimal point that are
                shown maximally.
            leave (bool): Whether to leave the progress bar after the simulation
                has finished (default: True)
        """
        if interval is None:
            # print every second by default
            interval = RealtimeIntervals(duration=1)
        super().__init__(interval=interval)
        self.ndigits = ndigits
        self.leave = leave
    def initialize(self, field: FieldBase, info: InfoDict = None) -> float:
        """ initialize the tracker with information about the simulation
        Args:
            field (:class:`~pde.fields.FieldBase`):
                An example of the data that will be analyzed by the tracker
            info (dict):
                Extra information from the simulation
        Returns:
            float: The first time the tracker needs to handle data
        """
        result = super().initialize(field, info)
        # get solver information; `total` stays None when t_end is unknown
        controller_info = {} if info is None else info.get('controller', {})
        # initialize the progress bar
        pb_cls = get_progress_bar_class()
        self.progress_bar = pb_cls(total=controller_info.get('t_end'),
                                   initial=controller_info.get('t_start', 0),
                                   leave=self.leave)
        self.progress_bar.set_description('Initializing')
        return result
    def handle(self, field: FieldBase, t: float) -> None:
        """ handle data supplied to this tracker
        Args:
            field (:class:`~pde.fields.FieldBase`):
                The current state of the simulation
            t (float): The associated time
        """
        # show an update; clamp t so the bar never exceeds its total
        if self.progress_bar.total:
            t_new = min(t, self.progress_bar.total)
        else:
            t_new = t
        # round to avoid visually noisy float positions
        self.progress_bar.n = round(t_new, self.ndigits)
        self.progress_bar.set_description('')
    def finalize(self, info: InfoDict = None) -> None:
        """ finalize the tracker, supplying additional information
        Args:
            info (dict):
                Extra information from the simulation
        """
        super().finalize(info)
        self.progress_bar.set_description('')
        # limit progress bar to 100%
        controller_info = {} if info is None else info.get('controller', {})
        t_final = controller_info.get('t_final', -np.inf)
        t_end = controller_info.get('t_end', -np.inf)
        if t_final >= t_end and self.progress_bar.total:
            self.progress_bar.n = self.progress_bar.total
            self.progress_bar.refresh()
        if (controller_info.get('successful', False) and self.leave and
                hasattr(self.progress_bar, 'sp')):
            # show progress bar in green if simulation was successful. We
            # need to overwrite the default behavior (and disable the
            # progress bar) since reaching steady state means the simulation
            # was successful even though it did not reach t_final
            try:
                self.progress_bar.sp(bar_style='success')
            except TypeError:
                # NOTE(review): presumably some tqdm frontends do not accept
                # `bar_style`; fall back to simply closing the bar
                self.progress_bar.close()
            else:
                # keep the (green) bar on screen but stop further updates
                self.disable = True
        else:
            self.progress_bar.close()
    def __del__(self):
        # close the bar on garbage collection unless it was already disabled
        if hasattr(self, 'progress_bar') and not self.progress_bar.disable:
            self.progress_bar.close()
class PrintTracker(TrackerBase):
    """ Tracker that prints data to a stream (default: stdout) """

    name = 'print'

    def __init__(self, interval: IntervalData = 1,
                 stream: IO[str] = sys.stdout):
        """
        Args:
            interval: |Arg_tracker_interval|
            stream: The stream used for printing
        """
        super().__init__(interval=interval)
        self.stream = stream

    def handle(self, field: FieldBase, t: float) -> None:
        """ write mean and standard deviation of the state to the stream

        Args:
            field (:class:`~pde.fields.FieldBase`):
                The current state of the simulation
            t (float): The associated time
        """
        mean = field.data.mean()
        std = field.data.std()
        self.stream.write(f"t={t:g}, c={mean:.3g}±{std:.3g}\n")
        self.stream.flush()  # make the output visible immediately
class PlotTracker(TrackerBase):
    """ Tracker that plots data on screen, to files, or writes a movie """
    name = 'plot'
    def __init__(self, interval: IntervalData = 1,
                 output_file: Optional[str] = None,
                 output_folder: Optional[str] = None,
                 movie_file: Optional[str] = None,
                 quantities=None,
                 show: bool = True):
        """
        Args:
            interval: |Arg_tracker_interval|
            output_file (str, optional):
                Specifies a single image file, which is updated periodically, so
                that the progress can be monitored (e.g. on a compute cluster)
            output_folder (str, optional):
                Specifies a folder to which all images are written. The files
                will have names with increasing numbers.
            movie_file (str, optional):
                Specifies a filename to which a movie of all the frames is
                written after the simulation.
            quantities:
                |Args_plot_quantities|
            show (bool, optional):
                Determines whether the plot is shown while the simulation is
                running. If `False`, the files are created in the background.
        """
        super().__init__(interval=interval)
        self.output_file = output_file
        self.output_folder = output_folder
        self.quantities = quantities
        self.show = show
        if movie_file is not None or output_folder is not None:
            # imported lazily so movie support is only required when used
            from ..visualization.movies import Movie
            movie = Movie(filename=movie_file, image_folder=output_folder)
            self.movie: Optional[Movie] = movie
            self.movie._start()  # initialize movie
        else:
            self.movie = None
    def initialize(self, field: FieldBase, info: InfoDict = None) -> float:
        """ initialize the tracker with information about the simulation
        Args:
            field (:class:`~pde.fields.FieldBase`):
                An example of the data that will be analyzed by the tracker
            info (dict):
                Extra information from the simulation
        Returns:
            float: The first time the tracker needs to handle data
        """
        # imported lazily to avoid pulling in plotting machinery up front
        from ..visualization.plotting import ScalarFieldPlot
        self.plot = ScalarFieldPlot(field, quantities=self.quantities,
                                    show=self.show)
        return super().initialize(field, info=info)
    def handle(self, field: FieldBase, t: float) -> None:
        """ handle data supplied to this tracker
        Args:
            field (:class:`~pde.fields.FieldBase`):
                The current state of the simulation
            t (float): The associated time
        """
        self.plot.show_data(field, title=f'Time {t:g}')
        # optionally mirror the current figure to a file and/or the movie
        if self.output_file:
            self.plot.fig.savefig(self.output_file)
        if self.movie:
            self.movie.add_figure(self.plot.fig)
    def finalize(self, info: InfoDict = None) -> None:
        """ finalize the tracker, supplying additional information
        Args:
            info (dict):
                Extra information from the simulation
        """
        super().finalize(info)
        if self.movie:
            if self.movie.filename:
                # write out movie file if requested
                self._logger.info(f'Writing movie to {self.movie.filename}...')
                self.movie.save()
            # finalize movie (e.g. delete temporary files)
            self.movie._end()
        if not self.show:
            # release the (hidden) figure resources
            del self.plot
class DataTracker(CallbackTracker):
    """ Tracker that stores custom data obtained by calling a function

    Attributes:
        times (list):
            The time points at which the data is stored
        data (list):
            The actually stored data, which is a list of the objects returned by
            the callback function.
    """

    def __init__(self, func: Callable,
                 interval: IntervalData = 1):
        """
        Args:
            func: Callback with signature `(state)` or `(state, time)` whose
                return value is recorded at every handled time point. Only a
                view of the state is supplied, so it must be copied if it is
                to be stored.
            interval: |Arg_tracker_interval|
        """
        super().__init__(func=func, interval=interval)
        self.times: List[float] = []
        self.data: List[Any] = []

    def handle(self, field: FieldBase, t: float) -> None:
        """ record the callback result for the current state

        Args:
            field (:class:`~pde.fields.FieldBase`):
                The current state of the simulation
            t (float): The associated time
        """
        self.times.append(t)
        # call the callback with or without the time, as it expects
        if self._num_args == 1:
            value = self._callback(field)
        else:
            value = self._callback(field, t)
        self.data.append(value)

    @property
    def dataframe(self):
        """ pandas.DataFrame: the data as a pandas DataFrame """
        import pandas as pd
        frame = pd.DataFrame(self.data)
        # prepend the recorded time points as the first column
        frame.insert(0, 'time', self.times)
        return frame
class SteadyStateTracker(TrackerBase):
    """ Tracker that interrupts the simulation once steady state is reached

    Steady state is obtained when the state does not change anymore. This is the
    case when the derivative is close to zero.
    """

    name = 'steady_state'

    def __init__(self, interval: IntervalData = None,
                 atol: float = 1e-8,
                 rtol: float = 1e-5):
        """
        Args:
            interval: |Arg_tracker_interval|
                The default value `None` checks for the steady state
                approximately every (real) second.
            atol (float): Absolute tolerance that must be reached to abort the
                simulation
            rtol (float): Relative tolerance that must be reached to abort the
                simulation
        """
        if interval is None:
            # fall back to checking roughly once per wall-clock second
            interval = RealtimeIntervals(duration=1)
        super().__init__(interval=interval)
        self.atol = atol
        self.rtol = rtol
        self._last_data = None  # state seen at the previous check

    def handle(self, field: FieldBase, t: float) -> None:
        """ handle the data of `field` for a give `time` """
        current = field.data
        if self._last_data is not None:
            # scale tolerances with dt to make the test independent of dt
            stationary = np.allclose(self._last_data, current,
                                     rtol=self.rtol * self.interval.dt,
                                     atol=self.atol * self.interval.dt,
                                     equal_nan=True)
            if stationary:
                raise FinishedSimulation('Reached stationary state')
        # remember a copy of the current state for the next comparison
        self._last_data = current.copy()
class RuntimeTracker(TrackerBase):
    """ Tracker that interrupts the simulation once a duration has passed """

    def __init__(self, max_runtime: Union[Real, str],
                 interval: IntervalData = 1):
        """
        Args:
            max_runtime (float or str):
                The maximal runtime of the simulation. If the runtime is
                exceeded, the simulation is interrupted. Values can be either
                given as a number (interpreted as seconds) or as a string, which
                is then parsed using the function
                :func:`~pde.tools.parse_duration.parse_duration`.
            interval: |Arg_tracker_interval|
        """
        super().__init__(interval=interval)
        # numbers (and numeric strings) are taken as seconds; anything else
        # is handed to parse_duration
        try:
            seconds = float(max_runtime)
        except ValueError:
            seconds = parse_duration(str(max_runtime)).total_seconds()
        self.max_runtime = seconds

    def initialize(self, field: FieldBase, info: InfoDict = None) -> float:
        """
        Args:
            field (:class:`~pde.fields.FieldBase`):
                An example of the data that will be analyzed by the tracker
            info (dict):
                Extra information from the simulation
        Returns:
            float: The first time the tracker needs to handle data
        """
        # deadline is measured in wall-clock time
        self.max_time = time.time() + self.max_runtime
        return super().initialize(field, info)

    def handle(self, field: FieldBase, t: float) -> None:
        """ handle the data of `field` for a give `time` """
        if time.time() > self.max_time:
            duration = timedelta(seconds=self.max_runtime)
            raise FinishedSimulation(f'Reached maximal runtime of {str(duration)}')
class ConsistencyTracker(TrackerBase):
    """ Tracker that interrupts the simulation when the state is not finite """

    name = 'consistency'

    def __init__(self, interval: IntervalData = None):
        """
        Args:
            interval: |Arg_tracker_interval| The default value `None` checks for
                consistency approximately every (real) second.
        """
        if interval is None:
            interval = RealtimeIntervals(duration=1)
        super().__init__(interval=interval)

    def handle(self, field: FieldBase, t: float) -> None:
        """ abort the simulation when `field` contains NaN or infinite values

        Args:
            field (:class:`~pde.fields.FieldBase`):
                The current state of the simulation
            t (float): The associated time

        Raises:
            StopIteration: if any entry of the field data is not finite
        """
        if not np.all(np.isfinite(field.data)):
            raise StopIteration('Field was not finite')
        # FIX: the previous version also stored `self._last = field.data.copy()`
        # here; that attribute was never read anywhere, so the full-array copy
        # on every check was pure overhead and has been removed
class MaterialConservationTracker(TrackerBase):
    """ Ensure that the amount of material is conserved """

    name = 'material_conservation'

    def __init__(self, interval: IntervalData = 1,
                 atol: float = 1e-4,
                 rtol: float = 1e-4):
        """
        Args:
            interval: |Arg_tracker_interval|
            atol (float): Absolute tolerance for amount deviations
            rtol (float): Relative tolerance for amount deviations
        """
        super().__init__(interval=interval)
        self.atol = atol
        self.rtol = rtol

    @staticmethod
    def _magnitudes(field: FieldBase):
        """ return the magnitude(s) of `field` (array for collections) """
        if isinstance(field, FieldCollection):
            return np.array([f.magnitude for f in field])
        return field.magnitude  # type: ignore

    def initialize(self, field: FieldBase, info: InfoDict = None) -> float:
        """
        Args:
            field (:class:`~pde.fields.base.FieldBase`):
                An example of the data that will be analyzed by the tracker
            info (dict):
                Extra information from the simulation
        Returns:
            float: The first time the tracker needs to handle data
        """
        # remember the initial amount(s) of material as the reference
        self._reference = self._magnitudes(field)
        return super().initialize(field, info)

    def handle(self, field: FieldBase, t: float) -> None:
        """ compare the current amount(s) of material against the reference

        Args:
            field (:class:`~pde.fields.FieldBase`):
                The current state of the simulation
            t (float): The associated time

        Raises:
            StopIteration: if any amount deviates beyond the tolerances
        """
        mags = self._magnitudes(field)
        c = np.isclose(mags, self._reference, rtol=self.rtol, atol=self.atol)
        if not np.all(c):
            if isinstance(field, FieldCollection):
                msg = f'Material of field {np.flatnonzero(~c)} is not conserved'
            else:
                # FIX: was an f-string without placeholders
                msg = 'Material is not conserved'
            raise StopIteration(msg)
# explicit public API of this module
__all__ = ['CallbackTracker', 'ProgressTracker', 'PrintTracker', 'PlotTracker',
           'DataTracker', 'SteadyStateTracker', 'RuntimeTracker',
           'ConsistencyTracker', 'MaterialConservationTracker']
| 35.507246 | 80 | 0.567194 |
from datetime import timedelta
import inspect
import sys
import time
from typing import Callable, Optional, Union, IO, List, Any
import numpy as np
from .base import TrackerBase, InfoDict, FinishedSimulation, Real
from .intervals import IntervalData, RealtimeIntervals
from ..fields.base import FieldBase
from ..fields import FieldCollection
from ..tools.parse_duration import parse_duration
from ..tools.misc import get_progress_bar_class
class CallbackTracker(TrackerBase):
    """Tracker that calls a user-supplied function periodically."""
    def __init__(self, func: Callable,
                 interval: IntervalData = 1):
        """Store `func`, which must accept `(state)` or `(state, time)`."""
        super().__init__(interval=interval)
        self._callback = func
        # inspect the callback once to learn how many arguments it expects
        self._num_args = len(inspect.signature(func).parameters)
        if not 0 < self._num_args < 3:
            raise ValueError('`func` must be a function accepting one or two '
                             f'arguments, not {self._num_args}')
    def handle(self, field: FieldBase, t: float) -> None:
        """Invoke the callback with the state (and the time, if accepted)."""
        if self._num_args == 1:
            self._callback(field)
        else:
            self._callback(field, t)
class ProgressTracker(TrackerBase):
    """Tracker that shows the progress of the simulation with a progress bar."""
    name = 'progress'
    def __init__(self, interval: IntervalData = None,
                 ndigits: int = 5, leave: bool = True):
        """Set up the tracker; `None` interval checks about once per second."""
        if interval is None:
            interval = RealtimeIntervals(duration=1)
        super().__init__(interval=interval)
        self.ndigits = ndigits
        self.leave = leave
    def initialize(self, field: FieldBase, info: InfoDict = None) -> float:
        """Create the progress bar from controller info; return first time."""
        result = super().initialize(field, info)
        controller_info = {} if info is None else info.get('controller', {})
        pb_cls = get_progress_bar_class()
        # `total` may be None when t_end is unknown
        self.progress_bar = pb_cls(total=controller_info.get('t_end'),
                                   initial=controller_info.get('t_start', 0),
                                   leave=self.leave)
        self.progress_bar.set_description('Initializing')
        return result
    def handle(self, field: FieldBase, t: float) -> None:
        """Advance the progress bar to time `t` (clamped to its total)."""
        if self.progress_bar.total:
            t_new = min(t, self.progress_bar.total)
        else:
            t_new = t
        self.progress_bar.n = round(t_new, self.ndigits)
        self.progress_bar.set_description('')
    def finalize(self, info: InfoDict = None) -> None:
        """Fill the bar on success and close or leave it as configured."""
        super().finalize(info)
        self.progress_bar.set_description('')
        controller_info = {} if info is None else info.get('controller', {})
        t_final = controller_info.get('t_final', -np.inf)
        t_end = controller_info.get('t_end', -np.inf)
        if t_final >= t_end and self.progress_bar.total:
            # the run covered the whole interval: show the bar as complete
            self.progress_bar.n = self.progress_bar.total
            self.progress_bar.refresh()
        if (controller_info.get('successful', False) and self.leave and
                hasattr(self.progress_bar, 'sp')):
            # color the bar green on success; fall back to closing if the
            # frontend does not accept `bar_style`
            try:
                self.progress_bar.sp(bar_style='success')
            except TypeError:
                self.progress_bar.close()
            else:
                self.disable = True
        else:
            self.progress_bar.close()
    def __del__(self):
        # close the bar on garbage collection unless it was already disabled
        if hasattr(self, 'progress_bar') and not self.progress_bar.disable:
            self.progress_bar.close()
class PrintTracker(TrackerBase):
    """Tracker that prints summary statistics to a stream (default: stdout)."""
    name = 'print'
    def __init__(self, interval: IntervalData = 1,
                 stream: IO[str] = sys.stdout):
        """Store the output `stream`."""
        super().__init__(interval=interval)
        self.stream = stream
    def handle(self, field: FieldBase, t: float) -> None:
        """Write mean and standard deviation of the state to the stream."""
        data = f"c={field.data.mean():.3g}±{field.data.std():.3g}"
        self.stream.write(f"t={t:g}, {data}\n")
        self.stream.flush()
class PlotTracker(TrackerBase):
    """Tracker that plots the state on screen, to files, or into a movie."""
    name = 'plot'
    def __init__(self, interval: IntervalData = 1,
                 output_file: Optional[str] = None,
                 output_folder: Optional[str] = None,
                 movie_file: Optional[str] = None,
                 quantities=None,
                 show: bool = True):
        """Set up plotting; `movie_file`/`output_folder` enable movie output."""
        super().__init__(interval=interval)
        self.output_file = output_file
        self.output_folder = output_folder
        self.quantities = quantities
        self.show = show
        if movie_file is not None or output_folder is not None:
            # imported lazily so movie support is only required when used
            from ..visualization.movies import Movie
            movie = Movie(filename=movie_file, image_folder=output_folder)
            self.movie: Optional[Movie] = movie
            self.movie._start()
        else:
            self.movie = None
    def initialize(self, field: FieldBase, info: InfoDict = None) -> float:
        """Create the plot for `field` and return the first handling time."""
        from ..visualization.plotting import ScalarFieldPlot
        self.plot = ScalarFieldPlot(field, quantities=self.quantities,
                                    show=self.show)
        return super().initialize(field, info=info)
    def handle(self, field: FieldBase, t: float) -> None:
        """Update the plot (and optional file/movie outputs) with `field`."""
        self.plot.show_data(field, title=f'Time {t:g}')
        if self.output_file:
            self.plot.fig.savefig(self.output_file)
        if self.movie:
            self.movie.add_figure(self.plot.fig)
    def finalize(self, info: InfoDict = None) -> None:
        """Write the movie (if any) and release plotting resources."""
        super().finalize(info)
        if self.movie:
            if self.movie.filename:
                self._logger.info(f'Writing movie to {self.movie.filename}...')
                self.movie.save()
            # finalize movie (e.g. delete temporary files)
            self.movie._end()
        if not self.show:
            del self.plot
class DataTracker(CallbackTracker):
    """Tracker that records the return values of a callback over time."""
    def __init__(self, func: Callable,
                 interval: IntervalData = 1):
        """Store `func` and initialize empty `times`/`data` records."""
        super().__init__(func=func, interval=interval)
        self.times: List[float] = []
        self.data: List[Any] = []
    def handle(self, field: FieldBase, t: float) -> None:
        """Append `t` and the callback result for the current state."""
        self.times.append(t)
        if self._num_args == 1:
            self.data.append(self._callback(field))
        else:
            self.data.append(self._callback(field, t))
    @property
    def dataframe(self):
        """pandas.DataFrame: the recorded data with a leading `time` column."""
        import pandas as pd
        df = pd.DataFrame(self.data)
        df.insert(0, 'time', self.times)
        return df
class SteadyStateTracker(TrackerBase):
    """Tracker that interrupts the simulation once steady state is reached."""
    name = 'steady_state'
    def __init__(self, interval: IntervalData = None,
                 atol: float = 1e-8,
                 rtol: float = 1e-5):
        """Store tolerances; `None` interval checks about once per second."""
        if interval is None:
            interval = RealtimeIntervals(duration=1)
        super().__init__(interval=interval)
        self.atol = atol
        self.rtol = rtol
        self._last_data = None  # state seen at the previous check
    def handle(self, field: FieldBase, t: float) -> None:
        """Abort via FinishedSimulation when the state stops changing."""
        if self._last_data is not None:
            # scale tolerances with dt to make the test independent of dt
            atol = self.atol * self.interval.dt
            rtol = self.rtol * self.interval.dt
            if np.allclose(self._last_data, field.data,
                           rtol=rtol, atol=atol, equal_nan=True):
                raise FinishedSimulation('Reached stationary state')
        self._last_data = field.data.copy()
class RuntimeTracker(TrackerBase):
    """Tracker that interrupts the simulation once a wall-clock limit passes."""
    def __init__(self, max_runtime: Union[Real, str],
                 interval: IntervalData = 1):
        """Accept `max_runtime` as seconds or a parseable duration string."""
        super().__init__(interval=interval)
        try:
            self.max_runtime = float(max_runtime)
        except ValueError:
            # non-numeric strings are parsed as human-readable durations
            td = parse_duration(str(max_runtime))
            self.max_runtime = td.total_seconds()
    def initialize(self, field: FieldBase, info: InfoDict = None) -> float:
        """Record the wall-clock deadline and return the first handling time."""
        self.max_time = time.time() + self.max_runtime
        return super().initialize(field, info)
    def handle(self, field: FieldBase, t: float) -> None:
        """Raise FinishedSimulation once the deadline has passed."""
        if time.time() > self.max_time:
            dt = timedelta(seconds=self.max_runtime)
            raise FinishedSimulation(f'Reached maximal runtime of {str(dt)}')
class ConsistencyTracker(TrackerBase):
    """Tracker that interrupts the simulation when the state is not finite."""
    name = 'consistency'
    def __init__(self, interval: IntervalData = None):
        """Set up the tracker; `None` interval checks about once per second."""
        if interval is None:
            interval = RealtimeIntervals(duration=1)
        super().__init__(interval=interval)
    def handle(self, field: FieldBase, t: float) -> None:
        """Raise StopIteration when the field contains NaN/infinite values."""
        if not np.all(np.isfinite(field.data)):
            raise StopIteration('Field was not finite')
        # FIX: removed `self._last = field.data.copy()` — the attribute was
        # never read, so the per-check full-array copy was pure overhead
class MaterialConservationTracker(TrackerBase):
    """Tracker ensuring that the amount of material is conserved."""
    name = 'material_conservation'
    def __init__(self, interval: IntervalData = 1,
                 atol: float = 1e-4,
                 rtol: float = 1e-4):
        """Store absolute/relative tolerances for amount deviations."""
        super().__init__(interval=interval)
        self.atol = atol
        self.rtol = rtol
    @staticmethod
    def _magnitudes(field: FieldBase):
        """Return the magnitude(s) of `field` (array for collections)."""
        if isinstance(field, FieldCollection):
            return np.array([f.magnitude for f in field])
        return field.magnitude  # type: ignore
    def initialize(self, field: FieldBase, info: InfoDict = None) -> float:
        """Remember the initial amount(s) of material as the reference."""
        self._reference = self._magnitudes(field)
        return super().initialize(field, info)
    def handle(self, field: FieldBase, t: float) -> None:
        """Raise StopIteration when an amount deviates beyond tolerances."""
        mags = self._magnitudes(field)
        c = np.isclose(mags, self._reference, rtol=self.rtol, atol=self.atol)
        if not np.all(c):
            if isinstance(field, FieldCollection):
                msg = f'Material of field {np.flatnonzero(~c)} is not conserved'
            else:
                # FIX: was an f-string without placeholders
                msg = 'Material is not conserved'
            raise StopIteration(msg)
# explicit public API of this module
__all__ = ['CallbackTracker', 'ProgressTracker', 'PrintTracker', 'PlotTracker',
           'DataTracker', 'SteadyStateTracker', 'RuntimeTracker',
           'ConsistencyTracker', 'MaterialConservationTracker']
| true | true |
f710dadd1226258b991a79823b61c58c911a15f1 | 11,987 | py | Python | ymir/command/tests/unit/test_tools_ark_data_exporter.py | Zhang-SJ930104/ymir | dd6481be6f229ade4cf8fba64ef44a15357430c4 | [
"Apache-2.0"
] | 64 | 2021-11-15T03:48:00.000Z | 2022-03-25T07:08:46.000Z | ymir/command/tests/unit/test_tools_ark_data_exporter.py | Zhang-SJ930104/ymir | dd6481be6f229ade4cf8fba64ef44a15357430c4 | [
"Apache-2.0"
] | 35 | 2021-11-23T04:14:35.000Z | 2022-03-26T09:03:43.000Z | ymir/command/tests/unit/test_tools_ark_data_exporter.py | Aryalfrat/ymir | d4617ed00ef67a77ab4e1944763f608bface4be6 | [
"Apache-2.0"
] | 57 | 2021-11-11T10:15:40.000Z | 2022-03-29T07:27:54.000Z | import os
import shutil
from typing import List, Tuple
import unittest
from google.protobuf import json_format
from mir.protos import mir_command_pb2 as mirpb
from mir.tools import data_exporter, hash_utils, mir_storage_ops
from tests import utils as test_utils
class TestArkDataExporter(unittest.TestCase):
    """Integration tests for data_exporter.export in ARK and VOC formats.

    Builds a temporary mir repository with two known jpeg assets plus fixed
    annotations, runs the exporter, and checks the produced asset files,
    annotation files and index file.
    """
    # life cycle
    def __init__(self, methodName: str) -> None:
        super().__init__(methodName=methodName)
        # per-test-case scratch directories, derived from the test id
        self._test_root = test_utils.dir_test_root(self.id().split('.')[-3:])
        self._assets_location = os.path.join(self._test_root, 'assets_location')
        self._dest_root = os.path.join(self._test_root, 'export_dest')
        self._mir_root = os.path.join(self._test_root, 'mir-repo')
    def setUp(self) -> None:
        self.__prepare_dirs()
        self.__prepare_mir_repo()
        self.__prepare_assets()
        return super().setUp()
    def tearDown(self) -> None:
        # NOTE(review): cleanup is deliberately disabled here, which leaves
        # the scratch directories behind after each run (useful for debugging,
        # but __deprepare_dirs is currently dead code) — confirm intent
        # self.__deprepare_dirs()
        return super().tearDown()
    # private: prepare env
    def __prepare_dirs(self):
        # (re)create all scratch directories from a clean slate
        test_utils.remake_dirs(self._test_root)
        test_utils.remake_dirs(self._assets_location)
        test_utils.remake_dirs(self._dest_root)
        test_utils.remake_dirs(self._mir_root)
    def __deprepare_dirs(self):
        # remove the whole per-test scratch tree (currently unused; see tearDown)
        if os.path.isdir(self._test_root):
            shutil.rmtree(self._test_root)
    def __prepare_assets(self):
        '''
        copy all assets from project to assets_location, assumes that `self._assets_location` already created
        '''
        image_paths = ['tests/assets/2007_000032.jpg', 'tests/assets/2007_000243.jpg']
        # assets are stored under their sha1 hash, mirroring mir asset storage
        sha1sum_path_pairs = [(hash_utils.sha1sum_for_file(image_path), image_path)
                              for image_path in image_paths]  # type: List[Tuple[str, str]]
        for sha1sum, image_path in sha1sum_path_pairs:
            shutil.copyfile(image_path, os.path.join(self._assets_location, sha1sum))
    def __prepare_mir_repo(self):
        '''
        creates mir repo, assumes that `self._mir_root` already created
        '''
        test_utils.mir_repo_init(self._mir_root)
        test_utils.mir_repo_create_branch(self._mir_root, 'a')
        # metadatas: image attributes keyed by the assets' sha1 hashes
        metadatas_dict = {
            'attributes': {
                '430df22960b0f369318705800139fcc8ec38a3e4': {
                    'assetType': 'AssetTypeImageJpeg',
                    'width': 500,
                    'height': 281,
                    'imageChannels': 3
                },
                'a3008c032eb11c8d9ffcb58208a36682ee40900f': {
                    'assetType': 'AssetTypeImageJpeg',
                    'width': 500,
                    'height': 333,
                    'imageChannels': 3
                }
            }
        }
        mir_metadatas = mirpb.MirMetadatas()
        json_format.ParseDict(metadatas_dict, mir_metadatas)
        # annotations: bounding boxes for task 'a'; the first asset carries
        # four boxes (class ids 52, 52, 2, 2), the second a single box (52)
        annotations_dict = {
            'task_annotations': {
                'a': {
                    'image_annotations': {
                        '430df22960b0f369318705800139fcc8ec38a3e4': {
                            'annotations': [{
                                'index': 0,
                                'box': {
                                    'x': 104,
                                    'y': 78,
                                    'w': 272,
                                    'h': 105
                                },
                                'class_id': 52,
                                'score': 1,
                            }, {
                                'index': 1,
                                'box': {
                                    'x': 133,
                                    'y': 88,
                                    'w': 65,
                                    'h': 36
                                },
                                'class_id': 52,
                                'score': 1,
                            }, {
                                'index': 2,
                                'box': {
                                    'x': 195,
                                    'y': 180,
                                    'w': 19,
                                    'h': 50
                                },
                                'class_id': 2,
                                'score': 1,
                            }, {
                                'index': 3,
                                'box': {
                                    'x': 26,
                                    'y': 189,
                                    'w': 19,
                                    'h': 95
                                },
                                'class_id': 2,
                                'score': 1,
                            }]
                        },
                        'a3008c032eb11c8d9ffcb58208a36682ee40900f': {
                            'annotations': [{
                                'index': 0,
                                'box': {
                                    'x': 181,
                                    'y': 127,
                                    'w': 94,
                                    'h': 67
                                },
                                'class_id': 52,
                                'score': 1,
                            }]
                        },
                    }
                }
            },
            'head_task_id': 'a',
        }
        mir_annotations = mirpb.MirAnnotations()
        json_format.ParseDict(annotations_dict, mir_annotations)
        # keywords
        # NOTE(review): mir_keywords is built and validated against the proto
        # schema but never passed to save_and_commit below — presumably dead
        # setup code (or keywords are derived from annotations); confirm
        keywords_dict = {
            'keywords': {
                '430df22960b0f369318705800139fcc8ec38a3e4': {
                    'predifined_keyids': [2, 52],
                    'customized_keywords': ['pascal']
                },
                'a3008c032eb11c8d9ffcb58208a36682ee40900f': {
                    'predifined_keyids': [52],
                    'customized_keywords': ['pascal']
                },
            }
        }
        mir_keywords = mirpb.MirKeywords()
        json_format.ParseDict(keywords_dict, mir_keywords)
        # task
        task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData,
                                           task_id='a',
                                           message='import')
        # save and commit
        mir_storage_ops.MirStorageOps.save_and_commit(mir_root=self._mir_root,
                                                      mir_branch='a',
                                                      his_branch='master',
                                                      mir_datas={
                                                          mirpb.MirStorage.MIR_METADATAS: mir_metadatas,
                                                          mirpb.MirStorage.MIR_ANNOTATIONS: mir_annotations,
                                                      },
                                                      task=task)
    # private: check result
    def __check_result(self, asset_ids, format_type, export_path, index_file_path):
        # check files
        # NOTE(review): if format_type is neither ARK nor VOC, annotation_path
        # stays unbound and the assert below raises NameError — callers only
        # pass these two formats, but the gap is worth knowing about
        for asset_id in asset_ids:
            asset_path = os.path.join(export_path, asset_id + '.jpeg')
            self.assertTrue(os.path.isfile(asset_path))
            if format_type == data_exporter.ExportFormat.EXPORT_FORMAT_ARK:
                annotation_path = os.path.join(export_path, asset_id + '.txt')
            elif format_type == data_exporter.ExportFormat.EXPORT_FORMAT_VOC:
                annotation_path = os.path.join(export_path, asset_id + '.xml')
            self.assertTrue(os.path.isfile(annotation_path))
        # index file exists
        self.assertTrue(os.path.isfile(index_file_path))
        # index file have enough lines
        # and each line is accessable
        with open(index_file_path, 'r') as idx_f:
            lines = idx_f.readlines()
            self.assertEqual(len(lines), len(asset_ids))
            for line in lines:
                asset_rel_path, annotation_rel_path = line.split()
                self.assertTrue(os.path.isfile(os.path.join(export_path, asset_rel_path)))
                self.assertTrue(os.path.isfile(os.path.join(export_path, annotation_rel_path)))
    def __check_ark_annotations(self, asset_id: str, export_path: str, expected_first_two_cols: List[Tuple[int, int]]):
        # ark annotation lines are comma separated; only the first two columns
        # (class index, x) are checked here
        annotation_path = os.path.join(export_path, asset_id + '.txt')
        with open(annotation_path, 'r') as f:
            lines = f.read().splitlines()
        self.assertEqual(len(expected_first_two_cols), len(lines))
        for line_idx, line in enumerate(lines):
            line_components = line.split(',')
            for col_idx in range(2):
                self.assertEqual(expected_first_two_cols[line_idx][col_idx], int(line_components[col_idx].strip()))
    # public: test cases
    def test_normal_00(self):
        ''' normal case: ark format '''
        asset_ids = {'430df22960b0f369318705800139fcc8ec38a3e4', 'a3008c032eb11c8d9ffcb58208a36682ee40900f'}
        train_path = os.path.join(self._dest_root, 'train')
        # class ids 2 and 52 are remapped to contiguous indices 0 and 1
        data_exporter.export(mir_root=self._mir_root,
                             assets_location=self._assets_location,
                             class_type_ids={
                                 2: 0,
                                 52: 1
                             },
                             asset_ids=asset_ids,
                             asset_dir=train_path,
                             annotation_dir=train_path,
                             need_ext=True,
                             need_id_sub_folder=False,
                             base_branch='a',
                             base_task_id='a',
                             format_type=data_exporter.ExportFormat.EXPORT_FORMAT_ARK,
                             index_file_path=os.path.join(train_path, 'index.tsv'),
                             index_assets_prefix='')
        # check result
        self.__check_result(asset_ids=asset_ids,
                            format_type=data_exporter.ExportFormat.EXPORT_FORMAT_ARK,
                            export_path=train_path,
                            index_file_path=os.path.join(train_path, 'index.tsv'))
        # expected (remapped class index, x) pairs per annotation line
        self.__check_ark_annotations(asset_id='430df22960b0f369318705800139fcc8ec38a3e4',
                                     export_path=train_path,
                                     expected_first_two_cols=[(1, 104), (1, 133), (0, 195), (0, 26)])
    def test_normal_01(self):
        ''' normal case: voc format '''
        asset_ids = {'430df22960b0f369318705800139fcc8ec38a3e4', 'a3008c032eb11c8d9ffcb58208a36682ee40900f'}
        train_path = os.path.join(self._dest_root, 'train')
        data_exporter.export(mir_root=self._mir_root,
                             assets_location=self._assets_location,
                             class_type_ids={
                                 2: 0,
                                 52: 1
                             },
                             asset_ids=asset_ids,
                             asset_dir=train_path,
                             annotation_dir=train_path,
                             need_ext=True,
                             need_id_sub_folder=False,
                             base_branch='a',
                             base_task_id='a',
                             format_type=data_exporter.ExportFormat.EXPORT_FORMAT_VOC,
                             index_file_path=os.path.join(train_path, 'index.tsv'),
                             index_assets_prefix='')
        # check result (xml contents are not inspected, only file presence)
        self.__check_result(asset_ids=asset_ids,
                            format_type=data_exporter.ExportFormat.EXPORT_FORMAT_VOC,
                            export_path=train_path,
                            index_file_path=os.path.join(train_path, 'index.tsv'))
| 43.908425 | 119 | 0.45858 | import os
import shutil
from typing import List, Tuple
import unittest
from google.protobuf import json_format
from mir.protos import mir_command_pb2 as mirpb
from mir.tools import data_exporter, hash_utils, mir_storage_ops
from tests import utils as test_utils
class TestArkDataExporter(unittest.TestCase):
def __init__(self, methodName: str) -> None:
super().__init__(methodName=methodName)
self._test_root = test_utils.dir_test_root(self.id().split('.')[-3:])
self._assets_location = os.path.join(self._test_root, 'assets_location')
self._dest_root = os.path.join(self._test_root, 'export_dest')
self._mir_root = os.path.join(self._test_root, 'mir-repo')
def setUp(self) -> None:
self.__prepare_dirs()
self.__prepare_mir_repo()
self.__prepare_assets()
return super().setUp()
def tearDown(self) -> None:
return super().tearDown()
def __prepare_dirs(self):
test_utils.remake_dirs(self._test_root)
test_utils.remake_dirs(self._assets_location)
test_utils.remake_dirs(self._dest_root)
test_utils.remake_dirs(self._mir_root)
def __deprepare_dirs(self):
if os.path.isdir(self._test_root):
shutil.rmtree(self._test_root)
def __prepare_assets(self):
image_paths = ['tests/assets/2007_000032.jpg', 'tests/assets/2007_000243.jpg']
sha1sum_path_pairs = [(hash_utils.sha1sum_for_file(image_path), image_path)
for image_path in image_paths]
for sha1sum, image_path in sha1sum_path_pairs:
shutil.copyfile(image_path, os.path.join(self._assets_location, sha1sum))
def __prepare_mir_repo(self):
test_utils.mir_repo_init(self._mir_root)
test_utils.mir_repo_create_branch(self._mir_root, 'a')
metadatas_dict = {
'attributes': {
'430df22960b0f369318705800139fcc8ec38a3e4': {
'assetType': 'AssetTypeImageJpeg',
'width': 500,
'height': 281,
'imageChannels': 3
},
'a3008c032eb11c8d9ffcb58208a36682ee40900f': {
'assetType': 'AssetTypeImageJpeg',
'width': 500,
'height': 333,
'imageChannels': 3
}
}
}
mir_metadatas = mirpb.MirMetadatas()
json_format.ParseDict(metadatas_dict, mir_metadatas)
annotations_dict = {
'task_annotations': {
'a': {
'image_annotations': {
'430df22960b0f369318705800139fcc8ec38a3e4': {
'annotations': [{
'index': 0,
'box': {
'x': 104,
'y': 78,
'w': 272,
'h': 105
},
'class_id': 52,
'score': 1,
}, {
'index': 1,
'box': {
'x': 133,
'y': 88,
'w': 65,
'h': 36
},
'class_id': 52,
'score': 1,
}, {
'index': 2,
'box': {
'x': 195,
'y': 180,
'w': 19,
'h': 50
},
'class_id': 2,
'score': 1,
}, {
'index': 3,
'box': {
'x': 26,
'y': 189,
'w': 19,
'h': 95
},
'class_id': 2,
'score': 1,
}]
},
'a3008c032eb11c8d9ffcb58208a36682ee40900f': {
'annotations': [{
'index': 0,
'box': {
'x': 181,
'y': 127,
'w': 94,
'h': 67
},
'class_id': 52,
'score': 1,
}]
},
}
}
},
'head_task_id': 'a',
}
mir_annotations = mirpb.MirAnnotations()
json_format.ParseDict(annotations_dict, mir_annotations)
keywords_dict = {
'keywords': {
'430df22960b0f369318705800139fcc8ec38a3e4': {
'predifined_keyids': [2, 52],
'customized_keywords': ['pascal']
},
'a3008c032eb11c8d9ffcb58208a36682ee40900f': {
'predifined_keyids': [52],
'customized_keywords': ['pascal']
},
}
}
mir_keywords = mirpb.MirKeywords()
json_format.ParseDict(keywords_dict, mir_keywords)
task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData,
task_id='a',
message='import')
mir_storage_ops.MirStorageOps.save_and_commit(mir_root=self._mir_root,
mir_branch='a',
his_branch='master',
mir_datas={
mirpb.MirStorage.MIR_METADATAS: mir_metadatas,
mirpb.MirStorage.MIR_ANNOTATIONS: mir_annotations,
},
task=task)
def __check_result(self, asset_ids, format_type, export_path, index_file_path):
        """Assert that every asset and its annotation file were exported.

        Also checks that the index file exists, contains exactly one line per
        asset, and that each line's (asset, annotation) relative paths resolve
        to real files under export_path.

        Args:
            asset_ids: collection of asset hash ids expected in the export.
            format_type: data_exporter.ExportFormat value selecting the
                annotation extension (ARK -> .txt, VOC -> .xml).
            export_path: directory the assets / annotations were exported to.
            index_file_path: path of the generated index file.

        Raises:
            ValueError: if format_type is not a supported export format.
        """
        for asset_id in asset_ids:
            asset_path = os.path.join(export_path, asset_id + '.jpeg')
            self.assertTrue(os.path.isfile(asset_path))
            if format_type == data_exporter.ExportFormat.EXPORT_FORMAT_ARK:
                annotation_path = os.path.join(export_path, asset_id + '.txt')
            elif format_type == data_exporter.ExportFormat.EXPORT_FORMAT_VOC:
                annotation_path = os.path.join(export_path, asset_id + '.xml')
            else:
                # Fail fast instead of raising UnboundLocalError (or silently
                # re-checking the previous asset's path) on an unknown format.
                raise ValueError('unsupported export format: {}'.format(format_type))
            self.assertTrue(os.path.isfile(annotation_path))
        self.assertTrue(os.path.isfile(index_file_path))
        with open(index_file_path, 'r') as idx_f:
            lines = idx_f.readlines()
        self.assertEqual(len(lines), len(asset_ids))
        for line in lines:
            asset_rel_path, annotation_rel_path = line.split()
            self.assertTrue(os.path.isfile(os.path.join(export_path, asset_rel_path)))
            self.assertTrue(os.path.isfile(os.path.join(export_path, annotation_rel_path)))
def __check_ark_annotations(self, asset_id: str, export_path: str, expected_first_two_cols: List[Tuple[int, int]]):
        """Verify the first two columns of every line in an asset's ARK file.

        The exported `<asset_id>.txt` must have exactly one line per expected
        pair, and each line's first two comma-separated cells must equal the
        corresponding (class id, x) pair.
        """
        annotation_file = os.path.join(export_path, asset_id + '.txt')
        with open(annotation_file, 'r') as fh:
            annotation_lines = fh.read().splitlines()
        self.assertEqual(len(expected_first_two_cols), len(annotation_lines))
        for expected_pair, raw_line in zip(expected_first_two_cols, annotation_lines):
            cells = raw_line.split(',')
            for col_idx in range(2):
                self.assertEqual(expected_pair[col_idx], int(cells[col_idx].strip()))
def test_normal_00(self):
        """Export both assets in ARK (txt) format; verify files, index and columns."""
        asset_ids = {'430df22960b0f369318705800139fcc8ec38a3e4', 'a3008c032eb11c8d9ffcb58208a36682ee40900f'}
        train_path = os.path.join(self._dest_root, 'train')
        # class_type_ids remaps repo class ids to exported label ids: 2 -> 0, 52 -> 1.
        data_exporter.export(mir_root=self._mir_root,
                             assets_location=self._assets_location,
                             class_type_ids={
                                 2: 0,
                                 52: 1
                             },
                             asset_ids=asset_ids,
                             asset_dir=train_path,
                             annotation_dir=train_path,
                             need_ext=True,
                             need_id_sub_folder=False,
                             base_branch='a',
                             base_task_id='a',
                             format_type=data_exporter.ExportFormat.EXPORT_FORMAT_ARK,
                             index_file_path=os.path.join(train_path, 'index.tsv'),
                             index_assets_prefix='')
        self.__check_result(asset_ids=asset_ids,
                            format_type=data_exporter.ExportFormat.EXPORT_FORMAT_ARK,
                            export_path=train_path,
                            index_file_path=os.path.join(train_path, 'index.tsv'))
        # Expected (mapped class id, x) pairs follow the annotation order
        # committed for this asset: classes 52, 52, 2, 2 -> 1, 1, 0, 0.
        self.__check_ark_annotations(asset_id='430df22960b0f369318705800139fcc8ec38a3e4',
                                     export_path=train_path,
                                     expected_first_two_cols=[(1, 104), (1, 133), (0, 195), (0, 26)])
def test_normal_01(self):
        """Export both assets in VOC (xml) format; verify files and index."""
        asset_ids = {'430df22960b0f369318705800139fcc8ec38a3e4', 'a3008c032eb11c8d9ffcb58208a36682ee40900f'}
        train_path = os.path.join(self._dest_root, 'train')
        # Same class id remapping as the ARK test: 2 -> 0, 52 -> 1.
        data_exporter.export(mir_root=self._mir_root,
                             assets_location=self._assets_location,
                             class_type_ids={
                                 2: 0,
                                 52: 1
                             },
                             asset_ids=asset_ids,
                             asset_dir=train_path,
                             annotation_dir=train_path,
                             need_ext=True,
                             need_id_sub_folder=False,
                             base_branch='a',
                             base_task_id='a',
                             format_type=data_exporter.ExportFormat.EXPORT_FORMAT_VOC,
                             index_file_path=os.path.join(train_path, 'index.tsv'),
                             index_assets_prefix='')
        self.__check_result(asset_ids=asset_ids,
                            format_type=data_exporter.ExportFormat.EXPORT_FORMAT_VOC,
                            export_path=train_path,
                            index_file_path=os.path.join(train_path, 'index.tsv'))
| true | true |
f710de29749798d1f874e2fbb0b328d3f88f44de | 5,035 | py | Python | apis_v1/documentation_source/sitewide_daily_metrics_sync_out_doc.py | rajeshwariC/WeVoteServer | 59aff1725b7586ebd360ef40fc1b44e5a0b9572d | [
"MIT"
] | null | null | null | apis_v1/documentation_source/sitewide_daily_metrics_sync_out_doc.py | rajeshwariC/WeVoteServer | 59aff1725b7586ebd360ef40fc1b44e5a0b9572d | [
"MIT"
] | null | null | null | apis_v1/documentation_source/sitewide_daily_metrics_sync_out_doc.py | rajeshwariC/WeVoteServer | 59aff1725b7586ebd360ef40fc1b44e5a0b9572d | [
"MIT"
] | null | null | null | # apis_v1/documentation_source/sitewide_daily_metrics_sync_out_doc.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
def sitewide_daily_metrics_sync_out_doc_template_values(url_root):
    """Return the template context documenting the sitewideDailyMetricsSyncOut API.

    :param url_root: root URL of the API server, used by the "try it now" link.
    :return: dict of values consumed by the API documentation template.
    """
    def _parameter(name, value, description):
        # Small local factory so each query-parameter entry reads as one call.
        return {'name': name, 'value': value, 'description': description}

    required_query_parameter_list = [
        _parameter('api_key',
                   'string (from post, cookie, or get (in that order))',
                   'The unique key provided to any organization using the WeVoteServer APIs'),
        _parameter('voter_device_id',
                   'string',
                   'An 88 character unique identifier linked to a voter record on the server. '
                   'If not provided, a new voter_device_id (and voter entry) '
                   'will be generated, and the voter_device_id will be returned.'),
    ]
    optional_query_parameter_list = [
        _parameter('starting_date_as_integer',
                   'integer',
                   'The earliest date for the batch we are retrieving. Format: YYYYMMDD (ex/ 20200131) '
                   '(Default is 3 months ago)'),
        _parameter('ending_date_as_integer',
                   'integer',
                   'Retrieve data through this date. Format: YYYYMMDD (ex/ 20200228) (Default is right now.)'),
        _parameter('return_csv_format',
                   'boolean',
                   'If set to true, return results in CSV format instead of JSON.'),
    ]
    potential_status_codes_list = []
    try_now_link_variables_dict = {}
    # Sample response body shown to API consumers; every field is an integer
    # daily-metric counter.
    api_response = """[{
    "id": integer,
    "authenticated_visitors_today": integer,
    "authenticated_visitors_total": integer,
    "ballot_views_today": integer: The number of voters that viewed at least one ballot on one day,
    "date_as_integer": integer,
    "entered_full_address": integer,
    "friend_entrants_today": integer,
    "friends_only_positions": integer,
    "individuals_with_friends_only_positions": integer,
    "individuals_with_positions": integer,
    "individuals_with_public_positions": integer,
    "issue_follows_today": integer,
    "issue_follows_total": integer,
    "issues_followed_today": integer,
    "issues_followed_total": integer,
    "issues_linked_today": integer,
    "issues_linked_total": integer,
    "new_visitors_today": integer,
    "organization_public_positions": integer,
    "organizations_auto_followed_today": integer,
    "organizations_auto_followed_total": integer,
    "organizations_followed_today": integer,
    "organizations_followed_total": integer,
    "organizations_signed_in_total": integer,
    "organizations_with_linked_issues": integer,
    "organizations_with_new_positions_today": integer,
    "organizations_with_positions": integer,
    "visitors_today": integer,
    "visitors_total": integer,
    "voter_guide_entrants_today": integer,
    "voter_guides_viewed_today": integer,
    "voter_guides_viewed_total": integer,
    "welcome_page_entrants_today": integer,
}]"""
    template_values = {
        'api_name': 'sitewideDailyMetricsSyncOut',
        'api_slug': 'sitewideDailyMetricsSyncOut',
        'api_introduction':
            "Allow people with Analytics Admin authority to retrieve daily metrics information "
            "for data analysis purposes.",
        'try_now_link': 'apis_v1:sitewideDailyMetricsSyncOutView',
        'try_now_link_variables_dict': try_now_link_variables_dict,
        'url_root': url_root,
        'get_or_post': 'GET',
        'required_query_parameter_list': required_query_parameter_list,
        'optional_query_parameter_list': optional_query_parameter_list,
        'api_response': api_response,
        'api_response_notes':
            "",
        'potential_status_codes_list': potential_status_codes_list,
    }
    return template_values
| 48.413462 | 118 | 0.554916 |
def sitewide_daily_metrics_sync_out_doc_template_values(url_root):
    """Assemble the documentation-template context for sitewideDailyMetricsSyncOut.

    :param url_root: root URL of the API server (used by the doc page).
    :return: dict of template variables.
    """
    required_query_parameter_list = [
        {
            'name': 'api_key',
            'value': 'string (from post, cookie, or get (in that order))',
            'description': 'The unique key provided to any organization using the WeVoteServer APIs',
        },
        {
            'name': 'voter_device_id',
            'value': 'string',
            'description': ('An 88 character unique identifier linked to a voter record on the server. '
                            'If not provided, a new voter_device_id (and voter entry) '
                            'will be generated, and the voter_device_id will be returned.'),
        },
    ]
    optional_query_parameter_list = [
        {
            'name': 'starting_date_as_integer',
            'value': 'integer',
            'description': ('The earliest date for the batch we are retrieving. Format: YYYYMMDD (ex/ 20200131) '
                            '(Default is 3 months ago)'),
        },
        {
            'name': 'ending_date_as_integer',
            'value': 'integer',
            'description': 'Retrieve data through this date. Format: YYYYMMDD (ex/ 20200228) (Default is right now.)',
        },
        {
            'name': 'return_csv_format',
            'value': 'boolean',
            'description': 'If set to true, return results in CSV format instead of JSON.',
        },
    ]
    # Every metric in the sample response is an integer counter, so the sample
    # body is generated from this field list instead of being spelled out.
    metric_fields = (
        'id',
        'authenticated_visitors_today',
        'authenticated_visitors_total',
        'ballot_views_today',
        'date_as_integer',
        'entered_full_address',
        'friend_entrants_today',
        'friends_only_positions',
        'individuals_with_friends_only_positions',
        'individuals_with_positions',
        'individuals_with_public_positions',
        'issue_follows_today',
        'issue_follows_total',
        'issues_followed_today',
        'issues_followed_total',
        'issues_linked_today',
        'issues_linked_total',
        'new_visitors_today',
        'organization_public_positions',
        'organizations_auto_followed_today',
        'organizations_auto_followed_total',
        'organizations_followed_today',
        'organizations_followed_total',
        'organizations_signed_in_total',
        'organizations_with_linked_issues',
        'organizations_with_new_positions_today',
        'organizations_with_positions',
        'visitors_today',
        'visitors_total',
        'voter_guide_entrants_today',
        'voter_guides_viewed_today',
        'voter_guides_viewed_total',
        'welcome_page_entrants_today',
    )
    # Explanatory note appended to a field line, keyed by field name.
    field_notes = {
        'ballot_views_today': ': The number of voters that viewed at least one ballot on one day',
    }
    response_lines = ['[{']
    for field in metric_fields:
        response_lines.append('    "%s": integer%s,' % (field, field_notes.get(field, '')))
    response_lines.append('}]')
    api_response = '\n'.join(response_lines)
    template_values = {
        'api_name': 'sitewideDailyMetricsSyncOut',
        'api_slug': 'sitewideDailyMetricsSyncOut',
        'api_introduction':
            "Allow people with Analytics Admin authority to retrieve daily metrics information "
            "for data analysis purposes.",
        'try_now_link': 'apis_v1:sitewideDailyMetricsSyncOutView',
        'try_now_link_variables_dict': {},
        'url_root': url_root,
        'get_or_post': 'GET',
        'required_query_parameter_list': required_query_parameter_list,
        'optional_query_parameter_list': optional_query_parameter_list,
        'api_response': api_response,
        'api_response_notes':
            "",
        'potential_status_codes_list': [],
    }
    return template_values
| true | true |
f710de970e7fba982966b7b605985bbabc605981 | 447 | py | Python | bibliohub/catalog/urls.py | apjanco/bibliohub | 95da034d2e136bd4ae25a9b6932fd19124dacd9b | [
"MIT"
] | null | null | null | bibliohub/catalog/urls.py | apjanco/bibliohub | 95da034d2e136bd4ae25a9b6932fd19124dacd9b | [
"MIT"
] | null | null | null | bibliohub/catalog/urls.py | apjanco/bibliohub | 95da034d2e136bd4ae25a9b6932fd19124dacd9b | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
from .views import SearchResultsView, HomePageView
urlpatterns = [
path('', views.index, name='index'),
# path('books/', views.BookListView.as_view(), name='books'),
path('search/', SearchResultsView.as_view(), name='search_results'),
path('home/', HomePageView.as_view(),name='home'),
# path('author_search/', AuthorSearchResultsView.as_view(), name='author_search_results'),
] | 44.7 | 94 | 0.711409 | from django.urls import path
from . import views
from .views import SearchResultsView, HomePageView
urlpatterns = [
path('', views.index, name='index'),
path('search/', SearchResultsView.as_view(), name='search_results'),
path('home/', HomePageView.as_view(),name='home'),
] | true | true |
f710e12932440d3e0decd6e77f4a75490177b6e2 | 14,465 | py | Python | pgmpy/readwrite/XMLBIF.py | NunoEdgarGFlowHub/pgmpy | ac0ecc8f5bdd14999c386c6b00a3ce77407b83ce | [
"MIT"
] | 1 | 2016-08-27T18:30:57.000Z | 2016-08-27T18:30:57.000Z | pgmpy/readwrite/XMLBIF.py | NunoEdgarGFlowHub/pgmpy | ac0ecc8f5bdd14999c386c6b00a3ce77407b83ce | [
"MIT"
] | null | null | null | pgmpy/readwrite/XMLBIF.py | NunoEdgarGFlowHub/pgmpy | ac0ecc8f5bdd14999c386c6b00a3ce77407b83ce | [
"MIT"
] | 1 | 2016-08-27T18:31:00.000Z | 2016-08-27T18:31:00.000Z | #!/usr/bin/env python
try:
    from lxml import etree
except ImportError:
    try:
        import xml.etree.ElementTree as etree
    except ImportError:
        # xml.etree.cElementTree is deliberately not used: it raises errors
        # when elements carry dictionary attributes.
        # Raise instead of print(): continuing with `etree` unbound would only
        # defer the failure to an opaque NameError at first use.
        raise ImportError("Failed to import ElementTree from any known place")
import numpy as np
from pgmpy.models import BayesianModel
from pgmpy.factors import TabularCPD, State
from pgmpy.extern.six.moves import map, range
class XMLBIFReader(object):
    """
    Base class for reading network file in XMLBIF format.
    """
    def __init__(self, path=None, string=None):
        """
        Initialisation of XMLBIFReader object.

        Parameters
        ----------
        path : file or str
            File of XMLBIF data
        string : str
            String of XMLBIF data

        Raises
        ------
        ValueError
            If neither `path` nor `string` is given.

        Examples
        --------
        # xmlbif_test.xml is the file present in
        # http://www.cs.cmu.edu/~fgcozman/Research/InterchangeFormat/
        >>> reader = XMLBIFReader("xmlbif_test.xml")
        """
        if path:
            self.network = etree.ElementTree(file=path).getroot().find('NETWORK')
        elif string:
            self.network = etree.fromstring(string).find('NETWORK')
        else:
            raise ValueError("Must specify either path or string")
        self.network_name = self.network.find('NAME').text
        # Parse everything eagerly so the attributes below are ready to use.
        self.variables = self.get_variables()
        self.variable_parents = self.get_parents()
        self.edge_list = self.get_edges()
        self.variable_states = self.get_states()
        self.variable_CPD = self.get_cpd()
        self.variable_property = self.get_property()

    def get_variables(self):
        """
        Returns list of variables of the network

        Examples
        --------
        >>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
        >>> reader.get_variables()
        ['light-on', 'bowel-problem', 'dog-out', 'hear-bark', 'family-out']
        """
        variables = [variable.find('NAME').text for variable in self.network.findall('VARIABLE')]
        return variables

    def get_edges(self):
        """
        Returns the edges of the network as [parent, child] pairs.

        Examples
        --------
        >>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
        >>> reader.get_edges()
        [['family-out', 'light-on'],
         ['family-out', 'dog-out'],
         ['bowel-problem', 'dog-out'],
         ['dog-out', 'hear-bark']]
        """
        edge_list = [[value, key] for key in self.variable_parents
                     for value in self.variable_parents[key]]
        return edge_list

    def get_states(self):
        """
        Returns the states of variables present in the network

        Examples
        --------
        >>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
        >>> reader.get_states()
        {'bowel-problem': ['true', 'false'],
         'dog-out': ['true', 'false'],
         'family-out': ['true', 'false'],
         'hear-bark': ['true', 'false'],
         'light-on': ['true', 'false']}
        """
        variable_states = {variable.find('NAME').text: [outcome.text for outcome in variable.findall('OUTCOME')]
                           for variable in self.network.findall('VARIABLE')}
        return variable_states

    def get_parents(self):
        """
        Returns the parents of the variables present in the network

        Examples
        --------
        >>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
        >>> reader.get_parents()
        {'bowel-problem': [],
         'dog-out': ['family-out', 'bowel-problem'],
         'family-out': [],
         'hear-bark': ['dog-out'],
         'light-on': ['family-out']}
        """
        # GIVEN tags are stored reversed; get_model relies on this ordering
        # when pairing evidence with evidence_card.
        variable_parents = {definition.find('FOR').text: [edge.text for edge in definition.findall('GIVEN')][::-1]
                            for definition in self.network.findall('DEFINITION')}
        return variable_parents

    def get_cpd(self):
        """
        Returns the CPD of the variables present in the network

        Examples
        --------
        >>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
        >>> reader.get_cpd()
        {'bowel-problem': array([[ 0.01],
                                 [ 0.99]]),
         'dog-out': array([[ 0.99,  0.01,  0.97,  0.03],
                           [ 0.9 ,  0.1 ,  0.3 ,  0.7 ]]),
         'family-out': array([[ 0.15],
                              [ 0.85]]),
         'hear-bark': array([[ 0.7 ,  0.3 ],
                             [ 0.01,  0.99]]),
         'light-on': array([[ 0.6 ,  0.4 ],
                            [ 0.05,  0.95]])}
        """
        variable_CPD = {definition.find('FOR').text: list(map(float, table.text.split()))
                        for definition in self.network.findall('DEFINITION')
                        for table in definition.findall('TABLE')}
        for variable in variable_CPD:
            arr = np.array(variable_CPD[variable])
            # Reshape flat TABLE values to (n_states, n_parent_configurations).
            arr = arr.reshape((len(self.variable_states[variable]),
                               arr.size//len(self.variable_states[variable])))
            variable_CPD[variable] = arr
        return variable_CPD

    def get_property(self):
        """
        Returns the property of the variable

        Examples
        --------
        >>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
        >>> reader.get_property()
        {'bowel-problem': ['position = (190, 69)'],
         'dog-out': ['position = (155, 165)'],
         'family-out': ['position = (112, 69)'],
         'hear-bark': ['position = (154, 241)'],
         'light-on': ['position = (73, 165)']}
        """
        variable_property = {variable.find('NAME').text: [property.text for property in variable.findall('PROPERTY')]
                             for variable in self.network.findall('VARIABLE')}
        return variable_property

    def get_model(self):
        """
        Returns a BayesianModel built from the parsed network, with the CPD of
        every variable attached and node properties copied onto graph nodes.
        """
        model = BayesianModel(self.get_edges())
        model.name = self.network_name
        tabular_cpds = []
        for var, values in self.variable_CPD.items():
            cpd = TabularCPD(var, len(self.variable_states[var]), values,
                             evidence=self.variable_parents[var],
                             evidence_card=[len(self.variable_states[evidence_var])
                                            for evidence_var in self.variable_parents[var]])
            tabular_cpds.append(cpd)
        model.add_cpds(*tabular_cpds)
        for node, properties in self.variable_property.items():
            for prop in properties:
                # Split on the first '=' only, so property values that
                # themselves contain '=' are preserved intact.
                prop_name, prop_value = map(lambda t: t.strip(), prop.split('=', 1))
                model.node[node][prop_name] = prop_value
        return model
class XMLBIFWriter(object):
    """
    Base class for writing XMLBIF network file format.
    """
    def __init__(self, model, encoding='utf-8', prettyprint=True):
        """
        Initialise a XMLBIFWriter object.

        Parameters
        ----------
        model: BayesianModel Instance
            Model to write
        encoding: str (optional)
            Encoding for text data
        prettyprint: Bool(optional)
            Indentation in output XML if true

        Examples
        --------
        >>> writer = XMLBIFWriter(model)
        """
        if not isinstance(model, BayesianModel):
            raise TypeError("model must an instance of BayesianModel")
        self.model = model
        self.encoding = encoding
        self.prettyprint = prettyprint
        # Document skeleton: <BIF version="0.3"><NETWORK>...</NETWORK></BIF>.
        self.xml = etree.Element("BIF", attrib={'version': '0.3'})
        self.network = etree.SubElement(self.xml, 'NETWORK')
        if self.model.name:
            etree.SubElement(self.network, 'NAME').text = self.model.name
        # Build the document top-down; each call appends elements in place.
        self.variables = self.get_variables()
        self.states = self.get_states()
        self.properties = self.get_properties()
        self.definition = self.get_definition()
        self.tables = self.get_cpd()
    def __str__(self):
        """
        Return the XML as string (bytes in the configured encoding).
        """
        if self.prettyprint:
            self.indent(self.xml)
        return etree.tostring(self.xml, encoding=self.encoding)
    def indent(self, elem, level=0):
        """
        Inplace prettyprint formatter.
        """
        # Standard ElementTree indentation recipe: two spaces per level.
        i = "\n" + level*"  "
        if len(elem):
            if not elem.text or not elem.text.strip():
                elem.text = i + "  "
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
            # The loop variable intentionally shadows `elem`; after the loop
            # `elem` refers to the last child, whose tail is reset to the
            # parent's indentation level (part of the original recipe).
            for elem in elem:
                self.indent(elem, level+1)
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
        else:
            if level and (not elem.tail or not elem.tail.strip()):
                elem.tail = i
    def get_variables(self):
        """
        Add variables to XMLBIF

        Return
        ------
        dict: dict of type {variable: variable tags}

        Examples
        --------
        >>> writer = XMLBIFWriter(model)
        >>> writer.get_variables()
        {'bowel-problem': <Element VARIABLE at 0x7fe28607dd88>,
         'family-out': <Element VARIABLE at 0x7fe28607de08>,
         'hear-bark': <Element VARIABLE at 0x7fe28607de48>,
         'dog-out': <Element VARIABLE at 0x7fe28607ddc8>,
         'light-on': <Element VARIABLE at 0x7fe28607de88>}
        """
        variables = self.model.nodes()
        variable_tag = {}
        # Sorted for a deterministic element order in the output document.
        for var in sorted(variables):
            variable_tag[var] = etree.SubElement(self.network, "VARIABLE", attrib={'TYPE': 'nature'})
            etree.SubElement(variable_tag[var], "NAME").text = var
        return variable_tag
    def get_states(self):
        """
        Add outcome to variables of XMLBIF

        Return
        ------
        dict: dict of type {variable: outcome tags}

        Examples
        --------
        >>> writer = XMLBIFWriter(model)
        >>> writer.get_states()
        {'dog-out': [<Element OUTCOME at 0x7ffbabfcdec8>, <Element OUTCOME at 0x7ffbabfcdf08>],
         'family-out': [<Element OUTCOME at 0x7ffbabfd4108>, <Element OUTCOME at 0x7ffbabfd4148>],
         'bowel-problem': [<Element OUTCOME at 0x7ffbabfd4088>, <Element OUTCOME at 0x7ffbabfd40c8>],
         'hear-bark': [<Element OUTCOME at 0x7ffbabfcdf48>, <Element OUTCOME at 0x7ffbabfcdf88>],
         'light-on': [<Element OUTCOME at 0x7ffbabfcdfc8>, <Element OUTCOME at 0x7ffbabfd4048>]}
        """
        outcome_tag = {}
        cpds = self.model.get_cpds()
        for cpd in cpds:
            var = cpd.variable
            outcome_tag[var] = []
            # One OUTCOME element per state; the numeric state index (0, 1,
            # ...) is written as the outcome name, not a state label.
            for state in [State(var, state) for state in range(cpd.get_cardinality([var])[var])]:
                state_tag = etree.SubElement(self.variables[var], "OUTCOME")
                state_tag.text = str(state.state)
                outcome_tag[var].append(state_tag)
        return outcome_tag
    def get_properties(self):
        """
        Add property to variables in XMLBIF

        Return
        ------
        dict: dict of type {variable: property tag}

        Examples
        --------
        >>> writer = XMLBIFWriter(model)
        >>> writer.get_property()
        {'light-on': <Element PROPERTY at 0x7f7a2ffac1c8>,
         'family-out': <Element PROPERTY at 0x7f7a2ffac148>,
         'hear-bark': <Element PROPERTY at 0x7f7a2ffac188>,
         'bowel-problem': <Element PROPERTY at 0x7f7a2ffac0c8>,
         'dog-out': <Element PROPERTY at 0x7f7a2ffac108>}
        """
        variables = self.model.nodes()
        property_tag = {}
        for var in sorted(variables):
            properties = self.model.node[var]
            # NOTE(review): one PROPERTY element is created per variable (even
            # when the node has no attributes) and its text is overwritten on
            # every loop iteration, so only the last attribute survives
            # serialisation; confirm whether multiple properties per variable
            # should each get their own element.
            property_tag[var] = etree.SubElement(self.variables[var], "PROPERTY")
            for prop, val in properties.items():
                property_tag[var].text = str(prop) + " = " + str(val)
        return property_tag
    def get_definition(self):
        """
        Add Definition to XMLBIF

        Return
        ------
        dict: dict of type {variable: definition tag}

        Examples
        --------
        >>> writer = XMLBIFWriter(model)
        >>> writer.get_definition()
        {'hear-bark': <Element DEFINITION at 0x7f1d48977408>,
         'family-out': <Element DEFINITION at 0x7f1d489773c8>,
         'dog-out': <Element DEFINITION at 0x7f1d48977388>,
         'bowel-problem': <Element DEFINITION at 0x7f1d48977348>,
         'light-on': <Element DEFINITION at 0x7f1d48977448>}
        """
        cpds = self.model.get_cpds()
        cpds.sort(key=lambda x: x.variable)
        definition_tag = {}
        for cpd in cpds:
            definition_tag[cpd.variable] = etree.SubElement(self.network, "DEFINITION")
            etree.SubElement(definition_tag[cpd.variable], "FOR").text = cpd.variable
            # cpd.evidence may be None for root nodes; GIVEN children are
            # written in sorted order.
            for child in sorted([] if cpd.evidence is None else cpd.evidence):
                etree.SubElement(definition_tag[cpd.variable], "GIVEN").text = child
        return definition_tag
    def get_cpd(self):
        """
        Add Table to XMLBIF.

        Return
        ---------------
        dict: dict of type {variable: table tag}

        Examples
        -------
        >>> writer = XMLBIFWriter(model)
        >>> writer.get_cpd()
        {'dog-out': <Element TABLE at 0x7f240726f3c8>,
         'light-on': <Element TABLE at 0x7f240726f488>,
         'bowel-problem': <Element TABLE at 0x7f240726f388>,
         'family-out': <Element TABLE at 0x7f240726f408>,
         'hear-bark': <Element TABLE at 0x7f240726f448>}
        """
        cpds = self.model.get_cpds()
        definition_tag = self.definition
        table_tag = {}
        for cpd in cpds:
            table_tag[cpd.variable] = etree.SubElement(definition_tag[cpd.variable], "TABLE")
            table_tag[cpd.variable].text = ''
            # Flatten the CPD values row-major; a trailing space after the
            # final value is part of the produced output.
            for val in cpd.values.ravel():
                table_tag[cpd.variable].text += str(val) + ' '
        return table_tag
    def write_xmlbif(self, filename):
        """
        Write the xml data into the file.

        Parameters
        ----------
        filename: Name of the file.

        Examples
        -------
        >>> writer = XMLBIFWriter(model)
        >>> writer.write_xmlbif(test_file)
        """
        # NOTE(review): __str__ returns bytes; [:-1] drops the final byte
        # before decoding -- presumably a trailing newline from the
        # serializer. Verify against the etree backend in use (lxml vs
        # stdlib ElementTree).
        writer = self.__str__()[:-1].decode('utf-8')
        with open(filename, 'w') as fout:
            fout.write(writer)
| 35.109223 | 117 | 0.55382 |
try:
from lxml import etree
except ImportError:
try:
import xml.etree.ElementTree as etree
except ImportError:
print("Failed to import ElementTree from any known place")
import numpy as np
from pgmpy.models import BayesianModel
from pgmpy.factors import TabularCPD, State
from pgmpy.extern.six.moves import map, range
class XMLBIFReader(object):
def __init__(self, path=None, string=None):
if path:
self.network = etree.ElementTree(file=path).getroot().find('NETWORK')
elif string:
self.network = etree.fromstring(string).find('NETWORK')
else:
raise ValueError("Must specify either path or string")
self.network_name = self.network.find('NAME').text
self.variables = self.get_variables()
self.variable_parents = self.get_parents()
self.edge_list = self.get_edges()
self.variable_states = self.get_states()
self.variable_CPD = self.get_cpd()
self.variable_property = self.get_property()
def get_variables(self):
variables = [variable.find('NAME').text for variable in self.network.findall('VARIABLE')]
return variables
def get_edges(self):
edge_list = [[value, key] for key in self.variable_parents
for value in self.variable_parents[key]]
return edge_list
def get_states(self):
variable_states = {variable.find('NAME').text: [outcome.text for outcome in variable.findall('OUTCOME')]
for variable in self.network.findall('VARIABLE')}
return variable_states
def get_parents(self):
variable_parents = {definition.find('FOR').text: [edge.text for edge in definition.findall('GIVEN')][::-1]
for definition in self.network.findall('DEFINITION')}
return variable_parents
def get_cpd(self):
variable_CPD = {definition.find('FOR').text: list(map(float, table.text.split()))
for definition in self.network.findall('DEFINITION')
for table in definition.findall('TABLE')}
for variable in variable_CPD:
arr = np.array(variable_CPD[variable])
arr = arr.reshape((len(self.variable_states[variable]),
arr.size//len(self.variable_states[variable])))
variable_CPD[variable] = arr
return variable_CPD
def get_property(self):
variable_property = {variable.find('NAME').text: [property.text for property in variable.findall('PROPERTY')]
for variable in self.network.findall('VARIABLE')}
return variable_property
def get_model(self):
model = BayesianModel(self.get_edges())
model.name = self.network_name
tabular_cpds = []
for var, values in self.variable_CPD.items():
cpd = TabularCPD(var, len(self.variable_states[var]), values,
evidence=self.variable_parents[var],
evidence_card=[len(self.variable_states[evidence_var])
for evidence_var in self.variable_parents[var]])
tabular_cpds.append(cpd)
model.add_cpds(*tabular_cpds)
for node, properties in self.variable_property.items():
for prop in properties:
prop_name, prop_value = map(lambda t: t.strip(), prop.split('='))
model.node[node][prop_name] = prop_value
return model
class XMLBIFWriter(object):
def __init__(self, model, encoding='utf-8', prettyprint=True):
if not isinstance(model, BayesianModel):
raise TypeError("model must an instance of BayesianModel")
self.model = model
self.encoding = encoding
self.prettyprint = prettyprint
self.xml = etree.Element("BIF", attrib={'version': '0.3'})
self.network = etree.SubElement(self.xml, 'NETWORK')
if self.model.name:
etree.SubElement(self.network, 'NAME').text = self.model.name
self.variables = self.get_variables()
self.states = self.get_states()
self.properties = self.get_properties()
self.definition = self.get_definition()
self.tables = self.get_cpd()
def __str__(self):
if self.prettyprint:
self.indent(self.xml)
return etree.tostring(self.xml, encoding=self.encoding)
def indent(self, elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def get_variables(self):
variables = self.model.nodes()
variable_tag = {}
for var in sorted(variables):
variable_tag[var] = etree.SubElement(self.network, "VARIABLE", attrib={'TYPE': 'nature'})
etree.SubElement(variable_tag[var], "NAME").text = var
return variable_tag
def get_states(self):
outcome_tag = {}
cpds = self.model.get_cpds()
for cpd in cpds:
var = cpd.variable
outcome_tag[var] = []
for state in [State(var, state) for state in range(cpd.get_cardinality([var])[var])]:
state_tag = etree.SubElement(self.variables[var], "OUTCOME")
state_tag.text = str(state.state)
outcome_tag[var].append(state_tag)
return outcome_tag
def get_properties(self):
variables = self.model.nodes()
property_tag = {}
for var in sorted(variables):
properties = self.model.node[var]
property_tag[var] = etree.SubElement(self.variables[var], "PROPERTY")
for prop, val in properties.items():
property_tag[var].text = str(prop) + " = " + str(val)
return property_tag
def get_definition(self):
cpds = self.model.get_cpds()
cpds.sort(key=lambda x: x.variable)
definition_tag = {}
for cpd in cpds:
definition_tag[cpd.variable] = etree.SubElement(self.network, "DEFINITION")
etree.SubElement(definition_tag[cpd.variable], "FOR").text = cpd.variable
for child in sorted([] if cpd.evidence is None else cpd.evidence):
etree.SubElement(definition_tag[cpd.variable], "GIVEN").text = child
return definition_tag
def get_cpd(self):
cpds = self.model.get_cpds()
definition_tag = self.definition
table_tag = {}
for cpd in cpds:
table_tag[cpd.variable] = etree.SubElement(definition_tag[cpd.variable], "TABLE")
table_tag[cpd.variable].text = ''
for val in cpd.values.ravel():
table_tag[cpd.variable].text += str(val) + ' '
return table_tag
def write_xmlbif(self, filename):
writer = self.__str__()[:-1].decode('utf-8')
with open(filename, 'w') as fout:
fout.write(writer)
| true | true |
f710e191022a1dfd9848a7665725db9fc3dd3f11 | 1,818 | py | Python | dev.py | pkeilbach/pyredis | 3bc019e8e366ab1c4705dba5254a852476069e46 | [
"MIT"
] | 3 | 2021-04-15T16:48:07.000Z | 2021-08-17T10:58:37.000Z | dev.py | pkeilbach/pyredis | 3bc019e8e366ab1c4705dba5254a852476069e46 | [
"MIT"
] | null | null | null | dev.py | pkeilbach/pyredis | 3bc019e8e366ab1c4705dba5254a852476069e46 | [
"MIT"
] | null | null | null | from pyredis import RedisConnection
from pprint import pprint
# 1. Object creation
# RedisConnection accepts everything you would pass to redis.Redis().
redis_args = {
    'host': 'localhost',
    # 'password': 'redis1234',
    # 'port': 1234,
}
with RedisConnection(**redis_args) as my_redis:
    my_redis.set('key', 'value')
# 2. Redis get and set
# redis set: values may be strings, lists or dicts
with RedisConnection(**redis_args) as my_redis:
    my_redis.set('a_sting', 'my_sting value')
    my_redis.set('a_list', [1, 4, 3, 2])
    my_redis.set('a_dict', {'key_1': 'val_1', 'key_2': 'val_2'})
# redis get
with RedisConnection(**redis_args) as my_redis:
    data = my_redis.get('a_dict')
    # data is already converted back to a dict by RedisConnection
    print(type(data))
# 3. Handle lists and dicts
# get multiple keys / data
with RedisConnection(**redis_args) as my_redis:
    # get all keys that start with a_
    pattern = 'a_'
    keys = my_redis.get_key_pattern(pattern)
    print(f"list of all keys that start with {pattern}: {keys}")
    data = my_redis.get_data_for_keys(keys)
    print(f"data of all keys that start with {pattern}: {data}")
    # or retrieve the data as a key: data dictionary for a specific pattern
    print('data as key: data dictionary for a pattern:')
    data = my_redis.get_keys('a_')
    pprint(data)
# set all entries of a dictionary to redis
data = {'a': 12, 'b': 'myvalue'}
with RedisConnection(**redis_args) as my_redis:
    # you can continue working with the keys returned by set_dict
    keys = my_redis.set_dict(data)
    print(my_redis.get('a'))
    print(my_redis.get(keys[1]))
# 4. Fallback
# or work directly on the redis.Redis() object as you would with the official package
# by using the RedisConnection.R attribute
with RedisConnection(**redis_args) as my_redis:
    print('access redis client through object...')
print(my_redis.R.get('a_dict')) | 30.3 | 85 | 0.689769 | from pyredis import RedisConnection
from pprint import pprint
redis_args = {
'host': 'localhost',
}
with RedisConnection(**redis_args) as my_redis:
my_redis.set('key', 'value')
with RedisConnection(**redis_args) as my_redis:
my_redis.set('a_sting', 'my_sting value')
my_redis.set('a_list', [1, 4, 3, 2])
my_redis.set('a_dict', {'key_1': 'val_1', 'key_2': 'val_2'})
with RedisConnection(**redis_args) as my_redis:
data = my_redis.get('a_dict')
print(type(data))
with RedisConnection(**redis_args) as my_redis:
pattern = 'a_'
keys = my_redis.get_key_pattern(pattern)
print(f"list of all keys that start with {pattern}: {keys}")
data = my_redis.get_data_for_keys(keys)
print(f"data of all keys that start with {pattern}: {data}")
print('data as key: data dictionary for a pattern:')
data = my_redis.get_keys('a_')
pprint(data)
data = {'a': 12, 'b': 'myvalue'}
with RedisConnection(**redis_args) as my_redis:
keys = my_redis.set_dict(data)
print(my_redis.get('a'))
print(my_redis.get(keys[1]))
with RedisConnection(**redis_args) as my_redis:
print('access redis client through object...')
print(my_redis.R.get('a_dict')) | true | true |
f710e20958dad9de518259a06788cf29354580c5 | 2,293 | py | Python | UVa 10020 - Minimal Coverage/sample/main.py | tadvi/uva | 0ac0cbdf593879b4fb02a3efc09adbb031cb47d5 | [
"MIT"
] | 1 | 2020-11-24T03:17:21.000Z | 2020-11-24T03:17:21.000Z | UVa 10020 - Minimal Coverage/sample/main.py | tadvi/uva | 0ac0cbdf593879b4fb02a3efc09adbb031cb47d5 | [
"MIT"
] | null | null | null | UVa 10020 - Minimal Coverage/sample/main.py | tadvi/uva | 0ac0cbdf593879b4fb02a3efc09adbb031cb47d5 | [
"MIT"
] | 1 | 2021-04-11T16:22:31.000Z | 2021-04-11T16:22:31.000Z | '''
Created on Jul 17, 2013
@author: Yubin Bai
'''
import time
from multiprocessing.pool import Pool
parallelSolve = False
INF = 1 << 31
def solve(par):
    """Pick a minimal set of segments covering the interval [0, M].

    par is a tuple (M, segments), where segments is a list of [l, r]
    pairs.  Returns the chosen segments as a newline-joined "l r" string,
    or the int 0 when [0, M] cannot be fully covered.

    Fixes two defects in the earlier greedy:
      * a gap at the start (no segment reaching 0) went undetected, so a
        partial cover could be reported as a solution;
      * every right-extending segment was kept, which is not minimal; the
        classic greedy — among segments starting inside the covered
        prefix, take the one reaching farthest right — is used instead.
    """
    M, segments = par
    # Discard segments that do not intersect the open interval (0, M).
    segs = sorted(tuple(p) for p in segments if p[0] < M and p[1] > 0)
    chosen = []
    covered = 0          # [0, covered] is already covered
    i = 0
    n = len(segs)
    while covered < M:
        # Among the segments starting at or before `covered`, remember the
        # one extending farthest to the right.
        best = None
        while i < n and segs[i][0] <= covered:
            if best is None or segs[i][1] > best[1]:
                best = segs[i]
            i += 1
        if best is None or best[1] <= covered:
            return 0  # gap: nothing extends the covered prefix
        chosen.append(best)
        covered = best[1]
    return '\n'.join('%d %d' % seg for seg in chosen)
class Solver:
    """Driver that reads test cases from input.txt, solves each one with
    the module-level solve() function, and writes results to output.txt.
    """

    def getInput(self):
        """Parse input.txt into self.input as a list of (M, pairs) tuples.

        Per-test format: a blank separator line, the target M, then "l r"
        segment lines terminated by a "0 0" sentinel.
        """
        self.numOfTests = int(self.fIn.readline())
        self.input = []
        for itertest in range(self.numOfTests):
            line = self.fIn.readline().strip()  # blank separator line
            M = int(self.fIn.readline())
            pairs = []
            while True:
                # list(...) is required on Python 3, where map() returns a
                # lazy iterator that cannot be indexed (pair[0] below).
                pair = list(map(int, self.fIn.readline().split()))
                if pair[0] == 0 and pair[1] == 0:
                    break
                pairs.append(pair)
            self.input.append((M, pairs))

    def __init__(self):
        # Both files stay open for the object's lifetime; makeOutput()
        # closes them at the end of a run.
        self.fIn = open('input.txt')
        self.fOut = open('output.txt', 'w')
        self.results = []

    def parallel(self):
        """Solve all test cases on a pool of 4 worker processes."""
        self.getInput()
        p = Pool(4)
        millis1 = int(round(time.time() * 1000))
        try:
            self.results = p.map(solve, self.input)
        finally:
            # Release the worker processes (the pool was leaked before).
            p.close()
            p.join()
        millis2 = int(round(time.time() * 1000))
        print("Time in milliseconds: %d " % (millis2 - millis1))
        self.makeOutput()

    def sequential(self):
        """Solve all test cases in-process, one after another."""
        self.getInput()
        millis1 = int(round(time.time() * 1000))
        for i in self.input:
            self.results.append(solve(i))
        millis2 = int(round(time.time() * 1000))
        print("Time in milliseconds: %d " % (millis2 - millis1))
        self.makeOutput()

    def makeOutput(self):
        """Write one result per test case to output.txt and close both files."""
        for test in range(self.numOfTests):
            self.fOut.write("%s\n\n" % self.results[test])
        self.fIn.close()
        self.fOut.close()
if __name__ == '__main__':
    # Entry point: run in whichever mode the parallelSolve flag selects.
    runner = Solver()
    if parallelSolve:
        runner.parallel()
    else:
        runner.sequential()
| 26.356322 | 64 | 0.516354 | import time
from multiprocessing.pool import Pool
parallelSolve = False
INF = 1 << 31
def solve(par):
    """Pick a minimal set of segments covering the interval [0, M].

    par is a tuple (M, segments), where segments is a list of [l, r]
    pairs.  Returns the chosen segments as a newline-joined "l r" string,
    or the int 0 when [0, M] cannot be fully covered.

    Fixes two defects in the earlier greedy:
      * a gap at the start (no segment reaching 0) went undetected, so a
        partial cover could be reported as a solution;
      * every right-extending segment was kept, which is not minimal; the
        classic greedy — among segments starting inside the covered
        prefix, take the one reaching farthest right — is used instead.
    """
    M, segments = par
    # Discard segments that do not intersect the open interval (0, M).
    segs = sorted(tuple(p) for p in segments if p[0] < M and p[1] > 0)
    chosen = []
    covered = 0          # [0, covered] is already covered
    i = 0
    n = len(segs)
    while covered < M:
        # Among the segments starting at or before `covered`, remember the
        # one extending farthest to the right.
        best = None
        while i < n and segs[i][0] <= covered:
            if best is None or segs[i][1] > best[1]:
                best = segs[i]
            i += 1
        if best is None or best[1] <= covered:
            return 0  # gap: nothing extends the covered prefix
        chosen.append(best)
        covered = best[1]
    return '\n'.join('%d %d' % seg for seg in chosen)
class Solver:
    """Driver that reads test cases from input.txt, solves each one with
    the module-level solve() function, and writes results to output.txt.
    """

    def getInput(self):
        """Parse input.txt into self.input as a list of (M, pairs) tuples.

        Per-test format: a blank separator line, the target M, then "l r"
        segment lines terminated by a "0 0" sentinel.
        """
        self.numOfTests = int(self.fIn.readline())
        self.input = []
        for itertest in range(self.numOfTests):
            line = self.fIn.readline().strip()  # blank separator line
            M = int(self.fIn.readline())
            pairs = []
            while True:
                # list(...) is required on Python 3, where map() returns a
                # lazy iterator that cannot be indexed (pair[0] below).
                pair = list(map(int, self.fIn.readline().split()))
                if pair[0] == 0 and pair[1] == 0:
                    break
                pairs.append(pair)
            self.input.append((M, pairs))

    def __init__(self):
        # Both files stay open for the object's lifetime; makeOutput()
        # closes them at the end of a run.
        self.fIn = open('input.txt')
        self.fOut = open('output.txt', 'w')
        self.results = []

    def parallel(self):
        """Solve all test cases on a pool of 4 worker processes."""
        self.getInput()
        p = Pool(4)
        millis1 = int(round(time.time() * 1000))
        try:
            self.results = p.map(solve, self.input)
        finally:
            # Release the worker processes (the pool was leaked before).
            p.close()
            p.join()
        millis2 = int(round(time.time() * 1000))
        print("Time in milliseconds: %d " % (millis2 - millis1))
        self.makeOutput()

    def sequential(self):
        """Solve all test cases in-process, one after another."""
        self.getInput()
        millis1 = int(round(time.time() * 1000))
        for i in self.input:
            self.results.append(solve(i))
        millis2 = int(round(time.time() * 1000))
        print("Time in milliseconds: %d " % (millis2 - millis1))
        self.makeOutput()

    def makeOutput(self):
        """Write one result per test case to output.txt and close both files."""
        for test in range(self.numOfTests):
            self.fOut.write("%s\n\n" % self.results[test])
        self.fIn.close()
        self.fOut.close()
if __name__ == '__main__':
    # Entry point: run in whichever mode the parallelSolve flag selects.
    runner = Solver()
    if parallelSolve:
        runner.parallel()
    else:
        runner.sequential()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.